language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/support/EnvironmentPostProcessorsFactory.java | {
"start": 3031,
"end": 3304
} | class ____
* @return an {@link EnvironmentPostProcessorsFactory} instance
*/
static EnvironmentPostProcessorsFactory of(@Nullable ClassLoader classLoader, String... classNames) {
return new ReflectionEnvironmentPostProcessorsFactory(classLoader, classNames);
}
}
| names |
java | elastic__elasticsearch | x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/existence/LocalStateAutoscalingAndSearchableSnapshotsAndIndexLifecycle.java | {
"start": 822,
"end": 1343
} | class ____ extends LocalStateAutoscalingAndSearchableSnapshots {
public LocalStateAutoscalingAndSearchableSnapshotsAndIndexLifecycle(final Settings settings) {
super(settings);
plugins.add(new IndexLifecycle(settings) {
@Override
protected XPackLicenseState getLicenseState() {
return LocalStateAutoscalingAndSearchableSnapshotsAndIndexLifecycle.this.getLicenseState();
}
});
}
}
| LocalStateAutoscalingAndSearchableSnapshotsAndIndexLifecycle |
java | quarkusio__quarkus | integration-tests/maven/src/test/java/io/quarkus/maven/it/CodeGenIT.java | {
"start": 623,
"end": 1648
} | class ____ extends RunAndCheckMojoTestBase {
@Test
public void shouldCompileAndRunWithCodegenEnabled() throws MavenInvocationException, FileNotFoundException {
testDir = initProject("projects/proto-gen");
run(true);
assertThat(devModeClient.getHttpResponse("/hello")).isEqualTo("Hello, World!");
}
@Test
public void shouldFailToCompileWithCodegenDisabled() throws MavenInvocationException, IOException, InterruptedException {
testDir = initProject("projects/proto-gen", "projects/proto-gen-failing");
final File applicationProps = new File(testDir, "src/main/resources/application.properties");
filter(applicationProps, Collections.singletonMap("quarkus.grpc.codegen.skip=false", "quarkus.grpc.codegen.skip=true"));
running = new RunningInvoker(testDir, false);
MavenProcessInvocationResult compile = running.execute(List.of("compile"), Collections.emptyMap());
assertThat(compile.getProcess().waitFor()).isNotZero();
}
}
| CodeGenIT |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/annotations/Comment.java | {
"start": 888,
"end": 1343
} | class ____ annotated, the comment applies to the primary table.
* </ul>
* <p>
* But when {@link #on} is explicitly specified, the comment applies to the mapped table
* or column with the specified name.
* <p>
* For example:
* <pre>
* @Entity
* @Table(name = "book")
* @SecondaryTable(name = "edition")
* @Comment("The primary table for Book")
* @Comment(on = "edition",
* value = "The secondary table for Book")
* | is |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalCacheDirectoryManager.java | {
"start": 5817,
"end": 7647
} | class ____ {
private final String relativePath;
private int fileCount;
static String getRelativePath(int directoryNo) {
String relativePath = "";
if (directoryNo > 0) {
String tPath = Integer.toString(directoryNo - 1, DIRECTORIES_PER_LEVEL);
StringBuilder sb = new StringBuilder();
if (tPath.length() == 1) {
sb.append(tPath.charAt(0));
} else {
// this is done to make sure we also reuse 0th sub directory
sb.append(Integer.toString(
Integer.parseInt(tPath.substring(0, 1), DIRECTORIES_PER_LEVEL) - 1,
DIRECTORIES_PER_LEVEL));
}
for (int i = 1; i < tPath.length(); i++) {
sb.append(Path.SEPARATOR).append(tPath.charAt(i));
}
relativePath = sb.toString();
}
return relativePath;
}
static int getDirectoryNumber(String relativePath) {
String numStr = relativePath.replace("/", "");
if (relativePath.isEmpty()) {
return 0;
}
if (numStr.length() > 1) {
// undo step from getRelativePath() to reuse 0th sub directory
String firstChar = Integer.toString(
Integer.parseInt(numStr.substring(0, 1),
DIRECTORIES_PER_LEVEL) + 1, DIRECTORIES_PER_LEVEL);
numStr = firstChar + numStr.substring(1);
}
return Integer.parseInt(numStr, DIRECTORIES_PER_LEVEL) + 1;
}
public Directory(int directoryNo) {
fileCount = 0;
relativePath = getRelativePath(directoryNo);
}
public int incrementAndGetCount() {
return ++fileCount;
}
public int decrementAndGetCount() {
return --fileCount;
}
public String getRelativePath() {
return relativePath;
}
public int getCount() {
return fileCount;
}
}
} | Directory |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java | {
"start": 18369,
"end": 24541
} | class ____ not be loaded.
*/
private void loadServices(Class[] classes, List<Service> list) throws ServerException {
for (Class klass : classes) {
try {
Service service = (Service) klass.newInstance();
log.debug("Loading service [{}] implementation [{}]", service.getInterface(),
service.getClass());
if (!service.getInterface().isInstance(service)) {
throw new ServerException(ServerException.ERROR.S04, klass, service.getInterface().getName());
}
list.add(service);
} catch (ServerException ex) {
throw ex;
} catch (Exception ex) {
throw new ServerException(ServerException.ERROR.S07, klass, ex.getMessage(), ex);
}
}
}
/**
* Loads services defined in <code>services</code> and
* <code>services.ext</code> and de-dups them.
*
* @return List of final services to initialize.
*
* @throws ServerException throw if the services could not be loaded.
*/
protected List<Service> loadServices() throws ServerException {
try {
Map<Class, Service> map = new LinkedHashMap<Class, Service>();
Class[] classes = getConfig().getClasses(getPrefixedName(CONF_SERVICES));
Class[] classesExt = getConfig().getClasses(getPrefixedName(CONF_SERVICES_EXT));
List<Service> list = new ArrayList<Service>();
loadServices(classes, list);
loadServices(classesExt, list);
//removing duplicate services, strategy: last one wins
for (Service service : list) {
if (map.containsKey(service.getInterface())) {
log.debug("Replacing service [{}] implementation [{}]", service.getInterface(),
service.getClass());
}
map.put(service.getInterface(), service);
}
list = new ArrayList<Service>();
for (Map.Entry<Class, Service> entry : map.entrySet()) {
list.add(entry.getValue());
}
return list;
} catch (RuntimeException ex) {
throw new ServerException(ServerException.ERROR.S08, ex.getMessage(), ex);
}
}
/**
* Initializes the list of services.
*
* @param services services to initialized, it must be a de-dupped list of
* services.
*
* @throws ServerException thrown if the services could not be initialized.
*/
protected void initServices(List<Service> services) throws ServerException {
for (Service service : services) {
log.debug("Initializing service [{}]", service.getInterface());
checkServiceDependencies(service);
service.init(this);
this.services.put(service.getInterface(), service);
}
for (Service service : services) {
service.postInit();
}
}
/**
* Checks if all service dependencies of a service are available.
*
* @param service service to check if all its dependencies are available.
*
* @throws ServerException thrown if a service dependency is missing.
*/
protected void checkServiceDependencies(Service service) throws ServerException {
if (service.getServiceDependencies() != null) {
for (Class dependency : service.getServiceDependencies()) {
if (services.get(dependency) == null) {
throw new ServerException(ServerException.ERROR.S10, service.getClass(), dependency);
}
}
}
}
/**
* Destroys the server services.
*/
protected void destroyServices() {
List<Service> list = new ArrayList<Service>(services.values());
Collections.reverse(list);
for (Service service : list) {
try {
log.debug("Destroying service [{}]", service.getInterface());
service.destroy();
} catch (Throwable ex) {
log.error("Could not destroy service [{}], {}",
new Object[]{service.getInterface(), ex.getMessage(), ex});
}
}
log.info("Services destroyed");
}
/**
* Destroys the server.
* <p>
* All services are destroyed in reverse order of initialization, then the
* Log4j framework is shutdown.
*/
public void destroy() {
ensureOperational();
destroyServices();
log.info("Server [{}] shutdown!", name);
log.info("======================================================");
if (!Boolean.getBoolean("test.circus")) {
LogManager.shutdown();
}
status = Status.SHUTDOWN;
}
/**
* Returns the name of the server.
*
* @return the server name.
*/
public String getName() {
return name;
}
/**
* Returns the server prefix for server configuration properties.
* <p>
* By default it is the server name.
*
* @return the prefix for server configuration properties.
*/
public String getPrefix() {
return getName();
}
/**
* Returns the prefixed name of a server property.
*
* @param name of the property.
*
* @return prefixed name of the property.
*/
public String getPrefixedName(String name) {
return getPrefix() + "." + Check.notEmpty(name, "name");
}
/**
* Returns the server home dir.
*
* @return the server home dir.
*/
public String getHomeDir() {
return homeDir;
}
/**
* Returns the server config dir.
*
* @return the server config dir.
*/
public String getConfigDir() {
return configDir;
}
/**
* Returns the server log dir.
*
* @return the server log dir.
*/
public String getLogDir() {
return logDir;
}
/**
* Returns the server temp dir.
*
* @return the server temp dir.
*/
public String getTempDir() {
return tempDir;
}
/**
* Returns the server configuration.
*
* @return the server configuration.
*/
public Configuration getConfig() {
return config;
}
/**
* Returns the {@link Service} associated to the specified interface.
*
* @param serviceKlass service interface.
*
* @return the service implementation.
*/
@SuppressWarnings("unchecked")
public <T> T get(Class<T> serviceKlass) {
ensureOperational();
Check.notNull(serviceKlass, "serviceKlass");
return (T) services.get(serviceKlass);
}
/**
* Adds a service programmatically.
* <p>
* If a service with the same | could |
java | quarkusio__quarkus | integration-tests/spring-boot-properties/src/test/java/io/quarkus/it/spring/boot/BeanPropertiesTest.java | {
"start": 261,
"end": 951
} | class ____ {
@Test
void shouldHaveFinalValue() {
when().get("/bean/finalValue")
.then()
.body(is(equalTo("final")));
}
@Test
void shouldHavePackagePrivateValue() {
when().get("/bean/packagePrivateValue")
.then()
.body(is(equalTo("100")));
}
@Test
void shouldHaveValue() {
when().get("/bean/value")
.then()
.body(is(equalTo("1")));
}
@Test
void shouldHaveInnerClassValue() {
when().get("/bean/innerClass/value")
.then()
.body(is(equalTo("inner-class-value")));
}
}
| BeanPropertiesTest |
java | google__guava | android/guava/src/com/google/common/cache/Cache.java | {
"start": 1940,
"end": 8457
} | interface ____<K, V> {
/**
* Returns the value associated with {@code key} in this cache, or {@code null} if there is no
* cached value for {@code key}.
*
* @since 11.0
*/
@CanIgnoreReturnValue // TODO(b/27479612): consider removing this?
@Nullable V getIfPresent(@CompatibleWith("K") Object key);
/**
* Returns the value associated with {@code key} in this cache, obtaining that value from {@code
* loader} if necessary. The method improves upon the conventional "if cached, return; otherwise
* create, cache and return" pattern. For further improvements, use {@link LoadingCache} and its
* {@link LoadingCache#get(Object) get(K)} method instead of this one.
*
* <p>Among the improvements that this method and {@code LoadingCache.get(K)} both provide are:
*
* <ul>
* <li>{@linkplain LoadingCache#get(Object) awaiting the result of a pending load} rather than
* starting a redundant one
* <li>eliminating the error-prone caching boilerplate
* <li>tracking load {@linkplain #stats statistics}
* </ul>
*
* <p>Among the further improvements that {@code LoadingCache} can provide but this method cannot:
*
* <ul>
* <li>consolidation of the loader logic to {@linkplain CacheBuilder#build(CacheLoader) a single
* authoritative location}
* <li>{@linkplain LoadingCache#refresh refreshing of entries}, including {@linkplain
* CacheBuilder#refreshAfterWrite automated refreshing}
* <li>{@linkplain LoadingCache#getAll bulk loading requests}, including {@linkplain
* CacheLoader#loadAll bulk loading implementations}
* </ul>
*
* <p><b>Warning:</b> For any given key, every {@code loader} used with it should compute the same
* value. Otherwise, a call that passes one {@code loader} may return the result of another call
* with a differently behaving {@code loader}. For example, a call that requests a short timeout
* for an RPC may wait for a similar call that requests a long timeout, or a call by an
* unprivileged user may return a resource accessible only to a privileged user making a similar
* call. To prevent this problem, create a key object that includes all values that affect the
* result of the query. Or use {@code LoadingCache.get(K)}, which lacks the ability to refer to
* state other than that in the key.
*
* <p><b>Warning:</b> as with {@link CacheLoader#load}, {@code loader} <b>must not</b> return
* {@code null}; it may either return a non-null value or throw an exception.
*
* <p>No observable state associated with this cache is modified until loading completes.
*
* @throws ExecutionException if a checked exception was thrown while loading the value
* @throws UncheckedExecutionException if an unchecked exception was thrown while loading the
* value
* @throws ExecutionError if an error was thrown while loading the value
* @since 11.0
*/
@CanIgnoreReturnValue // TODO(b/27479612): consider removing this
V get(K key, Callable<? extends V> loader) throws ExecutionException;
/**
* Returns a map of the values associated with {@code keys} in this cache. The returned map will
* only contain entries which are already present in the cache.
*
* @since 11.0
*/
/*
* <? extends Object> is mostly the same as <?> to plain Java. But to nullness checkers, they
* differ: <? extends Object> means "non-null types," while <?> means "all types."
*/
ImmutableMap<K, V> getAllPresent(Iterable<? extends Object> keys);
/**
* Associates {@code value} with {@code key} in this cache. If the cache previously contained a
* value associated with {@code key}, the old value is replaced by {@code value}.
*
* <p>Prefer {@link #get(Object, Callable)} when using the conventional "if cached, return;
* otherwise create, cache and return" pattern.
*
* @since 11.0
*/
void put(K key, V value);
/**
* Copies all of the mappings from the specified map to the cache. The effect of this call is
* equivalent to that of calling {@code put(k, v)} on this map once for each mapping from key
* {@code k} to value {@code v} in the specified map. The behavior of this operation is undefined
* if the specified map is modified while the operation is in progress.
*
* @since 12.0
*/
void putAll(Map<? extends K, ? extends V> m);
/** Discards any cached value for key {@code key}. */
void invalidate(@CompatibleWith("K") Object key);
/**
* Discards any cached values for keys {@code keys}.
*
* @since 11.0
*/
// For discussion of <? extends Object>, see getAllPresent.
void invalidateAll(Iterable<? extends Object> keys);
/** Discards all entries in the cache. */
void invalidateAll();
/** Returns the approximate number of entries in this cache. */
long size();
/**
* Returns a current snapshot of this cache's cumulative statistics, or a set of default values if
* the cache is not recording statistics. All statistics begin at zero and never decrease over the
* lifetime of the cache.
*
* <p><b>Warning:</b> this cache may not be recording statistical data. For example, a cache
* created using {@link CacheBuilder} only does so if the {@link CacheBuilder#recordStats} method
* was called. If statistics are not being recorded, a {@code CacheStats} instance with zero for
* all values is returned.
*
*/
CacheStats stats();
/**
* Returns a view of the entries stored in this cache as a thread-safe map. Modifications made to
* the map directly affect the cache.
*
* <p>Iterators from the returned map are at least <i>weakly consistent</i>: they are safe for
* concurrent use, but if the cache is modified (including by eviction) after the iterator is
* created, it is undefined which of the changes (if any) will be reflected in that iterator.
*
* <p><b>Warning to users of Java 8+:</b> do not call any of the new <i>default methods</i> that
* have been newly added to {@link ConcurrentMap}! These are marked with "Since: 1.8" in the
* {@code ConcurrentMap} documentation. They will not function correctly and it is impossible for
* Guava to fix them until Guava is ready to <i>require</i> Java 8 for all users.
*/
ConcurrentMap<K, V> asMap();
/**
* Performs any pending maintenance operations needed by the cache. Exactly which activities are
* performed -- if any -- is implementation-dependent.
*/
void cleanUp();
}
| Cache |
java | apache__flink | flink-examples/flink-examples-streaming/src/main/java/org/apache/flink/streaming/examples/dsv2/watermark/CountSales.java | {
"start": 4479,
"end": 7050
} | class ____ {
public long productId;
public long timestamp;
public double sales;
public CumulativeSales(long productId, long timestamp, double sales) {
this.productId = productId;
this.timestamp = timestamp;
this.sales = sales;
}
@Override
public String toString() {
return String.format("%d,%d,%.2f", this.productId, this.timestamp, this.sales);
}
}
/**
* Firstly, we define an event time watermark, which represents the time of currently processing
* event. Since the watermark needs to convey the timestamp, its data type is long. To determine
* the minimum event time across all watermarks, we utilize the combineFunctionMin() method to
* combine the watermarks. The default handling strategy is forward, meaning that the watermark
* will typically be advanced to downstream operators in most scenarios. Thus, we create a
* WatermarkDeclaration instance that can be used to declare and generate the watermark.
*/
public static final LongWatermarkDeclaration EVENT_TIME_WATERMARK_DECLARATION =
WatermarkDeclarations.newBuilder("EVENT_TIME")
.typeLong()
.combineFunctionMin()
.combineWaitForAllChannels(true)
.defaultHandlingStrategyForward()
.build();
public static void main(String[] args) throws Exception {
// parse the parameters
final ParameterTool params = ParameterTool.fromArgs(args);
final int parallelism = params.getInt("parallelism", 5);
// obtain execution environment
ExecutionEnvironment env = ExecutionEnvironment.getInstance();
// Create the Order source, the source will declare and generate event time watermarks.
NonKeyedPartitionStream<Order> source =
env.fromSource(new WrappedSource<>(new OrderSource()), "order source")
.withParallelism(parallelism);
source
// key by product id
.keyBy(order -> order.productId)
.process(
// handle event time watermark in downstream
new CountSalesProcessFunction())
.toSink(new WrappedSink<>(new PrintSink<>()));
// execute program
env.execute("Count Sales");
}
/** Source of Orders. We will declare and generate the event time watermark in this source. */
private static | CumulativeSales |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/any/discriminator/explicit/ExplicitValueTests.java | {
"start": 1162,
"end": 3477
} | class ____ {
@Test
void verifyExplicitMappingHandling(SessionFactoryScope sessions) {
sessions.inTransaction( (session) -> {
final Order order = session.find( Order.class, 1 );
final CashPayment cashPayment = session.find( CashPayment.class, 1 );
final CardPayment cardPayment = session.find( CardPayment.class, 1 );
final CheckPayment checkPayment = session.find( CheckPayment.class, 1 );
order.paymentExplicit = cardPayment;
session.flush();
verifyDiscriminatorValue( "explicit_type", "CARD", session );
order.paymentExplicit = checkPayment;
session.flush();
verifyDiscriminatorValue( "explicit_type", "CHECK", session );
// NOTE : cash is not explicitly mapped and implicit mappings are not enabled, so this should be an error
try {
order.paymentExplicit = cashPayment;
session.flush();
fail( "Expecting an error" );
}
catch (HibernateException expected) {
assertThat( expected ).hasMessageContaining( "Cannot determine discriminator value from entity-name" );
}
} );
}
private void verifyDiscriminatorValue(String columnName, String expectedValue, SessionImplementor session) {
final String qry = String.format( "select %s from orders", columnName );
session.doWork( (connection) -> {
try (final Statement stmnt = connection.createStatement() ) {
try (ResultSet resultSet = stmnt.executeQuery( qry )) {
assertThat( resultSet.next() ).isTrue();
final String discriminatorValue = resultSet.getString( columnName );
assertThat( resultSet.next() ).isFalse();
assertThat( discriminatorValue ).isEqualTo( expectedValue );
}
}
} );
}
@BeforeEach
void prepareTestData(SessionFactoryScope sessions) {
sessions.inTransaction( (session) -> {
final Order order = new Order( 1, "1" );
final CashPayment cashPayment = new CashPayment( 1, 50.00 );
final CardPayment cardPayment = new CardPayment( 1, 150.00, "123-456-789" );
final CheckPayment checkPayment = new CheckPayment( 1, 250.00, 1001, "123", "987" );
session.persist( order );
session.persist( cashPayment );
session.persist( cardPayment );
session.persist( checkPayment );
} );
}
@AfterEach
void dropTestData(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
}
| ExplicitValueTests |
java | apache__hadoop | hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ADataBlocks.java | {
"start": 14534,
"end": 16580
} | class ____ extends DataBlock {
private S3AByteArrayOutputStream buffer;
private final int limit;
// cache data size so that it is consistent after the buffer is reset.
private Integer dataSize;
ByteArrayBlock(long index,
long limit,
BlockOutputStreamStatistics statistics) {
super(index, statistics);
this.limit = (limit > Integer.MAX_VALUE) ? Integer.MAX_VALUE : (int) limit;
buffer = new S3AByteArrayOutputStream(this.limit);
blockAllocated();
}
/**
* Get the amount of data; if there is no buffer then the size is 0.
* @return the amount of data available to upload.
*/
@Override
long dataSize() {
return dataSize != null ? dataSize : buffer.size();
}
@Override
BlockUploadData startUpload() throws IOException {
super.startUpload();
dataSize = buffer.size();
final byte[] bytes = buffer.getBuffer();
buffer = null;
return new BlockUploadData(
byteArrayContentProvider(bytes, 0, dataSize, this::isUploading));
}
@Override
boolean hasCapacity(long bytes) {
return dataSize() + bytes <= limit;
}
@Override
long remainingCapacity() {
return limit - dataSize();
}
@Override
int write(byte[] b, int offset, int len) throws IOException {
super.write(b, offset, len);
int written = (int) Math.min(remainingCapacity(), len);
buffer.write(b, offset, written);
return written;
}
@Override
protected void innerClose() {
buffer = null;
blockReleased();
}
@Override
public String toString() {
return "ByteArrayBlock{"
+"index=" + index +
", state=" + getState() +
", limit=" + limit +
", dataSize=" + dataSize +
'}';
}
}
// ====================================================================
/**
* Stream via Direct ByteBuffers; these are allocated off heap
* via {@link DirectBufferPool}.
*/
static | ByteArrayBlock |
java | google__dagger | javatests/dagger/functional/builder/BuilderTest.java | {
"start": 1361,
"end": 1530
} | interface ____ {
String s();
int i();
long l();
float f();
double d();
byte b();
abstract static | TestChildComponentWithBuilderAbstractClass |
java | netty__netty | transport/src/main/java/io/netty/channel/socket/ChannelOutputShutdownException.java | {
"start": 795,
"end": 1117
} | class ____ extends IOException {
private static final long serialVersionUID = 6712549938359321378L;
public ChannelOutputShutdownException(String msg) {
super(msg);
}
public ChannelOutputShutdownException(String msg, Throwable cause) {
super(msg, cause);
}
}
| ChannelOutputShutdownException |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/FluxTake.java | {
"start": 4304,
"end": 6925
} | class ____<T>
implements ConditionalSubscriber<T>, InnerOperator<T, T> {
final ConditionalSubscriber<? super T> actual;
final long n;
long remaining;
@SuppressWarnings("NotNullFieldNotInitialized") // s initialized in onSubscribe
Subscription s;
boolean done;
volatile int wip;
@SuppressWarnings("rawtypes")
static final AtomicIntegerFieldUpdater<TakeConditionalSubscriber> WIP =
AtomicIntegerFieldUpdater.newUpdater(TakeConditionalSubscriber.class,
"wip");
TakeConditionalSubscriber(ConditionalSubscriber<? super T> actual,
long n) {
this.actual = actual;
this.n = n;
this.remaining = n;
}
@Override
public void onSubscribe(Subscription s) {
if (Operators.validate(this.s, s)) {
if (n == 0) {
s.cancel();
done = true;
Operators.complete(actual);
}
else {
this.s = s;
actual.onSubscribe(this);
}
}
}
@Override
public void onNext(T t) {
if (done) {
Operators.onNextDropped(t, actual.currentContext());
return;
}
long r = remaining;
if (r == 0) {
s.cancel();
onComplete();
return;
}
remaining = --r;
boolean stop = r == 0L;
actual.onNext(t);
if (stop) {
s.cancel();
onComplete();
}
}
@Override
public boolean tryOnNext(T t) {
if (done) {
Operators.onNextDropped(t, actual.currentContext());
return true;
}
long r = remaining;
if (r == 0) {
s.cancel();
onComplete();
return true;
}
remaining = --r;
boolean stop = r == 0L;
boolean b = actual.tryOnNext(t);
if (stop) {
s.cancel();
onComplete();
}
return b;
}
@Override
public void onError(Throwable t) {
if (done) {
Operators.onErrorDropped(t, actual.currentContext());
return;
}
done = true;
actual.onError(t);
}
@Override
public void onComplete() {
if (done) {
return;
}
done = true;
actual.onComplete();
}
@Override
public void request(long n) {
if (wip == 0 && WIP.compareAndSet(this, 0, 1)) {
if (n >= this.n) {
s.request(Long.MAX_VALUE);
}
else {
s.request(n);
}
return;
}
s.request(n);
}
@Override
public void cancel() {
s.cancel();
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.TERMINATED) return done;
if (key == Attr.PARENT) return s;
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return InnerOperator.super.scanUnsafe(key);
}
@Override
public CoreSubscriber<? super T> actual() {
return actual;
}
}
static final | TakeConditionalSubscriber |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java | {
"start": 36606,
"end": 38151
} | class ____ implements RMStateFileProcessor {
private RMState rmState;
private List<ApplicationAttemptStateData> attempts;
public RMAppStateFileProcessor(RMState rmState,
List<ApplicationAttemptStateData> attempts) {
this.rmState = rmState;
this.attempts = attempts;
}
@Override
public void processChildNode(String appDirName, String childNodeName,
byte[] childData) throws InvalidProtocolBufferException {
if (childNodeName.startsWith(ApplicationId.appIdStrPrefix)) {
// application
LOG.debug("Loading application from node: {}", childNodeName);
ApplicationStateDataPBImpl appState =
new ApplicationStateDataPBImpl(
ApplicationStateDataProto.parseFrom(childData));
ApplicationId appId =
appState.getApplicationSubmissionContext().getApplicationId();
rmState.appState.put(appId, appState);
} else if (childNodeName.startsWith(
ApplicationAttemptId.appAttemptIdStrPrefix)) {
// attempt
LOG.debug("Loading application attempt from node: {}", childNodeName);
ApplicationAttemptStateDataPBImpl attemptState =
new ApplicationAttemptStateDataPBImpl(
ApplicationAttemptStateDataProto.parseFrom(childData));
attempts.add(attemptState);
} else {
LOG.info("Unknown child node with name: " + childNodeName);
}
}
}
// Interface for common state processing of directory of file layout
private | RMAppStateFileProcessor |
java | apache__camel | components/camel-salesforce/camel-salesforce-component/src/main/java/org/apache/camel/component/salesforce/internal/streaming/PushTopicHelper.java | {
"start": 1790,
"end": 12882
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(PushTopicHelper.class);
private static final ObjectMapper OBJECT_MAPPER = JsonUtils.createObjectMapper();
private static final String PUSH_TOPIC_OBJECT_NAME = "PushTopic";
private static final long API_TIMEOUT = 60; // Rest API call timeout
private final SalesforceEndpointConfig config;
private final String topicName;
private final RestClient restClient;
private final boolean preApi29;
public PushTopicHelper(SalesforceEndpointConfig config, String topicName, RestClient restClient) {
this.config = config;
this.topicName = topicName;
this.restClient = restClient;
this.preApi29 = Double.valueOf(config.getApiVersion()) < 29.0;
// validate notify fields right away
if (preApi29 && (config.getNotifyForOperationCreate() != null || config.getNotifyForOperationDelete() != null
|| config.getNotifyForOperationUndelete() != null
|| config.getNotifyForOperationUpdate() != null)) {
throw new IllegalArgumentException(
"NotifyForOperationCreate, NotifyForOperationDelete"
+ ", NotifyForOperationUndelete, and NotifyForOperationUpdate"
+ " are only supported since API version 29.0"
+ ", instead use NotifyForOperations");
} else if (!preApi29 && config.getNotifyForOperations() != null) {
throw new IllegalArgumentException(
"NotifyForOperations is readonly since API version 29.0"
+ ", instead use NotifyForOperationCreate, NotifyForOperationDelete"
+ ", NotifyForOperationUndelete, and NotifyForOperationUpdate");
}
}
public void createOrUpdateTopic() throws CamelException {
final String query = config.getSObjectQuery();
final SyncResponseCallback callback = new SyncResponseCallback();
// lookup Topic first
try {
// use SOQL to lookup Topic, since Name is not an external ID!!!
restClient
.query("SELECT Id, Name, Query, ApiVersion, IsActive, "
+ "NotifyForFields, NotifyForOperations, NotifyForOperationCreate, "
+ "NotifyForOperationDelete, NotifyForOperationUndelete, " + "NotifyForOperationUpdate, Description "
+ "FROM PushTopic WHERE Name = '" + topicName + "'",
Collections.emptyMap(), callback);
if (!callback.await(API_TIMEOUT, TimeUnit.SECONDS)) {
throw new SalesforceException("API call timeout!", null);
}
final SalesforceException callbackException = callback.getException();
if (callbackException != null) {
throw callbackException;
}
QueryRecordsPushTopic records = OBJECT_MAPPER.readValue(callback.getResponse(), QueryRecordsPushTopic.class);
if (records.getTotalSize() == 1) {
PushTopic topic = records.getRecords().get(0);
LOG.info("Found existing topic {}: {}", topicName, topic);
// check if we need to update topic
final boolean notifyOperationsChanged;
if (preApi29) {
notifyOperationsChanged = notEquals(config.getNotifyForOperations(), topic.getNotifyForOperations());
} else {
notifyOperationsChanged
= notEquals(config.getNotifyForOperationCreate(), topic.getNotifyForOperationCreate())
|| notEquals(config.getNotifyForOperationDelete(), topic.getNotifyForOperationDelete())
|| notEquals(config.getNotifyForOperationUndelete(), topic.getNotifyForOperationUndelete())
|| notEquals(config.getNotifyForOperationUpdate(), topic.getNotifyForOperationUpdate());
}
if (!query.equals(topic.getQuery()) || notEquals(config.getNotifyForFields(), topic.getNotifyForFields())
|| notifyOperationsChanged) {
if (!config.isUpdateTopic()) {
String msg = "Query doesn't match existing Topic and updateTopic is set to false";
throw new CamelException(msg);
}
// otherwise update the topic
updateTopic(topic.getId());
}
} else {
createTopic();
}
} catch (SalesforceException e) {
throw new CamelException(String.format("Error retrieving Topic %s: %s", topicName, e.getMessage()), e);
} catch (IOException e) {
throw new CamelException(
String.format("Un-marshaling error retrieving Topic %s: %s", topicName, e.getMessage()), e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new CamelException(
String.format("Un-marshaling error retrieving Topic %s: %s", topicName, e.getMessage()), e);
} finally {
// close stream to close HttpConnection
if (callback.getResponse() != null) {
try {
callback.getResponse().close();
} catch (IOException e) {
// ignore
}
}
}
}
/**
 * Creates the PushTopic record on Salesforce for this endpoint's topic name,
 * populated from the endpoint configuration (subscription query, API version
 * and notify-for settings). Blocks up to API_TIMEOUT seconds on the
 * asynchronous REST callback and translates every failure mode into a
 * {@link CamelException}.
 *
 * @throws CamelException if the REST call times out, Salesforce reports an
 *         error (including an HTTP-success response whose body carries
 *         success=false), (un)marshalling fails, or the thread is interrupted
 */
private void createTopic() throws CamelException {
    final PushTopic topic = new PushTopic();
    topic.setName(topicName);
    topic.setApiVersion(Double.valueOf(config.getApiVersion()));
    topic.setQuery(config.getSObjectQuery());
    topic.setDescription("Topic created by Camel Salesforce component");
    topic.setNotifyForFields(config.getNotifyForFields());
    // pre-29.0 API versions expose a single notifyForOperations flag; later
    // versions split it into one flag per operation type
    if (preApi29) {
        topic.setNotifyForOperations(config.getNotifyForOperations());
    } else {
        topic.setNotifyForOperationCreate(config.getNotifyForOperationCreate());
        topic.setNotifyForOperationDelete(config.getNotifyForOperationDelete());
        topic.setNotifyForOperationUndelete(config.getNotifyForOperationUndelete());
        topic.setNotifyForOperationUpdate(config.getNotifyForOperationUpdate());
    }
    LOG.info("Creating Topic {}: {}", topicName, topic);
    // synchronous wrapper used to wait for the async REST client's response
    final SyncResponseCallback callback = new SyncResponseCallback();
    try {
        restClient.createSObject(PUSH_TOPIC_OBJECT_NAME, new ByteArrayInputStream(OBJECT_MAPPER.writeValueAsBytes(topic)),
                Collections.emptyMap(), callback);
        if (!callback.await(API_TIMEOUT, TimeUnit.SECONDS)) {
            throw new SalesforceException("API call timeout!", null);
        }
        // a failure captured by the callback is re-thrown on this thread
        final SalesforceException callbackException = callback.getException();
        if (callbackException != null) {
            throw callbackException;
        }
        CreateSObjectResult result = OBJECT_MAPPER.readValue(callback.getResponse(), CreateSObjectResult.class);
        // Salesforce can answer with an HTTP success yet flag the create as
        // failed in the response body, so check the result explicitly
        if (!result.getSuccess()) {
            final SalesforceException salesforceException
                    = new SalesforceException(result.getErrors(), HttpStatus.BAD_REQUEST_400);
            throw new CamelException(
                    String.format("Error creating Topic %s: %s", topicName, result.getErrors()), salesforceException);
        }
    } catch (SalesforceException e) {
        throw new CamelException(String.format("Error creating Topic %s: %s", topicName, e.getMessage()), e);
    } catch (IOException e) {
        throw new CamelException(String.format("Un-marshaling error creating Topic %s: %s", topicName, e.getMessage()), e);
    } catch (InterruptedException e) {
        // restore the interrupt flag before translating the exception
        Thread.currentThread().interrupt();
        throw new CamelException(String.format("Interrupted while creating Topic %s", topicName), e);
    } finally {
        // close the response stream to release the underlying HTTP connection
        if (callback.getResponse() != null) {
            try {
                callback.getResponse().close();
            } catch (IOException e) {
                // ignore
            }
        }
    }
}
/**
 * Updates an existing PushTopic record on Salesforce, overwriting its query,
 * notifyForFields and notify-for-operation settings with the values from the
 * endpoint configuration. Blocks up to API_TIMEOUT seconds on the
 * asynchronous REST callback.
 *
 * @param topicId Salesforce record id of the PushTopic to update
 * @throws CamelException if the REST call times out, Salesforce reports an
 *         error, marshalling fails, or the thread is interrupted
 */
private void updateTopic(String topicId) throws CamelException {
    final String query = config.getSObjectQuery();
    LOG.info("Updating Topic {} with Query [{}]", topicName, query);
    // synchronous wrapper used to wait for the async REST client's response
    final SyncResponseCallback callback = new SyncResponseCallback();
    try {
        // update the query, notifyForFields and notifyForOperations fields
        final PushTopic topic = new PushTopic();
        topic.setQuery(query);
        topic.setNotifyForFields(config.getNotifyForFields());
        // pre-29.0 API versions expose a single notifyForOperations flag;
        // later versions split it into one flag per operation type
        if (preApi29) {
            topic.setNotifyForOperations(config.getNotifyForOperations());
        } else {
            topic.setNotifyForOperationCreate(config.getNotifyForOperationCreate());
            topic.setNotifyForOperationDelete(config.getNotifyForOperationDelete());
            topic.setNotifyForOperationUndelete(config.getNotifyForOperationUndelete());
            topic.setNotifyForOperationUpdate(config.getNotifyForOperationUpdate());
        }
        restClient.updateSObject("PushTopic", topicId, new ByteArrayInputStream(OBJECT_MAPPER.writeValueAsBytes(topic)),
                Collections.emptyMap(), callback);
        if (!callback.await(API_TIMEOUT, TimeUnit.SECONDS)) {
            throw new SalesforceException("API call timeout!", null);
        }
        // a failure captured by the callback is re-thrown on this thread
        final SalesforceException callbackException = callback.getException();
        if (callbackException != null) {
            throw callbackException;
        }
    } catch (SalesforceException e) {
        throw new CamelException(
                String.format("Error updating topic %s with query [%s] : %s", topicName, query, e.getMessage()), e);
    } catch (InterruptedException e) {
        // reset interrupt status
        Thread.currentThread().interrupt();
        throw new CamelException(
                String.format("Error updating topic %s with query [%s] : %s", topicName, query, e.getMessage()), e);
    } catch (IOException e) {
        throw new CamelException(
                String.format("Error updating topic %s with query [%s] : %s", topicName, query, e.getMessage()), e);
    } finally {
        // close the response stream to release the underlying HTTP connection
        if (callback.getResponse() != null) {
            try {
                callback.getResponse().close();
            } catch (IOException ignore) {
            }
        }
    }
}
/**
 * Null-tolerant inequality check used when comparing configured values
 * against an existing topic: reports a difference only when the first value
 * is non-null and does not equal the second. A null first argument is
 * deliberately treated as "not configured" and never counts as a difference
 * (note this makes the check intentionally asymmetric, unlike
 * {@code Objects.equals}).
 */
private static <T> boolean notEquals(T o1, T o2) {
    if (o1 == null) {
        return false;
    }
    return !o1.equals(o2);
}
}
| PushTopicHelper |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/runtime/src/main/java/io/quarkus/rest/client/reactive/ReactiveClientHeadersFactory.java | {
"start": 685,
"end": 1581
} | interface ____ not part of a JAX-RS request.
* @param clientOutgoingHeaders the read-only map of header parameters specified on the client interface.
* @return a Uni with a map of HTTP headers to merge with the clientOutgoingHeaders to be sent to the remote service.
*
* @see ClientHeadersFactory#update(MultivaluedMap, MultivaluedMap)
*/
public abstract Uni<MultivaluedMap<String, String>> getHeaders(MultivaluedMap<String, String> incomingHeaders,
MultivaluedMap<String, String> clientOutgoingHeaders);
@Override
public final MultivaluedMap<String, String> update(MultivaluedMap<String, String> incomingHeaders,
MultivaluedMap<String, String> clientOutgoingHeaders) {
throw new RuntimeException(
"Can't call `update` method in a Reactive context. Use `getHeaders` or implement ClientHeadersFactory.");
}
}
| is |
java | quarkusio__quarkus | core/deployment/src/test/java/io/quarkus/deployment/util/JandexUtilTest.java | {
"start": 8185,
"end": 8262
} | class ____ implements Single {
}
public static abstract | SingleImplNoType |
java | quarkusio__quarkus | extensions/spring-web/resteasy-reactive/tests/src/test/java/io/quarkus/spring/web/resteasy/reactive/test/ResponseStatusAndExceptionHandlerTest.java | {
"start": 839,
"end": 1637
} | class ____ {
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(ExceptionController.class, RestExceptionHandler.class));
@Test
public void testRestControllerAdvice() {
when().get("/exception").then().statusCode(400);
}
@Test
public void testResponseStatusOnException() {
when().get("/exception2").then().statusCode(202);
}
@Test
public void testExceptionHandlingWithHttpRequest() {
when().get("/exception3").then().statusCode(400)
.body(containsString("Request GET /exception3 failed")).header("X-Error-Reason", is("IllegalArgument"));
}
@RestController
public static | ResponseStatusAndExceptionHandlerTest |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/filter/LevelMatchFilter.java | {
"start": 6679,
"end": 7361
} | class ____ extends AbstractFilterBuilder<LevelMatchFilter.Builder>
implements org.apache.logging.log4j.core.util.Builder<LevelMatchFilter> {
@PluginBuilderAttribute
private Level level = Level.ERROR;
/**
* Sets the logging level to use.
* @param level the logging level to use.
* @return this
*/
public LevelMatchFilter.Builder setLevel(final Level level) {
this.level = level;
return this;
}
@Override
public LevelMatchFilter build() {
return new LevelMatchFilter(this.level, this.getOnMatch(), this.getOnMismatch());
}
}
}
| Builder |
java | micronaut-projects__micronaut-core | core/src/main/java/io/micronaut/core/convert/converters/MultiValuesConverterFactory.java | {
"start": 2462,
"end": 10291
} | class ____ {
/**
* Values separated with commas ",". In case of iterables, the values are converted to {@link String} and joined
* with comma delimiter. In case of {@link Map} or a POJO {@link Object} the keys and values are alternating and all
* delimited with commas.
* <table border="1">
* <caption>Examples</caption>
* <tr> <th><b> Type </b></th> <th><b> Example value </b></th> <th><b> Example representation </b></th> </tr>
* <tr> <td> Iterable   </td> <td> param=["Mike", "Adam", "Kate"] </td> <td> "param=Mike,Adam,Kate" </td></tr>
* <tr> <td> Map </td> <td> param=["name": "Mike", "age": "30"]  </td> <td> "param=name,Mike,age,30" </td> </tr>
* <tr> <td> Object </td> <td> param={name: "Mike", age: 30} </td> <td> "param=name,Mike,age,30" </td> </tr>
* </table>
* Note that ambiguity may arise when the values contain commas themselves after being converted to String.
*/
public static final String FORMAT_CSV = "csv";
/**
* Values separated with spaces " " similarly to CSV being separated with commas.
*/
public static final String FORMAT_SSV = "ssv";
/**
* Values separated with the pipe "|" symbol similarly to CSV being separated with commas.
*/
public static final String FORMAT_PIPES = "pipes";
/**
* Values are repeated with the same parameter name for {@link Iterable}, while {@link Map} and POJO {@link Object}
* would be expanded with its property names.
* <table border="1">
* <caption>Examples</caption>
* <tr> <th><b> Type </b></th> <th><b> Example value </b></th> <th><b> Example representation </b></th> </tr>
* <tr> <td> Iterable   </td> <td> param=["Mike", "Adam", "Kate"] </td> <td> "param=Mike&param=Adam&param=Kate </td></tr>
* <tr> <td> Map </td> <td> param=["name": "Mike", "age": "30"]   </td> <td> "name=Mike&age=30" </td> </tr>
* <tr> <td> Object </td> <td> param={name: "Mike", age: 30} </td> <td> "name=Mike&age=30" </td> </tr>
* </table>
*/
public static final String FORMAT_MULTI = "multi";
/**
* Values are put in the representation with property name for {@link Map} and POJO {@link Object} in square
* after the original parameter name.
* <table border="1">
* <caption>Examples</caption>
* <tr> <th><b> Type </b></th> <th><b> Example value </b></th> <th><b> Example representation </b></th> </tr>
* <tr> <td> Iterable   </td> <td> param=["Mike", "Adam", "Kate"] </td> <td> "param[0]=Mike&param[1]=Adam&param[2]=Kate </td></tr>
* <tr> <td> Map </td> <td> param=["name": "Mike", "age": "30"]   </td> <td> "param[name]=Mike&param[age]=30" </td> </tr>
* <tr> <td> Object </td> <td> param={name: "Mike", age: 30} </td> <td> "param[name]=Mike&param[age]=30" </td> </tr>
* </table>
*/
public static final String FORMAT_DEEP_OBJECT = "deepobject";
private static final Character CSV_DELIMITER = ',';
private static final Character SSV_DELIMITER = ' ';
private static final Character PIPES_DELIMITER = '|';
/**
* Convert given string value to normalized format, so that it can be compared independent of case.
*/
private static String normalizeFormatName(String value) {
return value.toLowerCase().replaceAll("[-_]", "");
}
/**
* A common function for {@link MultiValuesToMapConverter} and {@link MultiValuesToObjectConverter}.
* Retrieves parameter that is separated by delimiter given its name from all the parameters
*
* @return All the values in a Map
*/
private static Map<String, String> getSeparatedMapParameters(
ConvertibleMultiValues<String> parameters, String name, String defaultValue, Character delimiter
) {
List<String> paramValues = parameters.getAll(name);
if (paramValues.isEmpty() && defaultValue != null) {
paramValues.add(defaultValue);
}
Map<String, String> values = new HashMap<>();
for (String value: paramValues) {
List<String> delimited = splitByDelimiter(value, delimiter);
for (int i = 1; i < delimited.size(); i += 2) {
values.put(delimited.get(i - 1), delimited.get(i));
}
}
return values;
}
/**
* A common function for {@link MultiValuesToMapConverter} and {@link MultiValuesToObjectConverter}.
* Retrieves the values of parameter from all the parameters that is in MULTI format given its name
*
* @return All the values in a Map
*/
private static Map<String, String> getMultiMapParameters(ConvertibleMultiValues<String> parameters) {
// Convert to map of strings - if multiple values are present, the first one is taken
return parameters.asMap().entrySet().stream()
.filter(v -> !v.getValue().isEmpty())
.collect(Collectors.toMap(Map.Entry::getKey, v -> v.getValue().get(0)));
}
/**
* A common function for {@link MultiValuesToMapConverter} and {@link MultiValuesToObjectConverter}.
* Retrieves the values of parameter from all the parameters that is in DEEP_OBJECT FORMAT given its name
*
* @return All the values in a Map
*/
private static Map<String, String> getDeepObjectMapParameters(ConvertibleMultiValues<String> parameters, String name) {
Map<String, List<String>> paramValues = parameters.asMap();
Map<String, String> values = new HashMap<>();
// Convert to map of strings - if multiple values are present, only first one is taken
for (Map.Entry<String, List<String>> param: paramValues.entrySet()) {
String key = param.getKey();
if (key.startsWith(name) && key.length() > name.length() &&
key.charAt(name.length()) == '[' && key.charAt(key.length() - 1) == ']' &&
!param.getValue().isEmpty()
) {
String mapKey = key.substring(name.length() + 1, key.length() - 1);
values.put(mapKey, param.getValue().get(0));
}
}
return values;
}
/**
* Splits string given a delimiter.
*/
private static List<String> splitByDelimiter(String value, Character delimiter) {
List<String> result = new ArrayList<>();
int startI = 0;
for (int i = 0; i < value.length(); ++i) {
if (value.charAt(i) == delimiter) {
result.add(value.substring(startI, i));
startI = i + 1;
}
}
if (!value.isEmpty()) {
result.add(value.substring(startI));
}
return result;
}
/**
* Join strings given a delimiter.
* @param strings strings to join
* @param delimiter the delimiter
* @return joined string
*/
private static String joinStrings(Iterable<String> strings, Character delimiter) {
if (strings == null) {
return "";
}
StringBuilder builder = new StringBuilder();
boolean first = true;
for (String value: strings) {
if (value != null) {
if (!first) {
builder.append(delimiter);
} else {
first = false;
}
builder.append(value);
}
}
return builder.toString();
}
/**
* An abstract | MultiValuesConverterFactory |
java | apache__kafka | raft/src/test/java/org/apache/kafka/raft/DynamicVotersTest.java | {
"start": 1107,
"end": 4406
} | class ____ {
@Test
public void testParsingEmptyStringFails() {
assertEquals("No voters given.",
assertThrows(IllegalArgumentException.class,
() -> DynamicVoters.parse("")).
getMessage());
}
@Test
public void testParsingSingleDynamicVoter() {
assertEquals(new DynamicVoters(List.of(
new DynamicVoter(
Uuid.fromString("K90IZ-0DRNazJ49kCZ1EMQ"),
2,
"localhost",
(short) 8020))),
DynamicVoters.parse("2@localhost:8020:K90IZ-0DRNazJ49kCZ1EMQ"));
}
@Test
public void testParsingThreeDynamicVoters() {
assertEquals(new DynamicVoters(List.of(
new DynamicVoter(
Uuid.fromString("K90IZ-0DRNazJ49kCZ1EMQ"),
0,
"localhost",
(short) 8020),
new DynamicVoter(
Uuid.fromString("aUARLskQTCW4qCZDtS_cwA"),
1,
"localhost",
(short) 8030),
new DynamicVoter(
Uuid.fromString("2ggvsS4kQb-fSJ_-zC_Ang"),
2,
"localhost",
(short) 8040))),
DynamicVoters.parse(
"0@localhost:8020:K90IZ-0DRNazJ49kCZ1EMQ," +
"1@localhost:8030:aUARLskQTCW4qCZDtS_cwA," +
"2@localhost:8040:2ggvsS4kQb-fSJ_-zC_Ang"));
}
@Test
public void testParsingInvalidStringWithDuplicateNodeIds() {
assertEquals("Node id 1 was specified more than once.",
assertThrows(IllegalArgumentException.class,
() -> DynamicVoters.parse(
"0@localhost:8020:K90IZ-0DRNazJ49kCZ1EMQ," +
"1@localhost:8030:aUARLskQTCW4qCZDtS_cwA," +
"1@localhost:8040:2ggvsS4kQb-fSJ_-zC_Ang")).
getMessage());
}
private static void testRoundTrip(String input) {
DynamicVoters voters = DynamicVoters.parse(input);
assertEquals(input, voters.toString());
}
@Test
public void testRoundTripSingleVoter() {
testRoundTrip("2@localhost:8020:K90IZ-0DRNazJ49kCZ1EMQ");
}
@Test
public void testRoundTripThreeVoters() {
testRoundTrip(
"0@localhost:8020:K90IZ-0DRNazJ49kCZ1EMQ," +
"1@localhost:8030:aUARLskQTCW4qCZDtS_cwA," +
"2@localhost:8040:2ggvsS4kQb-fSJ_-zC_Ang");
}
@Test
public void testToVoterSet() {
Map<Integer, VoterSet.VoterNode> voterMap = new HashMap<>();
voterMap.put(0, DynamicVoter.parse(
"0@localhost:8020:K90IZ-0DRNazJ49kCZ1EMQ").toVoterNode("CONTROLLER2"));
voterMap.put(1, DynamicVoter.parse(
"1@localhost:8030:aUARLskQTCW4qCZDtS_cwA").toVoterNode("CONTROLLER2"));
voterMap.put(2, DynamicVoter.parse(
"2@localhost:8040:2ggvsS4kQb-fSJ_-zC_Ang").toVoterNode("CONTROLLER2"));
assertEquals(VoterSet.fromMap(voterMap),
DynamicVoters.parse(
"0@localhost:8020:K90IZ-0DRNazJ49kCZ1EMQ," +
"1@localhost:8030:aUARLskQTCW4qCZDtS_cwA," +
"2@localhost:8040:2ggvsS4kQb-fSJ_-zC_Ang").toVoterSet("CONTROLLER2"));
}
}
| DynamicVotersTest |
java | spring-projects__spring-boot | module/spring-boot-kafka/src/main/java/org/springframework/boot/kafka/testcontainers/RedpandaContainerConnectionDetailsFactory.java | {
"start": 1867,
"end": 2461
} | class ____ extends ContainerConnectionDetails<RedpandaContainer>
implements KafkaConnectionDetails {
private RedpandaContainerConnectionDetails(ContainerConnectionSource<RedpandaContainer> source) {
super(source);
}
@Override
public List<String> getBootstrapServers() {
return List.of(getContainer().getBootstrapServers());
}
@Override
public @Nullable SslBundle getSslBundle() {
return super.getSslBundle();
}
@Override
public String getSecurityProtocol() {
return (getSslBundle() != null) ? "SSL" : "PLAINTEXT";
}
}
}
| RedpandaContainerConnectionDetails |
java | apache__dubbo | dubbo-config/dubbo-config-api/src/main/java/org/apache/dubbo/config/bootstrap/builders/AbstractServiceBuilder.java | {
"start": 1157,
"end": 7650
} | class ____<T extends AbstractServiceConfig, B extends AbstractServiceBuilder<T, B>>
extends AbstractInterfaceBuilder<T, B> {
/**
* The service version
*/
protected String version;
/**
* The service group
*/
protected String group;
/**
* whether the service is deprecated
*/
protected Boolean deprecated;
/**
* The time delay register service (milliseconds)
*/
protected Integer delay;
/**
* Whether to export the service
*/
protected Boolean export;
/**
* The service weight
*/
protected Integer weight;
/**
* Document center
*/
protected String document;
/**
* Whether to register as a dynamic service or not on register center, it the value is false, the status will be disabled
* after the service registered,and it needs to be enabled manually; if you want to disable the service, you also need
* manual processing
*/
protected Boolean dynamic;
/**
* Whether to use token
*/
protected String token;
/**
* Whether to export access logs to logs
*/
protected String accesslog;
/**
* The protocol list the service will export with
*/
protected List<ProtocolConfig> protocols;
protected String protocolIds;
// max allowed execute times
private Integer executes;
/**
* Whether to register
*/
private Boolean register;
/**
* Warm up period
*/
private Integer warmup;
/**
* The serialization type
*/
private String serialization;
/**
* used for thread pool isolation between services
*/
private Executor executor;
/**
* The prefer serialization type
*/
private String preferSerialization;
public B version(String version) {
this.version = version;
return getThis();
}
public B group(String group) {
this.group = group;
return getThis();
}
public B deprecated(Boolean deprecated) {
this.deprecated = deprecated;
return getThis();
}
public B delay(Integer delay) {
this.delay = delay;
return getThis();
}
public B export(Boolean export) {
this.export = export;
return getThis();
}
public B weight(Integer weight) {
this.weight = weight;
return getThis();
}
public B document(String document) {
this.document = document;
return getThis();
}
public B dynamic(Boolean dynamic) {
this.dynamic = dynamic;
return getThis();
}
public B token(String token) {
this.token = token;
return getThis();
}
public B token(Boolean token) {
if (token != null) {
this.token = token.toString();
} else {
this.token = null;
}
return getThis();
}
public B accesslog(String accesslog) {
this.accesslog = accesslog;
return getThis();
}
public B accesslog(Boolean accesslog) {
if (accesslog != null) {
this.accesslog = accesslog.toString();
} else {
this.accesslog = null;
}
return getThis();
}
public B addProtocols(List<ProtocolConfig> protocols) {
if (this.protocols == null) {
this.protocols = new ArrayList<>();
}
this.protocols.addAll(protocols);
return getThis();
}
public B addProtocol(ProtocolConfig protocol) {
if (this.protocols == null) {
this.protocols = new ArrayList<>();
}
this.protocols.add(protocol);
return getThis();
}
public B protocolIds(String protocolIds) {
this.protocolIds = protocolIds;
return getThis();
}
public B executes(Integer executes) {
this.executes = executes;
return getThis();
}
public B register(Boolean register) {
this.register = register;
return getThis();
}
public B warmup(Integer warmup) {
this.warmup = warmup;
return getThis();
}
public B serialization(String serialization) {
this.serialization = serialization;
return getThis();
}
public B executor(Executor executor) {
this.executor = executor;
return getThis();
}
/**
* The prefer serialization type
*
* @param preferSerialization prefer serialization type
* @return {@link B}
*/
public B preferSerialization(String preferSerialization) {
this.preferSerialization = preferSerialization;
return getThis();
}
@Override
public void build(T instance) {
super.build(instance);
if (!StringUtils.isEmpty(version)) {
instance.setVersion(version);
}
if (!StringUtils.isEmpty(group)) {
instance.setGroup(group);
}
if (deprecated != null) {
instance.setDeprecated(deprecated);
}
if (delay != null) {
instance.setDelay(delay);
}
if (export != null) {
instance.setExport(export);
}
if (weight != null) {
instance.setWeight(weight);
}
if (!StringUtils.isEmpty(document)) {
instance.setDocument(document);
}
if (dynamic != null) {
instance.setDynamic(dynamic);
}
if (!StringUtils.isEmpty(token)) {
instance.setToken(token);
}
if (!StringUtils.isEmpty(accesslog)) {
instance.setAccesslog(accesslog);
}
if (protocols != null) {
instance.setProtocols(protocols);
}
if (!StringUtils.isEmpty(protocolIds)) {
instance.setProtocolIds(protocolIds);
}
if (executes != null) {
instance.setExecutes(executes);
}
if (register != null) {
instance.setRegister(register);
}
if (warmup != null) {
instance.setWarmup(warmup);
}
if (!StringUtils.isEmpty(serialization)) {
instance.setSerialization(serialization);
}
if (executor != null) {
instance.setExecutor(executor);
}
if (StringUtils.isNotBlank(preferSerialization)) {
instance.setPreferSerialization(preferSerialization);
}
}
}
| AbstractServiceBuilder |
java | eclipse-vertx__vert.x | vertx-core/src/test/java/io/vertx/tests/concurrent/InboundReadQueueTest.java | {
"start": 318,
"end": 4591
} | class ____ extends AsyncTestBase {
final MessagePassingQueue.Factory factory = MessagePassingQueue.SPSC;
@Test
public void testAdd() {
MessagePassingQueue<Integer> queue = factory.create(elt -> false);
assertEquals(MessagePassingQueue.DRAIN_REQUIRED_MASK, queue.add(0));
for (int i = 1;i < 15;i++) {
assertEquals(0L, queue.add(i));
}
assertEquals(MessagePassingQueue.UNWRITABLE_MASK, queue.add(17));
}
@Test
public void testDrainSingle() {
MessagePassingQueue<Integer> queue = factory.create(elt -> true);
assertEquals(MessagePassingQueue.DRAIN_REQUIRED_MASK, queue.add(0));
assertEquals(MessagePassingQueue.drainResult(0, 0, false), queue.drain(17));
}
@Test
public void testFoo() {
MessagePassingQueue<Integer> queue = factory.create(elt -> false);
assertEquals(MessagePassingQueue.DRAIN_REQUIRED_MASK, queue.add(0));
assertEquals(drainResult(0, 1, false), queue.drain());
}
@Test
public void testDrainFully() {
LinkedList<Integer> consumed = new LinkedList<>();
MessagePassingQueue<Integer> queue = factory.create(elt -> {
consumed.add(elt);
return true;
});
assertEquals(MessagePassingQueue.DRAIN_REQUIRED_MASK, queue.add(0));
int idx = 1;
while ((queue.add(idx++) & MessagePassingQueue.UNWRITABLE_MASK) == 0) {
//
}
assertEquals(16, idx);
assertEquals(drainResult(0, 0, true), queue.drain() & 0x3);
for (int i = 0;i < 16;i++) {
assertEquals(i, (int)consumed.poll());
}
assertTrue(consumed.isEmpty());
}
@Test
public void testDrainRefuseSingleElement() {
MessagePassingQueue<Integer> queue = factory.create(elt -> false);
assertEquals(MessagePassingQueue.DRAIN_REQUIRED_MASK, queue.add(0));
assertEquals(drainResult(0, 1, false), queue.drain());
}
@Test
public void testConsumeDrain() {
AtomicInteger demand = new AtomicInteger(0);
MessagePassingQueue<Integer> queue = factory.create(elt -> {
if (demand.get() > 0) {
demand.decrementAndGet();
return true;
}
return false;
});
assertEquals(MessagePassingQueue.DRAIN_REQUIRED_MASK, queue.add(0));
int idx = 1;
while ((queue.add(idx++) & MessagePassingQueue.UNWRITABLE_MASK) == 0) {
//
}
assertEquals(16, idx);
for (int i = 0;i < 8;i++) {
demand.set(1);
assertEquals(drainResult(0, (15 - i), false), queue.drain() & 0xFFFF);
}
demand.set(1);
assertEquals(drainResult(0, 7, true), queue.drain() & 0xFFFF);
}
@Test
public void testPartialDrain() {
AtomicInteger demand = new AtomicInteger(0);
MessagePassingQueue<Integer> queue = factory.create(elt -> true);
int idx = 0;
while ((queue.add(idx++) & MessagePassingQueue.UNWRITABLE_MASK) == 0) {
//
}
assertEquals(16, idx);
assertEquals(drainResult(0, 12, false), queue.drain(4) & 0xFFFF);
assertEquals(drainResult(0, 7, true), queue.drain(5) & 0xFFFF);
assertEquals(drainResult(0, 0, false), queue.drain() & 0xFFFF);
}
@Test
public void testUnwritableCount() {
AtomicInteger demand = new AtomicInteger();
MessagePassingQueue<Integer> queue = factory.create(elt-> {
if (demand.get() > 0) {
demand.decrementAndGet();
return true;
} else {
return false;
}
});
int count = 0;
while (true) {
if ((queue.add(count++) & MessagePassingQueue.UNWRITABLE_MASK) != 0) {
break;
}
}
demand.set(1);
assertEquals(drainResult(0, 15, false), queue.drain() & 0xFFFF);
assertFlagsSet(queue.add(count++), MessagePassingQueue.UNWRITABLE_MASK);
demand.set(count - 1);
int flags = queue.drain();
assertFlagsSet(flags, MessagePassingQueue.WRITABLE_MASK);
assertEquals(0, MessagePassingQueue.numberOfPendingElements(flags));
}
private void assertFlagsSet(int flags, int... masks) {
for (int mask : masks) {
assertTrue("Expecting flag " + Integer.toBinaryString(mask) + " to be set", (flags & mask) != 0);
}
}
private void assertFlagsClear(int flags, int... masks) {
for (int mask : masks) {
assertTrue("Expecting flag " + Integer.toBinaryString(mask) + " to be clear", (flags & mask) == 0);
}
}
}
| InboundReadQueueTest |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/jaxb/hbm/transform/ColumnDefaultsProperty.java | {
"start": 223,
"end": 890
} | class ____ implements ColumnDefaults {
private final Property property;
public ColumnDefaultsProperty(Property property) {
this.property = property;
}
@Override
public Boolean isNullable() {
return property.isOptional();
}
@Override
public Boolean isInsertable() {
return property.isInsertable();
}
@Override
public Boolean isUpdatable() {
return property.isUpdatable();
}
@Override
public Integer getLength() {
return null;
}
@Override
public Integer getScale() {
return null;
}
@Override
public Integer getPrecision() {
return null;
}
@Override
public Boolean isUnique() {
return Boolean.FALSE;
}
}
| ColumnDefaultsProperty |
java | apache__rocketmq | broker/src/test/java/org/apache/rocketmq/broker/BrokerShutdownTest.java | {
"start": 1480,
"end": 6824
} | class ____ {
private MessageStoreConfig messageStoreConfig;
private BrokerConfig brokerConfig;
private NettyServerConfig nettyServerConfig;
private AuthConfig authConfig;
@Before
public void setUp() {
messageStoreConfig = new MessageStoreConfig();
String storePathRootDir = System.getProperty("java.io.tmpdir") + File.separator + "store-"
+ UUID.randomUUID().toString();
messageStoreConfig.setStorePathRootDir(storePathRootDir);
brokerConfig = new BrokerConfig();
nettyServerConfig = new NettyServerConfig();
nettyServerConfig.setListenPort(0);
authConfig = new AuthConfig();
}
@After
public void destroy() {
UtilAll.deleteFile(new File(messageStoreConfig.getStorePathRootDir()));
}
@Test
public void testBrokerGracefulShutdown() throws Exception {
// Test that broker shuts down gracefully with proper resource cleanup
BrokerController brokerController = new BrokerController(
brokerConfig, nettyServerConfig, new NettyClientConfig(), messageStoreConfig, authConfig);
// Initialize and start the broker
assertThat(brokerController.initialize()).isTrue();
brokerController.start();
// Verify broker is running
assertThat(brokerController.getBrokerMetricsManager()).isNotNull();
// Test graceful shutdown
long startTime = System.currentTimeMillis();
brokerController.shutdown();
long shutdownTime = System.currentTimeMillis() - startTime;
// Shutdown should complete within reasonable time (10 seconds)
assertThat(shutdownTime).isLessThan(40000);
}
@Test
public void testChainedShutdownOrdering() throws Exception {
// Test that shutdown components are called in proper order
BrokerController brokerController = new BrokerController(
brokerConfig, nettyServerConfig, new NettyClientConfig(), messageStoreConfig, authConfig);
assertThat(brokerController.initialize()).isTrue();
// Track shutdown order using atomic flags
AtomicBoolean metricsManagerShutdown = new AtomicBoolean(false);
AtomicBoolean brokerStatsShutdown = new AtomicBoolean(false);
// Start broker
brokerController.start();
// Verify services are initialized
assertThat(brokerController.getBrokerMetricsManager()).isNotNull();
assertThat(brokerController.getBrokerStatsManager()).isNotNull();
// Shutdown should not throw exceptions
brokerController.shutdown();
// After shutdown, services should be properly cleaned up
// (We can't easily verify the exact order without modifying the implementation,
// but we can verify shutdown completes successfully)
assertThat(true).isTrue(); // Placeholder for successful completion
}
@Test
public void testShutdownWithConcurrentOperations() throws Exception {
// Test shutdown behavior when concurrent operations are running
BrokerController brokerController = new BrokerController(
brokerConfig, nettyServerConfig, new NettyClientConfig(), messageStoreConfig, authConfig);
assertThat(brokerController.initialize()).isTrue();
brokerController.start();
CountDownLatch shutdownLatch = new CountDownLatch(1);
AtomicBoolean shutdownSuccess = new AtomicBoolean(false);
// Simulate concurrent shutdown from another thread
Thread shutdownThread = new Thread(() -> {
try {
brokerController.shutdown();
shutdownSuccess.set(true);
} catch (Exception e) {
// Should not happen in graceful shutdown
} finally {
shutdownLatch.countDown();
}
});
shutdownThread.start();
// Wait for shutdown to complete
assertThat(shutdownLatch.await(40, TimeUnit.SECONDS)).isTrue();
assertThat(shutdownSuccess.get()).isTrue();
}
@Test
public void testResourceCleanupDuringShutdown() throws Exception {
// Test that resources are properly cleaned up during shutdown
BrokerController brokerController = new BrokerController(
brokerConfig, nettyServerConfig, new NettyClientConfig(), messageStoreConfig, authConfig);
assertThat(brokerController.initialize()).isTrue();
// Verify essential components are initialized
assertThat(brokerController.getBrokerMetricsManager()).isNotNull();
assertThat(brokerController.getBrokerStatsManager()).isNotNull();
assertThat(brokerController.getConsumerOffsetManager()).isNotNull();
assertThat(brokerController.getTopicConfigManager()).isNotNull();
brokerController.start();
// Shutdown should clean up all resources
brokerController.shutdown();
// After shutdown, the broker should be in a clean state
// We verify this by ensuring a second shutdown call doesn't cause issues
brokerController.shutdown(); // Should be safe to call multiple times
}
} | BrokerShutdownTest |
java | grpc__grpc-java | xds/src/test/java/io/grpc/xds/ClusterResolverLoadBalancerTest.java | {
"start": 58716,
"end": 59544
} | class ____ extends LoadBalancer {
private final String name;
private final Helper helper;
private List<EquivalentAddressGroup> addresses;
private Object config;
private Status upstreamError;
private boolean shutdown;
FakeLoadBalancer(String name, Helper helper) {
this.name = name;
this.helper = helper;
}
@Override
public Status acceptResolvedAddresses(ResolvedAddresses resolvedAddresses) {
addresses = resolvedAddresses.getAddresses();
config = resolvedAddresses.getLoadBalancingPolicyConfig();
return Status.OK;
}
@Override
public void handleNameResolutionError(Status error) {
upstreamError = error;
}
@Override
public void shutdown() {
shutdown = true;
childBalancers.remove(this);
}
}
}
| FakeLoadBalancer |
java | google__guava | android/guava-tests/test/com/google/common/reflect/TypeTokenSubtypeTest.java | {
"start": 4400,
"end": 4563
} | class ____ implements List<String> {}
assertThrows(IllegalArgumentException.class, () -> numberList.getSubtype(StringList.class));
}
private static | StringList |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncTaskMaintenanceService.java | {
"start": 2174,
"end": 7917
} | class ____ extends AbstractLifecycleComponent implements ClusterStateListener {
/**
* Controls the interval at which the cleanup is scheduled.
* Defaults to 1h. It is an undocumented/expert setting that
* is mainly used by integration tests to make the garbage
* collection of search responses more reactive.
*/
public static final Setting<TimeValue> ASYNC_SEARCH_CLEANUP_INTERVAL_SETTING = Setting.timeSetting(
"async_search.index_cleanup_interval",
TimeValue.timeValueHours(1),
Setting.Property.NodeScope
);
private static final Logger logger = LogManager.getLogger(AsyncTaskMaintenanceService.class);
private final ClusterService clusterService;
private final ProjectResolver projectResolver;
private final String index;
private final String localNodeId;
private final ThreadPool threadPool;
private final Client clientWithOrigin;
private final TimeValue delay;
private final AtomicBoolean isPaused = new AtomicBoolean(false); // allow tests to simulate restarts
private boolean isCleanupRunning;
private Collection<ProjectId> projectsToCleanup;
private volatile Scheduler.Cancellable cancellable;
public AsyncTaskMaintenanceService(
ClusterService clusterService,
ProjectResolver projectResolver,
String localNodeId,
Settings nodeSettings,
ThreadPool threadPool,
Client clientWithOrigin
) {
this.clusterService = clusterService;
this.projectResolver = projectResolver;
this.index = XPackPlugin.ASYNC_RESULTS_INDEX;
this.localNodeId = localNodeId;
this.threadPool = threadPool;
this.clientWithOrigin = clientWithOrigin;
this.delay = ASYNC_SEARCH_CLEANUP_INTERVAL_SETTING.get(nodeSettings);
}
@Override
protected void doStart() {
clusterService.addListener(this);
}
@Override
protected void doStop() {
clusterService.removeListener(this);
stopCleanup();
}
// exposed for tests
public void pause() {
if (isPaused.compareAndSet(false, true)) {
synchronized (lifecycle) {
assert lifecycle.started();
doStop();
}
}
}
// exposed for tests
public boolean unpause() {
if (isPaused.compareAndSet(true, false)) {
synchronized (lifecycle) {
assert lifecycle.started();
doStart();
}
return true;
} else {
return false;
}
}
@Override
protected final void doClose() throws IOException {}
@Override
public void clusterChanged(ClusterChangedEvent event) {
final ClusterState state = event.state();
if (state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) {
// Wait until the gateway has recovered from disk.
return;
}
tryStartCleanup(state);
}
synchronized void tryStartCleanup(ClusterState state) {
if (lifecycle.stoppedOrClosed()) {
return;
}
final List<ProjectId> projectsOnLocalNode = state.metadata().projects().keySet().stream().filter(project -> {
final IndexRoutingTable indexRouting = state.routingTable(project).index(index);
if (indexRouting == null) {
return false;
}
final String primaryNodeId = indexRouting.shard(0).primaryShard().currentNodeId();
return localNodeId.equals(primaryNodeId);
}).toList();
if (projectsOnLocalNode.isEmpty()) {
this.projectsToCleanup = null;
if (isCleanupRunning) {
stopCleanup();
}
} else {
this.projectsToCleanup = List.copyOf(projectsOnLocalNode);
if (isCleanupRunning == false) {
isCleanupRunning = true;
executeNextCleanup();
}
}
}
synchronized void executeNextCleanup() {
if (isCleanupRunning) {
ActionListener<Void> listener = new CountDownActionListener(
this.projectsToCleanup.size(),
ActionListener.running(this::scheduleNextCleanup)
);
for (ProjectId project : this.projectsToCleanup) {
cleanupIndex(project, listener);
}
}
}
private void cleanupIndex(ProjectId projectId, ActionListener<Void> listener) {
final long nowInMillis = System.currentTimeMillis();
final DeleteByQueryRequest toDelete = new DeleteByQueryRequest(index).setQuery(
QueryBuilders.rangeQuery(EXPIRATION_TIME_FIELD).lte(nowInMillis)
);
projectResolver.executeOnProject(
projectId,
() -> clientWithOrigin.execute(DeleteByQueryAction.INSTANCE, toDelete, listener.map(ignore -> null))
);
}
synchronized void scheduleNextCleanup() {
if (isCleanupRunning) {
try {
cancellable = threadPool.schedule(this::executeNextCleanup, delay, threadPool.generic());
} catch (EsRejectedExecutionException e) {
if (e.isExecutorShutdown()) {
logger.debug("failed to schedule next maintenance task; shutting down", e);
} else {
throw e;
}
}
}
}
synchronized void stopCleanup() {
if (isCleanupRunning) {
if (cancellable != null && cancellable.isCancelled() == false) {
cancellable.cancel();
}
isCleanupRunning = false;
}
}
}
| AsyncTaskMaintenanceService |
java | quarkusio__quarkus | extensions/smallrye-reactive-messaging-kafka/deployment/src/test/java/io/quarkus/smallrye/reactivemessaging/kafka/deployment/DefaultSerdeConfigTest.java | {
"start": 142699,
"end": 143935
} | class ____ {
@Incoming("channel1")
@Incoming("channel2")
void method1(String msg) {
}
@Incoming("channel3")
@Incoming("channel4")
void method2(JsonObject msg) {
}
}
@Test
void repeatableOutgoings() {
Tuple[] expectations = {
tuple("mp.messaging.outgoing.channel1.value.serializer", "org.apache.kafka.common.serialization.StringSerializer"),
tuple("mp.messaging.outgoing.channel2.value.serializer", "org.apache.kafka.common.serialization.StringSerializer"),
tuple("mp.messaging.outgoing.channel3.value.serializer", "io.quarkus.kafka.client.serialization.JsonObjectSerializer"),
tuple("mp.messaging.outgoing.channel4.value.serializer", "io.quarkus.kafka.client.serialization.JsonObjectSerializer"),
tuple("mp.messaging.outgoing.channel5.value.serializer", "org.apache.kafka.common.serialization.LongSerializer"),
tuple("mp.messaging.outgoing.channel6.value.serializer", "org.apache.kafka.common.serialization.LongSerializer"),
};
doTest(expectations, RepeatableOutgoingsChannels.class);
}
private static | RepeatableIncomingsChannels |
java | quarkusio__quarkus | extensions/arc/deployment/src/test/java/io/quarkus/arc/test/autoinject/AutoFieldInjectionTest.java | {
"start": 1365,
"end": 1771
} | class ____ {
// @Inject should not be added here
@MyQualifier
static String staticFoo;
// @Inject is added automatically
@MyQualifier
String foo;
@MyQualifier
Long bar;
// @Inject should not be added here
@MyQualifier
final Long baz;
Client() {
this.baz = null;
}
}
static | Client |
java | apache__hadoop | hadoop-tools/hadoop-federation-balance/src/main/java/org/apache/hadoop/tools/fedbalance/procedure/BalanceProcedure.java | {
"start": 1435,
"end": 3183
} | class ____<T extends BalanceProcedure>
implements Writable {
public static final Logger LOG =
LoggerFactory.getLogger(BalanceProcedure.class);
private String nextProcedure; // the procedure after this procedure.
private String name; // the name of this procedure.
private long delayDuration; // this specifies how long will this procedure be
// delayed. The delay is triggered by throwing a
// RetryException.
private BalanceJob job;
public BalanceProcedure() {
}
/**
* The constructor of BalanceProcedure.
*
* @param name the name of the procedure.
* @param nextProcedure the name of the next procedure.
* @param delayDuration the delay duration when this procedure is delayed.
*/
public BalanceProcedure(String name, String nextProcedure,
long delayDuration) {
this();
this.name = name;
this.nextProcedure = nextProcedure;
this.delayDuration = delayDuration;
}
public BalanceProcedure(String name, long delayDuration) {
this(name, NEXT_PROCEDURE_NONE, delayDuration);
}
/**
* The main process. This is called by the ProcedureScheduler.
* Make sure the process quits fast when it's interrupted and the scheduler is
* shut down.
*
* One procedure may have many phases and all the phases share the same member
* variables. Each time this method returns, the journal is saved. User can
* serialize the current phase in write(DataOutput) so the job can continue
* with the last unfinished phase after it is recovered.
* The return value indicates whether the job should go to the next procedure.
* Return true after all the phases finish.
*
* Example:
* | BalanceProcedure |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueuePreemption.java | {
"start": 2342,
"end": 7838
} | class ____
extends TestCapacitySchedulerSurgicalPreemption {
@Override
@BeforeEach
public void setUp() throws Exception {
super.setUp();
}
public static CapacitySchedulerConfiguration
setupQueueConfigurationForSimpleSurgicalPreemption(
CapacitySchedulerConfiguration conf) {
//set up auto created queue configs
TestCapacitySchedulerAutoCreatedQueueBase.setupQueueMappings(conf, "c",
true, new int[] { 1, 2 });
//setup new queues with one of them auto enabled
// Define top-level queues
// Set childQueue for root
conf.setQueues(ROOT, new String[] {"c"});
conf.setCapacity(C, 100f);
conf.setUserLimitFactor(C, 1.0f);
conf.setAutoCreateChildQueueEnabled(C, true);
//Setup leaf queue template configs
conf.setAutoCreatedLeafQueueConfigCapacity(C, 30.0f);
conf.setAutoCreatedLeafQueueConfigMaxCapacity(C, 100.0f);
conf.setAutoCreatedLeafQueueConfigUserLimit(C, 100);
conf.setAutoCreatedLeafQueueConfigUserLimitFactor(C, 3.0f);
return conf;
}
protected CapacitySchedulerConfiguration
setupQueueConfigurationForPriorityBasedPreemption(
CapacitySchedulerConfiguration conf) {
//set up auto created queue configs
TestCapacitySchedulerAutoCreatedQueueBase.setupQueueMappings(conf, "c",
true, new int[] { 1, 2 });
TestCapacitySchedulerAutoCreatedQueueBase.setupQueueMappings(conf, "d",
true, new int[] { 3, 4 });
TestCapacitySchedulerAutoCreatedQueueBase.setupQueueMappings(conf, "e",
true, new int[] { 0 });
//setup new queues with one of them auto enabled
// Define top-level queues
// Set childQueue for root
conf.setQueues(ROOT,
new String[] { "c", "d", "e" });
conf.setCapacity(C, 45f);
conf.setCapacity(D, 45f);
conf.setCapacity(E, 10f);
conf.setUserLimitFactor(E, 3.0f);
conf.setUserLimitFactor(C, 3.0f);
conf.setUserLimitFactor(D, 3.0f);
conf.setAutoCreateChildQueueEnabled(C, true);
conf.setAutoCreateChildQueueEnabled(D, true);
conf.setAutoCreateChildQueueEnabled(E, true);
//Setup leaf queue template configs
conf.setAutoCreatedLeafQueueConfigCapacity(C, 100f);
conf.setAutoCreatedLeafQueueConfigMaxCapacity(C, 100.0f);
conf.setAutoCreatedLeafQueueConfigUserLimit(C, 100);
conf.setAutoCreatedLeafQueueConfigUserLimitFactor(C, 3.0f);
conf.setAutoCreatedLeafQueueConfigCapacity(D, 100.0f);
conf.setAutoCreatedLeafQueueConfigMaxCapacity(D, 100.0f);
conf.setAutoCreatedLeafQueueConfigUserLimit(D, 100);
conf.setAutoCreatedLeafQueueConfigUserLimitFactor(D, 3.0f);
conf.setAutoCreatedLeafQueueConfigCapacity(E, 100.0f);
conf.setAutoCreatedLeafQueueConfigMaxCapacity(E, 100.0f);
conf.setAutoCreatedLeafQueueConfigUserLimit(E, 100);
conf.setAutoCreatedLeafQueueConfigUserLimitFactor(E, 3.0f);
conf.setQueuePriority(C, 1);
conf.setQueuePriority(D, 2);
return conf;
}
@Test
@Timeout(value = 60)
public void testSimpleSurgicalPreemptionOnAutoCreatedLeafQueues()
throws Exception {
/**
* Test case: Submit two application (app1/app2) to different queues, queue
* structure:
*
* <pre>
* C
* / | \
* USER1 USER2 USER3
* 30 30 30
* </pre>
*
* 1) Two nodes (n1/n2) in the cluster, each of them has 20G.
*
* 2) app1 submit to queue-USER1 first, it asked 32 * 1G containers
* We will allocate 16 on n1 and 16 on n2.
*
* 3) app2 submit to queue-USER2, ask for one 1G container (for AM)
*
* 4) app2 asks for another 6G container, it will be reserved on n1
*
* Now: we have:
* n1: 17 from app1, 1 from app2, and 1 reserved from app2
* n2: 16 from app1.
*
* After preemption, we should expect:
* Preempt 4 containers from app1 on n1.
*/
setupQueueConfigurationForSimpleSurgicalPreemption(conf);
testSimpleSurgicalPreemption(USER1, USER2, USER1, USER2);
}
@Test
@Timeout(value = 600)
public void
testPreemptionFromHighestPriorityManagedParentQueueAndOldestContainer()
throws Exception {
/**
* Test case: Submit two application (app1/app2) to different queues, queue
* structure:
*
* <pre>
* Root
* / | \
* c d e
* 45 45 10
* </pre>
*
* Priority of queue_c = 1
* Priority of queue_d = 2
*
* 1) 5 nodes (n0-n4) in the cluster, each of them has 4G.
*
* 2) app1 submit to queue-e first (AM=1G), it asked 4 * 1G containers
* We will allocate 1 container on each of n0-n4. AM on n4.
*
* 3) app2 submit to queue-c, AM container=0.5G, allocated on n0
* Ask for 2 * 3.5G containers. (Reserved on n0/n1)
*
* 4) app2 submit to queue-d, AM container=0.5G, allocated on n2
* Ask for 2 * 3.5G containers. (Reserved on n2/n3)
*
* First we will preempt container on n2 since it is the oldest container of
* Highest priority queue (d)
*/
// Total preemption = 1G per round, which is 5% of cluster resource (20G)
setupQueueConfigurationForPriorityBasedPreemption(conf);
testPriorityPreemptionFromHighestPriorityQueueAndOldestContainer(
new String[] { USER1, USER3, USER0 },
new String[] { USER1, USER3, USER0 });
}
}
| TestCapacitySchedulerAutoCreatedQueuePreemption |
java | google__guice | extensions/persist/src/com/google/inject/persist/jpa/JpaFinderProxy.java | {
"start": 9901,
"end": 10241
} | class ____ {
private volatile boolean isKeyedQuery = false;
volatile boolean isBindAsRawParameters = true;
//should we treat the query as having ? instead of :named params
volatile JpaFinderProxy.ReturnType returnType;
volatile Class<?> returnClass;
@SuppressWarnings("rawtypes") // Unavoidable because | FinderDescriptor |
java | quarkusio__quarkus | test-framework/junit5/src/main/java/io/quarkus/test/junit/AbstractJvmQuarkusTestExtension.java | {
"start": 5582,
"end": 10344
} | class ____");
}
if (context.getTestInstance().isPresent()) {
return ConditionEvaluationResult.enabled("Quarkus Test Profile tags only affect classes");
}
// At this point, the TCCL is sometimes a deployment classloader (for multimodule tests), or the runtime classloader (for nested tests), and sometimes a FacadeClassLoader in continuous cases
// Getting back to a FacadeClassLoader is non-trivial. We can't use the singleton on the class, because we will be accessing it from different classloaders.
// We can't have a hook back from the runtime classloader to the facade classloader, because
// when evaluating execution conditions for native tests, the test will have been loaded with the system classloader, not the runtime classloader.
// The one classloader we can reliably get to when evaluating test execution is the system classloader, so hook our config on that.
// To avoid instanceof check, check for the system classloader instead of checking for the quarkusclassloader
boolean isFlatClasspath = this.getClass().getClassLoader() == ClassLoader.getSystemClassLoader();
ClassLoader original = Thread.currentThread().getContextClassLoader();
// In native mode tests, a testconfig will not have been registered on the system classloader with a testconfig instance of our classloader, so in those cases, we do not want to set the TCCL
if (!isFlatClasspath && !(original instanceof FacadeClassLoader)) {
// In most cases, we reset the TCCL to the system classloader after discovery finishes, so we could get away without this setting of the TCCL
// However, in multi-module and continuous tests the TCCL lifecycle is more complex, so this setting is still needed (for now)
Thread.currentThread().setContextClassLoader(ClassLoader.getSystemClassLoader());
}
TestConfig testConfig;
try {
testConfig = ConfigProvider.getConfig()
.unwrap(SmallRyeConfig.class)
.getConfigMapping(TestConfig.class);
} catch (Exception | ServiceConfigurationError e) {
String javaCommand = System.getProperty("sun.java.command");
boolean isEclipse = javaCommand != null
&& javaCommand.contains("JUnit5TestLoader");
// VS Code has the exact same java command and runner as Eclipse, but needs its own message
boolean isVSCode = isEclipse && (System.getProperty("java.class.path").contains("vscode"));
boolean isMaybeVSCode = isEclipse && (javaCommand.contains("testNames") && javaCommand.contains("testNameFile"));
if (isVSCode) {
// Will need https://github.com/eclipse-jdt/eclipse.jdt.ui/issues/2257 and a reconsume by VSCode
log.error(
"Could not read configuration while evaluating whether to run a test. This is a known issue when running tests in the VS Code IDE. To work around the problem, run individual test methods.");
} else if (isMaybeVSCode) {
// Will need https://github.com/eclipse-jdt/eclipse.jdt.ui/issues/2257 and a reconsume by VSCode
log.error(
"Could not read configuration while evaluating whether to run a test. It looks like you're probably running tests with VS Code. This is a known issue when running tests in the VS Code IDE. To work around the problem, run individual test methods.");
} else if (isEclipse) {
// Tracked by https://github.com/eclipse-jdt/eclipse.jdt.ui/issues/2257; fixed in Eclipse 4.37
log.error(
"Could not read configuration while evaluating whether to run a test. This is a known issue when running tests in the Eclipse IDE. To work around the problem, edit the run configuration and add `-uniqueId [engine:junit-jupiter]/[class:"
+ context.getRequiredTestClass().getName()
+ "]` in the program arguments. Running the whole package, or running individual test methods, will also work without any extra configuration.");
} else {
log.error("Internal error: Could not read configuration while evaluating whether to run "
+ context.getRequiredTestClass()
+ ". Please let the Quarkus team know what you were doing when this error happened.");
}
log.debug("Underlying exception: " + e);
log.debug("Thread Context ClassLoader: " + Thread.currentThread().getContextClassLoader());
log.debug("The classloader of the | specified |
java | quarkusio__quarkus | extensions/grpc/runtime/src/main/java/io/quarkus/grpc/runtime/supports/context/GrpcDuplicatedContextGrpcInterceptor.java | {
"start": 961,
"end": 3337
} | class ____ implements ServerInterceptor, Prioritized {
private static final Logger log = Logger.getLogger(GrpcDuplicatedContextGrpcInterceptor.class.getName());
@Inject
ExceptionHandlerProvider ehp;
@Override
public <ReqT, RespT> ServerCall.Listener<ReqT> interceptCall(ServerCall<ReqT, RespT> call,
Metadata headers,
ServerCallHandler<ReqT, RespT> next) {
// This interceptor is called first, so, we should be on the event loop.
Context capturedVertxContext = Vertx.currentContext();
if (capturedVertxContext != null) {
// If we are not on a duplicated context, create and switch.
Context local = VertxContext.getOrCreateDuplicatedContext(capturedVertxContext);
setContextSafe(local, true);
// Must be sure to call next.startCall on the right context
return new ListenedOnDuplicatedContext<>(ehp, call, nextCall(call, headers, next), local);
} else {
log.warn("Unable to run on a duplicated context - interceptor not called on the Vert.x event loop");
return next.startCall(call, headers);
}
}
private <ReqT, RespT> Function<Runnable, ServerCall.Listener<ReqT>> nextCall(ServerCall<ReqT, RespT> call,
Metadata headers,
ServerCallHandler<ReqT, RespT> next) {
// Must be sure to call next.startCall on the right context
io.grpc.Context current = io.grpc.Context.current();
return onClose -> {
io.grpc.Context previous = current.attach();
try {
var forwardingCall = new ForwardingServerCall<ReqT, RespT>() {
@Override
protected ServerCall<ReqT, RespT> delegate() {
return call;
}
@Override
public void close(Status status, Metadata trailers) {
onClose.run();
super.close(status, trailers);
}
};
return next.startCall(forwardingCall, headers);
} finally {
current.detach(previous);
}
};
}
@Override
public int getPriority() {
return Interceptors.DUPLICATE_CONTEXT;
}
static | GrpcDuplicatedContextGrpcInterceptor |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRJobClient.java | {
"start": 2331,
"end": 2454
} | class ____ the Tool interface.
Here test that CLI sends correct command with options and parameters.
*/
public | implemented |
java | dropwizard__dropwizard | dropwizard-jersey/src/test/java/io/dropwizard/jersey/sessions/FlashResource.java | {
"start": 335,
"end": 627
} | class ____ {
@POST
public void setName(@Session Flash<String> flash,
String name) {
flash.set(name);
}
@GET
public String getName(@Session Flash<String> flash) {
return Objects.toString(flash.get().orElse(null));
}
}
| FlashResource |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/PutTransformActionRequestTests.java | {
"start": 671,
"end": 1995
} | class ____ extends AbstractWireSerializingTransformTestCase<Request> {
private String transformId;
@Before
public void setupTransformId() {
transformId = randomAlphaOfLengthBetween(1, 10);
}
@Override
protected Writeable.Reader<Request> instanceReader() {
return Request::new;
}
@Override
protected Request createTestInstance() {
TransformConfig config = TransformConfigTests.randomTransformConfigWithoutHeaders(transformId);
return new Request(config, randomBoolean(), randomTimeValue());
}
@Override
protected Request mutateInstance(Request instance) {
TransformConfig config = instance.getConfig();
boolean deferValidation = instance.isDeferValidation();
TimeValue timeout = instance.ackTimeout();
switch (between(0, 2)) {
case 0 -> config = new TransformConfig.Builder(config).setId(config.getId() + randomAlphaOfLengthBetween(1, 5)).build();
case 1 -> deferValidation ^= true;
case 2 -> timeout = new TimeValue(timeout.duration() + randomLongBetween(1, 5), timeout.timeUnit());
default -> throw new AssertionError("Illegal randomization branch");
}
return new Request(config, deferValidation, timeout);
}
}
| PutTransformActionRequestTests |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/internal/pool/SimpleConnectionPool.java | {
"start": 21794,
"end": 22924
} | class ____<C> implements Executor.Action<SimpleConnectionPool<C>> {
private final Slot<C> slot;
public Recycle(Slot<C> slot) {
this.slot = slot;
}
@Override
public Task execute(SimpleConnectionPool<C> pool) {
if (!pool.closed && slot.connection != null) {
PoolWaiter<C> waiter;
if (slot.usage <= slot.concurrency && (waiter = pool.waiters.poll()) != null) {
LeaseImpl<C> lease = new LeaseImpl<>(slot, waiter.handler);
return new Task() {
@Override
public void run() {
lease.emit();
}
};
} else {
slot.usage--;
}
}
return null;
}
}
private void recycle(LeaseImpl<C> lease) {
if (lease.recycled) {
throw new IllegalStateException("Attempt to recycle more than permitted");
}
lease.recycled = true;
execute(new Recycle<>(lease.slot));
}
public int waiters() {
return waiters.size();
}
public int capacity() {
return capacity;
}
@Override
public int requests() {
return requests;
}
private static | Recycle |
java | apache__maven | impl/maven-di/src/test/java/org/apache/maven/di/impl/InjectorImplTest.java | {
"start": 5116,
"end": 5210
} | interface ____<T> {
T getObj();
}
@Named
static | TestInterface |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/test/StreamsUtils.java | {
"start": 1154,
"end": 2230
} | class ____ [" + classLoader + "]");
}
return org.elasticsearch.common.io.Streams.copyToString(new InputStreamReader(is, StandardCharsets.UTF_8));
}
public static String copyToStringFromClasspath(String path) throws IOException {
InputStream is = Streams.class.getResourceAsStream(path);
if (is == null) {
throw new FileNotFoundException("Resource [" + path + "] not found in classpath");
}
return org.elasticsearch.common.io.Streams.copyToString(new InputStreamReader(is, StandardCharsets.UTF_8));
}
public static byte[] copyToBytesFromClasspath(String path) throws IOException {
try (InputStream is = Streams.class.getResourceAsStream(path)) {
if (is == null) {
throw new FileNotFoundException("Resource [" + path + "] not found in classpath");
}
try (BytesStreamOutput out = new BytesStreamOutput()) {
Streams.copy(is, out);
return BytesReference.toBytes(out.bytes());
}
}
}
}
| loader |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProgramDriver.java | {
"start": 3342,
"end": 4003
} | class ____ you want to add to the repository
* @param description The description of the class
* @throws NoSuchMethodException when a particular method cannot be found.
* @throws SecurityException security manager to indicate a security violation.
*/
public void addClass(String name, Class<?> mainClass, String description)
throws Throwable {
programs.put(name , new ProgramDescription(mainClass, description));
}
/**
* This is a driver for the example programs.
* It looks at the first command line argument and tries to find an
* example program with that name.
* If it is found, it calls the main method in that | that |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/inject/JacksonInject1381WithOptionalDeserializationFeatureDisabledTest.java | {
"start": 2492,
"end": 6661
} | class ____
{
private final String _field;
@JsonCreator
public InputFalseConstructor(@JacksonInject(value = "key", useInput = OptBoolean.FALSE, optional = OptBoolean.TRUE)
@JsonProperty("field") final String field) {
_field = field;
}
public String getField() {
return _field;
}
}
private final String empty = "{}";
private final String input = "{\"field\": \"input\"}";
private final ObjectMapper plainMapper = jsonMapperBuilder()
.disable(DeserializationFeature.FAIL_ON_UNKNOWN_INJECT_VALUE)
.build();
private final ObjectMapper injectedMapper = jsonMapperBuilder()
.injectableValues(new InjectableValues.Std().addValue("key", "injected"))
.disable(DeserializationFeature.FAIL_ON_UNKNOWN_INJECT_VALUE)
.build();
// When optional = YES, missing injectable should NOT fail
@Test
@DisplayName("FAIL_ON_UNKNOWN_INJECT_VALUE NO, optional YES, input NO, injectable NO, useInput DEFAULT|TRUE|FALSE => exception")
void test1() throws Exception {
assertNull(plainMapper.readValue(empty, InputDefault.class).getField());
assertNull(plainMapper.readValue(empty, InputDefaultConstructor.class).getField());
assertNull(plainMapper.readValue(empty, InputTrue.class).getField());
assertNull(plainMapper.readValue(empty, InputTrueConstructor.class).getField());
assertNull(plainMapper.readValue(empty, InputFalse.class).getField());
assertNull(plainMapper.readValue(empty, InputFalseConstructor.class).getField());
}
@Test
@DisplayName("FAIL_ON_UNKNOWN_INJECT_VALUE NO, optional YES, input NO, injectable YES, useInput DEFAULT|TRUE|FALSE => injected")
void test2() throws Exception {
assertEquals("injected", injectedMapper.readValue(empty, InputDefault.class).getField());
assertEquals("injected", injectedMapper.readValue(empty, InputDefaultConstructor.class).getField());
assertEquals("injected", injectedMapper.readValue(empty, InputTrue.class).getField());
assertEquals("injected", injectedMapper.readValue(empty, InputTrueConstructor.class).getField());
assertEquals("injected", injectedMapper.readValue(empty, InputFalse.class).getField());
assertEquals("injected", injectedMapper.readValue(empty, InputFalseConstructor.class).getField());
}
@Test
@DisplayName("FAIL_ON_UNKNOWN_INJECT_VALUE NO, optional YES, input YES, injectable NO, useInput DEFAULT|TRUE|FALSE => [varied]")
void test3() throws Exception {
assertEquals("input", plainMapper.readValue(input, InputDefault.class).getField());
assertEquals("input", plainMapper.readValue(input, InputDefaultConstructor.class).getField());
assertNull(plainMapper.readValue(input, InputFalse.class).getField());
assertNull(plainMapper.readValue(input, InputFalseConstructor.class).getField());
assertEquals("input", plainMapper.readValue(input, InputTrue.class).getField());
assertEquals("input", plainMapper.readValue(input, InputTrueConstructor.class).getField());
}
@Test
@DisplayName("FAIL_ON_UNKNOWN_INJECT_VALUE NO, optional YES, input YES, injectable YES, useInput DEFAULT|FALSE => injected")
void test4() throws Exception {
assertEquals("injected", injectedMapper.readValue(input, InputDefault.class).getField());
assertEquals("injected", injectedMapper.readValue(input, InputDefaultConstructor.class).getField());
assertEquals("injected", injectedMapper.readValue(input, InputFalse.class).getField());
assertEquals("injected", injectedMapper.readValue(input, InputFalseConstructor.class).getField());
}
@Test
@DisplayName("FAIL_ON_UNKNOWN_INJECT_VALUE NO, optional YES, input YES, injectable YES, useInput TRUE => input")
void test5() throws Exception {
assertEquals("input", injectedMapper.readValue(input, InputTrue.class).getField());
assertEquals("input", injectedMapper.readValue(input, InputTrueConstructor.class).getField());
}
}
| InputFalseConstructor |
java | elastic__elasticsearch | modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RegisteredDomainProcessorTests.java | {
"start": 1350,
"end": 8352
} | class ____ extends ESTestCase {
public void testGetRegisteredDomain() {
assertThat(getRegisteredDomain("www.google.com"), is(new DomainInfo("www.google.com", "google.com", "com", "www")));
assertThat(getRegisteredDomain("google.com"), is(new DomainInfo("google.com", "google.com", "com", null)));
assertThat(getRegisteredDomain(null), nullValue());
assertThat(getRegisteredDomain(""), nullValue());
assertThat(getRegisteredDomain(" "), nullValue());
assertThat(getRegisteredDomain("."), nullValue());
assertThat(getRegisteredDomain("$"), nullValue());
assertThat(getRegisteredDomain("foo.bar.baz"), nullValue());
assertThat(
getRegisteredDomain("www.books.amazon.co.uk"),
is(new DomainInfo("www.books.amazon.co.uk", "amazon.co.uk", "co.uk", "www.books"))
);
// Verify "com" is returned as the eTLD, for that FQDN or subdomain
assertThat(getRegisteredDomain("com"), is(new DomainInfo("com", null, "com", null)));
assertThat(getRegisteredDomain("example.com"), is(new DomainInfo("example.com", "example.com", "com", null)));
assertThat(getRegisteredDomain("googleapis.com"), is(new DomainInfo("googleapis.com", "googleapis.com", "com", null)));
assertThat(
getRegisteredDomain("content-autofill.googleapis.com"),
is(new DomainInfo("content-autofill.googleapis.com", "googleapis.com", "com", "content-autofill"))
);
// Verify "ssl.fastly.net" is returned as the eTLD, for that FQDN or subdomain
assertThat(
getRegisteredDomain("global.ssl.fastly.net"),
is(new DomainInfo("global.ssl.fastly.net", "global.ssl.fastly.net", "ssl.fastly.net", null))
);
assertThat(
getRegisteredDomain("1.www.global.ssl.fastly.net"),
is(new DomainInfo("1.www.global.ssl.fastly.net", "global.ssl.fastly.net", "ssl.fastly.net", "1.www"))
);
}
public void testBasic() throws Exception {
var processor = new RegisteredDomainProcessor(null, null, "input", "output", false);
{
IngestDocument document = TestIngestDocument.withDefaultVersion(Map.of("input", "www.google.co.uk"));
processor.execute(document);
assertThat(
document.getSource(),
is(
Map.ofEntries(
entry("input", "www.google.co.uk"),
entry(
"output",
Map.ofEntries(
entry("domain", "www.google.co.uk"),
entry("registered_domain", "google.co.uk"),
entry("top_level_domain", "co.uk"),
entry("subdomain", "www")
)
)
)
)
);
}
{
IngestDocument document = TestIngestDocument.withDefaultVersion(Map.of("input", "example.com"));
processor.execute(document);
assertThat(
document.getSource(),
is(
Map.ofEntries(
entry("input", "example.com"),
entry(
"output",
Map.ofEntries(
entry("domain", "example.com"),
entry("registered_domain", "example.com"),
entry("top_level_domain", "com")
)
)
)
)
);
}
{
IngestDocument document = TestIngestDocument.withDefaultVersion(Map.of("input", "com"));
processor.execute(document);
assertThat(
document.getSource(),
is(
Map.ofEntries(
entry("input", "com"),
entry(
"output",
Map.ofEntries(
entry("domain", "com"), //
entry("top_level_domain", "com")
)
)
)
)
);
}
}
public void testUseRoot() throws Exception {
var processor = new RegisteredDomainProcessor(null, null, "domain", "", false);
IngestDocument document = TestIngestDocument.withDefaultVersion(Map.of("domain", "www.google.co.uk"));
processor.execute(document);
assertThat(
document.getSource(),
is(
Map.ofEntries(
entry("domain", "www.google.co.uk"),
entry("registered_domain", "google.co.uk"),
entry("top_level_domain", "co.uk"),
entry("subdomain", "www")
)
)
);
}
public void testError() throws Exception {
var processor = new RegisteredDomainProcessor(null, null, "domain", "", false);
{
IngestDocument document = TestIngestDocument.withDefaultVersion(Map.of("domain", "foo.bar.baz"));
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> processor.execute(document));
assertThat(e.getMessage(), is("unable to set domain information for document"));
assertThat(document.getSource(), is(Map.of("domain", "foo.bar.baz")));
}
{
IngestDocument document = TestIngestDocument.withDefaultVersion(Map.of("domain", "$"));
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> processor.execute(document));
assertThat(e.getMessage(), is("unable to set domain information for document"));
assertThat(document.getSource(), is(Map.of("domain", "$")));
}
}
public void testIgnoreMissing() throws Exception {
{
var processor = new RegisteredDomainProcessor(null, null, "domain", "", false);
IngestDocument document = TestIngestDocument.withDefaultVersion(Map.of());
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> processor.execute(document));
assertThat(e.getMessage(), is("field [domain] not present as part of path [domain]"));
assertThat(document.getSource(), is(anEmptyMap()));
}
{
var processor = new RegisteredDomainProcessor(null, null, "domain", "", true);
IngestDocument document = TestIngestDocument.withDefaultVersion(Collections.singletonMap("domain", null));
processor.execute(document);
assertThat(document.getSource(), is(Collections.singletonMap("domain", null)));
}
}
}
| RegisteredDomainProcessorTests |
java | spring-projects__spring-boot | buildpack/spring-boot-buildpack-platform/src/test/java/org/springframework/boot/buildpack/platform/io/DefaultOwnerTests.java | {
"start": 848,
"end": 1283
} | class ____ {
@Test
void getUidReturnsUid() {
DefaultOwner owner = new DefaultOwner(123, 456);
assertThat(owner.getUid()).isEqualTo(123);
}
@Test
void getGidReturnsGid() {
DefaultOwner owner = new DefaultOwner(123, 456);
assertThat(owner.getGid()).isEqualTo(456);
}
@Test
void toStringReturnsString() {
DefaultOwner owner = new DefaultOwner(123, 456);
assertThat(owner).hasToString("123/456");
}
}
| DefaultOwnerTests |
java | apache__commons-lang | src/test/java/org/apache/commons/lang3/exception/ContextedExceptionTest.java | {
"start": 1284,
"end": 4922
} | class ____ extends AbstractExceptionContextTest<ContextedException> {
@BeforeEach
@Override
public void setUp() throws Exception {
exceptionContext = new ContextedException(new Exception(TEST_MESSAGE));
super.setUp();
}
@Test
void testContextedException() {
exceptionContext = new ContextedException();
final String message = exceptionContext.getMessage();
final String trace = ExceptionUtils.getStackTrace(exceptionContext);
assertTrue(trace.contains("ContextedException"));
assertTrue(StringUtils.isEmpty(message));
}
@Test
void testContextedExceptionString() {
exceptionContext = new ContextedException(TEST_MESSAGE);
assertEquals(TEST_MESSAGE, exceptionContext.getMessage());
final String trace = ExceptionUtils.getStackTrace(exceptionContext);
assertTrue(trace.contains(TEST_MESSAGE));
}
@Test
void testContextedExceptionStringThrowable() {
exceptionContext = new ContextedException(TEST_MESSAGE_2, new Exception(TEST_MESSAGE));
final String message = exceptionContext.getMessage();
final String trace = ExceptionUtils.getStackTrace(exceptionContext);
assertTrue(trace.contains("ContextedException"));
assertTrue(trace.contains(TEST_MESSAGE));
assertTrue(trace.contains(TEST_MESSAGE_2));
assertTrue(message.contains(TEST_MESSAGE_2));
}
@Test
void testContextedExceptionStringThrowableContext() {
exceptionContext = new ContextedException(TEST_MESSAGE_2, new Exception(TEST_MESSAGE), new DefaultExceptionContext());
final String message = exceptionContext.getMessage();
final String trace = ExceptionUtils.getStackTrace(exceptionContext);
assertTrue(trace.contains("ContextedException"));
assertTrue(trace.contains(TEST_MESSAGE));
assertTrue(trace.contains(TEST_MESSAGE_2));
assertTrue(message.contains(TEST_MESSAGE_2));
}
@Test
void testContextedExceptionThrowable() {
exceptionContext = new ContextedException(new Exception(TEST_MESSAGE));
final String message = exceptionContext.getMessage();
final String trace = ExceptionUtils.getStackTrace(exceptionContext);
assertTrue(trace.contains("ContextedException"));
assertTrue(trace.contains(TEST_MESSAGE));
assertTrue(message.contains(TEST_MESSAGE));
}
@Test
void testNullException() {
assertEquals("", ExceptionUtils.getStackTrace(null), "Empty response.");
}
@Test
void testNullExceptionPassing() {
exceptionContext = new ContextedException(TEST_MESSAGE_2, new Exception(TEST_MESSAGE), null)
.addContextValue("test1", null)
.addContextValue("test2", "some value")
.addContextValue("test Date", new Date())
.addContextValue("test Nbr", Integer.valueOf(5))
.addContextValue("test Poorly written obj", new ObjectWithFaultyToString());
final String message = exceptionContext.getMessage();
assertNotNull(message);
}
@Test
void testRawMessage() {
assertEquals(Exception.class.getName() + ": " + TEST_MESSAGE, exceptionContext.getRawMessage());
exceptionContext = new ContextedException(TEST_MESSAGE_2, new Exception(TEST_MESSAGE), new DefaultExceptionContext());
assertEquals(TEST_MESSAGE_2, exceptionContext.getRawMessage());
exceptionContext = new ContextedException(null, new Exception(TEST_MESSAGE), new DefaultExceptionContext());
assertNull(exceptionContext.getRawMessage());
}
}
| ContextedExceptionTest |
java | elastic__elasticsearch | test/test-clusters/src/main/java/org/elasticsearch/test/cluster/util/Pair.java | {
"start": 555,
"end": 1209
} | class ____<L, R> {
public final L left;
public final R right;
private Pair(L left, R right) {
this.left = left;
this.right = right;
}
public static <L, R> Pair<L, R> of(L left, R right) {
return new Pair<>(left, right);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Pair<?, ?> pair = (Pair<?, ?>) o;
return Objects.equals(left, pair.left) && Objects.equals(right, pair.right);
}
@Override
public int hashCode() {
return Objects.hash(left, right);
}
}
| Pair |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java | {
"start": 2030,
"end": 3420
} | class ____<K, V> extends OutputFormat<K, V> {
private static final Logger LOG =
LoggerFactory.getLogger(FileOutputFormat.class);
/** Construct output file names so that, when an output directory listing is
* sorted lexicographically, positions correspond to output partitions.*/
private static final NumberFormat NUMBER_FORMAT = NumberFormat.getInstance();
protected static final String BASE_OUTPUT_NAME = "mapreduce.output.basename";
protected static final String PART = "part";
static {
NUMBER_FORMAT.setMinimumIntegerDigits(5);
NUMBER_FORMAT.setGroupingUsed(false);
}
private PathOutputCommitter committer = null;
/** Configuration option: should output be compressed? {@value}. */
public static final String COMPRESS =
"mapreduce.output.fileoutputformat.compress";
/** If compression is enabled, name of codec: {@value}. */
public static final String COMPRESS_CODEC =
"mapreduce.output.fileoutputformat.compress.codec";
/**
* Type of compression {@value}: NONE, RECORD, BLOCK.
* Generally only used in {@code SequenceFileOutputFormat}.
*/
public static final String COMPRESS_TYPE =
"mapreduce.output.fileoutputformat.compress.type";
/** Destination directory of work: {@value}. */
public static final String OUTDIR =
"mapreduce.output.fileoutputformat.outputdir";
@Deprecated
public | FileOutputFormat |
java | quarkusio__quarkus | integration-tests/kubernetes/quarkus-standard-way/src/test/java/io/quarkus/it/kubernetes/OpenshiftWithDockerBuildStrategyTest.java | {
"start": 645,
"end": 3180
} | class ____ {
@RegisterExtension
static final QuarkusProdModeTest config = new QuarkusProdModeTest()
.withApplicationRoot((jar) -> jar.addClasses(GreetingResource.class))
.setApplicationName("openshift-docker").setApplicationVersion("0.1-SNAPSHOT")
.withConfigurationResource("openshift-with-docker-build-strategy.properties")
.overrideConfigKey("quarkus.openshift.deployment-kind", "deployment-config")
.setForcedDependencies(List.of(Dependency.of("io.quarkus", "quarkus-openshift", Version.getVersion())));
@ProdBuildResults
private ProdModeTestResults prodModeTestResults;
@Test
public void assertGeneratedResources() throws IOException {
Path kubernetesDir = prodModeTestResults.getBuildDir().resolve("kubernetes");
assertThat(kubernetesDir).isDirectoryContaining(p -> p.getFileName().endsWith("openshift.json"))
.isDirectoryContaining(p -> p.getFileName().endsWith("openshift.yml"));
List<HasMetadata> openshiftList = DeserializationUtil.deserializeAsList(kubernetesDir.resolve("openshift.yml"));
//Assert that the container contains neither command nor arguments
assertThat(openshiftList).filteredOn(d -> "DeploymentConfig".equals(d.getKind())).singleElement().satisfies(d -> {
assertThat(d).isInstanceOfSatisfying(DeploymentConfig.class, dc -> {
assertThat(dc.getSpec().getTemplate().getSpec().getContainers()).singleElement().satisfies(c -> {
assertThat(c.getCommand()).isNullOrEmpty();
assertThat(c.getArgs()).isNullOrEmpty();
//We explicitly remove them when using the `docker build strategy`.
assertThat(c.getEnv()).extracting("name").doesNotContain("JAVA_APP_JAR");
});
});
});
assertThat(openshiftList).filteredOn(h -> "BuildConfig".equals(h.getKind())).singleElement().satisfies(h -> {
assertThat(h.getMetadata()).satisfies(m -> {
assertThat(m.getName()).isEqualTo("openshift-docker");
assertThat(m.getLabels().get("app.openshift.io/runtime")).isEqualTo("quarkus");
});
assertThat(h).isInstanceOfSatisfying(BuildConfig.class, bc -> {
assertThat(bc.getSpec().getSource()).satisfies(s -> {
assertThat(s.getDockerfile()).isNotNull();
});
});
});
}
}
| OpenshiftWithDockerBuildStrategyTest |
java | elastic__elasticsearch | x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Kurtosis.java | {
"start": 506,
"end": 1026
} | class ____ extends NumericAggregate implements MatrixStatsEnclosed {
public Kurtosis(Source source, Expression field) {
super(source, field);
}
@Override
protected NodeInfo<Kurtosis> info() {
return NodeInfo.create(this, Kurtosis::new, field());
}
@Override
public Kurtosis replaceChildren(List<Expression> newChildren) {
return new Kurtosis(source(), newChildren.get(0));
}
@Override
public String innerName() {
return "kurtosis";
}
}
| Kurtosis |
java | junit-team__junit5 | documentation/src/tools/java/org/junit/api/tools/ApiReportGenerator.java | {
"start": 1389,
"end": 2145
} | class ____ {
private static final Logger LOGGER = LoggerFactory.getLogger(ApiReportGenerator.class);
private static final String EOL = System.lineSeparator();
public static void main(String... args) {
// CAUTION: The output produced by this method is used to
// generate a table in the User Guide.
try (var scanResult = scanClasspath()) {
var apiReport = generateReport(scanResult);
// ApiReportWriter reportWriter = new MarkdownApiReportWriter(apiReport);
ApiReportWriter reportWriter = new AsciidocApiReportWriter(apiReport);
// ApiReportWriter reportWriter = new HtmlApiReportWriter(apiReport);
// reportWriter.printReportHeader(new PrintWriter(System.out, true));
// Print report for all Usage | ApiReportGenerator |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/typeutils/AbstractRowDataSerializer.java | {
"start": 1122,
"end": 1479
} | class ____<T extends RowData> extends PagedTypeSerializer<T> {
private static final long serialVersionUID = 1L;
/** Get the number of fields. */
public abstract int getArity();
/** Convert a {@link RowData} to a {@link BinaryRowData}. */
public abstract BinaryRowData toBinaryRow(T rowData) throws IOException;
}
| AbstractRowDataSerializer |
java | quarkusio__quarkus | devtools/cli/src/main/java/io/quarkus/cli/Version.java | {
"start": 448,
"end": 1227
} | class ____ implements CommandLine.IVersionProvider, Callable<Integer> {
private static String version;
@CommandLine.Mixin(name = "output")
OutputOptionMixin output;
@CommandLine.Mixin
HelpOption helpOption;
@CommandLine.ArgGroup(exclusive = false, validate = false)
protected PropertiesOptions propertiesOptions = new PropertiesOptions();
@CommandLine.Spec
CommandSpec spec;
@Override
public Integer call() throws Exception {
// Gather/interpolate the usual version information via IVersionProvider handling
output.printText(getVersion());
return CommandLine.ExitCode.OK;
}
@Override
public String[] getVersion() throws Exception {
return new String[] { clientVersion() };
}
}
| Version |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestServiceAuthorization.java | {
"start": 2085,
"end": 2152
} | interface ____ extends TestProtocol {};
private static | TestProtocol1 |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/impl/AcceptAllHeaderFilterStrategyTest.java | {
"start": 1179,
"end": 2051
} | class ____ extends ContextTestSupport {
@Test
public void testAcceptAll() {
HeaderFilterStrategy comp = new AcceptAllHeaderFilterStrategy();
Exchange exchange = new DefaultExchange(context);
exchange.getIn().setHeader("bar", 123);
exchange.getIn().setHeader("foo", "cheese");
exchange.getIn().setHeader("CamelVersion", "3.7");
exchange.getIn().setHeader("org.apache.camel.component.jetty.session", "true");
assertFalse(comp.applyFilterToExternalHeaders("bar", 123, exchange));
assertFalse(comp.applyFilterToExternalHeaders("foo", "cheese", exchange));
assertFalse(comp.applyFilterToExternalHeaders("CamelVersion", "3.7", exchange));
assertFalse(comp.applyFilterToExternalHeaders("org.apache.camel.component.jetty.session", "true", exchange));
}
}
| AcceptAllHeaderFilterStrategyTest |
java | google__auto | value/src/it/functional/src/test/java/com/google/auto/value/AutoValueJava8Test.java | {
"start": 14137,
"end": 15139
} | class ____ implements GenericListParent<String> {}
// We'd like AutoValue to realize that the effective type of `things()` in `StringList` is
// `List<@Nullable String>`. Unfortunately it doesn't, because Types.asMemberOf deletes
// annotations. The workaround that we have to restore them only works for top-level annotations,
// like the `@Nullable T` in `GenericParent`, but not like the `List<@Nullable T>` here.
@Test
public void testInheritedListGetterRemainsNullable() throws NoSuchMethodException {
StringList instance = new AutoValue_AutoValueJava8Test_StringList(ImmutableList.of());
Method getter = instance.getClass().getDeclaredMethod("things");
AnnotatedParameterizedType returnType =
(AnnotatedParameterizedType) getter.getAnnotatedReturnType();
assertThat(returnType.getAnnotatedActualTypeArguments()[0].getAnnotations())
.asList()
.doesNotContain(nullable());
// This should be .contains(nullable()).
}
public static | StringList |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/SequenceFileAsBinaryOutputFormat.java | {
"start": 3779,
"end": 3843
} | class ____ the {@link SequenceFile}
*
* @return the value | for |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/transaction/GetTransactionTest.java | {
"start": 562,
"end": 1350
} | class ____ {
@Test
public void testMultipleCallsReturnTheSameTransaction(EntityManagerFactoryScope scope) {
scope.inEntityManager(
entityManager -> {
EntityTransaction t = entityManager.getTransaction();
assertSame( t, entityManager.getTransaction() );
assertFalse( t.isActive() );
try {
t.begin();
assertSame( t, entityManager.getTransaction() );
assertTrue( t.isActive() );
t.commit();
}
catch (Exception e) {
if ( t.isActive() ) {
t.rollback();
}
throw e;
}
assertSame( t, entityManager.getTransaction() );
assertFalse( t.isActive() );
entityManager.close();
assertSame( t, entityManager.getTransaction() );
assertFalse( t.isActive() );
}
);
}
}
| GetTransactionTest |
java | apache__kafka | streams/integration-tests/src/test/java/org/apache/kafka/streams/integration/PurgeRepartitionTopicIntegrationTest.java | {
"start": 5121,
"end": 9774
} | class ____ implements TestCondition {
private final TopicSizeVerifier verifier;
RepartitionTopicVerified(final TopicSizeVerifier verifier) {
this.verifier = verifier;
}
@Override
public final boolean conditionMet() {
time.sleep(PURGE_INTERVAL_MS);
try {
final Collection<LogDirDescription> logDirInfo =
adminClient.describeLogDirs(Collections.singleton(0)).descriptions().get(0).get().values();
for (final LogDirDescription partitionInfo : logDirInfo) {
final ReplicaInfo replicaInfo =
partitionInfo.replicaInfos().get(new TopicPartition(REPARTITION_TOPIC, 0));
if (replicaInfo != null && verifier.verify(replicaInfo.size())) {
return true;
}
}
} catch (final Exception e) {
// swallow
}
return false;
}
}
@BeforeEach
public void setup() {
// create admin client for verification
final Properties adminConfig = new Properties();
adminConfig.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
adminClient = Admin.create(adminConfig);
final Properties streamsConfiguration = new Properties();
streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, APPLICATION_ID);
streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, PURGE_INTERVAL_MS);
streamsConfiguration.put(StreamsConfig.REPARTITION_PURGE_INTERVAL_MS_CONFIG, PURGE_INTERVAL_MS);
streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.Integer().getClass());
streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.Integer().getClass());
streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory(APPLICATION_ID).getPath());
streamsConfiguration.put(StreamsConfig.topicPrefix(TopicConfig.SEGMENT_MS_CONFIG), PURGE_INTERVAL_MS);
streamsConfiguration.put(StreamsConfig.topicPrefix(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG), PURGE_SEGMENT_BYTES);
streamsConfiguration.put(StreamsConfig.producerPrefix(ProducerConfig.BATCH_SIZE_CONFIG), PURGE_SEGMENT_BYTES / 2); // we cannot allow batch size larger than segment size
final StreamsBuilder builder = new StreamsBuilder();
builder.stream(INPUT_TOPIC)
.groupBy(MockMapper.selectKeyKeyValueMapper())
.count();
kafkaStreams = new KafkaStreams(builder.build(), streamsConfiguration, time);
}
@AfterEach
public void shutdown() {
if (adminClient != null) {
adminClient.close();
}
if (kafkaStreams != null) {
kafkaStreams.close(Duration.ofSeconds(30));
}
}
@Test
public void shouldRestoreState() throws Exception {
// produce some data to input topic
final List<KeyValue<Integer, Integer>> messages = new ArrayList<>();
for (int i = 0; i < 1000; i++) {
messages.add(new KeyValue<>(i, i));
}
IntegrationTestUtils.produceKeyValuesSynchronouslyWithTimestamp(INPUT_TOPIC,
messages,
TestUtils.producerConfig(CLUSTER.bootstrapServers(),
IntegerSerializer.class,
IntegerSerializer.class),
time.milliseconds());
kafkaStreams.start();
TestUtils.waitForCondition(new RepartitionTopicCreatedWithExpectedConfigs(), 60000,
"Repartition topic " + REPARTITION_TOPIC + " not created with the expected configs after 60000 ms.");
// wait until we received more than 1 segment of data, so that we can confirm the purge succeeds in next verification
TestUtils.waitForCondition(
new RepartitionTopicVerified(currentSize -> currentSize > PURGE_SEGMENT_BYTES),
60000,
"Repartition topic " + REPARTITION_TOPIC + " not received more than " + PURGE_SEGMENT_BYTES + "B of data after 60000 ms."
);
final long waitForPurgeMs = 60000;
TestUtils.waitForCondition(
new RepartitionTopicVerified(currentSize -> currentSize <= PURGE_SEGMENT_BYTES),
waitForPurgeMs,
"Repartition topic " + REPARTITION_TOPIC + " not purged data after " + waitForPurgeMs + " ms."
);
}
}
| RepartitionTopicVerified |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/builtin/java8time/mapper/CalendarToZonedDateTimeMapper.java | {
"start": 457,
"end": 661
} | interface ____ {
CalendarToZonedDateTimeMapper INSTANCE = Mappers.getMapper( CalendarToZonedDateTimeMapper.class );
ZonedDateTimeProperty map(CalendarProperty source);
}
| CalendarToZonedDateTimeMapper |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestCapacitySchedulerPlanFollower.java | {
"start": 3150,
"end": 8421
} | class ____ extends
TestSchedulerPlanFollowerBase {
private RMContext rmContext;
private RMContext spyRMContext;
private CapacitySchedulerContext csContext;
private CapacityScheduler cs;
@BeforeEach
public void setUp() throws Exception {
CapacityScheduler spyCs = new CapacityScheduler();
cs = spy(spyCs);
scheduler = cs;
rmContext = TestUtils.getMockRMContext();
spyRMContext = spy(rmContext);
ConcurrentMap<ApplicationId, RMApp> spyApps =
spy(new ConcurrentHashMap<ApplicationId, RMApp>());
RMApp rmApp = mock(RMApp.class);
RMAppAttempt rmAppAttempt = mock(RMAppAttempt.class);
when(rmApp.getRMAppAttempt(any()))
.thenReturn(rmAppAttempt);
when(rmApp.getCurrentAppAttempt()).thenReturn(rmAppAttempt);
Mockito.doReturn(rmApp)
.when(spyApps).get(ArgumentMatchers.<ApplicationId>any());
Mockito.doReturn(true)
.when(spyApps).containsKey(ArgumentMatchers.<ApplicationId>any());
when(spyRMContext.getRMApps()).thenReturn(spyApps);
when(spyRMContext.getScheduler()).thenReturn(scheduler);
CapacitySchedulerConfiguration csConf =
new CapacitySchedulerConfiguration();
ReservationSystemTestUtil.setupQueueConfiguration(csConf);
cs.setConf(csConf);
csContext = mock(CapacitySchedulerContext.class);
when(csContext.getConfiguration()).thenReturn(csConf);
when(csContext.getConf()).thenReturn(csConf);
when(csContext.getMinimumResourceCapability()).thenReturn(minAlloc);
when(csContext.getMaximumResourceCapability()).thenReturn(maxAlloc);
when(csContext.getClusterResource()).thenReturn(
Resources.createResource(100 * 16 * GB, 100 * 32));
when(scheduler.getClusterResource()).thenReturn(
Resources.createResource(125 * GB, 125));
when(csContext.getResourceCalculator()).thenReturn(
new DefaultResourceCalculator());
RMContainerTokenSecretManager containerTokenSecretManager =
new RMContainerTokenSecretManager(csConf);
containerTokenSecretManager.rollMasterKey();
when(csContext.getContainerTokenSecretManager()).thenReturn(
containerTokenSecretManager);
cs.setRMContext(spyRMContext);
cs.init(csConf);
cs.start();
setupPlanFollower();
}
private void setupPlanFollower() throws Exception {
mClock = mock(Clock.class);
mAgent = mock(ReservationAgent.class);
String reservationQ =
ReservationSystemTestUtil.getFullReservationQueueName();
QueuePath reservationQueuePath =
ReservationSystemTestUtil.getFullReservationQueuePath();
CapacitySchedulerConfiguration csConf = cs.getConfiguration();
csConf.setReservationWindow(reservationQueuePath, 20L);
csConf.setMaximumCapacity(reservationQueuePath, 40);
csConf.setAverageCapacity(reservationQueuePath, 20);
policy.init(reservationQ, csConf);
}
@Test
public void testWithMoveOnExpiry() throws PlanningException,
InterruptedException, AccessControlException {
// invoke plan follower test with move
testPlanFollower(true);
}
@Test
public void testWithKillOnExpiry() throws PlanningException,
InterruptedException, AccessControlException {
// invoke plan follower test with kill
testPlanFollower(false);
}
@Override
protected void verifyCapacity(Queue defQ) {
CSQueue csQueue = (CSQueue) defQ;
assertTrue(csQueue.getCapacity() > 0.9);
}
@Override
protected void checkDefaultQueueBeforePlanFollowerRun(){
Queue defQ = getDefaultQueue();
assertEquals(0, getNumberOfApplications(defQ));
assertNotNull(defQ);
}
@Override
protected Queue getDefaultQueue() {
return cs.getQueue("dedicated" + ReservationConstants.DEFAULT_QUEUE_SUFFIX);
}
@Override
protected int getNumberOfApplications(Queue queue) {
CSQueue csQueue = (CSQueue) queue;
int numberOfApplications = csQueue.getNumApplications();
return numberOfApplications;
}
@Override
protected CapacitySchedulerPlanFollower createPlanFollower() {
CapacitySchedulerPlanFollower planFollower =
new CapacitySchedulerPlanFollower();
planFollower.init(mClock, scheduler, Collections.singletonList(plan));
return planFollower;
}
@Override
protected void assertReservationQueueExists(ReservationId r) {
CSQueue q = cs.getQueue(r.toString());
assertNotNull(q);
}
@Override
protected void assertReservationQueueExists(ReservationId r2,
double expectedCapacity, double expectedMaxCapacity) {
CSQueue q = cs.getQueue(r2.toString());
assertNotNull(q);
assertEquals(expectedCapacity, q.getCapacity(), 0.01);
assertEquals(expectedMaxCapacity, q.getMaximumCapacity(), 1.0);
}
@Override
protected void assertReservationQueueDoesNotExist(ReservationId r2) {
CSQueue q2 = cs.getQueue(r2.toString());
assertNull(q2);
}
public static ApplicationACLsManager mockAppACLsManager() {
Configuration conf = new Configuration();
return new ApplicationACLsManager(conf);
}
@AfterEach
public void tearDown() throws Exception {
if (scheduler != null) {
cs.stop();
}
}
protected Queue getReservationQueue(String reservationId) {
return cs.getQueue(reservationId);
}
}
| TestCapacitySchedulerPlanFollower |
java | apache__camel | components/camel-kamelet/src/test/java/org/apache/camel/component/kamelet/KameletEipAggregateGroovyTest.java | {
"start": 2035,
"end": 2232
} | class ____ has the method with how to aggregate the messages
// the logic can of course be much more than just to append with comma
" | that |
java | apache__kafka | connect/runtime/src/main/java/org/apache/kafka/connect/storage/ConfigBackingStore.java | {
"start": 5735,
"end": 7358
} | interface ____ {
/**
* Invoked when a connector configuration has been removed
* @param connector name of the connector
*/
void onConnectorConfigRemove(String connector);
/**
* Invoked when a connector configuration has been updated.
* @param connector name of the connector
*/
void onConnectorConfigUpdate(String connector);
/**
* Invoked when task configs are updated.
* @param tasks all the tasks whose configs have been updated
*/
void onTaskConfigUpdate(Collection<ConnectorTaskId> tasks);
/**
* Invoked when the user has set a new target state (e.g. paused)
* @param connector name of the connector
*/
void onConnectorTargetStateChange(String connector);
/**
* Invoked when the leader has distributed a new session key
* @param sessionKey the {@link SessionKey session key}
*/
void onSessionKeyUpdate(SessionKey sessionKey);
/**
* Invoked when a connector and possibly its tasks have been requested to be restarted.
* @param restartRequest the {@link RestartRequest restart request}
*/
void onRestartRequest(RestartRequest restartRequest);
/**
* Invoked when a dynamic log level adjustment has been read
* @param namespace the namespace to adjust; never null
* @param level the level to set the namespace to; never null
*/
void onLoggingLevelUpdate(String namespace, String level);
}
}
| UpdateListener |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/Aws2RedshiftDataComponentBuilderFactory.java | {
"start": 1969,
"end": 16371
} | interface ____ extends ComponentBuilder<RedshiftData2Component> {
/**
* Component configuration.
*
* The option is a:
* <code>org.apache.camel.component.aws2.redshift.data.RedshiftData2Configuration</code> type.
*
* Group: producer
*
* @param configuration the value to set
* @return the dsl builder
*/
default Aws2RedshiftDataComponentBuilder configuration(org.apache.camel.component.aws2.redshift.data.RedshiftData2Configuration configuration) {
doSetProperty("configuration", configuration);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default Aws2RedshiftDataComponentBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* The operation to perform. It can be batchExecuteStatement,
* cancelStatement, describeStatement, describeTable, executeStatement,
* getStatementResult, listDatabases, listSchemas, listStatements or
* listTables.
*
* The option is a:
* <code>org.apache.camel.component.aws2.redshift.data.RedshiftData2Operations</code> type.
*
* Group: producer
*
* @param operation the value to set
* @return the dsl builder
*/
default Aws2RedshiftDataComponentBuilder operation(org.apache.camel.component.aws2.redshift.data.RedshiftData2Operations operation) {
doSetProperty("operation", operation);
return this;
}
/**
* Set the need for overriding the endpoint. This option needs to be
* used in combination with the uriEndpointOverride option.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param overrideEndpoint the value to set
* @return the dsl builder
*/
default Aws2RedshiftDataComponentBuilder overrideEndpoint(boolean overrideEndpoint) {
doSetProperty("overrideEndpoint", overrideEndpoint);
return this;
}
/**
* If we want to use a POJO request as body or not.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param pojoRequest the value to set
* @return the dsl builder
*/
default Aws2RedshiftDataComponentBuilder pojoRequest(boolean pojoRequest) {
doSetProperty("pojoRequest", pojoRequest);
return this;
}
/**
* If using a profile credentials provider, this parameter will set the
* profile name.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param profileCredentialsName the value to set
* @return the dsl builder
*/
default Aws2RedshiftDataComponentBuilder profileCredentialsName(java.lang.String profileCredentialsName) {
doSetProperty("profileCredentialsName", profileCredentialsName);
return this;
}
/**
* The region in which RedshiftData client needs to work. When using
* this parameter, the configuration will expect the lowercase name of
* the region (for example, ap-east-1) You'll need to use the name
* Region.EU_WEST_1.id().
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param region the value to set
* @return the dsl builder
*/
default Aws2RedshiftDataComponentBuilder region(java.lang.String region) {
doSetProperty("region", region);
return this;
}
/**
* If we want to trust all certificates in case of overriding the
* endpoint.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param trustAllCertificates the value to set
* @return the dsl builder
*/
default Aws2RedshiftDataComponentBuilder trustAllCertificates(boolean trustAllCertificates) {
doSetProperty("trustAllCertificates", trustAllCertificates);
return this;
}
/**
* Set the overriding uri endpoint. This option needs to be used in
* combination with overrideEndpoint option.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param uriEndpointOverride the value to set
* @return the dsl builder
*/
default Aws2RedshiftDataComponentBuilder uriEndpointOverride(java.lang.String uriEndpointOverride) {
doSetProperty("uriEndpointOverride", uriEndpointOverride);
return this;
}
/**
* Set whether the RedshiftData client should expect to load credentials
* through a default credentials provider or to expect static
* credentials to be passed in.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param useDefaultCredentialsProvider the value to set
* @return the dsl builder
*/
default Aws2RedshiftDataComponentBuilder useDefaultCredentialsProvider(boolean useDefaultCredentialsProvider) {
doSetProperty("useDefaultCredentialsProvider", useDefaultCredentialsProvider);
return this;
}
/**
* Set whether the RedshiftData client should expect to load credentials
* through a profile credentials provider.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param useProfileCredentialsProvider the value to set
* @return the dsl builder
*/
default Aws2RedshiftDataComponentBuilder useProfileCredentialsProvider(boolean useProfileCredentialsProvider) {
doSetProperty("useProfileCredentialsProvider", useProfileCredentialsProvider);
return this;
}
/**
* Whether autowiring is enabled. This is used for automatic autowiring
* options (the option must be marked as autowired) by looking up in the
* registry to find if there is a single instance of matching type,
* which then gets configured on the component. This can be used for
* automatic configuring JDBC data sources, JMS connection factories,
* AWS Clients, etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autowiredEnabled the value to set
* @return the dsl builder
*/
default Aws2RedshiftDataComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
doSetProperty("autowiredEnabled", autowiredEnabled);
return this;
}
/**
* To use an existing configured AwsRedshiftDataClient client.
*
* The option is a:
* <code>software.amazon.awssdk.services.redshiftdata.RedshiftDataClient</code> type.
*
* Group: advanced
*
* @param awsRedshiftDataClient the value to set
* @return the dsl builder
*/
default Aws2RedshiftDataComponentBuilder awsRedshiftDataClient(software.amazon.awssdk.services.redshiftdata.RedshiftDataClient awsRedshiftDataClient) {
doSetProperty("awsRedshiftDataClient", awsRedshiftDataClient);
return this;
}
/**
* Used for enabling or disabling all consumer based health checks from
* this component.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: health
*
* @param healthCheckConsumerEnabled the value to set
* @return the dsl builder
*/
default Aws2RedshiftDataComponentBuilder healthCheckConsumerEnabled(boolean healthCheckConsumerEnabled) {
doSetProperty("healthCheckConsumerEnabled", healthCheckConsumerEnabled);
return this;
}
/**
* Used for enabling or disabling all producer based health checks from
* this component. Notice: Camel has by default disabled all producer
* based health-checks. You can turn on producer checks globally by
* setting camel.health.producersEnabled=true.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: health
*
* @param healthCheckProducerEnabled the value to set
* @return the dsl builder
*/
default Aws2RedshiftDataComponentBuilder healthCheckProducerEnabled(boolean healthCheckProducerEnabled) {
doSetProperty("healthCheckProducerEnabled", healthCheckProducerEnabled);
return this;
}
/**
* To define a proxy host when instantiating the RedshiftData client.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: proxy
*
* @param proxyHost the value to set
* @return the dsl builder
*/
default Aws2RedshiftDataComponentBuilder proxyHost(java.lang.String proxyHost) {
doSetProperty("proxyHost", proxyHost);
return this;
}
/**
* To define a proxy port when instantiating the RedshiftData client.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: proxy
*
* @param proxyPort the value to set
* @return the dsl builder
*/
default Aws2RedshiftDataComponentBuilder proxyPort(java.lang.Integer proxyPort) {
doSetProperty("proxyPort", proxyPort);
return this;
}
/**
* To define a proxy protocol when instantiating the RedshiftData
* client.
*
* The option is a:
* <code>software.amazon.awssdk.core.Protocol</code> type.
*
* Default: HTTPS
* Group: proxy
*
* @param proxyProtocol the value to set
* @return the dsl builder
*/
default Aws2RedshiftDataComponentBuilder proxyProtocol(software.amazon.awssdk.core.Protocol proxyProtocol) {
doSetProperty("proxyProtocol", proxyProtocol);
return this;
}
/**
* Amazon AWS Access Key.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param accessKey the value to set
* @return the dsl builder
*/
default Aws2RedshiftDataComponentBuilder accessKey(java.lang.String accessKey) {
doSetProperty("accessKey", accessKey);
return this;
}
/**
* Amazon AWS Secret Key.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param secretKey the value to set
* @return the dsl builder
*/
default Aws2RedshiftDataComponentBuilder secretKey(java.lang.String secretKey) {
doSetProperty("secretKey", secretKey);
return this;
}
/**
* Amazon AWS Session Token used when the user needs to assume an IAM
* role.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param sessionToken the value to set
* @return the dsl builder
*/
default Aws2RedshiftDataComponentBuilder sessionToken(java.lang.String sessionToken) {
doSetProperty("sessionToken", sessionToken);
return this;
}
/**
* Set whether the Redshift client should expect to use Session
* Credentials. This is useful in a situation in which the user needs to
* assume an IAM role for doing operations in Redshift.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useSessionCredentials the value to set
* @return the dsl builder
*/
default Aws2RedshiftDataComponentBuilder useSessionCredentials(boolean useSessionCredentials) {
doSetProperty("useSessionCredentials", useSessionCredentials);
return this;
}
}
| Aws2RedshiftDataComponentBuilder |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/util/jdbc/ResultSetBase.java | {
"start": 840,
"end": 30356
} | class ____ implements ResultSet {
protected boolean closed;
protected boolean wasNull;
private SQLWarning warning;
private String cursorName;
private int fetchSize;
private int fetchDirection;
protected Statement statement;
protected ResultSetMetaData metaData;
public ResultSetBase(Statement statement) {
super();
this.statement = statement;
}
@Override
public boolean isClosed() throws SQLException {
return closed;
}
@Override
public void updateNString(int columnIndex, String x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateNString(String columnLabel, String x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateNClob(int columnIndex, NClob x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateNClob(String columnLabel, NClob x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public NClob getNClob(int columnIndex) throws SQLException {
return (NClob) getObject(columnIndex);
}
@Override
public NClob getNClob(String columnLabel) throws SQLException {
return (NClob) getObject(columnLabel);
}
@Override
public SQLXML getSQLXML(int columnIndex) throws SQLException {
return (SQLXML) getObject(columnIndex);
}
@Override
public SQLXML getSQLXML(String columnLabel) throws SQLException {
return (SQLXML) getObject(columnLabel);
}
@Override
public void updateSQLXML(int columnIndex, SQLXML x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateSQLXML(String columnLabel, SQLXML x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public String getNString(int columnIndex) throws SQLException {
return (String) getObject(columnIndex);
}
@Override
public String getNString(String columnLabel) throws SQLException {
return (String) getObject(columnLabel);
}
@Override
public Reader getNCharacterStream(int columnIndex) throws SQLException {
return (Reader) getObject(columnIndex);
}
@Override
public Reader getNCharacterStream(String columnLabel) throws SQLException {
return (Reader) getObject(columnLabel);
}
@Override
public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateNCharacterStream(String columnLabel, Reader x, long length) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateCharacterStream(String columnLabel, Reader x, long length) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateBlob(int columnIndex, InputStream x, long length) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateBlob(String columnLabel, InputStream x, long length) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateClob(int columnIndex, Reader x, long length) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateClob(String columnLabel, Reader x, long length) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateNClob(int columnIndex, Reader x, long length) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateNClob(String columnLabel, Reader x, long length) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateNCharacterStream(String columnLabel, Reader x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateCharacterStream(int columnIndex, Reader x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateCharacterStream(String columnLabel, Reader x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateBlob(int columnIndex, InputStream x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateBlob(String columnLabel, InputStream x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateClob(int columnIndex, Reader x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateClob(String columnLabel, Reader x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateNClob(int columnIndex, Reader x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateNClob(String columnLabel, Reader x) throws SQLException {
updateObject(columnLabel, x);
}
public <T> T getObject(int columnIndex, Class<T> type) throws SQLException {
throw new SQLFeatureNotSupportedException();
}
public <T> T getObject(String columnLabel, Class<T> type) throws SQLException {
throw new SQLFeatureNotSupportedException();
}
@SuppressWarnings("unchecked")
@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
if (iface == null) {
return null;
}
if (iface.isInstance(this)) {
return (T) this;
}
return null;
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
if (iface == null) {
return false;
}
return iface.isInstance(this);
}
@Override
public void close() throws SQLException {
this.closed = true;
}
@Override
public boolean wasNull() throws SQLException {
if (closed) {
throw new SQLException();
}
return wasNull;
}
public Object getObjectInternal(int columnIndex) throws SQLException {
if (this.getMetaData() != null) {
String columnName = this.getMetaData().getColumnName(columnIndex);
return getObject(columnName);
}
return null;
}
@Override
public Object getObject(int columnIndex) throws SQLException {
Object obj = getObjectInternal(columnIndex);
wasNull = (obj == null);
return obj;
}
@Override
public Object getObject(String columnLabel) throws SQLException {
return getObject(findColumn(columnLabel));
}
@Override
public int findColumn(String columnLabel) throws SQLException {
return Integer.parseInt(columnLabel);
}
@Override
public Reader getCharacterStream(int columnIndex) throws SQLException {
return (Reader) getObject(columnIndex);
}
@Override
public Reader getCharacterStream(String columnLabel) throws SQLException {
return (Reader) getObject(columnLabel);
}
@Override
public BigDecimal getBigDecimal(int columnIndex) throws SQLException {
return (BigDecimal) getObject(columnIndex);
}
@Override
public BigDecimal getBigDecimal(String columnLabel) throws SQLException {
return getBigDecimal(findColumn(columnLabel));
}
@Override
public void clearWarnings() throws SQLException {
if (closed) {
throw new SQLException();
}
warning = null;
}
public void setWarning(SQLWarning warning) {
this.warning = warning;
}
@Override
public String getCursorName() throws SQLException {
if (closed) {
throw new SQLException();
}
return cursorName;
}
public void setCursorName(String cursorName) {
this.cursorName = cursorName;
}
@Override
public SQLWarning getWarnings() throws SQLException {
if (closed) {
throw new SQLException();
}
return warning;
}
@Override
public void setFetchDirection(int direction) throws SQLException {
if (closed) {
throw new SQLException();
}
this.fetchDirection = direction;
}
@Override
public int getFetchDirection() throws SQLException {
if (closed) {
throw new SQLException();
}
return fetchDirection;
}
@Override
public void setFetchSize(int rows) throws SQLException {
if (closed) {
throw new SQLException();
}
this.fetchSize = rows;
}
@Override
public int getFetchSize() throws SQLException {
if (closed) {
throw new SQLException();
}
return fetchSize;
}
@Override
public boolean rowUpdated() throws SQLException {
if (closed) {
throw new SQLException();
}
return false;
}
@Override
public boolean rowInserted() throws SQLException {
if (closed) {
throw new SQLException();
}
return false;
}
@Override
public boolean rowDeleted() throws SQLException {
if (closed) {
throw new SQLException();
}
return false;
}
@Override
public void updateNull(int columnIndex) throws SQLException {
updateObject(columnIndex, null);
}
@Override
public void updateBoolean(int columnIndex, boolean x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateByte(int columnIndex, byte x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateShort(int columnIndex, short x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateInt(int columnIndex, int x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateLong(int columnIndex, long x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateFloat(int columnIndex, float x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateDouble(int columnIndex, double x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateString(int columnIndex, String x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateBytes(int columnIndex, byte[] x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateDate(int columnIndex, Date x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateTime(int columnIndex, Time x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateNull(String columnLabel) throws SQLException {
updateObject(columnLabel, null);
}
@Override
public void updateBoolean(String columnLabel, boolean x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateByte(String columnLabel, byte x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateShort(String columnLabel, short x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateInt(String columnLabel, int x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateLong(String columnLabel, long x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateFloat(String columnLabel, float x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateDouble(String columnLabel, double x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateString(String columnLabel, String x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateBytes(String columnLabel, byte[] x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateDate(String columnLabel, Date x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateTime(String columnLabel, Time x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException {
updateObject(columnLabel, reader);
}
@Override
public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateObject(String columnLabel, Object x) throws SQLException {
updateObject(findColumn(columnLabel), x);
}
@Override
public void insertRow() throws SQLException {
if (closed) {
throw new SQLException("resultSet closed");
}
}
@Override
public void updateRow() throws SQLException {
if (closed) {
throw new SQLException("resultSet closed");
}
}
@Override
public void deleteRow() throws SQLException {
if (closed) {
throw new SQLException("resultSet closed");
}
}
@Override
public void refreshRow() throws SQLException {
if (closed) {
throw new SQLException("resultSet closed");
}
}
@Override
public void cancelRowUpdates() throws SQLException {
if (closed) {
throw new SQLException("resultSet closed");
}
}
@Override
public void moveToInsertRow() throws SQLException {
if (closed) {
throw new SQLException("resultSet closed");
}
}
@Override
public void moveToCurrentRow() throws SQLException {
if (closed) {
throw new SQLException("resultSet closed");
}
}
@Override
public Statement getStatement() throws SQLException {
if (closed) {
throw new SQLException("resultSet closed");
}
return statement;
}
@Override
public Object getObject(int columnIndex, Map<String, Class<?>> map) throws SQLException {
return getObject(columnIndex);
}
@Override
public Ref getRef(int columnIndex) throws SQLException {
return (Ref) getObject(columnIndex);
}
@Override
public Blob getBlob(int columnIndex) throws SQLException {
return (Blob) getObject(columnIndex);
}
@Override
public Clob getClob(int columnIndex) throws SQLException {
return (Clob) getObject(columnIndex);
}
@Override
public Array getArray(int columnIndex) throws SQLException {
return (Array) getObject(columnIndex);
}
@Override
public Object getObject(String columnLabel, Map<String, Class<?>> map) throws SQLException {
return getObject(columnLabel);
}
@Override
public Ref getRef(String columnLabel) throws SQLException {
return (Ref) getObject(columnLabel);
}
@Override
public Blob getBlob(String columnLabel) throws SQLException {
return (Blob) getObject(columnLabel);
}
@Override
public Clob getClob(String columnLabel) throws SQLException {
return (Clob) getObject(columnLabel);
}
@Override
public Array getArray(String columnLabel) throws SQLException {
return (Array) getObject(columnLabel);
}
@Override
public Date getDate(int columnIndex, Calendar cal) throws SQLException {
return (Date) getObject(columnIndex);
}
@Override
public Date getDate(String columnLabel, Calendar cal) throws SQLException {
return (Date) getObject(columnLabel);
}
@Override
public Time getTime(int columnIndex, Calendar cal) throws SQLException {
return (Time) getObject(columnIndex);
}
@Override
public Time getTime(String columnLabel, Calendar cal) throws SQLException {
return (Time) getObject(columnLabel);
}
@Override
public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException {
return (Timestamp) getObject(columnIndex);
}
@Override
public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException {
return (Timestamp) getObject(columnLabel);
}
@Override
public URL getURL(int columnIndex) throws SQLException {
return (URL) getObject(columnIndex);
}
@Override
public URL getURL(String columnLabel) throws SQLException {
return (URL) getObject(columnLabel);
}
@Override
public void updateRef(int columnIndex, Ref x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateRef(String columnLabel, Ref x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateBlob(int columnIndex, Blob x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateBlob(String columnLabel, Blob x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateClob(int columnIndex, Clob x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateClob(String columnLabel, Clob x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public void updateArray(int columnIndex, Array x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateArray(String columnLabel, Array x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public RowId getRowId(int columnIndex) throws SQLException {
return (RowId) getObject(columnIndex);
}
@Override
public RowId getRowId(String columnLabel) throws SQLException {
return (RowId) getObject(columnLabel);
}
@Override
public void updateRowId(int columnIndex, RowId x) throws SQLException {
updateObject(columnIndex, x);
}
@Override
public void updateRowId(String columnLabel, RowId x) throws SQLException {
updateObject(columnLabel, x);
}
@Override
public int getHoldability() throws SQLException {
if (closed) {
throw new SQLException("resultSet closed");
}
return 0;
}
@Override
public String getString(int columnIndex) throws SQLException {
return (String) getObject(columnIndex);
}
@Override
public boolean getBoolean(int columnIndex) throws SQLException {
Object obj = getObject(columnIndex);
if (obj == null) {
return false;
}
return (Boolean) obj;
}
@Override
public byte getByte(int columnIndex) throws SQLException {
Number number = (Number) getObject(columnIndex);
if (number == null) {
return 0;
}
return number.byteValue();
}
@Override
public short getShort(int columnIndex) throws SQLException {
Number number = (Number) getObject(columnIndex);
if (number == null) {
return 0;
}
return number.shortValue();
}
@Override
public int getInt(int columnIndex) throws SQLException {
Number number = (Number) getObject(columnIndex);
if (number == null) {
return 0;
}
return number.intValue();
}
@Override
public long getLong(int columnIndex) throws SQLException {
Number number = (Number) getObject(columnIndex);
if (number == null) {
return 0;
}
return number.longValue();
}
@Override
public float getFloat(int columnIndex) throws SQLException {
Number number = (Number) getObject(columnIndex);
if (number == null) {
return 0;
}
return number.floatValue();
}
@Override
public double getDouble(int columnIndex) throws SQLException {
Number number = (Number) getObject(columnIndex);
if (number == null) {
return 0;
}
return number.doubleValue();
}
@Override
public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException {
return (BigDecimal) getObject(columnIndex);
}
@Override
public byte[] getBytes(int columnIndex) throws SQLException {
return (byte[]) getObject(columnIndex);
}
@Override
public Date getDate(int columnIndex) throws SQLException {
return (Date) getObject(columnIndex);
}
@Override
public Time getTime(int columnIndex) throws SQLException {
return (Time) getObject(columnIndex);
}
@Override
public Timestamp getTimestamp(int columnIndex) throws SQLException {
return (Timestamp) getObject(columnIndex);
}
@Override
public InputStream getAsciiStream(int columnIndex) throws SQLException {
return (InputStream) getObject(columnIndex);
}
@Override
public InputStream getUnicodeStream(int columnIndex) throws SQLException {
return (InputStream) getObject(columnIndex);
}
@Override
public InputStream getBinaryStream(int columnIndex) throws SQLException {
return (InputStream) getObject(columnIndex);
}
@Override
public String getString(String columnLabel) throws SQLException {
return getString(findColumn(columnLabel));
}
@Override
public boolean getBoolean(String columnLabel) throws SQLException {
return getBoolean(findColumn(columnLabel));
}
@Override
public byte getByte(String columnLabel) throws SQLException {
return getByte(findColumn(columnLabel));
}
@Override
public short getShort(String columnLabel) throws SQLException {
return getShort(findColumn(columnLabel));
}
@Override
public int getInt(String columnLabel) throws SQLException {
return getInt(findColumn(columnLabel));
}
@Override
public long getLong(String columnLabel) throws SQLException {
return getLong(findColumn(columnLabel));
}
@Override
public float getFloat(String columnLabel) throws SQLException {
return getFloat(findColumn(columnLabel));
}
@Override
public double getDouble(String columnLabel) throws SQLException {
return getDouble(findColumn(columnLabel));
}
@Override
public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException {
return getBigDecimal(findColumn(columnLabel), scale);
}
@Override
public byte[] getBytes(String columnLabel) throws SQLException {
return getBytes(findColumn(columnLabel));
}
@Override
public Date getDate(String columnLabel) throws SQLException {
return getDate(findColumn(columnLabel));
}
@Override
public Time getTime(String columnLabel) throws SQLException {
return getTime(findColumn(columnLabel));
}
@Override
public Timestamp getTimestamp(String columnLabel) throws SQLException {
return getTimestamp(findColumn(columnLabel));
}
@Override
public InputStream getAsciiStream(String columnLabel) throws SQLException {
return getAsciiStream(findColumn(columnLabel));
}
@Override
public InputStream getUnicodeStream(String columnLabel) throws SQLException {
return getUnicodeStream(findColumn(columnLabel));
}
@Override
public InputStream getBinaryStream(String columnLabel) throws SQLException {
return getBinaryStream(findColumn(columnLabel));
}
@Override
public boolean isBeforeFirst() throws SQLException {
if (closed) {
throw new SQLException();
}
return false;
}
@Override
public boolean isAfterLast() throws SQLException {
if (closed) {
throw new SQLException();
}
return false;
}
@Override
public boolean isFirst() throws SQLException {
if (closed) {
throw new SQLException();
}
return false;
}
@Override
public boolean isLast() throws SQLException {
if (closed) {
throw new SQLException();
}
return false;
}
@Override
public void beforeFirst() throws SQLException {
if (closed) {
throw new SQLException();
}
}
@Override
public void afterLast() throws SQLException {
if (closed) {
throw new SQLException();
}
}
@Override
public boolean first() throws SQLException {
if (closed) {
throw new SQLException();
}
return false;
}
@Override
public boolean last() throws SQLException {
if (closed) {
throw new SQLException();
}
return false;
}
@Override
public int getRow() throws SQLException {
if (closed) {
throw new SQLException();
}
return 0;
}
@Override
public boolean absolute(int row) throws SQLException {
if (closed) {
throw new SQLException();
}
return false;
}
@Override
public boolean relative(int rows) throws SQLException {
if (closed) {
throw new SQLException();
}
return false;
}
@Override
public int getType() throws SQLException {
if (closed) {
throw new SQLException();
}
return 0;
}
@Override
public int getConcurrency() throws SQLException {
if (closed) {
throw new SQLException();
}
return 0;
}
@Override
public ResultSetMetaData getMetaData() throws SQLException {
if (closed) {
throw new SQLException("resultSet closed");
}
return metaData;
}
}
| ResultSetBase |
java | junit-team__junit5 | junit-platform-suite-api/src/main/java/org/junit/platform/suite/api/ConfigurationParametersResources.java | {
"start": 1296,
"end": 1517
} | interface ____ {
/**
* An array of one or more {@link ConfigurationParametersResource @ConfigurationParameterResource}
* declarations.
*/
ConfigurationParametersResource[] value();
}
| ConfigurationParametersResources |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/ast/statement/SQLBuildTableStatement.java | {
"start": 869,
"end": 1892
} | class ____ extends SQLStatementImpl {
private SQLName table;
private SQLIntegerExpr version;
private boolean withSplit;
private boolean force;
@Override
protected void accept0(SQLASTVisitor visitor) {
if (visitor.visit(this)) {
acceptChild(visitor, this.table);
acceptChild(visitor, this.version);
}
visitor.endVisit(this);
}
public SQLName getTable() {
return table;
}
public void setTable(SQLName table) {
this.table = table;
}
public SQLIntegerExpr getVersion() {
return version;
}
public void setVersion(SQLIntegerExpr version) {
this.version = version;
}
public boolean isWithSplit() {
return withSplit;
}
public void setWithSplit(boolean withSplit) {
this.withSplit = withSplit;
}
public boolean isForce() {
return force;
}
public void setForce(boolean force) {
this.force = force;
}
}
| SQLBuildTableStatement |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/MixedMutabilityReturnTypeTest.java | {
"start": 16043,
"end": 16610
} | class ____ {
List<Object> foo() {
if (hashCode() > 0) {
return Collections.emptyList();
}
var ints = new ArrayList<>();
ints.add(1);
return ints;
}
}
""")
.addOutputLines(
"Test.java",
"""
import com.google.common.collect.ImmutableList;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
final | Test |
java | quarkusio__quarkus | extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/TemplateAnalysisTest.java | {
"start": 488,
"end": 2622
} | class ____ {
@Test
public void testSortedParamDeclarations() {
List<ParameterDeclaration> sorted = TemplateAnalysis.getSortedParameterDeclarations(List.of(paramDeclaration("foo", -1),
paramDeclaration("bar", -1), paramDeclaration("qux", 10), paramDeclaration("baz", 1)));
assertEquals(4, sorted.size());
assertEquals("baz", sorted.get(0).getKey());
assertEquals("qux", sorted.get(1).getKey());
assertTrue(sorted.get(2).getKey().equals("foo") || sorted.get(2).getKey().equals("bar"));
assertTrue(sorted.get(3).getKey().equals("foo") || sorted.get(3).getKey().equals("bar"));
}
ParameterDeclaration paramDeclaration(String key, int line) {
return new ParameterDeclaration() {
@Override
public String getTypeInfo() {
return null;
}
@Override
public Origin getOrigin() {
return new Origin() {
@Override
public Optional<Variant> getVariant() {
return Optional.empty();
}
@Override
public String getTemplateId() {
return null;
}
@Override
public String getTemplateGeneratedId() {
return null;
}
@Override
public int getLineCharacterStart() {
return 0;
}
@Override
public int getLineCharacterEnd() {
return 0;
}
@Override
public int getLine() {
return line;
}
};
}
@Override
public String getKey() {
return key;
}
@Override
public Expression getDefaultValue() {
return null;
}
};
}
}
| TemplateAnalysisTest |
java | apache__rocketmq | store/src/main/java/org/apache/rocketmq/store/index/IndexService.java | {
"start": 1634,
"end": 14411
} | class ____ {
private static final Logger LOGGER = LoggerFactory.getLogger(LoggerName.STORE_LOGGER_NAME);
/**
* Maximum times to attempt index file creation.
*/
private static final int MAX_TRY_IDX_CREATE = 3;
private final DefaultMessageStore defaultMessageStore;
private final int hashSlotNum;
private final int indexNum;
private final String storePath;
private final ArrayList<IndexFile> indexFileList = new ArrayList<>();
private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
public IndexService(final DefaultMessageStore store) {
this.defaultMessageStore = store;
this.hashSlotNum = store.getMessageStoreConfig().getMaxHashSlotNum();
this.indexNum = store.getMessageStoreConfig().getMaxIndexNum();
this.storePath =
StorePathConfigHelper.getStorePathIndex(defaultMessageStore.getMessageStoreConfig().getStorePathRootDir());
}
public boolean load(final boolean lastExitOK) {
File dir = new File(this.storePath);
File[] files = dir.listFiles();
if (files != null) {
// ascending order
Arrays.sort(files);
for (File file : files) {
try {
IndexFile f = new IndexFile(file.getPath(), this.hashSlotNum, this.indexNum, 0, 0);
f.load();
if (!lastExitOK) {
if (f.getEndTimestamp() > this.defaultMessageStore.getStoreCheckpoint()
.getIndexMsgTimestamp()) {
f.destroy(0);
continue;
}
}
LOGGER.info("load index file OK, " + f.getFileName());
this.indexFileList.add(f);
} catch (IOException e) {
LOGGER.error("load file {} error", file, e);
return false;
} catch (NumberFormatException e) {
LOGGER.error("load file {} error", file, e);
}
}
}
return true;
}
public long getTotalSize() {
if (indexFileList.isEmpty()) {
return 0;
}
return (long) indexFileList.get(0).getFileSize() * indexFileList.size();
}
public void deleteExpiredFile(long offset) {
Object[] files = null;
try {
this.readWriteLock.readLock().lock();
if (this.indexFileList.isEmpty()) {
return;
}
long endPhyOffset = this.indexFileList.get(0).getEndPhyOffset();
if (endPhyOffset < offset) {
files = this.indexFileList.toArray();
}
} catch (Exception e) {
LOGGER.error("destroy exception", e);
} finally {
this.readWriteLock.readLock().unlock();
}
if (files != null) {
List<IndexFile> fileList = new ArrayList<>();
for (int i = 0; i < (files.length - 1); i++) {
IndexFile f = (IndexFile) files[i];
if (f.getEndPhyOffset() < offset) {
fileList.add(f);
} else {
break;
}
}
this.deleteExpiredFile(fileList);
}
}
private void deleteExpiredFile(List<IndexFile> files) {
if (!files.isEmpty()) {
try {
this.readWriteLock.writeLock().lock();
for (IndexFile file : files) {
boolean destroyed = file.destroy(3000);
destroyed = destroyed && this.indexFileList.remove(file);
if (!destroyed) {
LOGGER.error("deleteExpiredFile remove failed.");
break;
}
}
} catch (Exception e) {
LOGGER.error("deleteExpiredFile has exception.", e);
} finally {
this.readWriteLock.writeLock().unlock();
}
}
}
public void destroy() {
try {
this.readWriteLock.writeLock().lock();
for (IndexFile f : this.indexFileList) {
f.destroy(1000 * 3);
}
this.indexFileList.clear();
} catch (Exception e) {
LOGGER.error("destroy exception", e);
} finally {
this.readWriteLock.writeLock().unlock();
}
}
public QueryOffsetResult queryOffset(String topic, String key, int maxNum, long begin, long end) {
long indexLastUpdateTimestamp = 0;
long indexLastUpdatePhyoffset = 0;
maxNum = Math.min(maxNum, this.defaultMessageStore.getMessageStoreConfig().getMaxMsgsNumBatch());
List<Long> phyOffsets = new ArrayList<>(maxNum);
try {
this.readWriteLock.readLock().lock();
if (!this.indexFileList.isEmpty()) {
for (int i = this.indexFileList.size(); i > 0; i--) {
IndexFile f = this.indexFileList.get(i - 1);
boolean lastFile = i == this.indexFileList.size();
if (lastFile) {
indexLastUpdateTimestamp = f.getEndTimestamp();
indexLastUpdatePhyoffset = f.getEndPhyOffset();
}
if (f.isTimeMatched(begin, end)) {
f.selectPhyOffset(phyOffsets, buildKey(topic, key), maxNum, begin, end);
}
if (f.getBeginTimestamp() < begin) {
break;
}
if (phyOffsets.size() >= maxNum) {
break;
}
}
}
} catch (Exception e) {
LOGGER.error("queryMsg exception", e);
} finally {
this.readWriteLock.readLock().unlock();
}
return new QueryOffsetResult(phyOffsets, indexLastUpdateTimestamp, indexLastUpdatePhyoffset);
}
private String buildKey(final String topic, final String key) {
return topic + "#" + key;
}
public void buildIndex(DispatchRequest req) {
IndexFile indexFile = retryGetAndCreateIndexFile();
if (indexFile != null) {
long endPhyOffset = indexFile.getEndPhyOffset();
DispatchRequest msg = req;
String topic = msg.getTopic();
String keys = msg.getKeys();
if (msg.getCommitLogOffset() < endPhyOffset) {
return;
}
final int tranType = MessageSysFlag.getTransactionValue(msg.getSysFlag());
switch (tranType) {
case MessageSysFlag.TRANSACTION_NOT_TYPE:
case MessageSysFlag.TRANSACTION_PREPARED_TYPE:
case MessageSysFlag.TRANSACTION_COMMIT_TYPE:
break;
case MessageSysFlag.TRANSACTION_ROLLBACK_TYPE:
return;
}
if (req.getUniqKey() != null) {
indexFile = putKey(indexFile, msg, buildKey(topic, req.getUniqKey()));
if (indexFile == null) {
LOGGER.error("putKey error commitlog {} uniqkey {}", req.getCommitLogOffset(), req.getUniqKey());
return;
}
}
if (keys != null && keys.length() > 0) {
String[] keyset = keys.split(MessageConst.KEY_SEPARATOR);
for (int i = 0; i < keyset.length; i++) {
String key = keyset[i];
if (key.length() > 0) {
indexFile = putKey(indexFile, msg, buildKey(topic, key));
if (indexFile == null) {
LOGGER.error("putKey error commitlog {} uniqkey {}", req.getCommitLogOffset(), req.getUniqKey());
return;
}
}
}
}
} else {
LOGGER.error("build index error, stop building index");
}
}
private IndexFile putKey(IndexFile indexFile, DispatchRequest msg, String idxKey) {
for (boolean ok = indexFile.putKey(idxKey, msg.getCommitLogOffset(), msg.getStoreTimestamp()); !ok; ) {
LOGGER.warn("Index file [" + indexFile.getFileName() + "] is full, trying to create another one");
indexFile = retryGetAndCreateIndexFile();
if (null == indexFile) {
return null;
}
ok = indexFile.putKey(idxKey, msg.getCommitLogOffset(), msg.getStoreTimestamp());
}
return indexFile;
}
/**
* Retries to get or create index file.
*
* @return {@link IndexFile} or null on failure.
*/
public IndexFile retryGetAndCreateIndexFile() {
IndexFile indexFile = null;
for (int times = 0; null == indexFile && times < MAX_TRY_IDX_CREATE; times++) {
indexFile = this.getAndCreateLastIndexFile();
if (null != indexFile) {
break;
}
try {
LOGGER.info("Tried to create index file " + times + " times");
Thread.sleep(1000);
} catch (InterruptedException e) {
LOGGER.error("Interrupted", e);
}
}
if (null == indexFile) {
this.defaultMessageStore.getRunningFlags().makeIndexFileError();
LOGGER.error("Mark index file cannot build flag");
}
return indexFile;
}
public IndexFile getAndCreateLastIndexFile() {
IndexFile indexFile = null;
IndexFile prevIndexFile = null;
long lastUpdateEndPhyOffset = 0;
long lastUpdateIndexTimestamp = 0;
{
this.readWriteLock.readLock().lock();
if (!this.indexFileList.isEmpty()) {
IndexFile tmp = this.indexFileList.get(this.indexFileList.size() - 1);
if (!tmp.isWriteFull()) {
indexFile = tmp;
} else {
lastUpdateEndPhyOffset = tmp.getEndPhyOffset();
lastUpdateIndexTimestamp = tmp.getEndTimestamp();
prevIndexFile = tmp;
}
}
this.readWriteLock.readLock().unlock();
}
if (indexFile == null) {
try {
String fileName =
this.storePath + File.separator
+ UtilAll.timeMillisToHumanString(System.currentTimeMillis());
indexFile =
new IndexFile(fileName, this.hashSlotNum, this.indexNum, lastUpdateEndPhyOffset,
lastUpdateIndexTimestamp);
this.readWriteLock.writeLock().lock();
this.indexFileList.add(indexFile);
} catch (Exception e) {
LOGGER.error("getLastIndexFile exception ", e);
} finally {
this.readWriteLock.writeLock().unlock();
}
if (indexFile != null) {
final IndexFile flushThisFile = prevIndexFile;
Thread flushThread = new Thread(new AbstractBrokerRunnable(defaultMessageStore.getBrokerConfig()) {
@Override
public void run0() {
IndexService.this.flush(flushThisFile);
}
}, "FlushIndexFileThread");
flushThread.setDaemon(true);
flushThread.start();
}
}
return indexFile;
}
public void flush(final IndexFile f) {
if (null == f) {
return;
}
long indexMsgTimestamp = 0;
if (f.isWriteFull()) {
indexMsgTimestamp = f.getEndTimestamp();
}
f.flush();
if (indexMsgTimestamp > 0) {
this.defaultMessageStore.getStoreCheckpoint().setIndexMsgTimestamp(indexMsgTimestamp);
this.defaultMessageStore.getStoreCheckpoint().flush();
}
}
public void start() {
}
public void shutdown() {
try {
this.readWriteLock.writeLock().lock();
for (IndexFile f : this.indexFileList) {
try {
f.shutdown();
} catch (Exception e) {
LOGGER.error("shutdown " + f.getFileName() + " exception", e);
}
}
this.indexFileList.clear();
} catch (Exception e) {
LOGGER.error("shutdown exception", e);
} finally {
this.readWriteLock.writeLock().unlock();
}
}
}
| IndexService |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java | {
"start": 3120,
"end": 30947
} | class ____ {
private static final int NUM_DIRS_IN_LOG = 200000;
protected static int NUM_NNS = 3;
protected MiniDFSCluster cluster;
protected NameNode[] nns = new NameNode[NUM_NNS];
protected FileSystem fs;
private final Random random = new Random();
protected File tmpOivImgDir;
private static final Logger LOG = LoggerFactory.getLogger(TestStandbyCheckpoints.class);
@SuppressWarnings("rawtypes")
@BeforeEach
public void setupCluster() throws Exception {
Configuration conf = setupCommonConfig();
// Dial down the retention of extra edits and checkpoints. This is to
// help catch regressions of HDFS-4238 (SBN should not purge shared edits)
conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
int retryCount = 0;
while (true) {
try {
int basePort = 10060 + random.nextInt(100) * 2;
MiniDFSNNTopology topology = new MiniDFSNNTopology()
.addNameservice(new MiniDFSNNTopology.NSConf("ns1")
.addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
.addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1))
.addNN(new MiniDFSNNTopology.NNConf("nn3").setHttpPort(basePort + 2)));
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(topology)
.numDataNodes(1)
.build();
cluster.waitActive();
setNNs();
fs = HATestUtil.configureFailoverFs(cluster, conf);
cluster.transitionToActive(0);
++retryCount;
break;
} catch (BindException e) {
LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
+ retryCount + " times");
}
}
}
protected void setNNs(){
for (int i = 0; i < NUM_NNS; i++) {
nns[i] = cluster.getNameNode(i);
}
}
protected Configuration setupCommonConfig() {
tmpOivImgDir = GenericTestUtils.getTestDir("TestStandbyCheckpoints");
tmpOivImgDir.mkdirs();
Configuration conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5);
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
conf.set(DFSConfigKeys.DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY,
tmpOivImgDir.getAbsolutePath());
conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
SlowCodec.class.getCanonicalName());
CompressionCodecFactory.setCodecClasses(conf,
ImmutableList.<Class>of(SlowCodec.class));
return conf;
}
@AfterEach
public void shutdownCluster() throws IOException {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
if (tmpOivImgDir != null) {
FileUtil.fullyDelete(tmpOivImgDir);
}
}
@Test
@Timeout(value = 300)
public void testSBNCheckpoints() throws Exception {
JournalSet standbyJournalSet = NameNodeAdapterMockitoUtil.spyOnJournalSet(nns[1]);
doEdits(0, 10);
HATestUtil.waitForStandbyToCatchUp(nns[0], nns[1]);
// Once the standby catches up, it should notice that it needs to
// do a checkpoint and save one to its local directories.
HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
if (tmpOivImgDir.list().length > 0) {
return true;
} else {
return false;
}
}
}, 1000, 60000);
// It should have saved the oiv image too.
assertEquals(1, tmpOivImgDir.list().length, "One file is expected");
// It should also upload it back to the active.
HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12));
// The standby should never try to purge edit logs on shared storage.
Mockito.verify(standbyJournalSet, Mockito.never()).
purgeLogsOlderThan(Mockito.anyLong());
}
@Test
public void testNewDirInitAfterCheckpointing() throws Exception {
File hdfsDir = new File(PathUtils.getTestDir(TestStandbyCheckpoints.class),
"testNewDirInitAfterCheckpointing");
File nameDir = new File(hdfsDir, "name1");
assert nameDir.mkdirs();
// Restart nn0 with an additional name dir.
String existingDir = cluster.getConfiguration(0).
get(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
cluster.getConfiguration(0).set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
existingDir + "," + Util.fileAsURI(nameDir).toString());
cluster.restartNameNode(0);
nns[0] = cluster.getNameNode(0);
cluster.transitionToActive(0);
// "current" is created, but current/VERSION isn't.
File currDir = new File(nameDir, "current");
File versionFile = new File(currDir, "VERSION");
assert currDir.exists();
assert !versionFile.exists();
// Trigger a checkpointing and upload.
doEdits(0, 10);
HATestUtil.waitForStandbyToCatchUp(nns[0], nns[1]);
// The version file will be created if a checkpoint is uploaded.
// Wait for it to happen up to 10 seconds.
for (int i = 0; i < 20; i++) {
if (versionFile.exists()) {
break;
}
Thread.sleep(500);
}
// VERSION must have been created.
assert versionFile.exists();
}
/**
* Test for the case when both of the NNs in the cluster are
* in the standby state, and thus are both creating checkpoints
* and uploading them to each other.
* In this circumstance, they should receive the error from the
* other node indicating that the other node already has a
* checkpoint for the given txid, but this should not cause
* an abort, etc.
*/
@Test
@Timeout(value = 300)
public void testBothNodesInStandbyState() throws Exception {
doEdits(0, 10);
cluster.transitionToStandby(0);
// Transitioning to standby closed the edit log on the active,
// so the standby will catch up. Then, both will be in standby mode
// with enough uncheckpointed txns to cause a checkpoint, and they
// will each try to take a checkpoint and upload to each other.
HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));
HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12));
assertEquals(12, nns[0].getNamesystem().getFSImage().getMostRecentCheckpointTxId());
assertEquals(12, nns[1].getNamesystem().getFSImage().getMostRecentCheckpointTxId());
List<File> dirs = Lists.newArrayList();
dirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0));
dirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 1));
FSImageTestUtil.assertParallelFilesAreIdentical(dirs, ImmutableSet.<String>of());
}
/**
* Test for the case of when there are observer NameNodes, Standby node is
* able to upload fsImage to Observer node as well.
*/
@Test
@Timeout(value = 300)
public void testStandbyAndObserverState() throws Exception {
// Transition 2 to observer
cluster.transitionToObserver(2);
doEdits(0, 10);
// After a rollEditLog, Standby(nn1) 's next checkpoint would be
// ahead of observer(nn2).
nns[0].getRpcServer().rollEditLog();
// After standby creating a checkpoint, it will try to push the image to
// active and all observer, updating it's own txid to the most recent.
HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));
HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12));
HATestUtil.waitForCheckpoint(cluster, 2, ImmutableList.of(12));
assertEquals(12, nns[2].getNamesystem().getFSImage().getMostRecentCheckpointTxId());
assertEquals(12, nns[1].getNamesystem().getFSImage().getMostRecentCheckpointTxId());
List<File> dirs = Lists.newArrayList();
// observer and standby both have this same image.
dirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 2));
dirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 1));
FSImageTestUtil.assertParallelFilesAreIdentical(dirs, ImmutableSet.of());
// Restore 2 back to standby
cluster.transitionToStandby(2);
}
/**
* Tests that a null FSImage is handled gracefully by the ImageServlet.
* If putImage is called while a NameNode is still starting up, the FSImage
* may not have been initialized yet. See HDFS-15290.
*/
@Test
@Timeout(value = 30)
public void testCheckpointBeforeNameNodeInitializationIsComplete()
throws Exception {
final LogVerificationAppender appender = new LogVerificationAppender();
final org.apache.log4j.Logger logger = org.apache.log4j.Logger
.getRootLogger();
logger.addAppender(appender);
// Transition 2 to observer
cluster.transitionToObserver(2);
doEdits(0, 10);
// After a rollEditLog, Standby(nn1)'s next checkpoint would be
// ahead of observer(nn2).
nns[0].getRpcServer().rollEditLog();
NameNode nn2 = nns[2];
FSImage nnFSImage = NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, null);
// After standby creating a checkpoint, it will try to push the image to
// active and all observer, updating it's own txid to the most recent.
HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));
HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12));
NameNodeAdapter.getAndSetFSImageInHttpServer(nn2, nnFSImage);
cluster.transitionToStandby(2);
logger.removeAppender(appender);
for (LoggingEvent event : appender.getLog()) {
String message = event.getRenderedMessage();
if (message.contains("PutImage failed") &&
message.contains("FSImage has not been set in the NameNode.")) {
//Logs have the expected exception.
return;
}
}
fail("Expected exception not present in logs.");
}
/**
* Test for the case when the SBN is configured to checkpoint based
* on a time period, but no transactions are happening on the
* active. Thus, it would want to save a second checkpoint at the
* same txid, which is a no-op. This test makes sure this doesn't
* cause any problem.
*/
@Test
@Timeout(value = 300)
public void testCheckpointWhenNoNewTransactionsHappened()
throws Exception {
// Checkpoint as fast as we can, in a tight loop.
cluster.getConfiguration(1).setInt(
DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 0);
cluster.restartNameNode(1);
nns[1] = cluster.getNameNode(1);
FSImage spyImage1 = NameNodeAdapterMockitoUtil.spyOnFsImage(nns[1]);
// We shouldn't save any checkpoints at txid=0
Thread.sleep(1000);
Mockito.verify(spyImage1, Mockito.never())
.saveNamespace(any());
// Roll the primary and wait for the standby to catch up
HATestUtil.waitForStandbyToCatchUp(nns[0], nns[1]);
Thread.sleep(2000);
// We should make exactly one checkpoint at this new txid.
Mockito.verify(spyImage1, Mockito.times(1)).saveNamespace(
any(), Mockito.eq(NameNodeFile.IMAGE), any());
}
/**
* Test cancellation of ongoing checkpoints when failover happens
* mid-checkpoint.
*/
@Test
@Timeout(value = 120)
public void testCheckpointCancellation() throws Exception {
cluster.transitionToStandby(0);
// Create an edit log in the shared edits dir with a lot
// of mkdirs operations. This is solely so that the image is
// large enough to take a non-trivial amount of time to load.
// (only ~15MB)
URI sharedUri = cluster.getSharedEditsDir(0, 1);
File sharedDir = new File(sharedUri.getPath(), "current");
File tmpDir = new File(MiniDFSCluster.getBaseDirectory(),
"testCheckpointCancellation-tmp");
FSNamesystem fsn = cluster.getNamesystem(0);
FSImageTestUtil.createAbortedLogWithMkdirs(tmpDir, NUM_DIRS_IN_LOG, 3,
fsn.getFSDirectory().getLastInodeId() + 1);
String fname = NNStorage.getInProgressEditsFileName(3);
new File(tmpDir, fname).renameTo(new File(sharedDir, fname));
// Checkpoint as fast as we can, in a tight loop.
cluster.getConfiguration(1).setInt(
DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 0);
cluster.restartNameNode(1);
nns[1] = cluster.getNameNode(1);
cluster.transitionToActive(0);
boolean canceledOne = false;
for (int i = 0; i < 10 && !canceledOne; i++) {
doEdits(i*10, i*10 + 10);
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
cluster.transitionToStandby(1);
cluster.transitionToActive(0);
canceledOne = StandbyCheckpointer.getCanceledCount() > 0;
}
assertTrue(canceledOne);
}
/**
* Test cancellation of ongoing checkpoints when failover happens
* mid-checkpoint during image upload from standby to active NN.
*/
@Test
@Timeout(value = 60)
public void testCheckpointCancellationDuringUpload() throws Exception {
// Set dfs.namenode.checkpoint.txns differently on the first NN to avoid it
// doing checkpoint when it becomes a standby
cluster.getConfiguration(0).setInt(
DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1000);
// don't compress, we want a big image
for (int i = 0; i < NUM_NNS; i++) {
cluster.getConfiguration(i).setBoolean(
DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false);
}
// Throttle SBN upload to make it hang during upload to ANN
for (int i = 1; i < NUM_NNS; i++) {
cluster.getConfiguration(i).setLong(
DFSConfigKeys.DFS_IMAGE_TRANSFER_RATE_KEY, 100);
}
for (int i = 0; i < NUM_NNS; i++) {
cluster.restartNameNode(i);
}
// update references to each of the nns
setNNs();
cluster.transitionToActive(0);
doEdits(0, 100);
for (int i = 1; i < NUM_NNS; i++) {
HATestUtil.waitForStandbyToCatchUp(nns[0], nns[i]);
HATestUtil.waitForCheckpoint(cluster, i, ImmutableList.of(104));
}
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
// Wait to make sure background TransferFsImageUpload thread was cancelled.
// This needs to be done before the next test in the suite starts, so that a
// file descriptor is not held open during the next cluster init.
cluster.shutdown();
cluster = null;
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
ThreadInfo[] threads = threadBean.getThreadInfo(
threadBean.getAllThreadIds(), 1);
for (ThreadInfo thread: threads) {
if (thread.getThreadName().startsWith("TransferFsImageUpload")) {
return false;
}
}
return true;
}
}, 1000, 30000);
// Assert that former active did not accept the canceled checkpoint file.
assertEquals(0, nns[0].getFSImage().getMostRecentCheckpointTxId());
}
/**
* Test standby namenode upload fsiamge to multiple other namenodes in parallel, in the
* cluster with observer namenodes.
*/
@Test
@Timeout(value = 300)
public void testCheckpointParallelUpload() throws Exception {
// Set dfs.namenode.checkpoint.txns differently on the first NN to avoid it
// doing checkpoint when it becomes a standby
cluster.getConfiguration(0).setInt(
DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1000);
// don't compress, we want a big image
for (int i = 0; i < NUM_NNS; i++) {
cluster.getConfiguration(i).setBoolean(
DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false);
}
// Throttle SBN upload to make it hang during upload to ANN, and enable parallel upload fsimage.
for (int i = 1; i < NUM_NNS; i++) {
cluster.getConfiguration(i).setLong(
DFSConfigKeys.DFS_IMAGE_TRANSFER_RATE_KEY, 100);
cluster.getConfiguration(i).setBoolean(
DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PARALLEL_UPLOAD_ENABLED_KEY, true);
}
for (int i = 0; i < NUM_NNS; i++) {
cluster.restartNameNode(i);
}
// update references to each of the nns
setNNs();
cluster.transitionToActive(0);
doEdits(0, 100);
for (int i = 1; i < NUM_NNS; i++) {
HATestUtil.waitForStandbyToCatchUp(nns[0], nns[i]);
HATestUtil.waitForCheckpoint(cluster, i, ImmutableList.of(104));
}
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
int transferThreadCount = 0;
ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
ThreadInfo[] threads = threadBean.getThreadInfo(
threadBean.getAllThreadIds(), 1);
for (ThreadInfo thread: threads) {
if (thread.getThreadName().startsWith("TransferFsImageUpload")) {
transferThreadCount++;
}
}
return transferThreadCount == NUM_NNS - 1;
}
}, 1000, 30000);
}
/**
* Make sure that clients will receive StandbyExceptions even when a
* checkpoint is in progress on the SBN, and therefore the StandbyCheckpointer
* thread will have FSNS lock. Regression test for HDFS-4591.
*/
@Test
@Timeout(value = 300)
public void testStandbyExceptionThrownDuringCheckpoint() throws Exception {
// Set it up so that we know when the SBN checkpoint starts and ends.
FSImage spyImage1 = NameNodeAdapterMockitoUtil.spyOnFsImage(nns[1]);
DelayAnswer answerer = new DelayAnswer(LOG);
Mockito.doAnswer(answerer).when(spyImage1)
.saveNamespace(any(FSNamesystem.class),
Mockito.eq(NameNodeFile.IMAGE), any(Canceler.class));
// Perform some edits and wait for a checkpoint to start on the SBN.
doEdits(0, 1000);
nns[0].getRpcServer().rollEditLog();
answerer.waitForCall();
assertTrue(answerer.getFireCount() == 1 && answerer.getResultCount() == 0,
"SBN is not performing checkpoint but it should be.");
// Make sure that the lock has actually been taken by the checkpointing
// thread.
ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
try {
// Perform an RPC to the SBN and make sure it throws a StandbyException.
nns[1].getRpcServer().getFileInfo("/");
fail("Should have thrown StandbyException, but instead succeeded.");
} catch (StandbyException se) {
GenericTestUtils.assertExceptionContains("is not supported", se);
}
// Make sure new incremental block reports are processed during
// checkpointing on the SBN.
assertEquals(0, cluster.getNamesystem(1).getPendingDataNodeMessageCount());
doCreate();
Thread.sleep(1000);
assertTrue(cluster.getNamesystem(1).getPendingDataNodeMessageCount() > 0);
// Make sure that the checkpoint is still going on, implying that the client
// RPC to the SBN happened during the checkpoint.
assertTrue(answerer.getFireCount() == 1 && answerer.getResultCount() == 0,
"SBN should have still been checkpointing.");
answerer.proceed();
answerer.waitForResult();
assertTrue(answerer.getFireCount() == 1 && answerer.getResultCount() == 1,
"SBN should have finished checkpointing.");
}
@Test
@Timeout(value = 300)
public void testReadsAllowedDuringCheckpoint() throws Exception {
// Set it up so that we know when the SBN checkpoint starts and ends.
FSImage spyImage1 = NameNodeAdapterMockitoUtil.spyOnFsImage(nns[1]);
DelayAnswer answerer = new DelayAnswer(LOG);
Mockito.doAnswer(answerer).when(spyImage1)
.saveNamespace(any(FSNamesystem.class),
any(NameNodeFile.class),
any(Canceler.class));
// Perform some edits and wait for a checkpoint to start on the SBN.
doEdits(0, 1000);
nns[0].getRpcServer().rollEditLog();
answerer.waitForCall();
assertTrue(answerer.getFireCount() == 1 && answerer.getResultCount() == 0,
"SBN is not performing checkpoint but it should be.");
// Make sure that the lock has actually been taken by the checkpointing
// thread.
ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
// Perform an RPC that needs to take the write lock.
SubjectInheritingThread t = new SubjectInheritingThread() {
@Override
public void work() {
try {
nns[1].getRpcServer().restoreFailedStorage("false");
} catch (IOException e) {
e.printStackTrace();
}
}
};
t.start();
// Make sure that our thread is waiting for the lock.
ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
assertFalse(nns[1].getNamesystem().getFsLockForTests().hasQueuedThreads());
assertFalse(nns[1].getNamesystem().getFsLockForTests().isWriteLocked());
assertTrue(nns[1].getNamesystem().getCpLockForTests().hasQueuedThreads());
// Get /jmx of the standby NN web UI, which will cause the FSNS read lock to
// be taken.
String pageContents = DFSTestUtil.urlGet(new URL("http://" +
nns[1].getHttpAddress().getHostName() + ":" +
nns[1].getHttpAddress().getPort() + "/jmx"));
assertTrue(pageContents.contains("NumLiveDataNodes"));
// Make sure that the checkpoint is still going on, implying that the client
// RPC to the SBN happened during the checkpoint.
assertTrue(answerer.getFireCount() == 1 && answerer.getResultCount() == 0,
"SBN should have still been checkpointing.");
answerer.proceed();
answerer.waitForResult();
assertTrue(answerer.getFireCount() == 1 && answerer.getResultCount() == 1,
"SBN should have finished checkpointing.");
t.join();
}
/**
* Test for the case standby NNs can upload FSImage to ANN after
* become non-primary standby NN. HDFS-9787
*/
@Test
@Timeout(value = 300)
public void testNonPrimarySBNUploadFSImage() throws Exception {
// Shutdown all standby NNs.
for (int i = 1; i < NUM_NNS; i++) {
cluster.shutdownNameNode(i);
// Checkpoint as fast as we can, in a tight loop.
cluster.getConfiguration(i).setInt(
DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
}
doEdits(0, 10);
cluster.transitionToStandby(0);
// Standby NNs do checkpoint without active NN available.
for (int i = 1; i < NUM_NNS; i++) {
cluster.restartNameNode(i, false);
}
cluster.waitClusterUp();
for (int i = 0; i < NUM_NNS; i++) {
// Once the standby catches up, it should do a checkpoint
// and save to local directories.
HATestUtil.waitForCheckpoint(cluster, i, ImmutableList.of(12));
}
cluster.transitionToActive(0);
// Wait for 2 seconds to expire last upload time.
Thread.sleep(2000);
doEdits(11, 20);
nns[0].getRpcServer().rollEditLog();
// One of standby NNs should also upload it back to the active.
HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(23));
}
/**
* Test that checkpointing is still successful even if an issue
* was encountered while writing the legacy OIV image.
*/
@Test
@Timeout(value = 300)
public void testCheckpointSucceedsWithLegacyOIVException() throws Exception {
// Delete the OIV image dir to cause an IOException while saving
FileUtil.fullyDelete(tmpOivImgDir);
doEdits(0, 10);
HATestUtil.waitForStandbyToCatchUp(nns[0], nns[1]);
// Once the standby catches up, it should notice that it needs to
// do a checkpoint and save one to its local directories.
HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));
// It should also upload it back to the active.
HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12));
}
/**
* Test that lastCheckpointTime is correctly updated at each checkpoint.
*/
@Test
@Timeout(value = 300)
public void testLastCheckpointTime() throws Exception {
for (int i = 1; i < NUM_NNS; i++) {
cluster.shutdownNameNode(i);
// Make true checkpoint for DFS_NAMENODE_CHECKPOINT_PERIOD_KEY
cluster.getConfiguration(i).setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 3);
cluster.getConfiguration(i).setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1000);
}
doEdits(0, 10);
cluster.transitionToStandby(0);
// Standby NNs do checkpoint without active NN available.
for (int i = 1; i < NUM_NNS; i++) {
cluster.restartNameNode(i, false);
}
cluster.waitClusterUp();
setNNs();
for (int i = 0; i < NUM_NNS; i++) {
// Once the standby catches up, it should do a checkpoint
// and save to local directories.
HATestUtil.waitForCheckpoint(cluster, i, ImmutableList.of(12));
}
long snnCheckpointTime1 = nns[1].getNamesystem().getStandbyLastCheckpointTime();
long annCheckpointTime1 = nns[0].getNamesystem().getLastCheckpointTime();
cluster.transitionToActive(0);
cluster.transitionToObserver(2);
doEdits(11, 20);
nns[0].getRpcServer().rollEditLog();
HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(23));
long snnCheckpointTime2 = nns[1].getNamesystem().getStandbyLastCheckpointTime();
long annCheckpointTime2 = nns[0].getNamesystem().getLastCheckpointTime();
// Make sure that both standby and active NNs' lastCheckpointTime intervals are larger
// than 3 DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY.
assertTrue(snnCheckpointTime2 - snnCheckpointTime1 >= 3000
&& annCheckpointTime2 - annCheckpointTime1 >= 3000);
}
private void doEdits(int start, int stop) throws IOException {
for (int i = start; i < stop; i++) {
Path p = new Path("/test" + i);
fs.mkdirs(p);
}
}
private void doCreate() throws IOException {
Path p = new Path("/testFile");
fs.delete(p, false);
FSDataOutputStream out = fs.create(p, (short)1);
out.write(42);
out.close();
}
/**
* Test checkpoint still succeeds when no more than half of the fsimages upload failed.
*/
@Test
@Timeout(value = 300)
public void testPutFsimagePartFailed() throws Exception {
for (int i = 1; i < NUM_NNS; i++) {
cluster.shutdownNameNode(i);
// Make true checkpoint for DFS_NAMENODE_CHECKPOINT_PERIOD_KEY
cluster.getConfiguration(i).setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 3);
cluster.getConfiguration(i).setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1000);
}
doEdits(0, 10);
cluster.transitionToStandby(0);
for (int i = 1; i < NUM_NNS; i++) {
cluster.restartNameNode(i, false);
}
cluster.waitClusterUp();
setNNs();
for (int i = 0; i < NUM_NNS; i++) {
// Once the standby catches up, it should do a checkpoint
// and save to local directories.
HATestUtil.waitForCheckpoint(cluster, i, ImmutableList.of(12));
}
long snnCheckpointTime1 = nns[1].getNamesystem().getStandbyLastCheckpointTime();
cluster.transitionToActive(0);
cluster.transitionToObserver(2);
cluster.shutdownNameNode(2);
doEdits(11, 20);
nns[0].getRpcServer().rollEditLog();
HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(23));
long snnCheckpointTime2 = nns[1].getNamesystem().getStandbyLastCheckpointTime();
// Make sure that standby namenode checkpoint success and update the lastCheckpointTime
// even though it send fsimage to nn2 failed because nn2 is shut down.
assertTrue(snnCheckpointTime2 > snnCheckpointTime1);
}
/**
* A codec which just slows down the saving of the image significantly
* by sleeping a few milliseconds on every write. This makes it easy to
* catch the standby in the middle of saving a checkpoint.
*/
public static | TestStandbyCheckpoints |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/bytecode/internal/bytebuddy/EnhancerWildFlyNamesTest.java | {
"start": 757,
"end": 2162
} | class ____ {
@Test
@JiraKey( value = "HHH-12545" )
public void test() {
Enhancer enhancer = createByteBuddyEnhancer();
String internalName = SimpleEntity.class.getName().replace( '.', '/' );
String resourceName = internalName + ".class";
byte[] buffer = new byte[0];
try {
buffer = readResource( resourceName );
}
catch (IOException e) {
Assertions.fail( "Should not have an IOException here" );
}
byte[] enhanced = enhancer.enhance( internalName, buffer );
Assertions.assertNotNull( enhanced, "This is null when there have been swallowed exceptions during enhancement. Check Logs!" );
}
private byte[] readResource(String resourceName) throws IOException {
final int BUF_SIZE = 256;
byte[] buffer = new byte[BUF_SIZE];
ByteArrayOutputStream os = new ByteArrayOutputStream();
int readSize = 0;
try ( InputStream inputStream = this.getClass().getClassLoader().getResourceAsStream( resourceName ) ) {
while ( ( readSize = inputStream.read( buffer ) ) != -1 ) {
os.write( buffer, 0, readSize );
}
os.flush();
os.close();
}
return os.toByteArray();
}
private Enhancer createByteBuddyEnhancer() {
ByteBuddyState bytebuddy = new ByteBuddyState();
DefaultEnhancementContext enhancementContext = new DefaultEnhancementContext();
EnhancerImpl impl = new EnhancerImpl( enhancementContext, bytebuddy );
return impl;
}
}
| EnhancerWildFlyNamesTest |
java | apache__flink | flink-core/src/main/java/org/apache/flink/types/Either.java | {
"start": 1445,
"end": 2682
} | class ____<L, R> {
/** Create a Left value of Either */
public static <L, R> Either<L, R> Left(L value) {
return new Left<L, R>(value);
}
/** Create a Right value of Either */
public static <L, R> Either<L, R> Right(R value) {
return new Right<L, R>(value);
}
/**
* Retrieve the Left value of Either.
*
* @return the Left value
* @throws IllegalStateException if called on a Right
*/
public abstract L left() throws IllegalStateException;
/**
* Retrieve the Right value of Either.
*
* @return the Right value
* @throws IllegalStateException if called on a Left
*/
public abstract R right() throws IllegalStateException;
/**
* @return true if this is a Left value, false if this is a Right value
*/
public final boolean isLeft() {
return getClass() == Left.class;
}
/**
* @return true if this is a Right value, false if this is a Left value
*/
public final boolean isRight() {
return getClass() == Right.class;
}
/**
* A left value of {@link Either}
*
* @param <L> the type of Left
* @param <R> the type of Right
*/
public static | Either |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/dynamic/ExecutableCommand.java | {
"start": 213,
"end": 591
} | interface ____ {
/**
* Executes the {@link ExecutableCommand} with the given parameters.
*
* @param parameters
* @return
*/
Object execute(Object[] parameters) throws ExecutionException, InterruptedException;
/**
* Returns the {@link CommandMethod}.
*
* @return
*/
CommandMethod getCommandMethod();
}
| ExecutableCommand |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/GitEndpointBuilderFactory.java | {
"start": 21410,
"end": 29228
} | interface ____
extends
EndpointConsumerBuilder {
default GitEndpointConsumerBuilder basic() {
return (GitEndpointConsumerBuilder) this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedGitEndpointConsumerBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedGitEndpointConsumerBuilder bridgeErrorHandler(String bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedGitEndpointConsumerBuilder exceptionHandler(org.apache.camel.spi.ExceptionHandler exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedGitEndpointConsumerBuilder exceptionHandler(String exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option is a: <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedGitEndpointConsumerBuilder exchangePattern(org.apache.camel.ExchangePattern exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option will be converted to a
* <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedGitEndpointConsumerBuilder exchangePattern(String exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* A pluggable org.apache.camel.PollingConsumerPollingStrategy allowing
* you to provide your custom implementation to control error handling
* usually occurred during the poll operation before an Exchange have
* been created and being routed in Camel.
*
* The option is a:
* <code>org.apache.camel.spi.PollingConsumerPollStrategy</code> type.
*
* Group: consumer (advanced)
*
* @param pollStrategy the value to set
* @return the dsl builder
*/
default AdvancedGitEndpointConsumerBuilder pollStrategy(org.apache.camel.spi.PollingConsumerPollStrategy pollStrategy) {
doSetProperty("pollStrategy", pollStrategy);
return this;
}
/**
* A pluggable org.apache.camel.PollingConsumerPollingStrategy allowing
* you to provide your custom implementation to control error handling
* usually occurred during the poll operation before an Exchange have
* been created and being routed in Camel.
*
* The option will be converted to a
* <code>org.apache.camel.spi.PollingConsumerPollStrategy</code> type.
*
* Group: consumer (advanced)
*
* @param pollStrategy the value to set
* @return the dsl builder
*/
default AdvancedGitEndpointConsumerBuilder pollStrategy(String pollStrategy) {
doSetProperty("pollStrategy", pollStrategy);
return this;
}
/**
* A String with path to a .gitconfig file.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: advanced
*
* @param gitConfigFile the value to set
* @return the dsl builder
*/
default AdvancedGitEndpointConsumerBuilder gitConfigFile(String gitConfigFile) {
doSetProperty("gitConfigFile", gitConfigFile);
return this;
}
}
/**
* Builder for endpoint producers for the Git component.
*/
public | AdvancedGitEndpointConsumerBuilder |
java | apache__camel | components/camel-dynamic-router/src/main/java/org/apache/camel/component/dynamicrouter/filter/DynamicRouterFilterService.java | {
"start": 1987,
"end": 12996
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(DynamicRouterFilterService.class);
/**
* Lists of {@link PrioritizedFilter}s, mapped by their channel.
* <p>
* Each list holds the filters for that routing channel.
*/
private final Map<String, ConcurrentSkipListSet<PrioritizedFilter>> filterMap = new ConcurrentHashMap<>();
/**
* Lists of {@link PrioritizedFilterStatistics}, mapped by their channel.
* <p>
* Each list holds the routing statistics for filters for that routing channel.
*/
private final Map<String, List<PrioritizedFilterStatistics>> filterStatisticsMap = new ConcurrentHashMap<>();
/**
* Supplier for the {@link PrioritizedFilterFactory} instance.
*/
private final Supplier<PrioritizedFilterFactory> filterFactorySupplier;
public DynamicRouterFilterService() {
this.filterFactorySupplier = FILTER_FACTORY_SUPPLIER;
LOG.debug("Created Dynamic Router component");
}
/**
* Constructor that allows the {@link PrioritizedFilterFactory} supplier to be specified.
*
* @param filterFactorySupplier the {@link PrioritizedFilterFactory} supplier
*/
public DynamicRouterFilterService(final Supplier<PrioritizedFilterFactory> filterFactorySupplier) {
this.filterFactorySupplier = filterFactorySupplier;
LOG.debug("Created Dynamic Router component");
}
/**
* Initialize the filter list for the specified channel.
*
* @param channel channel to initialize filter list for
*/
public void initializeChannelFilters(final String channel) {
filterMap.computeIfAbsent(channel, c -> new ConcurrentSkipListSet<>(DynamicRouterConstants.FILTER_COMPARATOR));
filterStatisticsMap.computeIfAbsent(channel, c -> Collections.synchronizedList(new ArrayList<>()));
}
/**
* Get a copy of the {@link PrioritizedFilter}s for the specified channel.
*
* @param channel channel to obtain {@link PrioritizedFilter}s for
* @return {@link PrioritizedFilter}s for the specified channel
*/
public Collection<PrioritizedFilter> getFiltersForChannel(final String channel) {
return List.copyOf(filterMap.get(channel));
}
/**
* Retrieves a copy of the filter map.
*
* @return a copy of the filter map
*/
public Map<String, ConcurrentSkipListSet<PrioritizedFilter>> getFilterMap() {
return Map.copyOf(filterMap);
}
/**
* Get a copy of the {@link PrioritizedFilterStatistics} for the specified channel.
*
* @param channel channel to obtain {@link PrioritizedFilterStatistics} for
* @return {@link PrioritizedFilterStatistics} for the specified channel
*/
public List<PrioritizedFilterStatistics> getStatisticsForChannel(final String channel) {
return List.copyOf(filterStatisticsMap.get(channel));
}
/**
* Retrieves a copy of the filter statistics map.
*
* @return a copy of the filter statistics map
*/
public Map<String, List<PrioritizedFilterStatistics>> getFilterStatisticsMap() {
return Map.copyOf(filterStatisticsMap);
}
/**
* Convenience method to create a {@link PrioritizedFilter} from the supplied parameters.
*
* @param id the filter identifier
* @param priority the filter priority
* @param predicate the filter predicate
* @param endpoint the filter endpoint
* @return a {@link PrioritizedFilter} built from the supplied parameters
*/
public PrioritizedFilter createFilter(
final String id, final int priority, final Predicate predicate, final String endpoint,
final PrioritizedFilterStatistics statistics) {
return filterFactorySupplier.get().getInstance(id, priority, predicate, endpoint, statistics);
}
/**
* Creates a {@link PrioritizedFilter} from the supplied parameters, and adds it to the filters for the specified
* channel.
*
* @param id the filter identifier
* @param priority the filter priority
* @param predicate the filter predicate
* @param endpoint the filter endpoint
* @param channel the channel that contains the filter
* @param update flag if this is an update to the filter
* @return the ID of the added filter
*/
public String addFilterForChannel(
final String id, final int priority, final Predicate predicate, final String endpoint,
final String channel, final boolean update) {
return addFilterForChannel(createFilter(id, priority, predicate, endpoint, new PrioritizedFilterStatistics(id)),
channel, update);
}
/**
* Adds the filter to the list of filters, and ensure that the filters are sorted by priority after the insertion.
*
* @param filter the filter to add
* @return the ID of the added filter
*/
public String addFilterForChannel(final PrioritizedFilter filter, final String channel, final boolean update) {
boolean filterExists = !filterMap.isEmpty() &&
filterMap.get(channel).stream().anyMatch(f -> filter.id().equals(f.id()));
boolean okToAdd = update == filterExists;
if (okToAdd) {
Set<PrioritizedFilter> filters = filterMap.computeIfAbsent(channel,
c -> new ConcurrentSkipListSet<>(DynamicRouterConstants.FILTER_COMPARATOR));
filters.add(filter);
List<PrioritizedFilterStatistics> filterStatistics = filterStatisticsMap.computeIfAbsent(channel,
c -> Collections.synchronizedList(new ArrayList<>()));
filterStatistics.add(filter.statistics());
LOG.debug("Added subscription: {}", filter);
return filter.id();
}
return String.format("Error: Filter could not be %s -- existing filter found with matching ID: %b",
update ? "updated" : "added", filterExists);
}
/**
* Return the filter with the supplied filter identifier. If there is no such filter, then return null.
*
* @param filterId the filter identifier
* @param channel the channel that contains the filter
* @return the filter with the supplied ID, or null
*/
public PrioritizedFilter getFilterById(final String filterId, final String channel) {
return (ObjectHelper.isEmpty(channel)
? filterMap.values().stream().flatMap(Collection::stream) : filterMap.get(channel).stream())
.filter(f -> filterId.equals(f.id()))
.findFirst()
.orElseThrow(() -> new IllegalArgumentException("No filter exists with ID: " + filterId));
}
/**
* Removes a filter with the ID from the control message. This does not remove the
* {@link PrioritizedFilterStatistics} instance, because the statistics still represent actions that happened, so
* they should remain for statistics reporting.
*
* @param filterId the ID of the filter to remove
*/
public boolean removeFilterById(final String filterId, final String channel) {
String routerChannel = (ObjectHelper.isEmpty(channel))
? filterMap.keySet().stream()
.filter(ch -> filterMap.get(ch).stream().anyMatch(f -> filterId.equals(f.id())))
.findFirst()
.orElseThrow(() -> new IllegalArgumentException("No filter exists with ID: " + filterId))
: channel;
if (filterMap.get(routerChannel).removeIf(f -> filterId.equals(f.id()))) {
LOG.debug("Removed subscription: {}", filterId);
return true;
} else {
LOG.debug("No subscription exists with ID: {}", filterId);
return false;
}
}
/**
* Match the exchange against all {@link PrioritizedFilter}s for the specified channel to determine if any of them
* are suitable to handle the exchange, then create a comma-delimited string of the filters' endpoints.
* <p>
* <strong>SIDE-EFFECT</strong>: If there are no matching filters, this method will modify the {@link Exchange}!
* Without a matching filter, a message would otherwise be dropped without any notification, including log messages.
* Instead, if no matching filters can be found, this method will store the original message body in a header named
* by {@link DynamicRouterConstants#ORIGINAL_BODY_HEADER}. The message body will be changed to a string indicating
* that "no filters matched" the exchange. If the {@link DynamicRouterConfiguration#isWarnDroppedMessage()} flag is
* set to true, the message will be logged as a warning. Otherwise, it will be logged at the DEBUG level.
*
* @param exchange the message exchange
* @param channel the dynamic router channel to get filters for
* @param firstMatchOnly to only return the first match
* @param warnDroppedMessage if there are no matching filters found, this flag determines if the message will be
* logged as a warning; otherwise, it will be logged at the DEBUG level
* @return a comma-delimited string of endpoints from matching filters
*/
public String getMatchingEndpointsForExchangeByChannel(
final Exchange exchange,
final String channel,
final boolean firstMatchOnly,
final boolean warnDroppedMessage) {
List<String> matchingEndpoints = new ArrayList<>();
for (PrioritizedFilter filter : filterMap.get(channel)) {
if (filter.predicate().matches(exchange)) {
matchingEndpoints.add(filter.endpoint());
filter.statistics().incrementCount();
if (firstMatchOnly) {
break;
}
}
}
String recipients = String.join(",", matchingEndpoints);
if (ObjectHelper.isEmpty(recipients)) {
Message message = exchange.getMessage();
message.setHeader(ORIGINAL_BODY_HEADER, message.getBody());
recipients = String.format(DynamicRouterConstants.LOG_ENDPOINT, this.getClass().getCanonicalName(), channel,
warnDroppedMessage ? LoggingLevel.WARN : LoggingLevel.DEBUG);
String error = String.format(
"DynamicRouter channel '%s': no filters matched for an exchange from route: '%s'. " +
"The 'originalBody' header contains the original message body.",
channel, exchange.getFromEndpoint());
message.setBody(error, String.class);
}
return recipients;
}
/**
* Factory to create a {@link DynamicRouterFilterService}.
*/
public static | DynamicRouterFilterService |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/dataview/ListViewSerializer.java | {
"start": 1695,
"end": 5004
} | class ____<T> extends TypeSerializer<ListView<T>>
implements LegacySerializerSnapshotTransformer<ListView<T>> {
private static final long serialVersionUID = -2030398712359267867L;
private final TypeSerializer<List<T>> listSerializer;
public ListViewSerializer(TypeSerializer<List<T>> listSerializer) {
this.listSerializer = listSerializer;
}
@Override
public boolean isImmutableType() {
return false;
}
@Override
public TypeSerializer<ListView<T>> duplicate() {
return new ListViewSerializer<>(listSerializer.duplicate());
}
@Override
public ListView<T> createInstance() {
return new ListView<>();
}
@Override
public ListView<T> copy(ListView<T> from) {
final ListView<T> view = new ListView<>();
view.setList(listSerializer.copy(from.getList()));
return view;
}
@Override
public ListView<T> copy(ListView<T> from, ListView<T> reuse) {
return copy(from);
}
@Override
public int getLength() {
return -1;
}
@Override
public void serialize(ListView<T> record, DataOutputView target) throws IOException {
listSerializer.serialize(record.getList(), target);
}
@Override
public ListView<T> deserialize(DataInputView source) throws IOException {
final ListView<T> view = new ListView<>();
view.setList(listSerializer.deserialize(source));
return view;
}
@Override
public ListView<T> deserialize(ListView<T> reuse, DataInputView source) throws IOException {
return deserialize(source);
}
@Override
public void copy(DataInputView source, DataOutputView target) throws IOException {
listSerializer.copy(source, target);
}
@Override
public boolean equals(Object obj) {
if (obj instanceof ListViewSerializer) {
ListViewSerializer<?> other = (ListViewSerializer<?>) obj;
return listSerializer.equals(other.listSerializer);
} else {
return false;
}
}
@Override
public int hashCode() {
return listSerializer.hashCode();
}
@Override
public TypeSerializerSnapshot<ListView<T>> snapshotConfiguration() {
return new ListViewSerializerSnapshot<>(this);
}
/**
* We need to override this as a {@link LegacySerializerSnapshotTransformer} because in Flink
* 1.6.x and below, this serializer was incorrectly returning directly the snapshot of the
* nested list serializer as its own snapshot.
*
* <p>This method transforms the incorrect list serializer snapshot to be a proper {@link
* ListViewSerializerSnapshot}.
*/
@Override
public <U> TypeSerializerSnapshot<ListView<T>> transformLegacySerializerSnapshot(
TypeSerializerSnapshot<U> legacySnapshot) {
if (legacySnapshot instanceof ListViewSerializerSnapshot) {
return (TypeSerializerSnapshot<ListView<T>>) legacySnapshot;
} else {
throw new UnsupportedOperationException(
legacySnapshot.getClass().getCanonicalName() + " is not supported.");
}
}
public TypeSerializer<List<T>> getListSerializer() {
return listSerializer;
}
}
| ListViewSerializer |
java | google__gson | gson/src/main/java/com/google/gson/TypeAdapterFactory.java | {
"start": 3092,
"end": 4169
} | enum ____ is computed eagerly.
*
* <p>As with type adapters, factories must be <i>registered</i> with a {@link
* com.google.gson.GsonBuilder} for them to take effect:
*
* <pre>{@code
* GsonBuilder builder = new GsonBuilder();
* builder.registerTypeAdapterFactory(new LowercaseEnumTypeAdapterFactory());
* ...
* Gson gson = builder.create();
* }</pre>
*
* If multiple factories support the same type, the factory registered earlier takes precedence.
*
* <h3>Example: Composing other type adapters</h3>
*
* In this example we implement a factory for Guava's {@code Multiset} collection type. The factory
* can be used to create type adapters for multisets of any element type: the type adapter for
* {@code Multiset<String>} is different from the type adapter for {@code Multiset<URL>}.
*
* <p>The type adapter <i>delegates</i> to another type adapter for the multiset elements. It
* figures out the element type by reflecting on the multiset's type token. A {@code Gson} is passed
* in to {@code create} for just this purpose:
*
* <pre>{@code
* public | value |
java | apache__dubbo | dubbo-cluster/src/test/java/org/apache/dubbo/rpc/cluster/router/file/FileRouterEngineTest.java | {
"start": 7458,
"end": 8208
} | class ____<T> extends AbstractClusterInvoker<T> {
private Invoker<T> selectedInvoker;
public MockClusterInvoker(Directory<T> directory) {
super(directory);
}
public MockClusterInvoker(Directory<T> directory, URL url) {
super(directory, url);
}
@Override
protected Result doInvoke(Invocation invocation, List<Invoker<T>> invokers, LoadBalance loadbalance)
throws RpcException {
Invoker<T> invoker = select(loadbalance, invocation, invokers, null);
selectedInvoker = invoker;
return null;
}
public Invoker<T> getSelectedInvoker() {
return selectedInvoker;
}
}
}
| MockClusterInvoker |
java | apache__kafka | clients/src/test/java/org/apache/kafka/common/network/SslSender.java | {
"start": 2667,
"end": 3124
} | class ____ implements X509TrustManager {
@Override
public void checkClientTrusted(X509Certificate[] x509Certificates, String s) {
//nop
}
@Override
public void checkServerTrusted(X509Certificate[] x509Certificates, String s) {
//nop
}
@Override
public X509Certificate[] getAcceptedIssuers() {
return new X509Certificate[0];
}
}
}
| NaiveTrustManager |
java | elastic__elasticsearch | x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java | {
"start": 1777,
"end": 6789
} | class ____ extends AbstractLicensesIntegrationTestCase {
@Before
public void resetLicensing() throws Exception {
enableJdbcLicensing();
}
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
// Add Netty so we can test JDBC licensing because only exists on the REST layer.
return CollectionUtils.appendToCopy(super.nodePlugins(), Netty4Plugin.class);
}
@Override
protected boolean addMockHttpTransport() {
return false; // enable http
}
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
// Enable http so we can test JDBC licensing because only exists on the REST layer.
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal, otherSettings))
.put(NetworkModule.HTTP_TYPE_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME)
.build();
}
private static OperationMode randomValidSqlLicenseType() {
return randomTrialBasicStandardGoldOrPlatinumMode();
}
private static OperationMode randomInvalidSqlLicenseType() {
return OperationMode.MISSING;
}
private static OperationMode randomValidJdbcLicenseType() {
return randomTrialOrPlatinumMode();
}
private static OperationMode randomInvalidJdbcLicenseType() {
return randomBasicStandardOrGold();
}
public void enableSqlLicensing() throws Exception {
updateLicensing(randomValidSqlLicenseType());
}
public void disableSqlLicensing() throws Exception {
updateLicensing(randomInvalidSqlLicenseType());
}
public void enableJdbcLicensing() throws Exception {
updateLicensing(randomValidJdbcLicenseType());
}
public void disableJdbcLicensing() throws Exception {
updateLicensing(randomInvalidJdbcLicenseType());
}
public void updateLicensing(OperationMode licenseOperationMode) throws Exception {
String licenseType = licenseOperationMode.name().toLowerCase(Locale.ROOT);
wipeAllLicenses();
if (licenseType.equals("missing")) {
putLicenseTombstone();
} else {
License license = org.elasticsearch.license.TestUtils.generateSignedLicense(licenseType, TimeValue.timeValueMinutes(1));
putLicense(license);
}
}
public void testSqlQueryActionLicense() throws Exception {
setupTestIndex();
disableSqlLicensing();
ElasticsearchSecurityException e = expectThrows(
ElasticsearchSecurityException.class,
new SqlQueryRequestBuilder(client()).query("SELECT * FROM test")
);
assertThat(e.getMessage(), equalTo("current license is non-compliant for [sql]"));
enableSqlLicensing();
SqlQueryResponse response = new SqlQueryRequestBuilder(client()).query("SELECT * FROM test").get();
assertThat(response.size(), Matchers.equalTo(2L));
}
public void testSqlQueryActionJdbcModeLicense() throws Exception {
setupTestIndex();
disableJdbcLicensing();
ElasticsearchSecurityException e = expectThrows(
ElasticsearchSecurityException.class,
new SqlQueryRequestBuilder(client()).query("SELECT * FROM test").mode("jdbc")
);
assertThat(e.getMessage(), equalTo("current license is non-compliant for [jdbc]"));
enableJdbcLicensing();
SqlQueryResponse response = new SqlQueryRequestBuilder(client()).query("SELECT * FROM test").mode("jdbc").get();
assertThat(response.size(), Matchers.equalTo(2L));
}
public void testSqlTranslateActionLicense() throws Exception {
setupTestIndex();
disableSqlLicensing();
ElasticsearchSecurityException e = expectThrows(
ElasticsearchSecurityException.class,
new SqlTranslateRequestBuilder(client()).query("SELECT * FROM test")
);
assertThat(e.getMessage(), equalTo("current license is non-compliant for [sql]"));
enableSqlLicensing();
SqlTranslateResponse response = new SqlTranslateRequestBuilder(client()).query("SELECT * FROM test").get();
SearchSourceBuilder source = response.source();
assertThat(source.docValueFields(), Matchers.contains(new FieldAndFormat("count", null)));
FetchSourceContext fetchSource = source.fetchSource();
assertThat(fetchSource.includes(), Matchers.arrayContaining("data"));
}
// TODO test SqlGetIndicesAction. Skipping for now because of lack of serialization support.
private void setupTestIndex() {
ElasticsearchAssertions.assertAcked(indicesAdmin().prepareCreate("test").get());
client().prepareBulk()
.add(new IndexRequest("test").id("1").source("data", "bar", "count", 42))
.add(new IndexRequest("test").id("2").source("data", "baz", "count", 43))
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
.get();
}
}
| SqlLicenseIT |
java | micronaut-projects__micronaut-core | inject-java/src/test/groovy/io/micronaut/inject/factory/multiple/AFactory.java | {
"start": 801,
"end": 1033
} | class ____ {
@Bean
@Requires(beans = X.class, missingBeans = Y.class)
A a(X x) {
return new A();
}
@Bean
@Requires(beans= {X.class, Y.class})
A a(X x, Y y) {
return new A();
}
}
| AFactory |
java | apache__camel | components/camel-jms/src/test/java/org/apache/camel/component/jms/integration/JmsToDSendDynamicTwoDisabledIT.java | {
"start": 1322,
"end": 2706
} | class ____ extends AbstractPersistentJMSTest {
@Test
public void testToD() {
template.sendBodyAndHeader("direct:start", "Hello bar", "where", "JmsToDSendDynamicIT.bar");
template.sendBodyAndHeader("direct:start", "Hello beer", "where", "JmsToDSendDynamicIT.beer");
template.sendBodyAndHeader("direct:start", "Hello gin", "where", "JmsToDSendDynamicIT.gin");
template.sendBodyAndHeader("direct:start2", "Hello beer", "where2", "JmsToDSendDynamicIT.beer");
template.sendBodyAndHeader("direct:start2", "Hello whiskey", "where2", "JmsToDSendDynamicIT.whiskey");
// there should be 4 activemq endpoint
long count = context.getEndpoints().stream().filter(e -> e.getEndpointUri().startsWith("activemq:")).count();
assertEquals(4, count, "There should be 4 activemq endpoint");
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
// route message dynamic using toD but turn off send dynamic aware
from("direct:start").toD().allowOptimisedComponents(false).uri("activemq:queue:${header.where}");
from("direct:start2").toD().allowOptimisedComponents(false).uri("activemq:queue:${header.where2}");
}
};
}
}
| JmsToDSendDynamicTwoDisabledIT |
java | apache__rocketmq | proxy/src/main/java/org/apache/rocketmq/proxy/service/message/MessageService.java | {
"start": 2645,
"end": 6272
} | interface ____ {
CompletableFuture<List<SendResult>> sendMessage(
ProxyContext ctx,
AddressableMessageQueue messageQueue,
List<Message> msgList,
SendMessageRequestHeader requestHeader,
long timeoutMillis
);
CompletableFuture<RemotingCommand> sendMessageBack(
ProxyContext ctx,
ReceiptHandle handle,
String messageId,
ConsumerSendMsgBackRequestHeader requestHeader,
long timeoutMillis
);
CompletableFuture<Void> endTransactionOneway(
ProxyContext ctx,
String brokerName,
EndTransactionRequestHeader requestHeader,
long timeoutMillis
);
CompletableFuture<PopResult> popMessage(
ProxyContext ctx,
AddressableMessageQueue messageQueue,
PopMessageRequestHeader requestHeader,
long timeoutMillis
);
CompletableFuture<AckResult> changeInvisibleTime(
ProxyContext ctx,
ReceiptHandle handle,
String messageId,
ChangeInvisibleTimeRequestHeader requestHeader,
long timeoutMillis
);
CompletableFuture<AckResult> ackMessage(
ProxyContext ctx,
ReceiptHandle handle,
String messageId,
AckMessageRequestHeader requestHeader,
long timeoutMillis
);
CompletableFuture<AckResult> batchAckMessage(
ProxyContext ctx,
List<ReceiptHandleMessage> handleList,
String consumerGroup,
String topic,
long timeoutMillis
);
CompletableFuture<PullResult> pullMessage(
ProxyContext ctx,
AddressableMessageQueue messageQueue,
PullMessageRequestHeader requestHeader,
long timeoutMillis
);
CompletableFuture<Long> queryConsumerOffset(
ProxyContext ctx,
AddressableMessageQueue messageQueue,
QueryConsumerOffsetRequestHeader requestHeader,
long timeoutMillis
);
CompletableFuture<Void> updateConsumerOffset(
ProxyContext ctx,
AddressableMessageQueue messageQueue,
UpdateConsumerOffsetRequestHeader requestHeader,
long timeoutMillis
);
CompletableFuture<Void> updateConsumerOffsetAsync(
ProxyContext ctx,
AddressableMessageQueue messageQueue,
UpdateConsumerOffsetRequestHeader requestHeader,
long timeoutMillis
);
CompletableFuture<Set<MessageQueue>> lockBatchMQ(
ProxyContext ctx,
AddressableMessageQueue messageQueue,
LockBatchRequestBody requestBody,
long timeoutMillis
);
CompletableFuture<Void> unlockBatchMQ(
ProxyContext ctx,
AddressableMessageQueue messageQueue,
UnlockBatchRequestBody requestBody,
long timeoutMillis
);
CompletableFuture<Long> getMaxOffset(
ProxyContext ctx,
AddressableMessageQueue messageQueue,
GetMaxOffsetRequestHeader requestHeader,
long timeoutMillis
);
CompletableFuture<Long> getMinOffset(
ProxyContext ctx,
AddressableMessageQueue messageQueue,
GetMinOffsetRequestHeader requestHeader,
long timeoutMillis
);
CompletableFuture<String> recallMessage(
ProxyContext ctx,
String brokerName,
RecallMessageRequestHeader requestHeader,
long timeoutMillis
);
CompletableFuture<RemotingCommand> request(ProxyContext ctx, String brokerName, RemotingCommand request,
long timeoutMillis);
CompletableFuture<Void> requestOneway(ProxyContext ctx, String brokerName, RemotingCommand request,
long timeoutMillis);
}
| MessageService |
java | bumptech__glide | library/test/src/test/java/com/bumptech/glide/load/model/DataUrlLoaderTest.java | {
"start": 5224,
"end": 5546
} | class ____ implements DataFetcher.DataCallback<Object> {
public Object data;
public Exception exception;
@Override
public void onDataReady(@Nullable Object data) {
this.data = data;
}
@Override
public void onLoadFailed(@NonNull Exception e) {
this.exception = e;
}
}
}
| CallBack |
java | dropwizard__dropwizard | dropwizard-auth/src/main/java/io/dropwizard/auth/basic/BasicCredentialAuthFilter.java | {
"start": 536,
"end": 2608
} | class ____<P extends Principal> extends AuthFilter<BasicCredentials, P> {
private BasicCredentialAuthFilter() {
}
@Override
public void filter(ContainerRequestContext requestContext) throws IOException {
final BasicCredentials credentials =
getCredentials(requestContext.getHeaders().getFirst(HttpHeaders.AUTHORIZATION));
if (!authenticate(requestContext, credentials, SecurityContext.BASIC_AUTH)) {
throw unauthorizedHandler.buildException(prefix, realm);
}
}
/**
* Parses a Base64-encoded value of the `Authorization` header
* in the form of `Basic dXNlcm5hbWU6cGFzc3dvcmQ=`.
*
* @param header the value of the `Authorization` header
* @return a username and a password as {@link BasicCredentials}
*/
@Nullable
private BasicCredentials getCredentials(String header) {
if (header == null) {
return null;
}
final int space = header.indexOf(' ');
if (space <= 0) {
return null;
}
final String method = header.substring(0, space);
if (!prefix.equalsIgnoreCase(method)) {
return null;
}
final String decoded;
try {
decoded = new String(Base64.getDecoder().decode(header.substring(space + 1)), StandardCharsets.UTF_8);
} catch (IllegalArgumentException e) {
logger.warn("Error decoding credentials", e);
return null;
}
// Decoded credentials is 'username:password'
final int i = decoded.indexOf(':');
if (i <= 0) {
return null;
}
final String username = decoded.substring(0, i);
final String password = decoded.substring(i + 1);
return new BasicCredentials(username, password);
}
/**
* Builder for {@link BasicCredentialAuthFilter}.
* <p>An {@link Authenticator} must be provided during the building process.</p>
*
* @param <P> the principal
*/
public static | BasicCredentialAuthFilter |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/network/metrics/InputGateMetrics.java | {
"start": 1297,
"end": 6304
} | class ____ {
private final SingleInputGate inputGate;
// ------------------------------------------------------------------------
private InputGateMetrics(SingleInputGate inputGate) {
this.inputGate = checkNotNull(inputGate);
}
// ------------------------------------------------------------------------
// these methods are package private to make access from the nested classes faster
/**
* Iterates over all input channels and collects the total number of queued buffers in a
* best-effort way.
*
* @return total number of queued buffers
*/
long refreshAndGetTotal() {
long total = 0;
for (InputChannel channel : inputGate.inputChannels()) {
if (channel instanceof RemoteInputChannel) {
RemoteInputChannel rc = (RemoteInputChannel) channel;
total += rc.unsynchronizedGetNumberOfQueuedBuffers();
}
}
return total;
}
/**
* Iterates over all input channels and collects the minimum number of queued buffers in a
* channel in a best-effort way.
*
* @return minimum number of queued buffers per channel (<tt>0</tt> if no channels exist)
*/
int refreshAndGetMin() {
int min = Integer.MAX_VALUE;
for (InputChannel channel : inputGate.inputChannels()) {
if (channel instanceof RemoteInputChannel) {
RemoteInputChannel rc = (RemoteInputChannel) channel;
int size = rc.unsynchronizedGetNumberOfQueuedBuffers();
min = Math.min(min, size);
}
}
if (min == Integer.MAX_VALUE) { // in case all channels are local, or the channel collection
// was empty
return 0;
}
return min;
}
/**
* Iterates over all input channels and collects the maximum number of queued buffers in a
* channel in a best-effort way.
*
* @return maximum number of queued buffers per channel
*/
int refreshAndGetMax() {
int max = 0;
for (InputChannel channel : inputGate.inputChannels()) {
if (channel instanceof RemoteInputChannel) {
RemoteInputChannel rc = (RemoteInputChannel) channel;
int size = rc.unsynchronizedGetNumberOfQueuedBuffers();
max = Math.max(max, size);
}
}
return max;
}
/**
* Iterates over all input channels and collects the average number of queued buffers in a
* channel in a best-effort way.
*
* @return average number of queued buffers per channel
*/
float refreshAndGetAvg() {
long total = 0;
int count = 0;
for (InputChannel channel : inputGate.inputChannels()) {
if (channel instanceof RemoteInputChannel) {
RemoteInputChannel rc = (RemoteInputChannel) channel;
int size = rc.unsynchronizedGetNumberOfQueuedBuffers();
total += size;
++count;
}
}
return count == 0 ? 0 : total / (float) count;
}
// ------------------------------------------------------------------------
// Gauges to access the stats
// ------------------------------------------------------------------------
private Gauge<Long> getTotalQueueLenGauge() {
return new Gauge<Long>() {
@Override
public Long getValue() {
return refreshAndGetTotal();
}
};
}
private Gauge<Integer> getMinQueueLenGauge() {
return new Gauge<Integer>() {
@Override
public Integer getValue() {
return refreshAndGetMin();
}
};
}
private Gauge<Integer> getMaxQueueLenGauge() {
return new Gauge<Integer>() {
@Override
public Integer getValue() {
return refreshAndGetMax();
}
};
}
private Gauge<Float> getAvgQueueLenGauge() {
return new Gauge<Float>() {
@Override
public Float getValue() {
return refreshAndGetAvg();
}
};
}
// ------------------------------------------------------------------------
// Static access
// ------------------------------------------------------------------------
public static void registerQueueLengthMetrics(MetricGroup parent, SingleInputGate[] gates) {
for (int i = 0; i < gates.length; i++) {
InputGateMetrics metrics = new InputGateMetrics(gates[i]);
MetricGroup group = parent.addGroup(i);
group.gauge("totalQueueLen", metrics.getTotalQueueLenGauge());
group.gauge("minQueueLen", metrics.getMinQueueLenGauge());
group.gauge("maxQueueLen", metrics.getMaxQueueLenGauge());
group.gauge("avgQueueLen", metrics.getAvgQueueLenGauge());
}
}
}
| InputGateMetrics |
java | spring-projects__spring-security | oauth2/oauth2-client/src/test/java/org/springframework/security/oauth2/client/web/client/ClientRegistrationIdProcessorTests.java | {
"start": 1532,
"end": 4228
} | class ____ {
private static final String REGISTRATION_ID = "registrationId";
ClientRegistrationIdProcessor processor = ClientRegistrationIdProcessor.DEFAULT_INSTANCE;
@Test
void processWhenClientRegistrationIdPresentThenSet() {
HttpRequestValues.Builder builder = HttpRequestValues.builder();
Method hasClientRegistrationId = ReflectionUtils.findMethod(RestService.class, "hasClientRegistrationId");
this.processor.process(hasClientRegistrationId, null, null, builder);
String registrationId = ClientAttributes.resolveClientRegistrationId(builder.build().getAttributes());
assertThat(registrationId).isEqualTo(REGISTRATION_ID);
}
@Test
void processWhenMetaClientRegistrationIdPresentThenSet() {
HttpRequestValues.Builder builder = HttpRequestValues.builder();
Method hasClientRegistrationId = ReflectionUtils.findMethod(RestService.class, "hasMetaClientRegistrationId");
this.processor.process(hasClientRegistrationId, null, null, builder);
String registrationId = ClientAttributes.resolveClientRegistrationId(builder.build().getAttributes());
assertThat(registrationId).isEqualTo(REGISTRATION_ID);
}
@Test
void processWhenNoClientRegistrationIdPresentThenNull() {
HttpRequestValues.Builder builder = HttpRequestValues.builder();
Method hasClientRegistrationId = ReflectionUtils.findMethod(RestService.class, "noClientRegistrationId");
this.processor.process(hasClientRegistrationId, null, null, builder);
String registrationId = ClientAttributes.resolveClientRegistrationId(builder.build().getAttributes());
assertThat(registrationId).isNull();
}
@Test
void processWhenClientRegistrationIdPresentOnDeclaringClassThenSet() {
HttpRequestValues.Builder builder = HttpRequestValues.builder();
Method declaringClassHasClientRegistrationId = ReflectionUtils.findMethod(TypeAnnotatedRestService.class,
"declaringClassHasClientRegistrationId");
this.processor.process(declaringClassHasClientRegistrationId, null, null, builder);
String registrationId = ClientAttributes.resolveClientRegistrationId(builder.build().getAttributes());
assertThat(registrationId).isEqualTo(REGISTRATION_ID);
}
@Test
void processWhenDuplicateClientRegistrationIdPresentOnAggregateServiceThenException() {
HttpRequestValues.Builder builder = HttpRequestValues.builder();
Method shouldFailDueToDuplicateClientRegistrationId = ReflectionUtils.findMethod(AggregateRestService.class,
"shouldFailDueToDuplicateClientRegistrationId");
assertThatExceptionOfType(AnnotationConfigurationException.class).isThrownBy(
() -> this.processor.process(shouldFailDueToDuplicateClientRegistrationId, null, null, builder));
}
| ClientRegistrationIdProcessorTests |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/type/OracleNamedEnumTest.java | {
"start": 2002,
"end": 2060
} | enum ____ not exported");
}
}
);
});
}
}
| type |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/serializer/stream/StreamWriterTest_writeString1.java | {
"start": 202,
"end": 1078
} | class ____ extends TestCase {
public void test_0() throws Exception {
StringWriter out = new StringWriter();
SerializeWriter writer = new SerializeWriter(out, 10);
Assert.assertEquals(10, writer.getBufferLength());
int blockSize = 21;
for (char ch = 'a'; ch <= 'z'; ++ch) {
char[] chars = new char[blockSize];
for (int i = 0; i < blockSize; ++i) {
chars[i] = ch;
}
writer.write(new String(chars));
}
writer.close();
String text = out.toString();
Assert.assertEquals(26 * blockSize, text.length());
for (int i = 0; i < 26; ++i) {
for (int j = 0; j < blockSize; ++j) {
Assert.assertEquals(text.charAt(i * blockSize + j), (char) ('a' + i));
}
}
}
}
| StreamWriterTest_writeString1 |
java | redisson__redisson | redisson/src/main/java/org/redisson/executor/RedissonScheduledFuture.java | {
"start": 835,
"end": 1865
} | class ____<V> extends RedissonExecutorFuture<V> implements RScheduledFuture<V> {
private final long scheduledExecutionTime;
private final RemotePromise<V> promise;
public RedissonScheduledFuture(RemotePromise<V> promise, long scheduledExecutionTime) {
super(promise);
this.scheduledExecutionTime = scheduledExecutionTime;
this.promise = promise;
}
public RemotePromise<V> getInnerPromise() {
return promise;
}
@Override
public int compareTo(Delayed other) {
if (this == other) {
return 0;
}
long diff = getDelay(TimeUnit.MILLISECONDS) - other.getDelay(TimeUnit.MILLISECONDS);
if (diff == 0) {
return 0;
}
if (diff < 0) {
return -1;
}
return 1;
}
@Override
public long getDelay(TimeUnit unit) {
return unit.convert(scheduledExecutionTime - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
}
}
| RedissonScheduledFuture |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenIT0019PluginVersionMgmtBySuperPomTest.java | {
"start": 903,
"end": 1590
} | class ____ extends AbstractMavenIntegrationTestCase {
/**
* Test that a version is managed by pluginManagement in the super POM
*
* @throws Exception in case of failure
*/
@Test
public void testit0019() throws Exception {
File testDir = extractResources("/it0019");
Verifier verifier = newVerifier(testDir.getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.addCliArgument("process-resources");
verifier.execute();
verifier.verifyFilePresent("target/classes/test.txt");
verifier.verifyErrorFreeLog();
}
}
| MavenIT0019PluginVersionMgmtBySuperPomTest |
java | apache__camel | core/camel-management/src/main/java/org/apache/camel/management/JmxManagementStrategy.java | {
"start": 1864,
"end": 5067
} | class ____ extends DefaultManagementStrategy {
private static final Logger LOG = LoggerFactory.getLogger(JmxManagementStrategy.class);
private final List<Object> managed = new ArrayList<>();
private int counter;
public JmxManagementStrategy() {
}
public JmxManagementStrategy(CamelContext context, ManagementAgent managementAgent) {
super(context, managementAgent);
// add JMX capable CamelContext as extension
context.getCamelContextExtension().addContextPlugin(ManagedCamelContext.class, new ManagedCamelContextImpl(context));
}
@Override
public void manageObject(Object managedObject) throws Exception {
if (!isStartingOrStarted()) {
managed.add(managedObject);
return;
}
ObjectName objectName = getManagementObjectNameStrategy().getObjectName(managedObject);
if (objectName != null) {
getManagementAgent().register(managedObject, objectName);
counter++;
}
}
@Override
public void unmanageObject(Object managedObject) throws Exception {
if (!isStartingOrStarted()) {
managed.remove(managedObject);
return;
}
ObjectName objectName = getManagementObjectNameStrategy().getObjectName(managedObject);
if (objectName != null) {
getManagementAgent().unregister(objectName);
counter--;
}
}
@Override
public boolean isManaged(Object managedObject) {
try {
ObjectName name = getManagementObjectNameStrategy().getObjectName(managedObject);
if (name != null) {
return getManagementAgent().isRegistered(name);
}
} catch (Exception e) {
LOG.warn("Cannot check whether the managed object is registered. This exception will be ignored.", e);
}
return false;
}
@Override
public boolean isManagedName(Object name) {
try {
if (name instanceof ObjectName objectName) {
return getManagementAgent().isRegistered(objectName);
}
} catch (Exception e) {
LOG.warn("Cannot check whether the managed object is registered. This exception will be ignored.", e);
}
return false;
}
@Override
public boolean manageProcessor(NamedNode definition) {
return true;
}
@ManagedAttribute(description = "Number of managed MBean instances")
public int getManagedCount() {
return counter;
}
@Override
protected void doInit() throws Exception {
LOG.debug("JMX is enabled");
super.doInit();
}
@Override
protected void doStart() throws Exception {
super.doStart();
for (Object o : managed) {
manageObject(o);
}
}
@Override
protected ManagementObjectNameStrategy createManagementObjectNameStrategy(String domain) {
return new DefaultManagementObjectNameStrategy(domain);
}
@Override
protected ManagementObjectStrategy createManagementObjectStrategy() {
return new DefaultManagementObjectStrategy();
}
}
| JmxManagementStrategy |
java | micronaut-projects__micronaut-core | http-server-tck/src/main/java/io/micronaut/http/server/tck/tests/FilterErrorTest.java | {
"start": 9960,
"end": 10574
} | class ____ implements HttpServerFilter {
AtomicReference<RouteMatch<?>> routeMatch = new AtomicReference<>();
@Override
public Publisher<MutableHttpResponse<?>> doFilter(HttpRequest<?> request, ServerFilterChain chain) {
return Publishers.then(chain.proceed(request),
httpResponse -> routeMatch.set(RouteAttributes.getRouteMatch(httpResponse).get()));
}
@Override
public int getOrder() {
return 10;
}
}
@Requires(condition = FilterCondition.class)
@Controller("/filter-error-spec")
static | ExceptionRoute |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/state/operator/restore/StreamOperatorSnapshotRestoreTest.java | {
"start": 12441,
"end": 16703
} | class ____ extends AbstractStreamOperator<Integer>
implements OneInputStreamOperator<Integer, Integer> {
private static final long serialVersionUID = -8942866418598856475L;
TestOneInputStreamOperator(boolean verifyRestore) {
this.verifyRestore = verifyRestore;
}
private final boolean verifyRestore;
private ValueState<Integer> keyedState;
private ListState<Integer> opState;
@Override
public void processElement(StreamRecord<Integer> element) throws Exception {
if (verifyRestore) {
// check restored managed keyed state
long exp = element.getValue() + 1;
long act = keyedState.value();
Assert.assertEquals(exp, act);
} else {
// write managed keyed state that goes into snapshot
keyedState.update(element.getValue() + 1);
// write managed operator state that goes into snapshot
opState.add(element.getValue());
}
}
@Override
public void processWatermark(Watermark mark) {}
@Override
public void snapshotState(StateSnapshotContext context) throws Exception {
KeyedStateCheckpointOutputStream out = context.getRawKeyedOperatorStateOutput();
DataOutputView dov = new DataOutputViewStreamWrapper(out);
// write raw keyed state that goes into snapshot
int count = 0;
for (int kg : out.getKeyGroupList()) {
out.startNewKeyGroup(kg);
dov.writeInt(kg + 2);
++count;
}
Assert.assertEquals(MAX_PARALLELISM, count);
// write raw operator state that goes into snapshot
OperatorStateCheckpointOutputStream outOp = context.getRawOperatorStateOutput();
dov = new DataOutputViewStreamWrapper(outOp);
for (int i = 0; i < 13; ++i) {
outOp.startNewPartition();
dov.writeInt(42 + i);
}
}
@Override
public void initializeState(StateInitializationContext context) throws Exception {
Assert.assertEquals(verifyRestore, context.isRestored());
keyedState =
context.getKeyedStateStore()
.getState(
new ValueStateDescriptor<>("managed-keyed", Integer.class, 0));
opState =
context.getOperatorStateStore()
.getListState(
new ListStateDescriptor<>(
"managed-op-state", IntSerializer.INSTANCE));
if (context.isRestored()) {
// check restored raw keyed state
int count = 0;
for (KeyGroupStatePartitionStreamProvider streamProvider :
context.getRawKeyedStateInputs()) {
try (InputStream in = streamProvider.getStream()) {
DataInputView div = new DataInputViewStreamWrapper(in);
Assert.assertEquals(streamProvider.getKeyGroupId() + 2, div.readInt());
++count;
}
}
Assert.assertEquals(MAX_PARALLELISM, count);
// check restored managed operator state
BitSet check = new BitSet(10);
for (int v : opState.get()) {
check.set(v);
}
Assert.assertEquals(10, check.cardinality());
// check restored raw operator state
check = new BitSet(13);
for (StatePartitionStreamProvider streamProvider :
context.getRawOperatorStateInputs()) {
try (InputStream in = streamProvider.getStream()) {
DataInputView div = new DataInputViewStreamWrapper(in);
check.set(div.readInt() - 42);
}
}
Assert.assertEquals(13, check.cardinality());
}
}
}
}
| TestOneInputStreamOperator |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/TestResourcePluginManager.java | {
"start": 14589,
"end": 16619
} | interface ____
@Test
@Timeout(value = 30)
public void testLoadInvalidPluggableDeviceClasses() {
ResourcePluginManager rpm = new ResourcePluginManager();
ResourcePluginManager rpmSpy = spy(rpm);
nm = new ResourcePluginMockNM(rpmSpy);
conf.setBoolean(YarnConfiguration.NM_PLUGGABLE_DEVICE_FRAMEWORK_ENABLED,
true);
conf.setStrings(
YarnConfiguration.NM_PLUGGABLE_DEVICE_FRAMEWORK_DEVICE_CLASSES,
FakeTestDevicePlugin2.class.getCanonicalName());
String expectedMessage = "Class: "
+ FakeTestDevicePlugin2.class.getCanonicalName()
+ " not instance of " + DevicePlugin.class.getCanonicalName();
String actualMessage = "";
try {
nm.init(conf);
nm.start();
} catch (YarnRuntimeException e) {
actualMessage = e.getMessage();
}
assertThat(actualMessage).isEqualTo(expectedMessage);
}
// Fail to register duplicated resource name.
@Test
@Timeout(value = 30)
public void testLoadDuplicateResourceNameDevicePlugin() {
ResourcePluginManager rpm = new ResourcePluginManager();
ResourcePluginManager rpmSpy = spy(rpm);
nm = new ResourcePluginMockNM(rpmSpy);
conf.setBoolean(YarnConfiguration.NM_PLUGGABLE_DEVICE_FRAMEWORK_ENABLED,
true);
conf.setStrings(
YarnConfiguration.NM_PLUGGABLE_DEVICE_FRAMEWORK_DEVICE_CLASSES,
FakeTestDevicePlugin1.class.getCanonicalName() + "," +
FakeTestDevicePlugin3.class.getCanonicalName());
String expectedMessage = "cmpA.com/hdwA" +
" already registered! Please change resource type name"
+ " or configure correct resource type name"
+ " in resource-types.xml for "
+ FakeTestDevicePlugin3.class.getCanonicalName();
String actualMessage = "";
try {
nm.init(conf);
nm.start();
} catch (YarnRuntimeException e) {
actualMessage = e.getMessage();
}
assertThat(actualMessage).isEqualTo(expectedMessage);
}
/**
* Fail a plugin due to incompatible | DevicePlugin |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/context/annotation/ConfigurationClassParser.java | {
"start": 23157,
"end": 23311
} | class ____ also considered. This allows imports to
* be triggered indirectly via marker interfaces or shared base interfaces.
* @param sourceClass the | are |
java | apache__camel | components/camel-opensearch/src/main/java/org/apache/camel/component/opensearch/OpensearchConfiguration.java | {
"start": 1183,
"end": 8917
} | class ____ {
private List<HttpHost> hostAddressesList;
private String user;
private String password;
@UriPath
@Metadata(required = true)
private String clusterName;
@UriParam
private OpensearchOperation operation;
@UriParam
private Integer size;
@UriParam
private Integer from;
@UriParam
private String indexName;
@UriParam(defaultValue = "" + OpensearchConstants.DEFAULT_FOR_WAIT_ACTIVE_SHARDS)
private int waitForActiveShards = OpensearchConstants.DEFAULT_FOR_WAIT_ACTIVE_SHARDS;
@UriParam
private String hostAddresses;
@UriParam(defaultValue = "" + OpensearchConstants.DEFAULT_SOCKET_TIMEOUT)
private int socketTimeout = OpensearchConstants.DEFAULT_SOCKET_TIMEOUT;
@UriParam(defaultValue = "" + OpensearchConstants.MAX_RETRY_TIMEOUT)
private int maxRetryTimeout = OpensearchConstants.MAX_RETRY_TIMEOUT;
@UriParam(defaultValue = "" + OpensearchConstants.DEFAULT_CONNECTION_TIMEOUT)
private int connectionTimeout = OpensearchConstants.DEFAULT_CONNECTION_TIMEOUT;
@UriParam
private boolean disconnect;
@UriParam(label = "security")
private boolean enableSSL;
@UriParam(label = "security")
@Metadata(supportFileReference = true)
private String certificatePath;
@UriParam
private boolean useScroll;
@UriParam(defaultValue = "" + OpensearchConstants.DEFAULT_SCROLL_KEEP_ALIVE_MS)
private int scrollKeepAliveMs = OpensearchConstants.DEFAULT_SCROLL_KEEP_ALIVE_MS;
@UriParam(label = "advanced")
private boolean enableSniffer;
@UriParam(label = "advanced", defaultValue = "" + OpensearchConstants.DEFAULT_SNIFFER_INTERVAL)
private int snifferInterval = OpensearchConstants.DEFAULT_SNIFFER_INTERVAL;
@UriParam(label = "advanced", defaultValue = "" + OpensearchConstants.DEFAULT_AFTER_FAILURE_DELAY)
private int sniffAfterFailureDelay = OpensearchConstants.DEFAULT_AFTER_FAILURE_DELAY;
@UriParam(label = "advanced", defaultValue = "ObjectNode")
private Class<?> documentClass = ObjectNode.class;
@UriParam(label = "advanced")
private HostnameVerifier hostnameVerifier;
/**
* Starting index of the response.
*/
public Integer getFrom() {
return from;
}
public void setFrom(Integer from) {
this.from = from;
}
/**
* Size of the response.
*/
public Integer getSize() {
return size;
}
public void setSize(Integer size) {
this.size = size;
}
/**
* Name of the cluster
*/
public String getClusterName() {
return clusterName;
}
public void setClusterName(String clusterName) {
this.clusterName = clusterName;
}
/**
* What operation to perform
*/
public OpensearchOperation getOperation() {
return operation;
}
public void setOperation(OpensearchOperation operation) {
this.operation = operation;
}
/**
* The name of the index to act against
*/
public String getIndexName() {
return indexName;
}
public void setIndexName(String indexName) {
this.indexName = indexName;
}
/**
* Comma separated list with ip:port formatted remote transport addresses to use.
*/
public String getHostAddresses() {
return hostAddresses;
}
public void setHostAddresses(String hostAddresses) {
this.hostAddresses = hostAddresses;
}
/**
* Index creation waits for the write consistency number of shards to be available
*/
public int getWaitForActiveShards() {
return waitForActiveShards;
}
public void setWaitForActiveShards(int waitForActiveShards) {
this.waitForActiveShards = waitForActiveShards;
}
public List<HttpHost> getHostAddressesList() {
return hostAddressesList;
}
public void setHostAddressesList(List<HttpHost> hostAddressesList) {
this.hostAddressesList = hostAddressesList;
}
/**
* The timeout in ms to wait before the socket will time out.
*/
public int getSocketTimeout() {
return socketTimeout;
}
public void setSocketTimeout(int socketTimeout) {
this.socketTimeout = socketTimeout;
}
/**
* The time in ms to wait before connection will time out.
*/
public int getConnectionTimeout() {
return connectionTimeout;
}
public void setConnectionTimeout(int connectionTimeout) {
this.connectionTimeout = connectionTimeout;
}
/**
* Basic authenticate user
*/
public String getUser() {
return user;
}
public void setUser(String user) {
this.user = user;
}
/**
* Password for authenticating
*/
public String getPassword() {
return password;
}
public void setPassword(String password) {
this.password = password;
}
/**
* Enable SSL
*/
public boolean isEnableSSL() {
return enableSSL;
}
public void setEnableSSL(boolean enableSSL) {
this.enableSSL = enableSSL;
}
/**
* The certificate that can be used to access the ES Cluster. It can be loaded by default from classpath, but you
* can prefix with classpath:, file:, or http: to load the resource from different systems.
*/
public String getCertificatePath() {
return certificatePath;
}
public void setCertificatePath(String certificatePath) {
this.certificatePath = certificatePath;
}
/**
* The time in ms before retry
*/
public int getMaxRetryTimeout() {
return maxRetryTimeout;
}
public void setMaxRetryTimeout(int maxRetryTimeout) {
this.maxRetryTimeout = maxRetryTimeout;
}
/**
* Disconnect after it finish calling the producer
*/
public boolean isDisconnect() {
return disconnect;
}
public void setDisconnect(boolean disconnect) {
this.disconnect = disconnect;
}
/**
* Enable automatically discover nodes from a running OpenSearch cluster. If this option is used in conjunction with
* Spring Boot, then it's managed by the Spring Boot configuration (see: Disable Sniffer in Spring Boot).
*/
public boolean isEnableSniffer() {
return enableSniffer;
}
public void setEnableSniffer(boolean enableSniffer) {
this.enableSniffer = enableSniffer;
}
/**
* The interval between consecutive ordinary sniff executions in milliseconds. Will be honoured when sniffOnFailure
* is disabled or when there are no failures between consecutive sniff executions
*/
public int getSnifferInterval() {
return snifferInterval;
}
public void setSnifferInterval(int snifferInterval) {
this.snifferInterval = snifferInterval;
}
/**
* The delay of a sniff execution scheduled after a failure (in milliseconds)
*/
public int getSniffAfterFailureDelay() {
return sniffAfterFailureDelay;
}
public void setSniffAfterFailureDelay(int sniffAfterFailureDelay) {
this.sniffAfterFailureDelay = sniffAfterFailureDelay;
}
/**
* Enable scroll usage
*/
public boolean isUseScroll() {
return useScroll;
}
public void setUseScroll(boolean useScroll) {
this.useScroll = useScroll;
}
/**
* Time in ms during which OpenSearch will keep search context alive
*/
public int getScrollKeepAliveMs() {
return scrollKeepAliveMs;
}
public void setScrollKeepAliveMs(int scrollKeepAliveMs) {
this.scrollKeepAliveMs = scrollKeepAliveMs;
}
/**
* The | OpensearchConfiguration |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.