language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java | {
"start": 846,
"end": 4294
} | class ____ extends MasterNodeOperationRequestBuilder<RolloverRequest, RolloverResponse, RolloverRequestBuilder> {
public RolloverRequestBuilder(ElasticsearchClient client) {
super(client, RolloverAction.INSTANCE, new RolloverRequest());
}
public RolloverRequestBuilder setRolloverTarget(String rolloverTarget) {
this.request.setRolloverTarget(rolloverTarget);
return this;
}
public RolloverRequestBuilder setNewIndexName(String newIndexName) {
this.request.setNewIndexName(newIndexName);
return this;
}
public RolloverRequestBuilder setConditions(RolloverConditions rolloverConditions) {
this.request.setConditions(rolloverConditions);
return this;
}
public RolloverRequestBuilder setConditions(RolloverConditions.Builder rolloverConditions) {
this.request.setConditions(rolloverConditions.build());
return this;
}
public RolloverRequestBuilder dryRun(boolean dryRun) {
this.request.dryRun(dryRun);
return this;
}
public RolloverRequestBuilder lazy(boolean lazy) {
this.request.lazy(lazy);
return this;
}
public RolloverRequestBuilder settings(Settings settings) {
this.request.getCreateIndexRequest().settings(settings);
return this;
}
public RolloverRequestBuilder alias(Alias alias) {
this.request.getCreateIndexRequest().alias(alias);
return this;
}
public RolloverRequestBuilder simpleMapping(String... source) {
this.request.getCreateIndexRequest().simpleMapping(source);
return this;
}
public RolloverRequestBuilder mapping(String source) {
this.request.getCreateIndexRequest().mapping(source);
return this;
}
/**
* Sets the number of shard copies that should be active for creation of the
* new rollover index to return. Defaults to {@link ActiveShardCount#DEFAULT}, which will
* wait for one shard copy (the primary) to become active. Set this value to
* {@link ActiveShardCount#ALL} to wait for all shards (primary and all replicas) to be active
* before returning. Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any
* non-negative integer, up to the number of copies per shard (number of replicas + 1),
* to wait for the desired amount of shard copies to become active before returning.
* Index creation will only wait up until the timeout value for the number of shard copies
* to be active before returning. Check {@link RolloverResponse#isShardsAcknowledged()} to
* determine if the requisite shard copies were all started before returning or timing out.
*
* @param waitForActiveShards number of active shard copies to wait on
*/
public RolloverRequestBuilder waitForActiveShards(ActiveShardCount waitForActiveShards) {
this.request.getCreateIndexRequest().waitForActiveShards(waitForActiveShards);
return this;
}
/**
* A shortcut for {@link #waitForActiveShards(ActiveShardCount)} where the numerical
* shard count is passed in, instead of having to first call {@link ActiveShardCount#from(int)}
* to get the ActiveShardCount.
*/
public RolloverRequestBuilder waitForActiveShards(final int waitForActiveShards) {
return waitForActiveShards(ActiveShardCount.from(waitForActiveShards));
}
}
| RolloverRequestBuilder |
java | eclipse-vertx__vert.x | vertx-core/src/test/java/io/vertx/tests/vertx/GlobalEventExecutorNotificationTest.java | {
"start": 983,
"end": 3564
} | class ____ extends AsyncTestBase {
private Vertx vertx;
@After
public void after() throws Exception {
if (vertx != null) {
CountDownLatch latch = new CountDownLatch(1);
vertx.close().onComplete(v -> latch.countDown());
awaitLatch(latch);
}
}
@Test
public void testConnectError() {
testConnectErrorNotifiesOnEventLoop(new NetClientOptions());
}
@Test
public void testProxyConnectError() {
testConnectErrorNotifiesOnEventLoop(new NetClientOptions()
.setProxyOptions(new ProxyOptions()
.setPort(1234)
.setType(ProxyType.SOCKS5)
.setHost("localhost")));
}
private void testConnectErrorNotifiesOnEventLoop(NetClientOptions options) {
RuntimeException cause = new RuntimeException();
vertx = VertxBootstrap.create().transport(new NioTransport() {
@Override
public ChannelFactory<? extends Channel> channelFactory(boolean domainSocket) {
return (ChannelFactory<Channel>) () -> {
throw cause;
};
}
}).init().vertx();
vertx.createNetServer().connectHandler(so -> {
fail();
}).listen(1234, "localhost").onComplete( onSuccess(v -> {
vertx.createNetClient(options).connect(1234, "localhost").onComplete(onFailure(err -> {
assertSame(err, cause);
testComplete();
}));
}));
await();
}
@Test
public void testNetBindError() {
RuntimeException cause = new RuntimeException();
vertx = VertxBootstrap.create().transport(new NioTransport() {
@Override
public ChannelFactory<? extends ServerChannel> serverChannelFactory(boolean domainSocket) {
return (ChannelFactory<ServerChannel>) () -> {
throw cause;
};
}
}).init().vertx();
vertx.createNetServer()
.connectHandler(so -> fail())
.listen(1234, "localhost").onComplete(onFailure(err -> {
testComplete();
}));
await();
}
@Test
public void testHttpBindError() {
RuntimeException cause = new RuntimeException();
vertx = VertxBootstrap.create().transport(new NioTransport() {
@Override
public ChannelFactory<? extends ServerChannel> serverChannelFactory(boolean domainSocket) {
return (ChannelFactory<ServerChannel>) () -> {
throw cause;
};
}
}).init().vertx();
vertx.createHttpServer()
.requestHandler(req -> fail())
.listen(HttpTestBase.DEFAULT_HTTP_PORT, "localhost").onComplete(onFailure(err -> {
testComplete();
}));
await();
}
}
| GlobalEventExecutorNotificationTest |
java | micronaut-projects__micronaut-core | core/src/main/java/io/micronaut/core/util/AnsiColour.java | {
"start": 775,
"end": 7142
} | enum ____ {
//Color end string, color reset
RESET("\033[0m"),
// Regular Colors. Normal color, no bold, background color etc.
BLACK("\033[0;30m"), // BLACK
RED("\033[0;31m"), // RED
GREEN("\033[0;32m"), // GREEN
YELLOW("\033[0;33m"), // YELLOW
BLUE("\033[0;34m"), // BLUE
MAGENTA("\033[0;35m"), // MAGENTA
CYAN("\033[0;36m"), // CYAN
WHITE("\033[0;37m"), // WHITE
// Bold
BLACK_BOLD("\033[1;30m"), // BLACK
RED_BOLD("\033[1;31m"), // RED
GREEN_BOLD("\033[1;32m"), // GREEN
YELLOW_BOLD("\033[1;33m"), // YELLOW
BLUE_BOLD("\033[1;34m"), // BLUE
MAGENTA_BOLD("\033[1;35m"), // MAGENTA
CYAN_BOLD("\033[1;36m"), // CYAN
WHITE_BOLD("\033[1;37m"), // WHITE
// Underline
BLACK_UNDERLINED("\033[4;30m"), // BLACK
RED_UNDERLINED("\033[4;31m"), // RED
GREEN_UNDERLINED("\033[4;32m"), // GREEN
YELLOW_UNDERLINED("\033[4;33m"), // YELLOW
BLUE_UNDERLINED("\033[4;34m"), // BLUE
MAGENTA_UNDERLINED("\033[4;35m"), // MAGENTA
CYAN_UNDERLINED("\033[4;36m"), // CYAN
WHITE_UNDERLINED("\033[4;37m"), // WHITE
// Background
BLACK_BACKGROUND("\033[40m"), // BLACK
RED_BACKGROUND("\033[41m"), // RED
GREEN_BACKGROUND("\033[42m"), // GREEN
YELLOW_BACKGROUND("\033[43m"), // YELLOW
BLUE_BACKGROUND("\033[44m"), // BLUE
MAGENTA_BACKGROUND("\033[45m"), // MAGENTA
CYAN_BACKGROUND("\033[46m"), // CYAN
WHITE_BACKGROUND("\033[47m"), // WHITE
// High Intensity
BLACK_BRIGHT("\033[0;90m"), // BLACK
RED_BRIGHT("\033[0;91m"), // RED
GREEN_BRIGHT("\033[0;92m"), // GREEN
YELLOW_BRIGHT("\033[0;93m"), // YELLOW
BLUE_BRIGHT("\033[0;94m"), // BLUE
MAGENTA_BRIGHT("\033[0;95m"), // MAGENTA
CYAN_BRIGHT("\033[0;96m"), // CYAN
WHITE_BRIGHT("\033[0;97m"), // WHITE
// Bold High Intensity
BLACK_BOLD_BRIGHT("\033[1;90m"), // BLACK
RED_BOLD_BRIGHT("\033[1;91m"), // RED
GREEN_BOLD_BRIGHT("\033[1;92m"), // GREEN
YELLOW_BOLD_BRIGHT("\033[1;93m"), // YELLOW
BLUE_BOLD_BRIGHT("\033[1;94m"), // BLUE
MAGENTA_BOLD_BRIGHT("\033[1;95m"), // MAGENTA
CYAN_BOLD_BRIGHT("\033[1;96m"), // CYAN
WHITE_BOLD_BRIGHT("\033[1;97m"), // WHITE
// High Intensity backgrounds
BLACK_BACKGROUND_BRIGHT("\033[0;100m"), // BLACK
RED_BACKGROUND_BRIGHT("\033[0;101m"), // RED
GREEN_BACKGROUND_BRIGHT("\033[0;102m"), // GREEN
YELLOW_BACKGROUND_BRIGHT("\033[0;103m"), // YELLOW
BLUE_BACKGROUND_BRIGHT("\033[0;104m"), // BLUE
MAGENTA_BACKGROUND_BRIGHT("\033[0;105m"), // MAGENTA
CYAN_BACKGROUND_BRIGHT("\033[0;106m"), // CYAN
WHITE_BACKGROUND_BRIGHT("\033[0;107m"); // WHITE
private final String code;
AnsiColour(String code) {
this.code = code;
}
/**
* Highlight cyan if supported.
* @param text The text
* @return the string
*/
public static String cyan(String text) {
if (isSupported()) {
return AnsiColour.CYAN + text + AnsiColour.RESET;
} else {
return text;
}
}
/**
* Highlight bright cyan if supported.
* @param text The text
* @return the string
*/
public static String brightCyan(String text) {
if (isSupported()) {
return AnsiColour.CYAN_BRIGHT + text + AnsiColour.RESET;
} else {
return text;
}
}
/**
* Highlight in yellow.
* @param text The text
* @return The formatted string
*/
public static String yellow(@NonNull String text) {
if (isSupported()) {
return AnsiColour.YELLOW + text + AnsiColour.RESET;
} else {
return text;
}
}
/**
* Highlight in bright blue.
* @param text The text
* @return The formatted string
*/
public static String brightBlue(String text) {
if (isSupported()) {
return AnsiColour.BLUE_BRIGHT + text + AnsiColour.RESET;
} else {
return text;
}
}
/**
* Output in magenta bold.
* @param text The text
* @return The formatted text.
*/
public static String magentaBold(String text) {
if (isSupported()) {
return AnsiColour.MAGENTA_BOLD + text + AnsiColour.RESET;
} else {
return text;
}
}
/**
* Output green.
* @param text The text
* @return The formatted text
*/
public static String green(String text) {
if (isSupported()) {
return AnsiColour.GREEN + text + AnsiColour.RESET;
} else {
return text;
}
}
/**
* Output bright yellow.
* @param text The text
* @return The formatted text
*/
public static String brightYellow(String text) {
if (isSupported()) {
return AnsiColour.YELLOW_BRIGHT + text + AnsiColour.RESET;
} else {
return text;
}
}
/**
* Format an object for display.
* @param object The object
* @return The formatted object
*/
public static @NonNull String formatObject(@Nullable Object object) {
if (object instanceof CharSequence charSequence) {
return green("\"" + charSequence + "\"");
} else if (object instanceof Number number) {
return brightBlue(number.toString());
} else if (object == null) {
return brightBlue("null");
} else {
return brightYellow(object.toString());
}
}
/**
* Format blue.
* @param text The text
* @return The formatted text
*/
public static @NonNull String blue(@NonNull String text) {
if (isSupported()) {
return AnsiColour.BLUE + text + AnsiColour.RESET;
} else {
return text;
}
}
@Override
public String toString() {
return code;
}
/**
* Are ANSI colors supported.
* @return True if they are
*/
public static boolean isSupported() {
String os = System.getProperty("os.name").toLowerCase();
return !os.contains("win") || System.console() != null;
}
}
| AnsiColour |
java | apache__kafka | tools/src/main/java/org/apache/kafka/tools/consumer/ConsoleConsumer.java | {
"start": 5721,
"end": 8425
} | class ____ {
final Time time = Time.SYSTEM;
final long timeoutMs;
final Consumer<byte[], byte[]> consumer;
Iterator<ConsumerRecord<byte[], byte[]>> recordIter = Collections.emptyIterator();
public ConsumerWrapper(ConsoleConsumerOptions opts, Consumer<byte[], byte[]> consumer) {
this.consumer = consumer;
timeoutMs = opts.timeoutMs();
Optional<String> topic = opts.topicArg();
if (topic.isPresent()) {
if (opts.partitionArg().isPresent()) {
seek(topic.get(), opts.partitionArg().getAsInt(), opts.offsetArg());
} else {
consumer.subscribe(List.of(topic.get()));
}
} else {
opts.includedTopicsArg().ifPresent(topics -> consumer.subscribe(Pattern.compile(topics)));
}
}
private void seek(String topic, int partitionId, long offset) {
TopicPartition topicPartition = new TopicPartition(topic, partitionId);
consumer.assign(List.of(topicPartition));
if (offset == ListOffsetsRequest.EARLIEST_TIMESTAMP) {
consumer.seekToBeginning(List.of(topicPartition));
} else if (offset == ListOffsetsRequest.LATEST_TIMESTAMP) {
consumer.seekToEnd(List.of(topicPartition));
} else {
consumer.seek(topicPartition, offset);
}
}
void resetUnconsumedOffsets() {
Map<TopicPartition, Long> smallestUnconsumedOffsets = new HashMap<>();
while (recordIter.hasNext()) {
ConsumerRecord<byte[], byte[]> record = recordIter.next();
TopicPartition tp = new TopicPartition(record.topic(), record.partition());
// avoid auto-committing offsets which haven't been consumed
smallestUnconsumedOffsets.putIfAbsent(tp, record.offset());
}
smallestUnconsumedOffsets.forEach(consumer::seek);
}
ConsumerRecord<byte[], byte[]> receive() {
long startTimeMs = time.milliseconds();
while (!recordIter.hasNext()) {
recordIter = consumer.poll(Duration.ofMillis(timeoutMs)).iterator();
if (!recordIter.hasNext() && (time.milliseconds() - startTimeMs > timeoutMs)) {
throw new TimeoutException();
}
}
return recordIter.next();
}
void wakeup() {
this.consumer.wakeup();
}
void cleanup() {
resetUnconsumedOffsets();
this.consumer.close();
}
}
}
| ConsumerWrapper |
java | apache__maven | compat/maven-model/src/main/java/org/apache/maven/model/merge/ModelMerger.java | {
"start": 96821,
"end": 97164
} | class ____ implements KeyComputer<Exclusion> {
@Override
public Object key(Exclusion exclusion) {
return getExclusionKey(exclusion);
}
}
/**
* Return the second value if <code>sourceDominant</code> is true, the first one otherwise.
* @param <T>
*/
private static | ExclusionKeyComputer |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/jaxb/mapping/spi/JaxbManagedType.java | {
"start": 254,
"end": 482
} | interface ____ JAXB bindings representing entities, mapped-superclasses and embeddables (JPA collective
* calls these "managed types" in terms of its Metamodel api).
*
* @author Strong Liu
* @author Steve Ebersole
*/
public | for |
java | apache__flink | flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/runtime/sequencedmultisetstate/SequencedMultiSetStateTest.java | {
"start": 25803,
"end": 28415
} | class ____ implements RecordEqualiser, HashFunction {
private final int keyPos;
private TestRecordEqualiser(int keyPos) {
this.keyPos = keyPos;
}
@Override
public boolean equals(RowData row1, RowData row2) {
return row1.getRowKind() == row2.getRowKind()
&& row1.getString(keyPos).equals(row2.getString(keyPos));
}
@Override
public int hashCode(Object data) {
RowData rd = (RowData) data;
return Objects.hash(rd.getRowKind(), rd.getString(keyPos));
}
}
private static void assertStateContents(
SequencedMultiSetState<RowData> state, RowData rowData, Long timestamp)
throws Exception {
assertStateContents(state, Tuple2.of(rowData, timestamp));
}
@SafeVarargs
private static void assertStateContents(
SequencedMultiSetState<RowData> state, Tuple2<RowData, Long>... expectedArr)
throws Exception {
List<Tuple2<RowData, Long>> actual = new ArrayList<>();
state.iterator().forEachRemaining(actual::add);
assertEquals(expectedArr.length == 0, state.isEmpty());
assertEquals(expectedArr.length, actual.size());
Assertions.assertArrayEquals(expectedArr, actual.toArray());
}
private static void removeAndAssert(
SequencedMultiSetState<RowData> state,
RowData key,
StateChangeType expectedResultType,
RowData... expectedReturnedRow)
throws Exception {
StateChangeInfo<RowData> ret = state.remove(key);
assertEquals(expectedResultType, ret.getChangeType());
switch (ret.getChangeType()) {
case REMOVAL_NOT_FOUND:
assertEquals(Optional.empty(), ret.getPayload());
break;
case REMOVAL_ALL:
assertEquals(0, ret.getSizeAfter());
assertTrue(state.isEmpty(), "state is expected to be empty");
assertEquals(Optional.of(expectedReturnedRow[0]), ret.getPayload());
break;
case REMOVAL_OTHER:
assertFalse(state.isEmpty(), "state is expected to be non-empty");
assertEquals(Optional.empty(), ret.getPayload());
break;
case REMOVAL_LAST_ADDED:
assertFalse(state.isEmpty(), "state is expected to be non-empty");
assertEquals(Optional.of(expectedReturnedRow[0]), ret.getPayload());
break;
}
}
private static | TestRecordEqualiser |
java | apache__camel | dsl/camel-jbang/camel-jbang-core/src/main/java/org/apache/camel/dsl/jbang/core/commands/process/ListProperties.java | {
"start": 1732,
"end": 8033
} | class ____ implements Iterable<String> {
public PidNameKeyCompletionCandidates() {
}
@Override
public Iterator<String> iterator() {
return List.of("pid", "name", "key").iterator();
}
}
@CommandLine.Parameters(description = "Name or pid of running Camel integration", arity = "0..1")
String name = "*";
@CommandLine.Option(names = { "--sort" }, completionCandidates = PidNameKeyCompletionCandidates.class,
description = "Sort by pid, name or key", defaultValue = "pid")
String sort;
@CommandLine.Option(names = { "--startup" }, description = "List only startup configuration")
boolean startup;
@CommandLine.Option(names = { "--verbose" }, description = "Whether to include more details")
boolean verbose;
@CommandLine.Option(names = { "--internal" }, description = "Whether to include internal configuration")
boolean internal;
@CommandLine.Option(names = { "--mask" },
description = "Whether to mask configuration values to avoid printing sensitive information such as password or access keys",
defaultValue = "true")
boolean mask = true;
public ListProperties(CamelJBangMain main) {
super(main);
}
@Override
public Integer doProcessWatchCall() throws Exception {
List<Row> rows = new ArrayList<>();
List<Long> pids = findPids(name);
ProcessHandle.allProcesses()
.filter(ph -> pids.contains(ph.pid()))
.forEach(ph -> {
JsonObject root = loadStatus(ph.pid());
// there must be a status file for the running Camel integration
if (root != null) {
Row row = new Row();
JsonObject context = (JsonObject) root.get("context");
if (context == null) {
return;
}
row.name = context.getString("name");
if ("CamelJBang".equals(row.name)) {
row.name = ProcessHelper.extractName(root, ph);
}
row.pid = Long.toString(ph.pid());
JsonArray arr;
if (startup) {
JsonObject jv = (JsonObject) root.get("main-configuration");
arr = jv.getCollectionOrDefault("configurations", null);
} else {
JsonObject jv = (JsonObject) root.get("properties");
arr = jv.getCollectionOrDefault("properties", null);
}
for (int i = 0; arr != null && i < arr.size(); i++) {
row = row.copy();
JsonObject jo = (JsonObject) arr.get(i);
row.key = jo.getString("key");
String value = jo.getString("value");
if (mask && SensitiveUtils.containsSensitive(row.key)) {
value = "xxxxxx";
}
row.value = value;
value = jo.getString("originalValue");
if (mask && SensitiveUtils.containsSensitive(row.key)) {
value = "xxxxxx";
}
row.originalValue = value;
row.internalLoc = jo.getBooleanOrDefault("internal", false);
row.source = jo.getString("source");
row.loc = sanitizeLocation(jo.getString("location"));
boolean accept = internal || !row.internalLoc;
if (accept) {
rows.add(row);
}
}
}
});
// sort rows
rows.sort(this::sortRow);
if (!rows.isEmpty()) {
printer().println(AsciiTable.getTable(AsciiTable.NO_BORDERS, rows, Arrays.asList(
new Column().header("PID").headerAlign(HorizontalAlign.CENTER).with(r -> r.pid),
new Column().header("NAME").dataAlign(HorizontalAlign.LEFT).maxWidth(30, OverflowBehaviour.ELLIPSIS_RIGHT)
.with(r -> r.name),
new Column().header("LOCATION").dataAlign(HorizontalAlign.LEFT).maxWidth(80, OverflowBehaviour.NEWLINE)
.with(r -> r.loc),
new Column().header("KEY").dataAlign(HorizontalAlign.LEFT).maxWidth(50, OverflowBehaviour.ELLIPSIS_RIGHT)
.with(r -> r.key),
new Column().header("VALUE").dataAlign(HorizontalAlign.LEFT).maxWidth(80, OverflowBehaviour.NEWLINE)
.with(r -> "" + r.value),
new Column().header("FUNCTION").visible(verbose).dataAlign(HorizontalAlign.LEFT)
.maxWidth(50, OverflowBehaviour.ELLIPSIS_RIGHT)
.with(this::getFunction),
new Column().header("ORIGINAL VALUE").visible(verbose).dataAlign(HorizontalAlign.LEFT)
.maxWidth(80, OverflowBehaviour.NEWLINE)
.with(r -> "" + r.originalValue))));
}
return 0;
}
protected String getFunction(Row r) {
return StringHelper.before(r.source, ":", r.source);
}
protected int sortRow(Row o1, Row o2) {
String s = sort;
int negate = 1;
if (s.startsWith("-")) {
s = s.substring(1);
negate = -1;
}
switch (s) {
case "pid":
return Long.compare(Long.parseLong(o1.pid), Long.parseLong(o2.pid)) * negate;
case "name":
return o1.name.compareToIgnoreCase(o2.name) * negate;
case "key":
return o1.key.compareToIgnoreCase(o2.key) * negate;
default:
return 0;
}
}
private static | PidNameKeyCompletionCandidates |
java | quarkusio__quarkus | extensions/websockets-next/runtime/src/main/java/io/quarkus/websockets/next/runtime/ConnectionManager.java | {
"start": 705,
"end": 4327
} | class ____ implements OpenConnections {
private static final Logger LOG = Logger.getLogger(ConnectionManager.class);
// generatedEndpointClassName -> open connections
private final ConcurrentMap<String, Set<WebSocketConnection>> endpointToConnections = new ConcurrentHashMap<>();
private final List<ConnectionListener> listeners = new CopyOnWriteArrayList<>();
private final Event<WebSocketConnection> openEvent;
private final Event<WebSocketConnection> closedEvent;
ConnectionManager(@Open Event<WebSocketConnection> openEvent, @Closed Event<WebSocketConnection> closedEvent) {
ArcContainer container = Arc.container();
this.openEvent = container.resolveObserverMethods(WebSocketConnection.class, Open.Literal.INSTANCE).isEmpty()
? null
: openEvent;
this.closedEvent = container.resolveObserverMethods(WebSocketConnection.class, Closed.Literal.INSTANCE)
.isEmpty() ? null : closedEvent;
}
@Override
public Iterator<WebSocketConnection> iterator() {
return stream().iterator();
}
@Override
public Stream<WebSocketConnection> stream() {
return endpointToConnections.values().stream().flatMap(Set::stream).filter(WebSocketConnection::isOpen);
}
void add(String endpoint, WebSocketConnection connection) {
LOG.debugf("Add connection: %s", connection);
if (endpointToConnections.computeIfAbsent(endpoint, e -> ConcurrentHashMap.newKeySet()).add(connection)) {
if (openEvent != null) {
openEvent.fireAsync(connection);
}
if (!listeners.isEmpty()) {
for (ConnectionListener listener : listeners) {
try {
listener.connectionAdded(endpoint, connection);
} catch (Exception e) {
LOG.warnf("Unable to call listener#connectionAdded() on [%s]: %s", listener.getClass(),
e.toString());
}
}
}
}
}
void remove(String endpoint, WebSocketConnection connection) {
LOG.debugf("Remove connection: %s", connection);
Set<WebSocketConnection> connections = endpointToConnections.get(endpoint);
if (connections != null) {
if (connections.remove(connection)) {
if (closedEvent != null) {
closedEvent.fireAsync(connection);
}
if (!listeners.isEmpty()) {
for (ConnectionListener listener : listeners) {
try {
listener.connectionRemoved(endpoint, connection.id());
} catch (Exception e) {
LOG.warnf("Unable to call listener#connectionRemoved() on [%s]: %s", listener.getClass(),
e.toString());
}
}
}
}
}
}
/**
*
* @param endpoint
* @return the connections for the given endpoint, never {@code null}
*/
public Set<WebSocketConnection> getConnections(String endpoint) {
Set<WebSocketConnection> ret = endpointToConnections.get(endpoint);
if (ret == null) {
return Set.of();
}
return ret;
}
public void addListener(ConnectionListener listener) {
this.listeners.add(listener);
}
@PreDestroy
void destroy() {
endpointToConnections.clear();
}
public | ConnectionManager |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AbstractRequestInterceptor.java | {
"start": 1708,
"end": 5225
} | class ____ implements
RequestInterceptor {
private Configuration conf;
private AMRMProxyApplicationContext appContext;
private RequestInterceptor nextInterceptor;
/**
* Sets the {@link RequestInterceptor} in the chain.
*/
@Override
public void setNextInterceptor(RequestInterceptor nextInterceptor) {
this.nextInterceptor = nextInterceptor;
}
/**
* Sets the {@link Configuration}.
*/
@Override
public void setConf(Configuration conf) {
this.conf = conf;
if (this.nextInterceptor != null) {
this.nextInterceptor.setConf(conf);
}
}
/**
* Gets the {@link Configuration}.
*/
@Override
public Configuration getConf() {
return this.conf;
}
/**
* Initializes the {@link RequestInterceptor}.
*/
@Override
public void init(AMRMProxyApplicationContext appContext) {
Preconditions.checkState(this.appContext == null,
"init is called multiple times on this interceptor: "
+ this.getClass().getName());
this.appContext = appContext;
if (this.nextInterceptor != null) {
this.nextInterceptor.init(appContext);
}
}
/**
* Recover {@link RequestInterceptor} state from store.
*/
@Override
public void recover(Map<String, byte[]> recoveredDataMap) {
if (this.nextInterceptor != null) {
this.nextInterceptor.recover(recoveredDataMap);
}
}
/**
* Disposes the {@link RequestInterceptor}.
*/
@Override
public void shutdown() {
if (this.nextInterceptor != null) {
this.nextInterceptor.shutdown();
}
}
/**
* Gets the next {@link RequestInterceptor} in the chain.
*/
@Override
public RequestInterceptor getNextInterceptor() {
return this.nextInterceptor;
}
/**
* Gets the {@link AMRMProxyApplicationContext}.
*/
public AMRMProxyApplicationContext getApplicationContext() {
return this.appContext;
}
/**
* Default implementation that invokes the distributed scheduling version
* of the register method.
*
* @param request ApplicationMaster allocate request
* @return Distribtued Scheduler Allocate Response
* @throws YarnException if fails
* @throws IOException if fails
*/
@Override
public DistributedSchedulingAllocateResponse allocateForDistributedScheduling(
DistributedSchedulingAllocateRequest request)
throws YarnException, IOException {
return (this.nextInterceptor != null) ?
this.nextInterceptor.allocateForDistributedScheduling(request) : null;
}
/**
* Default implementation that invokes the distributed scheduling version
* of the allocate method.
*
* @param request ApplicationMaster registration request
* @return Distributed Scheduler Register Response
* @throws YarnException if fails
* @throws IOException if fails
*/
@Override
public RegisterDistributedSchedulingAMResponse
registerApplicationMasterForDistributedScheduling(
RegisterApplicationMasterRequest request)
throws YarnException, IOException {
return (this.nextInterceptor != null) ? this.nextInterceptor
.registerApplicationMasterForDistributedScheduling(request) : null;
}
/**
* A helper method for getting NM state store.
*
* @return the NMSS instance
*/
public NMStateStoreService getNMStateStore() {
if (this.appContext == null || this.appContext.getNMContext() == null) {
return null;
}
return this.appContext.getNMContext().getNMStateStore();
}
}
| AbstractRequestInterceptor |
java | google__dagger | javatests/dagger/hilt/android/testing/HiltAndroidRuleTest.java | {
"start": 1039,
"end": 3069
} | class ____ {}
@Test
@Config(application = HiltTestApplication.class)
public void testMissingHiltAndroidTest_fails() throws Exception {
IllegalStateException exception =
assertThrows(
IllegalStateException.class,
() -> new HiltAndroidRule(new NonHiltTest()));
assertThat(exception)
.hasMessageThat()
.isEqualTo(
"Expected dagger.hilt.android.testing.HiltAndroidRuleTest$NonHiltTest to be "
+ "annotated with @HiltAndroidTest.");
}
@Test
@Config(application = Application.class)
public void testNonHiltTestApplication_fails() throws Exception {
IllegalStateException exception =
assertThrows(
IllegalStateException.class,
() -> new HiltAndroidRule(HiltAndroidRuleTest.this));
assertThat(exception)
.hasMessageThat()
.isEqualTo(
"Hilt test, dagger.hilt.android.testing.HiltAndroidRuleTest, must use a Hilt test "
+ "application but found android.app.Application. To fix, configure the test to "
+ "use HiltTestApplication or a custom Hilt test application generated with "
+ "@CustomTestApplication.");
}
@Test
@Config(application = HiltAndroidRuleTestApp.class)
public void testHiltAndroidApplication_fails() throws Exception {
IllegalStateException exception =
assertThrows(
IllegalStateException.class,
() -> new HiltAndroidRule(HiltAndroidRuleTest.this));
assertThat(exception)
.hasMessageThat()
.isEqualTo(
"Hilt test, dagger.hilt.android.testing.HiltAndroidRuleTest, cannot use a "
+ "@HiltAndroidApp application but found "
+ "dagger.hilt.android.testing.HiltAndroidRuleTestApp. To fix, configure the "
+ "test to use HiltTestApplication or a custom Hilt test application generated "
+ "with @CustomTestApplication.");
}
}
| NonHiltTest |
java | hibernate__hibernate-orm | hibernate-envers/src/main/java/org/hibernate/envers/query/order/NullPrecedence.java | {
"start": 226,
"end": 398
} | enum ____ {
/**
* Null values will be rendered before non-null values.
*/
FIRST,
/**
* Null values will be rendered after non-null values.
*/
LAST
}
| NullPrecedence |
java | apache__camel | components/camel-cxf/camel-cxf-soap/src/main/java/org/apache/camel/component/cxf/jaxws/CxfComponent.java | {
"start": 7307,
"end": 7637
} | class ____ {
//A snapshot of a CxfEndpoint Bean URI
CxfEndpoint cxfEndpoint;
Map<String, Object> parameters;
BeanCacheEntry(CxfEndpoint cxfEndpoint, Map<String, Object> parameters) {
this.cxfEndpoint = cxfEndpoint;
this.parameters = parameters;
}
}
}
| BeanCacheEntry |
java | apache__camel | dsl/camel-jbang/camel-jbang-core/src/main/java/org/apache/camel/dsl/jbang/core/common/ProcessHelper.java | {
"start": 1115,
"end": 1598
} | class ____ {
private static final String[] DSL_EXT = new String[] { "java", "xml", "yaml" };
private static final Pattern PATTERN = Pattern.compile("([\\w|\\-.])+");
private ProcessHelper() {
}
public static String extractName(JsonObject root, ProcessHandle ph) {
String name = doExtractName(root, ph);
return FileUtil.stripPath(name);
}
static String doExtractName(JsonObject root, ProcessHandle ph) {
// favour main | ProcessHelper |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/secondarytable/SecondaryTableQuotingTest.java | {
"start": 1490,
"end": 2055
} | class ____ {
@Test
public void test(SessionFactoryScope scope) {
final EntityPersister entityDescriptor = scope.getSessionFactory().getRuntimeMetamodels().getMappingMetamodel()
.getEntityDescriptor( Foo.class );
final EntityTableMapping secondaryTableMapping = entityDescriptor.getTableMappings()[1];
assertFalse( secondaryTableMapping.isOptional() );
}
@Entity(name = "Foo")
@SecondaryTable(name = "bar", pkJoinColumns = {@PrimaryKeyJoinColumn(name = "foo_id")})
@SecondaryRow(table = "bar", optional = false)
public static | SecondaryTableQuotingTest |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/context/bean/override/mockito/MockitoBeans.java | {
"start": 1311,
"end": 1364
} | interface ____ {
MockitoBean[] value();
}
| MockitoBeans |
java | apache__maven | its/core-it-support/core-it-javaagent/src/main/java/org/apache/maven/coreits/javaagent/mng5669/Premain.java | {
"start": 3099,
"end": 3676
} | class ____ extends AdviceAdapter {
GetSourceMethodAdvice(int api, MethodVisitor mv, int access, String name, String desc) {
super(api, mv, access, name, desc);
}
@Override
protected void onMethodEnter() {
// System.out.println( options ),
mv.visitFieldInsn(GETSTATIC, "java/lang/System", "out", "Ljava/io/PrintStream;");
mv.visitVarInsn(ALOAD, 1);
mv.visitMethodInsn(INVOKEVIRTUAL, "java/io/PrintStream", "println", "(Ljava/lang/Object;)V", false);
}
}
}
| GetSourceMethodAdvice |
java | apache__logging-log4j2 | log4j-1.2-api/src/main/java/org/apache/log4j/spi/DefaultRepositorySelector.java | {
"start": 841,
"end": 1166
} | class ____ implements RepositorySelector {
final LoggerRepository repository;
public DefaultRepositorySelector(final LoggerRepository repository) {
this.repository = repository;
}
@Override
public LoggerRepository getLoggerRepository() {
return repository;
}
}
| DefaultRepositorySelector |
java | spring-projects__spring-boot | module/spring-boot-health/src/test/java/org/springframework/boot/health/contributor/AbstractHealthIndicatorTests.java | {
"start": 3716,
"end": 4165
} | class ____ extends AbstractHealthIndicator {
private final Consumer<Health.Builder> action;
TestHealthIndicator(String message, Consumer<Health.Builder> action) {
super(message);
this.action = action;
}
TestHealthIndicator(Consumer<Health.Builder> action) {
this.action = action;
}
@Override
protected void doHealthCheck(Health.Builder builder) throws Exception {
this.action.accept(builder);
}
}
}
| TestHealthIndicator |
java | google__jimfs | jimfs/src/main/java/com/google/common/jimfs/PathType.java | {
"start": 1311,
"end": 7255
} | class ____ {
/**
* Returns a Unix-style path type. "/" is both the root and the only separator. Any path starting
* with "/" is considered absolute. The nul character ('\0') is disallowed in paths.
*/
public static PathType unix() {
return UnixPathType.INSTANCE;
}
/**
* Returns a Windows-style path type. The canonical separator character is "\". "/" is also
* treated as a separator when parsing paths.
*
* <p>As much as possible, this implementation follows the information provided in <a
* href="http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx">this
* article</a>. Paths with drive-letter roots (e.g. "C:\") and paths with UNC roots (e.g.
* "\\host\share\") are supported.
*
* <p>Two Windows path features are not currently supported as they are too Windows-specific:
*
* <ul>
* <li>Relative paths containing a drive-letter root, for example "C:" or "C:foo\bar". Such
* paths have a root component and optionally have names, but are <i>relative</i> paths,
* relative to the working directory of the drive identified by the root.
* <li>Absolute paths with no root, for example "\foo\bar". Such paths are absolute paths on the
* current drive.
* </ul>
*/
public static PathType windows() {
return WindowsPathType.INSTANCE;
}
private final boolean allowsMultipleRoots;
private final String separator;
private final String otherSeparators;
private final Joiner joiner;
private final Splitter splitter;
protected PathType(boolean allowsMultipleRoots, char separator, char... otherSeparators) {
this.separator = String.valueOf(separator);
this.allowsMultipleRoots = allowsMultipleRoots;
this.otherSeparators = String.valueOf(otherSeparators);
this.joiner = Joiner.on(separator);
this.splitter = createSplitter(separator, otherSeparators);
}
private static final char[] regexReservedChars = "^$.?+*\\[]{}()".toCharArray();
static {
Arrays.sort(regexReservedChars);
}
private static boolean isRegexReserved(char c) {
return Arrays.binarySearch(regexReservedChars, c) >= 0;
}
private static Splitter createSplitter(char separator, char... otherSeparators) {
if (otherSeparators.length == 0) {
return Splitter.on(separator).omitEmptyStrings();
}
// TODO(cgdecker): When CharMatcher is out of @Beta, us Splitter.on(CharMatcher)
StringBuilder patternBuilder = new StringBuilder();
patternBuilder.append("[");
appendToRegex(separator, patternBuilder);
for (char other : otherSeparators) {
appendToRegex(other, patternBuilder);
}
patternBuilder.append("]");
return Splitter.onPattern(patternBuilder.toString()).omitEmptyStrings();
}
private static void appendToRegex(char separator, StringBuilder patternBuilder) {
if (isRegexReserved(separator)) {
patternBuilder.append("\\");
}
patternBuilder.append(separator);
}
/** Returns whether or not this type of path allows multiple root directories. */
public final boolean allowsMultipleRoots() {
return allowsMultipleRoots;
}
/**
* Returns the canonical separator for this path type. The returned string always has a length of
* one.
*/
public final String getSeparator() {
return separator;
}
/**
* Returns the other separators that are recognized when parsing a path. If no other separators
* are recognized, the empty string is returned.
*/
public final String getOtherSeparators() {
return otherSeparators;
}
/** Returns the path joiner for this path type. */
public final Joiner joiner() {
return joiner;
}
/** Returns the path splitter for this path type. */
public final Splitter splitter() {
return splitter;
}
/** Returns an empty path. */
protected final ParseResult emptyPath() {
return new ParseResult(null, ImmutableList.of(""));
}
/**
* Parses the given strings as a path.
*
* @throws InvalidPathException if the path isn't valid for this path type
*/
public abstract ParseResult parsePath(String path);
@Override
public String toString() {
return getClass().getSimpleName();
}
/** Returns the string form of the given path. */
public abstract String toString(@Nullable String root, Iterable<String> names);
/**
* Returns the string form of the given path for use in the path part of a URI. The root element
* is not nullable as the path must be absolute. The elements of the returned path <i>do not</i>
* need to be escaped. The {@code directory} boolean indicates whether the file the URI is for is
* known to be a directory.
*/
protected abstract String toUriPath(String root, Iterable<String> names, boolean directory);
/**
* Parses a path from the given URI path.
*
* @throws InvalidPathException if the given path isn't valid for this path type
*/
protected abstract ParseResult parseUriPath(String uriPath);
/**
* Creates a URI for the path with the given root and names in the file system with the given URI.
*/
public final URI toUri(
URI fileSystemUri, String root, Iterable<String> names, boolean directory) {
String path = toUriPath(root, names, directory);
try {
// it should not suck this much to create a new URI that's the same except with a path set =(
// need to do it this way for automatic path escaping
return new URI(
fileSystemUri.getScheme(),
fileSystemUri.getUserInfo(),
fileSystemUri.getHost(),
fileSystemUri.getPort(),
path,
null,
null);
} catch (URISyntaxException e) {
throw new AssertionError(e);
}
}
/** Parses a path from the given URI. */
public final ParseResult fromUri(URI uri) {
return parseUriPath(uri.getPath());
}
/** Simple result of parsing a path. */
public static final | PathType |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/EqualsGetClassTest.java | {
"start": 994,
"end": 1436
} | class ____ {
private final CompilationTestHelper helper =
CompilationTestHelper.newInstance(EqualsGetClass.class, getClass());
private final BugCheckerRefactoringTestHelper refactoringHelper =
BugCheckerRefactoringTestHelper.newInstance(EqualsGetClass.class, getClass());
@Test
public void fixes_inline() {
refactoringHelper
.addInputLines(
"Test.java",
"""
| EqualsGetClassTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/DuplicateDateFormatFieldTest.java | {
"start": 2567,
"end": 3012
} | class ____ {
// BUG: Diagnostic contains: uses the field 'm' more than once
DateTimeFormatter formatter = DateTimeFormatter.ofPattern("mm/dd/yyyy hh:mm:ss");
}
""")
.doTest();
}
@Test
public void simpleDateFormat_applyPattern() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import java.text.SimpleDateFormat;
| Test |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/configuration/ScopingTests.java | {
"start": 8908,
"end": 9751
} | class ____ implements org.springframework.beans.factory.config.Scope {
public boolean createNewScope = true;
private Map<String, Object> beans = new HashMap<>();
@Override
public Object get(String name, ObjectFactory<?> objectFactory) {
if (createNewScope) {
beans.clear();
// reset the flag back
createNewScope = false;
}
Object bean = beans.get(name);
// if a new object is requested or none exists under the current
// name, create one
if (bean == null) {
beans.put(name, objectFactory.getObject());
}
return beans.get(name);
}
@Override
public void registerDestructionCallback(String name, Runnable callback) {
throw new IllegalStateException("Not supposed to be called");
}
@Override
public Object remove(String name) {
return beans.remove(name);
}
}
}
| CustomScope |
java | spring-projects__spring-boot | loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipContent.java | {
"start": 14947,
"end": 25231
} | class ____ {
private final ByteBuffer buffer = ByteBuffer.allocate(ZipString.BUFFER_SIZE);
private final Source source;
private final FileDataBlock data;
private final long centralDirectoryPos;
private final int[] index;
private int[] nameHashLookups;
private int[] relativeCentralDirectoryOffsetLookups;
private final NameOffsetLookups nameOffsetLookups;
private int cursor;
private Loader(Source source, Entry directoryEntry, FileDataBlock data, long centralDirectoryPos, int maxSize) {
this.source = source;
this.data = data;
this.centralDirectoryPos = centralDirectoryPos;
this.index = new int[maxSize];
this.nameHashLookups = new int[maxSize];
this.relativeCentralDirectoryOffsetLookups = new int[maxSize];
this.nameOffsetLookups = (directoryEntry != null)
? new NameOffsetLookups(directoryEntry.getName().length(), maxSize) : NameOffsetLookups.NONE;
}
private void add(ZipCentralDirectoryFileHeaderRecord centralRecord, long pos, boolean enableNameOffset)
throws IOException {
int nameOffset = this.nameOffsetLookups.enable(this.cursor, enableNameOffset);
int hash = ZipString.hash(this.buffer, this.data,
pos + ZipCentralDirectoryFileHeaderRecord.FILE_NAME_OFFSET + nameOffset,
centralRecord.fileNameLength() - nameOffset, true);
this.nameHashLookups[this.cursor] = hash;
this.relativeCentralDirectoryOffsetLookups[this.cursor] = (int) ((pos - this.centralDirectoryPos));
this.index[this.cursor] = this.cursor;
this.cursor++;
}
private ZipContent finish(Kind kind, long commentPos, long commentLength, boolean hasJarSignatureFile) {
if (this.cursor != this.nameHashLookups.length) {
this.nameHashLookups = Arrays.copyOf(this.nameHashLookups, this.cursor);
this.relativeCentralDirectoryOffsetLookups = Arrays.copyOf(this.relativeCentralDirectoryOffsetLookups,
this.cursor);
}
int size = this.nameHashLookups.length;
sort(0, size - 1);
int[] lookupIndexes = new int[size];
for (int i = 0; i < size; i++) {
lookupIndexes[this.index[i]] = i;
}
return new ZipContent(this.source, kind, this.data, this.centralDirectoryPos, commentPos, commentLength,
lookupIndexes, this.nameHashLookups, this.relativeCentralDirectoryOffsetLookups,
this.nameOffsetLookups, hasJarSignatureFile);
}
private void sort(int left, int right) {
// Quick sort algorithm, uses nameHashCode as the source but sorts all arrays
if (left < right) {
int pivot = this.nameHashLookups[left + (right - left) / 2];
int i = left;
int j = right;
while (i <= j) {
while (this.nameHashLookups[i] < pivot) {
i++;
}
while (this.nameHashLookups[j] > pivot) {
j--;
}
if (i <= j) {
swap(i, j);
i++;
j--;
}
}
if (left < j) {
sort(left, j);
}
if (right > i) {
sort(i, right);
}
}
}
private void swap(int i, int j) {
swap(this.index, i, j);
swap(this.nameHashLookups, i, j);
swap(this.relativeCentralDirectoryOffsetLookups, i, j);
this.nameOffsetLookups.swap(i, j);
}
private static void swap(int[] array, int i, int j) {
int temp = array[i];
array[i] = array[j];
array[j] = temp;
}
static ZipContent load(Source source) throws IOException {
if (!source.isNested()) {
return loadNonNested(source);
}
try (ZipContent zip = open(source.path())) {
Entry entry = zip.getEntry(source.nestedEntryName());
if (entry == null) {
throw new IOException("Nested entry '%s' not found in container zip '%s'"
.formatted(source.nestedEntryName(), source.path()));
}
return (!entry.isDirectory()) ? loadNestedZip(source, entry) : loadNestedDirectory(source, zip, entry);
}
}
private static ZipContent loadNonNested(Source source) throws IOException {
debug.log("Loading non-nested zip '%s'", source.path());
return openAndLoad(source, Kind.ZIP, new FileDataBlock(source.path()));
}
private static ZipContent loadNestedZip(Source source, Entry entry) throws IOException {
if (entry.centralRecord.compressionMethod() != ZipEntry.STORED) {
throw new IOException("Nested entry '%s' in container zip '%s' must not be compressed"
.formatted(source.nestedEntryName(), source.path()));
}
debug.log("Loading nested zip entry '%s' from '%s'", source.nestedEntryName(), source.path());
return openAndLoad(source, Kind.NESTED_ZIP, entry.getContent());
}
private static ZipContent openAndLoad(Source source, Kind kind, FileDataBlock data) throws IOException {
try {
data.open();
return loadContent(source, kind, data);
}
catch (IOException | RuntimeException ex) {
data.close();
throw ex;
}
}
private static ZipContent loadContent(Source source, Kind kind, FileDataBlock data) throws IOException {
ZipEndOfCentralDirectoryRecord.Located locatedEocd = ZipEndOfCentralDirectoryRecord.load(data);
ZipEndOfCentralDirectoryRecord eocd = locatedEocd.endOfCentralDirectoryRecord();
long eocdPos = locatedEocd.pos();
Zip64EndOfCentralDirectoryLocator zip64Locator = Zip64EndOfCentralDirectoryLocator.find(data, eocdPos);
Zip64EndOfCentralDirectoryRecord zip64Eocd = Zip64EndOfCentralDirectoryRecord.load(data, zip64Locator);
data = data.slice(getStartOfZipContent(data, eocd, zip64Eocd));
long centralDirectoryPos = (zip64Eocd != null) ? zip64Eocd.offsetToStartOfCentralDirectory()
: Integer.toUnsignedLong(eocd.offsetToStartOfCentralDirectory());
long numberOfEntries = (zip64Eocd != null) ? zip64Eocd.totalNumberOfCentralDirectoryEntries()
: Short.toUnsignedInt(eocd.totalNumberOfCentralDirectoryEntries());
if (numberOfEntries < 0) {
throw new IllegalStateException("Invalid number of zip entries in " + source);
}
if (numberOfEntries > Integer.MAX_VALUE) {
throw new IllegalStateException("Too many zip entries in " + source);
}
Loader loader = new Loader(source, null, data, centralDirectoryPos, (int) numberOfEntries);
ByteBuffer signatureNameSuffixBuffer = ByteBuffer.allocate(SIGNATURE_SUFFIX.length);
boolean hasJarSignatureFile = false;
long pos = centralDirectoryPos;
for (int i = 0; i < numberOfEntries; i++) {
ZipCentralDirectoryFileHeaderRecord centralRecord = ZipCentralDirectoryFileHeaderRecord.load(data, pos);
if (!hasJarSignatureFile) {
long filenamePos = pos + ZipCentralDirectoryFileHeaderRecord.FILE_NAME_OFFSET;
if (centralRecord.fileNameLength() > SIGNATURE_SUFFIX.length && ZipString.startsWith(loader.buffer,
data, filenamePos, centralRecord.fileNameLength(), META_INF) >= 0) {
signatureNameSuffixBuffer.clear();
data.readFully(signatureNameSuffixBuffer,
filenamePos + centralRecord.fileNameLength() - SIGNATURE_SUFFIX.length);
hasJarSignatureFile = Arrays.equals(SIGNATURE_SUFFIX, signatureNameSuffixBuffer.array());
}
}
loader.add(centralRecord, pos, false);
pos += centralRecord.size();
}
long commentPos = locatedEocd.pos() + ZipEndOfCentralDirectoryRecord.COMMENT_OFFSET;
return loader.finish(kind, commentPos, eocd.commentLength(), hasJarSignatureFile);
}
/**
* Returns the location in the data that the archive actually starts. For most
* files the archive data will start at 0, however, it is possible to have
* prefixed bytes (often used for startup scripts) at the beginning of the data.
* @param data the source data
* @param eocd the end of central directory record
* @param zip64Eocd the zip64 end of central directory record or {@code null}
* @return the offset within the data where the archive begins
* @throws IOException on I/O error
*/
private static long getStartOfZipContent(FileDataBlock data, ZipEndOfCentralDirectoryRecord eocd,
Zip64EndOfCentralDirectoryRecord zip64Eocd) throws IOException {
long specifiedOffsetToStartOfCentralDirectory = (zip64Eocd != null)
? zip64Eocd.offsetToStartOfCentralDirectory()
: Integer.toUnsignedLong(eocd.offsetToStartOfCentralDirectory());
long sizeOfCentralDirectoryAndEndRecords = getSizeOfCentralDirectoryAndEndRecords(eocd, zip64Eocd);
long actualOffsetToStartOfCentralDirectory = data.size() - sizeOfCentralDirectoryAndEndRecords;
return actualOffsetToStartOfCentralDirectory - specifiedOffsetToStartOfCentralDirectory;
}
private static long getSizeOfCentralDirectoryAndEndRecords(ZipEndOfCentralDirectoryRecord eocd,
Zip64EndOfCentralDirectoryRecord zip64Eocd) {
long result = 0;
result += eocd.size();
if (zip64Eocd != null) {
result += Zip64EndOfCentralDirectoryLocator.SIZE;
result += zip64Eocd.size();
}
result += (zip64Eocd != null) ? zip64Eocd.sizeOfCentralDirectory()
: Integer.toUnsignedLong(eocd.sizeOfCentralDirectory());
return result;
}
private static ZipContent loadNestedDirectory(Source source, ZipContent zip, Entry directoryEntry)
throws IOException {
debug.log("Loading nested directory entry '%s' from '%s'", source.nestedEntryName(), source.path());
if (!source.nestedEntryName().endsWith("/")) {
throw new IllegalArgumentException("Nested entry name must end with '/'");
}
String directoryName = directoryEntry.getName();
zip.data.open();
try {
Loader loader = new Loader(source, directoryEntry, zip.data, zip.centralDirectoryPos, zip.size());
for (int cursor = 0; cursor < zip.size(); cursor++) {
int index = zip.lookupIndexes[cursor];
if (index != directoryEntry.getLookupIndex()) {
long pos = zip.getCentralDirectoryFileHeaderRecordPos(index);
ZipCentralDirectoryFileHeaderRecord centralRecord = ZipCentralDirectoryFileHeaderRecord
.load(zip.data, pos);
long namePos = pos + ZipCentralDirectoryFileHeaderRecord.FILE_NAME_OFFSET;
short nameLen = centralRecord.fileNameLength();
if (ZipString.startsWith(loader.buffer, zip.data, namePos, nameLen, directoryName) != -1) {
loader.add(centralRecord, pos, true);
}
}
}
return loader.finish(Kind.NESTED_DIRECTORY, zip.commentPos, zip.commentLength, zip.hasJarSignatureFile);
}
catch (IOException | RuntimeException ex) {
zip.data.close();
throw ex;
}
}
}
/**
* A single zip content entry.
*/
public | Loader |
java | grpc__grpc-java | benchmarks/src/generated/main/grpc/io/grpc/benchmarks/proto/WorkerServiceGrpc.java | {
"start": 21739,
"end": 25142
} | class ____<Req, Resp> implements
io.grpc.stub.ServerCalls.UnaryMethod<Req, Resp>,
io.grpc.stub.ServerCalls.ServerStreamingMethod<Req, Resp>,
io.grpc.stub.ServerCalls.ClientStreamingMethod<Req, Resp>,
io.grpc.stub.ServerCalls.BidiStreamingMethod<Req, Resp> {
private final AsyncService serviceImpl;
private final int methodId;
MethodHandlers(AsyncService serviceImpl, int methodId) {
this.serviceImpl = serviceImpl;
this.methodId = methodId;
}
@java.lang.Override
@java.lang.SuppressWarnings("unchecked")
public void invoke(Req request, io.grpc.stub.StreamObserver<Resp> responseObserver) {
switch (methodId) {
case METHODID_CORE_COUNT:
serviceImpl.coreCount((io.grpc.benchmarks.proto.Control.CoreRequest) request,
(io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Control.CoreResponse>) responseObserver);
break;
case METHODID_QUIT_WORKER:
serviceImpl.quitWorker((io.grpc.benchmarks.proto.Control.Void) request,
(io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Control.Void>) responseObserver);
break;
default:
throw new AssertionError();
}
}
@java.lang.Override
@java.lang.SuppressWarnings("unchecked")
public io.grpc.stub.StreamObserver<Req> invoke(
io.grpc.stub.StreamObserver<Resp> responseObserver) {
switch (methodId) {
case METHODID_RUN_SERVER:
return (io.grpc.stub.StreamObserver<Req>) serviceImpl.runServer(
(io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Control.ServerStatus>) responseObserver);
case METHODID_RUN_CLIENT:
return (io.grpc.stub.StreamObserver<Req>) serviceImpl.runClient(
(io.grpc.stub.StreamObserver<io.grpc.benchmarks.proto.Control.ClientStatus>) responseObserver);
default:
throw new AssertionError();
}
}
}
public static final io.grpc.ServerServiceDefinition bindService(AsyncService service) {
return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor())
.addMethod(
getRunServerMethod(),
io.grpc.stub.ServerCalls.asyncBidiStreamingCall(
new MethodHandlers<
io.grpc.benchmarks.proto.Control.ServerArgs,
io.grpc.benchmarks.proto.Control.ServerStatus>(
service, METHODID_RUN_SERVER)))
.addMethod(
getRunClientMethod(),
io.grpc.stub.ServerCalls.asyncBidiStreamingCall(
new MethodHandlers<
io.grpc.benchmarks.proto.Control.ClientArgs,
io.grpc.benchmarks.proto.Control.ClientStatus>(
service, METHODID_RUN_CLIENT)))
.addMethod(
getCoreCountMethod(),
io.grpc.stub.ServerCalls.asyncUnaryCall(
new MethodHandlers<
io.grpc.benchmarks.proto.Control.CoreRequest,
io.grpc.benchmarks.proto.Control.CoreResponse>(
service, METHODID_CORE_COUNT)))
.addMethod(
getQuitWorkerMethod(),
io.grpc.stub.ServerCalls.asyncUnaryCall(
new MethodHandlers<
io.grpc.benchmarks.proto.Control.Void,
io.grpc.benchmarks.proto.Control.Void>(
service, METHODID_QUIT_WORKER)))
.build();
}
private static abstract | MethodHandlers |
java | apache__camel | components/camel-mina/src/test/java/org/apache/camel/component/mina/MinaNoResponseFromServerTest.java | {
"start": 2806,
"end": 4002
} | class ____ implements ProtocolCodecFactory {
@Override
public ProtocolEncoder getEncoder(IoSession session) {
return new ProtocolEncoder() {
public void encode(IoSession ioSession, Object message, ProtocolEncoderOutput out) {
// close session instead of returning a reply
ioSession.closeNow();
}
public void dispose(IoSession ioSession) {
// do nothing
}
};
}
@Override
public ProtocolDecoder getDecoder(IoSession session) {
return new ProtocolDecoder() {
public void decode(IoSession ioSession, IoBuffer in, ProtocolDecoderOutput out) {
// close session instead of returning a reply
ioSession.closeNow();
}
public void finishDecode(IoSession ioSession, ProtocolDecoderOutput protocolDecoderOutput) {
// do nothing
}
public void dispose(IoSession ioSession) {
// do nothing
}
};
}
}
}
| MyCodec |
java | apache__dubbo | dubbo-metadata/dubbo-metadata-api/src/main/java/org/apache/dubbo/metadata/MappingCacheManager.java | {
"start": 1720,
"end": 3348
} | class ____ extends AbstractCacheManager<Set<String>> {
private static final String DEFAULT_FILE_NAME = ".mapping";
private static final int DEFAULT_ENTRY_SIZE = 10000;
public static MappingCacheManager getInstance(ScopeModel scopeModel) {
return scopeModel.getBeanFactory().getOrRegisterBean(MappingCacheManager.class);
}
public MappingCacheManager(boolean enableFileCache, String name, ScheduledExecutorService executorService) {
String filePath = SystemPropertyConfigUtils.getSystemProperty(DUBBO_MAPPING_CACHE_FILEPATH);
String fileName = SystemPropertyConfigUtils.getSystemProperty(DUBBO_MAPPING_CACHE_FILENAME);
if (StringUtils.isEmpty(fileName)) {
fileName = DEFAULT_FILE_NAME;
}
if (StringUtils.isNotEmpty(name)) {
fileName = fileName + "." + name;
}
String rawEntrySize = SystemPropertyConfigUtils.getSystemProperty(DUBBO_MAPPING_CACHE_ENTRYSIZE);
int entrySize = StringUtils.parseInteger(rawEntrySize);
entrySize = (entrySize == 0 ? DEFAULT_ENTRY_SIZE : entrySize);
String rawMaxFileSize = SystemPropertyConfigUtils.getSystemProperty(DUBBO_MAPPING_CACHE_MAXFILESIZE);
long maxFileSize = StringUtils.parseLong(rawMaxFileSize);
init(enableFileCache, filePath, fileName, entrySize, maxFileSize, 50, executorService);
}
@Override
protected Set<String> toValueType(String value) {
return new HashSet<>(JsonUtils.toJavaList(value, String.class));
}
@Override
protected String getName() {
return "mapping";
}
}
| MappingCacheManager |
java | apache__camel | test-infra/camel-test-infra-elasticsearch/src/main/java/org/apache/camel/test/infra/elasticsearch/services/ElasticSearchInfraService.java | {
"start": 1009,
"end": 1399
} | interface ____ extends InfrastructureService {
int getPort();
String getElasticSearchHost();
default String getHttpHostAddress() {
return String.format("%s:%d", getElasticSearchHost(), getPort());
}
Optional<String> getCertificatePath();
Optional<SSLContext> getSslContext();
String getUsername();
String getPassword();
}
| ElasticSearchInfraService |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ser/filter/CurrentValueDeser4184Test.java | {
"start": 610,
"end": 933
} | enum ____ {
ADMIN(1),
USER(2);
final int value;
UserType(int value) {
this.value = value;
}
public Integer getValue() {
return this.value;
}
public String getName() {
return this.name();
}
}
static | UserType |
java | spring-projects__spring-framework | spring-jdbc/src/main/java/org/springframework/jdbc/core/simple/JdbcClient.java | {
"start": 2805,
"end": 5276
} | interface ____ {
/**
* The starting point for any JDBC operation: a custom SQL String.
* @param sql the SQL query or update statement as a String
* @return a chained statement specification
*/
StatementSpec sql(String sql);
// Static factory methods
/**
* Create a {@code JdbcClient} for the given {@link DataSource}.
* @param dataSource the DataSource to obtain connections from
*/
static JdbcClient create(DataSource dataSource) {
return new DefaultJdbcClient(dataSource);
}
/**
* Create a {@code JdbcClient} for the given {@link JdbcOperations} delegate,
* typically an {@link org.springframework.jdbc.core.JdbcTemplate}.
* <p>Use this factory method to reuse existing {@code JdbcTemplate} configuration,
* including its {@code DataSource}.
* @param jdbcTemplate the delegate to perform operations on
*/
static JdbcClient create(JdbcOperations jdbcTemplate) {
return new DefaultJdbcClient(jdbcTemplate);
}
/**
* Create a {@code JdbcClient} for the given {@link NamedParameterJdbcOperations} delegate,
* typically an {@link org.springframework.jdbc.core.namedparam.NamedParameterJdbcTemplate}.
* <p>Use this factory method to reuse existing {@code NamedParameterJdbcTemplate}
* configuration, including its underlying {@code JdbcTemplate} and {@code DataSource}.
* @param jdbcTemplate the delegate to perform operations on
*/
static JdbcClient create(NamedParameterJdbcOperations jdbcTemplate) {
return new DefaultJdbcClient(jdbcTemplate, null);
}
/**
* Create a {@code JdbcClient} for the given {@link NamedParameterJdbcOperations} delegate,
* typically an {@link org.springframework.jdbc.core.namedparam.NamedParameterJdbcTemplate}.
* <p>Use this factory method to reuse existing {@code NamedParameterJdbcTemplate}
* configuration, including its underlying {@code JdbcTemplate} and {@code DataSource},
* along with a custom {@link ConversionService} for queries with mapped classes.
* @param jdbcTemplate the delegate to perform operations on
* @param conversionService a {@link ConversionService} for converting fetched JDBC values
* to mapped classes in {@link StatementSpec#query(Class)}
* @since 7.0
*/
static JdbcClient create(NamedParameterJdbcOperations jdbcTemplate, ConversionService conversionService) {
return new DefaultJdbcClient(jdbcTemplate, conversionService);
}
/**
* A statement specification for parameter bindings and query/update execution.
*/
| JdbcClient |
java | netty__netty | codec-http/src/main/java/io/netty/handler/codec/http/HttpClientCodec.java | {
"start": 12687,
"end": 18053
} | class ____ extends HttpResponseDecoder {
Decoder(HttpDecoderConfig config) {
super(config);
}
@Override
protected void decode(
ChannelHandlerContext ctx, ByteBuf buffer, List<Object> out) throws Exception {
if (done) {
int readable = actualReadableBytes();
if (readable == 0) {
// if non is readable just return null
// https://github.com/netty/netty/issues/1159
return;
}
out.add(buffer.readBytes(readable));
} else {
int oldSize = out.size();
super.decode(ctx, buffer, out);
if (failOnMissingResponse) {
int size = out.size();
for (int i = oldSize; i < size; i++) {
decrement(out.get(i));
}
}
}
}
private void decrement(Object msg) {
if (msg == null) {
return;
}
// check if it's an Header and its transfer encoding is not chunked.
if (msg instanceof LastHttpContent) {
requestResponseCounter.decrementAndGet();
}
}
@Override
protected boolean isContentAlwaysEmpty(HttpMessage msg) {
// Get the method of the HTTP request that corresponds to the
// current response.
//
// Even if we do not use the method to compare we still need to poll it to ensure we keep
// request / response pairs in sync.
HttpMethod method = queue.poll();
final HttpResponseStatus status = ((HttpResponse) msg).status();
final HttpStatusClass statusClass = status.codeClass();
final int statusCode = status.code();
if (statusClass == HttpStatusClass.INFORMATIONAL) {
// An informational response should be excluded from paired comparison.
// Just delegate to super method which has all the needed handling.
return super.isContentAlwaysEmpty(msg);
}
// If the remote peer did for example send multiple responses for one request (which is not allowed per
// spec but may still be possible) method will be null so guard against it.
if (method != null) {
char firstChar = method.name().charAt(0);
switch (firstChar) {
case 'H':
// According to 4.3, RFC2616:
// All responses to the HEAD request method MUST NOT include a
// message-body, even though the presence of entity-header fields
// might lead one to believe they do.
if (HttpMethod.HEAD.equals(method)) {
return true;
// The following code was inserted to work around the servers
// that behave incorrectly. It has been commented out
// because it does not work with well behaving servers.
// Please note, even if the 'Transfer-Encoding: chunked'
// header exists in the HEAD response, the response should
// have absolutely no content.
//
//// Interesting edge case:
//// Some poorly implemented servers will send a zero-byte
//// chunk if Transfer-Encoding of the response is 'chunked'.
////
//// return !msg.isChunked();
}
break;
case 'C':
// Successful CONNECT request results in a response with empty body.
if (statusCode == 200) {
if (HttpMethod.CONNECT.equals(method)) {
// Proxy connection established - Parse HTTP only if configured by
// parseHttpAfterConnectRequest, else pass through.
if (!parseHttpAfterConnectRequest) {
done = true;
queue.clear();
}
return true;
}
}
break;
default:
break;
}
}
return super.isContentAlwaysEmpty(msg);
}
@Override
public void channelInactive(ChannelHandlerContext ctx)
throws Exception {
super.channelInactive(ctx);
if (failOnMissingResponse) {
long missingResponses = requestResponseCounter.get();
if (missingResponses > 0) {
ctx.fireExceptionCaught(new PrematureChannelClosureException(
"channel gone inactive with " + missingResponses +
" missing response(s)"));
}
}
}
}
}
| Decoder |
java | quarkusio__quarkus | extensions/flyway/deployment/src/test/java/io/quarkus/flyway/test/FlywayExtensionRepairAtStartTest.java | {
"start": 2913,
"end": 3150
} | class ____ {
@Inject
Flyway flyway;
@Path("current-version")
@GET
public String currentVersion() {
return flyway.info().current().getVersion().toString();
}
}
}
| FlywayResource |
java | hibernate__hibernate-orm | hibernate-testing/src/main/java/org/hibernate/testing/orm/junit/FailureExpectedGroup.java | {
"start": 658,
"end": 721
} | interface ____ {
FailureExpected[] value();
}
| FailureExpectedGroup |
java | apache__hadoop | hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/emulators/resourceusage/TotalHeapUsageEmulatorPlugin.java | {
"start": 2148,
"end": 4486
} | class ____
implements ResourceUsageEmulatorPlugin {
// Configuration parameters
// the core engine to emulate heap usage
protected HeapUsageEmulatorCore emulatorCore;
// the progress bar
private Progressive progress;
// decides if this plugin can emulate heap usage or not
private boolean enabled = true;
// the progress boundaries/interval where emulation should be done
private float emulationInterval;
// target heap usage to emulate
private long targetHeapUsageInMB = 0;
/**
* The frequency (based on task progress) with which memory-emulation code is
* run. If the value is set to 0.1 then the emulation will happen at 10% of
* the task's progress. The default value of this parameter is
* {@link #DEFAULT_EMULATION_PROGRESS_INTERVAL}.
*/
public static final String HEAP_EMULATION_PROGRESS_INTERVAL =
"gridmix.emulators.resource-usage.heap.emulation-interval";
// Default value for emulation interval
private static final float DEFAULT_EMULATION_PROGRESS_INTERVAL = 0.1F; // 10 %
private float prevEmulationProgress = 0F;
/**
* The minimum buffer reserved for other non-emulation activities.
*/
public static final String MIN_HEAP_FREE_RATIO =
"gridmix.emulators.resource-usage.heap.min-free-ratio";
private float minFreeHeapRatio;
private static final float DEFAULT_MIN_FREE_HEAP_RATIO = 0.3F;
/**
* Determines the unit increase per call to the core engine's load API. This
* is expressed as a percentage of the difference between the expected total
* heap usage and the current usage.
*/
public static final String HEAP_LOAD_RATIO =
"gridmix.emulators.resource-usage.heap.load-ratio";
private float heapLoadRatio;
private static final float DEFAULT_HEAP_LOAD_RATIO = 0.1F;
public static final int ONE_MB = 1024 * 1024;
/**
* Defines the core heap usage emulation algorithm. This engine is expected
* to perform certain memory intensive operations to consume some
* amount of heap. {@link #load(long)} should load the current heap and
* increase the heap usage by the specified value. This core engine can be
* initialized using the {@link #initialize(ResourceCalculatorPlugin, long)}
* API to suit the underlying hardware better.
*/
public | TotalHeapUsageEmulatorPlugin |
java | apache__kafka | connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginType.java | {
"start": 1434,
"end": 2204
} | enum ____ {
SOURCE(SourceConnector.class),
SINK(SinkConnector.class),
CONVERTER(Converter.class),
HEADER_CONVERTER(HeaderConverter.class),
TRANSFORMATION(Transformation.class),
PREDICATE(Predicate.class),
CONFIGPROVIDER(ConfigProvider.class),
REST_EXTENSION(ConnectRestExtension.class),
CONNECTOR_CLIENT_CONFIG_OVERRIDE_POLICY(ConnectorClientConfigOverridePolicy.class);
private final Class<?> klass;
PluginType(Class<?> klass) {
this.klass = klass;
}
public String simpleName() {
return klass.getSimpleName();
}
public Class<?> superClass() {
return klass;
}
@Override
public String toString() {
return super.toString().toLowerCase(Locale.ROOT);
}
}
| PluginType |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/benchmark/deploying/DeployingTasksInStreamingJobBenchmarkTest.java | {
"start": 1151,
"end": 1508
} | class ____ {
@Test
void deployAllTasks() throws Exception {
DeployingTasksInStreamingJobBenchmark benchmark =
new DeployingTasksInStreamingJobBenchmark();
benchmark.setup(JobConfiguration.STREAMING_TEST);
benchmark.deployAllTasks();
benchmark.teardown();
}
}
| DeployingTasksInStreamingJobBenchmarkTest |
java | quarkusio__quarkus | extensions/security/deployment/src/test/java/io/quarkus/security/test/permissionsallowed/MethodLevelCustomPermissionsAllowedTest.java | {
"start": 6046,
"end": 7748
} | class ____ implements PermissionsAllowedNameOnlyBeanI {
@PermissionsAllowed(value = WRITE_PERMISSION, permission = CustomPermission.class)
public final String write() {
return WRITE_PERMISSION;
}
@PermissionsAllowed(value = READ_PERMISSION, permission = CustomPermission.class)
public final String read() {
return READ_PERMISSION;
}
@PermissionsAllowed(value = WRITE_PERMISSION, permission = CustomPermission.class)
public final Uni<String> writeNonBlocking() {
return Uni.createFrom().item(WRITE_PERMISSION);
}
@PermissionsAllowed(value = READ_PERMISSION, permission = CustomPermission.class)
public final Uni<String> readNonBlocking() {
return Uni.createFrom().item(READ_PERMISSION);
}
@PermissionsAllowed(value = "prohibited", permission = CustomPermission.class)
public final void prohibited() {
}
@PermissionsAllowed(value = "prohibited", permission = CustomPermission.class)
public final Uni<Void> prohibitedNonBlocking() {
return Uni.createFrom().nullItem();
}
@PermissionsAllowed(value = { "one", "two", "three", READ_PERMISSION }, permission = CustomPermission.class)
public final String multiple() {
return MULTIPLE_PERMISSION;
}
@PermissionsAllowed(value = { "one", "two", "three", READ_PERMISSION }, permission = CustomPermission.class)
public final Uni<String> multipleNonBlocking() {
return Uni.createFrom().item(MULTIPLE_PERMISSION);
}
}
@Singleton
public static | PermissionsAllowedNameOnlyBean |
java | spring-projects__spring-security | test/src/main/java/org/springframework/security/test/web/servlet/request/SecurityMockMvcRequestBuilders.java | {
"start": 3135,
"end": 5193
} | class ____ implements RequestBuilder, Mergeable {
private String logoutUrl = "/logout";
private RequestPostProcessor postProcessor = csrf();
private @Nullable Mergeable parent;
private LogoutRequestBuilder() {
}
@Override
public MockHttpServletRequest buildRequest(ServletContext servletContext) {
MockHttpServletRequestBuilder logoutRequest = post(this.logoutUrl).accept(MediaType.TEXT_HTML,
MediaType.ALL);
if (this.parent != null) {
logoutRequest = (MockHttpServletRequestBuilder) logoutRequest.merge(this.parent);
}
MockHttpServletRequest request = logoutRequest.buildRequest(servletContext);
logoutRequest.postProcessRequest(request);
return this.postProcessor.postProcessRequest(request);
}
/**
* Specifies the logout URL to POST to. Defaults to "/logout".
* @param logoutUrl the logout URL to POST to. Defaults to "/logout".
* @return the {@link LogoutRequestBuilder} for additional customizations
*/
public LogoutRequestBuilder logoutUrl(String logoutUrl) {
this.logoutUrl = logoutUrl;
return this;
}
/**
* Specifies the logout URL to POST to.
* @param logoutUrl the logout URL to POST to.
* @param uriVars the URI variables
* @return the {@link LogoutRequestBuilder} for additional customizations
*/
public LogoutRequestBuilder logoutUrl(String logoutUrl, Object... uriVars) {
this.logoutUrl = UriComponentsBuilder.fromPath(logoutUrl).buildAndExpand(uriVars).encode().toString();
return this;
}
@Override
public boolean isMergeEnabled() {
return true;
}
@Override
public Object merge(@Nullable Object parent) {
if (parent == null) {
return this;
}
if (parent instanceof Mergeable) {
this.parent = (Mergeable) parent;
return this;
}
throw new IllegalArgumentException("Cannot merge with [" + parent.getClass().getName() + "]");
}
}
/**
* Creates a form based login request including any necessary {@link CsrfToken}.
*
* @author Rob Winch
* @since 4.0
*/
public static final | LogoutRequestBuilder |
java | quarkusio__quarkus | independent-projects/tools/registry-client/src/main/java/io/quarkus/registry/config/ConfigSource.java | {
"start": 72,
"end": 1451
} | interface ____ {
ConfigSource DEFAULT = new ConfigSource() {
@Override
public Path getFilePath() {
return null;
}
@Override
public String describe() {
return "default configuration";
}
};
ConfigSource MANUAL = new ConfigSource() {
@Override
public Path getFilePath() {
return null;
}
@Override
public String describe() {
return "manually configured (programmatic)";
}
};
ConfigSource ENV = new ConfigSource() {
@Override
public Path getFilePath() {
return null;
}
@Override
public String describe() {
return String.format(
"environment variables: registries defined in %s, with supporting configuration in variables prefixed with %s",
RegistriesConfigLocator.QUARKUS_REGISTRIES,
RegistriesConfigLocator.QUARKUS_REGISTRY_ENV_VAR_PREFIX);
}
};
/**
* @return Path to source file, or null if config is not file-based.
*/
Path getFilePath();
/**
* Describe the source of this registry configuration for use with info
* and error messages.
*
* @return String describing configuration source
*/
String describe();
| ConfigSource |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/streaming/runtime/io/benchmark/ReceiverThread.java | {
"start": 1101,
"end": 1329
} | class ____ for {@code expectedRepetitionsOfExpectedRecord} number of occurrences of the
* {@code expectedRecord}. {@code expectedRepetitionsOfExpectedRecord} is correlated with number of
* input channels.
*/
public abstract | waits |
java | spring-projects__spring-security | cas/src/main/java/org/springframework/security/cas/authentication/CasAuthenticationToken.java | {
"start": 1254,
"end": 6549
} | class ____ extends AbstractAuthenticationToken implements Serializable {
private static final long serialVersionUID = 620L;
private final Object credentials;
private final Object principal;
private final UserDetails userDetails;
private final int keyHash;
private final Assertion assertion;
/**
* Constructor.
* @param key to identify if this object made by a given
* {@link CasAuthenticationProvider}
* @param principal typically the UserDetails object (cannot be <code>null</code>)
* @param credentials the service/proxy ticket ID from CAS (cannot be
* <code>null</code>)
* @param authorities the authorities granted to the user (from the
* {@link org.springframework.security.core.userdetails.UserDetailsService}) (cannot
* be <code>null</code>)
* @param userDetails the user details (from the
* {@link org.springframework.security.core.userdetails.UserDetailsService}) (cannot
* be <code>null</code>)
* @param assertion the assertion returned from the CAS servers. It contains the
* principal and how to obtain a proxy ticket for the user.
* @throws IllegalArgumentException if a <code>null</code> was passed
*/
public CasAuthenticationToken(final String key, final Object principal, final Object credentials,
final Collection<? extends GrantedAuthority> authorities, final UserDetails userDetails,
final Assertion assertion) {
this(extractKeyHash(key), principal, credentials, authorities, userDetails, assertion);
}
/**
* Private constructor for Jackson Deserialization support
* @param keyHash hashCode of provided key to identify if this object made by a given
* {@link CasAuthenticationProvider}
* @param principal typically the UserDetails object (cannot be <code>null</code>)
* @param credentials the service/proxy ticket ID from CAS (cannot be
* <code>null</code>)
* @param authorities the authorities granted to the user (from the
* {@link org.springframework.security.core.userdetails.UserDetailsService}) (cannot
* be <code>null</code>)
* @param userDetails the user details (from the
* {@link org.springframework.security.core.userdetails.UserDetailsService}) (cannot
* be <code>null</code>)
* @param assertion the assertion returned from the CAS servers. It contains the
* principal and how to obtain a proxy ticket for the user.
* @throws IllegalArgumentException if a <code>null</code> was passed
* @since 4.2
*/
private CasAuthenticationToken(final Integer keyHash, final Object principal, final Object credentials,
final Collection<? extends GrantedAuthority> authorities, final UserDetails userDetails,
final Assertion assertion) {
super(authorities);
if ((principal == null) || "".equals(principal) || (credentials == null) || "".equals(credentials)
|| (authorities == null) || (userDetails == null) || (assertion == null)) {
throw new IllegalArgumentException("Cannot pass null or empty values to constructor");
}
this.keyHash = keyHash;
this.principal = principal;
this.credentials = credentials;
this.userDetails = userDetails;
this.assertion = assertion;
setAuthenticated(true);
}
protected CasAuthenticationToken(Builder<?> builder) {
super(builder);
Assert.isTrue(!"".equals(builder.principal), "principal cannot be null or empty");
Assert.notNull(!"".equals(builder.credentials), "credentials cannot be null or empty");
Assert.notNull(builder.userDetails, "userDetails cannot be null");
Assert.notNull(builder.assertion, "assertion cannot be null");
this.keyHash = builder.keyHash;
this.principal = builder.principal;
this.credentials = builder.credentials;
this.userDetails = builder.userDetails;
this.assertion = builder.assertion;
}
private static Integer extractKeyHash(String key) {
Assert.hasLength(key, "key cannot be null or empty");
return key.hashCode();
}
@Override
public boolean equals(final Object obj) {
if (!super.equals(obj)) {
return false;
}
if (obj instanceof CasAuthenticationToken test) {
return this.assertion.equals(test.getAssertion()) && this.getKeyHash() == test.getKeyHash();
}
return false;
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + this.credentials.hashCode();
result = 31 * result + this.principal.hashCode();
result = 31 * result + this.userDetails.hashCode();
result = 31 * result + this.keyHash;
result = 31 * result + ObjectUtils.nullSafeHashCode(this.assertion);
return result;
}
@Override
public Object getCredentials() {
return this.credentials;
}
public int getKeyHash() {
return this.keyHash;
}
@Override
public Object getPrincipal() {
return this.principal;
}
public Assertion getAssertion() {
return this.assertion;
}
public UserDetails getUserDetails() {
return this.userDetails;
}
@Override
public Builder<?> toBuilder() {
return new Builder<>(this);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(super.toString());
sb.append(" Assertion: ").append(this.assertion);
sb.append(" Credentials (Service/Proxy Ticket): ").append(this.credentials);
return (sb.toString());
}
/**
* A builder of {@link CasAuthenticationToken} instances
*
* @since 7.0
*/
public static | CasAuthenticationToken |
java | junit-team__junit5 | junit-jupiter-api/src/main/java/org/junit/jupiter/api/parallel/Isolated.java | {
"start": 840,
"end": 1255
} | class ____ executed
* concurrently. This can be used to enable parallel test execution for the
* entire test suite while running some tests in isolation (e.g. if they modify
* some global resource).
*
* @since 5.7
* @see ExecutionMode
* @see ResourceLock
*/
@API(status = STABLE, since = "5.10")
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
@Inherited
@ResourceLock(Resources.GLOBAL)
public @ | is |
java | apache__logging-log4j2 | log4j-core-test/src/test/java/org/apache/logging/log4j/core/appender/FileAppenderTest.java | {
"start": 12994,
"end": 13972
} | class ____ implements Runnable {
private final boolean createOnDemand;
private final boolean lock;
private final int logEventCount;
private final AtomicReference<Throwable> throwableRef;
public FileWriterRunnable(
final boolean createOnDemand,
final boolean lock,
final int logEventCount,
final AtomicReference<Throwable> throwableRef) {
this.createOnDemand = createOnDemand;
this.lock = lock;
this.logEventCount = logEventCount;
this.throwableRef = throwableRef;
}
@Override
public void run() {
final Thread thread = Thread.currentThread();
try {
writer(lock, logEventCount, thread.getName(), createOnDemand, true);
} catch (final Throwable e) {
throwableRef.set(e);
}
}
}
public static | FileWriterRunnable |
java | apache__camel | components/camel-nitrite/src/main/java/org/apache/camel/component/nitrite/operation/common/InsertOperation.java | {
"start": 1332,
"end": 1984
} | class ____ extends AbstractPayloadAwareOperation implements CommonOperation {
public InsertOperation(Object payload) {
super(payload);
}
public InsertOperation() {
}
@Override
protected void execute(Exchange exchange, NitriteEndpoint endpoint) throws Exception {
Object payload = getPayload(exchange, endpoint);
Object[] payloadArray = (Object[]) Array.newInstance(payload.getClass(), 1);
payloadArray[0] = payload;
exchange.getMessage().setHeader(
NitriteConstants.WRITE_RESULT,
endpoint.getNitriteCollection().insert(payloadArray));
}
}
| InsertOperation |
java | elastic__elasticsearch | client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java | {
"start": 2320,
"end": 2819
} | class ____ extends RestClientTestCase {
private static HttpsServer httpsServer;
@BeforeClass
public static void startHttpServer() throws Exception {
httpsServer = MockHttpServer.createHttps(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0);
httpsServer.setHttpsConfigurator(new HttpsConfigurator(getSslContext()));
httpsServer.createContext("/", new ResponseHandler());
httpsServer.start();
}
private static | RestClientBuilderIntegTests |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java | {
"start": 8767,
"end": 13757
} | class ____ {
private final SelectorProvider provider;
private final Selector selector;
private long lastActivityTime;
private SelectorInfo(SelectorProvider provider, Selector selector) {
this.provider = provider;
this.selector = selector;
}
void close() {
if (selector != null) {
try {
selector.close();
} catch (IOException e) {
LOG.warn("Unexpected exception while closing selector : ", e);
}
}
}
}
private static ConcurrentHashMap<SelectorProvider, ConcurrentLinkedDeque
<SelectorInfo>> providerMap = new ConcurrentHashMap<>();
private static final long IDLE_TIMEOUT = 10 * 1000; // 10 seconds.
/**
* Waits on the channel with the given timeout using one of the
* cached selectors. It also removes any cached selectors that are
* idle for a few seconds.
*
* @param channel
* @param ops
* @param timeout
* @return
* @throws IOException
*/
static int select(SelectableChannel channel, int ops, long timeout)
throws IOException {
SelectorInfo info = get(channel);
SelectionKey key = null;
int ret = 0;
long timeoutLeft = timeout;
try {
while (true) {
long start = (timeout == 0) ? 0 : Time.now();
key = channel.register(info.selector, ops);
ret = info.selector.select(timeoutLeft);
if (ret != 0) {
return ret;
}
/* Sometimes select() returns 0 much before timeout for
* unknown reasons. So select again if required.
*/
if (timeout > 0) {
timeoutLeft -= Time.now() - start;
timeoutLeft = Math.max(0, timeoutLeft);
}
if (Thread.currentThread().isInterrupted()) {
throw new InterruptedIOException("Interrupted while waiting for "
+ "IO on channel " + channel + ". Total timeout mills is "
+ timeout + ", " + timeoutLeft + " millis timeout left.");
}
if (timeoutLeft == 0) {
return 0;
}
}
} finally {
if (key != null) {
key.cancel();
}
//clear the canceled key.
try {
info.selector.selectNow();
} catch (IOException e) {
LOG.info("Unexpected Exception while clearing selector : ", e);
// don't put the selector back.
info.close();
return ret;
}
release(info);
}
}
/**
* Takes one selector from end of LRU list of free selectors.
* If there are no selectors awailable, it creates a new selector.
* Also invokes trimIdleSelectors().
*
* @param channel
* @return
* @throws IOException
*/
private static SelectorInfo get(SelectableChannel channel)
throws IOException {
SelectorProvider provider = channel.provider();
// pick the list : rarely there is more than one provider in use.
ConcurrentLinkedDeque<SelectorInfo> infoQ = providerMap.computeIfAbsent(
provider, k -> new ConcurrentLinkedDeque<>());
SelectorInfo selInfo = infoQ.pollLast(); // last in first out
if (selInfo == null) {
Selector selector = provider.openSelector();
// selInfo will be put into infoQ after `#release()`
selInfo = new SelectorInfo(provider, selector);
}
trimIdleSelectors(Time.now());
return selInfo;
}
/**
* puts selector back at the end of LRU list of free selectos.
* Also invokes trimIdleSelectors().
*
* @param info
*/
private static void release(SelectorInfo info) {
long now = Time.now();
trimIdleSelectors(now);
info.lastActivityTime = now;
// SelectorInfos in queue are sorted by lastActivityTime
providerMap.get(info.provider).addLast(info);
}
private static AtomicBoolean trimming = new AtomicBoolean(false);
/**
* Closes selectors that are idle for IDLE_TIMEOUT (10 sec). It does not
* traverse the whole list, just over the one that have crossed
* the timeout.
*/
private static void trimIdleSelectors(long now) {
if (!trimming.compareAndSet(false, true)) {
return;
}
long cutoff = now - IDLE_TIMEOUT;
for (ConcurrentLinkedDeque<SelectorInfo> infoQ : providerMap.values()) {
SelectorInfo oldest;
while ((oldest = infoQ.peekFirst()) != null) {
if (oldest.lastActivityTime <= cutoff && infoQ.remove(oldest)) {
oldest.close();
} else {
break;
}
}
}
trimming.set(false);
}
}
}
| SelectorInfo |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/SpringMDCWithBreadcrumbTest.java | {
"start": 1428,
"end": 1815
} | class ____ implements Processor {
@Override
public void process(Exchange exchange) throws Exception {
assertEquals("route-a", MDC.get("camel.routeId"));
assertEquals(exchange.getExchangeId(), MDC.get("camel.exchangeId"));
assertEquals(exchange.getExchangeId(), MDC.get("camel.breadcrumbId"));
}
}
public static | ProcessorA |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/event/connection/JfrConnectionDeactivatedEvent.java | {
"start": 358,
"end": 909
} | class ____ extends Event {
private final String redisUri;
private final String epId;
private final String channelId;
private final String local;
private final String remote;
public JfrConnectionDeactivatedEvent(ConnectionEventSupport event) {
this.redisUri = event.getRedisUri();
this.epId = event.getChannelId();
this.channelId = event.getChannelId();
this.local = event.localAddress().toString();
this.remote = event.remoteAddress().toString();
}
}
| JfrConnectionDeactivatedEvent |
java | spring-projects__spring-boot | documentation/spring-boot-docs/src/main/java/org/springframework/boot/docs/actuator/metrics/gettingstarted/commontags/MyMeterRegistryConfiguration.java | {
"start": 1014,
"end": 1210
} | class ____ {
@Bean
public MeterRegistryCustomizer<MeterRegistry> metricsCommonTags() {
return (registry) -> registry.config().commonTags("region", "us-east-1");
}
}
| MyMeterRegistryConfiguration |
java | apache__flink | flink-libraries/flink-cep/src/main/java/org/apache/flink/cep/nfa/StateTransitionAction.java | {
"start": 934,
"end": 1193
} | enum ____ {
TAKE, // take the current event and assign it to the current state
IGNORE, // ignore the current event
PROCEED // do the state transition and keep the current event for further processing (epsilon
// transition)
}
| StateTransitionAction |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/basic/ConcatTest.java | {
"start": 988,
"end": 2801
} | class ____ {
@BeforeEach
public void setUp(EntityManagerFactoryScope scope) {
scope.inTransaction( entityManager -> {
TestEntity testEntity = new TestEntity();
testEntity.setName( "test_1" );
entityManager.persist( testEntity );
} );
}
@AfterEach
public void tearDown(EntityManagerFactoryScope scope) {
scope.getEntityManagerFactory().getSchemaManager().truncate();
}
@Test
public void testSelectCaseWithConcat(EntityManagerFactoryScope scope) {
scope.inTransaction( entityManager -> {
CriteriaBuilder cb = entityManager.getCriteriaBuilder();
CriteriaQuery<Object[]> query = cb.createQuery( Object[].class );
Root<TestEntity> testEntity = query.from( TestEntity.class );
query.multiselect(
cb.selectCase()
.when( cb.isNotNull( testEntity.get( "id" ) ), cb.concat( "test", cb.literal( "_1" ) ) )
.otherwise( cb.literal( "Empty" ) ),
cb.trim( cb.concat( ".", cb.literal( "Test " ) ) )
);
final List<Object[]> results = entityManager.createQuery( query ).getResultList();
assertThat( results.size(), is( 1 ) );
assertThat( results.get( 0 )[0], is( "test_1" ) );
assertThat( results.get( 0 )[1], is( ".Test" ) );
} );
}
@Test
public void testConcat(EntityManagerFactoryScope scope) {
scope.inTransaction( entityManager -> {
CriteriaBuilder cb = entityManager.getCriteriaBuilder();
CriteriaQuery query = cb.createQuery();
Root<TestEntity> testEntity = query.from( TestEntity.class );
query.select( testEntity ).where( cb.equal( testEntity.get( "name" ), cb.concat( "test", cb.literal( "_1" ) ) ) );
final List<?> results = entityManager.createQuery( query ).getResultList();
assertThat( results.size(), is( 1 ) );
} );
}
@Entity(name = "TestEntity")
@Table(name = "TEST_ENTITY")
public static | ConcatTest |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/records/TestURL.java | {
"start": 1169,
"end": 1940
} | class ____ {
@Test
void testConversion() throws Exception {
Configuration conf = new Configuration();
conf.set(YarnConfiguration.IPC_RECORD_FACTORY_CLASS,
RecordFactoryForTest.class.getName());
String[] pathStrs = new String[] {"/", ".", "foo/bar", "foo",
"/foo/bar/baz", "moo://bar/baz", "moo://bar:123/baz", "moo:///foo",
"moo://foo@bar:123/baz/foo", "moo://foo@bar/baz/foo", "moo://foo@bar",
"moo://foo:123"};
for (String s : pathStrs) {
Path path = new Path(s);
assertEquals(path, URL.fromPath(path, conf).toPath());
}
Path p = new Path("/foo/bar#baz");
assertEquals(p, URL.fromPath(p, conf).toPath());
}
/** Record factory that instantiates URLs for this test. */
public static | TestURL |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/core/scheduler/DelegateServiceSchedulerTest.java | {
"start": 1457,
"end": 9564
} | class ____ extends AbstractSchedulerTest {
@Override
protected Scheduler scheduler() {
return freshScheduler();
}
@Override
protected Scheduler freshScheduler() {
return Schedulers.fromExecutor(Executors.newSingleThreadScheduledExecutor());
}
@Override
protected boolean shouldCheckDisposeTask() {
return false;
}
@Override
protected boolean shouldCheckSupportRestart() {
return false;
}
@Override
protected boolean shouldCheckMultipleDisposeGracefully() {
return true;
}
@Override
protected boolean isTerminated(Scheduler s) {
DelegateServiceScheduler scheduler = (DelegateServiceScheduler) s;
return scheduler.original.isTerminated();
}
@Test
public void startAndDecorationImplicit() {
AtomicInteger decorationCount = new AtomicInteger();
Schedulers.setExecutorServiceDecorator("startAndDecorationImplicit", (s, srv) -> {
decorationCount.incrementAndGet();
return srv;
});
final Scheduler scheduler = afterTest.autoDispose(new DelegateServiceScheduler("startAndDecorationImplicitExecutorService", Executors.newSingleThreadExecutor()));
afterTest.autoDispose(() -> Schedulers.removeExecutorServiceDecorator("startAndDecorationImplicit"));
assertThat(decorationCount).as("before schedule").hasValue(0);
//first scheduled task implicitly starts the scheduler and thus creates the executor service
scheduler.schedule(ThrowingRunnable.unchecked(() -> Thread.sleep(100)));
assertThat(decorationCount).as("after schedule").hasValue(1);
//second scheduled task runs on a started scheduler and doesn't create further executors
scheduler.schedule(() -> {});
assertThat(decorationCount).as("after 2nd schedule").hasValue(1);
}
@Test
public void notScheduledRejects() {
Scheduler s = afterTest.autoDispose(Schedulers.fromExecutorService(Executors.newSingleThreadExecutor()));
assertThatExceptionOfType(RejectedExecutionException.class)
.isThrownBy(() -> s.schedule(() -> {}, 100, TimeUnit.MILLISECONDS))
.describedAs("direct delayed scheduling")
.isSameAs(Exceptions.failWithRejectedNotTimeCapable());
assertThatExceptionOfType(RejectedExecutionException.class)
.isThrownBy(() -> s.schedulePeriodically(() -> {}, 100, 100, TimeUnit.MILLISECONDS))
.describedAs("direct periodic scheduling")
.isSameAs(Exceptions.failWithRejectedNotTimeCapable());
Worker w = afterTest.autoDispose(s.createWorker());
assertThatExceptionOfType(RejectedExecutionException.class)
.isThrownBy(() -> w.schedule(() -> {}, 100, TimeUnit.MILLISECONDS))
.describedAs("worker delayed scheduling")
.isSameAs(Exceptions.failWithRejectedNotTimeCapable());
assertThatExceptionOfType(RejectedExecutionException.class)
.isThrownBy(() -> w.schedulePeriodically(() -> {}, 100, 100, TimeUnit.MILLISECONDS))
.describedAs("worker periodic scheduling")
.isSameAs(Exceptions.failWithRejectedNotTimeCapable());
}
@Test
public void scheduledDoesntReject() {
Scheduler s = afterTest.autoDispose(Schedulers.fromExecutorService(Executors.newSingleThreadScheduledExecutor()));
assertThat(s.schedule(() -> {}, 100, TimeUnit.MILLISECONDS))
.describedAs("direct delayed scheduling")
.isNotNull();
assertThat(s.schedulePeriodically(() -> {}, 100, 100, TimeUnit.MILLISECONDS))
.describedAs("direct periodic scheduling")
.isNotNull();
Worker w = afterTest.autoDispose(s.createWorker());
assertThat(w.schedule(() -> {}, 100, TimeUnit.MILLISECONDS))
.describedAs("worker delayed scheduling")
.isNotNull();
assertThat(w.schedulePeriodically(() -> {}, 100, 100, TimeUnit.MILLISECONDS))
.describedAs("worker periodic scheduling")
.isNotNull();
}
@Test
public void smokeTestDelay() {
for (int i = 0; i < 20; i++) {
Scheduler s = afterTest.autoDispose(Schedulers.fromExecutorService(Executors.newScheduledThreadPool(1)));
AtomicLong start = new AtomicLong();
AtomicLong end = new AtomicLong();
StepVerifier.create(Mono
.delay(Duration.ofMillis(100), s)
.log()
.doOnSubscribe(sub -> start.set(System.nanoTime()))
.doOnTerminate(() -> end.set(System.nanoTime()))
)
.expectSubscription()
.expectNext(0L)
.verifyComplete();
long endValue = end.longValue();
long startValue = start.longValue();
long measuredDelay = endValue - startValue;
long measuredDelayMs = TimeUnit.NANOSECONDS.toMillis(measuredDelay);
assertThat(measuredDelayMs)
.as("iteration %s, measured delay %s nanos, start at %s nanos, end at %s nanos", i, measuredDelay, startValue, endValue)
.isGreaterThanOrEqualTo(100L)
.isLessThan(200L);
}
}
@Test
public void smokeTestInterval() {
Scheduler s = afterTest.autoDispose(scheduler());
StepVerifier.create(Flux.interval(Duration.ofMillis(100), Duration.ofMillis(200), s))
.expectSubscription()
.expectNoEvent(Duration.ofMillis(100))
.expectNext(0L)
.expectNoEvent(Duration.ofMillis(200))
.expectNext(1L)
.expectNoEvent(Duration.ofMillis(200))
.expectNext(2L)
.thenCancel();
}
@Test
public void scanNameAnonymous() {
final ExecutorService fixedExecutor = Executors.newFixedThreadPool(3);
final ExecutorService cachedExecutor = Executors.newCachedThreadPool();
final ExecutorService singleExecutor = Executors.newSingleThreadExecutor();
Scheduler fixedThreadPool = afterTest.autoDispose(Schedulers.fromExecutorService(fixedExecutor));
Scheduler cachedThreadPool = afterTest.autoDispose(Schedulers.fromExecutorService(cachedExecutor));
Scheduler singleThread = afterTest.autoDispose(Schedulers.fromExecutorService(singleExecutor));
String fixedId = Integer.toHexString(System.identityHashCode(fixedExecutor));
String cachedId = Integer.toHexString(System.identityHashCode(cachedExecutor));
String singleId = Integer.toHexString(System.identityHashCode(singleExecutor));
assertThat(Scannable.from(fixedThreadPool).scan(Scannable.Attr.NAME))
.as("fixedThreadPool")
.isEqualTo("fromExecutorService(anonymousExecutor@" + fixedId + ")");
assertThat(Scannable.from(cachedThreadPool).scan(Scannable.Attr.NAME))
.as("cachedThreadPool")
.isEqualTo("fromExecutorService(anonymousExecutor@" + cachedId + ")");
assertThat(Scannable.from(singleThread).scan(Scannable.Attr.NAME))
.as("singleThread")
.isEqualTo("fromExecutorService(anonymousExecutor@" + singleId + ")");
}
@Test
public void scanNameExplicit() {
Scheduler fixedThreadPool = afterTest.autoDispose(Schedulers.fromExecutorService(Executors.newFixedThreadPool(3), "fixedThreadPool(3)"));
Scheduler cachedThreadPool = afterTest.autoDispose(Schedulers.fromExecutorService(Executors.newCachedThreadPool(), "cachedThreadPool"));
Scheduler singleThread = afterTest.autoDispose(Schedulers.fromExecutorService(Executors.newSingleThreadExecutor(), "singleThreadExecutor"));
assertThat(Scannable.from(fixedThreadPool).scan(Scannable.Attr.NAME))
.as("fixedThreadPool")
.isEqualTo("fromExecutorService(fixedThreadPool(3))");
assertThat(Scannable.from(cachedThreadPool).scan(Scannable.Attr.NAME))
.as("cachedThreadPool")
.isEqualTo("fromExecutorService(cachedThreadPool)");
assertThat(Scannable.from(singleThread).scan(Scannable.Attr.NAME))
.as("singleThread")
.isEqualTo("fromExecutorService(singleThreadExecutor)");
}
@Test
public void scanExecutorAttributes() {
Scheduler fixedThreadPool = afterTest.autoDispose(Schedulers.fromExecutorService(Executors.newFixedThreadPool(3)));
assertThat(Scannable.from(fixedThreadPool).scan(Scannable.Attr.CAPACITY)).isEqualTo(3);
assertThat(Scannable.from(fixedThreadPool).scan(Scannable.Attr.BUFFERED)).isZero();
assertThat(Scannable.from(fixedThreadPool).scan(Scannable.Attr.LARGE_BUFFERED)).isZero();
fixedThreadPool.schedule(() -> {
assertThat(Scannable.from(fixedThreadPool).scan(Scannable.Attr.BUFFERED)).isNotZero();
assertThat(Scannable.from(fixedThreadPool).scan(Scannable.Attr.LARGE_BUFFERED)).isNotZero();
});
}
}
| DelegateServiceSchedulerTest |
java | google__guava | android/guava-testlib/src/com/google/common/testing/RelationshipTester.java | {
"start": 5659,
"end": 6055
} | class ____<T> {
final T value;
final int groupNumber;
final int itemNumber;
Item(T value, int groupNumber, int itemNumber) {
this.value = value;
this.groupNumber = groupNumber;
this.itemNumber = itemNumber;
}
@Override
public String toString() {
return value + " [group " + (groupNumber + 1) + ", item " + (itemNumber + 1) + ']';
}
}
}
| Item |
java | apache__dubbo | dubbo-metadata/dubbo-metadata-api/src/test/java/org/apache/dubbo/metadata/store/InterfaceNameTestService2.java | {
"start": 872,
"end": 931
} | interface ____ {
void test2();
}
| InterfaceNameTestService2 |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/GetMountTableEntriesResponsePBImpl.java | {
"start": 1808,
"end": 3859
} | class ____
extends GetMountTableEntriesResponse implements PBRecord {
private FederationProtocolPBTranslator<GetMountTableEntriesResponseProto,
GetMountTableEntriesResponseProto.Builder,
GetMountTableEntriesResponseProtoOrBuilder> translator =
new FederationProtocolPBTranslator<GetMountTableEntriesResponseProto,
GetMountTableEntriesResponseProto.Builder,
GetMountTableEntriesResponseProtoOrBuilder>(
GetMountTableEntriesResponseProto.class);
public GetMountTableEntriesResponsePBImpl() {
}
public GetMountTableEntriesResponsePBImpl(
GetMountTableEntriesResponseProto proto) {
this.translator.setProto(proto);
}
@Override
public GetMountTableEntriesResponseProto getProto() {
return this.translator.build();
}
@Override
public void setProto(Message proto) {
this.translator.setProto(proto);
}
@Override
public void readInstance(String base64String) throws IOException {
this.translator.readInstance(base64String);
}
@Override
public List<MountTable> getEntries() throws IOException {
List<MountTableRecordProto> entries =
this.translator.getProtoOrBuilder().getEntriesList();
List<MountTable> ret = new ArrayList<MountTable>();
for (MountTableRecordProto entry : entries) {
MountTable record = new MountTablePBImpl(entry);
ret.add(record);
}
return ret;
}
@Override
public void setEntries(List<MountTable> records) throws IOException {
this.translator.getBuilder().clearEntries();
for (MountTable entry : records) {
if (entry instanceof MountTablePBImpl) {
MountTablePBImpl entryPB = (MountTablePBImpl)entry;
this.translator.getBuilder().addEntries(entryPB.getProto());
}
}
}
@Override
public long getTimestamp() {
return this.translator.getProtoOrBuilder().getTimestamp();
}
@Override
public void setTimestamp(long time) {
this.translator.getBuilder().setTimestamp(time);
}
} | GetMountTableEntriesResponsePBImpl |
java | quarkusio__quarkus | independent-projects/qute/core/src/main/java/io/quarkus/qute/FragmentSectionHelper.java | {
"start": 5762,
"end": 5971
} | enum ____ implements ErrorCode {
INVALID_FRAGMENT_ID,
NON_UNIQUE_FRAGMENT_ID;
@Override
public String getName() {
return "FRAGMENT_" + name();
}
}
}
| Code |
java | google__guava | android/guava/src/com/google/common/reflect/TypeToken.java | {
"start": 18182,
"end": 22642
} | class ____ parameterized type.
checkArgument(
getRawType().isAssignableFrom(subclass), "%s isn't a subclass of %s", subclass, this);
Type resolvedTypeArgs = resolveTypeArgsForSubclass(subclass);
@SuppressWarnings("unchecked") // guarded by the isAssignableFrom() statement above
TypeToken<? extends T> subtype = (TypeToken<? extends T>) of(resolvedTypeArgs);
checkArgument(
subtype.isSubtypeOf(this), "%s does not appear to be a subtype of %s", subtype, this);
return subtype;
}
/**
* Returns true if this type is a supertype of the given {@code type}. "Supertype" is defined
* according to <a
* href="http://docs.oracle.com/javase/specs/jls/se8/html/jls-4.html#jls-4.5.1">the rules for type
* arguments</a> introduced with Java generics.
*
* @since 19.0
*/
public final boolean isSupertypeOf(TypeToken<?> type) {
return type.isSubtypeOf(getType());
}
/**
* Returns true if this type is a supertype of the given {@code type}. "Supertype" is defined
* according to <a
* href="http://docs.oracle.com/javase/specs/jls/se8/html/jls-4.html#jls-4.5.1">the rules for type
* arguments</a> introduced with Java generics.
*
* @since 19.0
*/
public final boolean isSupertypeOf(Type type) {
return of(type).isSubtypeOf(getType());
}
/**
* Returns true if this type is a subtype of the given {@code type}. "Subtype" is defined
* according to <a
* href="http://docs.oracle.com/javase/specs/jls/se8/html/jls-4.html#jls-4.5.1">the rules for type
* arguments</a> introduced with Java generics.
*
* @since 19.0
*/
public final boolean isSubtypeOf(TypeToken<?> type) {
return isSubtypeOf(type.getType());
}
/**
* Returns true if this type is a subtype of the given {@code type}. "Subtype" is defined
* according to <a
* href="http://docs.oracle.com/javase/specs/jls/se8/html/jls-4.html#jls-4.5.1">the rules for type
* arguments</a> introduced with Java generics.
*
* @since 19.0
*/
public final boolean isSubtypeOf(Type supertype) {
checkNotNull(supertype);
if (supertype instanceof WildcardType) {
// if 'supertype' is <? super Foo>, 'this' can be:
// Foo, SubFoo, <? extends Foo>.
// if 'supertype' is <? extends Foo>, nothing is a subtype.
return any(((WildcardType) supertype).getLowerBounds()).isSupertypeOf(runtimeType);
}
// if 'this' is wildcard, it's a suptype of to 'supertype' if any of its "extends"
// bounds is a subtype of 'supertype'.
if (runtimeType instanceof WildcardType) {
// <? super Base> is of no use in checking 'from' being a subtype of 'to'.
return any(((WildcardType) runtimeType).getUpperBounds()).isSubtypeOf(supertype);
}
// if 'this' is type variable, it's a subtype if any of its "extends"
// bounds is a subtype of 'supertype'.
if (runtimeType instanceof TypeVariable) {
return runtimeType.equals(supertype)
|| any(((TypeVariable<?>) runtimeType).getBounds()).isSubtypeOf(supertype);
}
if (runtimeType instanceof GenericArrayType) {
return of(supertype).isSupertypeOfArray((GenericArrayType) runtimeType);
}
// Proceed to regular Type subtype check
if (supertype instanceof Class) {
return this.someRawTypeIsSubclassOf((Class<?>) supertype);
} else if (supertype instanceof ParameterizedType) {
return this.isSubtypeOfParameterizedType((ParameterizedType) supertype);
} else if (supertype instanceof GenericArrayType) {
return this.isSubtypeOfArrayType((GenericArrayType) supertype);
} else { // to instanceof TypeVariable
return false;
}
}
/**
* Returns true if this type is known to be an array type, such as {@code int[]}, {@code T[]},
* {@code <? extends Map<String, Integer>[]>} etc.
*/
public final boolean isArray() {
return getComponentType() != null;
}
/**
* Returns true if this type is one of the nine primitive types (including {@code void}).
*
* @since 15.0
*/
public final boolean isPrimitive() {
return (runtimeType instanceof Class) && ((Class<?>) runtimeType).isPrimitive();
}
/**
* Returns the corresponding wrapper type if this is a primitive type; otherwise returns {@code
* this} itself. Idempotent.
*
* @since 15.0
*/
public final TypeToken<T> wrap() {
if (isPrimitive()) {
@SuppressWarnings("unchecked") // this is a primitive | or |
java | spring-projects__spring-boot | documentation/spring-boot-actuator-docs/src/test/java/org/springframework/boot/actuate/docs/env/EnvironmentEndpointDocumentationTests.java | {
"start": 6305,
"end": 6996
} | class ____ {
@Bean
EnvironmentEndpoint endpoint(ConfigurableEnvironment environment) {
return new EnvironmentEndpoint(new AbstractEnvironment() {
@Override
protected void customizePropertySources(MutablePropertySources propertySources) {
environment.getPropertySources()
.stream()
.filter(this::includedPropertySource)
.forEach(propertySources::addLast);
}
private boolean includedPropertySource(PropertySource<?> propertySource) {
return propertySource instanceof EnumerablePropertySource
&& !"Inlined Test Properties".equals(propertySource.getName());
}
}, Collections.emptyList(), Show.ALWAYS);
}
}
}
| TestConfiguration |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/PartitionRequestQueue.java | {
"start": 12492,
"end": 17545
} | class ____ as the input
// gate and the consumed views as the local input channels.
BufferAndAvailability next = null;
int nextSubpartitionId = -1;
try {
while (true) {
NetworkSequenceViewReader reader = pollAvailableReader();
// No queue with available data. We allow this here, because
// of the write callbacks that are executed after each write.
if (reader == null) {
return;
}
nextSubpartitionId = reader.peekNextBufferSubpartitionId();
next = reader.getNextBuffer();
if (next == null) {
if (!reader.isReleased()) {
continue;
}
Throwable cause = reader.getFailureCause();
if (cause != null) {
ErrorResponse msg = new ErrorResponse(cause, reader.getReceiverId());
ctx.writeAndFlush(msg);
}
} else {
// This channel was now removed from the available reader queue.
// We re-add it into the queue if it is still available
if (next.moreAvailable()) {
registerAvailableReader(reader);
}
BufferResponse msg =
new BufferResponse(
next.buffer(),
next.getSequenceNumber(),
reader.getReceiverId(),
nextSubpartitionId,
next.buffer() instanceof FullyFilledBuffer
? ((FullyFilledBuffer) next.buffer())
.getPartialBuffers()
.size()
: 0,
next.buffersInBacklog());
// Write and flush and wait until this is done before
// trying to continue with the next buffer.
channel.writeAndFlush(msg).addListener(writeListener);
return;
}
}
} catch (Throwable t) {
if (next != null) {
next.buffer().recycleBuffer();
}
throw new IOException(t.getMessage(), t);
}
}
private void registerAvailableReader(NetworkSequenceViewReader reader) {
availableReaders.add(reader);
reader.setRegisteredAsAvailable(true);
}
@Nullable
private NetworkSequenceViewReader pollAvailableReader() {
NetworkSequenceViewReader reader = availableReaders.poll();
if (reader != null) {
reader.setRegisteredAsAvailable(false);
}
return reader;
}
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
releaseAllResources();
ctx.fireChannelInactive();
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
handleException(ctx.channel(), cause);
}
private void handleException(Channel channel, Throwable cause) throws IOException {
LOG.error(
"Encountered error while consuming partitions (connection to {})",
channel.remoteAddress(),
cause);
fatalError = true;
releaseAllResources();
if (channel.isActive()) {
channel.writeAndFlush(new ErrorResponse(cause))
.addListener(ChannelFutureListener.CLOSE);
}
}
private void releaseAllResources() throws IOException {
// note: this is only ever executed by one thread: the Netty IO thread!
for (NetworkSequenceViewReader reader : allReaders.values()) {
releaseViewReader(reader);
}
availableReaders.clear();
allReaders.clear();
}
private void releaseViewReader(NetworkSequenceViewReader reader) throws IOException {
reader.setRegisteredAsAvailable(false);
reader.releaseAllResources();
}
private void onChannelFutureFailure(ChannelFuture future) throws Exception {
if (future.cause() != null) {
handleException(future.channel(), future.cause());
} else {
handleException(
future.channel(), new IllegalStateException("Sending cancelled by user."));
}
}
public void notifyPartitionRequestTimeout(PartitionRequestListener partitionRequestListener) {
ctx.pipeline().fireUserEventTriggered(partitionRequestListener);
}
// This listener is called after an element of the current nonEmptyReader has been
// flushed. If successful, the listener triggers further processing of the
// queues.
private | acting |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlusSparse.java | {
"start": 1084,
"end": 2650
} | class ____ extends AbstractHyperLogLogPlusPlus implements Releasable {
// TODO: consider a hll sparse structure
private final LinearCounting lc;
/**
* Create an sparse HLL++ algorithm where capacity is the maximum number of hashes this structure can hold
* per bucket.
*/
HyperLogLogPlusPlusSparse(int precision, BigArrays bigArrays, long initialBuckets) {
super(precision);
this.lc = new LinearCounting(precision, bigArrays, initialBuckets);
}
/** Needs to be called before adding elements into a bucket */
protected void ensureCapacity(long bucketOrd, long size) {
lc.ensureCapacity(bucketOrd, size);
}
@Override
public long cardinality(long bucketOrd) {
return lc.cardinality(bucketOrd);
}
@Override
protected boolean getAlgorithm(long bucketOrd) {
return LINEAR_COUNTING;
}
@Override
protected AbstractLinearCounting.HashesIterator getLinearCounting(long bucketOrd) {
return lc.values(bucketOrd);
}
@Override
protected AbstractHyperLogLog.RunLenIterator getHyperLogLog(long bucketOrd) {
throw new IllegalArgumentException("Implementation does not support HLL structures");
}
@Override
public void collect(long bucket, long hash) {
lc.collect(bucket, hash);
}
@Override
public void close() {
Releasables.close(lc);
}
protected void addEncoded(long bucket, int encoded) {
lc.addEncoded(bucket, encoded);
}
private static | HyperLogLogPlusPlusSparse |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripedDataStreamer.java | {
"start": 1929,
"end": 6978
} | class ____ extends DataStreamer {
private final Coordinator coordinator;
private final int index;
StripedDataStreamer(HdfsFileStatus stat,
DFSClient dfsClient, String src,
Progressable progress, DataChecksum checksum,
AtomicReference<CachingStrategy> cachingStrategy,
ByteArrayManager byteArrayManage, String[] favoredNodes,
short index, Coordinator coordinator,
final EnumSet<AddBlockFlag> flags) {
super(stat, null, dfsClient, src, progress, checksum, cachingStrategy,
byteArrayManage, favoredNodes, flags);
this.index = index;
this.coordinator = coordinator;
}
int getIndex() {
return index;
}
boolean isHealthy() {
return !streamerClosed() && !getErrorState().hasInternalError();
}
@Override
protected void endBlock() {
coordinator.offerEndBlock(index, block.getCurrentBlock());
super.endBlock();
}
/**
* The upper level DFSStripedOutputStream will allocate the new block group.
* All the striped data streamer only needs to fetch from the queue, which
* should be already be ready.
*/
private LocatedBlock getFollowingBlock() throws IOException {
if (!this.isHealthy()) {
// No internal block for this streamer, maybe no enough healthy DN.
// Throw the exception which has been set by the StripedOutputStream.
this.getLastException().check(false);
}
return coordinator.getFollowingBlocks().poll(index);
}
@Override
protected void setupPipelineForCreate() throws IOException {
boolean success;
LocatedBlock lb = getFollowingBlock();
block.setCurrentBlock(lb.getBlock());
block.setNumBytes(0);
bytesSent = 0;
accessToken = lb.getBlockToken();
DatanodeInfo[] nodes = lb.getLocations();
StorageType[] storageTypes = lb.getStorageTypes();
String[] storageIDs = lb.getStorageIDs();
// Connect to the DataNode. If fail the internal error state will be set.
success = createBlockOutputStream(nodes, storageTypes, storageIDs, 0L,
false);
if (!success) {
block.setCurrentBlock(null);
final DatanodeInfo badNode = nodes[getErrorState().getBadNodeIndex()];
LOG.warn("Excluding datanode " + badNode);
excludedNodes.put(badNode, badNode);
throw new IOException("Unable to create new block." + this);
}
setPipeline(lb);
}
@VisibleForTesting
LocatedBlock peekFollowingBlock() {
return coordinator.getFollowingBlocks().peek(index);
}
@Override
protected boolean setupPipelineInternal(DatanodeInfo[] nodes,
StorageType[] nodeStorageTypes, String[] nodeStorageIDs)
throws IOException {
boolean success = false;
while (!success && !streamerClosed() && dfsClient.clientRunning) {
if (!handleRestartingDatanode()) {
return false;
}
if (!handleBadDatanode()) {
// for striped streamer if it is datanode error then close the stream
// and return. no need to replace datanode
return false;
}
// get a new generation stamp and an access token
final LocatedBlock lb = coordinator.getNewBlocks().take(index);
long newGS = lb.getBlock().getGenerationStamp();
setAccessToken(lb.getBlockToken());
// set up the pipeline again with the remaining nodes. when a striped
// data streamer comes here, it must be in external error state.
assert getErrorState().hasExternalError()
|| getErrorState().doWaitForRestart();
success = createBlockOutputStream(nodes, nodeStorageTypes,
nodeStorageIDs, newGS, true);
failPacket4Testing();
getErrorState().checkRestartingNodeDeadline(nodes);
// notify coordinator the result of createBlockOutputStream
synchronized (coordinator) {
if (!streamerClosed()) {
coordinator.updateStreamer(this, success);
coordinator.notify();
} else {
success = false;
}
}
if (success) {
// wait for results of other streamers
success = coordinator.takeStreamerUpdateResult(index);
if (success) {
// if all succeeded, update its block using the new GS
updateBlockGS(newGS);
} else {
// otherwise close the block stream and restart the recovery process
closeStream();
}
} else {
// if fail, close the stream. The internal error state and last
// exception have already been set in createBlockOutputStream
// TODO: wait for restarting DataNodes during RollingUpgrade
closeStream();
setStreamerAsClosed();
}
} // while
return success;
}
void setExternalError() {
getErrorState().setExternalError();
synchronized (dataQueue) {
dataQueue.notifyAll();
}
}
@Override
public String toString() {
return "#" + index + ": " + (!isHealthy() ? "failed, ": "") + super.toString();
}
}
| StripedDataStreamer |
java | apache__kafka | metadata/src/test/java/org/apache/kafka/image/MetadataVersionChangeTest.java | {
"start": 1234,
"end": 3305
} | class ____ {
private static final MetadataVersionChange CHANGE_MINIMUM_TO_LATEST =
new MetadataVersionChange(MetadataVersion.MINIMUM_VERSION, MetadataVersion.latestProduction());
private static final MetadataVersionChange CHANGE_LATEST_TO_MINIMUM =
new MetadataVersionChange(MetadataVersion.latestProduction(), MetadataVersion.MINIMUM_VERSION);
@Test
public void testIsUpgrade() {
assertTrue(CHANGE_MINIMUM_TO_LATEST.isUpgrade());
assertFalse(CHANGE_LATEST_TO_MINIMUM.isUpgrade());
}
@Test
public void testIsDowngrade() {
assertFalse(CHANGE_MINIMUM_TO_LATEST.isDowngrade());
assertTrue(CHANGE_LATEST_TO_MINIMUM.isDowngrade());
}
@Test
public void testMetadataVersionChangeExceptionToString() {
assertEquals("org.apache.kafka.image.MetadataVersionChangeException: The metadata.version " +
"is changing from " + MetadataVersion.MINIMUM_VERSION + " to " + MetadataVersion.latestProduction(),
new MetadataVersionChangeException(CHANGE_MINIMUM_TO_LATEST).toString());
assertEquals("org.apache.kafka.image.MetadataVersionChangeException: The metadata.version " +
"is changing from " + MetadataVersion.latestProduction() + " to " + MetadataVersion.MINIMUM_VERSION,
new MetadataVersionChangeException(CHANGE_LATEST_TO_MINIMUM).toString());
}
@Test
public void testConstructorThrowsExceptionWhenOldVersionIsNull() {
assertThrows(NullPointerException.class, () ->
new MetadataVersionChange(null, MetadataVersion.MINIMUM_VERSION));
}
@Test
public void testConstructorThrowsExceptionWhenNewVersionIsNull() {
assertThrows(NullPointerException.class, () ->
new MetadataVersionChange(MetadataVersion.MINIMUM_VERSION, null));
}
@Test
public void testConstructorThrowsExceptionWhenBothVersionsAreNull() {
assertThrows(NullPointerException.class, () ->
new MetadataVersionChange(null, null));
}
}
| MetadataVersionChangeTest |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/model/internal/InheritanceState.java | {
"start": 12589,
"end": 13006
} | class ____ {
private final List<PropertyData> properties;
private final int idPropertyCount;
public List<PropertyData> getElements() {
return properties;
}
public int getIdPropertyCount() {
return idPropertyCount;
}
private ElementsToProcess(List<PropertyData> properties, int idPropertyCount) {
this.properties = properties;
this.idPropertyCount = idPropertyCount;
}
}
}
| ElementsToProcess |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/metadata/CheckpointMetadata.java | {
"start": 1281,
"end": 3302
} | class ____ implements Disposable {
/** The checkpoint ID. */
private final long checkpointId;
/** The operator states. */
private final Collection<OperatorState> operatorStates;
/** The states generated by the CheckpointCoordinator. */
private final Collection<MasterState> masterStates;
// null when restored from an older version
@Nullable private final CheckpointProperties properties;
public CheckpointMetadata(
long checkpointId,
Collection<OperatorState> operatorStates,
Collection<MasterState> masterStates) {
this(checkpointId, operatorStates, masterStates, null);
}
public CheckpointMetadata(
long checkpointId,
Collection<OperatorState> operatorStates,
Collection<MasterState> masterStates,
@Nullable CheckpointProperties properties) {
this.checkpointId = checkpointId;
this.operatorStates = operatorStates;
this.masterStates = checkNotNull(masterStates, "masterStates");
this.properties = properties;
}
public long getCheckpointId() {
return checkpointId;
}
public Collection<OperatorState> getOperatorStates() {
return operatorStates;
}
public Collection<MasterState> getMasterStates() {
return masterStates;
}
@Override
public void dispose() throws Exception {
for (OperatorState operatorState : operatorStates) {
operatorState.discardState();
}
operatorStates.clear();
masterStates.clear();
}
@Override
public String toString() {
return "Checkpoint Metadata";
}
@Nullable
public CheckpointProperties getCheckpointProperties() {
return properties;
}
public CheckpointMetadata withProperties(CheckpointProperties properties) {
return new CheckpointMetadata(
this.checkpointId, this.operatorStates, this.masterStates, properties);
}
}
| CheckpointMetadata |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/checkreturnvalue/DaggerRules.java | {
"start": 1483,
"end": 2832
} | class ____ {
/**
* Returns a rule that handles {@code @dagger.Component.Builder} types, making their fluent setter
* methods' results ignorable.
*/
public static ResultUseRule<VisitorState, Symbol> componentBuilders() {
return new DaggerRule("dagger.Component.Builder");
}
/**
* Returns a rule that handles {@code @dagger.Subcomponent.Builder} types, making their fluent
* setter methods' results ignorable.
*/
public static ResultUseRule<VisitorState, Symbol> subcomponentBuilders() {
return new DaggerRule("dagger.Subcomponent.Builder");
}
/**
* Returns a rule that handles {@code @dagger.producers.ProductionComponent.Builder} types, making
* their fluent setter methods' results ignorable.
*/
public static ResultUseRule<VisitorState, Symbol> productionComponentBuilders() {
return new DaggerRule("dagger.producers.ProductionComponent.Builder");
}
/**
* Returns a rule that handles {@code @dagger.producers.ProductionSubcomponent.Builder} types,
* making their fluent setter methods' results ignorable.
*/
public static ResultUseRule<VisitorState, Symbol> productionSubcomponentBuilders() {
return new DaggerRule("dagger.producers.ProductionSubcomponent.Builder");
}
/** Rules for methods on Dagger components and subcomponents. */
private static final | DaggerRules |
java | spring-projects__spring-framework | spring-webflux/src/main/java/org/springframework/web/reactive/config/WebFluxConfigurationSupport.java | {
"start": 4620,
"end": 22110
} | class ____ implements ApplicationContextAware {
private static final boolean BEAN_VALIDATION_PRESENT =
ClassUtils.isPresent("jakarta.validation.Validator", WebFluxConfigurationSupport.class.getClassLoader());
private @Nullable Map<String, CorsConfiguration> corsConfigurations;
private @Nullable PathMatchConfigurer pathMatchConfigurer;
private @Nullable BlockingExecutionConfigurer blockingExecutionConfigurer;
private @Nullable ApiVersionStrategy apiVersionStrategy;
private @Nullable List<ErrorResponse.Interceptor> errorResponseInterceptors;
private @Nullable ViewResolverRegistry viewResolverRegistry;
private @Nullable ApplicationContext applicationContext;
@Override
public void setApplicationContext(@Nullable ApplicationContext applicationContext) {
this.applicationContext = applicationContext;
if (applicationContext != null) {
Assert.state(!applicationContext.containsBean("mvcContentNegotiationManager"),
"The Java/XML config for Spring MVC and Spring WebFlux cannot both be enabled, " +
"for example, via @EnableWebMvc and @EnableWebFlux, in the same application.");
}
}
public final @Nullable ApplicationContext getApplicationContext() {
return this.applicationContext;
}
@Bean
public DispatcherHandler webHandler() {
return new DispatcherHandler();
}
@Bean
@Order(0)
public WebExceptionHandler responseStatusExceptionHandler() {
return new WebFluxResponseStatusExceptionHandler();
}
@Bean
public RequestMappingHandlerMapping requestMappingHandlerMapping(
@Qualifier("webFluxContentTypeResolver") RequestedContentTypeResolver contentTypeResolver,
@Qualifier("webFluxApiVersionStrategy") @Nullable ApiVersionStrategy apiVersionStrategy) {
RequestMappingHandlerMapping mapping = createRequestMappingHandlerMapping();
mapping.setOrder(0);
mapping.setContentTypeResolver(contentTypeResolver);
mapping.setApiVersionStrategy(apiVersionStrategy);
PathMatchConfigurer configurer = getPathMatchConfigurer();
configureAbstractHandlerMapping(mapping, configurer);
Map<String, Predicate<Class<?>>> pathPrefixes = configurer.getPathPrefixes();
if (pathPrefixes != null) {
mapping.setPathPrefixes(pathPrefixes);
}
return mapping;
}
private void configureAbstractHandlerMapping(AbstractHandlerMapping mapping, PathMatchConfigurer configurer) {
mapping.setCorsConfigurations(getCorsConfigurations());
Boolean useCaseSensitiveMatch = configurer.isUseCaseSensitiveMatch();
if (useCaseSensitiveMatch != null) {
mapping.setUseCaseSensitiveMatch(useCaseSensitiveMatch);
}
}
/**
* Override to plug a subclass of {@link RequestMappingHandlerMapping}.
*/
protected RequestMappingHandlerMapping createRequestMappingHandlerMapping() {
return new RequestMappingHandlerMapping();
}
@Bean
public RequestedContentTypeResolver webFluxContentTypeResolver() {
RequestedContentTypeResolverBuilder builder = new RequestedContentTypeResolverBuilder();
configureContentTypeResolver(builder);
return builder.build();
}
/**
* Override to configure how the requested content type is resolved.
*/
protected void configureContentTypeResolver(RequestedContentTypeResolverBuilder builder) {
}
/**
* Return the central strategy to manage API versioning with, or {@code null}
* if the application does not use versioning.
* @since 7.0
*/
@Bean
public @Nullable ApiVersionStrategy webFluxApiVersionStrategy() {
if (this.apiVersionStrategy == null) {
ApiVersionConfigurer configurer = new ApiVersionConfigurer();
configureApiVersioning(configurer);
ApiVersionStrategy strategy = configurer.getApiVersionStrategy();
if (strategy != null) {
this.apiVersionStrategy = strategy;
}
}
return this.apiVersionStrategy;
}
/**
* Override this method to configure API versioning.
* @since 7.0
*/
protected void configureApiVersioning(ApiVersionConfigurer configurer) {
}
/**
* Callback for building the global CORS configuration. This method is final.
* Use {@link #addCorsMappings(CorsRegistry)} to customize the CORS config.
*/
protected final Map<String, CorsConfiguration> getCorsConfigurations() {
if (this.corsConfigurations == null) {
CorsRegistry registry = new CorsRegistry();
addCorsMappings(registry);
this.corsConfigurations = registry.getCorsConfigurations();
}
return this.corsConfigurations;
}
/**
* Override this method to configure cross-origin requests processing.
* @see CorsRegistry
*/
protected void addCorsMappings(CorsRegistry registry) {
}
/**
* Callback for building the {@link PathMatchConfigurer}. This method is
* final, use {@link #configurePathMatching} to customize path matching.
*/
protected final PathMatchConfigurer getPathMatchConfigurer() {
if (this.pathMatchConfigurer == null) {
this.pathMatchConfigurer = new PathMatchConfigurer();
configurePathMatching(this.pathMatchConfigurer);
}
return this.pathMatchConfigurer;
}
/**
* Override to configure path matching options.
*/
public void configurePathMatching(PathMatchConfigurer configurer) {
}
@Bean
public RouterFunctionMapping routerFunctionMapping(
ServerCodecConfigurer serverCodecConfigurer, @Nullable ApiVersionStrategy apiVersionStrategy) {
RouterFunctionMapping mapping = createRouterFunctionMapping();
mapping.setOrder(-1); // go before RequestMappingHandlerMapping
mapping.setMessageReaders(serverCodecConfigurer.getReaders());
mapping.setApiVersionStrategy(apiVersionStrategy);
configureAbstractHandlerMapping(mapping, getPathMatchConfigurer());
return mapping;
}
/**
* Override to plug a subclass of {@link RouterFunctionMapping}.
*/
protected RouterFunctionMapping createRouterFunctionMapping() {
return new RouterFunctionMapping();
}
/**
* Return a handler mapping ordered at Integer.MAX_VALUE-1 with mapped
* resource handlers. To configure resource handling, override
* {@link #addResourceHandlers}.
*/
@Bean
public HandlerMapping resourceHandlerMapping(ResourceUrlProvider resourceUrlProvider) {
ResourceLoader resourceLoader = this.applicationContext;
if (resourceLoader == null) {
resourceLoader = new DefaultResourceLoader();
}
ResourceHandlerRegistry registry = new ResourceHandlerRegistry(resourceLoader);
registry.setResourceUrlProvider(resourceUrlProvider);
addResourceHandlers(registry);
AbstractHandlerMapping handlerMapping = registry.getHandlerMapping();
if (handlerMapping != null) {
configureAbstractHandlerMapping(handlerMapping, getPathMatchConfigurer());
}
else {
handlerMapping = new EmptyHandlerMapping();
}
return handlerMapping;
}
@Bean
public ResourceUrlProvider resourceUrlProvider() {
return new ResourceUrlProvider();
}
/**
* Override this method to add resource handlers for serving static resources.
* @see ResourceHandlerRegistry
*/
protected void addResourceHandlers(ResourceHandlerRegistry registry) {
}
@Bean
public RequestMappingHandlerAdapter requestMappingHandlerAdapter(
@Qualifier("webFluxAdapterRegistry") ReactiveAdapterRegistry reactiveAdapterRegistry,
ServerCodecConfigurer serverCodecConfigurer,
@Qualifier("webFluxConversionService") FormattingConversionService conversionService,
@Qualifier("webFluxContentTypeResolver") RequestedContentTypeResolver contentTypeResolver,
@Qualifier("webFluxValidator") Validator validator) {
RequestMappingHandlerAdapter adapter = createRequestMappingHandlerAdapter();
adapter.setMessageReaders(serverCodecConfigurer.getReaders());
adapter.setWebBindingInitializer(getConfigurableWebBindingInitializer(conversionService, validator));
adapter.setReactiveAdapterRegistry(reactiveAdapterRegistry);
adapter.setContentTypeResolver(contentTypeResolver);
BlockingExecutionConfigurer executorConfigurer = getBlockingExecutionConfigurer();
if (executorConfigurer.getExecutor() != null) {
adapter.setBlockingExecutor(executorConfigurer.getExecutor());
}
if (executorConfigurer.getBlockingControllerMethodPredicate() != null) {
adapter.setBlockingMethodPredicate(executorConfigurer.getBlockingControllerMethodPredicate());
}
ArgumentResolverConfigurer configurer = new ArgumentResolverConfigurer();
configureArgumentResolvers(configurer);
adapter.setArgumentResolverConfigurer(configurer);
return adapter;
}
/**
* Override to plug a subclass of {@link RequestMappingHandlerAdapter}.
*/
protected RequestMappingHandlerAdapter createRequestMappingHandlerAdapter() {
return new RequestMappingHandlerAdapter();
}
/**
* Configure resolvers for custom controller method arguments.
*/
protected void configureArgumentResolvers(ArgumentResolverConfigurer configurer) {
}
/**
* Return the configurer for HTTP message readers and writers.
* <p>Use {@link #configureHttpMessageCodecs(ServerCodecConfigurer)} to
* configure the readers and writers.
*/
@Bean
public ServerCodecConfigurer serverCodecConfigurer() {
ServerCodecConfigurer serverCodecConfigurer = ServerCodecConfigurer.create();
configureHttpMessageCodecs(serverCodecConfigurer);
return serverCodecConfigurer;
}
/**
* Override to plug a subclass of {@link LocaleContextResolver}.
*/
protected LocaleContextResolver createLocaleContextResolver() {
return new AcceptHeaderLocaleContextResolver();
}
	/**
	 * Return the {@link LocaleContextResolver} bean; delegates to
	 * {@link #createLocaleContextResolver()}, which subclasses may override.
	 */
	@Bean
	public LocaleContextResolver localeContextResolver() {
		return createLocaleContextResolver();
	}
	/**
	 * Override to configure the HTTP message readers and writers to use.
	 * <p>The default implementation is empty; called from
	 * {@link #serverCodecConfigurer()} with the freshly created configurer.
	 * @param configurer the configurer to customize
	 */
	protected void configureHttpMessageCodecs(ServerCodecConfigurer configurer) {
	}
/**
* Return the {@link ConfigurableWebBindingInitializer} to use for
* initializing all {@link WebDataBinder} instances.
*/
protected ConfigurableWebBindingInitializer getConfigurableWebBindingInitializer(
FormattingConversionService webFluxConversionService, Validator webFluxValidator) {
ConfigurableWebBindingInitializer initializer = new ConfigurableWebBindingInitializer();
initializer.setConversionService(webFluxConversionService);
initializer.setValidator(webFluxValidator);
MessageCodesResolver messageCodesResolver = getMessageCodesResolver();
if (messageCodesResolver != null) {
initializer.setMessageCodesResolver(messageCodesResolver);
}
return initializer;
}
/**
* Return a {@link FormattingConversionService} for use with annotated controllers.
* <p>See {@link #addFormatters} as an alternative to overriding this method.
*/
@Bean
public FormattingConversionService webFluxConversionService() {
FormattingConversionService service = new DefaultFormattingConversionService();
addFormatters(service);
return service;
}
	/**
	 * Override this method to add custom {@link Converter} and/or {@link Formatter}
	 * delegates to the common {@link FormattingConversionService}.
	 * <p>The default implementation is empty.
	 * @param registry the registry to add converters and formatters to
	 * @see #webFluxConversionService()
	 */
	protected void addFormatters(FormatterRegistry registry) {
	}
	/**
	 * Return a {@link ReactiveAdapterRegistry} for adapting reactive types.
	 */
	@Bean
	public ReactiveAdapterRegistry webFluxAdapterRegistry() {
		return new ReactiveAdapterRegistry();
	}
	/**
	 * Return a global {@link Validator} instance for example for validating
	 * {@code @RequestBody} method arguments.
	 * <p>Delegates to {@link #getValidator()} first. If that returns {@code null},
	 * checks the classpath for the presence of a JSR-303 implementation
	 * before creating an {@code OptionalValidatorFactoryBean}. If a JSR-303
	 * implementation is not available, a "no-op" {@link Validator} is returned.
	 */
	@Bean
	public Validator webFluxValidator() {
		Validator validator = getValidator();
		if (validator == null) {
			if (BEAN_VALIDATION_PRESENT) {
				try {
					validator = new OptionalValidatorFactoryBean();
				}
				catch (Throwable ex) {
					// Catch Throwable: provider bootstrap failures may surface as errors
					throw new BeanInitializationException("Failed to create default validator", ex);
				}
			}
			else {
				// No JSR-303 provider on the classpath: fall back to a no-op validator
				validator = new NoOpValidator();
			}
		}
		return validator;
	}
	/**
	 * Override this method to provide a custom {@link Validator}.
	 * <p>Returns {@code null} by default, in which case
	 * {@link #webFluxValidator()} selects a default.
	 */
	protected @Nullable Validator getValidator() {
		return null;
	}
	/**
	 * Override this method to provide a custom {@link MessageCodesResolver}.
	 * <p>Returns {@code null} by default, in which case no custom resolver is
	 * applied to {@link WebDataBinder} instances.
	 */
	protected @Nullable MessageCodesResolver getMessageCodesResolver() {
		return null;
	}
/**
* Callback to build and cache the {@link BlockingExecutionConfigurer}.
* This method is final, but subclasses can override
* {@link #configureBlockingExecution}.
* @since 6.1
*/
protected final BlockingExecutionConfigurer getBlockingExecutionConfigurer() {
if (this.blockingExecutionConfigurer == null) {
this.blockingExecutionConfigurer = new BlockingExecutionConfigurer();
configureBlockingExecution(this.blockingExecutionConfigurer);
}
return this.blockingExecutionConfigurer;
}
	/**
	 * Override this method to configure blocking execution.
	 * <p>The default implementation is empty.
	 * @param configurer the configurer to customize
	 * @since 6.1
	 */
	protected void configureBlockingExecution(BlockingExecutionConfigurer configurer) {
	}
@Bean
public HandlerFunctionAdapter handlerFunctionAdapter() {
return new HandlerFunctionAdapter();
}
@Bean
public SimpleHandlerAdapter simpleHandlerAdapter() {
return new SimpleHandlerAdapter();
}
@Bean
public WebSocketHandlerAdapter webFluxWebSocketHandlerAdapter() {
WebSocketHandlerAdapter adapter = new WebSocketHandlerAdapter(initWebSocketService());
// Lower the (default) priority for now, for backwards compatibility
int defaultOrder = adapter.getOrder();
adapter.setOrder(defaultOrder + 1);
return adapter;
}
private WebSocketService initWebSocketService() {
WebSocketService service = getWebSocketService();
if (service == null) {
try {
service = new HandshakeWebSocketService();
}
catch (Throwable ex) {
// Don't fail, test environment perhaps
service = new NoUpgradeStrategyWebSocketService(ex);
}
}
return service;
}
	/**
	 * Override this method to provide a custom {@link WebSocketService}.
	 * <p>Returns {@code null} by default, in which case
	 * {@link #initWebSocketService()} creates a default service.
	 */
	protected @Nullable WebSocketService getWebSocketService() {
		return null;
	}
@Bean
public ResponseEntityResultHandler responseEntityResultHandler(
@Qualifier("webFluxAdapterRegistry") ReactiveAdapterRegistry reactiveAdapterRegistry,
ServerCodecConfigurer serverCodecConfigurer,
@Qualifier("webFluxContentTypeResolver") RequestedContentTypeResolver contentTypeResolver) {
return new ResponseEntityResultHandler(serverCodecConfigurer.getWriters(),
contentTypeResolver, reactiveAdapterRegistry, getErrorResponseInterceptors());
}
@Bean
public ResponseBodyResultHandler responseBodyResultHandler(
@Qualifier("webFluxAdapterRegistry") ReactiveAdapterRegistry reactiveAdapterRegistry,
ServerCodecConfigurer serverCodecConfigurer,
@Qualifier("webFluxContentTypeResolver") RequestedContentTypeResolver contentTypeResolver) {
return new ResponseBodyResultHandler(serverCodecConfigurer.getWriters(),
contentTypeResolver, reactiveAdapterRegistry, getErrorResponseInterceptors());
}
@Bean
public ViewResolutionResultHandler viewResolutionResultHandler(
@Qualifier("webFluxAdapterRegistry") ReactiveAdapterRegistry reactiveAdapterRegistry,
@Qualifier("webFluxContentTypeResolver") RequestedContentTypeResolver contentTypeResolver) {
ViewResolverRegistry registry = getViewResolverRegistry();
List<ViewResolver> resolvers = registry.getViewResolvers();
ViewResolutionResultHandler handler = new ViewResolutionResultHandler(
resolvers, contentTypeResolver, reactiveAdapterRegistry);
handler.setDefaultViews(registry.getDefaultViews());
handler.setOrder(registry.getOrder());
return handler;
}
@Bean
public ServerResponseResultHandler serverResponseResultHandler(ServerCodecConfigurer serverCodecConfigurer) {
List<ViewResolver> resolvers = getViewResolverRegistry().getViewResolvers();
ServerResponseResultHandler handler = new ServerResponseResultHandler();
handler.setMessageWriters(serverCodecConfigurer.getWriters());
handler.setViewResolvers(resolvers);
return handler;
}
/**
* Provide access to the list of {@link ErrorResponse.Interceptor}'s to apply
* in result handlers when rendering error responses.
* <p>This method cannot be overridden; use {@link #configureErrorResponseInterceptors(List)} instead.
* @since 6.2
*/
protected final List<ErrorResponse.Interceptor> getErrorResponseInterceptors() {
if (this.errorResponseInterceptors == null) {
this.errorResponseInterceptors = new ArrayList<>();
configureErrorResponseInterceptors(this.errorResponseInterceptors);
}
return this.errorResponseInterceptors;
}
	/**
	 * Override this method for control over the {@link ErrorResponse.Interceptor}'s
	 * to apply in result handling when rendering error responses.
	 * <p>The default implementation is empty.
	 * @param interceptors the list to add handlers to
	 * @since 6.2
	 */
	protected void configureErrorResponseInterceptors(List<ErrorResponse.Interceptor> interceptors) {
	}
/**
* Callback for building the {@link ViewResolverRegistry}. This method is final,
* use {@link #configureViewResolvers} to customize view resolvers.
*/
protected final ViewResolverRegistry getViewResolverRegistry() {
if (this.viewResolverRegistry == null) {
this.viewResolverRegistry = new ViewResolverRegistry(this.applicationContext);
configureViewResolvers(this.viewResolverRegistry);
}
return this.viewResolverRegistry;
}
	/**
	 * Configure view resolution for supporting template engines.
	 * <p>The default implementation is empty.
	 * @param registry the registry to add view resolvers to
	 * @see ViewResolverRegistry
	 */
	protected void configureViewResolvers(ViewResolverRegistry registry) {
	}
private static final | WebFluxConfigurationSupport |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/ondeletecascade/OnDeleteOneToManyTest.java | {
"start": 2734,
"end": 2935
} | class ____ {
@Id
long id;
@OneToMany(fetch = EAGER)
@JoinColumn(name = "parent_id")
@OnDelete(action = OnDeleteAction.CASCADE)
Set<Child> children = new HashSet<>();
}
@Entity
static | Parent |
java | apache__flink | flink-test-utils-parent/flink-connector-test-utils/src/main/java/org/apache/flink/connector/testframe/junit/extensions/TestCaseInvocationContextProvider.java | {
"start": 5382,
"end": 6862
} | class ____ implements TestTemplateInvocationContext {
private final TestEnvironment testEnvironment;
private final ExternalContext externalContext;
private final CheckpointingMode semantic;
public TestResourceProvidingInvocationContext(
TestEnvironment testEnvironment,
ExternalContext externalContext,
CheckpointingMode semantic) {
this.testEnvironment = testEnvironment;
this.externalContext = externalContext;
this.semantic = semantic;
}
@Override
public String getDisplayName(int invocationIndex) {
return String.format(
"TestEnvironment: [%s], ExternalContext: [%s], Semantic: [%s]",
testEnvironment, externalContext, semantic);
}
@Override
public List<Extension> getAdditionalExtensions() {
return Arrays.asList(
// Extension for injecting parameters
new TestEnvironmentResolver(testEnvironment),
new ExternalContextProvider(externalContext),
new ClusterControllableProvider(testEnvironment),
new SemanticResolver(semantic),
// Extension for closing external context
(AfterTestExecutionCallback) ignore -> externalContext.close());
}
}
private static | TestResourceProvidingInvocationContext |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/testng/transaction/programmatic/ProgrammaticTxMgmtTestNGTests.java | {
"start": 2133,
"end": 2248
} | class ____ has been modified to run with TestNG.
*
* @author Sam Brannen
* @since 4.1
*/
@ContextConfiguration
| that |
java | google__dagger | javatests/dagger/internal/codegen/InjectConstructorFactoryGeneratorTest.java | {
"start": 57118,
"end": 57305
} | interface ____ {}");
Source nonQualifier =
CompilerTests.javaSource(
"test.NonQualifier",
"package test;",
"",
"@ | MisplacedQualifier |
java | google__dagger | javatests/artifacts/dagger-ksp/transitive-annotation-app/library1/src/main/java/library1/MyBaseComponent.java | {
"start": 2176,
"end": 5026
} | class ____ {
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
public abstract MyBaseComponent create(
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
MyComponentModule myComponentModule,
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
MyComponentDependency myComponentDependency);
// Non-dagger factory code
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
public MyTransitiveType nonDaggerField = null;
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
public static MyTransitiveType nonDaggerStaticField = null;
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
public MyTransitiveType nonDaggerMethod(
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
MyTransitiveType nonDaggerParameter) {
return nonDaggerParameter;
}
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
public static MyTransitiveType nonDaggerStaticMethod(
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
MyTransitiveType nonDaggerParameter) {
return nonDaggerParameter;
}
}
// Non-dagger code
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
public MyTransitiveType nonDaggerField = null;
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
public static MyTransitiveType nonDaggerStaticField = null;
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
public MyTransitiveType nonDaggerMethod(
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
MyTransitiveType nonDaggerParameter) {
return nonDaggerParameter;
}
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
public static MyTransitiveType nonDaggerStaticMethod(
@MyTransitiveAnnotation
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
MyTransitiveType nonDaggerParameter) {
return nonDaggerParameter;
}
}
| Factory |
java | elastic__elasticsearch | plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Utils.java | {
"start": 696,
"end": 1396
} | class ____ {
private static final Duration IMDS_CONNECTION_TIMEOUT = Duration.ofSeconds(2);
static String getInstanceMetadata(String metadataPath) {
final var httpClientBuilder = ApacheHttpClient.builder();
httpClientBuilder.connectionTimeout(IMDS_CONNECTION_TIMEOUT);
try (var ec2Client = Ec2MetadataClient.builder().httpClient(httpClientBuilder).build()) {
final var metadataValue = ec2Client.get(metadataPath).asString();
if (Strings.hasText(metadataValue) == false) {
throw new IllegalStateException("no ec2 metadata returned from " + metadataPath);
}
return metadataValue;
}
}
}
| AwsEc2Utils |
java | apache__maven | compat/maven-resolver-provider/src/test/java/org/apache/maven/repository/internal/DefaultArtifactDescriptorReaderTest.java | {
"start": 1489,
"end": 3309
} | class ____ extends AbstractRepositoryTestCase {
@Test
void testMng5459() throws Exception {
// prepare
DefaultArtifactDescriptorReader reader =
(DefaultArtifactDescriptorReader) getContainer().lookup(ArtifactDescriptorReader.class);
RepositoryEventDispatcher eventDispatcher = mock(RepositoryEventDispatcher.class);
ArgumentCaptor<RepositoryEvent> event = ArgumentCaptor.forClass(RepositoryEvent.class);
Field field = DefaultArtifactDescriptorReader.class.getDeclaredField("repositoryEventDispatcher");
field.setAccessible(true);
field.set(reader, eventDispatcher);
ArtifactDescriptorRequest request = new ArtifactDescriptorRequest();
request.addRepository(newTestRepository());
request.setArtifact(new DefaultArtifact("org.apache.maven.its", "dep-mng5459", "jar", "0.4.0-SNAPSHOT"));
// execute
reader.readArtifactDescriptor(session, request);
// verify
verify(eventDispatcher).dispatch(event.capture());
boolean missingArtifactDescriptor = false;
for (RepositoryEvent evt : event.getAllValues()) {
if (EventType.ARTIFACT_DESCRIPTOR_MISSING.equals(evt.getType())) {
assertEquals(
"Could not find artifact org.apache.maven.its:dep-mng5459:pom:0.4.0-20130404.090532-2 in repo ("
+ newTestRepository().getUrl() + ")",
evt.getException().getMessage());
missingArtifactDescriptor = true;
}
}
assertTrue(
missingArtifactDescriptor,
"Expected missing artifact descriptor for org.apache.maven.its:dep-mng5459:pom:0.4.0-20130404.090532-2");
}
}
| DefaultArtifactDescriptorReaderTest |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/WatsonLanguageEndpointBuilderFactory.java | {
"start": 19744,
"end": 20101
} | class ____ extends AbstractEndpointBuilder implements WatsonLanguageEndpointBuilder, AdvancedWatsonLanguageEndpointBuilder {
public WatsonLanguageEndpointBuilderImpl(String path) {
super(componentName, path);
}
}
return new WatsonLanguageEndpointBuilderImpl(path);
}
} | WatsonLanguageEndpointBuilderImpl |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/RestrictedTrustConfigTests.java | {
"start": 875,
"end": 2919
} | class ____ extends ESTestCase {
public void testDelegationOfFilesToMonitor() throws Exception {
Path homeDir = createTempDir();
Settings settings = Settings.builder().put("path.home", homeDir).build();
Environment environment = TestEnvironment.newEnvironment(settings);
final int numOtherFiles = randomIntBetween(0, 4);
List<Path> otherFiles = new ArrayList<>(numOtherFiles);
for (int i = 0; i < numOtherFiles; i++) {
otherFiles.add(Files.createFile(homeDir.resolve("otherFile" + i)));
}
Path groupConfigPath = Files.createFile(homeDir.resolve("groupConfig"));
SslTrustConfig delegate = new SslTrustConfig() {
@Override
public X509ExtendedTrustManager createTrustManager() {
return null;
}
@Override
public Collection<? extends StoredCertificate> getConfiguredCertificates() {
return List.of();
}
@Override
public Collection<Path> getDependentFiles() {
return otherFiles;
}
@Override
public String toString() {
return null;
}
@Override
public boolean equals(Object o) {
return false;
}
@Override
public int hashCode() {
return 0;
}
};
final RestrictedTrustConfig restrictedTrustConfig = new RestrictedTrustConfig(
groupConfigPath,
Set.of(X509Field.SAN_OTHERNAME_COMMONNAME),
delegate
);
Collection<Path> filesToMonitor = restrictedTrustConfig.getDependentFiles();
List<Path> expectedPathList = new ArrayList<>(otherFiles);
expectedPathList.add(groupConfigPath);
assertEquals(numOtherFiles + 1, filesToMonitor.size());
assertThat(filesToMonitor, Matchers.contains(expectedPathList.toArray(new Path[0])));
}
}
| RestrictedTrustConfigTests |
java | elastic__elasticsearch | x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/SqlQueryParameterAnalyzerTests.java | {
"start": 377,
"end": 3262
} | class ____ extends ESTestCase {
public void testNoParameters() throws Exception {
assertEquals(0, SqlQueryParameterAnalyzer.parametersCount("SELECT * FROM table"));
assertEquals(0, SqlQueryParameterAnalyzer.parametersCount("SELECT * FROM 'table'"));
assertEquals(0, SqlQueryParameterAnalyzer.parametersCount("SELECT * FROM \"table\""));
assertEquals(0, SqlQueryParameterAnalyzer.parametersCount("SELECT * FROM \"table\" WHERE i = 0"));
assertEquals(0, SqlQueryParameterAnalyzer.parametersCount("SELECT * FROM 'table' WHERE s = '?'"));
assertEquals(0, SqlQueryParameterAnalyzer.parametersCount("SELECT * FROM 'table' WHERE s = 'foo''bar''?'"));
assertEquals(0, SqlQueryParameterAnalyzer.parametersCount("SELECT * FROM `table` where b = 'fo\"o\\\"b{ar\\}?b\"az?}\\-?\"?\\?{'"));
}
public void testSingleParameter() throws Exception {
assertEquals(1, SqlQueryParameterAnalyzer.parametersCount("SELECT * FROM 'table' WHERE s = '?' AND b = ?"));
assertEquals(1, SqlQueryParameterAnalyzer.parametersCount("SELECT * FROM 'table' WHERE b = ? AND s = '?'"));
assertEquals(1, SqlQueryParameterAnalyzer.parametersCount("""
SELECT ?/10 /* multiline \s
* query\s
* more ? /* lines */ ? here\s
*/ FROM foo"""));
assertEquals(1, SqlQueryParameterAnalyzer.parametersCount("SELECT ?"));
}
public void testMultipleParameters() throws Exception {
assertEquals(4, SqlQueryParameterAnalyzer.parametersCount("SELECT ?, ?, ? , ?"));
assertEquals(3, SqlQueryParameterAnalyzer.parametersCount("SELECT ?, ?, '?' , ?"));
assertEquals(3, SqlQueryParameterAnalyzer.parametersCount("SELECT ?, ?\n, '?' , ?"));
assertEquals(3, SqlQueryParameterAnalyzer.parametersCount("""
SELECT ? - 10 -- first parameter with ????
, ? -- second parameter with random " and '\s
, ? -- last parameter without new line"""));
}
public void testUnclosedJdbcEscape() {
SQLException exception = expectThrows(SQLException.class, () -> SqlQueryParameterAnalyzer.parametersCount("SELECT {foobar"));
assertEquals("Jdbc escape sequences are not supported yet", exception.getMessage());
}
public void testUnclosedMultilineComment() {
SQLException exception = expectThrows(SQLException.class, () -> SqlQueryParameterAnalyzer.parametersCount("SELECT /* * * * "));
assertEquals("Cannot parse given sql; unclosed /* comment", exception.getMessage());
}
public void testUnclosedSingleQuoteString() {
SQLException exception = expectThrows(SQLException.class, () -> SqlQueryParameterAnalyzer.parametersCount("SELECT ' '' '' "));
assertEquals("Cannot parse given sql; unclosed string", exception.getMessage());
}
}
| SqlQueryParameterAnalyzerTests |
java | apache__maven | impl/maven-core/src/main/java/org/apache/maven/lifecycle/internal/DependencyContext.java | {
"start": 1098,
"end": 1252
} | class ____ not part of any public api and can be changed or deleted without prior notice.
*
* @since 3.0
*/
// TODO From a concurrency perspective, this | is |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/vld/ValidatePolymSubTypeTest.java | {
"start": 2861,
"end": 7619
} | class ____ extends PolymorphicTypeValidator {
private static final long serialVersionUID = 1L;
@Override
public Validity validateBaseType(DatabindContext ctxt, JavaType baseType) {
return Validity.INDETERMINATE;
}
@Override
public Validity validateSubClassName(DatabindContext ctxt, JavaType baseType, String subClassName) {
return Validity.INDETERMINATE;
}
@Override
public Validity validateSubType(DatabindContext ctxt, JavaType baseType, JavaType subType) {
if (subType.hasRawClass(BadValue.class)) {
return Validity.DENIED;
}
if (subType.hasRawClass(GoodValue.class)) {
return Validity.ALLOWED;
}
// defaults to denied, then:
return Validity.INDETERMINATE; }
}
// // // Mappers with Default Typing
private final ObjectMapper MAPPER_DEF_TYPING_NAME_CHECK = jsonMapperBuilder()
.activateDefaultTyping(new SimpleNameBasedValidator())
.build();
private final ObjectMapper MAPPER_DEF_TYPING_CLASS_CHECK = jsonMapperBuilder()
.activateDefaultTyping(new SimpleNameBasedValidator())
.build();
// // // Mappers without Default Typing (explicit annotation needed)
private final ObjectMapper MAPPER_EXPLICIT_NAME_CHECK = jsonMapperBuilder()
.polymorphicTypeValidator(new SimpleNameBasedValidator())
.build();
private final ObjectMapper MAPPER_EXPLICIT_CLASS_CHECK = jsonMapperBuilder()
.polymorphicTypeValidator(new SimpleClassBasedValidator())
.build();
/*
/**********************************************************************
/* Test methods: default typing
/**********************************************************************
*/
// // With Name check
@Test
public void testWithDefaultTypingNameAccept() throws Exception
{
final BaseValue inputValue = new GoodValue();
DefTypeWrapper result = _roundTripDefault(MAPPER_DEF_TYPING_NAME_CHECK, inputValue);
assertEquals(inputValue, result.value);
}
@Test
public void testWithDefaultTypingNameDenyExplicit() throws Exception
{
_verifyBadDefaultValue(MAPPER_DEF_TYPING_NAME_CHECK);
}
@Test
public void testWithDefaultTypingNameDenyDefault() throws Exception
{
_verifyMehDefaultValue(MAPPER_DEF_TYPING_NAME_CHECK);
}
// // With Class check
@Test
public void testWithDefaultTypingClassAccept() throws Exception
{
final BaseValue inputValue = new GoodValue();
DefTypeWrapper result = _roundTripDefault(MAPPER_DEF_TYPING_CLASS_CHECK, inputValue);
assertEquals(inputValue, result.value);
}
@Test
public void testWithDefaultTypingClassDenyExplicit() throws Exception
{
_verifyBadDefaultValue(MAPPER_DEF_TYPING_CLASS_CHECK);
}
@Test
public void testWithDefaultTypingClassDenyDefault() throws Exception
{
_verifyMehDefaultValue(MAPPER_DEF_TYPING_CLASS_CHECK);
}
/*
/**********************************************************************
/* Test methods, annotated typing, full class
/**********************************************************************
*/
// // With Name
@Test
public void testWithAnnotationNameAccept() throws Exception
{
final BaseValue inputValue = new GoodValue();
AnnotatedWrapper result = _roundTripAnnotated(MAPPER_EXPLICIT_NAME_CHECK, inputValue);
assertEquals(inputValue, result.value);
}
@Test
public void testWithAnnotationNameDenyExplicit() throws Exception
{
_verifyBadAnnotatedValue(MAPPER_EXPLICIT_NAME_CHECK);
}
@Test
public void testWithAnnotationNameDenyDefault() throws Exception
{
_verifyMehAnnotatedValue(MAPPER_EXPLICIT_NAME_CHECK);
}
// // With Class
@Test
public void testWithAnnotationClassAccept() throws Exception
{
final BaseValue inputValue = new GoodValue();
AnnotatedWrapper result = _roundTripAnnotated(MAPPER_EXPLICIT_CLASS_CHECK, inputValue);
assertEquals(inputValue, result.value);
}
@Test
public void testWithAnnotationClassDenyExplicit() throws Exception
{
_verifyBadAnnotatedValue(MAPPER_EXPLICIT_CLASS_CHECK);
}
@Test
public void testWithAnnotationClassDenyDefault() throws Exception
{
_verifyMehAnnotatedValue(MAPPER_EXPLICIT_CLASS_CHECK);
}
/*
/**********************************************************************
/* Test methods, annotated typing, minimal | SimpleClassBasedValidator |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/basic/ExplicitTypeTest.java | {
"start": 591,
"end": 812
} | class ____ {
@Test
public void test(EntityManagerFactoryScope scope) {
scope.inTransaction( entityManager -> {
});
}
//tag::basic-type-annotation-example[]
@Entity(name = "Product")
public static | ExplicitTypeTest |
java | quarkusio__quarkus | integration-tests/smallrye-graphql/src/test/java/io/quarkus/it/smallrye/graphql/GreetingResourceTest.java | {
"start": 386,
"end": 1925
} | class ____ {
@Test
void testEndpoint() {
String helloRequest = getPayload("{\n" +
" greeting:hello {\n" +
" time\n" +
" message\n" +
" options{\n" +
" message\n" +
" }\n" +
" options2{\n" +
" message\n" +
" }\n" +
" }\n" +
" farewell:farewell{\n" +
" type\n" +
" }\n" +
"}");
given()
.when()
.accept(MEDIATYPE_JSON)
.contentType(MEDIATYPE_JSON)
.body(helloRequest)
.post("/graphql")
.then()
.statusCode(200)
.and()
.body(containsString("hello"))
.body(containsString("11:34"))
.body(containsString("Morning"))
.body(containsString("Farewell"));
}
@Test
void testError() {
String errorRequest = getPayload("mutation error{\n" +
" error \n" +
"}");
given()
.when()
.accept(MEDIATYPE_JSON)
.contentType(MEDIATYPE_JSON)
.body(errorRequest)
.post("/graphql")
.then()
.statusCode(200)
.and()
.body(containsString("No foo allowed"));
}
}
| GreetingResourceTest |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestUnnecessaryBlockingOnHistoryFileInfo.java | {
"start": 1864,
"end": 2154
} | class ____ created specifically to address the issue in
* MAPREDUCE-6684. In cases where there are two threads trying to load different
* jobs through job history file manager, one thread could be blocked by the
* other that is loading a huge job file, which is undesirable.
*
*/
public | is |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/plugins/UberModuleClassLoaderTests.java | {
"start": 9477,
"end": 13495
} | class ____]"));
// stable plugin loader delegates to parent for other packages
Class<?> c3 = loader.loadClass("q.OtherClass");
Object instance3 = c3.getConstructor().newInstance();
assertThat(instance3.toString(), equalTo("OtherClass"));
}
}
public void testMultipleJarSinglePackageLoadClass() throws Exception {
Path tempDir = createTempDir(getTestName());
Path jar1 = tempDir.resolve("my-jar-1.jar");
Path jar2 = tempDir.resolve("my-jar-2.jar");
Path jar3 = tempDir.resolve("my-jar-3.jar");
createMinimalJar(jar1, "p.FirstClass");
createMinimalJar(jar2, "p.SecondClass");
createMinimalJar(jar3, "p.ThirdClass");
try (UberModuleClassLoader loader = getLoader(List.of(jar1, jar2, jar3))) {
Class<?> c1 = loader.loadClass("p.FirstClass");
Object instance1 = c1.getConstructor().newInstance();
assertThat(instance1.toString(), equalTo("FirstClass"));
Class<?> c2 = loader.loadClass("p.SecondClass");
Object instance2 = c2.getConstructor().newInstance();
assertThat(instance2.toString(), equalTo("SecondClass"));
Class<?> c3 = loader.loadClass("p.ThirdClass");
Object instance3 = c3.getConstructor().newInstance();
assertThat(instance3.toString(), equalTo("ThirdClass"));
}
}
public void testSplitPackageJarLoadClass() throws Exception {
Path tempDir = createTempDir(getTestName());
Path jar1 = tempDir.resolve("my-jar-1.jar");
Path jar2 = tempDir.resolve("my-jar-2.jar");
Path jar3 = tempDir.resolve("my-jar-3.jar");
createMinimalJar(jar1, "p.a.FirstClass");
createMinimalJar(jar2, "p.split.SecondClass");
createMinimalJar(jar3, "p.split.ThirdClass");
try (UberModuleClassLoader loader = getLoader(List.of(jar1, jar2, jar3))) {
Class<?> c1 = loader.loadClass("p.a.FirstClass");
Object instance1 = c1.getConstructor().newInstance();
assertThat(instance1.toString(), equalTo("FirstClass"));
Class<?> c2 = loader.loadClass("p.split.SecondClass");
Object instance2 = c2.getConstructor().newInstance();
assertThat(instance2.toString(), equalTo("SecondClass"));
Class<?> c3 = loader.loadClass("p.split.ThirdClass");
Object instance3 = c3.getConstructor().newInstance();
assertThat(instance3.toString(), equalTo("ThirdClass"));
}
}
public void testPackagePerJarLoadClass() throws Exception {
Path tempDir = createTempDir(getTestName());
Path jar1 = tempDir.resolve("my-jar-1.jar");
Path jar2 = tempDir.resolve("my-jar-2.jar");
Path jar3 = tempDir.resolve("my-jar-3.jar");
createMinimalJar(jar1, "p.a.FirstClass");
createMinimalJar(jar2, "p.b.SecondClass");
createMinimalJar(jar3, "p.c.ThirdClass");
try (UberModuleClassLoader loader = getLoader(List.of(jar1, jar2, jar3))) {
Class<?> c1 = loader.loadClass("p.a.FirstClass");
Object instance1 = c1.getConstructor().newInstance();
assertThat(instance1.toString(), equalTo("FirstClass"));
Class<?> c2 = loader.loadClass("p.b.SecondClass");
Object instance2 = c2.getConstructor().newInstance();
assertThat(instance2.toString(), equalTo("SecondClass"));
Class<?> c3 = loader.loadClass("p.c.ThirdClass");
Object instance3 = c3.getConstructor().newInstance();
assertThat(instance3.toString(), equalTo("ThirdClass"));
}
}
public void testModuleDenyList() throws Exception {
Path topLevelDir = createTempDir(getTestName());
Path jar = topLevelDir.resolve("my-jar-with-resources.jar");
createSingleClassJar(jar, "p.MyImportingClass", """
package p;
import java.sql.ResultSet;
public | jar |
java | google__error-prone | check_api/src/main/java/com/google/errorprone/apply/FileSource.java | {
"start": 741,
"end": 822
} | interface ____ {
SourceFile readFile(String path) throws IOException;
}
| FileSource |
java | quarkusio__quarkus | extensions/panache/hibernate-orm-rest-data-panache/deployment/src/test/java/io/quarkus/hibernate/orm/rest/data/panache/deployment/openapi/AbstractEntity.java | {
"start": 226,
"end": 380
} | class ____<IdType extends Number> {
@Id
@GeneratedValue
private IdType id;
public IdType getId() {
return id;
}
}
| AbstractEntity |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/aot/hint/annotation/RegisterReflectionForBindingProcessorTests.java | {
"start": 3547,
"end": 3672
} | class ____ {
@RegisterReflectionForBinding
public void method() {
}
}
}
| SampleClassWithoutMethodLevelAnnotationAttribute |
java | grpc__grpc-java | xds/src/generated/thirdparty/grpc/io/envoyproxy/envoy/service/auth/v3/AuthorizationGrpc.java | {
"start": 136,
"end": 296
} | interface ____ performing authorization check on incoming
* requests to a networked service.
* </pre>
*/
@io.grpc.stub.annotations.GrpcGenerated
public final | for |
java | quarkusio__quarkus | test-framework/junit5-component/src/test/java/io/quarkus/test/component/paraminject/ParameterInjectionTest.java | {
"start": 1134,
"end": 1808
} | class ____ {
@ExtendWith(MyParamResolver.class)
@TestConfigProperty(key = "foo", value = "BAZ")
@Test
public void testParamsInjection(
// TestInfo should be ignored automatically
TestInfo testInfo,
// MyComponent is automatically a component
MyComponent myComponent,
// This would be normally resolved by QuarkusComponentTest but is annotated with @SkipInject
@SkipInject MyComponent anotherComponent,
// Inject unconfigured mock
@InjectMock Charlie charlie,
// Note that @SkipInject is redundant in this case because the Supplier | ParameterInjectionTest |
java | elastic__elasticsearch | libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/DerParser.java | {
"start": 5550,
"end": 10673
} | class ____ {
protected final int type;
protected final int length;
protected final byte[] value;
protected final int tag;
/**
* Construct a ASN.1 TLV. The TLV could be either a
* constructed or primitive entity.
* <p>
* The first byte in DER encoding is made of following fields:
* </p>
* <pre>
* -------------------------------------------------
* |Bit 8|Bit 7|Bit 6|Bit 5|Bit 4|Bit 3|Bit 2|Bit 1|
* -------------------------------------------------
* | Class | CF | + Type |
* -------------------------------------------------
* </pre>
* <ul>
* <li>Class: Universal, Application, Context or Private
* <li>CF: Constructed flag. If 1, the field is constructed.
* <li>Type: This is actually called tag in ASN.1. It
* indicates data type (Integer, String) or a construct
* (sequence, choice, set).
* </ul>
*
* @param tag Tag or Identifier
* @param length Length of the field
* @param value Encoded octet string for the field.
*/
Asn1Object(int tag, int length, byte[] value) {
this.tag = tag;
this.type = tag & 0x1F;
this.length = length;
this.value = value;
}
public int getType() {
return type;
}
public int getLength() {
return length;
}
public byte[] getValue() {
return value;
}
public boolean isConstructed() {
return (tag & DerParser.CONSTRUCTED) == DerParser.CONSTRUCTED;
}
/**
* For constructed field, return a parser for its content.
*
* @return A parser for the construct.
*/
public DerParser getParser() throws IOException {
if (isConstructed() == false) {
throw new IOException("Invalid DER: can't parse primitive entity"); //$NON-NLS-1$
}
return new DerParser(value);
}
/**
* Get the value as integer
*
* @return BigInteger
*/
public BigInteger getInteger() throws IOException {
if (type != Type.INTEGER) throw new IOException("Invalid DER: object is not integer"); //$NON-NLS-1$
return new BigInteger(value);
}
public String getString() throws IOException {
String encoding;
switch (type) {
case Type.OCTET_STRING:
// octet string is basically a byte array
return toHexString(value);
case Type.NUMERIC_STRING:
case Type.PRINTABLE_STRING:
case Type.VIDEOTEX_STRING:
case Type.IA5_STRING:
case Type.GRAPHIC_STRING:
case Type.ISO646_STRING:
case Type.GENERAL_STRING:
encoding = "ISO-8859-1"; //$NON-NLS-1$
break;
case Type.BMP_STRING:
encoding = "UTF-16BE"; //$NON-NLS-1$
break;
case Type.UTF8_STRING:
encoding = "UTF-8"; //$NON-NLS-1$
break;
case Type.UNIVERSAL_STRING:
throw new IOException("Invalid DER: can't handle UCS-4 string"); //$NON-NLS-1$
default:
throw new IOException("Invalid DER: object is not a string"); //$NON-NLS-1$
}
return new String(value, encoding);
}
public String getOid() throws IOException {
if (type != Type.OBJECT_OID) {
throw new IOException("Ivalid DER: object is not object OID");
}
StringBuilder sb = new StringBuilder(64);
switch (value[0] / 40) {
case 0 -> sb.append('0');
case 1 -> {
sb.append('1');
value[0] -= 40;
}
default -> {
sb.append('2');
value[0] -= 80;
}
}
int oidPart = 0;
for (int i = 0; i < length; i++) {
oidPart = (oidPart << 7) + (value[i] & 0x7F);
if ((value[i] & 0x80) == 0) {
sb.append('.');
sb.append(oidPart);
oidPart = 0;
}
}
return sb.toString();
}
}
private static final char[] HEX_DIGITS = "0123456789abcdef".toCharArray();
private static String toHexString(byte[] bytes) {
Objects.requireNonNull(bytes);
StringBuilder sb = new StringBuilder(2 * bytes.length);
for (int i = 0; i < bytes.length; i++) {
byte b = bytes[i];
sb.append(HEX_DIGITS[b >> 4 & 0xf]).append(HEX_DIGITS[b & 0xf]);
}
return sb.toString();
}
}
| Asn1Object |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java | {
"start": 2070,
"end": 2227
} | class ____ by the Map and Reduce tasks for merging
* both their memory and disk segments
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public | used |
java | spring-projects__spring-framework | framework-docs/src/main/java/org/springframework/docs/core/aot/hints/testing/SampleReflection.java | {
"start": 856,
"end": 1350
} | class ____ {
private final Log logger = LogFactory.getLog(SampleReflection.class);
public void performReflection() {
try {
Class<?> springVersion = ClassUtils.forName("org.springframework.core.SpringVersion", null);
Method getVersion = ClassUtils.getMethod(springVersion, "getVersion");
String version = (String) getVersion.invoke(null);
logger.info("Spring version: " + version);
}
catch (Exception exc) {
logger.error("reflection failed", exc);
}
}
}
| SampleReflection |
java | apache__camel | components/camel-aws/camel-aws2-s3/src/test/java/org/apache/camel/component/aws2/s3/AWS2S3ConsumerHealthCheckStaticCredsIT.java | {
"start": 1656,
"end": 4180
} | class ____ extends CamelTestSupport {
@RegisterExtension
public static AWSService service = AWSServiceFactory.createSingletonS3Service();
CamelContext context;
@Override
protected CamelContext createCamelContext() throws Exception {
context = super.createCamelContext();
context.getPropertiesComponent().setLocation("ref:prop");
AWS2S3Component component = new AWS2S3Component(context);
component.getConfiguration().setAmazonS3Client(AWSSDKClientUtils.newS3Client());
component.init();
context.addComponent("aws2-s3", component);
// install health check manually (yes a bit cumbersome)
HealthCheckRegistry registry = new DefaultHealthCheckRegistry();
registry.setCamelContext(context);
Object hc = registry.resolveById("context");
registry.register(hc);
hc = registry.resolveById("routes");
registry.register(hc);
hc = registry.resolveById("consumers");
registry.register(hc);
context.getCamelContextExtension().addContextPlugin(HealthCheckRegistry.class, registry);
return context;
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("aws2-s3://bucket1?moveAfterRead=true®ion=l&secretKey=l&accessKey=k&destinationBucket=bucket1&autoCreateBucket=false")
.startupOrder(2).log("${body}").routeId("test-health-it");
}
};
}
@Test
public void testConnectivity() {
Collection<HealthCheck.Result> res = HealthCheckHelper.invokeLiveness(context);
boolean up = res.stream().allMatch(r -> r.getState().equals(HealthCheck.State.UP));
Assertions.assertTrue(up, "liveness check");
// health-check readiness should be down
await().atMost(20, TimeUnit.SECONDS).untilAsserted(() -> {
Collection<HealthCheck.Result> res2 = HealthCheckHelper.invokeReadiness(context);
boolean down = res2.stream().allMatch(r -> r.getState().equals(HealthCheck.State.DOWN));
boolean containsAws2S3HealthCheck = res2.stream()
.anyMatch(result -> result.getCheck().getId().startsWith("consumer:test-health-it"));
Assertions.assertTrue(down, "liveness check");
Assertions.assertTrue(containsAws2S3HealthCheck, "aws2-s3 check");
});
}
}
| AWS2S3ConsumerHealthCheckStaticCredsIT |
java | apache__kafka | connect/runtime/src/test/java/org/apache/kafka/connect/connector/policy/BaseConnectorClientConfigOverridePolicyTest.java | {
"start": 1143,
"end": 2446
} | class ____ {
protected abstract ConnectorClientConfigOverridePolicy policyToTest();
protected void testValidOverride(Map<String, Object> clientConfig) {
List<ConfigValue> configValues = configValues(clientConfig);
assertNoError(configValues);
}
protected void testInvalidOverride(Map<String, Object> clientConfig) {
List<ConfigValue> configValues = configValues(clientConfig);
assertError(configValues);
}
private List<ConfigValue> configValues(Map<String, Object> clientConfig) {
ConnectorClientConfigRequest connectorClientConfigRequest = new ConnectorClientConfigRequest(
"test",
ConnectorType.SOURCE,
SampleSourceConnector.class,
clientConfig,
ConnectorClientConfigRequest.ClientType.PRODUCER);
return policyToTest().validate(connectorClientConfigRequest);
}
protected void assertNoError(List<ConfigValue> configValues) {
assertTrue(configValues.stream().allMatch(configValue -> configValue.errorMessages().isEmpty()));
}
protected void assertError(List<ConfigValue> configValues) {
assertTrue(configValues.stream().anyMatch(configValue -> !configValue.errorMessages().isEmpty()));
}
}
| BaseConnectorClientConfigOverridePolicyTest |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java | {
"start": 2341,
"end": 5252
} | class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(FederationUtil.class);
private FederationUtil() {
// Utility Class
}
/**
* Get a JMX data from a web endpoint.
*
* @param beanQuery JMX bean.
* @param webAddress Web address of the JMX endpoint.
* @param connectionFactory to open http/https connection.
* @param scheme to use for URL connection.
* @return JSON with the JMX data
*/
public static JSONArray getJmx(String beanQuery, String webAddress,
URLConnectionFactory connectionFactory, String scheme) {
JSONArray ret = null;
BufferedReader reader = null;
try {
String host = webAddress;
int port = -1;
if (webAddress.indexOf(":") > 0) {
String[] webAddressSplit = webAddress.split(":");
host = webAddressSplit[0];
port = Integer.parseInt(webAddressSplit[1]);
}
URL jmxURL = new URL(scheme, host, port, "/jmx?qry=" + beanQuery);
LOG.debug("JMX URL: {}", jmxURL);
// Create a URL connection
URLConnection conn = connectionFactory.openConnection(
jmxURL, UserGroupInformation.isSecurityEnabled());
conn.setConnectTimeout(5 * 1000);
conn.setReadTimeout(5 * 1000);
InputStream in = conn.getInputStream();
InputStreamReader isr = new InputStreamReader(in, "UTF-8");
reader = new BufferedReader(isr);
StringBuilder sb = new StringBuilder();
String line = null;
while ((line = reader.readLine()) != null) {
sb.append(line);
}
String jmxOutput = sb.toString();
// Parse JSON
JSONObject json = new JSONObject(jmxOutput);
ret = json.getJSONArray("beans");
} catch (IOException e) {
LOG.error("Cannot read JMX bean {} from server {}",
beanQuery, webAddress, e);
} catch (JSONException e) {
// We shouldn't need more details if the JSON parsing fails.
LOG.error("Cannot parse JMX output for {} from server {}: {}",
beanQuery, webAddress, e.getMessage());
} catch (Exception e) {
LOG.error("Cannot parse JMX output for {} from server {}",
beanQuery, webAddress, e);
} finally {
if (reader != null) {
try {
reader.close();
} catch (IOException e) {
LOG.error("Problem closing {}", webAddress, e);
}
}
}
return ret;
}
/**
* Fetch the Hadoop version string for this jar.
*
* @return Hadoop version string, e.g., 3.0.1.
*/
public static String getVersion() {
return VersionInfo.getVersion();
}
/**
* Fetch the build/compile information for this jar.
*
* @return String Compilation info.
*/
public static String getCompileInfo() {
return VersionInfo.getDate() + " by " + VersionInfo.getUser() + " from "
+ VersionInfo.getBranch();
}
/**
* Create an instance of an | FederationUtil |
java | elastic__elasticsearch | plugins/repository-hdfs/src/yamlRestTest/java/org/elasticsearch/repositories/hdfs/SecureRepositoryHdfsClientYamlTestSuiteIT.java | {
"start": 1554,
"end": 3034
} | class ____ extends ESClientYamlSuiteTestCase {
public static Krb5kDcContainer krb5Fixture = new Krb5kDcContainer();
public static HdfsFixture hdfsFixture = new HdfsFixture().withKerberos(() -> krb5Fixture.getPrincipal(), () -> krb5Fixture.getKeytab());
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
.distribution(DistributionType.DEFAULT)
.plugin("repository-hdfs")
.setting("xpack.license.self_generated.type", "trial")
.setting("xpack.security.enabled", "false")
.systemProperty("java.security.krb5.conf", () -> krb5Fixture.getConfPath().toString())
.configFile("repository-hdfs/krb5.conf", Resource.fromString(() -> krb5Fixture.getConf()))
.configFile("repository-hdfs/krb5.keytab", Resource.fromFile(() -> krb5Fixture.getEsKeytab()))
.build();
@ClassRule
public static TestRule ruleChain = RuleChain.outerRule(krb5Fixture).around(hdfsFixture).around(cluster);
public SecureRepositoryHdfsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
super(testCandidate);
}
@Override
protected String getTestRestCluster() {
return cluster.getHttpAddresses();
}
@ParametersFactory
public static Iterable<Object[]> parameters() throws Exception {
return createParameters(Map.of("secure_hdfs_port", hdfsFixture.getPort()), "secure_hdfs_repository");
}
}
| SecureRepositoryHdfsClientYamlTestSuiteIT |
java | apache__flink | flink-core-api/src/main/java/org/apache/flink/api/common/state/MapState.java | {
"start": 974,
"end": 1808
} | interface ____ partitioned key-value state. The key-value pair can be added, updated
* and retrieved.
*
* <p>The state is accessed and modified by user functions, and checkpointed consistently by the
* system as part of the distributed snapshots.
*
* <p>The state is only accessible by functions applied on a {@code KeyedStream}. The key is
* automatically supplied by the system, so the function always sees the value mapped to the key of
* the current element. That way, the system can handle stream and state partitioning consistently
* together.
*
* <p>The user value could be null, but change log state backend is not compatible with the user
* value is null, see FLINK-38144 for more details.
*
* @param <UK> Type of the keys in the state.
* @param <UV> Type of the values in the state.
*/
@PublicEvolving
public | for |
java | spring-projects__spring-framework | spring-webmvc/src/test/java/org/springframework/web/servlet/handler/HandlerMappingIntrospectorTests.java | {
"start": 14265,
"end": 14971
} | class ____ {
@Bean
public RouterFunctionMapping routerFunctionMapping() {
RouterFunctionMapping mapping = new RouterFunctionMapping();
mapping.setOrder(1);
return mapping;
}
@Bean
public RequestMappingHandlerMapping handlerMapping() {
RequestMappingHandlerMapping mapping = new RequestMappingHandlerMapping();
mapping.setOrder(2);
return mapping;
}
@Bean
public TestController testController() {
return new TestController();
}
@Bean
public RouterFunction<?> routerFunction() {
return RouterFunctions.route().GET("/fn-path", request -> ServerResponse.ok().build()).build();
}
}
@CrossOrigin("http://localhost:9000")
@Controller
private static | TestConfig |
java | spring-projects__spring-boot | module/spring-boot-zipkin/src/test/java/org/springframework/boot/zipkin/autoconfigure/ZipkinHttpSenderTests.java | {
"start": 1226,
"end": 1808
} | class ____ {
protected BytesMessageSender sender;
abstract BytesMessageSender createSender();
@BeforeEach
void beforeEach() {
this.sender = createSender();
}
@AfterEach
void afterEach() throws IOException {
this.sender.close();
}
@Test
void sendShouldThrowIfCloseWasCalled() throws IOException {
this.sender.close();
assertThatExceptionOfType(ClosedSenderException.class)
.isThrownBy(() -> this.sender.send(Collections.emptyList()));
}
protected byte[] toByteArray(String input) {
return input.getBytes(StandardCharsets.UTF_8);
}
}
| ZipkinHttpSenderTests |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/metadata/SubThing.java | {
"start": 244,
"end": 405
} | class ____ extends Thing {
private String blah;
public String getBlah() {
return blah;
}
public void setBlah(String blah) {
this.blah = blah;
}
}
| SubThing |
java | spring-projects__spring-boot | build-plugin/spring-boot-maven-plugin/src/main/java/org/springframework/boot/maven/Docker.java | {
"start": 7111,
"end": 9210
} | class ____ {
private @Nullable String username;
private @Nullable String password;
private @Nullable String url;
private @Nullable String email;
private @Nullable String token;
public DockerRegistry() {
}
public DockerRegistry(@Nullable String username, @Nullable String password, @Nullable String url,
@Nullable String email) {
this.username = username;
this.password = password;
this.url = url;
this.email = email;
}
public DockerRegistry(String token) {
this.token = token;
}
/**
* The username that will be used for user authentication to the registry.
* @return the username
*/
public @Nullable String getUsername() {
return this.username;
}
void setUsername(@Nullable String username) {
this.username = username;
}
/**
* The password that will be used for user authentication to the registry.
* @return the password
*/
public @Nullable String getPassword() {
return this.password;
}
void setPassword(@Nullable String password) {
this.password = password;
}
/**
* The email address that will be used for user authentication to the registry.
* @return the email address
*/
public @Nullable String getEmail() {
return this.email;
}
void setEmail(@Nullable String email) {
this.email = email;
}
/**
* The URL of the registry.
* @return the registry URL
*/
@Nullable String getUrl() {
return this.url;
}
void setUrl(@Nullable String url) {
this.url = url;
}
/**
* The token that will be used for token authentication to the registry.
* @return the authentication token
*/
public @Nullable String getToken() {
return this.token;
}
void setToken(@Nullable String token) {
this.token = token;
}
boolean isEmpty() {
return this.username == null && this.password == null && this.url == null && this.email == null
&& this.token == null;
}
boolean hasTokenAuth() {
return this.token != null;
}
boolean hasUserAuth() {
return this.username != null && this.password != null;
}
}
}
| DockerRegistry |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.