language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | spring-projects__spring-boot | core/spring-boot-test/src/test/java/org/springframework/boot/test/system/OutputCaptureTests.java | {
"start": 5666,
"end": 5866
} | class ____ extends PrintStream {
TestPrintStream() {
super(new ByteArrayOutputStream());
}
@Override
public String toString() {
return this.out.toString();
}
}
static | TestPrintStream |
java | apache__logging-log4j2 | log4j-core-test/src/test/java/org/apache/logging/log4j/core/appender/SocketAppenderTest.java | {
"start": 10768,
"end": 12659
} | class ____ extends Thread {
private final DatagramSocket sock;
private boolean shutdown = false;
private Thread thread;
private final CountDownLatch latch = new CountDownLatch(1);
private volatile int count = 0;
private final BlockingQueue<LogEvent> queue;
private final ObjectMapper objectMapper = new Log4jJsonObjectMapper();
public UdpSocketTestServer() throws IOException {
this.sock = new DatagramSocket(PORT);
this.queue = new ArrayBlockingQueue<>(10);
}
public void reset() {
queue.clear();
count = 0;
}
public void shutdown() {
this.shutdown = true;
thread.interrupt();
try {
thread.join(100);
} catch (final InterruptedException ie) {
System.out.println("Unable to stop server");
}
}
@Override
public void run() {
this.thread = Thread.currentThread();
final byte[] bytes = new byte[4096];
final DatagramPacket packet = new DatagramPacket(bytes, bytes.length);
try {
while (!shutdown) {
latch.countDown();
sock.receive(packet);
++count;
final LogEvent event = objectMapper.readValue(packet.getData(), Log4jLogEvent.class);
queue.add(event);
}
} catch (final Throwable e) {
e.printStackTrace();
if (!shutdown) {
Throwables.rethrow(e);
}
}
}
public int getCount() {
return count;
}
public BlockingQueue<LogEvent> getQueue() {
return queue;
}
}
public static | UdpSocketTestServer |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/operators/AbstractUdfOperator.java | {
"start": 2478,
"end": 4787
} | class ____ the user
* code.
*
* @return The object with the user function for this operator.
* @see org.apache.flink.api.common.operators.Operator#getUserCodeWrapper()
*/
@Override
public UserCodeWrapper<FT> getUserCodeWrapper() {
return userFunction;
}
// --------------------------------------------------------------------------------------------
/**
* Returns the input, or null, if none is set.
*
* @return The broadcast input root operator.
*/
public Map<String, Operator<?>> getBroadcastInputs() {
return this.broadcastInputs;
}
/**
* Binds the result produced by a plan rooted at {@code root} to a variable used by the UDF
* wrapped in this operator.
*
* @param root The root of the plan producing this input.
*/
public void setBroadcastVariable(String name, Operator<?> root) {
if (name == null) {
throw new IllegalArgumentException("The broadcast input name may not be null.");
}
if (root == null) {
throw new IllegalArgumentException(
"The broadcast input root operator may not be null.");
}
this.broadcastInputs.put(name, root);
}
/**
* Clears all previous broadcast inputs and binds the given inputs as broadcast variables of
* this operator.
*
* @param inputs The {@code<name, root>} pairs to be set as broadcast inputs.
*/
public <T> void setBroadcastVariables(Map<String, Operator<T>> inputs) {
this.broadcastInputs.clear();
this.broadcastInputs.putAll(inputs);
}
// --------------------------------------------------------------------------------------------
/**
* Gets the number of inputs for this operator.
*
* @return The number of inputs for this operator.
*/
public abstract int getNumberOfInputs();
/**
* Gets the column numbers of the key fields in the input records for the given input.
*
* @return The column numbers of the key fields.
*/
public abstract int[] getKeyColumns(int inputNum);
// --------------------------------------------------------------------------------------------
/**
* Generic utility function that wraps a single | for |
java | quarkusio__quarkus | test-framework/junit5/src/main/java/io/quarkus/test/junit/classloading/FacadeClassLoader.java | {
"start": 12027,
"end": 16317
} | class ____ found exceptions
// as the servoce loader tries to instantiate things in a nobbled loader. Instead, do it in a crude, safe, way by looking for the resource files and reading them.
try {
Enumeration<URL> declaredExtensions = annotationLoader
.getResources("META-INF/services/org.junit.jupiter.api.extension.Extension");
while (declaredExtensions.hasMoreElements()) {
URL url = declaredExtensions.nextElement();
try (InputStream in = url.openStream()) {
String contents = new String(in.readAllBytes(), StandardCharsets.UTF_8).trim();
if (QuarkusTestExtension.class.getName()
.equals(contents)) {
isServiceLoaderMechanism = true;
}
}
}
} catch (IOException e) {
log.debug("Could not check service loader registrations: " + e);
throw new RuntimeException(e);
}
if (profileNames != null) {
this.profiles = new HashMap<>();
profileNames.forEach((k, profileName) -> {
Class<?> profile;
if (profileName != null) {
try {
profile = peekingClassLoader.loadClass(profileName);
} catch (ClassNotFoundException e1) {
throw new RuntimeException(e1);
}
this.profiles.put(k, profile);
}
});
} else {
// We set it to null so we know not to look in it
this.profiles = null;
}
// In the case where a QuarkusMainTest is present, but not a QuarkusTest, tests will be loaded and run using
// the parent classloader. In continuous testing mode, that will be a Quarkus classloader and it will not have
// the test config on it, so get that classloader test-ready.
// It would be nice to do this without the guard, but doing this on the normal path causes us to write something we don't want
if (isContinuousTesting) {
try {
initialiseTestConfig(parent);
} catch (ClassNotFoundException | InvocationTargetException | NoSuchMethodException | InstantiationException
| IllegalAccessException e) {
throw new RuntimeException(e);
}
}
facadeClassLoaderProviders = new ArrayList<>();
ServiceLoader<FacadeClassLoaderProvider> loader = ServiceLoader.load(FacadeClassLoaderProvider.class,
FacadeClassLoader.class.getClassLoader());
loader.forEach(facadeClassLoaderProviders::add);
}
@Override
public Class<?> loadClass(String name) throws ClassNotFoundException {
log.debugf("Facade classloader loading %s", name);
if (peekingClassLoader == null) {
throw new RuntimeException("Attempted to load classes with a closed classloader: " + this);
}
boolean isQuarkusTest = false;
boolean isIntegrationTest = false;
Class<?> inspectionClass = null;
// If the service loader mechanism is being used, QuarkusTestExtension gets loaded before any extensions which use it. We need to make sure it's on the right classloader.
if (isServiceLoaderMechanism && (name.equals(QuarkusTestExtension.class.getName()))) {
try {
// We don't have enough information to make a runtime classloader yet, but we can make a curated application and a base classloader
QuarkusClassLoader runtimeClassLoader = getOrCreateBaseClassLoader(getProfileKey(null), null);
return runtimeClassLoader.loadClass(name);
} catch (AppModelResolverException | BootstrapException | IOException e) {
throw new RuntimeException(e);
}
}
try {
Class<?> profile = null;
if (isContinuousTesting && !isServiceLoaderMechanism) {
isQuarkusTest = quarkusTestClasses.contains(name);
profile = profiles.get(name);
// In continuous testing, only load an inspection version of the | not |
java | apache__camel | components/camel-openstack/src/main/java/org/apache/camel/component/openstack/glance/GlanceEndpoint.java | {
"start": 1609,
"end": 4137
} | class ____ extends AbstractOpenstackEndpoint {
@UriPath
@Metadata(required = true)
private String host;
@UriParam(defaultValue = "default")
private String domain = "default";
@UriParam
@Metadata(required = true)
private String project;
@UriParam
private String operation;
@UriParam
@Metadata(required = true, secret = true)
private String username;
@UriParam
@Metadata(required = true, secret = true)
private String password;
@UriParam
private Config config;
@UriParam(defaultValue = V3, enums = "V2,V3")
private String apiVersion = V3;
public GlanceEndpoint(String uri, GlanceComponent component) {
super(uri, component);
}
@Override
public Producer createProducer() throws Exception {
return new GlanceProducer(this, createClient());
}
@Override
public String getDomain() {
return domain;
}
/**
* Authentication domain
*/
public void setDomain(String domain) {
this.domain = domain;
}
@Override
public String getProject() {
return project;
}
/**
* The project ID
*/
public void setProject(String project) {
this.project = project;
}
@Override
public String getOperation() {
return operation;
}
/**
* The operation to do
*/
public void setOperation(String operation) {
this.operation = operation;
}
@Override
public String getUsername() {
return username;
}
/**
* OpenStack username
*/
public void setUsername(String username) {
this.username = username;
}
@Override
public String getPassword() {
return password;
}
/**
* OpenStack password
*/
public void setPassword(String password) {
this.password = password;
}
@Override
public String getHost() {
return host;
}
/**
* OpenStack host url
*/
public void setHost(String host) {
this.host = host;
}
@Override
public Config getConfig() {
return config;
}
/**
* OpenStack configuration
*/
public void setConfig(Config config) {
this.config = config;
}
@Override
public String getApiVersion() {
return apiVersion;
}
/**
* OpenStack API version
*/
public void setApiVersion(String apiVersion) {
this.apiVersion = apiVersion;
}
}
| GlanceEndpoint |
java | spring-projects__spring-boot | module/spring-boot-restclient/src/test/java/org/springframework/boot/restclient/RestTemplateBuilderTests.java | {
"start": 21953,
"end": 22016
} | class ____ extends RestTemplate {
}
static | RestTemplateSubclass |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/test/AlibTest.java | {
"start": 150,
"end": 681
} | class ____ extends TestCase {
protected DruidDataSource dataSource;
protected void setUp() throws Exception {
dataSource = new DruidDataSource();
dataSource.setUrl("jdbc:mysql://127.0.0.1:8507");
dataSource.setUsername("root");
dataSource.setPassword("root");
}
protected void tearDown() throws Exception {
dataSource.close();
}
public void test_for_alib() throws Exception {
Connection conn = dataSource.getConnection();
conn.close();
}
}
| AlibTest |
java | alibaba__nacos | naming/src/main/java/com/alibaba/nacos/naming/controllers/v3/ClientControllerV3.java | {
"start": 2309,
"end": 6335
} | class ____ {
private final ClientManager clientManager;
private final ClientService clientServiceV2Impl;
public ClientControllerV3(ClientManager clientManager, ClientService clientServiceV2Impl) {
this.clientManager = clientManager;
this.clientServiceV2Impl = clientServiceV2Impl;
}
/**
* Query all clients.
*/
@GetMapping("/list")
@Secured(action = ActionTypes.READ, apiType = ApiType.ADMIN_API)
public Result<List<String>> getClientList() {
return Result.success(clientServiceV2Impl.getClientList());
}
/**
* Query client by clientId.
*/
@GetMapping()
@Secured(action = ActionTypes.READ, apiType = ApiType.ADMIN_API)
public Result<ClientSummaryInfo> getClientDetail(@RequestParam("clientId") String clientId)
throws NacosApiException {
checkClientId(clientId);
return Result.success(clientServiceV2Impl.getClientDetail(clientId));
}
/**
* Query the services registered by the specified client.
*/
@GetMapping("/publish/list")
@Secured(action = ActionTypes.READ, apiType = ApiType.ADMIN_API)
public Result<List<ClientServiceInfo>> getPublishedServiceList(@RequestParam("clientId") String clientId)
throws NacosApiException {
checkClientId(clientId);
return Result.success(clientServiceV2Impl.getPublishedServiceList(clientId));
}
/**
* Query the services to which the specified client subscribes.
*/
@GetMapping("/subscribe/list")
@Secured(action = ActionTypes.READ, apiType = ApiType.ADMIN_API)
public Result<List<ClientServiceInfo>> getSubscribeServiceList(@RequestParam("clientId") String clientId)
throws NacosApiException {
checkClientId(clientId);
return Result.success(clientServiceV2Impl.getSubscribeServiceList(clientId));
}
/**
* Query the clients that have registered the specified service.
*/
@GetMapping("/service/publisher/list")
@Secured(action = ActionTypes.READ, apiType = ApiType.ADMIN_API)
public Result<List<ClientPublisherInfo>> getPublishedClientList(ClientServiceForm clientServiceForm)
throws NacosApiException {
clientServiceForm.validate();
return Result.success(clientServiceV2Impl.getPublishedClientList(clientServiceForm.getNamespaceId(),
clientServiceForm.getGroupName(), clientServiceForm.getServiceName(), clientServiceForm.getIp(),
clientServiceForm.getPort()));
}
/**
* Query the clients that are subscribed to the specified service.
*/
@GetMapping("/service/subscriber/list")
@Secured(action = ActionTypes.READ, apiType = ApiType.ADMIN_API)
public Result<List<ClientSubscriberInfo>> getSubscribeClientList(ClientServiceForm clientServiceForm)
throws NacosApiException {
clientServiceForm.validate();
return Result.success(clientServiceV2Impl.getSubscribeClientList(clientServiceForm.getNamespaceId(),
clientServiceForm.getGroupName(), clientServiceForm.getServiceName(), clientServiceForm.getIp(),
clientServiceForm.getPort()));
}
/**
* Query the responsible server for a given client based on its IP and port.
*/
@GetMapping("/distro")
@Secured(resource = UtilsAndCommons.CLIENT_CONTROLLER_V3_ADMIN_PATH, action = ActionTypes.READ, apiType = ApiType.ADMIN_API)
public Result<ObjectNode> getResponsibleServer4Client(@RequestParam String ip, @RequestParam String port) {
return Result.success(clientServiceV2Impl.getResponsibleServer4Client(ip, port));
}
private void checkClientId(String clientId) throws NacosApiException {
if (!clientManager.contains(clientId)) {
throw new NacosApiException(HttpStatus.NOT_FOUND.value(), ErrorCode.RESOURCE_NOT_FOUND,
"clientId [ " + clientId + " ] not exist");
}
}
} | ClientControllerV3 |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReceivedDeletedBlocks.java | {
"start": 963,
"end": 1981
} | class ____ {
final DatanodeStorage storage;
private final ReceivedDeletedBlockInfo[] blocks;
/**
* @deprecated Use {@link #getStorage()} instead
*/
@Deprecated
public String getStorageID() {
return storage.getStorageID();
}
public DatanodeStorage getStorage() {
return storage;
}
public ReceivedDeletedBlockInfo[] getBlocks() {
return blocks;
}
/**
* @deprecated Use {@link #StorageReceivedDeletedBlocks(
* DatanodeStorage, ReceivedDeletedBlockInfo[])} instead
*/
@Deprecated
public StorageReceivedDeletedBlocks(final String storageID,
final ReceivedDeletedBlockInfo[] blocks) {
this.storage = new DatanodeStorage(storageID);
this.blocks = blocks;
}
public StorageReceivedDeletedBlocks(final DatanodeStorage storage,
final ReceivedDeletedBlockInfo[] blocks) {
this.storage = storage;
this.blocks = blocks;
}
@Override
public String toString() {
return storage + Arrays.toString(blocks);
}
}
| StorageReceivedDeletedBlocks |
java | netty__netty | transport/src/main/java/io/netty/channel/pool/FixedChannelPool.java | {
"start": 17808,
"end": 18686
} | class ____ implements Runnable {
@Override
public final void run() {
assert executor.inEventLoop();
long nanoTime = System.nanoTime();
for (;;) {
AcquireTask task = pendingAcquireQueue.peek();
// Compare nanoTime as descripted in the javadocs of System.nanoTime()
//
// See https://docs.oracle.com/javase/7/docs/api/java/lang/System.html#nanoTime()
// See https://github.com/netty/netty/issues/3705
if (task == null || nanoTime - task.expireNanoTime < 0) {
break;
}
pendingAcquireQueue.remove();
--pendingAcquireCount;
onTimeout(task);
}
}
public abstract void onTimeout(AcquireTask task);
}
private | TimeoutTask |
java | elastic__elasticsearch | test/external-modules/error-query/src/test/java/org/elasticsearch/test/errorquery/ErrorQueryBuilderTests.java | {
"start": 950,
"end": 2639
} | class ____ extends AbstractQueryTestCase<ErrorQueryBuilder> {
@Override
protected Collection<Class<? extends Plugin>> getPlugins() {
return Arrays.asList(ErrorQueryPlugin.class);
}
@Override
protected ErrorQueryBuilder doCreateTestQueryBuilder() {
int numIndex = randomIntBetween(0, 5);
List<IndexError> indices = new ArrayList<>();
for (int i = 0; i < numIndex; i++) {
String indexName = randomAlphaOfLengthBetween(5, 30);
int numShards = randomIntBetween(0, 3);
int[] shardIds = numShards > 0 ? new int[numShards] : null;
for (int j = 0; j < numShards; j++) {
shardIds[j] = j;
}
String message = randomBoolean() ? "" : randomAlphaOfLengthBetween(5, 100);
indices.add(new IndexError(indexName, shardIds, randomFrom(IndexError.ERROR_TYPE.values()), message, 0));
}
return new ErrorQueryBuilder(indices);
}
@Override
protected void doAssertLuceneQuery(ErrorQueryBuilder queryBuilder, Query query, SearchExecutionContext context) throws IOException {
assertEquals(new MatchAllDocsQuery(), query);
}
@Override
public void testCacheability() throws IOException {
ErrorQueryBuilder queryBuilder = createTestQueryBuilder();
SearchExecutionContext context = createSearchExecutionContext();
QueryBuilder rewriteQuery = rewriteQuery(queryBuilder, new SearchExecutionContext(context));
assertNotNull(rewriteQuery.toQuery(context));
assertFalse("query should not be cacheable: " + queryBuilder.toString(), context.isCacheable());
}
}
| ErrorQueryBuilderTests |
java | apache__kafka | connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/InternalRequestSignatureTest.java | {
"start": 2224,
"end": 7110
} | class ____ {
private static final byte[] REQUEST_BODY =
"[{\"config\":\"value\"},{\"config\":\"other_value\"}]".getBytes();
private static final String SIGNATURE_ALGORITHM = "HmacSHA256";
private static final SecretKey KEY = new SecretKeySpec(
new byte[] {
109, 116, -111, 49, -94, 25, -103, 44, -99, -118, 53, -69, 87, -124, 5, 48,
89, -105, -2, 58, -92, 87, 67, 49, -125, -79, -39, -126, -51, -53, -85, 57
}, "HmacSHA256"
);
private static final byte[] SIGNATURE = new byte[] {
42, -3, 127, 57, 43, 49, -51, -43, 72, -62, -10, 120, 123, 125, 26, -65,
36, 72, 86, -71, -32, 13, -8, 115, 85, 73, -65, -112, 6, 68, 41, -50
};
private static final String ENCODED_SIGNATURE = Base64.getEncoder().encodeToString(SIGNATURE);
private final Crypto crypto = Crypto.SYSTEM;
@Test
public void fromHeadersShouldReturnNullOnNullHeaders() {
assertNull(InternalRequestSignature.fromHeaders(crypto, REQUEST_BODY, null));
}
@Test
public void fromHeadersShouldReturnNullIfSignatureHeaderMissing() {
assertNull(InternalRequestSignature.fromHeaders(crypto, REQUEST_BODY, internalRequestHeaders(null, SIGNATURE_ALGORITHM)));
}
@Test
public void fromHeadersShouldReturnNullIfSignatureAlgorithmHeaderMissing() {
assertNull(InternalRequestSignature.fromHeaders(crypto, REQUEST_BODY, internalRequestHeaders(ENCODED_SIGNATURE, null)));
}
@Test
public void fromHeadersShouldThrowExceptionOnInvalidSignatureAlgorithm() {
assertThrows(BadRequestException.class, () -> InternalRequestSignature.fromHeaders(crypto, REQUEST_BODY,
internalRequestHeaders(ENCODED_SIGNATURE, "doesn'texist")));
}
@Test
public void fromHeadersShouldThrowExceptionOnInvalidBase64Signature() {
assertThrows(BadRequestException.class, () -> InternalRequestSignature.fromHeaders(crypto, REQUEST_BODY,
internalRequestHeaders("not valid base 64", SIGNATURE_ALGORITHM)));
}
@Test
public void fromHeadersShouldReturnNonNullResultOnValidSignatureAndSignatureAlgorithm() {
InternalRequestSignature signature =
InternalRequestSignature.fromHeaders(crypto, REQUEST_BODY, internalRequestHeaders(ENCODED_SIGNATURE, SIGNATURE_ALGORITHM));
assertNotNull(signature);
assertNotNull(signature.keyAlgorithm());
}
@Test
public void addToRequestShouldThrowExceptionOnInvalidSignatureAlgorithm() throws NoSuchAlgorithmException {
Request request = mock(Request.class);
Crypto crypto = mock(Crypto.class);
when(crypto.mac(anyString())).thenThrow(new NoSuchAlgorithmException("doesn'texist"));
assertThrows(ConnectException.class, () -> InternalRequestSignature.addToRequest(crypto, KEY, REQUEST_BODY, "doesn'texist", request));
}
@Test
public void addToRequestShouldAddHeadersOnValidSignatureAlgorithm() {
HttpClient httpClient = new HttpClient();
Request request = httpClient.newRequest(URI.create("http://localhost"));
InternalRequestSignature.addToRequest(crypto, KEY, REQUEST_BODY, SIGNATURE_ALGORITHM, request);
assertEquals(ENCODED_SIGNATURE,
request.getHeaders().get(InternalRequestSignature.SIGNATURE_HEADER),
"Request should have valid base 64-encoded signature added as header");
assertEquals(SIGNATURE_ALGORITHM,
request.getHeaders().get(InternalRequestSignature.SIGNATURE_ALGORITHM_HEADER),
"Request should have provided signature algorithm added as header");
}
@Test
public void testSignatureValidation() throws Exception {
Mac mac = Mac.getInstance(SIGNATURE_ALGORITHM);
InternalRequestSignature signature = new InternalRequestSignature(REQUEST_BODY, mac, SIGNATURE);
assertTrue(signature.isValid(KEY));
signature = InternalRequestSignature.fromHeaders(crypto, REQUEST_BODY, internalRequestHeaders(ENCODED_SIGNATURE, SIGNATURE_ALGORITHM));
assertTrue(signature.isValid(KEY));
signature = new InternalRequestSignature("[{\"different_config\":\"different_value\"}]".getBytes(), mac, SIGNATURE);
assertFalse(signature.isValid(KEY));
signature = new InternalRequestSignature(REQUEST_BODY, mac, "bad signature".getBytes());
assertFalse(signature.isValid(KEY));
}
private static HttpHeaders internalRequestHeaders(String signature, String signatureAlgorithm) {
HttpHeaders result = mock(HttpHeaders.class);
when(result.getHeaderString(eq(InternalRequestSignature.SIGNATURE_HEADER)))
.thenReturn(signature);
when(result.getHeaderString(eq(InternalRequestSignature.SIGNATURE_ALGORITHM_HEADER)))
.thenReturn(signatureAlgorithm);
return result;
}
}
| InternalRequestSignatureTest |
java | apache__avro | lang/java/avro/src/main/java/org/apache/avro/generic/GenericData.java | {
"start": 2696,
"end": 3430
} | class ____ {
private static final GenericData INSTANCE = new GenericData();
private static final Map<Class<?>, String> PRIMITIVE_DATUM_TYPES = new IdentityHashMap<>();
static {
PRIMITIVE_DATUM_TYPES.put(Integer.class, Type.INT.getName());
PRIMITIVE_DATUM_TYPES.put(Long.class, Type.LONG.getName());
PRIMITIVE_DATUM_TYPES.put(Float.class, Type.FLOAT.getName());
PRIMITIVE_DATUM_TYPES.put(Double.class, Type.DOUBLE.getName());
PRIMITIVE_DATUM_TYPES.put(Boolean.class, Type.BOOLEAN.getName());
PRIMITIVE_DATUM_TYPES.put(String.class, Type.STRING.getName());
PRIMITIVE_DATUM_TYPES.put(Utf8.class, Type.STRING.getName());
}
/** Used to specify the Java type for a string schema. */
public | GenericData |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/internal/throwables/Throwables_assertHasMessageStartingWith_Test.java | {
"start": 1019,
"end": 2160
} | class ____ extends ThrowablesBaseTest {
@Test
void should_pass_if_actual_has_message_starting_with_expected_description() {
throwables.assertHasMessageStartingWith(INFO, actual, "Throwable");
}
@Test
void should_fail_if_actual_is_null() {
// WHEN
var error = expectAssertionError(() -> throwables.assertHasMessageStartingWith(INFO, null, "foo"));
// THEN
then(error).hasMessage(actualIsNull());
}
@Test
void should_fail_if_actual_has_message_not_starting_with_expected_description() {
// WHEN
expectAssertionError(() -> throwables.assertHasMessageStartingWith(INFO, actual, "expected start"));
// THEN
verify(failures).failure(INFO, shouldStartWith(actual.getMessage(), "expected start"));
}
@Test
void should_fail_if_actual_has_null_message() {
// GIVEN
Throwable actual = new Throwable((String) null);
// WHEN
expectAssertionError(() -> throwables.assertHasMessageStartingWith(INFO, actual, "expected start"));
// THEN
verify(failures).failure(INFO, shouldStartWith(actual.getMessage(), "expected start"));
}
}
| Throwables_assertHasMessageStartingWith_Test |
java | elastic__elasticsearch | x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/CartesianShapeValues.java | {
"start": 2429,
"end": 3607
} | class ____ extends ShapeValues.ShapeValue {
public CartesianShapeValue() {
super(CoordinateEncoder.CARTESIAN, CartesianPoint::new);
}
@SuppressWarnings("this-escape")
public CartesianShapeValue(StreamInput in) throws IOException {
this();
this.reset(in);
}
@Override
protected Component2D centroidAsComponent2D() throws IOException {
return XYGeometry.create(new XYPoint((float) getX(), (float) getY()));
}
/**
* Determine the {@link GeoRelation} between the current shape and a {@link XYGeometry}. It only supports
* simple geometries, therefore it will fail if the LatLonGeometry is a {@link org.apache.lucene.geo.Rectangle}
* that crosses the dateline.
* TODO: this is test only method, perhaps should be moved to test code
*/
public GeoRelation relate(XYGeometry geometry) throws IOException {
return relate(XYGeometry.create(geometry));
}
@Override
public String getWriteableName() {
return "CartesianShapeValue";
}
}
}
| CartesianShapeValue |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/objects/Objects_assertIsNotIn_with_array_Test.java | {
"start": 1583,
"end": 3260
} | class ____ extends ObjectsBaseTest {
private static String[] values;
@BeforeAll
static void setUpOnce() {
values = array("Yoda", "Leia");
}
@Test
void should_throw_error_if_array_is_null() {
Object[] array = null;
assertThatNullPointerException().isThrownBy(() -> objects.assertIsNotIn(someInfo(), "Yoda", array))
.withMessage(arrayIsNull());
}
@Test
void should_pass_if_given_array_is_empty() {
objects.assertIsNotIn(someInfo(), "Luke", emptyArray());
}
@Test
void should_pass_if_actual_is_in_not_array() {
objects.assertIsNotIn(someInfo(), "Luke", values);
}
@Test
void should_pass_if_actual_is_null_and_array_does_not_contain_null() {
objects.assertIsNotIn(someInfo(), null, values);
}
@Test
void should_fail_if_actual_is_not_in_array() {
// GIVEN
AssertionInfo info = someInfo();
// WHEN
expectAssertionError(() -> objects.assertIsNotIn(info, "Yoda", values));
// THEN
verify(failures).failure(info, shouldNotBeIn("Yoda", asList(values)));
}
@Test
void should_pass_if_actual_is_in_not_array_according_to_custom_comparison_strategy() {
objectsWithCustomComparisonStrategy.assertIsNotIn(someInfo(), "Luke", values);
}
@Test
void should_fail_if_actual_is_not_in_array_according_to_custom_comparison_strategy() {
// GIVEN
AssertionInfo info = someInfo();
// WHEN
expectAssertionError(() -> objectsWithCustomComparisonStrategy.assertIsNotIn(info, "YODA", values));
// THEN
verify(failures).failure(info, shouldNotBeIn("YODA", asList(values), customComparisonStrategy));
}
}
| Objects_assertIsNotIn_with_array_Test |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/queuemanagement/TestDeactivatedLeafQueuesByLabel.java | {
"start": 1037,
"end": 1802
} | class ____ {
@Test
public void testGetMaxLeavesToBeActivated() {
DeactivatedLeafQueuesByLabel d1 = spy(DeactivatedLeafQueuesByLabel.class);
d1.setAvailableCapacity(0.17f);
d1.setLeafQueueTemplateAbsoluteCapacity(0.03f);
assertEquals(1, d1.getMaxLeavesToBeActivated(1));
DeactivatedLeafQueuesByLabel d2 = spy(DeactivatedLeafQueuesByLabel.class);
d2.setAvailableCapacity(0.17f);
d2.setLeafQueueTemplateAbsoluteCapacity(0.03f);
assertEquals(5, d2.getMaxLeavesToBeActivated(7));
DeactivatedLeafQueuesByLabel d3 = spy(DeactivatedLeafQueuesByLabel.class);
d3.setAvailableCapacity(0f);
d3.setLeafQueueTemplateAbsoluteCapacity(0.03f);
assertEquals(0, d3.getMaxLeavesToBeActivated(10));
}
} | TestDeactivatedLeafQueuesByLabel |
java | quarkusio__quarkus | integration-tests/opentelemetry-grpc/src/main/java/io/quarkus/it/opentelemetry/grpc/ExporterResource.java | {
"start": 1052,
"end": 1248
} | class ____ {
@Produces
@Singleton
InMemorySpanExporter inMemorySpanExporter() {
return InMemorySpanExporter.create();
}
}
}
| InMemorySpanExporterProducer |
java | apache__camel | components/camel-bindy/src/main/java/org/apache/camel/dataformat/bindy/format/factories/LongFormatFactory.java | {
"start": 1652,
"end": 1974
} | class ____ extends AbstractNumberFormat<Long> {
@Override
public String format(Long object) throws Exception {
return object.toString();
}
@Override
public Long parse(String string) throws Exception {
return Long.valueOf(string);
}
}
}
| LongFormat |
java | spring-projects__spring-security | oauth2/oauth2-client/src/test/java/org/springframework/security/oauth2/client/AuthorizedClientServiceReactiveOAuth2AuthorizedClientManagerTests.java | {
"start": 2426,
"end": 28755
} | class ____ {
private ReactiveClientRegistrationRepository clientRegistrationRepository;
private ReactiveOAuth2AuthorizedClientService authorizedClientService;
private ReactiveOAuth2AuthorizedClientProvider authorizedClientProvider;
private Function<OAuth2AuthorizeRequest, Mono<Map<String, Object>>> contextAttributesMapper;
private AuthorizedClientServiceReactiveOAuth2AuthorizedClientManager authorizedClientManager;
private ClientRegistration clientRegistration;
private Authentication principal;
private OAuth2AuthorizedClient authorizedClient;
private ArgumentCaptor<OAuth2AuthorizationContext> authorizationContextCaptor;
private PublisherProbe<Void> saveAuthorizedClientProbe;
private PublisherProbe<Void> removeAuthorizedClientProbe;
@SuppressWarnings("unchecked")
@BeforeEach
public void setup() {
this.clientRegistrationRepository = mock(ReactiveClientRegistrationRepository.class);
this.authorizedClientService = mock(ReactiveOAuth2AuthorizedClientService.class);
this.saveAuthorizedClientProbe = PublisherProbe.empty();
given(this.authorizedClientService.saveAuthorizedClient(any(), any()))
.willReturn(this.saveAuthorizedClientProbe.mono());
this.removeAuthorizedClientProbe = PublisherProbe.empty();
given(this.authorizedClientService.removeAuthorizedClient(any(), any()))
.willReturn(this.removeAuthorizedClientProbe.mono());
this.authorizedClientProvider = mock(ReactiveOAuth2AuthorizedClientProvider.class);
this.contextAttributesMapper = mock(Function.class);
given(this.contextAttributesMapper.apply(any())).willReturn(Mono.empty());
this.authorizedClientManager = new AuthorizedClientServiceReactiveOAuth2AuthorizedClientManager(
this.clientRegistrationRepository, this.authorizedClientService);
this.authorizedClientManager.setAuthorizedClientProvider(this.authorizedClientProvider);
this.authorizedClientManager.setContextAttributesMapper(this.contextAttributesMapper);
this.clientRegistration = TestClientRegistrations.clientRegistration().build();
this.principal = new TestingAuthenticationToken("principal", "password");
this.authorizedClient = new OAuth2AuthorizedClient(this.clientRegistration, this.principal.getName(),
TestOAuth2AccessTokens.scopes("read", "write"), TestOAuth2RefreshTokens.refreshToken());
this.authorizationContextCaptor = ArgumentCaptor.forClass(OAuth2AuthorizationContext.class);
}
@Test
public void constructorWhenClientRegistrationRepositoryIsNullThenThrowIllegalArgumentException() {
assertThatIllegalArgumentException()
.isThrownBy(() -> new AuthorizedClientServiceReactiveOAuth2AuthorizedClientManager(null,
this.authorizedClientService))
.withMessage("clientRegistrationRepository cannot be null");
}
@Test
public void constructorWhenOAuth2AuthorizedClientServiceIsNullThenThrowIllegalArgumentException() {
assertThatIllegalArgumentException()
.isThrownBy(() -> new AuthorizedClientServiceReactiveOAuth2AuthorizedClientManager(
this.clientRegistrationRepository, null))
.withMessage("authorizedClientService cannot be null");
}
@Test
public void setAuthorizedClientProviderWhenNullThenThrowIllegalArgumentException() {
	// Null strategies are rejected at configuration time rather than failing later during authorize().
	assertThatExceptionOfType(IllegalArgumentException.class)
		.isThrownBy(() -> this.authorizedClientManager.setAuthorizedClientProvider(null))
		.withMessage("authorizedClientProvider cannot be null");
}
@Test
public void setContextAttributesMapperWhenNullThenThrowIllegalArgumentException() {
	// The mapper is mandatory; a null argument fails immediately with a descriptive message.
	assertThatExceptionOfType(IllegalArgumentException.class)
		.isThrownBy(() -> this.authorizedClientManager.setContextAttributesMapper(null))
		.withMessage("contextAttributesMapper cannot be null");
}
@Test
public void setAuthorizationSuccessHandlerWhenNullThenThrowIllegalArgumentException() {
	// The success handler is mandatory; a null argument fails immediately with a descriptive message.
	assertThatExceptionOfType(IllegalArgumentException.class)
		.isThrownBy(() -> this.authorizedClientManager.setAuthorizationSuccessHandler(null))
		.withMessage("authorizationSuccessHandler cannot be null");
}
@Test
public void setAuthorizationFailureHandlerWhenNullThenThrowIllegalArgumentException() {
	// The failure handler is mandatory; a null argument fails immediately with a descriptive message.
	assertThatExceptionOfType(IllegalArgumentException.class)
		.isThrownBy(() -> this.authorizedClientManager.setAuthorizationFailureHandler(null))
		.withMessage("authorizationFailureHandler cannot be null");
}
@Test
public void authorizeWhenRequestIsNullThenThrowIllegalArgumentException() {
	// authorize(null) throws synchronously (the assertion catches the call itself, not a Mono error).
	assertThatExceptionOfType(IllegalArgumentException.class)
		.isThrownBy(() -> this.authorizedClientManager.authorize(null))
		.withMessage("authorizeRequest cannot be null");
}
@Test
public void authorizeWhenClientRegistrationNotFoundThenThrowIllegalArgumentException() {
	String registrationId = "invalid-registration-id";
	// An unknown registration id resolves to an empty Mono from the repository.
	given(this.clientRegistrationRepository.findByRegistrationId(registrationId)).willReturn(Mono.empty());
	// @formatter:off
	OAuth2AuthorizeRequest request = OAuth2AuthorizeRequest.withClientRegistrationId(registrationId)
		.principal(this.principal)
		.build();
	// @formatter:on
	// The failure surfaces as an error signal on the returned Mono.
	StepVerifier.create(this.authorizedClientManager.authorize(request))
		.verifyError(IllegalArgumentException.class);
}
@SuppressWarnings("unchecked")
@Test
public void authorizeWhenNotAuthorizedAndUnsupportedProviderThenNotAuthorized() {
	given(this.clientRegistrationRepository.findByRegistrationId(eq(this.clientRegistration.getRegistrationId())))
		.willReturn(Mono.just(this.clientRegistration));
	given(this.authorizedClientService.loadAuthorizedClient(any(), any())).willReturn(Mono.empty());
	// An unsupported provider signals "not handled" by completing empty.
	given(this.authorizedClientProvider.authorize(any(OAuth2AuthorizationContext.class))).willReturn(Mono.empty());
	// @formatter:off
	OAuth2AuthorizeRequest authorizeRequest = OAuth2AuthorizeRequest
		.withClientRegistrationId(this.clientRegistration.getRegistrationId())
		.principal(this.principal)
		.build();
	// @formatter:on
	Mono<OAuth2AuthorizedClient> authorizedClient = this.authorizedClientManager.authorize(authorizeRequest);
	StepVerifier.create(authorizedClient).verifyComplete();
	verify(this.authorizedClientProvider).authorize(this.authorizationContextCaptor.capture());
	verify(this.contextAttributesMapper).apply(eq(authorizeRequest));
	OAuth2AuthorizationContext authorizationContext = this.authorizationContextCaptor.getValue();
	assertThat(authorizationContext.getClientRegistration()).isEqualTo(this.clientRegistration);
	assertThat(authorizationContext.getAuthorizedClient()).isNull();
	assertThat(authorizationContext.getPrincipal()).isEqualTo(this.principal);
	// Consistent with the sibling tests: nothing may be saved or removed for ANY arguments.
	// (any()/any() is a stronger guarantee than the previous any(...)/eq(this.principal) matchers.)
	verify(this.authorizedClientService, never()).saveAuthorizedClient(any(), any());
	verify(this.authorizedClientService, never()).removeAuthorizedClient(any(), any());
}
@SuppressWarnings("unchecked")
@Test
public void authorizeWhenNotAuthorizedAndSupportedProviderThenAuthorized() {
	// No previously authorized client exists, but the provider can perform the grant.
	given(this.clientRegistrationRepository.findByRegistrationId(eq(this.clientRegistration.getRegistrationId())))
		.willReturn(Mono.just(this.clientRegistration));
	given(this.authorizedClientService.loadAuthorizedClient(any(), any())).willReturn(Mono.empty());
	given(this.authorizedClientProvider.authorize(any(OAuth2AuthorizationContext.class)))
		.willReturn(Mono.just(this.authorizedClient));
	// @formatter:off
	OAuth2AuthorizeRequest request = OAuth2AuthorizeRequest
		.withClientRegistrationId(this.clientRegistration.getRegistrationId())
		.principal(this.principal)
		.build();
	// @formatter:on
	Mono<OAuth2AuthorizedClient> result = this.authorizedClientManager.authorize(request);
	StepVerifier.create(result).expectNext(this.authorizedClient).verifyComplete();
	// The provider saw a context describing a not-yet-authorized client for our principal.
	verify(this.authorizedClientProvider).authorize(this.authorizationContextCaptor.capture());
	verify(this.contextAttributesMapper).apply(eq(request));
	OAuth2AuthorizationContext capturedContext = this.authorizationContextCaptor.getValue();
	assertThat(capturedContext.getClientRegistration()).isEqualTo(this.clientRegistration);
	assertThat(capturedContext.getAuthorizedClient()).isNull();
	assertThat(capturedContext.getPrincipal()).isEqualTo(this.principal);
	// The freshly authorized client is persisted, and nothing is removed.
	verify(this.authorizedClientService).saveAuthorizedClient(eq(this.authorizedClient), eq(this.principal));
	this.saveAuthorizedClientProbe.assertWasSubscribed();
	verify(this.authorizedClientService, never()).removeAuthorizedClient(any(), any());
}
@SuppressWarnings("unchecked")
@Test
public void authorizeWhenNotAuthorizedAndSupportedProviderAndCustomSuccessHandlerThenInvokeCustomSuccessHandler() {
	given(this.clientRegistrationRepository.findByRegistrationId(eq(this.clientRegistration.getRegistrationId())))
		.willReturn(Mono.just(this.clientRegistration));
	given(this.authorizedClientService.loadAuthorizedClient(any(), any())).willReturn(Mono.empty());
	given(this.authorizedClientProvider.authorize(any(OAuth2AuthorizationContext.class)))
		.willReturn(Mono.just(this.authorizedClient));
	// @formatter:off
	OAuth2AuthorizeRequest request = OAuth2AuthorizeRequest
		.withClientRegistrationId(this.clientRegistration.getRegistrationId())
		.principal(this.principal)
		.build();
	// @formatter:on
	// Install a probe as the success handler so we can assert it was invoked.
	PublisherProbe<Void> successHandlerProbe = PublisherProbe.empty();
	this.authorizedClientManager
		.setAuthorizationSuccessHandler((client, principal, attributes) -> successHandlerProbe.mono());
	Mono<OAuth2AuthorizedClient> result = this.authorizedClientManager.authorize(request);
	StepVerifier.create(result).expectNext(this.authorizedClient).verifyComplete();
	verify(this.authorizedClientProvider).authorize(this.authorizationContextCaptor.capture());
	verify(this.contextAttributesMapper).apply(eq(request));
	OAuth2AuthorizationContext capturedContext = this.authorizationContextCaptor.getValue();
	assertThat(capturedContext.getClientRegistration()).isEqualTo(this.clientRegistration);
	assertThat(capturedContext.getAuthorizedClient()).isNull();
	assertThat(capturedContext.getPrincipal()).isEqualTo(this.principal);
	// Only the custom handler runs; the service is never touched.
	successHandlerProbe.assertWasSubscribed();
	verify(this.authorizedClientService, never()).saveAuthorizedClient(any(), any());
	verify(this.authorizedClientService, never()).removeAuthorizedClient(any(), any());
}
@Test
public void authorizeWhenInvalidTokenThenRemoveAuthorizedClient() {
	given(this.clientRegistrationRepository.findByRegistrationId(eq(this.clientRegistration.getRegistrationId())))
		.willReturn(Mono.just(this.clientRegistration));
	given(this.authorizedClientService.loadAuthorizedClient(any(), any())).willReturn(Mono.empty());
	// @formatter:off
	OAuth2AuthorizeRequest request = OAuth2AuthorizeRequest
		.withClientRegistrationId(this.clientRegistration.getRegistrationId())
		.principal(this.principal)
		.build();
	// @formatter:on
	// The provider rejects the grant with an invalid_token client-authorization error.
	ClientAuthorizationException authorizationException = new ClientAuthorizationException(
		new OAuth2Error(OAuth2ErrorCodes.INVALID_TOKEN, null, null),
		this.clientRegistration.getRegistrationId());
	given(this.authorizedClientProvider.authorize(any(OAuth2AuthorizationContext.class)))
		.willReturn(Mono.error(authorizationException));
	assertThatExceptionOfType(ClientAuthorizationException.class)
		.isThrownBy(() -> this.authorizedClientManager.authorize(request).block())
		.isEqualTo(authorizationException);
	verify(this.authorizedClientProvider).authorize(this.authorizationContextCaptor.capture());
	verify(this.contextAttributesMapper).apply(eq(request));
	OAuth2AuthorizationContext capturedContext = this.authorizationContextCaptor.getValue();
	assertThat(capturedContext.getClientRegistration()).isEqualTo(this.clientRegistration);
	assertThat(capturedContext.getAuthorizedClient()).isNull();
	assertThat(capturedContext.getPrincipal()).isEqualTo(this.principal);
	// invalid_token marks the stored authorization as stale: it is removed, never saved.
	verify(this.authorizedClientService).removeAuthorizedClient(eq(this.clientRegistration.getRegistrationId()),
		eq(this.principal.getName()));
	this.removeAuthorizedClientProbe.assertWasSubscribed();
	verify(this.authorizedClientService, never()).saveAuthorizedClient(any(), any());
}
@Test
public void authorizeWhenInvalidGrantThenRemoveAuthorizedClient() {
	given(this.clientRegistrationRepository.findByRegistrationId(eq(this.clientRegistration.getRegistrationId())))
		.willReturn(Mono.just(this.clientRegistration));
	given(this.authorizedClientService.loadAuthorizedClient(any(), any())).willReturn(Mono.empty());
	// @formatter:off
	OAuth2AuthorizeRequest request = OAuth2AuthorizeRequest
		.withClientRegistrationId(this.clientRegistration.getRegistrationId())
		.principal(this.principal)
		.build();
	// @formatter:on
	// The provider rejects the grant with an invalid_grant client-authorization error.
	ClientAuthorizationException authorizationException = new ClientAuthorizationException(
		new OAuth2Error(OAuth2ErrorCodes.INVALID_GRANT, null, null),
		this.clientRegistration.getRegistrationId());
	given(this.authorizedClientProvider.authorize(any(OAuth2AuthorizationContext.class)))
		.willReturn(Mono.error(authorizationException));
	assertThatExceptionOfType(ClientAuthorizationException.class)
		.isThrownBy(() -> this.authorizedClientManager.authorize(request).block())
		.isEqualTo(authorizationException);
	verify(this.authorizedClientProvider).authorize(this.authorizationContextCaptor.capture());
	verify(this.contextAttributesMapper).apply(eq(request));
	OAuth2AuthorizationContext capturedContext = this.authorizationContextCaptor.getValue();
	assertThat(capturedContext.getClientRegistration()).isEqualTo(this.clientRegistration);
	assertThat(capturedContext.getAuthorizedClient()).isNull();
	assertThat(capturedContext.getPrincipal()).isEqualTo(this.principal);
	// invalid_grant marks the stored authorization as stale: it is removed, never saved.
	verify(this.authorizedClientService).removeAuthorizedClient(eq(this.clientRegistration.getRegistrationId()),
		eq(this.principal.getName()));
	this.removeAuthorizedClientProbe.assertWasSubscribed();
	verify(this.authorizedClientService, never()).saveAuthorizedClient(any(), any());
}
@Test
public void authorizeWhenServerErrorThenDoNotRemoveAuthorizedClient() {
	given(this.clientRegistrationRepository.findByRegistrationId(eq(this.clientRegistration.getRegistrationId())))
		.willReturn(Mono.just(this.clientRegistration));
	given(this.authorizedClientService.loadAuthorizedClient(any(), any())).willReturn(Mono.empty());
	// @formatter:off
	OAuth2AuthorizeRequest request = OAuth2AuthorizeRequest
		.withClientRegistrationId(this.clientRegistration.getRegistrationId())
		.principal(this.principal)
		.build();
	// @formatter:on
	// server_error is not one of the error codes that invalidates a stored authorization
	// (compare with the invalid_token / invalid_grant sibling tests).
	ClientAuthorizationException authorizationException = new ClientAuthorizationException(
		new OAuth2Error(OAuth2ErrorCodes.SERVER_ERROR, null, null),
		this.clientRegistration.getRegistrationId());
	given(this.authorizedClientProvider.authorize(any(OAuth2AuthorizationContext.class)))
		.willReturn(Mono.error(authorizationException));
	assertThatExceptionOfType(ClientAuthorizationException.class)
		.isThrownBy(() -> this.authorizedClientManager.authorize(request).block())
		.isEqualTo(authorizationException);
	verify(this.authorizedClientProvider).authorize(this.authorizationContextCaptor.capture());
	verify(this.contextAttributesMapper).apply(eq(request));
	OAuth2AuthorizationContext capturedContext = this.authorizationContextCaptor.getValue();
	assertThat(capturedContext.getClientRegistration()).isEqualTo(this.clientRegistration);
	assertThat(capturedContext.getAuthorizedClient()).isNull();
	assertThat(capturedContext.getPrincipal()).isEqualTo(this.principal);
	// The service stays untouched on a server-side failure.
	verify(this.authorizedClientService, never()).removeAuthorizedClient(any(), any());
	verify(this.authorizedClientService, never()).saveAuthorizedClient(any(), any());
}
@Test
public void authorizeWhenOAuth2AuthorizationExceptionThenDoNotRemoveAuthorizedClient() {
	given(this.clientRegistrationRepository.findByRegistrationId(eq(this.clientRegistration.getRegistrationId())))
		.willReturn(Mono.just(this.clientRegistration));
	given(this.authorizedClientService.loadAuthorizedClient(any(), any())).willReturn(Mono.empty());
	// @formatter:off
	OAuth2AuthorizeRequest request = OAuth2AuthorizeRequest
		.withClientRegistrationId(this.clientRegistration.getRegistrationId())
		.principal(this.principal)
		.build();
	// @formatter:on
	// A plain OAuth2AuthorizationException — unlike the ClientAuthorizationException in the
	// invalid_grant sibling test — does not trigger removal of the stored authorization.
	OAuth2AuthorizationException authorizationException = new OAuth2AuthorizationException(
		new OAuth2Error(OAuth2ErrorCodes.INVALID_GRANT, null, null));
	given(this.authorizedClientProvider.authorize(any(OAuth2AuthorizationContext.class)))
		.willReturn(Mono.error(authorizationException));
	assertThatExceptionOfType(OAuth2AuthorizationException.class)
		.isThrownBy(() -> this.authorizedClientManager.authorize(request).block())
		.isEqualTo(authorizationException);
	verify(this.authorizedClientProvider).authorize(this.authorizationContextCaptor.capture());
	verify(this.contextAttributesMapper).apply(eq(request));
	OAuth2AuthorizationContext capturedContext = this.authorizationContextCaptor.getValue();
	assertThat(capturedContext.getClientRegistration()).isEqualTo(this.clientRegistration);
	assertThat(capturedContext.getAuthorizedClient()).isNull();
	assertThat(capturedContext.getPrincipal()).isEqualTo(this.principal);
	verify(this.authorizedClientService, never()).removeAuthorizedClient(any(), any());
	verify(this.authorizedClientService, never()).saveAuthorizedClient(any(), any());
}
@Test
public void authorizeWhenOAuth2AuthorizationExceptionAndCustomFailureHandlerThenInvokeCustomFailureHandler() {
	given(this.clientRegistrationRepository.findByRegistrationId(eq(this.clientRegistration.getRegistrationId())))
		.willReturn(Mono.just(this.clientRegistration));
	given(this.authorizedClientService.loadAuthorizedClient(any(), any())).willReturn(Mono.empty());
	// @formatter:off
	OAuth2AuthorizeRequest request = OAuth2AuthorizeRequest
		.withClientRegistrationId(this.clientRegistration.getRegistrationId())
		.principal(this.principal)
		.build();
	// @formatter:on
	OAuth2AuthorizationException authorizationException = new OAuth2AuthorizationException(
		new OAuth2Error(OAuth2ErrorCodes.INVALID_GRANT, null, null));
	given(this.authorizedClientProvider.authorize(any(OAuth2AuthorizationContext.class)))
		.willReturn(Mono.error(authorizationException));
	// Install a probe as the failure handler so we can assert it was invoked.
	PublisherProbe<Void> failureHandlerProbe = PublisherProbe.empty();
	this.authorizedClientManager
		.setAuthorizationFailureHandler((client, principal, attributes) -> failureHandlerProbe.mono());
	assertThatExceptionOfType(OAuth2AuthorizationException.class)
		.isThrownBy(() -> this.authorizedClientManager.authorize(request).block())
		.isEqualTo(authorizationException);
	verify(this.authorizedClientProvider).authorize(this.authorizationContextCaptor.capture());
	verify(this.contextAttributesMapper).apply(eq(request));
	OAuth2AuthorizationContext capturedContext = this.authorizationContextCaptor.getValue();
	assertThat(capturedContext.getClientRegistration()).isEqualTo(this.clientRegistration);
	assertThat(capturedContext.getAuthorizedClient()).isNull();
	assertThat(capturedContext.getPrincipal()).isEqualTo(this.principal);
	// Only the custom handler runs; the service is never touched.
	failureHandlerProbe.assertWasSubscribed();
	verify(this.authorizedClientService, never()).removeAuthorizedClient(any(), any());
	verify(this.authorizedClientService, never()).saveAuthorizedClient(any(), any());
}
@SuppressWarnings("unchecked")
@Test
public void authorizeWhenAuthorizedAndSupportedProviderThenReauthorized() {
	given(this.clientRegistrationRepository.findByRegistrationId(eq(this.clientRegistration.getRegistrationId())))
		.willReturn(Mono.just(this.clientRegistration));
	// A client is already authorized for this registration/principal pair.
	given(this.authorizedClientService.loadAuthorizedClient(eq(this.clientRegistration.getRegistrationId()),
			eq(this.principal.getName())))
		.willReturn(Mono.just(this.authorizedClient));
	OAuth2AuthorizedClient refreshedClient = new OAuth2AuthorizedClient(this.clientRegistration,
			this.principal.getName(), TestOAuth2AccessTokens.noScopes(), TestOAuth2RefreshTokens.refreshToken());
	given(this.authorizedClientProvider.authorize(any(OAuth2AuthorizationContext.class)))
		.willReturn(Mono.just(refreshedClient));
	// @formatter:off
	OAuth2AuthorizeRequest request = OAuth2AuthorizeRequest
		.withClientRegistrationId(this.clientRegistration.getRegistrationId())
		.principal(this.principal)
		.build();
	// @formatter:on
	Mono<OAuth2AuthorizedClient> result = this.authorizedClientManager.authorize(request);
	StepVerifier.create(result).expectNext(refreshedClient).verifyComplete();
	verify(this.authorizedClientProvider).authorize(this.authorizationContextCaptor.capture());
	verify(this.contextAttributesMapper).apply(eq(request));
	OAuth2AuthorizationContext capturedContext = this.authorizationContextCaptor.getValue();
	assertThat(capturedContext.getClientRegistration()).isEqualTo(this.clientRegistration);
	// Re-authorization passes the previously authorized client through the context.
	assertThat(capturedContext.getAuthorizedClient()).isSameAs(this.authorizedClient);
	assertThat(capturedContext.getPrincipal()).isEqualTo(this.principal);
	// The re-authorized client replaces the stored one.
	verify(this.authorizedClientService).saveAuthorizedClient(eq(refreshedClient), eq(this.principal));
	this.saveAuthorizedClientProbe.assertWasSubscribed();
	verify(this.authorizedClientService, never()).removeAuthorizedClient(any(), any());
}
@SuppressWarnings("unchecked")
@Test
public void reauthorizeWhenUnsupportedProviderThenNotReauthorized() {
	// The provider declines to re-authorize by completing empty; the original client is returned as-is.
	given(this.authorizedClientProvider.authorize(any(OAuth2AuthorizationContext.class))).willReturn(Mono.empty());
	// @formatter:off
	OAuth2AuthorizeRequest reauthorizeRequest = OAuth2AuthorizeRequest.withAuthorizedClient(this.authorizedClient)
		.principal(this.principal)
		.build();
	// @formatter:on
	Mono<OAuth2AuthorizedClient> authorizedClient = this.authorizedClientManager.authorize(reauthorizeRequest);
	StepVerifier.create(authorizedClient).expectNext(this.authorizedClient).verifyComplete();
	verify(this.authorizedClientProvider).authorize(this.authorizationContextCaptor.capture());
	verify(this.contextAttributesMapper).apply(eq(reauthorizeRequest));
	OAuth2AuthorizationContext authorizationContext = this.authorizationContextCaptor.getValue();
	assertThat(authorizationContext.getClientRegistration()).isEqualTo(this.clientRegistration);
	assertThat(authorizationContext.getAuthorizedClient()).isSameAs(this.authorizedClient);
	assertThat(authorizationContext.getPrincipal()).isEqualTo(this.principal);
	// Consistent with the sibling tests: nothing may be saved or removed for ANY arguments.
	// (any()/any() is a stronger guarantee than the previous any(...)/eq(this.principal) matchers.)
	verify(this.authorizedClientService, never()).saveAuthorizedClient(any(), any());
	verify(this.authorizedClientService, never()).removeAuthorizedClient(any(), any());
}
@SuppressWarnings("unchecked")
@Test
public void reauthorizeWhenSupportedProviderThenReauthorized() {
	OAuth2AuthorizedClient refreshedClient = new OAuth2AuthorizedClient(this.clientRegistration,
			this.principal.getName(), TestOAuth2AccessTokens.noScopes(), TestOAuth2RefreshTokens.refreshToken());
	given(this.authorizedClientProvider.authorize(any(OAuth2AuthorizationContext.class)))
		.willReturn(Mono.just(refreshedClient));
	// @formatter:off
	OAuth2AuthorizeRequest reauthorizeRequest = OAuth2AuthorizeRequest.withAuthorizedClient(this.authorizedClient)
		.principal(this.principal)
		.build();
	// @formatter:on
	Mono<OAuth2AuthorizedClient> result = this.authorizedClientManager.authorize(reauthorizeRequest);
	StepVerifier.create(result).expectNext(refreshedClient).verifyComplete();
	verify(this.authorizedClientProvider).authorize(this.authorizationContextCaptor.capture());
	verify(this.contextAttributesMapper).apply(eq(reauthorizeRequest));
	OAuth2AuthorizationContext capturedContext = this.authorizationContextCaptor.getValue();
	assertThat(capturedContext.getClientRegistration()).isEqualTo(this.clientRegistration);
	assertThat(capturedContext.getAuthorizedClient()).isSameAs(this.authorizedClient);
	assertThat(capturedContext.getPrincipal()).isEqualTo(this.principal);
	// The re-authorized client replaces the stored one; nothing is removed.
	verify(this.authorizedClientService).saveAuthorizedClient(eq(refreshedClient), eq(this.principal));
	this.saveAuthorizedClientProbe.assertWasSubscribed();
	verify(this.authorizedClientService, never()).removeAuthorizedClient(any(), any());
}
@SuppressWarnings("unchecked")
@Test
public void reauthorizeWhenRequestAttributeScopeThenMappedToContext() {
	OAuth2AuthorizedClient refreshedClient = new OAuth2AuthorizedClient(this.clientRegistration,
			this.principal.getName(), TestOAuth2AccessTokens.noScopes(), TestOAuth2RefreshTokens.refreshToken());
	given(this.authorizedClientProvider.authorize(any(OAuth2AuthorizationContext.class)))
		.willReturn(Mono.just(refreshedClient));
	// @formatter:off
	OAuth2AuthorizeRequest reauthorizeRequest = OAuth2AuthorizeRequest.withAuthorizedClient(this.authorizedClient)
		.principal(this.principal)
		.attribute(OAuth2ParameterNames.SCOPE, "read write")
		.build();
	// @formatter:on
	// Use the real mapper (not the mock) so the scope request attribute is translated into the context.
	this.authorizedClientManager.setContextAttributesMapper(
			new AuthorizedClientServiceReactiveOAuth2AuthorizedClientManager.DefaultContextAttributesMapper());
	Mono<OAuth2AuthorizedClient> result = this.authorizedClientManager.authorize(reauthorizeRequest);
	StepVerifier.create(result).expectNext(refreshedClient).verifyComplete();
	verify(this.authorizedClientService).saveAuthorizedClient(eq(refreshedClient), eq(this.principal));
	this.saveAuthorizedClientProbe.assertWasSubscribed();
	verify(this.authorizedClientService, never()).removeAuthorizedClient(any(), any());
	verify(this.authorizedClientProvider).authorize(this.authorizationContextCaptor.capture());
	OAuth2AuthorizationContext capturedContext = this.authorizationContextCaptor.getValue();
	assertThat(capturedContext.getClientRegistration()).isEqualTo(this.clientRegistration);
	assertThat(capturedContext.getAuthorizedClient()).isSameAs(this.authorizedClient);
	assertThat(capturedContext.getPrincipal()).isEqualTo(this.principal);
	// The space-delimited scope string must surface as the request-scope attribute array.
	assertThat(capturedContext.getAttributes())
		.containsKey(OAuth2AuthorizationContext.REQUEST_SCOPE_ATTRIBUTE_NAME);
	String[] requestedScopes = capturedContext
		.getAttribute(OAuth2AuthorizationContext.REQUEST_SCOPE_ATTRIBUTE_NAME);
	assertThat(requestedScopes).contains("read", "write");
}
}
| AuthorizedClientServiceReactiveOAuth2AuthorizedClientManagerTests |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocatedFileStatus.java | {
"start": 1010,
"end": 1062
} | class ____ the LocatedFileStatus class.
*/
public | tests |
java | elastic__elasticsearch | x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/FileOperatorUsersStore.java | {
"start": 16610,
"end": 17406
} | class ____ implements FileChangesListener {
@Override
public void onFileCreated(Path file) {
onFileChanged(file);
}
@Override
public void onFileDeleted(Path file) {
onFileChanged(file);
}
@Override
public void onFileChanged(Path file) {
if (file.equals(FileOperatorUsersStore.this.file)) {
final OperatorUsersDescriptor newDescriptor = parseFile(file, logger);
if (operatorUsersDescriptor.equals(newDescriptor) == false) {
logger.info("operator users file [{}] changed. updating operator users...", file.toAbsolutePath());
operatorUsersDescriptor = newDescriptor;
}
}
}
}
}
| FileListener |
java | elastic__elasticsearch | benchmarks/src/main/java/org/elasticsearch/benchmark/xcontent/FilterContentBenchmark.java | {
"start": 2044,
"end": 7792
} | class ____ {
@Param({ "cluster_stats", "index_stats", "node_stats" })
private String type;
@Param({ "10_field", "half_field", "all_field", "wildcard_field", "10_wildcard_field" })
private String fieldCount;
@Param({ "true" })
private boolean inclusive;
private BytesReference source;
private XContentParserConfiguration parserConfig;
private Set<String> filters;
private XContentParserConfiguration parserConfigMatchDotsInFieldNames;
@Setup
public void setup() throws IOException {
String sourceFile = switch (type) {
case "cluster_stats" -> "monitor_cluster_stats.json";
case "index_stats" -> "monitor_index_stats.json";
case "node_stats" -> "monitor_node_stats.json";
default -> throw new IllegalArgumentException("Unknown type [" + type + "]");
};
source = readSource(sourceFile);
filters = buildFilters();
parserConfig = buildParseConfig(false);
parserConfigMatchDotsInFieldNames = buildParseConfig(true);
}
private Set<String> buildFilters() {
Map<String, Object> flattenMap = Maps.flatten(XContentHelper.convertToMap(source, true, XContentType.JSON).v2(), false, true);
Set<String> keys = flattenMap.keySet();
AtomicInteger count = new AtomicInteger();
return switch (fieldCount) {
case "10_field" -> keys.stream().filter(key -> count.getAndIncrement() % 5 == 0).limit(10).collect(Collectors.toSet());
case "half_field" -> keys.stream().filter(key -> count.getAndIncrement() % 2 == 0).collect(Collectors.toSet());
case "all_field" -> new HashSet<>(keys);
case "wildcard_field" -> new HashSet<>(Arrays.asList("*stats"));
case "10_wildcard_field" -> Set.of(
"*stats.nodes*",
"*stats.ind*",
"*sta*.shards",
"*stats*.xpack",
"*stats.*.segments",
"*stat*.*.data*",
inclusive ? "*stats.**.request_cache" : "*stats.*.request_cache",
inclusive ? "*stats.**.stat" : "*stats.*.stat",
inclusive ? "*stats.**.threads" : "*stats.*.threads",
"*source_node.t*"
);
default -> throw new IllegalArgumentException("Unknown type [" + type + "]");
};
}
@Benchmark
public BytesReference filterWithParserConfigCreated() throws IOException {
return filter(this.parserConfig);
}
@Benchmark
public BytesReference filterWithParserConfigCreatedMatchDotsInFieldNames() throws IOException {
return filter(this.parserConfigMatchDotsInFieldNames);
}
@Benchmark
public BytesReference filterWithNewParserConfig() throws IOException {
XContentParserConfiguration contentParserConfiguration = buildParseConfig(false);
return filter(contentParserConfiguration);
}
@Benchmark
public BytesReference filterWithMap() throws IOException {
Map<String, Object> sourceMap = XContentHelper.convertToMap(source, false).v2();
String[] includes;
String[] excludes;
if (inclusive) {
includes = filters.toArray(Strings.EMPTY_ARRAY);
excludes = null;
} else {
includes = null;
excludes = filters.toArray(Strings.EMPTY_ARRAY);
}
Map<String, Object> filterMap = XContentMapValues.filter(sourceMap, includes, excludes);
return Source.fromMap(filterMap, XContentType.JSON).internalSourceRef();
}
@Benchmark
public BytesReference filterWithBuilder() throws IOException {
BytesStreamOutput streamOutput = new BytesStreamOutput(Math.min(1024, source.length()));
Set<String> includes;
Set<String> excludes;
if (inclusive) {
includes = filters;
excludes = Set.of();
} else {
includes = Set.of();
excludes = filters;
}
XContentBuilder builder = new XContentBuilder(
XContentType.JSON.xContent(),
streamOutput,
includes,
excludes,
XContentType.JSON.toParsedMediaType()
);
try (XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, source.streamInput())) {
builder.copyCurrentStructure(parser);
return BytesReference.bytes(builder);
}
}
private XContentParserConfiguration buildParseConfig(boolean matchDotsInFieldNames) {
Set<String> includes;
Set<String> excludes;
if (inclusive) {
includes = filters;
excludes = null;
} else {
includes = null;
excludes = filters;
}
return XContentParserConfiguration.EMPTY.withFiltering(null, includes, excludes, matchDotsInFieldNames);
}
private BytesReference filter(XContentParserConfiguration contentParserConfiguration) throws IOException {
try (BytesStreamOutput os = new BytesStreamOutput()) {
XContentBuilder builder = new XContentBuilder(XContentType.JSON.xContent(), os);
try (XContentParser parser = XContentType.JSON.xContent().createParser(contentParserConfiguration, source.streamInput())) {
if (parser.nextToken() != null) {
builder.copyCurrentStructure(parser);
}
return BytesReference.bytes(builder);
}
}
}
private BytesReference readSource(String fileName) throws IOException {
return Streams.readFully(FilterContentBenchmark.class.getResourceAsStream(fileName));
}
}
| FilterContentBenchmark |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/apigenerator/CreateSyncNodeSelectionClusterApi.java | {
"start": 1669,
"end": 4571
} | class ____ {
private static final Set<String> FILTER_TEMPLATES = LettuceSets.unmodifiableSet("RedisSentinelCommands",
"RedisTransactionalCommands");
private static final Set<String> FILTER_METHODS = LettuceSets.unmodifiableSet("shutdown", "debugOom", "debugSegfault",
"digest", "close", "isOpen", "BaseRedisCommands.reset", "readOnly", "readWrite", "dispatch", "setAutoFlushCommands",
"flushCommands");
/**
* Mutate type comment.
*
* @return
*/
Function<String, String> commentMutator() {
return s -> s.replaceAll("\\$\\{intent\\}", "Synchronous executed commands on a node selection") + "* @generated by "
+ getClass().getName() + "\r\n ";
}
/**
* Mutate type to async result.
*
* @return
*/
Predicate<MethodDeclaration> methodFilter() {
return method -> {
ClassOrInterfaceDeclaration classOfMethod = (ClassOrInterfaceDeclaration) method.getParentNode().get();
return !FILTER_METHODS.contains(method.getName().getIdentifier())
&& !FILTER_METHODS.contains(classOfMethod.getName().getIdentifier() + "." + method.getName());
};
}
/**
* Mutate type to async result.
*
* @return
*/
Function<MethodDeclaration, Type> methodTypeMutator() {
return method -> CompilationUnitFactory.createParametrizedType("Executions", method.getType().toString());
}
/**
* Supply additional imports.
*
* @return
*/
Supplier<List<String>> importSupplier() {
return Collections::emptyList;
}
@ParameterizedTest
@MethodSource("arguments")
@Tag(API_GENERATOR)
void createInterface(String argument) throws Exception {
createFactory(argument).createInterface();
}
static List<String> arguments() {
return Stream.of(Constants.TEMPLATE_NAMES).filter(t -> !FILTER_TEMPLATES.contains(t)).collect(Collectors.toList());
}
private CompilationUnitFactory createFactory(String templateName) {
String targetName = templateName.replace("Redis", "NodeSelection");
if (targetName.equals(templateName)) {
targetName = templateName.replace("Redi", "NodeSelection");
}
File templateFile = new File(Constants.TEMPLATES, "io/lettuce/core/api/" + templateName + ".java");
String targetPackage = "io.lettuce.core.cluster.api.sync";
// todo: remove AutoCloseable from BaseNodeSelectionAsyncCommands
CompilationUnitFactory factory = new CompilationUnitFactory(templateFile, Constants.SOURCES, targetPackage, targetName,
commentMutator(), methodTypeMutator(), methodFilter(), importSupplier(), null, Function.identity());
factory.keepMethodSignaturesFor(FILTER_METHODS);
return factory;
}
}
| CreateSyncNodeSelectionClusterApi |
java | apache__camel | core/camel-core-processor/src/main/java/org/apache/camel/processor/ThreadsProcessor.java | {
"start": 2477,
"end": 2997
} | class ____ extends BaseProcessorSupport implements IdAware, RouteIdAware {
private static final Logger LOG = LoggerFactory.getLogger(ThreadsProcessor.class);
private String id;
private String routeId;
private final CamelContext camelContext;
private final ExecutorService executorService;
private final ThreadPoolRejectedPolicy rejectedPolicy;
private final boolean shutdownExecutorService;
private final AtomicBoolean shutdown = new AtomicBoolean(true);
private final | ThreadsProcessor |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/record_type/RecordTypeTest.java | {
"start": 1096,
"end": 3350
} | class ____ {
private static SqlSessionFactory sqlSessionFactory;
@BeforeAll
static void setUp() throws Exception {
// create a SqlSessionFactory
try (Reader reader = Resources.getResourceAsReader("org/apache/ibatis/submitted/record_type/mybatis-config.xml")) {
sqlSessionFactory = new SqlSessionFactoryBuilder().build(reader);
}
// populate in-memory database
BaseDataTest.runScript(sqlSessionFactory.getConfiguration().getEnvironment().getDataSource(),
"org/apache/ibatis/submitted/record_type/CreateDB.sql");
}
@Test
void selectRecord() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
RecordTypeMapper mapper = sqlSession.getMapper(RecordTypeMapper.class);
Property prop = mapper.selectProperty(1);
assertEquals("Val1!", prop.value());
assertEquals("https://www.google.com", prop.URL());
}
}
@Test
void selectRecordAutomapping() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
RecordTypeMapper mapper = sqlSession.getMapper(RecordTypeMapper.class);
Property prop = mapper.selectPropertyAutomapping(1);
assertEquals("Val1!", prop.value());
assertEquals("https://www.google.com", prop.URL());
}
}
@Test
void insertRecord() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
RecordTypeMapper mapper = sqlSession.getMapper(RecordTypeMapper.class);
assertEquals(1, mapper.insertProperty(new Property(2, "Val2", "https://mybatis.org")));
sqlSession.commit();
}
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
RecordTypeMapper mapper = sqlSession.getMapper(RecordTypeMapper.class);
Property prop = mapper.selectProperty(2);
assertEquals("Val2!!", prop.value());
assertEquals("https://mybatis.org", prop.URL());
}
}
@Test
void selectNestedRecord() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
RecordTypeMapper mapper = sqlSession.getMapper(RecordTypeMapper.class);
Item item = mapper.selectItem(100);
assertEquals(Integer.valueOf(100), item.id());
assertEquals(new Property(1, "Val1", "https://www.google.com"), item.property());
}
}
}
| RecordTypeTest |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/checkreturnvalue/ResultUsePolicyEvaluator.java | {
"start": 1879,
"end": 2586
} | class ____<C, S, M extends S> {
/**
* Returns a new {@link Builder} for creating a {@link ResultUsePolicyEvaluator}.
*
* @param <C> the type of the context object used during evaluation
* @param <S> the type of symbols
* @param <M> the type of method symbols
*/
public static <C, S, M extends S> ResultUsePolicyEvaluator.Builder<C, S, M> builder(
MethodInfo<C, S, M> methodInfo) {
return new Builder<>(methodInfo);
}
/**
* Delegate to return information about a method symbol.
*
* @param <C> the type of the context object used during evaluation
* @param <S> the type of symbols
* @param <M> the type of method symbols
*/
public | ResultUsePolicyEvaluator |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/sql/results/graph/embeddable/internal/AggregateEmbeddableResultImpl.java | {
"start": 2122,
"end": 7048
} | class ____<T> extends AbstractFetchParent
implements AggregateEmbeddableResultGraphNode, DomainResult<T>, EmbeddableResult<T>,
InitializerProducer<AggregateEmbeddableResultImpl<T>> {
private final String resultVariable;
private final boolean containsAnyNonScalars;
private final EmbeddableMappingType fetchContainer;
private final BasicFetch<?> discriminatorFetch;
private final int[] aggregateValuesArrayPositions;
public AggregateEmbeddableResultImpl(
NavigablePath navigablePath,
EmbeddableValuedModelPart embeddedPartDescriptor,
String resultVariable,
DomainResultCreationState creationState) {
/*
An `{embeddable_result}` sub-path is created for the corresponding initializer to differentiate it from a fetch-initializer if this embedded is also fetched.
The Jakarta Persistence spec says that any embedded value selected in the result should not be part of the state of any managed entity.
Using this `{embeddable_result}` sub-path avoids this situation.
*/
super( navigablePath.append( "{embeddable_result}" ) );
this.fetchContainer = embeddedPartDescriptor.getEmbeddableTypeDescriptor();
this.resultVariable = resultVariable;
final var sqlAstCreationState = creationState.getSqlAstCreationState();
final var fromClauseAccess = sqlAstCreationState.getFromClauseAccess();
final TableGroup tableGroup = fromClauseAccess.resolveTableGroup(
getNavigablePath(),
np -> {
final EmbeddableValuedModelPart embeddedValueMapping =
embeddedPartDescriptor.getEmbeddableTypeDescriptor()
.getEmbeddedValueMapping();
final TableGroup tg =
fromClauseAccess.findTableGroup( NullnessUtil.castNonNull( np.getParent() ).getParent() );
final TableGroupJoin tableGroupJoin = embeddedValueMapping.createTableGroupJoin(
np,
tg,
resultVariable,
null,
SqlAstJoinType.INNER,
true,
false,
sqlAstCreationState
);
tg.addTableGroupJoin( tableGroupJoin );
return tableGroupJoin.getJoinedGroup();
}
);
final var sqlExpressionResolver = sqlAstCreationState.getSqlExpressionResolver();
final var tableReference = tableGroup.getPrimaryTableReference();
final var selectableMapping = embeddedPartDescriptor.getEmbeddableTypeDescriptor().getAggregateMapping();
final Expression expression = sqlExpressionResolver.resolveSqlExpression( tableReference, selectableMapping );
final var typeConfiguration = sqlAstCreationState.getCreationContext().getTypeConfiguration();
final SqlSelection aggregateSelection = sqlExpressionResolver.resolveSqlSelection(
expression,
// Using the Object[] type here, so that a different JDBC extractor is chosen
typeConfiguration.getJavaTypeRegistry().resolveDescriptor( Object[].class ),
null,
typeConfiguration
);
this.discriminatorFetch = creationState.visitEmbeddableDiscriminatorFetch( this, true );
this.aggregateValuesArrayPositions = determineAggregateValuesArrayPositions( null, aggregateSelection );
resetFetches( creationState.visitNestedFetches( this ) );
this.containsAnyNonScalars = determineIfContainedAnyScalars( getFetches() );
}
@Override
public int[] getAggregateValuesArrayPositions() {
return aggregateValuesArrayPositions;
}
private static boolean determineIfContainedAnyScalars(ImmutableFetchList fetches) {
for ( Fetch fetch : fetches ) {
if ( fetch.containsAnyNonScalarResults() ) {
return true;
}
}
return false;
}
@Override
public String getResultVariable() {
return resultVariable;
}
@Override
public boolean containsAnyNonScalarResults() {
return containsAnyNonScalars;
}
@Override
public EmbeddableMappingType getFetchContainer() {
return this.fetchContainer;
}
@Override
public JavaType<?> getResultJavaType() {
return getReferencedMappingType().getJavaType();
}
@Override
public EmbeddableMappingType getReferencedMappingType() {
return getFetchContainer();
}
@Override
public EmbeddableValuedModelPart getReferencedMappingContainer() {
return getFetchContainer().getEmbeddedValueMapping();
}
@Override
public DomainResultAssembler<T> createResultAssembler(
InitializerParent<?> parent,
AssemblerCreationState creationState) {
//noinspection unchecked
return new EmbeddableAssembler( creationState.resolveInitializer( this, parent, this ).asEmbeddableInitializer() );
}
@Override
public Initializer<?> createInitializer(
AggregateEmbeddableResultImpl<T> resultGraphNode,
InitializerParent<?> parent,
AssemblerCreationState creationState) {
return resultGraphNode.createInitializer( parent, creationState );
}
@Override
public Initializer<?> createInitializer(InitializerParent<?> parent, AssemblerCreationState creationState) {
return new AggregateEmbeddableInitializerImpl(
this,
discriminatorFetch,
parent,
creationState,
true
);
}
}
| AggregateEmbeddableResultImpl |
java | quarkusio__quarkus | independent-projects/arc/processor/src/main/java/io/quarkus/arc/processor/BuiltinScope.java | {
"start": 480,
"end": 2414
} | enum ____ {
DEPENDENT(Dependent.class, false),
SINGLETON(Singleton.class, false),
APPLICATION(ApplicationScoped.class, true),
REQUEST(RequestScoped.class, true),
SESSION(SessionScoped.class, true),
CONVERSATION(ConversationScoped.class, true);
private ScopeInfo info;
private BuiltinScope(Class<? extends Annotation> clazz, boolean isNormal) {
this.info = new ScopeInfo(clazz, isNormal);
}
public ScopeInfo getInfo() {
return info;
}
public DotName getName() {
return info.getDotName();
}
public static BuiltinScope from(DotName scopeAnnotationName) {
for (BuiltinScope scope : BuiltinScope.values()) {
if (scope.getInfo().getDotName().equals(scopeAnnotationName)) {
return scope;
}
}
return null;
}
public static BuiltinScope from(ClassInfo clazz) {
for (BuiltinScope scope : BuiltinScope.values()) {
if (clazz.hasDeclaredAnnotation(scope.getName())) {
return scope;
}
}
return null;
}
public static boolean isDefault(ScopeInfo scope) {
return DEPENDENT.is(scope);
}
public boolean is(ScopeInfo scope) {
return getInfo().equals(scope);
}
public boolean isDeclaredBy(BeanInfo bean) {
return is(bean.getScope());
}
public static boolean isIn(Iterable<AnnotationInstance> annotations) {
for (AnnotationInstance annotation : annotations) {
if (from(annotation.name()) != null) {
return true;
}
}
return false;
}
public static boolean isDeclaredOn(ClassInfo clazz) {
for (BuiltinScope scope : BuiltinScope.values()) {
if (clazz.hasDeclaredAnnotation(scope.getName())) {
return true;
}
}
return false;
}
}
| BuiltinScope |
java | dropwizard__dropwizard | dropwizard-benchmarks/src/main/java/io/dropwizard/benchmarks/jersey/ConstraintViolationBenchmark.java | {
"start": 1198,
"end": 1466
} | class ____ {
public String paramFunc(@HeaderParam("cheese") @NotEmpty String secretSauce) {
return secretSauce;
}
public String objectFunc(@Valid Foo foo) {
return foo.toString();
}
}
public static | Resource |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/util/PlaceholderParser.java | {
"start": 11014,
"end": 13184
} | class ____ implements PlaceholderResolver {
private final String prefix;
private final String suffix;
private final boolean ignoreUnresolvablePlaceholders;
private final Function<String, @Nullable List<Part>> parser;
private final PlaceholderResolver resolver;
private @Nullable Set<String> visitedPlaceholders;
PartResolutionContext(PlaceholderResolver resolver, String prefix, String suffix,
boolean ignoreUnresolvablePlaceholders, Function<String, @Nullable List<Part>> parser) {
this.prefix = prefix;
this.suffix = suffix;
this.ignoreUnresolvablePlaceholders = ignoreUnresolvablePlaceholders;
this.parser = parser;
this.resolver = resolver;
}
@Override
public @Nullable String resolvePlaceholder(String placeholderName) {
String value = this.resolver.resolvePlaceholder(placeholderName);
if (value != null && logger.isTraceEnabled()) {
logger.trace("Resolved placeholder '" + placeholderName + "'");
}
return value;
}
public String handleUnresolvablePlaceholder(String key, String text) {
if (this.ignoreUnresolvablePlaceholders) {
return toPlaceholderText(key);
}
String originalValue = (!key.equals(text) ? toPlaceholderText(text) : null);
throw new PlaceholderResolutionException(
"Could not resolve placeholder '%s'".formatted(key), key, originalValue);
}
private String toPlaceholderText(String text) {
return this.prefix + text + this.suffix;
}
public @Nullable List<Part> parse(String text) {
return this.parser.apply(text);
}
public void flagPlaceholderAsVisited(String placeholder) {
if (this.visitedPlaceholders == null) {
this.visitedPlaceholders = new HashSet<>(4);
}
if (!this.visitedPlaceholders.add(placeholder)) {
throw new PlaceholderResolutionException(
"Circular placeholder reference '%s'".formatted(placeholder), placeholder, null);
}
}
public void removePlaceholder(String placeholder) {
if (this.visitedPlaceholders != null) {
this.visitedPlaceholders.remove(placeholder);
}
}
}
/**
* A part is a section of a String containing placeholders to replace.
*/
private | PartResolutionContext |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ser/TestRootType.java | {
"start": 1357,
"end": 1481
} | class ____ extends BaseClass398 {
public String property = "aa";
}
@JsonRootName("root")
static | TestClass398 |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/spi/TypeConverterRegistry.java | {
"start": 1616,
"end": 4010
} | interface ____ {
/**
* Number of noop attempts (no type conversion was needed)
*/
long getNoopCounter();
/**
* Number of type conversion attempts
*/
long getAttemptCounter();
/**
* Number of successful conversions
*/
long getHitCounter();
/**
* Number of attempts which cannot be converted as no suitable type converter exists
*/
long getMissCounter();
/**
* Number of failed attempts during type conversion
*/
long getFailedCounter();
/**
* Reset the counters
*/
void reset();
default void computeIfEnabled(LongSupplier supplier, LongConsumer consumer) {
consumer.accept(supplier.getAsLong());
}
}
/**
* Registers a new set of type converters that are bulked together into a single {@link BulkTypeConverters} class.
*/
void addBulkTypeConverters(BulkTypeConverters bulkTypeConverters);
/**
* Registers a new type converter.
* <p/>
* This method may throw {@link org.apache.camel.TypeConverterExistsException} if configured to fail if an existing
* type converter already exists
*
* @param toType the type to convert to
* @param fromType the type to convert from
* @param typeConverter the type converter to use
*/
void addTypeConverter(Class<?> toType, Class<?> fromType, TypeConverter typeConverter);
/**
* Removes the type converter
*
* @param toType the type to convert to
* @param fromType the type to convert from
* @return <tt>true</tt> if removed, <tt>false</tt> if the type converter didn't exist
*/
boolean removeTypeConverter(Class<?> toType, Class<?> fromType);
/**
* Registers all the type converters from the instance, each converter must be implemented as a method and annotated
* with {@link org.apache.camel.Converter}.
*
* @param typeConverters instance which implements the type converters
*/
void addTypeConverters(Object typeConverters);
/**
* Registers a new fallback type converter
*
* @param typeConverter the type converter to use
* @param canPromote whether or not the fallback type converter can be promoted to a first | Statistics |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableZipTest.java | {
"start": 62323,
"end": 67229
} | class ____ implements QueueSubscription<Integer>, Publisher<Integer> {
@Override
public int requestFusion(int mode) {
return mode & SYNC;
}
@Override
public boolean offer(Integer value) {
throw new UnsupportedOperationException();
}
@Override
public boolean offer(Integer v1, Integer v2) {
throw new UnsupportedOperationException();
}
@Override
public Integer poll() throws Exception {
throw new TestException();
}
@Override
public boolean isEmpty() {
return false;
}
@Override
public void clear() {
}
@Override
public void request(long n) {
}
@Override
public void cancel() {
}
@Override
public void subscribe(Subscriber<? super Integer> s) {
s.onSubscribe(this);
}
}
@Test
public void fusedInputThrows2() {
Flowable.zip(new ThrowingQueueSubscription(), Flowable.just(1), new BiFunction<Integer, Integer, Integer>() {
@Override
public Integer apply(Integer a, Integer b) throws Exception {
return a + b;
}
})
.test()
.assertFailure(TestException.class);
}
@Test
public void fusedInputThrows2Backpressured() {
Flowable.zip(new ThrowingQueueSubscription(), Flowable.just(1), new BiFunction<Integer, Integer, Integer>() {
@Override
public Integer apply(Integer a, Integer b) throws Exception {
return a + b;
}
})
.test(0)
.assertFailure(TestException.class);
}
@Test
public void cancelOnBackpressureBoundary() {
TestSubscriber<Integer> ts = new TestSubscriber<Integer>(1L) {
@Override
public void onNext(Integer t) {
super.onNext(t);
cancel();
onComplete();
}
};
Flowable.zip(Flowable.range(1, 2), Flowable.range(3, 2), new BiFunction<Integer, Integer, Integer>() {
@Override
public Integer apply(Integer a, Integer b) throws Exception {
return a + b;
}
})
.subscribe(ts);
ts.assertResult(4);
}
@Test
public void firstErrorPreventsSecondSubscription() {
final AtomicInteger counter = new AtomicInteger();
List<Flowable<Object>> flowableList = new ArrayList<>();
flowableList.add(Flowable.create(new FlowableOnSubscribe<Object>() {
@Override
public void subscribe(FlowableEmitter<Object> e)
throws Exception { throw new TestException(); }
}, BackpressureStrategy.MISSING));
flowableList.add(Flowable.create(new FlowableOnSubscribe<Object>() {
@Override
public void subscribe(FlowableEmitter<Object> e)
throws Exception { counter.getAndIncrement(); }
}, BackpressureStrategy.MISSING));
Flowable.zip(flowableList,
new Function<Object[], Object>() {
@Override
public Object apply(Object[] a) throws Exception {
return a;
}
})
.test()
.assertFailure(TestException.class)
;
assertEquals(0, counter.get());
}
@Test
public void publishersInIterable() {
Publisher<Integer> source = new Publisher<Integer>() {
@Override
public void subscribe(Subscriber<? super Integer> subscriber) {
Flowable.just(1).subscribe(subscriber);
}
};
Flowable.zip(Arrays.asList(source, source), new Function<Object[], Integer>() {
@Override
public Integer apply(Object[] t) throws Throwable {
return 2;
}
})
.test()
.assertResult(2);
}
@Test
public void fusedInnerPollCrashDelayError() {
Flowable.zip(
Flowable.range(1, 5),
Flowable.just(1)
.<Integer>map(v -> { throw new TestException(); })
.compose(TestHelper.flowableStripBoundary()),
(a, b) -> a + b, true
)
.test()
.assertFailure(TestException.class);
}
@Test
public void fusedInnerPollCrashRequestBoundaryDelayError() {
Flowable.zip(
Flowable.range(1, 5),
Flowable.just(1)
.<Integer>map(v -> { throw new TestException(); })
.compose(TestHelper.flowableStripBoundary()),
(a, b) -> a + b, true
)
.test(0L)
.assertFailure(TestException.class);
}
}
| ThrowingQueueSubscription |
java | junit-team__junit5 | junit-platform-engine/src/main/java/org/junit/platform/engine/support/hierarchical/NopLock.java | {
"start": 572,
"end": 1115
} | class ____ implements ResourceLock {
static final ResourceLock INSTANCE = new NopLock();
private NopLock() {
}
@Override
public List<ExclusiveResource> getResources() {
return emptyList();
}
@Override
public boolean tryAcquire() {
return true;
}
@Override
public ResourceLock acquire() {
return this;
}
@Override
public void release() {
// nothing to do
}
@Override
public boolean isExclusive() {
return false;
}
@Override
public String toString() {
return new ToStringBuilder(this).toString();
}
}
| NopLock |
java | elastic__elasticsearch | x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/GeoCentroidCalculatorExtraTests.java | {
"start": 823,
"end": 959
} | class ____ split in two, most moving to server, but one test remaining in xpack.spatial because it depends on GeoShapeValues.
*/
public | was |
java | spring-projects__spring-framework | spring-tx/src/test/java/org/springframework/transaction/reactive/TransactionalOperatorTests.java | {
"start": 1440,
"end": 7920
} | class ____ {
ReactiveTestTransactionManager tm = new ReactiveTestTransactionManager(false, true);
@Test
void commitWithMono() {
TransactionalOperator operator = TransactionalOperator.create(tm, new DefaultTransactionDefinition());
Mono.just(true).as(operator::transactional)
.as(StepVerifier::create)
.expectNext(true)
.verifyComplete();
assertThat(tm.commit).isTrue();
assertThat(tm.rollback).isFalse();
}
@Test
void monoSubscriptionNotCancelled() {
AtomicBoolean cancelled = new AtomicBoolean();
TransactionalOperator operator = TransactionalOperator.create(tm, new DefaultTransactionDefinition());
Mono.just(true).doOnCancel(() -> cancelled.set(true)).as(operator::transactional)
.as(StepVerifier::create)
.expectNext(true)
.verifyComplete();
assertThat(tm.commit).isTrue();
assertThat(tm.rollback).isFalse();
assertThat(cancelled).isFalse();
}
@Test
void cancellationPropagatedToMono() {
AtomicBoolean cancelled = new AtomicBoolean();
TransactionalOperator operator = TransactionalOperator.create(tm, new DefaultTransactionDefinition());
Mono.create(sink -> sink.onCancel(() -> cancelled.set(true))).as(operator::transactional)
.as(StepVerifier::create)
.thenAwait()
.thenCancel()
.verify();
assertThat(tm.commit).isFalse();
assertThat(tm.rollback).isTrue();
assertThat(cancelled).isTrue();
}
@Test
void cancellationPropagatedToFlux() {
AtomicBoolean cancelled = new AtomicBoolean();
TransactionalOperator operator = TransactionalOperator.create(tm, new DefaultTransactionDefinition());
Flux.create(sink -> sink.onCancel(() -> cancelled.set(true))).as(operator::transactional)
.as(StepVerifier::create)
.thenAwait()
.thenCancel()
.verify();
assertThat(tm.commit).isFalse();
assertThat(tm.rollback).isTrue();
assertThat(cancelled).isTrue();
}
@Test
void rollbackWithMono() {
TransactionalOperator operator = TransactionalOperator.create(tm, new DefaultTransactionDefinition());
Mono.error(new IllegalStateException()).as(operator::transactional)
.as(StepVerifier::create)
.verifyError(IllegalStateException.class);
assertThat(tm.commit).isFalse();
assertThat(tm.rollback).isTrue();
}
@Test
void commitFailureWithMono() {
ReactiveTransactionManager tm = mock(ReactiveTransactionManager.class);
given(tm.getReactiveTransaction(any())).willReturn(Mono.just(mock(ReactiveTransaction.class)));
PublisherProbe<Void> commit = PublisherProbe.of(Mono.error(IOException::new));
given(tm.commit(any())).willReturn(commit.mono());
PublisherProbe<Void> rollback = PublisherProbe.empty();
given(tm.rollback(any())).willReturn(rollback.mono());
TransactionalOperator operator = TransactionalOperator.create(tm, new DefaultTransactionDefinition());
Mono.just(true).as(operator::transactional)
.as(StepVerifier::create)
.verifyError(IOException.class);
assertThat(commit.subscribeCount()).isEqualTo(1);
rollback.assertWasNotSubscribed();
}
@Test
void rollbackFailureWithMono() {
ReactiveTransactionManager tm = mock(ReactiveTransactionManager.class);
given(tm.getReactiveTransaction(any())).willReturn(Mono.just(mock(ReactiveTransaction.class)));
PublisherProbe<Void> commit = PublisherProbe.empty();
given(tm.commit(any())).willReturn(commit.mono());
PublisherProbe<Void> rollback = PublisherProbe.of(Mono.error(IOException::new));
given(tm.rollback(any())).willReturn(rollback.mono());
TransactionalOperator operator = TransactionalOperator.create(tm, new DefaultTransactionDefinition());
IllegalStateException actionFailure = new IllegalStateException();
Mono.error(actionFailure).as(operator::transactional)
.as(StepVerifier::create)
.verifyErrorSatisfies(ex -> assertThat(ex)
.isInstanceOf(IOException.class)
.hasSuppressedException(actionFailure));
commit.assertWasNotSubscribed();
assertThat(rollback.subscribeCount()).isEqualTo(1);
}
@Test
void commitWithFlux() {
TransactionalOperator operator = TransactionalOperator.create(tm, new DefaultTransactionDefinition());
Flux.just(1, 2, 3, 4).as(operator::transactional)
.as(StepVerifier::create)
.expectNextCount(4)
.verifyComplete();
assertThat(tm.commit).isTrue();
assertThat(tm.rollback).isFalse();
}
@Test
void rollbackWithFlux() {
TransactionalOperator operator = TransactionalOperator.create(tm, new DefaultTransactionDefinition());
Flux.error(new IllegalStateException()).as(operator::transactional)
.as(StepVerifier::create)
.verifyError(IllegalStateException.class);
assertThat(tm.commit).isFalse();
assertThat(tm.rollback).isTrue();
}
@Test
void commitFailureWithFlux() {
ReactiveTransactionManager tm = mock(ReactiveTransactionManager.class);
given(tm.getReactiveTransaction(any())).willReturn(Mono.just(mock(ReactiveTransaction.class)));
PublisherProbe<Void> commit = PublisherProbe.of(Mono.error(IOException::new));
given(tm.commit(any())).willReturn(commit.mono());
PublisherProbe<Void> rollback = PublisherProbe.empty();
given(tm.rollback(any())).willReturn(rollback.mono());
TransactionalOperator operator = TransactionalOperator.create(tm, new DefaultTransactionDefinition());
Flux.just(1, 2, 3, 4).as(operator::transactional)
.as(StepVerifier::create)
.expectNextCount(4)
.verifyError(IOException.class);
assertThat(commit.subscribeCount()).isEqualTo(1);
rollback.assertWasNotSubscribed();
}
@Test
void rollbackFailureWithFlux() {
ReactiveTransactionManager tm = mock(ReactiveTransactionManager.class);
given(tm.getReactiveTransaction(any())).willReturn(Mono.just(mock(ReactiveTransaction.class)));
PublisherProbe<Void> commit = PublisherProbe.empty();
given(tm.commit(any())).willReturn(commit.mono());
PublisherProbe<Void> rollback = PublisherProbe.of(Mono.error(IOException::new));
given(tm.rollback(any())).willReturn(rollback.mono());
TransactionalOperator operator = TransactionalOperator.create(tm, new DefaultTransactionDefinition());
IllegalStateException actionFailure = new IllegalStateException();
Flux.just(1, 2, 3).concatWith(Flux.error(actionFailure)).as(operator::transactional)
.as(StepVerifier::create)
.expectNextCount(3)
.verifyErrorSatisfies(ex -> assertThat(ex)
.isInstanceOf(IOException.class)
.hasSuppressedException(actionFailure));
commit.assertWasNotSubscribed();
assertThat(rollback.subscribeCount()).isEqualTo(1);
}
}
| TransactionalOperatorTests |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/AutoValueBoxedValuesTest.java | {
"start": 29346,
"end": 29577
} | class ____ {
public abstract long getLongId();
public abstract boolean isBooleanId();
public abstract Builder toBuilder();
@AutoValue.Builder
abstract static | Test |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/authentication/jaas/AuthorityGranter.java | {
"start": 1050,
"end": 1841
} | interface ____ {
/**
* The grant method is called for each principal returned from the LoginContext
* subject. If the AuthorityGranter wishes to grant any authorities, it should return
* a java.util.Set containing the role names it wishes to grant, such as ROLE_USER. If
* the AuthorityGranter does not wish to grant any authorities it should return null.
* <p>
* The set may contain any object as all objects in the returned set will be passed to
* the JaasGrantedAuthority constructor using toString().
* @param principal One of the principals from the
* LoginContext.getSubject().getPrincipals() method.
* @return the role names to grant, or null, meaning no roles should be granted to the
* principal.
*/
Set<String> grant(Principal principal);
}
| AuthorityGranter |
java | quarkusio__quarkus | integration-tests/hibernate-search-orm-elasticsearch/src/test/java/io/quarkus/it/hibernate/search/orm/elasticsearch/devservices/HibernateSearchElasticsearchDevServicesDisabledImplicitlyTest.java | {
"start": 840,
"end": 3145
} | class ____ implements QuarkusTestProfile {
@Override
public Map<String, String> getConfigOverrides() {
return Map.of(
// Make sure quarkus.hibernate-search-orm.elasticsearch.hosts is set,
// so that Quarkus detects disables Elasticsearch dev-services implicitly.
"quarkus.hibernate-search-orm.elasticsearch.hosts", EXPLICIT_HOSTS,
// Ensure we can work offline, because the host we set just above does not actually exist.
"quarkus.hibernate-search-orm.schema-management.strategy", "none",
"quarkus.hibernate-search-orm.elasticsearch.version-check.enabled", "false",
// When disabling the version check we need to set a more precise version
// than what we have in application.properties.
// But here it doesn't matter as we won't send a request to Elasticsearch anyway,
// so we're free to put anything.
// Just make sure to set something consistent with what we have in application.properties.
"quarkus.hibernate-search-orm.elasticsearch.version", "9.1");
}
@Override
public String getConfigProfile() {
// Don't use %test properties;
// that way, we can control whether quarkus.hibernate-search-orm.elasticsearch.hosts is set or not.
// In this test, we DO set quarkus.hibernate-search-orm.elasticsearch.hosts (see above).
return "someotherprofile";
}
}
DevServicesContext context;
@Test
public void testDevServicesProperties() {
assertThat(context.devServicesProperties())
.doesNotContainKey("quarkus.hibernate-search-orm.elasticsearch.hosts");
}
@Test
public void testHibernateSearch() {
RestAssured.when().get("/test/dev-services/hosts").then()
.statusCode(200)
.body(is(EXPLICIT_HOSTS));
// We don't test Hibernate Search features (indexing, search) here,
// because we're not sure that there is a host that Hibernate Search can talk to.
// It's fine, though: we checked that Hibernate Search is configured as intended.
}
}
| Profile |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobFinishEvent.java | {
"start": 1050,
"end": 1261
} | enum ____ {
STATE_CHANGED
}
private JobId jobID;
public JobFinishEvent(JobId jobID) {
super(Type.STATE_CHANGED);
this.jobID = jobID;
}
public JobId getJobId() {
return jobID;
}
}
| Type |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/main/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/writer/DocumentStoreWriter.java | {
"start": 1186,
"end": 1399
} | interface ____<Document> extends AutoCloseable {
void createDatabase();
void createCollection(String collectionName);
void writeDocument(Document document, CollectionType collectionType);
} | DocumentStoreWriter |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/FtpsEndpointBuilderFactory.java | {
"start": 140400,
"end": 174239
} | interface ____
extends
EndpointProducerBuilder {
default AdvancedFtpsEndpointProducerBuilder advanced() {
return (AdvancedFtpsEndpointProducerBuilder) this;
}
/**
* Specifies the file transfer mode, BINARY or ASCII. Default is ASCII
* (false).
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param binary the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder binary(boolean binary) {
doSetProperty("binary", binary);
return this;
}
/**
* Specifies the file transfer mode, BINARY or ASCII. Default is ASCII
* (false).
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param binary the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder binary(String binary) {
doSetProperty("binary", binary);
return this;
}
/**
* This option is used to specify the encoding of the file. You can use
* this on the consumer, to specify the encodings of the files, which
* allow Camel to know the charset it should load the file content in
* case the file content is being accessed. Likewise when writing a
* file, you can use this option to specify which charset to write the
* file as well. Do mind that when writing the file Camel may have to
* read the message content into memory to be able to convert the data
* into the configured charset, so do not use this if you have big
* messages.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param charset the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder charset(String charset) {
doSetProperty("charset", charset);
return this;
}
/**
* Whether or not to disconnect from remote FTP server right after use.
* Disconnect will only disconnect the current connection to the FTP
* server. If you have a consumer which you want to stop, then you need
* to stop the consumer/route instead.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param disconnect the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder disconnect(boolean disconnect) {
doSetProperty("disconnect", disconnect);
return this;
}
/**
* Whether or not to disconnect from remote FTP server right after use.
* Disconnect will only disconnect the current connection to the FTP
* server. If you have a consumer which you want to stop, then you need
* to stop the consumer/route instead.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param disconnect the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder disconnect(String disconnect) {
doSetProperty("disconnect", disconnect);
return this;
}
/**
* Producer: If provided, then Camel will write a 2nd done file when the
* original file has been written. The done file will be empty. This
* option configures what file name to use. Either you can specify a
* fixed name. Or you can use dynamic placeholders. The done file will
* always be written in the same folder as the original file. Consumer:
* If provided, Camel will only consume files if a done file exists.
* This option configures what file name to use. Either you can specify
* a fixed name. Or you can use dynamic placeholders.The done file is
* always expected in the same folder as the original file. Only
* ${file.name} and ${file.name.next} is supported as dynamic
* placeholders.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param doneFileName the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder doneFileName(String doneFileName) {
doSetProperty("doneFileName", doneFileName);
return this;
}
/**
* Use Expression such as File Language to dynamically set the filename.
* For consumers, it's used as a filename filter. For producers, it's
* used to evaluate the filename to write. If an expression is set, it
* take precedence over the CamelFileName header. (Note: The header
* itself can also be an Expression). The expression options support
* both String and Expression types. If the expression is a String type,
* it is always evaluated using the File Language. If the expression is
* an Expression type, the specified Expression type is used - this
* allows you, for instance, to use OGNL expressions. For the consumer,
* you can use it to filter filenames, so you can for instance consume
* today's file using the File Language syntax:
* mydata-${date:now:yyyyMMdd}.txt. The producers support the
* CamelOverruleFileName header which takes precedence over any existing
* CamelFileName header; the CamelOverruleFileName is a header that is
* used only once, and makes it easier as this avoids to temporary store
* CamelFileName and have to restore it afterwards.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: common
*
* @param fileName the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder fileName(String fileName) {
doSetProperty("fileName", fileName);
return this;
}
/**
* Sets passive mode connections. Default is active mode connections.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param passiveMode the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder passiveMode(boolean passiveMode) {
doSetProperty("passiveMode", passiveMode);
return this;
}
/**
* Sets passive mode connections. Default is active mode connections.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param passiveMode the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder passiveMode(String passiveMode) {
doSetProperty("passiveMode", passiveMode);
return this;
}
/**
* Sets the path separator to be used. UNIX = Uses unix style path
* separator Windows = Uses windows style path separator Auto = (is
* default) Use existing path separator in file name.
*
* The option is a:
* <code>org.apache.camel.component.file.remote.RemoteFileConfiguration.PathSeparator</code> type.
*
* Default: UNIX
* Group: common
*
* @param separator the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder separator(org.apache.camel.component.file.remote.RemoteFileConfiguration.PathSeparator separator) {
doSetProperty("separator", separator);
return this;
}
/**
* Sets the path separator to be used. UNIX = Uses unix style path
* separator Windows = Uses windows style path separator Auto = (is
* default) Use existing path separator in file name.
*
* The option will be converted to a
* <code>org.apache.camel.component.file.remote.RemoteFileConfiguration.PathSeparator</code> type.
*
* Default: UNIX
* Group: common
*
* @param separator the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder separator(String separator) {
doSetProperty("separator", separator);
return this;
}
/**
* Configures the interval in seconds to use when logging the progress
* of upload and download operations that are in-flight. This is used
* for logging progress when operations take a longer time.
*
* The option is a: <code>int</code> type.
*
* Default: 5
* Group: common
*
* @param transferLoggingIntervalSeconds the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder transferLoggingIntervalSeconds(int transferLoggingIntervalSeconds) {
doSetProperty("transferLoggingIntervalSeconds", transferLoggingIntervalSeconds);
return this;
}
/**
* Configures the interval in seconds to use when logging the progress
* of upload and download operations that are in-flight. This is used
* for logging progress when operations take a longer time.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 5
* Group: common
*
* @param transferLoggingIntervalSeconds the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder transferLoggingIntervalSeconds(String transferLoggingIntervalSeconds) {
doSetProperty("transferLoggingIntervalSeconds", transferLoggingIntervalSeconds);
return this;
}
/**
* Configure the logging level to use when logging the progress of
* upload and download operations.
*
* The option is a: <code>org.apache.camel.LoggingLevel</code> type.
*
* Default: DEBUG
* Group: common
*
* @param transferLoggingLevel the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder transferLoggingLevel(org.apache.camel.LoggingLevel transferLoggingLevel) {
doSetProperty("transferLoggingLevel", transferLoggingLevel);
return this;
}
/**
* Configure the logging level to use when logging the progress of
* upload and download operations.
*
* The option will be converted to a
* <code>org.apache.camel.LoggingLevel</code> type.
*
* Default: DEBUG
* Group: common
*
* @param transferLoggingLevel the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder transferLoggingLevel(String transferLoggingLevel) {
doSetProperty("transferLoggingLevel", transferLoggingLevel);
return this;
}
/**
* Configures whether perform verbose (fine-grained) logging of the
* progress of upload and download operations.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param transferLoggingVerbose the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder transferLoggingVerbose(boolean transferLoggingVerbose) {
doSetProperty("transferLoggingVerbose", transferLoggingVerbose);
return this;
}
/**
* Configures whether perform verbose (fine-grained) logging of the
* progress of upload and download operations.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: common
*
* @param transferLoggingVerbose the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder transferLoggingVerbose(String transferLoggingVerbose) {
doSetProperty("transferLoggingVerbose", transferLoggingVerbose);
return this;
}
/**
* If provided, then Camel will write a checksum file when the original
* file has been written. The checksum file will contain the checksum
* created with the provided algorithm for the original file. The
* checksum file will always be written in the same folder as the
* original file.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param checksumFileAlgorithm the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder checksumFileAlgorithm(String checksumFileAlgorithm) {
doSetProperty("checksumFileAlgorithm", checksumFileAlgorithm);
return this;
}
/**
* What to do if a file already exists with the same name. Override,
* which is the default, replaces the existing file. - Append - adds
* content to the existing file. - Fail - throws a
* GenericFileOperationException, indicating that there is already an
* existing file. - Ignore - silently ignores the problem and does not
* override the existing file, but assumes everything is okay. - Move -
* option requires to use the moveExisting option to be configured as
* well. The option eagerDeleteTargetFile can be used to control what to
* do if an moving the file, and there exists already an existing file,
* otherwise causing the move operation to fail. The Move option will
* move any existing files, before writing the target file. - TryRename
* is only applicable if tempFileName option is in use. This allows to
* try renaming the file from the temporary name to the actual name,
* without doing any exists check. This check may be faster on some file
* systems and especially FTP servers.
*
* The option is a:
* <code>org.apache.camel.component.file.GenericFileExist</code> type.
*
* Default: Override
* Group: producer
*
* @param fileExist the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder fileExist(org.apache.camel.component.file.GenericFileExist fileExist) {
doSetProperty("fileExist", fileExist);
return this;
}
/**
* What to do if a file already exists with the same name. Override,
* which is the default, replaces the existing file. - Append - adds
* content to the existing file. - Fail - throws a
* GenericFileOperationException, indicating that there is already an
* existing file. - Ignore - silently ignores the problem and does not
* override the existing file, but assumes everything is okay. - Move -
* option requires to use the moveExisting option to be configured as
* well. The option eagerDeleteTargetFile can be used to control what to
* do if an moving the file, and there exists already an existing file,
* otherwise causing the move operation to fail. The Move option will
* move any existing files, before writing the target file. - TryRename
* is only applicable if tempFileName option is in use. This allows to
* try renaming the file from the temporary name to the actual name,
* without doing any exists check. This check may be faster on some file
* systems and especially FTP servers.
*
* The option will be converted to a
* <code>org.apache.camel.component.file.GenericFileExist</code> type.
*
* Default: Override
* Group: producer
*
* @param fileExist the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder fileExist(String fileExist) {
doSetProperty("fileExist", fileExist);
return this;
}
/**
* Flatten is used to flatten the file name path to strip any leading
* paths, so it's just the file name. This allows you to consume
* recursively into sub-directories, but when you eg write the files to
* another directory they will be written in a single directory. Setting
* this to true on the producer enforces that any file name in
* CamelFileName header will be stripped for any leading paths.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param flatten the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder flatten(boolean flatten) {
doSetProperty("flatten", flatten);
return this;
}
/**
* Flatten is used to flatten the file name path to strip any leading
* paths, so it's just the file name. This allows you to consume
* recursively into sub-directories, but when you eg write the files to
* another directory they will be written in a single directory. Setting
* this to true on the producer enforces that any file name in
* CamelFileName header will be stripped for any leading paths.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param flatten the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder flatten(String flatten) {
doSetProperty("flatten", flatten);
return this;
}
/**
* Used for jailing (restricting) writing files to the starting
* directory (and sub) only. This is enabled by default to not allow
* Camel to write files to outside directories (to be more secured out
* of the box). You can turn this off to allow writing files to
* directories outside the starting directory, such as parent or root
* folders.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: producer
*
* @param jailStartingDirectory the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder jailStartingDirectory(boolean jailStartingDirectory) {
doSetProperty("jailStartingDirectory", jailStartingDirectory);
return this;
}
/**
* Used for jailing (restricting) writing files to the starting
* directory (and sub) only. This is enabled by default to not allow
* Camel to write files to outside directories (to be more secured out
* of the box). You can turn this off to allow writing files to
* directories outside the starting directory, such as parent or root
* folders.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: true
* Group: producer
*
* @param jailStartingDirectory the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder jailStartingDirectory(String jailStartingDirectory) {
doSetProperty("jailStartingDirectory", jailStartingDirectory);
return this;
}
/**
* Expression (such as File Language) used to compute file name to use
* when fileExist=Move is configured. To move files into a backup
* subdirectory just enter backup. This option only supports the
* following File Language tokens: file:name, file:name.ext,
* file:name.noext, file:onlyname, file:onlyname.noext, file:ext, and
* file:parent. Notice the file:parent is not supported by the FTP
* component, as the FTP component can only move any existing files to a
* relative directory based on current dir as base.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param moveExisting the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder moveExisting(String moveExisting) {
doSetProperty("moveExisting", moveExisting);
return this;
}
/**
* The same as tempPrefix option but offering a more fine grained
* control on the naming of the temporary filename as it uses the File
* Language. The location for tempFilename is relative to the final file
* location in the option 'fileName', not the target directory in the
* base uri. For example if option fileName includes a directory prefix:
* dir/finalFilename then tempFileName is relative to that subdirectory
* dir.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param tempFileName the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder tempFileName(String tempFileName) {
doSetProperty("tempFileName", tempFileName);
return this;
}
/**
* This option is used to write the file using a temporary name and
* then, after the write is complete, rename it to the real name. Can be
* used to identify files being written and also avoid consumers (not
* using exclusive read locks) reading in progress files. Is often used
* by FTP when uploading big files.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param tempPrefix the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder tempPrefix(String tempPrefix) {
doSetProperty("tempPrefix", tempPrefix);
return this;
}
/**
* Account to use for login.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param account the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder account(String account) {
doSetProperty("account", account);
return this;
}
/**
* Use this option to disable default options when using secure data
* channel. This allows you to be in full control what the execPbsz and
* execProt setting should be used. Default is false.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param disableSecureDataChannelDefaults the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder disableSecureDataChannelDefaults(boolean disableSecureDataChannelDefaults) {
doSetProperty("disableSecureDataChannelDefaults", disableSecureDataChannelDefaults);
return this;
}
/**
* Use this option to disable default options when using secure data
* channel. This allows you to be in full control what the execPbsz and
* execProt setting should be used. Default is false.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param disableSecureDataChannelDefaults the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder disableSecureDataChannelDefaults(String disableSecureDataChannelDefaults) {
doSetProperty("disableSecureDataChannelDefaults", disableSecureDataChannelDefaults);
return this;
}
/**
* When using secure data channel you can set the exec protection buffer
* size.
*
* The option is a: <code>java.lang.Long</code> type.
*
* Group: security
*
* @param execPbsz the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder execPbsz(Long execPbsz) {
doSetProperty("execPbsz", execPbsz);
return this;
}
/**
* When using secure data channel you can set the exec protection buffer
* size.
*
* The option will be converted to a <code>java.lang.Long</code> type.
*
* Group: security
*
* @param execPbsz the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder execPbsz(String execPbsz) {
doSetProperty("execPbsz", execPbsz);
return this;
}
/**
* The exec protection level PROT command. C - Clear S - Safe(SSL
* protocol only) E - Confidential(SSL protocol only) P - Private.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param execProt the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder execProt(String execProt) {
doSetProperty("execProt", execProt);
return this;
}
/**
* Set the key store parameters. This is a multi-value option with
* prefix: ftpClient.keyStore.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
* The option is multivalued, and you can use the
* ftpClientKeyStoreParameters(String, Object) method to add a value
* (call the method multiple times to set more values).
*
* Group: security
*
* @param key the option key
* @param value the option value
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder ftpClientKeyStoreParameters(String key, Object value) {
doSetMultiValueProperty("ftpClientKeyStoreParameters", "ftpClient.keyStore." + key, value);
return this;
}
/**
* Set the key store parameters. This is a multi-value option with
* prefix: ftpClient.keyStore.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
* The option is multivalued, and you can use the
* ftpClientKeyStoreParameters(String, Object) method to add a value
* (call the method multiple times to set more values).
*
* Group: security
*
* @param values the values
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder ftpClientKeyStoreParameters(Map values) {
doSetMultiValueProperties("ftpClientKeyStoreParameters", "ftpClient.keyStore.", values);
return this;
}
/**
* Set the trust store parameters. This is a multi-value option with
* prefix: ftpClient.trustStore.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
* The option is multivalued, and you can use the
* ftpClientTrustStoreParameters(String, Object) method to add a value
* (call the method multiple times to set more values).
*
* Group: security
*
* @param key the option key
* @param value the option value
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder ftpClientTrustStoreParameters(String key, Object value) {
doSetMultiValueProperty("ftpClientTrustStoreParameters", "ftpClient.trustStore." + key, value);
return this;
}
/**
* Set the trust store parameters. This is a multi-value option with
* prefix: ftpClient.trustStore.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
* The option is multivalued, and you can use the
* ftpClientTrustStoreParameters(String, Object) method to add a value
* (call the method multiple times to set more values).
*
* Group: security
*
* @param values the values
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder ftpClientTrustStoreParameters(Map values) {
doSetMultiValueProperties("ftpClientTrustStoreParameters", "ftpClient.trustStore.", values);
return this;
}
/**
* Set the security mode (Implicit/Explicit). true - Implicit Mode /
* False - Explicit Mode.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param implicit the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder implicit(boolean implicit) {
doSetProperty("implicit", implicit);
return this;
}
/**
* Set the security mode (Implicit/Explicit). true - Implicit Mode /
* False - Explicit Mode.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param implicit the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder implicit(String implicit) {
doSetProperty("implicit", implicit);
return this;
}
/**
* Password to use for login.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param password the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder password(String password) {
doSetProperty("password", password);
return this;
}
/**
* Set the underlying security protocol.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: TLSv1.3
* Group: security
*
* @param securityProtocol the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder securityProtocol(String securityProtocol) {
doSetProperty("securityProtocol", securityProtocol);
return this;
}
/**
* Gets the JSSE configuration that overrides any settings in
* FtpsEndpoint#ftpClientKeyStoreParameters,
* ftpClientTrustStoreParameters, and
* FtpsConfiguration#getSecurityProtocol().
*
* The option is a:
* <code>org.apache.camel.support.jsse.SSLContextParameters</code> type.
*
* Group: security
*
* @param sslContextParameters the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder sslContextParameters(org.apache.camel.support.jsse.SSLContextParameters sslContextParameters) {
doSetProperty("sslContextParameters", sslContextParameters);
return this;
}
/**
* Gets the JSSE configuration that overrides any settings in
* FtpsEndpoint#ftpClientKeyStoreParameters,
* ftpClientTrustStoreParameters, and
* FtpsConfiguration#getSecurityProtocol().
*
* The option will be converted to a
* <code>org.apache.camel.support.jsse.SSLContextParameters</code> type.
*
* Group: security
*
* @param sslContextParameters the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder sslContextParameters(String sslContextParameters) {
doSetProperty("sslContextParameters", sslContextParameters);
return this;
}
/**
* Username to use for login.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param username the value to set
* @return the dsl builder
*/
default FtpsEndpointProducerBuilder username(String username) {
doSetProperty("username", username);
return this;
}
}
/**
* Advanced builder for endpoint producers for the FTPS component.
*/
public | FtpsEndpointProducerBuilder |
java | spring-projects__spring-security | core/src/main/java/org/springframework/security/authorization/method/AuthorizationAdvisorProxyFactory.java | {
"start": 2165,
"end": 2804
} | class ____ {
* @PreAuthorize("hasAuthority('bar:read')")
* String bar() { ... }
* }
* </pre>
*
* Use {@link AuthorizationAdvisorProxyFactory} to wrap the instance in Spring Security's
* {@link org.springframework.security.access.prepost.PreAuthorize} method interceptor
* like so:
*
* <pre>
* AuthorizationProxyFactory proxyFactory = AuthorizationAdvisorProxyFactory.withDefaults();
* Foo foo = new Foo();
* foo.bar(); // passes
* Foo securedFoo = proxyFactory.proxy(foo);
* securedFoo.bar(); // access denied!
* </pre>
*
* @author Josh Cummings
* @since 6.3
*/
public final | Foo |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialEvaluatorFactory.java | {
"start": 3035,
"end": 3346
} | interface ____ a supplier of the key information needed by the spatial evaluator factories.
* The SpatialRelatesFunction will use this to supply the necessary information to the factories.
* When we need to swap left and right sides around, we can use a SwappableSpatialSourceSupplier.
*/
| defines |
java | google__dagger | javatests/artifacts/dagger-ksp/transitive-annotation-app/library1/src/main/java/library1/MyBaseComponent.java | {
"start": 707,
"end": 1035
} | class ____ to test that Dagger won't fail on unresolvable transitive types used in non-dagger
* related elements and annotations.
*/
// TODO(b/219587431): Support @MyTransitiveAnnotation (We shouldn't need scope/qualifier here).
@MyAnnotation(MyTransitiveType.VALUE)
@MyOtherAnnotation(MyTransitiveType.class)
public abstract | used |
java | elastic__elasticsearch | x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/lucene70/Lucene70DocValuesProducer.java | {
"start": 13768,
"end": 14169
} | class ____ extends TermsDictEntry {
SortedEntry singleValueEntry;
long docsWithFieldOffset;
long docsWithFieldLength;
int numDocsWithField;
byte bitsPerValue;
long ordsOffset;
long ordsLength;
LegacyDirectMonotonicReader.Meta addressesMeta;
long addressesOffset;
long addressesLength;
}
private static | SortedSetEntry |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/interfaces/relation/ISetRefEdEntity.java | {
"start": 230,
"end": 353
} | interface ____ {
Integer getId();
void setId(Integer id);
String getData();
void setData(String data);
}
| ISetRefEdEntity |
java | spring-projects__spring-security | access/src/main/java/org/springframework/security/access/vote/RoleHierarchyVoter.java | {
"start": 1382,
"end": 1979
} | class ____ extends RoleVoter {
@SuppressWarnings("NullAway")
private @Nullable RoleHierarchy roleHierarchy = null;
public RoleHierarchyVoter(RoleHierarchy roleHierarchy) {
Assert.notNull(roleHierarchy, "RoleHierarchy must not be null");
this.roleHierarchy = roleHierarchy;
}
/**
* Calls the <tt>RoleHierarchy</tt> to obtain the complete set of user authorities.
*/
@Override
Collection<? extends GrantedAuthority> extractAuthorities(Authentication authentication) {
return this.roleHierarchy.getReachableGrantedAuthorities(authentication.getAuthorities());
}
}
| RoleHierarchyVoter |
java | apache__rocketmq | store/src/test/java/org/apache/rocketmq/store/DefaultMessageStoreCleanFilesTest.java | {
"start": 2442,
"end": 24401
} | class ____ {
private DefaultMessageStore messageStore;
private DefaultMessageStore.CleanCommitLogService cleanCommitLogService;
private ConsumeQueueStore.CleanConsumeQueueService cleanConsumeQueueService;
private SocketAddress bornHost;
private SocketAddress storeHost;
private String topic = "test";
private String keys = "hello";
private int queueId = 0;
private int fileCountCommitLog = 55;
// exactly one message per CommitLog file.
private int msgCount = fileCountCommitLog;
private int mappedFileSize = 128;
private int fileReservedTime = 1;
@Before
public void init() throws Exception {
storeHost = new InetSocketAddress(InetAddress.getLocalHost(), 8123);
bornHost = new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 0);
}
@Test
public void testIsSpaceFullFunctionEmpty2Full() throws Exception {
String deleteWhen = "04";
// the min value of diskMaxUsedSpaceRatio.
int diskMaxUsedSpaceRatio = 1;
// used to set disk-full flag
double diskSpaceCleanForciblyRatio = 0.01D;
initMessageStore(deleteWhen, diskMaxUsedSpaceRatio, diskSpaceCleanForciblyRatio);
// build and put 55 messages, exactly one message per CommitLog file.
buildAndPutMessagesToMessageStore(msgCount);
MappedFileQueue commitLogQueue = getMappedFileQueueCommitLog();
assertEquals(fileCountCommitLog, commitLogQueue.getMappedFiles().size());
int fileCountConsumeQueue = getFileCountConsumeQueue();
MappedFileQueue consumeQueue = getMappedFileQueueConsumeQueue();
assertEquals(fileCountConsumeQueue, consumeQueue.getMappedFiles().size());
cleanCommitLogService.isSpaceFull();
assertEquals(1 << 4, messageStore.getRunningFlags().getFlagBits() & (1 << 4));
}
@Test
public void testIsSpaceFullMultiCommitLogStorePath() throws Exception {
String deleteWhen = "04";
// the min value of diskMaxUsedSpaceRatio.
int diskMaxUsedSpaceRatio = 1;
// used to set disk-full flag
double diskSpaceCleanForciblyRatio = 0.01D;
MessageStoreConfig config = genMessageStoreConfig(deleteWhen, diskMaxUsedSpaceRatio);
String storePath = config.getStorePathCommitLog();
StringBuilder storePathBuilder = new StringBuilder();
for (int i = 0; i < 3; i++) {
storePathBuilder.append(storePath).append(i).append(MixAll.MULTI_PATH_SPLITTER);
}
config.setStorePathCommitLog(storePathBuilder.toString());
String[] paths = config.getStorePathCommitLog().trim().split(MixAll.MULTI_PATH_SPLITTER);
assertEquals(3, paths.length);
initMessageStore(config, diskSpaceCleanForciblyRatio);
// build and put 55 messages, exactly one message per CommitLog file.
buildAndPutMessagesToMessageStore(msgCount);
MappedFileQueue commitLogQueue = getMappedFileQueueCommitLog();
assertEquals(fileCountCommitLog, commitLogQueue.getMappedFiles().size());
int fileCountConsumeQueue = getFileCountConsumeQueue();
MappedFileQueue consumeQueue = getMappedFileQueueConsumeQueue();
assertEquals(fileCountConsumeQueue, consumeQueue.getMappedFiles().size());
cleanCommitLogService.isSpaceFull();
assertEquals(1 << 4, messageStore.getRunningFlags().getFlagBits() & (1 << 4));
}
@Test
public void testIsSpaceFullFunctionFull2Empty() throws Exception {
String deleteWhen = "04";
// the min value of diskMaxUsedSpaceRatio.
int diskMaxUsedSpaceRatio = 1;
//use to reset disk-full flag
double diskSpaceCleanForciblyRatio = 0.999D;
initMessageStore(deleteWhen, diskMaxUsedSpaceRatio, diskSpaceCleanForciblyRatio);
//set disk full
messageStore.getRunningFlags().getAndMakeDiskFull();
cleanCommitLogService.isSpaceFull();
assertEquals(0, messageStore.getRunningFlags().getFlagBits() & (1 << 4));
}
@Test
public void testDeleteExpiredFilesByTimeUp() throws Exception {
String deleteWhen = Calendar.getInstance().get(Calendar.HOUR_OF_DAY) + "";
// the max value of diskMaxUsedSpaceRatio
int diskMaxUsedSpaceRatio = 99;
// used to ensure that automatic file deletion is not triggered
double diskSpaceCleanForciblyRatio = 0.999D;
initMessageStore(deleteWhen, diskMaxUsedSpaceRatio, diskSpaceCleanForciblyRatio);
// build and put 55 messages, exactly one message per CommitLog file.
buildAndPutMessagesToMessageStore(msgCount);
// undo comment out the code below, if want to debug this case rather than just run it.
// Thread.sleep(1000 * 60 + 100);
MappedFileQueue commitLogQueue = getMappedFileQueueCommitLog();
assertEquals(fileCountCommitLog, commitLogQueue.getMappedFiles().size());
int fileCountConsumeQueue = getFileCountConsumeQueue();
MappedFileQueue consumeQueue = getMappedFileQueueConsumeQueue();
assertEquals(fileCountConsumeQueue, consumeQueue.getMappedFiles().size());
int fileCountIndexFile = getFileCountIndexFile();
assertEquals(fileCountIndexFile, getIndexFileList().size());
int expireFileCount = 15;
expireFiles(commitLogQueue, expireFileCount);
// magic code 10 reference to MappedFileQueue#DELETE_FILES_BATCH_MAX
for (int a = 1, fileCount = expireFileCount; a <= (int) Math.ceil((double) expireFileCount / 10); a++, fileCount -= 10) {
cleanCommitLogService.run();
cleanConsumeQueueService.run();
int expectDeletedCount = fileCount >= 10 ? a * 10 : ((a - 1) * 10 + fileCount);
assertEquals(fileCountCommitLog - expectDeletedCount, commitLogQueue.getMappedFiles().size());
int msgCountPerFile = getMsgCountPerConsumeQueueMappedFile();
int expectDeleteCountConsumeQueue = (int) Math.floor((double) expectDeletedCount / msgCountPerFile);
assertEquals(fileCountConsumeQueue - expectDeleteCountConsumeQueue, consumeQueue.getMappedFiles().size());
int msgCountPerIndexFile = getMsgCountPerIndexFile();
int expectDeleteCountIndexFile = (int) Math.floor((double) expectDeletedCount / msgCountPerIndexFile);
assertEquals(fileCountIndexFile - expectDeleteCountIndexFile, getIndexFileList().size());
}
}
@Test
public void testDeleteExpiredFilesBySpaceFull() throws Exception {
String deleteWhen = "04";
// the min value of diskMaxUsedSpaceRatio.
int diskMaxUsedSpaceRatio = 1;
// used to ensure that automatic file deletion is not triggered
double diskSpaceCleanForciblyRatio = 0.999D;
initMessageStore(deleteWhen, diskMaxUsedSpaceRatio, diskSpaceCleanForciblyRatio);
// build and put 55 messages, exactly one message per CommitLog file.
buildAndPutMessagesToMessageStore(msgCount);
// undo comment out the code below, if want to debug this case rather than just run it.
// Thread.sleep(1000 * 60 + 100);
MappedFileQueue commitLogQueue = getMappedFileQueueCommitLog();
assertEquals(fileCountCommitLog, commitLogQueue.getMappedFiles().size());
int fileCountConsumeQueue = getFileCountConsumeQueue();
MappedFileQueue consumeQueue = getMappedFileQueueConsumeQueue();
assertEquals(fileCountConsumeQueue, consumeQueue.getMappedFiles().size());
int fileCountIndexFile = getFileCountIndexFile();
assertEquals(fileCountIndexFile, getIndexFileList().size());
int expireFileCount = 15;
expireFiles(commitLogQueue, expireFileCount);
// magic code 10 reference to MappedFileQueue#DELETE_FILES_BATCH_MAX
for (int a = 1, fileCount = expireFileCount; a <= (int) Math.ceil((double) expireFileCount / 10); a++, fileCount -= 10) {
cleanCommitLogService.run();
cleanConsumeQueueService.run();
int expectDeletedCount = fileCount >= 10 ? a * 10 : ((a - 1) * 10 + fileCount);
assertEquals(fileCountCommitLog - expectDeletedCount, commitLogQueue.getMappedFiles().size());
int msgCountPerFile = getMsgCountPerConsumeQueueMappedFile();
int expectDeleteCountConsumeQueue = (int) Math.floor((double) expectDeletedCount / msgCountPerFile);
assertEquals(fileCountConsumeQueue - expectDeleteCountConsumeQueue, consumeQueue.getMappedFiles().size());
int msgCountPerIndexFile = getMsgCountPerIndexFile();
int expectDeleteCountIndexFile = (int) Math.floor((double) expectDeletedCount / msgCountPerIndexFile);
assertEquals(fileCountIndexFile - expectDeleteCountIndexFile, getIndexFileList().size());
}
}
@Test
public void testDeleteFilesImmediatelyBySpaceFull() throws Exception {
String deleteWhen = "04";
// the min value of diskMaxUsedSpaceRatio.
int diskMaxUsedSpaceRatio = 1;
// make sure to trigger the automatic file deletion feature
double diskSpaceCleanForciblyRatio = 0.01D;
initMessageStore(deleteWhen, diskMaxUsedSpaceRatio, diskSpaceCleanForciblyRatio);
// build and put 55 messages, exactly one message per CommitLog file.
buildAndPutMessagesToMessageStore(msgCount);
// undo comment out the code below, if want to debug this case rather than just run it.
// Thread.sleep(1000 * 60 + 100);
MappedFileQueue commitLogQueue = getMappedFileQueueCommitLog();
assertEquals(fileCountCommitLog, commitLogQueue.getMappedFiles().size());
int fileCountConsumeQueue = getFileCountConsumeQueue();
MappedFileQueue consumeQueue = getMappedFileQueueConsumeQueue();
assertEquals(fileCountConsumeQueue, consumeQueue.getMappedFiles().size());
int fileCountIndexFile = getFileCountIndexFile();
assertEquals(fileCountIndexFile, getIndexFileList().size());
// In this case, there is no need to expire the files.
// int expireFileCount = 15;
// expireFiles(commitLogQueue, expireFileCount);
// magic code 10 reference to MappedFileQueue#DELETE_FILES_BATCH_MAX
for (int a = 1, fileCount = fileCountCommitLog;
a <= (int) Math.ceil((double) fileCountCommitLog / 10) && fileCount >= 10;
a++, fileCount -= 10) {
cleanCommitLogService.run();
cleanConsumeQueueService.run();
assertEquals(fileCountCommitLog - 10 * a, commitLogQueue.getMappedFiles().size());
int msgCountPerFile = getMsgCountPerConsumeQueueMappedFile();
int expectDeleteCountConsumeQueue = (int) Math.floor((double) (a * 10) / msgCountPerFile);
assertEquals(fileCountConsumeQueue - expectDeleteCountConsumeQueue, consumeQueue.getMappedFiles().size());
int msgCountPerIndexFile = getMsgCountPerIndexFile();
int expectDeleteCountIndexFile = (int) Math.floor((double) (a * 10) / msgCountPerIndexFile);
assertEquals(fileCountIndexFile - expectDeleteCountIndexFile, getIndexFileList().size());
}
}
@Test
public void testDeleteExpiredFilesManually() throws Exception {
String deleteWhen = "04";
// the max value of diskMaxUsedSpaceRatio
int diskMaxUsedSpaceRatio = 99;
// used to ensure that automatic file deletion is not triggered
double diskSpaceCleanForciblyRatio = 0.999D;
initMessageStore(deleteWhen, diskMaxUsedSpaceRatio, diskSpaceCleanForciblyRatio);
messageStore.executeDeleteFilesManually();
// build and put 55 messages, exactly one message per CommitLog file.
buildAndPutMessagesToMessageStore(msgCount);
// undo comment out the code below, if want to debug this case rather than just run it.
// Thread.sleep(1000 * 60 + 100);
MappedFileQueue commitLogQueue = getMappedFileQueueCommitLog();
assertEquals(fileCountCommitLog, commitLogQueue.getMappedFiles().size());
int fileCountConsumeQueue = getFileCountConsumeQueue();
MappedFileQueue consumeQueue = getMappedFileQueueConsumeQueue();
assertEquals(fileCountConsumeQueue, consumeQueue.getMappedFiles().size());
int fileCountIndexFile = getFileCountIndexFile();
assertEquals(fileCountIndexFile, getIndexFileList().size());
int expireFileCount = 15;
expireFiles(commitLogQueue, expireFileCount);
// magic code 10 reference to MappedFileQueue#DELETE_FILES_BATCH_MAX
for (int a = 1, fileCount = expireFileCount; a <= (int) Math.ceil((double) expireFileCount / 10); a++, fileCount -= 10) {
cleanCommitLogService.run();
cleanConsumeQueueService.run();
int expectDeletedCount = fileCount >= 10 ? a * 10 : ((a - 1) * 10 + fileCount);
assertEquals(fileCountCommitLog - expectDeletedCount, commitLogQueue.getMappedFiles().size());
int msgCountPerFile = getMsgCountPerConsumeQueueMappedFile();
int expectDeleteCountConsumeQueue = (int) Math.floor((double) expectDeletedCount / msgCountPerFile);
assertEquals(fileCountConsumeQueue - expectDeleteCountConsumeQueue, consumeQueue.getMappedFiles().size());
int msgCountPerIndexFile = getMsgCountPerIndexFile();
int expectDeleteCountIndexFile = (int) Math.floor((double) (a * 10) / msgCountPerIndexFile);
assertEquals(fileCountIndexFile - expectDeleteCountIndexFile, getIndexFileList().size());
}
}
private DefaultMessageStore.CleanCommitLogService getCleanCommitLogService()
throws Exception {
Field serviceField = messageStore.getClass().getDeclaredField("cleanCommitLogService");
serviceField.setAccessible(true);
DefaultMessageStore.CleanCommitLogService cleanCommitLogService =
(DefaultMessageStore.CleanCommitLogService) serviceField.get(messageStore);
serviceField.setAccessible(false);
return cleanCommitLogService;
}
private ConsumeQueueStore.CleanConsumeQueueService getCleanConsumeQueueService()
throws Exception {
Field serviceField = messageStore.getQueueStore().getClass().getDeclaredField("cleanConsumeQueueService");
serviceField.setAccessible(true);
ConsumeQueueStore.CleanConsumeQueueService cleanConsumeQueueService =
(ConsumeQueueStore.CleanConsumeQueueService) serviceField.get(messageStore.getQueueStore());
serviceField.setAccessible(false);
return cleanConsumeQueueService;
}
private MappedFileQueue getMappedFileQueueConsumeQueue()
throws Exception {
ConsumeQueueInterface consumeQueue = messageStore.getConsumeQueueTable().get(topic).get(queueId);
Field queueField = consumeQueue.getClass().getDeclaredField("mappedFileQueue");
queueField.setAccessible(true);
MappedFileQueue fileQueue = (MappedFileQueue) queueField.get(consumeQueue);
queueField.setAccessible(false);
return fileQueue;
}
private MappedFileQueue getMappedFileQueueCommitLog() throws Exception {
CommitLog commitLog = messageStore.getCommitLog();
Field queueField = commitLog.getClass().getDeclaredField("mappedFileQueue");
queueField.setAccessible(true);
MappedFileQueue fileQueue = (MappedFileQueue) queueField.get(commitLog);
queueField.setAccessible(false);
return fileQueue;
}
private ArrayList<IndexFile> getIndexFileList() throws Exception {
Field indexServiceField = messageStore.getClass().getDeclaredField("indexService");
indexServiceField.setAccessible(true);
IndexService indexService = (IndexService) indexServiceField.get(messageStore);
Field indexFileListField = indexService.getClass().getDeclaredField("indexFileList");
indexFileListField.setAccessible(true);
ArrayList<IndexFile> indexFileList = (ArrayList<IndexFile>) indexFileListField.get(indexService);
return indexFileList;
}
private int getFileCountConsumeQueue() {
int countPerFile = getMsgCountPerConsumeQueueMappedFile();
double fileCount = (double) msgCount / countPerFile;
return (int) Math.ceil(fileCount);
}
private int getFileCountIndexFile() {
int countPerFile = getMsgCountPerIndexFile();
double fileCount = (double) msgCount / countPerFile;
return (int) Math.ceil(fileCount);
}
private int getMsgCountPerConsumeQueueMappedFile() {
int size = messageStore.getMessageStoreConfig().getMappedFileSizeConsumeQueue();
return size / CQ_STORE_UNIT_SIZE;// 7 in this case
}
private int getMsgCountPerIndexFile() {
// 7 in this case
return messageStore.getMessageStoreConfig().getMaxIndexNum() - 1;
}
private void buildAndPutMessagesToMessageStore(int msgCount) throws Exception {
int msgLen = topic.getBytes(CHARSET_UTF8).length + 91;
Map<String, String> properties = new HashMap<>(4);
properties.put(MessageConst.PROPERTY_KEYS, keys);
String s = MessageDecoder.messageProperties2String(properties);
int propertiesLen = s.getBytes(CHARSET_UTF8).length;
int commitLogEndFileMinBlankLength = 4 + 4;
int singleMsgBodyLen = mappedFileSize - msgLen - propertiesLen - commitLogEndFileMinBlankLength;
for (int i = 0; i < msgCount; i++) {
MessageExtBrokerInner msg = new MessageExtBrokerInner();
msg.setTopic(topic);
msg.setBody(new byte[singleMsgBodyLen]);
msg.setKeys(keys);
msg.setQueueId(queueId);
msg.setSysFlag(0);
msg.setBornTimestamp(System.currentTimeMillis());
msg.setStoreHost(storeHost);
msg.setBornHost(bornHost);
msg.setPropertiesString(MessageDecoder.messageProperties2String(msg.getProperties()));
PutMessageResult result = messageStore.putMessage(msg);
assertTrue(result != null && result.isOk());
}
StoreTestUtil.waitCommitLogReput(messageStore);
StoreTestUtil.flushConsumeQueue(messageStore);
StoreTestUtil.flushConsumeIndex(messageStore);
}
private void expireFiles(MappedFileQueue commitLogQueue, int expireCount) {
for (int i = 0; i < commitLogQueue.getMappedFiles().size(); i++) {
MappedFile mappedFile = commitLogQueue.getMappedFiles().get(i);
int reservedTime = fileReservedTime * 60 * 60 * 1000;
if (i < expireCount) {
boolean modified = mappedFile.getFile().setLastModified(System.currentTimeMillis() - reservedTime * 2);
assertTrue(modified);
}
}
}
private void initMessageStore(String deleteWhen, int diskMaxUsedSpaceRatio, double diskSpaceCleanForciblyRatio) throws Exception {
initMessageStore(genMessageStoreConfig(deleteWhen,diskMaxUsedSpaceRatio), diskSpaceCleanForciblyRatio);
}
private MessageStoreConfig genMessageStoreConfig(String deleteWhen, int diskMaxUsedSpaceRatio) {
MessageStoreConfig messageStoreConfig = new MessageStoreConfigForTest();
messageStoreConfig.setMappedFileSizeCommitLog(mappedFileSize);
messageStoreConfig.setMappedFileSizeConsumeQueue(mappedFileSize);
messageStoreConfig.setMaxHashSlotNum(100);
messageStoreConfig.setMaxIndexNum(8);
messageStoreConfig.setFlushDiskType(FlushDiskType.SYNC_FLUSH);
messageStoreConfig.setFlushIntervalConsumeQueue(1);
// Invalidate DefaultMessageStore`s scheduled task of cleaning expired files.
// work with the code 'Thread.sleep(1000 * 60 + 100)' behind.
messageStoreConfig.setCleanResourceInterval(Integer.MAX_VALUE);
messageStoreConfig.setFileReservedTime(fileReservedTime);
messageStoreConfig.setDeleteWhen(deleteWhen);
messageStoreConfig.setDiskMaxUsedSpaceRatio(diskMaxUsedSpaceRatio);
String storePathRootDir = System.getProperty("java.io.tmpdir") + File.separator
+ "DefaultMessageStoreCleanFilesTest-" + UUID.randomUUID();
String storePathCommitLog = storePathRootDir + File.separator + "commitlog";
messageStoreConfig.setStorePathRootDir(storePathRootDir);
messageStoreConfig.setStorePathCommitLog(storePathCommitLog);
return messageStoreConfig;
}
private void initMessageStore(MessageStoreConfig messageStoreConfig, double diskSpaceCleanForciblyRatio) throws Exception {
messageStore = new DefaultMessageStore(messageStoreConfig,
new BrokerStatsManager("test", true), new MyMessageArrivingListener(), new BrokerConfig(), new ConcurrentHashMap<>());
cleanCommitLogService = getCleanCommitLogService();
cleanConsumeQueueService = getCleanConsumeQueueService();
assertTrue(messageStore.load());
messageStore.start();
// partially mock a real obj
cleanCommitLogService = spy(cleanCommitLogService);
when(cleanCommitLogService.getDiskSpaceWarningLevelRatio()).thenReturn(diskSpaceCleanForciblyRatio);
when(cleanCommitLogService.getDiskSpaceCleanForciblyRatio()).thenReturn(diskSpaceCleanForciblyRatio);
putFiledBackToMessageStore(cleanCommitLogService);
}
private void putFiledBackToMessageStore(DefaultMessageStore.CleanCommitLogService cleanCommitLogService) throws Exception {
Field cleanCommitLogServiceField = DefaultMessageStore.class.getDeclaredField("cleanCommitLogService");
cleanCommitLogServiceField.setAccessible(true);
cleanCommitLogServiceField.set(messageStore, cleanCommitLogService);
cleanCommitLogServiceField.setAccessible(false);
}
private | DefaultMessageStoreCleanFilesTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/basic/CharacterMappingTests.java | {
"start": 1091,
"end": 2949
} | class ____ {
@Test
public void testMappings(SessionFactoryScope scope) {
// first, verify the type selections...
final MappingMetamodelImplementor mappingMetamodel = scope.getSessionFactory()
.getRuntimeMetamodels()
.getMappingMetamodel();
final JdbcTypeRegistry jdbcRegistry = mappingMetamodel.getTypeConfiguration().getJdbcTypeRegistry();
final EntityPersister entityDescriptor = mappingMetamodel.getEntityDescriptor(EntityOfCharacters.class);
{
final BasicAttributeMapping attribute = (BasicAttributeMapping) entityDescriptor.findAttributeMapping("wrapper");
assertThat( attribute.getJavaType().getJavaTypeClass(), equalTo( Character.class));
final JdbcMapping jdbcMapping = attribute.getJdbcMapping();
assertThat(jdbcMapping.getJavaTypeDescriptor().getJavaTypeClass(), equalTo(Character.class));
assertThat( jdbcMapping.getJdbcType(), equalTo( jdbcRegistry.getDescriptor( Types.CHAR)));
}
{
final BasicAttributeMapping attribute = (BasicAttributeMapping) entityDescriptor.findAttributeMapping("primitive");
assertThat( attribute.getJavaType().getJavaTypeClass(), equalTo( Character.class));
final JdbcMapping jdbcMapping = attribute.getJdbcMapping();
assertThat(jdbcMapping.getJavaTypeDescriptor().getJavaTypeClass(), equalTo(Character.class));
assertThat( jdbcMapping.getJdbcType(), equalTo( jdbcRegistry.getDescriptor( Types.CHAR)));
}
// and try to use the mapping
scope.inTransaction(
(session) -> session.persist(new EntityOfCharacters(1, 'A', 'b'))
);
scope.inTransaction(
(session) -> session.get(EntityOfCharacters.class, 1)
);
}
@AfterEach
public void dropData(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Entity(name = "EntityOfCharacters")
@Table(name = "EntityOfCharacters")
public static | CharacterMappingTests |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java | {
"start": 85361,
"end": 85739
} | class ____ implements
SingleArcTransition<JobImpl, JobEvent> {
@Override
public void transition(JobImpl job, JobEvent event) {
job.setFinishTime();
job.eventHandler.handle(new CommitterJobAbortEvent(job.jobId,
job.jobContext,
org.apache.hadoop.mapreduce.JobStatus.State.KILLED));
}
}
private static | KilledDuringCommitTransition |
java | elastic__elasticsearch | qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java | {
"start": 4826,
"end": 30427
} | interface ____ {
/** Point to JIRA entry. */
String bugUrl();
}
protected final Logger logger = LogManager.getLogger(getClass());
// the distribution being tested
protected static final Distribution distribution;
static {
distribution = new Distribution(Paths.get(System.getProperty("tests.distribution")));
}
// the java installation already installed on the system
protected static final String systemJavaHome;
static {
Shell initShell = new Shell();
if (Platforms.WINDOWS) {
systemJavaHome = initShell.run("$Env:SYSTEM_JAVA_HOME").stdout().trim();
} else {
assert Platforms.LINUX || Platforms.DARWIN;
systemJavaHome = initShell.run("echo $SYSTEM_JAVA_HOME").stdout().trim();
}
}
// the current installation of the distribution being tested
protected static Installation installation;
protected static Tuple<String, String> fileSuperuserForInstallation;
private static boolean failed;
@Rule
public final TestWatcher testFailureRule = new TestWatcher() {
@Override
protected void failed(Throwable e, Description description) {
failed = true;
if (installation != null && installation.distribution.isDocker()) {
logger.warn("Test {} failed. Printing logs for failed test...", description.getMethodName());
FileUtils.logAllLogs(installation.logs, logger);
}
}
};
// a shell to run system commands with
protected static Shell sh;
@Rule
public final TestName testNameRule = new TestName();
@BeforeClass
public static void init() throws Exception {
assumeTrue("only compatible distributions", distribution.packaging.compatible);
// make sure temp dir exists
if (Files.exists(getRootTempDir()) == false) {
Files.createDirectories(getRootTempDir());
}
// cleanup from previous test
cleanup();
// create shell
if (distribution().isDocker()) {
ensureImageIsLoaded(distribution);
sh = new DockerShell();
} else {
sh = new Shell();
}
}
@AfterClass
public static void cleanupDocker() {
if (distribution().isDocker()) {
// runContainer also calls this, so we don't need this method to be annotated as `@After`
removeContainer();
}
}
@Before
public void setup() throws Exception {
assumeFalse(failed); // skip rest of tests once one fails
sh.reset();
if (distribution().hasJdk == false) {
Platforms.onLinux(() -> sh.getEnv().put("ES_JAVA_HOME", systemJavaHome));
Platforms.onWindows(() -> sh.getEnv().put("ES_JAVA_HOME", systemJavaHome));
}
if (installation != null
&& installation.distribution.isDocker() == false
&& Version.fromString(installation.distribution.baseVersion).onOrAfter(Version.V_7_11_0)) {
// Explicitly set heap for versions 7.11 and later otherwise auto heap sizing will cause OOM issues
setHeap("1g");
}
}
@After
public void teardown() throws Exception {
// move log file so we can avoid false positives when grepping for
// messages in logs during test
if (installation != null && failed == false) {
if (Files.exists(installation.logs)) {
Path logFile = installation.logs.resolve("elasticsearch.log");
String prefix = this.getClass().getSimpleName() + "." + testNameRule.getMethodName();
if (Files.exists(logFile)) {
Path newFile = installation.logs.resolve(prefix + ".elasticsearch.log");
try {
FileUtils.mv(logFile, newFile);
} catch (Exception e) {
// There was a problem cleaning up log files. This usually means Windows wackiness
// where something still has the file open. Here we dump what we can of the log files to see
// if ES is still running.
dumpDebug();
throw e;
}
}
for (Path rotatedLogFile : FileUtils.lsGlob(installation.logs, "elasticsearch*.tar.gz")) {
Path newRotatedLogFile = installation.logs.resolve(prefix + "." + rotatedLogFile.getFileName());
FileUtils.mv(rotatedLogFile, newRotatedLogFile);
}
}
}
}
/** The {@link Distribution} that should be tested in this case */
protected static Distribution distribution() {
return distribution;
}
protected static void install() throws Exception {
switch (distribution.packaging) {
case TAR, ZIP -> {
installation = Archives.installArchive(sh, distribution);
Archives.verifyArchiveInstallation(installation, distribution);
}
case DEB, RPM -> {
installation = Packages.installPackage(sh, distribution);
Packages.verifyPackageInstallation(installation, distribution, sh);
}
case DOCKER, DOCKER_IRON_BANK, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> {
installation = Docker.runContainer(distribution);
Docker.verifyContainerInstallation(installation);
}
default -> throw new IllegalStateException("Unknown Elasticsearch packaging type.");
}
// the purpose of the packaging tests are not to all test auto heap, so we explicitly set heap size to 1g
if (distribution.isDocker() == false) {
setHeap("1g");
}
}
protected static void cleanup() throws Exception {
installation = null;
fileSuperuserForInstallation = null;
cleanEverything();
}
/**
* Prints all available information about the installed Elasticsearch process, including pid, logs and stdout/stderr.
*/
protected void dumpDebug() {
if (Files.exists(installation.home.resolve("elasticsearch.pid"))) {
String pid = FileUtils.slurp(installation.home.resolve("elasticsearch.pid")).trim();
logger.info("Dumping jstack of elasticsearch processb ({}) that failed to start", pid);
sh.runIgnoreExitCode("jstack " + pid);
}
if (Files.exists(installation.logs.resolve("elasticsearch.log"))) {
logger.warn("Elasticsearch log:\n" + FileUtils.slurpAllLogs(installation.logs, "elasticsearch.log", "*.log.gz"));
}
if (Files.exists(installation.logs.resolve("output.out"))) {
logger.warn("Stdout:\n" + FileUtils.slurpTxtorGz(installation.logs.resolve("output.out")));
}
if (Files.exists(installation.logs.resolve("output.err"))) {
logger.warn("Stderr:\n" + FileUtils.slurpTxtorGz(installation.logs.resolve("output.err")));
}
}
/**
* Starts and stops elasticsearch, and performs assertions while it is running.
*/
protected void assertWhileRunning(Platforms.PlatformAction assertions) throws Exception {
try {
awaitElasticsearchStartup(runElasticsearchStartCommand(null, true, false));
} catch (AssertionError | Exception e) {
dumpDebug();
throw e;
}
try {
assertions.run();
} catch (AssertionError | Exception e) {
dumpDebug();
throw e;
}
stopElasticsearch();
}
/**
* Run the command to start Elasticsearch, but don't wait or test for success.
* This method is useful for testing failure conditions in startup. To await success,
* use {@link #startElasticsearch()}.
* @param password Password for password-protected keystore, null for no password;
* this option will fail for non-archive distributions
* @param daemonize Run Elasticsearch in the background
* @param useTty Use a tty for inputting the password rather than standard input;
* this option will fail for non-archive distributions
* @return Shell results of the startup command.
* @throws Exception when command fails immediately.
*/
public Shell.Result runElasticsearchStartCommand(String password, boolean daemonize, boolean useTty) throws Exception {
if (password != null) {
assertTrue("Only archives support user-entered passwords", distribution().isArchive());
}
switch (distribution.packaging) {
case TAR:
case ZIP:
if (useTty) {
return Archives.startElasticsearchWithTty(installation, sh, password, List.of(), null, daemonize);
} else {
return Archives.runElasticsearchStartCommand(installation, sh, password, List.of(), daemonize);
}
case DEB:
case RPM:
return Packages.runElasticsearchStartCommand(sh);
case DOCKER:
case DOCKER_IRON_BANK:
case DOCKER_CLOUD_ESS:
case DOCKER_WOLFI:
// nothing, "installing" docker image is running it
return Shell.NO_OP;
default:
throw new IllegalStateException("Unknown Elasticsearch packaging type.");
}
}
public void stopElasticsearch() throws Exception {
switch (distribution.packaging) {
case TAR:
case ZIP:
Archives.stopElasticsearch(installation);
break;
case DEB:
case RPM:
Packages.stopElasticsearch(sh);
break;
case DOCKER:
case DOCKER_IRON_BANK:
case DOCKER_CLOUD_ESS:
case DOCKER_WOLFI:
// nothing, "installing" docker image is running it
break;
default:
throw new IllegalStateException("Unknown Elasticsearch packaging type.");
}
}
public void awaitElasticsearchStartup(Shell.Result result) throws Exception {
assertThat("Startup command should succeed. Stderr: [" + result + "]", result.exitCode(), equalTo(0));
switch (distribution.packaging) {
case TAR, ZIP -> Archives.assertElasticsearchStarted(installation);
case DEB, RPM -> Packages.assertElasticsearchStarted(sh, installation);
case DOCKER, DOCKER_IRON_BANK, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> Docker.waitForElasticsearchToStart();
default -> throw new IllegalStateException("Unknown Elasticsearch packaging type.");
}
}
/**
* Call {@link PackagingTestCase#awaitElasticsearchStartup}
* returning the result.
*/
public Shell.Result awaitElasticsearchStartupWithResult(Shell.Result result) throws Exception {
awaitElasticsearchStartup(result);
return result;
}
/**
* Start Elasticsearch and wait until it's up and running. If you just want to run
* the start command, use {@link #runElasticsearchStartCommand(String, boolean, boolean)}.
* @throws Exception if Elasticsearch can't start
*/
public void startElasticsearch() throws Exception {
try {
awaitElasticsearchStartup(runElasticsearchStartCommand(null, true, false));
} catch (AssertionError | Exception e) {
dumpDebug();
throw e;
}
}
public void assertElasticsearchFailure(Shell.Result result, String expectedMessage, Packages.JournaldWrapper journaldWrapper) {
assertElasticsearchFailure(result, Collections.singletonList(expectedMessage), journaldWrapper);
}
public void assertElasticsearchFailure(Shell.Result result, List<String> expectedMessages, Packages.JournaldWrapper journaldWrapper) {
@SuppressWarnings("unchecked")
Matcher<String>[] stringMatchers = expectedMessages.stream().map(CoreMatchers::containsString).toArray(Matcher[]::new);
if (Files.exists(installation.logs.resolve("elasticsearch.log"))) {
// If log file exists, then we have bootstrapped our logging and the
// error should be in the logs
assertThat(installation.logs.resolve("elasticsearch.log"), fileExists());
String logfile = FileUtils.slurp(installation.logs.resolve("elasticsearch.log"));
if (logfile.isBlank()) {
// bootstrap errors still
}
assertThat(logfile, anyOf(stringMatchers));
} else if (distribution().isPackage() && Platforms.isSystemd()) {
// For systemd, retrieve the error from journalctl
assertThat(result.stderr(), containsString("Job for elasticsearch.service failed"));
Shell.Result error = journaldWrapper.getLogs();
assertThat(error.stdout(), anyOf(stringMatchers));
} else {
// Otherwise, error should be on shell stderr
assertThat(result.stderr(), anyOf(stringMatchers));
}
}
public void setFileSuperuser(String username, String password) {
assertThat(installation, Matchers.not(Matchers.nullValue()));
assertThat(fileSuperuserForInstallation, Matchers.nullValue());
Shell.Result result = sh.run(
installation.executables().usersTool + " useradd " + username + " -p " + password + " -r " + "superuser"
);
assertThat(result.isSuccess(), is(true));
fileSuperuserForInstallation = new Tuple<>(username, password);
}
public void runElasticsearchTestsAsElastic(String elasticPassword) throws Exception {
ServerUtils.runElasticsearchTests("elastic", elasticPassword, ServerUtils.getCaCert(installation));
}
public void runElasticsearchTests() throws Exception {
ServerUtils.runElasticsearchTests(
fileSuperuserForInstallation != null ? fileSuperuserForInstallation.v1() : null,
fileSuperuserForInstallation != null ? fileSuperuserForInstallation.v2() : null,
ServerUtils.getCaCert(installation)
);
}
public String makeRequest(String request) throws Exception {
return ServerUtils.makeRequest(
Request.Get(request),
fileSuperuserForInstallation != null ? fileSuperuserForInstallation.v1() : null,
fileSuperuserForInstallation != null ? fileSuperuserForInstallation.v2() : null,
ServerUtils.getCaCert(installation)
);
}
public String makeRequestAsElastic(String request, String elasticPassword) throws Exception {
return ServerUtils.makeRequest(Request.Get(request), "elastic", elasticPassword, ServerUtils.getCaCert(installation));
}
public int makeRequestAsElastic(String elasticPassword) throws Exception {
return ServerUtils.makeRequestAndGetStatus(
Request.Get("https://localhost:9200"),
"elastic",
elasticPassword,
ServerUtils.getCaCert(installation)
);
}
public static Path getRootTempDir() {
if (distribution().isPackage()) {
// The custom config directory is not under /tmp or /var/tmp because
// systemd's private temp directory functionally means different
// processes can have different views of what's in these directories
return Paths.get("/var/test-tmp").toAbsolutePath();
} else {
// vagrant creates /tmp for us in windows so we use that to avoid long paths
return Paths.get("/tmp").toAbsolutePath();
}
}
private static final FileAttribute<?>[] NEW_DIR_PERMS;
static {
if (Platforms.WINDOWS) {
NEW_DIR_PERMS = new FileAttribute<?>[0];
} else {
NEW_DIR_PERMS = new FileAttribute<?>[] { PosixFilePermissions.asFileAttribute(PosixFilePermissions.fromString("rwxr-xr-x")) };
}
}
public static Path createTempDir(String prefix) throws IOException {
return Files.createTempDirectory(getRootTempDir(), prefix, NEW_DIR_PERMS);
}
/**
* Run the given action with a temporary copy of the config directory.
*
* Files under the path passed to the action may be modified as necessary for the
* test to execute, and running Elasticsearch with {@link #startElasticsearch()} will
* use the temporary directory.
*/
public void withCustomConfig(CheckedConsumer<Path, Exception> action) throws Exception {
Path tempDir = createTempDir("custom-config");
Path tempConf = tempDir.resolve("elasticsearch");
FileUtils.copyDirectory(installation.config, tempConf);
// this is what install does
sh.chown(tempDir);
if (distribution.isPackage()) {
Files.copy(installation.envFile, tempDir.resolve("elasticsearch.bk"), StandardCopyOption.COPY_ATTRIBUTES);// backup
append(installation.envFile, "ES_PATH_CONF=" + tempConf + "\n");
} else {
sh.getEnv().put("ES_PATH_CONF", tempConf.toString());
}
// Auto-configuration file paths are absolute so we need to replace them in the config now that we copied them to tempConf
// if auto-configuration has happened. Otherwise, the action below is a no-op.
Path yml = tempConf.resolve("elasticsearch.yml");
List<String> lines;
try (Stream<String> allLines = Files.readAllLines(yml).stream()) {
lines = allLines.map(l -> {
if (l.contains(installation.config.toString())) {
return l.replace(installation.config.toString(), tempConf.toString());
}
return l;
}).collect(Collectors.toList());
}
Files.write(yml, lines, TRUNCATE_EXISTING);
action.accept(tempConf);
if (distribution.isPackage()) {
IOUtils.rm(installation.envFile);
Files.copy(tempDir.resolve("elasticsearch.bk"), installation.envFile, StandardCopyOption.COPY_ATTRIBUTES);
} else {
sh.getEnv().remove("ES_PATH_CONF");
}
IOUtils.rm(tempDir);
}
public void withCustomConfigOwner(String tempOwner, Predicate<Distribution.Platform> predicate, CheckedRunnable<Exception> action)
throws Exception {
if (predicate.test(installation.distribution.platform)) {
sh.chown(installation.config, tempOwner);
action.run();
sh.chown(installation.config);
} else {
action.run();
}
}
/**
* Manually set the heap size with a jvm.options.d file. This will be reset before each test.
*/
public static void setHeap(String heapSize) throws IOException {
setHeap(heapSize, installation.config);
}
public static void setHeap(String heapSize, Path config) throws IOException {
Path heapOptions = config.resolve("jvm.options.d").resolve("heap.options");
if (heapSize == null) {
FileUtils.rm(heapOptions);
} else {
Files.writeString(
heapOptions,
String.format(Locale.ROOT, "-Xmx%1$s%n-Xms%1$s%n", heapSize),
StandardOpenOption.CREATE,
StandardOpenOption.TRUNCATE_EXISTING
);
}
}
/**
* Runs the code block for 10 seconds waiting for no assertion to trip.
*/
public static void assertBusy(CheckedRunnable<Exception> codeBlock) throws Exception {
assertBusy(codeBlock, 10, TimeUnit.SECONDS);
}
/**
* Runs the code block for the provided interval, waiting for no assertions to trip.
*/
public static void assertBusy(CheckedRunnable<Exception> codeBlock, long maxWaitTime, TimeUnit unit) throws Exception {
long maxTimeInMillis = TimeUnit.MILLISECONDS.convert(maxWaitTime, unit);
// In case you've forgotten your high-school studies, log10(x) / log10(y) == log y(x)
long iterations = Math.max(Math.round(Math.log10(maxTimeInMillis) / Math.log10(2)), 1);
long timeInMillis = 1;
long sum = 0;
List<AssertionError> failures = new ArrayList<>();
for (int i = 0; i < iterations; i++) {
try {
codeBlock.run();
return;
} catch (AssertionError e) {
failures.add(e);
}
sum += timeInMillis;
Thread.sleep(timeInMillis);
timeInMillis *= 2;
}
timeInMillis = maxTimeInMillis - sum;
Thread.sleep(Math.max(timeInMillis, 0));
try {
codeBlock.run();
} catch (AssertionError e) {
for (AssertionError failure : failures) {
e.addSuppressed(failure);
}
throw e;
}
}
/**
* Validates that the installation {@code es} has been auto-configured. This applies to archives and docker only,
* packages have nuances that justify their own version.
* @param es the {@link Installation} to check
*/
public void verifySecurityAutoConfigured(Installation es) throws Exception {
final String autoConfigDirName = "certs";
final Settings settings;
if (es.distribution.isArchive()) {
// We chown the installation on Windows to Administrators so that we can auto-configure it.
String owner = Platforms.WINDOWS ? "BUILTIN\\Administrators" : "elasticsearch";
assertThat(es.config(autoConfigDirName), FileMatcher.file(Directory, owner, owner, p750));
Stream.of("http.p12", "http_ca.crt", "transport.p12")
.forEach(file -> assertThat(es.config(autoConfigDirName).resolve(file), FileMatcher.file(File, owner, owner, p660)));
settings = Settings.builder().loadFromPath(es.config("elasticsearch.yml")).build();
} else if (es.distribution.isDocker()) {
assertThat(es.config(autoConfigDirName), DockerFileMatcher.file(Directory, "elasticsearch", "root", p750));
Stream.of("http.p12", "http_ca.crt", "transport.p12")
.forEach(
file -> assertThat(
es.config(autoConfigDirName).resolve(file),
DockerFileMatcher.file(File, "elasticsearch", "root", p660)
)
);
Path localTempDir = createTempDir("docker-config");
copyFromContainer(es.config("elasticsearch.yml"), localTempDir.resolve("docker_elasticsearch.yml"));
settings = Settings.builder().loadFromPath(localTempDir.resolve("docker_elasticsearch.yml")).build();
rm(localTempDir.resolve("docker_elasticsearch.yml"));
rm(localTempDir);
} else {
assert es.distribution.isPackage();
assertThat(es.config(autoConfigDirName), FileMatcher.file(Directory, "root", "elasticsearch", p750));
Stream.of("http.p12", "http_ca.crt", "transport.p12")
.forEach(
file -> assertThat(es.config(autoConfigDirName).resolve(file), FileMatcher.file(File, "root", "elasticsearch", p660))
);
assertThat(
sh.run(es.executables().keystoreTool + " list").stdout(),
Matchers.containsString("autoconfiguration.password_hash")
);
settings = Settings.builder().loadFromPath(es.config("elasticsearch.yml")).build();
}
assertThat(settings.get("xpack.security.enabled"), equalTo("true"));
assertThat(settings.get("xpack.security.enrollment.enabled"), equalTo("true"));
assertThat(settings.get("xpack.security.transport.ssl.enabled"), equalTo("true"));
assertThat(settings.get("xpack.security.transport.ssl.verification_mode"), equalTo("certificate"));
assertThat(settings.get("xpack.security.http.ssl.enabled"), equalTo("true"));
assertThat(settings.get("xpack.security.enabled"), equalTo("true"));
if (es.distribution.isDocker() == false) {
assertThat(settings.get("http.host"), equalTo("0.0.0.0"));
}
}
/**
* Validates that the installation {@code es} has not been auto-configured. This applies to archives and docker only,
* packages have nuances that justify their own version.
* @param es the {@link Installation} to check
*/
public static void verifySecurityNotAutoConfigured(Installation es) throws Exception {
assertThat(Files.exists(es.config("certs")), Matchers.is(false));
if (es.distribution.isPackage()) {
if (Files.exists(es.config("elasticsearch.keystore"))) {
assertThat(
sh.run(es.executables().keystoreTool + " list").stdout(),
not(Matchers.containsString("autoconfiguration.password_hash"))
);
}
}
List<String> configLines = Files.readAllLines(es.config("elasticsearch.yml"));
assertThat(
configLines,
not(contains(containsString("#----------------------- BEGIN SECURITY AUTO CONFIGURATION -----------------------")))
);
Path caCert = ServerUtils.getCaCert(installation);
if (caCert != null) {
assertThat(caCert.toString(), Matchers.not(Matchers.containsString("certs")));
}
}
}
| AwaitsFix |
java | elastic__elasticsearch | x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpProxy.java | {
"start": 666,
"end": 3632
} | class ____ implements ToXContentFragment {
public static final HttpProxy NO_PROXY = new HttpProxy(null, null, null);
private static final ParseField HOST = new ParseField("host");
private static final ParseField PORT = new ParseField("port");
private static final ParseField SCHEME = new ParseField("scheme");
private String host;
private Integer port;
private Scheme scheme;
public HttpProxy(String host, Integer port) {
this.host = host;
this.port = port;
}
public HttpProxy(String host, Integer port, Scheme scheme) {
this.host = host;
this.port = port;
this.scheme = scheme;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (Strings.hasText(host) && port != null) {
builder.startObject("proxy").field("host", host).field("port", port);
if (scheme != null) {
builder.field("scheme", scheme.scheme());
}
builder.endObject();
}
return builder;
}
public String getHost() {
return host;
}
public Integer getPort() {
return port;
}
public Scheme getScheme() {
return scheme;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
HttpProxy that = (HttpProxy) o;
return Objects.equals(port, that.port) && Objects.equals(host, that.host) && Objects.equals(scheme, that.scheme);
}
@Override
public int hashCode() {
return Objects.hash(host, port, scheme);
}
public static HttpProxy parse(XContentParser parser) throws IOException {
XContentParser.Token token;
String currentFieldName = null;
String host = null;
Integer port = null;
Scheme scheme = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (HOST.match(currentFieldName, parser.getDeprecationHandler())) {
host = parser.text();
} else if (SCHEME.match(currentFieldName, parser.getDeprecationHandler())) {
scheme = Scheme.parse(parser.text());
} else if (PORT.match(currentFieldName, parser.getDeprecationHandler())) {
port = parser.intValue();
if (port <= 0 || port >= 65535) {
throw new ElasticsearchParseException("Proxy port must be between 1 and 65534, but was " + port);
}
}
}
if (port == null || host == null) {
throw new ElasticsearchParseException("Proxy must contain 'port' and 'host' field");
}
return new HttpProxy(host, port, scheme);
}
}
| HttpProxy |
java | spring-projects__spring-boot | buildpack/spring-boot-buildpack-platform/src/main/java/org/springframework/boot/buildpack/platform/docker/configuration/DockerConnectionConfiguration.java | {
"start": 897,
"end": 1705
} | interface ____ {
/**
* Connect to specific host.
*
* @param address the host address
* @param secure if connection is secure
* @param certificatePath a path to the certificate used for secure connections
*/
record Host(String address, boolean secure,
@Nullable String certificatePath) implements DockerConnectionConfiguration {
public Host(String address) {
this(address, false, null);
}
public Host {
Assert.hasLength(address, "'address' must not be empty");
}
}
/**
* Connect using a specific context reference.
*
* @param context a reference to the Docker context
*/
record Context(String context) implements DockerConnectionConfiguration {
public Context {
Assert.hasLength(context, "'context' must not be empty");
}
}
}
| DockerConnectionConfiguration |
java | google__guava | android/guava/src/com/google/common/collect/Synchronized.java | {
"start": 2695,
"end": 2762
} | class ____ {
private Synchronized() {}
private static | Synchronized |
java | apache__flink | flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStIterateOperation.java | {
"start": 1199,
"end": 3888
} | class ____ implements ForStDBOperation {
public static final int CACHE_SIZE_LIMIT = 128;
private final RocksDB db;
private final List<ForStDBIterRequest<?, ?, ?, ?, ?>> batchRequest;
private final Executor executor;
private final Runnable subProcessFinished;
ForStIterateOperation(
RocksDB db, List<ForStDBIterRequest<?, ?, ?, ?, ?>> batchRequest, Executor executor) {
this(db, batchRequest, executor, null);
}
ForStIterateOperation(
RocksDB db,
List<ForStDBIterRequest<?, ?, ?, ?, ?>> batchRequest,
Executor executor,
Runnable subProcessFinished) {
this.db = db;
this.batchRequest = batchRequest;
this.executor = executor;
this.subProcessFinished = subProcessFinished;
}
@Override
public CompletableFuture<Void> process() {
CompletableFuture<Void> future = new CompletableFuture<>();
AtomicReference<Exception> error = new AtomicReference<>();
AtomicInteger counter = new AtomicInteger(batchRequest.size());
for (int i = 0; i < batchRequest.size(); i++) {
ForStDBIterRequest<?, ?, ?, ?, ?> request = batchRequest.get(i);
executor.execute(
() -> {
// todo: config read options
try {
if (error.get() == null) {
request.process(db, CACHE_SIZE_LIMIT);
} else {
request.completeStateFutureExceptionally(
"Error when execute ForStDb iterate operation",
error.get());
}
} catch (Exception e) {
error.set(e);
request.completeStateFutureExceptionally(
"Error when execute ForStDb iterate operation", e);
future.completeExceptionally(e);
} finally {
if (counter.decrementAndGet() == 0
&& !future.isCompletedExceptionally()) {
future.complete(null);
}
if (subProcessFinished != null) {
subProcessFinished.run();
}
}
});
}
return future;
}
@Override
public int subProcessCount() {
return batchRequest.size();
}
}
| ForStIterateOperation |
java | elastic__elasticsearch | x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/recovery/SearchableSnapshotRecoveryState.java | {
"start": 5422,
"end": 7151
} | class ____ extends RecoveryFilesDetails {
@Override
public void addFileDetails(String name, long length, boolean reused) {
// We allow reporting the same file details multiple times as we populate the file
// details before the recovery is executed (see SearchableSnapshotDirectory#prewarmCache)
// and therefore we ignore the rest of the calls for the same files.
// Additionally, it's possible that a segments_n file that wasn't part of the snapshot is
// sent over during peer recoveries as after restore a new segments file is generated
// (see StoreRecovery#bootstrap).
FileDetail fileDetail = fileDetails.computeIfAbsent(name, n -> new FileDetail(name, length, reused));
assert fileDetail == null || fileDetail.name().equals(name) && fileDetail.length() == length
: "The file "
+ name
+ " was reported multiple times with different lengths: ["
+ fileDetail.length()
+ "] and ["
+ length
+ "]";
}
void markFileAsReused(String name) {
final FileDetail fileDetail = fileDetails.get(name);
assert fileDetail != null;
fileDetails.put(name, new FileDetail(fileDetail.name(), fileDetail.length(), true));
}
@Override
public void clear() {
// Since we don't want to remove the recovery information that might have been
// populated during cache pre-warming we just ignore clearing the file details.
complete = false;
}
}
}
| SearchableSnapshotRecoveryFilesDetails |
java | google__guava | guava-testlib/test/com/google/common/testing/ClassSanityTesterTest.java | {
"start": 22231,
"end": 22372
} | class ____ extends Wrapper {
@Keep
public SetWrapper(Set<NotInstantiable> wrapped) {
super(wrapped);
}
}
static | SetWrapper |
java | apache__flink | flink-table/flink-sql-gateway/src/main/java/org/apache/flink/table/gateway/workflow/scheduler/EmbeddedQuartzScheduler.java | {
"start": 17236,
"end": 23822
} | class ____ implements AutoCloseable {
private final String address;
private final int port;
private final RestClient restClient;
private SqlGatewayRestClient(String endpointUrl) throws Exception {
URL url = new URL(endpointUrl);
this.address = url.getHost();
this.port = url.getPort();
this.restClient =
RestClient.forUrl(new Configuration(), Executors.directExecutor(), url);
}
private SessionHandle openSession(String sessionName, Map<String, String> initConfig)
throws Exception {
OpenSessionRequestBody requestBody =
new OpenSessionRequestBody(sessionName, initConfig);
OpenSessionHeaders headers = OpenSessionHeaders.getInstance();
OpenSessionResponseBody responseBody =
restClient
.sendRequest(
address,
port,
headers,
EmptyMessageParameters.getInstance(),
requestBody)
.get();
return new SessionHandle(UUID.fromString(responseBody.getSessionHandle()));
}
private void closeSession(SessionHandle sessionHandle) throws Exception {
// Close session
CloseSessionHeaders closeSessionHeaders = CloseSessionHeaders.getInstance();
SessionMessageParameters sessionMessageParameters =
new SessionMessageParameters(sessionHandle);
restClient
.sendRequest(
address,
port,
closeSessionHeaders,
sessionMessageParameters,
EmptyRequestBody.getInstance())
.get();
}
private void closeOperation(
SessionHandle sessionHandle, OperationHandle operationHandle) throws Exception {
// Close operation
CloseOperationHeaders closeOperationHeaders = CloseOperationHeaders.getInstance();
OperationMessageParameters operationMessageParameters =
new OperationMessageParameters(sessionHandle, operationHandle);
restClient
.sendRequest(
address,
port,
closeOperationHeaders,
operationMessageParameters,
EmptyRequestBody.getInstance())
.get();
}
private OperationHandle refreshMaterializedTable(
SessionHandle sessionHandle,
String materializedTableIdentifier,
String schedulerTime,
Map<String, String> dynamicOptions,
Map<String, String> staticPartitions,
Map<String, String> executionConfig)
throws Exception {
RefreshMaterializedTableRequestBody requestBody =
new RefreshMaterializedTableRequestBody(
true,
schedulerTime,
dynamicOptions,
staticPartitions,
executionConfig);
RefreshMaterializedTableHeaders headers =
RefreshMaterializedTableHeaders.getInstance();
RefreshMaterializedTableParameters parameters =
new RefreshMaterializedTableParameters(
sessionHandle, materializedTableIdentifier);
RefreshMaterializedTableResponseBody responseBody =
restClient
.sendRequest(address, port, headers, parameters, requestBody)
.get();
return new OperationHandle(UUID.fromString(responseBody.getOperationHandle()));
}
private List<RowData> fetchOperationAllResults(
SessionHandle sessionHandle, OperationHandle operationHandle) throws Exception {
Long token = 0L;
List<RowData> results = new ArrayList<>();
while (token != null) {
FetchResultsResponseBody responseBody =
fetchOperationResults(sessionHandle, operationHandle, token);
if (responseBody instanceof NotReadyFetchResultResponse) {
Thread.sleep(10);
continue;
}
responseBody.getNextResultUri();
results.addAll(responseBody.getResults().getData());
token = SqlGatewayRestEndpointUtils.parseToken(responseBody.getNextResultUri());
}
return results;
}
private FetchResultsResponseBody fetchOperationResults(
SessionHandle sessionHandle, OperationHandle operationHandle, Long token)
throws Exception {
FetchResultsMessageParameters fetchResultsMessageParameters =
new FetchResultsMessageParameters(
sessionHandle, operationHandle, token, RowFormat.JSON);
FetchResultsHeaders fetchResultsHeaders = FetchResultsHeaders.getDefaultInstance();
CompletableFuture<FetchResultsResponseBody> response =
restClient.sendRequest(
address,
port,
fetchResultsHeaders,
fetchResultsMessageParameters,
EmptyRequestBody.getInstance());
return response.get();
}
@Override
public void close() {
try {
restClient.close();
} catch (Exception e) {
LOG.error("Failed to close rest client.", e);
}
}
}
}
}
| SqlGatewayRestClient |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/cdi/bcextensions/CustomQualifierTest.java | {
"start": 1931,
"end": 2244
} | class ____ extends AnnotationLiteral<MyAnnotation> implements MyAnnotation {
private final String value;
MyAnnotationLiteral(String value) {
this.value = value;
}
@Override
public String value() {
return value;
}
}
| MyAnnotationLiteral |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/common/io/stream/DelayableWriteable.java | {
"start": 12323,
"end": 12894
} | class ____ implements Deduplicator {
private static final int MAX_SIZE = 1024;
// lazily init
private Map<Object, Object> cache = null;
@SuppressWarnings("unchecked")
@Override
public <T> T deduplicate(T object) {
if (cache == null) {
cache = new HashMap<>();
cache.put(object, object);
} else if (cache.size() < MAX_SIZE) {
object = (T) cache.computeIfAbsent(object, o -> o);
}
return object;
}
}
}
| DeduplicatorCache |
java | google__dagger | javatests/artifacts/dagger/build-tests/src/test/java/buildtests/TransitiveProvidesQualifierTest.java | {
"start": 7309,
"end": 7946
} | class ____ implements BindingGraphPlugin {",
" @Override",
" public void visitGraph(",
" BindingGraph bindingGraph, DiagnosticReporter diagnosticReporter) {",
" bindingGraph.dependencyEdges().stream()",
" .map(DependencyEdge::dependencyRequest)",
" .map(DependencyRequest::key)",
" .forEach(key -> System.out.println(\"REQUEST: \" + key));",
" }",
"}");
return GradleRunner.create()
.withArguments("--stacktrace", "build")
.withProjectDir(projectDir);
}
}
| TestBindingGraphPlugin |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/bytecode/enhance/spi/EnhancementContext.java | {
"start": 2139,
"end": 3817
} | class ____ check
*
* @return true/false
* @deprecated Will be removed without replacement. See HHH-15641
*/
@Deprecated(forRemoval = true)
boolean hasLazyLoadableAttributes(UnloadedClass classDescriptor);
// todo : may be better to invert these 2 such that the context is asked for an ordered list of persistent fields for an entity/composite
/**
* Does the field represent persistent state? Persistent fields will be "enhanced".
* <p>
* may be better to perform basic checks in the caller (non-static, etc) and call out with just the
* Class name and field name...
*
* @param ctField The field reference.
*
* @return {@code true} if the field is ; {@code false} otherwise.
*/
boolean isPersistentField(UnloadedField ctField);
/**
* For fields which are persistent (according to {@link #isPersistentField}), determine the corresponding ordering
* maintained within the Hibernate metamodel.
*
* @param persistentFields The persistent field references.
*
* @return The ordered references.
*/
UnloadedField[] order(UnloadedField[] persistentFields);
/**
* Determine if a field is lazy loadable.
*
* @param field The field to check
*
* @return {@code true} if the field is lazy loadable; {@code false} otherwise.
*/
boolean isLazyLoadable(UnloadedField field);
/**
* @param field the field to check
*
* @return {@code true} if the field is mapped
*/
boolean isMappedCollection(UnloadedField field);
boolean isDiscoveredType(UnloadedClass classDescriptor);
void registerDiscoveredType(UnloadedClass classDescriptor, Type.PersistenceType type);
/**
* @return The expected behavior when encountering a | to |
java | junit-team__junit5 | junit-platform-launcher/src/main/java/org/junit/platform/launcher/ExcludeMethodFilter.java | {
"start": 918,
"end": 1947
} | class ____ extends AbstractMethodFilter {
ExcludeMethodFilter(String... patterns) {
super(patterns);
}
@Override
public FilterResult apply(TestDescriptor descriptor) {
String methodName = getFullyQualifiedMethodNameFromDescriptor(descriptor);
return findMatchingPattern(methodName) //
.map(pattern -> excluded(formatExclusionReason(methodName, pattern))) //
.orElseGet(() -> included(formatInclusionReason(methodName)));
}
private String formatInclusionReason(@Nullable String methodName) {
return "Method name [%s] does not match any excluded pattern: %s".formatted(methodName, patternDescription);
}
private String formatExclusionReason(@Nullable String methodName, Pattern pattern) {
return "Method name [%s] matches excluded pattern: '%s'".formatted(methodName, pattern);
}
@Override
public String toString() {
return "%s that excludes method names that match one of the following regular expressions: %s".formatted(
getClass().getSimpleName(), patternDescription);
}
}
| ExcludeMethodFilter |
java | spring-projects__spring-framework | spring-web/src/test/java/org/springframework/web/util/ContentCachingRequestWrapperTests.java | {
"start": 1170,
"end": 5431
} | class ____ {
protected static final String FORM_CONTENT_TYPE = MediaType.APPLICATION_FORM_URLENCODED_VALUE;
@Test
void cachedContentToByteArrayWithNoRead() {
ContentCachingRequestWrapper wrapper = createGetRequest("Hello", -1);
assertThat(wrapper.getContentAsByteArray()).isEmpty();
}
@Test
void cachedContentToStringWithNoRead() {
ContentCachingRequestWrapper wrapper = createGetRequest("Hello", -1);
assertThat(wrapper.getContentAsString()).isEqualTo("");
}
@Test
void cachedContentToByteArray() throws Exception {
ContentCachingRequestWrapper wrapper = createGetRequest("Hello World", -1);
byte[] response = wrapper.getInputStream().readAllBytes();
assertThat(wrapper.getContentAsByteArray()).isEqualTo(response);
}
@Test
void cachedContentToString() throws Exception {
ContentCachingRequestWrapper wrapper = createGetRequest("Hello World", -1);
byte[] response = wrapper.getInputStream().readAllBytes();
assertThat(wrapper.getContentAsString()).isEqualTo(new String(response, UTF_8));
}
@Test
void cachedContentToByteArrayWithLimit() throws Exception {
ContentCachingRequestWrapper wrapper = createGetRequest("Hello World", 3);
byte[] response = wrapper.getInputStream().readAllBytes();
assertThat(response).isEqualTo("Hello World".getBytes(UTF_8));
assertThat(wrapper.getContentAsByteArray()).isEqualTo("Hel".getBytes(UTF_8));
}
@Test
void cachedContentToStringWithLimit() throws Exception {
ContentCachingRequestWrapper wrapper = createGetRequest("Hello World", 3);
byte[] response = wrapper.getInputStream().readAllBytes();
assertThat(response).isEqualTo("Hello World".getBytes(UTF_8));
assertThat(wrapper.getContentAsString()).isEqualTo(new String("Hel".getBytes(UTF_8), UTF_8));
}
@Test
void shouldNotAllocateMoreThanCacheLimit() {
ContentCachingRequestWrapper wrapper = createGetRequest("Hello World", 3);
assertThat(wrapper).extracting("cachedContent.initialBlockSize").isEqualTo(3);
}
@Test
void cachedContentWithOverflow() {
ContentCachingRequestWrapper wrapper = new ContentCachingRequestWrapper(createGetRequest("Hello World"), 3) {
@Override
protected void handleContentOverflow(int contentCacheLimit) {
throw new IllegalStateException(String.valueOf(contentCacheLimit));
}
};
assertThatIllegalStateException()
.isThrownBy(() -> wrapper.getInputStream().readAllBytes())
.withMessage("3");
}
@Test
void requestParams() throws Exception {
MockHttpServletRequest request = createPostRequest();
request.setParameter("first", "value");
request.setParameter("second", "foo", "bar");
ContentCachingRequestWrapper wrapper = new ContentCachingRequestWrapper(request, -1);
// getting request parameters will consume the request body
assertThat(wrapper.getParameterMap()).isNotEmpty();
assertThat(new String(wrapper.getContentAsByteArray())).isEqualTo("first=value&second=foo&second=bar");
// SPR-12810 : inputstream body should be consumed
assertThat(new String(wrapper.getInputStream().readAllBytes())).isEmpty();
}
@Test // SPR-12810
void inputStreamFormPostRequest() throws Exception {
MockHttpServletRequest request = createPostRequest();
request.setParameter("first", "value");
request.setParameter("second", "foo", "bar");
ContentCachingRequestWrapper wrapper = new ContentCachingRequestWrapper(request, -1);
byte[] response = wrapper.getInputStream().readAllBytes();
assertThat(wrapper.getContentAsByteArray()).isEqualTo(response);
}
private ContentCachingRequestWrapper createGetRequest(String content, int cacheLimit) {
return new ContentCachingRequestWrapper(createGetRequest(content), cacheLimit);
}
private MockHttpServletRequest createGetRequest(String content) {
MockHttpServletRequest request = new MockHttpServletRequest();
request.setMethod(HttpMethod.GET.name());
request.setCharacterEncoding(UTF_8);
request.setContent(content.getBytes(UTF_8));
return request;
}
private MockHttpServletRequest createPostRequest() {
MockHttpServletRequest request = new MockHttpServletRequest();
request.setMethod(HttpMethod.POST.name());
request.setContentType(FORM_CONTENT_TYPE);
request.setCharacterEncoding(UTF_8.name());
return request;
}
}
| ContentCachingRequestWrapperTests |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/ondeletecascade/OnDeleteManyToOneTest.java | {
"start": 1055,
"end": 2628
} | class ____ {
@Test
public void testOnDelete(EntityManagerFactoryScope scope) {
Parent parent = new Parent();
Child child = new Child();
child.parent = parent;
parent.children.add( child );
scope.inTransaction( em -> {
em.persist( parent );
em.persist( child );
} );
scope.inTransaction( em -> {
Parent p = em.find( Parent.class, parent.id );
em.remove( p );
} );
scope.inTransaction( em -> {
assertNull( em.find( Child.class, child.id ) );
} );
}
@Test
public void testOnDeleteReference(EntityManagerFactoryScope scope) {
Parent parent = new Parent();
Child child = new Child();
child.parent = parent;
parent.children.add( child );
scope.inTransaction( em -> {
em.persist( parent );
em.persist( child );
} );
scope.inTransaction( em -> em.remove( em.getReference( parent ) ) );
scope.inTransaction( em -> assertNull( em.find( Child.class, child.id ) ) );
}
@Test
public void testOnDeleteInReverse(EntityManagerFactoryScope scope) {
Parent parent = new Parent();
Child child = new Child();
child.parent = parent;
parent.children.add( child );
scope.inTransaction( em -> {
em.persist( parent );
em.persist( child );
} );
scope.inTransaction( em -> {
Child c = em.find( Child.class, child.id );
em.remove( c );
} );
scope.inTransaction( em -> {
assertNull( em.find( Child.class, child.id ) );
} );
}
@AfterEach
public void tearDown(EntityManagerFactoryScope scope) {
scope.getEntityManagerFactory().getSchemaManager().truncate();
}
@Entity
static | OnDeleteManyToOneTest |
java | apache__camel | components/camel-azure/camel-azure-eventhubs/src/main/java/org/apache/camel/component/azure/eventhubs/EventHubsConstants.java | {
"start": 942,
"end": 4306
} | class ____ {
public static final String HEADER_PREFIX = "CamelAzureEventHubs";
public static final String COMPLETED_BY_SIZE = "size";
public static final String COMPLETED_BY_TIMEOUT = "timeout";
public static final String UNCOMPLETED = "uncompleted";
// common headers, set by consumer and evaluated by producer
@Metadata(description = "(producer) Overrides the hashing key to be provided for the batch of events, which instructs the Event Hubs service to map this key to a specific partition.\n"
+
"(consumer) It sets the partition hashing key if it was set when originally publishing the event. "
+
"If it exists, this value was used to compute a hash to select a partition to send the message to. This is only present on a received `EventData`.",
javaType = "String")
public static final String PARTITION_KEY = HEADER_PREFIX + "PartitionKey";
@Metadata(description = "(producer) Overrides the identifier of the Event Hub partition that the events will be sent to.\n"
+
"(consumer) It sets the partition id of the Event Hub.",
javaType = "String")
public static final String PARTITION_ID = HEADER_PREFIX + "PartitionId";
// headers set by the consumer only
@Metadata(label = "consumer",
description = "It sets the offset of the event when it was received from the associated Event Hub partition. This is only present on a received `EventData`.",
javaType = "Integer")
public static final String OFFSET = HEADER_PREFIX + "Offset";
@Metadata(label = "consumer",
description = "It sets the instant, in UTC, of when the event was enqueued in the Event Hub partition. This is only present on a received `EventData`.",
javaType = "Instant")
public static final String ENQUEUED_TIME = HEADER_PREFIX + "EnqueuedTime";
@Metadata(label = "consumer",
description = "It sets the sequence number assigned to the event when it was enqueued in the associated Event Hub partition. "
+
"This is unique for every message received in the Event Hub partition. This is only present on a received `EventData`.",
javaType = "Long")
public static final String SEQUENCE_NUMBER = HEADER_PREFIX + "SequenceNumber";
@Metadata(label = "consumer",
description = "The set of free-form event properties which may be used for passing metadata associated with the event with the event body during Event Hubs operations.",
javaType = "Map<String, Object>")
public static final String METADATA = HEADER_PREFIX + "Metadata";
@Metadata(label = "consumer", description = "The timestamp of the message", javaType = "long")
public static final String MESSAGE_TIMESTAMP = Exchange.MESSAGE_TIMESTAMP;
@Metadata(label = "consumer",
description = "It sets the reason for the checkpoint to have been updated. This is only present on a received `EventData`.",
javaType = "String")
public static final String CHECKPOINT_UPDATED_BY = HEADER_PREFIX + "CheckpointUpdatedBy";
private EventHubsConstants() {
}
}
| EventHubsConstants |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/builder/nestedprop/flattening/FlatteningMapper.java | {
"start": 354,
"end": 733
} | interface ____ {
FlatteningMapper INSTANCE = Mappers.getMapper( FlatteningMapper.class );
@Mappings({
@Mapping(target = "articleCount", source = "count"),
@Mapping(target = "article1", source = "first.description"),
@Mapping(target = "article2", ignore = true)
})
ImmutableFlattenedStock writeToFlatProperty(Stock source);
}
| FlatteningMapper |
java | apache__camel | core/camel-api/src/generated/java/org/apache/camel/spi/annotations/Language.java | {
"start": 1492,
"end": 1963
} | class ____ name is of type
* <i>xxxConstants</i> where <i>xxx</i> is the name of the corresponding language like for example
* <i>SimpleConstants</i> for the language <i>camel-simple</i>.
*
* The metadata of a given functions are retrieved directly from the annotation {@code @Metadata} added to the
* {@code String} constant representing its name and defined in the functions class.
*/
Class<?> functionsClass() default void.class;
}
| whose |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/SystemUtils.java | {
"start": 71264,
"end": 71741
} | class ____ loaded.
* </p>
*
* @since 3.0
*/
public static final boolean IS_OS_WINDOWS_7 = getOsNameMatches(OS_NAME_WINDOWS_PREFIX + " 7");
/**
* The constant {@code true} if this is Windows 8.
* <p>
* The result depends on the value of the {@link #OS_NAME} constant.
* </p>
* <p>
* The field will return {@code false} if {@link #OS_NAME} is {@code null}.
* </p>
* <p>
* This value is initialized when the | is |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/ExecutionConfig.java | {
"start": 23238,
"end": 26797
} | class ____ to use when loading classes
*/
public void configure(ReadableConfig configuration, ClassLoader classLoader) {
configuration
.getOptional(PipelineOptions.AUTO_GENERATE_UIDS)
.ifPresent(this::setAutoGeneratedUids);
configuration
.getOptional(PipelineOptions.AUTO_WATERMARK_INTERVAL)
.ifPresent(this::setAutoWatermarkInterval);
configuration
.getOptional(PipelineOptions.CLOSURE_CLEANER_LEVEL)
.ifPresent(this::setClosureCleanerLevel);
configuration
.getOptional(PipelineOptions.GLOBAL_JOB_PARAMETERS)
.ifPresent(this::setGlobalJobParameters);
configuration
.getOptional(MetricOptions.LATENCY_INTERVAL)
.ifPresent(interval -> setLatencyTrackingInterval(interval.toMillis()));
configuration
.getOptional(StateChangelogOptions.PERIODIC_MATERIALIZATION_ENABLED)
.ifPresent(this::enablePeriodicMaterialize);
configuration
.getOptional(StateChangelogOptions.PERIODIC_MATERIALIZATION_INTERVAL)
.ifPresent(this::setPeriodicMaterializeIntervalMillis);
configuration
.getOptional(StateChangelogOptions.MATERIALIZATION_MAX_FAILURES_ALLOWED)
.ifPresent(this::setMaterializationMaxAllowedFailures);
configuration
.getOptional(PipelineOptions.MAX_PARALLELISM)
.ifPresent(this::setMaxParallelism);
configuration.getOptional(CoreOptions.DEFAULT_PARALLELISM).ifPresent(this::setParallelism);
configuration.getOptional(PipelineOptions.OBJECT_REUSE).ifPresent(this::setObjectReuse);
configuration
.getOptional(TaskManagerOptions.TASK_CANCELLATION_INTERVAL)
.ifPresent(interval -> setTaskCancellationInterval(interval.toMillis()));
configuration
.getOptional(TaskManagerOptions.TASK_CANCELLATION_TIMEOUT)
.ifPresent(timeout -> setTaskCancellationTimeout(timeout.toMillis()));
configuration
.getOptional(ExecutionOptions.SNAPSHOT_COMPRESSION)
.ifPresent(this::setUseSnapshotCompression);
configuration
.getOptional(RestartStrategyOptions.RESTART_STRATEGY)
.ifPresent(s -> this.setRestartStrategy(configuration));
configuration
.getOptional(JobManagerOptions.SCHEDULER)
.ifPresent(t -> this.configuration.set(JobManagerOptions.SCHEDULER, t));
serializerConfig.configure(configuration, classLoader);
}
private void setRestartStrategy(ReadableConfig configuration) {
Map<String, String> map = configuration.toMap();
Map<String, String> restartStrategyEntries = new HashMap<>();
for (Map.Entry<String, String> entry : map.entrySet()) {
if (entry.getKey().startsWith(RestartStrategyOptions.RESTART_STRATEGY_CONFIG_PREFIX)) {
restartStrategyEntries.put(entry.getKey(), entry.getValue());
}
}
this.configuration.addAll(Configuration.fromMap(restartStrategyEntries));
}
/**
* @return A copy of internal {@link #configuration}. Note it is missing all options that are
* stored as plain java fields in {@link ExecutionConfig}.
*/
@Internal
public Configuration toConfiguration() {
return new Configuration(configuration);
}
private static | loader |
java | apache__camel | core/camel-support/src/main/java/org/apache/camel/support/PollingConsumerSupport.java | {
"start": 1179,
"end": 2619
} | class ____ extends ServiceSupport implements PollingConsumer {
private final Endpoint endpoint;
private ExceptionHandler exceptionHandler;
public PollingConsumerSupport(Endpoint endpoint) {
this.endpoint = endpoint;
this.exceptionHandler = new LoggingExceptionHandler(endpoint.getCamelContext(), getClass());
}
@Override
public String toString() {
return "PollingConsumer on " + endpoint;
}
@Override
public Endpoint getEndpoint() {
return endpoint;
}
@Override
public Processor getProcessor() {
return null;
}
@Override
public Exchange createExchange(boolean autoRelease) {
throw new UnsupportedOperationException("Not supported on PollingConsumer");
}
@Override
public void releaseExchange(Exchange exchange, boolean autoRelease) {
throw new UnsupportedOperationException("Not supported on PollingConsumer");
}
public ExceptionHandler getExceptionHandler() {
return exceptionHandler;
}
public void setExceptionHandler(ExceptionHandler exceptionHandler) {
this.exceptionHandler = exceptionHandler;
}
/**
* Handles the given exception using the {@link #getExceptionHandler()}
*
* @param t the exception to handle
*/
protected void handleException(Throwable t) {
getExceptionHandler().handleException(t);
}
}
| PollingConsumerSupport |
java | quarkusio__quarkus | independent-projects/arc/tcks/arquillian/src/main/java/io/quarkus/arc/arquillian/utils/Archives.java | {
"start": 426,
"end": 1332
} | class ____ {
public static void explode(Archive<?> archive, String prefix, Path targetPath) throws IOException {
String prefixPattern = "^" + Pattern.quote(prefix);
Map<ArchivePath, Node> files = archive.getContent(Filters.include(prefixPattern + ".*"));
for (Map.Entry<ArchivePath, Node> entry : files.entrySet()) {
Asset asset = entry.getValue().getAsset();
if (asset == null) {
continue;
}
String path = entry.getKey().get().replaceFirst(prefixPattern, "");
copy(asset, targetPath.resolve(path));
}
}
public static void copy(Asset asset, Path targetPath) throws IOException {
Files.createDirectories(targetPath.getParent()); // make sure the directory exists
try (InputStream in = asset.openStream()) {
Files.copy(in, targetPath);
}
}
}
| Archives |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/identifier/SimpleEntityTest.java | {
"start": 4854,
"end": 5550
} | class ____ {
//tag::entity-pojo-identifier-mapping-example[]
@Id
private Long id;
//end::entity-pojo-identifier-mapping-example[]
private String title;
private String author;
//Getters and setters are omitted for brevity
//end::entity-pojo-mapping-example[]
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getTitle() {
return title;
}
public void setTitle(String title) {
this.title = title;
}
public String getAuthor() {
return author;
}
public void setAuthor(String author) {
this.author = author;
}
//tag::entity-pojo-mapping-example[]
}
//end::entity-pojo-mapping-example[]
}
| Book |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/compress/NoCompression.java | {
"start": 1696,
"end": 1874
} | class ____ implements Compression.Builder<NoCompression> {
@Override
public NoCompression build() {
return new NoCompression();
}
}
}
| Builder |
java | apache__camel | components/camel-jpa/src/test/java/org/apache/camel/processor/jpa/JpaCronSchedulerTest.java | {
"start": 1092,
"end": 2357
} | class ____ extends AbstractJpaTest {
protected static final String SELECT_ALL_STRING = "select x from " + SendEmail.class.getName() + " x";
@Test
public void testPreConsumed() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(1);
template.sendBody("direct:start", new SendEmail("dummy"));
MockEndpoint.assertIsSatisfied(context);
// @PreConsumed should change the dummy address
SendEmail email = mock.getReceivedExchanges().get(0).getIn().getBody(SendEmail.class);
assertEquals("dummy@somewhere.org", email.getAddress());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("direct:start").to("jpa://" + SendEmail.class.getName());
from("jpa://" + SendEmail.class.getName() + "?scheduler=spring&scheduler.cron=*+*+*+*+*+*").to("mock:result");
}
};
}
@Override
protected String routeXml() {
return "org/apache/camel/processor/jpa/springJpaRouteTest.xml";
}
@Override
protected String selectAllString() {
return SELECT_ALL_STRING;
}
}
| JpaCronSchedulerTest |
java | eclipse-vertx__vert.x | vertx-core/src/test/java/io/vertx/tests/http/Http2MultiplexClientTest.java | {
"start": 547,
"end": 2099
} | class ____ extends Http2ClientTest {
@Override
protected HttpClientOptions createBaseClientOptions() {
return super.createBaseClientOptions().setHttp2MultiplexImplementation(true);
}
@Test
@Ignore
@Override
public void testStreamPriority() throws Exception {
super.testStreamPriority();
}
@Test
@Ignore
@Override
public void testStreamPriorityChange() throws Exception {
super.testStreamPriorityChange();
}
@Test
@Ignore
@Override
public void testClientStreamPriorityNoChange() throws Exception {
super.testClientStreamPriorityNoChange();
}
@Test
@Ignore
@Override
public void testServerStreamPriorityNoChange() throws Exception {
super.testServerStreamPriorityNoChange();
}
@Test
@Ignore
@Override
public void testPushPromise() throws Exception {
super.testPushPromise();
}
@Test
@Ignore
@Override
public void testResetActivePushPromise() throws Exception {
super.testResetActivePushPromise();
}
@Test
@Ignore
@Override
public void testResetPushPromiseNoHandler() throws Exception {
super.testResetPushPromiseNoHandler();
}
@Test
@Ignore
@Override
public void testResetPendingPushPromise() throws Exception {
super.testResetPendingPushPromise();
}
@Test
@Ignore
@Override
public void testConnectionDecodeError() throws Exception {
super.testConnectionDecodeError();
}
@Test
@Ignore
@Override
public void testStreamError() throws Exception {
super.testStreamError();
}
}
| Http2MultiplexClientTest |
java | google__guava | android/guava-tests/test/com/google/common/primitives/ImmutableLongArrayTest.java | {
"start": 19128,
"end": 19822
} | class ____
extends TestLongListGenerator {
@Override
protected List<Long> create(Long[] elements) {
Long[] prefix = {Long.MIN_VALUE, Long.MAX_VALUE};
Long[] suffix = {86L, 99L};
Long[] all = concat(concat(prefix, elements), suffix);
return makeArray(all).subArray(2, elements.length + 2).asList();
}
}
@J2ktIncompatible
@GwtIncompatible // used only from suite
@AndroidIncompatible
private static Long[] concat(Long[] a, Long[] b) {
return ObjectArrays.concat(a, b, Long.class);
}
@J2ktIncompatible
@GwtIncompatible // used only from suite
@AndroidIncompatible
public abstract static | ImmutableLongArrayMiddleSubListAsListGenerator |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/utils/DuplicateChangesUtils.java | {
"start": 1236,
"end": 3257
} | class ____ {
private DuplicateChangesUtils() {}
/**
* Get an optional {@link DuplicateChanges} from the given {@link StreamPhysicalRel}.
*
* <p>The {@link DuplicateChanges} is inferred from {@link DuplicateChangesTrait}.
*/
public static Optional<DuplicateChanges> getDuplicateChanges(StreamPhysicalRel rel) {
Optional<DuplicateChangesTrait> duplicateChangesTraitOp =
Optional.ofNullable(rel.getTraitSet().getTrait(DuplicateChangesTraitDef.INSTANCE));
return duplicateChangesTraitOp.stream()
.map(DuplicateChangesTrait::getDuplicateChanges)
.findFirst();
}
/**
* Merge the given two {@link DuplicateChanges} as a new one.
*
* <p>The logic matrix is following:
*
* <pre>
* +-------------+-------------+---------------+
* | origin_1 | origin_2 | merge result |
* +-------------+-------------+---------------+
* | NONE | * | * |
* | `ANY` | NONE | `ANY` |
* | DISALLOW | * | DISALLOW |
* | * | DISALLOW | DISALLOW |
* | ALLOW | ALLOW | ALLOW |
* +-------------+-------------+---------------+
* </pre>
*/
public static DuplicateChanges mergeDuplicateChanges(
DuplicateChanges duplicateChanges1, DuplicateChanges duplicateChanges2) {
if (duplicateChanges1 == DuplicateChanges.NONE
|| duplicateChanges2 == DuplicateChanges.NONE) {
return duplicateChanges1 == DuplicateChanges.NONE
? duplicateChanges2
: duplicateChanges1;
}
if (duplicateChanges1 == DuplicateChanges.DISALLOW
|| duplicateChanges2 == DuplicateChanges.DISALLOW) {
return DuplicateChanges.DISALLOW;
}
return DuplicateChanges.ALLOW;
}
}
| DuplicateChangesUtils |
java | apache__kafka | clients/src/test/java/org/apache/kafka/common/record/MultiRecordsSendTest.java | {
"start": 1255,
"end": 2343
} | class ____ {
@Test
public void testSendsFreedAfterWriting() throws IOException {
int numChunks = 4;
int chunkSize = 32;
int totalSize = numChunks * chunkSize;
Queue<Send> sends = new LinkedList<>();
ByteBuffer[] chunks = new ByteBuffer[numChunks];
for (int i = 0; i < numChunks; i++) {
ByteBuffer buffer = ByteBuffer.wrap(TestUtils.randomBytes(chunkSize));
chunks[i] = buffer;
sends.add(new ByteBufferSend(buffer));
}
MultiRecordsSend send = new MultiRecordsSend(sends);
assertEquals(totalSize, send.size());
for (int i = 0; i < numChunks; i++) {
assertEquals(numChunks - i, send.numResidentSends());
NonOverflowingByteBufferChannel out = new NonOverflowingByteBufferChannel(chunkSize);
send.writeTo(out);
out.close();
assertEquals(chunks[i], out.buffer());
}
assertEquals(0, send.numResidentSends());
assertTrue(send.completed());
}
private static | MultiRecordsSendTest |
java | apache__maven | impl/maven-core/src/test/java/org/apache/maven/configuration/DefaultBeanConfiguratorPathTest.java | {
"start": 3736,
"end": 3781
} | class ____ {
Path file;
}
}
| SomeBean |
java | netty__netty | codec-http2/src/test/java/io/netty/handler/codec/http2/WeightedFairQueueByteDistributorDependencyTreeTest.java | {
"start": 1425,
"end": 48465
} | class ____ extends
AbstractWeightedFairQueueByteDistributorDependencyTest {
private static final int leadersId = 3; // js, css
private static final int unblockedId = 5;
private static final int backgroundId = 7;
private static final int speculativeId = 9;
private static final int followersId = 11; // images
private static final short leadersWeight = 201;
private static final short unblockedWeight = 101;
private static final short backgroundWeight = 1;
private static final short speculativeWeight = 1;
private static final short followersWeight = 1;
@BeforeEach
public void setup() throws Http2Exception {
MockitoAnnotations.initMocks(this);
setup(0);
}
private void setup(int maxStateOnlySize) {
connection = new DefaultHttp2Connection(false);
distributor = new WeightedFairQueueByteDistributor(connection, maxStateOnlySize);
// Assume we always write all the allocated bytes.
doAnswer(writeAnswer(false)).when(writer).write(any(Http2Stream.class), anyInt());
}
@Test
public void closingStreamWithChildrenDoesNotCauseConcurrentModification() throws Http2Exception {
// We create enough streams to wrap around the child array. We carefully craft the stream ids so that they hash
// codes overlap with respect to the child collection. If the implementation is not careful this may lead to a
// concurrent modification exception while promoting all children to the connection stream.
final Http2Stream streamA = connection.local().createStream(1, false);
final int numStreams = INITIAL_CHILDREN_MAP_SIZE - 1;
for (int i = 0, streamId = 3; i < numStreams; ++i, streamId += INITIAL_CHILDREN_MAP_SIZE) {
final Http2Stream stream = connection.local().createStream(streamId, false);
setPriority(stream.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, false);
}
assertEquals(INITIAL_CHILDREN_MAP_SIZE, connection.numActiveStreams());
streamA.close();
assertEquals(numStreams, connection.numActiveStreams());
}
@Test
public void closeWhileIteratingDoesNotNPE() throws Http2Exception {
final Http2Stream streamA = connection.local().createStream(3, false);
final Http2Stream streamB = connection.local().createStream(5, false);
final Http2Stream streamC = connection.local().createStream(7, false);
setPriority(streamB.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, false);
connection.forEachActiveStream(new Http2StreamVisitor() {
@Override
public boolean visit(Http2Stream stream) throws Http2Exception {
streamA.close();
setPriority(streamB.id(), streamC.id(), DEFAULT_PRIORITY_WEIGHT, false);
return true;
}
});
}
@Test
public void localStreamCanDependUponIdleStream() throws Http2Exception {
setup(1);
Http2Stream streamA = connection.local().createStream(1, false);
setPriority(3, streamA.id(), MIN_WEIGHT, true);
assertTrue(distributor.isChild(3, streamA.id(), MIN_WEIGHT));
}
@Test
public void remoteStreamCanDependUponIdleStream() throws Http2Exception {
setup(1);
Http2Stream streamA = connection.remote().createStream(2, false);
setPriority(4, streamA.id(), MIN_WEIGHT, true);
assertTrue(distributor.isChild(4, streamA.id(), MIN_WEIGHT));
}
@Test
public void prioritizeShouldUseDefaults() throws Exception {
Http2Stream stream = connection.local().createStream(1, false);
assertTrue(distributor.isChild(stream.id(), connection.connectionStream().id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(1, distributor.numChildren(connection.connectionStream().id()));
assertEquals(0, distributor.numChildren(stream.id()));
}
@Test
public void reprioritizeWithNoChangeShouldDoNothing() throws Exception {
Http2Stream stream = connection.local().createStream(1, false);
setPriority(stream.id(), connection.connectionStream().id(), DEFAULT_PRIORITY_WEIGHT, false);
assertTrue(distributor.isChild(stream.id(), connection.connectionStream().id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(1, distributor.numChildren(connection.connectionStream().id()));
assertEquals(0, distributor.numChildren(stream.id()));
}
@Test
public void stateOnlyPriorityShouldBePreservedWhenStreamsAreCreatedAndClosed() throws Http2Exception {
setup(3);
short weight3 = MIN_WEIGHT + 1;
short weight5 = (short) (weight3 + 1);
short weight7 = (short) (weight5 + 1);
setPriority(3, connection.connectionStream().id(), weight3, true);
setPriority(5, connection.connectionStream().id(), weight5, true);
setPriority(7, connection.connectionStream().id(), weight7, true);
assertEquals(0, connection.numActiveStreams());
verifyStateOnlyPriorityShouldBePreservedWhenStreamsAreCreated(weight3, weight5, weight7);
// Now create stream objects and ensure the state and dependency tree is preserved.
Http2Stream streamA = connection.local().createStream(3, false);
Http2Stream streamB = connection.local().createStream(5, false);
Http2Stream streamC = connection.local().createStream(7, false);
assertEquals(3, connection.numActiveStreams());
verifyStateOnlyPriorityShouldBePreservedWhenStreamsAreCreated(weight3, weight5, weight7);
// Close all the streams and ensure the state and dependency tree is preserved.
streamA.close();
streamB.close();
streamC.close();
assertEquals(0, connection.numActiveStreams());
verifyStateOnlyPriorityShouldBePreservedWhenStreamsAreCreated(weight3, weight5, weight7);
}
private void verifyStateOnlyPriorityShouldBePreservedWhenStreamsAreCreated(short weight3, short weight5,
short weight7) {
// Level 0
assertEquals(1, distributor.numChildren(connection.connectionStream().id()));
// Level 1
assertTrue(distributor.isChild(7, connection.connectionStream().id(), weight7));
assertEquals(1, distributor.numChildren(7));
// Level 2
assertTrue(distributor.isChild(5, 7, weight5));
assertEquals(1, distributor.numChildren(5));
// Level 3
assertTrue(distributor.isChild(3, 5, weight3));
assertEquals(0, distributor.numChildren(3));
}
@Test
public void fireFoxQoSStreamsRemainAfterDataStreamsAreClosed() throws Http2Exception {
// https://bitsup.blogspot.com/2015/01/http2-dependency-priorities-in-firefox.html
setup(5);
setPriority(leadersId, connection.connectionStream().id(), leadersWeight, false);
setPriority(unblockedId, connection.connectionStream().id(), unblockedWeight, false);
setPriority(backgroundId, connection.connectionStream().id(), backgroundWeight, false);
setPriority(speculativeId, backgroundId, speculativeWeight, false);
setPriority(followersId, leadersId, followersWeight, false);
verifyFireFoxQoSStreams();
// Simulate a HTML request
short htmlGetStreamWeight = 2;
Http2Stream htmlGetStream = connection.local().createStream(13, false);
setPriority(htmlGetStream.id(), followersId, htmlGetStreamWeight, false);
Http2Stream favIconStream = connection.local().createStream(15, false);
setPriority(favIconStream.id(), connection.connectionStream().id(), DEFAULT_PRIORITY_WEIGHT, false);
Http2Stream cssStream = connection.local().createStream(17, false);
setPriority(cssStream.id(), leadersId, DEFAULT_PRIORITY_WEIGHT, false);
Http2Stream jsStream = connection.local().createStream(19, false);
setPriority(jsStream.id(), leadersId, DEFAULT_PRIORITY_WEIGHT, false);
Http2Stream imageStream = connection.local().createStream(21, false);
setPriority(imageStream.id(), followersId, 1, false);
// Level 0
assertEquals(4, distributor.numChildren(connection.connectionStream().id()));
// Level 1
assertTrue(distributor.isChild(leadersId, connection.connectionStream().id(), leadersWeight));
assertEquals(3, distributor.numChildren(leadersId));
assertTrue(distributor.isChild(unblockedId, connection.connectionStream().id(), unblockedWeight));
assertEquals(0, distributor.numChildren(unblockedId));
assertTrue(distributor.isChild(backgroundId, connection.connectionStream().id(), backgroundWeight));
assertEquals(1, distributor.numChildren(backgroundId));
assertTrue(distributor.isChild(favIconStream.id(), connection.connectionStream().id(),
DEFAULT_PRIORITY_WEIGHT));
assertEquals(0, distributor.numChildren(favIconStream.id()));
// Level 2
assertTrue(distributor.isChild(followersId, leadersId, followersWeight));
assertEquals(2, distributor.numChildren(followersId));
assertTrue(distributor.isChild(speculativeId, backgroundId, speculativeWeight));
assertEquals(0, distributor.numChildren(speculativeId));
assertTrue(distributor.isChild(cssStream.id(), leadersId, DEFAULT_PRIORITY_WEIGHT));
assertEquals(0, distributor.numChildren(cssStream.id()));
assertTrue(distributor.isChild(jsStream.id(), leadersId, DEFAULT_PRIORITY_WEIGHT));
assertEquals(0, distributor.numChildren(jsStream.id()));
// Level 3
assertTrue(distributor.isChild(htmlGetStream.id(), followersId, htmlGetStreamWeight));
assertEquals(0, distributor.numChildren(htmlGetStream.id()));
assertTrue(distributor.isChild(imageStream.id(), followersId, followersWeight));
assertEquals(0, distributor.numChildren(imageStream.id()));
// Close all the data streams and ensure the "priority only streams" are retained in the dependency tree.
htmlGetStream.close();
favIconStream.close();
cssStream.close();
jsStream.close();
imageStream.close();
verifyFireFoxQoSStreams();
}
private void verifyFireFoxQoSStreams() {
// Level 0
assertEquals(3, distributor.numChildren(connection.connectionStream().id()));
// Level 1
assertTrue(distributor.isChild(leadersId, connection.connectionStream().id(), leadersWeight));
assertEquals(1, distributor.numChildren(leadersId));
assertTrue(distributor.isChild(unblockedId, connection.connectionStream().id(), unblockedWeight));
assertEquals(0, distributor.numChildren(unblockedId));
assertTrue(distributor.isChild(backgroundId, connection.connectionStream().id(), backgroundWeight));
assertEquals(1, distributor.numChildren(backgroundId));
// Level 2
assertTrue(distributor.isChild(followersId, leadersId, followersWeight));
assertEquals(0, distributor.numChildren(followersId));
assertTrue(distributor.isChild(speculativeId, backgroundId, speculativeWeight));
assertEquals(0, distributor.numChildren(speculativeId));
}
@Test
public void lowestPrecedenceStateShouldBeDropped() throws Http2Exception {
setup(3);
short weight3 = MAX_WEIGHT;
short weight5 = (short) (weight3 - 1);
short weight7 = (short) (weight5 - 1);
short weight9 = (short) (weight7 - 1);
setPriority(3, connection.connectionStream().id(), weight3, true);
setPriority(5, connection.connectionStream().id(), weight5, true);
setPriority(7, connection.connectionStream().id(), weight7, false);
assertEquals(0, connection.numActiveStreams());
verifyLowestPrecedenceStateShouldBeDropped1(weight3, weight5, weight7);
// Attempt to create a new item in the dependency tree but the maximum amount of "state only" streams is meet
// so a stream will have to be dropped. Currently the new stream is the lowest "precedence" so it is dropped.
setPriority(9, 3, weight9, false);
assertEquals(0, connection.numActiveStreams());
verifyLowestPrecedenceStateShouldBeDropped1(weight3, weight5, weight7);
// Set the priority for stream 9 such that its depth in the dependency tree is numerically lower than stream 3,
// and therefore the dependency state associated with stream 3 will be dropped.
setPriority(9, 5, weight9, true);
verifyLowestPrecedenceStateShouldBeDropped2(weight9, weight5, weight7);
// Test that stream which has been activated is lower priority than other streams that have not been activated.
Http2Stream streamA = connection.local().createStream(5, false);
streamA.close();
verifyLowestPrecedenceStateShouldBeDropped2(weight9, weight5, weight7);
// Stream 3 (hasn't been opened) should result in stream 5 being dropped.
// dropping stream 5 will distribute its weight to children (only 9)
setPriority(3, 9, weight3, false);
verifyLowestPrecedenceStateShouldBeDropped3(weight3, weight7, weight5);
// Stream 5's state has been discarded so we should be able to re-insert this state.
setPriority(5, 0, weight5, false);
verifyLowestPrecedenceStateShouldBeDropped4(weight5, weight7, weight5);
// All streams are at the same level, so stream ID should be used to drop the numeric lowest valued stream.
short weight11 = (short) (weight9 - 1);
setPriority(11, 0, weight11, false);
verifyLowestPrecedenceStateShouldBeDropped5(weight7, weight5, weight11);
}
private void verifyLowestPrecedenceStateShouldBeDropped1(short weight3, short weight5, short weight7) {
// Level 0
assertEquals(2, distributor.numChildren(connection.connectionStream().id()));
// Level 1
assertTrue(distributor.isChild(7, connection.connectionStream().id(), weight7));
assertEquals(0, distributor.numChildren(7));
assertTrue(distributor.isChild(5, connection.connectionStream().id(), weight5));
assertEquals(1, distributor.numChildren(5));
// Level 2
assertTrue(distributor.isChild(3, 5, weight3));
assertEquals(0, distributor.numChildren(3));
}
private void verifyLowestPrecedenceStateShouldBeDropped2(short weight9, short weight5, short weight7) {
// Level 0
assertEquals(2, distributor.numChildren(connection.connectionStream().id()));
// Level 1
assertTrue(distributor.isChild(7, connection.connectionStream().id(), weight7));
assertEquals(0, distributor.numChildren(7));
assertTrue(distributor.isChild(5, connection.connectionStream().id(), weight5));
assertEquals(1, distributor.numChildren(5));
// Level 2
assertTrue(distributor.isChild(9, 5, weight9));
assertEquals(0, distributor.numChildren(9));
}
private void verifyLowestPrecedenceStateShouldBeDropped3(short weight3, short weight7, short weight9) {
// Level 0
assertEquals(2, distributor.numChildren(connection.connectionStream().id()));
// Level 1
assertTrue(distributor.isChild(7, connection.connectionStream().id(), weight7));
assertEquals(0, distributor.numChildren(7));
assertTrue(distributor.isChild(9, connection.connectionStream().id(), weight9));
assertEquals(1, distributor.numChildren(9));
// Level 2
assertTrue(distributor.isChild(3, 9, weight3));
assertEquals(0, distributor.numChildren(3));
}
private void verifyLowestPrecedenceStateShouldBeDropped4(short weight5, short weight7, short weight9) {
// Level 0
assertEquals(3, distributor.numChildren(connection.connectionStream().id()));
// Level 1
assertTrue(distributor.isChild(5, connection.connectionStream().id(), weight5));
assertEquals(0, distributor.numChildren(5));
assertTrue(distributor.isChild(7, connection.connectionStream().id(), weight7));
assertEquals(0, distributor.numChildren(7));
assertTrue(distributor.isChild(9, connection.connectionStream().id(), weight9));
assertEquals(0, distributor.numChildren(9));
}
private void verifyLowestPrecedenceStateShouldBeDropped5(short weight7, short weight9, short weight11) {
// Level 0
assertEquals(3, distributor.numChildren(connection.connectionStream().id()));
// Level 1
assertTrue(distributor.isChild(11, connection.connectionStream().id(), weight11));
assertEquals(0, distributor.numChildren(11));
assertTrue(distributor.isChild(7, connection.connectionStream().id(), weight7));
assertEquals(0, distributor.numChildren(7));
assertTrue(distributor.isChild(9, connection.connectionStream().id(), weight9));
assertEquals(0, distributor.numChildren(9));
}
@Test
public void priorityOnlyStreamsArePreservedWhenReservedStreamsAreClosed() throws Http2Exception {
setup(1);
short weight3 = MIN_WEIGHT;
setPriority(3, connection.connectionStream().id(), weight3, true);
Http2Stream streamA = connection.local().createStream(5, false);
Http2Stream streamB = connection.remote().reservePushStream(4, streamA);
// Level 0
assertEquals(3, distributor.numChildren(connection.connectionStream().id()));
// Level 1
assertTrue(distributor.isChild(3, connection.connectionStream().id(), weight3));
assertEquals(0, distributor.numChildren(3));
assertTrue(distributor.isChild(streamA.id(), connection.connectionStream().id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(0, distributor.numChildren(streamA.id()));
assertTrue(distributor.isChild(streamB.id(), connection.connectionStream().id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(0, distributor.numChildren(streamB.id()));
// Close both streams.
streamB.close();
streamA.close();
// Level 0
assertEquals(1, distributor.numChildren(connection.connectionStream().id()));
// Level 1
assertTrue(distributor.isChild(3, connection.connectionStream().id(), weight3));
assertEquals(0, distributor.numChildren(3));
}
@Test
public void insertExclusiveShouldAddNewLevel() throws Exception {
Http2Stream streamA = connection.local().createStream(1, false);
Http2Stream streamB = connection.local().createStream(3, false);
Http2Stream streamC = connection.local().createStream(5, false);
Http2Stream streamD = connection.local().createStream(7, false);
setPriority(streamB.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamC.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamD.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, true);
assertEquals(4, connection.numActiveStreams());
// Level 0
assertEquals(1, distributor.numChildren(connection.connectionStream().id()));
// Level 1
assertTrue(distributor.isChild(streamA.id(), connection.connectionStream().id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(1, distributor.numChildren(streamA.id()));
// Level 2
assertTrue(distributor.isChild(streamD.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(2, distributor.numChildren(streamD.id()));
// Level 3
assertTrue(distributor.isChild(streamB.id(), streamD.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(0, distributor.numChildren(streamB.id()));
assertTrue(distributor.isChild(streamC.id(), streamD.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(0, distributor.numChildren(streamC.id()));
}
@Test
public void existingChildMadeExclusiveShouldNotCreateTreeCycle() throws Http2Exception {
Http2Stream streamA = connection.local().createStream(1, false);
Http2Stream streamB = connection.local().createStream(3, false);
Http2Stream streamC = connection.local().createStream(5, false);
Http2Stream streamD = connection.local().createStream(7, false);
setPriority(streamB.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamC.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamD.id(), streamC.id(), DEFAULT_PRIORITY_WEIGHT, false);
// Stream C is already dependent on Stream A, but now make that an exclusive dependency
setPriority(streamC.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, true);
assertEquals(4, connection.numActiveStreams());
// Level 0
assertEquals(1, distributor.numChildren(connection.connectionStream().id()));
// Level 1
assertTrue(distributor.isChild(streamA.id(), connection.connectionStream().id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(1, distributor.numChildren(streamA.id()));
// Level 2
assertTrue(distributor.isChild(streamC.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(2, distributor.numChildren(streamC.id()));
// Level 3
assertTrue(distributor.isChild(streamB.id(), streamC.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(0, distributor.numChildren(streamB.id()));
assertTrue(distributor.isChild(streamD.id(), streamC.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(0, distributor.numChildren(streamD.id()));
}
@Test
public void newExclusiveChildShouldUpdateOldParentCorrectly() throws Http2Exception {
Http2Stream streamA = connection.local().createStream(1, false);
Http2Stream streamB = connection.local().createStream(3, false);
Http2Stream streamC = connection.local().createStream(5, false);
Http2Stream streamD = connection.local().createStream(7, false);
Http2Stream streamE = connection.local().createStream(9, false);
Http2Stream streamF = connection.local().createStream(11, false);
setPriority(streamB.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamC.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamD.id(), streamC.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamF.id(), streamE.id(), DEFAULT_PRIORITY_WEIGHT, false);
// F is now going to be exclusively dependent on A, after this we should check that stream E
// prioritizableForTree is not over decremented.
setPriority(streamF.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, true);
assertEquals(6, connection.numActiveStreams());
// Level 0
assertEquals(2, distributor.numChildren(connection.connectionStream().id()));
// Level 1
assertTrue(distributor.isChild(streamE.id(), connection.connectionStream().id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(0, distributor.numChildren(streamE.id()));
assertTrue(distributor.isChild(streamA.id(), connection.connectionStream().id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(1, distributor.numChildren(streamA.id()));
// Level 2
assertTrue(distributor.isChild(streamF.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(2, distributor.numChildren(streamF.id()));
// Level 3
assertTrue(distributor.isChild(streamB.id(), streamF.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(0, distributor.numChildren(streamB.id()));
assertTrue(distributor.isChild(streamC.id(), streamF.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(1, distributor.numChildren(streamC.id()));
// Level 4
assertTrue(distributor.isChild(streamD.id(), streamC.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(0, distributor.numChildren(streamD.id()));
}
@Test
public void weightChangeWithNoTreeChangeShouldBeRespected() throws Http2Exception {
Http2Stream streamA = connection.local().createStream(1, false);
Http2Stream streamB = connection.local().createStream(3, false);
Http2Stream streamC = connection.local().createStream(5, false);
Http2Stream streamD = connection.local().createStream(7, false);
setPriority(streamB.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamC.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamD.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, true);
assertEquals(4, connection.numActiveStreams());
short newWeight = (short) (DEFAULT_PRIORITY_WEIGHT + 1);
setPriority(streamD.id(), streamA.id(), newWeight, false);
// Level 0
assertEquals(1, distributor.numChildren(connection.connectionStream().id()));
// Level 1
assertTrue(distributor.isChild(streamA.id(), connection.connectionStream().id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(1, distributor.numChildren(streamA.id()));
// Level 2
assertTrue(distributor.isChild(streamD.id(), streamA.id(), newWeight));
assertEquals(2, distributor.numChildren(streamD.id()));
// Level 3
assertTrue(distributor.isChild(streamB.id(), streamD.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(0, distributor.numChildren(streamB.id()));
assertTrue(distributor.isChild(streamC.id(), streamD.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(0, distributor.numChildren(streamC.id()));
}
@Test
public void sameNodeDependentShouldNotStackOverflowNorChangePrioritizableForTree() throws Http2Exception {
Http2Stream streamA = connection.local().createStream(1, false);
Http2Stream streamB = connection.local().createStream(3, false);
Http2Stream streamC = connection.local().createStream(5, false);
Http2Stream streamD = connection.local().createStream(7, false);
setPriority(streamB.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamC.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamD.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, true);
boolean[] exclusives = { true, false };
short[] weights = { DEFAULT_PRIORITY_WEIGHT, 100, 200, DEFAULT_PRIORITY_WEIGHT };
assertEquals(4, connection.numActiveStreams());
// The goal is to call setPriority with the same parent and vary the parameters
// we were at one point adding a circular depends to the tree and then throwing
// a StackOverflow due to infinite recursive operation.
for (short weight : weights) {
for (boolean exclusive : exclusives) {
setPriority(streamD.id(), streamA.id(), weight, exclusive);
assertEquals(0, distributor.numChildren(streamB.id()));
assertEquals(0, distributor.numChildren(streamC.id()));
assertEquals(1, distributor.numChildren(streamA.id()));
assertEquals(2, distributor.numChildren(streamD.id()));
assertFalse(distributor.isChild(streamB.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT));
assertFalse(distributor.isChild(streamC.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT));
assertTrue(distributor.isChild(streamB.id(), streamD.id(), DEFAULT_PRIORITY_WEIGHT));
assertTrue(distributor.isChild(streamC.id(), streamD.id(), DEFAULT_PRIORITY_WEIGHT));
assertTrue(distributor.isChild(streamD.id(), streamA.id(), weight));
}
}
}
@Test
public void multipleCircularDependencyShouldUpdatePrioritizable() throws Http2Exception {
Http2Stream streamA = connection.local().createStream(1, false);
Http2Stream streamB = connection.local().createStream(3, false);
Http2Stream streamC = connection.local().createStream(5, false);
Http2Stream streamD = connection.local().createStream(7, false);
setPriority(streamB.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamC.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamD.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, true);
assertEquals(4, connection.numActiveStreams());
// Bring B to the root
setPriority(streamA.id(), streamB.id(), DEFAULT_PRIORITY_WEIGHT, true);
// Move all streams to be children of B
setPriority(streamC.id(), streamB.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamD.id(), streamB.id(), DEFAULT_PRIORITY_WEIGHT, false);
// Move A back to the root
setPriority(streamB.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, true);
// Move all streams to be children of A
setPriority(streamC.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamD.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, false);
// Level 0
assertEquals(1, distributor.numChildren(connection.connectionStream().id()));
// Level 1
assertTrue(distributor.isChild(streamA.id(), connection.connectionStream().id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(3, distributor.numChildren(streamA.id()));
// Level 2
assertTrue(distributor.isChild(streamB.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(0, distributor.numChildren(streamB.id()));
assertTrue(distributor.isChild(streamC.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(0, distributor.numChildren(streamC.id()));
assertTrue(distributor.isChild(streamD.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(0, distributor.numChildren(streamD.id()));
}
@Test
public void removeWithPrioritizableDependentsShouldNotRestructureTree() throws Exception {
Http2Stream streamA = connection.local().createStream(1, false);
Http2Stream streamB = connection.local().createStream(3, false);
Http2Stream streamC = connection.local().createStream(5, false);
Http2Stream streamD = connection.local().createStream(7, false);
setPriority(streamB.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamC.id(), streamB.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamD.id(), streamB.id(), DEFAULT_PRIORITY_WEIGHT, false);
// Default removal policy will cause it to be removed immediately.
// Closing streamB will distribute its weight to the children (C & D) equally.
streamB.close();
// Level 0
assertEquals(1, distributor.numChildren(connection.connectionStream().id()));
// Level 1
assertTrue(distributor.isChild(streamA.id(), connection.connectionStream().id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(2, distributor.numChildren(streamA.id()));
// Level 2
short halfWeight = DEFAULT_PRIORITY_WEIGHT / 2;
assertTrue(distributor.isChild(streamC.id(), streamA.id(), halfWeight));
assertEquals(0, distributor.numChildren(streamC.id()));
assertTrue(distributor.isChild(streamD.id(), streamA.id(), halfWeight));
assertEquals(0, distributor.numChildren(streamD.id()));
}
@Test
public void closeWithNoPrioritizableDependentsShouldRestructureTree() throws Exception {
Http2Stream streamA = connection.local().createStream(1, false);
Http2Stream streamB = connection.local().createStream(3, false);
Http2Stream streamC = connection.local().createStream(5, false);
Http2Stream streamD = connection.local().createStream(7, false);
Http2Stream streamE = connection.local().createStream(9, false);
Http2Stream streamF = connection.local().createStream(11, false);
setPriority(streamB.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamC.id(), streamB.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamD.id(), streamB.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamE.id(), streamC.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamF.id(), streamD.id(), DEFAULT_PRIORITY_WEIGHT, false);
// Close internal nodes, leave 1 leaf node open, the only remaining stream is the one that is not closed (E).
streamA.close();
// Closing streamB will distribute its weight to the children (C & D) equally.
streamB.close();
streamC.close();
streamD.close();
streamF.close();
// Level 0
assertEquals(1, distributor.numChildren(connection.connectionStream().id()));
// Level 1
short halfWeight = DEFAULT_PRIORITY_WEIGHT / 2;
assertTrue(distributor.isChild(streamE.id(), connection.connectionStream().id(), halfWeight));
assertEquals(0, distributor.numChildren(streamE.id()));
}
@Test
public void closeStreamWithChildrenShouldRedistributeWeightToChildren() throws Exception {
Http2Stream streamA = connection.local().createStream(1, false);
Http2Stream streamB = connection.local().createStream(3, false);
Http2Stream streamC = connection.local().createStream(5, false);
Http2Stream streamD = connection.local().createStream(7, false);
Http2Stream streamE = connection.local().createStream(9, false);
Http2Stream streamF = connection.local().createStream(11, false);
Http2Stream streamG = connection.local().createStream(13, false);
Http2Stream streamH = connection.local().createStream(15, false);
setPriority(streamC.id(), streamA.id(), MAX_WEIGHT, false);
setPriority(streamD.id(), streamA.id(), MAX_WEIGHT, false);
setPriority(streamE.id(), streamA.id(), MAX_WEIGHT, false);
setPriority(streamF.id(), streamB.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamG.id(), streamB.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamH.id(), streamB.id(), 2 * DEFAULT_PRIORITY_WEIGHT, false);
streamE.close();
// closing stream A will distribute its weight to the children (C & D) equally
streamA.close();
// closing stream B will distribute its weight to the children (F & G & H) proportionally
streamB.close();
// Level 0
assertEquals(5, distributor.numChildren(connection.connectionStream().id()));
// Level 1
short halfWeight = DEFAULT_PRIORITY_WEIGHT / 2;
assertTrue(distributor.isChild(streamC.id(), connection.connectionStream().id(), halfWeight));
assertTrue(distributor.isChild(streamD.id(), connection.connectionStream().id(), halfWeight));
short quarterWeight = DEFAULT_PRIORITY_WEIGHT / 4;
assertTrue(distributor.isChild(streamF.id(), connection.connectionStream().id(), quarterWeight));
assertTrue(distributor.isChild(streamG.id(), connection.connectionStream().id(), quarterWeight));
assertTrue(distributor.isChild(streamH.id(), connection.connectionStream().id(), (short) (2 * quarterWeight)));
}
@Test
public void priorityChangeWithNoPrioritizableDependentsShouldRestructureTree() throws Exception {
Http2Stream streamA = connection.local().createStream(1, false);
Http2Stream streamB = connection.local().createStream(3, false);
Http2Stream streamC = connection.local().createStream(5, false);
Http2Stream streamD = connection.local().createStream(7, false);
Http2Stream streamE = connection.local().createStream(9, false);
Http2Stream streamF = connection.local().createStream(11, false);
setPriority(streamB.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamC.id(), streamB.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamD.id(), streamB.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamF.id(), streamD.id(), DEFAULT_PRIORITY_WEIGHT, false);
setPriority(streamE.id(), streamC.id(), DEFAULT_PRIORITY_WEIGHT, false);
// Leave leaf nodes open (E & F)
streamA.close();
// Closing streamB will distribute its weight to the children (C & D) equally.
streamB.close();
streamC.close();
streamD.close();
// Move F to depend on C, even though C is closed.
setPriority(streamF.id(), streamC.id(), DEFAULT_PRIORITY_WEIGHT, false);
// Level 0
assertEquals(2, distributor.numChildren(connection.connectionStream().id()));
// Level 1
short halfWeight = DEFAULT_PRIORITY_WEIGHT / 2;
assertTrue(distributor.isChild(streamE.id(), connection.connectionStream().id(), halfWeight));
assertEquals(0, distributor.numChildren(streamE.id()));
assertTrue(distributor.isChild(streamF.id(), connection.connectionStream().id(), halfWeight));
assertEquals(0, distributor.numChildren(streamF.id()));
}
@Test
public void circularDependencyShouldRestructureTree() throws Exception {
// Using example from https://tools.ietf.org/html/rfc7540#section-5.3.3
// Initialize all the nodes
Http2Stream streamA = connection.local().createStream(1, false);
Http2Stream streamB = connection.local().createStream(3, false);
Http2Stream streamC = connection.local().createStream(5, false);
Http2Stream streamD = connection.local().createStream(7, false);
Http2Stream streamE = connection.local().createStream(9, false);
Http2Stream streamF = connection.local().createStream(11, false);
assertEquals(6, distributor.numChildren(connection.connectionStream().id()));
assertEquals(0, distributor.numChildren(streamA.id()));
assertEquals(0, distributor.numChildren(streamB.id()));
assertEquals(0, distributor.numChildren(streamC.id()));
assertEquals(0, distributor.numChildren(streamD.id()));
assertEquals(0, distributor.numChildren(streamE.id()));
assertEquals(0, distributor.numChildren(streamF.id()));
// Build the tree
setPriority(streamB.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, false);
assertEquals(5, distributor.numChildren(connection.connectionStream().id()));
assertTrue(distributor.isChild(streamB.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(1, distributor.numChildren(streamA.id()));
setPriority(streamC.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, false);
assertEquals(4, distributor.numChildren(connection.connectionStream().id()));
assertTrue(distributor.isChild(streamC.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(2, distributor.numChildren(streamA.id()));
setPriority(streamD.id(), streamC.id(), DEFAULT_PRIORITY_WEIGHT, false);
assertEquals(3, distributor.numChildren(connection.connectionStream().id()));
assertTrue(distributor.isChild(streamD.id(), streamC.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(1, distributor.numChildren(streamC.id()));
setPriority(streamE.id(), streamC.id(), DEFAULT_PRIORITY_WEIGHT, false);
assertEquals(2, distributor.numChildren(connection.connectionStream().id()));
assertTrue(distributor.isChild(streamE.id(), streamC.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(2, distributor.numChildren(streamC.id()));
setPriority(streamF.id(), streamD.id(), DEFAULT_PRIORITY_WEIGHT, false);
assertEquals(1, distributor.numChildren(connection.connectionStream().id()));
assertTrue(distributor.isChild(streamF.id(), streamD.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(1, distributor.numChildren(streamD.id()));
assertEquals(6, connection.numActiveStreams());
// Non-exclusive re-prioritization of a->d.
setPriority(streamA.id(), streamD.id(), DEFAULT_PRIORITY_WEIGHT, false);
// Level 0
assertEquals(1, distributor.numChildren(connection.connectionStream().id()));
// Level 1
assertTrue(distributor.isChild(streamD.id(), connection.connectionStream().id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(2, distributor.numChildren(streamD.id()));
// Level 2
assertTrue(distributor.isChild(streamF.id(), streamD.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(0, distributor.numChildren(streamF.id()));
assertTrue(distributor.isChild(streamA.id(), streamD.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(2, distributor.numChildren(streamA.id()));
// Level 3
assertTrue(distributor.isChild(streamB.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(0, distributor.numChildren(streamB.id()));
assertTrue(distributor.isChild(streamC.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(1, distributor.numChildren(streamC.id()));
// Level 4
assertTrue(distributor.isChild(streamE.id(), streamC.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(0, distributor.numChildren(streamE.id()));
}
@Test
public void circularDependencyWithExclusiveShouldRestructureTree() throws Exception {
// Using example from https://tools.ietf.org/html/rfc7540#section-5.3.3
// Initialize all the nodes
Http2Stream streamA = connection.local().createStream(1, false);
Http2Stream streamB = connection.local().createStream(3, false);
Http2Stream streamC = connection.local().createStream(5, false);
Http2Stream streamD = connection.local().createStream(7, false);
Http2Stream streamE = connection.local().createStream(9, false);
Http2Stream streamF = connection.local().createStream(11, false);
assertEquals(6, distributor.numChildren(connection.connectionStream().id()));
assertEquals(0, distributor.numChildren(streamA.id()));
assertEquals(0, distributor.numChildren(streamB.id()));
assertEquals(0, distributor.numChildren(streamC.id()));
assertEquals(0, distributor.numChildren(streamD.id()));
assertEquals(0, distributor.numChildren(streamE.id()));
assertEquals(0, distributor.numChildren(streamF.id()));
// Build the tree
setPriority(streamB.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, false);
assertEquals(5, distributor.numChildren(connection.connectionStream().id()));
assertTrue(distributor.isChild(streamB.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(1, distributor.numChildren(streamA.id()));
setPriority(streamC.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT, false);
assertEquals(4, distributor.numChildren(connection.connectionStream().id()));
assertTrue(distributor.isChild(streamC.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(2, distributor.numChildren(streamA.id()));
setPriority(streamD.id(), streamC.id(), DEFAULT_PRIORITY_WEIGHT, false);
assertEquals(3, distributor.numChildren(connection.connectionStream().id()));
assertTrue(distributor.isChild(streamD.id(), streamC.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(1, distributor.numChildren(streamC.id()));
setPriority(streamE.id(), streamC.id(), DEFAULT_PRIORITY_WEIGHT, false);
assertEquals(2, distributor.numChildren(connection.connectionStream().id()));
assertTrue(distributor.isChild(streamE.id(), streamC.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(2, distributor.numChildren(streamC.id()));
setPriority(streamF.id(), streamD.id(), DEFAULT_PRIORITY_WEIGHT, false);
assertEquals(1, distributor.numChildren(connection.connectionStream().id()));
assertTrue(distributor.isChild(streamF.id(), streamD.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(1, distributor.numChildren(streamD.id()));
assertEquals(6, connection.numActiveStreams());
// Exclusive re-prioritization of a->d.
setPriority(streamA.id(), streamD.id(), DEFAULT_PRIORITY_WEIGHT, true);
// Level 0
assertEquals(1, distributor.numChildren(connection.connectionStream().id()));
// Level 1
assertTrue(distributor.isChild(streamD.id(), connection.connectionStream().id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(1, distributor.numChildren(streamD.id()));
// Level 2
assertTrue(distributor.isChild(streamA.id(), streamD.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(3, distributor.numChildren(streamA.id()));
// Level 3
assertTrue(distributor.isChild(streamB.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(0, distributor.numChildren(streamB.id()));
assertTrue(distributor.isChild(streamF.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(0, distributor.numChildren(streamF.id()));
assertTrue(distributor.isChild(streamC.id(), streamA.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(1, distributor.numChildren(streamC.id()));
// Level 4;
assertTrue(distributor.isChild(streamE.id(), streamC.id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(0, distributor.numChildren(streamE.id()));
}
// Unknown parent streams can come about in two ways:
// 1. Because the stream is old and its state was purged
// 2. This is the first reference to the stream, as implied at least by RFC7540§5.3.1:
// > A dependency on a stream that is not currently in the tree — such as a stream in the
// > "idle" state — results in that stream being given a default priority
@Test
public void unknownParentShouldBeCreatedUnderConnection() throws Exception {
setup(5);
// Purposefully avoid creating streamA's Http2Stream so that is it completely unknown.
// It shouldn't matter whether the ID is before or after streamB.id()
int streamAId = 1;
Http2Stream streamB = connection.local().createStream(3, false);
assertEquals(1, distributor.numChildren(connection.connectionStream().id()));
assertEquals(0, distributor.numChildren(streamB.id()));
// Build the tree
setPriority(streamB.id(), streamAId, DEFAULT_PRIORITY_WEIGHT, false);
assertEquals(1, connection.numActiveStreams());
// Level 0
assertEquals(1, distributor.numChildren(connection.connectionStream().id()));
// Level 1
assertTrue(distributor.isChild(streamAId, connection.connectionStream().id(), DEFAULT_PRIORITY_WEIGHT));
assertEquals(1, distributor.numChildren(streamAId));
// Level 2
assertTrue(distributor.isChild(streamB.id(), streamAId, DEFAULT_PRIORITY_WEIGHT));
assertEquals(0, distributor.numChildren(streamB.id()));
}
}
| WeightedFairQueueByteDistributorDependencyTreeTest |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackPluginTests.java | {
"start": 8088,
"end": 9659
} | interface ____.elasticsearch.license.internal.MutableLicenseService " + "may not have multiple implementations")
);
}
public void testLoadNoExtensions() throws Exception {
XPackPlugin xpackPlugin = createXPackPlugin(Settings.builder().build());
xpackPlugin.loadExtensions(new ExtensiblePlugin.ExtensionLoader() {
@Override
public <T> List<T> loadExtensions(Class<T> extensionPointType) {
return Collections.emptyList();
}
});
Environment mockEnvironment = mock(Environment.class);
when(mockEnvironment.settings()).thenReturn(Settings.builder().build());
when(mockEnvironment.configDir()).thenReturn(PathUtils.get(""));
Plugin.PluginServices services = mock(Plugin.PluginServices.class);
when(services.clusterService()).thenReturn(mock(ClusterService.class));
when(services.threadPool()).thenReturn(mock(ThreadPool.class));
when(services.environment()).thenReturn(mockEnvironment);
xpackPlugin.createComponents(services);
assertThat(XPackPlugin.getSharedLicenseService(), instanceOf(ClusterStateLicenseService.class));
assertEquals(License.OperationMode.TRIAL, XPackPlugin.getSharedLicenseState().getOperationMode());
}
private XPackPlugin createXPackPlugin(Settings settings) throws Exception {
return new XPackPlugin(settings) {
@Override
protected void setSslService(SSLService sslService) {
// disable
}
};
}
| org |
java | spring-projects__spring-boot | module/spring-boot-http-client/src/main/java/org/springframework/boot/http/client/autoconfigure/service/HttpServiceClientProperties.java | {
"start": 1099,
"end": 1190
} | class ____ extends LinkedHashMap<String, HttpClientProperties> {
}
| HttpServiceClientProperties |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/select/MySqlSelectTest_163.java | {
"start": 315,
"end": 1206
} | class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "SELECT\n" +
" val,\n" +
" ROW_NUMBER() OVER (ORDER BY val) AS 'row_number',\n" +
" RANK() OVER (ORDER BY val) AS 'rank',\n" +
" DENSE_RANK() OVER (ORDER BY val) AS 'dense_rank'\n" +
"FROM numbers;";
//
List<SQLStatement> statementList = SQLUtils.parseStatements(sql, JdbcConstants.MYSQL);
SQLSelectStatement stmt = (SQLSelectStatement) statementList.get(0);
assertEquals(1, statementList.size());
assertEquals("SELECT val, ROW_NUMBER() OVER (ORDER BY val) AS \"row_number\", RANK() OVER (ORDER BY val) AS \"rank\"\n" +
"\t, DENSE_RANK() OVER (ORDER BY val) AS \"dense_rank\"\n" +
"FROM numbers;", stmt.toString());
}
}
| MySqlSelectTest_163 |
java | apache__kafka | coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorMetricsShard.java | {
"start": 1333,
"end": 1957
} | interface ____ {
/**
* Increment the value of a sensor.
*
* @param sensorName the sensor name.
*/
void record(String sensorName);
/**
* Record a sensor with a value.
*
* @param sensorName the sensor name.
* @param val the value to record.
*/
void record(String sensorName, double val);
/**
* @return The topic partition.
*/
TopicPartition topicPartition();
/**
* Commits all gauges backed by the snapshot registry.
*
* @param offset The last committed offset.
*/
void commitUpTo(long offset);
}
| CoordinatorMetricsShard |
java | apache__camel | components/camel-spring-parent/camel-spring-ai/camel-spring-ai-embeddings/src/generated/java/org/apache/camel/component/springai/embeddings/SpringAiEmbeddingsConfigurationConfigurer.java | {
"start": 759,
"end": 2342
} | class ____ extends org.apache.camel.support.component.PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
org.apache.camel.component.springai.embeddings.SpringAiEmbeddingsConfiguration target = (org.apache.camel.component.springai.embeddings.SpringAiEmbeddingsConfiguration) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "embeddingmodel":
case "embeddingModel": target.setEmbeddingModel(property(camelContext, org.springframework.ai.embedding.EmbeddingModel.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "embeddingmodel":
case "embeddingModel": return org.springframework.ai.embedding.EmbeddingModel.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
org.apache.camel.component.springai.embeddings.SpringAiEmbeddingsConfiguration target = (org.apache.camel.component.springai.embeddings.SpringAiEmbeddingsConfiguration) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "embeddingmodel":
case "embeddingModel": return target.getEmbeddingModel();
default: return null;
}
}
}
| SpringAiEmbeddingsConfigurationConfigurer |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/util/Loader.java | {
"start": 4696,
"end": 5202
} | class ____.", resource, defaultLoader);
final URL url = defaultLoader.getResource(resource);
if (url != null) {
return url;
}
}
} catch (final Throwable t) {
//
// can't be InterruptedException or InterruptedIOException
// since not declared, must be error or RuntimeError.
LOGGER.warn(TSTR, t);
}
// Last ditch attempt: get the resource from the | loader |
java | eclipse-vertx__vert.x | vertx-core/src/test/java/io/vertx/tests/addressresolver/ResolvingHttpClientTest.java | {
"start": 1214,
"end": 29357
} | class ____ extends VertxTestBase {
private BiConsumer<Integer, HttpServerRequest> requestHandler;
private List<HttpServer> servers;
@Override
protected VertxOptions getOptions() {
VertxOptions options = super.getOptions();
options.getAddressResolverOptions().setHostsValue(Buffer.buffer("" +
"127.0.0.1 localhost\n" +
"127.0.0.1 s1.example.com\n" +
"127.0.0.1 s2.example.com\n"
));
return options;
}
private void startServers(int numServers) throws Exception {
startServers(numServers, new HttpServerOptions());
}
private void startServers(int numServers, HttpServerOptions options) throws Exception {
if (servers != null) {
throw new IllegalStateException();
}
servers = new ArrayList<>();
for (int i = 0;i < numServers;i++) {
int val = i;
HttpServer server = vertx
.createHttpServer(options)
.requestHandler(req -> {
BiConsumer<Integer, HttpServerRequest> handler = requestHandler;
if (handler != null) {
handler.accept(val, req);
} else {
req.response().setStatusCode(404).end();
}
});
servers.add(server);
awaitFuture(server.listen(HttpTestBase.DEFAULT_HTTP_PORT + i, "localhost"));
}
}
@Test
public void testResolveServers() throws Exception {
int numServers = 2;
waitFor(numServers * 2);
startServers(numServers);
requestHandler = (idx, req) -> req.response().end("server-" + idx);
FakeEndpointResolver resolver = new FakeEndpointResolver();
resolver.registerAddress("example.com", Arrays.asList(SocketAddress.inetSocketAddress(HttpTestBase.DEFAULT_HTTP_PORT, "localhost"), SocketAddress.inetSocketAddress(HttpTestBase.DEFAULT_HTTP_PORT + 1, "localhost")));
HttpClientInternal client = (HttpClientInternal) vertx.httpClientBuilder()
.withAddressResolver(resolver)
.build();
Set<String> responses = Collections.synchronizedSet(new HashSet<>());
for (int i = 0;i < numServers * 2;i++) {
client.request(new RequestOptions().setServer(new FakeAddress("example.com"))).compose(req -> req
.send()
.expecting(HttpResponseExpectation.SC_OK)
.compose(HttpClientResponse::body)
).onComplete(onSuccess(v -> {
responses.add(v.toString());
complete();
}));
}
await();
Assert.assertEquals(new HashSet<>(Arrays.asList("server-0", "server-1")), responses);
}
@Ignore
@Test
public void testShutdownServers() throws Exception {
int numServers = 4;
requestHandler = (idx, req) -> req.response().end("server-" + idx);
startServers(numServers);
FakeEndpointResolver resolver = new FakeEndpointResolver();
resolver.registerAddress("example.com", Arrays.asList(
SocketAddress.inetSocketAddress(HttpTestBase.DEFAULT_HTTP_PORT, "localhost"),
SocketAddress.inetSocketAddress(HttpTestBase.DEFAULT_HTTP_PORT + 1, "localhost"),
SocketAddress.inetSocketAddress(HttpTestBase.DEFAULT_HTTP_PORT + 2, "localhost"),
SocketAddress.inetSocketAddress(HttpTestBase.DEFAULT_HTTP_PORT + 3, "localhost")
));
HttpClientInternal client = (HttpClientInternal) vertx.httpClientBuilder()
.withAddressResolver(resolver)
.build();
CountDownLatch latch = new CountDownLatch(numServers);
for (int i = 0;i < numServers;i++) {
client.request(new RequestOptions().setServer(new FakeAddress("example.com"))).compose(req -> req
.send()
.expecting(HttpResponseExpectation.SC_OK)
.compose(HttpClientResponse::body)
).onComplete(onSuccess(v -> {
latch.countDown();
}));
}
awaitLatch(latch);
List<FakeEndpoint> endpoints = resolver.endpoints("example.com");
for (int i = 0;i < servers.size();i++) {
int expected = endpoints.size() - 1;
awaitFuture(servers.get(i).close());
assertWaitUntil(() -> endpoints.size() == expected);
}
assertWaitUntil(resolver.addresses()::isEmpty);
}
@Test
public void testResolveToSameSocketAddress() throws Exception {
requestHandler = (idx, req) -> req.response().end("server-" + idx);
startServers(1);
FakeEndpointResolver resolver = new FakeEndpointResolver();
SocketAddress address = SocketAddress.inetSocketAddress(HttpTestBase.DEFAULT_HTTP_PORT, "localhost");
resolver.registerAddress("server1.com", List.of(address));
resolver.registerAddress("server2.com", List.of(address));
HttpClientInternal client = (HttpClientInternal) vertx.httpClientBuilder()
.withAddressResolver(resolver)
.build();
Future<Buffer> result = client.request(new RequestOptions().setServer(new FakeAddress("server1.com"))).compose(req -> req
.send()
.expecting(HttpResponseExpectation.SC_OK)
.compose(HttpClientResponse::body)
).compose(body -> client
.request(new RequestOptions().setServer(new FakeAddress("server2.com")))
.compose(req -> req
.send()
.expecting(HttpResponseExpectation.SC_OK)
.compose(HttpClientResponse::body)
));
awaitFuture(result);
awaitFuture(servers.get(0).close());
}
@Test
public void testResolveToSameSocketAddressWithProxy() throws Exception {
requestHandler = (idx, req) -> req.response().end("server-" + idx);
startServers(1);
HttpProxy proxy = new HttpProxy();
proxy.start(vertx);
FakeEndpointResolver resolver = new FakeEndpointResolver();
resolver.registerAddress("example.com", Arrays.asList(SocketAddress.inetSocketAddress(HttpTestBase.DEFAULT_HTTP_PORT, "localhost")));
HttpClientInternal client = (HttpClientInternal) vertx.httpClientBuilder()
.withAddressResolver(resolver)
.build();
RequestOptions request1 = new RequestOptions().setServer(new FakeAddress("example.com"));
RequestOptions request2 = new RequestOptions(request1).setProxyOptions(new ProxyOptions()
.setHost("localhost")
.setPort(proxy.defaultPort())
.setType(ProxyType.HTTP));
List<RequestOptions> requests = Arrays.asList(request1, request2);
List<Buffer> responses = new ArrayList<>();
for (RequestOptions request : requests) {
Future<Buffer> result = client.request(request).compose(req -> req
.send()
.expecting(HttpResponseExpectation.SC_OK)
.compose(HttpClientResponse::body)
);
Buffer response = awaitFuture(result);
responses.add(response);
}
assertNotNull(proxy.lastLocalAddress());
}
@Test
public void testAcceptProxyFilter() throws Exception {
testFilter(true);
}
@Test
public void testRejectProxyFilter() throws Exception {
testFilter(false);
}
private void testFilter(boolean accept) throws Exception {
HttpProxy proxy = new HttpProxy();
proxy.start(vertx);
try {
int numServers = 2;
waitFor(numServers * 2);
startServers(numServers);
requestHandler = (idx, req) -> {
boolean proxied = idx == 0 && accept;
SocketAddress remote = req.connection().remoteAddress();
assertEquals(proxied, proxy.localAddresses().contains(remote.host() + ":" + remote.port()));
req.response().end("server-" + idx);
};
FakeEndpointResolver resolver = new FakeEndpointResolver();
resolver.registerAddress("example.com", Arrays.asList(
SocketAddress.inetSocketAddress(HttpTestBase.DEFAULT_HTTP_PORT, "s1.example.com"),
SocketAddress.inetSocketAddress(HttpTestBase.DEFAULT_HTTP_PORT + 1, "s2.example.com")));
HttpClientInternal client = (HttpClientInternal) vertx.httpClientBuilder()
.with(new HttpClientOptions()
.setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setHost("localhost").setPort(proxy.port())))
.withAddressResolver(resolver)
.build();
((HttpClientImpl)((CleanableHttpClient)client).delegate).proxyFilter(so -> {
return accept && so.host().equals("s1.example.com");
});
Set<String> responses = Collections.synchronizedSet(new HashSet<>());
for (int i = 0;i < numServers * 2;i++) {
client.request(new RequestOptions().setServer(new FakeAddress("example.com"))).compose(req -> req
.send()
.expecting(HttpResponseExpectation.SC_OK)
.compose(HttpClientResponse::body)
).onComplete(onSuccess(v -> {
responses.add(v.toString());
complete();
}));
}
await();
Assert.assertEquals(new HashSet<>(Arrays.asList("server-0", "server-1")), responses);
} finally {
proxy.stop();
}
}
@Test
public void testResolveFailure() {
Exception cause = new Exception("Not found");
FakeEndpointResolver lookup = new FakeEndpointResolver() {
@Override
public Future<FakeState> resolve(FakeAddress address, EndpointBuilder builder) {
return Future.failedFuture(cause);
}
};
HttpClientInternal client = (HttpClientInternal) vertx.httpClientBuilder()
.withAddressResolver(lookup)
.build();
client.request(new RequestOptions().setServer(new FakeAddress("foo.com"))).compose(req -> req
.send()
.expecting(HttpResponseExpectation.SC_OK)
.compose(HttpClientResponse::body)
).onComplete(onFailure(err -> {
assertSame(cause, err);
testComplete();
}));
await();
}
@Test
public void testUseInvalidAddress() {
FakeEndpointResolver lookup = new FakeEndpointResolver();
HttpClientInternal client = (HttpClientInternal) vertx.httpClientBuilder()
.withAddressResolver(lookup)
.build();
client.request(new RequestOptions().setServer(new Address() {
})).compose(req -> req
.send()
.expecting(HttpResponseExpectation.SC_OK)
.compose(HttpClientResponse::body)
).onComplete(onFailure(err -> {
assertTrue(err.getMessage().contains("Cannot resolve address"));
testComplete();
}));
await();
}
@Test
public void testKeepAliveTimeout() throws Exception {
startServers(1);
requestHandler = (idx, req) -> req.response().end("server-" + idx);
CountDownLatch closedLatch = new CountDownLatch(1);
FakeEndpointResolver resolver = new FakeEndpointResolver();
resolver.registerAddress("example.com", Arrays.asList(SocketAddress.inetSocketAddress(HttpTestBase.DEFAULT_HTTP_PORT, "localhost")));
HttpClientInternal client = (HttpClientInternal) vertx.httpClientBuilder()
.with(new HttpClientOptions().setKeepAliveTimeout(1))
.withConnectHandler(conn -> {
conn.closeHandler(v -> {
closedLatch.countDown();
});
})
.withAddressResolver(resolver)
.build();
client.request(new RequestOptions().setServer(new FakeAddress("example.com"))).compose(req -> req
.send()
.expecting(HttpResponseExpectation.SC_OK)
.compose(HttpClientResponse::body)
).onComplete(onSuccess(v -> {
}));
awaitLatch(closedLatch);
}
@Test
public void testStatistics() throws Exception {
startServers(1);
requestHandler = (idx, req) -> {
vertx.setTimer(500, id -> {
req.response().end();
});
};
FakeEndpointResolver resolver = new FakeEndpointResolver();
resolver.registerAddress("example.com", Arrays.asList(SocketAddress.inetSocketAddress(HttpTestBase.DEFAULT_HTTP_PORT, "localhost")));
FakeLoadBalancer lb = new FakeLoadBalancer();
HttpClientInternal client = (HttpClientInternal) vertx.httpClientBuilder()
.with(new HttpClientOptions().setKeepAliveTimeout(1))
.withAddressResolver(resolver)
.withLoadBalancer(lb)
.build();
client.request(new RequestOptions().setServer(new FakeAddress("example.com")))
.compose(req -> req
.send()
.expecting(HttpResponseExpectation.SC_OK)
.compose(HttpClientResponse::body)
).await();
FakeLoadBalancer.FakeLoadBalancerMetrics<?> endpoint = (FakeLoadBalancer.FakeLoadBalancerMetrics<?>) ((ServerEndpoint) lb.endpoints().get(0)).metrics();
FakeLoadBalancer.FakeMetric metric = endpoint.metrics2().get(0);
assertTrue(metric.requestEnd() - metric.requestBegin() >= 0);
assertTrue(metric.responseBegin() - metric.requestEnd() >= 500);
assertTrue(metric.responseEnd() - metric.responseBegin() >= 0);
}
@Test
public void testStatisticsReportingFailure0() throws Exception {
startServers(1);
AtomicInteger count = new AtomicInteger();
requestHandler = (idx, req) -> {
count.incrementAndGet();
};
FakeEndpointResolver resolver = new FakeEndpointResolver();
FakeLoadBalancer lb = new FakeLoadBalancer();
resolver.registerAddress("example.com", Arrays.asList(SocketAddress.inetSocketAddress(HttpTestBase.DEFAULT_HTTP_PORT, "localhost")));
HttpClientInternal client = (HttpClientInternal) vertx.httpClientBuilder()
.with(new HttpClientOptions().setKeepAliveTimeout(1))
.withAddressResolver(resolver)
.withLoadBalancer(lb)
.build();
List<Future<Buffer>> futures = new ArrayList<>();
for (int i = 0;i < 5;i++) {
Future<Buffer> fut = client.request(new RequestOptions().setServer(new FakeAddress("example.com"))).compose(req -> req
.send()
.compose(HttpClientResponse::body));
futures.add(fut);
}
assertWaitUntil(() -> count.get() == 5);
try {
awaitFuture(client.request(new RequestOptions().setServer(new FakeAddress("example.com")).setTimeout(100)));
} catch (Throwable e) {
assertTrue(e.getMessage().contains("timeout"));
}
FakeLoadBalancer.FakeLoadBalancerMetrics<?> endpoint = (FakeLoadBalancer.FakeLoadBalancerMetrics) lb.endpoints().get(0).metrics();
assertWaitUntil(() -> endpoint.metrics2().size() == 6);
FakeLoadBalancer.FakeMetric metric = endpoint.metrics2().get(5);
assertNotNull(metric.failure);
}
@Test
public void testStatisticsReportingFailure1() throws Exception {
FakeLoadBalancer.FakeMetric metric = testStatisticsReportingFailure((fail, req) -> {
if (fail) {
req.connection().close();
} else {
req.response().end();
}
}, req -> {
req.setChunked(true);
req.write("chunk");
return req.response().compose(HttpClientResponse::body);
});
assertNotSame(0, metric.responseBegin());
assertEquals(0, metric.requestEnd());
assertEquals(0, metric.responseBegin());
assertEquals(0, metric.responseEnd());
assertNotNull(metric.failure());
assertTrue(metric.failure() instanceof HttpClosedException);
}
@Test
public void testStatisticsReportingFailure2() throws Exception {
FakeLoadBalancer.FakeMetric metric = testStatisticsReportingFailure((fail, req) -> {
if (fail) {
req.connection().close();
} else {
req.response().end();
}
}, req -> req
.send()
.compose(HttpClientResponse::body));
assertTrue(metric.requestEnd() - metric.requestBegin() >= 0);
assertEquals(0, metric.responseBegin());
assertEquals(0, metric.responseEnd());
assertNotNull(metric.failure());
assertTrue(metric.failure() instanceof HttpClosedException);
}
@Test
public void testStatisticsReportingFailure3() throws Exception {
FakeLoadBalancer.FakeMetric metric = testStatisticsReportingFailure((fail, req) -> {
HttpServerResponse resp = req.response();
resp.setChunked(true).write("chunk");
vertx.setTimer(100, id -> {
if (fail) {
req.connection().close();
} else {
resp.end();
}
});
}, req -> req
.send()
.compose(HttpClientResponse::body)
);
assertTrue(metric.requestEnd() - metric.requestBegin() >= 0);
assertTrue(metric.responseBegin() - metric.requestEnd() >= 0);
assertEquals(0, metric.responseEnd());
assertNotNull(metric.failure());
assertTrue(metric.failure() instanceof HttpClosedException);
}
private FakeLoadBalancer.FakeMetric testStatisticsReportingFailure(BiConsumer<Boolean, HttpServerRequest> handler, Function<HttpClientRequest, Future<Buffer>> sender) throws Exception {
startServers(1);
AtomicBoolean mode = new AtomicBoolean();
requestHandler = (idx, req) -> handler.accept(mode.get(), req);
FakeEndpointResolver resolver = new FakeEndpointResolver();
resolver.registerAddress("example.com", Arrays.asList(SocketAddress.inetSocketAddress(HttpTestBase.DEFAULT_HTTP_PORT, "localhost")));
FakeLoadBalancer lb = new FakeLoadBalancer();
HttpClientInternal client = (HttpClientInternal) vertx.httpClientBuilder()
.with(new HttpClientOptions().setKeepAliveTimeout(1))
.withAddressResolver(resolver)
.withLoadBalancer(lb)
.build();
List<Future<Buffer>> futures = new ArrayList<>();
for (int i = 0;i < 10;i++) {
Future<Buffer> fut = client.request(new RequestOptions().setServer(new FakeAddress("example.com"))).compose(req -> req
.send()
.expecting(HttpResponseExpectation.SC_OK)
.compose(HttpClientResponse::body));
futures.add(fut);
}
awaitFuture(Future.join(futures));
mode.set(true);
try {
awaitFuture(client
.request(new RequestOptions().setServer(new FakeAddress("example.com")))
.compose(sender::apply));
} catch (RuntimeException e) {
assertTrue(e.getMessage().contains("Connection was closed"));
}
FakeLoadBalancer.FakeLoadBalancerMetrics<?> endpoint = (FakeLoadBalancer.FakeLoadBalancerMetrics) (lb.endpoints().get(0).metrics());
assertWaitUntil(() -> endpoint.metrics2().size() == 11);
for (int i = 0;i < 10;i++) {
FakeLoadBalancer.FakeMetric metric = endpoint.metrics2().get(i);
assertTrue(metric.requestEnd() - metric.requestBegin() >= 0);
assertTrue(metric.responseBegin() - metric.requestEnd() >= 0);
assertTrue(metric.responseEnd() - metric.responseBegin() >= 0);
}
return endpoint.metrics2().get(10);
}
@Test
public void testInvalidation() throws Exception {
startServers(2);
requestHandler = (idx, req) -> {
req.response().end("" + idx);
};
FakeEndpointResolver resolver = new FakeEndpointResolver();
SocketAddress addr1 = SocketAddress.inetSocketAddress(HttpTestBase.DEFAULT_HTTP_PORT, "localhost");
SocketAddress addr2 = SocketAddress.inetSocketAddress(HttpTestBase.DEFAULT_HTTP_PORT + 1, "localhost");
resolver.registerAddress("example.com", Arrays.asList(addr1));
HttpClientInternal client = (HttpClientInternal) vertx.httpClientBuilder()
.withAddressResolver(resolver)
.build();
String res = awaitFuture(client.request(new RequestOptions().setServer(new FakeAddress("example.com"))).compose(req -> req
.send()
.expecting(HttpResponseExpectation.SC_OK)
.compose(HttpClientResponse::body)
)).toString();
assertEquals("0", res);
resolver.registerAddress("example.com", List.of(addr2));
try {
awaitFuture(client.request(new RequestOptions().setServer(new FakeAddress("example.com"))).compose(req -> req
.send()
.expecting(HttpResponseExpectation.SC_OK)
.compose(HttpClientResponse::body)
));
} catch (Throwable e) {
assertTrue(e.getMessage().startsWith("Cannot resolve address"));
}
}
@Test
public void testDelayedInvalidation() throws Exception {
testInvalidation(false);
}
@Test
public void testImmediateInvalidation() throws Exception {
testInvalidation(true);
}
private void testInvalidation(boolean immediate) throws Exception {
startServers(2);
requestHandler = (idx, req) -> {
req.response().end("" + idx);
};
FakeEndpointResolver resolver = new FakeEndpointResolver();
SocketAddress addr1 = SocketAddress.inetSocketAddress(HttpTestBase.DEFAULT_HTTP_PORT, "localhost");
SocketAddress addr2 = SocketAddress.inetSocketAddress(HttpTestBase.DEFAULT_HTTP_PORT + 1, "localhost");
AtomicInteger accessCount = new AtomicInteger();
resolver.registerAddress("example.com", () -> {
accessCount.incrementAndGet();
return new FakeEndpointResolver.Endpoint(List.of(addr1), !immediate);
});
HttpClientInternal client = (HttpClientInternal) vertx.httpClientBuilder()
.withAddressResolver(resolver)
.build();
String res = awaitFuture(client.request(new RequestOptions().setServer(new FakeAddress("example.com"))).compose(req -> req
.send()
.expecting(HttpResponseExpectation.SC_OK)
.compose(HttpClientResponse::body)
)).toString();
assertEquals("0", res);
assertEquals(1, accessCount.get());
res = awaitFuture(client.request(new RequestOptions().setServer(new FakeAddress("example.com"))).compose(req -> req
.send()
.expecting(HttpResponseExpectation.SC_OK)
.compose(HttpClientResponse::body)
)).toString();
assertEquals("0", res);
assertEquals(immediate ? 2 : 1, accessCount.get());
resolver.registerAddress("example.com", List.of(addr2));
res = awaitFuture(client.request(new RequestOptions().setServer(new FakeAddress("example.com"))).compose(req -> req
.send()
.expecting(HttpResponseExpectation.SC_OK)
.compose(HttpClientResponse::body)
)).toString();
assertEquals("1", res);
}
@Test
public void testTimeExpiration() throws Exception {
startServers(1);
requestHandler = (idx, req) -> {
req.response().end("" + idx);
};
FakeEndpointResolver resolver = new FakeEndpointResolver();
SocketAddress addr1 = SocketAddress.inetSocketAddress(HttpTestBase.DEFAULT_HTTP_PORT, "localhost");
resolver.registerAddress("example.com", Arrays.asList(addr1));
HttpClientInternal client = (HttpClientInternal) vertx.httpClientBuilder()
.with(new HttpClientOptions().setKeepAliveTimeout(1))
.withAddressResolver(resolver)
.build();
String res = awaitFuture(client.request(new RequestOptions().setServer(new FakeAddress("example.com"))).compose(req -> req
.send()
.expecting(HttpResponseExpectation.SC_OK)
.compose(HttpClientResponse::body)
)).toString();
assertEquals("0", res);
waitUntil(() -> resolver.endpoints("example.com") == null);
}
@Test
public void testTimeRefreshExpiration() throws Exception {
startServers(1);
requestHandler = (idx, req) -> {
req.response().end("" + idx);
};
FakeEndpointResolver resolver = new FakeEndpointResolver();
SocketAddress addr1 = SocketAddress.inetSocketAddress(HttpTestBase.DEFAULT_HTTP_PORT, "localhost");
resolver.registerAddress("example.com", Arrays.asList(addr1));
HttpClientInternal client = (HttpClientInternal) vertx.httpClientBuilder()
.with(new HttpClientOptions().setKeepAliveTimeout(2))
.withAddressResolver(resolver)
.build();
int numRequests = 20; // 200ms x 20 = 4 seconds
for (int i = 0;i < numRequests;i++) {
String res = awaitFuture(client.request(new RequestOptions().setServer(new FakeAddress("example.com"))).compose(req -> req
.send()
.expecting(HttpResponseExpectation.SC_OK)
.compose(HttpClientResponse::body)
)).toString();
assertEquals("0", res);
Thread.sleep(200);
}
assertNotNull(resolver.endpoints("example.com"));
waitUntil(() -> resolver.endpoints("example.com") == null);
}
@Test
public void testSSL() throws Exception {
testSSL(new RequestOptions()
.setMethod(HttpMethod.GET)
.setServer(new FakeAddress("example.com"))
.setURI("/"), false, true);
}
@Test
public void testSSLOverridePeer() throws Exception {
testSSL(new RequestOptions()
.setMethod(HttpMethod.GET)
.setServer(new FakeAddress("example.com"))
.setHost("example.com")
.setPort(HttpTestBase.DEFAULT_HTTP_PORT)
.setURI("/"), true, true);
}
@Test
public void testSSLOverridePeerNoVerify() throws Exception {
testSSL(new RequestOptions()
.setMethod(HttpMethod.GET)
.setServer(new FakeAddress("example.com"))
.setHost("example.com")
.setPort(HttpTestBase.DEFAULT_HTTP_PORT)
.setURI("/"), false, false);
}
private void testSSL(RequestOptions request, boolean expectFailure, boolean verifyHost) throws Exception {
startServers(1, new HttpServerOptions()
.setSsl(true)
.setKeyCertOptions(Cert.SERVER_JKS.get()));
requestHandler = (idx, req) -> {
req.response().end("" + idx);
};
FakeEndpointResolver resolver = new FakeEndpointResolver();
SocketAddress addr1 = SocketAddress.inetSocketAddress(HttpTestBase.DEFAULT_HTTP_PORT, "localhost");
resolver.registerAddress("example.com", Arrays.asList(addr1));
HttpClient client = vertx.httpClientBuilder()
.with(new HttpClientOptions()
.setSsl(true)
.setTrustOptions(Trust.SERVER_JKS.get())
.setVerifyHost(verifyHost)
)
.withAddressResolver(resolver)
.build();
try {
String res = awaitFuture(client.request(request).compose(req -> req
.send()
.expecting(HttpResponseExpectation.SC_OK)
.compose(HttpClientResponse::body)
)).toString();
assertFalse(expectFailure);
assertEquals("0", res);
} catch (Exception e) {
assertTrue(expectFailure);
}
}
@Test
public void testConsistentHashing() throws Exception {
int numServers = 4;
int numClients = 10;
int numRequests = 4;
waitFor(numClients * numRequests);
startServers(numServers);
requestHandler = (idx, req) -> req.response().end("server-" + idx);
FakeEndpointResolver resolver = new FakeEndpointResolver();
List<SocketAddress> servers = IntStream
.range(0, numServers)
.mapToObj(idx -> SocketAddress.inetSocketAddress(HttpTestBase.DEFAULT_HTTP_PORT + idx, "localhost"))
.collect(Collectors.toList());
resolver.registerAddress("example.com", servers);
HttpClientInternal client = (HttpClientInternal) vertx.httpClientBuilder()
.withLoadBalancer(LoadBalancer.CONSISTENT_HASHING)
.withAddressResolver(resolver)
.build();
Map<Integer, List<String>> responses = new ConcurrentHashMap<>();
for (int i = 0;i < numClients * numRequests;i++) {
int idx = i % numClients;
String hashingKey = "client-" + idx;
client.request(new RequestOptions().setServer(new FakeAddress("example.com")).setRoutingKey(hashingKey)).compose(req -> req
.send()
.expecting(HttpResponseExpectation.SC_OK)
.compose(HttpClientResponse::body)
).onComplete(onSuccess(v -> {
responses.compute(idx, (id, list) -> {
if (list == null) {
list = Collections.synchronizedList(new ArrayList<>());
}
return list;
}).add(v.toString());
complete();
}));
}
await();
responses.values().forEach(list -> {
String resp = list.get(0);
for (int i = 1;i < list.size();i++) {
Assert.assertEquals(resp, list.get(i));
}
});
}
@Test
public void testLyingLoadBalancer() throws Exception {
int numServers = 2;
startServers(numServers);
requestHandler = (idx, req) -> req.response().end("server-" + idx);
FakeEndpointResolver resolver = new FakeEndpointResolver();
resolver.registerAddress("example.com", Arrays.asList(SocketAddress.inetSocketAddress(HttpTestBase.DEFAULT_HTTP_PORT, "localhost"), SocketAddress.inetSocketAddress(HttpTestBase.DEFAULT_HTTP_PORT + 1, "localhost")));
HttpClientInternal client = (HttpClientInternal) vertx.httpClientBuilder()
.withLoadBalancer(endpoints -> () -> endpoints.size() + 1)
.withAddressResolver(resolver)
.build();
Set<String> responses = Collections.synchronizedSet(new HashSet<>());
client.request(new RequestOptions().setServer(new FakeAddress("example.com"))).compose(req -> req
.send()
.expecting(HttpResponseExpectation.SC_OK)
.compose(HttpClientResponse::body)
).onComplete(onFailure(err -> {
assertEquals("No results for ServiceName(example.com)", err.getMessage());
testComplete();
}));
await();
}
}
| ResolvingHttpClientTest |
java | micronaut-projects__micronaut-core | http-server-netty/src/main/java/io/micronaut/http/server/netty/handler/PipeliningServerHandler.java | {
"start": 44838,
"end": 45770
} | class ____ extends OutboundHandler {
private final FullHttpResponse message;
FullOutboundHandler(OutboundAccessImpl outboundAccess, FullHttpResponse message) {
super(outboundAccess);
this.message = message;
}
@Override
void writeSome() {
writeCompressing(message, true, true);
outboundHandler = null;
requestHandler.responseWritten(outboundAccess.attachment);
PipeliningServerHandler.this.writeSome();
}
@Override
void discardOutbound() {
super.discardOutbound();
outboundHandler = null;
// pretend we wrote to clean up resources
requestHandler.responseWritten(outboundAccess.attachment);
message.release();
}
}
/**
* Handler that writes a {@link StreamedHttpResponse}.
*/
private final | FullOutboundHandler |
java | spring-projects__spring-boot | core/spring-boot-test/src/main/java/org/springframework/boot/test/system/OutputCapture.java | {
"start": 8026,
"end": 8499
} | class ____ {
private final Enabled saved;
AnsiOutputState() {
this.saved = AnsiOutput.getEnabled();
AnsiOutput.setEnabled(Enabled.NEVER);
}
void restore() {
AnsiOutput.setEnabled(this.saved);
}
static @Nullable AnsiOutputState saveAndDisable() {
if (!ClassUtils.isPresent("org.springframework.boot.ansi.AnsiOutput",
OutputCapture.class.getClassLoader())) {
return null;
}
return new AnsiOutputState();
}
}
static | AnsiOutputState |
java | apache__kafka | raft/src/main/java/org/apache/kafka/raft/TimingWheelExpirationService.java | {
"start": 1915,
"end": 2363
} | class ____<T> extends TimerTask {
private final CompletableFuture<T> future = new CompletableFuture<>();
TimerTaskCompletableFuture(long delayMs) {
super(delayMs);
}
@Override
public void run() {
future.completeExceptionally(new TimeoutException("Future failed to be completed before timeout of " + delayMs + " ms was reached"));
}
}
private | TimerTaskCompletableFuture |
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/function/RequestPredicates.java | {
"start": 2742,
"end": 14859
} | class ____ {
private static final Log logger = LogFactory.getLog(RequestPredicates.class);
/**
* Return a {@code RequestPredicate} that always matches.
* @return a predicate that always matches
*/
public static RequestPredicate all() {
return request -> true;
}
/**
* Return a {@code RequestPredicate} that matches if the request's
* HTTP method is equal to the given method.
* @param httpMethod the HTTP method to match against
* @return a predicate that tests against the given HTTP method
*/
public static RequestPredicate method(HttpMethod httpMethod) {
Assert.notNull(httpMethod, "HttpMethod must not be null");
return new SingleHttpMethodPredicate(httpMethod);
}
/**
* Return a {@code RequestPredicate} that matches if the request's
* HTTP method is equal to one the of the given methods.
* @param httpMethods the HTTP methods to match against
* @return a predicate that tests against the given HTTP methods
*/
public static RequestPredicate methods(HttpMethod... httpMethods) {
Assert.notEmpty(httpMethods, "HttpMethods must not be empty");
if (httpMethods.length == 1) {
return new SingleHttpMethodPredicate(httpMethods[0]);
}
else {
return new MultipleHttpMethodsPredicate(httpMethods);
}
}
/**
* Return a {@code RequestPredicate} that tests the request path
* against the given path pattern.
* @param pattern the pattern to match to
* @return a predicate that tests against the given path pattern
* @see org.springframework.web.util.pattern.PathPattern
*/
public static RequestPredicate path(String pattern) {
Assert.notNull(pattern, "'pattern' must not be null");
PathPatternParser parser = PathPatternParser.defaultInstance;
pattern = parser.initFullPathPattern(pattern);
return pathPredicates(parser).apply(pattern);
}
/**
* Return a function that creates new path-matching {@code RequestPredicates}
* from pattern Strings using the given {@link PathPatternParser}.
* <p>This method can be used to specify a non-default, customized
* {@code PathPatternParser} when resolving path patterns.
* @param patternParser the parser used to parse patterns given to the returned function
* @return a function that resolves a pattern String into a path-matching
* {@code RequestPredicates} instance
*/
public static Function<String, RequestPredicate> pathPredicates(PathPatternParser patternParser) {
Assert.notNull(patternParser, "PathPatternParser must not be null");
return pattern -> new PathPatternPredicate(patternParser.parse(pattern));
}
/**
* Return a {@code RequestPredicate} that tests the request's headers
* against the given headers predicate.
* @param headersPredicate a predicate that tests against the request headers
* @return a predicate that tests against the given header predicate
*/
public static RequestPredicate headers(Predicate<ServerRequest.Headers> headersPredicate) {
return new HeadersPredicate(headersPredicate);
}
/**
* Return a {@code RequestPredicate} that tests if the request's
* {@linkplain ServerRequest.Headers#contentType() content type} is
* {@linkplain MediaType#includes(MediaType) included} by any of the given media types.
* @param mediaTypes the media types to match the request's content type against
* @return a predicate that tests the request's content type against the given media types
*/
public static RequestPredicate contentType(MediaType... mediaTypes) {
Assert.notEmpty(mediaTypes, "'mediaTypes' must not be empty");
if (mediaTypes.length == 1) {
return new SingleContentTypePredicate(mediaTypes[0]);
}
else {
return new MultipleContentTypesPredicate(mediaTypes);
}
}
/**
* Return a {@code RequestPredicate} that tests if the request's
* {@linkplain ServerRequest.Headers#accept() accept} header is
* {@linkplain MediaType#isCompatibleWith(MediaType) compatible} with any of the given media types.
* @param mediaTypes the media types to match the request's accept header against
* @return a predicate that tests the request's accept header against the given media types
*/
public static RequestPredicate accept(MediaType... mediaTypes) {
Assert.notEmpty(mediaTypes, "'mediaTypes' must not be empty");
if (mediaTypes.length == 1) {
return new SingleAcceptPredicate(mediaTypes[0]);
}
else {
return new MultipleAcceptsPredicate(mediaTypes);
}
}
/**
* {@code RequestPredicate} to match to the request API version extracted
* from and parsed with the configured {@link ApiVersionStrategy}.
* <p>The version may be one of the following:
* <ul>
* <li>Fixed version ("1.2") -- match this version only.
* <li>Baseline version ("1.2+") -- match this and subsequent versions.
* </ul>
* <p>A baseline version allows n endpoint route to continue to work in
* subsequent versions if it remains compatible until an incompatible change
* eventually leads to the creation of a new route.
* @param version the version to use
* @return the created predicate instance
* @since 7.0
*/
public static RequestPredicate version(Object version) {
return new ApiVersionPredicate(version);
}
/**
* Return a {@code RequestPredicate} that matches if request's HTTP method is {@code GET}
* and the given {@code pattern} matches against the request path.
* @param pattern the path pattern to match against
* @return a predicate that matches if the request method is GET and if the given pattern
* matches against the request path
* @see org.springframework.web.util.pattern.PathPattern
*/
public static RequestPredicate GET(String pattern) {
return method(HttpMethod.GET).and(path(pattern));
}
/**
* Return a {@code RequestPredicate} that matches if request's HTTP method is {@code HEAD}
* and the given {@code pattern} matches against the request path.
* @param pattern the path pattern to match against
* @return a predicate that matches if the request method is HEAD and if the given pattern
* matches against the request path
* @see org.springframework.web.util.pattern.PathPattern
*/
public static RequestPredicate HEAD(String pattern) {
return method(HttpMethod.HEAD).and(path(pattern));
}
/**
* Return a {@code RequestPredicate} that matches if request's HTTP method is {@code POST}
* and the given {@code pattern} matches against the request path.
* @param pattern the path pattern to match against
* @return a predicate that matches if the request method is POST and if the given pattern
* matches against the request path
* @see org.springframework.web.util.pattern.PathPattern
*/
public static RequestPredicate POST(String pattern) {
return method(HttpMethod.POST).and(path(pattern));
}
/**
* Return a {@code RequestPredicate} that matches if request's HTTP method is {@code PUT}
* and the given {@code pattern} matches against the request path.
* @param pattern the path pattern to match against
* @return a predicate that matches if the request method is PUT and if the given pattern
* matches against the request path
* @see org.springframework.web.util.pattern.PathPattern
*/
public static RequestPredicate PUT(String pattern) {
return method(HttpMethod.PUT).and(path(pattern));
}
/**
* Return a {@code RequestPredicate} that matches if request's HTTP method is {@code PATCH}
* and the given {@code pattern} matches against the request path.
* @param pattern the path pattern to match against
* @return a predicate that matches if the request method is PATCH and if the given pattern
* matches against the request path
* @see org.springframework.web.util.pattern.PathPattern
*/
public static RequestPredicate PATCH(String pattern) {
return method(HttpMethod.PATCH).and(path(pattern));
}
/**
* Return a {@code RequestPredicate} that matches if request's HTTP method is {@code DELETE}
* and the given {@code pattern} matches against the request path.
* @param pattern the path pattern to match against
* @return a predicate that matches if the request method is DELETE and if the given pattern
* matches against the request path
* @see org.springframework.web.util.pattern.PathPattern
*/
public static RequestPredicate DELETE(String pattern) {
return method(HttpMethod.DELETE).and(path(pattern));
}
/**
* Return a {@code RequestPredicate} that matches if request's HTTP method is {@code OPTIONS}
* and the given {@code pattern} matches against the request path.
* @param pattern the path pattern to match against
* @return a predicate that matches if the request method is OPTIONS and if the given pattern
* matches against the request path
* @see org.springframework.web.util.pattern.PathPattern
*/
public static RequestPredicate OPTIONS(String pattern) {
return method(HttpMethod.OPTIONS).and(path(pattern));
}
/**
* Return a {@code RequestPredicate} that matches if the request's path has the given extension.
* @param extension the path extension to match against, ignoring case
* @return a predicate that matches if the request's path has the given file extension
* @deprecated without replacement to discourage use of path extensions for request
* mapping and for content negotiation (with similar deprecations and removals already
* applied to annotated controllers). For further context, please read issue
* <a href="https://github.com/spring-projects/spring-framework/issues/24179">#24179</a>
*/
@Deprecated(since = "7.0", forRemoval = true)
public static RequestPredicate pathExtension(String extension) {
Assert.notNull(extension, "'extension' must not be null");
return new PathExtensionPredicate(extension);
}
/**
* Return a {@code RequestPredicate} that matches if the request's path matches the given
* predicate.
* @param extensionPredicate the predicate to test against the request path extension
* @return a predicate that matches if the given predicate matches against the request's path
* file extension
* @deprecated without replacement to discourage use of path extensions for request
* mapping and for content negotiation (with similar deprecations and removals already
* applied to annotated controllers). For further context, please read issue
* <a href="https://github.com/spring-projects/spring-framework/issues/24179">#24179</a>
*/
@Deprecated(since = "7.0", forRemoval = true)
public static RequestPredicate pathExtension(Predicate<String> extensionPredicate) {
return new PathExtensionPredicate(extensionPredicate);
}
/**
* Return a {@code RequestPredicate} that matches if the request's parameter of the given name
* has the given value.
* @param name the name of the parameter to test against
* @param value the value of the parameter to test against
* @return a predicate that matches if the parameter has the given value
* @see ServerRequest#param(String)
*/
public static RequestPredicate param(String name, String value) {
return new ParamPredicate(name, value);
}
/**
* Return a {@code RequestPredicate} that tests the request's parameter of the given name
* against the given predicate.
* @param name the name of the parameter to test against
* @param predicate the predicate to test against the parameter value
* @return a predicate that matches the given predicate against the parameter of the given name
* @see ServerRequest#param(String)
*/
public static RequestPredicate param(String name, Predicate<String> predicate) {
return new ParamPredicate(name, predicate);
}
private static void traceMatch(String prefix, Object desired, @Nullable Object actual, boolean match) {
if (logger.isTraceEnabled()) {
logger.trace(String.format("%s \"%s\" %s against value \"%s\"",
prefix, desired, match ? "matches" : "does not match", actual));
}
}
private static PathPattern mergePatterns(@Nullable PathPattern oldPattern, PathPattern newPattern) {
if (oldPattern != null) {
return oldPattern.combine(newPattern);
}
else {
return newPattern;
}
}
/**
* Receives notifications from the logical structure of request predicates.
*/
public | RequestPredicates |
java | quarkusio__quarkus | extensions/smallrye-health/deployment/src/test/java/io/quarkus/smallrye/health/test/BlockingChecksVertxContextDuplicationTest.java | {
"start": 1902,
"end": 2211
} | class ____ implements HealthCheck {
public static Context capturedContext = null;
@Override
public HealthCheckResponse call() {
capturedContext = Vertx.currentContext();
return HealthCheckResponse.up("ContextCaptureCheck2");
}
}
}
| ContextCaptureCheck2 |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/jmx/IJmxTestBean.java | {
"start": 912,
"end": 947
} | interface
____ dontExposeMe();
}
| void |
java | hibernate__hibernate-orm | tooling/metamodel-generator/src/test/java/org/hibernate/processor/test/inheritance/basic/InheritanceTest.java | {
"start": 1003,
"end": 1699
} | class
____( House.class, Building.class );
assertSuperclassRelationshipInMetamodel( Building.class, Area.class );
// METAGEN-29
assertSuperclassRelationshipInMetamodel( Person.class, AbstractEntity.class );
assertPresenceOfFieldInMetamodelFor( AbstractEntity.class, "id", "Property 'id' should exist" );
assertPresenceOfFieldInMetamodelFor( AbstractEntity.class, "foo", "Property should exist - METAGEN-29" );
assertAttributeTypeInMetaModelFor(
AbstractEntity.class,
"foo",
Object.class,
"Object is the upper bound of foo "
);
assertPresenceOfFieldInMetamodelFor( Person.class, "name", "Property 'name' should exist" );
}
}
| assertSuperclassRelationshipInMetamodel |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/BaseTransportInferenceAction.java | {
"start": 2485,
"end": 2638
} | class ____ transport actions that handle inference requests.
* @param <Request> The specific type of inference request being handled
*/
public abstract | for |
java | google__dagger | javatests/dagger/internal/codegen/kotlin/KspComponentProcessorTest.java | {
"start": 12612,
"end": 12675
} | class ____ constructor(bar: Bar)",
"",
" | Foo |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/rest/FileUploadHandlerITCase.java | {
"start": 20982,
"end": 23834
} | class ____
implements BiConsumerWithException<
HandlerRequest<? extends RequestBody>, RestfulGateway, Exception> {
private final String customFilename1;
private final Path fileContent1;
private final String customFilename2;
private final Path fileContent2;
public CustomFilenameVerifier(
String customFilename1,
Path fileContent1,
String customFilename2,
Path fileContent2) {
this.customFilename1 = customFilename1;
this.fileContent1 = fileContent1;
this.customFilename2 = customFilename2;
this.fileContent2 = fileContent2;
}
@Override
public void accept(
HandlerRequest<? extends RequestBody> request, RestfulGateway restfulGateway)
throws Exception {
List<Path> uploadedFiles =
request.getUploadedFiles().stream()
.map(File::toPath)
.collect(Collectors.toList());
List<Path> actualList = new ArrayList<>(uploadedFiles);
actualList.sort(Comparator.comparing(Path::toString));
SortedMap<String, Path> expectedFilenamesAndContent = new TreeMap<>();
expectedFilenamesAndContent.put(customFilename1, fileContent1);
expectedFilenamesAndContent.put(customFilename2, fileContent2);
assertThat(expectedFilenamesAndContent).hasSameSizeAs(uploadedFiles);
Iterator<Path> uploadedFileIterator = actualList.iterator();
for (Map.Entry<String, Path> expectedFilenameAndContent :
expectedFilenamesAndContent.entrySet()) {
String expectedFilename = expectedFilenameAndContent.getKey();
Path expectedContent = expectedFilenameAndContent.getValue();
assertThat(uploadedFileIterator).hasNext();
Path actual = uploadedFileIterator.next();
assertThat(actual.getFileName()).hasToString(expectedFilename);
byte[] originalContent = java.nio.file.Files.readAllBytes(expectedContent);
byte[] receivedContent = java.nio.file.Files.readAllBytes(actual);
assertThat(receivedContent).isEqualTo(originalContent);
}
}
}
private OkHttpClient createOkHttpClientWithNoTimeouts() {
// don't fail if some OkHttpClient operations take longer. See FLINK-17725
return new OkHttpClient.Builder()
.connectTimeout(0, TimeUnit.MILLISECONDS)
.writeTimeout(0, TimeUnit.MILLISECONDS)
.readTimeout(0, TimeUnit.MILLISECONDS)
.build();
}
/**
* DiskAttribute and DiskFileUpload | CustomFilenameVerifier |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.