language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
bumptech__glide
|
library/src/main/java/com/bumptech/glide/load/resource/bytes/BytesResource.java
|
{
"start": 268,
"end": 1250
}
|
class ____ implements Resource<byte[]> {
private final byte[] bytes;
public BytesResource(byte[] bytes) {
this.bytes = Preconditions.checkNotNull(bytes);
}
@NonNull
@Override
public Class<byte[]> getResourceClass() {
return byte[].class;
}
/**
* In most cases it will only be retrieved once (see linked methods).
*
* @return the same array every time, do not mutate the contents. Not a copy returned, because
* copying the array can be prohibitively expensive and/or lead to OOMs.
* @see com.bumptech.glide.load.ResourceEncoder
* @see com.bumptech.glide.load.resource.transcode.ResourceTranscoder
* @see com.bumptech.glide.request.SingleRequest#onResourceReady
*/
@NonNull
@Override
@SuppressWarnings("PMD.MethodReturnsInternalArray")
public byte[] get() {
return bytes;
}
@Override
public int getSize() {
return bytes.length;
}
@Override
public void recycle() {
// Do nothing.
}
}
|
BytesResource
|
java
|
elastic__elasticsearch
|
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java
|
{
"start": 9567,
"end": 15431
}
|
class ____ implements Processor.Factory {
private final String type; // currently always just "geoip"
private final IpDatabaseProvider ipDatabaseProvider;
public Factory(String type, IpDatabaseProvider ipDatabaseProvider) {
this.type = type;
this.ipDatabaseProvider = ipDatabaseProvider;
}
@Override
public Processor create(
final Map<String, Processor.Factory> registry,
final String processorTag,
final String description,
final Map<String, Object> config,
final ProjectId projectId
) throws IOException {
String ipField = readStringProperty(type, processorTag, config, "field");
String targetField = readStringProperty(type, processorTag, config, "target_field", type);
String databaseFile = readStringProperty(type, processorTag, config, "database_file", "GeoLite2-City.mmdb");
List<String> propertyNames = readOptionalList(type, processorTag, config, "properties");
boolean ignoreMissing = readBooleanProperty(type, processorTag, config, "ignore_missing", false);
boolean firstOnly = readBooleanProperty(type, processorTag, config, "first_only", true);
// validate (and consume) the download_database_on_pipeline_creation property even though the result is not used by the factory
readBooleanProperty(type, processorTag, config, "download_database_on_pipeline_creation", true);
final String databaseType;
try (IpDatabase ipDatabase = ipDatabaseProvider.getDatabase(projectId, databaseFile)) {
if (ipDatabase == null) {
// It's possible that the database could be downloaded via the GeoipDownloader process and could become available
// at a later moment, so a processor impl is returned that tags documents instead. If a database cannot be sourced
// then the processor will continue to tag documents with a warning until it is remediated by providing a database
// or changing the pipeline.
return new DatabaseUnavailableProcessor(type, processorTag, description, databaseFile);
}
databaseType = ipDatabase.getDatabaseType();
}
final IpDataLookupFactory factory;
try {
factory = IpDataLookupFactories.get(databaseType, databaseFile);
} catch (IllegalArgumentException e) {
throw newConfigurationException(type, processorTag, "database_file", e.getMessage());
}
// the "geoip" processor type does additional validation of the database_type
if (GEOIP_TYPE.equals(type)) {
// type sniffing is done with the lowercased type
final String lowerCaseDatabaseType = databaseType.toLowerCase(Locale.ROOT);
// start with a strict positive rejection check -- as we support addition database providers,
// we should expand these checks when possible
if (lowerCaseDatabaseType.startsWith(IpinfoIpDataLookups.IPINFO_PREFIX)) {
throw newConfigurationException(
type,
processorTag,
"database_file",
Strings.format("Unsupported database type [%s] for file [%s]", databaseType, databaseFile)
);
}
// end with a lax negative rejection check -- if we aren't *certain* it's a maxmind database, then we'll warn --
// it's possible for example that somebody cooked up a custom database of their own that happened to work with
// our preexisting code, they should migrate to the new processor, but we're not going to break them right now
if (lowerCaseDatabaseType.startsWith(MaxmindIpDataLookups.GEOIP2_PREFIX) == false
&& lowerCaseDatabaseType.startsWith(MaxmindIpDataLookups.GEOLITE2_PREFIX) == false) {
deprecationLogger.warn(
DeprecationCategory.OTHER,
"unsupported_database_type",
UNSUPPORTED_DATABASE_DEPRECATION_MESSAGE,
databaseType
);
}
}
final IpDataLookup ipDataLookup;
try {
ipDataLookup = factory.create(propertyNames);
} catch (IllegalArgumentException e) {
throw newConfigurationException(type, processorTag, "properties", e.getMessage());
}
return new GeoIpProcessor(
type,
processorTag,
description,
ipField,
new DatabaseVerifyingSupplier(ipDatabaseProvider, databaseFile, databaseType, projectId),
() -> ipDatabaseProvider.isValid(projectId, databaseFile),
targetField,
ipDataLookup,
ignoreMissing,
firstOnly,
databaseFile
);
}
/**
* Get the value of the "download_database_on_pipeline_creation" property from a processor's config map.
* <p>
* As with the actual property definition, the default value of the property is 'true'. Unlike the actual
* property definition, this method doesn't consume (that is, <code>config.remove</code>) the property from
* the config map.
*/
public static boolean downloadDatabaseOnPipelineCreation(Map<String, Object> config) {
return (boolean) config.getOrDefault("download_database_on_pipeline_creation", true);
}
}
static
|
Factory
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/util/config/memory/JobManagerProcessSpecTest.java
|
{
"start": 1121,
"end": 2332
}
|
class ____ {
@Test
void testEquals() {
JobManagerProcessSpec spec1 =
new JobManagerProcessSpec(
MemorySize.parse("1m"),
MemorySize.parse("2m"),
MemorySize.parse("3m"),
MemorySize.parse("4m"));
JobManagerProcessSpec spec2 =
new JobManagerProcessSpec(
MemorySize.parse("1m"),
MemorySize.parse("2m"),
MemorySize.parse("3m"),
MemorySize.parse("4m"));
assertThat(spec1).isEqualTo(spec2);
}
@Test
void testNotEquals() {
JobManagerProcessSpec spec1 =
new JobManagerProcessSpec(
MemorySize.parse("1m"),
MemorySize.parse("2m"),
MemorySize.parse("3m"),
MemorySize.parse("4m"));
JobManagerProcessSpec spec2 =
new JobManagerProcessSpec(
MemorySize.ZERO, MemorySize.ZERO, MemorySize.ZERO, MemorySize.ZERO);
assertThat(spec1).isNotEqualTo(spec2);
}
}
|
JobManagerProcessSpecTest
|
java
|
elastic__elasticsearch
|
libs/entitlement/qa/entitlement-test-plugin/src/main/java/org/elasticsearch/entitlement/qa/test/DummyImplementations.java
|
{
"start": 16323,
"end": 16607
}
|
class ____ extends CharsetProvider {
@Override
public Iterator<Charset> charsets() {
return null;
}
@Override
public Charset charsetForName(String charsetName) {
return null;
}
}
static
|
DummyCharsetProvider
|
java
|
quarkusio__quarkus
|
extensions/reactive-mssql-client/deployment/src/test/java/io/quarkus/reactive/mssql/client/ConfigActiveFalseDefaultDatasourceDynamicInjectionTest.java
|
{
"start": 574,
"end": 2717
}
|
class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.overrideConfigKey("quarkus.datasource.active", "false");
@Inject
InjectableInstance<Pool> pool;
@Inject
InjectableInstance<io.vertx.mutiny.sqlclient.Pool> mutinyPool;
@Inject
InjectableInstance<MSSQLPool> vendorPool;
@Inject
InjectableInstance<io.vertx.mutiny.mssqlclient.MSSQLPool> mutinyVendorPool;
@Test
public void pool() {
doTest(pool, Pool::getConnection);
}
@Test
public void mutinyPool() {
doTest(mutinyPool, io.vertx.mutiny.sqlclient.Pool::getConnection);
}
@Test
public void vendorPool() {
doTest(vendorPool, Pool::getConnection);
}
@Test
public void mutinyVendorPool() {
doTest(mutinyVendorPool, io.vertx.mutiny.sqlclient.Pool::getConnection);
}
private <T> void doTest(InjectableInstance<T> instance, Consumer<T> action) {
// The bean is always available to be injected during static init
// since we don't know whether the datasource will be active at runtime.
// So the bean proxy cannot be null.
assertThat(instance.getHandle().getBean())
.isNotNull()
.returns(false, InjectableBean::isActive);
var pool = instance.get();
assertThat(pool).isNotNull();
// However, any attempt to use it at runtime will fail.
assertThatThrownBy(() -> action.accept(pool))
.isInstanceOf(InactiveBeanException.class)
.hasMessageContainingAll("Datasource '<default>' was deactivated through configuration properties.",
"To avoid this exception while keeping the bean inactive", // Message from Arc with generic hints
"To activate the datasource, set configuration property 'quarkus.datasource.active'"
+ " to 'true' and configure datasource '<default>'",
"Refer to https://quarkus.io/guides/datasource for guidance.");
}
}
|
ConfigActiveFalseDefaultDatasourceDynamicInjectionTest
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/index/mapper/IPSyntheticSourceNativeArrayIntegrationTests.java
|
{
"start": 766,
"end": 4377
}
|
class ____ extends NativeArrayIntegrationTestCase {
@Override
protected String getFieldTypeName() {
return "ip";
}
@Override
protected String getRandomValue() {
return NetworkAddress.format(randomIp(true));
}
@Override
protected String getMalformedValue() {
return RandomStrings.randomAsciiOfLength(random(), 8);
}
public void testSynthesizeArray() throws Exception {
var arrayValues = new Object[][] {
new Object[] { "192.168.1.4", "192.168.1.3", null, "192.168.1.2", null, "192.168.1.1" },
new Object[] { null, "192.168.1.2", null, "192.168.1.1" },
new Object[] { null },
new Object[] { null, null, null },
new Object[] { "192.168.1.3", "192.168.1.2", "192.168.1.1" } };
verifySyntheticArray(arrayValues);
}
public void testSynthesizeArrayIgnoreMalformed() throws Exception {
var mapping = jsonBuilder().startObject()
.startObject("properties")
.startObject("field")
.field("type", "ip")
.field("ignore_malformed", true)
.endObject()
.endObject()
.endObject();
// Note values that would be ignored are added at the end of arrays,
// this makes testing easier as ignored values are always synthesized after regular values:
var arrayValues = new Object[][] {
new Object[] { null, "192.168.1.1", "192.168.1.2", "192.168.1.3", "192.168.1.4", null, "malformed" },
new Object[] { "192.168.1.1", "192.168.1.2", "malformed" },
new Object[] { "192.168.1.1", "192.168.1.1", "malformed" },
new Object[] { null, null, null, "malformed" },
new Object[] { "192.168.1.3", "192.168.1.3", "192.168.1.1", "malformed" } };
verifySyntheticArray(arrayValues, mapping, "_id", "field._ignore_malformed");
}
public void testSynthesizeObjectArray() throws Exception {
List<List<Object[]>> documents = new ArrayList<>();
{
List<Object[]> document = new ArrayList<>();
document.add(new Object[] { "192.168.1.3", "192.168.1.2", "192.168.1.1" });
document.add(new Object[] { "192.168.1.110", "192.168.1.109", "192.168.1.111" });
document.add(new Object[] { "192.168.1.2", "192.168.1.2", "192.168.1.1" });
documents.add(document);
}
{
List<Object[]> document = new ArrayList<>();
document.add(new Object[] { "192.168.1.9", "192.168.1.7", "192.168.1.5" });
document.add(new Object[] { "192.168.1.2", "192.168.1.4", "192.168.1.6" });
document.add(new Object[] { "192.168.1.7", "192.168.1.6", "192.168.1.5" });
documents.add(document);
}
verifySyntheticObjectArray(documents);
}
public void testSynthesizeArrayInObjectField() throws Exception {
List<Object[]> documents = new ArrayList<>();
documents.add(new Object[] { "192.168.1.254", "192.168.1.253", "192.168.1.252" });
documents.add(new Object[] { "192.168.1.112", "192.168.1.113", "192.168.1.114" });
documents.add(new Object[] { "192.168.1.3", "192.168.1.2", "192.168.1.1" });
documents.add(new Object[] { "192.168.1.9", "192.168.1.7", "192.168.1.5" });
documents.add(new Object[] { "192.168.1.2", "192.168.1.4", "192.168.1.6" });
documents.add(new Object[] { "192.168.1.7", "192.168.1.6", "192.168.1.5" });
verifySyntheticArrayInObject(documents);
}
}
|
IPSyntheticSourceNativeArrayIntegrationTests
|
java
|
spring-projects__spring-boot
|
module/spring-boot-actuator/src/test/java/org/springframework/boot/actuate/endpoint/web/annotation/ControllerEndpointDiscovererTests.java
|
{
"start": 7765,
"end": 7866
}
|
class ____ {
}
@RestControllerEndpoint(id = "testrestcontroller")
static
|
TestProxyControllerEndpoint
|
java
|
apache__commons-lang
|
src/test/java/org/apache/commons/lang3/FunctionsTest.java
|
{
"start": 34893,
"end": 35343
}
|
interface ____ properly defined to throw any exception using String and IOExceptions as
* generic test types.
*/
@Test
void testThrows_FailableCallable_String_IOException() {
new Functions.FailableCallable<String, IOException>() {
@Override
public String call() throws IOException {
throw new IOException("test");
}
};
}
/**
* Tests that our failable
|
is
|
java
|
dropwizard__dropwizard
|
dropwizard-core/src/main/java/io/dropwizard/core/server/ServerFactory.java
|
{
"start": 449,
"end": 950
}
|
interface ____ extends Discoverable {
/**
* Build a server for the given Dropwizard application.
*
* @param environment the application's environment
* @return a {@link Server} running the Dropwizard application
*/
Server build(Environment environment);
/**
* Configures the given environment with settings defined in the factory.
*
* @param environment the application's environment
*/
void configure(Environment environment);
}
|
ServerFactory
|
java
|
apache__flink
|
flink-filesystems/flink-s3-fs-base/src/test/java/org/apache/flink/fs/s3/common/writer/S3RecoverableSerializerTest.java
|
{
"start": 1132,
"end": 5265
}
|
class ____ {
private final S3RecoverableSerializer serializer = S3RecoverableSerializer.INSTANCE;
private static final String TEST_OBJECT_NAME = "TEST-OBJECT";
private static final String TEST_UPLOAD_ID = "TEST-UPLOAD-ID";
private static final String INCOMPLETE_OBJECT_NAME = "TEST-INCOMPLETE-PART";
private static final String ETAG_PREFIX = "TEST-ETAG-";
@Test
void serializeEmptyS3Recoverable() throws IOException {
S3Recoverable originalEmptyRecoverable = createTestS3Recoverable(false);
byte[] serializedRecoverable = serializer.serialize(originalEmptyRecoverable);
S3Recoverable copiedEmptyRecoverable = serializer.deserialize(1, serializedRecoverable);
assertThatIsEqualTo(originalEmptyRecoverable, copiedEmptyRecoverable);
}
@Test
void serializeS3RecoverableWithoutIncompleteObject() throws IOException {
S3Recoverable originalNoIncompletePartRecoverable = createTestS3Recoverable(false, 1, 5, 9);
byte[] serializedRecoverable = serializer.serialize(originalNoIncompletePartRecoverable);
S3Recoverable copiedNoIncompletePartRecoverable =
serializer.deserialize(1, serializedRecoverable);
assertThatIsEqualTo(originalNoIncompletePartRecoverable, copiedNoIncompletePartRecoverable);
}
@Test
void serializeS3RecoverableOnlyWithIncompleteObject() throws IOException {
S3Recoverable originalOnlyIncompletePartRecoverable = createTestS3Recoverable(true);
byte[] serializedRecoverable = serializer.serialize(originalOnlyIncompletePartRecoverable);
S3Recoverable copiedOnlyIncompletePartRecoverable =
serializer.deserialize(1, serializedRecoverable);
assertThatIsEqualTo(
originalOnlyIncompletePartRecoverable, copiedOnlyIncompletePartRecoverable);
}
@Test
void serializeS3RecoverableWithCompleteAndIncompleteParts() throws IOException {
S3Recoverable originalFullRecoverable = createTestS3Recoverable(true, 1, 5, 9);
byte[] serializedRecoverable = serializer.serialize(originalFullRecoverable);
S3Recoverable copiedFullRecoverable = serializer.deserialize(1, serializedRecoverable);
assertThatIsEqualTo(originalFullRecoverable, copiedFullRecoverable);
}
private static void assertThatIsEqualTo(
S3Recoverable actualRecoverable, S3Recoverable expectedRecoverable) {
assertThat(actualRecoverable.getObjectName())
.isEqualTo(expectedRecoverable.getObjectName());
assertThat(actualRecoverable.uploadId()).isEqualTo(expectedRecoverable.uploadId());
assertThat(actualRecoverable.numBytesInParts())
.isEqualTo(expectedRecoverable.numBytesInParts());
assertThat(actualRecoverable.incompleteObjectName())
.isEqualTo(expectedRecoverable.incompleteObjectName());
assertThat(actualRecoverable.incompleteObjectLength())
.isEqualTo(expectedRecoverable.incompleteObjectLength());
assertThat(actualRecoverable.parts().stream().map(PartETag::getETag).toArray())
.isEqualTo(expectedRecoverable.parts().stream().map(PartETag::getETag).toArray());
}
// --------------------------------- Test Utils ---------------------------------
private static S3Recoverable createTestS3Recoverable(
boolean withIncompletePart, int... partNumbers) {
List<PartETag> etags = new ArrayList<>();
for (int i : partNumbers) {
etags.add(createEtag(i));
}
if (withIncompletePart) {
return new S3Recoverable(
TEST_OBJECT_NAME,
TEST_UPLOAD_ID,
etags,
12345L,
INCOMPLETE_OBJECT_NAME,
54321L);
} else {
return new S3Recoverable(TEST_OBJECT_NAME, TEST_UPLOAD_ID, etags, 12345L);
}
}
private static PartETag createEtag(int partNumber) {
return new PartETag(partNumber, ETAG_PREFIX + partNumber);
}
}
|
S3RecoverableSerializerTest
|
java
|
apache__camel
|
components/camel-openstack/src/test/java/org/apache/camel/component/openstack/keystone/RegionProducerTest.java
|
{
"start": 1960,
"end": 5627
}
|
class ____ extends KeystoneProducerTestSupport {
private Region dummyRegion;
@Mock
private Region testOSregion;
@Mock
private RegionService regionService;
@Captor
private ArgumentCaptor<Region> regionCaptor;
@Captor
private ArgumentCaptor<String> regionIdCaptor;
@BeforeEach
public void setUp() {
when(identityService.regions()).thenReturn(regionService);
producer = new RegionProducer(endpoint, client);
when(regionService.create(any())).thenReturn(testOSregion);
when(regionService.get(anyString())).thenReturn(testOSregion);
List<Region> getAllList = new ArrayList<>();
getAllList.add(testOSregion);
getAllList.add(testOSregion);
doReturn(getAllList).when(regionService).list();
dummyRegion = createRegion();
when(testOSregion.getDescription()).thenReturn(dummyRegion.getDescription());
}
@Test
public void createTest() throws Exception {
msg.setHeader(OpenstackConstants.OPERATION, OpenstackConstants.CREATE);
msg.setHeader(KeystoneConstants.DESCRIPTION, dummyRegion.getDescription());
producer.process(exchange);
verify(regionService).create(regionCaptor.capture());
assertEqualsRegion(dummyRegion, regionCaptor.getValue());
}
@Test
public void getTest() throws Exception {
final String id = "id";
msg.setHeader(OpenstackConstants.OPERATION, OpenstackConstants.GET);
msg.setHeader(OpenstackConstants.ID, id);
producer.process(exchange);
verify(regionService).get(regionIdCaptor.capture());
assertEquals(id, regionIdCaptor.getValue());
assertEqualsRegion(testOSregion, msg.getBody(Region.class));
}
@Test
public void getAllTest() throws Exception {
msg.setHeader(OpenstackConstants.OPERATION, OpenstackConstants.GET_ALL);
producer.process(exchange);
final List<Network> result = msg.getBody(List.class);
assertEquals(2, result.size());
assertEquals(testOSregion, result.get(0));
}
@Test
public void updateTest() throws Exception {
final String id = "myID";
msg.setHeader(OpenstackConstants.OPERATION, OpenstackConstants.UPDATE);
when(testOSregion.getId()).thenReturn(id);
final String newDescription = "ndesc";
when(testOSregion.getDescription()).thenReturn(newDescription);
when(regionService.update(any())).thenReturn(testOSregion);
msg.setBody(testOSregion);
producer.process(exchange);
verify(regionService).update(regionCaptor.capture());
assertEqualsRegion(testOSregion, regionCaptor.getValue());
assertNotNull(regionCaptor.getValue().getId());
assertEquals(newDescription, msg.getBody(Region.class).getDescription());
}
@Test
public void deleteTest() throws Exception {
when(regionService.delete(anyString())).thenReturn(ActionResponse.actionSuccess());
final String networkID = "myID";
msg.setHeader(OpenstackConstants.OPERATION, OpenstackConstants.DELETE);
msg.setHeader(OpenstackConstants.ID, networkID);
producer.process(exchange);
verify(regionService).delete(regionIdCaptor.capture());
assertEquals(networkID, regionIdCaptor.getValue());
}
private void assertEqualsRegion(Region old, Region newRegion) {
assertEquals(old.getDescription(), newRegion.getDescription());
}
private Region createRegion() {
return Builders.region()
.description("desc")
.build();
}
}
|
RegionProducerTest
|
java
|
spring-projects__spring-framework
|
spring-expression/src/test/java/org/springframework/expression/spel/ast/OpPlusTests.java
|
{
"start": 1573,
"end": 8446
}
|
class ____ {
private final ExpressionState expressionState = new ExpressionState(new StandardEvaluationContext());
@Test
void emptyOperands() {
assertThatIllegalArgumentException().isThrownBy(() -> new OpPlus(-1, -1));
}
@Test
void unaryPlusWithStringLiteral() {
StringLiteral stringLiteral = new StringLiteral("word", -1, -1, "word");
OpPlus operator = new OpPlus(-1, -1, stringLiteral);
assertThatExceptionOfType(SpelEvaluationException.class)
.isThrownBy(() -> operator.getValueInternal(expressionState));
}
@Test
void unaryPlusWithIntegerOperand() {
IntLiteral intLiteral = new IntLiteral("123", -1, -1, 123);
OpPlus operator = new OpPlus(-1, -1, intLiteral);
TypedValue value = operator.getValueInternal(expressionState);
assertThat(value.getTypeDescriptor().getObjectType()).isEqualTo(Integer.class);
assertThat(value.getTypeDescriptor().getType()).isEqualTo(Integer.class);
assertThat(value.getValue()).isEqualTo(intLiteral.getLiteralValue().getValue());
}
@Test
void unaryPlusWithLongOperand() {
LongLiteral longLiteral = new LongLiteral("123", -1, -1, 123L);
OpPlus operator = new OpPlus(-1, -1, longLiteral);
TypedValue value = operator.getValueInternal(expressionState);
assertThat(value.getTypeDescriptor().getObjectType()).isEqualTo(Long.class);
assertThat(value.getTypeDescriptor().getType()).isEqualTo(Long.class);
assertThat(value.getValue()).isEqualTo(longLiteral.getLiteralValue().getValue());
}
@Test
void unaryPlusWithRealOperand() {
RealLiteral realLiteral = new RealLiteral("123.00", -1, -1, 123.0);
OpPlus operator = new OpPlus(-1, -1, realLiteral);
TypedValue value = operator.getValueInternal(expressionState);
assertThat(value.getTypeDescriptor().getObjectType()).isEqualTo(Double.class);
assertThat(value.getTypeDescriptor().getType()).isEqualTo(Double.class);
assertThat(value.getValue()).isEqualTo(realLiteral.getLiteralValue().getValue());
}
@Test
void binaryPlusWithIntegerOperands() {
IntLiteral n1 = new IntLiteral("123", -1, -1, 123);
IntLiteral n2 = new IntLiteral("456", -1, -1, 456);
OpPlus operator = new OpPlus(-1, -1, n1, n2);
TypedValue value = operator.getValueInternal(expressionState);
assertThat(value.getTypeDescriptor().getObjectType()).isEqualTo(Integer.class);
assertThat(value.getTypeDescriptor().getType()).isEqualTo(Integer.class);
assertThat(value.getValue()).isEqualTo(123 + 456);
}
@Test
void binaryPlusWithLongOperands() {
LongLiteral n1 = new LongLiteral("123", -1, -1, 123L);
LongLiteral n2 = new LongLiteral("456", -1, -1, 456L);
OpPlus operator = new OpPlus(-1, -1, n1, n2);
TypedValue value = operator.getValueInternal(expressionState);
assertThat(value.getTypeDescriptor().getObjectType()).isEqualTo(Long.class);
assertThat(value.getTypeDescriptor().getType()).isEqualTo(Long.class);
assertThat(value.getValue()).isEqualTo(123L + 456L);
}
@Test
void binaryPlusWithRealOperands() {
RealLiteral n1 = new RealLiteral("123.00", -1, -1, 123.0);
RealLiteral n2 = new RealLiteral("456.00", -1, -1, 456.0);
OpPlus operator = new OpPlus(-1, -1, n1, n2);
TypedValue value = operator.getValueInternal(expressionState);
assertThat(value.getTypeDescriptor().getObjectType()).isEqualTo(Double.class);
assertThat(value.getTypeDescriptor().getType()).isEqualTo(Double.class);
assertThat(value.getValue()).isEqualTo(123.0 + 456.0);
}
@Test
void binaryPlusWithStringOperands() {
StringLiteral str1 = new StringLiteral("\"foo\"", -1, -1, "\"foo\"");
StringLiteral str2 = new StringLiteral("\"bar\"", -1, -1, "\"bar\"");
OpPlus operator = new OpPlus(-1, -1, str1, str2);
TypedValue value = operator.getValueInternal(expressionState);
assertThat(value.getTypeDescriptor().getObjectType()).isEqualTo(String.class);
assertThat(value.getTypeDescriptor().getType()).isEqualTo(String.class);
assertThat(value.getValue()).isEqualTo("foobar");
}
@Test
void binaryPlusWithLeftStringOperand() {
StringLiteral stringLiteral = new StringLiteral("\"number is \"", -1, -1, "\"number is \"");
LongLiteral longLiteral = new LongLiteral("123", -1, -1, 123);
OpPlus operator = new OpPlus(-1, -1, stringLiteral, longLiteral);
TypedValue value = operator.getValueInternal(expressionState);
assertThat(value.getTypeDescriptor().getObjectType()).isEqualTo(String.class);
assertThat(value.getTypeDescriptor().getType()).isEqualTo(String.class);
assertThat(value.getValue()).isEqualTo("number is 123");
}
@Test
void binaryPlusWithRightStringOperand() {
LongLiteral longLiteral = new LongLiteral("123", -1, -1, 123);
StringLiteral stringLiteral = new StringLiteral("\" is a number\"", -1, -1, "\" is a number\"");
OpPlus operator = new OpPlus(-1, -1, longLiteral, stringLiteral);
TypedValue value = operator.getValueInternal(expressionState);
assertThat(value.getTypeDescriptor().getObjectType()).isEqualTo(String.class);
assertThat(value.getTypeDescriptor().getType()).isEqualTo(String.class);
assertThat(value.getValue()).isEqualTo("123 is a number");
}
@Test
void binaryPlusWithSqlTimeToString() {
Time time = new Time(new Date().getTime());
VariableReference var = new VariableReference("timeVar", -1, -1);
var.setValue(expressionState, time);
StringLiteral stringLiteral = new StringLiteral("\" is now\"", -1, -1, "\" is now\"");
OpPlus operator = new OpPlus(-1, -1, var, stringLiteral);
TypedValue value = operator.getValueInternal(expressionState);
assertThat(value.getTypeDescriptor().getObjectType()).isEqualTo(String.class);
assertThat(value.getTypeDescriptor().getType()).isEqualTo(String.class);
assertThat(value.getValue()).isEqualTo(time + " is now");
}
@Test
void binaryPlusWithTimeConverted() {
SimpleDateFormat format = new SimpleDateFormat("hh :--: mm :--: ss", Locale.ENGLISH);
GenericConversionService conversionService = new GenericConversionService();
conversionService.addConverter(Time.class, String.class, format::format);
StandardEvaluationContext evaluationContextConverter = new StandardEvaluationContext();
evaluationContextConverter.setTypeConverter(new StandardTypeConverter(conversionService));
ExpressionState expressionState = new ExpressionState(evaluationContextConverter);
Time time = new Time(new Date().getTime());
VariableReference var = new VariableReference("timeVar", -1, -1);
var.setValue(expressionState, time);
StringLiteral stringLiteral = new StringLiteral("\" is now\"", -1, -1, "\" is now\"");
OpPlus operator = new OpPlus(-1, -1, var, stringLiteral);
TypedValue value = operator.getValueInternal(expressionState);
assertThat(value.getTypeDescriptor().getObjectType()).isEqualTo(String.class);
assertThat(value.getTypeDescriptor().getType()).isEqualTo(String.class);
assertThat(value.getValue()).isEqualTo(format.format(time) + " is now");
}
}
|
OpPlusTests
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/MetricConfig.java
|
{
"start": 1582,
"end": 7803
}
|
enum
____ static final ParseField MIN = new ParseField("min");
public static final ParseField MAX = new ParseField("max");
public static final ParseField SUM = new ParseField("sum");
public static final ParseField AVG = new ParseField("avg");
public static final ParseField VALUE_COUNT = new ParseField("value_count");
public static final String NAME = "metrics";
private static final String FIELD = "field";
private static final String METRICS = "metrics";
private static final ConstructingObjectParser<MetricConfig, Void> PARSER;
static {
PARSER = new ConstructingObjectParser<>(NAME, args -> {
@SuppressWarnings("unchecked")
List<String> metrics = (List<String>) args[1];
return new MetricConfig((String) args[0], metrics);
});
PARSER.declareString(constructorArg(), new ParseField(FIELD));
PARSER.declareStringArray(constructorArg(), new ParseField(METRICS));
}
private final String field;
private final List<String> metrics;
public MetricConfig(final String field, final List<String> metrics) {
if (field == null || field.isEmpty()) {
throw new IllegalArgumentException("Field must be a non-null, non-empty string");
}
if (metrics == null || metrics.isEmpty()) {
throw new IllegalArgumentException("Metrics must be a non-null, non-empty array of strings");
}
metrics.forEach(m -> {
if (RollupField.SUPPORTED_METRICS.contains(m) == false) {
throw new IllegalArgumentException(
"Unsupported metric [" + m + "]. " + "Supported metrics include: " + RollupField.SUPPORTED_METRICS
);
}
});
this.field = field;
this.metrics = metrics;
}
public MetricConfig(final StreamInput in) throws IOException {
field = in.readString();
metrics = in.readStringCollectionAsList();
}
/**
* @return the name of the field used in the metric configuration. Never {@code null}.
*/
public String getField() {
return field;
}
/**
* @return the names of the metrics used in the metric configuration. Never {@code null}.
*/
public List<String> getMetrics() {
return metrics;
}
public void validateMappings(
Map<String, Map<String, FieldCapabilities>> fieldCapsResponse,
ActionRequestValidationException validationException
) {
Map<String, FieldCapabilities> fieldCaps = fieldCapsResponse.get(field);
if (fieldCaps != null && fieldCaps.isEmpty() == false) {
fieldCaps.forEach((key, value) -> {
if (value.isAggregatable() == false) {
validationException.addValidationError(
"The field [" + field + "] must be aggregatable across all indices, " + "but is not."
);
}
if (RollupField.NUMERIC_FIELD_MAPPER_TYPES.contains(key)) {
// nothing to do as all metrics are supported by SUPPORTED_NUMERIC_METRICS currently
} else if (RollupField.DATE_FIELD_MAPPER_TYPES.contains(key)) {
if (RollupField.SUPPORTED_DATE_METRICS.containsAll(metrics) == false) {
validationException.addValidationError(buildSupportedMetricError(key, RollupField.SUPPORTED_DATE_METRICS));
}
} else {
validationException.addValidationError(
"The field referenced by a metric group must be a [numeric] or ["
+ Strings.collectionToCommaDelimitedString(RollupField.DATE_FIELD_MAPPER_TYPES)
+ "] type, "
+ "but found "
+ fieldCaps.keySet().toString()
+ " for field ["
+ field
+ "]"
);
}
});
} else {
validationException.addValidationError(
"Could not find a [numeric] or ["
+ Strings.collectionToCommaDelimitedString(RollupField.DATE_FIELD_MAPPER_TYPES)
+ "] field with name ["
+ field
+ "] in any of the "
+ "indices matching the index pattern."
);
}
}
@Override
public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
builder.startObject();
{
builder.field(FIELD, field);
builder.stringListField(METRICS, metrics);
}
return builder.endObject();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(field);
out.writeStringCollection(metrics);
}
@Override
public boolean equals(final Object other) {
if (this == other) {
return true;
}
if (other == null || getClass() != other.getClass()) {
return false;
}
final MetricConfig that = (MetricConfig) other;
return Objects.equals(field, that.field) && Objects.equals(metrics, that.metrics);
}
@Override
public int hashCode() {
return Objects.hash(field, metrics);
}
@Override
public String toString() {
return Strings.toString(this, true, true);
}
public static MetricConfig fromXContent(final XContentParser parser) throws IOException {
return PARSER.parse(parser, null);
}
private String buildSupportedMetricError(String type, List<String> supportedMetrics) {
List<String> unsupportedMetrics = new ArrayList<>(metrics);
unsupportedMetrics.removeAll(supportedMetrics);
return "Only the metrics "
+ supportedMetrics
+ " are supported for ["
+ type
+ "] types,"
+ " but unsupported metrics "
+ unsupportedMetrics
+ " supplied for field ["
+ field
+ "]";
}
}
|
public
|
java
|
spring-projects__spring-framework
|
spring-tx/src/test/java/org/springframework/transaction/TxNamespaceHandlerEventTests.java
|
{
"start": 1298,
"end": 1989
}
|
class ____ {
private final DefaultListableBeanFactory beanFactory = new DefaultListableBeanFactory();
private final CollectingReaderEventListener eventListener = new CollectingReaderEventListener();
@BeforeEach
void setUp() {
XmlBeanDefinitionReader reader = new XmlBeanDefinitionReader(this.beanFactory);
reader.setEventListener(this.eventListener);
reader.loadBeanDefinitions(new ClassPathResource("txNamespaceHandlerTests.xml", getClass()));
}
@Test
void componentEventReceived() {
ComponentDefinition component = this.eventListener.getComponentDefinition("txAdvice");
assertThat(component).isInstanceOf(BeanComponentDefinition.class);
}
}
|
TxNamespaceHandlerEventTests
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/annotation/web/configurers/oauth2/server/authorization/OidcTests.java
|
{
"start": 7393,
"end": 30108
}
|
class ____ {
private static final String DEFAULT_AUTHORIZATION_ENDPOINT_URI = "/oauth2/authorize";
private static final String DEFAULT_TOKEN_ENDPOINT_URI = "/oauth2/token";
private static final String DEFAULT_OIDC_LOGOUT_ENDPOINT_URI = "/connect/logout";
// See RFC 7636: Appendix B. Example for the S256 code_challenge_method
// https://tools.ietf.org/html/rfc7636#appendix-B
private static final String S256_CODE_VERIFIER = "dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk";
private static final String S256_CODE_CHALLENGE = "E9Melhoa2OwvFrEMTJguCHaoeK1t8URWbuGJSstw-cM";
private static final String AUTHORITIES_CLAIM = "authorities";
private static final OAuth2TokenType AUTHORIZATION_CODE_TOKEN_TYPE = new OAuth2TokenType(OAuth2ParameterNames.CODE);
private static EmbeddedDatabase db;
private static JWKSource<SecurityContext> jwkSource;
private static HttpMessageConverter<OAuth2AccessTokenResponse> accessTokenHttpResponseConverter = new OAuth2AccessTokenResponseHttpMessageConverter();
private static SessionRegistry sessionRegistry;
public final SpringTestContext spring = new SpringTestContext(this);
@Autowired
private MockMvc mvc;
@Autowired
private JdbcOperations jdbcOperations;
@Autowired
private RegisteredClientRepository registeredClientRepository;
@Autowired
private OAuth2AuthorizationService authorizationService;
@Autowired
private JwtDecoder jwtDecoder;
@Autowired(required = false)
private OAuth2TokenGenerator<?> tokenGenerator;
@BeforeAll
public static void init() {
JWKSet jwkSet = new JWKSet(TestJwks.DEFAULT_RSA_JWK);
jwkSource = (jwkSelector, securityContext) -> jwkSelector.select(jwkSet);
db = new EmbeddedDatabaseBuilder().generateUniqueName(true)
.setType(EmbeddedDatabaseType.HSQL)
.setScriptEncoding("UTF-8")
.addScript("org/springframework/security/oauth2/server/authorization/oauth2-authorization-schema.sql")
.addScript(
"org/springframework/security/oauth2/server/authorization/client/oauth2-registered-client-schema.sql")
.build();
sessionRegistry = spy(new SessionRegistryImpl());
}
@AfterEach
public void tearDown() {
if (this.jdbcOperations != null) {
this.jdbcOperations.update("truncate table oauth2_authorization");
this.jdbcOperations.update("truncate table oauth2_registered_client");
}
}
@AfterAll
public static void destroy() {
db.shutdown();
}
@Test
public void requestWhenAuthenticationRequestThenTokenResponseIncludesIdToken() throws Exception {
this.spring.register(AuthorizationServerConfiguration.class).autowire();
RegisteredClient registeredClient = TestRegisteredClients.registeredClient().scope(OidcScopes.OPENID).build();
this.registeredClientRepository.save(registeredClient);
MultiValueMap<String, String> authorizationRequestParameters = getAuthorizationRequestParameters(
registeredClient);
MvcResult mvcResult = this.mvc
.perform(get(DEFAULT_AUTHORIZATION_ENDPOINT_URI).queryParams(authorizationRequestParameters)
.with(user("user").roles("A", "B")))
.andExpect(status().is3xxRedirection())
.andReturn();
String redirectedUrl = mvcResult.getResponse().getRedirectedUrl();
String expectedRedirectUri = authorizationRequestParameters.getFirst(OAuth2ParameterNames.REDIRECT_URI);
assertThat(redirectedUrl).matches(expectedRedirectUri + "\\?code=.{15,}&state=state");
String authorizationCode = extractParameterFromRedirectUri(redirectedUrl, "code");
OAuth2Authorization authorization = this.authorizationService.findByToken(authorizationCode,
AUTHORIZATION_CODE_TOKEN_TYPE);
mvcResult = this.mvc
.perform(post(DEFAULT_TOKEN_ENDPOINT_URI).params(getTokenRequestParameters(registeredClient, authorization))
.header(HttpHeaders.AUTHORIZATION,
"Basic " + encodeBasicAuth(registeredClient.getClientId(), registeredClient.getClientSecret())))
.andExpect(status().isOk())
.andExpect(header().string(HttpHeaders.CACHE_CONTROL, containsString("no-store")))
.andExpect(header().string(HttpHeaders.PRAGMA, containsString("no-cache")))
.andExpect(jsonPath("$.access_token").isNotEmpty())
.andExpect(jsonPath("$.token_type").isNotEmpty())
.andExpect(jsonPath("$.expires_in").isNotEmpty())
.andExpect(jsonPath("$.refresh_token").isNotEmpty())
.andExpect(jsonPath("$.scope").isNotEmpty())
.andExpect(jsonPath("$.id_token").isNotEmpty())
.andReturn();
MockHttpServletResponse servletResponse = mvcResult.getResponse();
MockClientHttpResponse httpResponse = new MockClientHttpResponse(servletResponse.getContentAsByteArray(),
HttpStatus.valueOf(servletResponse.getStatus()));
OAuth2AccessTokenResponse accessTokenResponse = accessTokenHttpResponseConverter
.read(OAuth2AccessTokenResponse.class, httpResponse);
Jwt idToken = this.jwtDecoder
.decode((String) accessTokenResponse.getAdditionalParameters().get(OidcParameterNames.ID_TOKEN));
// Assert user authorities was propagated as claim in ID Token
List<String> authoritiesClaim = idToken.getClaim(AUTHORITIES_CLAIM);
Authentication principal = authorization.getAttribute(Principal.class.getName());
Set<String> userAuthorities = new HashSet<>();
for (GrantedAuthority authority : principal.getAuthorities()) {
userAuthorities.add(authority.getAuthority());
}
assertThat(authoritiesClaim).containsExactlyInAnyOrderElementsOf(userAuthorities);
// Assert sid claim was added in ID Token
assertThat(idToken.<String>getClaim("sid")).isNotNull();
}
// gh-1224
@Test
public void requestWhenRefreshTokenRequestThenIdTokenContainsSidClaim() throws Exception {
this.spring.register(AuthorizationServerConfiguration.class).autowire();
RegisteredClient registeredClient = TestRegisteredClients.registeredClient().scope(OidcScopes.OPENID).build();
this.registeredClientRepository.save(registeredClient);
MultiValueMap<String, String> authorizationRequestParameters = getAuthorizationRequestParameters(
registeredClient);
MvcResult mvcResult = this.mvc
.perform(get(DEFAULT_AUTHORIZATION_ENDPOINT_URI).queryParams(authorizationRequestParameters)
.with(user("user").roles("A", "B")))
.andExpect(status().is3xxRedirection())
.andReturn();
String redirectedUrl = mvcResult.getResponse().getRedirectedUrl();
String expectedRedirectUri = authorizationRequestParameters.getFirst(OAuth2ParameterNames.REDIRECT_URI);
assertThat(redirectedUrl).matches(expectedRedirectUri + "\\?code=.{15,}&state=state");
String authorizationCode = extractParameterFromRedirectUri(redirectedUrl, "code");
OAuth2Authorization authorization = this.authorizationService.findByToken(authorizationCode,
AUTHORIZATION_CODE_TOKEN_TYPE);
mvcResult = this.mvc
.perform(post(DEFAULT_TOKEN_ENDPOINT_URI).params(getTokenRequestParameters(registeredClient, authorization))
.header(HttpHeaders.AUTHORIZATION,
"Basic " + encodeBasicAuth(registeredClient.getClientId(), registeredClient.getClientSecret())))
.andExpect(status().isOk())
.andReturn();
MockHttpServletResponse servletResponse = mvcResult.getResponse();
MockClientHttpResponse httpResponse = new MockClientHttpResponse(servletResponse.getContentAsByteArray(),
HttpStatus.valueOf(servletResponse.getStatus()));
OAuth2AccessTokenResponse accessTokenResponse = accessTokenHttpResponseConverter
.read(OAuth2AccessTokenResponse.class, httpResponse);
Jwt idToken = this.jwtDecoder
.decode((String) accessTokenResponse.getAdditionalParameters().get(OidcParameterNames.ID_TOKEN));
String sidClaim = idToken.getClaim("sid");
assertThat(sidClaim).isNotNull();
// Refresh access token
mvcResult = this.mvc
.perform(post(DEFAULT_TOKEN_ENDPOINT_URI)
.param(OAuth2ParameterNames.GRANT_TYPE, AuthorizationGrantType.REFRESH_TOKEN.getValue())
.param(OAuth2ParameterNames.REFRESH_TOKEN, accessTokenResponse.getRefreshToken().getTokenValue())
.header(HttpHeaders.AUTHORIZATION,
"Basic " + encodeBasicAuth(registeredClient.getClientId(), registeredClient.getClientSecret())))
.andExpect(status().isOk())
.andReturn();
servletResponse = mvcResult.getResponse();
httpResponse = new MockClientHttpResponse(servletResponse.getContentAsByteArray(),
HttpStatus.valueOf(servletResponse.getStatus()));
accessTokenResponse = accessTokenHttpResponseConverter.read(OAuth2AccessTokenResponse.class, httpResponse);
idToken = this.jwtDecoder
.decode((String) accessTokenResponse.getAdditionalParameters().get(OidcParameterNames.ID_TOKEN));
assertThat(idToken.<String>getClaim("sid")).isEqualTo(sidClaim);
}
@Test
public void requestWhenLogoutRequestThenLogout() throws Exception {
this.spring.register(AuthorizationServerConfiguration.class).autowire();
RegisteredClient registeredClient = TestRegisteredClients.registeredClient().scope(OidcScopes.OPENID).build();
this.registeredClientRepository.save(registeredClient);
String issuer = "https://example.com:8443/issuer1";
// Login
MultiValueMap<String, String> authorizationRequestParameters = getAuthorizationRequestParameters(
registeredClient);
MvcResult mvcResult = this.mvc
.perform(get(issuer.concat(DEFAULT_AUTHORIZATION_ENDPOINT_URI)).queryParams(authorizationRequestParameters)
.with(user("user")))
.andExpect(status().is3xxRedirection())
.andReturn();
MockHttpSession session = (MockHttpSession) mvcResult.getRequest().getSession();
assertThat(session.isNew()).isTrue();
String redirectedUrl = mvcResult.getResponse().getRedirectedUrl();
String authorizationCode = extractParameterFromRedirectUri(redirectedUrl, "code");
OAuth2Authorization authorization = this.authorizationService.findByToken(authorizationCode,
AUTHORIZATION_CODE_TOKEN_TYPE);
// Get ID Token
mvcResult = this.mvc
.perform(post(issuer.concat(DEFAULT_TOKEN_ENDPOINT_URI))
.params(getTokenRequestParameters(registeredClient, authorization))
.header(HttpHeaders.AUTHORIZATION,
"Basic " + encodeBasicAuth(registeredClient.getClientId(), registeredClient.getClientSecret())))
.andExpect(status().isOk())
.andReturn();
MockHttpServletResponse servletResponse = mvcResult.getResponse();
MockClientHttpResponse httpResponse = new MockClientHttpResponse(servletResponse.getContentAsByteArray(),
HttpStatus.valueOf(servletResponse.getStatus()));
OAuth2AccessTokenResponse accessTokenResponse = accessTokenHttpResponseConverter
.read(OAuth2AccessTokenResponse.class, httpResponse);
String idToken = (String) accessTokenResponse.getAdditionalParameters().get(OidcParameterNames.ID_TOKEN);
// Logout
mvcResult = this.mvc
.perform(post(issuer.concat(DEFAULT_OIDC_LOGOUT_ENDPOINT_URI)).param("id_token_hint", idToken)
.session(session))
.andExpect(status().is3xxRedirection())
.andReturn();
redirectedUrl = mvcResult.getResponse().getRedirectedUrl();
assertThat(redirectedUrl).matches("/");
assertThat(session.isInvalid()).isTrue();
}
@Test
public void requestWhenLogoutRequestWithOtherUsersIdTokenThenNotLogout() throws Exception {
this.spring.register(AuthorizationServerConfiguration.class).autowire();
// Login user1
RegisteredClient registeredClient1 = TestRegisteredClients.registeredClient().scope(OidcScopes.OPENID).build();
this.registeredClientRepository.save(registeredClient1);
MultiValueMap<String, String> authorizationRequestParameters = getAuthorizationRequestParameters(
registeredClient1);
MvcResult mvcResult = this.mvc
.perform(get(DEFAULT_AUTHORIZATION_ENDPOINT_URI).queryParams(authorizationRequestParameters)
.with(user("user1")))
.andExpect(status().is3xxRedirection())
.andReturn();
MockHttpSession user1Session = (MockHttpSession) mvcResult.getRequest().getSession();
assertThat(user1Session.isNew()).isTrue();
String redirectedUrl = mvcResult.getResponse().getRedirectedUrl();
String authorizationCode = extractParameterFromRedirectUri(redirectedUrl, "code");
OAuth2Authorization user1Authorization = this.authorizationService.findByToken(authorizationCode,
AUTHORIZATION_CODE_TOKEN_TYPE);
mvcResult = this.mvc
.perform(post(DEFAULT_TOKEN_ENDPOINT_URI)
.params(getTokenRequestParameters(registeredClient1, user1Authorization))
.header(HttpHeaders.AUTHORIZATION,
"Basic " + encodeBasicAuth(registeredClient1.getClientId(),
registeredClient1.getClientSecret())))
.andExpect(status().isOk())
.andReturn();
MockHttpServletResponse servletResponse = mvcResult.getResponse();
MockClientHttpResponse httpResponse = new MockClientHttpResponse(servletResponse.getContentAsByteArray(),
HttpStatus.valueOf(servletResponse.getStatus()));
OAuth2AccessTokenResponse accessTokenResponse = accessTokenHttpResponseConverter
.read(OAuth2AccessTokenResponse.class, httpResponse);
String user1IdToken = (String) accessTokenResponse.getAdditionalParameters().get(OidcParameterNames.ID_TOKEN);
// Login user2
RegisteredClient registeredClient2 = TestRegisteredClients.registeredClient2().scope(OidcScopes.OPENID).build();
this.registeredClientRepository.save(registeredClient2);
authorizationRequestParameters = getAuthorizationRequestParameters(registeredClient2);
mvcResult = this.mvc
.perform(get(DEFAULT_AUTHORIZATION_ENDPOINT_URI).queryParams(authorizationRequestParameters)
.with(user("user2")))
.andExpect(status().is3xxRedirection())
.andReturn();
MockHttpSession user2Session = (MockHttpSession) mvcResult.getRequest().getSession();
assertThat(user2Session.isNew()).isTrue();
redirectedUrl = mvcResult.getResponse().getRedirectedUrl();
authorizationCode = extractParameterFromRedirectUri(redirectedUrl, "code");
OAuth2Authorization user2Authorization = this.authorizationService.findByToken(authorizationCode,
AUTHORIZATION_CODE_TOKEN_TYPE);
mvcResult = this.mvc
.perform(post(DEFAULT_TOKEN_ENDPOINT_URI)
.params(getTokenRequestParameters(registeredClient2, user2Authorization))
.header(HttpHeaders.AUTHORIZATION,
"Basic " + encodeBasicAuth(registeredClient2.getClientId(),
registeredClient2.getClientSecret())))
.andExpect(status().isOk())
.andReturn();
servletResponse = mvcResult.getResponse();
httpResponse = new MockClientHttpResponse(servletResponse.getContentAsByteArray(),
HttpStatus.valueOf(servletResponse.getStatus()));
accessTokenResponse = accessTokenHttpResponseConverter.read(OAuth2AccessTokenResponse.class, httpResponse);
String user2IdToken = (String) accessTokenResponse.getAdditionalParameters().get(OidcParameterNames.ID_TOKEN);
// Attempt to log out user1 using user2's ID Token
mvcResult = this.mvc
.perform(post(DEFAULT_OIDC_LOGOUT_ENDPOINT_URI).param("id_token_hint", user2IdToken).session(user1Session))
.andExpect(status().isBadRequest())
.andExpect(status().reason("[invalid_token] OpenID Connect 1.0 Logout Request Parameter: sub"))
.andReturn();
assertThat(user1Session.isInvalid()).isFalse();
}
@Test
public void requestWhenCustomTokenGeneratorThenUsed() throws Exception {
this.spring.register(AuthorizationServerConfigurationWithTokenGenerator.class).autowire();
RegisteredClient registeredClient = TestRegisteredClients.registeredClient().scope(OidcScopes.OPENID).build();
this.registeredClientRepository.save(registeredClient);
OAuth2Authorization authorization = createAuthorization(registeredClient);
this.authorizationService.save(authorization);
this.mvc
.perform(post(DEFAULT_TOKEN_ENDPOINT_URI).params(getTokenRequestParameters(registeredClient, authorization))
.header(HttpHeaders.AUTHORIZATION,
"Basic " + encodeBasicAuth(registeredClient.getClientId(), registeredClient.getClientSecret())))
.andExpect(status().isOk());
verify(this.tokenGenerator, times(3)).generate(any());
}
// gh-1422
@Test
public void requestWhenAuthenticationRequestWithOfflineAccessScopeThenTokenResponseIncludesRefreshToken()
throws Exception {
this.spring.register(AuthorizationServerConfigurationWithCustomRefreshTokenGenerator.class).autowire();
RegisteredClient registeredClient = TestRegisteredClients.registeredClient()
.scope(OidcScopes.OPENID)
.scope("offline_access")
.build();
this.registeredClientRepository.save(registeredClient);
MultiValueMap<String, String> authorizationRequestParameters = getAuthorizationRequestParameters(
registeredClient);
MvcResult mvcResult = this.mvc
.perform(get(DEFAULT_AUTHORIZATION_ENDPOINT_URI).queryParams(authorizationRequestParameters)
.with(user("user")))
.andExpect(status().is3xxRedirection())
.andReturn();
String redirectedUrl = mvcResult.getResponse().getRedirectedUrl();
String expectedRedirectUri = authorizationRequestParameters.getFirst(OAuth2ParameterNames.REDIRECT_URI);
assertThat(redirectedUrl).matches(expectedRedirectUri + "\\?code=.{15,}&state=state");
String authorizationCode = extractParameterFromRedirectUri(redirectedUrl, "code");
OAuth2Authorization authorization = this.authorizationService.findByToken(authorizationCode,
AUTHORIZATION_CODE_TOKEN_TYPE);
this.mvc
.perform(post(DEFAULT_TOKEN_ENDPOINT_URI).params(getTokenRequestParameters(registeredClient, authorization))
.header(HttpHeaders.AUTHORIZATION,
"Basic " + encodeBasicAuth(registeredClient.getClientId(), registeredClient.getClientSecret())))
.andExpect(status().isOk())
.andExpect(header().string(HttpHeaders.CACHE_CONTROL, containsString("no-store")))
.andExpect(header().string(HttpHeaders.PRAGMA, containsString("no-cache")))
.andExpect(jsonPath("$.access_token").isNotEmpty())
.andExpect(jsonPath("$.token_type").isNotEmpty())
.andExpect(jsonPath("$.expires_in").isNotEmpty())
.andExpect(jsonPath("$.refresh_token").isNotEmpty())
.andExpect(jsonPath("$.scope").isNotEmpty())
.andExpect(jsonPath("$.id_token").isNotEmpty())
.andReturn();
}
// gh-1422
@Test
public void requestWhenAuthenticationRequestWithoutOfflineAccessScopeThenTokenResponseDoesNotIncludeRefreshToken()
throws Exception {
this.spring.register(AuthorizationServerConfigurationWithCustomRefreshTokenGenerator.class).autowire();
RegisteredClient registeredClient = TestRegisteredClients.registeredClient().scope(OidcScopes.OPENID).build();
this.registeredClientRepository.save(registeredClient);
MultiValueMap<String, String> authorizationRequestParameters = getAuthorizationRequestParameters(
registeredClient);
MvcResult mvcResult = this.mvc
.perform(get(DEFAULT_AUTHORIZATION_ENDPOINT_URI).queryParams(authorizationRequestParameters)
.with(user("user")))
.andExpect(status().is3xxRedirection())
.andReturn();
String redirectedUrl = mvcResult.getResponse().getRedirectedUrl();
String expectedRedirectUri = authorizationRequestParameters.getFirst(OAuth2ParameterNames.REDIRECT_URI);
assertThat(redirectedUrl).matches(expectedRedirectUri + "\\?code=.{15,}&state=state");
String authorizationCode = extractParameterFromRedirectUri(redirectedUrl, "code");
OAuth2Authorization authorization = this.authorizationService.findByToken(authorizationCode,
AUTHORIZATION_CODE_TOKEN_TYPE);
this.mvc
.perform(post(DEFAULT_TOKEN_ENDPOINT_URI).params(getTokenRequestParameters(registeredClient, authorization))
.header(HttpHeaders.AUTHORIZATION,
"Basic " + encodeBasicAuth(registeredClient.getClientId(), registeredClient.getClientSecret())))
.andExpect(status().isOk())
.andExpect(header().string(HttpHeaders.CACHE_CONTROL, containsString("no-store")))
.andExpect(header().string(HttpHeaders.PRAGMA, containsString("no-cache")))
.andExpect(jsonPath("$.access_token").isNotEmpty())
.andExpect(jsonPath("$.token_type").isNotEmpty())
.andExpect(jsonPath("$.expires_in").isNotEmpty())
.andExpect(jsonPath("$.refresh_token").doesNotExist())
.andExpect(jsonPath("$.scope").isNotEmpty())
.andExpect(jsonPath("$.id_token").isNotEmpty())
.andReturn();
}
private static OAuth2Authorization createAuthorization(RegisteredClient registeredClient) {
Map<String, Object> additionalParameters = new HashMap<>();
additionalParameters.put(PkceParameterNames.CODE_CHALLENGE, S256_CODE_CHALLENGE);
additionalParameters.put(PkceParameterNames.CODE_CHALLENGE_METHOD, "S256");
return TestOAuth2Authorizations.authorization(registeredClient, additionalParameters).build();
}
private static MultiValueMap<String, String> getAuthorizationRequestParameters(RegisteredClient registeredClient) {
MultiValueMap<String, String> parameters = new LinkedMultiValueMap<>();
parameters.set(OAuth2ParameterNames.RESPONSE_TYPE, OAuth2AuthorizationResponseType.CODE.getValue());
parameters.set(OAuth2ParameterNames.CLIENT_ID, registeredClient.getClientId());
parameters.set(OAuth2ParameterNames.REDIRECT_URI, registeredClient.getRedirectUris().iterator().next());
parameters.set(OAuth2ParameterNames.SCOPE,
StringUtils.collectionToDelimitedString(registeredClient.getScopes(), " "));
parameters.set(OAuth2ParameterNames.STATE, "state");
parameters.set(PkceParameterNames.CODE_CHALLENGE, S256_CODE_CHALLENGE);
parameters.set(PkceParameterNames.CODE_CHALLENGE_METHOD, "S256");
return parameters;
}
private static MultiValueMap<String, String> getTokenRequestParameters(RegisteredClient registeredClient,
OAuth2Authorization authorization) {
MultiValueMap<String, String> parameters = new LinkedMultiValueMap<>();
parameters.set(OAuth2ParameterNames.GRANT_TYPE, AuthorizationGrantType.AUTHORIZATION_CODE.getValue());
parameters.set(OAuth2ParameterNames.CODE,
authorization.getToken(OAuth2AuthorizationCode.class).getToken().getTokenValue());
parameters.set(OAuth2ParameterNames.REDIRECT_URI, registeredClient.getRedirectUris().iterator().next());
parameters.set(PkceParameterNames.CODE_VERIFIER, S256_CODE_VERIFIER);
return parameters;
}
private static String encodeBasicAuth(String clientId, String secret) throws Exception {
clientId = URLEncoder.encode(clientId, StandardCharsets.UTF_8.name());
secret = URLEncoder.encode(secret, StandardCharsets.UTF_8.name());
String credentialsString = clientId + ":" + secret;
byte[] encodedBytes = Base64.getEncoder().encode(credentialsString.getBytes(StandardCharsets.UTF_8));
return new String(encodedBytes, StandardCharsets.UTF_8);
}
private String extractParameterFromRedirectUri(String redirectUri, String param)
throws UnsupportedEncodingException {
String locationHeader = URLDecoder.decode(redirectUri, StandardCharsets.UTF_8.name());
UriComponents uriComponents = UriComponentsBuilder.fromUriString(locationHeader).build();
return uriComponents.getQueryParams().getFirst(param);
}
@EnableWebSecurity
@Configuration(proxyBeanMethods = false)
static
|
OidcTests
|
java
|
junit-team__junit5
|
junit-vintage-engine/src/testFixtures/java/org/junit/vintage/engine/samples/junit4/JUnit4ParameterizedTestCase.java
|
{
"start": 669,
"end": 1125
}
|
class ____ {
@Parameters
public static Object[] data() {
return new Object[] { 1, 2, 3 };
}
public JUnit4ParameterizedTestCase(int i) {
}
@Test
public void test1() {
fail("this test should fail");
}
@Test
public void endingIn_test1() {
fail("this test should fail");
}
@Test
public void test1_atTheBeginning() {
fail("this test should fail");
}
@Test
public void test2() {
/* always succeeds */
}
}
|
JUnit4ParameterizedTestCase
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/amazonbedrock/embeddings/AmazonBedrockEmbeddingsTaskSettingsTests.java
|
{
"start": 1234,
"end": 4872
}
|
class ____ extends AbstractBWCWireSerializationTestCase<AmazonBedrockEmbeddingsTaskSettings> {
public static AmazonBedrockEmbeddingsTaskSettings emptyTaskSettings() {
return AmazonBedrockEmbeddingsTaskSettings.EMPTY;
}
public static AmazonBedrockEmbeddingsTaskSettings randomTaskSettings() {
var inputType = randomBoolean() ? randomWithoutUnspecified() : null;
var truncation = randomBoolean() ? randomFrom(CohereTruncation.values()) : null;
return new AmazonBedrockEmbeddingsTaskSettings(truncation);
}
public static AmazonBedrockEmbeddingsTaskSettings mutateTaskSettings(AmazonBedrockEmbeddingsTaskSettings instance) {
return randomValueOtherThanMany(
v -> Objects.equals(instance, v) || (instance.cohereTruncation() != null && v.cohereTruncation() == null),
AmazonBedrockEmbeddingsTaskSettingsTests::randomTaskSettings
);
}
@Override
protected AmazonBedrockEmbeddingsTaskSettings mutateInstanceForVersion(
AmazonBedrockEmbeddingsTaskSettings instance,
TransportVersion version
) {
return instance;
}
@Override
protected Writeable.Reader<AmazonBedrockEmbeddingsTaskSettings> instanceReader() {
return AmazonBedrockEmbeddingsTaskSettings::new;
}
@Override
protected AmazonBedrockEmbeddingsTaskSettings createTestInstance() {
return randomTaskSettings();
}
@Override
protected AmazonBedrockEmbeddingsTaskSettings mutateInstance(AmazonBedrockEmbeddingsTaskSettings instance) throws IOException {
return mutateTaskSettings(instance);
}
public void testEmpty() {
assertTrue(emptyTaskSettings().isEmpty());
assertTrue(AmazonBedrockEmbeddingsTaskSettings.fromMap(null).isEmpty());
assertTrue(AmazonBedrockEmbeddingsTaskSettings.fromMap(Map.of()).isEmpty());
}
public static Map<String, Object> mutableMap(String key, Enum<?> value) {
return new HashMap<>(Map.of(key, value.toString()));
}
public void testValidCohereTruncations() {
for (var expectedCohereTruncation : CohereTruncation.ALL) {
var map = mutableMap(TRUNCATE_FIELD, expectedCohereTruncation);
var taskSettings = AmazonBedrockEmbeddingsTaskSettings.fromMap(map);
assertFalse(taskSettings.isEmpty());
assertThat(taskSettings.cohereTruncation(), equalTo(expectedCohereTruncation));
}
}
public void testGarbageCohereTruncations() {
var map = new HashMap<String, Object>(Map.of(TRUNCATE_FIELD, "oiuesoirtuoawoeirha"));
assertThrows(ValidationException.class, () -> AmazonBedrockEmbeddingsTaskSettings.fromMap(map));
}
public void testXContent() throws IOException {
var taskSettings = randomTaskSettings();
var taskSettingsAsMap = toMap(taskSettings);
var roundTripTaskSettings = AmazonBedrockEmbeddingsTaskSettings.fromMap(new HashMap<>(taskSettingsAsMap));
assertThat(roundTripTaskSettings, equalTo(taskSettings));
}
public static Map<String, Object> toMap(AmazonBedrockEmbeddingsTaskSettings taskSettings) throws IOException {
try (var builder = JsonXContent.contentBuilder()) {
taskSettings.toXContent(builder, ToXContent.EMPTY_PARAMS);
var taskSettingsBytes = Strings.toString(builder).getBytes(StandardCharsets.UTF_8);
try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, taskSettingsBytes)) {
return parser.map();
}
}
}
}
|
AmazonBedrockEmbeddingsTaskSettingsTests
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/jointable/Person.java
|
{
"start": 398,
"end": 968
}
|
class ____ {
private Long id;
private Address address;
private Set<Address> addresses;
@Id
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
@ManyToOne
@JoinTable( name = "SOME_OTHER_TABLE" )
public Address getAddress() {
return address;
}
public void setAddress(Address address) {
this.address = address;
}
@OneToMany
@JoinTable( name = "SOME_OTHER_TABLE2" )
public Set<Address> getAddresses() {
return addresses;
}
public void setAddresses(Set<Address> addresses) {
this.addresses = addresses;
}
}
|
Person
|
java
|
micronaut-projects__micronaut-core
|
http-client-tck/src/main/java/io/micronaut/http/client/tck/tests/HttpMethodPostTest.java
|
{
"start": 2076,
"end": 2348
}
|
class ____ {
@Post()
String response() {
return "ok";
}
@Post("/object-body")
String person(@Body Person person) {
return person.getName() + ":" + person.getAge();
}
}
}
|
HttpMethodPostTestController
|
java
|
reactor__reactor-core
|
reactor-core/src/test/java/reactor/core/publisher/SinkManyEmitterProcessorTest.java
|
{
"start": 18043,
"end": 29972
}
|
class ____ implements UncaughtExceptionHandler {
@Override
public void uncaughtException(Thread t, Throwable e) {
lastException = e;
}
}
public MyThread(SinkManyEmitterProcessor<String> processor, CyclicBarrier barrier, int n, int index) {
this.processor = processor.log("consuming."+index);
this.barrier = barrier;
this.n = n;
setUncaughtExceptionHandler(new MyUncaughtExceptionHandler());
}
@Override
public void run() {
try {
doRun();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public void doRun() throws Exception {
AssertSubscriber<String> subscriber = AssertSubscriber.create(5);
processor.subscribe(subscriber);
barrier.await();
subscriber.request(3);
subscriber.request(4);
subscriber.request(1);
subscriber
.await()
.assertValueCount(n)
.assertComplete();
}
public @Nullable Throwable getLastException() {
return lastException;
}
}
@Test
@Disabled
void testRacing() throws Exception {
int N_THREADS = 3;
int N_ITEMS = 8;
SinkManyEmitterProcessor<String> processor = new SinkManyEmitterProcessor<>(true, 4);
List<String> data = new ArrayList<>();
for (int i = 1; i <= N_ITEMS; i++) {
data.add(String.valueOf(i));
}
Flux.fromIterable(data)
.log("publishing")
.subscribe(processor);
CyclicBarrier barrier = new CyclicBarrier(N_THREADS);
MyThread threads[] = new MyThread[N_THREADS];
for (int i = 0; i < N_THREADS; i++) {
threads[i] = new MyThread(processor, barrier, N_ITEMS, i);
threads[i].start();
}
for (int j = 0; j < N_THREADS; j++) {
threads[j].join();
Throwable lastException = threads[j].getLastException();
if (lastException != null) {
lastException.printStackTrace();
fail("Should not have encounterd exception");
}
}
}
@Test
void testThreadAffinity() throws InterruptedException {
int count = 10;
Scheduler[] schedulers = new Scheduler[4];
CountDownLatch[] latches = new CountDownLatch[schedulers.length];
for (int i = 0; i < schedulers.length; i++) {
schedulers[i] = Schedulers.newSingle("scheduler" + i + '-');
int expectedCount = i == 1 ? count * 2 : count;
latches[i] = new CountDownLatch(expectedCount);
}
SinkManyEmitterProcessor<Integer> processor = new SinkManyEmitterProcessor<>(true, Queues.SMALL_BUFFER_SIZE);
processor.publishOn(schedulers[0])
.share();
processor.publishOn(schedulers[1])
.subscribe(i -> {
assertThat(Thread.currentThread().getName().contains("scheduler1")).isTrue();
latches[1].countDown();
});
for (int i = 0; i < count; i++)
processor.onNext(i);
processor.publishOn(schedulers[2])
.map(i -> {
assertThat(Thread.currentThread().getName().contains("scheduler2")).isTrue();
latches[2].countDown();
return i;
})
.publishOn(schedulers[3])
.doOnNext(i -> {
assertThat(Thread.currentThread().getName().contains("scheduler3")).isTrue();
latches[3].countDown();
})
.subscribe();
for (int i = 0; i < count; i++)
processor.onNext(i);
processor.onComplete();
for (int i = 1; i < latches.length; i++)
assertThat(latches[i].await(5, TimeUnit.SECONDS)).isTrue();
assertThat(latches[0].getCount()).isEqualTo(count);
}
@Test
void createDefault() {
SinkManyEmitterProcessor<Integer> processor = new SinkManyEmitterProcessor<>(true, Queues.SMALL_BUFFER_SIZE);
assertProcessor(processor, null, null);
}
@Test
void createOverrideBufferSize() {
int bufferSize = 1024;
SinkManyEmitterProcessor<Integer> processor = new SinkManyEmitterProcessor<>(true, bufferSize);
assertProcessor(processor, bufferSize, null);
}
@Test
void createOverrideAutoCancel() {
boolean autoCancel = false;
SinkManyEmitterProcessor<Integer> processor = new SinkManyEmitterProcessor<>(autoCancel, Queues.SMALL_BUFFER_SIZE);
assertProcessor(processor, null, autoCancel);
}
@Test
void createOverrideAll() {
int bufferSize = 1024;
boolean autoCancel = false;
SinkManyEmitterProcessor<Integer> processor = new SinkManyEmitterProcessor<>(autoCancel, bufferSize);
assertProcessor(processor, bufferSize, autoCancel);
}
void assertProcessor(SinkManyEmitterProcessor<Integer> processor,
@Nullable Integer bufferSize,
@Nullable Boolean autoCancel) {
int expectedBufferSize = bufferSize != null ? bufferSize : Queues.SMALL_BUFFER_SIZE;
boolean expectedAutoCancel = autoCancel != null ? autoCancel : true;
assertThat(processor.prefetch).isEqualTo(expectedBufferSize);
assertThat(processor.autoCancel).isEqualTo(expectedAutoCancel);
}
@Test
void scanMain() {
SinkManyEmitterProcessor<Integer> test = new SinkManyEmitterProcessor<>(true, 123);
assertThat(test.scan(BUFFERED)).isEqualTo(0);
assertThat(test.scan(CANCELLED)).isFalse();
assertThat(test.scan(PREFETCH)).isEqualTo(123);
assertThat(test.scan(CAPACITY)).isEqualTo(123);
Disposable d1 = test.subscribe();
test.tryEmitNext(2).orThrow();
test.tryEmitNext(3).orThrow();
test.tryEmitNext(4).orThrow();
assertThat(test.scan(BUFFERED)).isEqualTo(0);
AtomicReference<Subscription> d2 = new AtomicReference<>();
test.subscribe(new CoreSubscriber<Integer>() {
@Override
public void onSubscribe(Subscription s) {
d2.set(s);
}
@Override
public void onNext(Integer integer) {
}
@Override
public void onError(Throwable t) {
}
@Override
public void onComplete() {
}
});
test.tryEmitNext(5).orThrow();
test.tryEmitNext(6).orThrow();
test.tryEmitNext(7).orThrow();
assertThat(test.scan(BUFFERED)).isEqualTo(3);
assertThat(test.scan(TERMINATED)).isFalse();
test.tryEmitComplete().orThrow();
assertThat(test.scan(TERMINATED)).isFalse();
d1.dispose();
d2.get().cancel();
assertThat(test.scan(TERMINATED)).isTrue();
//other values
assertThat(test.scan(Scannable.Attr.PARENT)).isNotNull();
assertThat(test.scan(Attr.ERROR)).isNull();
}
@Test
void scanMainCancelled() {
SinkManyEmitterProcessor<?> test = new SinkManyEmitterProcessor<>(true, Queues.SMALL_BUFFER_SIZE);
test.subscribe().dispose();
assertThat(test.scan(CANCELLED)).isTrue();
assertThat(test.isCancelled()).isTrue();
}
@Test
void scanMainError() {
SinkManyEmitterProcessor<?> test = new SinkManyEmitterProcessor<>(true, Queues.SMALL_BUFFER_SIZE);
test.tryEmitError(new IllegalStateException("boom")).orThrow();
assertThat(test.scan(TERMINATED)).as("terminated").isTrue();
assertThat(test.scan(Attr.ERROR)).hasMessage("boom");
}
@Test
void inners() {
Sinks.Many<Integer> sink = new SinkManyEmitterProcessor<>(true, Queues.SMALL_BUFFER_SIZE);
CoreSubscriber<Integer> notScannable = new BaseSubscriber<Integer>() {};
InnerConsumer<Integer> scannable = new LambdaSubscriber<>(null, null, null, null);
assertThat(sink.inners()).as("before subscriptions").isEmpty();
sink.asFlux().subscribe(notScannable);
sink.asFlux().subscribe(scannable);
assertThat(sink.inners())
.asInstanceOf(InstanceOfAssertFactories.LIST)
.as("after subscriptions")
.hasSize(2)
.extracting(l -> (Object) ((SinkManyEmitterProcessor.EmitterInner<?>) l).actual)
.containsExactly(notScannable, scannable);
}
//see https://github.com/reactor/reactor-core/issues/1528
@Test
void syncFusionFromInfiniteStream() {
final Flux<Integer> flux = Flux.fromStream(Stream.iterate(0, i -> i + 1));
final SinkManyEmitterProcessor<Integer> processor = new SinkManyEmitterProcessor<>(true, Queues.SMALL_BUFFER_SIZE);
StepVerifier.create(processor)
.then(() -> flux.subscribe(processor))
.thenConsumeWhile(i -> i < 10)
.expectNextCount(10)
.thenCancel()
.verify(Duration.ofSeconds(4));
}
//see https://github.com/reactor/reactor-core/issues/1528
@Test
void syncFusionFromInfiniteStreamAndTake() {
final Flux<Integer> flux = Flux.fromStream(Stream.iterate(0, i -> i + 1))
.take(10, false);
final SinkManyEmitterProcessor<Integer> processor = new SinkManyEmitterProcessor<>(true, Queues.SMALL_BUFFER_SIZE);
flux.subscribe(processor);
StepVerifier.create(processor)
.expectNextCount(10)
.expectComplete()
.verify(Duration.ofSeconds(4));
}
@Test
void removeUnknownInnerIgnored() {
SinkManyEmitterProcessor<Integer> processor = new SinkManyEmitterProcessor<>(true, Queues.SMALL_BUFFER_SIZE);
SinkManyEmitterProcessor.EmitterInner<Integer> inner = new SinkManyEmitterProcessor.EmitterInner<>(null, processor);
SinkManyEmitterProcessor.EmitterInner<Integer> notInner = new SinkManyEmitterProcessor.EmitterInner<>(null, processor);
processor.add(inner);
assertThat(processor.subscribers).as("adding inner").hasSize(1);
processor.remove(notInner);
assertThat(processor.subscribers).as("post remove notInner").hasSize(1);
processor.remove(inner);
assertThat(processor.subscribers).as("post remove inner").isEmpty();
}
@Test
void currentContextDelegatesToFirstSubscriber() {
AssertSubscriber<Object> testSubscriber1 = new AssertSubscriber<>(Context.of("key", "value1"));
AssertSubscriber<Object> testSubscriber2 = new AssertSubscriber<>(Context.of("key", "value2"));
SinkManyEmitterProcessor<Object> sinkManyEmitterProcessor = new SinkManyEmitterProcessor<>(false, 1);
sinkManyEmitterProcessor.subscribe(testSubscriber1);
sinkManyEmitterProcessor.subscribe(testSubscriber2);
Context processorContext = sinkManyEmitterProcessor.currentContext();
assertThat(processorContext.getOrDefault("key", "EMPTY")).isEqualTo("value1");
}
@Test
void tryEmitNextWithNoSubscriberFailsOnlyIfNoCapacity() {
SinkManyEmitterProcessor<Integer> sinkManyEmitterProcessor = new SinkManyEmitterProcessor<>(true, 1);
assertThat(sinkManyEmitterProcessor.tryEmitNext(1)).isEqualTo(Sinks.EmitResult.OK);
assertThat(sinkManyEmitterProcessor.tryEmitNext(2)).isEqualTo(Sinks.EmitResult.FAIL_ZERO_SUBSCRIBER);
StepVerifier.create(sinkManyEmitterProcessor)
.expectNext(1)
.then(() -> sinkManyEmitterProcessor.tryEmitComplete().orThrow())
.verifyComplete();
}
@Test
void tryEmitNextWithNoSubscriberFailsIfNoCapacityAndAllSubscribersCancelledAndNoAutoTermination() {
//in case of autoCancel, removing all subscribers results in TERMINATED rather than EMPTY
SinkManyEmitterProcessor<Integer> sinkManyEmitterProcessor = new SinkManyEmitterProcessor<>(false, 1);
AssertSubscriber<Integer> testSubscriber = AssertSubscriber.create();
sinkManyEmitterProcessor.subscribe(testSubscriber);
assertThat(sinkManyEmitterProcessor.tryEmitNext(1)).as("emit 1, with subscriber").isEqualTo(
Sinks.EmitResult.OK);
assertThat(sinkManyEmitterProcessor.tryEmitNext(2)).as("emit 2, with subscriber").isEqualTo(
Sinks.EmitResult.OK);
assertThat(sinkManyEmitterProcessor.tryEmitNext(3)).as("emit 3, with subscriber").isEqualTo(
Sinks.EmitResult.OK);
testSubscriber.cancel();
assertThat(sinkManyEmitterProcessor.tryEmitNext(4)).as("emit 4, without subscriber, buffered").isEqualTo(
Sinks.EmitResult.OK);
assertThat(sinkManyEmitterProcessor.tryEmitNext(5)).as("emit 5, without subscriber").isEqualTo(
Sinks.EmitResult.FAIL_ZERO_SUBSCRIBER);
}
@Test
void emitNextWithNoSubscriberNoCapacityKeepsSinkOpenWithBuffer() {
SinkManyEmitterProcessor<Integer> sinkManyEmitterProcessor = new SinkManyEmitterProcessor<>(false, 1);
//fill the buffer
assertThat(sinkManyEmitterProcessor.tryEmitNext(1)).as("filling buffer").isEqualTo(Sinks.EmitResult.OK);
//test proper
//this is "discarded" but no hook can be invoked, so effectively dropped on the floor
sinkManyEmitterProcessor.emitNext(2, FAIL_FAST);
StepVerifier.create(sinkManyEmitterProcessor)
.expectNext(1)
.expectTimeout(Duration.ofSeconds(1))
.verify();
}
}
|
MyUncaughtExceptionHandler
|
java
|
spring-projects__spring-boot
|
smoke-test/spring-boot-smoke-test-secure/src/main/java/smoketest/secure/SampleService.java
|
{
"start": 845,
"end": 1111
}
|
class ____ {
@Secured("ROLE_USER")
public String secure() {
return "Hello Security";
}
@PreAuthorize("true")
public String authorized() {
return "Hello World";
}
@PreAuthorize("false")
public String denied() {
return "Goodbye World";
}
}
|
SampleService
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/bugs/_577/Issue577Test.java
|
{
"start": 484,
"end": 745
}
|
class ____ {
@ProcessorTest
public void shouldMapTwoArraysToCollections() {
Source source = new Source();
Target target = SourceTargetMapper.INSTANCE.sourceToTarget( source );
assertThat( target ).isNotNull();
}
}
|
Issue577Test
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/main/java/io/vertx/core/impl/VertxImpl.java
|
{
"start": 46730,
"end": 49376
}
|
interface ____ should not be used, instead {@link ContextInternal#dispatch(Object, io.vertx.core.Handler)}
* shall be used.
*
* @param prev the previous context thread to restore, might be {@code null}
*/
void endDispatch(ContextInternal prev) {
Thread thread = Thread.currentThread();
if (thread instanceof VertxThread) {
VertxThread vertxThread = (VertxThread) thread;
vertxThread.context = prev;
if (!disableTCCL) {
ClassLoader tccl;
if (prev == null) {
tccl = vertxThread.topLevelTCCL;
vertxThread.topLevelTCCL = null;
} else {
tccl = prev.classLoader();
}
Thread.currentThread().setContextClassLoader(tccl);
}
if (!ContextImpl.DISABLE_TIMINGS) {
vertxThread.executeEnd();
}
} else {
endDispatch2(prev);
}
}
private void endDispatch2(ContextInternal prev) {
ClassLoader tccl;
ContextDispatch current = nonVertxContextDispatch.get();
if (prev != null) {
current.context = prev;
tccl = prev.classLoader();
} else {
nonVertxContextDispatch.remove();
tccl = current.topLevelTCCL;
}
if (!disableTCCL) {
Thread.currentThread().setContextClassLoader(tccl);
}
}
public <C> C createSharedResource(String resourceKey, String resourceName, CloseFuture closeFuture, Function<CloseFuture, C> supplier) {
return SharedResourceHolder.createSharedResource(this, resourceKey, resourceName, closeFuture, supplier);
}
void duplicate(ContextBase src, ContextBase dst) {
for (int i = 0;i < contextLocals.length;i++) {
ContextLocalImpl<?> contextLocal = (ContextLocalImpl<?>) contextLocals[i];
Object local = AccessMode.CONCURRENT.get(src.locals, i);
if (local != null) {
local = ((Function)contextLocal.duplicator).apply(local);
}
AccessMode.CONCURRENT.put(dst.locals, i, local);
}
}
/**
* Reads the version from the {@code vertx-version.txt} file.
*
* @return the version
*/
public static String version() {
if (version != null) {
return version;
}
try (InputStream is = VertxImpl.class.getClassLoader().getResourceAsStream("META-INF/vertx/vertx-version.txt")) {
if (is == null) {
throw new IllegalStateException("Cannot find vertx-version.txt on classpath");
}
try (Scanner scanner = new Scanner(is, StandardCharsets.UTF_8).useDelimiter("\\A")) {
return version = scanner.hasNext() ? scanner.next().trim() : "";
}
} catch (IOException e) {
throw new IllegalStateException(e.getMessage());
}
}
}
|
that
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
|
{
"start": 13892,
"end": 14646
}
|
class ____ implements RemoteIterator<FileStatus> {
private final RemoteIterator<FileStatus> remote;
StoppableRemoteIterator(RemoteIterator<FileStatus> remote) {
this.remote = remote;
}
@Override public boolean hasNext() throws IOException {
return !stopExecutors.get() && remote.hasNext();
}
@Override public FileStatus next() throws IOException {
return remote.next();
}
}
@VisibleForTesting
Map<String, Set<Service>> getSyncUserServices() {
return syncUserServices;
}
@VisibleForTesting
int getBadFileNameExtensionSkipCounter() {
return badFileNameExtensionSkipCounter;
}
@VisibleForTesting
int getBadDirSkipCounter() {
return badDirSkipCounter;
}
}
|
StoppableRemoteIterator
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/aot/samples/common/GreetingService.java
|
{
"start": 761,
"end": 813
}
|
interface ____ {
String greeting();
}
|
GreetingService
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/test/SymbolTableDupTest.java
|
{
"start": 248,
"end": 2909
}
|
class ____ extends TestCase {
private HashMap<Integer, Integer> map = new HashMap<Integer, Integer>();
private Set<Integer> dupHashCodes = new HashSet<Integer>();
private HashMap<Integer, List<String>> dupList = new HashMap<Integer, List<String>>();
private final int VALUE = 114788;
public void test_0() throws Exception {
int len = 3;
char[] chars = new char[len];
tryBit(chars, len);
tryBit2(chars, len);
// tryBit3(chars, len);
// for (Map.Entry<Integer, List<String>> entry : dupList.entrySet()) {
// System.out.println(entry.getKey() + " : " + entry.getValue());
// }
}
private void tryBit(char[] chars, int i) {
char startChar = 'A';
char endChar = 'z';
for (char j = startChar; j <= endChar; j++) {
chars[i - 1] = j;
if (i > 1) {
tryBit(chars, i - 1);
} else {
test(chars);
}
}
}
private void tryBit2(char[] chars, int i) {
char startChar = 'A';
char endChar = 'z';
for (char j = startChar; j <= endChar; j++) {
chars[i - 1] = j;
if (i > 1) {
tryBit2(chars, i - 1);
} else {
test2(chars);
}
}
}
private void tryBit3(char[] chars, int i) {
char startChar = 'A';
char endChar = 'z';
for (char j = startChar; j <= endChar; j++) {
chars[i - 1] = j;
if (i > 1) {
tryBit3(chars, i - 1);
} else {
test3(chars);
}
}
}
private void test3(char[] chars) {
int hash = SymbolTable.hash(chars, 0, chars.length);
if (hash == VALUE) {
System.out.println(new String(chars));
}
}
private void test2(char[] chars) {
int hash = SymbolTable.hash(chars, 0, chars.length);
if (dupHashCodes.contains(hash)) {
List<String> list = dupList.get(hash);
if (list == null) {
list = new ArrayList<String>();
dupList.put(hash, list);
}
list.add(new String(chars));
}
}
private void test(char[] chars) {
int hash = SymbolTable.hash(chars, 0, chars.length);
Integer count = map.get(hash);
if (count != null) {
dupHashCodes.add(hash);
map.put(hash, count.intValue() + 1);
} else {
map.put(hash, 1);
}
}
}
|
SymbolTableDupTest
|
java
|
quarkusio__quarkus
|
core/deployment/src/main/java/io/quarkus/deployment/pkg/steps/NativeImageBuildRunner.java
|
{
"start": 6455,
"end": 6677
}
|
class ____ {
private final int exitCode;
public Result(int exitCode) {
this.exitCode = exitCode;
}
public int getExitCode() {
return exitCode;
}
}
}
|
Result
|
java
|
apache__camel
|
components/camel-aws/camel-aws2-ddb/src/main/java/org/apache/camel/component/aws2/ddbstream/Ddb2StreamEndpoint.java
|
{
"start": 1685,
"end": 4624
}
|
class ____ extends ScheduledPollEndpoint implements EndpointServiceLocation {
@UriParam
Ddb2StreamConfiguration configuration;
private DynamoDbStreamsClient ddbStreamClient;
public Ddb2StreamEndpoint(String uri, Ddb2StreamConfiguration configuration, Ddb2StreamComponent component) {
super(uri, component);
this.configuration = configuration;
}
@Override
public Producer createProducer() throws Exception {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public Consumer createConsumer(Processor processor) throws Exception {
Ddb2StreamConsumer consumer = new Ddb2StreamConsumer(this, processor);
consumer.setSchedulerProperties(consumer.getEndpoint().getSchedulerProperties());
configureConsumer(consumer);
return consumer;
}
@Override
public void doStart() throws Exception {
super.doStart();
ddbStreamClient = configuration.getAmazonDynamoDbStreamsClient() != null
? configuration.getAmazonDynamoDbStreamsClient()
: Ddb2StreamClientFactory.getDynamoDBStreamClient(configuration).getDynamoDBStreamClient();
}
@Override
public void doStop() throws Exception {
if (ObjectHelper.isEmpty(configuration.getAmazonDynamoDbStreamsClient())) {
if (ddbStreamClient != null) {
ddbStreamClient.close();
}
}
super.doStop();
}
@Override
public Ddb2StreamComponent getComponent() {
return (Ddb2StreamComponent) super.getComponent();
}
public Ddb2StreamConfiguration getConfiguration() {
return configuration;
}
public DynamoDbStreamsClient getClient() {
return ddbStreamClient;
}
@Override
public String toString() {
return "DdbStreamEndpoint{" + "tableName=" + configuration.getTableName()
+ ", amazonDynamoDbStreamsClient=[redacted], maxResultsPerRequest="
+ configuration.getMaxResultsPerRequest() + ", streamIteratorType=" + configuration.getStreamIteratorType()
+ ", uri=" + getEndpointUri() + '}';
}
@Override
public String getServiceUrl() {
if (!configuration.isOverrideEndpoint()) {
if (ObjectHelper.isNotEmpty(configuration.getRegion())) {
return configuration.getRegion();
}
} else if (ObjectHelper.isNotEmpty(configuration.getUriEndpointOverride())) {
return configuration.getUriEndpointOverride();
}
return null;
}
@Override
public String getServiceProtocol() {
return "dynamodb-stream";
}
@Override
public Map<String, String> getServiceMetadata() {
if (configuration.getTableName() != null) {
return Map.of("table", configuration.getTableName());
}
return null;
}
}
|
Ddb2StreamEndpoint
|
java
|
apache__kafka
|
streams/src/test/java/org/apache/kafka/streams/state/internals/ChangeLoggingTimestampedWindowBytesStoreTest.java
|
{
"start": 2090,
"end": 5636
}
|
class ____ {
private final byte[] value = {0};
private final byte[] valueAndTimestamp = {0, 0, 0, 0, 0, 0, 0, 42, 0};
private final Bytes bytesKey = Bytes.wrap(value);
@Mock
private WindowStore<Bytes, byte[]> inner;
@Mock
private ProcessorContextImpl context;
private ChangeLoggingTimestampedWindowBytesStore store;
private static final Position POSITION = Position.fromMap(mkMap(mkEntry("", mkMap(mkEntry(0, 1L)))));
@BeforeEach
public void setUp() {
store = new ChangeLoggingTimestampedWindowBytesStore(inner, false);
store.init(context, store);
}
@AfterEach
public void tearDown() {
verify(inner).init(context, store);
}
@Test
public void shouldDelegateInit() {
// testing the combination of setUp and tearDown
}
@Test
public void shouldLogPuts() {
final Bytes key = WindowKeySchema.toStoreKeyBinary(bytesKey, 0, 0);
when(inner.getPosition()).thenReturn(Position.emptyPosition());
when(context.recordContext()).thenReturn(new ProcessorRecordContext(0, 0, 0, "topic", new RecordHeaders()));
store.put(bytesKey, valueAndTimestamp, context.recordContext().timestamp());
verify(inner).put(bytesKey, valueAndTimestamp, 0);
verify(context).logChange(store.name(), key, value, 42, Position.emptyPosition());
}
@Test
public void shouldLogPutsWithPosition() {
final Bytes key = WindowKeySchema.toStoreKeyBinary(bytesKey, 0, 0);
when(inner.getPosition()).thenReturn(POSITION);
when(context.recordContext()).thenReturn(new ProcessorRecordContext(0, 0, 0, "topic", new RecordHeaders()));
store.put(bytesKey, valueAndTimestamp, context.recordContext().timestamp());
verify(inner).put(bytesKey, valueAndTimestamp, 0);
verify(context).logChange(store.name(), key, value, 42, POSITION);
}
@SuppressWarnings({"resource", "unused"})
@Test
public void shouldDelegateToUnderlyingStoreWhenFetching() {
try (final WindowStoreIterator<byte[]> unused = store.fetch(bytesKey, ofEpochMilli(0), ofEpochMilli(10))) {
verify(inner).fetch(bytesKey, 0, 10);
}
}
@SuppressWarnings({"resource", "unused"})
@Test
public void shouldDelegateToUnderlyingStoreWhenFetchingRange() {
try (final KeyValueIterator<Windowed<Bytes>, byte[]> unused = store.fetch(bytesKey, bytesKey, ofEpochMilli(0), ofEpochMilli(1))) {
verify(inner).fetch(bytesKey, bytesKey, 0, 1);
}
}
@Test
public void shouldRetainDuplicatesWhenSet() {
store = new ChangeLoggingTimestampedWindowBytesStore(inner, true);
store.init(context, store);
final Bytes key1 = WindowKeySchema.toStoreKeyBinary(bytesKey, 0, 1);
final Bytes key2 = WindowKeySchema.toStoreKeyBinary(bytesKey, 0, 2);
when(inner.getPosition()).thenReturn(Position.emptyPosition());
when(context.recordContext()).thenReturn(new ProcessorRecordContext(0, 0, 0, "topic", new RecordHeaders()));
store.put(bytesKey, valueAndTimestamp, context.recordContext().timestamp());
store.put(bytesKey, valueAndTimestamp, context.recordContext().timestamp());
verify(inner, times(2)).put(bytesKey, valueAndTimestamp, 0);
verify(context).logChange(store.name(), key1, value, 42L, Position.emptyPosition());
verify(context).logChange(store.name(), key2, value, 42L, Position.emptyPosition());
}
}
|
ChangeLoggingTimestampedWindowBytesStoreTest
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest-jackson/deployment/src/main/java/io/quarkus/resteasy/reactive/jackson/deployment/processor/ResteasyReactiveJacksonProcessor.java
|
{
"start": 28334,
"end": 32279
}
|
class ____ annotated with @CustomSerialization.");
} else {
result.add(new ResourceMethodCustomSerializationBuildItem(methodInfo, entry.getActualClassInfo(),
SecurityCustomSerialization.class));
}
}
if (needToDeleteCache.get()) {
typeToHasSecureField.clear();
typeToHasSecureField.putAll(getTypesWithSecureField());
}
}
if (!result.isEmpty()) {
for (ResourceMethodCustomSerializationBuildItem bi : result) {
producer.produce(bi);
}
}
}
private static ClassInfo getEffectiveClassInfo(Type type, IndexView indexView) {
if (type.kind() == Type.Kind.VOID) {
return null;
}
Type effectiveReturnType = getEffectiveType(type);
return effectiveReturnType == null ? null : indexView.getClassByName(effectiveReturnType.name());
}
private static Type getEffectiveType(Type type) {
Type effectiveReturnType = type;
if (effectiveReturnType.name().equals(ResteasyReactiveDotNames.REST_RESPONSE) ||
effectiveReturnType.name().equals(ResteasyReactiveDotNames.UNI) ||
effectiveReturnType.name().equals(ResteasyReactiveDotNames.COMPLETABLE_FUTURE) ||
effectiveReturnType.name().equals(ResteasyReactiveDotNames.COMPLETION_STAGE) ||
effectiveReturnType.name().equals(ResteasyReactiveDotNames.REST_MULTI) ||
effectiveReturnType.name().equals(ResteasyReactiveDotNames.MULTI)) {
if (effectiveReturnType.kind() != Type.Kind.PARAMETERIZED_TYPE) {
return null;
}
effectiveReturnType = type.asParameterizedType().arguments().get(0);
}
if (effectiveReturnType.name().equals(ResteasyReactiveDotNames.SET) ||
effectiveReturnType.name().equals(ResteasyReactiveDotNames.COLLECTION) ||
effectiveReturnType.name().equals(ResteasyReactiveDotNames.LIST)) {
effectiveReturnType = effectiveReturnType.asParameterizedType().arguments().get(0);
} else if (effectiveReturnType.name().equals(ResteasyReactiveDotNames.MAP)) {
effectiveReturnType = effectiveReturnType.asParameterizedType().arguments().get(1);
}
return effectiveReturnType;
}
private static Map<String, Boolean> getTypesWithSecureField() {
// if any of following types is detected as an endpoint return type or a field of endpoint return type,
// we always need to apply security serialization as any type can be represented with them
return Map.of(ResteasyReactiveDotNames.OBJECT.toString(), Boolean.TRUE, ResteasyReactiveDotNames.RESPONSE.toString(),
Boolean.TRUE);
}
private static boolean hasSecureFields(IndexView indexView, ClassInfo currentClassInfo,
Map<String, Boolean> typeToHasSecureField, AtomicBoolean needToDeleteCache,
Map<String, Type> typeParamIdentifierToParameterizedType) {
// use cached result if there is any
final String className = currentClassInfo.name().toString();
if (typeToHasSecureField.containsKey(className)) {
Boolean hasSecureFields = typeToHasSecureField.get(className);
if (hasSecureFields == null) {
// this is to avoid false negative for scenario like:
// when 'a' declares field of type 'b' which declares field of type 'a' and both 'a' and 'b'
// are returned from an endpoint and 'b' is detected based on 'a' and processed after 'a'
needToDeleteCache.set(true);
return false;
}
return hasSecureFields;
}
// prevent cyclic check of the same type
// for example when a field has a same type as the current
|
are
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/DocsV3Support.java
|
{
"start": 45310,
"end": 50565
}
|
class ____ extends DocsV3Support {
private final LogicalPlan command;
private List<EsqlFunctionRegistry.ArgSignature> args;
private final XPackLicenseState licenseState;
private final ObservabilityTier observabilityTier;
/** Used in CommandLicenseTests to generate Kibana docs with licensing information for commands */
public CommandsDocsSupport(
String name,
Class<?> testClass,
LogicalPlan command,
XPackLicenseState licenseState,
ObservabilityTier observabilityTier,
Callbacks callbacks
) {
super("commands", name, testClass, Set::of, callbacks);
this.command = command;
this.licenseState = licenseState;
this.observabilityTier = observabilityTier;
}
/** Used in LookupJoinTypesIT to generate table of supported types for join field */
public CommandsDocsSupport(
String name,
Class<?> testClass,
LogicalPlan command,
List<EsqlFunctionRegistry.ArgSignature> args,
Supplier<Set<TypeSignature>> signatures,
Callbacks callbacks
) {
super("commands", name, testClass, signatures, callbacks);
this.command = command;
this.args = args;
this.licenseState = null;
this.observabilityTier = null;
}
@Override
public void renderSignature() throws IOException {
// Unimplemented until we make command docs dynamically generated
}
@Override
public void renderDocs() throws Exception {
// Currently we only render either signatures or kibana definition files,
// but we could expand to rendering much more if we decide to
if (args != null) {
renderTypes(name, args);
}
if (licenseState != null) {
renderKibanaCommandDefinition();
}
}
void renderKibanaCommandDefinition() throws Exception {
try (XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint().lfAtEnd().startObject()) {
builder.field(
"comment",
"This is generated by ESQL’s DocsV3Support. Do not edit it. See ../README.md for how to regenerate it."
);
builder.field("type", "command");
builder.field("name", name);
License.OperationMode license = licenseState.getOperationMode();
if (license != null && license != License.OperationMode.BASIC) {
builder.field("license", license.toString());
}
if (observabilityTier != null && observabilityTier != ObservabilityTier.LOGS_ESSENTIALS) {
builder.field("observability_tier", observabilityTier.toString());
}
String rendered = Strings.toString(builder.endObject());
logger.info("Writing kibana command definition for [{}]:\n{}", name, rendered);
writeToTempKibanaDir("definition", "json", rendered);
}
}
@Override
void renderTypes(String name, List<EsqlFunctionRegistry.ArgSignature> args) throws IOException {
assert args.size() == 2;
StringBuilder header = new StringBuilder("| ");
StringBuilder separator = new StringBuilder("| ");
List<String> argNames = args.stream().map(EsqlFunctionRegistry.ArgSignature::name).toList();
for (String arg : argNames) {
header.append(arg).append(" | ");
separator.append("---").append(" | ");
}
Map<String, List<String>> compactedTable = new TreeMap<>();
for (TypeSignature sig : this.signatures.get()) {
if (shouldHideSignature(sig.argTypes(), sig.returnType())) {
continue;
}
String mainType = sig.argTypes().getFirst().dataType().esNameIfPossible();
String secondaryType = sig.argTypes().get(1).dataType().esNameIfPossible();
List<String> secondaryTypes = compactedTable.computeIfAbsent(mainType, (k) -> new ArrayList<>());
secondaryTypes.add(secondaryType);
}
List<String> table = new ArrayList<>();
for (Map.Entry<String, List<String>> sig : compactedTable.entrySet()) {
String row = "| " + sig.getKey() + " | " + String.join(", ", sig.getValue()) + " |";
table.add(row);
}
Collections.sort(table);
if (table.isEmpty()) {
logger.info("Warning: No table of types generated for [{}]", name);
return;
}
String rendered = DOCS_WARNING + """
**Supported types**
""" + header + "\n" + separator + "\n" + String.join("\n", table) + "\n\n";
logger.info("Writing function types for [{}]:\n{}", name, rendered);
writeToTempSnippetsDir("types", rendered);
}
}
public static
|
CommandsDocsSupport
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/runtime/operators/join/Int2HashJoinOperatorTestBase.java
|
{
"start": 16064,
"end": 16298
}
|
class ____ extends AbstractRichFunction implements JoinCondition {
@Override
public boolean apply(RowData in1, RowData in2) {
return true;
}
}
/** Test cond. */
public static
|
TrueCondition
|
java
|
apache__camel
|
test-infra/camel-test-infra-infinispan/src/main/java/org/apache/camel/test/infra/infinispan/services/InfinispanRemoteInfraService.java
|
{
"start": 944,
"end": 2009
}
|
class ____ implements InfinispanInfraService {
@Override
public void registerProperties() {
// NO-OP
}
@Override
public void initialize() {
registerProperties();
}
@Override
public void shutdown() {
// NO-OP
}
@Override
public String getServiceAddress() {
return System.getProperty(InfinispanProperties.SERVICE_ADDRESS);
}
@Override
public int port() {
String port = System.getProperty(InfinispanProperties.SERVICE_PORT);
if (port == null) {
return InfinispanProperties.DEFAULT_SERVICE_PORT;
}
return Integer.valueOf(port);
}
@Override
public String host() {
return System.getProperty(InfinispanProperties.SERVICE_HOST);
}
@Override
public String username() {
return System.getProperty(InfinispanProperties.SERVICE_USERNAME);
}
@Override
public String password() {
return System.getProperty(InfinispanProperties.SERVICE_PASSWORD);
}
}
|
InfinispanRemoteInfraService
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/engine/spi/SessionFactoryDelegatingImpl.java
|
{
"start": 2967,
"end": 11552
}
|
class ____ implements SessionFactoryImplementor, SessionFactory {
private final SessionFactoryImplementor delegate;
public SessionFactoryDelegatingImpl(SessionFactoryImplementor delegate) {
this.delegate = delegate;
}
protected SessionFactoryImplementor delegate() {
return delegate;
}
@Override
public SessionFactoryOptions getSessionFactoryOptions() {
return delegate.getSessionFactoryOptions();
}
@Override
public SessionBuilderImplementor withOptions() {
return delegate.withOptions();
}
@Override
public SessionImplementor openSession() throws HibernateException {
return delegate.openSession();
}
@Override
public Session getCurrentSession() throws HibernateException {
return delegate.getCurrentSession();
}
@Override
public StatelessSessionBuilder withStatelessOptions() {
return delegate.withStatelessOptions();
}
@Override
public StatelessSession openStatelessSession() {
return delegate.openStatelessSession();
}
@Override
public StatelessSession openStatelessSession(Connection connection) {
return delegate.openStatelessSession( connection );
}
@Override
public StatisticsImplementor getStatistics() {
return delegate.getStatistics();
}
@Override
public SchemaManager getSchemaManager() {
return delegate.getSchemaManager();
}
@Override
public RuntimeMetamodelsImplementor getRuntimeMetamodels() {
return delegate.getRuntimeMetamodels();
}
@Override
public EventEngine getEventEngine() {
return delegate.getEventEngine();
}
@Override
public void close() throws HibernateException {
delegate.close();
}
@Override
public boolean isClosed() {
return delegate.isClosed();
}
@Override
public CacheImplementor getCache() {
return delegate.getCache();
}
@Override
public PersistenceUnitUtil getPersistenceUnitUtil() {
return delegate.getPersistenceUnitUtil();
}
@Override
public PersistenceUnitTransactionType getTransactionType() {
return delegate.getTransactionType();
}
@Override
public void addNamedQuery(String name, Query query) {
delegate.addNamedQuery( name, query );
}
@Override
public <T> T unwrap(Class<T> cls) {
return delegate.unwrap( cls );
}
@Override
public <T> void addNamedEntityGraph(String graphName, EntityGraph<T> entityGraph) {
delegate.addNamedEntityGraph( graphName, entityGraph );
}
@Override
public void runInTransaction(Consumer<EntityManager> work) {
delegate.runInTransaction( work );
}
@Override
public <R> R callInTransaction(Function<EntityManager, R> work) {
return delegate.callInTransaction( work );
}
@Override
public Set<String> getDefinedFilterNames() {
return delegate.getDefinedFilterNames();
}
@Override @Deprecated
public FilterDefinition getFilterDefinition(String filterName) throws HibernateException {
return delegate.getFilterDefinition( filterName );
}
@Override
public Collection<FilterDefinition> getAutoEnabledFilters() {
return delegate.getAutoEnabledFilters();
}
@Override
public boolean containsFetchProfileDefinition(String name) {
return delegate.containsFetchProfileDefinition( name );
}
@Override
public Set<String> getDefinedFetchProfileNames() {
return delegate.getDefinedFetchProfileNames();
}
@Override @Deprecated
public Generator getGenerator(String rootEntityName) {
return delegate.getGenerator( rootEntityName );
}
@Override
public Map<String, Object> getProperties() {
return delegate.getProperties();
}
@Override
public JdbcServices getJdbcServices() {
return delegate.getJdbcServices();
}
@Override
public SqlStringGenerationContext getSqlStringGenerationContext() {
return delegate.getSqlStringGenerationContext();
}
@Override
public RootGraphImplementor<Map<String, ?>> createGraphForDynamicEntity(String entityName) {
return delegate.createGraphForDynamicEntity( entityName );
}
@Override
public RootGraphImplementor<?> findEntityGraphByName(String name) {
return delegate.findEntityGraphByName( name );
}
@Override
public <R> Map<String, TypedQueryReference<R>> getNamedQueries(Class<R> resultType) {
return delegate.getNamedQueries( resultType );
}
@Override
public <E> Map<String, EntityGraph<? extends E>> getNamedEntityGraphs(Class<E> entityType) {
return delegate.getNamedEntityGraphs( entityType );
}
@Override
public String bestGuessEntityName(Object object) {
return delegate.bestGuessEntityName( object );
}
@Override
public SessionImplementor openTemporarySession() throws HibernateException {
return delegate.openTemporarySession();
}
@Deprecated
public Set<String> getCollectionRolesByEntityParticipant(String entityName) {
return delegate.getMappingMetamodel().getCollectionRolesByEntityParticipant( entityName );
}
@Override
public EntityNotFoundDelegate getEntityNotFoundDelegate() {
return delegate.getEntityNotFoundDelegate();
}
@Override
public FetchProfile getFetchProfile(String name) {
return delegate.getFetchProfile( name );
}
@Override
public JpaMetamodel getJpaMetamodel() {
return delegate.getJpaMetamodel();
}
@Override
public ServiceRegistryImplementor getServiceRegistry() {
return delegate.getServiceRegistry();
}
@Override
public void addObserver(SessionFactoryObserver observer) {
delegate.addObserver( observer );
}
@Override
public CustomEntityDirtinessStrategy getCustomEntityDirtinessStrategy() {
return delegate.getCustomEntityDirtinessStrategy();
}
@Override
public CurrentTenantIdentifierResolver<Object> getCurrentTenantIdentifierResolver() {
return delegate.getCurrentTenantIdentifierResolver();
}
@Override
public JavaType<Object> getTenantIdentifierJavaType() {
return delegate.getTenantIdentifierJavaType();
}
@Override
public String getUuid() {
return delegate.getUuid();
}
@Override
public String getName() {
return delegate.getName();
}
@Override
public String getJndiName() {
return delegate.getJndiName();
}
@Override
public TypeConfiguration getTypeConfiguration() {
return delegate.getTypeConfiguration();
}
@Override
public QueryEngine getQueryEngine() {
return delegate.getQueryEngine();
}
@Override
public SqlTranslationEngine getSqlTranslationEngine() {
return delegate.getSqlTranslationEngine();
}
@Override
public Reference getReference() throws NamingException {
return delegate.getReference();
}
@Override
public Session createEntityManager() {
return delegate.createEntityManager();
}
@Override
public Session createEntityManager(Map map) {
return delegate.createEntityManager( map );
}
@Override
public Session createEntityManager(SynchronizationType synchronizationType) {
return delegate.createEntityManager( synchronizationType );
}
@Override
public Session createEntityManager(SynchronizationType synchronizationType, Map map) {
return delegate.createEntityManager( synchronizationType, map );
}
@Override
public HibernateCriteriaBuilder getCriteriaBuilder() {
return delegate.getCriteriaBuilder();
}
@Override @Deprecated
public MappingMetamodel getMetamodel() {
return (MappingMetamodel) delegate.getMetamodel();
}
@Override
public boolean isOpen() {
return delegate.isOpen();
}
@Override
public WrapperOptions getWrapperOptions() {
return delegate.getWrapperOptions();
}
@Override
public <T> List<EntityGraph<? super T>> findEntityGraphsByType(Class<T> entityClass) {
return delegate.findEntityGraphsByType(entityClass);
}
@Override
public EventListenerGroups getEventListenerGroups() {
return delegate.getEventListenerGroups();
}
@Override
public ParameterMarkerStrategy getParameterMarkerStrategy() {
return delegate.getParameterMarkerStrategy();
}
@Override
public JdbcValuesMappingProducerProvider getJdbcValuesMappingProducerProvider() {
return delegate.getJdbcValuesMappingProducerProvider();
}
@Override
public EntityCopyObserverFactory getEntityCopyObserver() {
return delegate.getEntityCopyObserver();
}
@Override
public ClassLoaderService getClassLoaderService() {
return delegate.getClassLoaderService();
}
@Override
public ManagedBeanRegistry getManagedBeanRegistry() {
return delegate.getManagedBeanRegistry();
}
@Override
public EventListenerRegistry getEventListenerRegistry() {
return delegate.getEventListenerRegistry();
}
@Override
public <R> TypedQueryReference<R> addNamedQuery(String name, TypedQuery<R> query) {
return delegate.addNamedQuery( name, query );
}
@Override
public Object resolveTenantIdentifier() {
return delegate.resolveTenantIdentifier();
}
}
|
SessionFactoryDelegatingImpl
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/functions/HashcodeITCase.java
|
{
"start": 6694,
"end": 7150
}
|
class ____ extends TestSinkV2.DefaultSinkWriter<RowData> {
private final SharedReference<List<RowData>> results;
private TestWriter(SharedReference<List<RowData>> results) {
this.results = results;
}
@Override
public void write(RowData element, Context context) {
results.applySync(r -> r.add(element));
super.write(element, context);
}
}
private static
|
TestWriter
|
java
|
elastic__elasticsearch
|
x-pack/plugin/searchable-snapshots/qa/s3/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/s3/S3SearchableSnapshotsCredentialsReloadIT.java
|
{
"start": 7822,
"end": 12671
}
|
class ____ {
private final String mountedIndexName = randomIdentifier();
private final String repositoryName = randomIdentifier();
@Nullable // to use the default
WarningsHandler warningsHandler;
void putRepository() throws IOException {
putRepository(UnaryOperator.identity());
}
void putRepository(UnaryOperator<Settings.Builder> settingsOperator) throws IOException {
// Register repository
final Request request = newXContentRequest(
HttpMethod.PUT,
"/_snapshot/" + repositoryName,
(b, p) -> b.field("type", "s3")
.startObject("settings")
.value(
settingsOperator.apply(
Settings.builder().put("bucket", BUCKET).put("base_path", BASE_PATH).put("endpoint", s3Fixture.getAddress())
).build()
)
.endObject()
);
request.addParameter("verify", "false"); // because we don't have access to the blob store yet
request.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(warningsHandler));
assertOK(client().performRequest(request));
}
void createFrozenSearchableSnapshotIndex() throws IOException {
// Create an index, large enough that its data is not all captured in the file headers
final String indexName = randomValueOtherThan(mountedIndexName, ESTestCase::randomIdentifier);
createIndex(indexName, indexSettings(1, 0).build());
try (var bodyStream = new ByteArrayOutputStream()) {
for (int i = 0; i < 1024; i++) {
try (XContentBuilder bodyLineBuilder = new XContentBuilder(XContentType.JSON.xContent(), bodyStream)) {
bodyLineBuilder.startObject().startObject("index").endObject().endObject();
}
bodyStream.write(0x0a);
try (XContentBuilder bodyLineBuilder = new XContentBuilder(XContentType.JSON.xContent(), bodyStream)) {
bodyLineBuilder.startObject().field("foo", "bar").endObject();
}
bodyStream.write(0x0a);
}
bodyStream.flush();
final Request request = new Request("PUT", indexName + "/_bulk");
request.setEntity(new ByteArrayEntity(bodyStream.toByteArray(), ContentType.APPLICATION_JSON));
client().performRequest(request);
}
// Take a snapshot and delete the original index
final String snapshotName = randomIdentifier();
final Request createSnapshotRequest = new Request(HttpPut.METHOD_NAME, "_snapshot/" + repositoryName + '/' + snapshotName);
createSnapshotRequest.addParameter("wait_for_completion", "true");
createSnapshotRequest.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(warningsHandler));
assertOK(client().performRequest(createSnapshotRequest));
deleteIndex(indexName);
// Mount the snapshotted index as a searchable snapshot
final Request mountRequest = newXContentRequest(
HttpMethod.POST,
"/_snapshot/" + repositoryName + "/" + snapshotName + "/_mount",
(b, p) -> b.field("index", indexName).field("renamed_index", mountedIndexName)
);
mountRequest.addParameter("wait_for_completion", "true");
mountRequest.addParameter("storage", "shared_cache");
assertOK(client().performRequest(mountRequest));
ensureGreen(mountedIndexName);
}
void ensureSearchSuccess() throws IOException {
final Request searchRequest = new Request("GET", mountedIndexName + "/_search");
searchRequest.addParameter("size", "10000");
assertEquals(
"bar",
ObjectPath.createFromResponse(assertOK(client().performRequest(searchRequest))).evaluate("hits.hits.0._source.foo")
);
}
void ensureSearchFailure() throws IOException {
assertOK(client().performRequest(new Request("POST", "/_searchable_snapshots/cache/clear")));
final Request searchRequest = new Request("GET", mountedIndexName + "/_search");
searchRequest.addParameter("size", "10000");
assertThat(
expectThrows(ResponseException.class, () -> client().performRequest(searchRequest)).getMessage(),
allOf(containsString("Access denied"), containsString("Status Code: 403"), containsString("failed to read data from cache"))
);
}
}
}
|
TestHarness
|
java
|
apache__camel
|
components/camel-knative/camel-knative-http/src/test/java/org/apache/camel/component/knative/http/KnativeHttpTestSupport.java
|
{
"start": 1719,
"end": 4529
}
|
class ____ {
private KnativeHttpTestSupport() {
}
public static KnativeComponent configureKnativeComponent(
CamelContext context, CloudEvent ce, KnativeResource... definitions) {
return configureKnativeComponent(context, ce, Arrays.asList(definitions));
}
public static KnativeComponent configureKnativeComponent(
CamelContext context, CloudEvent ce, Map<String, Object> properties) {
return configureKnativeComponent(context, ce, KnativeEnvironment.mandatoryLoadFromProperties(context, properties));
}
public static KnativeComponent configureKnativeComponent(
CamelContext context, CloudEvent ce, List<KnativeResource> definitions) {
return configureKnativeComponent(context, ce, new KnativeEnvironment(definitions));
}
public static KnativeComponent configureKnativeComponent(
CamelContext context, CloudEvent ce, KnativeEnvironment environment) {
KnativeComponent component = context.getComponent("knative", KnativeComponent.class);
component.setCloudEventsSpecVersion(ce.version());
component.setEnvironment(environment);
component.setConsumerFactory(new KnativeHttpConsumerFactory(context) {
@Override
protected void doBuild() throws Exception {
super.doBuild();
this.setRouter(VertxPlatformHttpRouter.lookup(context,
VertxPlatformHttpRouter.getRouterNameFromPort(RestAssured.port)));
}
});
component.setProducerFactory(new KnativeHttpProducerFactory(context) {
@Override
protected void doBuild() throws Exception {
super.doBuild();
this.setVertx(VertxPlatformHttpRouter
.lookup(context, VertxPlatformHttpRouter.getRouterNameFromPort(RestAssured.port)).vertx());
}
});
return component;
}
public static String httpAttribute(CloudEvent ce, String name) {
return ce.mandatoryAttribute(name).http();
}
public static void configurePlatformHttpComponent(CamelContext camelContext, int bindPort) {
VertxPlatformHttpServerConfiguration configuration = new VertxPlatformHttpServerConfiguration();
configuration.setBindPort(bindPort);
try {
camelContext.addService(new MyVertxPlatformHttpServer(configuration));
} catch (Exception e) {
throw new RuntimeException(e);
}
PlatformHttpComponent component = new PlatformHttpComponent(camelContext);
component.setEngine(new VertxPlatformHttpEngine());
camelContext.getRegistry().bind(PlatformHttpConstants.PLATFORM_HTTP_COMPONENT_NAME, component);
}
private static
|
KnativeHttpTestSupport
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/jdk8/OptionalTest3.java
|
{
"start": 166,
"end": 708
}
|
class ____ extends TestCase {
public void test_optional() throws Exception {
UserExt ext = new UserExt();
ext.setValue(Optional.of(123));
User user = new User();
user.setExt(Optional.of(ext));
String text = JSON.toJSONString(user);
Assert.assertEquals("{\"ext\":{\"value\":123}}", text);
User user2 = JSON.parseObject(text, User.class);
Assert.assertEquals(user.getExt().get().getValue().get(), user2.getExt().get().getValue().get());
}
public static
|
OptionalTest3
|
java
|
quarkusio__quarkus
|
extensions/spring-web/resteasy-classic/tests/src/test/java/io/quarkus/spring/web/resteasy/classic/test/SimpleSpringController.java
|
{
"start": 290,
"end": 398
}
|
class ____ {
@GetMapping
public String hello() {
return "hello";
}
}
|
SimpleSpringController
|
java
|
spring-projects__spring-boot
|
module/spring-boot-mail/src/dockerTest/java/org/springframework/boot/mail/autoconfigure/MailSenderAutoConfigurationIntegrationTests.java
|
{
"start": 2012,
"end": 3167
}
|
class ____ {
private SimpleMailMessage createMessage(String subject) {
SimpleMailMessage msg = new SimpleMailMessage();
msg.setFrom("from@example.com");
msg.setTo("to@example.com");
msg.setSubject(subject);
msg.setText("Subject: " + subject);
return msg;
}
private String getSubject(Message message) {
try {
return message.getSubject();
}
catch (MessagingException ex) {
throw new RuntimeException("Failed to get message subject", ex);
}
}
private void assertMessagesContainSubject(Session session, String subject) throws MessagingException {
try (Store store = session.getStore("pop3")) {
String host = session.getProperty("mail.pop3.host");
int port = Integer.parseInt(session.getProperty("mail.pop3.port"));
store.connect(host, port, "user", "pass");
try (Folder folder = store.getFolder("inbox")) {
folder.open(Folder.READ_ONLY);
Awaitility.await()
.atMost(Duration.ofSeconds(5))
.ignoreExceptions()
.untilAsserted(() -> assertThat(Arrays.stream(folder.getMessages()).map(this::getSubject))
.contains(subject));
}
}
}
@Nested
|
MailSenderAutoConfigurationIntegrationTests
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletSpec.java
|
{
"start": 24288,
"end": 24889
}
|
interface ____ {
/**
* Add an INS (insert) element.
* @return an INS element builder
*/
INS ins();
/**
* Add a complete INS element.
* @param cdata inserted data
* @return the current element builder
*/
_InsDel ins(String cdata);
/**
* Add a DEL (delete) element.
* @return a DEL element builder
*/
DEL del();
/**
* Add a complete DEL element.
* @param cdata deleted data
* @return the current element builder
*/
_InsDel del(String cdata);
}
/** %special -(A|%pre.exclusion) */
public
|
_InsDel
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/dynamic/DefaultRedisCommandsMetadata.java
|
{
"start": 4264,
"end": 4569
}
|
class ____ defined in the default package
*/
private static String getPackageName(Class<?> clazz) {
LettuceAssert.notNull(clazz, "Class must not be null");
return getPackageName(clazz.getName());
}
/**
* Determine the name of the package of the given fully-qualified
|
is
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/HadoopLogsAnalyzer.java
|
{
"start": 34975,
"end": 57658
}
|
class ____ {
LoggedTaskAttempt attempt;
SetField(LoggedTaskAttempt attempt) {
this.attempt = attempt;
}
abstract void set(long value);
}
private void incorporateCounter(SetField thunk, String counterString,
String counterName) {
String valueString = parseCounter(counterString, counterName);
if (valueString != null) {
thunk.set(Long.parseLong(valueString));
}
}
private void incorporateCounters(LoggedTaskAttempt attempt2,
String counterString) {
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.hdfsBytesRead = val;
}
}, counterString, "HDFS_BYTES_READ");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.hdfsBytesWritten = val;
}
}, counterString, "HDFS_BYTES_WRITTEN");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.fileBytesRead = val;
}
}, counterString, "FILE_BYTES_READ");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.fileBytesWritten = val;
}
}, counterString, "FILE_BYTES_WRITTEN");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.mapInputBytes = val;
}
}, counterString, "MAP_INPUT_BYTES");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.mapInputRecords = val;
}
}, counterString, "MAP_INPUT_RECORDS");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.mapOutputBytes = val;
}
}, counterString, "MAP_OUTPUT_BYTES");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.mapOutputRecords = val;
}
}, counterString, "MAP_OUTPUT_RECORDS");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.combineInputRecords = val;
}
}, counterString, "COMBINE_INPUT_RECORDS");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.reduceInputGroups = val;
}
}, counterString, "REDUCE_INPUT_GROUPS");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.reduceInputRecords = val;
}
}, counterString, "REDUCE_INPUT_RECORDS");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.reduceShuffleBytes = val;
}
}, counterString, "REDUCE_SHUFFLE_BYTES");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.reduceOutputRecords = val;
}
}, counterString, "REDUCE_OUTPUT_RECORDS");
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.spilledRecords = val;
}
}, counterString, "SPILLED_RECORDS");
// incorporate CPU usage
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.getResourceUsageMetrics().setCumulativeCpuUsage(val);
}
}, counterString, "CPU_MILLISECONDS");
// incorporate virtual memory usage
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.getResourceUsageMetrics().setVirtualMemoryUsage(val);
}
}, counterString, "VIRTUAL_MEMORY_BYTES");
// incorporate physical memory usage
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.getResourceUsageMetrics().setPhysicalMemoryUsage(val);
}
}, counterString, "PHYSICAL_MEMORY_BYTES");
// incorporate heap usage
incorporateCounter(new SetField(attempt2) {
@Override
void set(long val) {
attempt.getResourceUsageMetrics().setHeapUsage(val);
}
}, counterString, "COMMITTED_HEAP_BYTES");
}
private ParsedHost getAndRecordParsedHost(String hostName) {
ParsedHost result = ParsedHost.parse(hostName);
if (result != null && !allHosts.contains(result)) {
allHosts.add(result);
}
return result;
}
private void processMapAttemptLine(ParsedLine line) {
String attemptID = line.get("TASK_ATTEMPT_ID");
String taskID = line.get("TASKID");
String status = line.get("TASK_STATUS");
String attemptStartTime = line.get("START_TIME");
String attemptFinishTime = line.get("FINISH_TIME");
String hostName = line.get("HOSTNAME");
String counters = line.get("COUNTERS");
if (jobBeingTraced != null && taskID != null) {
LoggedTask task = tasksInCurrentJob.get(taskID);
if (task == null) {
task = new LoggedTask();
task.setTaskID(taskID);
jobBeingTraced.getMapTasks().add(task);
tasksInCurrentJob.put(taskID, task);
}
task.setTaskID(taskID);
LoggedTaskAttempt attempt = attemptsInCurrentJob.get(attemptID);
boolean attemptAlreadyExists = attempt != null;
if (attempt == null) {
attempt = new LoggedTaskAttempt();
attempt.setAttemptID(attemptID);
}
if (!attemptAlreadyExists) {
attemptsInCurrentJob.put(attemptID, attempt);
task.getAttempts().add(attempt);
}
Pre21JobHistoryConstants.Values stat = null;
try {
stat =
status == null ? null : Pre21JobHistoryConstants.Values
.valueOf(status);
} catch (IllegalArgumentException e) {
LOG.error("A map attempt status you don't know about is \"" + status
+ "\".", e);
stat = null;
}
incorporateCounters(attempt, counters);
attempt.setResult(stat);
if (attemptStartTime != null) {
attempt.setStartTime(Long.parseLong(attemptStartTime));
}
if (attemptFinishTime != null) {
attempt.setFinishTime(Long.parseLong(attemptFinishTime));
}
int distance = Integer.MAX_VALUE;
if (hostName != null) {
ParsedHost host = getAndRecordParsedHost(hostName);
if (host != null) {
attempt.setHostName(host.getNodeName(), host.getRackName());
attempt.setLocation(host.makeLoggedLocation());
} else {
attempt.setHostName(hostName, null);
}
List<LoggedLocation> locs = task.getPreferredLocations();
if (host != null && locs != null) {
for (LoggedLocation loc : locs) {
ParsedHost preferedLoc = new ParsedHost(loc);
distance = Math.min(distance, preferedLoc.distance(host));
}
}
mapperLocality.enter(distance);
}
distance = Math.min(distance, successfulMapAttemptTimes.length - 1);
if (attempt.getStartTime() > 0 && attempt.getFinishTime() > 0) {
long runtime = attempt.getFinishTime() - attempt.getStartTime();
if (stat == Pre21JobHistoryConstants.Values.SUCCESS) {
successfulMapAttemptTimes[distance].enter(runtime);
}
if (stat == Pre21JobHistoryConstants.Values.FAILED) {
failedMapAttemptTimes[distance].enter(runtime);
}
}
if (attemptID != null) {
Matcher matcher = taskAttemptIDPattern.matcher(attemptID);
if (matcher.matches()) {
String attemptNumberString = matcher.group(1);
if (attemptNumberString != null) {
int attemptNumber = Integer.parseInt(attemptNumberString);
successfulNthMapperAttempts.enter(attemptNumber);
}
}
}
}
try {
if (attemptStartTime != null) {
long startTimeValue = Long.parseLong(attemptStartTime);
if (startTimeValue != 0
&& startTimeValue + MAXIMUM_CLOCK_SKEW >= launchTimeCurrentJob) {
taskAttemptStartTimes.put(attemptID, startTimeValue);
} else {
taskAttemptStartTimes.remove(attemptID);
}
} else if (status != null && attemptFinishTime != null) {
long finishTime = Long.parseLong(attemptFinishTime);
if (status.equals("SUCCESS")) {
taskMapAttemptFinishTimes.put(attemptID, finishTime);
}
}
} catch (NumberFormatException e) {
LOG.warn(
"HadoopLogsAnalyzer.processMapAttemptLine: bad numerical format, at line"
+ lineNumber + ".", e);
}
}
private void processReduceAttemptLine(ParsedLine line) {
String attemptID = line.get("TASK_ATTEMPT_ID");
String taskID = line.get("TASKID");
String status = line.get("TASK_STATUS");
String attemptStartTime = line.get("START_TIME");
String attemptFinishTime = line.get("FINISH_TIME");
String attemptShuffleFinished = line.get("SHUFFLE_FINISHED");
String attemptSortFinished = line.get("SORT_FINISHED");
String counters = line.get("COUNTERS");
String hostName = line.get("HOSTNAME");
if (hostName != null && !hostNames.contains(hostName)) {
hostNames.add(hostName);
}
if (jobBeingTraced != null && taskID != null) {
LoggedTask task = tasksInCurrentJob.get(taskID);
if (task == null) {
task = new LoggedTask();
task.setTaskID(taskID);
jobBeingTraced.getReduceTasks().add(task);
tasksInCurrentJob.put(taskID, task);
}
task.setTaskID(taskID);
LoggedTaskAttempt attempt = attemptsInCurrentJob.get(attemptID);
boolean attemptAlreadyExists = attempt != null;
if (attempt == null) {
attempt = new LoggedTaskAttempt();
attempt.setAttemptID(attemptID);
}
if (!attemptAlreadyExists) {
attemptsInCurrentJob.put(attemptID, attempt);
task.getAttempts().add(attempt);
}
Pre21JobHistoryConstants.Values stat = null;
try {
stat =
status == null ? null : Pre21JobHistoryConstants.Values
.valueOf(status);
} catch (IllegalArgumentException e) {
LOG.warn("A map attempt status you don't know about is \"" + status
+ "\".", e);
stat = null;
}
incorporateCounters(attempt, counters);
attempt.setResult(stat);
if (attemptStartTime != null) {
attempt.setStartTime(Long.parseLong(attemptStartTime));
}
if (attemptFinishTime != null) {
attempt.setFinishTime(Long.parseLong(attemptFinishTime));
}
if (attemptShuffleFinished != null) {
attempt.setShuffleFinished(Long.parseLong(attemptShuffleFinished));
}
if (attemptSortFinished != null) {
attempt.setSortFinished(Long.parseLong(attemptSortFinished));
}
if (attempt.getStartTime() > 0 && attempt.getFinishTime() > 0) {
long runtime = attempt.getFinishTime() - attempt.getStartTime();
if (stat == Pre21JobHistoryConstants.Values.SUCCESS) {
successfulReduceAttemptTimes.enter(runtime);
}
if (stat == Pre21JobHistoryConstants.Values.FAILED) {
failedReduceAttemptTimes.enter(runtime);
}
}
if (hostName != null) {
ParsedHost host = getAndRecordParsedHost(hostName);
if (host != null) {
attempt.setHostName(host.getNodeName(), host.getRackName());
} else {
attempt.setHostName(hostName, null);
}
}
if (attemptID != null) {
Matcher matcher = taskAttemptIDPattern.matcher(attemptID);
if (matcher.matches()) {
String attemptNumberString = matcher.group(1);
if (attemptNumberString != null) {
int attemptNumber = Integer.parseInt(attemptNumberString);
successfulNthReducerAttempts.enter(attemptNumber);
}
}
}
}
try {
if (attemptStartTime != null) {
long startTimeValue = Long.parseLong(attemptStartTime);
if (startTimeValue != 0
&& startTimeValue + MAXIMUM_CLOCK_SKEW >= launchTimeCurrentJob) {
taskAttemptStartTimes.put(attemptID, startTimeValue);
}
} else if (status != null && status.equals("SUCCESS")
&& attemptFinishTime != null) {
long finishTime = Long.parseLong(attemptFinishTime);
taskReduceAttemptFinishTimes.put(attemptID, finishTime);
if (attemptShuffleFinished != null) {
taskReduceAttemptShuffleEndTimes.put(attemptID, Long
.parseLong(attemptShuffleFinished));
}
if (attemptSortFinished != null) {
taskReduceAttemptSortEndTimes.put(attemptID, Long
.parseLong(attemptSortFinished));
}
}
} catch (NumberFormatException e) {
LOG.error(
"HadoopLogsAnalyzer.processReduceAttemptLine: bad numerical format, at line"
+ lineNumber + ".", e);
}
}
private void processParsedLine(ParsedLine line)
throws JsonProcessingException, IOException {
if (!collecting) {
// "Job", "MapAttempt", "ReduceAttempt", "Task"
LogRecordType myType = line.getType();
if (myType == canonicalJob) {
processJobLine(line);
} else if (myType == canonicalTask) {
processTaskLine(line);
} else if (myType == canonicalMapAttempt) {
processMapAttemptLine(line);
} else if (myType == canonicalReduceAttempt) {
processReduceAttemptLine(line);
} else {
}
}
}
private void printDistributionSet(String title, Histogram[][] distSet) {
statisticalOutput.print(title + "\n\n");
// print out buckets
for (int i = 0; i < JobOutcome.values().length; ++i) {
for (int j = 0; j < LoggedJob.JobType.values().length; ++j) {
JobOutcome thisOutcome = JobOutcome.values()[i];
LoggedJob.JobType thisType = LoggedJob.JobType.values()[j];
statisticalOutput.print("outcome = ");
statisticalOutput.print(thisOutcome.toString());
statisticalOutput.print(", and type = ");
statisticalOutput.print(thisType.toString());
statisticalOutput.print(".\n\n");
Histogram dist = distSet[i][j];
printSingleDistributionData(dist);
}
}
}
private void printSingleDistributionData(Histogram dist) {
int[] percentiles = new int[numberBuckets];
for (int k = 0; k < numberBuckets; ++k) {
percentiles[k] = k + 1;
}
long[] cdf = dist.getCDF(numberBuckets + 1, percentiles);
if (cdf == null) {
statisticalOutput.print("(No data)\n");
} else {
statisticalOutput.print("min: ");
statisticalOutput.print(cdf[0]);
statisticalOutput.print("\n");
for (int k = 0; k < numberBuckets; ++k) {
statisticalOutput.print(percentiles[k]);
statisticalOutput.print("% ");
statisticalOutput.print(cdf[k + 1]);
statisticalOutput.print("\n");
}
statisticalOutput.print("max: ");
statisticalOutput.print(cdf[numberBuckets + 1]);
statisticalOutput.print("\n");
}
}
private void maybeMateJobAndConf() throws IOException {
if (jobBeingTraced != null && jobconf != null
&& jobBeingTraced.getJobID().toString().equals(jobconf.jobID)) {
jobBeingTraced.setHeapMegabytes(jobconf.heapMegabytes);
jobBeingTraced.setQueue(jobconf.queue);
jobBeingTraced.setJobName(jobconf.jobName);
jobBeingTraced.setClusterMapMB(jobconf.clusterMapMB);
jobBeingTraced.setClusterReduceMB(jobconf.clusterReduceMB);
jobBeingTraced.setJobMapMB(jobconf.jobMapMB);
jobBeingTraced.setJobReduceMB(jobconf.jobReduceMB);
jobBeingTraced.setJobProperties(jobconf.properties);
jobconf = null;
finalizeJob();
}
}
private ArrayList<LoggedDiscreteCDF> mapCDFArrayList(Histogram[] data) {
ArrayList<LoggedDiscreteCDF> result = new ArrayList<LoggedDiscreteCDF>();
for (Histogram hist : data) {
LoggedDiscreteCDF discCDF = new LoggedDiscreteCDF();
discCDF.setCDF(hist, attemptTimesPercentiles, 100);
result.add(discCDF);
}
return result;
}
private void finalizeJob() throws IOException {
if (jobBeingTraced != null) {
if (omitTaskDetails) {
jobBeingTraced.setMapTasks(null);
jobBeingTraced.setReduceTasks(null);
jobBeingTraced.setOtherTasks(null);
}
// add digest info to the job
jobBeingTraced
.setSuccessfulMapAttemptCDFs(mapCDFArrayList(successfulMapAttemptTimes));
jobBeingTraced
.setFailedMapAttemptCDFs(mapCDFArrayList(failedMapAttemptTimes));
LoggedDiscreteCDF discCDF = new LoggedDiscreteCDF();
discCDF
.setCDF(successfulReduceAttemptTimes, attemptTimesPercentiles, 100);
jobBeingTraced.setSuccessfulReduceAttemptCDF(discCDF);
discCDF = new LoggedDiscreteCDF();
discCDF.setCDF(failedReduceAttemptTimes, attemptTimesPercentiles, 100);
jobBeingTraced.setFailedReduceAttemptCDF(discCDF);
long totalSuccessfulAttempts = 0L;
long maxTriesToSucceed = 0L;
for (Map.Entry<Long, Long> ent : successfulNthMapperAttempts) {
totalSuccessfulAttempts += ent.getValue();
maxTriesToSucceed = Math.max(maxTriesToSucceed, ent.getKey());
}
if (totalSuccessfulAttempts > 0L) {
double[] successAfterI = new double[(int) maxTriesToSucceed + 1];
for (int i = 0; i < successAfterI.length; ++i) {
successAfterI[i] = 0.0D;
}
for (Map.Entry<Long, Long> ent : successfulNthMapperAttempts) {
successAfterI[ent.getKey().intValue()] =
((double) ent.getValue()) / totalSuccessfulAttempts;
}
jobBeingTraced.setMapperTriesToSucceed(successAfterI);
} else {
jobBeingTraced.setMapperTriesToSucceed(null);
}
jobTraceGen.output(jobBeingTraced);
jobBeingTraced = null;
}
}
public int run(String[] args) throws IOException {
int result = initializeHadoopLogsAnalyzer(args);
if (result != 0) {
return result;
}
return run();
}
/**
 * Drives the whole analysis: reads the log stream line-group by line-group,
 * mates job configuration files with their job logs, emits the requested
 * reports, and closes all outputs.
 *
 * @return 0 on success
 * @throws IOException if reading the logs or writing any output fails
 */
int run() throws IOException {
  Pair<String, String> line = readBalancedLine();
  while (line != null) {
    // Progress logging: every 1000 lines for the first million, then every millionth line.
    if (debug
        && (lineNumber < 1000000L && lineNumber % 1000L == 0 || lineNumber % 1000000L == 0)) {
      LOG.debug("" + lineNumber + " " + line.second());
    }
    if (line.first() == null) {
      // An ordinary log line (no conf-file name attached); parse and process it.
      try {
        // HACK ALERT!! It's possible for a Job end line to end a
        // job for which we have a config file
        // image [ a ParsedConfigFile ] in jobconf.
        //
        // processParsedLine handles this.
        processParsedLine(new ParsedLine(line.second(), version));
      } catch (StringIndexOutOfBoundsException e) {
        // Malformed lines are logged and skipped rather than aborting the whole run.
        LOG.warn("anomalous line #" + lineNumber + ":" + line, e);
      }
    } else {
      // A job configuration file; keep it only if it parses as valid, then try
      // to pair it with a pending job trace.
      jobconf = new ParsedConfigFile(line.first(), line.second());
      if (jobconf.valid == false) {
        jobconf = null;
      }
      maybeMateJobAndConf();
    }
    line = readBalancedLine();
  }
  finalizeJob();
  if (collecting) {
    // "Collect" mode: just print the line types that were observed.
    String[] typeNames = LogRecordType.lineTypes();
    for (int i = 0; i < typeNames.length; ++i) {
      statisticalOutput.print(typeNames[i]);
      statisticalOutput.print('\n');
    }
  } else {
    // Report mode: print whichever distribution sets were requested via switches.
    if (delays) {
      printDistributionSet("Job start delay spectrum:", delayTimeDists);
    }
    if (runtimes) {
      printDistributionSet("Job run time spectrum:", runTimeDists);
    }
    if (spreading) {
      // spreadMax/spreadMin are permille percentile indices; see the -spreads switch.
      String ratioDescription =
          "(" + spreadMax + "/1000 %ile) to (" + spreadMin
              + "/1000 %ile) scaled by 1000000";
      printDistributionSet(
          "Map task success times " + ratioDescription + ":",
          mapTimeSpreadDists);
      printDistributionSet("Shuffle success times " + ratioDescription + ":",
          shuffleTimeSpreadDists);
      printDistributionSet("Sort success times " + ratioDescription + ":",
          sortTimeSpreadDists);
      printDistributionSet("Reduce success times " + ratioDescription + ":",
          reduceTimeSpreadDists);
    }
    if (collectTaskTimes) {
      printDistributionSet("Global map task success times:", mapTimeDists);
      printDistributionSet("Global shuffle task success times:",
          shuffleTimeDists);
      printDistributionSet("Global sort task success times:", sortTimeDists);
      printDistributionSet("Global reduce task success times:",
          reduceTimeDists);
    }
  }
  // Flush and close the optional topology and job-trace JSON outputs.
  if (topologyGen != null) {
    LoggedNetworkTopology topo =
        new LoggedNetworkTopology(allHosts, "<root>", 0);
    topologyGen.output(topo);
    topologyGen.close();
  }
  if (jobTraceGen != null) {
    jobTraceGen.close();
  }
  if (input != null) {
    input.close();
    input = null;
  }
  // Return the decompressor to the pool so it can be reused.
  if (inputCodec != null) {
    CodecPool.returnDecompressor(inputDecompressor);
    inputDecompressor = null;
    inputCodec = null;
  }
  return 0;
}
/**
 * @param args
 *
 *          Last arg is the input file. That file can be a directory, in which
 *          case you get all the files in sorted order. We will decompress
 *          files whose names end in .gz .
 *
 *          switches: -c collect line types.
 *
 *          -d debug mode
 *
 *          -delays print out the delays [interval between job submit time and
 *          launch time]
 *
 *          -runtimes print out the job runtimes
 *
 *          -spreads print out the ratio of 10%ile and 90%ile, of both the
 *          successful map task attempt run times and the successful
 *          reduce task attempt run times
 *
 *          -tasktimes prints out individual task time distributions
 *
 *          collects all the line types and prints the first example of each
 *          one
 */
public static void main(String[] args) {
  try {
    HadoopLogsAnalyzer analyzer = new HadoopLogsAnalyzer();
    int result = ToolRunner.run(analyzer, args);
    if (result == 0) {
      return;
    }
    System.exit(result);
  } catch (FileNotFoundException e) {
    // Exit code 1: the input file or directory is missing.
    LOG.error("", e);
    e.printStackTrace(staticDebugOutput);
    System.exit(1);
  } catch (IOException e) {
    // Exit code 2: some other I/O failure while reading logs or writing output.
    LOG.error("", e);
    e.printStackTrace(staticDebugOutput);
    System.exit(2);
  } catch (Exception e) {
    // Exit code 3: any other unexpected failure.
    LOG.error("", e);
    e.printStackTrace(staticDebugOutput);
    System.exit(3);
  }
}
}
|
SetField
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/persister/entity/EntityPersister.java
|
{
"start": 37024,
"end": 39550
}
|
class ____ be
* cast to (optional operation).
*/
Class<?> getConcreteProxyClass();
/**
 * Set the given values to the mapped properties of the given object.
 */
default void setValues(Object object, Object[] values) {
  setPropertyValues( object, values );
}
/**
* Set the given values to the mapped properties of the given object.
*
* @deprecated Use {@link #setValues} instead
*/
@Deprecated(since = "6.0")
void setPropertyValues(Object object, Object[] values);
/**
 * Set the value of a particular property of the given instance.
 */
default void setValue(Object object, int i, Object value) {
  setPropertyValue( object, i, value );
}
/**
* Set the value of a particular property of the given instance.
*
* @deprecated Use {@link #setValue} instead
*/
@Deprecated(since = "6.0")
void setPropertyValue(Object object, int i, Object value);
/**
 * Get the values of the mapped properties of the given object.
 */
default Object[] getValues(Object object) {
  return getPropertyValues( object );
}
/**
* @deprecated Use {@link #getValues} instead
*/
@Deprecated(since = "6.0")
Object[] getPropertyValues(Object object);
/**
 * Get the value of a particular property of the given instance.
 */
default Object getValue(Object object, int i) {
  return getPropertyValue( object, i );
}
/**
* @deprecated Use {@link #getValue} instead
*/
@Deprecated(since = "6.0")
Object getPropertyValue(Object object, int i) throws HibernateException;
/**
* Get the value of a particular property
*/
Object getPropertyValue(Object object, String propertyName);
/**
* Get the identifier of an instance from the object's identifier property.
* Throw an exception if it has no identifier property.
*/
Object getIdentifier(Object entity, SharedSessionContractImplementor session);
/**
* Get the identifier of an instance from the object's identifier property.
* Throw an exception if it has no identifier property.
*
* It's supposed to be use during the merging process
*/
default Object getIdentifier(Object entity, MergeContext mergeContext) {
return getIdentifier( entity, mergeContext.getEventSource() );
}
/**
* Get the identifier of an instance from the object's identifier property.
* Throw an exception if it has no identifier property.
*/
default Object getIdentifier(Object entity) {
return getIdentifier( entity, (SharedSessionContractImplementor) null );
}
/**
* Inject the identifier value into the given entity.
*/
void setIdentifier(Object entity, Object id, SharedSessionContractImplementor session);
/**
* Get the version number (or timestamp) from the object's version property.
* Return {@code null} if it is not versioned.
*/
Object getVersion(Object object) throws HibernateException;
/**
* Create a
|
will
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleInfoTransportActionTests.java
|
{
"start": 1958,
"end": 6565
}
|
/**
 * Unit tests for the ILM info/usage transport actions: feature availability,
 * feature name, and aggregation of per-policy usage statistics out of cluster
 * state.
 */
class ____ extends ESTestCase {
    private ClusterService clusterService;

    @Before
    public void init() throws Exception {
        clusterService = mock(ClusterService.class);
    }

    public void testAvailable() {
        TransportService transportService = MockUtils.setupTransportServiceWithThreadpoolExecutor();
        IndexLifecycleInfoTransportAction featureSet = new IndexLifecycleInfoTransportAction(transportService, mock(ActionFilters.class));
        // ILM reports itself as always available.
        assertThat(featureSet.available(), equalTo(true));
    }

    public void testName() {
        TransportService transportService = MockUtils.setupTransportServiceWithThreadpoolExecutor();
        IndexLifecycleInfoTransportAction featureSet = new IndexLifecycleInfoTransportAction(transportService, mock(ActionFilters.class));
        assertThat(featureSet.name(), equalTo("ilm"));
    }

    public void testUsageStats() throws Exception {
        // Five indices spread over two of the three policies: policy1 governs four
        // indices, policy2 none, policy3 one. The usage action must report matching
        // per-policy index counts.
        Map<String, String> indexPolicies = new HashMap<>();
        List<LifecyclePolicy> policies = new ArrayList<>();
        String policy1Name = randomAlphaOfLength(10);
        String policy2Name = randomAlphaOfLength(10);
        String policy3Name = randomAlphaOfLength(10);
        indexPolicies.put("index_1", policy1Name);
        indexPolicies.put("index_2", policy1Name);
        indexPolicies.put("index_3", policy1Name);
        indexPolicies.put("index_4", policy1Name);
        indexPolicies.put("index_5", policy3Name);
        LifecyclePolicy policy1 = new LifecyclePolicy(policy1Name, Map.of());
        policies.add(policy1);
        PolicyStats policy1Stats = new PolicyStats(Map.of(), 4);
        Map<String, Phase> phases1 = new HashMap<>();
        LifecyclePolicy policy2 = new LifecyclePolicy(policy2Name, phases1);
        policies.add(policy2);
        PolicyStats policy2Stats = new PolicyStats(Map.of(), 0);
        LifecyclePolicy policy3 = new LifecyclePolicy(policy3Name, Map.of());
        policies.add(policy3);
        PolicyStats policy3Stats = new PolicyStats(Map.of(), 1);
        final var projectId = randomProjectIdOrDefault();
        ClusterState clusterState = buildClusterState(projectId, policies, indexPolicies);
        Mockito.when(clusterService.state()).thenReturn(clusterState);
        ThreadPool threadPool = mock(ThreadPool.class);
        TransportService transportService = MockUtils.setupTransportServiceWithThreadpoolExecutor(threadPool);
        var usageAction = new IndexLifecycleUsageTransportAction(
            transportService,
            null,
            threadPool,
            mock(ActionFilters.class),
            TestProjectResolvers.singleProject(projectId)
        );
        PlainActionFuture<XPackUsageFeatureResponse> future = new PlainActionFuture<>();
        // Drive the action directly against the synthetic cluster state.
        usageAction.localClusterStateOperation(null, null, clusterState, future);
        IndexLifecycleFeatureSetUsage ilmUsage = (IndexLifecycleFeatureSetUsage) future.get().getUsage();
        assertThat(ilmUsage.enabled(), equalTo(true));
        assertThat(ilmUsage.available(), equalTo(true));
        List<PolicyStats> policyStatsList = ilmUsage.getPolicyStats();
        assertThat(policyStatsList.size(), equalTo(policies.size()));
        assertTrue(policyStatsList.contains(policy1Stats));
        assertTrue(policyStatsList.contains(policy2Stats));
        assertTrue(policyStatsList.contains(policy3Stats));
    }

    // Builds a cluster state holding the given lifecycle policies plus one index
    // per (indexName -> policyName) entry, each tagged via the lifecycle.name
    // index setting.
    private ClusterState buildClusterState(
        ProjectId projectId,
        List<LifecyclePolicy> lifecyclePolicies,
        Map<String, String> indexPolicies
    ) {
        Map<String, LifecyclePolicyMetadata> lifecyclePolicyMetadatasMap = lifecyclePolicies.stream()
            .map(p -> new LifecyclePolicyMetadata(p, Map.of(), 1, 0L))
            .collect(Collectors.toMap(LifecyclePolicyMetadata::getName, Function.identity()));
        IndexLifecycleMetadata indexLifecycleMetadata = new IndexLifecycleMetadata(lifecyclePolicyMetadatasMap, OperationMode.RUNNING);
        ProjectMetadata.Builder project = ProjectMetadata.builder(projectId).putCustom(IndexLifecycleMetadata.TYPE, indexLifecycleMetadata);
        indexPolicies.forEach((indexName, policyName) -> {
            Settings indexSettings = indexSettings(IndexVersion.current(), 1, 0).put(LifecycleSettings.LIFECYCLE_NAME, policyName).build();
            IndexMetadata.Builder indexMetadata = IndexMetadata.builder(indexName).settings(indexSettings);
            project.put(indexMetadata);
        });
        return ClusterState.builder(new ClusterName("my_cluster")).putProjectMetadata(project).build();
    }
}
|
IndexLifecycleInfoTransportActionTests
|
java
|
apache__dubbo
|
dubbo-rpc/dubbo-rpc-injvm/src/test/java/org/apache/dubbo/rpc/protocol/injvm/InjvmDeepCopyTest.java
|
{
"start": 4291,
"end": 4981
}
|
/**
 * Test service that records the request and response instances it observes so
 * the in-JVM deep-copy test can compare them against the caller's objects.
 */
class ____ implements DemoInterface {
    private AtomicReference<Data> requestReference;
    private AtomicReference<Data> responseReference;

    public Demo(AtomicReference<Data> requestReference, AtomicReference<Data> responseReference) {
        this.requestReference = requestReference;
        this.responseReference = responseReference;
    }

    @Override
    public Data call(Data obj) {
        // Capture exactly what arrived at the service side.
        requestReference.set(obj);
        // Echo semantics: null in -> null out, otherwise a fresh Data instance.
        Data result = (obj == null) ? null : new Data();
        responseReference.set(result);
        return result;
    }
}
private static
|
Demo
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java
|
{
"start": 1501,
"end": 4737
}
|
/**
 * REST handler for {@code POST /{index}/_update/{id}}: translates URL
 * parameters and the request body into an {@link UpdateRequest} and dispatches
 * it via the node client.
 */
class ____ extends BaseRestHandler {
    @Override
    public List<Route> routes() {
        return List.of(new Route(POST, "/{index}/_update/{id}"));
    }

    @Override
    public String getName() {
        return "document_update_action";
    }

    @Override
    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
        UpdateRequest updateRequest = new UpdateRequest(request.param("index"), request.param("id"));
        updateRequest.routing(request.param("routing"));
        updateRequest.timeout(request.paramAsTime("timeout", updateRequest.timeout()));
        updateRequest.setRefreshPolicy(request.param("refresh"));
        String waitForActiveShards = request.param("wait_for_active_shards");
        if (waitForActiveShards != null) {
            updateRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards));
        }
        updateRequest.docAsUpsert(request.paramAsBoolean("doc_as_upsert", updateRequest.docAsUpsert()));
        FetchSourceContext fetchSourceContext = FetchSourceContext.parseFromRestRequest(request);
        if (fetchSourceContext != null) {
            updateRequest.fetchSource(fetchSourceContext);
        }
        updateRequest.retryOnConflict(request.paramAsInt("retry_on_conflict", updateRequest.retryOnConflict()));
        // Internal versioning is rejected for update requests; clients must use
        // if_seq_no / if_primary_term for optimistic concurrency control instead.
        if (request.hasParam("version") || request.hasParam("version_type")) {
            final ActionRequestValidationException versioningError = new ActionRequestValidationException();
            versioningError.addValidationError(
                "internal versioning can not be used for optimistic concurrency control. "
                    + "Please use `if_seq_no` and `if_primary_term` instead"
            );
            throw versioningError;
        }
        updateRequest.setIfSeqNo(request.paramAsLong("if_seq_no", updateRequest.ifSeqNo()));
        updateRequest.setIfPrimaryTerm(request.paramAsLong("if_primary_term", updateRequest.ifPrimaryTerm()));
        updateRequest.setRequireAlias(request.paramAsBoolean(DocWriteRequest.REQUIRE_ALIAS, updateRequest.isRequireAlias()));
        request.applyContentParser(RestUtils.getIncludeSourceOnError(request), parser -> {
            updateRequest.fromXContent(parser);
            // Propagate routing/version URL parameters onto the embedded upsert
            // and doc index requests that were parsed from the body, if present.
            IndexRequest upsertRequest = updateRequest.upsertRequest();
            if (upsertRequest != null) {
                upsertRequest.routing(request.param("routing"));
                upsertRequest.version(RestActions.parseVersion(request));
                upsertRequest.versionType(VersionType.fromString(request.param("version_type"), upsertRequest.versionType()));
            }
            IndexRequest doc = updateRequest.doc();
            if (doc != null) {
                doc.routing(request.param("routing"));
                doc.version(RestActions.parseVersion(request));
                doc.versionType(VersionType.fromString(request.param("version_type"), doc.versionType()));
            }
        });
        return channel -> client.update(
            updateRequest,
            new RestToXContentListener<>(channel, UpdateResponse::status, r -> r.getLocation(updateRequest.routing()))
        );
    }
}
|
RestUpdateAction
|
java
|
spring-projects__spring-boot
|
buildpack/spring-boot-buildpack-platform/src/main/java/org/springframework/boot/buildpack/platform/build/BuildpackLayersMetadata.java
|
{
"start": 1347,
"end": 3669
}
|
class ____ extends MappedObject {
private static final String LABEL_NAME = "io.buildpacks.buildpack.layers";
private final Buildpacks buildpacks;
// Binds the JSON node via MappedObject and eagerly builds the buildpack index
// used by getBuildpack(); all factory methods funnel through here.
private BuildpackLayersMetadata(JsonNode node) {
    super(node, MethodHandles.lookup());
    this.buildpacks = Buildpacks.fromJson(getNode());
}
/**
 * Look up the recorded layer details of a buildpack.
 * @param id the buildpack ID
 * @param version the buildpack version (may be {@code null})
 * @return the details, or {@code null} when no buildpack with the given ID and
 * version exists in the metadata
 */
@Nullable BuildpackLayerDetails getBuildpack(String id, @Nullable String version) {
    BuildpackLayerDetails details = this.buildpacks.getBuildpack(id, version);
    return details;
}
/**
 * Create a {@link BuildpackLayersMetadata} from an image.
 * @param image the source image
 * @return the buildpack layers metadata
 * @throws IOException on IO error
 */
static BuildpackLayersMetadata fromImage(Image image) throws IOException {
    Assert.notNull(image, "'image' must not be null");
    ImageConfig config = image.getConfig();
    return fromImageConfig(config);
}
/**
 * Create a {@link BuildpackLayersMetadata} from image config.
 * @param imageConfig the source image config
 * @return the buildpack layers metadata
 * @throws IOException on IO error
 */
static BuildpackLayersMetadata fromImageConfig(ImageConfig imageConfig) throws IOException {
    Assert.notNull(imageConfig, "'imageConfig' must not be null");
    String json = imageConfig.getLabels().get(LABEL_NAME);
    // Fail fast, listing the label names that *are* present to aid diagnosis.
    Assert.state(json != null, () -> "No '" + LABEL_NAME + "' label found in image config labels '"
            + StringUtils.collectionToCommaDelimitedString(imageConfig.getLabels().keySet()) + "'");
    return fromJson(json);
}
/**
 * Create a {@link BuildpackLayersMetadata} from JSON text.
 * @param json the source JSON
 * @return the buildpack layers metadata
 * @throws IOException on IO error
 */
static BuildpackLayersMetadata fromJson(String json) throws IOException {
    JsonNode root = SharedJsonMapper.get().readTree(json);
    return fromJson(root);
}
/**
 * Create a {@link BuildpackLayersMetadata} from JSON.
 * @param node the source JSON
 * @return the buildpack layers metadata
 */
static BuildpackLayersMetadata fromJson(JsonNode node) {
    // All factory overloads converge on this constructor call.
    return new BuildpackLayersMetadata(node);
}
private static final
|
BuildpackLayersMetadata
|
java
|
quarkusio__quarkus
|
extensions/websockets-next/deployment/src/test/java/io/quarkus/websockets/next/test/args/OnOpenInvalidArgumentTest.java
|
{
"start": 841,
"end": 940
}
|
/**
 * Deliberately invalid endpoint fixture: {@code @OnOpen} takes a
 * {@link CloseReason}, which (per the surrounding test's name) is presumably an
 * unsupported argument type the deployment is expected to reject.
 */
class ____ {
    @OnOpen
    void open(CloseReason unsupported) {
        // Intentionally empty; deployment should fail validation before this runs.
    }
}
}
|
Endpoint
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/ThrowingConsumer_accept_Test.java
|
{
"start": 831,
"end": 2178
}
|
/**
 * Verifies the exception-translation contract of {@code ThrowingConsumer#accept}:
 * checked exceptions are wrapped in a RuntimeException, while unchecked
 * exceptions and assertion errors propagate unchanged.
 */
class ____ {

  @Test
  void should_rethrow_checked_exception_as_runtime_exception() {
    // GIVEN
    IOException cause = new IOException("boom!");
    ThrowingConsumer<?> consumer = throwingConsumer(cause);
    // WHEN
    Throwable thrown = catchThrowable(() -> consumer.accept(null));
    // THEN
    then(thrown).isExactlyInstanceOf(RuntimeException.class)
                .hasCause(cause);
  }

  @Test
  void should_rethrow_runtime_exception_as_is() {
    // GIVEN
    RuntimeException original = new RuntimeException("boom!");
    ThrowingConsumer<?> consumer = throwingConsumer(original);
    // WHEN
    Throwable thrown = catchThrowable(() -> consumer.accept(null));
    // THEN
    then(thrown).isSameAs(original);
  }

  @Test
  void should_rethrow_assertion_error_as_is() {
    // GIVEN
    AssertionError original = new AssertionError("boom!");
    ThrowingConsumer<?> consumer = throwingConsumer(original);
    // WHEN
    Throwable thrown = catchThrowable(() -> consumer.accept(null));
    // THEN
    then(thrown).isSameAs(original);
  }

  // Builds a consumer that always throws the given throwable.
  private static ThrowingConsumer<?> throwingConsumer(Throwable throwable) {
    return value -> {
      throw throwable;
    };
  }
}
|
ThrowingConsumer_accept_Test
|
java
|
hibernate__hibernate-orm
|
hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/collection/embeddable/NullPointerExceptionTest.java
|
{
"start": 855,
"end": 3609
}
|
/**
 * Envers regression test for an embeddable-element collection: three revisions
 * mutate a {@code Product}'s item list (add, add-with-type, remove) and the
 * audit reader must reconstruct each revision's collection correctly.
 */
class ____ {
    // Id of the single audited Product created in revision 1.
    private Integer productId;

    @BeforeClassTemplate
    public void initData(EntityManagerFactoryScope scope) {
        // Revision 1
        this.productId = scope.fromTransaction( entityManager -> {
            Product product = new Product( 1 , "Test" );
            product.getItems().add( new Item( "bread", null ) );
            entityManager.persist( product );
            return product.getId();
        } );
        // Revision 2
        scope.inTransaction( entityManager -> {
            Type type = new Type( 2, "T2" );
            entityManager.persist( type );
            Product product = entityManager.find( Product.class, productId );
            product.getItems().add( new Item( "bread2", type ) );
            entityManager.merge( product );
        } );
        // Revision 3
        scope.inTransaction( entityManager -> {
            Product product = entityManager.find( Product.class, productId );
            product.getItems().remove( 0 );
            entityManager.merge( product );
        } );
    }

    @Test
    public void testRevisionCounts(EntityManagerFactoryScope scope) {
        scope.inEntityManager( em -> {
            final var auditReader = AuditReaderFactory.get( em );
            // Each transaction above must have produced exactly one revision,
            // and the per-revision item counts must track the add/add/remove.
            assertEquals( Arrays.asList( 1, 2, 3 ), auditReader.getRevisions( Product.class, productId ) );
            assertEquals( 1, auditReader.find( Product.class, productId, 1 ).getItems().size() );
            assertEquals( 2, auditReader.find( Product.class, productId, 2 ).getItems().size() );
            assertEquals( 1, auditReader.find( Product.class, productId, 3 ).getItems().size() );
        } );
    }

    @Test
    public void testRevision1(EntityManagerFactoryScope scope) {
        scope.inEntityManager( em -> {
            final var auditReader = AuditReaderFactory.get( em );
            // Revision 1: just the initial "bread" item (with no type).
            final Product product = auditReader.find( Product.class, productId, 1 );
            assertEquals( 1, product.getItems().size() );
            assertEquals( "bread", product.getItems().get( 0 ).getName() );
        } );
    }

    @Test
    public void testRevision2(EntityManagerFactoryScope scope) {
        scope.inEntityManager( em -> {
            final var auditReader = AuditReaderFactory.get( em );
            // Revision 2: "bread" plus "bread2" with its associated Type.
            final Product product = auditReader.find( Product.class, productId, 2 );
            assertEquals( 2, product.getItems().size() );
            assertEquals( "bread", product.getItems().get( 0 ).getName() );
            assertEquals( "bread2", product.getItems().get( 1 ).getName() );
            assertEquals( new Type( 2, "T2" ), product.getItems().get( 1 ).getType() );
        } );
    }

    @Test
    public void testRevision3(EntityManagerFactoryScope scope) {
        scope.inEntityManager( em -> {
            final var auditReader = AuditReaderFactory.get( em );
            // Revision 3: the first item was removed, leaving only "bread2".
            final Product product = auditReader.find( Product.class, productId, 3 );
            assertEquals( 1, product.getItems().size() );
            assertEquals( "bread2", product.getItems().get( 0 ).getName() );
            assertEquals( new Type( 2, "T2" ), product.getItems().get( 0 ).getType() );
        } );
    }
}
|
NullPointerExceptionTest
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java
|
{
"start": 532,
"end": 781
}
|
/** Serialization round-trip tests for the {@code avg} aggregation builder. */
class ____ extends AbstractNumericMetricTestCase<AvgAggregationBuilder> {

    /** Builds a randomly named avg aggregation for the framework to exercise. */
    @Override
    protected AvgAggregationBuilder doCreateTestAggregatorFactory() {
        String name = randomAlphaOfLengthBetween(3, 10);
        return new AvgAggregationBuilder(name);
    }
}
|
AvgTests
|
java
|
apache__camel
|
components/camel-hl7/src/test/java/org/apache/camel/component/hl7/HL7MLLPCodecBoundaryTest.java
|
{
"start": 1404,
"end": 2961
}
|
/**
 * Exercises the HL7 MLLP codec with a 1022-byte payload, a size chosen to sit
 * on an I/O buffer boundary, and checks the message round-trips intact.
 */
class ____ extends HL7TestSupport {

    @BindToRegistry("hl7codec")
    public HL7MLLPCodec addHl7MllpCodec() {
        HL7MLLPCodec codec = new HL7MLLPCodec();
        codec.setCharset("iso-8859-1");
        return codec;
    }

    @Override
    protected RouteBuilder createRouteBuilder() {
        return new RouteBuilder() {
            public void configure() {
                from("mina:tcp://127.0.0.1:" + getPort() + "?sync=true&codec=#hl7codec").process(new Processor() {
                    public void process(Exchange exchange) {
                        // check presence of correct message type
                        exchange.getIn().getBody(MDM_T02.class);
                    }
                }).to("mock:result");
            }
        };
    }

    @Test
    public void testSendHL7Message() throws Exception {
        // Fix: the reader was previously never closed (resource leak) and the
        // message was assembled with String concatenation in a loop; use
        // try-with-resources plus a StringBuilder instead.
        // NOTE(review): InputStreamReader still uses the platform default
        // charset, as before — confirm the fixture file is ASCII-safe.
        StringBuilder buffer = new StringBuilder();
        try (BufferedReader in = IOHelper.buffered(
                new InputStreamReader(getClass().getResourceAsStream("/mdm_t02-1022.txt")))) {
            String line;
            while ((line = in.readLine()) != null) {
                buffer.append(line).append('\r');
            }
        }
        // Drop the trailing segment separator, as the original substring() did.
        buffer.setLength(buffer.length() - 1);
        String message = buffer.toString();
        assertEquals(1022, message.length());

        MockEndpoint mockEndpoint = getMockEndpoint("mock:result");
        mockEndpoint.expectedMessageCount(1);

        template.requestBody("mina:tcp://127.0.0.1:" + getPort() + "?sync=true&codec=#hl7codec", message);

        mockEndpoint.assertIsSatisfied();
    }
}
|
HL7MLLPCodecBoundaryTest
|
java
|
lettuce-io__lettuce-core
|
src/test/jmh/io/lettuce/core/JmhMain.java
|
{
"start": 426,
"end": 1214
}
|
/** Launcher for the project's JMH benchmark suites. */
class ____ {

    /** Entry point — by default runs only the RedisClient benchmarks. */
    public static void main(String... args) throws RunnerException {
        runRedisClientBenchmark();
    }

    /** Runs every discovered benchmark (kept around for ad-hoc use). */
    private static void runBenchmarks() throws RunnerException {
        new Runner(prepareOptions()
                .mode(Mode.AverageTime)
                .timeUnit(TimeUnit.NANOSECONDS)
                .build())
            .run();
    }

    /** Runs only benchmarks matching {@code .RedisClientBenchmark.*}. */
    private static void runRedisClientBenchmark() throws RunnerException {
        new Runner(prepareOptions()
                .mode(Mode.AverageTime)
                .timeUnit(TimeUnit.NANOSECONDS)
                .include(".RedisClientBenchmark.*")
                .build())
            .run();
    }

    /** Shared defaults: 1 fork, 5 warmups, 5 measurements, 1 thread, 2s timeout. */
    private static ChainedOptionsBuilder prepareOptions() {
        return new OptionsBuilder()
                .forks(1)
                .warmupIterations(5)
                .threads(1)
                .measurementIterations(5)
                .timeout(TimeValue.seconds(2))
                .output("benchmark.log");
    }
}
|
JmhMain
|
java
|
spring-projects__spring-data-jpa
|
spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/sample/RedeclaringRepositoryMethodsRepository.java
|
{
"start": 1143,
"end": 1529
}
|
/**
 * Repository that re-declares inherited CRUD methods with restricting
 * {@link Query} annotations, so tests can verify the redeclared query is used
 * instead of the inherited implementation.
 */
interface ____ extends CrudRepository<User, Integer> {

    /**
     * Should not find any users at all (the query can never match an ID of -1).
     */
    @Override
    @Query("SELECT u FROM User u where u.id = -1")
    List<User> findAll();

    /**
     * Should only find users with the firstname 'Oliver'.
     */
    @Query("SELECT u FROM User u where u.firstname = 'Oliver'")
    Page<User> findAll(Pageable page);
}
|
RedeclaringRepositoryMethodsRepository
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/schemavalidation/MariaDbJsonColumnValidationTest.java
|
{
"start": 1010,
"end": 2035
}
|
class ____ {
// Recreates the Foo table with a hand-written JSON column DDL so schema
// validation runs against a known MariaDB table shape.
@BeforeEach
void setUp(ServiceRegistryScope registryScope) {
    JdbcUtils.withConnection( registryScope.getRegistry(), connection -> {
        try (var statement = connection.createStatement()) {
            try {
                // Best-effort cleanup left over from a previous failed run.
                statement.execute( "drop table Foo" );
            }
            catch (Exception ignore) {
                // Table may not exist yet; that is fine.
            }
            statement.execute(
                    """
                    create table Foo (
                        id integer not null,
                        bigDecimals json,
                        primary key (id)
                    ) engine=InnoDB
                    """
            );
        }
    } );
}
// Drops the manually created table so subsequent tests start clean.
@AfterEach
void tearDown(ServiceRegistryScope registryScope) {
    JdbcUtils.withConnection( registryScope.getRegistry(), connection -> {
        try (var statement = connection.createStatement()) {
            statement.execute( "drop table Foo" );
        }
    } );
}
// The mapped Foo entity must validate cleanly against the hand-written
// MariaDB DDL created in setUp().
@Test
@DomainModel(annotatedClasses = Foo.class)
public void testSchemaValidation(DomainModelScope modelScope) {
    new SchemaValidator().validate( modelScope.getDomainModel() );
}
@Entity(name = "Foo")
@Table(name = "Foo")
public static
|
MariaDbJsonColumnValidationTest
|
java
|
quarkusio__quarkus
|
extensions/scheduler/deployment/src/test/java/io/quarkus/scheduler/test/InvalidScheduledMethodTest.java
|
{
"start": 658,
"end": 815
}
|
/**
 * Fixture with an intentionally invalid {@link Scheduled} method — judging by
 * the enclosing test's name, the non-void return type is presumably what makes
 * it invalid and should cause deployment to fail.
 */
class ____ {
    @Scheduled(cron = "0/1 * * * * ?")
    String wrongMethod() {
        return "";
    }
}
}
|
BeanWithInvalidScheduledMethod
|
java
|
apache__kafka
|
connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectorConfigTest.java
|
{
"start": 13042,
"end": 17052
}
|
class ____ abstract and cannot be created"));
}
// Builds a ConnectorConfig from the given props, materializes its single
// transformation stage, and checks predicate gating on two records: one that
// presumably does not satisfy the predicate (DUMMY_RECORD) and one built to
// satisfy it (partition 84). The transform forces partition 42 when it applies.
private void assertTransformationStageWithPredicate(Map<String, String> props, boolean expectedNegated) {
    final ConnectorConfig config = new ConnectorConfig(MOCK_PLUGINS, props);
    final List<TransformationStage<SinkRecord>> transformationStages = config.transformationStages(MOCK_PLUGINS, CONNECTOR_TASK_ID, METRICS);
    assertEquals(1, transformationStages.size());
    TransformationStage<SinkRecord> stage = transformationStages.get(0);
    // Non-matching record: transformed only when the predicate is negated.
    assertEquals(expectedNegated ? 42 : 0, stage.apply(DUMMY_RECORD).kafkaPartition().intValue());
    SinkRecord matchingRecord = DUMMY_RECORD.newRecord(null, 84, null, null, null, null, 0L);
    // Matching record: transformed only when the predicate is NOT negated.
    assertEquals(expectedNegated ? 84 : 42, stage.apply(matchingRecord).kafkaPartition().intValue());
    assertEquals(SimpleTransformation.class, stage.transformClass());
    stage.close();
}
/**
 * A predicate whose "int" config (79) violates its declared minimum (80) must
 * make connector-config validation fail with a {@link ConfigException}.
 */
@Test
public void misconfiguredPredicate() {
    Map<String, String> props = new HashMap<>();
    props.put("name", "test");
    props.put("connector.class", TestConnector.class.getName());
    props.put("transforms", "a");
    props.put("transforms.a.type", SimpleTransformation.class.getName());
    props.put("transforms.a.magic.number", "42");
    props.put("transforms.a.predicate", "my-pred");
    props.put("transforms.a.negate", "true");
    props.put("predicates", "my-pred");
    props.put("predicates.my-pred.type", TestPredicate.class.getName());
    props.put("predicates.my-pred.int", "79");
    // Consistency fix: use assertThrows like the sibling tests in this class
    // instead of the try/fail/catch idiom.
    ConfigException e = assertThrows(ConfigException.class, () -> new ConnectorConfig(MOCK_PLUGINS, props));
    assertTrue(e.getMessage().contains("Value must be at least 80"));
}
/**
 * Omitting the "predicates" alias list, while still configuring the predicate
 * via per-alias keys, must not prevent the config from being constructed.
 */
@Test
public void missingPredicateAliasProperty() {
    Map<String, String> config = new HashMap<>();
    config.put("name", "test");
    config.put("connector.class", TestConnector.class.getName());
    config.put("transforms", "a");
    config.put("transforms.a.type", SimpleTransformation.class.getName());
    config.put("transforms.a.magic.number", "42");
    config.put("transforms.a.predicate", "my-pred");
    // The "predicates" alias list is technically not needed here.
    config.put("predicates.my-pred.type", TestPredicate.class.getName());
    config.put("predicates.my-pred.int", "84");
    // Must construct without throwing.
    new ConnectorConfig(MOCK_PLUGINS, config);
}
// Referencing a predicate alias whose "type" (and other config) is missing
// must fail validation with a "Not a Predicate" error.
@Test
public void missingPredicateConfig() {
    Map<String, String> props = new HashMap<>();
    props.put("name", "test");
    props.put("connector.class", TestConnector.class.getName());
    props.put("transforms", "a");
    props.put("transforms.a.type", SimpleTransformation.class.getName());
    props.put("transforms.a.magic.number", "42");
    props.put("transforms.a.predicate", "my-pred");
    props.put("predicates", "my-pred");
    // Deliberately omitted: the predicate's type and int config.
    //props.put("predicates.my-pred.type", TestPredicate.class.getName());
    //props.put("predicates.my-pred.int", "84");
    ConfigException e = assertThrows(ConfigException.class, () -> new ConnectorConfig(MOCK_PLUGINS, props));
    assertTrue(e.getMessage().contains("Not a Predicate"));
}
// Setting "negate" on a transform that declares no predicate must be rejected,
// since there is nothing to negate.
@Test
public void negatedButNoPredicate() {
    Map<String, String> props = new HashMap<>();
    props.put("name", "test");
    props.put("connector.class", TestConnector.class.getName());
    props.put("transforms", "a");
    props.put("transforms.a.type", SimpleTransformation.class.getName());
    props.put("transforms.a.magic.number", "42");
    props.put("transforms.a.negate", "true");
    ConfigException e = assertThrows(ConfigException.class, () -> new ConnectorConfig(MOCK_PLUGINS, props));
    assertTrue(e.getMessage().contains("there is no config 'transforms.a.predicate' defining a predicate to be negated"));
}
public static
|
is
|
java
|
google__auto
|
common/src/main/java/com/google/auto/common/MoreStreams.java
|
{
"start": 1465,
"end": 3105
}
|
/**
 * Stream {@link Collector} factories for Guava immutable collections.
 */
class ____ {
  /** Returns a collector for an {@link ImmutableList}. */
  public static <T> Collector<T, ?, ImmutableList<T>> toImmutableList() {
    return collectingAndThen(toList(), ImmutableList::copyOf);
  }

  /** Returns a collector for an {@link ImmutableSet}. */
  public static <T> Collector<T, ?, ImmutableSet<T>> toImmutableSet() {
    return collectingAndThen(toList(), ImmutableSet::copyOf);
  }

  /**
   * Returns a collector for an {@link ImmutableMap}.
   * NOTE(review): presumably duplicate keys cause the builder's build() to
   * fail — confirm against the Guava ImmutableMap.Builder contract.
   */
  public static <T, K, V> Collector<T, ?, ImmutableMap<K, V>> toImmutableMap(
      Function<? super T, K> keyMapper, Function<? super T, V> valueMapper) {
    return Collectors.mapping(
        // Map each element to a key/value entry, then accumulate into a builder.
        value -> Maps.immutableEntry(keyMapper.apply(value), valueMapper.apply(value)),
        Collector.of(
            ImmutableMap::builder,
            (ImmutableMap.Builder<K, V> builder, Map.Entry<K, V> entry) -> builder.put(entry),
            // Combiner for parallel streams: merge the right builder's entries.
            (left, right) -> left.putAll(right.build()),
            ImmutableMap.Builder::build));
  }

  /** Returns a collector for an {@link ImmutableBiMap}. */
  public static <T, K, V> Collector<T, ?, ImmutableBiMap<K, V>> toImmutableBiMap(
      Function<? super T, K> keyMapper, Function<? super T, V> valueMapper) {
    return Collectors.mapping(
        value -> Maps.immutableEntry(keyMapper.apply(value), valueMapper.apply(value)),
        Collector.of(
            ImmutableBiMap::builder,
            (ImmutableBiMap.Builder<K, V> builder, Map.Entry<K, V> entry) -> builder.put(entry),
            (left, right) -> left.putAll(right.build()),
            ImmutableBiMap.Builder::build));
  }

  // Utility class: no instances.
  private MoreStreams() {}
}
|
MoreStreams
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/querydsl/query/TermsQuery.java
|
{
"start": 531,
"end": 1440
}
|
/**
 * Leaf query matching documents whose {@code term} field equals any of the
 * given {@code values}; renders to an Elasticsearch {@code terms} query.
 */
class ____ extends LeafQuery {

    private final String term;
    private final Set<Object> values;

    public TermsQuery(Source source, String term, Set<Object> values) {
        super(source);
        this.term = term;
        this.values = values;
    }

    @Override
    public QueryBuilder asBuilder() {
        return termsQuery(term, values);
    }

    @Override
    public int hashCode() {
        return Objects.hash(term, values);
    }

    @Override
    public boolean equals(Object obj) {
        // Guard clauses instead of one compound condition; same strict
        // class-equality semantics as before (subclasses never compare equal).
        if (this == obj) {
            return true;
        }
        if (obj == null) {
            return false;
        }
        if (getClass() != obj.getClass()) {
            return false;
        }
        TermsQuery that = (TermsQuery) obj;
        return Objects.equals(term, that.term) && Objects.equals(values, that.values);
    }

    @Override
    protected String innerToString() {
        return term + ":" + values;
    }
}
|
TermsQuery
|
java
|
hibernate__hibernate-orm
|
hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/inheritance/joined/notownedrelation/PersonalContact.java
|
{
"start": 280,
"end": 640
}
|
/**
 * Contact subtype adding a first name; part of the joined-inheritance
 * not-owned-relation Envers test model.
 */
class ____ extends Contact {
    private String firstname;

    // No-arg constructor required by the persistence provider.
    public PersonalContact() {
    }

    public PersonalContact(Long id, String email, String firstname) {
        super( id, email );
        this.firstname = firstname;
    }

    public String getFirstname() {
        return firstname;
    }

    public void setFirstname(String firstname) {
        this.firstname = firstname;
    }
}
|
PersonalContact
|
java
|
google__guava
|
android/guava/src/com/google/common/util/concurrent/AbstractFutureState.java
|
{
"start": 2282,
"end": 11965
}
|
class ____<V extends @Nullable Object> extends InternalFutureFailureAccess
implements ListenableFuture<V> {
/**
 * Performs a {@linkplain java.lang.invoke.VarHandle#compareAndSet compare-and-set} operation on
 * {@link #listenersField}.
 */
final boolean casListeners(@Nullable Listener expect, Listener update) {
  // Delegates to the platform-selected atomics strategy (exact set of
  // strategies not visible in this chunk).
  return ATOMIC_HELPER.casListeners(this, expect, update);
}
/**
 * Performs a {@linkplain java.lang.invoke.VarHandle#getAndSet get-and-set} operation on {@link
 * #listenersField}.
 */
final @Nullable Listener gasListeners(Listener update) {
  // Returns the previous head of the listener stack.
  return ATOMIC_HELPER.gasListeners(this, update);
}
/**
 * Performs a {@linkplain java.lang.invoke.VarHandle#compareAndSet compare-and-set} operation on
 * {@link #valueField} of {@code future}.
 */
static boolean casValue(AbstractFutureState<?> future, @Nullable Object expect, Object update) {
  // Static form: operates on an arbitrary future instance, not just `this`.
  return ATOMIC_HELPER.casValue(future, expect, update);
}
  /** Returns the value of the future, using a volatile read. */
  final @Nullable Object value() {
    return valueField;
  }
  /** Returns the head of the listener stack, using a volatile read. */
  final @Nullable Listener listeners() {
    return listenersField;
  }
  /** Releases all threads in the {@link #waitersField} list, and clears the list. */
  final void releaseWaiters() {
    // Atomically swap in the TOMBSTONE so no further waiters can enqueue,
    // then unpark every thread that was on the detached stack.
    Waiter head = gasWaiters(Waiter.TOMBSTONE);
    for (Waiter currentWaiter = head; currentWaiter != null; currentWaiter = currentWaiter.next) {
      currentWaiter.unpark();
    }
  }
// Gets and Timed Gets
//
// * Be responsive to interruption
// * Don't create Waiter nodes if you aren't going to park, this helps reduce contention on
// waitersField.
// * Future completion is defined by when #valueField becomes non-null/non DelegatingToFuture
// * Future completion can be observed if the waitersField field contains a TOMBSTONE
// Timed Get
// There are a few design constraints to consider
// * We want to be responsive to small timeouts, unpark() has non trivial latency overheads (I
// have observed 12 micros on 64-bit linux systems to wake up a parked thread). So if the
// timeout is small we shouldn't park(). This needs to be traded off with the cpu overhead of
// spinning, so we use SPIN_THRESHOLD_NANOS which is what AbstractQueuedSynchronizer uses for
// similar purposes.
// * We want to behave reasonably for timeouts of 0
// * We are more responsive to completion than timeouts. This is because parkNanos depends on
// system scheduling and as such we could either miss our deadline, or unpark() could be delayed
// so that it looks like we timed out even though we didn't. For comparison FutureTask respects
// completion preferably and AQS is non-deterministic (depends on where in the queue the waiter
// is). If we wanted to be strict about it, we could store the unpark() time in the Waiter node
// and we could use that to make a decision about whether or not we timed out prior to being
// unparked.
@SuppressWarnings({
"LabelledBreakTarget", // TODO(b/345814817): Maybe fix?
"nullness", // TODO(b/147136275): Remove once our checker understands & and |.
})
@ParametricNullness
final V blockingGet(long timeout, TimeUnit unit)
throws InterruptedException, TimeoutException, ExecutionException {
// NOTE: if timeout < 0, remainingNanos will be < 0 and we will fall into the while(true) loop
// at the bottom and throw a timeoutexception.
long timeoutNanos = unit.toNanos(timeout); // we rely on the implicit null check on unit.
long remainingNanos = timeoutNanos;
if (Thread.interrupted()) {
throw new InterruptedException();
}
@RetainedLocalRef Object localValue = valueField;
if (localValue != null & notInstanceOfDelegatingToFuture(localValue)) {
return getDoneValue(localValue);
}
// we delay calling nanoTime until we know we will need to either park or spin
long endNanos = remainingNanos > 0 ? System.nanoTime() + remainingNanos : 0;
long_wait_loop:
if (remainingNanos >= SPIN_THRESHOLD_NANOS) {
Waiter oldHead = waitersField;
if (oldHead != Waiter.TOMBSTONE) {
Waiter node = new Waiter();
do {
node.setNext(oldHead);
if (casWaiters(oldHead, node)) {
while (true) {
OverflowAvoidingLockSupport.parkNanos(this, remainingNanos);
// Check interruption first, if we woke up due to interruption we need to honor that.
if (Thread.interrupted()) {
removeWaiter(node);
throw new InterruptedException();
}
// Otherwise re-read and check doneness. If we loop then it must have been a spurious
// wakeup
localValue = valueField;
if (localValue != null & notInstanceOfDelegatingToFuture(localValue)) {
return getDoneValue(localValue);
}
// timed out?
remainingNanos = endNanos - System.nanoTime();
if (remainingNanos < SPIN_THRESHOLD_NANOS) {
// Remove the waiter, one way or another we are done parking this thread.
removeWaiter(node);
break long_wait_loop; // jump down to the busy wait loop
}
}
}
oldHead = waitersField; // re-read and loop.
} while (oldHead != Waiter.TOMBSTONE);
}
// re-read valueField, if we get here then we must have observed a TOMBSTONE while trying to
// add a waiter.
// requireNonNull is safe because valueField is always set before TOMBSTONE.
return getDoneValue(requireNonNull(valueField));
}
// If we get here then we have remainingNanos < SPIN_THRESHOLD_NANOS and there is no node on the
// waiters list
while (remainingNanos > 0) {
localValue = valueField;
if (localValue != null & notInstanceOfDelegatingToFuture(localValue)) {
return getDoneValue(localValue);
}
if (Thread.interrupted()) {
throw new InterruptedException();
}
remainingNanos = endNanos - System.nanoTime();
}
String futureToString = toString();
String unitString = unit.toString().toLowerCase(Locale.ROOT);
String message = "Waited " + timeout + " " + unit.toString().toLowerCase(Locale.ROOT);
// Only report scheduling delay if larger than our spin threshold - otherwise it's just noise
if (remainingNanos + SPIN_THRESHOLD_NANOS < 0) {
// We over-waited for our timeout.
message += " (plus ";
long overWaitNanos = -remainingNanos;
long overWaitUnits = unit.convert(overWaitNanos, NANOSECONDS);
long overWaitLeftoverNanos = overWaitNanos - unit.toNanos(overWaitUnits);
boolean shouldShowExtraNanos =
overWaitUnits == 0 || overWaitLeftoverNanos > SPIN_THRESHOLD_NANOS;
if (overWaitUnits > 0) {
message += overWaitUnits + " " + unitString;
if (shouldShowExtraNanos) {
message += ",";
}
message += " ";
}
if (shouldShowExtraNanos) {
message += overWaitLeftoverNanos + " nanoseconds ";
}
message += "delay)";
}
// It's confusing to see a completed future in a timeout message; if isDone() returns false,
// then we know it must have given a pending toString value earlier. If not, then the future
// completed after the timeout expired, and the message might be success.
if (isDone()) {
throw new TimeoutException(message + " but future completed as timeout expired");
}
throw new TimeoutException(message + " for " + futureToString);
}
  /**
   * Implementation of the untimed get: returns immediately when already complete, otherwise pushes
   * a {@code Waiter} node and parks until completion or interruption.
   */
  @ParametricNullness
  @SuppressWarnings("nullness") // TODO(b/147136275): Remove once our checker understands & and |.
  final V blockingGet() throws InterruptedException, ExecutionException {
    if (Thread.interrupted()) {
      throw new InterruptedException();
    }
    @RetainedLocalRef Object localValue = valueField;
    if (localValue != null & notInstanceOfDelegatingToFuture(localValue)) {
      return getDoneValue(localValue);
    }
    Waiter oldHead = waitersField;
    if (oldHead != Waiter.TOMBSTONE) {
      Waiter node = new Waiter();
      do {
        node.setNext(oldHead);
        if (casWaiters(oldHead, node)) {
          // we are on the stack, now wait for completion.
          while (true) {
            LockSupport.park(this);
            // Check interruption first, if we woke up due to interruption we need to honor that.
            if (Thread.interrupted()) {
              removeWaiter(node);
              throw new InterruptedException();
            }
            // Otherwise re-read and check doneness. If we loop then it must have been a spurious
            // wakeup
            localValue = valueField;
            if (localValue != null & notInstanceOfDelegatingToFuture(localValue)) {
              return getDoneValue(localValue);
            }
          }
        }
        oldHead = waitersField; // re-read and loop.
      } while (oldHead != Waiter.TOMBSTONE);
    }
    // re-read valueField, if we get here then we must have observed a TOMBSTONE while trying to add
    // a waiter.
    // requireNonNull is safe because valueField is always set before TOMBSTONE.
    return getDoneValue(requireNonNull(valueField));
  }
/** Constructor for use by {@link AbstractFuture}. */
AbstractFutureState() {}
/*
* We put various static objects here rather than in AbstractFuture so that they're initialized in
* time for AbstractFutureState to potentially use them during
|
AbstractFutureState
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/ganglia/AbstractGangliaSink.java
|
{
"start": 3908,
"end": 4036
}
|
/**
 * Slope of a Ganglia metric. Each constant's ordinal matches the numeric code
 * noted inline — presumably the value used on the Ganglia wire protocol;
 * confirm against the sink's emit code before relying on the mapping.
 */
enum ____ {
  zero, // 0
  positive, // 1
  negative, // 2
  both // 3
};
/**
* define
|
GangliaSlope
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsSnapshot.java
|
{
"start": 2589,
"end": 8355
}
|
class ____
implements IOStatistics, Serializable, IOStatisticsAggregator,
IOStatisticsSetters {
private static final long serialVersionUID = -1762522703841538084L;
/**
   * List of classes needed to deserialize.
*/
private static final Class[] DESERIALIZATION_CLASSES = {
IOStatisticsSnapshot.class,
TreeMap.class,
Long.class,
MeanStatistic.class,
};
/**
* Counters.
*/
@JsonProperty
private transient Map<String, Long> counters;
/**
* Gauges.
*/
@JsonProperty
private transient Map<String, Long> gauges;
/**
* Minimum values.
*/
@JsonProperty
private transient Map<String, Long> minimums;
/**
* Maximum values.
*/
@JsonProperty
private transient Map<String, Long> maximums;
/**
* mean statistics. The JSON key is all lower case..
*/
@JsonProperty("meanstatistics")
private transient Map<String, MeanStatistic> meanStatistics;
/**
* Construct.
*/
public IOStatisticsSnapshot() {
createMaps();
}
/**
* Construct, taking a snapshot of the source statistics data
* if the source is non-null.
* If the source is null, the empty maps are created
* @param source statistics source. Nullable.
*/
public IOStatisticsSnapshot(IOStatistics source) {
if (source != null) {
snapshot(source);
} else {
createMaps();
}
}
/**
* Create the maps.
*/
private synchronized void createMaps() {
counters = new ConcurrentHashMap<>();
gauges = new ConcurrentHashMap<>();
minimums = new ConcurrentHashMap<>();
maximums = new ConcurrentHashMap<>();
meanStatistics = new ConcurrentHashMap<>();
}
/**
* Clear all the maps.
*/
public synchronized void clear() {
counters.clear();
gauges.clear();
minimums.clear();
maximums.clear();
meanStatistics.clear();
}
  /**
   * Take a snapshot.
   *
   * This completely overwrites the map data with the statistics
   * from the source.
   * @param source statistics source.
   * @throws NullPointerException if {@code source} is null.
   */
  public synchronized void snapshot(IOStatistics source) {
    checkNotNull(source);
    counters = snapshotMap(source.counters());
    gauges = snapshotMap(source.gauges());
    minimums = snapshotMap(source.minimums());
    maximums = snapshotMap(source.maximums());
    // mean statistics are copied entry-by-entry via MeanStatistic::copy
    meanStatistics = snapshotMap(source.meanStatistics(),
        MeanStatistic::copy);
  }
  /**
   * Aggregate the current statistics with the
   * source reference passed in.
   *
   * The operation is synchronized.
   * @param source source; may be null
   * @return true if a merge took place.
   */
  @Override
  public synchronized boolean aggregate(
      @Nullable IOStatistics source) {
    // A null source is a no-op, reported as "no merge took place".
    if (source == null) {
      return false;
    }
    // Each statistic kind uses its own aggregation function; plain long values
    // pass straight through, mean statistics are copied via MeanStatistic::copy.
    aggregateMaps(counters, source.counters(),
        IOStatisticsBinding::aggregateCounters,
        IOStatisticsBinding::passthroughFn);
    aggregateMaps(gauges, source.gauges(),
        IOStatisticsBinding::aggregateGauges,
        IOStatisticsBinding::passthroughFn);
    aggregateMaps(minimums, source.minimums(),
        IOStatisticsBinding::aggregateMinimums,
        IOStatisticsBinding::passthroughFn);
    aggregateMaps(maximums, source.maximums(),
        IOStatisticsBinding::aggregateMaximums,
        IOStatisticsBinding::passthroughFn);
    aggregateMaps(meanStatistics, source.meanStatistics(),
        IOStatisticsBinding::aggregateMeanStatistics, MeanStatistic::copy);
    return true;
  }
@Override
public synchronized Map<String, Long> counters() {
return counters;
}
@Override
public synchronized Map<String, Long> gauges() {
return gauges;
}
@Override
public synchronized Map<String, Long> minimums() {
return minimums;
}
@Override
public synchronized Map<String, Long> maximums() {
return maximums;
}
@Override
public synchronized Map<String, MeanStatistic> meanStatistics() {
return meanStatistics;
}
@Override
public synchronized void setCounter(final String key, final long value) {
counters().put(key, value);
}
@Override
public synchronized void setGauge(final String key, final long value) {
gauges().put(key, value);
}
@Override
public synchronized void setMaximum(final String key, final long value) {
maximums().put(key, value);
}
@Override
public synchronized void setMinimum(final String key, final long value) {
minimums().put(key, value);
}
@Override
public void setMeanStatistic(final String key, final MeanStatistic value) {
meanStatistics().put(key, value);
}
@Override
public String toString() {
return ioStatisticsToString(this);
}
/**
* Get a JSON serializer for this class.
* @return a serializer.
*/
public static JsonSerialization<IOStatisticsSnapshot> serializer() {
return new JsonSerialization<>(IOStatisticsSnapshot.class, false, true);
}
/**
* Serialize by converting each map to a TreeMap, and saving that
* to the stream.
* @param s ObjectOutputStream.
* @throws IOException raised on errors performing I/O.
*/
private synchronized void writeObject(ObjectOutputStream s)
throws IOException {
// Write out the core
s.defaultWriteObject();
s.writeObject(new TreeMap<String, Long>(counters));
s.writeObject(new TreeMap<String, Long>(gauges));
s.writeObject(new TreeMap<String, Long>(minimums));
s.writeObject(new TreeMap<String, Long>(maximums));
s.writeObject(new TreeMap<String, MeanStatistic>(meanStatistics));
}
/**
* Deserialize by loading each TreeMap, and building concurrent
* hash maps from them.
*
* @param s ObjectInputStream.
* @throws IOException raised on errors performing I/O.
* @throws ClassNotFoundException
|
IOStatisticsSnapshot
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/blob/BlobUtilsNonWritableTest.java
|
{
"start": 1604,
"end": 3854
}
|
/**
 * Verifies that BLOB storage/cache directory creation fails with an
 * {@link IOException} when the parent directory is not writable.
 */
class ____ {
    // Child path that can never be created below the read-only temp directory.
    private static final String CANNOT_CREATE_THIS = "cannot-create-this";

    @TempDir private Path tempDir;

    @BeforeEach
    void before() {
        // setWritable has no effect on Windows, which would invalidate every test here.
        assumeThat(OperatingSystem.isWindows()).as("setWritable doesn't work on Windows").isFalse();
        // Prepare test directory
        // Readable and executable but NOT writable: any mkdir below it must fail.
        assertThat(tempDir.toFile().setExecutable(true, false)).isTrue();
        assertThat(tempDir.toFile().setReadable(true, false)).isTrue();
        assertThat(tempDir.toFile().setWritable(false, false)).isTrue();
    }

    @Test
    void testExceptionOnCreateStorageDirectoryFailure() {
        Configuration config = new Configuration();
        config.set(BlobServerOptions.STORAGE_DIRECTORY, getStorageLocationFile().getAbsolutePath());
        // Creating the configured storage directory must surface an IOException.
        assertThatThrownBy(() -> BlobUtils.createBlobStorageDirectory(config, null))
                .isInstanceOf(IOException.class);
    }

    @Test
    void testExceptionOnCreateCacheDirectoryFailureNoJob() {
        // Transient blob without a job id.
        assertThatThrownBy(
                        () ->
                                BlobUtils.getStorageLocation(
                                        getStorageLocationFile(), null, new TransientBlobKey()))
                .isInstanceOf(IOException.class);
    }

    @Test
    void testExceptionOnCreateCacheDirectoryFailureForJobTransient() {
        // Transient blob scoped to a job.
        assertThatThrownBy(
                        () ->
                                BlobUtils.getStorageLocation(
                                        getStorageLocationFile(),
                                        new JobID(),
                                        new TransientBlobKey()))
                .isInstanceOf(IOException.class);
    }

    @Test
    void testExceptionOnCreateCacheDirectoryFailureForJobPermanent() {
        // Permanent blob scoped to a job.
        assertThatThrownBy(
                        () ->
                                BlobUtils.getStorageLocation(
                                        getStorageLocationFile(),
                                        new JobID(),
                                        new PermanentBlobKey()))
                .isInstanceOf(IOException.class);
    }

    // Path below the non-writable temp dir; creation attempts are expected to fail.
    private File getStorageLocationFile() {
        return tempDir.resolve(CANNOT_CREATE_THIS).toFile();
    }
}
|
BlobUtilsNonWritableTest
|
java
|
apache__camel
|
components/camel-kubernetes/src/generated/java/org/apache/camel/component/openshift/deploymentconfigs/OpenshiftDeploymentConfigsEndpointUriFactory.java
|
{
"start": 537,
"end": 3844
}
|
/**
 * URI factory for the {@code openshift-deploymentconfigs} endpoint.
 *
 * <p>NOTE(review): boilerplate {@code EndpointUriFactory} implementation that appears to be
 * generated — prefer regenerating over hand-editing.
 */
class ____ extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory {
    // Path portion of the endpoint syntax: scheme + ":masterUrl".
    private static final String BASE = ":masterUrl";

    // All supported endpoint option names.
    private static final Set<String> PROPERTY_NAMES;
    // Options that carry credentials or other sensitive data.
    private static final Set<String> SECRET_PROPERTY_NAMES;
    // Prefixes of multi-valued options (none for this endpoint).
    private static final Map<String, String> MULTI_VALUE_PREFIXES;
    static {
        Set<String> props = new HashSet<>(34);
        props.add("apiVersion");
        props.add("bridgeErrorHandler");
        props.add("caCertData");
        props.add("caCertFile");
        props.add("clientCertData");
        props.add("clientCertFile");
        props.add("clientKeyAlgo");
        props.add("clientKeyData");
        props.add("clientKeyFile");
        props.add("clientKeyPassphrase");
        props.add("connectionTimeout");
        props.add("crdGroup");
        props.add("crdName");
        props.add("crdPlural");
        props.add("crdScope");
        props.add("crdVersion");
        props.add("dnsDomain");
        props.add("exceptionHandler");
        props.add("exchangePattern");
        props.add("kubernetesClient");
        props.add("labelKey");
        props.add("labelValue");
        props.add("lazyStartProducer");
        props.add("masterUrl");
        props.add("namespace");
        props.add("oauthToken");
        props.add("operation");
        props.add("password");
        props.add("poolSize");
        props.add("portName");
        props.add("portProtocol");
        props.add("resourceName");
        props.add("trustCerts");
        props.add("username");
        PROPERTY_NAMES = Collections.unmodifiableSet(props);
        Set<String> secretProps = new HashSet<>(12);
        secretProps.add("caCertData");
        secretProps.add("caCertFile");
        secretProps.add("clientCertData");
        secretProps.add("clientCertFile");
        secretProps.add("clientKeyAlgo");
        secretProps.add("clientKeyData");
        secretProps.add("clientKeyFile");
        secretProps.add("clientKeyPassphrase");
        secretProps.add("oauthToken");
        secretProps.add("password");
        secretProps.add("trustCerts");
        secretProps.add("username");
        SECRET_PROPERTY_NAMES = Collections.unmodifiableSet(secretProps);
        MULTI_VALUE_PREFIXES = Collections.emptyMap();
    }

    @Override
    public boolean isEnabled(String scheme) {
        return "openshift-deploymentconfigs".equals(scheme);
    }

    @Override
    public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException {
        String syntax = scheme + BASE;
        String uri = syntax;
        // Work on a copy: path/query builders consume entries as they are used.
        Map<String, Object> copy = new HashMap<>(properties);
        uri = buildPathParameter(syntax, uri, "masterUrl", null, true, copy);
        uri = buildQueryParameters(uri, copy, encode);
        return uri;
    }

    @Override
    public Set<String> propertyNames() {
        return PROPERTY_NAMES;
    }

    @Override
    public Set<String> secretPropertyNames() {
        return SECRET_PROPERTY_NAMES;
    }

    @Override
    public Map<String, String> multiValuePrefixes() {
        return MULTI_VALUE_PREFIXES;
    }

    @Override
    public boolean isLenientProperties() {
        return false;
    }
}
|
OpenshiftDeploymentConfigsEndpointUriFactory
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/engine/jdbc/batch/internal/BatchBuilderInitiator.java
|
{
"start": 828,
"end": 1972
}
|
/**
 * Standard initiator for the {@code BatchBuilder} service: resolves the builder from
 * configuration, falling back to a default implementation.
 */
class ____ implements StandardServiceInitiator<BatchBuilder> {
	/**
	 * Singleton access
	 */
	public static final BatchBuilderInitiator INSTANCE = new BatchBuilderInitiator();

	@Override
	public Class<BatchBuilder> getServiceInitiated() {
		return BatchBuilder.class;
	}

	@Override
	public BatchBuilder initiateService(Map<String, Object> configurationValues, ServiceRegistryImplementor registry) {
		// The BUILDER setting wins; otherwise fall back to the BATCH_STRATEGY key.
		Object builder = configurationValues.get( BUILDER );
		if ( builder == null ) {
			builder = configurationValues.get( BATCH_STRATEGY );
		}
		// No explicit builder configured: default implementation with the configured
		// statement batch size (default 1).
		if ( builder == null ) {
			return new BatchBuilderImpl( getInt( STATEMENT_BATCH_SIZE, configurationValues, 1 ) );
		}
		// The setting may be a ready-made BatchBuilder instance...
		if ( builder instanceof BatchBuilder batchBuilder ) {
			return batchBuilder;
		}
		// ...or a class name, instantiated reflectively via its no-arg constructor.
		final String builderClassName = builder.toString();
		try {
			return (BatchBuilder)
					registry.requireService( ClassLoaderService.class )
							.classForName( builderClassName )
							.getConstructor()
							.newInstance();
		}
		catch (Exception e) {
			throw new ServiceException( "Could not build explicit BatchBuilder [" + builderClassName + "]", e );
		}
	}
}
|
BatchBuilderInitiator
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/annotation/web/configurers/RememberMeConfigurerTests.java
|
{
"start": 18101,
"end": 18845
}
|
/**
 * Fixture that applies {@code rememberMe} twice: first with a custom
 * {@code UserDetailsService}, then with defaults. Per the enclosing test's intent
 * (see class name), the second invocation must not discard the first customization.
 */
class ____ {
	// Shared mock so the test can verify which UserDetailsService remember-me ends up using.
	static UserDetailsService userDetailsService = mock(UserDetailsService.class);

	@Bean
	SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
		// Deliberate duplicate rememberMe() calls — the second uses defaults only.
		// @formatter:off
		http
			.httpBasic(withDefaults())
			.rememberMe((me) -> me
				.userDetailsService(userDetailsService))
			.rememberMe(withDefaults());
		return http.build();
		// @formatter:on
	}

	@Bean
	UserDetailsService userDetailsService() {
		return new InMemoryUserDetailsManager(
		// @formatter:off
			User.withDefaultPasswordEncoder()
				.username("user")
				.password("password")
				.roles("USER")
				.build()
		// @formatter:on
		);
	}
}
@Configuration
@EnableWebSecurity
static
|
DuplicateDoesNotOverrideConfig
|
java
|
spring-projects__spring-security
|
core/src/test/java/org/springframework/security/authorization/method/SecuredAuthorizationManagerTests.java
|
{
"start": 8751,
"end": 9011
}
|
/**
 * Fixture implementing {@code InterfaceAnnotationsThree}: combines a method-level
 * {@code @Secured} annotation, an unannotated method, and an override that
 * re-declares its annotation — the annotation placement is the test content.
 */
class ____ implements InterfaceAnnotationsThree {
	// Directly annotated: requires ROLE_ADMIN.
	@Secured("ROLE_ADMIN")
	public void securedAdmin() {
	}

	// Deliberately left without a method-level annotation.
	public void securedUser() {
	}

	// Overrides the interface method and re-declares @Secured on the override.
	@Override
	@Secured("ROLE_ADMIN")
	public void inheritedAnnotations() {
	}
}
public
|
ClassLevelAnnotations
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/operators/coordination/CoordinationRequestHandler.java
|
{
"start": 1077,
"end": 1439
}
|
/**
 * Receiver side of the coordination protocol: accepts a {@code CoordinationRequest}
 * and asynchronously produces the matching {@code CoordinationResponse}.
 */
interface ____ {
    /**
     * Called when receiving a request from the client or operator.
     *
     * @param request the request received
     * @return a future containing the response from the coordinator for this request
     */
    CompletableFuture<CoordinationResponse> handleCoordinationRequest(CoordinationRequest request);
}
|
CoordinationRequestHandler
|
java
|
quarkusio__quarkus
|
extensions/qute/runtime/src/main/java/io/quarkus/qute/runtime/cache/CacheConfigurator.java
|
{
"start": 426,
"end": 1017
}
|
/**
 * Wires the Qute cache section helper to the Quarkus cache named {@code QuteCache.NAME}.
 */
class ____ {
    // Quarkus cache instance backing the Qute cache sections.
    @CacheName(QuteCache.NAME)
    Cache cache;

    // Observes the engine bootstrap event and registers the cache section helper.
    void configureEngine(@Observes EngineBuilder builder) {
        builder.addSectionHelper(new CacheSectionHelper.Factory(new CacheSectionHelper.Cache() {
            @Override
            public CompletionStage<ResultNode> getValue(String key, Function<String, CompletionStage<ResultNode>> loader) {
                // Bridge the CompletionStage-based loader into the Uni-based async cache API
                // and expose the cached result back as a CompletionStage.
                return cache.<String, ResultNode> getAsync(key, k -> Uni.createFrom().completionStage(loader.apply(k)))
                        .subscribeAsCompletionStage();
            }
        }));
    }
}
|
CacheConfigurator
|
java
|
apache__flink
|
flink-state-backends/flink-statebackend-rocksdb/src/main/java/org/apache/flink/state/rocksdb/RocksDBSnapshotTransformFactoryAdaptor.java
|
{
"start": 2880,
"end": 3415
}
|
/**
 * Adaptor for value state: exposes the wrapped factory's serialized-state
 * transformer unchanged.
 */
class ____<SV, SEV>
        extends RocksDBSnapshotTransformFactoryAdaptor<SV, SEV> {

    private RocksDBValueStateSnapshotTransformFactory(
            StateSnapshotTransformFactory<SEV> snapshotTransformFactory) {
        super(snapshotTransformFactory);
    }

    @Override
    public Optional<StateSnapshotTransformer<byte[]>> createForSerializedState() {
        // Delegates straight to the wrapped factory's serialized-state transformer.
        return snapshotTransformFactory.createForSerializedState();
    }
}
private static
|
RocksDBValueStateSnapshotTransformFactory
|
java
|
apache__commons-lang
|
src/main/java/org/apache/commons/lang3/SystemUtils.java
|
{
"start": 1062,
"end": 1244
}
|
class ____ be set to {@code null} and a message will be
* written to {@code System.err}.
* </p>
* <p>
* #ThreadSafe#
* </p>
*
* @since 1.0
* @see SystemProperties
*/
public
|
will
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/util/ExceptionCollectorTests.java
|
{
"start": 1071,
"end": 3967
}
|
/**
 * Tests for {@code ExceptionCollector}: throwables raised by executed
 * {@code Executable}s are collected and rethrown together by {@code assertEmpty()}.
 */
class ____ {
	// Separator joining individual failure messages in the aggregated message.
	private static final char EOL = '\n';

	private final ExceptionCollector collector = new ExceptionCollector();

	@Test
	void noExceptions() {
		this.collector.execute(() -> {});
		assertThat(this.collector.getExceptions()).isEmpty();
		assertThatNoException().isThrownBy(this.collector::assertEmpty);
	}

	@Test
	void oneError() {
		this.collector.execute(error());
		assertOneFailure(Error.class, "error");
	}

	@Test
	void oneAssertionError() {
		this.collector.execute(assertionError());
		assertOneFailure(AssertionError.class, "assertion");
	}

	@Test
	void oneCheckedException() {
		this.collector.execute(checkedException());
		assertOneFailure(Exception.class, "checked");
	}

	@Test
	void oneUncheckedException() {
		this.collector.execute(uncheckedException());
		assertOneFailure(RuntimeException.class, "unchecked");
	}

	@Test
	void oneThrowable() {
		// A plain Throwable is wrapped in an AssertionError (as its cause) rather than rethrown.
		this.collector.execute(throwable());
		assertThatExceptionOfType(AssertionError.class)
			.isThrownBy(this.collector::assertEmpty)
			.withMessage("throwable")
			.withCauseExactlyInstanceOf(Throwable.class)
			.satisfies(error -> assertThat(error.getCause()).hasMessage("throwable"))
			.satisfies(error -> assertThat(error).hasNoSuppressedExceptions());
	}

	// Asserts that exactly one failure of the given type/message is rethrown untouched.
	private void assertOneFailure(Class<? extends Throwable> expectedType, String failureMessage) {
		assertThatExceptionOfType(expectedType)
			.isThrownBy(this.collector::assertEmpty)
			.satisfies(exception ->
				assertThat(exception)
					.isExactlyInstanceOf(expectedType)
					.hasNoSuppressedExceptions()
					.hasNoCause()
					.hasMessage(failureMessage));
	}

	@Test
	void multipleFailures() {
		// Five failures: the aggregate message lists each one in execution order, and the
		// originals are attached as suppressed exceptions in the same order.
		this.collector.execute(assertionError());
		this.collector.execute(checkedException());
		this.collector.execute(uncheckedException());
		this.collector.execute(error());
		this.collector.execute(throwable());
		assertThatExceptionOfType(AssertionError.class)
			.isThrownBy(this.collector::assertEmpty)
			.withMessage("Multiple Exceptions (5):" + EOL + //
				"assertion" + EOL + //
				"checked" + EOL + //
				"unchecked" + EOL + //
				"error" + EOL + //
				"throwable"//
			)
			.satisfies(exception ->
				assertThat(exception.getSuppressed()).extracting(Object::getClass).map(Class::getSimpleName)
					.containsExactly("AssertionError", "Exception", "RuntimeException", "Error", "Throwable"));
	}

	// Factories producing Executables that throw each Throwable flavour.

	private Executable throwable() {
		return () -> {
			throw new Throwable("throwable");
		};
	}

	private Executable error() {
		return () -> {
			throw new Error("error");
		};
	}

	private Executable assertionError() {
		return () -> {
			throw new AssertionError("assertion");
		};
	}

	private Executable checkedException() {
		return () -> {
			throw new Exception("checked");
		};
	}

	private Executable uncheckedException() {
		return () -> {
			throw new RuntimeException("unchecked");
		};
	}
}
|
ExceptionCollectorTests
|
java
|
apache__kafka
|
connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/ReflectionScanner.java
|
{
"start": 2391,
"end": 3516
}
|
class ____ without throwing an exception</li>
* <li>The no-args constructor completes without throwing an exception</li>
* <li>One of the following is true:
* <ul>
* <li>Is a subclass of {@link SinkConnector}, {@link SourceConnector}, {@link Converter},
* {@link HeaderConverter}, {@link Transformation}, or {@link Predicate}</li>
* <li>Is a subclass of {@link ConfigProvider}, {@link ConnectRestExtension}, or
* {@link ConnectorClientConfigOverridePolicy}, and has a {@link ServiceLoader} compatible
* manifest file or module declaration</li>
* </ul>
* </li>
* </ul>
* <p>Note: This scanner has a runtime proportional to the number of overall classes in the passed-in
* {@link PluginSource} objects, which may be significant for plugins with large dependencies. For a more performant
* implementation, consider using {@link ServiceLoaderScanner} and follow migration instructions for
* <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-898%3A+Modernize+Connect+plugin+discovery">KIP-898</a>.
*/
public
|
completes
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminBackoffMonitor.java
|
{
"start": 1625,
"end": 2078
}
|
class ____ the logic to track decommissioning and entering
* maintenance nodes, ensure all their blocks are adequately replicated
* before they are moved to the decommissioned or maintenance state.
*
* This monitor avoids flooding the replication queue with all pending blocks
* and instead feeds them to the queue as the prior set complete replication.
*
* HDFS-14854 contains details about the overall design of this class.
*
*/
public
|
implements
|
java
|
apache__camel
|
components/camel-stitch/src/main/java/org/apache/camel/component/stitch/client/StitchClientBuilder.java
|
{
"start": 1007,
"end": 3081
}
|
/**
 * Fluent builder for {@code StitchClientImpl}. Token and region are mandatory; an
 * explicit {@code HttpClient} or a {@code ConnectionProvider} may optionally be set.
 * Each {@code with*} method ignores empty arguments and returns {@code this}.
 */
class ____ {
    private HttpClient httpClient;
    private String token;
    private ConnectionProvider connectionProvider;
    private StitchRegion region;

    private StitchClientBuilder() {
        // instances are obtained via builder()
    }

    /** Creates a fresh, empty builder. */
    public static StitchClientBuilder builder() {
        return new StitchClientBuilder();
    }

    /** Uses a pre-configured HTTP client; an empty argument is ignored. */
    public StitchClientBuilder withHttpClient(HttpClient httpClient) {
        if (ObjectHelper.isNotEmpty(httpClient)) {
            this.httpClient = httpClient;
        }
        return this;
    }

    /** Sets the Stitch API token; an empty argument is ignored. */
    public StitchClientBuilder withToken(String token) {
        if (ObjectHelper.isNotEmpty(token)) {
            this.token = token;
        }
        return this;
    }

    /** Uses a custom connection provider; an empty argument is ignored. */
    public StitchClientBuilder withConnectionProvider(ConnectionProvider connectionProvider) {
        if (ObjectHelper.isNotEmpty(connectionProvider)) {
            this.connectionProvider = connectionProvider;
        }
        return this;
    }

    /** Sets the Stitch region; an empty argument is ignored. */
    public StitchClientBuilder withRegion(StitchRegion region) {
        if (ObjectHelper.isNotEmpty(region)) {
            this.region = region;
        }
        return this;
    }

    /**
     * Creates the client. Transport precedence: explicit client, then connection
     * provider, then default HttpClient.
     *
     * @throws IllegalArgumentException if token or region is missing
     */
    public StitchClientImpl build() {
        // Both token and region are required before anything else is considered.
        if (ObjectHelper.isEmpty(token) || ObjectHelper.isEmpty(region)) {
            throw new IllegalArgumentException("Token or Region cannot be empty!");
        }
        final String baseUrl = getBaseUrl(region);
        // An explicitly supplied client wins outright.
        if (ObjectHelper.isNotEmpty(httpClient)) {
            return new StitchClientImpl(httpClient, baseUrl, token);
        }
        // Next preference: a client built from the supplied connection provider.
        if (ObjectHelper.isNotEmpty(connectionProvider)) {
            return new StitchClientImpl(HttpClient.create(connectionProvider), baseUrl, token);
        }
        // Fall back to default client options.
        return new StitchClientImpl(HttpClient.create(), baseUrl, token);
    }

    private String getBaseUrl(final StitchRegion stitchRegion) {
        return "https://" + stitchRegion.getUrl();
    }
}
|
StitchClientBuilder
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/promql/PromqlFoldingUtils.java
|
{
"start": 787,
"end": 1056
}
|
class ____ evaluating scalar arithmetic operations at parse time.
* Handles operations between:
* - Numbers (delegates to Arithmetics)
* - Durations and numbers (converts to seconds, computes, converts back)
* - Durations and durations (only for ADD/SUB)
*/
public
|
for
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
|
{
"start": 4115,
"end": 23951
}
|
class ____ {
private static final Logger logger = LogManager.getLogger(AllocationService.class);
private final AllocationDeciders allocationDeciders;
private Map<String, ExistingShardsAllocator> existingShardsAllocators;
private final ShardsAllocator shardsAllocator;
private final ClusterInfoService clusterInfoService;
private final SnapshotsInfoService snapshotsInfoService;
private final ShardRoutingRoleStrategy shardRoutingRoleStrategy;
// only for tests that use the GatewayAllocator as the unique ExistingShardsAllocator
@SuppressWarnings("this-escape")
public AllocationService(
AllocationDeciders allocationDeciders,
GatewayAllocator gatewayAllocator,
ShardsAllocator shardsAllocator,
ClusterInfoService clusterInfoService,
SnapshotsInfoService snapshotsInfoService,
ShardRoutingRoleStrategy shardRoutingRoleStrategy
) {
this(allocationDeciders, shardsAllocator, clusterInfoService, snapshotsInfoService, shardRoutingRoleStrategy);
setExistingShardsAllocators(Collections.singletonMap(GatewayAllocator.ALLOCATOR_NAME, gatewayAllocator));
}
public AllocationService(
AllocationDeciders allocationDeciders,
ShardsAllocator shardsAllocator,
ClusterInfoService clusterInfoService,
SnapshotsInfoService snapshotsInfoService,
ShardRoutingRoleStrategy shardRoutingRoleStrategy
) {
this.allocationDeciders = allocationDeciders;
this.shardsAllocator = shardsAllocator;
this.clusterInfoService = clusterInfoService;
this.snapshotsInfoService = snapshotsInfoService;
this.shardRoutingRoleStrategy = shardRoutingRoleStrategy;
}
/**
* Inject the {@link ExistingShardsAllocator}s to use. May only be called once.
*/
public void setExistingShardsAllocators(Map<String, ExistingShardsAllocator> existingShardsAllocators) {
assert this.existingShardsAllocators == null : "cannot set allocators " + existingShardsAllocators + " twice";
assert existingShardsAllocators.isEmpty() == false : "must add at least one ExistingShardsAllocator";
this.existingShardsAllocators = Collections.unmodifiableMap(existingShardsAllocators);
}
/**
* @return The allocation deciders that the allocation service has been configured with.
*/
public AllocationDeciders getAllocationDeciders() {
return allocationDeciders;
}
public ShardRoutingRoleStrategy getShardRoutingRoleStrategy() {
return shardRoutingRoleStrategy;
}
public ClusterInfoService getClusterInfoService() {
return clusterInfoService;
}
  /**
   * Applies the started shards. Note, only initializing ShardRouting instances that exist in the routing table should be
   * provided as parameter and no duplicates should be contained.
   * <p>
   * If the same instance of the {@link ClusterState} is returned, then no change has been made.</p>
   */
  public ClusterState applyStartedShards(ClusterState clusterState, List<ShardRouting> startedShards) {
    assert assertInitialized();
    // No work: return the same instance so callers can detect "no change".
    if (startedShards.isEmpty()) {
      return clusterState;
    }
    RoutingAllocation allocation = createRoutingAllocation(clusterState, currentNanoTime());
    // as starting a primary relocation target can reinitialize replica shards, start replicas first
    // (sorting on ShardRouting::primary orders primary == false entries before primary == true)
    startedShards = new ArrayList<>(startedShards);
    startedShards.sort(Comparator.comparing(ShardRouting::primary));
    applyStartedShards(allocation, startedShards);
    // Notify every existing-shards allocator of the shards that just started.
    for (final ExistingShardsAllocator allocator : existingShardsAllocators.values()) {
      allocator.applyStartedShards(startedShards, allocation);
    }
    assert RoutingNodes.assertShardStats(allocation.routingNodes());
    // Shard-id summary used as the cluster-state change reason (verbosity tied to debug logging).
    String startedShardsAsString = firstListElementsToCommaDelimitedString(
        startedShards,
        s -> s.shardId().toString(),
        logger.isDebugEnabled()
    );
    return buildResultAndLogHealthChange(clusterState, allocation, "shards started [" + startedShardsAsString + "]");
  }
    /**
     * Rebuilds the routing table and metadata from the (mutated) {@link RoutingAllocation} and packages them into a new
     * {@link ClusterState}, carrying forward any {@link RestoreInProgress} changes and logging the resulting health change.
     */
    private static ClusterState buildResultAndLogHealthChange(ClusterState oldState, RoutingAllocation allocation, String reason) {
        final GlobalRoutingTable oldRoutingTable = oldState.globalRoutingTable();
        final GlobalRoutingTable newRoutingTable = oldRoutingTable.rebuild(allocation.routingNodes(), allocation.metadata());
        final Metadata newMetadata = allocation.updateMetadataWithRoutingChanges(newRoutingTable);
        assert newRoutingTable.validate(newMetadata); // validates the routing table is coherent with the cluster state metadata
        final ClusterState.Builder newStateBuilder = ClusterState.builder(oldState).routingTable(newRoutingTable).metadata(newMetadata);
        final RestoreInProgress restoreInProgress = RestoreInProgress.get(allocation.getClusterState());
        RestoreInProgress updatedRestoreInProgress = allocation.updateRestoreInfoWithRoutingChanges(restoreInProgress);
        // only rebuild the (immutable) customs map when the restore-in-progress state actually changed
        if (updatedRestoreInProgress != restoreInProgress) {
            ImmutableOpenMap.Builder<String, ClusterState.Custom> customsBuilder = ImmutableOpenMap.builder(
                allocation.getClusterState().getCustoms()
            );
            customsBuilder.put(RestoreInProgress.TYPE, updatedRestoreInProgress);
            newStateBuilder.customs(customsBuilder.build());
        }
        final ClusterState newState = newStateBuilder.build();
        logClusterHealthStateChange(oldState, newState, reason);
        return newState;
    }
    /**
     * Applies the failed shards. Note, only assigned ShardRouting instances that exist in the routing table should be
     * provided as parameter. Also applies a list of allocation ids to remove from the in-sync set for shard copies for which there
     * are no routing entries in the routing table.
     *
     * <p>
     * If the same instance of ClusterState is returned, then no change has been made.</p>
     */
    public ClusterState applyFailedShards(
        final ClusterState clusterState,
        final List<FailedShard> failedShards,
        final List<StaleShard> staleShards
    ) {
        assert assertInitialized();
        if (staleShards.isEmpty() && failedShards.isEmpty()) {
            return clusterState; // fast path: nothing to fail
        }
        // first remove the stale allocation ids (copies that have no routing entry) from the in-sync sets
        ClusterState tmpState = IndexMetadataUpdater.removeStaleIdsWithoutRoutings(clusterState, staleShards, logger);
        long currentNanoTime = currentNanoTime();
        RoutingAllocation allocation = createRoutingAllocation(tmpState, currentNanoTime);
        for (FailedShard failedShardEntry : failedShards) {
            ShardRouting shardToFail = failedShardEntry.routingEntry();
            assert allocation.metadata().findIndex(shardToFail.shardId().getIndex()).isPresent()
                : "Expected index [" + shardToFail.shardId().getIndexName() + "] of failed shard to still exist";
            // prevent the shard from being assigned back to the node it just failed on during this allocation round
            allocation.addIgnoreShardForNode(shardToFail.shardId(), shardToFail.currentNodeId());
            // failing a primary also fails initializing replica shards, re-resolve ShardRouting
            ShardRouting failedShard = allocation.routingNodes()
                .getByAllocationId(shardToFail.shardId(), shardToFail.allocationId().getId());
            if (failedShard != null) {
                if (failedShard != shardToFail) {
                    logger.trace(
                        "{} shard routing modified in an earlier iteration (previous: {}, current: {})",
                        shardToFail.shardId(),
                        shardToFail,
                        failedShard
                    );
                }
                // carry over (and extend) the failure count and the set of nodes this shard has already failed on
                int failedAllocations = failedShard.unassignedInfo() != null ? failedShard.unassignedInfo().failedAllocations() : 0;
                final Set<String> failedNodeIds;
                if (failedShard.unassignedInfo() != null) {
                    failedNodeIds = Sets.newHashSetWithExpectedSize(failedShard.unassignedInfo().failedNodeIds().size() + 1);
                    failedNodeIds.addAll(failedShard.unassignedInfo().failedNodeIds());
                    failedNodeIds.add(failedShard.currentNodeId());
                } else {
                    failedNodeIds = Collections.emptySet();
                }
                String message = "failed shard on node [" + shardToFail.currentNodeId() + "]: " + failedShardEntry.message();
                UnassignedInfo unassignedInfo = new UnassignedInfo(
                    UnassignedInfo.Reason.ALLOCATION_FAILED,
                    message,
                    failedShardEntry.failure(),
                    failedAllocations + 1,
                    currentNanoTime,
                    System.currentTimeMillis(),
                    false,
                    AllocationStatus.NO_ATTEMPT,
                    failedNodeIds,
                    shardToFail.currentNodeId()
                );
                if (failedShardEntry.markAsStale()) {
                    // drop the copy's allocation id from the in-sync set, marking the copy as stale
                    allocation.removeAllocationId(failedShard);
                }
                logger.warn(() -> "failing shard [" + failedShardEntry + "]", failedShardEntry.failure());
                allocation.routingNodes().failShard(failedShard, unassignedInfo, allocation.changes());
            } else {
                logger.trace("{} shard routing failed in an earlier iteration (routing: {})", shardToFail.shardId(), shardToFail);
            }
        }
        // notify the pluggable allocators so they can update their internal state for the failed shards
        for (final ExistingShardsAllocator allocator : existingShardsAllocators.values()) {
            allocator.applyFailedShards(failedShards, allocation);
        }
        // reroute to find new homes for the now-unassigned shards
        reroute(
            allocation,
            routingAllocation -> shardsAllocator.allocate(
                routingAllocation,
                rerouteCompletionIsNotRequired() /* this is not triggered by a user request */
            )
        );
        String failedShardsAsString = firstListElementsToCommaDelimitedString(
            failedShards,
            s -> s.routingEntry().shardId().toString(),
            logger.isDebugEnabled()
        );
        return buildResultAndLogHealthChange(clusterState, allocation, "shards failed [" + failedShardsAsString + "]");
    }
/**
* Unassign any shards that are associated with nodes that are no longer part of the cluster, potentially promoting replicas if needed.
*/
public ClusterState disassociateDeadNodes(ClusterState clusterState, boolean reroute, String reason) {
RoutingAllocation allocation = createRoutingAllocation(clusterState, currentNanoTime());
// first, clear from the shards any node id they used to belong to that is now dead
disassociateDeadNodes(allocation);
if (allocation.routingNodesChanged()) {
clusterState = buildResultAndLogHealthChange(clusterState, allocation, reason);
}
if (reroute) {
return reroute(clusterState, reason, rerouteCompletionIsNotRequired() /* this is not triggered by a user request */);
} else {
return clusterState;
}
}
    /**
     * Checks if there are replicas with the auto-expand feature that need to be adapted.
     * Returns an updated cluster state if changes were necessary, or the identical cluster if no changes were required.
     */
    public ClusterState adaptAutoExpandReplicas(ClusterState clusterState) {
        // construct the (potentially expensive) RoutingAllocation lazily: it is only needed if some project has pending changes
        final LazyInitializable<RoutingAllocation, RuntimeException> lazyAllocation = new LazyInitializable<>(
            () -> new RoutingAllocation(
                allocationDeciders,
                clusterState,
                clusterInfoService.getClusterInfo(),
                snapshotsInfoService.snapshotShardSizes(),
                currentNanoTime()
            )
        );
        final Supplier<RoutingAllocation> allocationSupplier = lazyAllocation::getOrCompute;
        // builders are created lazily, on the first project that actually needs updating
        GlobalRoutingTable.Builder routingBuilder = null;
        Metadata.Builder metadataBuilder = null;
        for (var entry : clusterState.metadata().projects().entrySet()) {
            var projectId = entry.getKey();
            var tuple = adaptAutoExpandReplicas(entry.getValue(), clusterState.routingTable(projectId), allocationSupplier);
            if (tuple != null) {
                if (metadataBuilder == null) {
                    metadataBuilder = Metadata.builder(clusterState.metadata());
                }
                metadataBuilder.put(tuple.v1());
                if (routingBuilder == null) {
                    routingBuilder = GlobalRoutingTable.builder(clusterState.globalRoutingTable());
                }
                routingBuilder.put(projectId, tuple.v2());
            }
        }
        if (metadataBuilder == null) {
            // No projects were updated
            return clusterState;
        }
        final ClusterState fixedState = ClusterState.builder(clusterState)
            .routingTable(routingBuilder.build())
            .metadata(metadataBuilder)
            .build();
        // once fixed, no further auto-expand changes may remain outstanding
        assert hasAutoExpandReplicaChanges(fixedState.metadata(), allocationSupplier) == false;
        return fixedState;
    }
private static boolean hasAutoExpandReplicaChanges(Metadata metadata, Supplier<RoutingAllocation> allocationSupplier) {
return metadata.projects()
.values()
.stream()
.anyMatch(project -> AutoExpandReplicas.getAutoExpandReplicaChanges(project, allocationSupplier).size() > 0);
}
    /**
     * Computes and applies the auto-expand replica changes for a single project.
     *
     * @return builders for the updated project metadata and routing table, or {@code null} when the project needs no changes
     */
    @Nullable
    private Tuple<ProjectMetadata.Builder, RoutingTable.Builder> adaptAutoExpandReplicas(
        ProjectMetadata project,
        RoutingTable projectRoutingTable,
        Supplier<RoutingAllocation> allocationSupplier
    ) {
        // map of target replica count -> names of the indices that should be updated to that count
        final Map<Integer, List<String>> autoExpandReplicaChanges = AutoExpandReplicas.getAutoExpandReplicaChanges(
            project,
            allocationSupplier
        );
        if (autoExpandReplicaChanges.isEmpty()) {
            return null;
        }
        final RoutingTable.Builder routingTableBuilder = RoutingTable.builder(shardRoutingRoleStrategy, projectRoutingTable);
        ProjectMetadata.Builder projectBuilder = ProjectMetadata.builder(project);
        for (Map.Entry<Integer, List<String>> entry : autoExpandReplicaChanges.entrySet()) {
            final int numberOfReplicas = entry.getKey();
            final String[] indices = entry.getValue().toArray(Strings.EMPTY_ARRAY);
            // we do *not* update the in sync allocation ids as they will be removed upon the first index
            // operation which make these copies stale
            routingTableBuilder.updateNumberOfReplicas(numberOfReplicas, indices);
            projectBuilder.updateNumberOfReplicas(numberOfReplicas, indices);
            // update settings version for each index, since number_of_replicas is an index setting
            for (final String index : indices) {
                final IndexMetadata indexMetadata = projectBuilder.get(index);
                final IndexMetadata.Builder indexMetadataBuilder = new IndexMetadata.Builder(indexMetadata).settingsVersion(
                    1 + indexMetadata.getSettingsVersion()
                );
                projectBuilder.put(indexMetadataBuilder);
            }
            logger.info("in project [{}] updating number_of_replicas to [{}] for indices {}", project.id(), numberOfReplicas, indices);
        }
        return new Tuple<>(projectBuilder, routingTableBuilder);
    }
/**
* Internal helper to cap the number of elements in a potentially long list for logging.
*
* @param elements The elements to log. May be any non-null list. Must not be null.
* @param formatter A function that can convert list elements to a String. Must not be null.
* @param <T> The list element type.
* @return A comma-separated string of the first few elements.
*/
public static <T> String firstListElementsToCommaDelimitedString(
List<T> elements,
Function<T, String> formatter,
boolean isDebugEnabled
) {
final int maxNumberOfElements = 10;
if (isDebugEnabled || elements.size() <= maxNumberOfElements) {
return elements.stream().map(formatter).collect(Collectors.joining(", "));
} else {
return elements.stream().limit(maxNumberOfElements).map(formatter).collect(Collectors.joining(", "))
+ ", ... ["
+ elements.size()
+ " items in total]";
}
}
    /**
     * Executes the given explicit allocation {@code commands} against the cluster state and, unless this is a dry run,
     * follows up with a full reroute so the cluster can react to the command-driven shard movements.
     *
     * @param clusterState the current cluster state
     * @param commands the allocation commands to execute
     * @param explain whether to collect per-command explanations
     * @param retryFailed forwarded to the allocator — presumably resets the failure counters of unassigned shards; confirm in
     *                    {@code ShardsAllocator#execute}
     * @param dryRun if {@code true} the follow-up reroute is skipped and {@code reroute} is completed immediately; note the
     *               returned state still reflects the effect of the commands
     * @param reroute listener completed once the (possibly skipped) reroute has finished
     * @return the per-command explanations together with the resulting cluster state
     */
    @FixForMultiProject(description = "we should assert retryFailed is not allowed with non-empty commands")
    public CommandsResult reroute(
        ClusterState clusterState,
        AllocationCommands commands,
        boolean explain,
        boolean retryFailed,
        boolean dryRun,
        ActionListener<Void> reroute
    ) {
        RoutingAllocation allocation = createRoutingAllocation(clusterState, currentNanoTime());
        var explanations = shardsAllocator.execute(allocation, commands, explain, retryFailed);
        // the assumption is that commands will move / act on shards (or fail through exceptions)
        // so, there will always be shard "movements", so no need to check on reroute
        if (dryRun == false) {
            reroute(allocation, routingAllocation -> shardsAllocator.allocate(routingAllocation, reroute));
        } else {
            reroute.onResponse(null);
        }
        return new CommandsResult(explanations, buildResultAndLogHealthChange(clusterState, allocation, "reroute commands"));
    }
    /**
     * Computes the next step towards a fully allocated and balanced cluster and records this step in the routing table of the returned
     * state. Should be called after every change to the cluster that affects the routing table and/or the balance of shards.
     * <p>
     * This method is expensive in larger clusters. Wherever possible you should invoke this method asynchronously using
     * {@link RerouteService#reroute} to batch up invocations rather than calling the method directly.
     *
     * @param clusterState the cluster state to reroute
     * @param reason human-readable reason, used for health-change logging
     * @param listener completed once the allocation round has finished
     * @return an updated cluster state, or the same instance that was passed as an argument if no changes were made.
     */
    public ClusterState reroute(ClusterState clusterState, String reason, ActionListener<Void> listener) {
        // delegate to the generic entry point, using the default strategy of letting the shards allocator allocate everything
        return executeWithRoutingAllocation(
            clusterState,
            reason,
            routingAllocation -> shardsAllocator.allocate(routingAllocation, listener)
        );
    }
    /**
     * Computes the next step towards a fully allocated and balanced cluster and records this step in the routing table of the returned
     * state. Should be called after every change to the cluster that affects the routing table and/or the balance of shards.
     * <p>
     * This method is expensive in larger clusters. Wherever possible you should invoke this method asynchronously using
     * {@link RerouteService#reroute} to batch up invocations rather than calling the method directly.
     *
     * @return an updated cluster state, or the same instance that was passed as an argument if no changes were made.
     */
    public ClusterState executeWithRoutingAllocation(ClusterState clusterState, String reason, RerouteStrategy rerouteStrategy) {
        // first apply any pending auto-expand replica changes so the allocation runs against consistent replica settings
        ClusterState fixedClusterState = adaptAutoExpandReplicas(clusterState);
        RoutingAllocation allocation = createRoutingAllocation(fixedClusterState, currentNanoTime());
        reroute(allocation, rerouteStrategy);
        if (fixedClusterState == clusterState && allocation.routingNodesChanged() == false) {
            return clusterState; // neither auto-expand nor routing changes: return the identical instance per the contract above
        }
        // the result is deliberately built on top of the original state; auto-expand changes flow in via the allocation's metadata
        return buildResultAndLogHealthChange(clusterState, allocation, reason);
    }
@FunctionalInterface
public
|
AllocationService
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/predicate/fulltext/MatchQueryPredicate.java
|
{
"start": 583,
"end": 1635
}
|
class ____ extends FullTextPredicate {
    // the single field expression this match predicate targets
    private final Expression field;
    public MatchQueryPredicate(Source source, Expression field, String query, String options) {
        // the field is also registered as the predicate's single child expression
        super(source, query, options, singletonList(field));
        this.field = field;
    }
    @Override
    protected NodeInfo<MatchQueryPredicate> info() {
        return NodeInfo.create(this, MatchQueryPredicate::new, field, query(), options());
    }
    @Override
    public MatchQueryPredicate replaceChildren(List<Expression> newChildren) {
        // exactly one child (the field) is expected, mirroring the constructor
        return new MatchQueryPredicate(source(), newChildren.get(0), query(), options());
    }
    public Expression field() {
        return field;
    }
    @Override
    public int hashCode() {
        // fold the field into the parent's hash so equals/hashCode stay consistent
        return Objects.hash(field, super.hashCode());
    }
    @Override
    public boolean equals(Object obj) {
        // super.equals(obj) is assumed to cover type/query/options — confirm in FullTextPredicate; only the field remains
        if (super.equals(obj)) {
            MatchQueryPredicate other = (MatchQueryPredicate) obj;
            return Objects.equals(field, other.field);
        }
        return false;
    }
}
|
MatchQueryPredicate
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/injection/guice/multibindings/MapBinder.java
|
{
"start": 1642,
"end": 2171
}
|
class ____ extends AbstractModule {
* protected void configure() {
* MapBinder<String, Snack> mapbinder
* = MapBinder.newMapBinder(binder(), String.class, Snack.class);
* mapbinder.addBinding("twix").toInstance(new Twix());
* mapbinder.addBinding("snickers").toProvider(SnickersProvider.class);
* mapbinder.addBinding("skittles").to(Skittles.class);
* }
* }</code></pre>
* <p>
* With this binding, a {@link Map}{@code <String, Snack>} can now be
* injected:
* <pre><code>
*
|
SnacksModule
|
java
|
spring-projects__spring-framework
|
spring-tx/src/main/java/org/springframework/jca/endpoint/AbstractMessageEndpointFactory.java
|
{
"start": 1791,
"end": 6880
}
|
class ____ implements MessageEndpointFactory, BeanNameAware {
/** Logger available to subclasses. */
protected final Log logger = LogFactory.getLog(getClass());
private @Nullable TransactionFactory transactionFactory;
private @Nullable String transactionName;
private int transactionTimeout = -1;
private @Nullable String beanName;
/**
* Set the XA transaction manager to use for wrapping endpoint
* invocations, enlisting the endpoint resource in each such transaction.
* <p>The passed-in object may be a transaction manager which implements
* Spring's {@link org.springframework.transaction.jta.TransactionFactory}
* interface, or a plain {@link jakarta.transaction.TransactionManager}.
* <p>If no transaction manager is specified, the endpoint invocation
* will simply not be wrapped in an XA transaction. Check out your
* resource provider's ActivationSpec documentation for local
* transaction options of your particular provider.
* @see #setTransactionName
* @see #setTransactionTimeout
*/
public void setTransactionManager(Object transactionManager) {
if (transactionManager instanceof TransactionFactory factory) {
this.transactionFactory = factory;
}
else if (transactionManager instanceof TransactionManager manager) {
this.transactionFactory = new SimpleTransactionFactory(manager);
}
else {
throw new IllegalArgumentException("Transaction manager [" + transactionManager +
"] is neither a [org.springframework.transaction.jta.TransactionFactory} nor a " +
"[jakarta.transaction.TransactionManager]");
}
}
/**
* Set the Spring TransactionFactory to use for wrapping endpoint
* invocations, enlisting the endpoint resource in each such transaction.
* <p>Alternatively, specify an appropriate transaction manager through
* the {@link #setTransactionManager "transactionManager"} property.
* <p>If no transaction factory is specified, the endpoint invocation
* will simply not be wrapped in an XA transaction. Check out your
* resource provider's ActivationSpec documentation for local
* transaction options of your particular provider.
* @see #setTransactionName
* @see #setTransactionTimeout
*/
public void setTransactionFactory(TransactionFactory transactionFactory) {
this.transactionFactory = transactionFactory;
}
/**
* Specify the name of the transaction, if any.
* <p>Default is none. A specified name will be passed on to the transaction
* manager, allowing to identify the transaction in a transaction monitor.
*/
public void setTransactionName(String transactionName) {
this.transactionName = transactionName;
}
/**
* Specify the transaction timeout, if any.
* <p>Default is -1: rely on the transaction manager's default timeout.
* Specify a concrete timeout to restrict the maximum duration of each
* endpoint invocation.
*/
public void setTransactionTimeout(int transactionTimeout) {
this.transactionTimeout = transactionTimeout;
}
/**
* Set the name of this message endpoint. Populated with the bean name
* automatically when defined within Spring's bean factory.
*/
@Override
public void setBeanName(String beanName) {
this.beanName = beanName;
}
/**
* Implementation of the JCA 1.7 {@code #getActivationName()} method,
* returning the bean name as set on this MessageEndpointFactory.
* @see #setBeanName
*/
@Override
public @Nullable String getActivationName() {
return this.beanName;
}
/**
* Implementation of the JCA 1.7 {@code #getEndpointClass()} method,
* returning {@code null} in order to indicate a synthetic endpoint type.
*/
@Override
public @Nullable Class<?> getEndpointClass() {
return null;
}
/**
* This implementation returns {@code true} if a transaction manager
* has been specified; {@code false} otherwise.
* @see #setTransactionManager
* @see #setTransactionFactory
*/
@Override
public boolean isDeliveryTransacted(Method method) throws NoSuchMethodException {
return (this.transactionFactory != null);
}
/**
* The standard JCA 1.5 version of {@code createEndpoint}.
* <p>This implementation delegates to {@link #createEndpointInternal()},
* initializing the endpoint's XAResource before the endpoint gets invoked.
*/
@Override
public MessageEndpoint createEndpoint(XAResource xaResource) throws UnavailableException {
AbstractMessageEndpoint endpoint = createEndpointInternal();
endpoint.initXAResource(xaResource);
return endpoint;
}
/**
* The alternative JCA 1.6 version of {@code createEndpoint}.
* <p>This implementation delegates to {@link #createEndpointInternal()},
* ignoring the specified timeout. It is only here for JCA 1.6 compliance.
*/
@Override
public MessageEndpoint createEndpoint(XAResource xaResource, long timeout) throws UnavailableException {
AbstractMessageEndpoint endpoint = createEndpointInternal();
endpoint.initXAResource(xaResource);
return endpoint;
}
/**
* Create the actual endpoint instance, as a subclass of the
* {@link AbstractMessageEndpoint} inner
|
AbstractMessageEndpointFactory
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/PromqlBaseParser.java
|
{
"start": 73961,
"end": 74941
}
|
// ANTLR-generated parse-tree context for the integer-literal alternative of the `number` rule.
class ____ extends NumberContext {
    public TerminalNode INTEGER_VALUE() { return getToken(PromqlBaseParser.INTEGER_VALUE, 0); }
    @SuppressWarnings("this-escape")
    public IntegerLiteralContext(NumberContext ctx) { copyFrom(ctx); }
    @Override
    public void enterRule(ParseTreeListener listener) {
        // dispatch only to listeners that understand this grammar
        if ( listener instanceof PromqlBaseParserListener ) ((PromqlBaseParserListener)listener).enterIntegerLiteral(this);
    }
    @Override
    public void exitRule(ParseTreeListener listener) {
        if ( listener instanceof PromqlBaseParserListener ) ((PromqlBaseParserListener)listener).exitIntegerLiteral(this);
    }
    @Override
    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
        // visitors of other grammars fall back to the generic child traversal
        if ( visitor instanceof PromqlBaseParserVisitor ) return ((PromqlBaseParserVisitor<? extends T>)visitor).visitIntegerLiteral(this);
        else return visitor.visitChildren(this);
    }
}
@SuppressWarnings("CheckReturnValue")
public static
|
IntegerLiteralContext
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/ValidatorEndpointBuilderFactory.java
|
{
"start": 9659,
"end": 10548
}
|
class ____ and file
* system. Do not use together with resourceResolver.
*
* The option is a:
* <code>org.apache.camel.component.validator.ValidatorResourceResolverFactory</code> type.
*
* Group: advanced
*
* @param resourceResolverFactory the value to set
* @return the dsl builder
*/
default AdvancedValidatorEndpointBuilder resourceResolverFactory(org.apache.camel.component.validator.ValidatorResourceResolverFactory resourceResolverFactory) {
doSetProperty("resourceResolverFactory", resourceResolverFactory);
return this;
}
/**
* To use a custom LSResourceResolver which depends on a dynamic
* endpoint resource URI. The default resource resolver factory returns
* a resource resolver which can read files from the
|
path
|
java
|
quarkusio__quarkus
|
extensions/arc/deployment/src/main/java/io/quarkus/arc/deployment/ArcProcessor.java
|
{
"start": 39465,
"end": 40973
}
|
class ____ a static field of a type QuarkusComponentTestExtension
DotName quarkusComponentTest = DotName.createSimple("io.quarkus.test.component.QuarkusComponentTest");
DotName quarkusComponentTestExtension = DotName.createSimple("io.quarkus.test.component.QuarkusComponentTestExtension");
return new Predicate<ClassInfo>() {
@Override
public boolean test(ClassInfo clazz) {
if (clazz.nestingType() == NestingType.INNER
&& Modifier.isStatic(clazz.flags())) {
DotName enclosingClassName = clazz.enclosingClass();
ClassInfo enclosingClass = index.getClassByName(enclosingClassName);
if (enclosingClass != null) {
if (enclosingClass.hasDeclaredAnnotation(quarkusComponentTest)) {
return true;
} else {
for (FieldInfo field : enclosingClass.fields()) {
if (!field.isSynthetic()
&& Modifier.isStatic(field.flags())
&& field.type().name().equals(quarkusComponentTestExtension)) {
return true;
}
}
}
}
}
return false;
}
};
}
private abstract static
|
with
|
java
|
apache__flink
|
flink-table/flink-table-api-java-bridge/src/main/java/org/apache/flink/legacy/table/factories/StreamTableSinkFactory.java
|
{
"start": 1625,
"end": 1749
}
|
interface ____ instances of {@link DynamicTableSink}. See FLIP-95 for more information.
*/
@Deprecated
@Internal
public
|
creates
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanLongsEvaluator.java
|
{
"start": 5014,
"end": 5783
}
|
/**
 * Factory for {@code GreaterThanLongsEvaluator} instances; captures the source position and the
 * factories producing the left- and right-hand operand evaluators.
 */
class ____ implements EvalOperator.ExpressionEvaluator.Factory {
    private final Source source;
    private final EvalOperator.ExpressionEvaluator.Factory lhs;
    private final EvalOperator.ExpressionEvaluator.Factory rhs;
    public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory lhs,
        EvalOperator.ExpressionEvaluator.Factory rhs) {
        this.source = source;
        this.lhs = lhs;
        this.rhs = rhs;
    }
    @Override
    public GreaterThanLongsEvaluator get(DriverContext context) {
        // build the operand evaluators against the same driver context as the comparison evaluator
        return new GreaterThanLongsEvaluator(source, lhs.get(context), rhs.get(context), context);
    }
    @Override
    public String toString() {
        return "GreaterThanLongsEvaluator[" + "lhs=" + lhs + ", rhs=" + rhs + "]";
    }
}
}
|
Factory
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/state/KeyGroupPartitioner.java
|
{
"start": 12270,
"end": 12443
}
|
interface ____ how one element is written to a {@link DataOutputView}.
*
* @param <T> type of the written elements.
*/
@FunctionalInterface
public
|
defines
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/engine/spi/ExecutableList.java
|
{
"start": 1353,
"end": 1544
}
|
class ____<E extends ComparableExecutable>
implements Serializable, Iterable<E>, Externalizable {
public static final int INIT_QUEUE_LIST_SIZE = 5;
/**
* Provides a sorting
|
ExecutableList
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/EKS2EndpointBuilderFactory.java
|
{
"start": 1576,
"end": 15790
}
|
interface ____
extends
EndpointProducerBuilder {
default AdvancedEKS2EndpointBuilder advanced() {
return (AdvancedEKS2EndpointBuilder) this;
}
/**
* The operation to perform.
*
* The option is a:
* <code>org.apache.camel.component.aws2.eks.EKS2Operations</code> type.
*
* Required: true
* Group: producer
*
* @param operation the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder operation(org.apache.camel.component.aws2.eks.EKS2Operations operation) {
doSetProperty("operation", operation);
return this;
}
/**
* The operation to perform.
*
* The option will be converted to a
* <code>org.apache.camel.component.aws2.eks.EKS2Operations</code> type.
*
* Required: true
* Group: producer
*
* @param operation the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder operation(String operation) {
doSetProperty("operation", operation);
return this;
}
/**
* Set the need for overriding the endpoint. This option needs to be
* used in combination with the uriEndpointOverride option.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param overrideEndpoint the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder overrideEndpoint(boolean overrideEndpoint) {
doSetProperty("overrideEndpoint", overrideEndpoint);
return this;
}
/**
* Set the need for overriding the endpoint. This option needs to be
* used in combination with the uriEndpointOverride option.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param overrideEndpoint the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder overrideEndpoint(String overrideEndpoint) {
doSetProperty("overrideEndpoint", overrideEndpoint);
return this;
}
/**
* If we want to use a POJO request as body or not.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param pojoRequest the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder pojoRequest(boolean pojoRequest) {
doSetProperty("pojoRequest", pojoRequest);
return this;
}
/**
* If we want to use a POJO request as body or not.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param pojoRequest the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder pojoRequest(String pojoRequest) {
doSetProperty("pojoRequest", pojoRequest);
return this;
}
/**
* If using a profile credentials provider, this parameter will set the
* profile name.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: false
* Group: producer
*
* @param profileCredentialsName the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder profileCredentialsName(String profileCredentialsName) {
doSetProperty("profileCredentialsName", profileCredentialsName);
return this;
}
/**
* The region in which EKS client needs to work. When using this
* parameter, the configuration will expect the lowercase name of the
* region (for example, ap-east-1) You'll need to use the name
* Region.EU_WEST_1.id().
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param region the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder region(String region) {
doSetProperty("region", region);
return this;
}
/**
* Set the overriding uri endpoint. This option needs to be used in
* combination with overrideEndpoint option.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param uriEndpointOverride the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder uriEndpointOverride(String uriEndpointOverride) {
doSetProperty("uriEndpointOverride", uriEndpointOverride);
return this;
}
/**
* Set whether the EKS client should expect to load credentials through
* a default credentials provider or to expect static credentials to be
* passed in.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param useDefaultCredentialsProvider the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder useDefaultCredentialsProvider(boolean useDefaultCredentialsProvider) {
doSetProperty("useDefaultCredentialsProvider", useDefaultCredentialsProvider);
return this;
}
/**
* Set whether the EKS client should expect to load credentials through
* a default credentials provider or to expect static credentials to be
* passed in.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param useDefaultCredentialsProvider the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder useDefaultCredentialsProvider(String useDefaultCredentialsProvider) {
doSetProperty("useDefaultCredentialsProvider", useDefaultCredentialsProvider);
return this;
}
/**
* Set whether the EKS client should expect to load credentials through
* a profile credentials provider.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param useProfileCredentialsProvider the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder useProfileCredentialsProvider(boolean useProfileCredentialsProvider) {
doSetProperty("useProfileCredentialsProvider", useProfileCredentialsProvider);
return this;
}
/**
* Set whether the EKS client should expect to load credentials through
* a profile credentials provider.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param useProfileCredentialsProvider the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder useProfileCredentialsProvider(String useProfileCredentialsProvider) {
doSetProperty("useProfileCredentialsProvider", useProfileCredentialsProvider);
return this;
}
/**
* To define a proxy host when instantiating the EKS client.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: proxy
*
* @param proxyHost the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder proxyHost(String proxyHost) {
doSetProperty("proxyHost", proxyHost);
return this;
}
/**
* To define a proxy port when instantiating the EKS client.
*
* The option is a: <code>java.lang.Integer</code> type.
*
* Group: proxy
*
* @param proxyPort the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder proxyPort(Integer proxyPort) {
doSetProperty("proxyPort", proxyPort);
return this;
}
/**
* To define a proxy port when instantiating the EKS client.
*
* The option will be converted to a <code>java.lang.Integer</code>
* type.
*
* Group: proxy
*
* @param proxyPort the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder proxyPort(String proxyPort) {
doSetProperty("proxyPort", proxyPort);
return this;
}
/**
* To define a proxy protocol when instantiating the EKS client.
*
* The option is a: <code>software.amazon.awssdk.core.Protocol</code>
* type.
*
* Default: HTTPS
* Group: proxy
*
* @param proxyProtocol the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder proxyProtocol(software.amazon.awssdk.core.Protocol proxyProtocol) {
doSetProperty("proxyProtocol", proxyProtocol);
return this;
}
/**
* To define a proxy protocol when instantiating the EKS client.
*
* The option will be converted to a
* <code>software.amazon.awssdk.core.Protocol</code> type.
*
* Default: HTTPS
* Group: proxy
*
* @param proxyProtocol the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder proxyProtocol(String proxyProtocol) {
doSetProperty("proxyProtocol", proxyProtocol);
return this;
}
/**
* Amazon AWS Access Key.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param accessKey the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder accessKey(String accessKey) {
doSetProperty("accessKey", accessKey);
return this;
}
/**
* Amazon AWS Secret Key.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param secretKey the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder secretKey(String secretKey) {
doSetProperty("secretKey", secretKey);
return this;
}
/**
* Amazon AWS Session Token used when the user needs to assume an IAM
* role.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param sessionToken the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder sessionToken(String sessionToken) {
doSetProperty("sessionToken", sessionToken);
return this;
}
/**
* If we want to trust all certificates in case of overriding the
* endpoint.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param trustAllCertificates the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder trustAllCertificates(boolean trustAllCertificates) {
doSetProperty("trustAllCertificates", trustAllCertificates);
return this;
}
/**
* If we want to trust all certificates in case of overriding the
* endpoint.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param trustAllCertificates the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder trustAllCertificates(String trustAllCertificates) {
doSetProperty("trustAllCertificates", trustAllCertificates);
return this;
}
/**
* Set whether the EKS client should expect to use Session Credentials.
* This is useful in a situation in which the user needs to assume an
* IAM role for doing operations in EKS.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useSessionCredentials the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder useSessionCredentials(boolean useSessionCredentials) {
doSetProperty("useSessionCredentials", useSessionCredentials);
return this;
}
/**
* Set whether the EKS client should expect to use Session Credentials.
* This is useful in a situation in which the user needs to assume an
* IAM role for doing operations in EKS.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: security
*
* @param useSessionCredentials the value to set
* @return the dsl builder
*/
default EKS2EndpointBuilder useSessionCredentials(String useSessionCredentials) {
doSetProperty("useSessionCredentials", useSessionCredentials);
return this;
}
}
/**
* Advanced builder for endpoint for the AWS Elastic Kubernetes Service (EKS) component.
*/
public
|
EKS2EndpointBuilder
|
java
|
spring-projects__spring-framework
|
spring-tx/src/main/java/org/springframework/transaction/reactive/TransactionSynchronization.java
|
{
"start": 957,
"end": 1096
}
|
interface ____ influence their execution order.
* A synchronization that does not implement the {@link org.springframework.core.Ordered}
*
|
to
|
java
|
lettuce-io__lettuce-core
|
src/test/java/io/lettuce/scenario/RelaxedTimeoutConfigurationTest.java
|
{
"start": 13514,
"end": 13604
}
|
enum ____ {
RELAXED, UNRELAXED
}
public static
|
TimeoutExpectation
|
java
|
quarkusio__quarkus
|
extensions/hibernate-orm/runtime/src/main/java/io/quarkus/hibernate/orm/runtime/proxies/PreGeneratedProxies.java
|
{
"start": 116,
"end": 439
}
|
class ____ proxies that were generated at build time,
* where possible these are re-used rather than generating new ones
* at static init time.
*
* In most circumstances these will be used for every entity, however
* in some corner cases it may still be necessary to generate proxies
* at static init time.
*
* This
|
for
|
java
|
elastic__elasticsearch
|
x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/lucene70/Lucene70DocValuesProducer.java
|
{
"start": 38508,
"end": 54758
}
|
class ____ extends BaseTermsEnum {
final TermsDictEntry entry;
final LongValues blockAddresses;
final IndexInput bytes;
final long blockMask;
final LongValues indexAddresses;
final IndexInput indexBytes;
final BytesRef term;
long ord = -1;
TermsDict(TermsDictEntry entry, IndexInput data) throws IOException {
this.entry = entry;
RandomAccessInput addressesSlice = data.randomAccessSlice(entry.termsAddressesOffset, entry.termsAddressesLength);
blockAddresses = LegacyDirectMonotonicReader.getInstance(entry.termsAddressesMeta, addressesSlice);
bytes = data.slice("terms", entry.termsDataOffset, entry.termsDataLength);
blockMask = (1L << entry.termsDictBlockShift) - 1;
RandomAccessInput indexAddressesSlice = data.randomAccessSlice(
entry.termsIndexAddressesOffset,
entry.termsIndexAddressesLength
);
indexAddresses = LegacyDirectMonotonicReader.getInstance(entry.termsIndexAddressesMeta, indexAddressesSlice);
indexBytes = data.slice("terms-index", entry.termsIndexOffset, entry.termsIndexLength);
term = new BytesRef(entry.maxTermLength);
}
@Override
public BytesRef next() throws IOException {
if (++ord >= entry.termsDictSize) {
return null;
}
if ((ord & blockMask) == 0L) {
term.length = bytes.readVInt();
bytes.readBytes(term.bytes, 0, term.length);
} else {
final int token = Byte.toUnsignedInt(bytes.readByte());
int prefixLength = token & 0x0F;
int suffixLength = 1 + (token >>> 4);
if (prefixLength == 15) {
prefixLength += bytes.readVInt();
}
if (suffixLength == 16) {
suffixLength += bytes.readVInt();
}
term.length = prefixLength + suffixLength;
bytes.readBytes(term.bytes, prefixLength, suffixLength);
}
return term;
}
@Override
public void seekExact(long ord) throws IOException {
if (ord < 0 || ord >= entry.termsDictSize) {
throw new IndexOutOfBoundsException();
}
final long blockIndex = ord >>> entry.termsDictBlockShift;
final long blockAddress = blockAddresses.get(blockIndex);
bytes.seek(blockAddress);
this.ord = (blockIndex << entry.termsDictBlockShift) - 1;
do {
next();
} while (this.ord < ord);
}
private BytesRef getTermFromIndex(long index) throws IOException {
assert index >= 0 && index <= (entry.termsDictSize - 1) >>> entry.termsDictIndexShift;
final long start = indexAddresses.get(index);
term.length = (int) (indexAddresses.get(index + 1) - start);
indexBytes.seek(start);
indexBytes.readBytes(term.bytes, 0, term.length);
return term;
}
private long seekTermsIndex(BytesRef text) throws IOException {
long lo = 0L;
long hi = (entry.termsDictSize - 1) >>> entry.termsDictIndexShift;
while (lo <= hi) {
final long mid = (lo + hi) >>> 1;
getTermFromIndex(mid);
final int cmp = term.compareTo(text);
if (cmp <= 0) {
lo = mid + 1;
} else {
hi = mid - 1;
}
}
assert hi < 0 || getTermFromIndex(hi).compareTo(text) <= 0;
assert hi == ((entry.termsDictSize - 1) >>> entry.termsDictIndexShift) || getTermFromIndex(hi + 1).compareTo(text) > 0;
return hi;
}
private BytesRef getFirstTermFromBlock(long block) throws IOException {
assert block >= 0 && block <= (entry.termsDictSize - 1) >>> entry.termsDictBlockShift;
final long blockAddress = blockAddresses.get(block);
bytes.seek(blockAddress);
term.length = bytes.readVInt();
bytes.readBytes(term.bytes, 0, term.length);
return term;
}
private long seekBlock(BytesRef text) throws IOException {
long index = seekTermsIndex(text);
if (index == -1L) {
return -1L;
}
long ordLo = index << entry.termsDictIndexShift;
long ordHi = Math.min(entry.termsDictSize, ordLo + (1L << entry.termsDictIndexShift)) - 1L;
long blockLo = ordLo >>> entry.termsDictBlockShift;
long blockHi = ordHi >>> entry.termsDictBlockShift;
while (blockLo <= blockHi) {
final long blockMid = (blockLo + blockHi) >>> 1;
getFirstTermFromBlock(blockMid);
final int cmp = term.compareTo(text);
if (cmp <= 0) {
blockLo = blockMid + 1;
} else {
blockHi = blockMid - 1;
}
}
assert blockHi < 0 || getFirstTermFromBlock(blockHi).compareTo(text) <= 0;
assert blockHi == ((entry.termsDictSize - 1) >>> entry.termsDictBlockShift)
|| getFirstTermFromBlock(blockHi + 1).compareTo(text) > 0;
return blockHi;
}
@Override
public SeekStatus seekCeil(BytesRef text) throws IOException {
final long block = seekBlock(text);
if (block == -1) {
// before the first term
seekExact(0L);
return SeekStatus.NOT_FOUND;
}
final long blockAddress = blockAddresses.get(block);
this.ord = block << entry.termsDictBlockShift;
bytes.seek(blockAddress);
term.length = bytes.readVInt();
bytes.readBytes(term.bytes, 0, term.length);
while (true) {
int cmp = term.compareTo(text);
if (cmp == 0) {
return SeekStatus.FOUND;
} else if (cmp > 0) {
return SeekStatus.NOT_FOUND;
}
if (next() == null) {
return SeekStatus.END;
}
}
}
@Override
public BytesRef term() throws IOException {
return term;
}
@Override
public long ord() throws IOException {
return ord;
}
@Override
public long totalTermFreq() throws IOException {
return -1L;
}
@Override
public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public ImpactsEnum impacts(int flags) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public int docFreq() throws IOException {
throw new UnsupportedOperationException();
}
}
@Override
public SortedNumericDocValues getSortedNumeric(FieldInfo field) throws IOException {
SortedNumericEntry entry = sortedNumerics.get(field.name);
if (entry.numValues == entry.numDocsWithField) {
return DocValues.singleton(getNumeric(entry));
}
final RandomAccessInput addressesInput = data.randomAccessSlice(entry.addressesOffset, entry.addressesLength);
final LongValues addresses = LegacyDirectMonotonicReader.getInstance(entry.addressesMeta, addressesInput);
final LongValues values = getNumericValues(entry);
if (entry.docsWithFieldOffset == -1) {
// dense
return new SortedNumericDocValues() {
int doc = -1;
long start, end;
int count;
@Override
public int nextDoc() throws IOException {
return advance(doc + 1);
}
@Override
public int docID() {
return doc;
}
@Override
public long cost() {
return maxDoc;
}
@Override
public int advance(int target) throws IOException {
if (target >= maxDoc) {
return doc = NO_MORE_DOCS;
}
start = addresses.get(target);
end = addresses.get(target + 1L);
count = (int) (end - start);
return doc = target;
}
@Override
public boolean advanceExact(int target) throws IOException {
start = addresses.get(target);
end = addresses.get(target + 1L);
count = (int) (end - start);
doc = target;
return true;
}
@Override
public long nextValue() throws IOException {
return values.get(start++);
}
@Override
public int docValueCount() {
return count;
}
};
} else {
// sparse
final IndexedDISI disi = new IndexedDISI(data, entry.docsWithFieldOffset, entry.docsWithFieldLength, entry.numDocsWithField);
return new SortedNumericDocValues() {
boolean set;
long start, end;
int count;
@Override
public int nextDoc() throws IOException {
set = false;
return disi.nextDoc();
}
@Override
public int docID() {
return disi.docID();
}
@Override
public long cost() {
return disi.cost();
}
@Override
public int advance(int target) throws IOException {
set = false;
return disi.advance(target);
}
@Override
public boolean advanceExact(int target) throws IOException {
set = false;
return disi.advanceExact(target);
}
@Override
public long nextValue() throws IOException {
set();
return values.get(start++);
}
@Override
public int docValueCount() {
set();
return count;
}
private void set() {
if (set == false) {
final int index = disi.index();
start = addresses.get(index);
end = addresses.get(index + 1L);
count = (int) (end - start);
set = true;
}
}
};
}
}
@Override
public SortedSetDocValues getSortedSet(FieldInfo field) throws IOException {
SortedSetEntry entry = sortedSets.get(field.name);
if (entry.singleValueEntry != null) {
return DocValues.singleton(getSorted(entry.singleValueEntry));
}
final RandomAccessInput slice = data.randomAccessSlice(entry.ordsOffset, entry.ordsLength);
final LongValues ords = LegacyDirectReader.getInstance(slice, entry.bitsPerValue);
final RandomAccessInput addressesInput = data.randomAccessSlice(entry.addressesOffset, entry.addressesLength);
final LongValues addresses = LegacyDirectMonotonicReader.getInstance(entry.addressesMeta, addressesInput);
if (entry.docsWithFieldOffset == -1) {
// dense
return new BaseSortedSetDocValues(entry, data) {
int doc = -1;
long start, end;
int count;
@Override
public int nextDoc() throws IOException {
return advance(doc + 1);
}
@Override
public int docID() {
return doc;
}
@Override
public long cost() {
return maxDoc;
}
@Override
public int advance(int target) throws IOException {
if (target >= maxDoc) {
return doc = NO_MORE_DOCS;
}
start = addresses.get(target);
end = addresses.get(target + 1L);
count = (int) (end - start);
return doc = target;
}
@Override
public boolean advanceExact(int target) throws IOException {
start = addresses.get(target);
end = addresses.get(target + 1L);
count = (int) (end - start);
doc = target;
return true;
}
@Override
public long nextOrd() throws IOException {
if (start == end) {
return NO_MORE_ORDS;
}
return ords.get(start++);
}
@Override
public int docValueCount() {
return count;
}
};
} else {
// sparse
final IndexedDISI disi = new IndexedDISI(data, entry.docsWithFieldOffset, entry.docsWithFieldLength, entry.numDocsWithField);
return new BaseSortedSetDocValues(entry, data) {
boolean set;
long start;
long end = 0;
int count;
@Override
public int nextDoc() throws IOException {
set = false;
return disi.nextDoc();
}
@Override
public int docID() {
return disi.docID();
}
@Override
public long cost() {
return disi.cost();
}
@Override
public int advance(int target) throws IOException {
set = false;
return disi.advance(target);
}
@Override
public boolean advanceExact(int target) throws IOException {
set = false;
return disi.advanceExact(target);
}
private boolean set() {
if (set == false) {
final int index = disi.index();
start = addresses.get(index);
end = addresses.get(index + 1L);
count = (int) (end - start);
set = true;
return true;
}
return false;
}
@Override
public long nextOrd() throws IOException {
if (set()) {
return ords.get(start++);
} else if (start == end) {
return NO_MORE_ORDS;
} else {
return ords.get(start++);
}
}
@Override
public int docValueCount() {
set();
return count;
}
};
}
}
@Override
public void checkIntegrity() throws IOException {
CodecUtil.checksumEntireFile(data);
}
@Override
public DocValuesSkipper getSkipper(FieldInfo field) {
return null;
}
}
|
TermsDict
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/oracle/insert/OracleInsertTest13.java
|
{
"start": 1062,
"end": 2869
}
|
class ____ extends OracleTest {
public void test_0() throws Exception {
String sql = "INSERT INTO bonuses"
+ " SELECT employee_id, salary*1.1 "//
+ " FROM employees"//
+ " WHERE commission_pct > 0.25; ";
OracleStatementParser parser = new OracleStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement stmt = statementList.get(0);
print(statementList);
assertEquals(1, statementList.size());
assertEquals("INSERT INTO bonuses"
+ "\nSELECT employee_id, salary * 1.1"//
+ "\nFROM employees"//
+ "\nWHERE commission_pct > 0.25;",
SQLUtils.toSQLString(stmt, JdbcConstants.ORACLE));
OracleSchemaStatVisitor visitor = new OracleSchemaStatVisitor();
stmt.accept(visitor);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
System.out.println("coditions : " + visitor.getConditions());
System.out.println("relationships : " + visitor.getRelationships());
assertEquals(2, visitor.getTables().size());
assertEquals(3, visitor.getColumns().size());
assertTrue(visitor.getTables().containsKey(new TableStat.Name("bonuses")));
assertTrue(visitor.getTables().containsKey(new TableStat.Name("employees")));
assertTrue(visitor.getColumns().contains(new TableStat.Column("employees", "employee_id")));
assertTrue(visitor.getColumns().contains(new TableStat.Column("employees", "salary")));
assertTrue(visitor.getColumns().contains(new TableStat.Column("employees", "commission_pct")));
}
}
|
OracleInsertTest13
|
java
|
apache__kafka
|
connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicAdminTest.java
|
{
"start": 4133,
"end": 50894
}
|
class ____ {
/**
* 0.11.0.0 clients can talk with older brokers, but the CREATE_TOPIC API was added in 0.10.1.0. That means,
* if our TopicAdmin talks to a pre 0.10.1 broker, it should receive an UnsupportedVersionException, should
* create no topics, and return false.
*/
@Test
public void returnEmptyWithApiVersionMismatchOnCreate() {
final NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build();
Cluster cluster = createCluster(1);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
env.kafkaClient().prepareResponse(createTopicResponseWithUnsupportedVersion(newTopic));
TopicAdmin admin = new TopicAdmin(env.adminClient());
assertTrue(admin.createOrFindTopics(newTopic).isEmpty());
}
}
@Test
public void returnEmptyWithClusterAuthorizationFailureOnCreate() {
final NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build();
Cluster cluster = createCluster(1);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
env.kafkaClient().prepareResponse(createTopicResponseWithClusterAuthorizationException(newTopic));
TopicAdmin admin = new TopicAdmin(env.adminClient());
assertFalse(admin.createTopic(newTopic));
env.kafkaClient().prepareResponse(createTopicResponseWithClusterAuthorizationException(newTopic));
assertTrue(admin.createOrFindTopics(newTopic).isEmpty());
}
}
@Test
public void throwsWithClusterAuthorizationFailureOnDescribe() {
final NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build();
Cluster cluster = createCluster(1);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
env.kafkaClient().prepareResponse(describeClusterResponse(cluster));
env.kafkaClient().prepareResponse(describeTopicResponseWithClusterAuthorizationException(newTopic));
TopicAdmin admin = new TopicAdmin(env.adminClient());
Exception e = assertThrows(ConnectException.class, () -> admin.describeTopics(newTopic.name()));
assertInstanceOf(ClusterAuthorizationException.class, e.getCause());
}
}
@Test
public void returnEmptyWithTopicAuthorizationFailureOnCreate() {
final NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build();
Cluster cluster = createCluster(1);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
env.kafkaClient().prepareResponse(createTopicResponseWithTopicAuthorizationException(newTopic));
TopicAdmin admin = new TopicAdmin(env.adminClient());
assertFalse(admin.createTopic(newTopic));
env.kafkaClient().prepareResponse(createTopicResponseWithTopicAuthorizationException(newTopic));
assertTrue(admin.createOrFindTopics(newTopic).isEmpty());
}
}
@Test
public void throwsWithTopicAuthorizationFailureOnDescribe() {
final NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build();
Cluster cluster = createCluster(1);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
env.kafkaClient().prepareResponse(describeClusterResponse(cluster));
env.kafkaClient().prepareResponse(describeTopicResponseWithTopicAuthorizationException(newTopic));
TopicAdmin admin = new TopicAdmin(env.adminClient());
Exception e = assertThrows(ConnectException.class, () -> admin.describeTopics(newTopic.name()));
assertInstanceOf(TopicAuthorizationException.class, e.getCause());
}
}
@Test
public void shouldNotCreateTopicWhenItAlreadyExists() {
NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build();
Cluster cluster = createCluster(1);
try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) {
TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), List.of());
mockAdminClient.addTopic(false, "myTopic", List.of(topicPartitionInfo), null);
TopicAdmin admin = new TopicAdmin(mockAdminClient);
assertFalse(admin.createTopic(newTopic));
assertTrue(admin.createTopics(newTopic).isEmpty());
assertTrue(admin.createOrFindTopic(newTopic));
TopicAdmin.TopicCreationResponse response = admin.createOrFindTopics(newTopic);
assertTrue(response.isCreatedOrExisting(newTopic.name()));
assertTrue(response.isExisting(newTopic.name()));
assertFalse(response.isCreated(newTopic.name()));
}
}
@Test
public void shouldCreateTopicWithPartitionsWhenItDoesNotExist() {
for (int numBrokers = 1; numBrokers < 10; ++numBrokers) {
int expectedReplicas = Math.min(3, numBrokers);
int maxDefaultRf = Math.min(numBrokers, 5);
for (int numPartitions = 1; numPartitions < 30; ++numPartitions) {
NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(numPartitions).compacted().build();
// Try clusters with no default replication factor or default partitions
assertTopicCreation(numBrokers, newTopic, null, null, expectedReplicas, numPartitions);
// Try clusters with different default partitions
for (int defaultPartitions = 1; defaultPartitions < 20; ++defaultPartitions) {
assertTopicCreation(numBrokers, newTopic, defaultPartitions, null, expectedReplicas, numPartitions);
}
// Try clusters with different default replication factors
for (int defaultRF = 1; defaultRF < maxDefaultRf; ++defaultRF) {
assertTopicCreation(numBrokers, newTopic, null, defaultRF, defaultRF, numPartitions);
}
}
}
}
@Test
public void shouldCreateTopicWithReplicationFactorWhenItDoesNotExist() {
for (int numBrokers = 1; numBrokers < 10; ++numBrokers) {
int maxRf = Math.min(numBrokers, 5);
int maxDefaultRf = Math.min(numBrokers, 5);
for (short rf = 1; rf < maxRf; ++rf) {
NewTopic newTopic = TopicAdmin.defineTopic("myTopic").replicationFactor(rf).compacted().build();
// Try clusters with no default replication factor or default partitions
assertTopicCreation(numBrokers, newTopic, null, null, rf, 1);
// Try clusters with different default partitions
for (int numPartitions = 1; numPartitions < 30; ++numPartitions) {
assertTopicCreation(numBrokers, newTopic, numPartitions, null, rf, numPartitions);
}
// Try clusters with different default replication factors
for (int defaultRF = 1; defaultRF < maxDefaultRf; ++defaultRF) {
assertTopicCreation(numBrokers, newTopic, null, defaultRF, rf, 1);
}
}
}
}
@Test
public void shouldCreateTopicWithDefaultPartitionsAndReplicationFactorWhenItDoesNotExist() {
NewTopic newTopic = TopicAdmin.defineTopic("my-topic")
.defaultPartitions()
.defaultReplicationFactor()
.compacted()
.build();
for (int numBrokers = 1; numBrokers < 10; ++numBrokers) {
int expectedReplicas = Math.min(3, numBrokers);
assertTopicCreation(numBrokers, newTopic, null, null, expectedReplicas, 1);
assertTopicCreation(numBrokers, newTopic, 30, null, expectedReplicas, 30);
}
}
@Test
public void shouldCreateOneTopicWhenProvidedMultipleDefinitionsWithSameTopicName() {
NewTopic newTopic1 = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build();
NewTopic newTopic2 = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build();
Cluster cluster = createCluster(1);
try (TopicAdmin admin = new TopicAdmin(new MockAdminClient(cluster.nodes(), cluster.nodeById(0)))) {
Set<String> newTopicNames = admin.createTopics(newTopic1, newTopic2);
assertEquals(1, newTopicNames.size());
assertEquals(newTopic2.name(), newTopicNames.iterator().next());
}
}
@Test
public void shouldRetryCreateTopicWhenAvailableBrokersAreNotEnoughForReplicationFactor() {
Cluster cluster = createCluster(1);
NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).replicationFactor((short) 2).compacted().build();
try (TopicAdmin admin = Mockito.spy(new TopicAdmin(new MockAdminClient(cluster.nodes(), cluster.nodeById(0))))) {
try {
admin.createTopicsWithRetry(newTopic, 2, 1, new MockTime());
} catch (Exception e) {
// not relevant
e.printStackTrace();
}
Mockito.verify(admin, Mockito.times(2)).createTopics(newTopic);
}
}
@Test
public void shouldRetryWhenTopicCreateThrowsWrappedTimeoutException() {
Cluster cluster = createCluster(1);
NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).replicationFactor((short) 1).compacted().build();
try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0));
TopicAdmin admin = Mockito.spy(new TopicAdmin(mockAdminClient))) {
mockAdminClient.timeoutNextRequest(1);
try {
admin.createTopicsWithRetry(newTopic, 2, 1, new MockTime());
} catch (Exception e) {
// not relevant
e.printStackTrace();
}
Mockito.verify(admin, Mockito.times(2)).createTopics(newTopic);
}
}
@Test
public void createShouldReturnFalseWhenSuppliedNullTopicDescription() {
Cluster cluster = createCluster(1);
try (TopicAdmin admin = new TopicAdmin(new MockAdminClient(cluster.nodes(), cluster.nodeById(0)))) {
boolean created = admin.createTopic(null);
assertFalse(created);
}
}
@Test
public void describeShouldReturnEmptyWhenTopicDoesNotExist() {
NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build();
Cluster cluster = createCluster(1);
try (TopicAdmin admin = new TopicAdmin(new MockAdminClient(cluster.nodes(), cluster.nodeById(0)))) {
assertTrue(admin.describeTopics(newTopic.name()).isEmpty());
}
}
@Test
public void describeShouldReturnTopicDescriptionWhenTopicExists() {
String topicName = "myTopic";
NewTopic newTopic = TopicAdmin.defineTopic(topicName).partitions(1).compacted().build();
Cluster cluster = createCluster(1);
try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) {
TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), List.of());
mockAdminClient.addTopic(false, topicName, List.of(topicPartitionInfo), null);
TopicAdmin admin = new TopicAdmin(mockAdminClient);
Map<String, TopicDescription> desc = admin.describeTopics(newTopic.name());
assertFalse(desc.isEmpty());
TopicDescription topicDesc = new TopicDescription(topicName, false, List.of(topicPartitionInfo));
assertEquals(desc.get("myTopic"), topicDesc);
}
}
@Test
public void describeTopicConfigShouldReturnEmptyMapWhenNoTopicsAreSpecified() {
final NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build();
Cluster cluster = createCluster(1);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
env.kafkaClient().prepareResponse(describeConfigsResponseWithUnsupportedVersion(newTopic));
TopicAdmin admin = new TopicAdmin(env.adminClient());
Map<String, Config> results = admin.describeTopicConfigs();
assertTrue(results.isEmpty());
}
}
@Test
public void describeTopicConfigShouldReturnEmptyMapWhenUnsupportedVersionFailure() {
final NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build();
Cluster cluster = createCluster(1);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
env.kafkaClient().prepareResponse(describeConfigsResponseWithUnsupportedVersion(newTopic));
TopicAdmin admin = new TopicAdmin(env.adminClient());
Map<String, Config> results = admin.describeTopicConfigs(newTopic.name());
assertTrue(results.isEmpty());
}
}
@Test
public void describeTopicConfigShouldReturnEmptyMapWhenClusterAuthorizationFailure() {
final NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build();
Cluster cluster = createCluster(1);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
env.kafkaClient().prepareResponse(describeConfigsResponseWithClusterAuthorizationException(newTopic));
TopicAdmin admin = new TopicAdmin(env.adminClient());
Map<String, Config> results = admin.describeTopicConfigs(newTopic.name());
assertTrue(results.isEmpty());
}
}
@Test
public void describeTopicConfigShouldReturnEmptyMapWhenTopicAuthorizationFailure() {
final NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build();
Cluster cluster = createCluster(1);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
env.kafkaClient().prepareResponse(describeConfigsResponseWithTopicAuthorizationException(newTopic));
TopicAdmin admin = new TopicAdmin(env.adminClient());
Map<String, Config> results = admin.describeTopicConfigs(newTopic.name());
assertTrue(results.isEmpty());
}
}
@Test
public void describeTopicConfigShouldReturnMapWithNullValueWhenTopicDoesNotExist() {
NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build();
Cluster cluster = createCluster(1);
try (TopicAdmin admin = new TopicAdmin(new MockAdminClient(cluster.nodes(), cluster.nodeById(0)))) {
Map<String, Config> results = admin.describeTopicConfigs(newTopic.name());
assertFalse(results.isEmpty());
assertEquals(1, results.size());
assertNull(results.get("myTopic"));
}
}
@Test
public void describeTopicConfigShouldReturnTopicConfigWhenTopicExists() {
String topicName = "myTopic";
NewTopic newTopic = TopicAdmin.defineTopic(topicName)
.config(Map.of("foo", "bar"))
.partitions(1)
.compacted()
.build();
Cluster cluster = createCluster(1);
try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) {
TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), List.of());
mockAdminClient.addTopic(false, topicName, List.of(topicPartitionInfo), null);
TopicAdmin admin = new TopicAdmin(mockAdminClient);
Map<String, Config> result = admin.describeTopicConfigs(newTopic.name());
assertFalse(result.isEmpty());
assertEquals(1, result.size());
Config config = result.get("myTopic");
assertNotNull(config);
config.entries().forEach(entry -> assertEquals(newTopic.configs().get(entry.name()), entry.value()));
}
}
@Test
public void verifyingTopicCleanupPolicyShouldReturnFalseWhenBrokerVersionIsUnsupported() {
final NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build();
Cluster cluster = createCluster(1);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
env.kafkaClient().prepareResponse(describeConfigsResponseWithUnsupportedVersion(newTopic));
TopicAdmin admin = new TopicAdmin(env.adminClient());
boolean result = admin.verifyTopicCleanupPolicyOnlyCompact("myTopic", "worker.topic", "purpose");
assertFalse(result);
}
}
@Test
public void verifyingTopicCleanupPolicyShouldReturnFalseWhenClusterAuthorizationError() {
final NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build();
Cluster cluster = createCluster(1);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
env.kafkaClient().prepareResponse(describeConfigsResponseWithClusterAuthorizationException(newTopic));
TopicAdmin admin = new TopicAdmin(env.adminClient());
boolean result = admin.verifyTopicCleanupPolicyOnlyCompact("myTopic", "worker.topic", "purpose");
assertFalse(result);
}
}
@Test
public void verifyingTopicCleanupPolicyShouldReturnFalseWhenTopicAuthorizationError() {
final NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build();
Cluster cluster = createCluster(1);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
env.kafkaClient().prepareResponse(describeConfigsResponseWithTopicAuthorizationException(newTopic));
TopicAdmin admin = new TopicAdmin(env.adminClient());
boolean result = admin.verifyTopicCleanupPolicyOnlyCompact("myTopic", "worker.topic", "purpose");
assertFalse(result);
}
}
@Test
public void verifyingTopicCleanupPolicyShouldReturnTrueWhenTopicHasCorrectPolicy() {
String topicName = "myTopic";
Map<String, String> topicConfigs = Map.of("cleanup.policy", "compact");
Cluster cluster = createCluster(1);
try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) {
TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), List.of());
mockAdminClient.addTopic(false, topicName, List.of(topicPartitionInfo), topicConfigs);
TopicAdmin admin = new TopicAdmin(mockAdminClient);
boolean result = admin.verifyTopicCleanupPolicyOnlyCompact("myTopic", "worker.topic", "purpose");
assertTrue(result);
}
}
@Test
public void verifyingTopicCleanupPolicyShouldFailWhenTopicHasDeletePolicy() {
String topicName = "myTopic";
Map<String, String> topicConfigs = Map.of("cleanup.policy", "delete");
Cluster cluster = createCluster(1);
try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) {
TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), List.of());
mockAdminClient.addTopic(false, topicName, List.of(topicPartitionInfo), topicConfigs);
TopicAdmin admin = new TopicAdmin(mockAdminClient);
ConfigException e = assertThrows(ConfigException.class, () -> admin.verifyTopicCleanupPolicyOnlyCompact("myTopic", "worker.topic", "purpose"));
assertTrue(e.getMessage().contains("to guarantee consistency and durability"));
}
}
@Test
public void verifyingTopicCleanupPolicyShouldFailWhenTopicHasDeleteAndCompactPolicy() {
String topicName = "myTopic";
Map<String, String> topicConfigs = Map.of("cleanup.policy", "delete,compact");
Cluster cluster = createCluster(1);
try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) {
TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), List.of());
mockAdminClient.addTopic(false, topicName, List.of(topicPartitionInfo), topicConfigs);
TopicAdmin admin = new TopicAdmin(mockAdminClient);
ConfigException e = assertThrows(ConfigException.class, () -> admin.verifyTopicCleanupPolicyOnlyCompact("myTopic", "worker.topic", "purpose"));
assertTrue(e.getMessage().contains("to guarantee consistency and durability"));
}
}
@Test
public void verifyingGettingTopicCleanupPolicies() {
String topicName = "myTopic";
Map<String, String> topicConfigs = Map.of("cleanup.policy", "compact");
Cluster cluster = createCluster(1);
try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) {
TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), List.of());
mockAdminClient.addTopic(false, topicName, List.of(topicPartitionInfo), topicConfigs);
TopicAdmin admin = new TopicAdmin(mockAdminClient);
Set<String> policies = admin.topicCleanupPolicy("myTopic");
assertEquals(1, policies.size());
assertEquals(TopicConfig.CLEANUP_POLICY_COMPACT, policies.iterator().next());
}
}
/**
* TopicAdmin can be used to read the end offsets, but the admin client API used to do this was
* added to the broker in 0.11.0.0. This means that if Connect talks to older brokers,
* the admin client cannot be used to read end offsets, and will throw an UnsupportedVersionException.
*/
@Test
public void retryEndOffsetsShouldRethrowUnknownVersionException() {
String topicName = "myTopic";
TopicPartition tp1 = new TopicPartition(topicName, 0);
Set<TopicPartition> tps = Set.of(tp1);
Long offset = null; // response should use error
Cluster cluster = createCluster(1, topicName, 1);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
// Expect the admin client list offsets will throw unsupported version, simulating older brokers
env.kafkaClient().prepareResponse(listOffsetsResultWithUnsupportedVersion(tp1, offset));
TopicAdmin admin = new TopicAdmin(env.adminClient());
// The retryEndOffsets should catch and rethrow an unsupported version exception
assertThrows(UnsupportedVersionException.class, () -> admin.retryEndOffsets(tps, Duration.ofMillis(100), 1));
}
}
@Test
public void retryEndOffsetsShouldWrapNonRetriableExceptionsWithConnectException() {
String topicName = "myTopic";
TopicPartition tp1 = new TopicPartition(topicName, 0);
Set<TopicPartition> tps = Set.of(tp1);
Long offset = 1000L;
Cluster cluster = createCluster(1, "myTopic", 1);
try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
// This error should be treated as non-retriable and cause TopicAdmin::retryEndOffsets to fail
env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.TOPIC_AUTHORIZATION_FAILED, Errors.NONE));
// But, in case there's a bug in our logic, prepare a valid response afterward so that TopicAdmin::retryEndOffsets
// will return successfully if we retry (which should in turn cause this test to fail)
env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
env.kafkaClient().prepareResponse(listOffsetsResult(tp1, offset));
TopicAdmin admin = new TopicAdmin(env.adminClient());
ConnectException exception = assertThrows(ConnectException.class, () ->
admin.retryEndOffsets(tps, Duration.ofMillis(100), 1)
);
Throwable cause = exception.getCause();
assertNotNull(cause, "cause of failure should be preserved");
assertInstanceOf(TopicAuthorizationException.class, cause, "cause of failure should be accurately reported; expected topic authorization error, but was " + cause);
}
}
@Test
public void retryEndOffsetsShouldRetryWhenTopicNotFound() {
String topicName = "myTopic";
TopicPartition tp1 = new TopicPartition(topicName, 0);
Set<TopicPartition> tps = Set.of(tp1);
Long offset = 1000L;
Cluster cluster = createCluster(1, "myTopic", 1);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.UNKNOWN_TOPIC_OR_PARTITION));
env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
env.kafkaClient().prepareResponse(listOffsetsResult(tp1, offset));
TopicAdmin admin = new TopicAdmin(env.adminClient());
Map<TopicPartition, Long> endoffsets = admin.retryEndOffsets(tps, Duration.ofMillis(100), 1);
assertEquals(Map.of(tp1, offset), endoffsets);
}
}
@Test
public void endOffsetsShouldFailWithNonRetriableWhenAuthorizationFailureOccurs() {
String topicName = "myTopic";
TopicPartition tp1 = new TopicPartition(topicName, 0);
Set<TopicPartition> tps = Set.of(tp1);
Long offset = null; // response should use error
Cluster cluster = createCluster(1, topicName, 1);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
env.kafkaClient().prepareResponse(listOffsetsResultWithClusterAuthorizationException(tp1, offset));
TopicAdmin admin = new TopicAdmin(env.adminClient());
ConnectException e = assertThrows(ConnectException.class, () -> admin.endOffsets(tps));
assertTrue(e.getMessage().contains("Not authorized to get the end offsets"));
}
}
@Test
public void endOffsetsShouldFailWithUnsupportedVersionWhenVersionUnsupportedErrorOccurs() {
String topicName = "myTopic";
TopicPartition tp1 = new TopicPartition(topicName, 0);
Set<TopicPartition> tps = Set.of(tp1);
Long offset = null; // response should use error
Cluster cluster = createCluster(1, topicName, 1);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
env.kafkaClient().prepareResponse(listOffsetsResultWithUnsupportedVersion(tp1, offset));
TopicAdmin admin = new TopicAdmin(env.adminClient());
assertThrows(UnsupportedVersionException.class, () -> admin.endOffsets(tps));
}
}
@Test
public void endOffsetsShouldFailWithTimeoutExceptionWhenTimeoutErrorOccurs() {
String topicName = "myTopic";
TopicPartition tp1 = new TopicPartition(topicName, 0);
Set<TopicPartition> tps = Set.of(tp1);
Long offset = null; // response should use error
Cluster cluster = createCluster(1, topicName, 1);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(
new MockTime(), cluster, AdminClientConfig.RETRIES_CONFIG, "0"
)) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
env.kafkaClient().prepareResponse(listOffsetsResultWithTimeout(tp1, offset));
TopicAdmin admin = new TopicAdmin(env.adminClient());
assertThrows(TimeoutException.class, () -> admin.endOffsets(tps));
}
}
@Test
public void endOffsetsShouldFailWithNonRetriableWhenUnknownErrorOccurs() {
String topicName = "myTopic";
TopicPartition tp1 = new TopicPartition(topicName, 0);
Set<TopicPartition> tps = Set.of(tp1);
Long offset = null; // response should use error
Cluster cluster = createCluster(1, topicName, 1);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
env.kafkaClient().prepareResponse(listOffsetsResultWithUnknownError(tp1, offset));
TopicAdmin admin = new TopicAdmin(env.adminClient());
ConnectException e = assertThrows(ConnectException.class, () -> admin.endOffsets(tps));
assertTrue(e.getMessage().contains("Error while getting end offsets for topic"));
}
}
@Test
public void endOffsetsShouldReturnEmptyMapWhenPartitionsSetIsNull() {
String topicName = "myTopic";
Cluster cluster = createCluster(1, topicName, 1);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
TopicAdmin admin = new TopicAdmin(env.adminClient());
Map<TopicPartition, Long> offsets = admin.endOffsets(Set.of());
assertTrue(offsets.isEmpty());
}
}
@Test
public void endOffsetsShouldReturnOffsetsForOnePartition() {
String topicName = "myTopic";
TopicPartition tp1 = new TopicPartition(topicName, 0);
Set<TopicPartition> tps = Set.of(tp1);
long offset = 1000L;
Cluster cluster = createCluster(1, topicName, 1);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
env.kafkaClient().prepareResponse(listOffsetsResult(tp1, offset));
TopicAdmin admin = new TopicAdmin(env.adminClient());
Map<TopicPartition, Long> offsets = admin.endOffsets(tps);
assertEquals(1, offsets.size());
assertEquals(Long.valueOf(offset), offsets.get(tp1));
}
}
@Test
public void endOffsetsShouldReturnOffsetsForMultiplePartitions() {
String topicName = "myTopic";
TopicPartition tp1 = new TopicPartition(topicName, 0);
TopicPartition tp2 = new TopicPartition(topicName, 1);
Set<TopicPartition> tps = Set.of(tp1, tp2);
long offset1 = 1001;
long offset2 = 1002;
Cluster cluster = createCluster(1, topicName, 2);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
env.kafkaClient().prepareResponse(listOffsetsResult(tp1, offset1, tp2, offset2));
TopicAdmin admin = new TopicAdmin(env.adminClient());
Map<TopicPartition, Long> offsets = admin.endOffsets(tps);
assertEquals(2, offsets.size());
assertEquals(Long.valueOf(offset1), offsets.get(tp1));
assertEquals(Long.valueOf(offset2), offsets.get(tp2));
}
}
@Test
public void endOffsetsShouldFailWhenAnyTopicPartitionHasError() {
String topicName = "myTopic";
TopicPartition tp1 = new TopicPartition(topicName, 0);
Set<TopicPartition> tps = Set.of(tp1);
Cluster cluster = createCluster(1, topicName, 1);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
env.kafkaClient().prepareResponse(listOffsetsResultWithClusterAuthorizationException(tp1, null));
TopicAdmin admin = new TopicAdmin(env.adminClient());
ConnectException e = assertThrows(ConnectException.class, () -> admin.endOffsets(tps));
assertTrue(e.getMessage().contains("Not authorized to get the end offsets"));
}
}
private Cluster createCluster(int numNodes) {
return createCluster(numNodes, "unused", 0);
}
private Cluster createCluster(int numNodes, String topicName, int partitions) {
Node[] nodeArray = new Node[numNodes];
HashMap<Integer, Node> nodes = new HashMap<>();
for (int i = 0; i < numNodes; ++i) {
nodeArray[i] = new Node(i, "localhost", 8121 + i);
nodes.put(i, nodeArray[i]);
}
Node leader = nodeArray[0];
List<PartitionInfo> pInfos = new ArrayList<>();
for (int i = 0; i < partitions; ++i) {
pInfos.add(new PartitionInfo(topicName, i, leader, nodeArray, nodeArray));
}
return new Cluster(
"mockClusterId",
nodes.values(),
pInfos,
Set.of(),
Set.of(),
leader);
}
private MetadataResponse prepareMetadataResponse(Cluster cluster, Errors error) {
return prepareMetadataResponse(cluster, error, error);
}
private MetadataResponse prepareMetadataResponse(Cluster cluster, Errors topicError, Errors partitionError) {
List<MetadataResponseTopic> metadata = new ArrayList<>();
for (String topic : cluster.topics()) {
List<MetadataResponseData.MetadataResponsePartition> pms = new ArrayList<>();
for (PartitionInfo pInfo : cluster.availablePartitionsForTopic(topic)) {
MetadataResponseData.MetadataResponsePartition pm = new MetadataResponseData.MetadataResponsePartition()
.setErrorCode(partitionError.code())
.setPartitionIndex(pInfo.partition())
.setLeaderId(pInfo.leader().id())
.setLeaderEpoch(234)
.setReplicaNodes(Arrays.stream(pInfo.replicas()).map(Node::id).toList())
.setIsrNodes(Arrays.stream(pInfo.inSyncReplicas()).map(Node::id).toList())
.setOfflineReplicas(Arrays.stream(pInfo.offlineReplicas()).map(Node::id).toList());
pms.add(pm);
}
MetadataResponseTopic tm = new MetadataResponseTopic()
.setErrorCode(topicError.code())
.setName(topic)
.setIsInternal(false)
.setPartitions(pms);
metadata.add(tm);
}
return MetadataResponse.prepareResponse(true,
0,
cluster.nodes(),
cluster.clusterResource().clusterId(),
cluster.controller().id(),
metadata,
MetadataResponse.AUTHORIZED_OPERATIONS_OMITTED);
}
private ListOffsetsResponse listOffsetsResultWithUnknownError(TopicPartition tp1, Long offset1) {
return listOffsetsResult(
new ApiError(Errors.UNKNOWN_SERVER_ERROR, "Unknown error"),
Collections.singletonMap(tp1, offset1)
);
}
private ListOffsetsResponse listOffsetsResultWithTimeout(TopicPartition tp1, Long offset1) {
return listOffsetsResult(
new ApiError(Errors.REQUEST_TIMED_OUT, "Request timed out"),
Collections.singletonMap(tp1, offset1)
);
}
private ListOffsetsResponse listOffsetsResultWithUnsupportedVersion(TopicPartition tp1, Long offset1) {
return listOffsetsResult(
new ApiError(Errors.UNSUPPORTED_VERSION, "This version of the API is not supported"),
Collections.singletonMap(tp1, offset1)
);
}
private ListOffsetsResponse listOffsetsResultWithClusterAuthorizationException(TopicPartition tp1, Long offset1) {
return listOffsetsResult(
new ApiError(Errors.CLUSTER_AUTHORIZATION_FAILED, "Not authorized to create topic(s)"),
Collections.singletonMap(tp1, offset1)
);
}
private ListOffsetsResponse listOffsetsResult(TopicPartition tp1, Long offset1) {
return listOffsetsResult(null, Map.of(tp1, offset1));
}
private ListOffsetsResponse listOffsetsResult(TopicPartition tp1, Long offset1, TopicPartition tp2, Long offset2) {
Map<TopicPartition, Long> offsetsByPartitions = new HashMap<>();
offsetsByPartitions.put(tp1, offset1);
offsetsByPartitions.put(tp2, offset2);
return listOffsetsResult(null, offsetsByPartitions);
}
/**
* Create a ListOffsetResponse that exposes the supplied error and includes offsets for the supplied partitions.
* @param error the error; may be null if an unknown error should be used
* @param offsetsByPartitions offset for each partition, where offset is null signals the error should be used
* @return the response
*/
private ListOffsetsResponse listOffsetsResult(ApiError error, Map<TopicPartition, Long> offsetsByPartitions) {
if (error == null) error = new ApiError(Errors.UNKNOWN_TOPIC_OR_PARTITION, "unknown topic");
List<ListOffsetsTopicResponse> tpResponses = new ArrayList<>();
for (TopicPartition partition : offsetsByPartitions.keySet()) {
Long offset = offsetsByPartitions.get(partition);
ListOffsetsTopicResponse topicResponse;
if (offset == null) {
topicResponse = ListOffsetsResponse.singletonListOffsetsTopicResponse(partition, error.error(), -1L, 0, 321);
} else {
topicResponse = ListOffsetsResponse.singletonListOffsetsTopicResponse(partition, Errors.NONE, -1L, offset, 321);
}
tpResponses.add(topicResponse);
}
ListOffsetsResponseData responseData = new ListOffsetsResponseData()
.setThrottleTimeMs(0)
.setTopics(tpResponses);
return new ListOffsetsResponse(responseData);
}
private CreateTopicsResponse createTopicResponseWithUnsupportedVersion(NewTopic... topics) {
return createTopicResponse(new ApiError(Errors.UNSUPPORTED_VERSION, "This version of the API is not supported"), topics);
}
private CreateTopicsResponse createTopicResponseWithClusterAuthorizationException(NewTopic... topics) {
return createTopicResponse(new ApiError(Errors.CLUSTER_AUTHORIZATION_FAILED, "Not authorized to create topic(s)"), topics);
}
private CreateTopicsResponse createTopicResponseWithTopicAuthorizationException(NewTopic... topics) {
return createTopicResponse(new ApiError(Errors.TOPIC_AUTHORIZATION_FAILED, "Not authorized to create topic(s)"), topics);
}
private CreateTopicsResponse createTopicResponse(ApiError error, NewTopic... topics) {
if (error == null) error = new ApiError(Errors.NONE, "");
CreateTopicsResponseData response = new CreateTopicsResponseData();
for (NewTopic topic : topics) {
response.topics().add(new CreatableTopicResult().
setName(topic.name()).
setErrorCode(error.error().code()).
setErrorMessage(error.message()));
}
return new CreateTopicsResponse(response);
}
protected void assertTopicCreation(
int brokers,
NewTopic newTopic,
Integer defaultPartitions,
Integer defaultReplicationFactor,
int expectedReplicas,
int expectedPartitions
) {
Cluster cluster = createCluster(brokers);
MockAdminClient.Builder clientBuilder = MockAdminClient.create();
if (defaultPartitions != null) {
clientBuilder.defaultPartitions(defaultPartitions.shortValue());
}
if (defaultReplicationFactor != null) {
clientBuilder.defaultReplicationFactor(defaultReplicationFactor);
}
clientBuilder.brokers(cluster.nodes());
clientBuilder.controller(0);
try (MockAdminClient admin = clientBuilder.build()) {
TopicAdmin topicClient = new TopicAdmin(null, admin, false);
TopicAdmin.TopicCreationResponse response = topicClient.createOrFindTopics(newTopic);
assertTrue(response.isCreated(newTopic.name()));
assertFalse(response.isExisting(newTopic.name()));
assertTopic(admin, newTopic.name(), expectedPartitions, expectedReplicas);
}
}
protected void assertTopic(MockAdminClient admin, String topicName, int expectedPartitions, int expectedReplicas) {
TopicDescription desc = null;
try {
desc = topicDescription(admin, topicName);
} catch (Throwable t) {
fail("Failed to find topic description for topic '" + topicName + "'");
}
assertEquals(expectedPartitions, desc.partitions().size());
for (TopicPartitionInfo tp : desc.partitions()) {
assertEquals(expectedReplicas, tp.replicas().size());
}
}
protected TopicDescription topicDescription(MockAdminClient admin, String topicName)
throws ExecutionException, InterruptedException {
DescribeTopicsResult result = admin.describeTopics(Set.of(topicName));
Map<String, KafkaFuture<TopicDescription>> byName = result.topicNameValues();
return byName.get(topicName).get();
}
private DescribeTopicPartitionsResponse describeTopicResponseWithClusterAuthorizationException(NewTopic... topics) {
return describeTopicResponse(new ApiError(Errors.CLUSTER_AUTHORIZATION_FAILED, "Not authorized to create topic(s)"), topics);
}
private DescribeTopicPartitionsResponse describeTopicResponseWithTopicAuthorizationException(NewTopic... topics) {
return describeTopicResponse(new ApiError(Errors.TOPIC_AUTHORIZATION_FAILED, "Not authorized to create topic(s)"), topics);
}
private DescribeTopicPartitionsResponse describeTopicResponse(ApiError error, NewTopic... topics) {
if (error == null) error = new ApiError(Errors.NONE, "");
DescribeTopicPartitionsResponseData response = new DescribeTopicPartitionsResponseData();
for (NewTopic topic : topics) {
response.topics().add(new DescribeTopicPartitionsResponseData.DescribeTopicPartitionsResponseTopic()
.setErrorCode(error.error().code())
.setTopicId(Uuid.ZERO_UUID)
.setName(topic.name())
.setIsInternal(false)
);
}
return new DescribeTopicPartitionsResponse(response);
}
private DescribeClusterResponse describeClusterResponse(Cluster cluster) {
DescribeClusterResponseData data = new DescribeClusterResponseData()
.setErrorCode(Errors.NONE.code())
.setThrottleTimeMs(0)
.setControllerId(cluster.nodes().get(0).id())
.setClusterId(cluster.clusterResource().clusterId())
.setClusterAuthorizedOperations(MetadataResponse.AUTHORIZED_OPERATIONS_OMITTED);
cluster.nodes().forEach(broker ->
data.brokers().add(new DescribeClusterResponseData.DescribeClusterBroker()
.setHost(broker.host())
.setPort(broker.port())
.setBrokerId(broker.id())
.setRack(broker.rack())));
return new DescribeClusterResponse(data);
}
private DescribeConfigsResponse describeConfigsResponseWithUnsupportedVersion(NewTopic... topics) {
return describeConfigsResponse(new ApiError(Errors.UNSUPPORTED_VERSION, "This version of the API is not supported"), topics);
}
private DescribeConfigsResponse describeConfigsResponseWithClusterAuthorizationException(NewTopic... topics) {
return describeConfigsResponse(new ApiError(Errors.CLUSTER_AUTHORIZATION_FAILED, "Not authorized to create topic(s)"), topics);
}
private DescribeConfigsResponse describeConfigsResponseWithTopicAuthorizationException(NewTopic... topics) {
return describeConfigsResponse(new ApiError(Errors.TOPIC_AUTHORIZATION_FAILED, "Not authorized to create topic(s)"), topics);
}
private DescribeConfigsResponse describeConfigsResponse(ApiError error, NewTopic... topics) {
List<DescribeConfigsResponseData.DescribeConfigsResult> results = Stream.of(topics)
.map(topic -> new DescribeConfigsResponseData.DescribeConfigsResult()
.setErrorCode(error.error().code())
.setErrorMessage(error.message())
.setResourceType(ConfigResource.Type.TOPIC.id())
.setResourceName(topic.name())
.setConfigs(topic.configs().entrySet()
.stream()
.map(e -> new DescribeConfigsResponseData.DescribeConfigsResourceResult()
.setName(e.getKey())
.setValue(e.getValue()))
.toList()))
.toList();
return new DescribeConfigsResponse(new DescribeConfigsResponseData().setThrottleTimeMs(1000).setResults(results));
}
}
|
TopicAdminTest
|
java
|
apache__camel
|
components/camel-oauth/src/main/java/org/apache/camel/oauth/jakarta/ServletOAuthFactory.java
|
{
"start": 977,
"end": 1351
}
|
class ____ extends OAuthFactory {
public ServletOAuthFactory(CamelContext ctx) {
super(ctx);
}
public OAuth createOAuth() {
var oauth = new ServletOAuth();
oauth.discoverOAuthConfig(context);
var registry = context.getRegistry();
registry.bind(OAuth.class.getName(), oauth);
return oauth;
}
}
|
ServletOAuthFactory
|
java
|
alibaba__nacos
|
common/src/test/java/com/alibaba/nacos/common/notify/NotifyCenterTest.java
|
{
"start": 12284,
"end": 12619
}
|
class ____ extends Event {
private static final long serialVersionUID = 3024284255874382548L;
private final long no;
ExpireEvent(long no) {
this.no = no;
}
@Override
public long sequence() {
return no;
}
}
}
|
ExpireEvent
|
java
|
spring-projects__spring-framework
|
spring-webmvc/src/main/java/org/springframework/web/servlet/view/groovy/GroovyMarkupConfig.java
|
{
"start": 1018,
"end": 1318
}
|
interface ____ {
/**
* Return the Groovy {@link MarkupTemplateEngine} for the current
* web application context. May be unique to one servlet, or shared
* in the root context.
* @return the Groovy MarkupTemplateEngine engine
*/
MarkupTemplateEngine getTemplateEngine();
}
|
GroovyMarkupConfig
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/sagemaker/schema/SageMakerStreamSchemaPayload.java
|
{
"start": 1539,
"end": 2361
}
|
class ____ that Unified Chat Completion
* was its own interface.
*/
@Override
default EnumSet<TaskType> supportedTasks() {
return EnumSet.of(TaskType.COMPLETION, TaskType.CHAT_COMPLETION);
}
/**
* This API would only be called for Completion task types. {@link #requestBytes(SageMakerModel, SageMakerInferenceRequest)} would
* handle the request translation for both streaming and non-streaming.
*/
StreamingChatCompletionResults.Results streamResponseBody(SageMakerModel model, SdkBytes response) throws Exception;
SdkBytes chatCompletionRequestBytes(SageMakerModel model, UnifiedCompletionRequest request) throws Exception;
StreamingUnifiedChatCompletionResults.Results chatCompletionResponseBody(SageMakerModel model, SdkBytes response) throws Exception;
}
|
so
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.