language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/async/AsyncEndpointRecipientListTest.java | {
"start": 1170,
"end": 2689
} | class ____ extends ContextTestSupport {
private static String beforeThreadName;
private static String afterThreadName;
@Test
public void testAsyncEndpoint() {
getMockEndpoint("mock:before").expectedBodiesReceived("Hello Camel");
getMockEndpoint("mock:after").expectedBodiesReceived("Bye Camel");
getMockEndpoint("mock:result").expectedBodiesReceived("Bye Camel");
String reply = template.requestBody("direct:start", "Hello Camel", String.class);
assertEquals("Bye Camel", reply);
assertFalse(beforeThreadName.equalsIgnoreCase(afterThreadName), "Should use different threads");
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
context.addComponent("async", new MyAsyncComponent());
from("direct:start").to("mock:before").to("log:before").process(new Processor() {
public void process(Exchange exchange) {
beforeThreadName = Thread.currentThread().getName();
}
}).recipientList(constant("async:bye:camel")).process(new Processor() {
public void process(Exchange exchange) {
afterThreadName = Thread.currentThread().getName();
}
}).to("log:after").to("mock:after").to("mock:result");
}
};
}
}
| AsyncEndpointRecipientListTest |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/single/SingleTimer.java | {
"start": 1567,
"end": 2343
} | class ____ extends AtomicReference<Disposable> implements Disposable, Runnable {
private static final long serialVersionUID = 8465401857522493082L;
final SingleObserver<? super Long> downstream;
TimerDisposable(final SingleObserver<? super Long> downstream) {
this.downstream = downstream;
}
@Override
public void run() {
downstream.onSuccess(0L);
}
@Override
public void dispose() {
DisposableHelper.dispose(this);
}
@Override
public boolean isDisposed() {
return DisposableHelper.isDisposed(get());
}
void setFuture(Disposable d) {
DisposableHelper.replace(this, d);
}
}
}
| TimerDisposable |
java | apache__flink | flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/sync/ForStSyncKeyedStateBackend.java | {
"start": 8216,
"end": 8601
} | interface ____ {
<K, N, SV, S extends State, IS extends S> IS createState(
StateDescriptor<S, SV> stateDesc,
Tuple2<ColumnFamilyHandle, RegisteredKeyValueStateBackendMetaInfo<N, SV>>
registerResult,
ForStSyncKeyedStateBackend<K> backend)
throws Exception;
}
private | StateCreateFactory |
java | elastic__elasticsearch | modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/builders/EnvelopeBuilderTests.java | {
"start": 711,
"end": 3017
} | class ____ extends AbstractShapeBuilderTestCase<EnvelopeBuilder> {
public void testInvalidConstructorArgs() {
NullPointerException e;
e = expectThrows(NullPointerException.class, () -> new EnvelopeBuilder(null, new Coordinate(1.0, -1.0)));
assertEquals("topLeft of envelope cannot be null", e.getMessage());
e = expectThrows(NullPointerException.class, () -> new EnvelopeBuilder(new Coordinate(1.0, -1.0), null));
assertEquals("bottomRight of envelope cannot be null", e.getMessage());
}
@Override
protected EnvelopeBuilder createTestShapeBuilder() {
return createRandomShape();
}
@Override
protected EnvelopeBuilder createMutation(EnvelopeBuilder original) throws IOException {
return mutate(original);
}
static EnvelopeBuilder mutate(EnvelopeBuilder original) throws IOException {
copyShape(original);
// move one corner to the middle of original
return switch (randomIntBetween(0, 3)) {
case 0 -> new EnvelopeBuilder(
new Coordinate(randomDoubleBetween(-180.0, original.bottomRight().x, true), original.topLeft().y),
original.bottomRight()
);
case 1 -> new EnvelopeBuilder(
new Coordinate(original.topLeft().x, randomDoubleBetween(original.bottomRight().y, 90.0, true)),
original.bottomRight()
);
case 2 -> new EnvelopeBuilder(
original.topLeft(),
new Coordinate(randomDoubleBetween(original.topLeft().x, 180.0, true), original.bottomRight().y)
);
case 3 -> new EnvelopeBuilder(
original.topLeft(),
new Coordinate(original.bottomRight().x, randomDoubleBetween(-90.0, original.topLeft().y, true))
);
default -> copyShape(original);
};
}
static EnvelopeBuilder createRandomShape() {
Rectangle box = RandomShapeGenerator.xRandomRectangle(random(), RandomShapeGenerator.xRandomPoint(random()));
EnvelopeBuilder envelope = new EnvelopeBuilder(
new Coordinate(box.getMinX(), box.getMaxY()),
new Coordinate(box.getMaxX(), box.getMinY())
);
return envelope;
}
}
| EnvelopeBuilderTests |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/PrimitiveBeanLookupAndAutowiringTests.java | {
"start": 3016,
"end": 3099
} | class ____ {
@Autowired boolean b;
@Autowired int i;
}
static | AutowiredComponent |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java | {
"start": 3158,
"end": 11838
} | class ____ {
private final Logger logger;
private final ShardId shardId;
StoreRecovery(ShardId shardId, Logger logger) {
this.logger = logger;
this.shardId = shardId;
}
/**
* Recovers a shard from it's local file system store. This method required pre-knowledge about if the shard should
* exist on disk ie. has been previously allocated or if the shard is a brand new allocation without pre-existing index
* files / transaction logs. This
* @param indexShard the index shard instance to recovery the shard into
* @param listener resolves to <code>true</code> if the shard has been recovered successfully, <code>false</code> if the recovery
* has been ignored due to a concurrent modification of if the clusters state has changed due to async updates.
* @see Store
*/
void recoverFromStore(final IndexShard indexShard, ActionListener<Boolean> listener) {
if (canRecover(indexShard)) {
RecoverySource.Type recoveryType = indexShard.recoveryState().getRecoverySource().getType();
assert recoveryType == RecoverySource.Type.EMPTY_STORE
|| recoveryType == RecoverySource.Type.EXISTING_STORE
|| recoveryType == RecoverySource.Type.RESHARD_SPLIT : "expected one of store recovery types but was: " + recoveryType;
logger.debug("starting recovery from store ...");
final var recoveryListener = recoveryListener(indexShard, listener);
try {
internalRecoverFromStore(indexShard, recoveryListener.map(ignored -> true));
} catch (Exception e) {
recoveryListener.onFailure(e);
}
} else {
listener.onResponse(false);
}
}
void recoverFromLocalShards(
BiConsumer<MappingMetadata, ActionListener<Void>> mappingUpdateConsumer,
final IndexShard indexShard,
final List<LocalShardSnapshot> shards,
ActionListener<Boolean> outerListener
) {
if (canRecover(indexShard)) {
RecoverySource.Type recoveryType = indexShard.recoveryState().getRecoverySource().getType();
assert recoveryType == RecoverySource.Type.LOCAL_SHARDS : "expected local shards recovery type: " + recoveryType;
if (shards.isEmpty()) {
throw new IllegalArgumentException("shards must not be empty");
}
Set<Index> indices = shards.stream().map((s) -> s.getIndex()).collect(Collectors.toSet());
if (indices.size() > 1) {
throw new IllegalArgumentException("can't add shards from more than one index");
}
IndexMetadata sourceMetadata = shards.get(0).getIndexMetadata();
final var mappingStep = new SubscribableListener<Void>();
if (sourceMetadata.mapping() == null) {
mappingStep.onResponse(null);
} else {
mappingUpdateConsumer.accept(sourceMetadata.mapping(), mappingStep);
}
mappingStep.addListener(outerListener.delegateFailure((listener, ignored) -> {
final var recoveryListener = recoveryListener(indexShard, listener);
try {
indexShard.mapperService().merge(sourceMetadata, MapperService.MergeReason.MAPPING_RECOVERY);
// now that the mapping is merged we can validate the index sort configuration.
Sort indexSort = indexShard.getIndexSort();
final boolean hasNested = indexShard.mapperService().hasNested();
final boolean isSplit = sourceMetadata.getNumberOfShards() < indexShard.indexSettings().getNumberOfShards();
logger.debug("starting recovery from local shards {}", shards);
final Directory directory = indexShard.store().directory(); // don't close this directory!!
final Directory[] sources = shards.stream().map(LocalShardSnapshot::getSnapshotDirectory).toArray(Directory[]::new);
final long maxSeqNo = shards.stream().mapToLong(LocalShardSnapshot::maxSeqNo).max().getAsLong();
final long maxUnsafeAutoIdTimestamp = shards.stream()
.mapToLong(LocalShardSnapshot::maxUnsafeAutoIdTimestamp)
.max()
.getAsLong();
addIndices(
indexShard.recoveryState().getIndex(),
directory,
indexSort,
sources,
maxSeqNo,
maxUnsafeAutoIdTimestamp,
indexShard.indexSettings().getIndexMetadata(),
indexShard.shardId().id(),
isSplit,
hasNested
);
internalRecoverFromStore(indexShard, recoveryListener.delegateFailure((delegate, v) -> {
ActionListener.completeWith(delegate, () -> {
// just trigger a merge to do housekeeping on the
// copied segments - we will also see them in stats etc.
indexShard.getEngine().forceMerge(false, -1, false, UUIDs.randomBase64UUID());
return true;
});
}));
} catch (IOException e) {
recoveryListener.onFailure(
new IndexShardRecoveryException(indexShard.shardId(), "failed to recover from local shards", e)
);
} catch (Exception e) {
recoveryListener.onFailure(e);
}
}));
} else {
outerListener.onResponse(false);
}
}
static void addIndices(
final RecoveryState.Index indexRecoveryStats,
final Directory target,
final Sort indexSort,
final Directory[] sources,
final long maxSeqNo,
final long maxUnsafeAutoIdTimestamp,
IndexMetadata indexMetadata,
int shardId,
boolean split,
boolean hasNested
) throws IOException {
assert sources.length > 0;
final int luceneIndexCreatedVersionMajor = Lucene.readSegmentInfos(sources[0]).getIndexCreatedVersionMajor();
final Directory hardLinkOrCopyTarget = new HardlinkCopyDirectoryWrapper(target);
IndexWriterConfig iwc = indexWriterConfigWithNoMerging(null).setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
.setCommitOnClose(false)
.setOpenMode(IndexWriterConfig.OpenMode.CREATE)
.setIndexCreatedVersionMajor(luceneIndexCreatedVersionMajor);
if (indexSort != null) {
iwc.setIndexSort(indexSort);
if (indexMetadata != null && indexMetadata.getCreationVersion().onOrAfter(IndexVersions.INDEX_SORTING_ON_NESTED)) {
// Needed to support index sorting in the presence of nested objects.
iwc.setParentField(Engine.ROOT_DOC_FIELD_NAME);
}
}
try (IndexWriter writer = new IndexWriter(new StatsDirectoryWrapper(hardLinkOrCopyTarget, indexRecoveryStats), iwc)) {
writer.addIndexes(sources);
indexRecoveryStats.setFileDetailsComplete();
if (split) {
writer.deleteDocuments(new ShardSplittingQuery(indexMetadata, shardId, hasNested));
}
/*
* We set the maximum sequence number and the local checkpoint on the target to the maximum of the maximum sequence numbers on
* the source shards. This ensures that history after this maximum sequence number can advance and we have correct
* document-level semantics.
*/
writer.setLiveCommitData(() -> {
final Map<String, String> liveCommitData = Maps.newMapWithExpectedSize(4);
liveCommitData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(maxSeqNo));
liveCommitData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(maxSeqNo));
liveCommitData.put(Engine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, Long.toString(maxUnsafeAutoIdTimestamp));
liveCommitData.put(Engine.ES_VERSION, IndexVersion.current().toString());
return liveCommitData.entrySet().iterator();
});
writer.commit();
}
}
/**
* Directory wrapper that records copy process for recovery statistics
*/
static final | StoreRecovery |
java | quarkusio__quarkus | integration-tests/virtual-threads/security-webauthn-virtual-threads/src/test/java/io/quarkus/virtual/security/webauthn/RunOnVirtualThreadTest.java | {
"start": 848,
"end": 3864
} | class ____ {
@Inject
WebAuthnUserProvider userProvider;
@TestHTTPResource
URL url;
@Test
public void test() throws Exception {
RestAssured.get("/open").then().statusCode(200).body(Matchers.is("Hello"));
RestAssured
.given().redirects().follow(false)
.get("/secure").then().statusCode(302);
RestAssured
.given().redirects().follow(false)
.get("/admin").then().statusCode(302);
RestAssured
.given().redirects().follow(false)
.get("/cheese").then().statusCode(302);
Assertions.assertTrue(userProvider.findByUsername("stef").await().indefinitely().isEmpty());
CookieFilter cookieFilter = new CookieFilter();
WebAuthnHardware hardwareKey = new WebAuthnHardware(url);
String challenge = WebAuthnEndpointHelper.obtainRegistrationChallenge("stef", cookieFilter);
JsonObject registration = hardwareKey.makeRegistrationJson(challenge);
// now finalise
WebAuthnEndpointHelper.invokeRegistration("stef", registration, cookieFilter);
// make sure we stored the user
List<WebAuthnCredentialRecord> users = userProvider.findByUsername("stef").await().indefinitely();
Assertions.assertEquals(1, users.size());
Assertions.assertTrue(users.get(0).getUsername().equals("stef"));
Assertions.assertEquals(1, users.get(0).getCounter());
// make sure our login cookie works
checkLoggedIn(cookieFilter);
// reset cookies for the login phase
cookieFilter = new CookieFilter();
// now try to log in
challenge = WebAuthnEndpointHelper.obtainLoginChallenge("stef", cookieFilter);
JsonObject login = hardwareKey.makeLoginJson(challenge);
// now finalise
WebAuthnEndpointHelper.invokeLogin(login, cookieFilter);
// make sure we bumped the user
users = userProvider.findByUsername("stef").await().indefinitely();
Assertions.assertEquals(1, users.size());
Assertions.assertTrue(users.get(0).getUsername().equals("stef"));
Assertions.assertEquals(2, users.get(0).getCounter());
// make sure our login cookie still works
checkLoggedIn(cookieFilter);
}
public static void checkLoggedIn(CookieFilter cookieFilter) {
RestAssured
.given()
.filter(cookieFilter)
.get("/secure")
.then()
.statusCode(200)
.body(Matchers.is("stef: [admin]"));
RestAssured
.given()
.filter(cookieFilter)
.redirects().follow(false)
.get("/admin").then().statusCode(200).body(Matchers.is("OK"));
RestAssured
.given()
.filter(cookieFilter)
.redirects().follow(false)
.get("/cheese").then().statusCode(403);
}
}
| RunOnVirtualThreadTest |
java | elastic__elasticsearch | x-pack/plugin/rank-vectors/src/main/java/org/elasticsearch/xpack/rank/vectors/mapper/RankVectorsIndexFieldData.java | {
"start": 976,
"end": 2872
} | class ____ implements IndexFieldData<RankVectorsDVLeafFieldData> {
protected final String fieldName;
protected final ValuesSourceType valuesSourceType;
private final int dims;
private final DenseVectorFieldMapper.ElementType elementType;
public RankVectorsIndexFieldData(
String fieldName,
int dims,
ValuesSourceType valuesSourceType,
DenseVectorFieldMapper.ElementType elementType
) {
this.fieldName = fieldName;
this.valuesSourceType = valuesSourceType;
this.elementType = elementType;
this.dims = dims;
}
@Override
public String getFieldName() {
return fieldName;
}
@Override
public ValuesSourceType getValuesSourceType() {
return valuesSourceType;
}
@Override
public RankVectorsDVLeafFieldData load(LeafReaderContext context) {
return new RankVectorsDVLeafFieldData(context.reader(), fieldName, elementType, dims);
}
@Override
public RankVectorsDVLeafFieldData loadDirect(LeafReaderContext context) throws Exception {
return load(context);
}
@Override
public SortField sortField(Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested, boolean reverse) {
throw new IllegalArgumentException(
"Field [" + fieldName + "] of type [" + RankVectorsFieldMapper.CONTENT_TYPE + "] doesn't support sort"
);
}
@Override
public BucketedSort newBucketedSort(
BigArrays bigArrays,
Object missingValue,
MultiValueMode sortMode,
XFieldComparatorSource.Nested nested,
SortOrder sortOrder,
DocValueFormat format,
int bucketSize,
BucketedSort.ExtraData extra
) {
throw new IllegalArgumentException("only supported on numeric fields");
}
public static | RankVectorsIndexFieldData |
java | quarkusio__quarkus | independent-projects/qute/debug/src/main/java/io/quarkus/qute/debug/agent/variables/VariablesProvider.java | {
"start": 452,
"end": 1386
} | interface ____ {
/**
* Returns the reference ID used by the debugger protocol to identify
* this variable container.
* <p>
* This allows the debugger client to request the children of this
* container when expanding a variable in the UI.
* </p>
*
* @return the variables reference ID
*/
int getVariablesReference();
/**
* Sets the reference ID for this variable container.
*
* @param variablesReference the reference ID to set
*/
void setVariablesReference(int variablesReference);
/**
* Returns the collection of {@link Variable} instances contained
* by this provider.
* <p>
* This can be either simple properties or nested variables that
* themselves implement {@link VariablesProvider}.
* </p>
*
* @return a collection of debug variables
*/
Collection<Variable> getVariables();
}
| VariablesProvider |
java | quarkusio__quarkus | integration-tests/keycloak-authorization/src/test/java/io/quarkus/it/keycloak/KeycloakLifecycleManager.java | {
"start": 981,
"end": 9724
} | class ____ implements QuarkusTestResourceLifecycleManager, DevServicesContext.ContextAware {
private static final String KEYCLOAK_REALM = "quarkus";
final KeycloakTestClient client = new KeycloakTestClient();
@Override
public Map<String, String> start() {
RealmRepresentation realm = createRealm(KEYCLOAK_REALM);
client.createRealm(realm);
return Map.of();
}
private static RealmRepresentation createRealm(String name) {
RealmRepresentation realm = new RealmRepresentation();
realm.setRealm(name);
realm.setEnabled(true);
realm.setUsers(new ArrayList<>());
realm.setClients(new ArrayList<>());
realm.setAccessTokenLifespan(3);
realm.setSsoSessionMaxLifespan(3);
RolesRepresentation roles = new RolesRepresentation();
List<RoleRepresentation> realmRoles = new ArrayList<>();
roles.setRealm(realmRoles);
realm.setRoles(roles);
realm.getRoles().getRealm().add(new RoleRepresentation("user", null, false));
realm.getRoles().getRealm().add(new RoleRepresentation("superuser", null, false));
realm.getRoles().getRealm().add(new RoleRepresentation("admin", null, false));
realm.getRoles().getRealm().add(new RoleRepresentation("confidential", null, false));
realm.getClients().add(createClient("quarkus-app"));
realm.getUsers().add(createUser("alice", "user", "superuser"));
realm.getUsers().add(createUser("admin", "user", "admin"));
realm.getUsers().add(createUser("jdoe", "user", "confidential"));
return realm;
}
private static ClientRepresentation createClient(String clientId) {
ClientRepresentation client = new ClientRepresentation();
client.setClientId(clientId);
client.setRedirectUris(Arrays.asList("*"));
client.setPublicClient(false);
client.setSecret("secret");
client.setDirectAccessGrantsEnabled(true);
client.setEnabled(true);
client.setAuthorizationServicesEnabled(true);
ResourceServerRepresentation authorizationSettings = new ResourceServerRepresentation();
authorizationSettings.setResources(new ArrayList<>());
authorizationSettings.setPolicies(new ArrayList<>());
configurePermissionResourcePermission(authorizationSettings);
configureClaimBasedPermission(authorizationSettings);
configureHttpResponseClaimBasedPermission(authorizationSettings);
configureBodyClaimBasedPermission(authorizationSettings);
configurePaths(authorizationSettings);
configureScopePermission(authorizationSettings);
client.setAuthorizationSettings(authorizationSettings);
return client;
}
private static void configurePermissionResourcePermission(ResourceServerRepresentation settings) {
PolicyRepresentation policyConfidential = createJSPolicy("Confidential Policy",
"confidential-policy.js",
settings);
createPermission(settings, createResource(settings, "Permission Resource", "/api/permission"), policyConfidential);
PolicyRepresentation policyAdmin = createJSPolicy("Admin Policy", "admin-policy.js", settings);
createPermission(settings, createResource(settings, "Permission Resource Tenant", "/api-permission-tenant"),
policyAdmin);
createPermission(settings,
createResource(settings, "Dynamic Config Permission Resource Tenant", "/dynamic-permission-tenant"),
policyAdmin);
PolicyRepresentation policyUser = createJSPolicy("Superuser Policy", "superuser-policy.js", settings);
createPermission(settings, createResource(settings, "Permission Resource WebApp", "/api-permission-webapp"),
policyUser);
}
private static void configureScopePermission(ResourceServerRepresentation settings) {
PolicyRepresentation policy = createJSPolicy("Grant Policy", "always-grant.js", settings);
createScopePermission(settings,
createResource(settings, "Scope Permission Resource", "/api/permission/scope", "read", "write"), policy,
"read");
}
private static void configureClaimBasedPermission(ResourceServerRepresentation settings) {
PolicyRepresentation policy = createJSPolicy("Claim-Based Policy", "claim-based-policy.js", settings);
createPermission(settings, createResource(settings, "Claim Protected Resource", "/api/permission/claim-protected"),
policy);
}
private static void configureHttpResponseClaimBasedPermission(ResourceServerRepresentation settings) {
PolicyRepresentation policy = createJSPolicy("Http Response Claim-Based Policy",
"http-claim-based-policy.js",
settings);
createPermission(settings, createResource(settings, "Http Response Claim Protected Resource",
"/api/permission/http-response-claim-protected"), policy);
}
private static void configureBodyClaimBasedPermission(ResourceServerRepresentation settings) {
PolicyRepresentation policy = createJSPolicy("Body Claim-Based Policy",
"body-claim-based-policy.js",
settings);
createPermission(settings, createResource(settings, "Body Claim Protected Resource",
"/api/permission/body-claim"), policy);
}
private static void configurePaths(ResourceServerRepresentation settings) {
createResource(settings, "Root", null);
createResource(settings, "API", "/api2/*");
createResource(settings, "Hello", "/hello");
}
private static void createPermission(ResourceServerRepresentation settings, ResourceRepresentation resource,
PolicyRepresentation policy) {
PolicyRepresentation permission = new PolicyRepresentation();
permission.setName(resource.getName() + " Permission");
permission.setType("resource");
permission.setResources(new HashSet<>());
permission.getResources().add(resource.getName());
permission.setPolicies(new HashSet<>());
permission.getPolicies().add(policy.getName());
settings.getPolicies().add(permission);
}
private static void createScopePermission(ResourceServerRepresentation settings, ResourceRepresentation resource,
PolicyRepresentation policy, String scope) {
PolicyRepresentation permission = new PolicyRepresentation();
permission.setName(resource.getName() + " Permission");
permission.setType("scope");
permission.setResources(new HashSet<>());
permission.getResources().add(resource.getName());
permission.setScopes(new HashSet<>());
permission.getScopes().add(scope);
permission.setPolicies(new HashSet<>());
permission.getPolicies().add(policy.getName());
settings.getPolicies().add(permission);
}
private static ResourceRepresentation createResource(ResourceServerRepresentation authorizationSettings, String name,
String uri, String... scopes) {
ResourceRepresentation resource = new ResourceRepresentation(name);
for (String scope : scopes) {
resource.addScope(scope);
}
if (uri != null) {
resource.setUris(Collections.singleton(uri));
}
authorizationSettings.getResources().add(resource);
return resource;
}
private static PolicyRepresentation createJSPolicy(String name, String code, ResourceServerRepresentation settings) {
PolicyRepresentation policy = new PolicyRepresentation();
policy.setName(name);
policy.setType("script-" + code);
settings.getPolicies().add(policy);
return policy;
}
private static UserRepresentation createUser(String username, String... realmRoles) {
UserRepresentation user = new UserRepresentation();
user.setUsername(username);
user.setEnabled(true);
user.setCredentials(new ArrayList<>());
user.setRealmRoles(Arrays.asList(realmRoles));
CredentialRepresentation credential = new CredentialRepresentation();
credential.setType(CredentialRepresentation.PASSWORD);
credential.setValue(username);
credential.setTemporary(false);
user.getCredentials().add(credential);
return user;
}
@Override
public void stop() {
//client.deleteRealm(KEYCLOAK_REALM);
}
@Override
public void setIntegrationTestContext(DevServicesContext context) {
client.setIntegrationTestContext(context);
}
}
| KeycloakLifecycleManager |
java | apache__camel | components/camel-jms/src/test/java/org/apache/camel/component/jms/integration/spring/JmsConsumerShutdownIT.java | {
"start": 4039,
"end": 4617
} | class ____ extends RouteBuilder {
@Override
public void configure() {
from("jms:start")
.to("direct:dir")
.to("mock:end");
from("seda:start")
.to("direct:dir")
.to("mock:end");
from("direct:dir")
.onException(Exception.class)
.redeliveryDelay(500)
.maximumRedeliveries(-1) // forever
.end()
.to("mock:exception");
}
}
}
| MyRouteBuilder |
java | junit-team__junit5 | junit-platform-engine/src/main/java/org/junit/platform/engine/discovery/DiscoverySelectors.java | {
"start": 14557,
"end": 19358
} | class ____} of the
* {@linkplain Thread thread} that uses the resulting selector.
*
* <p>Note: Since Java 9, all resources are on the module path. Either in
* named or unnamed modules. These resources are also considered to be
* classpath resources.
*
* <p>The {@link Set} supplied to this method should have a reliable iteration
* order to support reliable discovery and execution order. It is therefore
* recommended that the set be a {@link java.util.SequencedSet} (on Java 21
* or higher), {@link java.util.SortedSet}, {@link java.util.LinkedHashSet},
* or similar. Note that {@link Set#of(Object[])} and related {@code Set.of()}
* methods do not guarantee a reliable iteration order.
*
* @param classpathResources a set of classpath resources; never
* {@code null} or empty. All resources must have the same name, may not
* be {@code null} or blank.
* @since 1.14
* @see #selectClasspathResource(String, FilePosition)
* @see #selectClasspathResource(String)
* @see ClasspathResourceSelector
* @see org.junit.platform.commons.support.ResourceSupport#tryToGetResources(String)
*/
@API(status = MAINTAINED, since = "1.14")
public static ClasspathResourceSelector selectClasspathResourceByName(Set<? extends Resource> classpathResources) {
Preconditions.notEmpty(classpathResources, "classpath resources must not be null or empty");
Preconditions.containsNoNullElements(classpathResources, "individual classpath resources must not be null");
List<String> resourceNames = classpathResources.stream().map(Resource::getName).distinct().toList();
Preconditions.condition(resourceNames.size() == 1, "all classpath resources must have the same name");
Preconditions.notBlank(resourceNames.get(0), "classpath resource names must not be null or blank");
return new ClasspathResourceSelector(classpathResources);
}
/**
* Create a {@code ModuleSelector} for the supplied module name.
*
* <p>The unnamed module is not supported.
*
* @param moduleName the module name to select; never {@code null} or blank
* @since 1.1
* @see ModuleSelector
*/
@API(status = STABLE, since = "1.10")
public static ModuleSelector selectModule(String moduleName) {
Preconditions.notBlank(moduleName, "Module name must not be null or blank");
return new ModuleSelector(moduleName.strip());
}
/**
* Create a {@code ModuleSelector} for the supplied module.
*
* <p>The unnamed module is not supported.
*
* @param module the module to select; never {@code null} or <em>unnamed</em>
* @since 6.1
* @see ModuleSelector
*/
@API(status = EXPERIMENTAL, since = "6.1")
public static ModuleSelector selectModule(Module module) {
Preconditions.notNull(module, "Module must not be null");
Preconditions.condition(module.isNamed(), "Module must be named");
return new ModuleSelector(module);
}
/**
* Create a list of {@code ModuleSelectors} for the supplied module names.
*
* <p>The unnamed module is not supported.
*
* <p>The {@link Set} supplied to this method should have a reliable iteration
* order to support reliable discovery and execution order. It is therefore
* recommended that the set be a {@link java.util.SequencedSet} (on Java 21
* or higher), {@link java.util.SortedSet}, {@link java.util.LinkedHashSet},
* or similar. Note that {@link Set#of(Object[])} and related {@code Set.of()}
* methods do not guarantee a reliable iteration order.
*
* @param moduleNames the module names to select; never {@code null}, never
* containing {@code null} or blank
* @since 1.1
* @see ModuleSelector
*/
@API(status = STABLE, since = "1.10")
public static List<ModuleSelector> selectModules(Set<String> moduleNames) {
Preconditions.notNull(moduleNames, "Module names must not be null");
Preconditions.containsNoNullElements(moduleNames, "Individual module name must not be null");
// @formatter:off
return moduleNames.stream()
.map(DiscoverySelectors::selectModule)
// unmodifiable since this is a public, non-internal method
.toList();
// @formatter:on
}
/**
* Create a {@code PackageSelector} for the supplied package name.
*
* <p>The default package is represented by an empty string ({@code ""}).
*
* @param packageName the package name to select; never {@code null} and
* never containing whitespace only
* @see PackageSelector
*/
public static PackageSelector selectPackage(String packageName) {
Preconditions.notNull(packageName, "Package name must not be null");
Preconditions.condition(packageName.isEmpty() || !packageName.isBlank(),
"Package name must not contain only whitespace");
return new PackageSelector(packageName.strip());
}
/**
* Create a {@code ClassSelector} for the supplied {@link Class}.
*
* @param clazz the | loader |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/ConfigurableObjectInputStream.java | {
"start": 4482,
"end": 4922
} | class ____ to resolve
* @param ex the original exception thrown when attempting to load the class
* @return the newly resolved class (never {@code null})
*/
protected Class<?> resolveFallbackIfPossible(String className, ClassNotFoundException ex)
throws IOException, ClassNotFoundException{
throw ex;
}
/**
* Return the fallback ClassLoader to use when no ClassLoader was specified
* and ObjectInputStream's own default | name |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/CollectingErrorsTest.java | {
"start": 14169,
"end": 17084
} | class ____ {
@Test
@DisplayName("should stop collecting when default limit reached")
void defaultLimit() {
// setup - create JSON with 101 errors (default limit is 100)
String json = buildInvalidOrderJson(101);
ObjectReader reader = MAPPER.readerFor(Order.class).problemCollectingReader();
// when
DeferredBindingException ex = expectDeferredBinding(reader, json);
// then - should get DeferredBindingException as primary when limit reached
assertThat(ex).isNotNull();
assertThat(ex.getProblems()).hasSize(100); // Stopped at limit
assertThat(ex.isLimitReached()).isTrue();
assertThat(ex.getMessage()).contains("limit reached");
// Original DatabindException should be in suppressed for debugging
Throwable[] suppressed = ex.getSuppressed();
assertThat(suppressed).hasSizeGreaterThanOrEqualTo(1);
assertThat(suppressed[0]).isInstanceOf(DatabindException.class);
}
@Test
@DisplayName("should respect custom limit")
void customLimit() {
// setup
String json = buildInvalidOrderJson(20);
ObjectReader reader = MAPPER.readerFor(Order.class).problemCollectingReader(10);
// when
DeferredBindingException ex = expectDeferredBinding(reader, json);
// then - should get DeferredBindingException as primary when limit reached
assertThat(ex).isNotNull();
assertThat(ex.getProblems()).hasSize(10); // Custom limit
assertThat(ex.isLimitReached()).isTrue();
// Original DatabindException should be in suppressed for debugging
Throwable[] suppressed = ex.getSuppressed();
assertThat(suppressed).hasSizeGreaterThanOrEqualTo(1);
assertThat(suppressed[0]).isInstanceOf(DatabindException.class);
}
@Test
@DisplayName("should not set limit reached when under limit")
void underLimit() {
// setup
String json = "{\"name\":\"John\",\"age\":\"invalid\"}";
ObjectReader reader = MAPPER.readerFor(Person.class).problemCollectingReader(100);
// when
DeferredBindingException ex = expectDeferredBinding(reader, json);
// then
assertThat(ex).isNotNull();
assertThat(ex.getProblems()).hasSize(1);
assertThat(ex.isLimitReached()).isFalse();
assertThat(ex.getMessage()).doesNotContain("limit reached");
}
}
/*
/**********************************************************************
/* Test: Unknown property handling
/**********************************************************************
*/
@Nested
@DisplayName("Unknown property handling")
| LimitReachedTests |
java | apache__camel | dsl/camel-jbang/camel-jbang-core/src/main/java/org/apache/camel/dsl/jbang/core/commands/process/ListVault.java | {
"start": 11908,
"end": 12335
} | class ____ implements Cloneable {
String pid;
String name;
String vault;
String region;
long lastCheck;
long lastReload;
String secret;
long timestamp;
Row copy() {
try {
return (Row) clone();
} catch (CloneNotSupportedException e) {
throw new RuntimeException(e);
}
}
}
}
| Row |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/appender/mom/jeromq/JeroMqManager.java | {
"start": 1640,
"end": 7531
} | class ____ extends AbstractManager {
/**
* System property to enable shutdown hook.
*/
public static final String SYS_PROPERTY_ENABLE_SHUTDOWN_HOOK = "log4j.jeromq.enableShutdownHook";
/**
* System property to control JeroMQ I/O thread count.
*/
public static final String SYS_PROPERTY_IO_THREADS = "log4j.jeromq.ioThreads";
private static final JeroMqManagerFactory FACTORY = new JeroMqManagerFactory();
private static final ZContext CONTEXT;
// Retained to avoid garbage collection of the hook
private static final Cancellable SHUTDOWN_HOOK;
static {
LOGGER.trace("JeroMqManager using ZMQ version {}", ZMQ.getVersionString());
final int ioThreads = PropertiesUtil.getProperties().getIntegerProperty(SYS_PROPERTY_IO_THREADS, 1);
LOGGER.trace("JeroMqManager creating ZMQ context with ioThreads = {}", ioThreads);
CONTEXT = new ZContext(ioThreads);
final boolean enableShutdownHook =
PropertiesUtil.getProperties().getBooleanProperty(SYS_PROPERTY_ENABLE_SHUTDOWN_HOOK, true);
if (enableShutdownHook && LogManager.getFactory() instanceof ShutdownCallbackRegistry) {
SHUTDOWN_HOOK = ((ShutdownCallbackRegistry) LogManager.getFactory()).addShutdownCallback(CONTEXT::close);
} else {
SHUTDOWN_HOOK = null;
}
}
private final ZMQ.Socket publisher;
private final List<String> endpoints;
private JeroMqManager(final String name, final JeroMqConfiguration config) {
super(null, name);
publisher = CONTEXT.createSocket(SocketType.PUB);
final ZMonitor monitor = new ZMonitor(CONTEXT, publisher);
monitor.add(Event.LISTENING);
monitor.start();
publisher.setAffinity(config.affinity);
publisher.setBacklog(config.backlog);
publisher.setDelayAttachOnConnect(config.delayAttachOnConnect);
if (config.identity != null) {
publisher.setIdentity(config.identity);
}
publisher.setIPv4Only(config.ipv4Only);
publisher.setLinger(config.linger);
publisher.setMaxMsgSize(config.maxMsgSize);
publisher.setRcvHWM(config.rcvHwm);
publisher.setReceiveBufferSize(config.receiveBufferSize);
publisher.setReceiveTimeOut(config.receiveTimeOut);
publisher.setReconnectIVL(config.reconnectIVL);
publisher.setReconnectIVLMax(config.reconnectIVLMax);
publisher.setSendBufferSize(config.sendBufferSize);
publisher.setSendTimeOut(config.sendTimeOut);
publisher.setSndHWM(config.sndHwm);
publisher.setTCPKeepAlive(config.tcpKeepAlive);
publisher.setTCPKeepAliveCount(config.tcpKeepAliveCount);
publisher.setTCPKeepAliveIdle(config.tcpKeepAliveIdle);
publisher.setTCPKeepAliveInterval(config.tcpKeepAliveInterval);
publisher.setXpubVerbose(config.xpubVerbose);
final List<String> endpoints = new ArrayList<String>(config.endpoints.size());
for (final String endpoint : config.endpoints) {
publisher.bind(endpoint);
// Retrieve the standardized list of endpoints,
// this also converts port 0 to an ephemeral port.
final ZEvent event = monitor.nextEvent();
endpoints.add(event.address);
}
this.endpoints = Collections.unmodifiableList(endpoints);
monitor.destroy();
LOGGER.debug("Created JeroMqManager with {}", config);
}
public boolean send(final byte[] data) {
return publisher.send(data);
}
@Override
protected boolean releaseSub(final long timeout, final TimeUnit timeUnit) {
publisher.close();
return true;
}
// not public, handy for testing
Socket getSocket() {
return publisher;
}
public List<String> getEndpoints() {
return endpoints;
}
public static JeroMqManager getJeroMqManager(
final String name,
final long affinity,
final long backlog,
final boolean delayAttachOnConnect,
final byte[] identity,
final boolean ipv4Only,
final long linger,
final long maxMsgSize,
final long rcvHwm,
final long receiveBufferSize,
final int receiveTimeOut,
final long reconnectIVL,
final long reconnectIVLMax,
final long sendBufferSize,
final int sendTimeOut,
final long sndHwm,
final int tcpKeepAlive,
final long tcpKeepAliveCount,
final long tcpKeepAliveIdle,
final long tcpKeepAliveInterval,
final boolean xpubVerbose,
final List<String> endpoints) {
return getManager(
name,
FACTORY,
new JeroMqConfiguration(
affinity,
backlog,
delayAttachOnConnect,
identity,
ipv4Only,
linger,
maxMsgSize,
rcvHwm,
receiveBufferSize,
receiveTimeOut,
reconnectIVL,
reconnectIVLMax,
sendBufferSize,
sendTimeOut,
sndHwm,
tcpKeepAlive,
tcpKeepAliveCount,
tcpKeepAliveIdle,
tcpKeepAliveInterval,
xpubVerbose,
endpoints));
}
public static ZMQ.Context getContext() {
return CONTEXT.getContext();
}
public static ZContext getZContext() {
return CONTEXT;
}
private static final | JeroMqManager |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/initializers/annotation/OrderedInitializersAnnotationConfigTests.java | {
"start": 3196,
"end": 3457
} | class ____ {
@Bean
public String foo() {
return PROFILE_GLOBAL;
}
@Bean
public String bar() {
return PROFILE_GLOBAL;
}
@Bean
public String baz() {
return PROFILE_GLOBAL;
}
}
@Configuration
@Profile(PROFILE_ONE)
static | GlobalConfig |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/common/BackoffPolicyTests.java | {
"start": 779,
"end": 5826
} | class ____ extends ESTestCase {
public void testWrapBackoffPolicy() {
TimeValue timeValue = timeValueMillis(between(0, Integer.MAX_VALUE));
int maxNumberOfRetries = between(1, 1000);
BackoffPolicy policy = BackoffPolicy.constantBackoff(timeValue, maxNumberOfRetries);
AtomicInteger retries = new AtomicInteger();
policy = BackoffPolicy.wrap(policy, retries::getAndIncrement);
int expectedRetries = 0;
{
// Fetching the iterator doesn't call the callback
Iterator<TimeValue> itr = policy.iterator();
assertEquals(expectedRetries, retries.get());
while (itr.hasNext()) {
// hasNext doesn't trigger the callback
assertEquals(expectedRetries, retries.get());
// next does
itr.next();
expectedRetries += 1;
assertEquals(expectedRetries, retries.get());
}
// next doesn't call the callback when there isn't a backoff available
expectThrows(NoSuchElementException.class, () -> itr.next());
assertEquals(expectedRetries, retries.get());
}
{
// The second iterator also calls the callback
Iterator<TimeValue> itr = policy.iterator();
itr.next();
expectedRetries += 1;
assertEquals(expectedRetries, retries.get());
}
}
public void testExponentialBackOff() {
long initialDelayMillis = randomLongBetween(0, 100);
int maxNumberOfRetries = randomIntBetween(0, 10);
BackoffPolicy exponentialBackoff = BackoffPolicy.exponentialBackoff(timeValueMillis(initialDelayMillis), maxNumberOfRetries);
int numberOfBackoffsToPerform = randomIntBetween(1, 3);
for (int i = 0; i < numberOfBackoffsToPerform; i++) {
Iterator<TimeValue> iterator = exponentialBackoff.iterator();
TimeValue lastTimeValue = null;
int counter = 0;
while (iterator.hasNext()) {
TimeValue timeValue = iterator.next();
if (lastTimeValue == null) {
assertEquals(timeValueMillis(initialDelayMillis), timeValue);
} else {
// intervals should be always increasing
assertTrue(timeValue.compareTo(lastTimeValue) > 0);
}
lastTimeValue = timeValue;
counter++;
}
assertEquals(maxNumberOfRetries, counter);
}
}
public void testLinearBackoffWithLimit() {
long incrementMillis = randomIntBetween(10, 500);
long limitMillis = randomIntBetween(1000, 5000);
int maxNumberOfRetries = randomIntBetween(0, 30);
BackoffPolicy timeValues = BackoffPolicy.linearBackoff(
timeValueMillis(incrementMillis),
maxNumberOfRetries,
timeValueMillis(limitMillis)
);
int counter = 0;
for (TimeValue timeValue : timeValues) {
counter++;
long unlimitedValue = counter * incrementMillis;
long expectedValue = Math.min(unlimitedValue, limitMillis);
assertEquals(timeValueMillis(expectedValue), timeValue);
}
assertEquals(counter, maxNumberOfRetries);
}
public void testLinearBackoffWithoutLimit() {
long incrementMillis = randomIntBetween(10, 500);
int maxNumberOfRetries = randomIntBetween(0, 30);
BackoffPolicy timeValues = BackoffPolicy.linearBackoff(timeValueMillis(incrementMillis), maxNumberOfRetries, null);
int counter = 0;
for (TimeValue timeValue : timeValues) {
counter++;
assertEquals(timeValueMillis(counter * incrementMillis), timeValue);
}
assertEquals(counter, maxNumberOfRetries);
}
public void testNoBackoff() {
BackoffPolicy noBackoff = BackoffPolicy.noBackoff();
int numberOfBackoffsToPerform = randomIntBetween(1, 3);
for (int i = 0; i < numberOfBackoffsToPerform; i++) {
Iterator<TimeValue> iterator = noBackoff.iterator();
assertFalse(iterator.hasNext());
}
}
public void testConstantBackoff() {
long delayMillis = randomLongBetween(0, 100);
int maxNumberOfRetries = randomIntBetween(0, 10);
BackoffPolicy exponentialBackoff = BackoffPolicy.constantBackoff(timeValueMillis(delayMillis), maxNumberOfRetries);
int numberOfBackoffsToPerform = randomIntBetween(1, 3);
for (int i = 0; i < numberOfBackoffsToPerform; i++) {
final Iterator<TimeValue> iterator = exponentialBackoff.iterator();
int counter = 0;
while (iterator.hasNext()) {
TimeValue timeValue = iterator.next();
assertEquals(timeValueMillis(delayMillis), timeValue);
counter++;
}
assertEquals(maxNumberOfRetries, counter);
}
}
}
| BackoffPolicyTests |
java | spring-projects__spring-framework | spring-context-support/src/test/java/org/springframework/mail/SimpleMailMessageTests.java | {
"start": 1040,
"end": 4755
} | class ____ {
@Test
void testSimpleMessageCopyCtor() {
SimpleMailMessage message = new SimpleMailMessage();
message.setFrom("me@mail.org");
message.setTo("you@mail.org");
SimpleMailMessage messageCopy = new SimpleMailMessage(message);
assertThat(messageCopy.getFrom()).isEqualTo("me@mail.org");
assertThat(messageCopy.getTo()[0]).isEqualTo("you@mail.org");
message.setReplyTo("reply@mail.org");
message.setCc("he@mail.org", "she@mail.org");
message.setBcc("us@mail.org", "them@mail.org");
Date sentDate = new Date();
message.setSentDate(sentDate);
message.setSubject("my subject");
message.setText("my text");
assertThat(message.getFrom()).isEqualTo("me@mail.org");
assertThat(message.getReplyTo()).isEqualTo("reply@mail.org");
assertThat(message.getTo()[0]).isEqualTo("you@mail.org");
List<String> ccs = Arrays.asList(message.getCc());
assertThat(ccs).contains("he@mail.org");
assertThat(ccs).contains("she@mail.org");
List<String> bccs = Arrays.asList(message.getBcc());
assertThat(bccs).contains("us@mail.org");
assertThat(bccs).contains("them@mail.org");
assertThat(message.getSentDate()).isEqualTo(sentDate);
assertThat(message.getSubject()).isEqualTo("my subject");
assertThat(message.getText()).isEqualTo("my text");
messageCopy = new SimpleMailMessage(message);
assertThat(messageCopy.getFrom()).isEqualTo("me@mail.org");
assertThat(messageCopy.getReplyTo()).isEqualTo("reply@mail.org");
assertThat(messageCopy.getTo()[0]).isEqualTo("you@mail.org");
ccs = Arrays.asList(messageCopy.getCc());
assertThat(ccs).contains("he@mail.org");
assertThat(ccs).contains("she@mail.org");
bccs = Arrays.asList(message.getBcc());
assertThat(bccs).contains("us@mail.org");
assertThat(bccs).contains("them@mail.org");
assertThat(messageCopy.getSentDate()).isEqualTo(sentDate);
assertThat(messageCopy.getSubject()).isEqualTo("my subject");
assertThat(messageCopy.getText()).isEqualTo("my text");
}
@Test
void testDeepCopyOfStringArrayTypedFieldsOnCopyCtor() {
SimpleMailMessage original = new SimpleMailMessage();
original.setTo("fiona@mail.org", "apple@mail.org");
original.setCc("he@mail.org", "she@mail.org");
original.setBcc("us@mail.org", "them@mail.org");
SimpleMailMessage copy = new SimpleMailMessage(original);
original.getTo()[0] = "mmm@mmm.org";
original.getCc()[0] = "mmm@mmm.org";
original.getBcc()[0] = "mmm@mmm.org";
assertThat(copy.getTo()[0]).isEqualTo("fiona@mail.org");
assertThat(copy.getCc()[0]).isEqualTo("he@mail.org");
assertThat(copy.getBcc()[0]).isEqualTo("us@mail.org");
}
/**
* Tests that two equal SimpleMailMessages have equal hash codes.
*/
@Test
public final void testHashCode() {
SimpleMailMessage message1 = new SimpleMailMessage();
message1.setFrom("from@somewhere");
message1.setReplyTo("replyTo@somewhere");
message1.setTo("to@somewhere");
message1.setCc("cc@somewhere");
message1.setBcc("bcc@somewhere");
message1.setSentDate(new Date());
message1.setSubject("subject");
message1.setText("text");
// Copy the message
SimpleMailMessage message2 = new SimpleMailMessage(message1);
assertThat(message2).isEqualTo(message1);
assertThat(message2.hashCode()).isEqualTo(message1.hashCode());
}
@Test
public final void testEqualsObject() {
SimpleMailMessage message1;
SimpleMailMessage message2;
// Same object is equal
message1 = new SimpleMailMessage();
message2 = message1;
assertThat(message1).isEqualTo(message2);
// Null object is not equal
message1 = new SimpleMailMessage();
message2 = null;
boolean condition1 = !(message1.equals(message2));
assertThat(condition1).isTrue();
// Different | SimpleMailMessageTests |
java | apache__camel | components/camel-jms/src/test/java/org/apache/camel/component/jms/integration/issues/BeanExceptionIT.java | {
"start": 3352,
"end": 3525
} | class ____ {
public String doSomething(String input) {
throw new IllegalArgumentException("Forced exception by unit test");
}
}
}
| MyExceptionBean |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/comment/CommentsTest.java | {
"start": 3360,
"end": 3492
} | class ____ {
@Comment("I am amount")
private BigDecimal amount;
@Comment("I am currency")
private Currency currency;
}
}
| Money |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/PathUtils.java | {
"start": 904,
"end": 1707
} | class ____ {
public static Path getTestPath(Class<?> caller) {
return getTestPath(caller, true);
}
public static Path getTestPath(Class<?> caller, boolean create) {
return new Path(getTestDirName(caller));
}
public static File getTestDir(Class<?> caller) {
return getTestDir(caller, true);
}
public static File getTestDir(Class<?> caller, boolean create) {
File dir = new File(GenericTestUtils.getRandomizedTestDir(),
caller.getSimpleName());
if (create) {
dir.mkdirs();
}
return dir;
}
public static String getTestDirName(Class<?> caller) {
return getTestDirName(caller, true);
}
public static String getTestDirName(Class<?> caller, boolean create) {
return getTestDir(caller, create).getAbsolutePath();
}
}
| PathUtils |
java | quarkusio__quarkus | integration-tests/jackson/src/main/java/io/quarkus/it/jackson/model/SampleResponse.java | {
"start": 233,
"end": 971
} | class ____ {
private String blogTitle;
private String name;
public SampleResponse() {
}
public SampleResponse(String blogTitle, String name) {
this.blogTitle = blogTitle;
this.name = name;
}
public String getBlogTitle() {
return blogTitle;
}
public void setBlogTitle(String blogTitle) {
this.blogTitle = blogTitle;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
@Override
public String toString() {
return "SampleResponse{" +
"blogTitle='" + blogTitle + '\'' +
", name='" + name + '\'' +
'}';
}
}
| SampleResponse |
java | apache__avro | lang/java/avro/src/main/java/org/apache/avro/reflect/FieldAccessReflect.java | {
"start": 1018,
"end": 1526
} | class ____ extends FieldAccess {
@Override
protected FieldAccessor getAccessor(Field field) {
AvroEncode enc = ReflectionUtil.getAvroEncode(field);
if (enc != null)
try {
return new ReflectionBasesAccessorCustomEncoded(field, enc.using().getDeclaredConstructor().newInstance());
} catch (Exception e) {
throw new AvroRuntimeException("Could not instantiate custom Encoding");
}
return new ReflectionBasedAccessor(field);
}
private static | FieldAccessReflect |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/bytecode/enhance/internal/bytebuddy/CodeTemplates.java | {
"start": 5436,
"end": 5680
} | class ____ {
@Advice.OnMethodExit
static void $$_hibernate_getCollectionTracker( @Advice.Return(readOnly = false) CollectionTracker returned) {
returned = NoopCollectionTracker.INSTANCE;
}
}
static | GetCollectionTrackerWithoutCollections |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java | {
"start": 49135,
"end": 169735
} | class ____ implements Runnable {
/**
* Calls which have not yet been assigned to a node.
* Only accessed from this thread.
*/
private final ArrayList<Call> pendingCalls = new ArrayList<>();
/**
* Maps nodes to calls that we want to send.
* Only accessed from this thread.
*/
private final Map<Node, List<Call>> callsToSend = new HashMap<>();
/**
* Maps node ID strings to calls that have been sent.
* Only accessed from this thread.
*/
private final Map<String, Call> callsInFlight = new HashMap<>();
/**
* Maps correlation IDs to calls that have been sent.
* Only accessed from this thread.
*/
private final Map<Integer, Call> correlationIdToCalls = new HashMap<>();
/**
* Pending calls. Protected by the object monitor.
*/
private final List<Call> newCalls = new LinkedList<>();
/**
* Maps node ID strings to their readiness deadlines. A node will appear in this
* map if there are callsToSend which are waiting for it to be ready, and there
* are no calls in flight using the node.
*/
private final Map<Node, Long> nodeReadyDeadlines = new HashMap<>();
/**
* Whether the admin client is closing.
*/
private volatile boolean closing = false;
/**
* Time out the elements in the pendingCalls list which are expired.
*
* @param processor The timeout processor.
*/
private void timeoutPendingCalls(TimeoutProcessor processor) {
int numTimedOut = processor.handleTimeouts(pendingCalls, "Timed out waiting for a node assignment.");
if (numTimedOut > 0)
log.debug("Timed out {} pending calls.", numTimedOut);
}
/**
* Time out calls which have been assigned to nodes.
*
* @param processor The timeout processor.
*/
private int timeoutCallsToSend(TimeoutProcessor processor) {
int numTimedOut = 0;
for (List<Call> callList : callsToSend.values()) {
numTimedOut += processor.handleTimeouts(callList,
"Timed out waiting to send the call.");
}
if (numTimedOut > 0)
log.debug("Timed out {} call(s) with assigned nodes.", numTimedOut);
return numTimedOut;
}
/**
* Drain all the calls from newCalls into pendingCalls.
* <p>
* This function holds the lock for the minimum amount of time, to avoid blocking
* users of AdminClient who will also take the lock to add new calls.
*/
private synchronized void drainNewCalls() {
transitionToPendingAndClearList(newCalls);
}
/**
* Add some calls to pendingCalls, and then clear the input list.
* Also clears Call#curNode.
*
* @param calls The calls to add.
*/
private void transitionToPendingAndClearList(List<Call> calls) {
for (Call call : calls) {
call.curNode = null;
pendingCalls.add(call);
}
calls.clear();
}
/**
* Choose nodes for the calls in the pendingCalls list.
*
* @param now The current time in milliseconds.
* @return The minimum time until a call is ready to be retried if any of the pending
* calls are backing off after a failure
*/
private long maybeDrainPendingCalls(long now) {
long pollTimeout = Long.MAX_VALUE;
log.trace("Trying to choose nodes for {} at {}", pendingCalls, now);
List<Call> toRemove = new ArrayList<>();
// Using pendingCalls.size() to get the list size before the for-loop to avoid infinite loop.
// If call.fail keeps adding the call to pendingCalls,
// the loop like for (int i = 0; i < pendingCalls.size(); i++) can't stop.
int pendingSize = pendingCalls.size();
// pendingCalls could be modified in this loop,
// hence using for-loop instead of iterator to avoid ConcurrentModificationException.
for (int i = 0; i < pendingSize; i++) {
Call call = pendingCalls.get(i);
// If the call is being retried, await the proper backoff before finding the node
if (now < call.nextAllowedTryMs) {
pollTimeout = Math.min(pollTimeout, call.nextAllowedTryMs - now);
} else if (maybeDrainPendingCall(call, now)) {
toRemove.add(call);
}
}
// Use remove instead of removeAll to avoid delete all matched elements
for (Call call : toRemove) {
pendingCalls.remove(call);
}
return pollTimeout;
}
/**
* Check whether a pending call can be assigned a node. Return true if the pending call was either
* transferred to the callsToSend collection or if the call was failed. Return false if it
* should remain pending.
*/
private boolean maybeDrainPendingCall(Call call, long now) {
try {
Node node = call.nodeProvider.provide();
if (node != null) {
log.trace("Assigned {} to node {}", call, node);
call.curNode = node;
getOrCreateListValue(callsToSend, node).add(call);
return true;
} else {
log.trace("Unable to assign {} to a node.", call);
return false;
}
} catch (Throwable t) {
// Handle authentication errors while choosing nodes.
log.debug("Unable to choose node for {}", call, t);
call.fail(now, t);
return true;
}
}
/**
* Send the calls which are ready.
*
* @param now The current time in milliseconds.
* @return The minimum timeout we need for poll().
*/
private long sendEligibleCalls(long now) {
long pollTimeout = Long.MAX_VALUE;
for (Iterator<Map.Entry<Node, List<Call>>> iter = callsToSend.entrySet().iterator(); iter.hasNext(); ) {
Map.Entry<Node, List<Call>> entry = iter.next();
List<Call> calls = entry.getValue();
if (calls.isEmpty()) {
iter.remove();
continue;
}
Node node = entry.getKey();
if (callsInFlight.containsKey(node.idString())) {
log.trace("Still waiting for other calls to finish on node {}.", node);
nodeReadyDeadlines.remove(node);
continue;
}
if (!client.ready(node, now)) {
Long deadline = nodeReadyDeadlines.get(node);
if (deadline != null) {
if (now >= deadline) {
log.info("Disconnecting from {} and revoking {} node assignment(s) " +
"because the node is taking too long to become ready.",
node.idString(), calls.size());
transitionToPendingAndClearList(calls);
client.disconnect(node.idString());
nodeReadyDeadlines.remove(node);
iter.remove();
continue;
}
pollTimeout = Math.min(pollTimeout, deadline - now);
} else {
nodeReadyDeadlines.put(node, now + requestTimeoutMs);
}
long nodeTimeout = client.pollDelayMs(node, now);
pollTimeout = Math.min(pollTimeout, nodeTimeout);
log.trace("Client is not ready to send to {}. Must delay {} ms", node, nodeTimeout);
continue;
}
// Subtract the time we spent waiting for the node to become ready from
// the total request time.
int remainingRequestTime;
Long deadlineMs = nodeReadyDeadlines.remove(node);
if (deadlineMs == null) {
remainingRequestTime = requestTimeoutMs;
} else {
remainingRequestTime = calcTimeoutMsRemainingAsInt(now, deadlineMs);
}
while (!calls.isEmpty()) {
Call call = calls.remove(0);
int timeoutMs = Math.min(remainingRequestTime,
calcTimeoutMsRemainingAsInt(now, call.deadlineMs));
AbstractRequest.Builder<?> requestBuilder;
try {
requestBuilder = call.createRequest(timeoutMs);
} catch (Throwable t) {
call.fail(now, new KafkaException(String.format(
"Internal error sending %s to %s.", call.callName, node), t));
continue;
}
ClientRequest clientRequest = client.newClientRequest(node.idString(),
requestBuilder, now, true, timeoutMs, null);
log.debug("Sending {} to {}. correlationId={}, timeoutMs={}",
requestBuilder, node, clientRequest.correlationId(), timeoutMs);
client.send(clientRequest, now);
callsInFlight.put(node.idString(), call);
correlationIdToCalls.put(clientRequest.correlationId(), call);
break;
}
}
return pollTimeout;
}
/**
* Time out expired calls that are in flight.
* <p>
* Calls that are in flight may have been partially or completely sent over the wire. They may
* even be in the process of being processed by the remote server. At the moment, our only option
* to time them out is to close the entire connection.
*
* @param processor The timeout processor.
*/
private void timeoutCallsInFlight(TimeoutProcessor processor) {
int numTimedOut = 0;
for (Map.Entry<String, Call> entry : callsInFlight.entrySet()) {
Call call = entry.getValue();
String nodeId = entry.getKey();
if (processor.callHasExpired(call)) {
log.info("Disconnecting from {} due to timeout while awaiting {}", nodeId, call);
client.disconnect(nodeId);
numTimedOut++;
// We don't remove anything from the callsInFlight data structure. Because the connection
// has been closed, the calls should be returned by the next client#poll(),
// and handled at that point.
}
}
if (numTimedOut > 0)
log.debug("Timed out {} call(s) in flight.", numTimedOut);
}
/**
* Handle responses from the server.
*
* @param now The current time in milliseconds.
* @param responses The latest responses from KafkaClient.
*/
private void handleResponses(long now, List<ClientResponse> responses) {
for (ClientResponse response : responses) {
int correlationId = response.requestHeader().correlationId();
Call call = correlationIdToCalls.get(correlationId);
if (call == null) {
// If the server returns information about a correlation ID we didn't use yet,
// an internal server error has occurred. Close the connection and log an error message.
log.error("Internal server error on {}: server returned information about unknown " +
"correlation ID {}, requestHeader = {}", response.destination(), correlationId,
response.requestHeader());
client.disconnect(response.destination());
continue;
}
// Stop tracking this call.
correlationIdToCalls.remove(correlationId);
if (!callsInFlight.remove(response.destination(), call)) {
log.error("Internal server error on {}: ignoring call {} in correlationIdToCall " +
"that did not exist in callsInFlight", response.destination(), call);
continue;
}
// Handle the result of the call. This may involve retrying the call, if we got a
// retriable exception.
if (response.versionMismatch() != null) {
call.fail(now, response.versionMismatch());
} else if (response.wasDisconnected()) {
AuthenticationException authException = client.authenticationException(call.curNode());
if (authException != null) {
call.fail(now, authException);
} else {
call.fail(now, new DisconnectException(String.format(
"Cancelled %s request with correlation id %d due to node %s being disconnected",
call.callName, correlationId, response.destination())));
}
} else {
try {
call.handleResponse(response.responseBody());
adminFetchMetricsManager.recordLatency(response.destination(), response.requestLatencyMs());
if (log.isTraceEnabled())
log.trace("{} got response {}", call, response.responseBody());
} catch (Throwable t) {
if (log.isTraceEnabled())
log.trace("{} handleResponse failed with {}", call, prettyPrintException(t));
call.fail(now, t);
}
}
}
}
/**
* Unassign calls that have not yet been sent based on some predicate. For example, this
* is used to reassign the calls that have been assigned to a disconnected node.
*
* @param shouldUnassign Condition for reassignment. If the predicate is true, then the calls will
* be put back in the pendingCalls collection and they will be reassigned
*/
private void unassignUnsentCalls(Predicate<Node> shouldUnassign) {
for (Iterator<Map.Entry<Node, List<Call>>> iter = callsToSend.entrySet().iterator(); iter.hasNext(); ) {
Map.Entry<Node, List<Call>> entry = iter.next();
Node node = entry.getKey();
List<Call> awaitingCalls = entry.getValue();
if (awaitingCalls.isEmpty()) {
iter.remove();
} else if (shouldUnassign.test(node)) {
nodeReadyDeadlines.remove(node);
transitionToPendingAndClearList(awaitingCalls);
iter.remove();
}
}
}
private boolean hasActiveExternalCalls(Collection<Call> calls) {
for (Call call : calls) {
if (!call.isInternal()) {
return true;
}
}
return false;
}
/**
* Return true if there are currently active external calls.
*/
private boolean hasActiveExternalCalls() {
if (hasActiveExternalCalls(pendingCalls)) {
return true;
}
for (List<Call> callList : callsToSend.values()) {
if (hasActiveExternalCalls(callList)) {
return true;
}
}
return hasActiveExternalCalls(correlationIdToCalls.values());
}
private boolean threadShouldExit(long now, long curHardShutdownTimeMs) {
if (!hasActiveExternalCalls()) {
log.trace("All work has been completed, and the I/O thread is now exiting.");
return true;
}
if (now >= curHardShutdownTimeMs) {
log.info("Forcing a hard I/O thread shutdown. Requests in progress will be aborted.");
return true;
}
log.debug("Hard shutdown in {} ms.", curHardShutdownTimeMs - now);
return false;
}
@Override
public void run() {
log.debug("Thread starting");
try {
processRequests();
} finally {
closing = true;
AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId, metrics);
int numTimedOut = 0;
TimeoutProcessor timeoutProcessor = new TimeoutProcessor(Long.MAX_VALUE);
synchronized (this) {
numTimedOut += timeoutProcessor.handleTimeouts(newCalls, "The AdminClient thread has exited.");
}
numTimedOut += timeoutProcessor.handleTimeouts(pendingCalls, "The AdminClient thread has exited.");
numTimedOut += timeoutCallsToSend(timeoutProcessor);
numTimedOut += timeoutProcessor.handleTimeouts(correlationIdToCalls.values(),
"The AdminClient thread has exited.");
if (numTimedOut > 0) {
log.info("Timed out {} remaining operation(s) during close.", numTimedOut);
}
closeQuietly(client, "KafkaClient");
closeQuietly(metrics, "Metrics");
log.debug("Exiting AdminClientRunnable thread.");
}
}
        /**
         * The main loop of the AdminClient I/O thread.
         * <p>
         * Each iteration drains newly enqueued calls, applies timeouts, assigns
         * nodes to pending calls, sends eligible requests, schedules a metadata
         * refresh when one is due, and then polls the network client for
         * responses. The loop exits only when a hard shutdown deadline is set
         * and {@code threadShouldExit} agrees that it is time to stop.
         */
        private void processRequests() {
            long now = time.milliseconds();
            while (true) {
                // Copy newCalls into pendingCalls.
                drainNewCalls();

                // Check if the AdminClient thread should shut down.
                long curHardShutdownTimeMs = hardShutdownTimeMs.get();
                if ((curHardShutdownTimeMs != INVALID_SHUTDOWN_TIME) && threadShouldExit(now, curHardShutdownTimeMs))
                    break;

                // Handle timeouts.
                TimeoutProcessor timeoutProcessor = timeoutProcessorFactory.create(now);
                timeoutPendingCalls(timeoutProcessor);
                timeoutCallsToSend(timeoutProcessor);
                timeoutCallsInFlight(timeoutProcessor);

                // Cap the poll at 20 minutes (1200000 ms) or the next call timeout,
                // whichever is sooner; also respect the hard shutdown deadline.
                long pollTimeout = Math.min(1200000, timeoutProcessor.nextTimeoutMs());
                if (curHardShutdownTimeMs != INVALID_SHUTDOWN_TIME) {
                    pollTimeout = Math.min(pollTimeout, curHardShutdownTimeMs - now);
                }

                // Choose nodes for our pending calls.
                pollTimeout = Math.min(pollTimeout, maybeDrainPendingCalls(now));
                long metadataFetchDelayMs = metadataManager.metadataFetchDelayMs(now);
                if (metadataFetchDelayMs == 0) {
                    metadataManager.transitionToUpdatePending(now);
                    Call metadataCall = makeMetadataCall(now);
                    // Create a new metadata fetch call and add it to the end of pendingCalls.
                    // Assign a node for just the new call (we handled the other pending nodes above).
                    if (!maybeDrainPendingCall(metadataCall, now))
                        pendingCalls.add(metadataCall);
                }
                pollTimeout = Math.min(pollTimeout, sendEligibleCalls(now));

                if (metadataFetchDelayMs > 0) {
                    pollTimeout = Math.min(pollTimeout, metadataFetchDelayMs);
                }

                // Ensure that we use a small poll timeout if there are pending calls which need to be sent
                if (!pendingCalls.isEmpty())
                    pollTimeout = Math.min(pollTimeout, retryBackoffMs);

                // Wait for network responses.
                log.trace("Entering KafkaClient#poll(timeout={})", pollTimeout);
                List<ClientResponse> responses = client.poll(Math.max(0L, pollTimeout), now);
                log.trace("KafkaClient#poll retrieved {} response(s)", responses.size());

                // unassign calls to disconnected nodes
                unassignUnsentCalls(client::connectionFailed);

                // Update the current time and handle the latest responses.
                now = time.milliseconds();
                handleResponses(now, responses);
            }
        }
/**
* Queue a call for sending.
* <p>
* If the AdminClient thread has exited, this will fail. Otherwise, it will succeed (even
* if the AdminClient is shutting down). This function should called when retrying an
* existing call.
*
* @param call The new call object.
* @param now The current time in milliseconds.
*/
void enqueue(Call call, long now) {
if (call.tries > maxRetries) {
log.debug("Max retries {} for {} reached", maxRetries, call);
call.handleTimeoutFailure(time.milliseconds(), new TimeoutException(
"Exceeded maxRetries after " + call.tries + " tries."));
return;
}
if (log.isDebugEnabled()) {
log.debug("Queueing {} with a timeout {} ms from now.", call,
Math.min(requestTimeoutMs, call.deadlineMs - now));
}
boolean accepted = false;
synchronized (this) {
if (!closing) {
newCalls.add(call);
accepted = true;
}
}
if (accepted) {
client.wakeup(); // wake the thread if it is in poll()
} else {
log.debug("The AdminClient thread has exited. Timing out {}.", call);
call.handleTimeoutFailure(time.milliseconds(),
new TimeoutException("The AdminClient thread has exited."));
}
}
/**
* Initiate a new call.
* <p>
* This will fail if the AdminClient is scheduled to shut down.
*
* @param call The new call object.
* @param now The current time in milliseconds.
*/
void call(Call call, long now) {
if (hardShutdownTimeMs.get() != INVALID_SHUTDOWN_TIME) {
log.debug("Cannot accept new call {} when AdminClient is closing.", call);
call.handleFailure(new IllegalStateException("Cannot accept new calls when AdminClient is closing."));
} else if (metadataManager.usingBootstrapControllers() &&
(!call.nodeProvider.supportsUseControllers())) {
call.fail(now, new UnsupportedEndpointTypeException("This Admin API is not " +
"yet supported when communicating directly with the controller quorum."));
} else {
enqueue(call, now);
}
}
/**
* Create a new metadata call.
*/
private Call makeMetadataCall(long now) {
if (metadataManager.usingBootstrapControllers()) {
return makeControllerMetadataCall(now);
} else {
return makeBrokerMetadataCall(now);
}
}
        /**
         * Create a metadata refresh call for use against the controller quorum.
         * On success the parsed cluster view is pushed into the metadata
         * manager; on any failure the metadata manager is notified via
         * {@code updateFailed}.
         */
        private Call makeControllerMetadataCall(long now) {
            // Use DescribeCluster here, as specified by KIP-919.
            return new Call(true, "describeCluster", calcDeadlineMs(now, requestTimeoutMs),
                new MetadataUpdateNodeIdProvider()) {
                @Override
                public DescribeClusterRequest.Builder createRequest(int timeoutMs) {
                    // Only node information is needed, so skip authorized operations.
                    return new DescribeClusterRequest.Builder(new DescribeClusterRequestData()
                        .setIncludeClusterAuthorizedOperations(false)
                        .setEndpointType(EndpointType.CONTROLLER.id()));
                }

                @Override
                public void handleResponse(AbstractResponse abstractResponse) {
                    DescribeClusterResponse response = (DescribeClusterResponse) abstractResponse;
                    Cluster cluster;
                    try {
                        cluster = parseDescribeClusterResponse(response.data());
                    } catch (ApiException e) {
                        // Treat a malformed/errored response the same as any other failure.
                        handleFailure(e);
                        return;
                    }
                    long now = time.milliseconds();
                    metadataManager.update(cluster, now);

                    // Unassign all unsent requests after a metadata refresh to allow for a new
                    // destination to be selected from the new metadata
                    unassignUnsentCalls(node -> true);
                }

                @Override
                boolean handleUnsupportedVersionException(final UnsupportedVersionException e) {
                    // No alternative RPC to fall back to; record the failure and stop.
                    metadataManager.updateFailed(e);
                    return false;
                }

                @Override
                public void handleFailure(Throwable e) {
                    metadataManager.updateFailed(e);
                }
            };
        }
        /**
         * Create a metadata refresh call that targets a broker.
         */
        private Call makeBrokerMetadataCall(long now) {
            // We use MetadataRequest here so that we can continue to support brokers that are too
            // old to handle DescribeCluster.
            return new Call(true, "fetchMetadata", calcDeadlineMs(now, requestTimeoutMs),
                new MetadataUpdateNodeIdProvider()) {
                @Override
                public MetadataRequest.Builder createRequest(int timeoutMs) {
                    // Since this only requests node information, it's safe to pass true
                    // for allowAutoTopicCreation (and it simplifies communication with
                    // older brokers)
                    return new MetadataRequest.Builder(new MetadataRequestData()
                        .setTopics(Collections.emptyList())
                        .setAllowAutoTopicCreation(true));
                }

                @Override
                public void handleResponse(AbstractResponse abstractResponse) {
                    MetadataResponse response = (MetadataResponse) abstractResponse;
                    long now = time.milliseconds();
                    // On REBOOTSTRAP_REQUIRED, restart bootstrapping instead of
                    // applying this metadata.
                    if (response.topLevelError() == Errors.REBOOTSTRAP_REQUIRED)
                        metadataManager.initiateRebootstrap();
                    else
                        metadataManager.update(response.buildCluster(), now);

                    // Unassign all unsent requests after a metadata refresh to allow for a new
                    // destination to be selected from the new metadata
                    unassignUnsentCalls(node -> true);
                }

                @Override
                boolean handleUnsupportedVersionException(final UnsupportedVersionException e) {
                    metadataManager.updateFailed(e);
                    return false;
                }

                @Override
                public void handleFailure(Throwable e) {
                    metadataManager.updateFailed(e);
                }
            };
        }
}
static Cluster parseDescribeClusterResponse(DescribeClusterResponseData response) {
ApiError apiError = new ApiError(response.errorCode(), response.errorMessage());
if (apiError.isFailure()) {
throw apiError.exception();
}
if (response.endpointType() != EndpointType.CONTROLLER.id()) {
throw new MismatchedEndpointTypeException("Expected response from CONTROLLER " +
"endpoint, but got response from endpoint type " + (int) response.endpointType());
}
List<Node> nodes = new ArrayList<>();
Node controllerNode = null;
for (DescribeClusterResponseData.DescribeClusterBroker node : response.brokers()) {
Node newNode = new Node(node.brokerId(), node.host(), node.port(), node.rack());
nodes.add(newNode);
if (node.brokerId() == response.controllerId()) {
controllerNode = newNode;
}
}
return new Cluster(response.clusterId(),
nodes,
Collections.emptyList(),
Collections.emptySet(),
Collections.emptySet(),
controllerNode);
}
/**
* Returns true if a topic name cannot be represented in an RPC. This function does NOT check
* whether the name is too long, contains invalid characters, etc. It is better to enforce
* those policies on the server, so that they can be changed in the future if needed.
*/
private static boolean topicNameIsUnrepresentable(String topicName) {
return topicName == null || topicName.isEmpty();
}
private static boolean topicIdIsUnrepresentable(Uuid topicId) {
return topicId == null || topicId.equals(Uuid.ZERO_UUID);
}
    /**
     * The number of calls currently waiting for a node assignment.
     * <p>
     * Visible for testing only.
     */
    int numPendingCalls() {
        return runnable.pendingCalls.size();
    }
/**
* Fail futures in the given stream which are not done.
* Used when a response handler expected a result for some entity but no result was present.
*/
private static <K, V> void completeUnrealizedFutures(
Stream<Map.Entry<K, KafkaFutureImpl<V>>> futures,
Function<K, String> messageFormatter) {
futures.filter(entry -> !entry.getValue().isDone()).forEach(entry ->
entry.getValue().completeExceptionally(new ApiException(messageFormatter.apply(entry.getKey()))));
}
/**
* Fail futures in the given Map which were retried due to exceeding quota. We propagate
* the initial error back to the caller if the request timed out.
*/
private static <K, V> void maybeCompleteQuotaExceededException(
boolean shouldRetryOnQuotaViolation,
Throwable throwable,
Map<K, KafkaFutureImpl<V>> futures,
Map<K, ThrottlingQuotaExceededException> quotaExceededExceptions,
int throttleTimeDelta) {
if (shouldRetryOnQuotaViolation && throwable instanceof TimeoutException) {
quotaExceededExceptions.forEach((key, value) -> futures.get(key).completeExceptionally(
new ThrottlingQuotaExceededException(
Math.max(0, value.throttleTimeMs() - throttleTimeDelta),
value.getMessage())));
}
}
@Override
public CreateTopicsResult createTopics(final Collection<NewTopic> newTopics,
final CreateTopicsOptions options) {
final Map<String, KafkaFutureImpl<TopicMetadataAndConfig>> topicFutures = new HashMap<>(newTopics.size());
final CreatableTopicCollection topics = new CreatableTopicCollection();
for (NewTopic newTopic : newTopics) {
if (topicNameIsUnrepresentable(newTopic.name())) {
KafkaFutureImpl<TopicMetadataAndConfig> future = new KafkaFutureImpl<>();
future.completeExceptionally(new InvalidTopicException("The given topic name '" +
newTopic.name() + "' cannot be represented in a request."));
topicFutures.put(newTopic.name(), future);
} else if (!topicFutures.containsKey(newTopic.name())) {
topicFutures.put(newTopic.name(), new KafkaFutureImpl<>());
topics.add(newTopic.convertToCreatableTopic());
}
}
if (!topics.isEmpty()) {
final long now = time.milliseconds();
final long deadline = calcDeadlineMs(now, options.timeoutMs());
final Call call = getCreateTopicsCall(options, topicFutures, topics,
Collections.emptyMap(), now, deadline);
runnable.call(call, now);
}
return new CreateTopicsResult(new HashMap<>(topicFutures));
}
    /**
     * Build the (possibly retried) CreateTopics call.
     *
     * @param options                 request options (validate-only, quota-retry behavior, ...)
     * @param futures                 per-topic result futures to complete
     * @param topics                  the topics to attempt in this request
     * @param quotaExceededExceptions quota errors from a previous attempt, surfaced on timeout
     * @param now                     current time in milliseconds
     * @param deadline                absolute deadline shared by all retries of this call
     */
    private Call getCreateTopicsCall(final CreateTopicsOptions options,
                                     final Map<String, KafkaFutureImpl<TopicMetadataAndConfig>> futures,
                                     final CreatableTopicCollection topics,
                                     final Map<String, ThrottlingQuotaExceededException> quotaExceededExceptions,
                                     final long now,
                                     final long deadline) {
        return new Call("createTopics", deadline, new ControllerNodeProvider()) {
            @Override
            public CreateTopicsRequest.Builder createRequest(int timeoutMs) {
                return new CreateTopicsRequest.Builder(
                    new CreateTopicsRequestData()
                        .setTopics(topics)
                        .setTimeoutMs(timeoutMs)
                        .setValidateOnly(options.shouldValidateOnly()));
            }

            @Override
            public void handleResponse(AbstractResponse abstractResponse) {
                // Check for controller change
                handleNotControllerError(abstractResponse);
                // Handle server responses for particular topics.
                final CreateTopicsResponse response = (CreateTopicsResponse) abstractResponse;
                final CreatableTopicCollection retryTopics = new CreatableTopicCollection();
                final Map<String, ThrottlingQuotaExceededException> retryTopicQuotaExceededExceptions = new HashMap<>();
                for (CreatableTopicResult result : response.data().topics()) {
                    KafkaFutureImpl<TopicMetadataAndConfig> future = futures.get(result.name());
                    if (future == null) {
                        log.warn("Server response mentioned unknown topic {}", result.name());
                    } else {
                        ApiError error = new ApiError(result.errorCode(), result.errorMessage());
                        if (error.isFailure()) {
                            if (error.is(Errors.THROTTLING_QUOTA_EXCEEDED)) {
                                ThrottlingQuotaExceededException quotaExceededException = new ThrottlingQuotaExceededException(
                                    response.throttleTimeMs(), error.messageWithFallback());
                                if (options.shouldRetryOnQuotaViolation()) {
                                    // Queue the topic for another attempt and remember the
                                    // quota error so it can be surfaced if the retry times out.
                                    retryTopics.add(topics.find(result.name()).duplicate());
                                    retryTopicQuotaExceededExceptions.put(result.name(), quotaExceededException);
                                } else {
                                    future.completeExceptionally(quotaExceededException);
                                }
                            } else {
                                future.completeExceptionally(error.exception());
                            }
                        } else {
                            TopicMetadataAndConfig topicMetadataAndConfig;
                            if (result.topicConfigErrorCode() != Errors.NONE.code()) {
                                topicMetadataAndConfig = new TopicMetadataAndConfig(
                                    Errors.forCode(result.topicConfigErrorCode()).exception());
                            } else if (result.numPartitions() == CreateTopicsResult.UNKNOWN) {
                                // Broker did not include topic metadata/configs in the response.
                                topicMetadataAndConfig = new TopicMetadataAndConfig(new UnsupportedVersionException(
                                    "Topic metadata and configs in CreateTopics response not supported"));
                            } else {
                                List<CreatableTopicConfigs> configs = result.configs();
                                Config topicConfig = new Config(configs.stream()
                                    .map(this::configEntry)
                                    .collect(Collectors.toSet()));
                                topicMetadataAndConfig = new TopicMetadataAndConfig(result.topicId(), result.numPartitions(),
                                    result.replicationFactor(),
                                    topicConfig);
                            }
                            future.complete(topicMetadataAndConfig);
                        }
                    }
                }
                // If there are topics to retry, retry them; complete unrealized futures otherwise.
                if (retryTopics.isEmpty()) {
                    // The server should send back a response for every topic. But do a sanity check anyway.
                    completeUnrealizedFutures(futures.entrySet().stream(),
                        topic -> "The controller response did not contain a result for topic " + topic);
                } else {
                    final long now = time.milliseconds();
                    final Call call = getCreateTopicsCall(options, futures, retryTopics,
                        retryTopicQuotaExceededExceptions, now, deadline);
                    runnable.call(call, now);
                }
            }

            // Convert a wire-format topic config entry into a public ConfigEntry.
            private ConfigEntry configEntry(CreatableTopicConfigs config) {
                return new ConfigEntry(
                    config.name(),
                    config.value(),
                    configSource(DescribeConfigsResponse.ConfigSource.forId(config.configSource())),
                    config.isSensitive(),
                    config.readOnly(),
                    Collections.emptyList(),
                    null,
                    null);
            }

            @Override
            void handleFailure(Throwable throwable) {
                // If there were any topics retries due to a quota exceeded exception, we propagate
                // the initial error back to the caller if the request timed out.
                maybeCompleteQuotaExceededException(options.shouldRetryOnQuotaViolation(),
                    throwable, futures, quotaExceededExceptions, (int) (time.milliseconds() - now));
                // Fail all the other remaining futures
                completeAllExceptionally(futures.values(), throwable);
            }
        };
    }
@Override
public DeleteTopicsResult deleteTopics(final TopicCollection topics,
final DeleteTopicsOptions options) {
if (topics instanceof TopicIdCollection)
return DeleteTopicsResult.ofTopicIds(handleDeleteTopicsUsingIds(((TopicIdCollection) topics).topicIds(), options));
else if (topics instanceof TopicNameCollection)
return DeleteTopicsResult.ofTopicNames(handleDeleteTopicsUsingNames(((TopicNameCollection) topics).topicNames(), options));
else
throw new IllegalArgumentException("The TopicCollection: " + topics + " provided did not match any supported classes for deleteTopics.");
}
private Map<String, KafkaFuture<Void>> handleDeleteTopicsUsingNames(final Collection<String> topicNames,
final DeleteTopicsOptions options) {
final Map<String, KafkaFutureImpl<Void>> topicFutures = new HashMap<>(topicNames.size());
final List<String> validTopicNames = new ArrayList<>(topicNames.size());
for (String topicName : topicNames) {
if (topicNameIsUnrepresentable(topicName)) {
KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
future.completeExceptionally(new InvalidTopicException("The given topic name '" +
topicName + "' cannot be represented in a request."));
topicFutures.put(topicName, future);
} else if (!topicFutures.containsKey(topicName)) {
topicFutures.put(topicName, new KafkaFutureImpl<>());
validTopicNames.add(topicName);
}
}
if (!validTopicNames.isEmpty()) {
final long now = time.milliseconds();
final long deadline = calcDeadlineMs(now, options.timeoutMs());
final Call call = getDeleteTopicsCall(options, topicFutures, validTopicNames,
Collections.emptyMap(), now, deadline);
runnable.call(call, now);
}
return new HashMap<>(topicFutures);
}
private Map<Uuid, KafkaFuture<Void>> handleDeleteTopicsUsingIds(final Collection<Uuid> topicIds,
final DeleteTopicsOptions options) {
final Map<Uuid, KafkaFutureImpl<Void>> topicFutures = new HashMap<>(topicIds.size());
final List<Uuid> validTopicIds = new ArrayList<>(topicIds.size());
for (Uuid topicId : topicIds) {
if (topicId.equals(Uuid.ZERO_UUID)) {
KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
future.completeExceptionally(new InvalidTopicException("The given topic ID '" +
topicId + "' cannot be represented in a request."));
topicFutures.put(topicId, future);
} else if (!topicFutures.containsKey(topicId)) {
topicFutures.put(topicId, new KafkaFutureImpl<>());
validTopicIds.add(topicId);
}
}
if (!validTopicIds.isEmpty()) {
final long now = time.milliseconds();
final long deadline = calcDeadlineMs(now, options.timeoutMs());
final Call call = getDeleteTopicsWithIdsCall(options, topicFutures, validTopicIds,
Collections.emptyMap(), now, deadline);
runnable.call(call, now);
}
return new HashMap<>(topicFutures);
}
    /**
     * Build the (possibly retried) DeleteTopics call keyed by topic name.
     *
     * @param options                 request options (quota-retry behavior, ...)
     * @param futures                 per-topic result futures to complete
     * @param topics                  the topic names to delete in this attempt
     * @param quotaExceededExceptions quota errors from a previous attempt, surfaced on timeout
     * @param now                     current time in milliseconds
     * @param deadline                absolute deadline shared by all retries of this call
     */
    private Call getDeleteTopicsCall(final DeleteTopicsOptions options,
                                     final Map<String, KafkaFutureImpl<Void>> futures,
                                     final List<String> topics,
                                     final Map<String, ThrottlingQuotaExceededException> quotaExceededExceptions,
                                     final long now,
                                     final long deadline) {
        return new Call("deleteTopics", deadline, new ControllerNodeProvider()) {
            @Override
            DeleteTopicsRequest.Builder createRequest(int timeoutMs) {
                return new DeleteTopicsRequest.Builder(
                    new DeleteTopicsRequestData()
                        .setTopicNames(topics)
                        .setTimeoutMs(timeoutMs));
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                // Check for controller change
                handleNotControllerError(abstractResponse);
                // Handle server responses for particular topics.
                final DeleteTopicsResponse response = (DeleteTopicsResponse) abstractResponse;
                final List<String> retryTopics = new ArrayList<>();
                final Map<String, ThrottlingQuotaExceededException> retryTopicQuotaExceededExceptions = new HashMap<>();
                for (DeletableTopicResult result : response.data().responses()) {
                    KafkaFutureImpl<Void> future = futures.get(result.name());
                    if (future == null) {
                        log.warn("Server response mentioned unknown topic {}", result.name());
                    } else {
                        ApiError error = new ApiError(result.errorCode(), result.errorMessage());
                        if (error.isFailure()) {
                            if (error.is(Errors.THROTTLING_QUOTA_EXCEEDED)) {
                                ThrottlingQuotaExceededException quotaExceededException = new ThrottlingQuotaExceededException(
                                    response.throttleTimeMs(), error.messageWithFallback());
                                if (options.shouldRetryOnQuotaViolation()) {
                                    // Queue the topic for another attempt and remember the
                                    // quota error so it can be surfaced if the retry times out.
                                    retryTopics.add(result.name());
                                    retryTopicQuotaExceededExceptions.put(result.name(), quotaExceededException);
                                } else {
                                    future.completeExceptionally(quotaExceededException);
                                }
                            } else {
                                future.completeExceptionally(error.exception());
                            }
                        } else {
                            future.complete(null);
                        }
                    }
                }
                // If there are topics to retry, retry them; complete unrealized futures otherwise.
                if (retryTopics.isEmpty()) {
                    // The server should send back a response for every topic. But do a sanity check anyway.
                    completeUnrealizedFutures(futures.entrySet().stream(),
                        topic -> "The controller response did not contain a result for topic " + topic);
                } else {
                    final long now = time.milliseconds();
                    final Call call = getDeleteTopicsCall(options, futures, retryTopics,
                        retryTopicQuotaExceededExceptions, now, deadline);
                    runnable.call(call, now);
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                // If there were any topics retries due to a quota exceeded exception, we propagate
                // the initial error back to the caller if the request timed out.
                maybeCompleteQuotaExceededException(options.shouldRetryOnQuotaViolation(),
                    throwable, futures, quotaExceededExceptions, (int) (time.milliseconds() - now));
                // Fail all the other remaining futures
                completeAllExceptionally(futures.values(), throwable);
            }
        };
    }
    /**
     * Build the (possibly retried) DeleteTopics call keyed by topic ID.
     *
     * @param options                 request options (quota-retry behavior, ...)
     * @param futures                 per-topic-ID result futures to complete
     * @param topicIds                the topic IDs to delete in this attempt
     * @param quotaExceededExceptions quota errors from a previous attempt, surfaced on timeout
     * @param now                     current time in milliseconds
     * @param deadline                absolute deadline shared by all retries of this call
     */
    private Call getDeleteTopicsWithIdsCall(final DeleteTopicsOptions options,
                                            final Map<Uuid, KafkaFutureImpl<Void>> futures,
                                            final List<Uuid> topicIds,
                                            final Map<Uuid, ThrottlingQuotaExceededException> quotaExceededExceptions,
                                            final long now,
                                            final long deadline) {
        return new Call("deleteTopics", deadline, new ControllerNodeProvider()) {
            @Override
            DeleteTopicsRequest.Builder createRequest(int timeoutMs) {
                return new DeleteTopicsRequest.Builder(
                    new DeleteTopicsRequestData()
                        .setTopics(topicIds.stream().map(
                            topic -> new DeleteTopicState().setTopicId(topic)).collect(Collectors.toList()))
                        .setTimeoutMs(timeoutMs));
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                // Check for controller change
                handleNotControllerError(abstractResponse);
                // Handle server responses for particular topics.
                final DeleteTopicsResponse response = (DeleteTopicsResponse) abstractResponse;
                final List<Uuid> retryTopics = new ArrayList<>();
                final Map<Uuid, ThrottlingQuotaExceededException> retryTopicQuotaExceededExceptions = new HashMap<>();
                for (DeletableTopicResult result : response.data().responses()) {
                    KafkaFutureImpl<Void> future = futures.get(result.topicId());
                    if (future == null) {
                        log.warn("Server response mentioned unknown topic ID {}", result.topicId());
                    } else {
                        ApiError error = new ApiError(result.errorCode(), result.errorMessage());
                        if (error.isFailure()) {
                            if (error.is(Errors.THROTTLING_QUOTA_EXCEEDED)) {
                                ThrottlingQuotaExceededException quotaExceededException = new ThrottlingQuotaExceededException(
                                    response.throttleTimeMs(), error.messageWithFallback());
                                if (options.shouldRetryOnQuotaViolation()) {
                                    // Queue the topic for another attempt and remember the
                                    // quota error so it can be surfaced if the retry times out.
                                    retryTopics.add(result.topicId());
                                    retryTopicQuotaExceededExceptions.put(result.topicId(), quotaExceededException);
                                } else {
                                    future.completeExceptionally(quotaExceededException);
                                }
                            } else {
                                future.completeExceptionally(error.exception());
                            }
                        } else {
                            future.complete(null);
                        }
                    }
                }
                // If there are topics to retry, retry them; complete unrealized futures otherwise.
                if (retryTopics.isEmpty()) {
                    // The server should send back a response for every topic. But do a sanity check anyway.
                    completeUnrealizedFutures(futures.entrySet().stream(),
                        topic -> "The controller response did not contain a result for topic " + topic);
                } else {
                    final long now = time.milliseconds();
                    final Call call = getDeleteTopicsWithIdsCall(options, futures, retryTopics,
                        retryTopicQuotaExceededExceptions, now, deadline);
                    runnable.call(call, now);
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                // If there were any topics retries due to a quota exceeded exception, we propagate
                // the initial error back to the caller if the request timed out.
                maybeCompleteQuotaExceededException(options.shouldRetryOnQuotaViolation(),
                    throwable, futures, quotaExceededExceptions, (int) (time.milliseconds() - now));
                // Fail all the other remaining futures
                completeAllExceptionally(futures.values(), throwable);
            }
        };
    }
@Override
public ListTopicsResult listTopics(final ListTopicsOptions options) {
final KafkaFutureImpl<Map<String, TopicListing>> topicListingFuture = new KafkaFutureImpl<>();
final long now = time.milliseconds();
runnable.call(new Call("listTopics", calcDeadlineMs(now, options.timeoutMs()),
new LeastLoadedNodeProvider()) {
@Override
MetadataRequest.Builder createRequest(int timeoutMs) {
return MetadataRequest.Builder.allTopics();
}
@Override
void handleResponse(AbstractResponse abstractResponse) {
MetadataResponse response = (MetadataResponse) abstractResponse;
Map<String, TopicListing> topicListing = new HashMap<>();
for (MetadataResponse.TopicMetadata topicMetadata : response.topicMetadata()) {
String topicName = topicMetadata.topic();
boolean isInternal = topicMetadata.isInternal();
if (!topicMetadata.isInternal() || options.shouldListInternal())
topicListing.put(topicName, new TopicListing(topicName, topicMetadata.topicId(), isInternal));
}
topicListingFuture.complete(topicListing);
}
@Override
void handleFailure(Throwable throwable) {
topicListingFuture.completeExceptionally(throwable);
}
}, now);
return new ListTopicsResult(topicListingFuture);
}
@Override
public DescribeTopicsResult describeTopics(final TopicCollection topics, DescribeTopicsOptions options) {
if (topics instanceof TopicIdCollection)
return DescribeTopicsResult.ofTopicIds(handleDescribeTopicsByIds(((TopicIdCollection) topics).topicIds(), options));
else if (topics instanceof TopicNameCollection)
return DescribeTopicsResult.ofTopicNames(handleDescribeTopicsByNamesWithDescribeTopicPartitionsApi(((TopicNameCollection) topics).topicNames(), options));
else
throw new IllegalArgumentException("The TopicCollection: " + topics + " provided did not match any supported classes for describeTopics.");
}
    /**
     * Build a describeTopics call that uses the Metadata API. This is the
     * fallback path used when DescribeTopicPartitions is unsupported.
     *
     * @param topicNamesList names of the topics to describe
     * @param topicFutures   per-topic futures to complete with descriptions
     * @param options        describe options (e.g. authorized operations)
     * @param now            current time in milliseconds
     */
    private Call generateDescribeTopicsCallWithMetadataApi(
        List<String> topicNamesList,
        Map<String, KafkaFutureImpl<TopicDescription>> topicFutures,
        DescribeTopicsOptions options,
        long now
    ) {
        return new Call("describeTopics", calcDeadlineMs(now, options.timeoutMs()),
            new LeastLoadedNodeProvider()) {
            // Cleared after an UnsupportedVersionException so the retry falls
            // back to the all-topics request form that older brokers accept.
            private boolean supportsDisablingTopicCreation = true;

            @Override
            MetadataRequest.Builder createRequest(int timeoutMs) {
                if (supportsDisablingTopicCreation)
                    return new MetadataRequest.Builder(new MetadataRequestData()
                        .setTopics(convertToMetadataRequestTopic(topicNamesList))
                        .setAllowAutoTopicCreation(false)
                        .setIncludeTopicAuthorizedOperations(options.includeAuthorizedOperations()));
                else
                    return MetadataRequest.Builder.allTopics();
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                MetadataResponse response = (MetadataResponse) abstractResponse;
                // Handle server responses for particular topics.
                Cluster cluster = response.buildCluster();
                Map<String, Errors> errors = response.errors();
                for (Map.Entry<String, KafkaFutureImpl<TopicDescription>> entry : topicFutures.entrySet()) {
                    String topicName = entry.getKey();
                    KafkaFutureImpl<TopicDescription> future = entry.getValue();
                    Errors topicError = errors.get(topicName);
                    if (topicError != null) {
                        future.completeExceptionally(topicError.exception());
                        continue;
                    }
                    if (!cluster.topics().contains(topicName)) {
                        future.completeExceptionally(new UnknownTopicOrPartitionException("Topic " + topicName + " not found."));
                        continue;
                    }
                    Uuid topicId = cluster.topicId(topicName);
                    Integer authorizedOperations = response.topicAuthorizedOperations(topicName).get();
                    TopicDescription topicDescription = getTopicDescriptionFromCluster(cluster, topicName, topicId, authorizedOperations);
                    future.complete(topicDescription);
                }
            }

            @Override
            boolean handleUnsupportedVersionException(UnsupportedVersionException exception) {
                // Retry exactly once with the fallback request form.
                if (supportsDisablingTopicCreation) {
                    supportsDisablingTopicCreation = false;
                    return true;
                }
                return false;
            }

            @Override
            void handleFailure(Throwable throwable) {
                completeAllExceptionally(topicFutures.values(), throwable);
            }
        };
    }
    /**
     * Build a describeTopics call based on the DescribeTopicPartitions API.
     * <p>
     * Responses are paginated via a cursor: a topic whose partitions span more
     * than one response is accumulated in {@code partiallyFinishedTopicDescription}
     * and its future is completed only once all of its partitions have arrived.
     * The call re-enqueues itself until {@code topicsRequests} is empty. On an
     * UnsupportedVersionException it falls back to the Metadata-API variant.
     *
     * @param topicNamesList names of the topics to describe
     * @param topicFutures   per-topic futures to complete with descriptions
     * @param nodes          broker-id to node mapping used to resolve replicas
     * @param options        describe options (pagination limit, authorized operations)
     * @param now            current time in milliseconds
     */
    private Call generateDescribeTopicsCallWithDescribeTopicPartitionsApi(
        List<String> topicNamesList,
        Map<String, KafkaFutureImpl<TopicDescription>> topicFutures,
        Map<Integer, Node> nodes,
        DescribeTopicsOptions options,
        long now
    ) {
        // Sorted so the request order matches the cursor ordering of responses.
        final Map<String, TopicRequest> topicsRequests = new LinkedHashMap<>();
        topicNamesList.stream().sorted().forEach(topic ->
            topicsRequests.put(topic, new TopicRequest().setName(topic))
        );
        return new Call("describeTopicPartitions", calcDeadlineMs(now, options.timeoutMs()),
            new LeastLoadedNodeProvider()) {
            // Accumulates partitions for a topic that was cut off by pagination.
            TopicDescription partiallyFinishedTopicDescription = null;

            @Override
            DescribeTopicPartitionsRequest.Builder createRequest(int timeoutMs) {
                DescribeTopicPartitionsRequestData request = new DescribeTopicPartitionsRequestData()
                    .setTopics(new ArrayList<>(topicsRequests.values()))
                    .setResponsePartitionLimit(options.partitionSizeLimitPerResponse());
                if (partiallyFinishedTopicDescription != null) {
                    // If the previous cursor points to partition 0, it will not be set here. Instead, the previous
                    // cursor topic will be the first topic in the request.
                    request.setCursor(new DescribeTopicPartitionsRequestData.Cursor()
                        .setTopicName(partiallyFinishedTopicDescription.name())
                        .setPartitionIndex(partiallyFinishedTopicDescription.partitions().size())
                    );
                }
                return new DescribeTopicPartitionsRequest.Builder(request);
            }

            @SuppressWarnings("NPathComplexity")
            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                DescribeTopicPartitionsResponse response = (DescribeTopicPartitionsResponse) abstractResponse;
                DescribeTopicPartitionsResponseData.Cursor responseCursor = response.data().nextCursor();
                // The topicDescription for the cursor topic of the current batch.
                TopicDescription nextTopicDescription = null;
                for (DescribeTopicPartitionsResponseTopic topic : response.data().topics()) {
                    String topicName = topic.name();
                    Errors error = Errors.forCode(topic.errorCode());

                    KafkaFutureImpl<TopicDescription> future = topicFutures.get(topicName);
                    if (error != Errors.NONE) {
                        // Fail the topic and drop any cursor pointing at it.
                        future.completeExceptionally(error.exception());
                        topicsRequests.remove(topicName);
                        if (responseCursor != null && responseCursor.topicName().equals(topicName)) {
                            responseCursor = null;
                        }
                        continue;
                    }

                    TopicDescription currentTopicDescription = getTopicDescriptionFromDescribeTopicsResponseTopic(topic, nodes, options.includeAuthorizedOperations());

                    if (partiallyFinishedTopicDescription != null && partiallyFinishedTopicDescription.name().equals(topicName)) {
                        // Add the partitions for the cursor topic of the previous batch.
                        partiallyFinishedTopicDescription.partitions().addAll(currentTopicDescription.partitions());
                        continue;
                    }

                    if (responseCursor != null && responseCursor.topicName().equals(topicName)) {
                        // In the same batch of result, it may need to handle the partitions for the previous cursor
                        // topic and the current cursor topic. Cache the result in the nextTopicDescription.
                        nextTopicDescription = currentTopicDescription;
                        continue;
                    }

                    topicsRequests.remove(topicName);
                    future.complete(currentTopicDescription);
                }

                if (partiallyFinishedTopicDescription != null &&
                    (responseCursor == null || !responseCursor.topicName().equals(partiallyFinishedTopicDescription.name()))) {
                    // We can't simply check nextTopicDescription != null here to close the partiallyFinishedTopicDescription.
                    // Because the responseCursor topic may not show in the response.
                    String topicName = partiallyFinishedTopicDescription.name();
                    topicFutures.get(topicName).complete(partiallyFinishedTopicDescription);
                    topicsRequests.remove(topicName);
                    partiallyFinishedTopicDescription = null;
                }
                if (nextTopicDescription != null) {
                    partiallyFinishedTopicDescription = nextTopicDescription;
                }

                if (!topicsRequests.isEmpty()) {
                    // More topics (or partitions) remain; issue the next page.
                    runnable.call(this, time.milliseconds());
                }
            }

            @Override
            boolean handleUnsupportedVersionException(UnsupportedVersionException exception) {
                // Fall back to the Metadata-API implementation for old brokers.
                final long now = time.milliseconds();
                runnable.call(generateDescribeTopicsCallWithMetadataApi(topicNamesList, topicFutures, options, now), now);
                return false;
            }

            @Override
            void handleFailure(Throwable throwable) {
                // The UnsupportedVersionException case is handled above by falling back.
                if (!(throwable instanceof UnsupportedVersionException)) {
                    completeAllExceptionally(topicFutures.values(), throwable);
                }
            }
        };
    }
private Map<String, KafkaFuture<TopicDescription>> handleDescribeTopicsByNamesWithDescribeTopicPartitionsApi(
final Collection<String> topicNames,
DescribeTopicsOptions options
) {
final Map<String, KafkaFutureImpl<TopicDescription>> topicFutures = new HashMap<>(topicNames.size());
final ArrayList<String> topicNamesList = new ArrayList<>();
for (String topicName : topicNames) {
if (topicNameIsUnrepresentable(topicName)) {
KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
future.completeExceptionally(new InvalidTopicException("The given topic name '" +
topicName + "' cannot be represented in a request."));
topicFutures.put(topicName, future);
} else if (!topicFutures.containsKey(topicName)) {
topicFutures.put(topicName, new KafkaFutureImpl<>());
topicNamesList.add(topicName);
}
}
if (topicNamesList.isEmpty()) {
return new HashMap<>(topicFutures);
}
// First, we need to retrieve the node info.
DescribeClusterResult clusterResult = describeCluster(new DescribeClusterOptions().timeoutMs(options.timeoutMs()));
clusterResult.nodes().whenComplete(
(nodes, exception) -> {
if (exception != null) {
completeAllExceptionally(topicFutures.values(), exception);
return;
}
final long now = time.milliseconds();
Map<Integer, Node> nodeIdMap = nodes.stream().collect(Collectors.toMap(Node::id, node -> node));
runnable.call(
generateDescribeTopicsCallWithDescribeTopicPartitionsApi(topicNamesList, topicFutures, nodeIdMap, options, now),
now
);
});
return new HashMap<>(topicFutures);
}
    /**
     * Describe topics by topic ID using a Metadata request. Unrepresentable
     * IDs fail immediately with InvalidTopicException; duplicate IDs share a
     * single future.
     */
    private Map<Uuid, KafkaFuture<TopicDescription>> handleDescribeTopicsByIds(Collection<Uuid> topicIds, DescribeTopicsOptions options) {
        final Map<Uuid, KafkaFutureImpl<TopicDescription>> topicFutures = new HashMap<>(topicIds.size());
        final List<Uuid> topicIdsList = new ArrayList<>();
        for (Uuid topicId : topicIds) {
            if (topicIdIsUnrepresentable(topicId)) {
                KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
                future.completeExceptionally(new InvalidTopicException("The given topic id '" +
                    topicId + "' cannot be represented in a request."));
                topicFutures.put(topicId, future);
            } else if (!topicFutures.containsKey(topicId)) {
                topicFutures.put(topicId, new KafkaFutureImpl<>());
                topicIdsList.add(topicId);
            }
        }
        final long now = time.milliseconds();
        Call call = new Call("describeTopicsWithIds", calcDeadlineMs(now, options.timeoutMs()),
            new LeastLoadedNodeProvider()) {
            @Override
            MetadataRequest.Builder createRequest(int timeoutMs) {
                return new MetadataRequest.Builder(new MetadataRequestData()
                    .setTopics(convertTopicIdsToMetadataRequestTopic(topicIdsList))
                    .setAllowAutoTopicCreation(false)
                    .setIncludeTopicAuthorizedOperations(options.includeAuthorizedOperations()));
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                MetadataResponse response = (MetadataResponse) abstractResponse;
                // Handle server responses for particular topics.
                Cluster cluster = response.buildCluster();
                Map<Uuid, Errors> errors = response.errorsByTopicId();
                for (Map.Entry<Uuid, KafkaFutureImpl<TopicDescription>> entry : topicFutures.entrySet()) {
                    Uuid topicId = entry.getKey();
                    KafkaFutureImpl<TopicDescription> future = entry.getValue();

                    // A missing name mapping means the cluster does not know this ID.
                    String topicName = cluster.topicName(topicId);
                    if (topicName == null) {
                        future.completeExceptionally(new UnknownTopicIdException("TopicId " + topicId + " not found."));
                        continue;
                    }
                    Errors topicError = errors.get(topicId);
                    if (topicError != null) {
                        future.completeExceptionally(topicError.exception());
                        continue;
                    }

                    Integer authorizedOperations = response.topicAuthorizedOperations(topicName).get();
                    TopicDescription topicDescription = getTopicDescriptionFromCluster(cluster, topicName, topicId, authorizedOperations);
                    future.complete(topicDescription);
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                completeAllExceptionally(topicFutures.values(), throwable);
            }
        };
        if (!topicIdsList.isEmpty()) {
            runnable.call(call, now);
        }
        return new HashMap<>(topicFutures);
    }
private TopicDescription getTopicDescriptionFromDescribeTopicsResponseTopic(
DescribeTopicPartitionsResponseTopic topic,
Map<Integer, Node> nodes,
boolean includeAuthorizedOperations
) {
List<DescribeTopicPartitionsResponsePartition> partitionInfos = topic.partitions();
List<TopicPartitionInfo> partitions = new ArrayList<>(partitionInfos.size());
for (DescribeTopicPartitionsResponsePartition partitionInfo : partitionInfos) {
partitions.add(DescribeTopicPartitionsResponse.partitionToTopicPartitionInfo(partitionInfo, nodes));
}
Set<AclOperation> authorisedOperations = includeAuthorizedOperations ? validAclOperations(topic.topicAuthorizedOperations()) : null;
return new TopicDescription(topic.name(), topic.isInternal(), partitions, authorisedOperations, topic.topicId());
}
private TopicDescription getTopicDescriptionFromCluster(Cluster cluster, String topicName, Uuid topicId,
Integer authorizedOperations) {
boolean isInternal = cluster.internalTopics().contains(topicName);
List<PartitionInfo> partitionInfos = cluster.partitionsForTopic(topicName);
List<TopicPartitionInfo> partitions = new ArrayList<>(partitionInfos.size());
for (PartitionInfo partitionInfo : partitionInfos) {
TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(
partitionInfo.partition(), leader(partitionInfo), Arrays.asList(partitionInfo.replicas()),
Arrays.asList(partitionInfo.inSyncReplicas()));
partitions.add(topicPartitionInfo);
}
partitions.sort(Comparator.comparingInt(TopicPartitionInfo::partition));
return new TopicDescription(topicName, isInternal, partitions, validAclOperations(authorizedOperations), topicId);
}
private Node leader(PartitionInfo partitionInfo) {
if (partitionInfo.leader() == null || partitionInfo.leader().id() == Node.noNode().id())
return null;
return partitionInfo.leader();
}
    /**
     * Describes the cluster: nodes, controller, cluster id and (optionally) authorized
     * operations. Prefers the DescribeCluster RPC; on brokers too old to support it,
     * transparently retries once with a topic-less MetadataRequest (see
     * {@code handleUnsupportedVersionException}).
     */
    @Override
    public DescribeClusterResult describeCluster(DescribeClusterOptions options) {
        final KafkaFutureImpl<Collection<Node>> describeClusterFuture = new KafkaFutureImpl<>();
        final KafkaFutureImpl<Node> controllerFuture = new KafkaFutureImpl<>();
        final KafkaFutureImpl<String> clusterIdFuture = new KafkaFutureImpl<>();
        final KafkaFutureImpl<Set<AclOperation>> authorizedOperationsFuture = new KafkaFutureImpl<>();

        final long now = time.milliseconds();
        runnable.call(new Call("listNodes", calcDeadlineMs(now, options.timeoutMs()),
            new LeastLoadedBrokerOrActiveKController()) {
            // Flipped to true when the broker rejects DescribeClusterRequest so the retry
            // uses a MetadataRequest instead.
            private boolean useMetadataRequest = false;

            @Override
            AbstractRequest.Builder<?> createRequest(int timeoutMs) {
                if (!useMetadataRequest) {
                    // Fenced brokers can only be listed via the broker endpoint, not the controller.
                    if (metadataManager.usingBootstrapControllers() && options.includeFencedBrokers()) {
                        throw new IllegalArgumentException("Cannot request fenced brokers from controller endpoint");
                    }
                    return new DescribeClusterRequest.Builder(new DescribeClusterRequestData()
                        .setIncludeClusterAuthorizedOperations(options.includeAuthorizedOperations())
                        .setEndpointType(metadataManager.usingBootstrapControllers() ?
                                EndpointType.CONTROLLER.id() : EndpointType.BROKER.id())
                        .setIncludeFencedBrokers(options.includeFencedBrokers()));
                } else {
                    // Since this only requests node information, it's safe to pass true for allowAutoTopicCreation (and it
                    // simplifies communication with older brokers)
                    return new MetadataRequest.Builder(new MetadataRequestData()
                        .setTopics(Collections.emptyList())
                        .setAllowAutoTopicCreation(true)
                        .setIncludeClusterAuthorizedOperations(
                            options.includeAuthorizedOperations()));
                }
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                if (!useMetadataRequest) {
                    DescribeClusterResponse response = (DescribeClusterResponse) abstractResponse;

                    // A top-level error fails all four futures via handleFailure.
                    Errors error = Errors.forCode(response.data().errorCode());
                    if (error != Errors.NONE) {
                        handleFailure(error.exception(response.data().errorMessage()));
                        return;
                    }
                    Map<Integer, Node> nodes = response.nodes();
                    describeClusterFuture.complete(nodes.values());
                    // Controller is null if controller id is equal to NO_CONTROLLER_ID
                    controllerFuture.complete(nodes.get(response.data().controllerId()));
                    clusterIdFuture.complete(response.data().clusterId());
                    authorizedOperationsFuture.complete(
                        validAclOperations(response.data().clusterAuthorizedOperations()));
                } else {
                    // Fallback path: extract the same information from a MetadataResponse.
                    MetadataResponse response = (MetadataResponse) abstractResponse;
                    describeClusterFuture.complete(response.brokers());
                    controllerFuture.complete(controller(response));
                    clusterIdFuture.complete(response.clusterId());
                    authorizedOperationsFuture.complete(
                        validAclOperations(response.clusterAuthorizedOperations()));
                }
            }

            // Returns null when the metadata response reports no current controller.
            private Node controller(MetadataResponse response) {
                if (response.controller() == null || response.controller().id() == MetadataResponse.NO_CONTROLLER_ID)
                    return null;
                return response.controller();
            }

            @Override
            void handleFailure(Throwable throwable) {
                describeClusterFuture.completeExceptionally(throwable);
                controllerFuture.completeExceptionally(throwable);
                clusterIdFuture.completeExceptionally(throwable);
                authorizedOperationsFuture.completeExceptionally(throwable);
            }

            @Override
            boolean handleUnsupportedVersionException(final UnsupportedVersionException exception) {
                // The MetadataRequest fallback only applies when talking to brokers.
                if (metadataManager.usingBootstrapControllers()) {
                    return false;
                }
                // Only fall back once; a second unsupported-version error is fatal.
                if (useMetadataRequest) {
                    return false;
                }
                // If unsupportedVersion exception was caused by the option to include fenced brokers (only supported for version 2+)
                // then we should not fall back to the metadataRequest.
                if (options.includeFencedBrokers()) {
                    return false;
                }

                useMetadataRequest = true;
                return true;
            }
        }, now);

        return new DescribeClusterResult(describeClusterFuture, controllerFuture, clusterIdFuture,
            authorizedOperationsFuture);
    }
    /**
     * Lists ACL bindings matching the given filter. A filter containing UNKNOWN elements
     * is rejected client-side with {@code InvalidRequestException} before any RPC is sent.
     */
    @Override
    public DescribeAclsResult describeAcls(final AclBindingFilter filter, DescribeAclsOptions options) {
        if (filter.isUnknown()) {
            KafkaFutureImpl<Collection<AclBinding>> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new InvalidRequestException("The AclBindingFilter " +
                    "must not contain UNKNOWN elements."));
            return new DescribeAclsResult(future);
        }
        final long now = time.milliseconds();
        final KafkaFutureImpl<Collection<AclBinding>> future = new KafkaFutureImpl<>();
        runnable.call(new Call("describeAcls", calcDeadlineMs(now, options.timeoutMs()),
            new LeastLoadedBrokerOrActiveKController()) {

            @Override
            DescribeAclsRequest.Builder createRequest(int timeoutMs) {
                return new DescribeAclsRequest.Builder(filter);
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                DescribeAclsResponse response = (DescribeAclsResponse) abstractResponse;
                if (response.error().isFailure()) {
                    future.completeExceptionally(response.error().exception());
                } else {
                    future.complete(DescribeAclsResponse.aclBindings(response.acls()));
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                future.completeExceptionally(throwable);
            }
        }, now);
        return new DescribeAclsResult(future);
    }
    /**
     * Creates the given ACL bindings, returning one future per distinct binding.
     * Bindings whose filter form contains an indefinite field are failed client-side;
     * the rest are sent in a single CreateAcls request. Response results are matched
     * to bindings positionally, in the order they were added to the request.
     */
    @Override
    public CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options) {
        final long now = time.milliseconds();
        final Map<AclBinding, KafkaFutureImpl<Void>> futures = new HashMap<>();
        final List<AclCreation> aclCreations = new ArrayList<>();
        final List<AclBinding> aclBindingsSent = new ArrayList<>();
        for (AclBinding acl : acls) {
            // Duplicate bindings share a single future; only the first occurrence is processed.
            if (futures.get(acl) == null) {
                KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
                futures.put(acl, future);
                String indefinite = acl.toFilter().findIndefiniteField();
                if (indefinite == null) {
                    aclCreations.add(CreateAclsRequest.aclCreation(acl));
                    aclBindingsSent.add(acl);
                } else {
                    // An ACL with an indefinite (wildcard/any) field cannot be created.
                    future.completeExceptionally(new InvalidRequestException("Invalid ACL creation: " +
                        indefinite));
                }
            }
        }
        final CreateAclsRequestData data = new CreateAclsRequestData().setCreations(aclCreations);
        runnable.call(new Call("createAcls", calcDeadlineMs(now, options.timeoutMs()),
            new LeastLoadedBrokerOrActiveKController()) {

            @Override
            CreateAclsRequest.Builder createRequest(int timeoutMs) {
                return new CreateAclsRequest.Builder(data);
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                handleNotControllerError(abstractResponse);
                CreateAclsResponse response = (CreateAclsResponse) abstractResponse;
                List<AclCreationResult> responses = response.results();
                // Results are positional: iterate in lock-step with the bindings we sent.
                Iterator<AclCreationResult> iter = responses.iterator();
                for (AclBinding aclBinding : aclBindingsSent) {
                    KafkaFutureImpl<Void> future = futures.get(aclBinding);
                    if (!iter.hasNext()) {
                        // Sanity check: the broker returned fewer results than requests.
                        future.completeExceptionally(new UnknownServerException(
                            "The broker reported no creation result for the given ACL: " + aclBinding));
                    } else {
                        AclCreationResult creation = iter.next();
                        Errors error = Errors.forCode(creation.errorCode());
                        ApiError apiError = new ApiError(error, creation.errorMessage());
                        if (apiError.isFailure())
                            future.completeExceptionally(apiError.exception());
                        else
                            future.complete(null);
                    }
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                completeAllExceptionally(futures.values(), throwable);
            }
        }, now);
        return new CreateAclsResult(new HashMap<>(futures));
    }
    /**
     * Deletes ACLs matching the given filters, returning one future per distinct filter.
     * All filters are sent in a single DeleteAcls request; per-filter results are matched
     * positionally with the order the filters were added to the request.
     */
    @Override
    public DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options) {
        final long now = time.milliseconds();
        final Map<AclBindingFilter, KafkaFutureImpl<FilterResults>> futures = new HashMap<>();
        final List<AclBindingFilter> aclBindingFiltersSent = new ArrayList<>();
        final List<DeleteAclsFilter> deleteAclsFilters = new ArrayList<>();
        for (AclBindingFilter filter : filters) {
            // Duplicate filters share a single future; only the first occurrence is sent.
            if (futures.get(filter) == null) {
                aclBindingFiltersSent.add(filter);
                deleteAclsFilters.add(DeleteAclsRequest.deleteAclsFilter(filter));
                futures.put(filter, new KafkaFutureImpl<>());
            }
        }
        final DeleteAclsRequestData data = new DeleteAclsRequestData().setFilters(deleteAclsFilters);
        runnable.call(new Call("deleteAcls", calcDeadlineMs(now, options.timeoutMs()),
            new LeastLoadedBrokerOrActiveKController()) {

            @Override
            DeleteAclsRequest.Builder createRequest(int timeoutMs) {
                return new DeleteAclsRequest.Builder(data);
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                handleNotControllerError(abstractResponse);
                DeleteAclsResponse response = (DeleteAclsResponse) abstractResponse;
                List<DeleteAclsResponseData.DeleteAclsFilterResult> results = response.filterResults();
                // Results are positional: iterate in lock-step with the filters we sent.
                Iterator<DeleteAclsResponseData.DeleteAclsFilterResult> iter = results.iterator();
                for (AclBindingFilter bindingFilter : aclBindingFiltersSent) {
                    KafkaFutureImpl<FilterResults> future = futures.get(bindingFilter);
                    if (!iter.hasNext()) {
                        // Sanity check: the broker returned fewer results than filters.
                        future.completeExceptionally(new UnknownServerException(
                            "The broker reported no deletion result for the given filter."));
                    } else {
                        DeleteAclsFilterResult filterResult = iter.next();
                        ApiError error = new ApiError(Errors.forCode(filterResult.errorCode()), filterResult.errorMessage());
                        if (error.isFailure()) {
                            future.completeExceptionally(error.exception());
                        } else {
                            // Each matching ACL carries its own per-binding error (possibly none).
                            List<FilterResult> filterResults = new ArrayList<>();
                            for (DeleteAclsMatchingAcl matchingAcl : filterResult.matchingAcls()) {
                                Errors aclError = Errors.forCode(matchingAcl.errorCode());
                                AclBinding aclBinding = DeleteAclsResponse.aclBinding(matchingAcl);
                                filterResults.add(new FilterResult(aclBinding, aclError.exception(matchingAcl.errorMessage())));
                            }
                            future.complete(new FilterResults(filterResults));
                        }
                    }
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                completeAllExceptionally(futures.values(), throwable);
            }
        }, now);
        return new DeleteAclsResult(new HashMap<>(futures));
    }
    /**
     * Describes configs for the given resources, returning one future per resource.
     * Resources pinned to a specific broker (e.g. broker/broker-logger configs) are
     * grouped under that broker id; resources obtainable anywhere are grouped under a
     * null key and served by the least loaded broker (or active controller). One RPC
     * is issued per group.
     */
    @Override
    public DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options) {
        // Partition the requested config resources based on which broker they must be sent to with the
        // null broker being used for config resources which can be obtained from any broker
        final Map<Integer, Map<ConfigResource, KafkaFutureImpl<Config>>> nodeFutures = new HashMap<>(configResources.size());
        for (ConfigResource resource : configResources) {
            Integer broker = nodeFor(resource);
            nodeFutures.compute(broker, (key, value) -> {
                if (value == null) {
                    value = new HashMap<>();
                }
                value.put(resource, new KafkaFutureImpl<>());
                return value;
            });
        }

        final long now = time.milliseconds();
        for (Map.Entry<Integer, Map<ConfigResource, KafkaFutureImpl<Config>>> entry : nodeFutures.entrySet()) {
            final Integer node = entry.getKey();
            Map<ConfigResource, KafkaFutureImpl<Config>> unified = entry.getValue();

            runnable.call(new Call("describeConfigs", calcDeadlineMs(now, options.timeoutMs()),
                node != null ? new ConstantNodeIdProvider(node, true) : new LeastLoadedBrokerOrActiveKController()) {

                @Override
                DescribeConfigsRequest.Builder createRequest(int timeoutMs) {
                    // A null configurationKeys list requests all keys for each resource.
                    return new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData()
                        .setResources(unified.keySet().stream()
                            .map(config ->
                                new DescribeConfigsRequestData.DescribeConfigsResource()
                                    .setResourceName(config.name())
                                    .setResourceType(config.type().id())
                                    .setConfigurationKeys(null))
                            .collect(Collectors.toList()))
                        .setIncludeSynonyms(options.includeSynonyms())
                        .setIncludeDocumentation(options.includeDocumentation()));
                }

                @Override
                void handleResponse(AbstractResponse abstractResponse) {
                    DescribeConfigsResponse response = (DescribeConfigsResponse) abstractResponse;
                    for (Map.Entry<ConfigResource, DescribeConfigsResponseData.DescribeConfigsResult> entry : response.resultMap().entrySet()) {
                        ConfigResource configResource = entry.getKey();
                        DescribeConfigsResponseData.DescribeConfigsResult describeConfigsResult = entry.getValue();
                        KafkaFutureImpl<Config> future = unified.get(configResource);
                        if (future == null) {
                            // The broker answered for a resource we never asked about; log and skip.
                            if (node != null) {
                                log.warn("The config {} in the response from node {} is not in the request",
                                        configResource, node);
                            } else {
                                log.warn("The config {} in the response from the least loaded broker is not in the request",
                                        configResource);
                            }
                        } else {
                            if (describeConfigsResult.errorCode() != Errors.NONE.code()) {
                                future.completeExceptionally(Errors.forCode(describeConfigsResult.errorCode())
                                        .exception(describeConfigsResult.errorMessage()));
                            } else {
                                future.complete(describeConfigResult(describeConfigsResult));
                            }
                        }
                    }
                    // Fail any futures whose resource was missing from the response.
                    completeUnrealizedFutures(
                        unified.entrySet().stream(),
                        configResource -> "The node response did not contain a result for config resource " + configResource);
                }

                @Override
                void handleFailure(Throwable throwable) {
                    completeAllExceptionally(unified.values(), throwable);
                }
            }, now);
        }

        // Flatten the per-node future maps back into a single resource -> future map.
        return new DescribeConfigsResult(
            nodeFutures.entrySet()
                .stream()
                .flatMap(x -> x.getValue().entrySet().stream())
                .collect(Collectors.toMap(
                    Map.Entry::getKey,
                    Map.Entry::getValue,
                    (oldValue, newValue) -> {
                        // Duplicate keys should not occur, throw an exception to signal this issue
                        throw new IllegalStateException(String.format("Duplicate key for values: %s and %s", oldValue, newValue));
                    },
                    HashMap::new
                ))
        );
    }
private Config describeConfigResult(DescribeConfigsResponseData.DescribeConfigsResult describeConfigsResult) {
return new Config(describeConfigsResult.configs().stream().map(config -> new ConfigEntry(
config.name(),
config.value(),
DescribeConfigsResponse.ConfigSource.forId(config.configSource()).source(),
config.isSensitive(),
config.readOnly(),
(config.synonyms().stream().map(synonym -> new ConfigEntry.ConfigSynonym(synonym.name(), synonym.value(),
DescribeConfigsResponse.ConfigSource.forId(synonym.source()).source()))).collect(Collectors.toList()),
DescribeConfigsResponse.ConfigType.forId(config.configType()).type(),
config.documentation()
)).collect(Collectors.toList()));
}
private ConfigEntry.ConfigSource configSource(DescribeConfigsResponse.ConfigSource source) {
ConfigEntry.ConfigSource configSource;
switch (source) {
case TOPIC_CONFIG:
configSource = ConfigEntry.ConfigSource.DYNAMIC_TOPIC_CONFIG;
break;
case DYNAMIC_BROKER_CONFIG:
configSource = ConfigEntry.ConfigSource.DYNAMIC_BROKER_CONFIG;
break;
case DYNAMIC_DEFAULT_BROKER_CONFIG:
configSource = ConfigEntry.ConfigSource.DYNAMIC_DEFAULT_BROKER_CONFIG;
break;
case STATIC_BROKER_CONFIG:
configSource = ConfigEntry.ConfigSource.STATIC_BROKER_CONFIG;
break;
case DYNAMIC_BROKER_LOGGER_CONFIG:
configSource = ConfigEntry.ConfigSource.DYNAMIC_BROKER_LOGGER_CONFIG;
break;
case DEFAULT_CONFIG:
configSource = ConfigEntry.ConfigSource.DEFAULT_CONFIG;
break;
default:
throw new IllegalArgumentException("Unexpected config source " + source);
}
return configSource;
}
    /**
     * Incrementally alters configs for the given resources, returning one future per
     * resource. Resources are routed either to a specific node or batched into a single
     * request for the default node provider, as described below.
     */
    @Override
    public AlterConfigsResult incrementalAlterConfigs(Map<ConfigResource, Collection<AlterConfigOp>> configs,
                                                      final AlterConfigsOptions options) {
        final Map<ConfigResource, KafkaFutureImpl<Void>> allFutures = new HashMap<>();
        // BROKER_LOGGER requests always go to a specific, constant broker or controller node.
        //
        // BROKER resource changes for a specific (non-default) resource go to either that specific
        // node (if using bootstrap.servers), or directly to the active controller (if using
        // bootstrap.controllers)
        //
        // All other requests go to the least loaded broker (if using bootstrap.servers) or the
        // active controller (if using bootstrap.controllers)
        final Collection<ConfigResource> unifiedRequestResources = new ArrayList<>();

        for (ConfigResource resource : configs.keySet()) {
            Integer node = nodeFor(resource);
            if (metadataManager.usingBootstrapControllers()) {
                // With bootstrap.controllers, only BROKER_LOGGER stays pinned to a node.
                if (!resource.type().equals(ConfigResource.Type.BROKER_LOGGER)) {
                    node = null;
                }
            }
            if (node != null) {
                // Node-pinned resources get their own single-resource request.
                NodeProvider nodeProvider = new ConstantNodeIdProvider(node, true);
                allFutures.putAll(incrementalAlterConfigs(configs, options, Collections.singleton(resource), nodeProvider));
            } else
                unifiedRequestResources.add(resource);
        }
        if (!unifiedRequestResources.isEmpty())
            allFutures.putAll(incrementalAlterConfigs(configs, options, unifiedRequestResources, new LeastLoadedBrokerOrActiveKController()));

        return new AlterConfigsResult(new HashMap<>(allFutures));
    }
    /**
     * Sends one IncrementalAlterConfigs request covering the given subset of resources
     * to the node chosen by {@code nodeProvider}, and returns one future per resource.
     * {@code configs} may contain entries for other resources; only {@code resources}
     * are included in this request.
     */
    private Map<ConfigResource, KafkaFutureImpl<Void>> incrementalAlterConfigs(Map<ConfigResource, Collection<AlterConfigOp>> configs,
                                                                                    final AlterConfigsOptions options,
                                                                                    Collection<ConfigResource> resources,
                                                                                    NodeProvider nodeProvider) {
        final Map<ConfigResource, KafkaFutureImpl<Void>> futures = new HashMap<>();
        for (ConfigResource resource : resources)
            futures.put(resource, new KafkaFutureImpl<>());

        final long now = time.milliseconds();
        runnable.call(new Call("incrementalAlterConfigs", calcDeadlineMs(now, options.timeoutMs()), nodeProvider) {

            @Override
            public IncrementalAlterConfigsRequest.Builder createRequest(int timeoutMs) {
                return new IncrementalAlterConfigsRequest.Builder(resources, configs, options.shouldValidateOnly());
            }

            @Override
            public void handleResponse(AbstractResponse abstractResponse) {
                handleNotControllerError(abstractResponse);
                IncrementalAlterConfigsResponse response = (IncrementalAlterConfigsResponse) abstractResponse;
                // Complete each resource's future from its per-resource ApiError.
                Map<ConfigResource, ApiError> errors = IncrementalAlterConfigsResponse.fromResponseData(response.data());
                for (Map.Entry<ConfigResource, KafkaFutureImpl<Void>> entry : futures.entrySet()) {
                    KafkaFutureImpl<Void> future = entry.getValue();
                    ApiException exception = errors.get(entry.getKey()).exception();
                    if (exception != null) {
                        future.completeExceptionally(exception);
                    } else {
                        future.complete(null);
                    }
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                completeAllExceptionally(futures.values(), throwable);
            }
        }, now);
        return futures;
    }
    /**
     * Moves the given replicas to the requested log directories, returning one future per
     * replica. Assignments are grouped by broker and one AlterReplicaLogDirs request is
     * sent per broker; a broker-level failure only fails that broker's replica futures.
     */
    @Override
    public AlterReplicaLogDirsResult alterReplicaLogDirs(Map<TopicPartitionReplica, String> replicaAssignment, final AlterReplicaLogDirsOptions options) {
        final Map<TopicPartitionReplica, KafkaFutureImpl<Void>> futures = new HashMap<>(replicaAssignment.size());

        for (TopicPartitionReplica replica : replicaAssignment.keySet())
            futures.put(replica, new KafkaFutureImpl<>());

        // Build the per-broker request data: broker -> log dir -> topic -> partitions.
        Map<Integer, AlterReplicaLogDirsRequestData> replicaAssignmentByBroker = new HashMap<>();
        for (Map.Entry<TopicPartitionReplica, String> entry : replicaAssignment.entrySet()) {
            TopicPartitionReplica replica = entry.getKey();
            String logDir = entry.getValue();
            int brokerId = replica.brokerId();
            AlterReplicaLogDirsRequestData value = replicaAssignmentByBroker.computeIfAbsent(brokerId,
                key -> new AlterReplicaLogDirsRequestData());

            AlterReplicaLogDir alterReplicaLogDir = value.dirs().find(logDir);
            if (alterReplicaLogDir == null) {
                alterReplicaLogDir = new AlterReplicaLogDir();
                alterReplicaLogDir.setPath(logDir);
                value.dirs().add(alterReplicaLogDir);
            }

            AlterReplicaLogDirTopic alterReplicaLogDirTopic = alterReplicaLogDir.topics().find(replica.topic());
            if (alterReplicaLogDirTopic == null) {
                alterReplicaLogDirTopic = new AlterReplicaLogDirTopic().setName(replica.topic());
                alterReplicaLogDir.topics().add(alterReplicaLogDirTopic);
            }
            alterReplicaLogDirTopic.partitions().add(replica.partition());
        }

        final long now = time.milliseconds();
        for (Map.Entry<Integer, AlterReplicaLogDirsRequestData> entry : replicaAssignmentByBroker.entrySet()) {
            final int brokerId = entry.getKey();
            final AlterReplicaLogDirsRequestData assignment = entry.getValue();

            runnable.call(new Call("alterReplicaLogDirs", calcDeadlineMs(now, options.timeoutMs()),
                new ConstantNodeIdProvider(brokerId)) {

                @Override
                public AlterReplicaLogDirsRequest.Builder createRequest(int timeoutMs) {
                    return new AlterReplicaLogDirsRequest.Builder(assignment);
                }

                @Override
                public void handleResponse(AbstractResponse abstractResponse) {
                    AlterReplicaLogDirsResponse response = (AlterReplicaLogDirsResponse) abstractResponse;
                    for (AlterReplicaLogDirTopicResult topicResult : response.data().results()) {
                        for (AlterReplicaLogDirPartitionResult partitionResult : topicResult.partitions()) {
                            TopicPartitionReplica replica = new TopicPartitionReplica(
                                topicResult.topicName(), partitionResult.partitionIndex(), brokerId);
                            KafkaFutureImpl<Void> future = futures.get(replica);
                            if (future == null) {
                                // The broker answered for a replica we never asked about; log and skip.
                                log.warn("The partition {} in the response from broker {} is not in the request",
                                    new TopicPartition(topicResult.topicName(), partitionResult.partitionIndex()),
                                    brokerId);
                            } else if (partitionResult.errorCode() == Errors.NONE.code()) {
                                future.complete(null);
                            } else {
                                future.completeExceptionally(Errors.forCode(partitionResult.errorCode()).exception());
                            }
                        }
                    }
                    // The server should send back a response for every replica. But do a sanity check anyway.
                    completeUnrealizedFutures(
                        futures.entrySet().stream().filter(entry -> entry.getKey().brokerId() == brokerId),
                        replica -> "The response from broker " + brokerId +
                            " did not contain a result for replica " + replica);
                }

                @Override
                void handleFailure(Throwable throwable) {
                    // Only completes the futures of brokerId
                    completeAllExceptionally(
                        futures.entrySet().stream()
                            .filter(entry -> entry.getKey().brokerId() == brokerId)
                            .map(Map.Entry::getValue),
                        throwable);
                }
            }, now);
        }

        return new AlterReplicaLogDirsResult(new HashMap<>(futures));
    }
    /**
     * Describes all log directories on the given brokers, returning one future per broker.
     * One DescribeLogDirs request is sent per broker; a broker answering with no log dir
     * descriptions is treated as an error (see below).
     */
    @Override
    public DescribeLogDirsResult describeLogDirs(Collection<Integer> brokers, DescribeLogDirsOptions options) {
        final Map<Integer, KafkaFutureImpl<Map<String, LogDirDescription>>> futures = new HashMap<>(brokers.size());

        final long now = time.milliseconds();
        for (final Integer brokerId : brokers) {
            KafkaFutureImpl<Map<String, LogDirDescription>> future = new KafkaFutureImpl<>();
            futures.put(brokerId, future);

            runnable.call(new Call("describeLogDirs", calcDeadlineMs(now, options.timeoutMs()),
                new ConstantNodeIdProvider(brokerId)) {

                @Override
                public DescribeLogDirsRequest.Builder createRequest(int timeoutMs) {
                    // Query selected partitions in all log directories
                    return new DescribeLogDirsRequest.Builder(new DescribeLogDirsRequestData().setTopics(null));
                }

                @Override
                public void handleResponse(AbstractResponse abstractResponse) {
                    DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse;
                    Map<String, LogDirDescription> descriptions = logDirDescriptions(response);
                    if (!descriptions.isEmpty()) {
                        future.complete(descriptions);
                    } else {
                        // Up to v3 DescribeLogDirsResponse did not have an error code field, hence it defaults to None
                        Errors error = response.data().errorCode() == Errors.NONE.code()
                                ? Errors.CLUSTER_AUTHORIZATION_FAILED
                                : Errors.forCode(response.data().errorCode());
                        future.completeExceptionally(error.exception());
                    }
                }

                @Override
                void handleFailure(Throwable throwable) {
                    future.completeExceptionally(throwable);
                }
            }, now);
        }

        return new DescribeLogDirsResult(new HashMap<>(futures));
    }
private static Map<String, LogDirDescription> logDirDescriptions(DescribeLogDirsResponse response) {
Map<String, LogDirDescription> result = new HashMap<>(response.data().results().size());
for (DescribeLogDirsResponseData.DescribeLogDirsResult logDirResult : response.data().results()) {
Map<TopicPartition, ReplicaInfo> replicaInfoMap = new HashMap<>();
for (DescribeLogDirsResponseData.DescribeLogDirsTopic t : logDirResult.topics()) {
for (DescribeLogDirsResponseData.DescribeLogDirsPartition p : t.partitions()) {
replicaInfoMap.put(
new TopicPartition(t.name(), p.partitionIndex()),
new ReplicaInfo(p.partitionSize(), p.offsetLag(), p.isFutureKey()));
}
}
result.put(logDirResult.logDir(), new LogDirDescription(
Errors.forCode(logDirResult.errorCode()).exception(),
replicaInfoMap,
logDirResult.totalBytes(),
logDirResult.usableBytes()));
}
return result;
}
    /**
     * Describes the current (and in-progress future) log directory of each given replica,
     * returning one future per replica. Requests are grouped by broker; each broker's
     * response is merged into per-partition {@code ReplicaLogDirInfo} records that track
     * both the current and the future replica directory.
     */
    @Override
    public DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection<TopicPartitionReplica> replicas, DescribeReplicaLogDirsOptions options) {
        final Map<TopicPartitionReplica, KafkaFutureImpl<DescribeReplicaLogDirsResult.ReplicaLogDirInfo>> futures = new HashMap<>(replicas.size());

        for (TopicPartitionReplica replica : replicas) {
            futures.put(replica, new KafkaFutureImpl<>());
        }

        // Group the requested replicas by broker: broker -> topic -> partitions.
        Map<Integer, DescribeLogDirsRequestData> partitionsByBroker = new HashMap<>();

        for (TopicPartitionReplica replica : replicas) {
            DescribeLogDirsRequestData requestData = partitionsByBroker.computeIfAbsent(replica.brokerId(),
                brokerId -> new DescribeLogDirsRequestData());
            DescribableLogDirTopic describableLogDirTopic = requestData.topics().find(replica.topic());
            if (describableLogDirTopic == null) {
                List<Integer> partitions = new ArrayList<>();
                partitions.add(replica.partition());
                describableLogDirTopic = new DescribableLogDirTopic().setTopic(replica.topic())
                        .setPartitions(partitions);
                requestData.topics().add(describableLogDirTopic);
            } else {
                describableLogDirTopic.partitions().add(replica.partition());
            }
        }

        final long now = time.milliseconds();
        for (Map.Entry<Integer, DescribeLogDirsRequestData> entry : partitionsByBroker.entrySet()) {
            final int brokerId = entry.getKey();
            final DescribeLogDirsRequestData topicPartitions = entry.getValue();
            // Accumulator: merges current- and future-replica info per partition as log dirs are scanned.
            final Map<TopicPartition, ReplicaLogDirInfo> replicaDirInfoByPartition = new HashMap<>();
            for (DescribableLogDirTopic topicPartition : topicPartitions.topics()) {
                for (Integer partitionId : topicPartition.partitions()) {
                    replicaDirInfoByPartition.put(new TopicPartition(topicPartition.topic(), partitionId), new ReplicaLogDirInfo());
                }
            }

            runnable.call(new Call("describeReplicaLogDirs", calcDeadlineMs(now, options.timeoutMs()),
                new ConstantNodeIdProvider(brokerId)) {

                @Override
                public DescribeLogDirsRequest.Builder createRequest(int timeoutMs) {
                    // Query selected partitions in all log directories
                    return new DescribeLogDirsRequest.Builder(topicPartitions);
                }

                @Override
                public void handleResponse(AbstractResponse abstractResponse) {
                    DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse;
                    for (Map.Entry<String, LogDirDescription> responseEntry : logDirDescriptions(response).entrySet()) {
                        String logDir = responseEntry.getKey();
                        LogDirDescription logDirInfo = responseEntry.getValue();

                        // No replica info will be provided if the log directory is offline
                        if (logDirInfo.error() instanceof KafkaStorageException)
                            continue;
                        if (logDirInfo.error() != null)
                            handleFailure(new IllegalStateException(
                                "The error " + logDirInfo.error().getClass().getName() + " for log directory " + logDir + " in the response from broker " + brokerId + " is illegal"));

                        for (Map.Entry<TopicPartition, ReplicaInfo> replicaInfoEntry : logDirInfo.replicaInfos().entrySet()) {
                            TopicPartition tp = replicaInfoEntry.getKey();
                            ReplicaInfo replicaInfo = replicaInfoEntry.getValue();
                            ReplicaLogDirInfo replicaLogDirInfo = replicaDirInfoByPartition.get(tp);
                            if (replicaLogDirInfo == null) {
                                log.warn("Server response from broker {} mentioned unknown partition {}", brokerId, tp);
                            } else if (replicaInfo.isFuture()) {
                                // A "future" replica is the destination of an in-progress dir move;
                                // keep the current-dir fields and fill in the future-dir fields.
                                replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(replicaLogDirInfo.getCurrentReplicaLogDir(),
                                                                                        replicaLogDirInfo.getCurrentReplicaOffsetLag(),
                                                                                        logDir,
                                                                                        replicaInfo.offsetLag()));
                            } else {
                                // This dir holds the current replica; preserve any future-dir fields.
                                replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(logDir,
                                                                                        replicaInfo.offsetLag(),
                                                                                        replicaLogDirInfo.getFutureReplicaLogDir(),
                                                                                        replicaLogDirInfo.getFutureReplicaOffsetLag()));
                            }
                        }
                    }

                    for (Map.Entry<TopicPartition, ReplicaLogDirInfo> entry : replicaDirInfoByPartition.entrySet()) {
                        TopicPartition tp = entry.getKey();
                        KafkaFutureImpl<ReplicaLogDirInfo> future = futures.get(new TopicPartitionReplica(tp.topic(), tp.partition(), brokerId));
                        future.complete(entry.getValue());
                    }
                }

                @Override
                void handleFailure(Throwable throwable) {
                    completeAllExceptionally(futures.values(), throwable);
                }
            }, now);
        }

        return new DescribeReplicaLogDirsResult(new HashMap<>(futures));
    }
    /**
     * Increases the partition counts of the given topics, returning one future per topic.
     * All topics go into a single CreatePartitions request to the controller; retry on
     * throttling-quota violations is delegated to {@code getCreatePartitionsCall}.
     */
    @Override
    public CreatePartitionsResult createPartitions(final Map<String, NewPartitions> newPartitions,
                                                   final CreatePartitionsOptions options) {
        final Map<String, KafkaFutureImpl<Void>> futures = new HashMap<>(newPartitions.size());
        final CreatePartitionsTopicCollection topics = new CreatePartitionsTopicCollection(newPartitions.size());
        for (Map.Entry<String, NewPartitions> entry : newPartitions.entrySet()) {
            final String topic = entry.getKey();
            final NewPartitions newPartition = entry.getValue();
            // A null assignment list lets the broker choose replica placement itself.
            List<List<Integer>> newAssignments = newPartition.assignments();
            List<CreatePartitionsAssignment> assignments = newAssignments == null ? null :
                newAssignments.stream()
                    .map(brokerIds -> new CreatePartitionsAssignment().setBrokerIds(brokerIds))
                    .collect(Collectors.toList());
            topics.add(new CreatePartitionsTopic()
                .setName(topic)
                .setCount(newPartition.totalCount())
                .setAssignments(assignments));
            futures.put(topic, new KafkaFutureImpl<>());
        }
        if (!topics.isEmpty()) {
            final long now = time.milliseconds();
            final long deadline = calcDeadlineMs(now, options.timeoutMs());
            final Call call = getCreatePartitionsCall(options, futures, topics,
                Collections.emptyMap(), now, deadline);
            runnable.call(call, now);
        }
        return new CreatePartitionsResult(new HashMap<>(futures));
    }
/**
 * Builds the {@code CreatePartitions} call sent to the controller, completing {@code futures}
 * from the per-topic results and, when the options allow it, re-issuing a follow-up call for
 * topics that were rejected with a throttling-quota error.
 *
 * @param options                 request options (validate-only flag, quota-retry behavior)
 * @param futures                 per-topic futures to complete; shared across all retry attempts
 * @param topics                  the topics included in this particular request attempt
 * @param quotaExceededExceptions quota errors observed on earlier attempts, used to fail the
 *                                corresponding futures with the original cause on timeout
 * @param now                     time (ms) at which this attempt was created
 * @param deadline                absolute deadline (ms) shared by the initial call and all retries
 * @return the call to hand to the admin client's runnable
 */
private Call getCreatePartitionsCall(final CreatePartitionsOptions options,
                                     final Map<String, KafkaFutureImpl<Void>> futures,
                                     final CreatePartitionsTopicCollection topics,
                                     final Map<String, ThrottlingQuotaExceededException> quotaExceededExceptions,
                                     final long now,
                                     final long deadline) {
    return new Call("createPartitions", deadline, new ControllerNodeProvider()) {
        @Override
        public CreatePartitionsRequest.Builder createRequest(int timeoutMs) {
            return new CreatePartitionsRequest.Builder(
                new CreatePartitionsRequestData()
                    .setTopics(topics)
                    .setValidateOnly(options.validateOnly())
                    .setTimeoutMs(timeoutMs));
        }
        @Override
        public void handleResponse(AbstractResponse abstractResponse) {
            // Check for controller change
            handleNotControllerError(abstractResponse);
            // Handle server responses for particular topics.
            final CreatePartitionsResponse response = (CreatePartitionsResponse) abstractResponse;
            // Topics throttled on this attempt that should be retried, plus the quota errors
            // to report for them if the retry never completes before the deadline.
            final CreatePartitionsTopicCollection retryTopics = new CreatePartitionsTopicCollection();
            final Map<String, ThrottlingQuotaExceededException> retryTopicQuotaExceededExceptions = new HashMap<>();
            for (CreatePartitionsTopicResult result : response.data().results()) {
                KafkaFutureImpl<Void> future = futures.get(result.name());
                if (future == null) {
                    // A topic we never asked about; log rather than fail anything.
                    log.warn("Server response mentioned unknown topic {}", result.name());
                } else {
                    ApiError error = new ApiError(result.errorCode(), result.errorMessage());
                    if (error.isFailure()) {
                        if (error.is(Errors.THROTTLING_QUOTA_EXCEEDED)) {
                            ThrottlingQuotaExceededException quotaExceededException = new ThrottlingQuotaExceededException(
                                response.throttleTimeMs(), error.messageWithFallback());
                            if (options.shouldRetryOnQuotaViolation()) {
                                // Duplicate the topic entry: the original collection belongs to
                                // this (already-sent) request attempt.
                                retryTopics.add(topics.find(result.name()).duplicate());
                                retryTopicQuotaExceededExceptions.put(result.name(), quotaExceededException);
                            } else {
                                future.completeExceptionally(quotaExceededException);
                            }
                        } else {
                            future.completeExceptionally(error.exception());
                        }
                    } else {
                        future.complete(null);
                    }
                }
            }
            // If there are topics to retry, retry them; complete unrealized futures otherwise.
            if (retryTopics.isEmpty()) {
                // The server should send back a response for every topic. But do a sanity check anyway.
                completeUnrealizedFutures(futures.entrySet().stream(),
                    topic -> "The controller response did not contain a result for topic " + topic);
            } else {
                // Re-issue a call for the throttled topics only, keeping the original deadline.
                final long now = time.milliseconds();
                final Call call = getCreatePartitionsCall(options, futures, retryTopics,
                    retryTopicQuotaExceededExceptions, now, deadline);
                runnable.call(call, now);
            }
        }
        @Override
        void handleFailure(Throwable throwable) {
            // If there were any topics retries due to a quota exceeded exception, we propagate
            // the initial error back to the caller if the request timed out.
            maybeCompleteQuotaExceededException(options.shouldRetryOnQuotaViolation(),
                throwable, futures, quotaExceededExceptions, (int) (time.milliseconds() - now));
            // Fail all the other remaining futures
            completeAllExceptionally(futures.values(), throwable);
        }
    };
}
/**
 * Deletes records before the given per-partition offsets, routing each partition's request
 * to its current leader via the partition-leader driver machinery.
 *
 * @param recordsToDelete per-partition delete-before offsets
 * @param options         request options; an explicit timeout overrides the API default
 * @return a result exposing one {@code DeletedRecords} future per partition
 */
@Override
public DeleteRecordsResult deleteRecords(final Map<TopicPartition, RecordsToDelete> recordsToDelete,
                                         final DeleteRecordsOptions options) {
    PartitionLeaderStrategy.PartitionLeaderFuture<DeletedRecords> future =
        DeleteRecordsHandler.newFuture(recordsToDelete.keySet(), partitionLeaderCache);
    // Resolve the effective timeout for the handler: options value if set, API default otherwise.
    int timeoutMs = defaultApiTimeoutMs;
    if (options.timeoutMs() != null) {
        timeoutMs = options.timeoutMs();
    }
    DeleteRecordsHandler handler = new DeleteRecordsHandler(recordsToDelete, logContext, timeoutMs);
    // NOTE(review): `options.timeoutMs` below is a field access (no parentheses), unlike the
    // `options.timeoutMs()` call above — presumably reading the protected field on the options
    // base class; confirm this is intentional and both refer to the same value.
    invokeDriver(handler, future, options.timeoutMs);
    return new DeleteRecordsResult(future.all());
}
/**
 * Requests a new delegation token from the least-loaded broker.
 *
 * @param options token creation options: renewer principals, max lifetime,
 *                and optionally an owner when creating a token on another
 *                principal's behalf
 * @return a result whose future completes with the created {@code DelegationToken}
 */
@Override
public CreateDelegationTokenResult createDelegationToken(final CreateDelegationTokenOptions options) {
    final KafkaFutureImpl<DelegationToken> delegationTokenFuture = new KafkaFutureImpl<>();
    final long now = time.milliseconds();
    // Convert the renewer principals into their wire-format representation.
    List<CreatableRenewers> renewers = new ArrayList<>();
    for (KafkaPrincipal principal : options.renewers()) {
        renewers.add(new CreatableRenewers()
            .setPrincipalName(principal.getName())
            .setPrincipalType(principal.getPrincipalType()));
    }
    runnable.call(new Call("createDelegationToken", calcDeadlineMs(now, options.timeoutMs()),
        new LeastLoadedNodeProvider()) {
        @Override
        CreateDelegationTokenRequest.Builder createRequest(int timeoutMs) {
            CreateDelegationTokenRequestData data = new CreateDelegationTokenRequestData()
                .setRenewers(renewers)
                .setMaxLifetimeMs(options.maxLifetimeMs());
            // An explicit owner is only set when requesting a token for another principal.
            if (options.owner().isPresent()) {
                data.setOwnerPrincipalName(options.owner().get().getName());
                data.setOwnerPrincipalType(options.owner().get().getPrincipalType());
            }
            return new CreateDelegationTokenRequest.Builder(data);
        }
        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            CreateDelegationTokenResponse response = (CreateDelegationTokenResponse) abstractResponse;
            if (response.hasError()) {
                delegationTokenFuture.completeExceptionally(response.error().exception());
            } else {
                // Assemble TokenInformation from the response; note the renewers are taken
                // from the request options, not echoed back by the broker.
                CreateDelegationTokenResponseData data = response.data();
                TokenInformation tokenInfo = new TokenInformation(data.tokenId(), new KafkaPrincipal(data.principalType(), data.principalName()),
                    new KafkaPrincipal(data.tokenRequesterPrincipalType(), data.tokenRequesterPrincipalName()),
                    options.renewers(), data.issueTimestampMs(), data.maxTimestampMs(), data.expiryTimestampMs());
                DelegationToken token = new DelegationToken(tokenInfo, data.hmac());
                delegationTokenFuture.complete(token);
            }
        }
        @Override
        void handleFailure(Throwable throwable) {
            delegationTokenFuture.completeExceptionally(throwable);
        }
    }, now);
    return new CreateDelegationTokenResult(delegationTokenFuture);
}
/**
 * Renews an existing delegation token, identified by its HMAC, on the
 * least-loaded broker.
 *
 * @param hmac    the HMAC of the token to renew
 * @param options renewal options (timeout, renew period)
 * @return a result whose future completes with the new expiry timestamp (ms)
 */
@Override
public RenewDelegationTokenResult renewDelegationToken(final byte[] hmac, final RenewDelegationTokenOptions options) {
    final KafkaFutureImpl<Long> renewedExpiryFuture = new KafkaFutureImpl<>();
    final long startMs = time.milliseconds();
    final long deadlineMs = calcDeadlineMs(startMs, options.timeoutMs());
    Call renewCall = new Call("renewDelegationToken", deadlineMs, new LeastLoadedNodeProvider()) {
        @Override
        RenewDelegationTokenRequest.Builder createRequest(int timeoutMs) {
            RenewDelegationTokenRequestData data = new RenewDelegationTokenRequestData()
                .setHmac(hmac)
                .setRenewPeriodMs(options.renewTimePeriodMs());
            return new RenewDelegationTokenRequest.Builder(data);
        }
        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            RenewDelegationTokenResponse response = (RenewDelegationTokenResponse) abstractResponse;
            if (!response.hasError()) {
                renewedExpiryFuture.complete(response.expiryTimestamp());
            } else {
                renewedExpiryFuture.completeExceptionally(response.error().exception());
            }
        }
        @Override
        void handleFailure(Throwable throwable) {
            renewedExpiryFuture.completeExceptionally(throwable);
        }
    };
    runnable.call(renewCall, startMs);
    return new RenewDelegationTokenResult(renewedExpiryFuture);
}
/**
 * Expires (or shortens the lifetime of) an existing delegation token,
 * identified by its HMAC, on the least-loaded broker.
 *
 * @param hmac    the HMAC of the token to expire
 * @param options expiry options (timeout, expiry time period)
 * @return a result whose future completes with the resulting expiry timestamp (ms)
 */
@Override
public ExpireDelegationTokenResult expireDelegationToken(final byte[] hmac, final ExpireDelegationTokenOptions options) {
    final KafkaFutureImpl<Long> expiryFuture = new KafkaFutureImpl<>();
    final long startMs = time.milliseconds();
    final long deadlineMs = calcDeadlineMs(startMs, options.timeoutMs());
    Call expireCall = new Call("expireDelegationToken", deadlineMs, new LeastLoadedNodeProvider()) {
        @Override
        ExpireDelegationTokenRequest.Builder createRequest(int timeoutMs) {
            ExpireDelegationTokenRequestData data = new ExpireDelegationTokenRequestData()
                .setHmac(hmac)
                .setExpiryTimePeriodMs(options.expiryTimePeriodMs());
            return new ExpireDelegationTokenRequest.Builder(data);
        }
        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            ExpireDelegationTokenResponse response = (ExpireDelegationTokenResponse) abstractResponse;
            if (!response.hasError()) {
                expiryFuture.complete(response.expiryTimestamp());
            } else {
                expiryFuture.completeExceptionally(response.error().exception());
            }
        }
        @Override
        void handleFailure(Throwable throwable) {
            expiryFuture.completeExceptionally(throwable);
        }
    };
    runnable.call(expireCall, startMs);
    return new ExpireDelegationTokenResult(expiryFuture);
}
/**
 * Lists delegation tokens, optionally filtered by owner principals, from the
 * least-loaded broker.
 *
 * @param options describe options (timeout, owner filter)
 * @return a result whose future completes with the matching tokens
 */
@Override
public DescribeDelegationTokenResult describeDelegationToken(final DescribeDelegationTokenOptions options) {
    final KafkaFutureImpl<List<DelegationToken>> tokenListFuture = new KafkaFutureImpl<>();
    final long startMs = time.milliseconds();
    final long deadlineMs = calcDeadlineMs(startMs, options.timeoutMs());
    Call describeCall = new Call("describeDelegationToken", deadlineMs, new LeastLoadedNodeProvider()) {
        @Override
        DescribeDelegationTokenRequest.Builder createRequest(int timeoutMs) {
            return new DescribeDelegationTokenRequest.Builder(options.owners());
        }
        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            DescribeDelegationTokenResponse response = (DescribeDelegationTokenResponse) abstractResponse;
            if (!response.hasError()) {
                tokenListFuture.complete(response.tokens());
            } else {
                tokenListFuture.completeExceptionally(response.error().exception());
            }
        }
        @Override
        void handleFailure(Throwable throwable) {
            tokenListFuture.completeExceptionally(throwable);
        }
    };
    runnable.call(describeCall, startMs);
    return new DescribeDelegationTokenResult(tokenListFuture);
}
private static final | AdminClientRunnable |
java | spring-projects__spring-boot | module/spring-boot-micrometer-metrics/src/test/java/org/springframework/boot/micrometer/metrics/autoconfigure/export/kairos/KairosMetricsExportAutoConfigurationTests.java | {
"start": 1347,
"end": 3618
} | class ____ {
private final ApplicationContextRunner contextRunner = new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(KairosMetricsExportAutoConfiguration.class));
@Test
void backsOffWithoutAClock() {
this.contextRunner.run((context) -> assertThat(context).doesNotHaveBean(KairosMeterRegistry.class));
}
@Test
void autoConfiguresItsConfigAndMeterRegistry() {
this.contextRunner.withUserConfiguration(BaseConfiguration.class)
.run((context) -> assertThat(context).hasSingleBean(KairosMeterRegistry.class)
.hasSingleBean(KairosConfig.class));
}
@Test
void autoConfigurationCanBeDisabledWithDefaultsEnabledProperty() {
this.contextRunner.withUserConfiguration(BaseConfiguration.class)
.withPropertyValues("management.defaults.metrics.export.enabled=false")
.run((context) -> assertThat(context).doesNotHaveBean(KairosMeterRegistry.class)
.doesNotHaveBean(KairosConfig.class));
}
@Test
void autoConfigurationCanBeDisabledWithSpecificEnabledProperty() {
this.contextRunner.withUserConfiguration(BaseConfiguration.class)
.withPropertyValues("management.kairos.metrics.export.enabled=false")
.run((context) -> assertThat(context).doesNotHaveBean(KairosMeterRegistry.class)
.doesNotHaveBean(KairosConfig.class));
}
@Test
void allowsCustomConfigToBeUsed() {
this.contextRunner.withUserConfiguration(CustomConfigConfiguration.class)
.run((context) -> assertThat(context).hasSingleBean(KairosMeterRegistry.class)
.hasSingleBean(KairosConfig.class)
.hasBean("customConfig"));
}
@Test
void allowsCustomRegistryToBeUsed() {
this.contextRunner.withUserConfiguration(CustomRegistryConfiguration.class)
.run((context) -> assertThat(context).hasSingleBean(KairosMeterRegistry.class)
.hasBean("customRegistry")
.hasSingleBean(KairosConfig.class));
}
@Test
void stopsMeterRegistryWhenContextIsClosed() {
this.contextRunner.withUserConfiguration(BaseConfiguration.class).run((context) -> {
KairosMeterRegistry registry = context.getBean(KairosMeterRegistry.class);
assertThat(registry.isClosed()).isFalse();
context.close();
assertThat(registry.isClosed()).isTrue();
});
}
@Configuration(proxyBeanMethods = false)
static | KairosMetricsExportAutoConfigurationTests |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1000/Issue1079.java | {
"start": 859,
"end": 980
} | class ____ {
@JSONField(name ="Response")
public List<Response> response;
public static | PdpResponse |
java | apache__camel | components/camel-bindy/src/test/java/org/apache/camel/dataformat/bindy/model/csv/MyCsvRecord2.java | {
"start": 1098,
"end": 3378
} | class ____ implements Serializable {
private static final long serialVersionUID = 1L;
@DataField(pos = 1)
private String attention;
@DataField(pos = 2)
private String addressLine1;
@DataField(pos = 3)
private String addressLine2;
@DataField(pos = 4)
private String city;
@DataField(pos = 5)
private String state;
@DataField(pos = 6)
private String zip;
@DataField(pos = 7)
private String country;
@DataField(pos = 8)
private String dummy1;
@DataField(pos = 9)
private String dummy2;
public MyCsvRecord2() {
}
public String getAttention() {
return attention;
}
public void setAttention(String attention) {
this.attention = attention;
}
public String getAddressLine1() {
return addressLine1;
}
public void setAddressLine1(String addressLine1) {
this.addressLine1 = addressLine1;
}
public String getAddressLine2() {
return addressLine2;
}
public void setAddressLine2(String addressLine2) {
this.addressLine2 = addressLine2;
}
public String getCity() {
return city;
}
public void setCity(String city) {
this.city = city;
}
public String getState() {
return state;
}
public void setState(String state) {
this.state = state;
}
public String getZip() {
return zip;
}
public void setZip(String zip) {
this.zip = zip;
}
public String getCountry() {
return country;
}
public void setCountry(String country) {
this.country = country;
}
public String getDummy1() {
return dummy1;
}
public void setDummy1(String dummy1) {
this.dummy1 = dummy1;
}
public String getDummy2() {
return dummy2;
}
public void setDummy2(String dummy2) {
this.dummy2 = dummy2;
}
@Override
public String toString() {
return "Record [attention=" + attention + ", addressLine1=" + addressLine1 + ", addressLine2="
+ addressLine2 + ", city=" + city + ", state=" + state + ", zip=" + zip + ", country="
+ country + ", dummy1=" + dummy1 + ", dummy2=" + dummy2 + "]";
}
}
| MyCsvRecord2 |
java | apache__dubbo | dubbo-plugin/dubbo-qos/src/main/java/org/apache/dubbo/qos/command/ActuatorExecutor.java | {
"start": 848,
"end": 931
} | interface ____ {
String execute(String command, String[] args);
}
| ActuatorExecutor |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/typeutils/TypeSerializer.java | {
"start": 2887,
"end": 9269
} | class ____<T> implements Serializable {
private static final long serialVersionUID = 1L;
// --------------------------------------------------------------------------------------------
// General information about the type and the serializer
// --------------------------------------------------------------------------------------------
/**
* Gets whether the type is an immutable type.
*
* @return True, if the type is immutable.
*/
public abstract boolean isImmutableType();
/**
* Creates a deep copy of this serializer if it is necessary, i.e. if it is stateful. This can
* return itself if the serializer is not stateful.
*
* <p>We need this because Serializers might be used in several threads. Stateless serializers
* are inherently thread-safe while stateful serializers might not be thread-safe.
*/
public abstract TypeSerializer<T> duplicate();
// --------------------------------------------------------------------------------------------
// Instantiation & Cloning
// --------------------------------------------------------------------------------------------
/**
* Creates a new instance of the data type.
*
* @return A new instance of the data type.
*/
public abstract T createInstance();
/**
* Creates a deep copy of the given element in a new element.
*
* @param from The element reuse be copied.
* @return A deep copy of the element.
*/
public abstract T copy(T from);
/**
* Creates a copy from the given element. The method makes an attempt to store the copy in the
* given reuse element, if the type is mutable. This is, however, not guaranteed.
*
* @param from The element to be copied.
* @param reuse The element to be reused. May or may not be used.
* @return A deep copy of the element.
*/
public abstract T copy(T from, T reuse);
// --------------------------------------------------------------------------------------------
/**
* Gets the length of the data type, if it is a fix length data type.
*
* @return The length of the data type, or <code>-1</code> for variable length data types.
*/
public abstract int getLength();
// --------------------------------------------------------------------------------------------
/**
* Serializes the given record to the given target output view.
*
* @param record The record to serialize.
* @param target The output view to write the serialized data to.
* @throws IOException Thrown, if the serialization encountered an I/O related error. Typically
* raised by the output view, which may have an underlying I/O channel to which it
* delegates.
*/
public abstract void serialize(T record, DataOutputView target) throws IOException;
/**
* De-serializes a record from the given source input view.
*
* @param source The input view from which to read the data.
* @return The deserialized element.
* @throws IOException Thrown, if the de-serialization encountered an I/O related error.
* Typically raised by the input view, which may have an underlying I/O channel from which
* it reads.
*/
public abstract T deserialize(DataInputView source) throws IOException;
/**
* De-serializes a record from the given source input view into the given reuse record instance
* if mutable.
*
* @param reuse The record instance into which to de-serialize the data.
* @param source The input view from which to read the data.
* @return The deserialized element.
* @throws IOException Thrown, if the de-serialization encountered an I/O related error.
* Typically raised by the input view, which may have an underlying I/O channel from which
* it reads.
*/
public abstract T deserialize(T reuse, DataInputView source) throws IOException;
/**
* Copies exactly one record from the source input view to the target output view. Whether this
* operation works on binary data or partially de-serializes the record to determine its length
* (such as for records of variable length) is up to the implementer. Binary copies are
* typically faster. A copy of a record containing two integer numbers (8 bytes total) is most
* efficiently implemented as {@code target.write(source, 8);}.
*
* @param source The input view from which to read the record.
* @param target The target output view to which to write the record.
* @throws IOException Thrown if any of the two views raises an exception.
*/
public abstract void copy(DataInputView source, DataOutputView target) throws IOException;
public abstract boolean equals(Object obj);
public abstract int hashCode();
// --------------------------------------------------------------------------------------------
// Serializer configuration snapshot for checkpoints/savepoints
// --------------------------------------------------------------------------------------------
/**
* Snapshots the configuration of this TypeSerializer. This method is only relevant if the
* serializer is used to state stored in checkpoints/savepoints.
*
* <p>The snapshot of the TypeSerializer is supposed to contain all information that affects the
* serialization format of the serializer. The snapshot serves two purposes: First, to reproduce
* the serializer when the checkpoint/savepoint is restored, and second, to check whether the
* serialization format is compatible with the serializer used in the restored program.
*
* <p><b>IMPORTANT:</b> TypeSerializerSnapshots changed after Flink 1.6. Serializers implemented
* against Flink versions up to 1.6 should still work, but adjust to new model to enable state
* evolution and be future-proof. See the class-level comments, section "Upgrading
* TypeSerializers to the new TypeSerializerSnapshot model" for details.
*
* @see TypeSerializerSnapshot#resolveSchemaCompatibility(TypeSerializerSnapshot)
* @return snapshot of the serializer's current configuration (cannot be {@code null}).
*/
public abstract TypeSerializerSnapshot<T> snapshotConfiguration();
}
| TypeSerializer |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/AutoConfigurationImportSelectorTests.java | {
"start": 12967,
"end": 13961
} | class ____ implements AutoConfigurationImportFilter, BeanFactoryAware {
private final Set<String> nonMatching = new HashSet<>();
@SuppressWarnings("NullAway.Init")
private BeanFactory beanFactory;
TestAutoConfigurationImportFilter(String[] configurations, int... nonMatching) {
for (int i : nonMatching) {
this.nonMatching.add(configurations[i]);
}
}
@Override
public boolean[] match(@Nullable String[] autoConfigurationClasses,
AutoConfigurationMetadata autoConfigurationMetadata) {
boolean[] result = new boolean[autoConfigurationClasses.length];
for (int i = 0; i < result.length; i++) {
result[i] = !this.nonMatching.contains(autoConfigurationClasses[i]);
}
return result;
}
@Override
public void setBeanFactory(BeanFactory beanFactory) {
this.beanFactory = beanFactory;
}
BeanFactory getBeanFactory() {
return this.beanFactory;
}
}
@Configuration(proxyBeanMethods = false)
private final | TestAutoConfigurationImportFilter |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/LangChain4jToolsEndpointBuilderFactory.java | {
"start": 1593,
"end": 4867
} | interface ____
extends
EndpointConsumerBuilder {
default AdvancedLangChain4jToolsEndpointConsumerBuilder advanced() {
return (AdvancedLangChain4jToolsEndpointConsumerBuilder) this;
}
/**
* The tags for the tools.
*
* The option is a: <code>java.lang.String</code> type.
*
* Required: true
* Group: common
*
* @param tags the value to set
* @return the dsl builder
*/
default LangChain4jToolsEndpointConsumerBuilder tags(String tags) {
doSetProperty("tags", tags);
return this;
}
/**
* Tool description.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param description the value to set
* @return the dsl builder
*/
default LangChain4jToolsEndpointConsumerBuilder description(String description) {
doSetProperty("description", description);
return this;
}
/**
* Tool name.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: consumer
*
* @param name the value to set
* @return the dsl builder
*/
default LangChain4jToolsEndpointConsumerBuilder name(String name) {
doSetProperty("name", name);
return this;
}
/**
* List of Tool parameters in the form of parameter.=. This is a
* multi-value option with prefix: parameter.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.String></code> type.
* The option is multivalued, and you can use the parameters(String,
* Object) method to add a value (call the method multiple times to set
* more values).
*
* Group: consumer
*
* @param key the option key
* @param value the option value
* @return the dsl builder
*/
default LangChain4jToolsEndpointConsumerBuilder parameters(String key, Object value) {
doSetMultiValueProperty("parameters", "parameter." + key, value);
return this;
}
/**
* List of Tool parameters in the form of parameter.=. This is a
* multi-value option with prefix: parameter.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.String></code> type.
* The option is multivalued, and you can use the parameters(String,
* Object) method to add a value (call the method multiple times to set
* more values).
*
* Group: consumer
*
* @param values the values
* @return the dsl builder
*/
default LangChain4jToolsEndpointConsumerBuilder parameters(Map values) {
doSetMultiValueProperties("parameters", "parameter.", values);
return this;
}
}
/**
* Advanced builder for endpoint consumers for the LangChain4j Tools component.
*/
public | LangChain4jToolsEndpointConsumerBuilder |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSink.java | {
"start": 1447,
"end": 9363
} | class ____ {
@Test
public void testInit() {
ConfigBuilder builder = new ConfigBuilder();
SubsetConfiguration conf =
builder.add("sink.roll-interval", "10m")
.add("sink.roll-offset-interval-millis", "1")
.add("sink.basepath", "path")
.add("sink.ignore-error", "true")
.add("sink.allow-append", "true")
.add("sink.source", "src")
.subset("sink");
RollingFileSystemSink sink = new RollingFileSystemSink();
sink.init(conf);
assertEquals(sink.rollIntervalMillis, 600000,
"The roll interval was not set correctly");
assertEquals(sink.rollOffsetIntervalMillis, 1,
"The roll offset interval was not set correctly");
assertEquals(sink.basePath, new Path("path"),
"The base path was not set correctly");
assertEquals(sink.ignoreError, true, "ignore-error was not set correctly");
assertEquals(sink.allowAppend, true, "allow-append was not set correctly");
assertEquals(sink.source, "src", "The source was not set correctly");
}
/**
* Test whether the initial roll interval is set correctly.
*/
@Test
public void testSetInitialFlushTime() {
RollingFileSystemSink rfsSink = new RollingFileSystemSink(1000, 0);
Calendar calendar = Calendar.getInstance();
calendar.set(Calendar.MILLISECOND, 0);
calendar.set(Calendar.SECOND, 0);
calendar.set(Calendar.MINUTE, 0);
calendar.set(Calendar.HOUR, 0);
calendar.set(Calendar.DAY_OF_YEAR, 1);
calendar.set(Calendar.YEAR, 2016);
assertNull(
rfsSink.nextFlush, "Last flush time should have been null prior to calling init()");
rfsSink.setInitialFlushTime(calendar.getTime());
long diff =
rfsSink.nextFlush.getTimeInMillis() - calendar.getTimeInMillis();
assertEquals(0L, diff, "The initial flush time was calculated incorrectly");
calendar.set(Calendar.MILLISECOND, 10);
rfsSink.setInitialFlushTime(calendar.getTime());
diff = rfsSink.nextFlush.getTimeInMillis() - calendar.getTimeInMillis();
assertEquals(
-10L, diff, "The initial flush time was calculated incorrectly");
calendar.set(Calendar.SECOND, 1);
calendar.set(Calendar.MILLISECOND, 10);
rfsSink.setInitialFlushTime(calendar.getTime());
diff = rfsSink.nextFlush.getTimeInMillis() - calendar.getTimeInMillis();
assertEquals(
-10L, diff, "The initial flush time was calculated incorrectly");
// Try again with a random offset
rfsSink = new RollingFileSystemSink(1000, 100);
assertNull(
rfsSink.nextFlush, "Last flush time should have been null prior to calling init()");
calendar.set(Calendar.MILLISECOND, 0);
calendar.set(Calendar.SECOND, 0);
rfsSink.setInitialFlushTime(calendar.getTime());
diff = rfsSink.nextFlush.getTimeInMillis() - calendar.getTimeInMillis();
assertTrue((diff == 0L) || ((diff > -1000L) && (diff < -900L)),
"The initial flush time was calculated incorrectly: " + diff);
calendar.set(Calendar.MILLISECOND, 10);
rfsSink.setInitialFlushTime(calendar.getTime());
diff = rfsSink.nextFlush.getTimeInMillis() - calendar.getTimeInMillis();
assertTrue((diff >= -10L) && (diff <= 0L) || ((diff > -1000L) && (diff < -910L)),
"The initial flush time was calculated incorrectly: " + diff);
calendar.set(Calendar.SECOND, 1);
calendar.set(Calendar.MILLISECOND, 10);
rfsSink.setInitialFlushTime(calendar.getTime());
diff = rfsSink.nextFlush.getTimeInMillis() - calendar.getTimeInMillis();
assertTrue((diff >= -10L) && (diff <= 0L) || ((diff > -1000L) && (diff < -910L)),
"The initial flush time was calculated incorrectly: " + diff);
// Now try pathological settings
rfsSink = new RollingFileSystemSink(1000, 1000000);
assertNull(rfsSink.nextFlush,
"Last flush time should have been null prior to calling init()");
calendar.set(Calendar.MILLISECOND, 1);
calendar.set(Calendar.SECOND, 0);
rfsSink.setInitialFlushTime(calendar.getTime());
diff = rfsSink.nextFlush.getTimeInMillis() - calendar.getTimeInMillis();
assertTrue((diff > -1000L) && (diff <= 0L),
"The initial flush time was calculated incorrectly: " + diff);
}
/**
* Test that the roll time updates correctly.
*/
@Test
public void testUpdateRollTime() {
RollingFileSystemSink rfsSink = new RollingFileSystemSink(1000, 0);
Calendar calendar = Calendar.getInstance();
calendar.set(Calendar.MILLISECOND, 0);
calendar.set(Calendar.SECOND, 0);
calendar.set(Calendar.MINUTE, 0);
calendar.set(Calendar.HOUR, 0);
calendar.set(Calendar.DAY_OF_YEAR, 1);
calendar.set(Calendar.YEAR, 2016);
rfsSink.nextFlush = Calendar.getInstance();
rfsSink.nextFlush.setTime(calendar.getTime());
rfsSink.updateFlushTime(calendar.getTime());
assertEquals(calendar.getTimeInMillis() + 1000,
rfsSink.nextFlush.getTimeInMillis(),
"The next roll time should have been 1 second in the future");
rfsSink.nextFlush.setTime(calendar.getTime());
calendar.add(Calendar.MILLISECOND, 10);
rfsSink.updateFlushTime(calendar.getTime());
assertEquals(calendar.getTimeInMillis() + 990,
rfsSink.nextFlush.getTimeInMillis(),
"The next roll time should have been 990 ms in the future");
rfsSink.nextFlush.setTime(calendar.getTime());
calendar.add(Calendar.SECOND, 2);
calendar.add(Calendar.MILLISECOND, 10);
rfsSink.updateFlushTime(calendar.getTime());
assertEquals(calendar.getTimeInMillis() + 990,
rfsSink.nextFlush.getTimeInMillis(),
"The next roll time should have been 990 ms in the future");
}
/**
* Test whether the roll interval is correctly calculated from the
* configuration settings.
*/
@Test
public void testGetRollInterval() {
doTestGetRollInterval(1, new String[] {"m", "min", "minute", "minutes"},
60 * 1000L);
doTestGetRollInterval(1, new String[] {"h", "hr", "hour", "hours"},
60 * 60 * 1000L);
doTestGetRollInterval(1, new String[] {"d", "day", "days"},
24 * 60 * 60 * 1000L);
ConfigBuilder builder = new ConfigBuilder();
SubsetConfiguration conf =
builder.add("sink.roll-interval", "1").subset("sink");
// We can reuse the same sink evry time because we're setting the same
// property every time.
RollingFileSystemSink sink = new RollingFileSystemSink();
sink.init(conf);
assertEquals(3600000L, sink.getRollInterval());
for (char c : "abcefgijklnopqrtuvwxyz".toCharArray()) {
builder = new ConfigBuilder();
conf = builder.add("sink.roll-interval", "90 " + c).subset("sink");
try {
sink.init(conf);
sink.getRollInterval();
fail("Allowed flush interval with bad units: " + c);
} catch (MetricsException ex) {
// Expected
}
}
}
/**
* Test the basic unit conversions with the given unit name modifier applied.
*
* @param mod a unit name modifier
*/
private void doTestGetRollInterval(int num, String[] units, long expected) {
RollingFileSystemSink sink = new RollingFileSystemSink();
ConfigBuilder builder = new ConfigBuilder();
for (String unit : units) {
sink.init(builder.add("sink.roll-interval", num + unit).subset("sink"));
assertEquals(expected, sink.getRollInterval());
sink.init(builder.add("sink.roll-interval",
num + unit.toUpperCase()).subset("sink"));
assertEquals(expected, sink.getRollInterval());
sink.init(builder.add("sink.roll-interval",
num + " " + unit).subset("sink"));
assertEquals(expected, sink.getRollInterval());
sink.init(builder.add("sink.roll-interval",
num + " " + unit.toUpperCase()).subset("sink"));
assertEquals(expected, sink.getRollInterval());
}
}
}
| TestRollingFileSystemSink |
java | playframework__playframework | dev-mode/play-build-link/src/main/java/play/core/BuildLink.java | {
"start": 1735,
"end": 1977
} | class ____ generated (eg a template), then the original source file should be returned,
* and the line number should be mapped back to the line number in the original source file, if
* possible.
*
* @param className The name of the | is |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/ProducerFailedException.java | {
"start": 1488,
"end": 1619
} | class ____.
*/
public ProducerFailedException(Throwable cause) {
super(new SerializedThrowable(cause));
}
}
| loader |
java | elastic__elasticsearch | x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportRemoveIndexLifecyclePolicyAction.java | {
"start": 1602,
"end": 4415
} | class ____ extends TransportMasterNodeAction<Request, Response> {
private final ProjectResolver projectResolver;
private final IndexNameExpressionResolver indexNameExpressionResolver;
@Inject
public TransportRemoveIndexLifecyclePolicyAction(
TransportService transportService,
ClusterService clusterService,
ThreadPool threadPool,
ActionFilters actionFilters,
ProjectResolver projectResolver,
IndexNameExpressionResolver indexNameExpressionResolver
) {
super(
RemoveIndexLifecyclePolicyAction.NAME,
transportService,
clusterService,
threadPool,
actionFilters,
Request::new,
Response::new,
EsExecutors.DIRECT_EXECUTOR_SERVICE
);
this.indexNameExpressionResolver = indexNameExpressionResolver;
this.projectResolver = projectResolver;
}
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
}
@Override
protected void masterOperation(Task task, Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
final var project = projectResolver.getProjectMetadata(state);
final Index[] indices = indexNameExpressionResolver.concreteIndices(project, request.indicesOptions(), true, request.indices());
submitUnbatchedTask("remove-lifecycle-for-index", new ClusterStateUpdateTask(request.masterNodeTimeout()) {
private final List<String> failedIndexes = new ArrayList<>();
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
final var currentProjet = currentState.metadata().getProject(project.id());
final var updatedProject = IndexLifecycleTransition.removePolicyForIndexes(indices, currentProjet, failedIndexes);
return ClusterState.builder(currentState).putProjectMetadata(updatedProject).build();
}
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
@Override
public void clusterStateProcessed(ClusterState oldState, ClusterState newState) {
listener.onResponse(new Response(failedIndexes));
}
});
}
@SuppressForbidden(reason = "legacy usage of unbatched task") // TODO add support for batching here
private void submitUnbatchedTask(@SuppressWarnings("SameParameterValue") String source, ClusterStateUpdateTask task) {
clusterService.submitUnbatchedStateUpdateTask(source, task);
}
}
| TransportRemoveIndexLifecyclePolicyAction |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/KubernetesPodsComponentBuilderFactory.java | {
"start": 1956,
"end": 6052
} | interface ____ extends ComponentBuilder<KubernetesPodsComponent> {
/**
* To use an existing kubernetes client.
*
* The option is a:
* <code>io.fabric8.kubernetes.client.KubernetesClient</code> type.
*
* Group: common
*
* @param kubernetesClient the value to set
* @return the dsl builder
*/
default KubernetesPodsComponentBuilder kubernetesClient(io.fabric8.kubernetes.client.KubernetesClient kubernetesClient) {
doSetProperty("kubernetesClient", kubernetesClient);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default KubernetesPodsComponentBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default KubernetesPodsComponentBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether autowiring is enabled. This is used for automatic autowiring
* options (the option must be marked as autowired) by looking up in the
* registry to find if there is a single instance of matching type,
* which then gets configured on the component. This can be used for
* automatic configuring JDBC data sources, JMS connection factories,
* AWS Clients, etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autowiredEnabled the value to set
* @return the dsl builder
*/
default KubernetesPodsComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
doSetProperty("autowiredEnabled", autowiredEnabled);
return this;
}
}
| KubernetesPodsComponentBuilder |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/ops/multiload/MultiLoadEntityGraphTest.java | {
"start": 1201,
"end": 5330
} | class ____ {
@BeforeEach
public void before(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
for ( int i = 0; i < 5; i++ ) {
Parent p = new Parent( i, "Entity #" + i );
for ( int j = 0; j < 5; j++ ) {
Child child = new Child();
child.setParent( p );
p.getChildren().add( child );
}
for ( int j = 0; j < 5; j++ ) {
Pet pet = new Pet();
pet.setMaster( p );
p.getPets().add( pet );
}
session.persist( p );
}
}
);
}
@AfterEach
public void cleanUp(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Test
public void testFetchGraph(SessionFactoryScope scope) {
scope.inTransaction( session -> {
List<Parent> list = session.byMultipleIds( Parent.class ).multiLoad( 1, 2, 3 );
assertThat( list ).hasSize( 3 );
// Collections should be loaded according to their defaults
for ( Parent p : list ) {
assertThat( Hibernate.isInitialized( p.children ) ).isFalse();
assertThat( Hibernate.isInitialized( p.pets ) ).isTrue();
}
} );
scope.inTransaction( session -> {
List<Parent> list = session.byMultipleIds( Parent.class )
.with( (RootGraph) session.getEntityGraph( "eager" ), GraphSemantic.FETCH )
.multiLoad( 1, 2, 3 );
assertThat( list ).hasSize( 3 );
// Collections should be loaded eagerly if mentioned in the graph, or lazily otherwise.
// Since the graph contains all collections, all collections should be loaded eagerly.
for ( Parent p : list ) {
assertThat( Hibernate.isInitialized( p.children ) ).isTrue();
assertThat( Hibernate.isInitialized( p.pets ) ).isTrue();
}
} );
scope.inTransaction( session -> {
List<Parent> list = session.byMultipleIds( Parent.class )
.with( (RootGraph) session.getEntityGraph( "lazy" ), GraphSemantic.FETCH )
.multiLoad( 1, 2, 3 );
assertThat( list ).hasSize( 3 );
// Collections should be loaded eagerly if mentioned in the graph, or lazily otherwise.
// Since the graph is empty, all collections should be loaded lazily.
for ( Parent p : list ) {
assertThat( Hibernate.isInitialized( p.children ) ).isFalse();
assertThat( Hibernate.isInitialized( p.pets ) ).isFalse();
}
} );
}
@Test
public void testLoadGraph(SessionFactoryScope scope) {
scope.inTransaction( session -> {
List<Parent> list = session.byMultipleIds( Parent.class ).multiLoad( 1, 2, 3 );
assertThat( list ).hasSize( 3 );
// Collections should be loaded according to their defaults
for ( Parent p : list ) {
assertThat( Hibernate.isInitialized( p.children ) ).isFalse();
assertThat( Hibernate.isInitialized( p.pets ) ).isTrue();
}
} );
scope.inTransaction( session -> {
List<Parent> list = session.byMultipleIds( Parent.class )
.with( (RootGraph) session.getEntityGraph( "eager" ), GraphSemantic.LOAD )
.multiLoad( 1, 2, 3 );
assertThat( list ).hasSize( 3 );
// Collections should be loaded eagerly if mentioned in the graph, or according to their default otherwise.
// Since the graph contains all collections, all collections should be loaded eagerly.
for ( Parent p : list ) {
assertThat( Hibernate.isInitialized( p.children ) ).isTrue();
assertThat( Hibernate.isInitialized( p.pets ) ).isTrue();
}
} );
scope.inTransaction( session -> {
List<Parent> list = session.byMultipleIds( Parent.class )
.with( (RootGraph) session.getEntityGraph( "lazy" ), GraphSemantic.LOAD )
.multiLoad( 1, 2, 3 );
assertThat( list ).hasSize( 3 );
// Collections should be loaded eagerly if mentioned in the graph, or according to their default otherwise.
// Since the graph is empty, all collections should be loaded according to their default.
for ( Parent p : list ) {
assertThat( Hibernate.isInitialized( p.children ) ).isFalse();
assertThat( Hibernate.isInitialized( p.pets ) ).isTrue();
}
} );
}
@Entity(name = "Parent")
@NamedEntityGraph(name = "eager", includeAllAttributes = true)
@NamedEntityGraph(name = "lazy")
public static | MultiLoadEntityGraphTest |
java | quarkusio__quarkus | extensions/panache/hibernate-reactive-panache-common/deployment/src/main/java/io/quarkus/hibernate/reactive/panache/common/deployment/PanacheJpaCommonResourceProcessor.java | {
"start": 9698,
"end": 14524
} | class ____ uses a panache entity/repository
// - is annotated with @GET, @POST, @PUT, @DELETE ,@PATCH ,@HEAD or @OPTIONS
// - is not annotated with @ReactiveTransactional, @WithSession, @WithSessionOnDemand, or @WithTransaction
context.transform().add(DotNames.WITH_SESSION_ON_DEMAND).done();
}
}));
}
}
@BuildStep
void lookupNamedQueries(CombinedIndexBuildItem index,
BuildProducer<PanacheNamedQueryEntityClassBuildStep> namedQueries,
JpaModelBuildItem jpaModel) {
for (String modelClass : jpaModel.getAllModelClassNames()) {
// lookup for `@NamedQuery` on the hierarchy and produce NamedQueryEntityClassBuildStep
Map<String, String> typeNamedQueries = new HashMap<>();
lookupNamedQueries(index, DotName.createSimple(modelClass), typeNamedQueries);
namedQueries.produce(new PanacheNamedQueryEntityClassBuildStep(modelClass, typeNamedQueries));
}
}
@BuildStep
@Record(ExecutionTime.STATIC_INIT)
void buildNamedQueryMap(List<PanacheNamedQueryEntityClassBuildStep> namedQueryEntityClasses,
PanacheHibernateRecorder panacheHibernateRecorder) {
Map<String, Map<String, String>> namedQueryMap = new HashMap<>();
for (PanacheNamedQueryEntityClassBuildStep entityNamedQueries : namedQueryEntityClasses) {
namedQueryMap.put(entityNamedQueries.getClassName(), entityNamedQueries.getNamedQueries());
}
panacheHibernateRecorder.setNamedQueryMap(namedQueryMap);
}
@BuildStep
@Record(ExecutionTime.RUNTIME_INIT)
public void shutdown(ShutdownContextBuildItem shutdownContextBuildItem, PanacheHibernateRecorder panacheHibernateRecorder) {
panacheHibernateRecorder.clear(shutdownContextBuildItem);
}
private void lookupNamedQueries(CombinedIndexBuildItem index, DotName name, Map<String, String> namedQueries) {
ClassInfo classInfo = index.getComputingIndex().getClassByName(name);
if (classInfo == null) {
return;
}
List<AnnotationInstance> namedQueryInstances = classInfo.annotationsMap().get(DotNames.DOTNAME_NAMED_QUERY);
if (namedQueryInstances != null) {
for (AnnotationInstance namedQueryInstance : namedQueryInstances) {
namedQueries.put(namedQueryInstance.value("name").asString(),
namedQueryInstance.value("query").asString());
}
}
List<AnnotationInstance> namedQueriesInstances = classInfo.annotationsMap().get(DotNames.DOTNAME_NAMED_QUERIES);
if (namedQueriesInstances != null) {
for (AnnotationInstance namedQueriesInstance : namedQueriesInstances) {
AnnotationValue value = namedQueriesInstance.value();
AnnotationInstance[] nestedInstances = value.asNestedArray();
for (AnnotationInstance nested : nestedInstances) {
namedQueries.put(nested.value("name").asString(), nested.value("query").asString());
}
}
}
// climb up the hierarchy of types
if (!classInfo.superClassType().name().equals(JandexUtil.DOTNAME_OBJECT)) {
Type superType = classInfo.superClassType();
ClassInfo superClass = index.getComputingIndex().getClassByName(superType.name());
if (superClass != null) {
lookupNamedQueries(index, superClass.name(), namedQueries);
}
}
}
private void validateBindings(List<DotName> bindings, Entry<MethodInfo, Set<AnnotationInstance>> entry,
BuildProducer<ValidationErrorBuildItem> errors) {
for (DotName binding : bindings) {
for (AnnotationInstance annotation : entry.getValue()) {
if (annotation.name().equals(binding)) {
if (annotation.target().kind() == Kind.METHOD) {
errors.produce(new ValidationErrorBuildItem(
new IllegalStateException(
"A method annotated with @"
+ binding.withoutPackagePrefix()
+ " must return io.smallrye.mutiny.Uni: "
+ entry.getKey() + " declared on " + entry.getKey().declaringClass())));
} else {
LOG.debugf("Class-level binding %s will be ignored for method %s() declared on %s", binding,
entry.getKey().name(), entry.getKey().declaringClass());
}
return;
}
}
}
}
}
| that |
java | apache__flink | flink-formats/flink-avro-confluent-registry/src/main/java/org/apache/flink/formats/avro/registry/confluent/ConfluentSchemaRegistryCoder.java | {
"start": 1370,
"end": 3554
} | class ____ implements SchemaCoder {
private final SchemaRegistryClient schemaRegistryClient;
private String subject;
private static final int CONFLUENT_MAGIC_BYTE = 0;
/**
* Creates {@link SchemaCoder} that uses provided {@link SchemaRegistryClient} to connect to
* schema registry.
*
* @param schemaRegistryClient client to connect schema registry
* @param subject subject of schema registry to produce
*/
public ConfluentSchemaRegistryCoder(String subject, SchemaRegistryClient schemaRegistryClient) {
this.schemaRegistryClient = schemaRegistryClient;
this.subject = subject;
}
/**
* Creates {@link SchemaCoder} that uses provided {@link SchemaRegistryClient} to connect to
* schema registry.
*
* @param schemaRegistryClient client to connect schema registry
*/
public ConfluentSchemaRegistryCoder(SchemaRegistryClient schemaRegistryClient) {
this.schemaRegistryClient = schemaRegistryClient;
}
@Override
public Schema readSchema(InputStream in) throws IOException {
DataInputStream dataInputStream = new DataInputStream(in);
if (dataInputStream.readByte() != 0) {
throw new IOException("Unknown data format. Magic number does not match");
} else {
int schemaId = dataInputStream.readInt();
try {
return schemaRegistryClient.getById(schemaId);
} catch (RestClientException e) {
throw new IOException(
format("Could not find schema with id %s in registry", schemaId), e);
}
}
}
@Override
public void writeSchema(Schema schema, OutputStream out) throws IOException {
try {
int registeredId = schemaRegistryClient.register(subject, schema);
out.write(CONFLUENT_MAGIC_BYTE);
byte[] schemaIdBytes = ByteBuffer.allocate(4).putInt(registeredId).array();
out.write(schemaIdBytes);
} catch (RestClientException e) {
throw new IOException("Could not register schema in registry", e);
}
}
}
| ConfluentSchemaRegistryCoder |
java | apache__dubbo | dubbo-rpc/dubbo-rpc-dubbo/src/main/java/org/apache/dubbo/rpc/protocol/dubbo/DubboIsolationExecutorSupport.java | {
"start": 1428,
"end": 3072
} | class ____ extends AbstractIsolationExecutorSupport {
private static final ErrorTypeAwareLogger logger =
LoggerFactory.getErrorTypeAwareLogger(DubboIsolationExecutorSupport.class);
private final FrameworkServiceRepository frameworkServiceRepository;
private final DubboProtocol dubboProtocol;
public DubboIsolationExecutorSupport(URL url) {
super(url);
frameworkServiceRepository = url.getOrDefaultFrameworkModel().getServiceRepository();
dubboProtocol = DubboProtocol.getDubboProtocol(url.getOrDefaultFrameworkModel());
}
@Override
protected ProviderModel getProviderModel(Object data) {
if (!(data instanceof Request)) {
return null;
}
Request request = (Request) data;
if (!(request.getData() instanceof DecodeableRpcInvocation)) {
return null;
}
try {
((DecodeableRpcInvocation) request.getData()).fillInvoker(dubboProtocol);
} catch (RemotingException e) {
// ignore here, and this exception will being rethrow in DubboProtocol
}
ServiceModel serviceModel = ((Invocation) request.getData()).getServiceModel();
if (serviceModel instanceof ProviderModel) {
return (ProviderModel) serviceModel;
}
String targetServiceUniqueName = ((Invocation) request.getData()).getTargetServiceUniqueName();
if (StringUtils.isNotEmpty(targetServiceUniqueName)) {
return frameworkServiceRepository.lookupExportedService(targetServiceUniqueName);
}
return null;
}
}
| DubboIsolationExecutorSupport |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/EqualsIncompatibleTypeTest.java | {
"start": 17693,
"end": 18205
} | class ____ {
public void test(Class<? extends Integer> a, Class<? extends String> b) {
// BUG: Diagnostic contains:
a.equals(b);
}
}
""")
.doTest();
}
@Test
public void unconstrainedWildcard_compatibleWithAnything() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import com.google.errorprone.bugpatterns.proto.ProtoTest.TestProtoMessage;
public | Test |
java | elastic__elasticsearch | x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java | {
"start": 57348,
"end": 60289
} | class ____ implements AuthorizationInfo {
private final Role role;
private final Map<String, Object> info;
private final RBACAuthorizationInfo authenticatedUserAuthorizationInfo;
RBACAuthorizationInfo(Role role, Role authenticatedUserRole) {
this.role = Objects.requireNonNull(role);
this.info = Collections.singletonMap(PRINCIPAL_ROLES_FIELD_NAME, role.names());
this.authenticatedUserAuthorizationInfo = authenticatedUserRole == null
? this
: new RBACAuthorizationInfo(authenticatedUserRole, null);
}
Role getRole() {
return role;
}
@Override
public Map<String, Object> asMap() {
return info;
}
@Override
public RBACAuthorizationInfo getAuthenticatedUserAuthorizationInfo() {
return authenticatedUserAuthorizationInfo;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
RBACAuthorizationInfo that = (RBACAuthorizationInfo) o;
if (this.role.equals(that.role) == false) {
return false;
}
// Because authenticatedUserAuthorizationInfo can be reference to this, calling `equals` can result in infinite recursion.
// But if both user-authz-info objects are references to their containing object, then they must be equal.
if (this.authenticatedUserAuthorizationInfo == this) {
return that.authenticatedUserAuthorizationInfo == that;
} else {
return this.authenticatedUserAuthorizationInfo.equals(that.authenticatedUserAuthorizationInfo);
}
}
@Override
public int hashCode() {
// Since authenticatedUserAuthorizationInfo can self reference, we handle it specially to avoid infinite recursion.
if (this.authenticatedUserAuthorizationInfo == this) {
return Objects.hashCode(role);
} else {
return Objects.hash(role, authenticatedUserAuthorizationInfo);
}
}
}
private static boolean isAsyncRelatedAction(String action) {
return action.equals(SubmitAsyncSearchAction.NAME)
|| action.equals(GetAsyncSearchAction.NAME)
|| action.equals(TransportDeleteAsyncResultAction.TYPE.name())
|| action.equals(EqlAsyncActionNames.EQL_ASYNC_GET_RESULT_ACTION_NAME)
|| action.equals(EsqlAsyncActionNames.ESQL_ASYNC_GET_RESULT_ACTION_NAME)
|| action.equals(EsqlAsyncActionNames.ESQL_ASYNC_STOP_ACTION_NAME)
|| action.equals(SqlAsyncActionNames.SQL_ASYNC_GET_RESULT_ACTION_NAME);
}
static final | RBACAuthorizationInfo |
java | google__dagger | dagger-compiler/main/java/dagger/internal/codegen/writing/MembersInjectionBindingRepresentation.java | {
"start": 1968,
"end": 2077
} | interface ____ {
MembersInjectionBindingRepresentation create(MembersInjectionBinding binding);
}
}
| Factory |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/ops/genericApi/BasicGetLoadAccessTest.java | {
"start": 3765,
"end": 4170
} | class ____ {
private Integer id;
private String name;
public User() {
}
public User(String name) {
this.name = name;
}
@Id
@GeneratedValue(generator = "increment")
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
}
| User |
java | google__dagger | javatests/dagger/hilt/android/processor/internal/GeneratorsTest.java | {
"start": 23819,
"end": 24593
} | class ____ extends Hilt_MyView {",
" public MyView(Context context, AttributeSet attributeSet){",
" super(context, attributeSet);",
" }",
"}");
HiltCompilerTests.hiltCompiler(myView)
.compile(
subject -> {
subject.hasErrorCount(0);
StringSubject stringSubject =
subject.generatedSourceFileWithPath("test/Hilt_MyView.java");
stringSubject.contains("package test;");
stringSubject.contains(
JOINER.join(
"@Generated(\"dagger.hilt.android.processor.internal.androidentrypoint.ViewGenerator\")",
"@SuppressWarnings(\"deprecation\")",
"abstract | MyView |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/ObjectParameterTypeForEmbeddableTest.java | {
"start": 4469,
"end": 4769
} | class ____ {
private String foo;
private String bar;
public AnEmbeddable(String foo, String bar) {
this.foo = foo;
this.bar = bar;
}
public AnEmbeddable() {
}
public String getFoo() {
return foo;
}
public String getBar() {
return bar;
}
}
public static | AnEmbeddable |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/GenericTypeResolverTests.java | {
"start": 14669,
"end": 14765
} | class ____ {
<T extends A> List<T> get() {
return List.of();
}
}
static | WithElementBounds |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1300/Issue1335.java | {
"start": 162,
"end": 1499
} | class ____ extends TestCase {
public void test_for_issue() throws Exception {
String json = "{\n" +
"\"id\": \"21496a63f5\",\n" +
"\"title\": \"\",\n" +
"\"url\": \"http://hl-img.peco.uodoo.com/hubble-test/app/sm/e9b884c1dcd671f128bac020e070e273.jpg;,,JPG;3,208x\",\n" +
"\"type\": \"JPG\",\n" +
"\"optimal_width\": 400,\n" +
"\"optimal_height\": 267,\n" +
"\"original_save_url\": \"http://hl-img.peco.uodoo.com/hubble-test/app/sm/e9b884c1dcd671f128bac020e070e273.jpg\",\n" +
"\"phash\": \"62717D190987A7AE\"\n" +
" }";
Image image = JSON.parseObject(json, Image.class);
assertEquals("21496a63f5", image.id);
assertEquals("http://hl-img.peco.uodoo.com/hubble-test/app/sm/e9b884c1dcd671f128bac020e070e273.jpg;,,JPG;3,208x", image.url);
assertEquals("", image.title);
assertEquals("JPG", image.type);
assertEquals(400, image.optimalWidth);
assertEquals(267, image.optimalHeight);
assertEquals("http://hl-img.peco.uodoo.com/hubble-test/app/sm/e9b884c1dcd671f128bac020e070e273.jpg", image.original_save_url);
assertEquals("62717D190987A7AE", image.phash);
}
public static | Issue1335 |
java | quarkusio__quarkus | extensions/opentelemetry/deployment/src/test/java/io/quarkus/opentelemetry/deployment/instrumentation/VertxClientOpenTelemetryTest.java | {
"start": 2302,
"end": 9266
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest TEST = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addPackage(TestSpanExporter.class.getPackage())
.addClasses(SemconvResolver.class)
.addAsResource(new StringAsset(TestSpanExporterProvider.class.getCanonicalName()),
"META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider")
.addAsResource(new StringAsset(InMemoryMetricExporterProvider.class.getCanonicalName()),
"META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.metrics.ConfigurableMetricExporterProvider")
.addAsResource(new StringAsset(InMemoryLogRecordExporterProvider.class.getCanonicalName()),
"META-INF/services/io.opentelemetry.sdk.autoconfigure.spi.logs.ConfigurableLogRecordExporterProvider"))
.withConfigurationResource("application-default.properties");
@Inject
TestSpanExporter spanExporter;
@Inject
Vertx vertx;
@TestHTTPResource
URI uri;
@AfterEach
void tearDown() {
spanExporter.reset();
}
@Test
void client() throws Exception {
HttpResponse<Buffer> response = WebClient.create(vertx)
.get(uri.getPort(), uri.getHost(), "/hello")
.send()
.toCompletionStage().toCompletableFuture()
.get();
assertEquals(HTTP_OK, response.statusCode());
List<SpanData> spans = spanExporter.getFinishedSpanItems(2);
SpanData client = getSpanByKindAndParentId(spans, CLIENT, "0000000000000000");
assertEquals("GET", client.getName());
assertSemanticAttribute(client, (long) HTTP_OK, HTTP_RESPONSE_STATUS_CODE);
assertSemanticAttribute(client, HttpMethod.GET, HTTP_REQUEST_METHOD);
assertSemanticAttribute(client, uri.toString() + "hello", URL_FULL);
assertSemanticAttribute(client, uri.getHost(), SERVER_ADDRESS);
assertSemanticAttribute(client, (long) uri.getPort(), SERVER_PORT);
SpanData server = getSpanByKindAndParentId(spans, SERVER, client.getSpanId());
assertEquals(SERVER, server.getKind());
assertEquals("GET /hello", server.getName());
assertSemanticAttribute(server, (long) HTTP_OK, HTTP_RESPONSE_STATUS_CODE);
assertSemanticAttribute(server, HttpMethod.GET, HTTP_REQUEST_METHOD);
assertEquals("/hello", server.getAttributes().get(HTTP_ROUTE));
assertSemanticAttribute(server, uri.getHost(), SERVER_ADDRESS);
assertSemanticAttribute(server, (long) uri.getPort(), SERVER_PORT);
assertTarget(server, uri.getPath() + "hello", null);
assertEquals(client.getTraceId(), server.getTraceId());
}
@Test
void path() throws Exception {
HttpResponse<Buffer> response = WebClient.create(vertx)
.get(uri.getPort(), uri.getHost(), "/hello/naruto")
.send()
.toCompletionStage().toCompletableFuture()
.get();
assertEquals(HTTP_OK, response.statusCode());
List<SpanData> spans = spanExporter.getFinishedSpanItems(2);
SpanData client = getSpanByKindAndParentId(spans, CLIENT, "0000000000000000");
assertEquals(CLIENT, client.getKind());
assertEquals("GET", client.getName());
assertSemanticAttribute(client, (long) HTTP_OK, HTTP_RESPONSE_STATUS_CODE);
assertSemanticAttribute(client, HttpMethod.GET, HTTP_REQUEST_METHOD);
assertSemanticAttribute(client, uri.toString() + "hello/naruto", URL_FULL);
assertSemanticAttribute(client, uri.getHost(), SERVER_ADDRESS);
assertSemanticAttribute(client, (long) uri.getPort(), SERVER_PORT);
SpanData server = getSpanByKindAndParentId(spans, SERVER, client.getSpanId());
assertEquals(SERVER, server.getKind());
assertEquals("GET /hello/:name", server.getName());
assertSemanticAttribute(server, (long) HTTP_OK, HTTP_RESPONSE_STATUS_CODE);
assertSemanticAttribute(server, HttpMethod.GET, HTTP_REQUEST_METHOD);
assertEquals("/hello/:name", server.getAttributes().get(HTTP_ROUTE));
assertSemanticAttribute(server, uri.getHost(), SERVER_ADDRESS);
assertSemanticAttribute(server, (long) uri.getPort(), SERVER_PORT);
assertTarget(server, uri.getPath() + "hello/naruto", null);
assertEquals(client.getTraceId(), server.getTraceId());
}
@Test
void query() throws Exception {
HttpResponse<Buffer> response = WebClient.create(vertx)
.get(uri.getPort(), uri.getHost(), "/hello?name=foo")
.send()
.toCompletionStage().toCompletableFuture()
.get();
assertEquals(HTTP_OK, response.statusCode());
List<SpanData> spans = spanExporter.getFinishedSpanItems(2);
SpanData client = getSpanByKindAndParentId(spans, CLIENT, "0000000000000000");
assertEquals(CLIENT, client.getKind());
assertEquals("GET", client.getName());
assertSemanticAttribute(client, (long) HTTP_OK, HTTP_RESPONSE_STATUS_CODE);
assertSemanticAttribute(client, HttpMethod.GET, HTTP_REQUEST_METHOD);
assertSemanticAttribute(client, uri.toString() + "hello?name=foo", URL_FULL);
assertSemanticAttribute(client, uri.getHost(), SERVER_ADDRESS);
assertSemanticAttribute(client, (long) uri.getPort(), SERVER_PORT);
SpanData server = getSpanByKindAndParentId(spans, SERVER, client.getSpanId());
assertEquals(SERVER, server.getKind());
assertEquals("GET /hello", server.getName());
assertSemanticAttribute(server, (long) HTTP_OK, HTTP_RESPONSE_STATUS_CODE);
assertSemanticAttribute(server, HttpMethod.GET, HTTP_REQUEST_METHOD);
assertEquals("/hello", server.getAttributes().get(HTTP_ROUTE));
assertSemanticAttribute(server, uri.getHost(), SERVER_ADDRESS);
assertSemanticAttribute(server, (long) uri.getPort(), SERVER_PORT);
assertTarget(server, uri.getPath() + "hello", "name=foo");
assertEquals(client.getTraceId(), server.getTraceId());
}
@Test
void multiple() throws Exception {
HttpResponse<Buffer> response = WebClient.create(vertx)
.get(uri.getPort(), uri.getHost(), "/multiple")
.putHeader("host", uri.getHost())
.putHeader("port", uri.getPort() + "")
.send()
.toCompletionStage().toCompletableFuture()
.get();
assertEquals(HTTP_OK, response.statusCode());
List<SpanData> spans = spanExporter.getFinishedSpanItems(6);
assertEquals(1, spans.stream().map(SpanData::getTraceId).collect(toSet()).size());
}
@ApplicationScoped
public static | VertxClientOpenTelemetryTest |
java | elastic__elasticsearch | libs/lz4/src/test/java/org/elasticsearch/lz4/AbstractLZ4TestCase.java | {
"start": 4700,
"end": 7017
} | class ____ extends ByteArrayTesterBase implements Tester<byte[]> {
@Override
public int compress(LZ4Compressor compressor, byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen) {
return compressor.compress(src, srcOff, srcLen, dest, destOff, maxDestLen);
}
@Override
public int decompress(LZ4FastDecompressor decompressor, byte[] src, int srcOff, byte[] dest, int destOff, int destLen) {
return decompressor.decompress(src, srcOff, dest, destOff, destLen);
}
@Override
public int decompress(
LZ4SafeDecompressor decompressor,
byte[] src,
int srcOff,
int srcLen,
byte[] dest,
int destOff,
int maxDestLen
) {
return decompressor.decompress(src, srcOff, srcLen, dest, destOff, maxDestLen);
}
}
// Modified to remove redundant modifiers
Tester<byte[]> BYTE_ARRAY = new ByteArrayTester();
// Modified to remove redundant modifiers
Tester<byte[]> BYTE_ARRAY_WITH_LENGTH = new ByteArrayTester() {
@Override
public int compress(LZ4Compressor compressor, byte[] src, int srcOff, int srcLen, byte[] dest, int destOff, int maxDestLen) {
return new LZ4CompressorWithLength(compressor).compress(src, srcOff, srcLen, dest, destOff, maxDestLen);
}
@Override
public int decompress(LZ4FastDecompressor decompressor, byte[] src, int srcOff, byte[] dest, int destOff, int destLen) {
return new LZ4DecompressorWithLength(decompressor).decompress(src, srcOff, dest, destOff);
}
@Override
public int decompress(
LZ4SafeDecompressor decompressor,
byte[] src,
int srcOff,
int srcLen,
byte[] dest,
int destOff,
int maxDestLen
) {
return new LZ4DecompressorWithLength(decompressor).decompress(src, srcOff, srcLen, dest, destOff);
}
};
// Modified to remove redundant modifiers
| ByteArrayTester |
java | spring-projects__spring-security | oauth2/oauth2-core/src/main/java/org/springframework/security/oauth2/core/oidc/IdTokenClaimAccessor.java | {
"start": 1433,
"end": 4393
} | interface ____ extends StandardClaimAccessor {
/**
* Returns the Issuer identifier {@code (iss)}.
* @return the Issuer identifier
*/
default URL getIssuer() {
return this.getClaimAsURL(IdTokenClaimNames.ISS);
}
/**
* Returns the Subject identifier {@code (sub)}.
* @return the Subject identifier
*/
@Override
default String getSubject() {
return this.getClaimAsString(IdTokenClaimNames.SUB);
}
/**
* Returns the Audience(s) {@code (aud)} that this ID Token is intended for.
* @return the Audience(s) that this ID Token is intended for
*/
default List<String> getAudience() {
return this.getClaimAsStringList(IdTokenClaimNames.AUD);
}
/**
* Returns the Expiration time {@code (exp)} on or after which the ID Token MUST NOT
* be accepted.
* @return the Expiration time on or after which the ID Token MUST NOT be accepted
*/
default Instant getExpiresAt() {
return this.getClaimAsInstant(IdTokenClaimNames.EXP);
}
/**
* Returns the time at which the ID Token was issued {@code (iat)}.
* @return the time at which the ID Token was issued
*/
default Instant getIssuedAt() {
return this.getClaimAsInstant(IdTokenClaimNames.IAT);
}
/**
* Returns the time when the End-User authentication occurred {@code (auth_time)}.
* @return the time when the End-User authentication occurred
*/
default Instant getAuthenticatedAt() {
return this.getClaimAsInstant(IdTokenClaimNames.AUTH_TIME);
}
/**
* Returns a {@code String} value {@code (nonce)} used to associate a Client session
* with an ID Token, and to mitigate replay attacks.
* @return the nonce used to associate a Client session with an ID Token
*/
default String getNonce() {
return this.getClaimAsString(IdTokenClaimNames.NONCE);
}
/**
* Returns the Authentication Context Class Reference {@code (acr)}.
* @return the Authentication Context Class Reference
*/
default String getAuthenticationContextClass() {
return this.getClaimAsString(IdTokenClaimNames.ACR);
}
/**
* Returns the Authentication Methods References {@code (amr)}.
* @return the Authentication Methods References
*/
default List<String> getAuthenticationMethods() {
return this.getClaimAsStringList(IdTokenClaimNames.AMR);
}
/**
* Returns the Authorized party {@code (azp)} to which the ID Token was issued.
* @return the Authorized party to which the ID Token was issued
*/
default String getAuthorizedParty() {
return this.getClaimAsString(IdTokenClaimNames.AZP);
}
/**
* Returns the Access Token hash value {@code (at_hash)}.
* @return the Access Token hash value
*/
default String getAccessTokenHash() {
return this.getClaimAsString(IdTokenClaimNames.AT_HASH);
}
/**
* Returns the Authorization Code hash value {@code (c_hash)}.
* @return the Authorization Code hash value
*/
default String getAuthorizationCodeHash() {
return this.getClaimAsString(IdTokenClaimNames.C_HASH);
}
}
| IdTokenClaimAccessor |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/pool/DruidPooledCallableStatementTest.java | {
"start": 568,
"end": 4953
} | class ____ extends TestCase {
private DruidDataSource dataSource;
private boolean throwError = true;
protected void setUp() throws Exception {
dataSource = new DruidDataSource();
dataSource.setUrl("jdbc:mock:xxx");
dataSource.setTestOnBorrow(false);
dataSource.setInitialSize(1);
dataSource.getProxyFilters().add(new FilterAdapter() {
public boolean callableStatement_wasNull(FilterChain chain, CallableStatementProxy statement)
throws SQLException {
if (throwError) {
throw new SQLException();
} else {
return chain.callableStatement_wasNull(statement);
}
}
});
}
protected void tearDown() throws Exception {
dataSource.close();
}
public void test_wasNull_noerror() throws Exception {
Connection conn = dataSource.getConnection();
CallableStatement stmt = conn.prepareCall("select 1");
stmt.execute();
throwError = false;
stmt.wasNull();
assertEquals(0, dataSource.getErrorCount());
stmt.close();
conn.close();
assertEquals(1, dataSource.getPoolingCount());
}
public void test_wasNull_error() throws Exception {
Connection conn = dataSource.getConnection();
CallableStatement stmt = conn.prepareCall("select 1");
stmt.execute();
assertEquals(0, dataSource.getErrorCount());
Exception error = null;
try {
stmt.wasNull();
} catch (Exception e) {
error = e;
}
assertNotNull(error);
assertEquals(1, dataSource.getErrorCount());
stmt.close();
conn.close();
assertEquals(1, dataSource.getPoolingCount());
}
public void test_getObject() throws Exception {
Connection conn = dataSource.getConnection();
DruidPooledCallableStatement stmt = (DruidPooledCallableStatement) conn.prepareCall("select 1");
stmt.execute();
assertEquals(0, dataSource.getErrorCount());
Exception error = null;
try {
stmt.getObject(1, String.class);
} catch (SQLFeatureNotSupportedException e) {
error = e;
}
assertNotNull(error);
assertEquals(0, dataSource.getErrorCount());
stmt.close();
conn.close();
assertEquals(1, dataSource.getPoolingCount());
}
public void test_getObject_1() throws Exception {
Connection conn = dataSource.getConnection();
DruidPooledCallableStatement stmt = (DruidPooledCallableStatement) conn.prepareCall("select 1");
stmt.execute();
assertEquals(0, dataSource.getErrorCount());
Exception error = null;
try {
stmt.getObject("1", String.class);
} catch (SQLFeatureNotSupportedException e) {
error = e;
}
assertNotNull(error);
assertEquals(0, dataSource.getErrorCount());
stmt.close();
conn.close();
assertEquals(1, dataSource.getPoolingCount());
}
public void test_wrap() throws Exception {
Connection conn = dataSource.getConnection();
CallableStatement stmt = conn.prepareCall("select 1");
assertNotNull(stmt.unwrap(CallableStatement.class));
assertEquals(MockCallableStatement.class, stmt.unwrap(CallableStatement.class).getClass());
stmt.close();
conn.close();
}
public void test_wrap_1() throws Exception {
Connection conn = dataSource.getConnection();
CallableStatement stmt = conn.prepareCall("select 1");
assertNotNull(stmt.unwrap(PreparedStatement.class));
assertEquals(MockCallableStatement.class, stmt.unwrap(CallableStatement.class).getClass());
stmt.close();
conn.close();
}
public void test_wrap_2() throws Exception {
dataSource.getProxyFilters().clear();
Connection conn = dataSource.getConnection();
CallableStatement stmt = conn.prepareCall("select 1");
assertNotNull(stmt.unwrap(PreparedStatement.class));
assertEquals(MockCallableStatement.class, stmt.unwrap(CallableStatement.class).getClass());
stmt.close();
conn.close();
}
}
| DruidPooledCallableStatementTest |
java | google__error-prone | check_api/src/main/java/com/google/errorprone/util/ASTHelpers.java | {
"start": 65272,
"end": 66360
} | enum ____, being contained within an expression
* which is ultimately assigned to a static field.
*
* <p>This is very much a heuristic, and not fool-proof.
*/
public static boolean isInStaticInitializer(VisitorState state) {
return stream(state.getPath())
.anyMatch(
tree ->
(tree instanceof VariableTree && variableIsStaticFinal((VarSymbol) getSymbol(tree)))
|| (tree instanceof AssignmentTree assignmentTree
&& getSymbol(assignmentTree.getVariable()) instanceof VarSymbol varSymbol
&& variableIsStaticFinal(varSymbol)));
}
/**
* @deprecated use TargetType.targetType directly
*/
@Deprecated
public static @Nullable TargetType targetType(VisitorState state) {
return TargetType.targetType(state);
}
/**
* Whether the variable is (or should be regarded as) static final.
*
* <p>We regard instance fields within enums as "static final", as they will only have a finite
* number of instances tied to an (effectively) static final | fields |
java | apache__thrift | lib/java/src/main/java/org/apache/thrift/transport/sasl/FrameHeaderReader.java | {
"start": 1065,
"end": 2085
} | interface ____ {
/**
* As the thrift sasl specification states, all sasl messages (both for negotiatiing and for
* sending data) should have a header to indicate the size of the payload.
*
* @return size of the payload.
*/
int payloadSize();
/**
* @return The received bytes for the header.
* @throws IllegalStateException if isComplete returns false.
*/
byte[] toBytes();
/**
* @return true if this header has all its fields set.
*/
boolean isComplete();
/** Clear the header and make it available to read a new header. */
void clear();
/**
* (Nonblocking) Read fields from underlying transport layer.
*
* @param transport underlying transport.
* @return true if header is complete after read.
* @throws TSaslNegotiationException if fail to read a valid header of a sasl negotiation message.
* @throws TTransportException if io error.
*/
boolean read(TTransport transport) throws TSaslNegotiationException, TTransportException;
}
| FrameHeaderReader |
java | quarkusio__quarkus | integration-tests/reactive-messaging-rabbitmq-dyn/src/test/java/io/quarkus/it/rabbitmq/RabbitMQConnectorDynCredsTest.java | {
"start": 913,
"end": 2310
} | class ____ implements QuarkusTestResourceLifecycleManager {
RabbitMQContainer rabbit;
@Override
public Map<String, String> start() {
String username = "tester";
String password = RandomStringUtils.insecure().next(10);
rabbit = new RabbitMQContainer(DockerImageName.parse("rabbitmq:3.12-management"))
.withNetwork(Network.SHARED)
.withNetworkAliases("rabbitmq")
.withUser(username, password)
.withPermission("/", username, ".*", ".*", ".*");
rabbit.start();
return Map.of(
"rabbitmq-host", rabbit.getHost(),
"rabbitmq-port", rabbit.getAmqpPort().toString(),
"rabbitmq-username", "invalid",
"rabbitmq-password", "invalid",
"test-creds-provider.username", username,
"test-creds-provider.password", password);
}
@Override
public void stop() {
rabbit.stop();
}
}
protected static final TypeRef<List<Person>> TYPE_REF = new TypeRef<List<Person>>() {
};
@Test
public void test() {
await().atMost(30, SECONDS)
.untilAsserted(() -> Assertions.assertEquals(6, get("/rabbitmq/people").as(TYPE_REF).size()));
}
}
| RabbitMQResource |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java | {
"start": 2552,
"end": 21013
} | class ____ extends FileSystem {
protected FileSystem fs;
protected String swapScheme;
/*
* so that extending classes can define it
*/
public FilterFileSystem() {
}
public FilterFileSystem(FileSystem fs) {
this.fs = fs;
this.statistics = fs.statistics;
}
/**
* Get the raw file system
* @return FileSystem being filtered
*/
public FileSystem getRawFileSystem() {
return fs;
}
/** Called after a new FileSystem instance is constructed.
* @param name a uri whose authority section names the host, port, etc.
* for this FileSystem
* @param conf the configuration
*/
@Override
public void initialize(URI name, Configuration conf) throws IOException {
super.initialize(name, conf);
// this is less than ideal, but existing filesystems sometimes neglect
// to initialize the embedded filesystem
if (fs.getConf() == null) {
fs.initialize(name, conf);
}
String scheme = name.getScheme();
if (!scheme.equals(fs.getUri().getScheme())) {
swapScheme = scheme;
}
}
/** Returns a URI whose scheme and authority identify this FileSystem.*/
@Override
public URI getUri() {
return fs.getUri();
}
@Override
protected URI getCanonicalUri() {
return fs.getCanonicalUri();
}
@Override
protected URI canonicalizeUri(URI uri) {
return fs.canonicalizeUri(uri);
}
/** Make sure that a path specifies a FileSystem. */
@Override
public Path makeQualified(Path path) {
Path fqPath = fs.makeQualified(path);
// swap in our scheme if the filtered fs is using a different scheme
if (swapScheme != null) {
try {
// NOTE: should deal with authority, but too much other stuff is broken
fqPath = new Path(
new URI(swapScheme, fqPath.toUri().getSchemeSpecificPart(), null)
);
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
}
return fqPath;
}
///////////////////////////////////////////////////////////////
// FileSystem
///////////////////////////////////////////////////////////////
/** Check that a Path belongs to this FileSystem. */
@Override
protected void checkPath(Path path) {
fs.checkPath(path);
}
@Override
public BlockLocation[] getFileBlockLocations(FileStatus file, long start,
long len) throws IOException {
return fs.getFileBlockLocations(file, start, len);
}
@Override
public Path resolvePath(final Path p) throws IOException {
return fs.resolvePath(p);
}
/**
* Opens an FSDataInputStream at the indicated Path.
* @param f the file name to open
* @param bufferSize the size of the buffer to be used.
*/
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
return fs.open(f, bufferSize);
}
@Override
public FSDataInputStream open(PathHandle fd, int bufferSize)
throws IOException {
return fs.open(fd, bufferSize);
}
@Override
protected PathHandle createPathHandle(FileStatus stat, HandleOpt... opts) {
return fs.getPathHandle(stat, opts);
}
@Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
return fs.append(f, bufferSize, progress);
}
@Override
public void concat(Path f, Path[] psrcs) throws IOException {
fs.concat(f, psrcs);
}
@Override
public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
return fs.create(f, permission,
overwrite, bufferSize, replication, blockSize, progress);
}
@Override
public FSDataOutputStream create(Path f,
FsPermission permission,
EnumSet<CreateFlag> flags,
int bufferSize,
short replication,
long blockSize,
Progressable progress,
ChecksumOpt checksumOpt) throws IOException {
return fs.create(f, permission,
flags, bufferSize, replication, blockSize, progress, checksumOpt);
}
@Override
protected RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f,
final PathFilter filter)
throws FileNotFoundException, IOException {
return fs.listLocatedStatus(f, filter);
}
@Override
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
return fs.createNonRecursive(f, permission, flags, bufferSize, replication, blockSize,
progress);
}
/**
* Set replication for an existing file.
*
* @param src file name
* @param replication new replication
* @throws IOException raised on errors performing I/O.
* @return true if successful;
* false if file does not exist or is a directory
*/
@Override
public boolean setReplication(Path src, short replication) throws IOException {
return fs.setReplication(src, replication);
}
/**
* Renames Path src to Path dst. Can take place on local fs
* or remote DFS.
*/
@Override
public boolean rename(Path src, Path dst) throws IOException {
return fs.rename(src, dst);
}
@Override
protected void rename(Path src, Path dst, Rename... options)
throws IOException {
fs.rename(src, dst, options);
}
@Override
public boolean truncate(Path f, final long newLength) throws IOException {
return fs.truncate(f, newLength);
}
/** Delete a file */
@Override
public boolean delete(Path f, boolean recursive) throws IOException {
return fs.delete(f, recursive);
}
/** List files in a directory. */
@Override
public FileStatus[] listStatus(Path f) throws IOException {
return fs.listStatus(f);
}
@Override
public RemoteIterator<Path> listCorruptFileBlocks(Path path)
throws IOException {
return fs.listCorruptFileBlocks(path);
}
/** List files and its block locations in a directory. */
@Override
public RemoteIterator<LocatedFileStatus> listLocatedStatus(Path f)
throws IOException {
return fs.listLocatedStatus(f);
}
/** Return a remote iterator for listing in a directory */
@Override
public RemoteIterator<FileStatus> listStatusIterator(Path f)
throws IOException {
return fs.listStatusIterator(f);
}
@Override
public Path getHomeDirectory() {
return fs.getHomeDirectory();
}
/**
* Set the current working directory for the given file system. All relative
* paths will be resolved relative to it.
*
* @param newDir new dir.
*/
@Override
public void setWorkingDirectory(Path newDir) {
fs.setWorkingDirectory(newDir);
}
/**
* Get the current working directory for the given file system
*
* @return the directory pathname
*/
@Override
public Path getWorkingDirectory() {
return fs.getWorkingDirectory();
}
@Override
protected Path getInitialWorkingDirectory() {
return fs.getInitialWorkingDirectory();
}
@Override
public FsStatus getStatus(Path p) throws IOException {
return fs.getStatus(p);
}
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
return fs.mkdirs(f, permission);
}
@Override
public boolean mkdirs(Path f) throws IOException {
return fs.mkdirs(f);
}
/**
* The src file is on the local disk. Add it to FS at
* the given dst name.
* delSrc indicates if the source should be removed
*/
@Override
public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
throws IOException {
fs.copyFromLocalFile(delSrc, src, dst);
}
/**
* The src files are on the local disk. Add it to FS at
* the given dst name.
* delSrc indicates if the source should be removed
*/
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite,
Path[] srcs, Path dst)
throws IOException {
fs.copyFromLocalFile(delSrc, overwrite, srcs, dst);
}
/**
* The src file is on the local disk. Add it to FS at
* the given dst name.
* delSrc indicates if the source should be removed
*/
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite,
Path src, Path dst)
throws IOException {
fs.copyFromLocalFile(delSrc, overwrite, src, dst);
}
/**
* The src file is under FS, and the dst is on the local disk.
* Copy it from FS control to the local dst name.
* delSrc indicates if the src will be removed or not.
*/
@Override
public void copyToLocalFile(boolean delSrc, Path src, Path dst)
throws IOException {
fs.copyToLocalFile(delSrc, src, dst);
}
/**
* Returns a local File that the user can write output to. The caller
* provides both the eventual FS target name and the local working
* file. If the FS is local, we write directly into the target. If
* the FS is remote, we write into the tmp local area.
*/
@Override
public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
throws IOException {
return fs.startLocalOutput(fsOutputFile, tmpLocalFile);
}
/**
* Called when we're all done writing to the target. A local FS will
* do nothing, because we've written to exactly the right place. A remote
* FS will copy the contents of tmpLocalFile to the correct target at
* fsOutputFile.
*/
@Override
public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
throws IOException {
fs.completeLocalOutput(fsOutputFile, tmpLocalFile);
}
/** Return the total size of all files in the filesystem.*/
@Override
public long getUsed() throws IOException{
return fs.getUsed();
}
/** Return the total size of all files from a specified path.*/
@Override
public long getUsed(Path path) throws IOException {
return fs.getUsed(path);
}
@Override
public long getDefaultBlockSize() {
return fs.getDefaultBlockSize();
}
@Override
public short getDefaultReplication() {
return fs.getDefaultReplication();
}
@Override
public FsServerDefaults getServerDefaults() throws IOException {
return fs.getServerDefaults();
}
// path variants delegate to underlying filesystem
@Override
public long getDefaultBlockSize(Path f) {
return fs.getDefaultBlockSize(f);
}
@Override
public short getDefaultReplication(Path f) {
return fs.getDefaultReplication(f);
}
@Override
public FsServerDefaults getServerDefaults(Path f) throws IOException {
return fs.getServerDefaults(f);
}
/**
* Get file status.
*/
@Override
public FileStatus getFileStatus(Path f) throws IOException {
return fs.getFileStatus(f);
}
@Override
public void msync() throws IOException, UnsupportedOperationException {
fs.msync();
}
@Override
public void access(Path path, FsAction mode) throws AccessControlException,
FileNotFoundException, IOException {
fs.access(path, mode);
}
public void createSymlink(final Path target, final Path link,
final boolean createParent) throws AccessControlException,
FileAlreadyExistsException, FileNotFoundException,
ParentNotDirectoryException, UnsupportedFileSystemException,
IOException {
fs.createSymlink(target, link, createParent);
}
public FileStatus getFileLinkStatus(final Path f)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
return fs.getFileLinkStatus(f);
}
public boolean supportsSymlinks() {
return fs.supportsSymlinks();
}
public Path getLinkTarget(Path f) throws IOException {
return fs.getLinkTarget(f);
}
protected Path resolveLink(Path f) throws IOException {
return fs.resolveLink(f);
}
@Override
public FileChecksum getFileChecksum(Path f) throws IOException {
return fs.getFileChecksum(f);
}
@Override
public FileChecksum getFileChecksum(Path f, long length) throws IOException {
return fs.getFileChecksum(f, length);
}
@Override
public void setVerifyChecksum(boolean verifyChecksum) {
fs.setVerifyChecksum(verifyChecksum);
}
@Override
public void setWriteChecksum(boolean writeChecksum) {
fs.setWriteChecksum(writeChecksum);
}
@Override
public Configuration getConf() {
return fs.getConf();
}
@Override
public void close() throws IOException {
super.close();
fs.close();
}
@Override
public void setOwner(Path p, String username, String groupname
) throws IOException {
fs.setOwner(p, username, groupname);
}
@Override
public void setTimes(Path p, long mtime, long atime
) throws IOException {
fs.setTimes(p, mtime, atime);
}
@Override
public void setPermission(Path p, FsPermission permission
) throws IOException {
fs.setPermission(p, permission);
}
@Override
protected FSDataOutputStream primitiveCreate(Path f,
FsPermission absolutePermission, EnumSet<CreateFlag> flag,
int bufferSize, short replication, long blockSize,
Progressable progress, ChecksumOpt checksumOpt)
throws IOException {
return fs.primitiveCreate(f, absolutePermission, flag,
bufferSize, replication, blockSize, progress, checksumOpt);
}
@Override
@SuppressWarnings("deprecation")
protected boolean primitiveMkdir(Path f, FsPermission abdolutePermission)
throws IOException {
return fs.primitiveMkdir(f, abdolutePermission);
}
@Override // FileSystem
public FileSystem[] getChildFileSystems() {
return new FileSystem[]{fs};
}
@Override // FileSystem
public Path createSnapshot(Path path, String snapshotName)
throws IOException {
return fs.createSnapshot(path, snapshotName);
}
@Override // FileSystem
public void renameSnapshot(Path path, String snapshotOldName,
String snapshotNewName) throws IOException {
fs.renameSnapshot(path, snapshotOldName, snapshotNewName);
}
@Override // FileSystem
public void deleteSnapshot(Path path, String snapshotName)
throws IOException {
fs.deleteSnapshot(path, snapshotName);
}
@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
fs.modifyAclEntries(path, aclSpec);
}
@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
fs.removeAclEntries(path, aclSpec);
}
@Override
public void removeDefaultAcl(Path path) throws IOException {
fs.removeDefaultAcl(path);
}
@Override
public void removeAcl(Path path) throws IOException {
fs.removeAcl(path);
}
@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
fs.setAcl(path, aclSpec);
}
@Override
public AclStatus getAclStatus(Path path) throws IOException {
return fs.getAclStatus(path);
}
@Override
public void setXAttr(Path path, String name, byte[] value)
throws IOException {
fs.setXAttr(path, name, value);
}
@Override
public void setXAttr(Path path, String name, byte[] value,
EnumSet<XAttrSetFlag> flag) throws IOException {
fs.setXAttr(path, name, value, flag);
}
@Override
public byte[] getXAttr(Path path, String name) throws IOException {
return fs.getXAttr(path, name);
}
@Override
public Map<String, byte[]> getXAttrs(Path path) throws IOException {
return fs.getXAttrs(path);
}
@Override
public Map<String, byte[]> getXAttrs(Path path, List<String> names)
throws IOException {
return fs.getXAttrs(path, names);
}
@Override
public List<String> listXAttrs(Path path) throws IOException {
return fs.listXAttrs(path);
}
@Override
public void removeXAttr(Path path, String name) throws IOException {
fs.removeXAttr(path, name);
}
@Override
public void satisfyStoragePolicy(Path src) throws IOException {
fs.satisfyStoragePolicy(src);
}
@Override
public void setStoragePolicy(Path src, String policyName)
throws IOException {
fs.setStoragePolicy(src, policyName);
}
@Override
public void unsetStoragePolicy(Path src) throws IOException {
fs.unsetStoragePolicy(src);
}
@Override
public BlockStoragePolicySpi getStoragePolicy(final Path src)
throws IOException {
return fs.getStoragePolicy(src);
}
@Override
public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
throws IOException {
return fs.getAllStoragePolicies();
}
@Override
public Path getTrashRoot(Path path) {
return fs.getTrashRoot(path);
}
@Override
public Collection<FileStatus> getTrashRoots(boolean allUsers) {
return fs.getTrashRoots(allUsers);
}
@Override
public FSDataOutputStreamBuilder createFile(Path path) {
return fs.createFile(path);
}
@Override
public FSDataOutputStreamBuilder appendFile(Path path) {
return fs.appendFile(path);
}
@Override
public FutureDataInputStreamBuilder openFile(final Path path)
throws IOException, UnsupportedOperationException {
return fs.openFile(path);
}
@Override
public FutureDataInputStreamBuilder openFile(final PathHandle pathHandle)
throws IOException, UnsupportedOperationException {
return fs.openFile(pathHandle);
}
@Override
protected CompletableFuture<FSDataInputStream> openFileWithOptions(
final Path path,
final OpenFileParameters parameters) throws IOException {
return fs.openFileWithOptions(path, parameters);
}
@Override
protected CompletableFuture<FSDataInputStream> openFileWithOptions(
final PathHandle pathHandle,
final OpenFileParameters parameters) throws IOException {
return fs.openFileWithOptions(pathHandle, parameters);
}
@Override
public Path getEnclosingRoot(Path path) throws IOException {
return fs.getEnclosingRoot(path);
}
@Override
public boolean hasPathCapability(final Path path, final String capability)
throws IOException {
switch (validatePathCapabilityArgs(makeQualified(path), capability)) {
case CommonPathCapabilities.FS_MULTIPART_UPLOADER:
case CommonPathCapabilities.FS_EXPERIMENTAL_BATCH_LISTING:
// operations known to be unsupported, irrespective of what
// the wrapped | FilterFileSystem |
java | apache__spark | sql/core/src/test/java/test/org/apache/spark/sql/connector/catalog/functions/JavaStrLen.java | {
"start": 2533,
"end": 2743
} | class ____ extends JavaStrLenBase {
@Override
public Integer produceResult(InternalRow input) {
String str = input.getString(0);
return str.length();
}
}
public static | JavaStrLenDefault |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/createTable/MySqlCreateTableTest145_tail_hint.java | {
"start": 458,
"end": 7837
} | class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "CREATE TABLE `log_fake` (\n" +
" `id` varchar(37) NOT NULL COMMENT 'uuid',\n" +
" `merchant_id` varchar(37) DEFAULT NULL COMMENT '商户id',\n" +
" `type` int(11) NOT NULL COMMENT 'type字段决定payload如何解析',\n" +
" `payload` blob COMMENT 'payload存放以JSON格式编码的系统事件,例如订单成功支付。',\n" +
" `processed` tinyint(1) unsigned NOT NULL DEFAULT '0' COMMENT '是否已经处理(下游业务逻辑完成运算并更新数据或者同步到持久化的队列)',\n" +
" `partition` int(10) unsigned NOT NULL COMMENT '分区ID(0-999)。多线程方式处理日志记录的时候,防止同一条记录被重复处理。',\n" +
" `action_id` varchar(37) DEFAULT NULL COMMENT '外部业务id 比如提现记录id',\n" +
" `ctime` bigint(20) DEFAULT NULL,\n" +
" `mtime` bigint(20) DEFAULT NULL,\n" +
" `version` bigint(20) unsigned NOT NULL,\n" +
" `deleted` tinyint(1) NOT NULL DEFAULT '0',\n" +
" KEY `id` (`id`),\n" +
" KEY `log_processed` (`processed`),\n" +
" KEY `log_ctime` (`ctime`),\n" +
" KEY `log_merchant_id_ctime` (`merchant_id`,`ctime`),\n" +
" KEY `log_action_id` (`action_id`)\n" +
") ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='日志记录用于驱动下游逻辑(余额更新)和外部系统的数据同步(积分)。'\n" +
"/*!50100 PARTITION BY RANGE (`ctime`)\n" +
"(PARTITION p201804 VALUES LESS THAN (1525104000000) ENGINE = InnoDB,\n" +
" PARTITION p201805 VALUES LESS THAN (1527782400000) ENGINE = InnoDB,\n" +
" PARTITION p201806 VALUES LESS THAN (1530374400000) ENGINE = InnoDB,\n" +
" PARTITION p201807 VALUES LESS THAN (1533052800000) ENGINE = InnoDB,\n" +
" PARTITION p201808 VALUES LESS THAN (1535731200000) ENGINE = InnoDB,\n" +
" PARTITION p201809 VALUES LESS THAN (1538323200000) ENGINE = InnoDB,\n" +
" PARTITION p201810 VALUES LESS THAN (1541001600000) ENGINE = InnoDB,\n" +
" PARTITION p201811 VALUES LESS THAN (1543593600000) ENGINE = InnoDB,\n" +
" PARTITION p201812 VALUES LESS THAN (1546272000000) ENGINE = InnoDB,\n" +
" PARTITION pmax VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */ dbpartition by hash(`merchant_id`)";
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
MySqlCreateTableStatement stmt = (MySqlCreateTableStatement) statementList.get(0);
assertEquals(1, statementList.size());
assertEquals("CREATE TABLE `log_fake` (\n" +
"\t`id` varchar(37) NOT NULL COMMENT 'uuid',\n" +
"\t`merchant_id` varchar(37) DEFAULT NULL COMMENT '商户id',\n" +
"\t`type` int(11) NOT NULL COMMENT 'type字段决定payload如何解析',\n" +
"\t`payload` blob COMMENT 'payload存放以JSON格式编码的系统事件,例如订单成功支付。',\n" +
"\t`processed` tinyint(1) UNSIGNED NOT NULL DEFAULT '0' COMMENT '是否已经处理(下游业务逻辑完成运算并更新数据或者同步到持久化的队列)',\n" +
"\t`partition` int(10) UNSIGNED NOT NULL COMMENT '分区ID(0-999)。多线程方式处理日志记录的时候,防止同一条记录被重复处理。',\n" +
"\t`action_id` varchar(37) DEFAULT NULL COMMENT '外部业务id 比如提现记录id',\n" +
"\t`ctime` bigint(20) DEFAULT NULL,\n" +
"\t`mtime` bigint(20) DEFAULT NULL,\n" +
"\t`version` bigint(20) UNSIGNED NOT NULL,\n" +
"\t`deleted` tinyint(1) NOT NULL DEFAULT '0',\n" +
"\tKEY `id` (`id`),\n" +
"\tKEY `log_processed` (`processed`),\n" +
"\tKEY `log_ctime` (`ctime`),\n" +
"\tKEY `log_merchant_id_ctime` (`merchant_id`, `ctime`),\n" +
"\tKEY `log_action_id` (`action_id`)\n" +
") ENGINE = InnoDB CHARSET = utf8 COMMENT '日志记录用于驱动下游逻辑(余额更新)和外部系统的数据同步(积分)。'\n" +
"DBPARTITION BY hash(`merchant_id`) /*!50100 PARTITION BY RANGE (`ctime`)\n" +
"(PARTITION p201804 VALUES LESS THAN (1525104000000) ENGINE = InnoDB,\n" +
" PARTITION p201805 VALUES LESS THAN (1527782400000) ENGINE = InnoDB,\n" +
" PARTITION p201806 VALUES LESS THAN (1530374400000) ENGINE = InnoDB,\n" +
" PARTITION p201807 VALUES LESS THAN (1533052800000) ENGINE = InnoDB,\n" +
" PARTITION p201808 VALUES LESS THAN (1535731200000) ENGINE = InnoDB,\n" +
" PARTITION p201809 VALUES LESS THAN (1538323200000) ENGINE = InnoDB,\n" +
" PARTITION p201810 VALUES LESS THAN (1541001600000) ENGINE = InnoDB,\n" +
" PARTITION p201811 VALUES LESS THAN (1543593600000) ENGINE = InnoDB,\n" +
" PARTITION p201812 VALUES LESS THAN (1546272000000) ENGINE = InnoDB,\n" +
" PARTITION pmax VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */", stmt.toString());
assertEquals("create table `log_fake` (\n" +
"\t`id` varchar(37) not null comment 'uuid',\n" +
"\t`merchant_id` varchar(37) default null comment '商户id',\n" +
"\t`type` int(11) not null comment 'type字段决定payload如何解析',\n" +
"\t`payload` blob comment 'payload存放以JSON格式编码的系统事件,例如订单成功支付。',\n" +
"\t`processed` tinyint(1) unsigned not null default '0' comment '是否已经处理(下游业务逻辑完成运算并更新数据或者同步到持久化的队列)',\n" +
"\t`partition` int(10) unsigned not null comment '分区ID(0-999)。多线程方式处理日志记录的时候,防止同一条记录被重复处理。',\n" +
"\t`action_id` varchar(37) default null comment '外部业务id 比如提现记录id',\n" +
"\t`ctime` bigint(20) default null,\n" +
"\t`mtime` bigint(20) default null,\n" +
"\t`version` bigint(20) unsigned not null,\n" +
"\t`deleted` tinyint(1) not null default '0',\n" +
"\tkey `id` (`id`),\n" +
"\tkey `log_processed` (`processed`),\n" +
"\tkey `log_ctime` (`ctime`),\n" +
"\tkey `log_merchant_id_ctime` (`merchant_id`, `ctime`),\n" +
"\tkey `log_action_id` (`action_id`)\n" +
") engine = InnoDB charset = utf8 comment '日志记录用于驱动下游逻辑(余额更新)和外部系统的数据同步(积分)。'\n" +
"dbpartition by hash(`merchant_id`) /*!50100 PARTITION BY RANGE (`ctime`)\n" +
"(PARTITION p201804 VALUES LESS THAN (1525104000000) ENGINE = InnoDB,\n" +
" PARTITION p201805 VALUES LESS THAN (1527782400000) ENGINE = InnoDB,\n" +
" PARTITION p201806 VALUES LESS THAN (1530374400000) ENGINE = InnoDB,\n" +
" PARTITION p201807 VALUES LESS THAN (1533052800000) ENGINE = InnoDB,\n" +
" PARTITION p201808 VALUES LESS THAN (1535731200000) ENGINE = InnoDB,\n" +
" PARTITION p201809 VALUES LESS THAN (1538323200000) ENGINE = InnoDB,\n" +
" PARTITION p201810 VALUES LESS THAN (1541001600000) ENGINE = InnoDB,\n" +
" PARTITION p201811 VALUES LESS THAN (1543593600000) ENGINE = InnoDB,\n" +
" PARTITION p201812 VALUES LESS THAN (1546272000000) ENGINE = InnoDB,\n" +
" PARTITION pmax VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */", stmt.toLowerCaseString());
}
}
| MySqlCreateTableTest145_tail_hint |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/oceanbase/OceanbaseSelectTest.java | {
"start": 954,
"end": 2678
} | class ____ extends MysqlTest {
protected final DbType dbType = JdbcConstants.MYSQL;
public void test_0() throws Exception {
String sql = "SELECT EmpID, EmpName, MgrId, Level FROM emp START WITH MgrId IS NULL CONNECT BY PRIOR EmpId = MgrId";
List<SQLStatement> stmtList = SQLUtils.parseStatements(sql, dbType);
SQLStatement stmt = stmtList.get(0);
{
String result = SQLUtils.toSQLString(stmt, dbType);
assertEquals("SELECT EmpID, EmpName, MgrId, Level\n" +
"FROM emp\n" +
"START WITH MgrId IS NULL\n" +
"CONNECT BY PRIOR EmpId = MgrId", result);
}
{
String result = SQLUtils.toSQLString(stmt, dbType, SQLUtils.DEFAULT_LCASE_FORMAT_OPTION);
assertEquals("select EmpID, EmpName, MgrId, Level\n" +
"from emp\n" +
"start with MgrId is null\n" +
"connect by prior EmpId = MgrId", result);
}
assertEquals(1, stmtList.size());
SchemaStatVisitor visitor = SQLUtils.createSchemaStatVisitor(dbType);
stmt.accept(visitor);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
System.out.println("coditions : " + visitor.getConditions());
System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(1, visitor.getTables().size());
assertEquals(3, visitor.getColumns().size());
assertEquals(3, visitor.getConditions().size());
// assertTrue(visitor.getTables().containsKey(new TableStat.Name("t_basic_store")));
}
}
| OceanbaseSelectTest |
java | spring-projects__spring-security | web/src/test/java/org/springframework/security/web/authentication/logout/ForwardLogoutSuccessHandlerTests.java | {
"start": 1179,
"end": 2213
} | class ____ {
@Test
public void invalidTargetUrl() {
String targetUrl = "not.valid";
assertThatIllegalArgumentException().isThrownBy(() -> new ForwardLogoutSuccessHandler(targetUrl))
.withMessage("'" + targetUrl + "' is not a valid target URL");
}
@Test
public void emptyTargetUrl() {
String targetUrl = " ";
assertThatIllegalArgumentException().isThrownBy(() -> new ForwardLogoutSuccessHandler(targetUrl))
.withMessage("'" + targetUrl + "' is not a valid target URL");
}
@Test
public void logoutSuccessIsHandled() throws Exception {
String targetUrl = "/login?logout";
ForwardLogoutSuccessHandler handler = new ForwardLogoutSuccessHandler(targetUrl);
MockHttpServletRequest request = new MockHttpServletRequest();
MockHttpServletResponse response = new MockHttpServletResponse();
Authentication authentication = mock(Authentication.class);
handler.onLogoutSuccess(request, response, authentication);
assertThat(response.getForwardedUrl()).isEqualTo(targetUrl);
}
}
| ForwardLogoutSuccessHandlerTests |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/functions/Functions.java | {
"start": 18487,
"end": 19083
} | class ____<T1, T2, T3, T4, T5, R> implements Function<Object[], R> {
private final Function5<T1, T2, T3, T4, T5, R> f;
Array5Func(Function5<T1, T2, T3, T4, T5, R> f) {
this.f = f;
}
@SuppressWarnings("unchecked")
@Override
public R apply(Object[] a) throws Throwable {
if (a.length != 5) {
throw new IllegalArgumentException("Array of size 5 expected but got " + a.length);
}
return f.apply((T1)a[0], (T2)a[1], (T3)a[2], (T4)a[3], (T5)a[4]);
}
}
static final | Array5Func |
java | mapstruct__mapstruct | integrationtest/src/test/resources/autoValueBuilderTest/src/main/java/org/mapstruct/itest/auto/value/PersonDto.java | {
"start": 196,
"end": 884
} | class ____ {
private String name;
private int age;
private AddressDto address;
public PersonDto() {
}
public PersonDto(String name, int age, AddressDto address) {
this.name = name;
this.age = age;
this.address = address;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public int getAge() {
return age;
}
public void setAge(int age) {
this.age = age;
}
public AddressDto getAddress() {
return address;
}
public void setAddress(AddressDto address) {
this.address = address;
}
}
| PersonDto |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/convert/BeanConversionsTest.java | {
"start": 1884,
"end": 2085
} | class ____ {
public int x, y;
public ConvertingBean(int v1, int v2) {
x = v1;
y = v2;
}
}
@JsonPropertyOrder({ "a", "b" })
public static | ConvertingBean |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_1596/domain/Item.java | {
"start": 214,
"end": 265
} | class ____ {
public abstract String getId();
}
| Item |
java | apache__kafka | tools/src/main/java/org/apache/kafka/tools/TransactionsCommand.java | {
"start": 14607,
"end": 17667
} | class ____ extends TransactionsCommand {
static final List<String> HEADERS = List.of(
"CoordinatorId",
"TransactionalId",
"ProducerId",
"ProducerEpoch",
"TransactionState",
"TransactionTimeoutMs",
"CurrentTransactionStartTimeMs",
"TransactionDurationMs",
"TopicPartitions"
);
DescribeTransactionsCommand(Time time) {
super(time);
}
@Override
public String name() {
return "describe";
}
@Override
public void addSubparser(Subparsers subparsers) {
Subparser subparser = subparsers.addParser(name())
.description("Describe the state of an active transactional-id.")
.help("describe the state of an active transactional-id");
subparser.addArgument("--transactional-id")
.help("transactional id")
.action(store())
.type(String.class)
.required(true);
}
@Override
public void execute(Admin admin, Namespace ns, PrintStream out) throws Exception {
String transactionalId = ns.getString("transactional_id");
final TransactionDescription result;
try {
result = admin.describeTransactions(Set.of(transactionalId))
.description(transactionalId)
.get();
} catch (ExecutionException e) {
printErrorAndExit("Failed to describe transaction state of " +
"transactional-id `" + transactionalId + "`", e.getCause());
return;
}
final String transactionDurationMsColumnValue;
final String transactionStartTimeMsColumnValue;
if (result.transactionStartTimeMs().isPresent()) {
long transactionStartTimeMs = result.transactionStartTimeMs().getAsLong();
transactionStartTimeMsColumnValue = String.valueOf(transactionStartTimeMs);
transactionDurationMsColumnValue = String.valueOf(time.milliseconds() - transactionStartTimeMs);
} else {
transactionStartTimeMsColumnValue = "None";
transactionDurationMsColumnValue = "None";
}
List<String> row = List.of(
String.valueOf(result.coordinatorId()),
transactionalId,
String.valueOf(result.producerId()),
String.valueOf(result.producerEpoch()),
result.state().toString(),
String.valueOf(result.transactionTimeoutMs()),
transactionStartTimeMsColumnValue,
transactionDurationMsColumnValue,
result.topicPartitions().stream().map(TopicPartition::toString).collect(Collectors.joining(","))
);
ToolsUtils.prettyPrintTable(HEADERS, List.of(row), out);
}
}
static | DescribeTransactionsCommand |
java | netty__netty | codec-classes-quic/src/main/java/io/netty/handler/codec/quic/QuicheRecvInfo.java | {
"start": 816,
"end": 2931
} | class ____ {
private QuicheRecvInfo() { }
/**
* Set the {@link InetSocketAddress} into the {@code quiche_recv_info} struct.
*
* <pre>
* typedef struct {
* struct sockaddr *from;
* socklen_t from_len;
* struct sockaddr *to;
* socklen_t to_len;
* } quiche_recv_info;
* </pre>
*
* @param memory the memory of {@code quiche_recv_info}.
* @param from the {@link InetSocketAddress} to write into {@code quiche_recv_info}.
* @param to the {@link InetSocketAddress} to write into {@code quiche_recv_info}.
*/
static void setRecvInfo(ByteBuffer memory, InetSocketAddress from, InetSocketAddress to) {
int position = memory.position();
try {
setAddress(memory, Quiche.SIZEOF_QUICHE_RECV_INFO, Quiche.QUICHE_RECV_INFO_OFFSETOF_FROM,
Quiche.QUICHE_RECV_INFO_OFFSETOF_FROM_LEN, from);
setAddress(memory, Quiche.SIZEOF_QUICHE_RECV_INFO + Quiche.SIZEOF_SOCKADDR_STORAGE,
Quiche.QUICHE_RECV_INFO_OFFSETOF_TO, Quiche.QUICHE_RECV_INFO_OFFSETOF_TO_LEN, to);
} finally {
memory.position(position);
}
}
private static void setAddress(ByteBuffer memory, int socketAddressOffset, int addrOffset, int lenOffset,
InetSocketAddress address) {
int position = memory.position();
try {
int sockaddrPosition = position + socketAddressOffset;
memory.position(sockaddrPosition);
long sockaddrMemoryAddress = Quiche.memoryAddressWithPosition(memory);
int len = SockaddrIn.setAddress(memory, address);
if (Quiche.SIZEOF_SIZE_T == 4) {
memory.putInt(position + addrOffset, (int) sockaddrMemoryAddress);
} else {
memory.putLong(position + addrOffset, sockaddrMemoryAddress);
}
Quiche.setPrimitiveValue(memory, position + lenOffset, Quiche.SIZEOF_SOCKLEN_T, len);
} finally {
memory.position(position);
}
}
}
| QuicheRecvInfo |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/util/diff/Delta.java | {
"start": 1121,
"end": 1418
} | class ____<T> {
public static final String DEFAULT_END = "]";
public static final String DEFAULT_START = "[";
/** The original chunk. */
private Chunk<T> original;
/** The revised chunk. */
private Chunk<T> revised;
/**
* Specifies the type of the delta.
*
*/
public | Delta |
java | apache__dubbo | dubbo-configcenter/dubbo-configcenter-nacos/src/test/java/org/apache/dubbo/configcenter/support/nacos/RetryTest.java | {
"start": 1511,
"end": 5114
} | class ____ {
private static ApplicationModel applicationModel = ApplicationModel.defaultModel();
@Test
void testRetryCreate() {
try (MockedStatic<NacosFactory> nacosFactoryMockedStatic = Mockito.mockStatic(NacosFactory.class)) {
AtomicInteger atomicInteger = new AtomicInteger(0);
ConfigService mock = new MockConfigService() {
@Override
public String getServerStatus() {
return atomicInteger.incrementAndGet() > 10 ? UP : DOWN;
}
};
nacosFactoryMockedStatic
.when(() -> NacosFactory.createConfigService((Properties) any()))
.thenReturn(mock);
URL url = URL.valueOf("nacos://127.0.0.1:8848")
.addParameter("nacos.retry", 5)
.addParameter("nacos.retry-wait", 10);
Assertions.assertThrows(
IllegalStateException.class, () -> new NacosDynamicConfiguration(url, applicationModel));
try {
new NacosDynamicConfiguration(url, applicationModel);
} catch (Throwable t) {
Assertions.fail(t);
}
}
}
@Test
void testDisable() {
try (MockedStatic<NacosFactory> nacosFactoryMockedStatic = Mockito.mockStatic(NacosFactory.class)) {
ConfigService mock = new MockConfigService() {
@Override
public String getServerStatus() {
return DOWN;
}
};
nacosFactoryMockedStatic
.when(() -> NacosFactory.createConfigService((Properties) any()))
.thenReturn(mock);
URL url = URL.valueOf("nacos://127.0.0.1:8848")
.addParameter("nacos.retry", 5)
.addParameter("nacos.retry-wait", 10)
.addParameter("nacos.check", "false");
try {
new NacosDynamicConfiguration(url, applicationModel);
} catch (Throwable t) {
Assertions.fail(t);
}
}
}
@Test
void testRequest() {
try (MockedStatic<NacosFactory> nacosFactoryMockedStatic = Mockito.mockStatic(NacosFactory.class)) {
AtomicInteger atomicInteger = new AtomicInteger(0);
ConfigService mock = new MockConfigService() {
@Override
public String getConfig(String dataId, String group, long timeoutMs) throws NacosException {
if (atomicInteger.incrementAndGet() > 10) {
return "";
} else {
throw new NacosException();
}
}
@Override
public String getServerStatus() {
return UP;
}
};
nacosFactoryMockedStatic
.when(() -> NacosFactory.createConfigService((Properties) any()))
.thenReturn(mock);
URL url = URL.valueOf("nacos://127.0.0.1:8848")
.addParameter("nacos.retry", 5)
.addParameter("nacos.retry-wait", 10);
Assertions.assertThrows(
IllegalStateException.class, () -> new NacosDynamicConfiguration(url, applicationModel));
try {
new NacosDynamicConfiguration(url, applicationModel);
} catch (Throwable t) {
Assertions.fail(t);
}
}
}
}
| RetryTest |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java | {
"start": 1044,
"end": 3752
} | class ____ {
private ColumnHelper() {
}
/**
* @param columnPrefixBytes The byte representation for the column prefix.
* Should not contain {@link Separator#QUALIFIERS}.
* @param qualifier for the remainder of the column.
* {@link Separator#QUALIFIERS} is permissible in the qualifier
* as it is joined only with the column prefix bytes.
* @return fully sanitized column qualifier that is a combination of prefix
* and qualifier. If prefix is null, the result is simply the encoded
* qualifier without any separator.
*/
public static byte[] getColumnQualifier(byte[] columnPrefixBytes,
String qualifier) {
// We don't want column names to have spaces / tabs.
byte[] encodedQualifier =
Separator.encode(qualifier, Separator.SPACE, Separator.TAB);
if (columnPrefixBytes == null) {
return encodedQualifier;
}
// Convert qualifier to lower case, strip of separators and tag on column
// prefix.
byte[] columnQualifier =
Separator.QUALIFIERS.join(columnPrefixBytes, encodedQualifier);
return columnQualifier;
}
/**
* @param columnPrefixBytes The byte representation for the column prefix.
* Should not contain {@link Separator#QUALIFIERS}.
* @param qualifier for the remainder of the column.
* @return fully sanitized column qualifier that is a combination of prefix
* and qualifier. If prefix is null, the result is simply the encoded
* qualifier without any separator.
*/
public static byte[] getColumnQualifier(byte[] columnPrefixBytes,
long qualifier) {
if (columnPrefixBytes == null) {
return Bytes.toBytes(qualifier);
}
// Convert qualifier to lower case, strip of separators and tag on column
// prefix.
byte[] columnQualifier =
Separator.QUALIFIERS.join(columnPrefixBytes, Bytes.toBytes(qualifier));
return columnQualifier;
}
/**
* @param columnPrefixBytes The byte representation for the column prefix.
* Should not contain {@link Separator#QUALIFIERS}.
* @param qualifier the byte representation for the remainder of the column.
* @return fully sanitized column qualifier that is a combination of prefix
* and qualifier. If prefix is null, the result is simply the encoded
* qualifier without any separator.
*/
public static byte[] getColumnQualifier(byte[] columnPrefixBytes,
byte[] qualifier) {
if (columnPrefixBytes == null) {
return qualifier;
}
byte[] columnQualifier =
Separator.QUALIFIERS.join(columnPrefixBytes, qualifier);
return columnQualifier;
}
}
| ColumnHelper |
java | quarkusio__quarkus | extensions/hibernate-search-orm-elasticsearch/deployment/src/test/java/io/quarkus/hibernate/search/orm/elasticsearch/test/configuration/NoConfigNoIndexedEntityTest.java | {
"start": 459,
"end": 1201
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest().setArchiveProducer(
() -> ShrinkWrap.create(JavaArchive.class)
.addAsResource("application-nohsearchconfig.properties", "application.properties"));
// When having no indexed entities, no configuration, no datasource,
// as long as the Hibernate Search beans are not injected anywhere,
// we should still be able to start the application.
@Test
public void testBootSucceedsButHibernateSearchDeactivated() {
// ... but Hibernate Search's beans should not be available.
assertThat(Arc.container().instance(SearchMapping.class).get()).isNull();
}
}
| NoConfigNoIndexedEntityTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/MethodCanBeStaticTest.java | {
"start": 7654,
"end": 7985
} | class ____ {
@SuppressWarnings("static-method")
private String f() {
return "";
}
}
""")
.doTest();
}
@Test
public void negativeOverride() {
testHelper
.addSourceLines(
"Test.java",
"""
| Test |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/runtime/operators/sink/committables/CommittableCollectorSerializer.java | {
"start": 1747,
"end": 5335
} | class ____<CommT>
implements SimpleVersionedSerializer<CommittableCollector<CommT>> {
private static final int MAGIC_NUMBER = 0xb91f252c;
private final SimpleVersionedSerializer<CommT> committableSerializer;
/** Default values are used to deserialize from Flink 1 that didn't store the information. */
private final int owningSubtaskId;
/** Default values are used to deserialize from Flink 1 that didn't store the information. */
private final int owningNumberOfSubtasks;
private final SinkCommitterMetricGroup metricGroup;
public CommittableCollectorSerializer(
SimpleVersionedSerializer<CommT> committableSerializer,
int owningSubtaskId,
int owningNumberOfSubtasks,
SinkCommitterMetricGroup metricGroup) {
this.committableSerializer = checkNotNull(committableSerializer);
this.owningSubtaskId = owningSubtaskId;
this.owningNumberOfSubtasks = owningNumberOfSubtasks;
this.metricGroup = metricGroup;
}
@Override
public int getVersion() {
return 2;
}
@Override
public byte[] serialize(CommittableCollector<CommT> committableCollector) throws IOException {
DataOutputSerializer out = new DataOutputSerializer(256);
out.writeInt(MAGIC_NUMBER);
serializeV2(committableCollector, out);
return out.getCopyOfBuffer();
}
@Override
public CommittableCollector<CommT> deserialize(int version, byte[] serialized)
throws IOException {
final DataInputDeserializer in = new DataInputDeserializer(serialized);
if (version == 1) {
return deserializeV1(in);
}
if (version == 2) {
validateMagicNumber(in);
return deserializeV2(in);
}
throw new IOException("Unrecognized version or corrupt state: " + version);
}
private CommittableCollector<CommT> deserializeV1(DataInputView in) throws IOException {
return CommittableCollector.ofLegacy(
SinkV1CommittableDeserializer.readVersionAndDeserializeList(
committableSerializer, in),
metricGroup);
}
private void serializeV2(
CommittableCollector<CommT> committableCollector, DataOutputView dataOutputView)
throws IOException {
SimpleVersionedSerialization.writeVersionAndSerializeList(
new CheckpointSimpleVersionedSerializer(),
new ArrayList<>(committableCollector.getCheckpointCommittables()),
dataOutputView);
}
private CommittableCollector<CommT> deserializeV2(DataInputDeserializer in) throws IOException {
List<CheckpointCommittableManagerImpl<CommT>> checkpoints =
SimpleVersionedSerialization.readVersionAndDeserializeList(
new CheckpointSimpleVersionedSerializer(), in);
return new CommittableCollector<>(
checkpoints.stream()
.collect(
Collectors.toMap(
CheckpointCommittableManagerImpl::getCheckpointId, e -> e)),
metricGroup);
}
private static void validateMagicNumber(DataInputView in) throws IOException {
final int magicNumber = in.readInt();
if (magicNumber != MAGIC_NUMBER) {
throw new IOException(
String.format("Corrupt data: Unexpected magic number %08X", magicNumber));
}
}
private | CommittableCollectorSerializer |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/api/extension/ExecutableInvokerIntegrationTests.java | {
"start": 3109,
"end": 3624
} | class ____ implements ParameterResolver {
@Override
public boolean supportsParameter(ParameterContext parameterContext, ExtensionContext extensionContext)
throws ParameterResolutionException {
return ExtensionContext.class.equals(parameterContext.getParameter().getType());
}
@Override
public Object resolveParameter(ParameterContext parameterContext, ExtensionContext extensionContext)
throws ParameterResolutionException {
return extensionContext;
}
}
}
| ExtensionContextParameterResolver |
java | apache__camel | components/camel-salesforce/camel-salesforce-component/src/main/java/org/apache/camel/component/salesforce/api/utils/Version.java | {
"start": 939,
"end": 3146
} | class ____ implements Comparable<Version> {
private static final Pattern VERSION_PATTERN = Pattern.compile("(\\d+)\\.(\\d+)");
private final int major;
private final int minor;
private Version(final int major, final int minor) {
this.major = major;
this.minor = minor;
}
public static Version create(final String version) {
final Matcher matcher = VERSION_PATTERN.matcher(version);
if (!matcher.matches()) {
throw new IllegalArgumentException("API version needs to be in <number>.<number> format, given: " + version);
}
final int major = Integer.parseInt(matcher.group(1));
final int minor = Integer.parseInt(matcher.group(2));
return new Version(major, minor);
}
@Override
public int compareTo(final Version other) {
final int majorCompare = Integer.compare(major, other.major);
if (majorCompare == 0) {
return Integer.compare(minor, other.minor);
} else {
return majorCompare;
}
}
@Override
public boolean equals(final Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof Version)) {
return false;
}
final Version other = (Version) obj;
return compareTo(other) == 0;
}
public int getMajor() {
return major;
}
public int getMinor() {
return minor;
}
@Override
public int hashCode() {
return 1 + 31 * (1 + 31 * major) + minor;
}
@Override
public String toString() {
return "v" + major + "." + minor;
}
public void requireAtLeast(final int requiredMajor, final int requiredMinor) {
final Version required = new Version(requiredMajor, requiredMinor);
if (this.compareTo(required) < 0) {
throw new UnsupportedOperationException(
"This operation requires API version at least " + requiredMajor + "." + requiredMinor
+ ", currently configured for " + major
+ "." + minor);
}
}
}
| Version |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/decorators/validation/DelegateDoesNotImplementDecoratedTypeTest.java | {
"start": 1126,
"end": 1192
} | class ____ {
}
@Priority(1)
@Decorator
static | MyBean |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/test/java/org/springframework/data/jpa/domain/sample/SampleWithIdClassIncludingEntity.java | {
"start": 2024,
"end": 2080
} | class ____$$PsudoProxy extends OtherEntity {}
}
| OtherEntity |
java | alibaba__fastjson | src/test/java/com/alibaba/json/test/benchmark/BenchmarkCase.java | {
"start": 101,
"end": 446
} | class ____ {
private final String name;
public BenchmarkCase(String name){
super();
this.name = name;
}
public String getName() {
return name;
}
public void init(Codec codec) throws Exception {
}
public abstract void execute(Codec codec) throws Exception;
}
| BenchmarkCase |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/filter/GenericFilterBean.java | {
"start": 2866,
"end": 3398
} | class ____ no dependency on the Spring
* {@link org.springframework.context.ApplicationContext} concept.
* Filters usually don't load their own context but rather access service
* beans from the Spring root application context, accessible via the
* filter's {@link #getServletContext() ServletContext} (see
* {@link org.springframework.web.context.support.WebApplicationContextUtils}).
*
* @author Juergen Hoeller
* @since 06.12.2003
* @see #addRequiredProperty
* @see #initFilterBean
* @see #doFilter
*/
public abstract | has |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/join/adaptive/AdaptiveJoinOperatorFactory.java | {
"start": 1694,
"end": 2036
} | interface ____. Due to runtime access visibility constraints with the table-planner module,
* the {@link AdaptiveJoin} object will be serialized during the Table Planner phase and will only
* be lazily deserialized before the dynamic generation of the JobGraph.
*
* @param <OUT> The output type of the operator
*/
@Internal
public | externally |
java | apache__camel | components/camel-xslt-saxon/src/test/java/org/apache/camel/component/xslt/SaxonXslIncludeEmptyHrefTest.java | {
"start": 1041,
"end": 1889
} | class ____ extends CamelTestSupport {
@Test
public void testXsltOutput() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("<?xml version=\"1.0\" encoding=\"UTF-8\"?><MyDate>February</MyDate>");
mock.message(0).body().isInstanceOf(String.class);
template.sendBody("direct:start", "<root>1</root>");
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.to("xslt-saxon:org/apache/camel/component/xslt/transform_includes_data.xsl")
.to("mock:result");
}
};
}
}
| SaxonXslIncludeEmptyHrefTest |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/ClassUtils.java | {
"start": 34683,
"end": 35002
} | class ____.
* @return the primitive class.
*/
static Class<?> getPrimitiveClass(final String className) {
return NAME_PRIMITIVE_MAP.get(className);
}
/**
* Gets the desired Method much like {@code Class.getMethod}, however it ensures that the returned Method is from a
* public | name |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/internal/verification/api/VerificationDataInOrderImpl.java | {
"start": 293,
"end": 1045
} | class ____ implements VerificationDataInOrder {
private final InOrderContext inOrder;
private final List<Invocation> allInvocations;
private final MatchableInvocation wanted;
public VerificationDataInOrderImpl(
InOrderContext inOrder, List<Invocation> allInvocations, MatchableInvocation wanted) {
this.inOrder = inOrder;
this.allInvocations = allInvocations;
this.wanted = wanted;
}
@Override
public List<Invocation> getAllInvocations() {
return allInvocations;
}
@Override
public InOrderContext getOrderingContext() {
return inOrder;
}
@Override
public MatchableInvocation getWanted() {
return wanted;
}
}
| VerificationDataInOrderImpl |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java | {
"start": 25807,
"end": 26154
} | class ____ implements ThreadFactory {
@Override
public Thread newThread(Runnable r) {
Thread t = new SubjectInheritingThread(r);
t.setName(String.format("%s-%d", THREAD_ID_PREFIX,
threadSequenceNumber.getAndIncrement()));
return t;
}
}
/**
* Upload block commands.
*/
private | UploaderThreadFactory |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/transaction/JtaGetTransactionThrowsExceptionTest.java | {
"start": 1115,
"end": 2057
} | class ____ {
@Test()
@JiraKey(value = "HHH-12487")
public void onCloseEntityManagerTest(EntityManagerFactoryScope scope) {
EntityManager em = createEntityManager( scope );
em.close();
assertThrows(
IllegalStateException.class,
() -> em.getTransaction(),
"Calling getTransaction on a JTA entity manager should throw an IllegalStateException"
);
}
@Test()
@JiraKey(value = "HHH-12487")
public void onOpenEntityManagerTest(EntityManagerFactoryScope scope) {
EntityManager em = createEntityManager( scope );
try {
assertThrows(
IllegalStateException.class,
() -> em.getTransaction(),
"Calling getTransaction on a JTA entity manager should throw an IllegalStateException"
);
}
finally {
em.close();
}
}
private EntityManager createEntityManager(EntityManagerFactoryScope scope) {
return scope.getEntityManagerFactory().createEntityManager();
}
}
| JtaGetTransactionThrowsExceptionTest |
java | apache__camel | components/camel-smb/src/test/java/org/apache/camel/component/smb/SmbConsumerStartingDirectoryMustExistIT.java | {
"start": 1050,
"end": 2060
} | class ____ extends SmbServerTestSupport {
@Override
public boolean isUseRouteBuilder() {
return false;
}
private String getSbmUrl() {
return String.format(
"smb:%s/%s/doesnotexist?username=%s&password=%s&delete=true&initialDelay=3000&autoCreate=false&startingDirectoryMustExist=true",
service.address(), service.shareName(), service.userName(), service.password());
}
@Test
public void testStartingDirectoryMustExist() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() throws Exception {
from(getSbmUrl()).to("mock:result");
}
});
try {
context.start();
Assertions.fail();
} catch (GenericFileOperationFailedException e) {
Assertions.assertEquals("Starting directory does not exist: doesnotexist", e.getMessage());
}
}
}
| SmbConsumerStartingDirectoryMustExistIT |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/spi-deployment/src/main/java/io/quarkus/resteasy/reactive/server/spi/TargetJavaVersionBuildItem.java | {
"start": 305,
"end": 677
} | class ____ extends SimpleBuildItem {
private final TargetJavaVersion targetJavaVersion;
public TargetJavaVersionBuildItem(TargetJavaVersion targetJavaVersion) {
this.targetJavaVersion = Objects.requireNonNull(targetJavaVersion);
}
public TargetJavaVersion getTargetJavaVersion() {
return targetJavaVersion;
}
}
| TargetJavaVersionBuildItem |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/internal/util/xml/XMLMappingHelper.java | {
"start": 580,
"end": 655
} | class ____ parsing XML mappings, to be used in unit tests.
*/
public final | for |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/EntityTypeChangeAuditTrackingRevisionListenerTest.java | {
"start": 1416,
"end": 3833
} | class ____ extends EntityManagerFactoryBasedFunctionalTest {
@Override
protected Class<?>[] getAnnotatedClasses() {
return new Class<?>[] {
Customer.class,
CustomTrackingRevisionEntity.class,
EntityType.class
};
}
@Test
public void testLifecycle() {
try (final EntityManagerFactory testEmf = produceEntityManagerFactory()) {
testEmf.runInTransaction( entityManager -> {
Customer customer = new Customer();
customer.setId( 1L );
customer.setFirstName( "John" );
customer.setLastName( "Doe" );
entityManager.persist( customer );
} );
try (EntityManagerFactory entityManagerFactory = buildEntityManagerFactory()) {
entityManagerFactory.runInTransaction( entityManager -> {
ApplicationCustomer customer = new ApplicationCustomer();
customer.setId( 2L );
customer.setFirstName( "John" );
customer.setLastName( "Doe Jr." );
entityManager.persist( customer );
} );
entityManagerFactory.runInTransaction( entityManager -> {
//tag::envers-tracking-modified-entities-revchanges-query-example[]
AuditReader auditReader = AuditReaderFactory.get( entityManager );
List<Number> revisions = auditReader.getRevisions(
ApplicationCustomer.class,
1L
);
CustomTrackingRevisionEntity revEntity = auditReader.findRevision(
CustomTrackingRevisionEntity.class,
revisions.get( 0 )
);
Set<EntityType> modifiedEntityTypes = revEntity.getModifiedEntityTypes();
assertThat( modifiedEntityTypes ).hasSize( 1 );
EntityType entityType = modifiedEntityTypes.iterator().next();
assertThat( entityType.getEntityClassName() ).isEqualTo( Customer.class.getName() );
//end::envers-tracking-modified-entities-revchanges-query-example[]
} );
}
}
}
private EntityManagerFactory buildEntityManagerFactory() {
Map<Object, Object> settings = buildSettings();
settings.put(
AvailableSettings.LOADED_CLASSES,
Arrays.asList(
ApplicationCustomer.class,
CustomTrackingRevisionEntity.class,
EntityType.class
)
);
settings.put( AvailableSettings.HBM2DDL_AUTO, "update" );
return Bootstrap.getEntityManagerFactoryBuilder(
new TestingPersistenceUnitDescriptorImpl( getClass().getSimpleName() ),
settings )
.build();
}
@Audited
@Entity(name = "Customer")
public static | EntityTypeChangeAuditTrackingRevisionListenerTest |
java | micronaut-projects__micronaut-core | http-server-tck/src/main/java/io/micronaut/http/server/tck/tests/filter/ResponseFilterExceptionHandlerTest.java | {
"start": 2652,
"end": 2915
} | class ____ {
@ResponseFilter
public void doFilter(MutableHttpResponse<?> response) {
throw new FooException();
}
}
@Requires(property = "spec.name", value = SPEC_NAME)
@Controller("/foo")
static | ErrorThrowingFilter |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/plugin/PluginTest.java | {
"start": 2228,
"end": 2665
} | class ____ {
private static ThreadLocal<String> value = ThreadLocal.withInitial(() -> "PUBLIC");
public static void set(String tenantName) {
value.set(tenantName);
}
public static String get() {
return value.get();
}
private SchemaHolder() {
}
}
@Intercepts(@Signature(type = StatementHandler.class, method = "prepare", args = { Connection.class, Integer.class }))
public static | SchemaHolder |
java | micronaut-projects__micronaut-core | http-tck/src/main/java/io/micronaut/http/tck/TestScenario.java | {
"start": 955,
"end": 4492
} | class ____ {
private final String specName;
private final Map<String, Object> configuration;
private final RequestSupplier request;
private final BiConsumer<ServerUnderTest, HttpRequest<?>> assertion;
private TestScenario(String specName,
Map<String, Object> configuration,
RequestSupplier request,
BiConsumer<ServerUnderTest, HttpRequest<?>> assertion) {
this.specName = specName;
this.configuration = configuration;
this.request = request;
this.assertion = assertion;
}
/**
*
* @param specName Value for {@literal spec.name} property. Used to avoid bean pollution.
* @param configuration Test Scenario configuration
* @param request HTTP Request to be sent in the test scenario
* @param assertion Assertion for a request and server.
* @throws IOException Exception thrown while getting the server under test.
*/
public static void asserts(String specName,
Map<String, Object> configuration,
HttpRequest<?> request,
BiConsumer<ServerUnderTest, HttpRequest<?>> assertion) throws IOException {
TestScenario.builder()
.specName(specName)
.configuration(configuration)
.request(request)
.assertion(assertion)
.run();
}
/**
*
* @param specName Value for {@literal spec.name} property. Used to avoid bean pollution.
* @param configuration Test Scenario configuration
* @param request HTTP Request to be sent in the test scenario
* @param assertion Assertion for a request and server.
* @throws IOException Exception thrown while getting the server under test.
*/
public static void asserts(String specName,
Map<String, Object> configuration,
RequestSupplier request,
BiConsumer<ServerUnderTest, HttpRequest<?>> assertion) throws IOException {
TestScenario.builder()
.specName(specName)
.configuration(configuration)
.request(request)
.assertion(assertion)
.run();
}
/**
*
* @param specName Value for {@literal spec.name} property. Used to avoid bean pollution.
* @param request HTTP Request to be sent in the test scenario
* @param assertion Assertion for a request and server.
* @throws IOException Exception thrown while getting the server under test.
*/
public static void asserts(String specName,
HttpRequest<?> request,
BiConsumer<ServerUnderTest, HttpRequest<?>> assertion) throws IOException {
TestScenario.builder()
.specName(specName)
.request(request)
.assertion(assertion)
.run();
}
/**
*
* @return A Test Scenario builder.
*/
public static TestScenario.Builder builder() {
return new Builder();
}
private void run() throws IOException {
try (ServerUnderTest server = ServerUnderTestProviderUtils.getServerUnderTestProvider().getServer(specName, configuration)) {
if (assertion != null) {
assertion.accept(server, request.apply(server));
}
}
}
/**
* Test Scenario Builder.
*/
public static | TestScenario |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/impl/future/Composition.java | {
"start": 713,
"end": 1564
} | class ____<T, U> extends Operation<U> implements Completable<T> {
private final Function<? super T, Future<U>> successMapper;
private final Function<Throwable, Future<U>> failureMapper;
Composition(ContextInternal context, Function<? super T, Future<U>> successMapper, Function<Throwable, Future<U>> failureMapper) {
super(context);
this.successMapper = successMapper;
this.failureMapper = failureMapper;
}
@Override
public void complete(T result, Throwable failure) {
FutureBase<U> future;
try {
if (failure == null) {
future = (FutureBase<U>) successMapper.apply(result);
} else {
future = (FutureBase<U>) failureMapper.apply(failure);
}
} catch (Throwable e) {
completeInternal(null, e);
return;
}
future.addListener(this::completeInternal);
}
}
| Composition |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/util/ClassUtils.java | {
"start": 15141,
"end": 15796
} | class ____ check (typically an interface)
* @param classLoader the ClassLoader to check against
* (can be {@code null} in which case this method will always return {@code true})
*/
@Contract("_, null -> true")
public static boolean isVisible(Class<?> clazz, @Nullable ClassLoader classLoader) {
if (classLoader == null) {
return true;
}
try {
if (clazz.getClassLoader() == classLoader) {
return true;
}
}
catch (SecurityException ex) {
// Fall through to loadable check below
}
// Visible if same Class can be loaded from given ClassLoader
return isLoadable(clazz, classLoader);
}
/**
* Check whether the given | to |
java | apache__camel | components/camel-olingo4/camel-olingo4-component/src/generated/java/org/apache/camel/component/olingo4/internal/Olingo4ApiCollection.java | {
"start": 1923,
"end": 2055
} | class ____ {
private static final Olingo4ApiCollection INSTANCE = new Olingo4ApiCollection();
}
}
| Olingo4ApiCollectionHolder |
java | apache__kafka | tools/src/test/java/org/apache/kafka/tools/consumer/ConsoleShareConsumerOptionsTest.java | {
"start": 1614,
"end": 23353
} | class ____ {
@Test
public void shouldParseValidConsumerValidConfig() throws IOException {
String[] args = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
};
ConsoleShareConsumerOptions config = new ConsoleShareConsumerOptions(args);
assertEquals("localhost:9092", config.bootstrapServer());
assertEquals("test", config.topicArg());
assertFalse(config.rejectMessageOnError());
assertEquals(-1, config.maxMessages());
assertEquals(-1, config.timeoutMs());
}
@Test
public void shouldExitOnUnrecognizedNewConsumerOption() {
Exit.setExitProcedure((code, message) -> {
throw new IllegalArgumentException(message);
});
String[] args = new String[]{
"--new-consumer",
"--bootstrap-server", "localhost:9092",
"--topic", "test"
};
try {
assertThrows(IllegalArgumentException.class, () -> new ConsoleShareConsumerOptions(args));
} finally {
Exit.resetExitProcedure();
}
}
@Test
public void shouldParseValidConsumerConfigWithSessionTimeoutDeprecated() throws IOException {
String[] args = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--consumer-property", "session.timeout.ms=10000"
};
ConsoleShareConsumerOptions config = new ConsoleShareConsumerOptions(args);
Properties consumerProperties = config.consumerProps();
assertEquals("localhost:9092", config.bootstrapServer());
assertEquals("test", config.topicArg());
assertEquals("10000", consumerProperties.getProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG));
}
@Test
public void shouldParseConfigsFromFileDeprecated() throws IOException {
Map<String, String> configs = new HashMap<>();
configs.put("request.timeout.ms", "1000");
configs.put("group.id", "group1");
File propsFile = ToolsTestUtils.tempPropertiesFile(configs);
String[] args = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--consumer-config", propsFile.getAbsolutePath()
};
ConsoleShareConsumerOptions config = new ConsoleShareConsumerOptions(args);
// KafkaShareConsumer uses Utils.propsToMap to convert the properties to a map,
// so using the same method to check the map has the expected values
Map<String, Object> configMap = Utils.propsToMap(config.consumerProps());
assertEquals("1000", configMap.get("request.timeout.ms"));
assertEquals("group1", configMap.get("group.id"));
}
@Test
public void groupIdsProvidedInDifferentPlacesMustMatchDeprecated() throws IOException {
Exit.setExitProcedure((code, message) -> {
throw new IllegalArgumentException(message);
});
try {
// different in all three places
File propsFile = ToolsTestUtils.tempPropertiesFile(Map.of("group.id", "group-from-file"));
final String[] args = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--group", "group-from-arguments",
"--consumer-property", "group.id=group-from-properties",
"--consumer-config", propsFile.getAbsolutePath()
};
assertThrows(IllegalArgumentException.class, () -> new ConsoleShareConsumerOptions(args));
// the same in all three places
propsFile = ToolsTestUtils.tempPropertiesFile(Map.of("group.id", "test-group"));
final String[] args1 = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--group", "test-group",
"--consumer-property", "group.id=test-group",
"--consumer-config", propsFile.getAbsolutePath()
};
ConsoleShareConsumerOptions config = new ConsoleShareConsumerOptions(args1);
Properties props = config.consumerProps();
assertEquals("test-group", props.getProperty("group.id"));
// different via --consumer-property and --consumer-config
propsFile = ToolsTestUtils.tempPropertiesFile(Map.of("group.id", "group-from-file"));
final String[] args2 = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--consumer-property", "group.id=group-from-properties",
"--consumer-config", propsFile.getAbsolutePath()
};
assertThrows(IllegalArgumentException.class, () -> new ConsoleShareConsumerOptions(args2));
// different via --consumer-property and --group
final String[] args3 = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--group", "group-from-arguments",
"--consumer-property", "group.id=group-from-properties"
};
assertThrows(IllegalArgumentException.class, () -> new ConsoleShareConsumerOptions(args3));
// different via --group and --consumer-config
propsFile = ToolsTestUtils.tempPropertiesFile(Map.of("group.id", "group-from-file"));
final String[] args4 = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--group", "group-from-arguments",
"--consumer-config", propsFile.getAbsolutePath()
};
assertThrows(IllegalArgumentException.class, () -> new ConsoleShareConsumerOptions(args4));
// via --group only
final String[] args5 = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--group", "group-from-arguments"
};
config = new ConsoleShareConsumerOptions(args5);
props = config.consumerProps();
assertEquals("group-from-arguments", props.getProperty("group.id"));
} finally {
Exit.resetExitProcedure();
}
}
@Test
public void shouldExitIfNoTopicSpecified() {
Exit.setExitProcedure((code, message) -> {
throw new IllegalArgumentException(message);
});
String[] args = new String[]{
"--bootstrap-server", "localhost:9092"
};
try {
assertThrows(IllegalArgumentException.class, () -> new ConsoleShareConsumerOptions(args));
} finally {
Exit.resetExitProcedure();
}
}
@Test
public void testClientIdOverrideDeprecated() throws IOException {
String[] args = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--consumer-property", "client.id=consumer-1"
};
ConsoleShareConsumerOptions config = new ConsoleShareConsumerOptions(args);
Properties consumerProperties = config.consumerProps();
assertEquals("consumer-1", consumerProperties.getProperty(ConsumerConfig.CLIENT_ID_CONFIG));
}
@Test
public void testCustomPropertyShouldBePassedToConfigureMethodDeprecated() throws Exception {
String[] args = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--property", "print.key=true",
"--property", "key.deserializer=org.apache.kafka.test.MockDeserializer",
"--property", "key.deserializer.my-props=abc"
};
ConsoleShareConsumerOptions config = new ConsoleShareConsumerOptions(args);
assertInstanceOf(DefaultMessageFormatter.class, config.formatter());
assertTrue(config.formatterArgs().containsKey("key.deserializer.my-props"));
DefaultMessageFormatter formatter = (DefaultMessageFormatter) config.formatter();
assertTrue(formatter.keyDeserializer().isPresent());
assertInstanceOf(MockDeserializer.class, formatter.keyDeserializer().get());
MockDeserializer keyDeserializer = (MockDeserializer) formatter.keyDeserializer().get();
assertEquals(1, keyDeserializer.configs.size());
assertEquals("abc", keyDeserializer.configs.get("my-props"));
assertTrue(keyDeserializer.isKey);
}
@Test
public void testCustomPropertyShouldBePassedToConfigureMethod() throws Exception {
String[] args = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--formatter-property", "print.key=true",
"--formatter-property", "key.deserializer=org.apache.kafka.test.MockDeserializer",
"--formatter-property", "key.deserializer.my-props=abc"
};
ConsoleShareConsumerOptions config = new ConsoleShareConsumerOptions(args);
assertInstanceOf(DefaultMessageFormatter.class, config.formatter());
assertTrue(config.formatterArgs().containsKey("key.deserializer.my-props"));
DefaultMessageFormatter formatter = (DefaultMessageFormatter) config.formatter();
assertTrue(formatter.keyDeserializer().isPresent());
assertInstanceOf(MockDeserializer.class, formatter.keyDeserializer().get());
MockDeserializer keyDeserializer = (MockDeserializer) formatter.keyDeserializer().get();
assertEquals(1, keyDeserializer.configs.size());
assertEquals("abc", keyDeserializer.configs.get("my-props"));
assertTrue(keyDeserializer.isKey);
}
@Test
public void testCustomConfigShouldBePassedToConfigureMethodDeprecated() throws Exception {
Map<String, String> configs = new HashMap<>();
configs.put("key.deserializer.my-props", "abc");
configs.put("print.key", "false");
File propsFile = ToolsTestUtils.tempPropertiesFile(configs);
String[] args = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--property", "print.key=true",
"--property", "key.deserializer=org.apache.kafka.test.MockDeserializer",
"--formatter-config", propsFile.getAbsolutePath()
};
ConsoleShareConsumerOptions config = new ConsoleShareConsumerOptions(args);
assertInstanceOf(DefaultMessageFormatter.class, config.formatter());
assertTrue(config.formatterArgs().containsKey("key.deserializer.my-props"));
DefaultMessageFormatter formatter = (DefaultMessageFormatter) config.formatter();
assertTrue(formatter.keyDeserializer().isPresent());
assertInstanceOf(MockDeserializer.class, formatter.keyDeserializer().get());
MockDeserializer keyDeserializer = (MockDeserializer) formatter.keyDeserializer().get();
assertEquals(1, keyDeserializer.configs.size());
assertEquals("abc", keyDeserializer.configs.get("my-props"));
assertTrue(keyDeserializer.isKey);
}
@Test
public void testCustomConfigShouldBePassedToConfigureMethod() throws Exception {
Map<String, String> configs = new HashMap<>();
configs.put("key.deserializer.my-props", "abc");
configs.put("print.key", "false");
File propsFile = ToolsTestUtils.tempPropertiesFile(configs);
String[] args = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--formatter-property", "print.key=true",
"--formatter-property", "key.deserializer=org.apache.kafka.test.MockDeserializer",
"--formatter-config", propsFile.getAbsolutePath()
};
ConsoleShareConsumerOptions config = new ConsoleShareConsumerOptions(args);
assertInstanceOf(DefaultMessageFormatter.class, config.formatter());
assertTrue(config.formatterArgs().containsKey("key.deserializer.my-props"));
DefaultMessageFormatter formatter = (DefaultMessageFormatter) config.formatter();
assertTrue(formatter.keyDeserializer().isPresent());
assertInstanceOf(MockDeserializer.class, formatter.keyDeserializer().get());
MockDeserializer keyDeserializer = (MockDeserializer) formatter.keyDeserializer().get();
assertEquals(1, keyDeserializer.configs.size());
assertEquals("abc", keyDeserializer.configs.get("my-props"));
assertTrue(keyDeserializer.isKey);
}
@Test
public void testDefaultClientId() throws IOException {
String[] args = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
};
ConsoleShareConsumerOptions config = new ConsoleShareConsumerOptions(args);
Properties consumerProperties = config.consumerProps();
assertEquals("console-share-consumer", consumerProperties.getProperty(ConsumerConfig.CLIENT_ID_CONFIG));
}
@Test
public void testRejectOption() throws IOException {
String[] args = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--reject"
};
ConsoleShareConsumerOptions config = new ConsoleShareConsumerOptions(args);
assertEquals(AcknowledgeType.REJECT, config.acknowledgeType());
}
@Test
public void testReleaseOption() throws IOException {
String[] args = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--release"
};
ConsoleShareConsumerOptions config = new ConsoleShareConsumerOptions(args);
assertEquals(AcknowledgeType.RELEASE, config.acknowledgeType());
}
@Test
public void testRejectAndReleaseOption() throws IOException {
Exit.setExitProcedure((code, message) -> {
throw new IllegalArgumentException(message);
});
String[] args = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--reject",
"--release"
};
try {
assertThrows(IllegalArgumentException.class, () -> new ConsoleShareConsumerOptions(args));
} finally {
Exit.resetExitProcedure();
}
}
@Test
public void shouldExitOnBothConsumerPropertyAndCommandProperty() {
Exit.setExitProcedure((code, message) -> {
throw new IllegalArgumentException(message);
});
String[] args = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--consumer-property", "session.timeout.ms=10000",
"--command-property", "request.timeout.ms=30000"
};
try {
assertThrows(IllegalArgumentException.class, () -> new ConsoleShareConsumerOptions(args));
} finally {
Exit.resetExitProcedure();
}
}
@Test
public void shouldExitOnBothConsumerConfigAndCommandConfig() throws IOException {
Exit.setExitProcedure((code, message) -> {
throw new IllegalArgumentException(message);
});
Map<String, String> configs = new HashMap<>();
configs.put("request.timeout.ms", "1000");
File propsFile = ToolsTestUtils.tempPropertiesFile(configs);
Map<String, String> configs2 = new HashMap<>();
configs2.put("session.timeout.ms", "10000");
File propsFile2 = ToolsTestUtils.tempPropertiesFile(configs2);
String[] args = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--consumer-config", propsFile.getAbsolutePath(),
"--command-config", propsFile2.getAbsolutePath()
};
try {
assertThrows(IllegalArgumentException.class, () -> new ConsoleShareConsumerOptions(args));
} finally {
Exit.resetExitProcedure();
}
}
@Test
public void shouldParseValidConsumerConfigWithSessionTimeout() throws IOException {
String[] args = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--command-property", "session.timeout.ms=10000"
};
ConsoleShareConsumerOptions config = new ConsoleShareConsumerOptions(args);
Properties consumerProperties = config.consumerProps();
assertEquals("localhost:9092", config.bootstrapServer());
assertEquals("test", config.topicArg());
assertEquals("10000", consumerProperties.getProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG));
}
@Test
public void shouldParseConfigsFromFile() throws IOException {
Map<String, String> configs = new HashMap<>();
configs.put("request.timeout.ms", "1000");
configs.put("group.id", "group1");
File propsFile = ToolsTestUtils.tempPropertiesFile(configs);
String[] args = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--command-config", propsFile.getAbsolutePath()
};
ConsoleShareConsumerOptions config = new ConsoleShareConsumerOptions(args);
// KafkaShareConsumer uses Utils.propsToMap to convert the properties to a map,
// so using the same method to check the map has the expected values
Map<String, Object> configMap = Utils.propsToMap(config.consumerProps());
assertEquals("1000", configMap.get("request.timeout.ms"));
assertEquals("group1", configMap.get("group.id"));
}
@Test
public void groupIdsProvidedInDifferentPlacesMustMatch() throws IOException {
Exit.setExitProcedure((code, message) -> {
throw new IllegalArgumentException(message);
});
try {
// different in all three places
File propsFile = ToolsTestUtils.tempPropertiesFile(Map.of("group.id", "group-from-file"));
final String[] args = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--group", "group-from-arguments",
"--command-property", "group.id=group-from-properties",
"--command-config", propsFile.getAbsolutePath()
};
assertThrows(IllegalArgumentException.class, () -> new ConsoleShareConsumerOptions(args));
// the same in all three places
propsFile = ToolsTestUtils.tempPropertiesFile(Map.of("group.id", "test-group"));
final String[] args1 = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--group", "test-group",
"--command-property", "group.id=test-group",
"--command-config", propsFile.getAbsolutePath()
};
ConsoleShareConsumerOptions config = new ConsoleShareConsumerOptions(args1);
Properties props = config.consumerProps();
assertEquals("test-group", props.getProperty("group.id"));
// different via --command-property and --command-config
propsFile = ToolsTestUtils.tempPropertiesFile(Map.of("group.id", "group-from-file"));
final String[] args2 = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--command-property", "group.id=group-from-properties",
"--command-config", propsFile.getAbsolutePath()
};
assertThrows(IllegalArgumentException.class, () -> new ConsoleShareConsumerOptions(args2));
// different via --command-property and --group
final String[] args3 = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--group", "group-from-arguments",
"--command-property", "group.id=group-from-properties"
};
assertThrows(IllegalArgumentException.class, () -> new ConsoleShareConsumerOptions(args3));
// different via --group and --command-config
propsFile = ToolsTestUtils.tempPropertiesFile(Map.of("group.id", "group-from-file"));
final String[] args4 = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--group", "group-from-arguments",
"--command-config", propsFile.getAbsolutePath()
};
assertThrows(IllegalArgumentException.class, () -> new ConsoleShareConsumerOptions(args4));
// via --group only
final String[] args5 = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--group", "group-from-arguments"
};
config = new ConsoleShareConsumerOptions(args5);
props = config.consumerProps();
assertEquals("group-from-arguments", props.getProperty("group.id"));
} finally {
Exit.resetExitProcedure();
}
}
@Test
public void testClientIdOverride() throws IOException {
String[] args = new String[]{
"--bootstrap-server", "localhost:9092",
"--topic", "test",
"--command-property", "client.id=consumer-1"
};
ConsoleShareConsumerOptions config = new ConsoleShareConsumerOptions(args);
Properties consumerProperties = config.consumerProps();
assertEquals("consumer-1", consumerProperties.getProperty(ConsumerConfig.CLIENT_ID_CONFIG));
}
}
| ConsoleShareConsumerOptionsTest |
java | quarkusio__quarkus | devtools/maven/src/main/java/io/quarkus/maven/RemoveExtensionMojo.java | {
"start": 893,
"end": 2744
} | class ____ extends QuarkusProjectMojoBase {
/**
* The list of extensions to be removed.
*/
@Parameter(property = "extensions")
Set<String> extensions;
/**
* For usability reason, this parameter allows removing a single extension.
*/
@Parameter(property = "extension")
String extension;
@Override
protected void validateParameters() throws MojoExecutionException {
if ((StringUtils.isBlank(extension) && (extensions == null || extensions.isEmpty())) // None are set
|| (!StringUtils.isBlank(extension) && extensions != null && !extensions.isEmpty())) { // Both are set
throw new MojoExecutionException("Either the `extension` or `extensions` parameter must be set");
}
}
@Override
public void doExecute(final QuarkusProject quarkusProject, final MessageWriter log) throws MojoExecutionException {
Set<String> ext = new HashSet<>();
if (extensions != null && !extensions.isEmpty()) {
ext.addAll(extensions);
} else {
// Parse the "extension" just in case it contains several comma-separated values
// https://github.com/quarkusio/quarkus/issues/2393
ext.addAll(Arrays.stream(extension.split(",")).map(s -> s.trim()).collect(Collectors.toSet()));
}
try {
final QuarkusCommandOutcome outcome = new RemoveExtensions(quarkusProject)
.extensions(ext.stream().map(String::trim).collect(Collectors.toSet()))
.execute();
if (!outcome.isSuccess()) {
throw new MojoExecutionException("Unable to remove extensions");
}
} catch (Exception e) {
throw new MojoExecutionException("Unable to update the pom.xml file", e);
}
}
}
| RemoveExtensionMojo |
java | quarkusio__quarkus | extensions/devui/runtime/src/main/java/io/quarkus/devui/runtime/logstream/MutinyLogHandler.java | {
"start": 320,
"end": 2045
} | class ____ extends ExtHandler {
private LogStreamBroadcaster logStreamBroadcaster;
private final boolean decorateStack;
private final String srcMainJava;
private final List<String> knownClasses;
public MutinyLogHandler(boolean decorateStack, String srcMainJava, List<String> knownClasses) {
this.decorateStack = decorateStack;
this.srcMainJava = srcMainJava;
this.knownClasses = knownClasses;
setFormatter(new JsonFormatter());
}
@Override
public final void doPublish(final ExtLogRecord record) {
// Don't log empty messages
if (record.getMessage() == null || record.getMessage().isEmpty()) {
return;
}
if (isLoggable(record)) {
LogStreamBroadcaster broadcaster = getBroadcaster();
if (broadcaster != null) {
JsonObject message = ((JsonFormatter) getFormatter()).toJsonObject(record);
if (decorateStack) {
String decoratedString = DecorateStackUtil.getDecoratedString(record.getThrown(), this.srcMainJava,
knownClasses);
if (decoratedString != null) {
message.put("decoration", decoratedString);
}
}
broadcaster.onNext(message);
}
}
}
private LogStreamBroadcaster getBroadcaster() {
synchronized (this) {
if (this.logStreamBroadcaster == null && Arc.container() != null) {
this.logStreamBroadcaster = Arc.container().instance(LogStreamBroadcaster.class).get();
}
}
return this.logStreamBroadcaster;
}
}
| MutinyLogHandler |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/procedure/spi/FunctionReturnImplementor.java | {
"start": 410,
"end": 812
} | interface ____<T> extends FunctionReturn<T>, ProcedureParameterImplementor<T> {
@Override
default JdbcCallParameterRegistration toJdbcParameterRegistration(
int startIndex,
ProcedureCallImplementor<?> procedureCall) {
return toJdbcFunctionReturn( procedureCall.getSession() );
}
JdbcCallFunctionReturn toJdbcFunctionReturn(SharedSessionContractImplementor session);
}
| FunctionReturnImplementor |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.