language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
elastic__elasticsearch
|
x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AzureRepositoryAnalysisRestIT.java
|
{
"start": 1293,
"end": 8291
}
|
// REST integration test that runs the snapshot repository analysis API against an Azure
// blob store, either a real account or an in-process HTTP(S) fixture.
class ____ extends AbstractRepositoryAnalysisRestTestCase {
// Default to the in-process fixture unless explicitly disabled via system property.
private static final boolean USE_FIXTURE = Booleans.parseBoolean(System.getProperty("test.azure.fixture", "true"));
// HTTPS fixture is skipped in FIPS JVMs (see TODO below).
private static final boolean USE_HTTPS_FIXTURE = USE_FIXTURE && ESTestCase.inFipsJvm() == false;
// TODO when https://github.com/elastic/elasticsearch/issues/111532 addressed, use a HTTPS fixture in FIPS mode too
// Credentials and target container supplied by the build via system properties; any of these
// may be unset, which selects a different authentication mode below.
private static final String AZURE_TEST_ACCOUNT = System.getProperty("test.azure.account");
private static final String AZURE_TEST_CONTAINER = System.getProperty("test.azure.container");
private static final String AZURE_TEST_KEY = System.getProperty("test.azure.key");
private static final String AZURE_TEST_SASTOKEN = System.getProperty("test.azure.sas_token");
private static final String AZURE_TEST_TENANT_ID = System.getProperty("test.azure.tenant_id");
private static final String AZURE_TEST_CLIENT_ID = System.getProperty("test.azure.client_id");
// The fixture emulates the Azure blob endpoint; protocol NONE means "talk to real Azure".
private static final AzureHttpFixture fixture = new AzureHttpFixture(
USE_HTTPS_FIXTURE ? AzureHttpFixture.Protocol.HTTPS : USE_FIXTURE ? AzureHttpFixture.Protocol.HTTP : AzureHttpFixture.Protocol.NONE,
AZURE_TEST_ACCOUNT,
AZURE_TEST_CONTAINER,
AZURE_TEST_TENANT_ID,
AZURE_TEST_CLIENT_ID,
decideAuthHeaderPredicate(),
// 5% of the time, in a contended lease scenario, expire the existing lease
(currentLeaseId, requestLeaseId) -> currentLeaseId.equals(requestLeaseId) == false
&& ThreadLocalRandom.current().nextDouble() < 0.05
);
// Picks the predicate the fixture uses to validate the Authorization header, based on which
// credential-related system properties are set. Key/SAS beats workload identity, which in
// turn requires both tenant and client id; otherwise managed identity is assumed.
private static Predicate<String> decideAuthHeaderPredicate() {
if (Strings.hasText(AZURE_TEST_KEY) || Strings.hasText(AZURE_TEST_SASTOKEN)) {
return AzureHttpFixture.sharedKeyForAccountPredicate(AZURE_TEST_ACCOUNT);
} else if (Strings.hasText(AZURE_TEST_TENANT_ID) && Strings.hasText(AZURE_TEST_CLIENT_ID)) {
return AzureHttpFixture.WORK_IDENTITY_BEARER_TOKEN_PREDICATE;
} else if (Strings.hasText(AZURE_TEST_TENANT_ID) || Strings.hasText(AZURE_TEST_CLIENT_ID)) {
fail(null, "Both [test.azure.tenant_id] and [test.azure.client_id] must be set if either is set");
}
return AzureHttpFixture.MANAGED_IDENTITY_BEARER_TOKEN_PREDICATE;
}
// Trust store holding the fixture's self-signed certificate, needed for the HTTPS fixture.
private static final TestTrustStore trustStore = new TestTrustStore(
() -> AzureHttpFixture.class.getResourceAsStream("azure-http-fixture.pem")
);
// Test cluster wired to the fixture (or real Azure); most settings are applied conditionally
// depending on which credentials are configured and whether the fixture is in use.
private static final ElasticsearchCluster cluster = ElasticsearchCluster.local()
.module("repository-azure")
.module("snapshot-repo-test-kit")
.setting("thread_pool.snapshot.max", "10")
.keystore("azure.client.repository_test_kit.account", AZURE_TEST_ACCOUNT)
.keystore("azure.client.repository_test_kit.key", () -> AZURE_TEST_KEY, s -> Strings.hasText(AZURE_TEST_KEY))
.keystore("azure.client.repository_test_kit.sas_token", () -> AZURE_TEST_SASTOKEN, s -> Strings.hasText(AZURE_TEST_SASTOKEN))
.setting(
"azure.client.repository_test_kit.endpoint_suffix",
() -> "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=" + fixture.getAddress(),
s -> USE_FIXTURE
)
.systemProperty(
"tests.azure.credentials.disable_instance_discovery",
() -> "true",
s -> USE_HTTPS_FIXTURE && Strings.hasText(AZURE_TEST_CLIENT_ID) && Strings.hasText(AZURE_TEST_TENANT_ID)
)
.systemProperty("AZURE_POD_IDENTITY_AUTHORITY_HOST", fixture::getMetadataAddress, s -> USE_FIXTURE)
.systemProperty("AZURE_AUTHORITY_HOST", fixture::getOAuthTokenServiceAddress, s -> USE_HTTPS_FIXTURE)
.systemProperty("AZURE_CLIENT_ID", () -> AZURE_TEST_CLIENT_ID, s -> Strings.hasText(AZURE_TEST_CLIENT_ID))
.systemProperty("AZURE_TENANT_ID", () -> AZURE_TEST_TENANT_ID, s -> Strings.hasText(AZURE_TEST_TENANT_ID))
.configFile("storage-azure/azure-federated-token", Resource.fromString(fixture.getFederatedToken()))
.environment(
nodeSpec -> USE_HTTPS_FIXTURE && Strings.hasText(AZURE_TEST_CLIENT_ID) && Strings.hasText(AZURE_TEST_TENANT_ID)
? Map.of("AZURE_FEDERATED_TOKEN_FILE", "${ES_PATH_CONF}/storage-azure/azure-federated-token")
: Map.of()
)
.systemProperty("javax.net.ssl.trustStore", () -> trustStore.getTrustStorePath().toString(), s -> USE_HTTPS_FIXTURE)
.systemProperty("javax.net.ssl.trustStoreType", () -> "jks", s -> USE_HTTPS_FIXTURE)
.build();
// Order matters: the fixture must be up before the trust store is built and the cluster starts.
@ClassRule(order = 1)
public static TestRule ruleChain = RuleChain.outerRule(fixture).around(trustStore).around(cluster);
@Override
protected String getTestRestCluster() {
return cluster.getHttpAddresses();
}
@Override
protected String repositoryType() {
return "azure";
}
// Repository settings pointing the "azure" repository at the configured container/base path.
@Override
protected Settings repositorySettings() {
final String container = System.getProperty("test.azure.container");
assertThat(container, not(blankOrNullString()));
final String basePath = System.getProperty("test.azure.base_path");
assertThat(basePath, not(blankOrNullString()));
return Settings.builder().put("client", "repository_test_kit").put("container", container).put("base_path", basePath).build();
}
// Verifies that cluster stats report the Azure repository and the credential mode actually in use.
public void testClusterStats() throws IOException {
registerRepository(randomIdentifier(), repositoryType(), true, repositorySettings());
final var request = new Request(HttpGet.METHOD_NAME, "/_cluster/stats");
final var response = client().performRequest(request);
assertOK(response);
final var objectPath = ObjectPath.createFromResponse(response);
assertThat(objectPath.evaluate("repositories.azure.count"), isSetIff(true));
assertThat(objectPath.evaluate("repositories.azure.read_write"), isSetIff(true));
assertThat(objectPath.evaluate("repositories.azure.uses_key_credentials"), isSetIff(Strings.hasText(AZURE_TEST_KEY)));
assertThat(objectPath.evaluate("repositories.azure.uses_sas_token"), isSetIff(Strings.hasText(AZURE_TEST_SASTOKEN)));
assertThat(
objectPath.evaluate("repositories.azure.uses_default_credentials"),
isSetIff((Strings.hasText(AZURE_TEST_SASTOKEN) || Strings.hasText(AZURE_TEST_KEY)) == false)
);
assertThat(
objectPath.evaluate("repositories.azure.uses_managed_identity"),
isSetIff(
(Strings.hasText(AZURE_TEST_SASTOKEN) || Strings.hasText(AZURE_TEST_KEY) || Strings.hasText(AZURE_TEST_CLIENT_ID)) == false
)
);
assertThat(objectPath.evaluate("repositories.azure.uses_workload_identity"), isSetIff(Strings.hasText(AZURE_TEST_CLIENT_ID)));
}
// The stats field is expected to be 1 when the feature is in use and absent (null) otherwise.
private static Matcher<Integer> isSetIff(boolean predicate) {
return predicate ? equalTo(1) : nullValue(Integer.class);
}
}
|
AzureRepositoryAnalysisRestIT
|
java
|
quarkusio__quarkus
|
extensions/tls-registry/deployment/src/test/java/io/quarkus/tls/TrustStoreWithSelectedCredentialsProviderTest.java
|
{
"start": 1003,
"end": 2334
}
|
class ____ {
private static final String configuration = """
quarkus.tls.foo.trust-store.p12.path=target/certs/test-credentials-provider-truststore.p12
quarkus.tls.foo.trust-store.credentials-provider.name=tls
quarkus.tls.foo.trust-store.credentials-provider.bean-name=my-provider
""";
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest().setArchiveProducer(
() -> ShrinkWrap.create(JavaArchive.class)
.addClass(MyCredentialProvider.class)
.add(new StringAsset(configuration), "application.properties"));
@Inject
TlsConfigurationRegistry certificates;
@Test
void test() throws KeyStoreException, CertificateParsingException {
TlsConfiguration def = certificates.get("foo").orElseThrow();
X509Certificate certificate = (X509Certificate) def.getTrustStore().getCertificate("test-credentials-provider");
assertThat(certificate).isNotNull();
assertThat(certificate.getSubjectAlternativeNames()).anySatisfy(l -> {
assertThat(l.get(0)).isEqualTo(2);
assertThat(l.get(1)).isEqualTo("localhost");
});
}
@ApplicationScoped
@Named("my-provider")
public static
|
TrustStoreWithSelectedCredentialsProviderTest
|
java
|
hibernate__hibernate-orm
|
hibernate-vector/src/main/java/org/hibernate/vector/internal/VectorArgumentTypeResolver.java
|
{
"start": 803,
"end": 1907
}
|
// Resolves the mapping type of a vector-function argument by borrowing the type of a
// sibling argument that is also expected to be a vector.
class ____ implements AbstractFunctionArgumentTypeResolver {
// Functions with a single vector argument at position 0.
public static final FunctionArgumentTypeResolver INSTANCE = new VectorArgumentTypeResolver( 0 );
// Distance functions, where both argument 0 and argument 1 are vectors.
public static final FunctionArgumentTypeResolver DISTANCE_INSTANCE = new VectorArgumentTypeResolver( 0, 1 );
// Positions of the arguments expected to carry a vector type.
private final int[] vectorIndices;
public VectorArgumentTypeResolver(int... vectorIndices) {
	this.vectorIndices = vectorIndices;
}
@Override
public @Nullable MappingModelExpressible<?> resolveFunctionArgumentType(List<? extends SqmTypedNode<?>> arguments, int argumentIndex, SqmToSqlAstConverter converter) {
	// Try each *other* vector argument in turn; the first one whose value mapping can be
	// determined supplies the type for the argument currently being resolved.
	for ( final int candidateIndex : vectorIndices ) {
		if ( candidateIndex == argumentIndex ) {
			continue;
		}
		final SqmTypedNode<?> candidate = arguments.get( candidateIndex );
		if ( !( candidate instanceof SqmExpression<?> ) ) {
			continue;
		}
		final MappingModelExpressible<?> mapping = converter.determineValueMapping( (SqmExpression<?>) candidate );
		if ( mapping != null ) {
			return mapping;
		}
	}
	// No sibling could supply a type: fall back to the generic VECTOR basic type.
	return converter.getCreationContext().getTypeConfiguration().getBasicTypeRegistry()
			.resolve( StandardBasicTypes.VECTOR );
}
}
|
VectorArgumentTypeResolver
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/RolloverStepTests.java
|
{
"start": 1472,
"end": 15073
}
|
// Unit tests for RolloverStep covering alias-based indices, data streams (including failure
// stores), skip conditions, and error propagation from the rollover API call.
class ____ extends AbstractStepTestCase<RolloverStep> {
@Override
public RolloverStep createRandomInstance() {
StepKey stepKey = randomStepKey();
StepKey nextStepKey = randomStepKey();
return new RolloverStep(stepKey, nextStepKey, client);
}
// Mutates exactly one of the two step keys so the result differs from the input instance.
@Override
public RolloverStep mutateInstance(RolloverStep instance) {
StepKey key = instance.getKey();
StepKey nextKey = instance.getNextStepKey();
switch (between(0, 1)) {
case 0 -> key = new StepKey(key.phase(), key.action(), key.name() + randomAlphaOfLength(5));
case 1 -> nextKey = new StepKey(nextKey.phase(), nextKey.action(), nextKey.name() + randomAlphaOfLength(5));
default -> throw new AssertionError("Illegal randomisation branch");
}
return new RolloverStep(key, nextKey, instance.getClientWithoutProject());
}
@Override
public RolloverStep copyInstance(RolloverStep instance) {
return new RolloverStep(instance.getKey(), instance.getNextStepKey(), instance.getClientWithoutProject());
}
// Builds a random index that carries the given alias and declares it as its rollover alias.
private IndexMetadata getIndexMetadata(String alias) {
return IndexMetadata.builder(randomAlphaOfLength(10))
.putAlias(AliasMetadata.builder(alias))
.settings(settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
}
// Asserts the rollover request targets the expected alias/data stream (optionally with the
// failures selector) as an unconditional, non-dry-run rollover.
private static void assertRolloverIndexRequest(RolloverRequest request, String rolloverTarget, boolean targetFailureStores) {
String target = targetFailureStores
? IndexNameExpressionResolver.combineSelector(rolloverTarget, IndexComponentSelector.FAILURES)
: rolloverTarget;
assertNotNull(request);
assertEquals(1, request.indices().length);
assertEquals(target, request.indices()[0]);
assertEquals(target, request.getRolloverTarget());
assertFalse(request.isDryRun());
assertEquals(0, request.getConditions().getConditions().size());
}
// Happy path: an aliased index triggers exactly one rollover call against its alias.
public void testPerformAction() throws Exception {
String alias = randomAlphaOfLength(5);
IndexMetadata indexMetadata = getIndexMetadata(alias);
RolloverStep step = createRandomInstance();
mockClientRolloverCall(alias, false);
ProjectState state = projectStateFromProject(ProjectMetadata.builder(randomProjectIdOrDefault()).put(indexMetadata, true));
performActionAndWait(step, indexMetadata, state, null);
Mockito.verify(client).projectClient(state.projectId());
Mockito.verify(projectClient).admin();
Mockito.verifyNoMoreInteractions(client);
Mockito.verify(adminClient, Mockito.only()).indices();
Mockito.verify(indicesClient, Mockito.only()).rolloverIndex(Mockito.any(), Mockito.any());
}
// A data stream write index (backing or failure store) is rolled over via the data stream name.
public void testPerformActionOnDataStream() throws Exception {
String dataStreamName = "test-datastream";
long ts = System.currentTimeMillis();
IndexMetadata indexMetadata = IndexMetadata.builder(DataStream.getDefaultBackingIndexName(dataStreamName, 1, ts))
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
IndexMetadata failureIndexMetadata = IndexMetadata.builder(DataStream.getDefaultFailureStoreName(dataStreamName, 1, ts))
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
RolloverStep step = createRandomInstance();
ProjectState state = projectStateFromProject(
ProjectMetadata.builder(randomProjectIdOrDefault())
.put(newInstance(dataStreamName, List.of(indexMetadata.getIndex()), List.of(failureIndexMetadata.getIndex())))
.put(indexMetadata, true)
.put(failureIndexMetadata, true)
);
// Randomly exercise either the backing index or the failure-store index path.
boolean useFailureStore = randomBoolean();
IndexMetadata indexToOperateOn = useFailureStore ? failureIndexMetadata : indexMetadata;
mockClientRolloverCall(dataStreamName, useFailureStore);
performActionAndWait(step, indexToOperateOn, state, null);
Mockito.verify(client).projectClient(state.projectId());
Mockito.verify(projectClient).admin();
Mockito.verifyNoMoreInteractions(client);
Mockito.verify(adminClient, Mockito.only()).indices();
Mockito.verify(indicesClient, Mockito.only()).rolloverIndex(Mockito.any(), Mockito.any());
}
// If the index is no longer the write index of its data stream, the step is a no-op:
// no client interaction at all is expected.
public void testSkipRolloverIfDataStreamIsAlreadyRolledOver() throws Exception {
String dataStreamName = "test-datastream";
long ts = System.currentTimeMillis();
IndexMetadata firstGenerationIndex = IndexMetadata.builder(DataStream.getDefaultBackingIndexName(dataStreamName, 1, ts))
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
IndexMetadata failureFirstGenerationIndex = IndexMetadata.builder(DataStream.getDefaultFailureStoreName(dataStreamName, 1, ts))
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
IndexMetadata writeIndex = IndexMetadata.builder(DataStream.getDefaultBackingIndexName(dataStreamName, 2, ts))
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
IndexMetadata failureWriteIndex = IndexMetadata.builder(DataStream.getDefaultFailureStoreName(dataStreamName, 2, ts))
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
RolloverStep step = createRandomInstance();
ProjectState state = projectStateFromProject(
ProjectMetadata.builder(randomProjectIdOrDefault())
.put(firstGenerationIndex, true)
.put(writeIndex, true)
.put(failureFirstGenerationIndex, true)
.put(failureWriteIndex, true)
.put(
newInstance(
dataStreamName,
List.of(firstGenerationIndex.getIndex(), writeIndex.getIndex()),
List.of(failureFirstGenerationIndex.getIndex(), failureWriteIndex.getIndex())
)
)
);
// Operate on a generation-1 index, which is no longer the write index.
boolean useFailureStore = randomBoolean();
IndexMetadata indexToOperateOn = useFailureStore ? failureFirstGenerationIndex : firstGenerationIndex;
performActionAndWait(step, indexToOperateOn, state, null);
verifyNoMoreInteractions(client);
verifyNoMoreInteractions(adminClient);
verifyNoMoreInteractions(indicesClient);
}
// Stubs the indices client to validate the rollover request and answer with a successful
// (acknowledged, rolled-over) response.
private void mockClientRolloverCall(String rolloverTarget, boolean targetFailureStore) {
Mockito.doAnswer(invocation -> {
RolloverRequest request = (RolloverRequest) invocation.getArguments()[0];
@SuppressWarnings("unchecked")
ActionListener<RolloverResponse> listener = (ActionListener<RolloverResponse>) invocation.getArguments()[1];
assertRolloverIndexRequest(request, rolloverTarget, targetFailureStore);
listener.onResponse(new RolloverResponse(null, null, Map.of(), request.isDryRun(), true, true, true, false));
return null;
}).when(indicesClient).rolloverIndex(Mockito.any(), Mockito.any());
}
// With indexing-complete set, the step finishes without attempting a rollover.
public void testPerformActionWithIndexingComplete() throws Exception {
String alias = randomAlphaOfLength(5);
IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(10))
.putAlias(AliasMetadata.builder(alias))
.settings(
settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)
.put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, true)
)
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
RolloverStep step = createRandomInstance();
ProjectState state = projectStateFromProject(ProjectMetadata.builder(randomProjectIdOrDefault()).put(indexMetadata, true));
performActionAndWait(step, indexMetadata, state, null);
}
// An index with existing rollover info for the alias is considered already rolled over.
public void testPerformActionSkipsRolloverForAlreadyRolledIndex() throws Exception {
String rolloverAlias = randomAlphaOfLength(5);
IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(10))
.putAlias(AliasMetadata.builder(rolloverAlias))
.settings(settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, rolloverAlias))
.putRolloverInfo(
new RolloverInfo(rolloverAlias, List.of(new MaxSizeCondition(ByteSizeValue.ofBytes(2L))), System.currentTimeMillis())
)
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
RolloverStep step = createRandomInstance();
ProjectState state = projectStateFromProject(ProjectMetadata.builder(randomProjectIdOrDefault()).put(indexMetadata, true));
performActionAndWait(step, indexMetadata, state, null);
Mockito.verify(indicesClient, Mockito.never()).rolloverIndex(Mockito.any(), Mockito.any());
}
// A failure from the rollover API must surface as the same exception instance to the caller.
public void testPerformActionFailure() {
String alias = randomAlphaOfLength(5);
IndexMetadata indexMetadata = getIndexMetadata(alias);
Exception exception = new RuntimeException();
RolloverStep step = createRandomInstance();
Mockito.doAnswer(invocation -> {
RolloverRequest request = (RolloverRequest) invocation.getArguments()[0];
@SuppressWarnings("unchecked")
ActionListener<RolloverResponse> listener = (ActionListener<RolloverResponse>) invocation.getArguments()[1];
assertRolloverIndexRequest(request, alias, false);
listener.onFailure(exception);
return null;
}).when(indicesClient).rolloverIndex(Mockito.any(), Mockito.any());
ProjectState state = projectStateFromProject(ProjectMetadata.builder(randomProjectIdOrDefault()).put(indexMetadata, true));
assertSame(exception, expectThrows(Exception.class, () -> performActionAndWait(step, indexMetadata, state, null)));
Mockito.verify(client).projectClient(state.projectId());
Mockito.verify(projectClient).admin();
Mockito.verifyNoMoreInteractions(client);
Mockito.verify(adminClient, Mockito.only()).indices();
Mockito.verify(indicesClient, Mockito.only()).rolloverIndex(Mockito.any(), Mockito.any());
}
// A null or empty rollover alias setting is rejected with a descriptive error.
public void testPerformActionInvalidNullOrEmptyAlias() {
String alias = randomBoolean() ? "" : null;
IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(10))
.settings(settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
RolloverStep step = createRandomInstance();
ProjectState state = projectStateFromProject(ProjectMetadata.builder(randomProjectIdOrDefault()).put(indexMetadata, true));
Exception e = expectThrows(IllegalArgumentException.class, () -> performActionAndWait(step, indexMetadata, state, null));
assertThat(
e.getMessage(),
Matchers.is(
String.format(
Locale.ROOT,
"setting [%s] for index [%s] is empty or not defined, it must be set to the name of the alias pointing to the group of "
+ "indices being rolled over",
RolloverAction.LIFECYCLE_ROLLOVER_ALIAS,
indexMetadata.getIndex().getName()
)
)
);
}
// The configured rollover alias must actually point at the index being operated on.
public void testPerformActionAliasDoesNotPointToIndex() {
String alias = randomAlphaOfLength(5);
IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(10))
.settings(settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
RolloverStep step = createRandomInstance();
ProjectState state = projectStateFromProject(ProjectMetadata.builder(randomProjectIdOrDefault()).put(indexMetadata, true));
Exception e = expectThrows(IllegalArgumentException.class, () -> performActionAndWait(step, indexMetadata, state, null));
assertThat(
e.getMessage(),
Matchers.is(
String.format(
Locale.ROOT,
"%s [%s] does not point to index [%s]",
RolloverAction.LIFECYCLE_ROLLOVER_ALIAS,
alias,
indexMetadata.getIndex().getName()
)
)
);
}
}
|
RolloverStepTests
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsModelTests.java
|
{
"start": 1039,
"end": 7509
}
|
// Tests for CohereEmbeddingsModel.of (task-settings override behaviour) plus a family of
// static factory overloads used by other test classes to build models with varying detail.
class ____ extends ESTestCase {
// An empty override map must leave the model unchanged (same instance semantics via equals).
public void testOverrideWith_DoesNotOverrideAndModelRemainsEqual_WhenSettingsAreEmpty() {
var model = createModel("url", "api_key", null, null, null);
var overriddenModel = CohereEmbeddingsModel.of(model, Map.of());
MatcherAssert.assertThat(overriddenModel, is(model));
}
// A null override map must likewise be a no-op.
public void testOverrideWith_DoesNotOverrideAndModelRemainsEqual_WhenSettingsAreNull() {
var model = createModel("url", "api_key", null, null, null);
var overriddenModel = CohereEmbeddingsModel.of(model, null);
MatcherAssert.assertThat(overriddenModel, is(model));
}
// A valid input type in the request task settings overrides the stored one (INGEST -> SEARCH).
public void testOverrideWith_SetsInputType_FromRequestTaskSettings_IfValid_OverridingStoredTaskSettings() {
var model = createModel(
"url",
"api_key",
new CohereEmbeddingsTaskSettings(InputType.INGEST, null),
null,
null,
"model",
CohereEmbeddingType.FLOAT
);
var overriddenModel = CohereEmbeddingsModel.of(model, getTaskSettingsMap(InputType.SEARCH, null));
var expectedModel = createModel(
"url",
"api_key",
new CohereEmbeddingsTaskSettings(InputType.SEARCH, null),
null,
null,
"model",
CohereEmbeddingType.FLOAT
);
MatcherAssert.assertThat(overriddenModel, is(expectedModel));
}
// A null input type in the request task settings must not clobber the stored value.
public void testOverrideWith_DoesNotOverrideInputType_WhenRequestTaskSettingsIsNull() {
var model = createModel(
"url",
"api_key",
new CohereEmbeddingsTaskSettings(InputType.INGEST, null),
null,
null,
"model",
CohereEmbeddingType.FLOAT
);
var overriddenModel = CohereEmbeddingsModel.of(model, getTaskSettingsMap(null, null));
var expectedModel = createModel(
"url",
"api_key",
new CohereEmbeddingsTaskSettings(InputType.INGEST, null),
null,
null,
"model",
CohereEmbeddingType.FLOAT
);
MatcherAssert.assertThat(overriddenModel, is(expectedModel));
}
// Convenience overload: empty task settings, no dimensions.
public static CohereEmbeddingsModel createModel(
String url,
String apiKey,
@Nullable Integer tokenLimit,
@Nullable String model,
@Nullable CohereEmbeddingType embeddingType
) {
return createModel(url, apiKey, CohereEmbeddingsTaskSettings.EMPTY_SETTINGS, tokenLimit, null, model, embeddingType);
}
// Convenience overload: empty task settings with explicit dimensions.
public static CohereEmbeddingsModel createModel(
String url,
String apiKey,
@Nullable Integer tokenLimit,
@Nullable Integer dimensions,
@Nullable String model,
@Nullable CohereEmbeddingType embeddingType
) {
return createModel(url, apiKey, CohereEmbeddingsTaskSettings.EMPTY_SETTINGS, tokenLimit, dimensions, model, embeddingType);
}
// Fully-specified overload including chunking settings; always uses DOT_PRODUCT similarity,
// API version V2, and falls back to FLOAT when no embedding type is given.
public static CohereEmbeddingsModel createModel(
String url,
String apiKey,
CohereEmbeddingsTaskSettings taskSettings,
ChunkingSettings chunkingSettings,
@Nullable Integer tokenLimit,
@Nullable Integer dimensions,
@Nullable String model,
@Nullable CohereEmbeddingType embeddingType
) {
return new CohereEmbeddingsModel(
"id",
new CohereEmbeddingsServiceSettings(
new CohereServiceSettings(
url,
SimilarityMeasure.DOT_PRODUCT,
dimensions,
tokenLimit,
model,
null,
CohereServiceSettings.CohereApiVersion.V2
),
Objects.requireNonNullElse(embeddingType, CohereEmbeddingType.FLOAT)
),
taskSettings,
chunkingSettings,
new DefaultSecretSettings(new SecureString(apiKey.toCharArray()))
);
}
// Overload with task settings but no chunking settings; defaults the API version to V2.
public static CohereEmbeddingsModel createModel(
String url,
String apiKey,
CohereEmbeddingsTaskSettings taskSettings,
@Nullable Integer tokenLimit,
@Nullable Integer dimensions,
String model,
@Nullable CohereEmbeddingType embeddingType
) {
return createModel(
url,
apiKey,
taskSettings,
tokenLimit,
dimensions,
model,
embeddingType,
CohereServiceSettings.CohereApiVersion.V2
);
}
// Overload allowing the API version to be chosen explicitly.
public static CohereEmbeddingsModel createModel(
String url,
String apiKey,
CohereEmbeddingsTaskSettings taskSettings,
@Nullable Integer tokenLimit,
@Nullable Integer dimensions,
String model,
@Nullable CohereEmbeddingType embeddingType,
CohereServiceSettings.CohereApiVersion apiVersion
) {
return new CohereEmbeddingsModel(
"id",
new CohereEmbeddingsServiceSettings(
new CohereServiceSettings(url, SimilarityMeasure.DOT_PRODUCT, dimensions, tokenLimit, model, null, apiVersion),
Objects.requireNonNullElse(embeddingType, CohereEmbeddingType.FLOAT)
),
taskSettings,
null,
new DefaultSecretSettings(new SecureString(apiKey.toCharArray()))
);
}
// Overload allowing a custom (possibly null) similarity measure instead of DOT_PRODUCT.
public static CohereEmbeddingsModel createModel(
String url,
String apiKey,
CohereEmbeddingsTaskSettings taskSettings,
@Nullable Integer tokenLimit,
@Nullable Integer dimensions,
@Nullable String model,
@Nullable CohereEmbeddingType embeddingType,
@Nullable SimilarityMeasure similarityMeasure
) {
return new CohereEmbeddingsModel(
"id",
new CohereEmbeddingsServiceSettings(
new CohereServiceSettings(
url,
similarityMeasure,
dimensions,
tokenLimit,
model,
null,
CohereServiceSettings.CohereApiVersion.V2
),
Objects.requireNonNullElse(embeddingType, CohereEmbeddingType.FLOAT)
),
taskSettings,
null,
new DefaultSecretSettings(new SecureString(apiKey.toCharArray()))
);
}
}
|
CohereEmbeddingsModelTests
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/runc/RuncContainerExecutorConfig.java
|
{
"start": 33863,
"end": 34419
}
|
class ____ {
final private int classID;
final private List<NetworkPriority> priorities;
public int getClassID() {
return classID;
}
public List<NetworkPriority> getPriorities() {
return priorities;
}
public Network(int classID, List<NetworkPriority> priorities) {
this.classID = classID;
this.priorities = priorities;
}
public Network() {
this(0, null);
}
/**
* This
|
Network
|
java
|
apache__flink
|
flink-clients/src/main/java/org/apache/flink/client/program/DefaultPackagedProgramRetriever.java
|
{
"start": 4118,
"end": 11878
}
|
class ____ use; if {@code null} the user classpath (or, if not set,
* the system classpath) will be scanned for possible main class.
* @param programArgs The program arguments.
* @param configuration The Flink configuration for the given job.
* @return The {@code PackageProgramRetrieverImpl} that can be used to create a {@link
* PackagedProgram} instance.
* @throws FlinkException If something goes wrong during instantiation.
*/
// Builds a retriever whose user classpath is the concatenation of the user-lib directory
// jars, the provided artifacts, and the classpaths listed in the Flink configuration.
public static DefaultPackagedProgramRetriever create(
@Nullable File userLibDir,
@Nullable File jarFile,
@Nullable Collection<File> userArtifacts,
@Nullable String jobClassName,
String[] programArgs,
Configuration configuration)
throws FlinkException {
List<URL> userClasspaths;
try {
final List<URL> classpathsFromUserLibDir =
getClasspathsFromUserLibDir(userLibDir, jarFile);
final List<URL> classpathsFromUserArtifactDir =
getClasspathsFromArtifacts(userArtifacts, jarFile);
final List<URL> classpathsFromConfiguration =
getClasspathsFromConfiguration(configuration);
final List<URL> classpaths = new ArrayList<>();
classpaths.addAll(classpathsFromUserLibDir);
classpaths.addAll(classpathsFromUserArtifactDir);
classpaths.addAll(classpathsFromConfiguration);
userClasspaths = Collections.unmodifiableList(classpaths);
} catch (IOException e) {
throw new FlinkException("An error occurred while extracting the user classpath.", e);
}
final EntryClassInformationProvider entryClassInformationProvider =
createEntryClassInformationProvider(
// Pass null when neither user lib dir nor artifacts were given, so that the
// provider falls back to the system classpath instead of the (possibly
// configuration-only) user classpath.
(userLibDir == null && userArtifacts == null) ? null : userClasspaths,
jarFile,
jobClassName,
programArgs);
return new DefaultPackagedProgramRetriever(
entryClassInformationProvider, programArgs, userClasspaths, configuration);
}
// Chooses how the entry class will be located: Python jobs first, then an explicit job
// jar, then the user classpath, and finally the system classpath.
@VisibleForTesting
static EntryClassInformationProvider createEntryClassInformationProvider(
        @Nullable Iterable<URL> userClasspath,
        @Nullable File jarFile,
        @Nullable String jobClassName,
        String[] programArgs)
        throws FlinkException {
    // Python jobs are packaged specially and never resolved via classpath scanning.
    final boolean isPythonJob =
            PackagedProgramUtils.isPython(jobClassName)
                    || PackagedProgramUtils.isPython(programArgs);
    if (isPythonJob) {
        return FromJarEntryClassInformationProvider.createFromPythonJar();
    }
    // An explicit job jar takes precedence over any classpath-based lookup.
    if (jarFile != null) {
        return FromJarEntryClassInformationProvider.createFromCustomJar(jarFile, jobClassName);
    }
    return userClasspath != null
            ? fromUserClasspath(jobClassName, userClasspath)
            : fromSystemClasspath(jobClassName);
}
// Resolves the entry class against the system classpath, either trusting an explicitly
// named job class or scanning for a unique main class.
private static EntryClassInformationProvider fromSystemClasspath(@Nullable String jobClassName)
        throws FlinkException {
    if (jobClassName == null) {
        try {
            // No class name given: scan the system classpath for a main class.
            return FromClasspathEntryClassInformationProvider.createFromSystemClasspath();
        } catch (IOException | NoSuchElementException | IllegalArgumentException t) {
            // Normalize scan failures into a FlinkException, preserving the cause.
            throw createGenericFlinkException(t);
        }
    }
    // With an explicit class name, assume it is present on the system classpath.
    return FromClasspathEntryClassInformationProvider.createWithJobClassAssumingOnSystemClasspath(
            jobClassName);
}
// Resolves the entry class against the given user classpath; a known class name narrows
// the lookup, otherwise the classpath is scanned.
private static EntryClassInformationProvider fromUserClasspath(
        @Nullable String jobClassName, Iterable<URL> userClasspath) throws FlinkException {
    try {
        return jobClassName == null
                ? FromClasspathEntryClassInformationProvider.createFromClasspath(userClasspath)
                : FromClasspathEntryClassInformationProvider.create(jobClassName, userClasspath);
    } catch (IOException e) {
        // Normalize lookup failures into a FlinkException, preserving the cause.
        throw createGenericFlinkException(e);
    }
}
// Wraps any low-level classpath failure in a FlinkException with a uniform message,
// preserving the original cause. Fixes the grammar of the user-facing message
// ("while access the" -> "while accessing the").
private static FlinkException createGenericFlinkException(Throwable t) {
    return new FlinkException("An error occurred while accessing the provided classpath.", t);
}
// Private constructor: instances are created through the static create(...) factory.
// All collaborators are mandatory; nulls are rejected eagerly with descriptive messages.
private DefaultPackagedProgramRetriever(
EntryClassInformationProvider entryClassInformationProvider,
String[] programArguments,
List<URL> userClasspath,
Configuration configuration) {
this.entryClassInformationProvider =
checkNotNull(
entryClassInformationProvider, "No EntryClassInformationProvider passed.");
this.programArguments =
checkNotNull(programArguments, "No program parameter array passed.");
this.userClasspath = checkNotNull(userClasspath, "No user classpath passed.");
this.configuration = checkNotNull(configuration, "No Flink configuration was passed.");
}
// Assembles the PackagedProgram from the stored classpath, arguments and configuration,
// adding the entry class name and jar file only when the provider knows them.
@Override
public PackagedProgram getPackagedProgram() throws FlinkException {
    try {
        final PackagedProgram.Builder builder =
                PackagedProgram.newBuilder()
                        .setUserClassPaths(userClasspath)
                        .setArguments(programArguments)
                        .setConfiguration(configuration);
        // Both pieces of entry-class information are optional.
        entryClassInformationProvider
                .getJobClassName()
                .ifPresent(builder::setEntryPointClassName);
        entryClassInformationProvider.getJarFile().ifPresent(builder::setJarFile);
        return builder.build();
    } catch (ProgramInvocationException e) {
        throw new FlinkException("Could not load the provided entrypoint class.", e);
    }
}
// Collects the jars found (recursively, following symlinks) under the user-lib directory,
// excluding the job jar itself. Returns an empty list when no directory is configured.
private static List<URL> getClasspathsFromUserLibDir(
        @Nullable File userLibDir, @Nullable File jarFile) throws IOException {
    if (userLibDir == null) {
        return Collections.emptyList();
    }
    // The stream from Files.walk must be closed to release the directory handles.
    try (Stream<Path> libDirContents =
            Files.walk(userLibDir.toPath(), FileVisitOption.FOLLOW_LINKS)) {
        return getClasspathsFromArtifacts(libDirContents, jarFile);
    }
}
// Collects the jar URLs for the given artifact files, excluding the job jar itself.
// An absent artifact collection yields no extra classpath entries.
private static List<URL> getClasspathsFromArtifacts(
        @Nullable Collection<File> userArtifacts, @Nullable File jarFile) {
    return userArtifacts == null
            ? Collections.emptyList()
            : getClasspathsFromArtifacts(userArtifacts.stream().map(File::toPath), jarFile);
}
// Filters the artifact stream down to jar files (excluding the job jar) and converts each
// into a URL relative to the current working directory; the result is unmodifiable.
private static List<URL> getClasspathsFromArtifacts(
        Stream<Path> userArtifacts, @Nullable File jarFile) {
    checkNotNull(userArtifacts);
    final Path workingDirectory = FileUtils.getCurrentWorkingDirectory();
    final List<URL> jarUrls =
            userArtifacts
                    .filter(FileUtils::isJarFile)
                    .filter(path -> !path.toFile().equals(jarFile))
                    .map(path -> FileUtils.relativizePath(workingDirectory, path))
                    .map(FunctionUtils.uncheckedFunction(FileUtils::toURL))
                    .collect(Collectors.toList());
    return Collections.unmodifiableList(jarUrls);
}
private static List<URL> getClasspathsFromConfiguration(Configuration configuration)
throws MalformedURLException {
if (configuration == null) {
return Collections.emptyList();
}
return ConfigUtils.decodeListFromConfig(
configuration, PipelineOptions.CLASSPATHS, URL::new);
}
}
|
to
|
java
|
apache__camel
|
components/camel-pulsar/src/test/java/org/apache/camel/component/pulsar/utils/consumers/ConsumerCreationStrategyFactoryTest.java
|
{
"start": 1188,
"end": 3497
}
|
class ____ {
@Test
public void givenPulsarConsumerIsNullwhenICreateFactoryverifyIllegalArgumentExceptionIsThrown() {
assertThrows(IllegalArgumentException.class,
() -> ConsumerCreationStrategyFactory.create(null));
}
@Test
public void givenPulsarConsumerAndRetryPolicyNonNullwhenICreateFactoryverifyIllegalArgumentExceptionIsNotThrown() {
ConsumerCreationStrategyFactory factory = ConsumerCreationStrategyFactory.create(mock(PulsarConsumer.class));
assertNotNull(factory);
}
@Test
public void verifyFailOverStrategy() {
ConsumerCreationStrategyFactory factory = ConsumerCreationStrategyFactory.create(mock(PulsarConsumer.class));
ConsumerCreationStrategy strategy = factory.getStrategy(SubscriptionType.FAILOVER);
assertEquals(FailoverConsumerStrategy.class, strategy.getClass());
}
@Test
public void verifySharedStrategy() {
ConsumerCreationStrategyFactory factory = ConsumerCreationStrategyFactory.create(mock(PulsarConsumer.class));
ConsumerCreationStrategy strategy = factory.getStrategy(SubscriptionType.SHARED);
assertEquals(SharedConsumerStrategy.class, strategy.getClass());
}
@Test
public void verifyExclusiveStrategy() {
ConsumerCreationStrategyFactory factory = ConsumerCreationStrategyFactory.create(mock(PulsarConsumer.class));
ConsumerCreationStrategy strategy = factory.getStrategy(SubscriptionType.EXCLUSIVE);
assertEquals(ExclusiveConsumerStrategy.class, strategy.getClass());
}
@Test
public void verifyKeySharedStrategy() {
ConsumerCreationStrategyFactory factory = ConsumerCreationStrategyFactory.create(mock(PulsarConsumer.class));
ConsumerCreationStrategy strategy = factory.getStrategy(SubscriptionType.KEY_SHARED);
assertEquals(KeySharedConsumerStrategy.class, strategy.getClass());
}
@Test
public void verifyDefaultStrategyIsExclusiveStrategy() {
ConsumerCreationStrategyFactory factory = ConsumerCreationStrategyFactory.create(mock(PulsarConsumer.class));
ConsumerCreationStrategy strategy = factory.getStrategy(null);
assertEquals(ExclusiveConsumerStrategy.class, strategy.getClass());
}
}
|
ConsumerCreationStrategyFactoryTest
|
java
|
google__guice
|
core/src/com/google/inject/internal/ExposedBindingImpl.java
|
{
"start": 1033,
"end": 2383
}
|
class ____<T> extends BindingImpl<T> implements ExposedBinding<T> {
private final PrivateElements privateElements;
ExposedBindingImpl(
InjectorImpl injector,
Object source,
Key<T> key,
InternalFactory<T> factory,
PrivateElements privateElements) {
super(injector, key, source, factory, Scoping.UNSCOPED);
this.privateElements = privateElements;
}
@Override
public <V> V acceptTargetVisitor(BindingTargetVisitor<? super T, V> visitor) {
return visitor.visit(this);
}
@Override
public Set<Dependency<?>> getDependencies() {
return ImmutableSet.<Dependency<?>>of(Dependency.get(Key.get(Injector.class)));
}
@Override
public PrivateElements getPrivateElements() {
return privateElements;
}
@Override
public String toString() {
return MoreObjects.toStringHelper(ExposedBinding.class)
.add("key", getKey())
.add("source", getSource())
.add("privateElements", privateElements)
.toString();
}
@Override
public void applyTo(Binder binder) {
throw new UnsupportedOperationException("This element represents a synthetic binding.");
}
// Purposely does not override equals/hashcode, because exposed bindings are only equal to
// themselves right now -- that is, there cannot be "duplicate" exposed bindings.
}
|
ExposedBindingImpl
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/testutils/TestingJobResultStore.java
|
{
"start": 4827,
"end": 7756
}
|
class ____ {
private Function<JobResultEntry, CompletableFuture<Void>> createDirtyResultConsumer =
jobResultEntry -> FutureUtils.completedVoidFuture();
private Function<JobID, CompletableFuture<Void>> markResultAsCleanConsumer =
jobID -> FutureUtils.completedVoidFuture();
private Function<JobID, CompletableFuture<Boolean>> hasJobResultEntryFunction =
jobID -> CompletableFuture.completedFuture(false);
private Function<JobID, CompletableFuture<Boolean>> hasDirtyJobResultEntryFunction =
jobID -> CompletableFuture.completedFuture(false);
private Function<JobID, CompletableFuture<Boolean>> hasCleanJobResultEntryFunction =
jobID -> CompletableFuture.completedFuture(false);
private SupplierWithException<Set<JobResult>, ? extends IOException>
getDirtyResultsSupplier = Collections::emptySet;
public Builder withCreateDirtyResultConsumer(
Function<JobResultEntry, CompletableFuture<Void>> createDirtyResultConsumer) {
this.createDirtyResultConsumer = createDirtyResultConsumer;
return this;
}
public Builder withMarkResultAsCleanConsumer(
Function<JobID, CompletableFuture<Void>> markResultAsCleanConsumer) {
this.markResultAsCleanConsumer = markResultAsCleanConsumer;
return this;
}
public Builder withHasJobResultEntryFunction(
Function<JobID, CompletableFuture<Boolean>> hasJobResultEntryFunction) {
this.hasJobResultEntryFunction = hasJobResultEntryFunction;
return this;
}
public Builder withHasDirtyJobResultEntryFunction(
Function<JobID, CompletableFuture<Boolean>> hasDirtyJobResultEntryFunction) {
this.hasDirtyJobResultEntryFunction = hasDirtyJobResultEntryFunction;
return this;
}
public Builder withHasCleanJobResultEntryFunction(
Function<JobID, CompletableFuture<Boolean>> hasCleanJobResultEntryFunction) {
this.hasCleanJobResultEntryFunction = hasCleanJobResultEntryFunction;
return this;
}
public Builder withGetDirtyResultsSupplier(
SupplierWithException<Set<JobResult>, ? extends IOException>
getDirtyResultsSupplier) {
this.getDirtyResultsSupplier = getDirtyResultsSupplier;
return this;
}
public TestingJobResultStore build() {
return new TestingJobResultStore(
createDirtyResultConsumer,
markResultAsCleanConsumer,
hasJobResultEntryFunction,
hasDirtyJobResultEntryFunction,
hasCleanJobResultEntryFunction,
getDirtyResultsSupplier);
}
}
}
|
Builder
|
java
|
quarkusio__quarkus
|
independent-projects/arc/processor/src/test/java/io/quarkus/arc/processor/MethodUtilsTest.java
|
{
"start": 3959,
"end": 4236
}
|
class ____ extends SuperClass<Boolean> {
@Override
void generic(Integer param) {
}
@Override
void nonGeneric(String param) {
}
@Override
void fromSuperClass(int param) {
}
}
public static
|
SomeClass
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/PulsarEndpointBuilderFactory.java
|
{
"start": 65661,
"end": 65978
}
|
class ____ extends AbstractEndpointBuilder implements PulsarEndpointBuilder, AdvancedPulsarEndpointBuilder {
public PulsarEndpointBuilderImpl(String path) {
super(componentName, path);
}
}
return new PulsarEndpointBuilderImpl(path);
}
}
|
PulsarEndpointBuilderImpl
|
java
|
apache__dubbo
|
dubbo-common/src/main/java/org/apache/dubbo/common/utils/Log.java
|
{
"start": 925,
"end": 3397
}
|
class ____ implements Serializable {
private static final long serialVersionUID = -534113138054377073L;
private String logName;
private Level logLevel;
private String logMessage;
private String logThread;
public String getLogName() {
return logName;
}
public void setLogName(String logName) {
this.logName = logName;
}
public Level getLogLevel() {
return logLevel;
}
public void setLogLevel(Level logLevel) {
this.logLevel = logLevel;
}
public String getLogMessage() {
return logMessage;
}
public void setLogMessage(String logMessage) {
this.logMessage = logMessage;
}
public String getLogThread() {
return logThread;
}
public void setLogThread(String logThread) {
this.logThread = logThread;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((logLevel == null) ? 0 : logLevel.hashCode());
result = prime * result + ((logMessage == null) ? 0 : logMessage.hashCode());
result = prime * result + ((logName == null) ? 0 : logName.hashCode());
result = prime * result + ((logThread == null) ? 0 : logThread.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Log other = (Log) obj;
if (logLevel == null) {
if (other.logLevel != null) {
return false;
}
} else if (!logLevel.equals(other.logLevel)) {
return false;
}
if (logMessage == null) {
if (other.logMessage != null) {
return false;
}
} else if (!logMessage.equals(other.logMessage)) {
return false;
}
if (logName == null) {
if (other.logName != null) {
return false;
}
} else if (!logName.equals(other.logName)) {
return false;
}
if (logThread == null) {
if (other.logThread != null) {
return false;
}
} else if (!logThread.equals(other.logThread)) {
return false;
}
return true;
}
}
|
Log
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/selector/JndiContextSelector.java
|
{
"start": 3757,
"end": 7623
}
|
class ____ implements NamedContextSelector {
private static final LoggerContext CONTEXT = new LoggerContext("Default");
private static final ConcurrentMap<String, LoggerContext> CONTEXT_MAP = new ConcurrentHashMap<>();
private static final StatusLogger LOGGER = StatusLogger.getLogger();
public JndiContextSelector() {
if (!JndiManager.isJndiContextSelectorEnabled()) {
throw new IllegalStateException("JNDI must be enabled by setting log4j2.enableJndiContextSelector=true");
}
}
@Override
public void shutdown(
final String fqcn, final ClassLoader loader, final boolean currentContext, final boolean allContexts) {
LoggerContext ctx = ContextAnchor.THREAD_CONTEXT.get();
if (ctx == null) {
final String loggingContextName = getContextName();
if (loggingContextName != null) {
ctx = CONTEXT_MAP.get(loggingContextName);
}
}
if (ctx != null) {
ctx.stop(DEFAULT_STOP_TIMEOUT, TimeUnit.MILLISECONDS);
}
}
@Override
public boolean hasContext(final String fqcn, final ClassLoader loader, final boolean currentContext) {
LoggerContext ctx = ContextAnchor.THREAD_CONTEXT.get();
if (ctx == null) {
final String loggingContextName = getContextName();
if (loggingContextName == null) {
return false;
}
ctx = CONTEXT_MAP.get(loggingContextName);
}
return ctx != null && ctx.isStarted();
}
@Override
public LoggerContext getContext(final String fqcn, final ClassLoader loader, final boolean currentContext) {
return getContext(fqcn, loader, currentContext, null);
}
@Override
public LoggerContext getContext(
final String fqcn, final ClassLoader loader, final boolean currentContext, final URI configLocation) {
final LoggerContext lc = ContextAnchor.THREAD_CONTEXT.get();
if (lc != null) {
return lc;
}
String loggingContextName = getContextName();
return loggingContextName == null ? CONTEXT : locateContext(loggingContextName, null, configLocation);
}
private static String getContextName() {
String loggingContextName = null;
try (final JndiManager jndiManager = JndiManager.getDefaultManager()) {
loggingContextName = jndiManager.lookup(Constants.JNDI_CONTEXT_NAME);
} catch (final NamingException ne) {
LOGGER.error("Unable to lookup {}", Constants.JNDI_CONTEXT_NAME, ne);
}
return loggingContextName;
}
@Override
public LoggerContext locateContext(final String name, final Object externalContext, final URI configLocation) {
if (name == null) {
LOGGER.error("A context name is required to locate a LoggerContext");
return null;
}
if (!CONTEXT_MAP.containsKey(name)) {
final LoggerContext ctx = new LoggerContext(name, externalContext, configLocation);
CONTEXT_MAP.putIfAbsent(name, ctx);
}
return CONTEXT_MAP.get(name);
}
@Override
public void removeContext(final LoggerContext context) {
for (final Map.Entry<String, LoggerContext> entry : CONTEXT_MAP.entrySet()) {
if (entry.getValue().equals(context)) {
CONTEXT_MAP.remove(entry.getKey());
}
}
}
@Override
public boolean isClassLoaderDependent() {
return false;
}
@Override
public LoggerContext removeContext(final String name) {
return CONTEXT_MAP.remove(name);
}
@Override
public List<LoggerContext> getLoggerContexts() {
return Collections.unmodifiableList(new ArrayList<>(CONTEXT_MAP.values()));
}
}
|
JndiContextSelector
|
java
|
alibaba__nacos
|
naming/src/main/java/com/alibaba/nacos/naming/cluster/remote/request/DistroDataRequest.java
|
{
"start": 936,
"end": 1689
}
|
class ____ extends AbstractClusterRequest {
private DistroData distroData;
private DataOperation dataOperation;
public DistroDataRequest() {
}
public DistroDataRequest(DistroData distroData, DataOperation dataOperation) {
this.distroData = distroData;
this.dataOperation = dataOperation;
}
public DistroData getDistroData() {
return distroData;
}
public void setDistroData(DistroData distroData) {
this.distroData = distroData;
}
public DataOperation getDataOperation() {
return dataOperation;
}
public void setDataOperation(DataOperation dataOperation) {
this.dataOperation = dataOperation;
}
}
|
DistroDataRequest
|
java
|
apache__flink
|
flink-dstl/flink-dstl-dfs/src/main/java/org/apache/flink/changelog/fs/TaskChangelogRegistryImpl.java
|
{
"start": 1286,
"end": 3830
}
|
class ____ implements TaskChangelogRegistry {
private static final Logger LOG = LoggerFactory.getLogger(TaskChangelogRegistryImpl.class);
private final Map<PhysicalStateHandleID, Long> entries = new ConcurrentHashMap<>();
private final Executor executor;
public TaskChangelogRegistryImpl(Executor executor) {
this.executor = executor;
}
@Override
public void startTracking(StreamStateHandle handle, long refCount) {
Preconditions.checkState(refCount > 0, "Initial refCount of state must larger than zero");
LOG.debug(
"start tracking state, key: {}, state: {}",
handle.getStreamStateHandleID(),
handle);
entries.put(handle.getStreamStateHandleID(), refCount);
}
@Override
public void stopTracking(StreamStateHandle handle) {
LOG.debug(
"stop tracking state, key: {}, state: {}", handle.getStreamStateHandleID(), handle);
entries.remove(handle.getStreamStateHandleID());
}
@Override
public void release(StreamStateHandle handle) {
PhysicalStateHandleID key = handle.getStreamStateHandleID();
LOG.debug("state reference count decreased by one, key: {}, state: {}", key, handle);
entries.compute(
key,
(handleID, refCount) -> {
if (refCount == null) {
LOG.warn("state is not in tracking, key: {}, state: {}", key, handle);
return null;
}
long newRefCount = refCount - 1;
if (newRefCount == 0) {
LOG.debug(
"state is not used by any backend, schedule discard: {}/{}",
key,
handle);
scheduleDiscard(handle);
return null;
} else {
return newRefCount;
}
});
}
private void scheduleDiscard(StreamStateHandle handle) {
executor.execute(
() -> {
try {
LOG.trace("discard uploaded but unused state changes: {}", handle);
handle.discardState();
} catch (Exception e) {
LOG.warn("unable to discard uploaded but unused state changes", e);
}
});
}
}
|
TaskChangelogRegistryImpl
|
java
|
quarkusio__quarkus
|
extensions/hibernate-orm/runtime-dev/src/main/java/io/quarkus/hibernate/orm/dev/HibernateOrmDevController.java
|
{
"start": 1626,
"end": 4444
}
|
class ____ {
private static final HibernateOrmDevController INSTANCE = new HibernateOrmDevController();
public static HibernateOrmDevController get() {
return INSTANCE;
}
private HibernateOrmDevInfo info = new HibernateOrmDevInfo();
private HibernateOrmDevController() {
}
public HibernateOrmDevInfo getInfo() {
return info;
}
void pushPersistenceUnit(SessionFactoryImplementor sessionFactoryImplementor, QuarkusPersistenceUnitDescriptor descriptor,
String persistenceUnitName, Metadata metadata, ServiceRegistry serviceRegistry, String importFile) {
List<HibernateOrmDevInfo.Entity> managedEntities = new ArrayList<>();
for (PersistentClass entityBinding : metadata.getEntityBindings()) {
managedEntities.add(new HibernateOrmDevInfo.Entity(entityBinding.getJpaEntityName(), entityBinding.getClassName(),
entityBinding.getTable().getName()));
}
// Sort entities alphabetically by JPA entity name
managedEntities.sort(Comparator.comparing(HibernateOrmDevInfo.Entity::getName));
List<HibernateOrmDevInfo.Query> namedQueries = new ArrayList<>();
{
List<NamedHqlQueryDefinition> namedQueriesHqlDefs = new ArrayList<>();
metadata.visitNamedHqlQueryDefinitions(namedQueriesHqlDefs::add);
for (NamedHqlQueryDefinition queryDefinition : namedQueriesHqlDefs) {
namedQueries.add(new HibernateOrmDevInfo.Query(queryDefinition));
}
}
// Sort named queries alphabetically by name
namedQueries.sort(Comparator.comparing(HibernateOrmDevInfo.Query::getName));
List<HibernateOrmDevInfo.Query> namedNativeQueries = new ArrayList<>();
{
List<NamedNativeQueryDefinition> namedNativeQueriesNativeDefs = new ArrayList<>();
metadata.visitNamedNativeQueryDefinitions(namedNativeQueriesNativeDefs::add);
for (NamedNativeQueryDefinition staticQueryDefinition : namedNativeQueriesNativeDefs) {
namedNativeQueries.add(new HibernateOrmDevInfo.Query(staticQueryDefinition));
}
}
DDLSupplier createDDLSupplier = new DDLSupplier(Action.CREATE, metadata, serviceRegistry, importFile);
DDLSupplier dropDDLSupplier = new DDLSupplier(Action.DROP, metadata, serviceRegistry, importFile);
DDLSupplier updateDDLSupplier = new DDLSupplier(Action.UPDATE, metadata, serviceRegistry, importFile);
info.add(new HibernateOrmDevInfo.PersistenceUnit(sessionFactoryImplementor, persistenceUnitName, managedEntities,
namedQueries, namedNativeQueries, createDDLSupplier, dropDDLSupplier, updateDDLSupplier,
descriptor.isReactive()));
}
|
HibernateOrmDevController
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFactory.java
|
{
"start": 3661,
"end": 4875
}
|
class ____ have been registered via
* {@link #addClass(Class, String...)}
* @param cmd name of the command
* @return instance of the requested command
*/
public Command getInstance(String cmd) {
return getInstance(cmd, getConf());
}
/**
* Get an instance of the requested command
* @param cmdName name of the command to lookup
* @param conf the hadoop configuration
* @return the {@link Command} or null if the command is unknown
*/
public Command getInstance(String cmdName, Configuration conf) {
if (conf == null) throw new NullPointerException("configuration is null");
Command instance = objectMap.get(cmdName);
if (instance == null) {
Class<? extends Command> cmdClass = classMap.get(cmdName);
if (cmdClass != null) {
instance = ReflectionUtils.newInstance(cmdClass, conf);
instance.setName(cmdName);
instance.setCommandFactory(this);
}
}
return instance;
}
/**
* Gets all of the registered commands
* @return a sorted list of command names
*/
public String[] getNames() {
String[] names = classMap.keySet().toArray(new String[0]);
Arrays.sort(names);
return names;
}
}
|
must
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/orm/jpa/domain/PersonRepository.java
|
{
"start": 792,
"end": 972
}
|
interface ____ {
List<Person> findAll();
Person findById(Long id);
Person findByName(String name);
Person save(Person person);
void remove(Person person);
}
|
PersonRepository
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/state/UserFacingMapState.java
|
{
"start": 1170,
"end": 3130
}
|
class ____<K, V> implements MapState<K, V> {
private final MapState<K, V> originalState;
private final Map<K, V> emptyState = Collections.<K, V>emptyMap();
UserFacingMapState(MapState<K, V> originalState) {
this.originalState = originalState;
}
// ------------------------------------------------------------------------
@Override
public V get(K key) throws Exception {
return originalState.get(key);
}
@Override
public void put(K key, V value) throws Exception {
originalState.put(key, value);
}
@Override
public void putAll(Map<K, V> value) throws Exception {
originalState.putAll(value);
}
@Override
public void clear() {
originalState.clear();
}
@Override
public void remove(K key) throws Exception {
originalState.remove(key);
}
@Override
public boolean contains(K key) throws Exception {
return originalState.contains(key);
}
@Override
public Iterable<Map.Entry<K, V>> entries() throws Exception {
Iterable<Map.Entry<K, V>> original = originalState.entries();
return original != null ? original : emptyState.entrySet();
}
@Override
public Iterable<K> keys() throws Exception {
Iterable<K> original = originalState.keys();
return original != null ? original : emptyState.keySet();
}
@Override
public Iterable<V> values() throws Exception {
Iterable<V> original = originalState.values();
return original != null ? original : emptyState.values();
}
@Override
public Iterator<Map.Entry<K, V>> iterator() throws Exception {
Iterator<Map.Entry<K, V>> original = originalState.iterator();
return original != null ? original : emptyState.entrySet().iterator();
}
@Override
public boolean isEmpty() throws Exception {
return originalState.isEmpty();
}
}
|
UserFacingMapState
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/LuceneEndpointBuilderFactory.java
|
{
"start": 2774,
"end": 6404
}
|
class ____ extends the abstract class
* org.apache.lucene.analysis.Analyzer. Lucene also offers a rich set of
* analyzers out of the box.
*
* The option will be converted to a
* <code>org.apache.lucene.analysis.Analyzer</code> type.
*
* Group: producer
*
* @param analyzer the value to set
* @return the dsl builder
*/
default LuceneEndpointBuilder analyzer(String analyzer) {
doSetProperty("analyzer", analyzer);
return this;
}
/**
* A file system directory in which index files are created upon
* analysis of the document by the specified analyzer.
*
* The option is a: <code>java.io.File</code> type.
*
* Group: producer
*
* @param indexDir the value to set
* @return the dsl builder
*/
default LuceneEndpointBuilder indexDir(java.io.File indexDir) {
doSetProperty("indexDir", indexDir);
return this;
}
/**
* A file system directory in which index files are created upon
* analysis of the document by the specified analyzer.
*
* The option will be converted to a <code>java.io.File</code> type.
*
* Group: producer
*
* @param indexDir the value to set
* @return the dsl builder
*/
default LuceneEndpointBuilder indexDir(String indexDir) {
doSetProperty("indexDir", indexDir);
return this;
}
/**
* An integer value that limits the result set of the search operation.
*
* The option is a: <code>int</code> type.
*
* Group: producer
*
* @param maxHits the value to set
* @return the dsl builder
*/
default LuceneEndpointBuilder maxHits(int maxHits) {
doSetProperty("maxHits", maxHits);
return this;
}
/**
* An integer value that limits the result set of the search operation.
*
* The option will be converted to a <code>int</code> type.
*
* Group: producer
*
* @param maxHits the value to set
* @return the dsl builder
*/
default LuceneEndpointBuilder maxHits(String maxHits) {
doSetProperty("maxHits", maxHits);
return this;
}
/**
* An optional directory containing files to be used to be analyzed and
* added to the index at producer startup.
*
* The option is a: <code>java.io.File</code> type.
*
* Group: producer
*
* @param srcDir the value to set
* @return the dsl builder
*/
default LuceneEndpointBuilder srcDir(java.io.File srcDir) {
doSetProperty("srcDir", srcDir);
return this;
}
/**
* An optional directory containing files to be used to be analyzed and
* added to the index at producer startup.
*
* The option will be converted to a <code>java.io.File</code> type.
*
* Group: producer
*
* @param srcDir the value to set
* @return the dsl builder
*/
default LuceneEndpointBuilder srcDir(String srcDir) {
doSetProperty("srcDir", srcDir);
return this;
}
}
/**
* Advanced builder for endpoint for the Lucene component.
*/
public
|
that
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/join/TestJoinDatamerge.java
|
{
"start": 4205,
"end": 5019
}
|
class ____
extends Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {
protected final static IntWritable one = new IntWritable(1);
int srcs;
public void setup(Context context) {
srcs = context.getConfiguration().getInt("testdatamerge.sources", 0);
assertTrue(srcs > 0, "Invalid src count: " + srcs);
}
public void reduce(IntWritable key, Iterable<IntWritable> values,
Context context) throws IOException, InterruptedException {
int seen = 0;
for (IntWritable value : values) {
seen += value.get();
}
assertTrue(verify(key.get(), seen), "Bad count for " + key.get());
context.write(key, new IntWritable(seen));
}
public abstract boolean verify(int key, int occ);
}
private static
|
SimpleCheckerReduceBase
|
java
|
quarkusio__quarkus
|
extensions/mongodb-client/runtime/src/main/java/io/quarkus/mongodb/reactive/ReactiveMongoCollection.java
|
{
"start": 73637,
"end": 74383
}
|
class ____ cast any documents returned from the database into.
* @param <NewTDocument> The type that the new collection will encode documents from and decode documents to
* @return a new ReactiveMongoCollection instance with the different default class
*/
<NewTDocument> ReactiveMongoCollection<NewTDocument> withDocumentClass(Class<NewTDocument> clazz);
/**
* Create a new ReactiveMongoCollection instance with a different read preference.
*
* @param readPreference the new {@link com.mongodb.ReadPreference} for the collection
* @return a new ReactiveMongoCollection instance with the different readPreference
*/
ReactiveMongoCollection<T> withReadPreference(ReadPreference readPreference);
}
|
to
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotTests.java
|
{
"start": 17563,
"end": 18815
}
|
class ____ extends FilterDirectoryReader {
DropFullDeletedSegmentsReader(DirectoryReader in) throws IOException {
super(in, new SubReaderWrapper() {
@Override
protected LeafReader[] wrap(List<? extends LeafReader> readers) {
List<LeafReader> wrapped = new ArrayList<>(readers.size());
for (LeafReader reader : readers) {
LeafReader wrap = wrap(reader);
assert wrap != null;
if (wrap.numDocs() != 0) {
wrapped.add(wrap);
}
}
return wrapped.toArray(new LeafReader[0]);
}
@Override
public LeafReader wrap(LeafReader reader) {
return reader;
}
});
}
@Override
protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
return new DropFullDeletedSegmentsReader(in);
}
@Override
public CacheHelper getReaderCacheHelper() {
return in.getReaderCacheHelper();
}
}
}
|
DropFullDeletedSegmentsReader
|
java
|
apache__flink
|
flink-python/src/main/java/org/apache/flink/python/FlinkSlf4jLogWriter.java
|
{
"start": 1068,
"end": 2544
}
|
class ____ implements LogWriter {
static final Logger LOGGER = LoggerFactory.getLogger("PythonWorker");
@Override
public void log(BeamFnApi.LogEntry entry) {
String location = entry.getLogLocation();
String message = entry.getMessage();
String trace = entry.getTrace();
switch (entry.getSeverity()) {
case ERROR:
case CRITICAL:
if (trace == null) {
LOGGER.error("{} {}", location, message);
} else {
LOGGER.error("{} {} {}", location, message, trace);
}
break;
case WARN:
if (trace == null) {
LOGGER.warn("{} {}", location, message);
} else {
LOGGER.warn("{} {} {}", location, message, trace);
}
break;
case INFO:
case NOTICE:
LOGGER.info("{} {}", location, message);
break;
case DEBUG:
LOGGER.debug("{} {}", location, message);
break;
case UNSPECIFIED:
case TRACE:
LOGGER.trace("{} {}", location, message);
break;
default:
LOGGER.warn("Unknown message severity {}", entry.getSeverity());
LOGGER.info("{} {}", location, message);
break;
}
}
}
|
FlinkSlf4jLogWriter
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java
|
{
"start": 2415,
"end": 6844
}
|
class ____ implements Closeable {
/**
* A setting to enable or disable request caching on an index level. Its dynamic by default
* since we are checking on the cluster state IndexMetadata always.
*/
public static final Setting<Boolean> INDEX_CACHE_REQUEST_ENABLED_SETTING = Setting.boolSetting(
"index.requests.cache.enable",
true,
Property.Dynamic,
Property.IndexScope
);
public static final Setting<ByteSizeValue> INDICES_CACHE_QUERY_SIZE = Setting.memorySizeSetting(
"indices.requests.cache.size",
"1%",
Property.NodeScope
);
public static final Setting<TimeValue> INDICES_CACHE_QUERY_EXPIRE = Setting.positiveTimeSetting(
"indices.requests.cache.expire",
new TimeValue(0),
Property.NodeScope
);
private final ConcurrentMap<CleanupKey, Boolean> registeredClosedListeners = ConcurrentCollections.newConcurrentMap();
private final Set<CleanupKey> keysToClean = ConcurrentCollections.newConcurrentSet();
private final Cache<Key, BytesReference> cache;
IndicesRequestCache(Settings settings) {
TimeValue expire = INDICES_CACHE_QUERY_EXPIRE.exists(settings) ? INDICES_CACHE_QUERY_EXPIRE.get(settings) : null;
CacheBuilder<Key, BytesReference> cacheBuilder = CacheBuilder.<Key, BytesReference>builder()
.setMaximumWeight(INDICES_CACHE_QUERY_SIZE.get(settings).getBytes())
.weigher((k, v) -> k.ramBytesUsed() + v.ramBytesUsed())
.removalListener(notification -> notification.getKey().entity.onRemoval(notification));
if (expire != null) {
cacheBuilder.setExpireAfterAccess(expire);
}
cache = cacheBuilder.build();
}
@Override
public void close() {
cache.invalidateAll();
}
void clear(CacheEntity entity) {
keysToClean.add(new CleanupKey(entity, null));
cleanCache();
}
BytesReference getOrCompute(
CacheEntity cacheEntity,
CheckedSupplier<BytesReference, IOException> loader,
MappingLookup.CacheKey mappingCacheKey,
DirectoryReader reader,
BytesReference cacheKey
) throws Exception {
final ESCacheHelper cacheHelper = ElasticsearchDirectoryReader.getESReaderCacheHelper(reader);
assert cacheHelper != null;
final Key key = new Key(cacheEntity, mappingCacheKey, cacheHelper.getKey(), cacheKey);
Loader cacheLoader = new Loader(cacheEntity, loader);
BytesReference value = cache.computeIfAbsent(key, cacheLoader);
if (cacheLoader.isLoaded()) {
key.entity.onMiss();
// see if its the first time we see this reader, and make sure to register a cleanup key
CleanupKey cleanupKey = new CleanupKey(cacheEntity, cacheHelper.getKey());
if (registeredClosedListeners.containsKey(cleanupKey) == false) {
Boolean previous = registeredClosedListeners.putIfAbsent(cleanupKey, Boolean.TRUE);
if (previous == null) {
cacheHelper.addClosedListener(cleanupKey);
}
}
/*
* Note that we don't use a closed listener for the mapping. Instead
* we let cache entries for out of date mappings age out. We do this
* because we don't reference count the MappingLookup so we can't tell
* when one is no longer used. Mapping updates should be a lot less
* frequent than reader closes so this is probably ok. On the other
* hand, for read only indices mapping changes are, well, possible,
* and readers are never changed. Oh well.
*/
} else {
key.entity.onHit();
}
return value;
}
/**
* Invalidates the given the cache entry for the given key and it's context
* @param cacheEntity the cache entity to invalidate for
* @param reader the reader to invalidate the cache entry for
* @param cacheKey the cache key to invalidate
*/
void invalidate(CacheEntity cacheEntity, MappingLookup.CacheKey mappingCacheKey, DirectoryReader reader, BytesReference cacheKey) {
assert reader.getReaderCacheHelper() != null;
cache.invalidate(new Key(cacheEntity, mappingCacheKey, reader.getReaderCacheHelper().getKey(), cacheKey));
}
private static
|
IndicesRequestCache
|
java
|
lettuce-io__lettuce-core
|
src/test/jmh/io/lettuce/core/support/JmhMain.java
|
{
"start": 434,
"end": 1745
}
|
class ____ {
public static void main(String... args) throws RunnerException {
// run selectively
// runCommandBenchmark();
// runGenericConnectionPoolBenchmark();
runAsyncConnectionPoolBenchmark();
}
private static void runGenericConnectionPoolBenchmark() throws RunnerException {
new Runner(prepareOptions().mode(Mode.AverageTime).timeUnit(TimeUnit.NANOSECONDS)
.include(".*GenericConnectionPoolBenchmark.*").build()).run();
new Runner(prepareOptions().mode(Mode.Throughput).timeUnit(TimeUnit.SECONDS)
.include(".*GenericConnectionPoolBenchmark.*").build()).run();
}
private static void runAsyncConnectionPoolBenchmark() throws RunnerException {
new Runner(prepareOptions().mode(Mode.AverageTime).timeUnit(TimeUnit.NANOSECONDS)
.include(".*AsyncConnectionPoolBenchmark.*").build()).run();
new Runner(prepareOptions().mode(Mode.Throughput).timeUnit(TimeUnit.SECONDS)
.include(".*AsyncConnectionPoolBenchmark.*").build()).run();
}
private static ChainedOptionsBuilder prepareOptions() {
return new OptionsBuilder().forks(1).warmupIterations(5).threads(1).measurementIterations(5)
.timeout(TimeValue.seconds(2));
}
}
|
JmhMain
|
java
|
apache__camel
|
core/camel-xml-io/src/main/java/org/apache/camel/xml/io/XmlPullParser.java
|
{
"start": 41509,
"end": 45239
}
|
class ____ above).
*
* @see #next
* @see #START_TAG
* @see #TEXT
* @see #END_TAG
* @see #END_DOCUMENT
* @see #COMMENT
* @see #DOCDECL
* @see #PROCESSING_INSTRUCTION
* @see #ENTITY_REF
* @see #IGNORABLE_WHITESPACE
*/
int nextToken() throws XmlPullParserException, IOException;
// -----------------------------------------------------------------------------
// utility methods to mak XML parsing easier ...
/**
* Test if the current event is of the given type and if the namespace and name do match. null will match any
* namespace and any name. If the test is not passed, an exception is thrown. The exception text indicates the
* parser position, the expected event and the current event that is not meeting the requirement.
* <p>
* Essentially it does this
*
* <pre>
* if (type != getEventType() || (namespace != null && !namespace.equals(getNamespace()))
* || (name != null && !name.equals(getName())))
* throw new XmlPullParserException("expected " + TYPES[type] + getPositionDescription());
* </pre>
*/
void require(int type, String namespace, String name) throws XmlPullParserException, IOException;
/**
* If current event is START_TAG then if next element is TEXT then element content is returned or if next event is
* END_TAG then empty string is returned, otherwise exception is thrown. After calling this function successfully
* parser will be positioned on END_TAG.
* <p>
* The motivation for this function is to allow to parse consistently both empty elements and elements that has non
* empty content, for example for input:
* <ol>
* <li><tag>foo</tag>
* <li><tag></tag> (which is equivalent to <tag/> both input can be parsed with the same code:
*
* <pre>
* p.nextTag()
* p.requireEvent(p.START_TAG, "", "tag");
* String content = p.nextText();
* p.requireEvent(p.END_TAG, "", "tag");
* </pre>
*
* </li>
* </ol>
* This function together with nextTag make it very easy to parse XML that has no mixed content.
* <p>
* Essentially it does this
*
* <pre>
* if (getEventType() != START_TAG) {
* throw new XmlPullParserException("parser must be on START_TAG to read next text", this, null);
* }
* int eventType = next();
* if (eventType == TEXT) {
* String result = getText();
* eventType = next();
* if (eventType != END_TAG) {
* throw new XmlPullParserException("event TEXT it must be immediately followed by END_TAG", this, null);
* }
* return result;
* } else if (eventType == END_TAG) {
* return "";
* } else {
* throw new XmlPullParserException("parser must be on START_TAG or TEXT to read text", this, null);
* }
* </pre>
*/
String nextText() throws XmlPullParserException, IOException;
/**
* Call next() and return event if it is START_TAG or END_TAG otherwise throw an exception. It will skip whitespace
* TEXT before actual tag if any.
* <p>
* essentially it does this
*
* <pre>
* int eventType = next();
* if (eventType == TEXT && isWhitespace()) { // skip whitespace
* eventType = next();
* }
* if (eventType != START_TAG && eventType != END_TAG) {
* throw new XmlPullParserException("expected start or end tag", this, null);
* }
* return eventType;
* </pre>
*/
int nextTag() throws XmlPullParserException, IOException;
}
|
description
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/CheckReturnValueTest.java
|
{
"start": 32635,
"end": 32795
}
|
interface ____ extends Builder<SomeBuilder> {}")
.expectUnchanged()
.addInputLines(
"Test.java",
"""
|
SomeBuilder
|
java
|
processing__processing4
|
java/src/processing/mode/java/debug/Debugger.java
|
{
"start": 2424,
"end": 2523
}
|
class ____'s currently being debugged
protected String mainClassName;
/// the debuggee's main
|
that
|
java
|
spring-projects__spring-boot
|
module/spring-boot-jersey/src/test/java/org/springframework/boot/jersey/autoconfigure/metrics/JerseyServerMetricsAutoConfigurationTests.java
|
{
"start": 5492,
"end": 5667
}
|
class ____ {
@Bean
ResourceConfig resourceConfig() {
return new ResourceConfig().register(new TestResource());
}
@Path("/users")
public static
|
ResourceConfiguration
|
java
|
apache__avro
|
lang/java/trevni/core/src/main/java/org/apache/trevni/BlockDescriptor.java
|
{
"start": 864,
"end": 1639
}
|
class ____ {
int rowCount;
int uncompressedSize;
int compressedSize;
BlockDescriptor() {
}
BlockDescriptor(int rowCount, int uncompressedSize, int compressedSize) {
this.rowCount = rowCount;
this.uncompressedSize = uncompressedSize;
this.compressedSize = compressedSize;
}
public void writeTo(OutputBuffer out) throws IOException {
out.writeFixed32(rowCount);
out.writeFixed32(uncompressedSize);
out.writeFixed32(compressedSize);
}
public static BlockDescriptor read(InputBuffer in) throws IOException {
BlockDescriptor result = new BlockDescriptor();
result.rowCount = in.readFixed32();
result.uncompressedSize = in.readFixed32();
result.compressedSize = in.readFixed32();
return result;
}
}
|
BlockDescriptor
|
java
|
spring-projects__spring-framework
|
spring-test/src/main/java/org/springframework/test/context/TestExecutionListeners.java
|
{
"start": 2523,
"end": 4507
}
|
interface ____ {
/**
* Alias for {@link #listeners}.
* <p>This attribute may <strong>not</strong> be used in conjunction with
* {@link #listeners}, but it may be used instead of {@link #listeners}.
*/
@AliasFor("listeners")
Class<? extends TestExecutionListener>[] value() default {};
/**
* The {@link TestExecutionListener TestExecutionListeners} to register with
* the {@link TestContextManager}.
* <p>This attribute may <strong>not</strong> be used in conjunction with
* {@link #value}, but it may be used instead of {@link #value}.
* @see org.springframework.test.context.web.ServletTestExecutionListener
* @see org.springframework.test.context.support.DirtiesContextBeforeModesTestExecutionListener
* @see org.springframework.test.context.event.ApplicationEventsTestExecutionListener
* @see org.springframework.test.context.bean.override.BeanOverrideTestExecutionListener
* @see org.springframework.test.context.support.DependencyInjectionTestExecutionListener
* @see org.springframework.test.context.support.DirtiesContextTestExecutionListener
* @see org.springframework.test.context.support.CommonCachesTestExecutionListener
* @see org.springframework.test.context.transaction.TransactionalTestExecutionListener
* @see org.springframework.test.context.jdbc.SqlScriptsTestExecutionListener
* @see org.springframework.test.context.event.EventPublishingTestExecutionListener
* @see org.springframework.test.context.bean.override.mockito.MockitoResetTestExecutionListener
*/
@AliasFor("value")
Class<? extends TestExecutionListener>[] listeners() default {};
/**
* Whether {@link #listeners TestExecutionListeners} from superclasses
* and enclosing classes should be <em>inherited</em>.
* <p>The default value is {@code true}, which means that an annotated class
* will <em>inherit</em> the listeners defined by an annotated superclass or
* enclosing class. Specifically, the listeners for an annotated
|
TestExecutionListeners
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/decorators/validation/NoDelegateInjectionPointTest.java
|
{
"start": 1075,
"end": 1268
}
|
class ____ implements Converter<String, String> {
@Override
public String convert(String value) {
return null;
}
}
}
|
DecoratorWithNoDelegateInjectionPoint
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/CoAPEndpointBuilderFactory.java
|
{
"start": 53196,
"end": 53503
}
|
class ____ extends AbstractEndpointBuilder implements CoAPEndpointBuilder, AdvancedCoAPEndpointBuilder {
public CoAPEndpointBuilderImpl(String path) {
super(componentName, path);
}
}
return new CoAPEndpointBuilderImpl(path);
}
}
|
CoAPEndpointBuilderImpl
|
java
|
micronaut-projects__micronaut-core
|
management/src/main/java/io/micronaut/management/endpoint/routes/impl/DefaultRouteData.java
|
{
"start": 1214,
"end": 2132
}
|
class ____ implements RouteData<Map<String, String>> {
@Override
public Map<String, String> getData(UriRouteInfo<?, ?> routeInfo) {
return Collections.singletonMap("method", getMethodString(routeInfo.getTargetMethod()));
}
/**
* @param targetMethod The {@link MethodExecutionHandle}
* @return A String with the target method
*/
protected String getMethodString(MethodExecutionHandle<?, ?> targetMethod) {
return targetMethod.getReturnType().asArgument().getTypeString(false) +
" " +
targetMethod.getDeclaringType().getName() +
'.' +
targetMethod.getMethodName() +
"(" +
Arrays.stream(targetMethod.getArguments())
.map(argument -> argument.getType().getName() + " " + argument.getName())
.collect(Collectors.joining(", ")) +
")";
}
}
|
DefaultRouteData
|
java
|
quarkusio__quarkus
|
independent-projects/resteasy-reactive/client/processor/src/main/java/org/jboss/resteasy/reactive/client/processor/beanparam/BeanParamItem.java
|
{
"start": 96,
"end": 574
}
|
class ____ extends Item {
private final List<Item> items;
private final String className;
public BeanParamItem(String fieldName, List<Item> items, String className, ValueExtractor extractor) {
super(fieldName, ItemType.BEAN_PARAM, false, extractor);
this.items = items;
this.className = className;
}
public String className() {
return className;
}
public List<Item> items() {
return items;
}
}
|
BeanParamItem
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/GuardedByCheckerTest.java
|
{
"start": 24726,
"end": 24876
}
|
class ____ {
final Object lock = new Object();
@GuardedBy("lock")
int x;
}
|
A
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/test/entity/pagemodel/SegmentInstance.java
|
{
"start": 545,
"end": 911
}
|
class ____ extends ComponentInstance implements Serializable {
private static final long serialVersionUID = -2307992962779806227L;
List<LayoutInstance> layouts;
public List<LayoutInstance> getLayouts() {
return layouts;
}
public void setLayouts(List<LayoutInstance> layouts) {
this.layouts = layouts;
}
}
|
SegmentInstance
|
java
|
spring-projects__spring-security
|
itest/context/src/integration-test/java/org/springframework/security/integration/python/PythonInterpreterBasedSecurityTests.java
|
{
"start": 1249,
"end": 1590
}
|
class ____ {
@Autowired
private TestService service;
@Test
public void serviceMethod() {
SecurityContextHolder.getContext()
.setAuthentication(UsernamePasswordAuthenticationToken.unauthenticated("bob", "bobspassword"));
// for (int i=0; i < 1000; i++) {
this.service.someMethod();
// }
}
}
|
PythonInterpreterBasedSecurityTests
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/query/NativeQueryResultTypeAutoDiscoveryTest.java
|
{
"start": 18731,
"end": 18897
}
|
class ____ extends TestedEntity<Byte> {
public Byte getTestedProperty() {
return testedProperty;
}
}
@Entity(name = "doubleEntity")
public static
|
TinyintEntity
|
java
|
google__guava
|
android/guava/src/com/google/common/util/concurrent/AbstractScheduledService.java
|
{
"start": 9686,
"end": 15157
}
|
class ____ implements Runnable {
@Override
public void run() {
lock.lock();
try {
/*
* requireNonNull is safe because Task isn't run (or at least it doesn't succeed in taking
* the lock) until after it's scheduled and the runningTask field is set.
*/
if (requireNonNull(runningTask).isCancelled()) {
// task may have been cancelled while blocked on the lock.
return;
}
AbstractScheduledService.this.runOneIteration();
} catch (Throwable t) {
restoreInterruptIfIsInterruptedException(t);
try {
shutDown();
} catch (Exception ignored) {
restoreInterruptIfIsInterruptedException(ignored);
logger
.get()
.log(
Level.WARNING,
"Error while attempting to shut down the service after failure.",
ignored);
}
notifyFailed(t);
// requireNonNull is safe now, just as it was above.
requireNonNull(runningTask).cancel(false); // prevent future invocations.
} finally {
lock.unlock();
}
}
}
private final Runnable task = new Task();
@Override
protected final void doStart() {
executorService =
MoreExecutors.renamingDecorator(executor(), () -> serviceName() + " " + state());
executorService.execute(
() -> {
lock.lock();
try {
startUp();
/*
* requireNonNull is safe because executorService is never cleared after the
* assignment above.
*/
requireNonNull(executorService);
runningTask = scheduler().schedule(delegate, executorService, task);
notifyStarted();
} catch (Throwable t) {
restoreInterruptIfIsInterruptedException(t);
notifyFailed(t);
if (runningTask != null) {
// prevent the task from running if possible
runningTask.cancel(false);
}
} finally {
lock.unlock();
}
});
}
@Override
protected final void doStop() {
// Both requireNonNull calls are safe because doStop can run only after a successful doStart.
requireNonNull(runningTask);
requireNonNull(executorService);
runningTask.cancel(false);
executorService.execute(
() -> {
try {
lock.lock();
try {
if (state() != State.STOPPING) {
// This means that the state has changed since we were scheduled. This implies
// that an execution of runOneIteration has thrown an exception and we have
// transitioned to a failed state, also this means that shutDown has already
// been called, so we do not want to call it again.
return;
}
shutDown();
} finally {
lock.unlock();
}
notifyStopped();
} catch (Throwable t) {
restoreInterruptIfIsInterruptedException(t);
notifyFailed(t);
}
});
}
@Override
public String toString() {
return AbstractScheduledService.this.toString();
}
}
/** Constructor for use by subclasses. */
protected AbstractScheduledService() {}
/**
* Run one iteration of the scheduled task. If any invocation of this method throws an exception,
* the service will transition to the {@link Service.State#FAILED} state and this method will no
* longer be called.
*/
protected abstract void runOneIteration() throws Exception;
/**
* Start the service.
*
* <p>By default this method does nothing.
*/
protected void startUp() throws Exception {}
/**
* Stop the service. This is guaranteed not to run concurrently with {@link #runOneIteration}.
*
* <p>By default this method does nothing.
*/
protected void shutDown() throws Exception {}
/**
* Returns the {@link Scheduler} object used to configure this service. This method will only be
* called once.
*/
// TODO(cpovirk): @ForOverride
protected abstract Scheduler scheduler();
/**
* Returns the {@link ScheduledExecutorService} that will be used to execute the {@link #startUp},
* {@link #runOneIteration} and {@link #shutDown} methods. If this method is overridden the
* executor will not be {@linkplain ScheduledExecutorService#shutdown shutdown} when this service
* {@linkplain Service.State#TERMINATED terminates} or {@linkplain Service.State#TERMINATED
* fails}. Subclasses may override this method to supply a custom {@link ScheduledExecutorService}
* instance. This method is guaranteed to only be called once.
*
* <p>By default this returns a new {@link ScheduledExecutorService} with a single thread pool
* that sets the name of the thread to the {@linkplain #serviceName() service name}. Also, the
* pool will be {@linkplain ScheduledExecutorService#shutdown() shut down} when the service
* {@linkplain Service.State#TERMINATED terminates} or {@linkplain Service.State#TERMINATED
* fails}.
*/
protected ScheduledExecutorService executor() {
@WeakOuter
final
|
Task
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/query/sqm/tree/AbstractSqmRestrictedDmlStatement.java
|
{
"start": 1008,
"end": 5083
}
|
class ____<T> extends AbstractSqmDmlStatement<T>
implements JpaCriteriaBase {
private @Nullable SqmWhereClause whereClause;
/**
* Constructor for HQL statements.
*/
public AbstractSqmRestrictedDmlStatement(SqmQuerySource querySource, NodeBuilder nodeBuilder) {
super( querySource, nodeBuilder );
}
/**
* Constructor for Criteria statements.
*/
public AbstractSqmRestrictedDmlStatement(SqmRoot<T> target, SqmQuerySource querySource, NodeBuilder nodeBuilder) {
super( target, querySource, nodeBuilder );
}
protected AbstractSqmRestrictedDmlStatement(
NodeBuilder builder,
SqmQuerySource querySource,
@Nullable Set<SqmParameter<?>> parameters,
Map<String, SqmCteStatement<?>> cteStatements,
SqmRoot<T> target) {
super( builder, querySource, parameters, cteStatements, target );
}
protected @Nullable SqmWhereClause copyWhereClause(SqmCopyContext context) {
if ( whereClause == null ) {
return null;
}
else {
final SqmPredicate predicate = whereClause.getPredicate();
return new SqmWhereClause( predicate == null ? null : predicate.copy( context ), nodeBuilder() );
}
}
public SqmRoot<T> from(Class<T> entityClass) {
return from( nodeBuilder().getDomainModel().entity( entityClass ) );
}
public SqmRoot<T> from(EntityType<T> entity) {
final EntityDomainType<T> entityDomainType = (EntityDomainType<T>) entity;
final SqmRoot<T> root = getTarget();
if ( root.getModel() != entity ) {
throw new IllegalArgumentException(
String.format(
"Expecting DML target entity type [%s] but got [%s]",
root.getModel().getHibernateEntityName(),
entityDomainType.getName()
)
);
}
return root;
}
public SqmRoot<T> getRoot() {
return getTarget();
}
public @Nullable SqmWhereClause getWhereClause() {
return whereClause;
}
public void applyPredicate(@Nullable SqmPredicate predicate) {
if ( predicate != null ) {
initAndGetWhereClause().applyPredicate( predicate );
}
}
public void setWhereClause(@Nullable SqmWhereClause whereClause) {
this.whereClause = whereClause;
}
@Override
public @Nullable JpaPredicate getRestriction() {
return whereClause == null ? null : whereClause.getPredicate();
}
protected void setWhere(@Nullable Expression<Boolean> restriction) {
// Replaces the current predicate if one is present
initAndGetWhereClause().setPredicate( (SqmPredicate) restriction );
}
protected SqmWhereClause initAndGetWhereClause() {
if ( whereClause == null ) {
whereClause = new SqmWhereClause( nodeBuilder() );
}
return whereClause;
}
protected void setWhere(Predicate @Nullable ... restrictions) {
final SqmWhereClause whereClause = initAndGetWhereClause();
// Clear the current predicate if one is present
whereClause.setPredicate( null );
if ( restrictions != null ) {
for ( Predicate restriction : restrictions ) {
whereClause.applyPredicate( (SqmPredicate) restriction );
}
}
}
@Override
public void appendHqlString(StringBuilder hql, SqmRenderContext context) {
if ( whereClause != null ) {
final var predicate = whereClause.getPredicate();
if ( predicate != null ) {
hql.append( " where " );
predicate.appendHqlString( hql, context );
}
}
}
@Override
public boolean equals(@Nullable Object object) {
return object instanceof AbstractSqmRestrictedDmlStatement<?> that
&& super.equals( object )
&& Objects.equals( getWhereClause(), that.getWhereClause() );
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + Objects.hashCode( getWhereClause() );
return result;
}
@Override
public boolean isCompatible(Object object) {
return object instanceof AbstractSqmRestrictedDmlStatement<?> that
&& super.isCompatible( object )
&& SqmCacheable.areCompatible( getWhereClause(), that.getWhereClause() );
}
@Override
public int cacheHashCode() {
int result = super.cacheHashCode();
result = 31 * result + SqmCacheable.cacheHashCode( getWhereClause() );
return result;
}
}
|
AbstractSqmRestrictedDmlStatement
|
java
|
micronaut-projects__micronaut-core
|
inject/src/main/java/io/micronaut/inject/BeanDefinitionMethodReference.java
|
{
"start": 689,
"end": 904
}
|
interface ____ a {@link ExecutableMethod} that is associated with a {@link BeanDefinitionReference}.
*
* @param <T> The type
* @param <R> The result type
* @author graemerocher
* @since 1.0
*/
@Internal
public
|
for
|
java
|
spring-projects__spring-framework
|
spring-beans/src/test/java/org/springframework/beans/factory/annotation/InjectAnnotationBeanPostProcessorTests.java
|
{
"start": 39851,
"end": 40273
}
|
class ____ implements Serializable {
@Inject
private Provider<Map<String, TestBean>> testBeanFactory;
public void setTestBeanFactory(Provider<Map<String, TestBean>> testBeanFactory) {
this.testBeanFactory = testBeanFactory;
}
public TestBean getTestBean() {
return this.testBeanFactory.get().values().iterator().next();
}
}
@SuppressWarnings("serial")
public static
|
ObjectFactoryMapFieldInjectionBean
|
java
|
google__guava
|
guava/src/com/google/common/collect/CollectSpliterators.java
|
{
"start": 5470,
"end": 11194
}
|
class ____ implements Spliterator<T>, Consumer<T> {
@Nullable T holder = null;
@Override
public void accept(@ParametricNullness T t) {
this.holder = t;
}
@Override
public boolean tryAdvance(Consumer<? super T> action) {
while (fromSpliterator.tryAdvance(this)) {
try {
// The cast is safe because tryAdvance puts a T into `holder`.
T next = uncheckedCastNullableTToT(holder);
if (predicate.test(next)) {
action.accept(next);
return true;
}
} finally {
holder = null;
}
}
return false;
}
@Override
public @Nullable Spliterator<T> trySplit() {
Spliterator<T> fromSplit = fromSpliterator.trySplit();
return (fromSplit == null) ? null : filter(fromSplit, predicate);
}
@Override
public long estimateSize() {
return fromSpliterator.estimateSize() / 2;
}
@Override
public @Nullable Comparator<? super T> getComparator() {
return fromSpliterator.getComparator();
}
@Override
public int characteristics() {
return fromSpliterator.characteristics()
& (Spliterator.DISTINCT
| Spliterator.NONNULL
| Spliterator.ORDERED
| Spliterator.SORTED);
}
}
return new Splitr();
}
/**
* Returns a {@code Spliterator} that iterates over the elements of the spliterators generated by
* applying {@code function} to the elements of {@code fromSpliterator}.
*/
static <InElementT extends @Nullable Object, OutElementT extends @Nullable Object>
Spliterator<OutElementT> flatMap(
Spliterator<InElementT> fromSpliterator,
Function<? super InElementT, @Nullable Spliterator<OutElementT>> function,
int topCharacteristics,
long topSize) {
checkArgument(
(topCharacteristics & Spliterator.SUBSIZED) == 0,
"flatMap does not support SUBSIZED characteristic");
checkArgument(
(topCharacteristics & Spliterator.SORTED) == 0,
"flatMap does not support SORTED characteristic");
checkNotNull(fromSpliterator);
checkNotNull(function);
return new FlatMapSpliteratorOfObject<>(
null, fromSpliterator, function, topCharacteristics, topSize);
}
/**
* Returns a {@code Spliterator.OfInt} that iterates over the elements of the spliterators
* generated by applying {@code function} to the elements of {@code fromSpliterator}. (If {@code
* function} returns {@code null} for an input, it is replaced with an empty stream.)
*/
static <InElementT extends @Nullable Object> Spliterator.OfInt flatMapToInt(
Spliterator<InElementT> fromSpliterator,
Function<? super InElementT, Spliterator.@Nullable OfInt> function,
int topCharacteristics,
long topSize) {
checkArgument(
(topCharacteristics & Spliterator.SUBSIZED) == 0,
"flatMap does not support SUBSIZED characteristic");
checkArgument(
(topCharacteristics & Spliterator.SORTED) == 0,
"flatMap does not support SORTED characteristic");
checkNotNull(fromSpliterator);
checkNotNull(function);
return new FlatMapSpliteratorOfInt<>(
null, fromSpliterator, function, topCharacteristics, topSize);
}
/**
* Returns a {@code Spliterator.OfLong} that iterates over the elements of the spliterators
* generated by applying {@code function} to the elements of {@code fromSpliterator}. (If {@code
* function} returns {@code null} for an input, it is replaced with an empty stream.)
*/
static <InElementT extends @Nullable Object> Spliterator.OfLong flatMapToLong(
Spliterator<InElementT> fromSpliterator,
Function<? super InElementT, Spliterator.@Nullable OfLong> function,
int topCharacteristics,
long topSize) {
checkArgument(
(topCharacteristics & Spliterator.SUBSIZED) == 0,
"flatMap does not support SUBSIZED characteristic");
checkArgument(
(topCharacteristics & Spliterator.SORTED) == 0,
"flatMap does not support SORTED characteristic");
checkNotNull(fromSpliterator);
checkNotNull(function);
return new FlatMapSpliteratorOfLong<>(
null, fromSpliterator, function, topCharacteristics, topSize);
}
/**
* Returns a {@code Spliterator.OfDouble} that iterates over the elements of the spliterators
* generated by applying {@code function} to the elements of {@code fromSpliterator}. (If {@code
* function} returns {@code null} for an input, it is replaced with an empty stream.)
*/
static <InElementT extends @Nullable Object> Spliterator.OfDouble flatMapToDouble(
Spliterator<InElementT> fromSpliterator,
Function<? super InElementT, Spliterator.@Nullable OfDouble> function,
int topCharacteristics,
long topSize) {
checkArgument(
(topCharacteristics & Spliterator.SUBSIZED) == 0,
"flatMap does not support SUBSIZED characteristic");
checkArgument(
(topCharacteristics & Spliterator.SORTED) == 0,
"flatMap does not support SORTED characteristic");
checkNotNull(fromSpliterator);
checkNotNull(function);
return new FlatMapSpliteratorOfDouble<>(
null, fromSpliterator, function, topCharacteristics, topSize);
}
/**
* Implements the {@link Stream#flatMap} operation on spliterators.
*
* @param <InElementT> the element type of the input spliterator
* @param <OutElementT> the element type of the output spliterators
* @param <OutSpliteratorT> the type of the output spliterators
*/
abstract static
|
Splitr
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/annotations/derivedidentities/OneToManyEmbeddableId.java
|
{
"start": 2174,
"end": 2579
}
|
class ____ {
@Id
private BigDecimal id;
private String name;
public Parent() {
}
public Parent(BigDecimal id, String name) {
this.id = id;
this.name = name;
}
@OneToMany(mappedBy = "id.parent")
private List<FirstChild> children = new ArrayList<>();
void addChild(FirstChild firstChild) {
children.add( firstChild );
}
}
@Entity(name = "FirstChild")
public static
|
Parent
|
java
|
apache__spark
|
sql/catalyst/src/main/java/org/apache/spark/sql/connector/read/VariantAccessInfo.java
|
{
"start": 1104,
"end": 1631
}
|
class ____ the information needed by data sources to optimize reading variant columns.
* Instead of reading the entire variant value, the data source can read only the fields that
* are actually accessed, represented as a structured schema.
* <p>
* For example, if a query accesses `variant_get(v, '$.a', 'int')` and
* `variant_get(v, '$.b', 'string')`, the extracted schema would be
* `struct<0:int, 1:string>` where field ordinals correspond to the access order.
*
* @since 4.1.0
*/
@Evolving
public final
|
captures
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/entitygraph/named/subgraph/Author.java
|
{
"start": 241,
"end": 755
}
|
class ____ {
private Long id;
private String name;
private Date birth;
private Set<Book> books;
public Author() {
super();
}
public Long getId() {
return id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Date getBirth() {
return birth;
}
public void setBirth(Date birth) {
this.birth = birth;
}
public Set<Book> getBooks() {
return books;
}
public void setBooks(Set<Book> books) {
this.books = books;
}
}
|
Author
|
java
|
resilience4j__resilience4j
|
resilience4j-spring-boot3/src/main/java/io/github/resilience4j/springboot3/micrometer/monitoring/endpoint/TimerEndpoint.java
|
{
"start": 1190,
"end": 1656
}
|
class ____ {
private final TimerRegistry timerRegistry;
public TimerEndpoint(TimerRegistry timerRegistry) {
this.timerRegistry = timerRegistry;
}
@ReadOperation
public TimerEndpointResponse getAllRetries() {
List<String> retries = timerRegistry.getAllTimers()
.map(Timer::getName)
.sorted()
.collect(toList());
return new TimerEndpointResponse(retries);
}
}
|
TimerEndpoint
|
java
|
assertj__assertj-core
|
assertj-core/src/main/java/org/assertj/core/api/AbstractClassAssert.java
|
{
"start": 34346,
"end": 34422
}
|
class ____ {
* public void superMethod() {}
* }
*
*
|
MySuperClass
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/test/java/io/vertx/test/fakemetrics/FakeHttpClientMetrics.java
|
{
"start": 1074,
"end": 3854
}
|
class ____ extends FakeTCPMetrics implements HttpClientMetrics<HttpClientMetric, WebSocketMetric, SocketMetric> {
private final String name;
private final ConcurrentMap<WebSocketBase, WebSocketMetric> webSockets = new ConcurrentHashMap<>();
private final ConcurrentMap<SocketAddress, EndpointMetric> endpoints = new ConcurrentHashMap<>();
public FakeHttpClientMetrics(String name) {
this.name = name;
}
public WebSocketMetric getMetric(WebSocket ws) {
return webSockets.get(ws);
}
public HttpClientMetric getMetric(HttpClientRequest request) {
for (EndpointMetric metric : endpoints.values()) {
for (HttpRequest req : metric.requests.keySet()) {
if (req.uri().equals(request.getURI()) &&
req.remoteAddress().equals(request.connection().remoteAddress()) &&
req.method() == request.getMethod()) {
return metric.requests.get(req);
}
}
}
return null;
}
public String getName() {
return name;
}
public Set<String> endpoints() {
return endpoints.keySet().stream().map(Object::toString).collect(Collectors.toSet());
}
public EndpointMetric endpoint(String name) {
for (Map.Entry<SocketAddress, EndpointMetric> entry : endpoints.entrySet()) {
if (entry.getKey().toString().equalsIgnoreCase(name)) {
return entry.getValue();
}
}
return null;
}
// public Integer queueSize(String name) {
// EndpointMetric server = endpoint(name);
// return server != null ? server.queueSize.get() : null;
// }
public Integer connectionCount(String name) {
EndpointMetric endpoint = endpoint(name);
return endpoint != null ? endpoint.connectionCount.get() : null;
}
@Override
public ClientMetrics<HttpClientMetric, HttpRequest, HttpResponse> createEndpointMetrics(SocketAddress remoteAddress, int maxPoolSize) {
EndpointMetric metric = new EndpointMetric() {
@Override
public void close() {
endpoints.remove(remoteAddress);
}
};
endpoints.put(remoteAddress, metric);
return metric;
}
@Override
public void endpointConnected(ClientMetrics<HttpClientMetric, ?, ?> endpointMetric) {
((EndpointMetric)endpointMetric).connectionCount.incrementAndGet();
}
@Override
public void endpointDisconnected(ClientMetrics<HttpClientMetric, ?, ?> endpointMetric) {
((EndpointMetric)endpointMetric).connectionCount.decrementAndGet();
}
@Override
public WebSocketMetric connected(WebSocket webSocket) {
WebSocketMetric metric = new WebSocketMetric(webSocket);
webSockets.put(webSocket, metric);
return metric;
}
@Override
public void disconnected(WebSocketMetric webSocketMetric) {
webSockets.remove(webSocketMetric.ws);
}
}
|
FakeHttpClientMetrics
|
java
|
junit-team__junit5
|
junit-platform-engine/src/main/java/org/junit/platform/engine/support/descriptor/ClassSource.java
|
{
"start": 1673,
"end": 1837
}
|
class ____ implements TestSource {
@Serial
private static final long serialVersionUID = 1L;
/**
* {@link URI} {@linkplain URI#getScheme() scheme} for
|
ClassSource
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/metrics/dump/MetricDump.java
|
{
"start": 1013,
"end": 2290
}
|
class ____ {
/** Categories to be returned by {@link MetricDump#getCategory()} to avoid instanceof checks. */
public static final byte METRIC_CATEGORY_COUNTER = 0;
public static final byte METRIC_CATEGORY_GAUGE = 1;
public static final byte METRIC_CATEGORY_HISTOGRAM = 2;
public static final byte METRIC_CATEGORY_METER = 3;
/** The scope information for the stored metric. */
public final QueryScopeInfo scopeInfo;
/** The name of the stored metric. */
public final String name;
private MetricDump(QueryScopeInfo scopeInfo, String name) {
this.scopeInfo = Preconditions.checkNotNull(scopeInfo);
this.name = Preconditions.checkNotNull(name);
}
/**
* Returns the category for this MetricDump.
*
* @return category
*/
public abstract byte getCategory();
@Override
public String toString() {
return "MetricDump{"
+ "scopeInfo="
+ scopeInfo
+ ", name='"
+ name
+ '\''
+ ", category='"
+ getCategory()
+ '\''
+ '}';
}
/** Container for the value of a {@link org.apache.flink.metrics.Counter}. */
public static
|
MetricDump
|
java
|
quarkusio__quarkus
|
extensions/resteasy-classic/resteasy/deployment/src/test/java/io/quarkus/resteasy/test/security/inheritance/classdenyall/ClassDenyAllInterfaceWithPath_SecurityOnParent.java
|
{
"start": 1385,
"end": 2926
}
|
interface ____ {
@Path(CLASS_PATH_ON_INTERFACE + SUB_DECLARED_ON_INTERFACE + SUB_IMPL_ON_PARENT + CLASS_DENY_ALL_PATH)
ClassDenyAllSubResourceWithoutPath classPathOnInterface_SubDeclaredOnInterface_SubImplOnParent_ClassDenyAll();
@Path(CLASS_PATH_ON_INTERFACE + SUB_DECLARED_ON_INTERFACE + SUB_IMPL_ON_PARENT
+ CLASS_DENY_ALL_METHOD_PERMIT_ALL_PATH)
ClassDenyAllSubResourceWithoutPath classPathOnInterface_SubDeclaredOnInterface_SubImplOnParent_ClassDenyAllMethodPermitAll();
@Path(CLASS_PATH_ON_INTERFACE + SUB_DECLARED_ON_INTERFACE + SUB_IMPL_ON_PARENT
+ CLASS_DENY_ALL_METHOD_ROLES_ALLOWED_PATH)
ClassDenyAllSubResourceWithoutPath classPathOnInterface_SubDeclaredOnInterface_SubImplOnParent_ClassDenyAllMethodRolesAllowed();
@POST
@Path(CLASS_PATH_ON_INTERFACE + IMPL_ON_PARENT + INTERFACE_METHOD_WITH_PATH + CLASS_DENY_ALL_PATH)
String classPathOnInterface_ImplOnParent_InterfaceMethodWithPath_ClassDenyAll(JsonObject array);
@POST
@Path(CLASS_PATH_ON_INTERFACE + IMPL_ON_PARENT + INTERFACE_METHOD_WITH_PATH + CLASS_DENY_ALL_METHOD_PERMIT_ALL_PATH)
String classPathOnInterface_ImplOnParent_InterfaceMethodWithPath_ClassDenyAllMethodPermitAll(JsonObject array);
@POST
@Path(CLASS_PATH_ON_INTERFACE + IMPL_ON_PARENT + INTERFACE_METHOD_WITH_PATH + CLASS_DENY_ALL_METHOD_ROLES_ALLOWED_PATH)
String classPathOnInterface_ImplOnParent_InterfaceMethodWithPath_ClassDenyAllMethodRolesAllowed(JsonObject array);
}
|
ClassDenyAllInterfaceWithPath_SecurityOnParent
|
java
|
ReactiveX__RxJava
|
src/test/java/io/reactivex/rxjava3/processors/AsyncProcessorTest.java
|
{
"start": 8214,
"end": 15955
}
|
class ____ extends Thread {
private final AsyncProcessor<String> processor;
private final AtomicReference<String> value = new AtomicReference<>();
SubjectSubscriberThread(AsyncProcessor<String> processor) {
this.processor = processor;
}
@Override
public void run() {
try {
// a timeout exception will happen if we don't get a terminal state
String v = processor.timeout(2000, TimeUnit.MILLISECONDS).blockingSingle();
value.set(v);
} catch (Exception e) {
e.printStackTrace();
}
}
}
@Test
public void currentStateMethodsNormal() {
AsyncProcessor<Object> as = AsyncProcessor.create();
assertFalse(as.hasValue());
assertFalse(as.hasThrowable());
assertFalse(as.hasComplete());
assertNull(as.getValue());
assertNull(as.getThrowable());
as.onNext(1);
assertFalse(as.hasValue()); // AP no longer reports it has a value until it is terminated
assertFalse(as.hasThrowable());
assertFalse(as.hasComplete());
assertNull(as.getValue()); // AP no longer reports it has a value until it is terminated
assertNull(as.getThrowable());
as.onComplete();
assertTrue(as.hasValue());
assertFalse(as.hasThrowable());
assertTrue(as.hasComplete());
assertEquals(1, as.getValue());
assertNull(as.getThrowable());
}
@Test
public void currentStateMethodsEmpty() {
AsyncProcessor<Object> as = AsyncProcessor.create();
assertFalse(as.hasValue());
assertFalse(as.hasThrowable());
assertFalse(as.hasComplete());
assertNull(as.getValue());
assertNull(as.getThrowable());
as.onComplete();
assertFalse(as.hasValue());
assertFalse(as.hasThrowable());
assertTrue(as.hasComplete());
assertNull(as.getValue());
assertNull(as.getThrowable());
}
@Test
public void currentStateMethodsError() {
AsyncProcessor<Object> as = AsyncProcessor.create();
assertFalse(as.hasValue());
assertFalse(as.hasThrowable());
assertFalse(as.hasComplete());
assertNull(as.getValue());
assertNull(as.getThrowable());
as.onError(new TestException());
assertFalse(as.hasValue());
assertTrue(as.hasThrowable());
assertFalse(as.hasComplete());
assertNull(as.getValue());
assertTrue(as.getThrowable() instanceof TestException);
}
@Test
public void fusionLive() {
AsyncProcessor<Integer> ap = new AsyncProcessor<>();
TestSubscriberEx<Integer> ts = new TestSubscriberEx<Integer>().setInitialFusionMode(QueueFuseable.ANY);
ap.subscribe(ts);
ts
.assertFuseable()
.assertFusionMode(QueueFuseable.ASYNC);
ts.assertNoValues().assertNoErrors().assertNotComplete();
ap.onNext(1);
ts.assertNoValues().assertNoErrors().assertNotComplete();
ap.onComplete();
ts.assertResult(1);
}
@Test
public void fusionOfflie() {
AsyncProcessor<Integer> ap = new AsyncProcessor<>();
ap.onNext(1);
ap.onComplete();
TestSubscriberEx<Integer> ts = new TestSubscriberEx<Integer>().setInitialFusionMode(QueueFuseable.ANY);
ap.subscribe(ts);
ts
.assertFuseable()
.assertFusionMode(QueueFuseable.ASYNC)
.assertResult(1);
}
@Test
public void onSubscribeAfterDone() {
AsyncProcessor<Object> p = AsyncProcessor.create();
BooleanSubscription bs = new BooleanSubscription();
p.onSubscribe(bs);
assertFalse(bs.isCancelled());
p.onComplete();
bs = new BooleanSubscription();
p.onSubscribe(bs);
assertTrue(bs.isCancelled());
p.test().assertResult();
}
@Test
public void cancelUpfront() {
AsyncProcessor<Object> p = AsyncProcessor.create();
assertFalse(p.hasSubscribers());
p.test().assertEmpty();
p.test().assertEmpty();
p.test(0L, true).assertEmpty();
assertTrue(p.hasSubscribers());
}
@Test
public void cancelRace() {
AsyncProcessor<Object> p = AsyncProcessor.create();
for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) {
final TestSubscriber<Object> ts1 = p.test();
final TestSubscriber<Object> ts2 = p.test();
Runnable r1 = new Runnable() {
@Override
public void run() {
ts1.cancel();
}
};
Runnable r2 = new Runnable() {
@Override
public void run() {
ts2.cancel();
}
};
TestHelper.race(r1, r2);
}
}
@Test
@SuppressUndeliverable
public void onErrorCancelRace() {
for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) {
final AsyncProcessor<Object> p = AsyncProcessor.create();
final TestSubscriberEx<Object> ts1 = p.to(TestHelper.<Object>testConsumer());
Runnable r1 = new Runnable() {
@Override
public void run() {
ts1.cancel();
}
};
final TestException ex = new TestException();
Runnable r2 = new Runnable() {
@Override
public void run() {
p.onError(ex);
}
};
TestHelper.race(r1, r2);
if (ts1.errors().size() != 0) {
ts1.assertFailure(TestException.class);
} else {
ts1.assertEmpty();
}
}
}
@Test
public void onNextCrossCancel() {
AsyncProcessor<Object> p = AsyncProcessor.create();
final TestSubscriber<Object> ts2 = new TestSubscriber<>();
TestSubscriber<Object> ts1 = new TestSubscriber<Object>() {
@Override
public void onNext(Object t) {
ts2.cancel();
super.onNext(t);
}
};
p.subscribe(ts1);
p.subscribe(ts2);
p.onNext(1);
p.onComplete();
ts1.assertResult(1);
ts2.assertEmpty();
}
@Test
@SuppressUndeliverable
public void onErrorCrossCancel() {
AsyncProcessor<Object> p = AsyncProcessor.create();
final TestSubscriber<Object> ts2 = new TestSubscriber<>();
TestSubscriber<Object> ts1 = new TestSubscriber<Object>() {
@Override
public void onError(Throwable t) {
ts2.cancel();
super.onError(t);
}
};
p.subscribe(ts1);
p.subscribe(ts2);
p.onError(new TestException());
ts1.assertFailure(TestException.class);
ts2.assertEmpty();
}
@Test
public void onCompleteCrossCancel() {
AsyncProcessor<Object> p = AsyncProcessor.create();
final TestSubscriber<Object> ts2 = new TestSubscriber<>();
TestSubscriber<Object> ts1 = new TestSubscriber<Object>() {
@Override
public void onComplete() {
ts2.cancel();
super.onComplete();
}
};
p.subscribe(ts1);
p.subscribe(ts2);
p.onComplete();
ts1.assertResult();
ts2.assertEmpty();
}
@Test
public void cancel() {
TestHelper.checkDisposed(AsyncProcessor.create());
}
}
|
SubjectSubscriberThread
|
java
|
apache__camel
|
components/camel-kamelet/src/main/java/org/apache/camel/component/kamelet/KameletConsumerNotAvailableException.java
|
{
"start": 938,
"end": 1142
}
|
class ____ extends CamelExchangeException {
public KameletConsumerNotAvailableException(String message, Exchange exchange) {
super(message, exchange);
}
}
|
KameletConsumerNotAvailableException
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/main/java/io/vertx/core/http/HttpRequestHead.java
|
{
"start": 382,
"end": 1460
}
|
interface ____ {
/**
* @return the HTTP method for the request.
*/
HttpMethod method();
/**
* @return the URI of the request. This is usually a relative URI
*/
String uri();
/**
* @return The path part of the uri. For example {@code /somepath/somemorepath/someresource.foo}
*/
@Nullable
String path();
/**
* @return the query part of the uri. For example {@code someparam=32&someotherparam=x}
*/
@Nullable
String query();
/**
* @return the headers
*/
MultiMap headers();
/**
* Return the first header value with the specified name
*
* @param headerName the header name
* @return the header value
*/
@Nullable
default String getHeader(String headerName) {
return headers().get(headerName);
}
/**
* Return the first header value with the specified name
*
* @param headerName the header name
* @return the header value
*/
@GenIgnore(GenIgnore.PERMITTED_TYPE)
default String getHeader(CharSequence headerName) {
return headers().get(headerName);
}
}
|
HttpRequestHead
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/aot/generate/ValueCodeGenerationException.java
|
{
"start": 845,
"end": 1684
}
|
class ____ extends RuntimeException {
private final @Nullable Object value;
protected ValueCodeGenerationException(String message, @Nullable Object value, @Nullable Throwable cause) {
super(message, cause);
this.value = value;
}
public ValueCodeGenerationException(@Nullable Object value, Throwable cause) {
super(buildErrorMessage(value), cause);
this.value = value;
}
private static String buildErrorMessage(@Nullable Object value) {
StringBuilder message = new StringBuilder("Failed to generate code for '");
message.append(value).append("'");
if (value != null) {
message.append(" with type ").append(value.getClass());
}
return message.toString();
}
/**
* Return the value that failed to be generated.
*/
public @Nullable Object getValue() {
return this.value;
}
}
|
ValueCodeGenerationException
|
java
|
elastic__elasticsearch
|
qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLogsTestSetup.java
|
{
"start": 518,
"end": 767
}
|
class ____ {
private static boolean initialized = false;
public static void init() {
if (initialized == false) {
LogConfigurator.setNodeName("sample-name");
initialized = true;
}
}
}
|
JsonLogsTestSetup
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/annotations/fetchprofile/MappedByFetchProfileFunctionTest.java
|
{
"start": 715,
"end": 1479
}
|
class ____ {
@Test
public void testFetchWithOneToOneMappedBy(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
session.enableFetchProfile( "address-with-customer" );
Address address = new Address();
address.setStreet( "Test Road 1" );
Customer6 customer = new Customer6();
customer.setName( "Tester" );
customer.setAddress( address );
session.persist( address );
session.persist( customer );
session.flush();
session.clear();
address = session.get( Address.class, address.getId() );
assertThat( Hibernate.isInitialized( address.getCustomer() ) ).isTrue();
session.remove( address.getCustomer() );
session.remove( address );
}
);
}
}
|
MappedByFetchProfileFunctionTest
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/sql/ast/expr/SQLTextLiteralExpr.java
|
{
"start": 774,
"end": 2013
}
|
class ____ extends SQLExprImpl implements SQLLiteralExpr {
protected String text;
public SQLTextLiteralExpr() {
}
public SQLTextLiteralExpr(String text) {
this.text = text;
}
public String getText() {
return this.text;
}
public void setText(String text) {
this.text = text;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((text == null) ? 0 : text.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
SQLTextLiteralExpr other = (SQLTextLiteralExpr) obj;
if (text == null) {
if (other.text != null) {
return false;
}
} else if (!text.equals(other.text)) {
return false;
}
return true;
}
public abstract SQLTextLiteralExpr clone();
@Override
public List getChildren() {
return Collections.emptyList();
}
}
|
SQLTextLiteralExpr
|
java
|
mybatis__mybatis-3
|
src/test/java/org/apache/ibatis/submitted/association_nested/FolderFlatTree.java
|
{
"start": 768,
"end": 965
}
|
class ____ {
public Folder root;
public Folder level1;
public Folder level2;
@Override
public String toString() {
return root + "\n\t" + level1 + "\n\t\t" + level2;
}
}
|
FolderFlatTree
|
java
|
spring-projects__spring-security
|
oauth2/oauth2-authorization-server/src/main/java/org/springframework/security/oauth2/server/authorization/settings/ClientSettings.java
|
{
"start": 4268,
"end": 7529
}
|
class ____ extends AbstractBuilder<ClientSettings, Builder> {
private Builder() {
}
/**
* Set to {@code true} if the client is required to provide a proof key challenge
* and verifier when performing the Authorization Code Grant flow.
* @param requireProofKey {@code true} if the client is required to provide a
* proof key challenge and verifier, {@code false} otherwise
* @return the {@link Builder} for further configuration
*/
public Builder requireProofKey(boolean requireProofKey) {
return setting(ConfigurationSettingNames.Client.REQUIRE_PROOF_KEY, requireProofKey);
}
/**
* Set to {@code true} if authorization consent is required when the client
* requests access. This applies to {@code authorization_code} flow.
* @param requireAuthorizationConsent {@code true} if authorization consent is
* required when the client requests access, {@code false} otherwise
* @return the {@link Builder} for further configuration
*/
public Builder requireAuthorizationConsent(boolean requireAuthorizationConsent) {
return setting(ConfigurationSettingNames.Client.REQUIRE_AUTHORIZATION_CONSENT, requireAuthorizationConsent);
}
/**
* Sets the {@code URL} for the Client's JSON Web Key Set.
* @param jwkSetUrl the {@code URL} for the Client's JSON Web Key Set
* @return the {@link Builder} for further configuration
*/
public Builder jwkSetUrl(String jwkSetUrl) {
return setting(ConfigurationSettingNames.Client.JWK_SET_URL, jwkSetUrl);
}
/**
* Sets the {@link JwsAlgorithm JWS} algorithm that must be used for signing the
* {@link Jwt JWT} used to authenticate the Client at the Token Endpoint for the
* {@link ClientAuthenticationMethod#PRIVATE_KEY_JWT private_key_jwt} and
* {@link ClientAuthenticationMethod#CLIENT_SECRET_JWT client_secret_jwt}
* authentication methods.
* @param authenticationSigningAlgorithm the {@link JwsAlgorithm JWS} algorithm
* that must be used for signing the {@link Jwt JWT} used to authenticate the
* Client at the Token Endpoint
* @return the {@link Builder} for further configuration
*/
public Builder tokenEndpointAuthenticationSigningAlgorithm(JwsAlgorithm authenticationSigningAlgorithm) {
return setting(ConfigurationSettingNames.Client.TOKEN_ENDPOINT_AUTHENTICATION_SIGNING_ALGORITHM,
authenticationSigningAlgorithm);
}
/**
* Sets the expected subject distinguished name associated to the client
* {@code X509Certificate} received during client authentication when using the
* {@code tls_client_auth} method.
* @param x509CertificateSubjectDN the expected subject distinguished name
* associated to the client {@code X509Certificate} received during client
* authentication * @return the {@link Builder} for further configuration
* @return the {@link Builder} for further configuration
*/
public Builder x509CertificateSubjectDN(String x509CertificateSubjectDN) {
return setting(ConfigurationSettingNames.Client.X509_CERTIFICATE_SUBJECT_DN, x509CertificateSubjectDN);
}
/**
* Builds the {@link ClientSettings}.
* @return the {@link ClientSettings}
*/
@Override
public ClientSettings build() {
return new ClientSettings(getSettings());
}
}
}
|
Builder
|
java
|
elastic__elasticsearch
|
x-pack/plugin/gpu/src/internalClusterTest/java/org/elasticsearch/xpack/gpu/TestCuVSServiceProvider.java
|
{
"start": 1118,
"end": 1668
}
|
class ____ implements GPUInfoProvider {
private final List<GPUInfo> gpuList;
TestGPUInfoProvider(List<GPUInfo> gpuList) {
this.gpuList = gpuList;
}
@Override
public List<GPUInfo> availableGPUs() {
return gpuList;
}
@Override
public List<GPUInfo> compatibleGPUs() {
return gpuList;
}
@Override
public CuVSResourcesInfo getCurrentInfo(CuVSResources cuVSResources) {
return null;
}
}
}
|
TestGPUInfoProvider
|
java
|
elastic__elasticsearch
|
x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java
|
{
"start": 298493,
"end": 299017
}
|
class ____ extends ParserRuleContext {
public UnquoteIdentifierContext(ParserRuleContext parent, int invokingState) {
super(parent, invokingState);
}
@Override
public int getRuleIndex() {
return RULE_unquoteIdentifier;
}
public UnquoteIdentifierContext() {}
public void copyFrom(UnquoteIdentifierContext ctx) {
super.copyFrom(ctx);
}
}
@SuppressWarnings("CheckReturnValue")
public static
|
UnquoteIdentifierContext
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/cascade/multilevel/MultiLevelCascadeRegularIdBasedParentChildAssociationTest.java
|
{
"start": 4015,
"end": 4121
}
|
class ____ {
@Id
private Long id;
@ManyToOne(fetch = FetchType.LAZY)
private Child owner;
}
}
|
Hobby
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java
|
{
"start": 945,
"end": 1388
}
|
class ____ extends WebHdfsFileSystem {
@Override
public String getScheme() {
return WebHdfsConstants.SWEBHDFS_SCHEME;
}
@Override
protected String getTransportScheme() {
return "https";
}
@Override
protected Text getTokenKind() {
return WebHdfsConstants.SWEBHDFS_TOKEN_KIND;
}
@Override
protected int getDefaultPort() {
return HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
}
}
|
SWebHdfsFileSystem
|
java
|
quarkusio__quarkus
|
core/deployment/src/main/java/io/quarkus/deployment/recording/substitutions/AdditionalSubstitutionsBuildStep.java
|
{
"start": 864,
"end": 1518
}
|
class ____ and not a super class
*/
Class<ZoneId> zoneRegionClass = (Class<ZoneId>) Class.forName("java.time.ZoneRegion");
producer.produce(new ObjectSubstitutionBuildItem(zoneRegionClass, String.class, ZoneIdSubstitution.class));
Class<ZoneId> zoneOffsetClass = (Class<ZoneId>) Class.forName("java.time.ZoneOffset");
producer.produce(new ObjectSubstitutionBuildItem(zoneOffsetClass, String.class, ZoneIdSubstitution.class));
} catch (ClassNotFoundException e) {
throw new IllegalStateException("Improper registration of ZoneId substitution", e);
}
}
}
|
registered
|
java
|
spring-projects__spring-security
|
saml2/saml2-service-provider/src/main/java/org/springframework/security/saml2/core/Saml2X509Credential.java
|
{
"start": 1281,
"end": 8189
}
|
class ____ implements Serializable {
private static final long serialVersionUID = -1015853414272603517L;
private final PrivateKey privateKey;
private final X509Certificate certificate;
private final Set<Saml2X509CredentialType> credentialTypes;
/**
* Creates a {@link Saml2X509Credential} using the provided parameters
* @param certificate the credential's public certificiate
* @param types the credential's intended usages, must be one of
* {@link Saml2X509CredentialType#VERIFICATION} or
* {@link Saml2X509CredentialType#ENCRYPTION} or both.
*/
public Saml2X509Credential(X509Certificate certificate, Saml2X509CredentialType... types) {
this(null, false, certificate, types);
validateUsages(types, Saml2X509CredentialType.VERIFICATION, Saml2X509CredentialType.ENCRYPTION);
}
/**
* Creates a {@link Saml2X509Credential} using the provided parameters
* @param privateKey the credential's private key
* @param certificate the credential's public certificate
* @param types the credential's intended usages, must be one of
* {@link Saml2X509CredentialType#SIGNING} or
* {@link Saml2X509CredentialType#DECRYPTION} or both.
*/
public Saml2X509Credential(PrivateKey privateKey, X509Certificate certificate, Saml2X509CredentialType... types) {
this(privateKey, true, certificate, types);
validateUsages(types, Saml2X509CredentialType.SIGNING, Saml2X509CredentialType.DECRYPTION);
}
/**
* Creates a {@link Saml2X509Credential} using the provided parameters
* @param privateKey the credential's private key
* @param certificate the credential's public certificate
* @param types the credential's intended usages
*/
public Saml2X509Credential(PrivateKey privateKey, X509Certificate certificate, Set<Saml2X509CredentialType> types) {
Assert.notNull(certificate, "certificate cannot be null");
Assert.notNull(types, "credentialTypes cannot be null");
this.privateKey = privateKey;
this.certificate = certificate;
this.credentialTypes = types;
}
/**
* Create a {@link Saml2X509Credential} that can be used for encryption.
* @param certificate the certificate to use for encryption
* @return an encrypting {@link Saml2X509Credential}
*/
public static Saml2X509Credential encryption(X509Certificate certificate) {
return new Saml2X509Credential(certificate, Saml2X509Credential.Saml2X509CredentialType.ENCRYPTION);
}
/**
* Create a {@link Saml2X509Credential} that can be used for verification.
* @param certificate the certificate to use for verification
* @return a verifying {@link Saml2X509Credential}
*/
public static Saml2X509Credential verification(X509Certificate certificate) {
return new Saml2X509Credential(certificate, Saml2X509Credential.Saml2X509CredentialType.VERIFICATION);
}
/**
* Create a {@link Saml2X509Credential} that can be used for decryption.
* @param privateKey the private key to use for decryption
* @param certificate the certificate to use for decryption
* @return an decrypting {@link Saml2X509Credential}
*/
public static Saml2X509Credential decryption(PrivateKey privateKey, X509Certificate certificate) {
return new Saml2X509Credential(privateKey, certificate, Saml2X509Credential.Saml2X509CredentialType.DECRYPTION);
}
/**
* Create a {@link Saml2X509Credential} that can be used for signing.
* @param privateKey the private key to use for signing
* @param certificate the certificate to use for signing
* @return a signing {@link Saml2X509Credential}
*/
public static Saml2X509Credential signing(PrivateKey privateKey, X509Certificate certificate) {
return new Saml2X509Credential(privateKey, certificate, Saml2X509Credential.Saml2X509CredentialType.SIGNING);
}
private Saml2X509Credential(PrivateKey privateKey, boolean keyRequired, X509Certificate certificate,
Saml2X509CredentialType... types) {
Assert.notNull(certificate, "certificate cannot be null");
Assert.notEmpty(types, "credentials types cannot be empty");
if (keyRequired) {
Assert.notNull(privateKey, "privateKey cannot be null");
}
this.privateKey = privateKey;
this.certificate = certificate;
this.credentialTypes = new LinkedHashSet<>(Arrays.asList(types));
}
/**
* Get the private key for this credential
* @return the private key, may be null
* @see #Saml2X509Credential(PrivateKey, X509Certificate, Saml2X509CredentialType...)
*/
public PrivateKey getPrivateKey() {
return this.privateKey;
}
/**
* Get the public certificate for this credential
* @return the public certificate
*/
public X509Certificate getCertificate() {
return this.certificate;
}
/**
* Indicate whether this credential can be used for signing
* @return true if the credential has a {@link Saml2X509CredentialType#SIGNING} type
*/
public boolean isSigningCredential() {
return getCredentialTypes().contains(Saml2X509CredentialType.SIGNING);
}
/**
* Indicate whether this credential can be used for decryption
* @return true if the credential has a {@link Saml2X509CredentialType#DECRYPTION}
* type
*/
public boolean isDecryptionCredential() {
return getCredentialTypes().contains(Saml2X509CredentialType.DECRYPTION);
}
/**
* Indicate whether this credential can be used for verification
* @return true if the credential has a {@link Saml2X509CredentialType#VERIFICATION}
* type
*/
public boolean isVerificationCredential() {
return getCredentialTypes().contains(Saml2X509CredentialType.VERIFICATION);
}
/**
* Indicate whether this credential can be used for encryption
* @return true if the credential has a {@link Saml2X509CredentialType#ENCRYPTION}
* type
*/
public boolean isEncryptionCredential() {
return getCredentialTypes().contains(Saml2X509CredentialType.ENCRYPTION);
}
/**
* List all this credential's intended usages
* @return the set of this credential's intended usages
*/
public Set<Saml2X509CredentialType> getCredentialTypes() {
return this.credentialTypes;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Saml2X509Credential that = (Saml2X509Credential) o;
return Objects.equals(this.privateKey, that.privateKey) && this.certificate.equals(that.certificate)
&& this.credentialTypes.equals(that.credentialTypes);
}
@Override
public int hashCode() {
return Objects.hash(this.privateKey, this.certificate, this.credentialTypes);
}
private void validateUsages(Saml2X509CredentialType[] usages, Saml2X509CredentialType... validUsages) {
for (Saml2X509CredentialType usage : usages) {
boolean valid = false;
for (Saml2X509CredentialType validUsage : validUsages) {
if (usage == validUsage) {
valid = true;
break;
}
}
Assert.state(valid, () -> usage + " is not a valid usage for this credential");
}
}
public
|
Saml2X509Credential
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/tuple/StandardProperty.java
|
{
"start": 414,
"end": 1808
}
|
class ____ extends AbstractNonIdentifierAttribute implements NonIdentifierAttribute {
/**
* Constructs NonIdentifierProperty instances.
*
* @param name The name by which the property can be referenced within
* its owner.
* @param type The Hibernate Type of this property.
* @param lazy Should this property be handled lazily?
* @param insertable Is this property an insertable value?
* @param updateable Is this property an updateable value?
* @param nullable Is this property a nullable value?
* @param cascadeStyle The cascade style for this property's value.
* @param fetchMode Any fetch mode defined for this property
*/
public StandardProperty(
String name,
Type type,
boolean lazy,
boolean insertable,
boolean updateable,
boolean nullable,
boolean checkable,
boolean versionable,
CascadeStyle cascadeStyle,
OnDeleteAction onDeleteAction,
FetchMode fetchMode) {
super(
null,
null,
-1,
name,
type,
new BaselineAttributeInformation.Builder()
.setLazy( lazy )
.setInsertable( insertable )
.setUpdateable( updateable )
.setNullable( nullable )
.setDirtyCheckable( checkable )
.setVersionable( versionable )
.setCascadeStyle( cascadeStyle )
.setOnDeleteAction( onDeleteAction )
.setFetchMode( fetchMode )
.createInformation()
);
}
}
|
StandardProperty
|
java
|
quarkusio__quarkus
|
extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/loop/LoopTest.java
|
{
"start": 442,
"end": 860
}
|
class ____ {
static StringAsset template = new StringAsset("{#for i in total}{i}:{/for}");
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot(root -> root
.addAsResource(template, "templates/loop1.html")
.addAsResource(template, "templates/LoopTest/loopInt.html"));
@CheckedTemplate
static
|
LoopTest
|
java
|
ReactiveX__RxJava
|
src/test/java/io/reactivex/rxjava3/internal/operators/observable/ObservableTakeTest.java
|
{
"start": 7283,
"end": 12392
}
|
class ____ implements ObservableSource<String> {
final String[] values;
Thread t;
TestObservableFunc(String... values) {
this.values = values;
}
@Override
public void subscribe(final Observer<? super String> observer) {
observer.onSubscribe(Disposable.empty());
System.out.println("TestObservable subscribed to ...");
t = new Thread(new Runnable() {
@Override
public void run() {
try {
System.out.println("running TestObservable thread");
for (String s : values) {
System.out.println("TestObservable onNext: " + s);
observer.onNext(s);
}
observer.onComplete();
} catch (Throwable e) {
throw new RuntimeException(e);
}
}
});
System.out.println("starting TestObservable thread");
t.start();
System.out.println("done starting TestObservable thread");
}
}
private static Observable<Long> INFINITE_OBSERVABLE = Observable.unsafeCreate(new ObservableSource<Long>() {
@Override
public void subscribe(Observer<? super Long> op) {
Disposable d = Disposable.empty();
op.onSubscribe(d);
long l = 1;
while (!d.isDisposed()) {
op.onNext(l++);
}
op.onComplete();
}
});
@Test
public void takeObserveOn() {
Observer<Object> o = TestHelper.mockObserver();
TestObserver<Object> to = new TestObserver<>(o);
INFINITE_OBSERVABLE
.observeOn(Schedulers.newThread()).take(1).subscribe(to);
to.awaitDone(5, TimeUnit.SECONDS);
to.assertNoErrors();
verify(o).onNext(1L);
verify(o, never()).onNext(2L);
verify(o).onComplete();
verify(o, never()).onError(any(Throwable.class));
}
@Test
public void interrupt() throws InterruptedException {
final AtomicReference<Object> exception = new AtomicReference<>();
final CountDownLatch latch = new CountDownLatch(1);
Observable.just(1).subscribeOn(Schedulers.computation()).take(1)
.subscribe(new Consumer<Integer>() {
@Override
public void accept(Integer t1) {
try {
Thread.sleep(100);
} catch (Exception e) {
exception.set(e);
e.printStackTrace();
} finally {
latch.countDown();
}
}
});
latch.await();
assertNull(exception.get());
}
@Test
public void takeFinalValueThrows() {
Observable<Integer> source = Observable.just(1).take(1);
TestObserver<Integer> to = new TestObserver<Integer>() {
@Override
public void onNext(Integer t) {
throw new TestException();
}
};
source.safeSubscribe(to);
to.assertNoValues();
to.assertError(TestException.class);
to.assertNotComplete();
}
@Test
public void reentrantTake() {
final PublishSubject<Integer> source = PublishSubject.create();
TestObserver<Integer> to = new TestObserver<>();
source.take(1).doOnNext(new Consumer<Integer>() {
@Override
public void accept(Integer v) {
source.onNext(2);
}
}).subscribe(to);
source.onNext(1);
to.assertValue(1);
to.assertNoErrors();
to.assertComplete();
}
@Test
public void takeNegative() {
try {
Observable.just(1).take(-99);
fail("Should have thrown");
} catch (IllegalArgumentException ex) {
assertEquals("count >= 0 required but it was -99", ex.getMessage());
}
}
@Test
public void takeZero() {
Observable.just(1)
.take(0)
.test()
.assertResult();
}
@Test
public void dispose() {
TestHelper.checkDisposed(PublishSubject.create().take(2));
}
@Test
public void doubleOnSubscribe() {
TestHelper.checkDoubleOnSubscribeObservable(new Function<Observable<Object>, ObservableSource<Object>>() {
@Override
public ObservableSource<Object> apply(Observable<Object> o) throws Exception {
return o.take(2);
}
});
}
@Test
public void errorAfterLimitReached() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
Observable.error(new TestException())
.take(0)
.test()
.assertResult();
TestHelper.assertUndeliverable(errors, 0, TestException.class);
} finally {
RxJavaPlugins.reset();
}
}
}
|
TestObservableFunc
|
java
|
apache__rocketmq
|
controller/src/main/java/org/apache/rocketmq/controller/ControllerManager.java
|
{
"start": 16597,
"end": 18202
}
|
class ____ {
private ExecutorService executorService;
private Map<String/*brokerAddress*/, NotifyTask/*currentNotifyTask*/> currentNotifyFutures;
public NotifyService() {
}
public void initialize() {
this.executorService = Executors.newFixedThreadPool(3, new ThreadFactoryImpl("ControllerManager_NotifyService_"));
this.currentNotifyFutures = new ConcurrentHashMap<>();
}
public void notifyBroker(String brokerAddress, RoleChangeNotifyEntry entry) {
int masterEpoch = entry.getMasterEpoch();
NotifyTask oldTask = this.currentNotifyFutures.get(brokerAddress);
if (oldTask != null && masterEpoch > oldTask.getMasterEpoch()) {
// cancel current future
Future oldFuture = oldTask.getFuture();
if (oldFuture != null && !oldFuture.isDone()) {
oldFuture.cancel(true);
}
}
final NotifyTask task = new NotifyTask(masterEpoch, null);
Runnable runnable = () -> {
doNotifyBrokerRoleChanged(brokerAddress, entry);
this.currentNotifyFutures.remove(brokerAddress, task);
};
this.currentNotifyFutures.put(brokerAddress, task);
Future<?> future = this.executorService.submit(runnable);
task.setFuture(future);
}
public void shutdown() {
if (!this.executorService.isShutdown()) {
this.executorService.shutdownNow();
}
}
|
NotifyService
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/slotpool/PhysicalSlotRequestBulkWithTimestampTest.java
|
{
"start": 1107,
"end": 2394
}
|
class ____ {
private final ManualClock clock = new ManualClock();
@Test
void testMarkBulkUnfulfillable() {
final PhysicalSlotRequestBulkWithTimestamp bulk =
createPhysicalSlotRequestBulkWithTimestamp();
clock.advanceTime(456, TimeUnit.MILLISECONDS);
bulk.markUnfulfillable(clock.relativeTimeMillis());
assertThat(bulk.getUnfulfillableSince()).isEqualTo(clock.relativeTimeMillis());
}
@Test
void testUnfulfillableTimestampWillNotBeOverriddenByFollowingUnfulfillableTimestamp() {
final PhysicalSlotRequestBulkWithTimestamp bulk =
createPhysicalSlotRequestBulkWithTimestamp();
final long unfulfillableSince = clock.relativeTimeMillis();
bulk.markUnfulfillable(unfulfillableSince);
clock.advanceTime(456, TimeUnit.MILLISECONDS);
bulk.markUnfulfillable(clock.relativeTimeMillis());
assertThat(bulk.getUnfulfillableSince()).isEqualTo(unfulfillableSince);
}
private static PhysicalSlotRequestBulkWithTimestamp
createPhysicalSlotRequestBulkWithTimestamp() {
return TestingPhysicalSlotRequestBulkBuilder.newBuilder()
.buildPhysicalSlotRequestBulkWithTimestamp();
}
}
|
PhysicalSlotRequestBulkWithTimestampTest
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/RedundantOverrideTest.java
|
{
"start": 6866,
"end": 7226
}
|
class ____ extends foo.A {
@Override
protected void swap() {
super.swap();
}
}
""")
.doTest();
}
@Test
public void protectedOverrideInSamePackage() {
testHelper
.addSourceLines(
"A.java",
"""
package foo;
|
B
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/bytecode/internal/bytebuddy/SimpleEnhancerTests.java
|
{
"start": 558,
"end": 989
}
|
class ____ {
@Test
public void generateEnhancedClass() throws EnhancementException, IOException {
Enhancer enhancer = new EnhancerImpl( new DefaultEnhancementContext(), new ByteBuddyState() );
enhancer.enhance( SimpleEntity.class.getName(),
ByteCodeHelper.readByteCode( SimpleEntity.class.getClassLoader()
.getResourceAsStream( SimpleEntity.class.getName().replace( '.', '/' ) + ".class" ) ) );
}
}
|
SimpleEnhancerTests
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/objectid/TestObjectIdWithPolymorphic.java
|
{
"start": 754,
"end": 942
}
|
class ____
{
public int value;
public Base next;
public Base() { this(0); }
protected Base(int v) {
value = v;
}
}
static
|
Base
|
java
|
apache__dubbo
|
dubbo-registry/dubbo-registry-api/src/test/java/org/apache/dubbo/registry/client/migration/MigrationRuleHandlerTest.java
|
{
"start": 1191,
"end": 4152
}
|
class ____ {
@Test
void test() {
MigrationClusterInvoker<?> invoker = Mockito.mock(MigrationClusterInvoker.class);
URL url = Mockito.mock(URL.class);
Mockito.when(url.getDisplayServiceKey()).thenReturn("test");
Mockito.when(url.getParameter(Mockito.any(), (String) Mockito.any())).thenAnswer(i -> i.getArgument(1));
Mockito.when(url.getOrDefaultApplicationModel()).thenReturn(ApplicationModel.defaultModel());
MigrationRuleHandler<?> handler = new MigrationRuleHandler<>(invoker, url);
Mockito.when(invoker.migrateToForceApplicationInvoker(Mockito.any())).thenReturn(true);
Mockito.when(invoker.migrateToForceInterfaceInvoker(Mockito.any())).thenReturn(true);
MigrationRule initRule = MigrationRule.getInitRule();
handler.doMigrate(initRule);
Mockito.verify(invoker, Mockito.times(1)).migrateToApplicationFirstInvoker(initRule);
MigrationRule rule = Mockito.mock(MigrationRule.class);
Mockito.when(rule.getStep(url)).thenReturn(MigrationStep.FORCE_APPLICATION);
handler.doMigrate(rule);
Mockito.verify(invoker, Mockito.times(1)).migrateToForceApplicationInvoker(rule);
Mockito.when(rule.getStep(url)).thenReturn(MigrationStep.APPLICATION_FIRST);
handler.doMigrate(rule);
Mockito.verify(invoker, Mockito.times(1)).migrateToApplicationFirstInvoker(rule);
Mockito.when(rule.getStep(url)).thenReturn(MigrationStep.FORCE_INTERFACE);
handler.doMigrate(rule);
Mockito.verify(invoker, Mockito.times(1)).migrateToForceInterfaceInvoker(rule);
// migration failed, current rule not changed
testMigrationFailed(rule, url, handler, invoker);
// rule not changed, check migration not actually executed
testMigrationWithStepUnchanged(rule, url, handler, invoker);
}
private void testMigrationFailed(
MigrationRule rule, URL url, MigrationRuleHandler<?> handler, MigrationClusterInvoker<?> invoker) {
Assertions.assertEquals(MigrationStep.FORCE_INTERFACE, handler.getMigrationStep());
Mockito.when(invoker.migrateToForceApplicationInvoker(Mockito.any())).thenReturn(false);
Mockito.when(rule.getStep(url)).thenReturn(MigrationStep.FORCE_APPLICATION);
handler.doMigrate(rule);
Mockito.verify(invoker, Mockito.times(2)).migrateToForceApplicationInvoker(rule);
Assertions.assertEquals(MigrationStep.FORCE_INTERFACE, handler.getMigrationStep());
}
private void testMigrationWithStepUnchanged(
MigrationRule rule, URL url, MigrationRuleHandler<?> handler, MigrationClusterInvoker<?> invoker) {
// set the same as
Mockito.when(rule.getStep(url)).thenReturn(handler.getMigrationStep());
handler.doMigrate(rule);
// no interaction
Mockito.verify(invoker, Mockito.times(1)).migrateToForceInterfaceInvoker(rule);
}
}
|
MigrationRuleHandlerTest
|
java
|
junit-team__junit5
|
junit-jupiter-api/src/main/java/org/junit/jupiter/api/Tags.java
|
{
"start": 1170,
"end": 1263
}
|
interface ____ {
/**
* An array of one or more {@link Tag Tags}.
*/
Tag[] value();
}
|
Tags
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageStorageInspector.java
|
{
"start": 1382,
"end": 2322
}
|
class ____ {
/**
* Inspect the contents of the given storage directory.
*/
abstract void inspectDirectory(StorageDirectory sd) throws IOException;
/**
* @return false if any of the storage directories have an unfinalized upgrade
*/
abstract boolean isUpgradeFinalized();
/**
* Get the image files which should be loaded into the filesystem.
* @throws IOException if not enough files are available (eg no image found in any directory)
*/
abstract List<FSImageFile> getLatestImages() throws IOException;
/**
* Get the minimum tx id which should be loaded with this set of images.
*/
abstract long getMaxSeenTxId();
/**
* @return true if the directories are in such a state that the image should be re-saved
* following the load
*/
abstract boolean needToSave();
/**
* Record of an image that has been located and had its filename parsed.
*/
static
|
FSImageStorageInspector
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/timezones/AutoZonedTest.java
|
{
"start": 3144,
"end": 3260
}
|
class ____ {
@Id
@GeneratedValue Long id;
ZonedDateTime zonedDateTime;
OffsetDateTime offsetDateTime;
}
}
|
Zoned
|
java
|
apache__flink
|
flink-core/src/test/java/org/apache/flink/api/java/typeutils/TypeExtractorTest.java
|
{
"start": 62785,
"end": 63717
}
|
class ____<T> extends RichMapFunction<T[], T[]> {
private static final long serialVersionUID = 1L;
@Override
public T[] map(T[] value) throws Exception {
return null;
}
}
@SuppressWarnings({"rawtypes", "unchecked"})
@Test
void testParameterizedArrays() {
GenericArrayClass<Boolean> function =
new GenericArrayClass<Boolean>() {
private static final long serialVersionUID = 1L;
};
TypeInformation<?> ti =
TypeExtractor.getMapReturnTypes(
function, TypeInformation.of(new TypeHint<Boolean[]>() {}));
assertThat(ti).isInstanceOf(ObjectArrayTypeInfo.class);
ObjectArrayTypeInfo<?, ?> oati = (ObjectArrayTypeInfo<?, ?>) ti;
assertThat(oati.getComponentInfo()).isEqualTo(BasicTypeInfo.BOOLEAN_TYPE_INFO);
}
public static
|
GenericArrayClass
|
java
|
elastic__elasticsearch
|
plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesService.java
|
{
"start": 800,
"end": 2603
}
|
interface ____ extends Closeable {
/**
* GCE API Version: Elasticsearch/GceCloud/1.0
*/
String VERSION = "Elasticsearch/GceCloud/1.0";
// cloud.gce settings
/**
* cloud.gce.project_id: Google project id
*/
Setting<String> PROJECT_SETTING = Setting.simpleString("cloud.gce.project_id", Property.NodeScope);
/**
* cloud.gce.zone: Google Compute Engine zones
*/
Setting<List<String>> ZONE_SETTING = Setting.stringListSetting("cloud.gce.zone", Property.NodeScope);
/**
* cloud.gce.refresh_interval: How long the list of hosts is cached to prevent further requests to the AWS API. 0 disables caching.
* A negative value will cause infinite caching. Defaults to 0s.
*/
Setting<TimeValue> REFRESH_SETTING = Setting.timeSetting(
"cloud.gce.refresh_interval",
TimeValue.timeValueSeconds(0),
Property.NodeScope
);
/**
* cloud.gce.retry: Should we retry calling GCE API in case of error? Defaults to true.
*/
Setting<Boolean> RETRY_SETTING = Setting.boolSetting("cloud.gce.retry", true, Property.NodeScope);
/**
* cloud.gce.max_wait: How long exponential backoff should retry before definitely failing.
* It's a total time since the initial call is made.
* A negative value will retry indefinitely. Defaults to `-1s` (retry indefinitely).
*/
Setting<TimeValue> MAX_WAIT_SETTING = Setting.timeSetting("cloud.gce.max_wait", TimeValue.timeValueSeconds(-1), Property.NodeScope);
/**
* Return a collection of running instances within the same GCE project
* @return a collection of running instances within the same GCE project
*/
Collection<Instance> instances();
String projectId();
List<String> zones();
}
|
GceInstancesService
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntities.java
|
{
"start": 1530,
"end": 2066
}
|
class ____ {
private List<TimelineEntity> entities = new ArrayList<>();
public TimelineEntities() {
}
@XmlElement(name = "entities")
public List<TimelineEntity> getEntities() {
return entities;
}
public void setEntities(List<TimelineEntity> timelineEntities) {
this.entities = timelineEntities;
}
public void addEntities(List<TimelineEntity> timelineEntities) {
this.entities.addAll(timelineEntities);
}
public void addEntity(TimelineEntity entity) {
entities.add(entity);
}
}
|
TimelineEntities
|
java
|
spring-projects__spring-framework
|
spring-r2dbc/src/main/java/org/springframework/r2dbc/core/NamedParameterUtils.java
|
{
"start": 1801,
"end": 13082
}
|
class ____ {
/**
* Set of characters that qualify as comment or quote starting characters.
*/
private static final String[] START_SKIP = {"'", "\"", "--", "/*", "`"};
/**
* Set of characters that are the corresponding comment or quote ending characters.
*/
private static final String[] STOP_SKIP = {"'", "\"", "\n", "*/", "`"};
/**
* Set of characters that qualify as parameter separators,
* indicating that a parameter name in an SQL String has ended.
*/
private static final String PARAMETER_SEPARATORS = "\"':&,;()|=+-*%/\\<>^";
/**
* An index with separator flags per character code.
* Technically only needed between 34 and 124 at this point.
*/
private static final boolean[] separatorIndex = new boolean[128];
static {
for (char c : PARAMETER_SEPARATORS.toCharArray()) {
separatorIndex[c] = true;
}
}
// -------------------------------------------------------------------------
// Core methods used by NamedParameterExpander
// -------------------------------------------------------------------------
/**
* Parse the SQL statement and locate any placeholders or named parameters.
* Named parameters are substituted for an R2DBC placeholder.
* @param sql the SQL statement
* @return the parsed statement, represented as {@link ParsedSql} instance
*/
public static ParsedSql parseSqlStatement(String sql) {
Assert.notNull(sql, "SQL must not be null");
Set<String> namedParameters = new HashSet<>();
StringBuilder sqlToUse = new StringBuilder(sql);
List<ParameterHolder> parameterList = new ArrayList<>();
char[] statement = sql.toCharArray();
int namedParameterCount = 0;
int unnamedParameterCount = 0;
int totalParameterCount = 0;
int escapes = 0;
int i = 0;
while (i < statement.length) {
int skipToPosition = i;
while (i < statement.length) {
skipToPosition = skipCommentsAndQuotes(statement, i);
if (i == skipToPosition) {
break;
}
else {
i = skipToPosition;
}
}
if (i >= statement.length) {
break;
}
char c = statement[i];
if (c == ':' || c == '&') {
int j = i + 1;
if (c == ':' && j < statement.length && statement[j] == ':') {
// Postgres-style "::" casting operator should be skipped
i = i + 2;
continue;
}
String parameter = null;
if (c == ':' && j < statement.length && statement[j] == '{') {
// :{x} style parameter
while (statement[j] != '}') {
j++;
if (j >= statement.length) {
throw new InvalidDataAccessApiUsageException(
"Non-terminated named parameter declaration at position " + i +
" in statement: " + sql);
}
if (statement[j] == ':' || statement[j] == '{') {
throw new InvalidDataAccessApiUsageException(
"Parameter name contains invalid character '" + statement[j] +
"' at position " + i + " in statement: " + sql);
}
}
if (j - i > 2) {
parameter = sql.substring(i + 2, j);
namedParameterCount = addNewNamedParameter(
namedParameters, namedParameterCount, parameter);
totalParameterCount = addNamedParameter(
parameterList, totalParameterCount, escapes, i, j + 1, parameter);
}
j++;
}
else {
boolean paramWithSquareBrackets = false;
while (j < statement.length) {
c = statement[j];
if (isParameterSeparator(c)) {
break;
}
if (c == '[') {
paramWithSquareBrackets = true;
}
else if (c == ']') {
if (!paramWithSquareBrackets) {
break;
}
paramWithSquareBrackets = false;
}
j++;
}
if (j - i > 1) {
parameter = sql.substring(i + 1, j);
namedParameterCount = addNewNamedParameter(
namedParameters, namedParameterCount, parameter);
totalParameterCount = addNamedParameter(
parameterList, totalParameterCount, escapes, i, j, parameter);
}
}
i = j - 1;
}
else {
if (c == '\\') {
int j = i + 1;
if (j < statement.length && statement[j] == ':') {
// escaped ":" should be skipped
sqlToUse.deleteCharAt(i - escapes);
escapes++;
i = i + 2;
continue;
}
}
}
i++;
}
ParsedSql parsedSql = new ParsedSql(sqlToUse.toString());
for (ParameterHolder ph : parameterList) {
parsedSql.addNamedParameter(ph.getParameterName(), ph.getStartIndex(), ph.getEndIndex());
}
parsedSql.setNamedParameterCount(namedParameterCount);
parsedSql.setUnnamedParameterCount(unnamedParameterCount);
parsedSql.setTotalParameterCount(totalParameterCount);
return parsedSql;
}
private static int addNamedParameter(List<ParameterHolder> parameterList,
int totalParameterCount, int escapes, int i, int j, String parameter) {
parameterList.add(new ParameterHolder(parameter, i - escapes, j - escapes));
totalParameterCount++;
return totalParameterCount;
}
private static int addNewNamedParameter(Set<String> namedParameters, int namedParameterCount, String parameter) {
if (!namedParameters.contains(parameter)) {
namedParameters.add(parameter);
namedParameterCount++;
}
return namedParameterCount;
}
/**
* Skip over comments and quoted names present in an SQL statement.
* @param statement character array containing SQL statement
* @param position current position of statement
* @return next position to process after any comments or quotes are skipped
*/
private static int skipCommentsAndQuotes(char[] statement, int position) {
for (int i = 0; i < START_SKIP.length; i++) {
if (statement[position] == START_SKIP[i].charAt(0)) {
boolean match = true;
for (int j = 1; j < START_SKIP[i].length(); j++) {
if (statement[position + j] != START_SKIP[i].charAt(j)) {
match = false;
break;
}
}
if (match) {
int offset = START_SKIP[i].length();
for (int m = position + offset; m < statement.length; m++) {
if (statement[m] == STOP_SKIP[i].charAt(0)) {
boolean endMatch = true;
int endPos = m;
for (int n = 1; n < STOP_SKIP[i].length(); n++) {
if (m + n >= statement.length) {
// last comment not closed properly
return statement.length;
}
if (statement[m + n] != STOP_SKIP[i].charAt(n)) {
endMatch = false;
break;
}
endPos = m + n;
}
if (endMatch) {
// found character sequence ending comment or quote
return endPos + 1;
}
}
}
// character sequence ending comment or quote not found
return statement.length;
}
}
}
return position;
}
/**
* Parse the SQL statement and locate any placeholders or named parameters. Named
* parameters are substituted for an R2DBC placeholder, and any select list is expanded
* to the required number of placeholders. Select lists may contain an array of objects,
* and in that case the placeholders will be grouped and enclosed with parentheses.
* This allows for the use of "expression lists" in the SQL statement like:
* {@code select id, name, state from table where (name, age) in (('John', 35), ('Ann', 50))}
* <p>The parameter values passed in are used to determine the number of
* placeholders to be used for a select list. Select lists should not be empty
* and should be limited to 100 or fewer elements. An empty list or a larger
* number of elements is not guaranteed to be supported by the database and
* is strictly vendor-dependent.
* @param parsedSql the parsed representation of the SQL statement
* @param bindMarkersFactory the bind marker factory.
* @param paramSource the source for named parameters
* @return the expanded query that accepts bind parameters and allows for execution
* without further translation
* @see #parseSqlStatement
*/
public static PreparedOperation<String> substituteNamedParameters(ParsedSql parsedSql,
BindMarkersFactory bindMarkersFactory, BindParameterSource paramSource) {
NamedParameters markerHolder = new NamedParameters(bindMarkersFactory);
String originalSql = parsedSql.getOriginalSql();
List<String> paramNames = parsedSql.getParameterNames();
if (paramNames.isEmpty()) {
return new ExpandedQuery(originalSql, markerHolder, paramSource);
}
StringBuilder actualSql = new StringBuilder(originalSql.length());
int lastIndex = 0;
for (int i = 0; i < paramNames.size(); i++) {
String paramName = paramNames.get(i);
int[] indexes = parsedSql.getParameterIndexes(i);
int startIndex = indexes[0];
int endIndex = indexes[1];
actualSql.append(originalSql, lastIndex, startIndex);
NamedParameters.NamedParameter marker = markerHolder.getOrCreate(paramName);
if (paramSource.hasValue(paramName)) {
Parameter parameter = paramSource.getValue(paramName);
if (parameter.getValue() instanceof Collection<?> collection) {
int k = 0;
int counter = 0;
for (Object entryItem : collection) {
if (k > 0) {
actualSql.append(", ");
}
k++;
if (entryItem instanceof Object[] expressionList) {
actualSql.append('(');
for (int m = 0; m < expressionList.length; m++) {
if (m > 0) {
actualSql.append(", ");
}
actualSql.append(marker.getPlaceholder(counter));
counter++;
}
actualSql.append(')');
}
else {
actualSql.append(marker.getPlaceholder(counter));
counter++;
}
}
}
else {
actualSql.append(marker.getPlaceholder());
}
}
else {
actualSql.append(marker.getPlaceholder());
}
lastIndex = endIndex;
}
actualSql.append(originalSql, lastIndex, originalSql.length());
return new ExpandedQuery(actualSql.toString(), markerHolder, paramSource);
}
/**
* Determine whether a parameter name ends at the current position,
* that is, whether the given character qualifies as a separator.
*/
private static boolean isParameterSeparator(char c) {
return (c < 128 && separatorIndex[c]) || Character.isWhitespace(c);
}
// -------------------------------------------------------------------------
// Convenience methods operating on a plain SQL String
// -------------------------------------------------------------------------
/**
* Parse the SQL statement and locate any placeholders or named parameters.
* <p>Named parameters are substituted for a native placeholder and any
* select list is expanded to the required number of placeholders.
* <p>This is a shortcut version of
* {@link #parseSqlStatement(String)} in combination with
* {@link #substituteNamedParameters(ParsedSql, BindMarkersFactory, BindParameterSource)}.
* @param sql the SQL statement
* @param bindMarkersFactory the bind marker factory
* @param paramSource the source for named parameters
* @return the expanded query that accepts bind parameters and allows for execution
* without further translation
*/
public static PreparedOperation<String> substituteNamedParameters(String sql,
BindMarkersFactory bindMarkersFactory, BindParameterSource paramSource) {
ParsedSql parsedSql = parseSqlStatement(sql);
return substituteNamedParameters(parsedSql, bindMarkersFactory, paramSource);
}
private static
|
NamedParameterUtils
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/DigitalSignatureEndpointBuilderFactory.java
|
{
"start": 1630,
"end": 10081
}
|
interface ____
extends
EndpointProducerBuilder {
default AdvancedDigitalSignatureEndpointBuilder advanced() {
return (AdvancedDigitalSignatureEndpointBuilder) this;
}
/**
* Sets the JCE name of the Algorithm that should be used for the
* signer.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: SHA256withRSA
* Group: producer
*
* @param algorithm the value to set
* @return the dsl builder
*/
default DigitalSignatureEndpointBuilder algorithm(String algorithm) {
doSetProperty("algorithm", algorithm);
return this;
}
/**
* Sets the alias used to query the KeyStore for keys and {link
* java.security.cert.Certificate Certificates} to be used in signing
* and verifying exchanges. This value can be provided at runtime via
* the message header
* org.apache.camel.component.crypto.DigitalSignatureConstants#KEYSTORE_ALIAS.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param alias the value to set
* @return the dsl builder
*/
default DigitalSignatureEndpointBuilder alias(String alias) {
doSetProperty("alias", alias);
return this;
}
/**
* Sets the reference name for a PrivateKey that can be found in the
* registry.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param certificateName the value to set
* @return the dsl builder
*/
default DigitalSignatureEndpointBuilder certificateName(String certificateName) {
doSetProperty("certificateName", certificateName);
return this;
}
/**
* Sets the KeyStore that can contain keys and Certficates for use in
* signing and verifying exchanges. A KeyStore is typically used with an
* alias, either one supplied in the Route definition or dynamically via
* the message header CamelSignatureKeyStoreAlias. If no alias is
* supplied and there is only a single entry in the Keystore, then this
* single entry will be used.
*
* The option is a: <code>java.security.KeyStore</code> type.
*
* Group: producer
*
* @param keystore the value to set
* @return the dsl builder
*/
default DigitalSignatureEndpointBuilder keystore(java.security.KeyStore keystore) {
doSetProperty("keystore", keystore);
return this;
}
/**
* Sets the KeyStore that can contain keys and Certficates for use in
* signing and verifying exchanges. A KeyStore is typically used with an
* alias, either one supplied in the Route definition or dynamically via
* the message header CamelSignatureKeyStoreAlias. If no alias is
* supplied and there is only a single entry in the Keystore, then this
* single entry will be used.
*
* The option will be converted to a <code>java.security.KeyStore</code>
* type.
*
* Group: producer
*
* @param keystore the value to set
* @return the dsl builder
*/
default DigitalSignatureEndpointBuilder keystore(String keystore) {
doSetProperty("keystore", keystore);
return this;
}
/**
* Sets the reference name for a Keystore that can be found in the
* registry.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param keystoreName the value to set
* @return the dsl builder
*/
default DigitalSignatureEndpointBuilder keystoreName(String keystoreName) {
doSetProperty("keystoreName", keystoreName);
return this;
}
/**
* Set the PrivateKey that should be used to sign the exchange.
*
* The option is a: <code>java.security.PrivateKey</code> type.
*
* Group: producer
*
* @param privateKey the value to set
* @return the dsl builder
*/
default DigitalSignatureEndpointBuilder privateKey(java.security.PrivateKey privateKey) {
doSetProperty("privateKey", privateKey);
return this;
}
/**
* Set the PrivateKey that should be used to sign the exchange.
*
* The option will be converted to a
* <code>java.security.PrivateKey</code> type.
*
* Group: producer
*
* @param privateKey the value to set
* @return the dsl builder
*/
default DigitalSignatureEndpointBuilder privateKey(String privateKey) {
doSetProperty("privateKey", privateKey);
return this;
}
/**
* Sets the reference name for a PrivateKey that can be found in the
* registry.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param privateKeyName the value to set
* @return the dsl builder
*/
default DigitalSignatureEndpointBuilder privateKeyName(String privateKeyName) {
doSetProperty("privateKeyName", privateKeyName);
return this;
}
/**
* Set the id of the security provider that provides the configured
* Signature algorithm.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param provider the value to set
* @return the dsl builder
*/
default DigitalSignatureEndpointBuilder provider(String provider) {
doSetProperty("provider", provider);
return this;
}
/**
* references that should be resolved when the context changes.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param publicKeyName the value to set
* @return the dsl builder
*/
default DigitalSignatureEndpointBuilder publicKeyName(String publicKeyName) {
doSetProperty("publicKeyName", publicKeyName);
return this;
}
/**
* Sets the reference name for a SecureRandom that can be found in the
* registry.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param secureRandomName the value to set
* @return the dsl builder
*/
default DigitalSignatureEndpointBuilder secureRandomName(String secureRandomName) {
doSetProperty("secureRandomName", secureRandomName);
return this;
}
/**
* Set the name of the message header that should be used to store the
* base64 encoded signature. This defaults to 'CamelDigitalSignature'.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: producer
*
* @param signatureHeaderName the value to set
* @return the dsl builder
*/
default DigitalSignatureEndpointBuilder signatureHeaderName(String signatureHeaderName) {
doSetProperty("signatureHeaderName", signatureHeaderName);
return this;
}
/**
* Sets the password used to access an aliased PrivateKey in the
* KeyStore.
*
* The option is a: <code>java.lang.String</code> type.
*
* Group: security
*
* @param password the value to set
* @return the dsl builder
*/
default DigitalSignatureEndpointBuilder password(String password) {
doSetProperty("password", password);
return this;
}
}
/**
* Advanced builder for endpoint for the Crypto (JCE) component.
*/
public
|
DigitalSignatureEndpointBuilder
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/JsonPatchEndpointBuilderFactory.java
|
{
"start": 1436,
"end": 1565
}
|
interface ____ {
/**
* Builder for endpoint for the JsonPatch component.
*/
public
|
JsonPatchEndpointBuilderFactory
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimitsByPartition.java
|
{
"start": 4142,
"end": 43020
}
|
class ____ {
final static int GB = 1024;
final static String A1_PATH = CapacitySchedulerConfiguration.ROOT + ".a" + ".a1";
final static String B1_PATH = CapacitySchedulerConfiguration.ROOT + ".b" + ".b1";
final static String B2_PATH = CapacitySchedulerConfiguration.ROOT + ".b" + ".b2";
final static String C1_PATH = CapacitySchedulerConfiguration.ROOT + ".c" + ".c1";
final static QueuePath ROOT = new QueuePath(CapacitySchedulerConfiguration.ROOT);
final static QueuePath A1 = new QueuePath(A1_PATH);
final static QueuePath B1 = new QueuePath(B1_PATH);
final static QueuePath B2 = new QueuePath(B2_PATH);
final static QueuePath C1 = new QueuePath(C1_PATH);
LeafQueue queue;
RMNodeLabelsManager mgr;
private YarnConfiguration conf;
private final ResourceCalculator resourceCalculator =
new DefaultResourceCalculator();
@BeforeEach
public void setUp() throws IOException {
conf = new YarnConfiguration();
conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
ResourceScheduler.class);
mgr = new NullRMNodeLabelsManager();
mgr.init(conf);
}
private void simpleNodeLabelMappingToManager() throws IOException {
// set node -> label
mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y"));
mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0),
TestUtils.toSet("x"), NodeId.newInstance("h2", 0),
TestUtils.toSet("y")));
}
private void complexNodeLabelMappingToManager() throws IOException {
// set node -> label
mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y",
"z"));
mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0),
TestUtils.toSet("x"), NodeId.newInstance("h2", 0),
TestUtils.toSet("y"), NodeId.newInstance("h3", 0),
TestUtils.toSet("y"), NodeId.newInstance("h4", 0),
TestUtils.toSet("z"), NodeId.newInstance("h5", 0),
RMNodeLabelsManager.EMPTY_STRING_SET));
}
@Test
@Timeout(value = 120)
public void testAMResourceLimitWithLabels() throws Exception {
  /*
   * Test Case:
   * Verify AM resource limit per partition level and per queue level. So
   * we use 2 queues to verify this case.
   * Queue a1 supports labels (x,y). Configure am-resource-limit as 0.2 (x)
   * Queue c1 supports default label. Configure am-resource-limit as 0.2
   *
   * Queue A1 for label X can only support 2Gb AM resource.
   * Queue C1 (empty label) can support 2Gb AM resource.
   *
   * Verify at least one AM is launched, and AM resources should not go more
   * than 2GB in each queue.
   */
  simpleNodeLabelMappingToManager();
  CapacitySchedulerConfiguration config = (CapacitySchedulerConfiguration)
      TestUtils.getConfigurationWithQueueLabels(conf);

  // After getting queue conf, configure AM resource percent for Queue A1
  // as 0.2 (Label X) and for Queue C1 as 0.2 (Empty Label)
  config.setMaximumAMResourcePercentPerPartition(A1, "x", 0.2f);
  config.setMaximumApplicationMasterResourcePerQueuePercent(C1, 0.2f);
  // Now inject node label manager with this updated config
  MockRM rm1 = new MockRM(config) {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };
  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  // The node references for h1/h2 are never needed below; only nm3 is used
  // to launch AMs for the empty-label queue.
  rm1.registerNode("h1:1234", 10 * GB); // label = x
  rm1.registerNode("h2:1234", 10 * GB); // label = y
  MockNM nm3 = rm1.registerNode("h3:1234", 10 * GB); // label = <empty>

  // Submit app1 with 1Gb AM resource to Queue A1 for label X
  MockRMAppSubmissionData data5 =
      MockRMAppSubmissionData.Builder.createWithMemory(GB, rm1)
          .withAppName("app")
          .withUser("user")
          .withAcls(null)
          .withQueue("a1")
          .withAmLabel("x")
          .build();
  RMApp app1 = MockRMAppSubmitter.submit(rm1, data5);

  // Submit app2 with 1Gb AM resource to Queue A1 for label X
  MockRMAppSubmissionData data4 =
      MockRMAppSubmissionData.Builder.createWithMemory(GB, rm1)
          .withAppName("app")
          .withUser("user")
          .withAcls(null)
          .withQueue("a1")
          .withAmLabel("x")
          .build();
  RMApp app2 = MockRMAppSubmitter.submit(rm1, data4);

  // Submit 3rd app to Queue A1 for label X, and this will be pending as
  // AM limit is already crossed for label X. (2GB)
  MockRMAppSubmissionData data3 =
      MockRMAppSubmissionData.Builder.createWithMemory(GB, rm1)
          .withAppName("app")
          .withUser("user")
          .withAcls(null)
          .withQueue("a1")
          .withAmLabel("x")
          .build();
  RMApp pendingApp = MockRMAppSubmitter.submit(rm1, data3);

  CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
  LeafQueue leafQueue = (LeafQueue) cs.getQueue("a1");
  assertNotNull(leafQueue);

  // Two 1GB AMs fit within the 2GB AM limit for label X, so the first two
  // apps are activated and the third stays pending.
  assertEquals(2, leafQueue.getNumActiveApplications());
  assertEquals(1, leafQueue.getNumPendingApplications());
  assertTrue(app1.getDiagnostics().toString().contains(
      AMState.ACTIVATED.getDiagnosticMessage()), "AM diagnostics not set properly");
  assertTrue(app2.getDiagnostics().toString().contains(AMState.ACTIVATED.getDiagnosticMessage()),
      "AM diagnostics not set properly");
  assertTrue(pendingApp.getDiagnostics().toString()
      .contains(AMState.INACTIVATED.getDiagnosticMessage()),
      "AM diagnostics not set properly");
  assertTrue(pendingApp.getDiagnostics().toString().contains(
      CSAMContainerLaunchDiagnosticsConstants.QUEUE_AM_RESOURCE_LIMIT_EXCEED),
      "AM diagnostics not set properly");

  // Now verify the same test case in Queue C1 where label is not configured.
  // Submit an app to Queue C1 with empty label
  MockRMAppSubmissionData data2 =
      MockRMAppSubmissionData.Builder.createWithMemory(GB, rm1)
          .withAppName("app")
          .withUser("user")
          .withAcls(null)
          .withQueue("c1")
          .withUnmanagedAM(false)
          .build();
  RMApp app3 = MockRMAppSubmitter.submit(rm1, data2);
  MockRM.launchAndRegisterAM(app3, rm1, nm3);

  // Submit next app to Queue C1 with empty label
  MockRMAppSubmissionData data1 =
      MockRMAppSubmissionData.Builder.createWithMemory(GB, rm1)
          .withAppName("app")
          .withUser("user")
          .withAcls(null)
          .withQueue("c1")
          .withUnmanagedAM(false)
          .build();
  RMApp app4 = MockRMAppSubmitter.submit(rm1, data1);
  MockRM.launchAndRegisterAM(app4, rm1, nm3);

  // Submit 3rd app to Queue C1. This will be pending as Queue's am-limit
  // is reached.
  MockRMAppSubmissionData data =
      MockRMAppSubmissionData.Builder.createWithMemory(GB, rm1)
          .withAppName("app")
          .withUser("user")
          .withAcls(null)
          .withQueue("c1")
          .withUnmanagedAM(false)
          .build();
  pendingApp = MockRMAppSubmitter.submit(rm1, data);

  leafQueue = (LeafQueue) cs.getQueue("c1");
  assertNotNull(leafQueue);

  // 2 apps will be activated, third one will be pending as am-limit
  // is reached.
  assertEquals(2, leafQueue.getNumActiveApplications());
  assertEquals(1, leafQueue.getNumPendingApplications());
  assertTrue(pendingApp.getDiagnostics().toString()
      .contains(AMState.INACTIVATED.getDiagnosticMessage()),
      "AM diagnostics not set properly");
  assertTrue(pendingApp.getDiagnostics().toString().contains(
      CSAMContainerLaunchDiagnosticsConstants.QUEUE_AM_RESOURCE_LIMIT_EXCEED),
      "AM diagnostics not set properly");

  rm1.killApp(app3.getApplicationId());
  Thread.sleep(1000);

  // After killing one running app, pending app will also get activated.
  assertEquals(2, leafQueue.getNumActiveApplications());
  assertEquals(0, leafQueue.getNumPendingApplications());
  rm1.close();
}
@Test
@Timeout(value = 120)
public void testAtleastOneAMRunPerPartition() throws Exception {
/*
* Test Case:
* Even though am-resource-limit per queue/partition may cross if we
* activate an app (high am resource demand), we have to activate it
* since no other apps are running in that Queue/Partition. Here also
* we run one test case for partition level and one in queue level to
* ensure no breakage in existing functionality.
*
* Queue a1 supports labels (x,y). Configure am-resource-limit as 0.15 (x)
* Queue c1 supports default label. Configure am-resource-limit as 0.15
*
* Queue A1 for label X can only support 1.5Gb AM resource.
* Queue C1 (empty label) can support 1.5Gb AM resource.
*
* Verify atleast one AM is launched in each Queue.
*/
simpleNodeLabelMappingToManager();
CapacitySchedulerConfiguration config = (CapacitySchedulerConfiguration)
TestUtils.getConfigurationWithQueueLabels(conf);
// After getting queue conf, configure AM resource percent for Queue A1
// as 0.15 (Label X) and for Queue C1 as 0.15 (Empty Label)
config.setMaximumAMResourcePercentPerPartition(A1, "x", 0.15f);
config.setMaximumApplicationMasterResourcePerQueuePercent(C1, 0.15f);
// inject node label manager
MockRM rm1 = new MockRM(config) {
@Override
public RMNodeLabelsManager createNodeLabelManager() {
return mgr;
}
};
rm1.getRMContext().setNodeLabelManager(mgr);
rm1.start();
MockNM nm1 = rm1.registerNode("h1:1234", 10 * GB); // label = x
rm1.registerNode("h2:1234", 10 * GB); // label = y
MockNM nm3 = rm1.registerNode("h3:1234", 10 * GB); // label = <empty>
// Submit app1 (2 GB) to Queue A1 and label X
MockRMAppSubmissionData data3 =
MockRMAppSubmissionData.Builder.createWithMemory(2 * GB, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("a1")
.withAmLabel("x")
.build();
RMApp app1 = MockRMAppSubmitter.submit(rm1, data3);
// This app must be activated eventhough the am-resource per-partition
// limit is only for 1.5GB.
MockRM.launchAndRegisterAM(app1, rm1, nm1);
// Submit 2nd app to label "X" with one GB and it must be pending since
// am-resource per-partition limit is crossed (1.5 GB was the limit).
MockRMAppSubmissionData data2 =
MockRMAppSubmissionData.Builder.createWithMemory(GB, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("a1")
.withAmLabel("x")
.build();
MockRMAppSubmitter.submit(rm1, data2);
CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
LeafQueue leafQueue = (LeafQueue) cs.getQueue("a1");
assertNotNull(leafQueue);
// The running 2GB AM already exceeds the 1.5GB partition limit, so only
// the first ("at least one") app is active and the 1GB app stays pending.
assertEquals(1, leafQueue.getNumActiveApplications());
assertEquals(1, leafQueue.getNumPendingApplications());
// Now verify the same test case in Queue C1 which takes default label
// to see queue level am-resource-limit is still working as expected.
// Submit an app to Queue C1 with empty label (2 GB)
MockRMAppSubmissionData data1 =
MockRMAppSubmissionData.Builder.createWithMemory(2 * GB, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("c1")
.withUnmanagedAM(false)
.build();
RMApp app3 = MockRMAppSubmitter.submit(rm1, data1);
// This app must be activated even though the am-resource per-queue
// limit is only for 1.5GB
MockRM.launchAndRegisterAM(app3, rm1, nm3);
// Submit 2nd app to C1 (Default label, hence am-limit per-queue will be
// considered).
MockRMAppSubmissionData data =
MockRMAppSubmissionData.Builder.createWithMemory(GB, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("c1")
.withUnmanagedAM(false)
.build();
MockRMAppSubmitter.submit(rm1, data);
leafQueue = (LeafQueue) cs.getQueue("c1");
assertNotNull(leafQueue);
// 1 app will be activated (and it has AM resource more than queue limit)
assertEquals(1, leafQueue.getNumActiveApplications());
assertEquals(1, leafQueue.getNumPendingApplications());
rm1.close();
}
@Test
@Timeout(value = 120)
public void testDefaultAMLimitFromQueueForPartition() throws Exception {
  /*
   * Test Case:
   * Configure AM resource limit per queue level. If partition level config
   * is not found, we will be considering per-queue level am-limit. Ensure
   * this is working as expected.
   *
   * Queue A1 am-resource limit to be configured as 0.2 (not for partition x)
   *
   * Even though per-partition level config is not done, CS should consider
   * the configuration done for queue level.
   */
  simpleNodeLabelMappingToManager();
  CapacitySchedulerConfiguration config = (CapacitySchedulerConfiguration)
      TestUtils.getConfigurationWithQueueLabels(conf);

  // After getting queue conf, configure AM resource percent for Queue A1
  // as 0.2 (not for partition, rather in queue level)
  config.setMaximumApplicationMasterResourcePerQueuePercent(A1, 0.2f);
  // inject node label manager
  MockRM rm1 = new MockRM(config) {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };
  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  // None of the node references are needed later; register the nodes only.
  rm1.registerNode("h1:1234", 10 * GB); // label = x
  rm1.registerNode("h2:1234", 10 * GB); // label = y
  rm1.registerNode("h3:1234", 10 * GB); // label = <empty>

  // Submit app1 (2 GB) to Queue A1 and label X
  MockRMAppSubmissionData data1 =
      MockRMAppSubmissionData.Builder.createWithMemory(2 * GB, rm1)
          .withAppName("app")
          .withUser("user")
          .withAcls(null)
          .withQueue("a1")
          .withAmLabel("x")
          .build();
  RMApp app1 = MockRMAppSubmitter.submit(rm1, data1);

  // Submit 2nd app to label "X" with one GB. Since queue am-limit is 2GB,
  // 2nd app will be pending and first one will get activated.
  MockRMAppSubmissionData data =
      MockRMAppSubmissionData.Builder.createWithMemory(GB, rm1)
          .withAppName("app")
          .withUser("user")
          .withAcls(null)
          .withQueue("a1")
          .withAmLabel("x")
          .build();
  RMApp pendingApp = MockRMAppSubmitter.submit(rm1, data);

  CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
  LeafQueue leafQueue = (LeafQueue) cs.getQueue("a1");
  assertNotNull(leafQueue);

  // Only 1 app will be activated as am-limit for queue is 0.2 and same is
  // used for partition "x" also.
  assertEquals(1, leafQueue.getNumActiveApplications());
  assertEquals(1, leafQueue.getNumPendingApplications());
  assertTrue(app1.getDiagnostics().toString().contains(AMState.ACTIVATED.getDiagnosticMessage()),
      "AM diagnostics not set properly");
  assertTrue(pendingApp.getDiagnostics().toString()
      .contains(AMState.INACTIVATED.getDiagnosticMessage()),
      "AM diagnostics not set properly");
  assertTrue(pendingApp.getDiagnostics().toString()
      .contains(CSAMContainerLaunchDiagnosticsConstants.QUEUE_AM_RESOURCE_LIMIT_EXCEED),
      "AM diagnostics not set properly");
  rm1.close();
}
@Test
@Timeout(value = 120)
public void testUserAMResourceLimitWithLabels() throws Exception {
/*
* Test Case:
* Verify user level AM resource limit. This test case is ran with two
* users. And per-partition level am-resource-limit will be 0.4, which
* internally will be 4GB. Hence 2GB will be available for each
* user for its AM resource.
*
* Now this test case will create a scenario where AM resource limit per
* partition is not met, but user level am-resource limit is reached.
* Hence app will be pending.
*/
final String user_0 = "user_0";
final String user_1 = "user_1";
simpleNodeLabelMappingToManager();
CapacitySchedulerConfiguration config = (CapacitySchedulerConfiguration)
TestUtils.getConfigurationWithQueueLabels(conf);
// After getting queue conf, configure AM resource percent for Queue A1
// as 0.4 (Label X). Also set userlimit as 50% for this queue. So when we
// have two users submitting applications, each user will get 50% of AM
// resource which is available in this partition.
config.setMaximumAMResourcePercentPerPartition(A1, "x", 0.4f);
config.setUserLimit(A1, 50);
// Now inject node label manager with this updated config
MockRM rm1 = new MockRM(config) {
@Override
public RMNodeLabelsManager createNodeLabelManager() {
return mgr;
}
};
rm1.getRMContext().setNodeLabelManager(mgr);
rm1.start();
MockNM nm1 = rm1.registerNode("h1:1234", 10 * GB); // label = x
rm1.registerNode("h2:1234", 10 * GB); // label = y
rm1.registerNode("h3:1234", 10 * GB); // label = <empty>
// Submit app1 with 1Gb AM resource to Queue A1 for label X for user0
MockRMAppSubmissionData data3 =
MockRMAppSubmissionData.Builder.createWithMemory(GB, rm1)
.withAppName("app")
.withUser(user_0)
.withAcls(null)
.withQueue("a1")
.withAmLabel("x")
.build();
RMApp app1 = MockRMAppSubmitter.submit(rm1, data3);
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
// Place few allocate requests to make it an active application
am1.allocate("*", 1 * GB, 15, new ArrayList<ContainerId>(), "");
// Now submit 2nd app to Queue A1 for label X for user1
MockRMAppSubmissionData data2 =
MockRMAppSubmissionData.Builder.createWithMemory(GB, rm1)
.withAppName("app")
.withUser(user_1)
.withAcls(null)
.withQueue("a1")
.withAmLabel("x")
.build();
RMApp app2 = MockRMAppSubmitter.submit(rm1, data2);
MockRM.launchAndRegisterAM(app2, rm1, nm1);
CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
LeafQueue leafQueue = (LeafQueue) cs.getQueue("a1");
assertNotNull(leafQueue);
// Verify active applications count in this queue.
assertEquals(2, leafQueue.getNumActiveApplications());
assertEquals(1, leafQueue.getNumActiveApplications(user_0));
assertEquals(0, leafQueue.getNumPendingApplications());
// Submit 3rd app to Queue A1 for label X for user1. Now user1 will have
// 2 applications (2 GB resource) and user0 will have one app (1GB).
MockRMAppSubmissionData data1 =
MockRMAppSubmissionData.Builder.createWithMemory(GB, rm1)
.withAppName("app")
.withUser(user_1)
.withAcls(null)
.withQueue("a1")
.withAmLabel("x")
.build();
RMApp app3 = MockRMAppSubmitter.submit(rm1, data1);
MockAM am2 = MockRM.launchAndRegisterAM(app3, rm1, nm1);
// Place few allocate requests to make it an active application. This is
// to ensure that user1 and user0 are active users.
am2.allocate("*", 1 * GB, 10, new ArrayList<ContainerId>(), "");
// Submit final app to Queue A1 for label X. Since we are trying to submit
// for user1, we need 3Gb resource for AMs.
// 4Gb -> 40% of label "X" in queue A1
// Since we have 2 users, 50% of 4Gb will be max for each user. Here user1
// has already crossed this 2GB limit, hence this app will be pending.
MockRMAppSubmissionData data =
MockRMAppSubmissionData.Builder.createWithMemory(GB, rm1)
.withAppName("app")
.withUser(user_1)
.withAcls(null)
.withQueue("a1")
.withAmLabel("x")
.build();
RMApp pendingApp = MockRMAppSubmitter.submit(rm1, data);
// Verify active applications count per user and also in queue level.
// Queue has 3 active AMs (3GB used, under the 4GB partition AM limit), but
// user_1's 2GB user-level AM limit is what keeps pendingApp inactive.
assertEquals(3, leafQueue.getNumActiveApplications());
assertEquals(1, leafQueue.getNumActiveApplications(user_0));
assertEquals(2, leafQueue.getNumActiveApplications(user_1));
assertEquals(1, leafQueue.getNumPendingApplications(user_1));
assertEquals(1, leafQueue.getNumPendingApplications());
//verify Diagnostic messages
assertTrue(pendingApp.getDiagnostics().toString()
.contains(AMState.INACTIVATED.getDiagnosticMessage()),
"AM diagnostics not set properly");
assertTrue(pendingApp.getDiagnostics().toString().contains(
CSAMContainerLaunchDiagnosticsConstants.USER_AM_RESOURCE_LIMIT_EXCEED),
"AM diagnostics not set properly");
rm1.close();
}
@Test
public void testAMResourceLimitForMultipleApplications() throws Exception {
/*
* Test Case:
* In a complex node label setup, verify am-resource-percentage calculation
* and check whether applications can get activated as per expectation.
*/
complexNodeLabelMappingToManager();
CapacitySchedulerConfiguration config = (CapacitySchedulerConfiguration)
TestUtils.getComplexConfigurationWithQueueLabels(conf);
/*
* Queue structure:
* root (*)
* ________________
* / \
* a x(100%), y(50%) b y(50%), z(100%)
* ________________ ______________
* / / \
* a1 (x,y) b1(no) b2(y,z)
* 100% y = 100%, z = 100%
*
* Node structure:
* h1 : x
* h2 : y
* h3 : y
* h4 : z
* h5 : NO
*
* Total resource:
* x: 10G
* y: 20G
* z: 10G
* *: 10G
*
* AM resource percentage config:
* A1 : 0.25
* B1 : 0.15
*/
config.setMaximumAMResourcePercentPerPartition(A1, "y", 0.25f);
config.setMaximumApplicationMasterResourcePerQueuePercent(B1, 0.15f);
// Now inject node label manager with this updated config
MockRM rm1 = new MockRM(config) {
@Override
public RMNodeLabelsManager createNodeLabelManager() {
return mgr;
}
};
rm1.getRMContext().setNodeLabelManager(mgr);
rm1.start();
rm1.registerNode("h1:1234", 10 * GB); // label = x
MockNM nm2 = rm1.registerNode("h2:1234", 10 * GB); // label = y
MockNM nm3 = rm1.registerNode("h3:1234", 10 * GB); // label = y
rm1.registerNode("h4:1234", 10 * GB); // label = z
MockNM nm5 = rm1.registerNode("h5:1234", 10 * GB); // label = <empty>
// Submit app1 with 2Gb AM resource to Queue A1 for label Y
MockRMAppSubmissionData data4 =
MockRMAppSubmissionData.Builder.createWithMemory(2 * GB, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("a1")
.withAmLabel("y")
.build();
RMApp app1 = MockRMAppSubmitter.submit(rm1, data4);
MockRM.launchAndRegisterAM(app1, rm1, nm2);
// Submit app2 with 1Gb AM resource to Queue A1 for label Y
MockRMAppSubmissionData data3 =
MockRMAppSubmissionData.Builder.createWithMemory(GB, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("a1")
.withAmLabel("y")
.build();
RMApp app2 = MockRMAppSubmitter.submit(rm1, data3);
MockRM.launchAndRegisterAM(app2, rm1, nm3);
// Submit another app with 1Gb AM resource to Queue A1 for label Y
MockRMAppSubmissionData data2 =
MockRMAppSubmissionData.Builder.createWithMemory(GB, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("a1")
.withAmLabel("y")
.build();
MockRMAppSubmitter.submit(rm1, data2);
CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
LeafQueue leafQueue = (LeafQueue) cs.getQueue("a1");
assertNotNull(leafQueue);
/*
* capacity of queue A -> 50% for label Y
* capacity of queue A1 -> 100% for label Y
*
* Total resources available for label Y -> 20GB (nm2 and nm3)
* Hence in queue A1, max resource for label Y is 10GB.
*
* AM resource percent config for queue A1 -> 0.25
* ==> 2.5Gb (3 Gb) is max-am-resource-limit
*/
assertEquals(2, leafQueue.getNumActiveApplications());
assertEquals(1, leafQueue.getNumPendingApplications());
// Submit app3 with 1Gb AM resource to Queue B1 (no_label)
MockRMAppSubmissionData data1 =
MockRMAppSubmissionData.Builder.createWithMemory(GB, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("b1")
.withUnmanagedAM(false)
.build();
RMApp app3 = MockRMAppSubmitter.submit(rm1, data1);
MockRM.launchAndRegisterAM(app3, rm1, nm5);
// Submit another app with 1Gb AM resource to Queue B1 (no_label)
MockRMAppSubmissionData data =
MockRMAppSubmissionData.Builder.createWithMemory(GB, rm1)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("b1")
.withUnmanagedAM(false)
.build();
MockRMAppSubmitter.submit(rm1, data);
leafQueue = (LeafQueue) cs.getQueue("b1");
assertNotNull(leafQueue);
/*
* capacity of queue B -> 90% for queue
* -> and 100% for no-label
* capacity of queue B1 -> 50% for no-label/queue
*
* Total resources available for no-label -> 10GB (nm5)
* Hence in queue B1, max resource for no-label is 5GB.
*
* AM resource percent config for queue B1 -> 0.15
* ==> 1Gb is max-am-resource-limit
*
* Only one app will be activated and the other will be pending.
*/
assertEquals(1, leafQueue.getNumActiveApplications());
assertEquals(1, leafQueue.getNumPendingApplications());
rm1.close();
}
@Test
public void testHeadroom() throws Exception {
  /*
   * Test Case: Verify Headroom calculated is sum of headrooms for each
   * partition requested. So submit a app with requests for default partition
   * and 'x' partition, so the total headroom for the user should be sum of
   * the head room for both labels.
   */
  simpleNodeLabelMappingToManager();
  CapacitySchedulerConfiguration csConf =
      (CapacitySchedulerConfiguration) TestUtils
          .getComplexConfigurationWithQueueLabels(conf);
  csConf.setUserLimit(A1, 25);
  csConf.setUserLimit(B2, 25);

  // Use a distinct name for the plain YARN configuration so the local does
  // not shadow the class-level 'conf' field that was consumed above.
  YarnConfiguration yarnConf = new YarnConfiguration();
  CapacitySchedulerContext csContext = mock(CapacitySchedulerContext.class);
  when(csContext.getConfiguration()).thenReturn(csConf);
  when(csContext.getConf()).thenReturn(yarnConf);
  when(csContext.getMinimumResourceCapability())
      .thenReturn(Resources.createResource(GB));
  when(csContext.getMaximumResourceCapability())
      .thenReturn(Resources.createResource(16 * GB));
  when(csContext.getResourceCalculator()).thenReturn(resourceCalculator);
  RMContext rmContext = TestUtils.getMockRMContext();
  RMContext spyRMContext = spy(rmContext);
  when(spyRMContext.getNodeLabelManager()).thenReturn(mgr);
  when(csContext.getRMContext()).thenReturn(spyRMContext);
  when(csContext.getPreemptionManager()).thenReturn(new PreemptionManager());

  CapacitySchedulerQueueManager queueManager =
      new CapacitySchedulerQueueManager(csConf, mgr, null);
  when(csContext.getCapacitySchedulerQueueManager()).thenReturn(queueManager);

  // Setup nodelabels
  queueManager.reinitConfiguredNodeLabels(csConf);

  mgr.activateNode(NodeId.newInstance("h0", 0),
      Resource.newInstance(160 * GB, 16)); // default Label
  mgr.activateNode(NodeId.newInstance("h1", 0),
      Resource.newInstance(160 * GB, 16)); // label x
  mgr.activateNode(NodeId.newInstance("h2", 0),
      Resource.newInstance(160 * GB, 16)); // label y

  // Say cluster has 100 nodes of 16G each
  Resource clusterResource = Resources.createResource(160 * GB);
  when(csContext.getClusterResource()).thenReturn(clusterResource);

  CapacitySchedulerQueueContext queueContext = new CapacitySchedulerQueueContext(csContext);

  CSQueueStore queues = new CSQueueStore();
  CSQueue rootQueue = CapacitySchedulerQueueManager.parseQueue(queueContext,
      csConf, null, "root", queues, queues, TestUtils.spyHook);
  queueManager.setRootQueue(rootQueue);
  rootQueue.updateClusterResource(clusterResource,
      new ResourceLimits(clusterResource));

  // Manipulate queue 'a'
  LeafQueue queue = TestLeafQueue.stubLeafQueue((LeafQueue) queues.get("b2"));
  queue.updateClusterResource(clusterResource,
      new ResourceLimits(clusterResource));
  String rack_0 = "rack_0";
  FiCaSchedulerNode node_0 = TestUtils.getMockNode("h0", rack_0, 0, 160 * GB);
  FiCaSchedulerNode node_1 = TestUtils.getMockNode("h1", rack_0, 0, 160 * GB);

  final String user_0 = "user_0";
  final String user_1 = "user_1";

  RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
  ConcurrentMap<ApplicationId, RMApp> spyApps =
      spy(new ConcurrentHashMap<ApplicationId, RMApp>());
  RMApp rmApp = mock(RMApp.class);
  ResourceRequest amResourceRequest = mock(ResourceRequest.class);
  Resource amResource = Resources.createResource(0, 0);
  when(amResourceRequest.getCapability()).thenReturn(amResource);
  when(rmApp.getAMResourceRequests()).thenReturn(
      Collections.singletonList(amResourceRequest));
  // Stub the app map once (the original code repeated this stubbing).
  doReturn(rmApp)
      .when(spyApps).get(ArgumentMatchers.<ApplicationId>any());
  when(spyRMContext.getRMApps()).thenReturn(spyApps);
  RMAppAttempt rmAppAttempt = mock(RMAppAttempt.class);
  when(rmApp.getRMAppAttempt(any()))
      .thenReturn(rmAppAttempt);
  when(rmApp.getCurrentAppAttempt()).thenReturn(rmAppAttempt);
  doReturn(true).when(spyApps)
      .containsKey(ArgumentMatchers.<ApplicationId>any());

  Priority priority_1 = TestUtils.createMockPriority(1);

  // Submit first application with some resource-requests from user_0,
  // and check headroom
  final ApplicationAttemptId appAttemptId_0_0 =
      TestUtils.getMockApplicationAttemptId(0, 0);
  FiCaSchedulerApp app_0_0 = new FiCaSchedulerApp(appAttemptId_0_0, user_0,
      queue, queue.getAbstractUsersManager(), spyRMContext);
  queue.submitApplicationAttempt(app_0_0, user_0);

  List<ResourceRequest> app_0_0_requests = new ArrayList<ResourceRequest>();
  app_0_0_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY,
      1 * GB, 2, true, priority_1, recordFactory));
  app_0_0.updateResourceRequests(app_0_0_requests);

  // Schedule to compute
  queue.assignContainers(clusterResource, node_0,
      new ResourceLimits(clusterResource),
      SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
  //head room = queue capacity = 50 % 90% 160 GB * 0.25 (UL)
  Resource expectedHeadroom =
      Resources.createResource((int) (0.5 * 0.9 * 160 * 0.25) * GB, 1);
  assertEquals(expectedHeadroom, app_0_0.getHeadroom());

  // Submit second application from user_0, check headroom
  final ApplicationAttemptId appAttemptId_0_1 =
      TestUtils.getMockApplicationAttemptId(1, 0);
  FiCaSchedulerApp app_0_1 = new FiCaSchedulerApp(appAttemptId_0_1, user_0,
      queue, queue.getAbstractUsersManager(), spyRMContext);
  queue.submitApplicationAttempt(app_0_1, user_0);

  List<ResourceRequest> app_0_1_requests = new ArrayList<ResourceRequest>();
  app_0_1_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY,
      1 * GB, 2, true, priority_1, recordFactory));
  app_0_1.updateResourceRequests(app_0_1_requests);

  app_0_1_requests.clear();
  app_0_1_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY,
      1 * GB, 2, true, priority_1, recordFactory, "y"));
  app_0_1.updateResourceRequests(app_0_1_requests);

  // Schedule to compute
  queue.assignContainers(clusterResource, node_0,
      new ResourceLimits(clusterResource),
      SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); // Schedule to compute
  queue.assignContainers(clusterResource, node_1,
      new ResourceLimits(clusterResource),
      SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); // Schedule to compute
  assertEquals(expectedHeadroom, app_0_0.getHeadroom());// no change
  //head room for default label + head room for y partition
  //head room for y partition = 100% 50%(b queue capacity ) * 160 * GB
  Resource expectedHeadroomWithReqInY = Resources.add(
      Resources.createResource((int) (0.25 * 0.5 * 160) * GB, 1),
      expectedHeadroom);
  assertEquals(expectedHeadroomWithReqInY, app_0_1.getHeadroom());

  // Submit first application from user_1, check for new headroom
  final ApplicationAttemptId appAttemptId_1_0 =
      TestUtils.getMockApplicationAttemptId(2, 0);
  FiCaSchedulerApp app_1_0 = new FiCaSchedulerApp(appAttemptId_1_0, user_1,
      queue, queue.getAbstractUsersManager(), spyRMContext);
  queue.submitApplicationAttempt(app_1_0, user_1);

  List<ResourceRequest> app_1_0_requests = new ArrayList<ResourceRequest>();
  app_1_0_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY,
      1 * GB, 2, true, priority_1, recordFactory));
  app_1_0.updateResourceRequests(app_1_0_requests);

  app_1_0_requests.clear();
  app_1_0_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY,
      1 * GB, 2, true, priority_1, recordFactory, "y"));
  app_1_0.updateResourceRequests(app_1_0_requests);

  // Schedule to compute
  queue.assignContainers(clusterResource, node_0,
      new ResourceLimits(clusterResource),
      SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); // Schedule to compute
  //head room = queue capacity = (50 % 90% 160 GB)/2 (for 2 users)
  expectedHeadroom =
      Resources.createResource((int) (0.5 * 0.9 * 160 * 0.25) * GB, 1);
  //head room for default label + head room for y partition
  //head room for y partition = 100% 50%(b queue capacity ) * 160 * GB
  expectedHeadroomWithReqInY = Resources.add(
      Resources.createResource((int) (0.25 * 0.5 * 160) * GB, 1),
      expectedHeadroom);
  assertEquals(expectedHeadroom, app_0_0.getHeadroom());
  assertEquals(expectedHeadroomWithReqInY, app_0_1.getHeadroom());
  assertEquals(expectedHeadroomWithReqInY, app_1_0.getHeadroom());
}
/**
* {@link LeafQueue#activateApplications()} should validate values of all
* resourceTypes (memory, vcores and any custom type such as gpu) before
* activating an application.
*
* @throws Exception
*/
@Test
public void testAMLimitByAllResources() throws Exception {
CapacitySchedulerConfiguration csconf =
new CapacitySchedulerConfiguration();
csconf.setResourceComparator(DominantResourceCalculator.class);
String queueName = "a1";
csconf.setQueues(ROOT,
new String[] {queueName});
csconf.setCapacity(new QueuePath("root.a1"), 100);
// Register memory, vcores and gpu as known resource types; gpu has a
// minimum allocation of 0 so it never blocks activation on its own.
ResourceInformation res0 = ResourceInformation.newInstance("memory-mb",
ResourceInformation.MEMORY_MB.getUnits(), GB, Long.MAX_VALUE);
ResourceInformation res1 = ResourceInformation.newInstance("vcores",
ResourceInformation.VCORES.getUnits(), 1, Integer.MAX_VALUE);
ResourceInformation res2 = ResourceInformation.newInstance("gpu",
ResourceInformation.GPUS.getUnits(), 0, Integer.MAX_VALUE);
Map<String, ResourceInformation> riMap = new HashMap<>();
riMap.put(ResourceInformation.MEMORY_URI, res0);
riMap.put(ResourceInformation.VCORES_URI, res1);
riMap.put(ResourceInformation.GPU_URI, res2);
ResourceUtils.initializeResourcesFromResourceInformationMap(riMap);
YarnConfiguration config = new YarnConfiguration(csconf);
config.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
ResourceScheduler.class);
config.setBoolean(TestResourceProfiles.TEST_CONF_RESET_RESOURCE_TYPES,
false);
MockRM rm = new MockRM(config);
rm.start();
Map<String, Long> res = new HashMap<>();
res.put("gpu", 0L);
Resource clusterResource = Resource.newInstance(16 * GB, 64, res);
// Cluster Resource - 16GB, 64vcores
// AMLimit 16384 x .1 mb , 64 x .1 vcore
// Effective AM limit after normalized to minimum resource 2048,7
rm.registerNode("127.0.0.1:1234", clusterResource);
String userName = "user_0";
ResourceScheduler scheduler = rm.getRMContext().getScheduler();
LeafQueue queueA = (LeafQueue) ((CapacityScheduler) scheduler)
.getQueue(queueName);
Resource amResource = Resource.newInstance(GB, 1);
MockRMAppSubmitter.submit(rm,
MockRMAppSubmissionData.Builder.createWithResource(amResource, rm)
.withAppName("app-1")
.withUser(userName)
.withAcls(null)
.withQueue(queueName)
.build());
MockRMAppSubmitter.submit(rm,
MockRMAppSubmissionData.Builder.createWithResource(amResource, rm)
.withAppName("app-2")
.withUser(userName)
.withAcls(null)
.withQueue(queueName)
.build());
// app-3 should not be activated as amLimit will be reached
// for memory
MockRMAppSubmitter.submit(rm,
MockRMAppSubmissionData.Builder.createWithResource(amResource, rm)
.withAppName("app-3")
.withUser(userName)
.withAcls(null)
.withQueue(queueName)
.build());
assertEquals(1, queueA.getNumPendingApplications(), "PendingApplications should be 1");
assertEquals(2, queueA.getNumActiveApplications(), "Active applications should be 2");
// AMLimit is 2048,7
assertEquals(2048,
queueA.getQueueResourceUsage().getAMLimit().getMemorySize());
assertEquals(7,
queueA.getQueueResourceUsage().getAMLimit().getVirtualCores());
// Used AM Resource is 2048,2
assertEquals(2048,
queueA.getQueueResourceUsage().getAMUsed().getMemorySize());
assertEquals(2,
queueA.getQueueResourceUsage().getAMUsed().getVirtualCores());
rm.close();
}
}
|
TestApplicationLimitsByPartition
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDenseVectorFromDoubleEvaluator.java
|
{
"start": 1061,
"end": 3877
}
|
class ____ extends AbstractConvertFunction.AbstractEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ToDenseVectorFromDoubleEvaluator.class);
private final EvalOperator.ExpressionEvaluator d;
// Wraps the evaluator producing the double input; conversion happens in
// evalVector/evalBlock below.
public ToDenseVectorFromDoubleEvaluator(Source source, EvalOperator.ExpressionEvaluator d,
DriverContext driverContext) {
super(driverContext, source);
this.d = d;
}
@Override
public EvalOperator.ExpressionEvaluator next() {
// The single upstream evaluator whose output this converter consumes.
return d;
}
@Override
public Block evalVector(Vector v) {
// Dense (no-null) path: convert every double position to a float.
DoubleVector vector = (DoubleVector) v;
int positionCount = v.getPositionCount();
if (vector.isConstant()) {
// Constant vector: convert once and broadcast the result.
return driverContext.blockFactory().newConstantFloatBlockWith(evalValue(vector, 0), positionCount);
}
try (FloatBlock.Builder builder = driverContext.blockFactory().newFloatBlockBuilder(positionCount)) {
for (int p = 0; p < positionCount; p++) {
builder.appendFloat(evalValue(vector, p));
}
return builder.build();
}
}
private float evalValue(DoubleVector container, int index) {
  // Read the double at this position and delegate the conversion.
  return ToDenseVector.fromDouble(container.getDouble(index));
}
@Override
public Block evalBlock(Block b) {
// Block path: positions may hold zero, one, or many values; mirror that
// structure (including nulls) in the resulting float block.
DoubleBlock block = (DoubleBlock) b;
int positionCount = block.getPositionCount();
try (FloatBlock.Builder builder = driverContext.blockFactory().newFloatBlockBuilder(positionCount)) {
for (int p = 0; p < positionCount; p++) {
int valueCount = block.getValueCount(p);
int start = block.getFirstValueIndex(p);
int end = start + valueCount;
boolean positionOpened = false;
boolean valuesAppended = false;
// A multi-value entry is only opened when the position has >1 value.
for (int i = start; i < end; i++) {
float value = evalValue(block, i);
if (positionOpened == false && valueCount > 1) {
builder.beginPositionEntry();
positionOpened = true;
}
builder.appendFloat(value);
valuesAppended = true;
}
if (valuesAppended == false) {
// Position held no values: carry the null through.
builder.appendNull();
} else if (positionOpened) {
builder.endPositionEntry();
}
}
return builder.build();
}
}
// Converts one double at {@code index} of the block via the shared conversion routine.
private float evalValue(DoubleBlock container, int index) {
    return ToDenseVector.fromDouble(container.getDouble(index));
}
@Override
public String toString() {
    // Evaluator name plus its single child; same rendering as before.
    return "ToDenseVectorFromDoubleEvaluator[d=" + d + "]";
}

@Override
public void close() {
    // Release the child evaluator; no exception is expected here.
    Releasables.closeExpectNoException(d);
}

@Override
public long baseRamBytesUsed() {
    // Shallow size of this object plus whatever the child accounts for.
    return BASE_RAM_BYTES_USED + d.baseRamBytesUsed();
}
public static
|
ToDenseVectorFromDoubleEvaluator
|
java
|
apache__kafka
|
connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorHeartbeatConnector.java
|
{
"start": 1513,
"end": 4752
}
|
class ____ extends SourceConnector {

    // Parsed connector configuration; assigned in start() (or via the testing constructor).
    private MirrorHeartbeatConfig config;
    // Schedules the one-off creation of the heartbeats topic.
    private Scheduler scheduler;
    // Admin client for the target cluster, used to create internal topics.
    private Admin targetAdminClient;

    /** No-arg constructor required for reflective instantiation by the framework. */
    public MirrorHeartbeatConnector() {
        // nop
    }

    // visible for testing
    MirrorHeartbeatConnector(MirrorHeartbeatConfig config) {
        this.config = config;
    }

    /** Parses the configuration, creates the target admin client and kicks off internal-topic creation. */
    @Override
    public void start(Map<String, String> props) {
        config = new MirrorHeartbeatConfig(props);
        targetAdminClient = config.forwardingAdmin(config.targetAdminConfig("heartbeats-target-admin"));
        scheduler = new Scheduler(getClass(), config.entityLabel(), config.adminTimeout());
        scheduler.execute(this::createInternalTopics, "creating internal topics");
    }

    /** Closes the scheduler and the target admin client, suppressing close failures. */
    @Override
    public void stop() {
        Utils.closeQuietly(scheduler, "scheduler");
        Utils.closeQuietly(targetAdminClient, "target admin client");
    }

    @Override
    public Class<? extends Task> taskClass() {
        return MirrorHeartbeatTask.class;
    }

    @Override
    public List<Map<String, String>> taskConfigs(int maxTasks) {
        // Setting `emit.heartbeats.enabled` to `false` surfaces here as a negative emission
        // interval, in which case no MirrorHeartbeatTask is created at all. Otherwise a
        // single task is sufficient.
        boolean heartbeatsDisabled = config.emitHeartbeatsInterval().isNegative();
        return heartbeatsDisabled ? List.of() : List.of(config.originalsStrings());
    }

    @Override
    public ConfigDef config() {
        return MirrorHeartbeatConfig.CONNECTOR_CONFIG_DEF;
    }

    @Override
    public String version() {
        return AppInfoParser.getVersion();
    }

    /**
     * Validates externally supplied offsets. Only the shape of the partitions/offsets is
     * checked — the task itself never reads these offsets back.
     */
    @Override
    public boolean alterOffsets(Map<String, String> config, Map<Map<String, ?>, Map<String, ?>> offsets) {
        for (Map.Entry<Map<String, ?>, Map<String, ?>> entry : offsets.entrySet()) {
            Map<String, ?> offset = entry.getValue();
            if (offset == null) {
                // Tombstones are always accepted: if there's garbage in the connector's offsets,
                // users must still be able to clean it up through the REST API.
                continue;
            }
            Map<String, ?> partition = entry.getKey();
            if (partition == null) {
                throw new ConnectException("Source partitions may not be null");
            }
            MirrorUtils.validateSourcePartitionString(partition, SOURCE_CLUSTER_ALIAS_KEY);
            MirrorUtils.validateSourcePartitionString(partition, TARGET_CLUSTER_ALIAS_KEY);
            MirrorUtils.validateSourceOffset(partition, offset, true);
        }
        return true;
    }

    // Creates the single-partition, compacted heartbeats topic on the target cluster.
    private void createInternalTopics() {
        MirrorUtils.createSinglePartitionCompactedTopic(
            config.heartbeatsTopic(),
            config.heartbeatsTopicReplicationFactor(),
            targetAdminClient
        );
    }
}
|
MirrorHeartbeatConnector
|
java
|
quarkusio__quarkus
|
extensions/agroal/deployment/src/test/java/io/quarkus/agroal/test/ConfigUrlMissingNamedDatasourceStaticInjectionTest.java
|
{
"start": 1980,
"end": 2190
}
|
class ____ {
// Injection point for the named Agroal datasource "users".
@Inject
@io.quarkus.agroal.DataSource("users")
DataSource ds;
// Touches the datasource by opening a connection. NOTE(review): given the enclosing
// test's name (ConfigUrlMissingNamedDatasourceStaticInjectionTest), this call is
// presumably expected to fail because the "users" datasource has no URL configured —
// confirm against the test's assertions, which are outside this view.
public void useDatasource() throws SQLException {
ds.getConnection();
}
}
}
|
MyBean
|
java
|
assertj__assertj-core
|
assertj-core/src/main/java/org/assertj/core/error/ShouldNotEndWithWhitespaces.java
|
{
"start": 808,
"end": 1411
}
|
class ____ extends BasicErrorMessageFactory {

  /**
   * Creates a new <code>{@link ShouldNotEndWithWhitespaces}</code>.
   *
   * @param actual the actual value in the failed assertion.
   * @return the created {@code ErrorMessageFactory}.
   */
  public static ErrorMessageFactory shouldNotEndWithWhitespaces(CharSequence actual) {
    return new ShouldNotEndWithWhitespaces(actual);
  }

  private ShouldNotEndWithWhitespaces(Object actual) {
    // Single compile-time constant; the rendered text is identical to the previously
    // concatenated fragments.
    super("%nExpecting string not to end with whitespaces but found one, string was:%n  %s", actual);
  }
}
|
ShouldNotEndWithWhitespaces
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/RestorableContextClassLoader.java
|
{
"start": 792,
"end": 2024
}
|
class ____ implements AutoCloseable {
// Thread whose context class loader is temporarily replaced.
private final Thread thread;
// The loader that was in place before the swap; put back in close().
private ClassLoader restore;
/**
 * Swaps the current thread's context class loader for the defining loader of
 * {@code fromClass}, restoring the previous loader when closed (intended for
 * try-with-resources use).
 *
 * @param fromClass class whose defining loader becomes the context class loader
 * @throws PrivilegedActionException if the privileged swap fails
 */
public RestorableContextClassLoader(Class<?> fromClass) throws PrivilegedActionException {
this(Thread.currentThread(), getClassLoader(fromClass));
}
// Reads the class's loader inside a privileged block so callers lacking the
// getClassLoader runtime permission can still use this utility.
private static ClassLoader getClassLoader(Class<?> fromClass) throws PrivilegedActionException {
return AccessController.doPrivileged((PrivilegedExceptionAction<ClassLoader>) fromClass::getClassLoader);
}
/**
 * Installs {@code setClassLoader} as the context class loader of {@code thread},
 * remembering the previous loader so {@link #close()} can restore it.
 *
 * @param thread the thread whose context class loader is replaced
 * @param setClassLoader the loader to install
 * @throws PrivilegedActionException if the privileged swap fails
 */
public RestorableContextClassLoader(Thread thread, ClassLoader setClassLoader) throws PrivilegedActionException {
this.thread = thread;
// Demand SpecialPermission from the caller before doing privileged work.
SpecialPermission.check();
AccessController.doPrivileged((PrivilegedExceptionAction<Void>) () -> {
restore = thread.getContextClassLoader();
thread.setContextClassLoader(setClassLoader);
return null;
});
}
/** Restores the context class loader captured at construction time. */
@Override
public void close() throws PrivilegedActionException {
SpecialPermission.check();
AccessController.doPrivileged((PrivilegedExceptionAction<Void>) () -> {
this.thread.setContextClassLoader(this.restore);
return null;
});
}
}
|
RestorableContextClassLoader
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/writeAsArray/WriteAsArray_long_private.java
|
{
"start": 204,
"end": 551
}
|
class ____ extends TestCase {
public void test_0 () throws Exception {
VO vo = new VO();
vo.setId(123);
vo.setName("wenshao");
String text = JSON.toJSONString(vo, SerializerFeature.BeanToArray);
Assert.assertEquals("[123,\"wenshao\"]", text);
}
private static
|
WriteAsArray_long_private
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/RedisPublisher.java
|
{
"start": 12157,
"end": 15204
}
|
interface ____ indicate that as error has occurred.
*
* @param t the error
*/
final void onError(Throwable t) {
    State state = state();
    // Guard with the same level as the log call below. The original guarded a
    // trace-level statement with isErrorEnabled(), which suppressed the message
    // unless BOTH error and trace levels were enabled.
    if (LOG.isTraceEnabled()) {
        LOG.trace("{} onError(): {}", state, t.toString(), t);
    }
    // Delegate error handling to the current state machine state.
    state.onError(this, t);
}
/**
 * Reads data from the input, if possible.
 *
 * @return the data that was read or {@code null}
 */
protected T read() {
return data.poll();
}
// Whether the downstream subscriber has outstanding (requested but unfilled) demand.
boolean hasDemand() {
return getDemand() > 0;
}
private long getDemand() {
return DEMAND.get(this);
}
// Atomic state transition; returns false if another thread changed state first.
boolean changeState(State oldState, State newState) {
return STATE.compareAndSet(this, oldState, newState);
}
// Leaves READING for DEMAND or NO_DEMAND depending on the remaining demand.
boolean afterRead() {
return changeState(State.READING, getDemand() > 0 ? State.DEMAND : State.NO_DEMAND);
}
// Terminal transition out of READING.
public boolean complete() {
return changeState(State.READING, State.COMPLETED);
}
// Ensures the underlying Redis command is dispatched at most once (state machine
// in COMMAND_DISPATCH decides whether dispatchCommand() actually runs).
void checkCommandDispatch() {
COMMAND_DISPATCH.get(this).dispatch(this);
}
@SuppressWarnings({ "unchecked", "rawtypes" })
void dispatchCommand() {
connection.dispatch((RedisCommand) subscriptionCommand);
}
// Tries to refill the buffer when empty, then signals availability if anything arrived.
void checkOnDataAvailable() {
if (data.isEmpty()) {
potentiallyReadMore();
}
if (!data.isEmpty()) {
onDataAvailable();
}
}
void potentiallyReadMore() {
/*
 * Overflow guard: getDemand() can be Long.MAX_VALUE (MonoNext.NextSubscriber#request(long)
 * requests Long.MAX_VALUE for unbounded demand), so "(getDemand() + 1) > data.size()"
 * could overflow. "getDemand() > data.size() - 1" is the equivalent, overflow-safe form.
 */
if (getDemand() > data.size() - 1) {
state().readData(this);
}
}
/**
 * Reads and publishes data from the input. Continues until either there is no more demand, or until there is no more
 * data to be read.
 */
void readAndPublish() {
while (hasDemand()) {
T data = read();
if (data == null) {
return;
}
// Consume one unit of demand per emitted element.
DEMAND.decrementAndGet(this);
this.subscriber.onNext(data);
}
}
RedisPublisher.State state() {
return STATE.get(this);
}
}
/**
* Represents a state for command dispatch of the {@link Subscription}. The following figure indicates the two different
* states that exist, and the relationships between them.
*
* <pre>
* UNDISPATCHED
* |
* v
* DISPATCHED
* </pre>
*
* Refer to the individual states for more information.
*/
private
|
to
|
java
|
playframework__playframework
|
core/play/src/main/java/play/inject/Injector.java
|
{
"start": 868,
"end": 961
}
|
class ____ the injector.
*
* @param <T> the type of the instance
* @param clazz The
|
from
|
java
|
hibernate__hibernate-orm
|
hibernate-testing/src/main/java/org/hibernate/testing/orm/junit/DomainModelProducer.java
|
{
"start": 481,
"end": 621
}
|
interface ____ {
/// Produce the domain model.
///
/// @param serviceRegistry the service registry to build the model against
/// @return the produced metadata (domain model)
MetadataImplementor produceModel(StandardServiceRegistry serviceRegistry);
}
|
DomainModelProducer
|
java
|
spring-projects__spring-framework
|
spring-beans/src/main/java/org/springframework/beans/factory/BeanFactoryInitializer.java
|
{
"start": 686,
"end": 1349
}
|
interface ____ initializing a Spring {@link ListableBeanFactory}
* prior to entering the singleton pre-instantiation phase. Can be used to
* trigger early initialization of specific beans before regular singletons.
*
* <p>Can be programmatically applied to a {@code ListableBeanFactory} instance.
* In an {@code ApplicationContext}, beans of type {@code BeanFactoryInitializer}
* will be autodetected and automatically applied to the underlying bean factory.
*
* @author Juergen Hoeller
* @since 6.2
* @param <F> the bean factory type
* @see org.springframework.beans.factory.config.ConfigurableListableBeanFactory#preInstantiateSingletons()
*/
public
|
for
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/bug/Bug_for_issue_479.java
|
{
"start": 137,
"end": 1166
}
|
class ____ extends TestCase {
public void test_for_issue_blankinput() throws Exception {
VO vo = JSON.parseObject("", VO.class);
Assert.assertNull(vo);
}
public void test_for_issue() throws Exception {
VO vo = JSON.parseObject("{\"doubleParam\":\"\",\"floatParam\":\"\",\"intParam\":\"\",\"longParam\":\"\"}",
VO.class);
Assert.assertTrue(vo.doubleParam == 0);
Assert.assertTrue(vo.floatParam == 0);
Assert.assertTrue(vo.intParam == 0);
Assert.assertTrue(vo.longParam == 0);
}
public void test_for_issue_private() throws Exception {
V1 vo = JSON.parseObject("{\"doubleParam\":\"\",\"floatParam\":\"\",\"intParam\":\"\",\"longParam\":\"\"}",
V1.class);
Assert.assertTrue(vo.doubleParam == 0);
Assert.assertTrue(vo.floatParam == 0);
Assert.assertTrue(vo.intParam == 0);
Assert.assertTrue(vo.longParam == 0);
}
public static
|
Bug_for_issue_479
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.