language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__camel | components/camel-quartz/src/test/java/org/apache/camel/routepolicy/quartz/SimpleScheduledCombinedRoutePolicyTest.java | {
"start": 1318,
"end": 3292
} | class ____ extends CamelTestSupport {
@Override
public boolean isUseRouteBuilder() {
return false;
}
@Test
public void testScheduledStartAndStopRoutePolicy() throws Exception {
MockEndpoint success = context.getEndpoint("mock:success", MockEndpoint.class);
success.expectedMessageCount(1);
context.getComponent("direct", DirectComponent.class).setBlock(false);
context.getComponent("quartz", QuartzComponent.class)
.setPropertiesFile("org/apache/camel/routepolicy/quartz/myquartz.properties");
context.addRoutes(new RouteBuilder() {
public void configure() {
SimpleScheduledRoutePolicy policy = new SimpleScheduledRoutePolicy();
long startTime = System.currentTimeMillis() + 3000L;
long stopTime = System.currentTimeMillis() + 8000L;
policy.setRouteStartDate(new Date(startTime));
policy.setRouteStartRepeatCount(1);
policy.setRouteStartRepeatInterval(3000);
policy.setRouteStopDate(new Date(stopTime));
policy.setRouteStopRepeatCount(1);
policy.setRouteStopRepeatInterval(3000);
from("direct:start")
.routeId("test")
.routePolicy(policy)
.to("mock:success");
}
});
context.start();
Awaitility.await()
.untilAsserted(() -> assertSame(ServiceStatus.Started, context.getRouteController().getRouteStatus("test")));
template.sendBody("direct:start", "Ready or not, Here, I come");
Awaitility.await()
.untilAsserted(() -> assertSame(ServiceStatus.Stopped, context.getRouteController().getRouteStatus("test")));
context.getComponent("quartz", QuartzComponent.class).stop();
success.assertIsSatisfied();
}
}
| SimpleScheduledCombinedRoutePolicyTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/ImmutableCheckerTest.java | {
"start": 67703,
"end": 68104
} | class ____ {
Object o = new @A Object();
}
""")
.doTest();
}
@Ignore("b/77333859")
@Test
public void immutableInterfaceImplementationCapturesMutableState() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import com.google.errorprone.annotations.Immutable;
@Immutable
| Test |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/LifecycleMethodExecutionExceptionHandlerTests.java | {
"start": 13828,
"end": 13932
} | class ____ extends BaseTestCase {
}
@ExtendWith(SwallowExceptionHandler.class)
static | ConvertingTestCase |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/commons/function/TryTests.java | {
"start": 718,
"end": 4491
} | class ____ {
@Test
void successfulTriesCanBeTransformed() throws Exception {
var success = Try.success("foo");
assertThat(success.get()).isEqualTo("foo");
assertThat(success.getNonNull()).isEqualTo("foo");
assertThat(success.getOrThrow(RuntimeException::new)).isEqualTo("foo");
assertThat(success.getNonNullOrThrow(RuntimeException::new)).isEqualTo("foo");
assertThat(success.toOptional()).contains("foo");
assertThat(success.andThen(v -> {
assertThat(v).isEqualTo("foo");
return Try.success("bar");
}).get()).isEqualTo("bar");
assertThat(success.andThenTry(v -> {
assertThat(v).isEqualTo("foo");
return "bar";
}).get()).isEqualTo("bar");
assertThat(success.orElse(() -> fail("should not be called"))).isSameAs(success);
assertThat(success.orElseTry(() -> fail("should not be called"))).isSameAs(success);
var value = new AtomicReference<String>();
assertThat(success.ifSuccess(value::set)).isSameAs(success);
assertThat(value.get()).isEqualTo("foo");
assertThat(success.ifFailure(cause -> fail("should not be called"))).isSameAs(success);
}
@Test
void failedTriesCanBeTransformed() throws Exception {
var cause = new JUnitException("foo");
var failure = Try.failure(cause);
assertThat(assertThrows(JUnitException.class, failure::get)).isSameAs(cause);
assertThat(assertThrows(RuntimeException.class, () -> failure.getOrThrow(RuntimeException::new))).isInstanceOf(
RuntimeException.class).hasCause(cause);
assertThat(failure.toOptional()).isEmpty();
assertThat(failure.andThen(v -> fail("should not be called"))).isSameAs(failure);
assertThat(failure.andThenTry(v -> fail("should not be called"))).isSameAs(failure);
assertThat(failure.orElse(() -> Try.success("bar")).get()).isEqualTo("bar");
assertThat(failure.orElseTry(() -> "bar").get()).isEqualTo("bar");
assertThat(failure.ifSuccess(v -> fail("should not be called"))).isSameAs(failure);
var exception = new AtomicReference<Exception>();
assertThat(failure.ifFailure(exception::set)).isSameAs(failure);
assertThat(exception.get()).isSameAs(cause);
}
@SuppressWarnings("DataFlowIssue")
@Test
void successfulTriesCanStoreNull() throws Exception {
var success = Try.success(null);
assertThat(success.get()).isNull();
assertThrows(JUnitException.class, success::getNonNull);
assertThat(success.getOrThrow(RuntimeException::new)).isNull();
assertThrows(RuntimeException.class, () -> success.getNonNullOrThrow(RuntimeException::new));
assertThat(success.toOptional()).isEmpty();
}
@Test
void triesWithSameContentAreEqual() {
var cause = new Exception();
Callable<Object> failingCallable = () -> {
throw cause;
};
var success = Try.call(() -> "foo");
assertThat(success).isEqualTo(success).hasSameHashCodeAs(success);
assertThat(success).isEqualTo(Try.success("foo"));
assertThat(success).isNotEqualTo(Try.failure(cause));
var failure = Try.call(failingCallable);
assertThat(failure).isEqualTo(failure).hasSameHashCodeAs(failure);
assertThat(failure).isNotEqualTo(Try.success("foo"));
assertThat(failure).isEqualTo(Try.failure(cause));
}
@SuppressWarnings("DataFlowIssue")
@Test
void methodPreconditionsAreChecked() {
assertThrows(JUnitException.class, () -> Try.call(null));
var success = Try.success("foo");
assertThrows(JUnitException.class, () -> success.andThen(null));
assertThrows(JUnitException.class, () -> success.andThenTry(null));
assertThrows(JUnitException.class, () -> success.ifSuccess(null));
var failure = Try.failure(new Exception());
assertThrows(JUnitException.class, () -> failure.orElse(null));
assertThrows(JUnitException.class, () -> failure.orElseTry(null));
assertThrows(JUnitException.class, () -> failure.ifFailure(null));
}
}
| TryTests |
java | google__error-prone | check_api/src/main/java/com/google/errorprone/util/Comments.java | {
"start": 13643,
"end": 14569
} | class ____ {
private final LineMap lineMap;
private int tokensOnCurrentLine = 0;
private int currentLineNumber = -1;
private boolean previousLineEmpty = true;
TokenTracker(LineMap lineMap) {
this.lineMap = lineMap;
}
void advance(ErrorProneToken token) {
int line = lineMap.getLineNumber(token.pos());
if (line != currentLineNumber) {
currentLineNumber = line;
previousLineEmpty = tokensOnCurrentLine == 0;
tokensOnCurrentLine = 0;
} else {
tokensOnCurrentLine++;
}
}
boolean isCommentOnPreviousLine(ErrorProneComment c) {
int tokenLine = lineMap.getLineNumber(c.getSourcePos(0));
return tokenLine == currentLineNumber - 1;
}
boolean atStartOfLine() {
return tokensOnCurrentLine == 0;
}
boolean wasPreviousLineEmpty() {
return previousLineEmpty;
}
}
/**
* This | TokenTracker |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/admin/internals/DeleteShareGroupOffsetsHandler.java | {
"start": 1856,
"end": 7712
} | class ____ extends AdminApiHandler.Batched<CoordinatorKey, Map<String, ApiException>> {
private final CoordinatorKey groupId;
private final Logger log;
private final Set<String> topics;
private final CoordinatorStrategy lookupStrategy;
public DeleteShareGroupOffsetsHandler(String groupId, Set<String> topics, LogContext logContext) {
this.groupId = CoordinatorKey.byGroupId(groupId);
this.topics = topics;
this.log = logContext.logger(DeleteShareGroupOffsetsHandler.class);
this.lookupStrategy = new CoordinatorStrategy(FindCoordinatorRequest.CoordinatorType.GROUP, logContext);
}
@Override
public String apiName() {
return "deleteShareGroupOffsets";
}
@Override
public AdminApiLookupStrategy<CoordinatorKey> lookupStrategy() {
return lookupStrategy;
}
public static AdminApiFuture.SimpleAdminApiFuture<CoordinatorKey, Map<String, ApiException>> newFuture(String groupId) {
return AdminApiFuture.forKeys(Collections.singleton(CoordinatorKey.byGroupId(groupId)));
}
private void validateKeys(Set<CoordinatorKey> groupIds) {
if (!groupIds.equals(Collections.singleton(groupId))) {
throw new IllegalArgumentException("Received unexpected group ids " + groupIds +
" (expected only " + Collections.singleton(groupId) + ")");
}
}
@Override
DeleteShareGroupOffsetsRequest.Builder buildBatchedRequest(int brokerId, Set<CoordinatorKey> groupIds) {
validateKeys(groupIds);
final List<DeleteShareGroupOffsetsRequestData.DeleteShareGroupOffsetsRequestTopic> requestTopics =
new ArrayList<>();
topics.forEach(topic -> requestTopics.add(
new DeleteShareGroupOffsetsRequestData.DeleteShareGroupOffsetsRequestTopic()
.setTopicName(topic)
));
return new DeleteShareGroupOffsetsRequest.Builder(
new DeleteShareGroupOffsetsRequestData()
.setGroupId(groupId.idValue)
.setTopics(requestTopics)
);
}
@Override
public ApiResult<CoordinatorKey, Map<String, ApiException>> handleResponse(
Node coordinator,
Set<CoordinatorKey> groupIds,
AbstractResponse abstractResponse
) {
validateKeys(groupIds);
final DeleteShareGroupOffsetsResponse response = (DeleteShareGroupOffsetsResponse) abstractResponse;
final Errors groupError = Errors.forCode(response.data().errorCode());
final String groupErrorMessage = response.data().errorMessage();
if (groupError != Errors.NONE) {
final Set<CoordinatorKey> groupsToUnmap = new HashSet<>();
final Map<CoordinatorKey, Throwable> groupsFailed = new HashMap<>();
handleGroupError(groupId, groupError, groupErrorMessage, groupsFailed, groupsToUnmap);
return new ApiResult<>(Collections.emptyMap(), groupsFailed, new ArrayList<>(groupsToUnmap));
} else {
final Map<String, ApiException> topicResults = new HashMap<>();
response.data().responses().forEach(topic -> {
if (topic.errorCode() != Errors.NONE.code()) {
final Errors topicError = Errors.forCode(topic.errorCode());
final String topicErrorMessage = topic.errorMessage();
log.debug("DeleteShareGroupOffsets request for group id {} and topic {} failed and returned error {}. {}",
groupId.idValue, topic.topicName(), topicError, topicErrorMessage);
}
topicResults.put(
topic.topicName(),
Errors.forCode(topic.errorCode()).exception(topic.errorMessage())
);
});
return ApiResult.completed(groupId, topicResults);
}
}
private void handleGroupError(
CoordinatorKey groupId,
Errors error,
String errorMessage,
Map<CoordinatorKey, Throwable> failed,
Set<CoordinatorKey> groupsToUnmap
) {
switch (error) {
case COORDINATOR_LOAD_IN_PROGRESS:
case REBALANCE_IN_PROGRESS:
// If the coordinator is in the middle of loading, then we just need to retry
log.debug("DeleteShareGroupOffsets request for group id {} failed because the coordinator" +
" is still in the process of loading state. Will retry. {}", groupId.idValue, errorMessage);
break;
case COORDINATOR_NOT_AVAILABLE:
case NOT_COORDINATOR:
// If the coordinator is unavailable or there was a coordinator change, then we unmap
// the key so that we retry the `FindCoordinator` request
log.debug("DeleteShareGroupOffsets request for group id {} returned error {}. Will rediscover the coordinator and retry. {}",
groupId.idValue, error, errorMessage);
groupsToUnmap.add(groupId);
break;
case INVALID_GROUP_ID:
case GROUP_ID_NOT_FOUND:
case NON_EMPTY_GROUP:
case INVALID_REQUEST:
case UNKNOWN_SERVER_ERROR:
case KAFKA_STORAGE_ERROR:
case GROUP_AUTHORIZATION_FAILED:
log.debug("DeleteShareGroupOffsets request for group id {} failed due to error {}. {}", groupId.idValue, error, errorMessage);
failed.put(groupId, error.exception(errorMessage));
break;
default:
log.error("DeleteShareGroupOffsets request for group id {} failed due to unexpected error {}. {}", groupId.idValue, error, errorMessage);
failed.put(groupId, error.exception(errorMessage));
}
}
} | DeleteShareGroupOffsetsHandler |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Issue939.java | {
"start": 155,
"end": 879
} | class ____ extends TestCase {
public void test_for_issue_false() throws Exception {
String jsonString = "" +
"{" +
" \"age\": 25," +
" \"is_stop\":false/*comment*/" +
"}";
Model testUser = JSON.parseObject(jsonString, Model.class);
System.out.println(testUser);
}
public void test_for_issue_true() throws Exception {
String jsonString = "" +
"{" +
" \"age\": 25," +
" \"is_stop\":true/*comment*/" +
"}";
Model testUser = JSON.parseObject(jsonString, Model.class);
System.out.println(testUser);
}
public static | Issue939 |
java | dropwizard__dropwizard | dropwizard-http2/src/test/java/io/dropwizard/http2/FakeApplication.java | {
"start": 332,
"end": 946
} | class ____ extends Application<Configuration> {
public static final String HELLO_WORLD = "{\"hello\": \"World\"}";
@Override
public void run(Configuration configuration, Environment environment) throws Exception {
environment.jersey().register(new FakeResource());
environment.healthChecks().register("fake-health-check", new HealthCheck() {
@Override
protected Result check() throws Exception {
return Result.healthy();
}
});
}
@Path("/test")
@Produces(MediaType.APPLICATION_JSON)
public static | FakeApplication |
java | apache__flink | flink-datastream-api/src/main/java/org/apache/flink/datastream/api/stream/KeyedPartitionStream.java | {
"start": 10394,
"end": 10565
} | class ____ a combination of two {@link KeyedPartitionStream}. It will be used as
* the return value of operation with two output.
*/
@Experimental
| represents |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated-src/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InBooleanEvaluator.java | {
"start": 7744,
"end": 8745
} | class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final EvalOperator.ExpressionEvaluator.Factory lhs;
private final EvalOperator.ExpressionEvaluator.Factory[] rhs;
Factory(Source source, EvalOperator.ExpressionEvaluator.Factory lhs, EvalOperator.ExpressionEvaluator.Factory[] rhs) {
this.source = source;
this.lhs = lhs;
this.rhs = rhs;
}
@Override
public InBooleanEvaluator get(DriverContext context) {
EvalOperator.ExpressionEvaluator[] rhs = Arrays.stream(this.rhs)
.map(a -> a.get(context))
.toArray(EvalOperator.ExpressionEvaluator[]::new);
return new InBooleanEvaluator(source, lhs.get(context), rhs, context);
}
@Override
public String toString() {
return "InBooleanEvaluator[" + "lhs=" + lhs + ", rhs=" + Arrays.toString(rhs) + "]";
}
}
}
| Factory |
java | apache__rocketmq | remoting/src/main/java/org/apache/rocketmq/remoting/RPCHook.java | {
"start": 912,
"end": 1156
} | interface ____ {
void doBeforeRequest(final String remoteAddr, final RemotingCommand request);
void doAfterResponse(final String remoteAddr, final RemotingCommand request,
final RemotingCommand response);
}
| RPCHook |
java | dropwizard__dropwizard | dropwizard-auth/src/main/java/io/dropwizard/auth/JettyAuthenticationUtil.java | {
"start": 1771,
"end": 2544
} | class ____ implements UserIdentity {
private final Subject subject;
private final SecurityContext securityContext;
public DropwizardJettyUserIdentity(SecurityContext securityContext) {
this.securityContext = securityContext;
this.subject = new Subject(true, Set.of(securityContext.getUserPrincipal()), Set.of(), Set.of());
}
@Override
public Subject getSubject() {
return subject;
}
@Override
public Principal getUserPrincipal() {
return securityContext.getUserPrincipal();
}
@Override
public boolean isUserInRole(String role) {
return securityContext.isUserInRole(role);
}
}
}
| DropwizardJettyUserIdentity |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java | {
"start": 4483,
"end": 4972
} | enum ____ {
INITIALIZE_CONTAINER(0),
LAUNCH_CONTAINER(1),
SIGNAL_CONTAINER(2),
DELETE_AS_USER(3),
LAUNCH_DOCKER_CONTAINER(4),
LIST_AS_USER(5),
SYNC_YARN_SYSFS(6);
private int value;
RunAsUserCommand(int value) {
this.value = value;
}
public int getValue() {
return value;
}
}
/**
* Result codes returned from the C container-executor.
* These must match the values in container-executor.h.
*/
public | RunAsUserCommand |
java | google__guava | guava/src/com/google/common/collect/ImmutableMap.java | {
"start": 27780,
"end": 28368
} | class ____<K, V> extends ImmutableMap<K, V> {
abstract UnmodifiableIterator<Entry<K, V>> entryIterator();
@GwtIncompatible("Spliterator")
Spliterator<Entry<K, V>> entrySpliterator() {
return Spliterators.spliterator(
entryIterator(),
size(),
Spliterator.DISTINCT | Spliterator.NONNULL | Spliterator.IMMUTABLE | Spliterator.ORDERED);
}
@Override
ImmutableSet<K> createKeySet() {
return new ImmutableMapKeySet<>(this);
}
@Override
ImmutableSet<Entry<K, V>> createEntrySet() {
final | IteratorBasedImmutableMap |
java | apache__camel | components/camel-opentelemetry/src/main/java/org/apache/camel/opentelemetry/OpenTelemetryTracingStrategy.java | {
"start": 1418,
"end": 2475
} | class ____ implements InterceptStrategy {
private static final String UNNAMED = "unnamed";
private final OpenTelemetryTracer tracer;
private boolean propagateContext;
public OpenTelemetryTracingStrategy(OpenTelemetryTracer tracer) {
this.tracer = tracer;
}
@Override
public Processor wrapProcessorInInterceptors(
CamelContext camelContext,
NamedNode processorDefinition, Processor target, Processor nextTarget)
throws Exception {
if (shouldTrace(processorDefinition)) {
return new PropagateContextAndCreateSpan(processorDefinition, target);
} else if (isPropagateContext()) {
return new PropagateContext(target);
} else {
return new DelegateAsyncProcessor(target);
}
}
public boolean isPropagateContext() {
return propagateContext;
}
public void setPropagateContext(boolean propagateContext) {
this.propagateContext = propagateContext;
}
private | OpenTelemetryTracingStrategy |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/factory/config/YamlMapFactoryBean.java | {
"start": 2031,
"end": 3854
} | class ____ extends YamlProcessor implements FactoryBean<Map<String, Object>>, InitializingBean {
private boolean singleton = true;
private @Nullable Map<String, Object> map;
/**
* Set if a singleton should be created, or a new object on each request
* otherwise. Default is {@code true} (a singleton).
*/
public void setSingleton(boolean singleton) {
this.singleton = singleton;
}
@Override
public boolean isSingleton() {
return this.singleton;
}
@Override
public void afterPropertiesSet() {
if (isSingleton()) {
this.map = createMap();
}
}
@Override
public @Nullable Map<String, Object> getObject() {
return (this.map != null ? this.map : createMap());
}
@Override
public Class<?> getObjectType() {
return Map.class;
}
/**
* Template method that subclasses may override to construct the object
* returned by this factory.
* <p>Invoked lazily the first time {@link #getObject()} is invoked in
* case of a shared singleton; else, on each {@link #getObject()} call.
* <p>The default implementation returns the merged {@code Map} instance.
* @return the object returned by this factory
* @see #process(MatchCallback)
*/
protected Map<String, Object> createMap() {
Map<String, Object> result = new LinkedHashMap<>();
process((properties, map) -> merge(result, map));
return result;
}
@SuppressWarnings({"rawtypes", "unchecked"})
private void merge(Map<String, Object> output, Map<String, Object> map) {
map.forEach((key, value) -> {
Object existing = output.get(key);
if (value instanceof Map valueMap && existing instanceof Map existingMap) {
Map<String, Object> result = new LinkedHashMap<>(existingMap);
merge(result, valueMap);
output.put(key, result);
}
else {
output.put(key, value);
}
});
}
}
| YamlMapFactoryBean |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineTransportAction.java | {
"start": 1391,
"end": 3146
} | class ____ extends AcknowledgedTransportMasterNodeAction<DeletePipelineRequest> {
public static final ActionType<AcknowledgedResponse> TYPE = new ActionType<>("cluster:admin/ingest/pipeline/delete");
private final IngestService ingestService;
private final ProjectResolver projectResolver;
@Inject
public DeletePipelineTransportAction(
ThreadPool threadPool,
IngestService ingestService,
TransportService transportService,
ActionFilters actionFilters,
ProjectResolver projectResolver
) {
super(
TYPE.name(),
transportService,
ingestService.getClusterService(),
threadPool,
actionFilters,
DeletePipelineRequest::new,
EsExecutors.DIRECT_EXECUTOR_SERVICE
);
this.ingestService = ingestService;
this.projectResolver = projectResolver;
}
@Override
protected void masterOperation(
Task task,
DeletePipelineRequest request,
ClusterState state,
ActionListener<AcknowledgedResponse> listener
) throws Exception {
ingestService.delete(projectResolver.getProjectId(), request, listener);
}
@Override
protected ClusterBlockException checkBlock(DeletePipelineRequest request, ClusterState state) {
return state.blocks().globalBlockedException(projectResolver.getProjectId(), ClusterBlockLevel.METADATA_WRITE);
}
@Override
public Optional<String> reservedStateHandlerName() {
return Optional.of(ReservedPipelineAction.NAME);
}
@Override
public Set<String> modifiedKeys(DeletePipelineRequest request) {
return Set.of(request.getId());
}
}
| DeletePipelineTransportAction |
java | spring-projects__spring-boot | smoke-test/spring-boot-smoke-test-data-mongodb/src/dockerTest/java/smoketest/data/mongodb/SecureMongoContainer.java | {
"start": 983,
"end": 1693
} | class ____ extends MongoDBContainer {
SecureMongoContainer(DockerImageName dockerImageName) {
super(dockerImageName);
}
@Override
public void configure() {
// test-server.pem is a single PEM file containing server certificate and key
// content combined
withCopyFileToContainer(MountableFile.forClasspathResource("/ssl/test-server.pem"), "/ssl/server.pem");
withCopyFileToContainer(MountableFile.forClasspathResource("/ssl/test-ca.crt"), "/ssl/ca.crt");
withCommand("mongod --tlsMode requireTLS --tlsCertificateKeyFile /ssl/server.pem --tlsCAFile /ssl/ca.crt");
}
@Override
protected void containerIsStarted(InspectContainerResponse containerInfo, boolean reused) {
}
}
| SecureMongoContainer |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/context/TestPropertySource.java | {
"start": 1155,
"end": 2663
} | class ____ configure the {@link #locations} of properties files and inlined
* {@link #properties} to be added to the {@code Environment}'s set of
* {@code PropertySources} for an
* {@link org.springframework.context.ApplicationContext ApplicationContext}
* for integration tests.
*
* <h3>Precedence</h3>
* <p>Test property sources have higher precedence than those loaded from the
* operating system's environment or Java system properties as well as property
* sources added by the application declaratively via
* {@link org.springframework.context.annotation.PropertySource @PropertySource}
* or programmatically (for example, via an
* {@link org.springframework.context.ApplicationContextInitializer ApplicationContextInitializer}
* or some other means). Thus, test property sources can be used to selectively
* override properties defined in system and application property sources.
* Furthermore, inlined {@link #properties} have higher precedence than
* properties loaded from resource {@link #locations}. Note, however, that
* properties registered via {@link DynamicPropertySource @DynamicPropertySource}
* have higher precedence than those loaded via {@code @TestPropertySource}.
*
* <h3>Default Properties File Detection</h3>
* <p>If {@code @TestPropertySource} is declared as an <em>empty</em> annotation
* (i.e., without explicit values for {@link #locations} or {@link #properties}),
* an attempt will be made to detect a <em>default</em> properties file relative
* to the | to |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Bug_for_DiffType.java | {
"start": 502,
"end": 756
} | class ____ {
public String value;
public long getValue() {
return Long.parseLong(value);
}
public void setValue(long value) {
this.value = Long.toString(value);
}
}
}
| Model |
java | elastic__elasticsearch | x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java | {
"start": 181797,
"end": 195249
} | class ____ {
private static final AuthenticationContextSerializer authenticationContextSerializer = new AuthenticationContextSerializer();
public static Authentication createApiKeyAuthentication(
ApiKeyService apiKeyService,
Authentication authentication,
Set<RoleDescriptor> userRoles,
List<RoleDescriptor> keyRoles,
TransportVersion version
) throws Exception {
XContentBuilder keyDocSource = ApiKeyService.newDocument(
getFastStoredHashAlgoForTests().hash(new SecureString(randomAlphaOfLength(16).toCharArray())),
"test",
authentication,
userRoles,
Instant.now(),
Instant.now().plus(Duration.ofSeconds(3600)),
keyRoles,
ApiKey.Type.REST,
ApiKey.CURRENT_API_KEY_VERSION,
randomBoolean() ? null : Map.of(randomAlphaOfLengthBetween(3, 8), randomAlphaOfLengthBetween(3, 8)),
null
);
final ApiKeyDoc apiKeyDoc = ApiKeyDoc.fromXContent(
XContentHelper.createParser(
NamedXContentRegistry.EMPTY,
LoggingDeprecationHandler.INSTANCE,
BytesReference.bytes(keyDocSource),
XContentType.JSON
)
);
PlainActionFuture<AuthenticationResult<User>> authenticationResultFuture = new PlainActionFuture<>();
apiKeyService.completeApiKeyAuthentication(
apiKeyDoc,
new ApiKeyService.ApiKeyCredentials("id", new SecureString(randomAlphaOfLength(16).toCharArray()), ApiKey.Type.REST),
Clock.systemUTC(),
authenticationResultFuture
);
AuthenticationResult<User> authenticationResult = authenticationResultFuture.get();
if (randomBoolean()) {
// maybe remove realm name to simulate old API Key authentication
assert authenticationResult.getStatus() == AuthenticationResult.Status.SUCCESS;
Map<String, Object> authenticationResultMetadata = new HashMap<>(authenticationResult.getMetadata());
authenticationResultMetadata.remove(AuthenticationField.API_KEY_CREATOR_REALM_NAME);
authenticationResult = AuthenticationResult.success(authenticationResult.getValue(), authenticationResultMetadata);
}
if (randomBoolean()) {
// simulate authentication with nameless API Key, see https://github.com/elastic/elasticsearch/issues/59484
assert authenticationResult.getStatus() == AuthenticationResult.Status.SUCCESS;
Map<String, Object> authenticationResultMetadata = new HashMap<>(authenticationResult.getMetadata());
authenticationResultMetadata.put(AuthenticationField.API_KEY_NAME_KEY, null);
authenticationResult = AuthenticationResult.success(authenticationResult.getValue(), authenticationResultMetadata);
}
final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
final SecurityContext securityContext = new SecurityContext(Settings.EMPTY, threadContext);
authenticationContextSerializer.writeToContext(
Authentication.newApiKeyAuthentication(authenticationResult, "node01"),
threadContext
);
return safeAwait(
l -> securityContext.executeAfterRewritingAuthentication(
c -> ActionListener.completeWith(l, () -> authenticationContextSerializer.readFromContext(threadContext)),
version
)
);
}
public static Authentication createApiKeyAuthentication(ApiKeyService apiKeyService, Authentication authentication)
throws Exception {
return createApiKeyAuthentication(
apiKeyService,
authentication,
Collections.singleton(new RoleDescriptor("user_role_" + randomAlphaOfLength(4), new String[] { "manage" }, null, null)),
null,
TransportVersion.current()
);
}
}
private ApiKeyService createApiKeyService() {
final Settings settings = Settings.builder().put(XPackSettings.API_KEY_SERVICE_ENABLED_SETTING.getKey(), true).build();
return createApiKeyService(settings);
}
private ApiKeyService createApiKeyService(Settings baseSettings) {
return createApiKeyService(baseSettings, MeterRegistry.NOOP);
}
private ApiKeyService createApiKeyService(Settings baseSettings, MeterRegistry meterRegistry) {
final Settings settings = Settings.builder()
.put(XPackSettings.API_KEY_SERVICE_ENABLED_SETTING.getKey(), true)
.put(baseSettings)
.build();
final ClusterSettings clusterSettings = new ClusterSettings(
settings,
Sets.union(
ClusterSettings.BUILT_IN_CLUSTER_SETTINGS,
Set.of(ApiKeyService.DELETE_RETENTION_PERIOD, ApiKeyService.DELETE_INTERVAL)
)
);
final ApiKeyService service = new ApiKeyService(
settings,
clock,
client,
securityIndex,
ClusterServiceUtils.createClusterService(threadPool, clusterSettings),
cacheInvalidatorRegistry,
threadPool,
meterRegistry,
mock(FeatureService.class)
);
if ("0s".equals(settings.get(ApiKeyService.CACHE_TTL_SETTING.getKey()))) {
verify(cacheInvalidatorRegistry, never()).registerCacheInvalidator(eq("api_key"), any());
} else {
verify(cacheInvalidatorRegistry).registerCacheInvalidator(eq("api_key"), any());
}
return service;
}
private Map<String, Object> buildApiKeySourceDoc(char[] hash) {
Map<String, Object> sourceMap = new HashMap<>();
sourceMap.put("doc_type", "api_key");
if (randomBoolean()) {
sourceMap.put("type", randomFrom(ApiKey.Type.values()).value());
}
sourceMap.put("creation_time", Clock.systemUTC().instant().toEpochMilli());
sourceMap.put("expiration_time", -1);
sourceMap.put("api_key_hash", new String(hash));
sourceMap.put("name", randomAlphaOfLength(12));
sourceMap.put("version", 0);
sourceMap.put("role_descriptors", Collections.singletonMap("a role", Collections.singletonMap("cluster", List.of("all"))));
sourceMap.put(
"limited_by_role_descriptors",
Collections.singletonMap("limited role", Collections.singletonMap("cluster", List.of("all")))
);
Map<String, Object> creatorMap = new HashMap<>();
creatorMap.put("principal", "test_user");
creatorMap.put("full_name", "test user");
creatorMap.put("email", "test@user.com");
creatorMap.put("metadata", Collections.emptyMap());
creatorMap.put("realm", randomAlphaOfLength(4));
if (randomBoolean()) {
creatorMap.put("realm_type", randomAlphaOfLength(4));
}
sourceMap.put("creator", creatorMap);
sourceMap.put("api_key_invalidated", false);
// noinspection unchecked
sourceMap.put("metadata_flattened", ApiKeyTests.randomMetadata());
return sourceMap;
}
private void mockSourceDocument(String id, Map<String, Object> sourceMap) throws IOException {
try (XContentBuilder builder = JsonXContent.contentBuilder()) {
builder.map(sourceMap);
SecurityMocks.mockGetRequest(client, id, BytesReference.bytes(builder));
}
}
private ApiKeyDoc buildApiKeyDoc(char[] hash, long expirationTime, boolean invalidated, long invalidation) throws IOException {
return buildApiKeyDoc(hash, expirationTime, invalidated, invalidation, randomAlphaOfLength(12));
}
private ApiKeyDoc buildApiKeyDoc(char[] hash, long expirationTime, boolean invalidated, long invalidation, String name)
throws IOException {
return buildApiKeyDoc(hash, expirationTime, invalidated, invalidation, name, 0);
}
private ApiKeyDoc buildApiKeyDoc(char[] hash, long expirationTime, boolean invalidated, long invalidation, String name, int version)
throws IOException {
final BytesReference metadataBytes = XContentTestUtils.convertToXContent(ApiKeyTests.randomMetadata(), XContentType.JSON);
return new ApiKeyDoc(
"api_key",
randomBoolean() ? randomFrom(ApiKey.Type.values()) : null,
Clock.systemUTC().instant().toEpochMilli(),
expirationTime,
invalidated,
invalidation,
new String(hash),
name,
version,
new BytesArray("{\"a role\": {\"cluster\": [\"all\"]}}"),
new BytesArray("{\"limited role\": {\"cluster\": [\"all\"]}}"),
Map.of(
"principal",
"test_user",
"full_name",
"test user",
"email",
"test@user.com",
"realm",
"realm1",
"realm_type",
"realm_type1",
"metadata",
Map.of()
),
metadataBytes,
null
);
}
@SuppressWarnings("unchecked")
private void checkAuthApiKeyMetadata(Object metadata, AuthenticationResult<User> authResult1) throws IOException {
if (metadata == null) {
assertThat(authResult1.getMetadata().containsKey(API_KEY_METADATA_KEY), is(false));
} else {
assertThat(
asInstanceOf(BytesReference.class, authResult1.getMetadata().get(API_KEY_METADATA_KEY)),
equalBytes(XContentTestUtils.convertToXContent((Map<String, Object>) metadata, XContentType.JSON))
);
}
}
private RoleReference.ApiKeyRoleType randomApiKeyRoleType() {
return randomFrom(RoleReference.ApiKeyRoleType.values());
}
private ApiKeyCredentials getApiKeyCredentials(String id, String key, ApiKey.Type type) {
return new ApiKeyCredentials(id, new SecureString(key.toCharArray()), type);
}
private ApiKey.Type parseTypeFromSourceMap(Map<String, Object> sourceMap) {
if (sourceMap.containsKey("type")) {
return ApiKey.Type.parse((String) sourceMap.get("type"));
} else {
return ApiKey.Type.REST;
}
}
private ApiKeyDoc createCrossClusterApiKeyDocWithCertificateIdentity(String certificateIdentity) {
final String apiKey = randomAlphaOfLength(16);
final char[] hash = getFastStoredHashAlgoForTests().hash(new SecureString(apiKey.toCharArray()));
return new ApiKeyDoc(
"api_key",
ApiKey.Type.CROSS_CLUSTER,
Instant.now().toEpochMilli(),
-1L,
false,
-1L,
new String(hash),
"test_key",
ApiKey.CURRENT_API_KEY_VERSION.version(),
new BytesArray("{}"),
new BytesArray("{}"),
createTestCreatorMap(),
null,
certificateIdentity
);
}
private Map<String, Object> createTestCreatorMap() {
final User user = new User("test-user", new String[0], "Test User", "test@example.com", Map.of("key", "value"), true);
return Map.of(
"principal",
user.principal(),
"full_name",
user.fullName(),
"email",
user.email(),
"metadata",
user.metadata(),
"realm",
"file",
"realm_type",
"file"
);
}
private Authentication createTestAuthentication() {
final User user = new User("test-user", new String[0], "Test User", "test@example.com", Map.of("key", "value"), true);
return AuthenticationTestHelper.builder().user(user).realmRef(new RealmRef("file", "file", "node-1")).build(false);
}
private static BaseBulkUpdateApiKeyRequest createUpdateRequestWithCertificateIdentity(
final String apiKeyId,
final CertificateIdentity certificateIdentity,
final Map<String, Object> metadata
) {
return new BaseBulkUpdateApiKeyRequest(List.of(apiKeyId), null, metadata, null, certificateIdentity) {
@Override
public ApiKey.Type getType() {
return ApiKey.Type.CROSS_CLUSTER;
}
};
}
private Map<String, Object> extractDocumentContent(XContentBuilder builder) throws IOException {
return XContentHelper.convertToMap(BytesReference.bytes(builder), false, XContentType.JSON).v2();
}
private static Authenticator.Context getAuthenticatorContext(ThreadContext threadContext) {
return new Authenticator.Context(
threadContext,
mock(AuthenticationService.AuditableRequest.class),
null,
randomBoolean(),
mock(Realms.class)
);
}
private static ApiKey.Version randomApiKeyVersion() {
return new ApiKey.Version(randomIntBetween(1, ApiKey.CURRENT_API_KEY_VERSION.version()));
}
}
| Utils |
java | apache__flink | flink-connectors/flink-file-sink-common/src/test/java/org/apache/flink/streaming/api/functions/sink/filesystem/bucketassigners/DateTimeBucketAssignerTest.java | {
"start": 1978,
"end": 2387
} | class ____ implements BucketAssigner.Context {
@Override
public long currentProcessingTime() {
return TEST_TIME_IN_MILLIS;
}
@Override
public long currentWatermark() {
throw new UnsupportedOperationException();
}
@Nullable
@Override
public Long timestamp() {
return null;
}
}
}
| MockedContext |
java | apache__camel | components/camel-kubernetes/src/generated/java/org/apache/camel/component/kubernetes/customresources/KubernetesCustomResourcesEndpointUriFactory.java | {
"start": 536,
"end": 3842
} | class ____ extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory {
private static final String BASE = ":masterUrl";
private static final Set<String> PROPERTY_NAMES;
private static final Set<String> SECRET_PROPERTY_NAMES;
private static final Map<String, String> MULTI_VALUE_PREFIXES;
static {
Set<String> props = new HashSet<>(34);
props.add("apiVersion");
props.add("bridgeErrorHandler");
props.add("caCertData");
props.add("caCertFile");
props.add("clientCertData");
props.add("clientCertFile");
props.add("clientKeyAlgo");
props.add("clientKeyData");
props.add("clientKeyFile");
props.add("clientKeyPassphrase");
props.add("connectionTimeout");
props.add("crdGroup");
props.add("crdName");
props.add("crdPlural");
props.add("crdScope");
props.add("crdVersion");
props.add("dnsDomain");
props.add("exceptionHandler");
props.add("exchangePattern");
props.add("kubernetesClient");
props.add("labelKey");
props.add("labelValue");
props.add("lazyStartProducer");
props.add("masterUrl");
props.add("namespace");
props.add("oauthToken");
props.add("operation");
props.add("password");
props.add("poolSize");
props.add("portName");
props.add("portProtocol");
props.add("resourceName");
props.add("trustCerts");
props.add("username");
PROPERTY_NAMES = Collections.unmodifiableSet(props);
Set<String> secretProps = new HashSet<>(12);
secretProps.add("caCertData");
secretProps.add("caCertFile");
secretProps.add("clientCertData");
secretProps.add("clientCertFile");
secretProps.add("clientKeyAlgo");
secretProps.add("clientKeyData");
secretProps.add("clientKeyFile");
secretProps.add("clientKeyPassphrase");
secretProps.add("oauthToken");
secretProps.add("password");
secretProps.add("trustCerts");
secretProps.add("username");
SECRET_PROPERTY_NAMES = Collections.unmodifiableSet(secretProps);
MULTI_VALUE_PREFIXES = Collections.emptyMap();
}
@Override
public boolean isEnabled(String scheme) {
return "kubernetes-custom-resources".equals(scheme);
}
@Override
public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException {
String syntax = scheme + BASE;
String uri = syntax;
Map<String, Object> copy = new HashMap<>(properties);
uri = buildPathParameter(syntax, uri, "masterUrl", null, true, copy);
uri = buildQueryParameters(uri, copy, encode);
return uri;
}
@Override
public Set<String> propertyNames() {
return PROPERTY_NAMES;
}
@Override
public Set<String> secretPropertyNames() {
return SECRET_PROPERTY_NAMES;
}
@Override
public Map<String, String> multiValuePrefixes() {
return MULTI_VALUE_PREFIXES;
}
@Override
public boolean isLenientProperties() {
return false;
}
}
| KubernetesCustomResourcesEndpointUriFactory |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/maps/Maps_assertContainsKeys_Test.java | {
"start": 2294,
"end": 10971
} | class ____ extends MapsBaseTest {
@Test
void should_fail_if_actual_is_null() {
// GIVEN
String[] keys = { "name" };
// WHEN
var assertionError = expectAssertionError(() -> maps.assertContainsKeys(INFO, null, keys));
// THEN
then(assertionError).hasMessage(actualIsNull());
}
@Test
void should_fail_if_given_keys_array_is_null() {
// GIVEN
String[] keys = null;
// WHEN
Throwable thrown = catchThrowable(() -> maps.assertContainsKeys(INFO, actual, keys));
// THEN
then(thrown).isInstanceOf(NullPointerException.class).hasMessage(keysToLookForIsNull("array of keys"));
}
@Test
void should_fail_if_given_keys_array_is_empty() {
// GIVEN
String[] keys = emptyKeys();
// WHEN
Throwable thrown = catchThrowable(() -> maps.assertContainsKeys(INFO, actual, keys));
// THEN
then(thrown).isInstanceOf(IllegalArgumentException.class).hasMessage(keysToLookForIsEmpty("array of keys"));
}
@Test
void should_pass_with_Properties() {
// GIVEN
Properties actual = mapOf(Properties::new, entry("name", "Yoda"), entry("job", "Jedi"));
Object[] expected = array("name", "job");
// WHEN/THEN
maps.assertContainsKeys(info, actual, expected);
}
@Test
void should_fail_with_Properties() {
// GIVEN
Properties actual = mapOf(Properties::new, entry("name", "Yoda"), entry("job", "Jedi"));
Object[] expected = array("name", "color");
Set<Object> notFound = set("color");
// WHEN
var assertionError = expectAssertionError(() -> maps.assertContainsKeys(info, actual, expected));
// THEN
then(assertionError).hasMessage(shouldContainKeys(actual, notFound).create());
}
@ParameterizedTest
@MethodSource({
"unmodifiableMapsSuccessfulTestCases",
"modifiableMapsSuccessfulTestCases",
"caseInsensitiveMapsSuccessfulTestCases",
})
void should_pass(Map<String, String> actual, String[] expected) {
// WHEN/THEN
assertThatNoException().as(actual.getClass().getName())
.isThrownBy(() -> maps.assertContainsKeys(info, actual, expected));
}
private static Stream<Arguments> unmodifiableMapsSuccessfulTestCases() {
return Stream.of(arguments(emptyMap(), emptyKeys()),
arguments(singletonMap("name", "Yoda"), array("name")),
arguments(new SingletonMap<>("name", "Yoda"), array("name")),
arguments(unmodifiableMap(mapOf(entry("name", "Yoda"), entry("job", "Jedi"))), array("name", "job")),
arguments(unmodifiableMap(mapOf(entry("name", "Yoda"), entry("job", "Jedi"))), array("job", "name")),
arguments(ImmutableMap.of("name", "Yoda", "job", "Jedi"), array("name", "job")),
arguments(ImmutableMap.of("name", "Yoda", "job", "Jedi"), array("job", "name")),
arguments(Map.of("name", "Yoda", "job", "Jedi"), array("name", "job")),
arguments(Map.of("name", "Yoda", "job", "Jedi"), array("job", "name")));
}
private static Stream<Arguments> modifiableMapsSuccessfulTestCases() {
return Stream.of(MODIFIABLE_MAPS)
.flatMap(supplier -> Stream.of(arguments(mapOf(supplier, entry("name", "Yoda"), entry("job", "Jedi")),
array("name")),
arguments(mapOf(supplier, entry("name", "Yoda"), entry("job", "Jedi")),
array("name", "job")),
arguments(mapOf(supplier, entry("name", "Yoda"), entry("job", "Jedi")),
array("job", "name"))));
}
private static Stream<Arguments> caseInsensitiveMapsSuccessfulTestCases() {
return Stream.of(ArrayUtils.add(CASE_INSENSITIVE_MAPS, CaseInsensitiveMap::new))
.flatMap(supplier -> Stream.of(arguments(mapOf(supplier, entry("NAME", "Yoda"), entry("Job", "Jedi")),
array("name", "job")),
arguments(mapOf(supplier, entry("NAME", "Yoda"), entry("Job", "Jedi")),
array("job", "name")),
arguments(mapOf(supplier, entry("NAME", "Yoda"), entry("Job", "Jedi")),
array("Name", "Job")),
arguments(mapOf(supplier, entry("NAME", "Yoda"), entry("Job", "Jedi")),
array("Job", "Name"))));
}
@ParameterizedTest
@MethodSource({
"unmodifiableMapsFailureTestCases",
"modifiableMapsFailureTestCases",
"caseInsensitiveMapsFailureTestCases",
"commonsCollectionsCaseInsensitiveMapFailureTestCases",
})
void should_fail(Map<String, String> actual, String[] expected, Set<String> notFound) {
// WHEN
assertThatExceptionOfType(AssertionError.class).as(actual.getClass().getName())
.isThrownBy(() -> maps.assertContainsKeys(info, actual, expected))
// THEN
.withMessage(shouldContainKeys(actual, notFound).create());
}
private static Stream<Arguments> unmodifiableMapsFailureTestCases() {
return Stream.of(arguments(emptyMap(),
array("name"),
set("name")),
arguments(singletonMap("name", "Yoda"),
array("color"),
set("color")),
arguments(new SingletonMap<>("name", "Yoda"),
array("color"),
set("color")),
arguments(unmodifiableMap(mapOf(entry("name", "Yoda"), entry("job", "Jedi"))),
array("name", "color"),
set("color")),
arguments(ImmutableMap.of("name", "Yoda", "job", "Jedi"),
array("name", "color"),
set("color")),
arguments(Map.of("name", "Yoda", "job", "Jedi"),
array("name", "color"),
set("color")),
arguments(Map.of("name", "Yoda"),
array((String) null), // implementation not permitting null keys
set((String) null)));
}
private static Stream<Arguments> modifiableMapsFailureTestCases() {
return Stream.of(MODIFIABLE_MAPS)
.flatMap(supplier -> Stream.of(arguments(mapOf(supplier, entry("name", "Yoda")),
array("name", "color"),
set("color")),
arguments(mapOf(supplier, entry("name", "Yoda"), entry("job", "Jedi")),
array("name", "color"),
set("color"))));
}
private static Stream<Arguments> caseInsensitiveMapsFailureTestCases() {
return Stream.of(CASE_INSENSITIVE_MAPS)
.flatMap(supplier -> Stream.of(arguments(mapOf(supplier, entry("NAME", "Yoda"), entry("Job", "Jedi")),
array("name", "color"),
set("color")),
arguments(mapOf(supplier, entry("NAME", "Yoda"), entry("Job", "Jedi")),
array("Name", "Color"),
set("Color"))));
}
private static Stream<Arguments> commonsCollectionsCaseInsensitiveMapFailureTestCases() {
return Stream.of(arguments(mapOf(CaseInsensitiveMap::new, entry("NAME", "Yoda"), entry("Job", "Jedi")),
array("name", "color"),
set("color")), // internal keys are always lowercase
arguments(mapOf(CaseInsensitiveMap::new, entry("NAME", "Yoda"), entry("Job", "Jedi")),
array("Name", "Color"),
set("Color"))); // internal keys are always lowercase
}
}
| Maps_assertContainsKeys_Test |
java | alibaba__nacos | naming/src/test/java/com/alibaba/nacos/naming/cluster/remote/response/DistroDataResponseTest.java | {
"start": 888,
"end": 1218
} | class ____ {
@Test
void test() {
DistroDataResponse distroDataResponse = new DistroDataResponse();
DistroData distroData = mock(DistroData.class);
distroDataResponse.setDistroData(distroData);
assertEquals(distroData, distroDataResponse.getDistroData());
}
} | DistroDataResponseTest |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/LookupSerializationTests.java | {
"start": 790,
"end": 2485
} | class ____ extends AbstractLogicalPlanSerializationTests<Lookup> {
public static Lookup randomLookup(int depth) {
Source source = randomSource();
LogicalPlan child = randomChild(depth);
Expression tableName = AbstractExpressionSerializationTests.randomChild();
List<Attribute> matchFields = randomFieldAttributes(1, 10, false);
LocalRelation localRelation = randomBoolean() ? null : LocalRelationSerializationTests.randomLocalRelation();
return new Lookup(source, child, tableName, matchFields, localRelation);
}
@Override
protected Lookup createTestInstance() {
return randomLookup(0);
}
@Override
protected Lookup mutateInstance(Lookup instance) throws IOException {
Source source = instance.source();
LogicalPlan child = instance.child();
Expression tableName = instance.tableName();
List<Attribute> matchFields = instance.matchFields();
LocalRelation localRelation = instance.localRelation();
switch (between(0, 3)) {
case 0 -> child = randomValueOtherThan(child, () -> randomChild(0));
case 1 -> tableName = randomValueOtherThan(tableName, AbstractExpressionSerializationTests::randomChild);
case 2 -> matchFields = randomValueOtherThan(matchFields, () -> randomFieldAttributes(1, 10, false));
case 3 -> localRelation = randomValueOtherThan(
localRelation,
() -> randomBoolean() ? null : LocalRelationSerializationTests.randomLocalRelation()
);
}
return new Lookup(source, child, tableName, matchFields, localRelation);
}
}
| LookupSerializationTests |
java | apache__camel | core/camel-xml-io-util/src/main/java/org/apache/camel/xml/io/util/XmlStreamInfo.java | {
"start": 1113,
"end": 2650
} | class ____ {
/** Indication that there's some critical problem with the stream and it should not be handled normally */
Throwable problem;
String rootElementName;
String rootElementNamespace;
/** Prefix to namespace mapping. default prefix is available as empty String (and not as null) */
final Map<String, String> namespaceMapping = new HashMap<>();
/**
* Attributes of the root element. Keys are full qualified names of the attributes and each attribute may be
* available as two keys: {@code prefix:localName} or {@code {namespaceURI}localName}
*/
final Map<String, String> attributes = new HashMap<>();
/**
* Trimmed and unparsed lines starting with Camel-recognized modeline markers (now: {@code camel-k:}).
*/
// TODO: remove modeline support after 4.10
final List<String> modelines = new ArrayList<>();
public boolean isValid() {
return problem == null;
}
public Throwable getProblem() {
return problem;
}
public void setProblem(Throwable problem) {
this.problem = problem;
}
public String getRootElementName() {
return rootElementName;
}
public String getRootElementNamespace() {
return rootElementNamespace;
}
public Map<String, String> getNamespaces() {
return namespaceMapping;
}
public Map<String, String> getAttributes() {
return attributes;
}
public List<String> getModelines() {
return modelines;
}
}
| XmlStreamInfo |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/pi/math/TestSummation.java | {
"start": 3097,
"end": 5100
} | class ____ extends Summation {
Summation2(ArithmeticProgression N, ArithmeticProgression E) {
super(N, E);
}
final Montgomery2 m2 = new Montgomery2();
double compute_montgomery2() {
long e = E.value;
long n = N.value;
double s = 0;
for(; e > E.limit; e += E.delta) {
m2.set(n);
s = Modular.addMod(s, m2.mod2(e)/(double)n);
n += N.delta;
}
return s;
}
double compute_modBigInteger() {
long e = E.value;
long n = N.value;
double s = 0;
for(; e > E.limit; e += E.delta) {
s = Modular.addMod(s, TestModular.modBigInteger(e, n)/(double)n);
n += N.delta;
}
return s;
}
double compute_modPow() {
long e = E.value;
long n = N.value;
double s = 0;
for(; e > E.limit; e += E.delta) {
s = Modular.addMod(s,
TWO.modPow(BigInteger.valueOf(e), BigInteger.valueOf(n))
.doubleValue() / n);
n += N.delta;
}
return s;
}
}
private static void computeBenchmarks(final Summation2 sigma) {
final Timer t = new Timer(false);
t.tick("sigma=" + sigma);
final double value = sigma.compute();
t.tick("compute=" + value);
assertEquals(value, sigma.compute_modular(), DOUBLE_DELTA);
t.tick("compute_modular");
assertEquals(value, sigma.compute_montgomery(), DOUBLE_DELTA);
t.tick("compute_montgomery");
assertEquals(value, sigma.compute_montgomery2(), DOUBLE_DELTA);
t.tick("compute_montgomery2");
assertEquals(value, sigma.compute_modBigInteger(), DOUBLE_DELTA);
t.tick("compute_modBigInteger");
assertEquals(value, sigma.compute_modPow(), DOUBLE_DELTA);
t.tick("compute_modPow");
}
/** Benchmarks */
public static void main(String[] args) {
final long delta = 1L << 4;
final long range = 1L << 20;
for(int i = 20; i < 40; i += 2)
computeBenchmarks(newSummation(1L << i, range, delta));
}
}
| Summation2 |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueFairOrdering.java | {
"start": 1490,
"end": 11424
} | class ____
extends ProportionalCapacityPreemptionPolicyMockFramework {
@BeforeEach
public void setup() {
super.setup();
conf.setBoolean(
CapacitySchedulerConfiguration.INTRAQUEUE_PREEMPTION_ENABLED, true);
policy = new ProportionalCapacityPreemptionPolicy(rmContext, cs, mClock);
}
/*
* When the capacity scheduler fair ordering policy is enabled, preempt first
* from the application owned by the user that is the farthest over their
* user limit.
*/
@Test
public void testIntraQueuePreemptionFairOrderingPolicyEnabledOneAppPerUser()
throws IOException {
// Enable FairOrderingPolicy for yarn.scheduler.capacity.root.a
conf.set(CapacitySchedulerConfiguration.PREFIX
+ CapacitySchedulerConfiguration.ROOT + ".a.ordering-policy", "fair");
// Make sure all containers will be preempted in a single round.
conf.setFloat(CapacitySchedulerConfiguration.
INTRAQUEUE_PREEMPTION_MAX_ALLOWABLE_LIMIT,
(float) 1.0);
String labelsConfig = "=100,true;";
String nodesConfig = // n1 has no label
"n1= res=100";
String queuesConfig =
// guaranteed,max,used,pending,reserved
"root(=[100 100 100 1 0]);" + // root
"-a(=[100 100 100 1 0])"; // a
// user1/app1 has 60 resources in queue a
// user2/app2 has 40 resources in queue a
// user3/app3 is requesting 20 resources in queue a
// With 3 users, preemptable user limit should be around 35 resources each.
// With FairOrderingPolicy enabled on queue a, all 20 resources should be
// preempted from app1
String appsConfig =
// queueName\t(priority,resource,host,expression,#repeat,reserved,pending,user)
"a\t" // app1, user1 in a
+ "(1,1,n1,,60,false,0,user1);" +
"a\t" // app2, user2 in a
+ "(1,1,n1,,40,false,0,user2);" +
"a\t" // app3, user3 in a
+ "(1,1,n1,,0,false,20,user3)"
;
buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
policy.editSchedule();
verify(eventHandler, times(20)).handle(argThat(
new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
getAppAttemptId(1))));
}
/*
* When the capacity scheduler fifo ordering policy is enabled, preempt first
* from the youngest application until reduced to user limit, then preempt
* from next youngest app.
*/
@Test
public void testIntraQueuePreemptionFifoOrderingPolicyEnabled()
throws IOException {
// Enable FifoOrderingPolicy for yarn.scheduler.capacity.root.a
conf.set(CapacitySchedulerConfiguration.PREFIX
+ CapacitySchedulerConfiguration.ROOT + ".a.ordering-policy", "fifo");
// Make sure all containers will be preempted in a single round.
conf.setFloat(CapacitySchedulerConfiguration.
INTRAQUEUE_PREEMPTION_MAX_ALLOWABLE_LIMIT,
(float) 1.0);
String labelsConfig = "=100,true;";
String nodesConfig = // n1 has no label
"n1= res=100";
String queuesConfig =
// guaranteed,max,used,pending,reserved
"root(=[100 100 100 1 0]);" + // root
"-a(=[100 100 100 1 0])"; // a
// user1/app1 has 60 resources in queue a
// user2/app2 has 40 resources in queue a
// user3/app3 is requesting 20 resources in queue a
// With 3 users, preemptable user limit should be around 35 resources each.
// With FifoOrderingPolicy enabled on queue a, the first 5 should come from
// the youngest app, app2, until app2 is reduced to the user limit of 35.
String appsConfig =
// queueName\t(priority,resource,host,expression,#repeat,reserved,pending,user)
"a\t" // app1, user1 in a
+ "(1,1,n1,,60,false,0,user1);" +
"a\t" // app2, user2 in a
+ "(1,1,n1,,40,false,0,user2);" +
"a\t" // app3, user3 in a
+ "(1,1,n1,,0,false,5,user3)"
;
buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
policy.editSchedule();
verify(eventHandler, times(5)).handle(argThat(
new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
getAppAttemptId(2))));
// user1/app1 has 60 resources in queue a
// user2/app2 has 35 resources in queue a
// user3/app3 has 5 resources and is requesting 15 resources in queue a
// With 3 users, preemptable user limit should be around 35 resources each.
// The next 15 should come from app1 even though app2 is younger since app2
// has already been reduced to its user limit.
appsConfig =
// queueName\t(priority,resource,host,expression,#repeat,reserved,pending,user)
"a\t" // app1, user1 in a
+ "(1,1,n1,,60,false,0,user1);" +
"a\t" // app2, user2 in a
+ "(1,1,n1,,35,false,0,user2);" +
"a\t" // app3, user3 in a
+ "(1,1,n1,,5,false,15,user3)"
;
buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
policy.editSchedule();
verify(eventHandler, times(15)).handle(argThat(
new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
getAppAttemptId(1))));
}
/*
* When the capacity scheduler fair ordering policy is enabled, preempt first
* from the youngest application from the user that is the farthest over their
* user limit.
*/
@Test
public void testIntraQueuePreemptionFairOrderingPolicyMulitipleAppsPerUser()
throws IOException {
// Enable FairOrderingPolicy for yarn.scheduler.capacity.root.a
conf.set(CapacitySchedulerConfiguration.PREFIX
+ CapacitySchedulerConfiguration.ROOT + ".a.ordering-policy", "fair");
// Make sure all containers will be preempted in a single round.
conf.setFloat(CapacitySchedulerConfiguration.
INTRAQUEUE_PREEMPTION_MAX_ALLOWABLE_LIMIT,
(float) 1.0);
String labelsConfig = "=100,true;";
String nodesConfig = // n1 has no label
"n1= res=100";
String queuesConfig =
// guaranteed,max,used,pending,reserved
"root(=[100 100 100 1 0]);" + // root
"-a(=[100 100 100 1 0])"; // a
// user1/app1 has 35 resources in queue a
// user1/app2 has 25 resources in queue a
// user2/app3 has 40 resources in queue a
// user3/app4 is requesting 20 resources in queue a
// With 3 users, preemptable user limit should be around 35 resources each.
// With FairOrderingPolicy enabled on queue a, all 20 resources should be
// preempted from app1 since it's the most over served app from the most
// over served user
String appsConfig =
// queueName\t(priority,resource,host,expression,#repeat,reserved,pending,user)
"a\t" // app1 and app2, user1 in a
+ "(1,1,n1,,35,false,0,user1);" +
"a\t"
+ "(1,1,n1,,25,false,0,user1);" +
"a\t" // app3, user2 in a
+ "(1,1,n1,,40,false,0,user2);" +
"a\t" // app4, user3 in a
+ "(1,1,n1,,0,false,20,user3)"
;
buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
policy.editSchedule();
verify(eventHandler, times(20)).handle(argThat(
new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
getAppAttemptId(1))));
}
/*
* When the capacity scheduler fifo ordering policy is enabled and a user has
* multiple apps, preempt first from the youngest application.
*/
@Test
public void testIntraQueuePreemptionFifoOrderingPolicyMultipleAppsPerUser()
throws IOException {
// Enable FifoOrderingPolicy for yarn.scheduler.capacity.root.a
conf.set(CapacitySchedulerConfiguration.PREFIX
+ CapacitySchedulerConfiguration.ROOT + ".a.ordering-policy", "fifo");
// Make sure all containers will be preempted in a single round.
conf.setFloat(CapacitySchedulerConfiguration.
INTRAQUEUE_PREEMPTION_MAX_ALLOWABLE_LIMIT,
(float) 1.0);
String labelsConfig = "=100,true;";
String nodesConfig = // n1 has no label
"n1= res=100";
String queuesConfig =
// guaranteed,max,used,pending,reserved
"root(=[100 100 100 1 0]);" + // root
"-a(=[100 100 100 1 0])"; // a
// user1/app1 has 40 resources in queue a
// user1/app2 has 20 resources in queue a
// user3/app3 has 40 resources in queue a
// user4/app4 is requesting 20 resources in queue a
// With 3 users, preemptable user limit should be around 35 resources each.
String appsConfig =
// queueName\t(priority,resource,host,expression,#repeat,reserved,pending,user)
"a\t" // app1, user1 in a
+ "(1,1,n1,,40,false,0,user1);" +
"a\t" // app2, user1 in a
+ "(1,1,n1,,20,false,0,user1);" +
"a\t" // app3, user3 in a
+ "(1,1,n1,,40,false,0,user3);" +
"a\t" // app4, user4 in a
+ "(1,1,n1,,0,false,25,user4)"
;
buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
policy.editSchedule();
// app3 is the younges and also over its user limit. 5 should be preempted
// from app3 until it comes down to user3's user limit.
verify(eventHandler, times(5)).handle(argThat(
new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
getAppAttemptId(3))));
// User1's app2 is its youngest. 19 should be preempted from app2, leaving
// only the AM
verify(eventHandler, times(19)).handle(argThat(
new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
getAppAttemptId(2))));
// Preempt the remaining resource from User1's oldest app1.
verify(eventHandler, times(1)).handle(argThat(
new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
getAppAttemptId(1))));
}
}
| TestProportionalCapacityPreemptionPolicyIntraQueueFairOrdering |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/component/seda/SedaRemoveRouteThenAddAgainTest.java | {
"start": 1039,
"end": 2273
} | class ____ extends ContextTestSupport {
@RepeatedTest(5)
public void testRemoveRouteAndThenAddAgain() throws Exception {
MockEndpoint out = getMockEndpoint("mock:out");
out.expectedMessageCount(1);
out.expectedBodiesReceived("before removing the route");
template.sendBody("seda:in", "before removing the route");
out.assertIsSatisfied();
// now stop & remove the route
context.getRouteController().stopRoute("sedaToMock");
context.removeRoute("sedaToMock");
// and then add it back again
context.addRoutes(createRouteBuilder());
// the mock endpoint was removed, so need to grab it again
out = getMockEndpoint("mock:out");
out.expectedMessageCount(1);
out.expectedBodiesReceived("after removing the route");
template.sendBody("seda:in", "after removing the route");
out.assertIsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("seda:in").routeId("sedaToMock").to("mock:out");
}
};
}
}
| SedaRemoveRouteThenAddAgainTest |
java | grpc__grpc-java | protobuf/src/test/java/io/grpc/protobuf/StatusProtoTest.java | {
"start": 1107,
"end": 7775
} | class ____ {
private Metadata metadata;
@Before
public void setup() {
metadata = new Metadata();
metadata.put(METADATA_KEY, METADATA_VALUE);
}
@Test
public void toStatusRuntimeException() throws Exception {
StatusRuntimeException sre = StatusProto.toStatusRuntimeException(STATUS_PROTO);
com.google.rpc.Status extractedStatusProto = StatusProto.fromThrowable(sre);
assertEquals(STATUS_PROTO.getCode(), sre.getStatus().getCode().value());
assertEquals(STATUS_PROTO.getMessage(), sre.getStatus().getDescription());
assertEquals(STATUS_PROTO, extractedStatusProto);
}
@Test
public void toStatusRuntimeExceptionWithMetadata_shouldIncludeMetadata() throws Exception {
StatusRuntimeException sre = StatusProto.toStatusRuntimeException(STATUS_PROTO, metadata);
com.google.rpc.Status extractedStatusProto = StatusProto.fromThrowable(sre);
assertEquals(STATUS_PROTO.getCode(), sre.getStatus().getCode().value());
assertEquals(STATUS_PROTO.getMessage(), sre.getStatus().getDescription());
assertEquals(STATUS_PROTO, extractedStatusProto);
assertNotNull(sre.getTrailers());
assertEquals(METADATA_VALUE, sre.getTrailers().get(METADATA_KEY));
}
@Test
public void toStatusRuntimeExceptionWithMetadata_shouldThrowIfMetadataIsNull() throws Exception {
try {
StatusProto.toStatusRuntimeException(STATUS_PROTO, null);
fail("NullPointerException expected");
} catch (NullPointerException npe) {
assertEquals("metadata must not be null", npe.getMessage());
}
}
@Test
public void toStatusRuntimeException_shouldThrowIfStatusCodeInvalid() throws Exception {
try {
StatusProto.toStatusRuntimeException(INVALID_STATUS_PROTO);
fail("IllegalArgumentException expected");
} catch (IllegalArgumentException expectedException) {
assertEquals("invalid status code", expectedException.getMessage());
}
}
@Test
public void toStatusException() throws Exception {
StatusException se = StatusProto.toStatusException(STATUS_PROTO);
com.google.rpc.Status extractedStatusProto = StatusProto.fromThrowable(se);
assertEquals(STATUS_PROTO.getCode(), se.getStatus().getCode().value());
assertEquals(STATUS_PROTO.getMessage(), se.getStatus().getDescription());
assertEquals(STATUS_PROTO, extractedStatusProto);
}
@Test
public void toStatusExceptionWithMetadata_shouldIncludeMetadata() throws Exception {
StatusException se = StatusProto.toStatusException(STATUS_PROTO, metadata);
com.google.rpc.Status extractedStatusProto = StatusProto.fromThrowable(se);
assertEquals(STATUS_PROTO.getCode(), se.getStatus().getCode().value());
assertEquals(STATUS_PROTO.getMessage(), se.getStatus().getDescription());
assertEquals(STATUS_PROTO, extractedStatusProto);
assertNotNull(se.getTrailers());
assertEquals(METADATA_VALUE, se.getTrailers().get(METADATA_KEY));
}
@Test
public void toStatusExceptionWithMetadata_shouldThrowIfMetadataIsNull() throws Exception {
try {
StatusProto.toStatusException(STATUS_PROTO, null);
fail("NullPointerException expected");
} catch (NullPointerException npe) {
assertEquals("metadata must not be null", npe.getMessage());
}
}
@Test
public void toStatusException_shouldThrowIfStatusCodeInvalid() throws Exception {
try {
StatusProto.toStatusException(INVALID_STATUS_PROTO);
fail("IllegalArgumentException expected");
} catch (IllegalArgumentException expectedException) {
assertEquals("invalid status code", expectedException.getMessage());
}
}
@Test
public void fromThrowable_runtimeException_shouldReturnDerivedStatusIfTrailersAreNull() {
Status status = Status.UNAVAILABLE.withDescription("not available");
com.google.rpc.Status statusFromThrowable =
StatusProto.fromThrowable(status.asRuntimeException());
assertEquals(statusFromThrowable.getCode(), status.getCode().value());
assertEquals(statusFromThrowable.getMessage(), status.getDescription());
}
@Test
public void fromThrowable_exception_shouldReturnDerivedStatusIfTrailersAreNull() {
Status status = Status.UNAVAILABLE.withDescription("not available");
com.google.rpc.Status statusFromThrowable =
StatusProto.fromThrowable(status.asException());
assertEquals(statusFromThrowable.getCode(), status.getCode().value());
assertEquals(statusFromThrowable.getMessage(), status.getDescription());
}
@Test
public void fromThrowableWithNestedStatusRuntimeException() {
StatusRuntimeException sre = StatusProto.toStatusRuntimeException(STATUS_PROTO);
Throwable nestedSre = new Throwable(sre);
com.google.rpc.Status extractedStatusProto = StatusProto.fromThrowable(sre);
com.google.rpc.Status extractedStatusProtoFromNestedSre = StatusProto.fromThrowable(nestedSre);
assertEquals(extractedStatusProto, extractedStatusProtoFromNestedSre);
}
@Test
public void fromThrowableWithNestedStatusException() {
StatusException se = StatusProto.toStatusException(STATUS_PROTO);
Throwable nestedSe = new Throwable(se);
com.google.rpc.Status extractedStatusProto = StatusProto.fromThrowable(se);
com.google.rpc.Status extractedStatusProtoFromNestedSe = StatusProto.fromThrowable(nestedSe);
assertEquals(extractedStatusProto, extractedStatusProtoFromNestedSe);
}
@Test
public void fromThrowable_shouldReturnNullIfNoEmbeddedStatus() {
Throwable nestedSe = new Throwable(new Throwable("no status found"));
assertNull(StatusProto.fromThrowable(nestedSe));
}
@Test
public void toStatusExceptionWithMetadataAndCause_shouldCaptureCause() {
RuntimeException exc = new RuntimeException("This is a test exception.");
StatusException se = StatusProto.toStatusException(STATUS_PROTO, new Metadata(), exc);
assertEquals(exc, se.getCause());
}
private static final Metadata.Key<String> METADATA_KEY =
Metadata.Key.of("test-metadata", Metadata.ASCII_STRING_MARSHALLER);
private static final String METADATA_VALUE = "test metadata value";
private static final com.google.rpc.Status STATUS_PROTO =
com.google.rpc.Status.newBuilder()
.setCode(2)
.setMessage("status message")
.addDetails(
com.google.protobuf.Any.pack(
com.google.rpc.Status.newBuilder()
.setCode(13)
.setMessage("nested message")
.build()))
.build();
private static final com.google.rpc.Status INVALID_STATUS_PROTO =
com.google.rpc.Status.newBuilder().setCode(-1).build();
}
| StatusProtoTest |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/feature/WriteNullStringAsEmptyTest.java | {
"start": 322,
"end": 944
} | class ____ extends TestCase {
public void test_features() throws Exception {
PropertyFilter filter = new PropertyFilter() {
public boolean apply(Object object, String name, Object value) {
if (value == null && object instanceof Model && "id".equals(name)) {
return false;
}
return true;
}
};
Model model = new Model();
String json = JSON.toJSONString(model, filter, SerializerFeature.WriteNullStringAsEmpty);
System.out.println(json);
}
private static | WriteNullStringAsEmptyTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/embeddables/generics/GenericEmbeddedIdentifierMappedSuperclassTest.java | {
"start": 11318,
"end": 11426
} | class ____<O, E> extends GenericObject<EmbeddableKey<O, E>> {
}
@MappedSuperclass
public static | AccessReport |
java | alibaba__nacos | common/src/test/java/com/alibaba/nacos/common/remote/client/RpcConstantsTest.java | {
"start": 797,
"end": 1332
} | class ____ {
@Test
void testGetRpcParams() {
Field[] declaredFields = RpcConstants.class.getDeclaredFields();
int i = 0;
for (Field declaredField : declaredFields) {
declaredField.setAccessible(true);
if (declaredField.getType().equals(String.class) && null != declaredField.getAnnotation(
RpcConstants.RpcConfigLabel.class)) {
i++;
}
}
assertEquals(i, RpcConstants.getRpcParams().size());
}
}
| RpcConstantsTest |
java | apache__camel | components/camel-ironmq/src/main/java/org/apache/camel/component/ironmq/IronMQEndpoint.java | {
"start": 1689,
"end": 4026
} | class ____ extends ScheduledPollEndpoint implements EndpointServiceLocation {
private static final Logger LOG = LoggerFactory.getLogger(IronMQEndpoint.class);
@UriParam
private IronMQConfiguration configuration;
private Client client;
public IronMQEndpoint(String uri, IronMQComponent component, IronMQConfiguration ironMQConfiguration) {
super(uri, component);
this.configuration = ironMQConfiguration;
}
@Override
public Producer createProducer() throws Exception {
return new IronMQProducer(this, getClient().queue(configuration.getQueueName()));
}
@Override
public Consumer createConsumer(Processor processor) throws Exception {
IronMQConsumer consumer = new IronMQConsumer(this, processor);
configureConsumer(consumer);
consumer.setMaxMessagesPerPoll(configuration.getMaxMessagesPerPoll());
return consumer;
}
@Override
protected void doStart() throws Exception {
super.doStart();
client = getConfiguration().getClient() != null ? getConfiguration().getClient() : getClient();
}
@Override
protected void doStop() throws Exception {
client = null;
super.doStop();
}
@Override
public String getServiceUrl() {
return configuration.getIronMQCloud();
}
@Override
public String getServiceProtocol() {
return "ironmq";
}
public Client getClient() {
if (client == null) {
client = createClient();
}
return client;
}
public void setClient(Client client) {
this.client = client;
}
/**
* Provide the possibility to override this method for an mock implementation
*
* @return Client
*/
Client createClient() {
Cloud cloud;
try {
cloud = new Cloud(configuration.getIronMQCloud());
} catch (MalformedURLException e) {
cloud = Cloud.ironAWSUSEast;
LOG.warn("Unable to parse ironMQCloud {} will use {}", configuration.getIronMQCloud(), cloud.getHost());
}
client = new Client(configuration.getProjectId(), configuration.getToken(), cloud);
return client;
}
public IronMQConfiguration getConfiguration() {
return configuration;
}
}
| IronMQEndpoint |
java | quarkusio__quarkus | extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/formatmapper/FormatMapperBehaviorWithFormatMapperTest.java | {
"start": 409,
"end": 1166
} | class ____ {
@RegisterExtension
static QuarkusUnitTest TEST = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(MyJsonEntity.class, MyJsonFormatMapper.class)
.addClasses(SchemaUtil.class, SmokeTestUtils.class))
.withConfigurationResource("application.properties");
@Inject
SessionFactory sessionFactory;
@Test
void smoke() {
// We really just care ot see if the SF is built successfully here or not;
assertThat(SchemaUtil.getColumnNames(sessionFactory, MyJsonEntity.class))
.contains("properties", "amount1", "amount2")
.doesNotContain("amountDifference");
}
}
| FormatMapperBehaviorWithFormatMapperTest |
java | apache__flink | flink-fs-tests/src/test/java/org/apache/flink/hdfstests/ContinuousFileProcessingTest.java | {
"start": 34735,
"end": 37408
} | class ____
implements SourceFunction.SourceContext<TimestampedFileInputSplit> {
private final Object lock = new Object();
@Override
public void collectWithTimestamp(TimestampedFileInputSplit element, long timestamp) {}
@Override
public void emitWatermark(Watermark mark) {}
@Override
public void markAsTemporarilyIdle() {}
@Override
public Object getCheckpointLock() {
return lock;
}
@Override
public void close() {}
}
///////// Auxiliary Methods /////////////
private static int getLineNo(String line) {
String[] tkns = line.split("\\s");
Assert.assertEquals(6, tkns.length);
return Integer.parseInt(tkns[tkns.length - 1]);
}
/**
* Create a file with pre-determined String format of the form: {@code fileIdx +": "+ sampleLine
* +" "+ lineNo}.
*/
private static Tuple2<org.apache.hadoop.fs.Path, String> createFileAndFillWithData(
String base, String fileName, int fileIdx, String sampleLine) throws IOException {
assert (hdfs != null);
final String fileRandSuffix = UUID.randomUUID().toString();
org.apache.hadoop.fs.Path file =
new org.apache.hadoop.fs.Path(base + "/" + fileName + fileRandSuffix);
Assert.assertFalse(hdfs.exists(file));
org.apache.hadoop.fs.Path tmp =
new org.apache.hadoop.fs.Path(base + "/." + fileName + fileRandSuffix);
FSDataOutputStream stream = hdfs.create(tmp);
StringBuilder str = new StringBuilder();
for (int i = 0; i < LINES_PER_FILE; i++) {
String line = fileIdx + ": " + sampleLine + " " + i + "\n";
str.append(line);
stream.write(line.getBytes(ConfigConstants.DEFAULT_CHARSET));
}
stream.close();
hdfs.rename(tmp, file);
Assert.assertTrue("No result file present", hdfs.exists(file));
return new Tuple2<>(file, str.toString());
}
/**
* Create continuous monitoring function with 1 reader-parallelism and interval: {@link
* #INTERVAL}.
*/
private <OUT> ContinuousFileMonitoringFunction<OUT> createTestContinuousFileMonitoringFunction(
FileInputFormat<OUT> format, FileProcessingMode fileProcessingMode) {
ContinuousFileMonitoringFunction<OUT> monitoringFunction =
new ContinuousFileMonitoringFunction<>(format, fileProcessingMode, 1, INTERVAL);
monitoringFunction.setRuntimeContext(new MockStreamingRuntimeContext(1, 0));
return monitoringFunction;
}
}
| DummySourceContext |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/PlaygroundWithDemoOfUnclonedParametersProblemTest.java | {
"start": 4624,
"end": 4851
} | interface ____ {
boolean anyImportRunningOrRunnedToday(int importType, Date currentDate);
void include(ImportLogBean importLogBean);
void alter(ImportLogBean importLogBean);
}
private | ImportLogDao |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/keymanytoone/bidir/component/LazyKeyManyToOneTest.java | {
"start": 731,
"end": 2830
} | class ____ {
@AfterEach
public void tearDown(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Test
public void testQueryingOnMany2One(SessionFactoryScope scope) {
Customer cust = new Customer( "Acme, Inc." );
Order order = new Order( new Order.Id( cust, 1 ) );
cust.getOrders().add( order );
scope.inTransaction(
session -> session.persist( cust )
);
scope.inTransaction(
session -> {
List results = session.createQuery( "from Order o where o.id.customer.name = :name" )
.setParameter( "name", cust.getName() )
.list();
assertEquals( 1, results.size() );
}
);
}
@Test
public void testSaveCascadedToKeyManyToOne(SessionFactoryScope scope) {
// test cascading a save to an association with a key-many-to-one which refers to a
// just saved entity
StatisticsImplementor statistics = scope.getSessionFactory().getStatistics();
scope.inTransaction(
session -> {
Customer cust = new Customer( "Acme, Inc." );
Order order = new Order( new Order.Id( cust, 1 ) );
cust.getOrders().add( order );
statistics.clear();
session.persist( cust );
session.flush();
assertEquals( 2, statistics.getEntityInsertCount() );
}
);
}
@Test
public void testLoadingStrategies(SessionFactoryScope scope) {
Customer customer = new Customer( "Acme, Inc." );
Order order = new Order( new Order.Id( customer, 1 ) );
customer.getOrders().add( order );
scope.inTransaction(
session -> session.persist( customer )
);
scope.inTransaction(
session -> {
Customer cust = session.get( Customer.class, customer.getId() );
assertEquals( 1, cust.getOrders().size() );
session.clear();
cust = (Customer) session.createQuery( "from Customer" ).uniqueResult();
assertEquals( 1, cust.getOrders().size() );
session.clear();
cust = (Customer) session.createQuery( "from Customer c join fetch c.orders" ).uniqueResult();
assertEquals( 1, cust.getOrders().size() );
session.clear();
}
);
}
}
| LazyKeyManyToOneTest |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/FluxDistinct.java | {
"start": 9421,
"end": 14028
} | class ____<T, K, C>
implements ConditionalSubscriber<T>, InnerOperator<T, T>,
QueueSubscription<T> {
final CoreSubscriber<? super T> actual;
final Context ctx;
final C collection;
final Function<? super T, ? extends K> keyExtractor;
final BiPredicate<C, K> distinctPredicate;
final Consumer<C> cleanupCallback;
@SuppressWarnings("NotNullFieldNotInitialized") // initialized in onSubscribe
QueueSubscription<T> qs;
boolean done;
int sourceMode;
DistinctFuseableSubscriber(CoreSubscriber<? super T> actual, C collection,
Function<? super T, ? extends K> keyExtractor,
BiPredicate<C, K> predicate,
Consumer<C> callback) {
this.actual = actual;
this.ctx = actual.currentContext();
this.collection = collection;
this.keyExtractor = keyExtractor;
this.distinctPredicate = predicate;
this.cleanupCallback = callback;
}
@SuppressWarnings("unchecked")
@Override
public void onSubscribe(Subscription s) {
if (Operators.validate(this.qs, s)) {
this.qs = (QueueSubscription<T>) s;
actual.onSubscribe(this);
}
}
@Override
public void onNext(T t) {
if (!tryOnNext(t)) {
qs.request(1);
}
}
// in the latest NullAway we can configure DataFlowIssue to be treated as
// NullAway suppression
@SuppressWarnings("DataFlowIssue") // fusion passes nulls via onNext
@Override
public boolean tryOnNext(T t) {
if (sourceMode == Fuseable.ASYNC) {
actual.onNext(null);
return true;
}
if (done) {
Operators.onNextDropped(t, this.ctx);
return true;
}
K k;
try {
k = Objects.requireNonNull(keyExtractor.apply(t),
"The distinct extractor returned a null value.");
}
catch (Throwable e) {
onError(Operators.onOperatorError(qs, e, t, this.ctx));
Operators.onDiscard(t, this.ctx);
return true;
}
boolean b;
try {
b = distinctPredicate.test(collection, k);
}
catch (Throwable e) {
onError(Operators.onOperatorError(qs, e, t, this.ctx));
Operators.onDiscard(t, this.ctx);
return true;
}
if (b) {
actual.onNext(t);
return true;
}
Operators.onDiscard(t, ctx);
return false;
}
@Override
public void onError(Throwable t) {
if (done) {
Operators.onErrorDropped(t, this.ctx);
return;
}
done = true;
cleanupCallback.accept(collection);
actual.onError(t);
}
@Override
public void onComplete() {
if (done) {
return;
}
done = true;
cleanupCallback.accept(collection);
actual.onComplete();
}
@Override
public CoreSubscriber<? super T> actual() {
return actual;
}
@Override
public void request(long n) {
qs.request(n);
}
@Override
public void cancel() {
qs.cancel();
if (collection != null) {
cleanupCallback.accept(collection);
}
}
@Override
public int requestFusion(int requestedMode) {
int m = qs.requestFusion(requestedMode);
sourceMode = m;
return m;
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.PARENT) return qs;
if (key == Attr.TERMINATED) return done;
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return InnerOperator.super.scanUnsafe(key);
}
@Override
public @Nullable T poll() {
if (sourceMode == Fuseable.ASYNC) {
long dropped = 0;
for (; ; ) {
T v = qs.poll();
if (v == null) {
return null;
}
try {
K r = Objects.requireNonNull(keyExtractor.apply(v),
"The keyExtractor returned a null collection");
if (distinctPredicate.test(collection, r)) {
if (dropped != 0) {
request(dropped);
}
return v;
}
Operators.onDiscard(v, ctx);
dropped++;
}
catch (Throwable error) {
Operators.onDiscard(v, this.ctx);
throw error;
}
}
}
else {
for (; ; ) {
T v = qs.poll();
if (v == null) {
return null;
}
try {
K r = Objects.requireNonNull(keyExtractor.apply(v),
"The keyExtractor returned a null collection");
if (distinctPredicate.test(collection, r)) {
return v;
}
else {
Operators.onDiscard(v, ctx);
}
}
catch (Throwable error) {
Operators.onDiscard(v, this.ctx);
throw error;
}
}
}
}
@Override
public boolean isEmpty() {
return qs.isEmpty();
}
@Override
public void clear() {
qs.clear();
cleanupCallback.accept(collection);
}
@Override
public int size() {
return qs.size();
}
}
}
| DistinctFuseableSubscriber |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/atomic/referencearray/AtomicReferenceArrayAssert_isSorted_Test.java | {
"start": 834,
"end": 1170
} | class ____ extends AtomicReferenceArrayAssertBaseTest {
@Override
protected AtomicReferenceArrayAssert<Object> invoke_api_method() {
return assertions.isSorted();
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertIsSorted(info(), internalArray());
}
}
| AtomicReferenceArrayAssert_isSorted_Test |
java | quarkusio__quarkus | extensions/mongodb-client/deployment/src/main/java/io/quarkus/mongodb/deployment/MongoConnectionNameBuildItem.java | {
"start": 138,
"end": 378
} | class ____ extends MultiBuildItem {
private final String name;
public MongoConnectionNameBuildItem(String name) {
this.name = name;
}
public String getName() {
return name;
}
}
| MongoConnectionNameBuildItem |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/support/ContextLoaderUtilsContextHierarchyTests.java | {
"start": 24715,
"end": 24785
} | interface ____ {
}
@ContextHierarchyA
private static | ContextHierarchyC |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/net/KeyManagerFactoryOptions.java | {
"start": 1851,
"end": 3335
} | class ____ implements KeyCertOptions {
private final KeyManagerFactory keyManagerFactory;
KeyManagerFactoryOptions(KeyManagerFactory keyManagerFactory) {
if (keyManagerFactory == null
|| keyManagerFactory.getKeyManagers() == null
|| keyManagerFactory.getKeyManagers().length == 0) {
throw new IllegalArgumentException("KeyManagerFactory is not present or is not initialized yet");
}
this.keyManagerFactory = keyManagerFactory;
}
KeyManagerFactoryOptions(X509KeyManager keyManager) {
this(new KeyManagerFactoryWrapper(keyManager));
}
private KeyManagerFactoryOptions(KeyManagerFactoryOptions other) {
this.keyManagerFactory = other.keyManagerFactory;
}
@Override
public KeyCertOptions copy() {
return new KeyManagerFactoryOptions(this);
}
@Override
public KeyManagerFactory getKeyManagerFactory(Vertx vertx) {
return keyManagerFactory;
}
@Override
public Function<String, KeyManagerFactory> keyManagerFactoryMapper(Vertx vertx) throws Exception {
return name -> null;
}
@Override
public int hashCode() {
return keyManagerFactory.hashCode();
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (obj instanceof KeyManagerFactoryOptions) {
KeyManagerFactoryOptions that = (KeyManagerFactoryOptions) obj;
return Objects.equals(keyManagerFactory, that.keyManagerFactory);
}
return false;
}
}
| KeyManagerFactoryOptions |
java | junit-team__junit5 | junit-vintage-engine/src/test/java/org/junit/vintage/engine/descriptor/TestSourceProviderTests.java | {
"start": 713,
"end": 1198
} | class ____ {
@Test
void findsInheritedMethod() {
var description = Description.createTestDescription(ConcreteJUnit4TestCase.class, "theTest");
var source = new TestSourceProvider().findTestSource(description);
assertThat(source).isInstanceOf(MethodSource.class);
var methodSource = (MethodSource) source;
assertEquals(ConcreteJUnit4TestCase.class.getName(), methodSource.getClassName());
assertEquals("theTest", methodSource.getMethodName());
}
}
| TestSourceProviderTests |
java | apache__camel | components/camel-salesforce/camel-salesforce-component/src/test/java/org/apache/camel/component/salesforce/BulkApiV2IngestJobManualIT.java | {
"start": 1880,
"end": 6497
} | class ____ extends AbstractSalesforceTestBase {
@Test
public void testLifecycle() throws Exception {
Job job = new Job();
job.setObject("Contact");
job.setOperation(OperationEnum.INSERT);
job = template().requestBody("salesforce:bulk2CreateJob", job, Job.class);
assertNotNull(job.getId(), "JobId");
job = template().requestBody("salesforce:bulk2GetJob", job, Job.class);
assertSame(JobStateEnum.OPEN, job.getState(), "Job state");
Exchange exchange = new DefaultExchange(context());
exchange.getIn().setBody("FirstName,LastName\nTestFirst,TestLast");
exchange.getIn().setHeader("jobId", job.getId());
template.send("salesforce:bulk2CreateBatch", exchange);
assertNull(exchange.getException());
job = template().requestBody("salesforce:bulk2GetJob", job, Job.class);
assertSame(JobStateEnum.OPEN, job.getState(), "Job state");
job = template().requestBodyAndHeader("salesforce:bulk2CloseJob", "", "jobId", job.getId(),
Job.class);
assertEquals(JobStateEnum.UPLOAD_COMPLETE, job.getState(), "Job state");
// wait for job to finish
while (job.getState() != JobStateEnum.JOB_COMPLETE) {
Thread.sleep(2000);
job = template().requestBodyAndHeader("salesforce:bulk2GetJob", "", "jobId",
job.getId(), Job.class);
}
InputStream is = template().requestBodyAndHeader("salesforce:bulk2GetSuccessfulResults",
"", "jobId", job.getId(), InputStream.class);
assertNotNull(is, "Successful results");
List<String> successful = IOUtils.readLines(is, StandardCharsets.UTF_8);
assertEquals(2, successful.size());
assertTrue(successful.get(1).contains("TestFirst"));
is = template().requestBodyAndHeader("salesforce:bulk2GetFailedResults",
"", "jobId", job.getId(), InputStream.class);
assertNotNull(is, "Failed results");
List<String> failed = IOUtils.readLines(is, StandardCharsets.UTF_8);
assertEquals(1, failed.size());
is = template().requestBodyAndHeader("salesforce:bulk2GetUnprocessedRecords",
"", "jobId", job.getId(), InputStream.class);
assertNotNull(is, "Unprocessed records");
List<String> unprocessed = IOUtils.readLines(is, StandardCharsets.UTF_8);
assertEquals(1, unprocessed.size());
assertEquals("FirstName,LastName", unprocessed.get(0));
}
@Test
public void testAbort() {
Job job = new Job();
job.setObject("Contact");
job.setOperation(OperationEnum.INSERT);
job = createJob(job);
job = template().requestBody("salesforce:bulk2GetJob", job, Job.class);
assertSame(JobStateEnum.OPEN, job.getState(), "Job should be OPEN");
template().sendBodyAndHeader("salesforce:bulk2AbortJob", "", "jobId", job.getId());
job = template().requestBody("salesforce:bulk2GetJob", job, Job.class);
assertSame(JobStateEnum.ABORTED, job.getState(), "Job state");
}
@Test
public void testDelete() {
Job job = new Job();
job.setObject("Contact");
job.setOperation(OperationEnum.INSERT);
job = createJob(job);
job = template().requestBody("salesforce:bulk2GetJob", job, Job.class);
assertSame(JobStateEnum.OPEN, job.getState(), "Job should be OPEN");
template().sendBodyAndHeader("salesforce:bulk2AbortJob", "", "jobId", job.getId());
job = template().requestBody("salesforce:bulk2GetJob", job, Job.class);
assertSame(JobStateEnum.ABORTED, job.getState(), "Job state");
template().sendBodyAndHeader("salesforce:bulk2DeleteJob", "", "jobId", job.getId());
final Job finalJob = job;
CamelExecutionException ex = Assertions.assertThrows(CamelExecutionException.class,
() -> template().requestBody("salesforce:bulk2GetJob", finalJob, Job.class));
assertEquals(SalesforceException.class, ex.getCause().getClass());
SalesforceException sfEx = (SalesforceException) ex.getCause();
assertEquals(404, sfEx.getStatusCode());
}
@Test
public void testGetAll() {
Jobs jobs = template().requestBody("salesforce:bulk2GetAllJobs", "", Jobs.class);
assertNotNull(jobs);
}
private Job createJob(Job job) {
job = template().requestBody("salesforce:bulk2CreateJob", job, Job.class);
assertNotNull(job.getId(), "Missing JobId");
return job;
}
}
| BulkApiV2IngestJobManualIT |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/builditem/RunTimeConfigurationProxyBuildItem.java | {
"start": 272,
"end": 610
} | class ____ extends SimpleBuildItem {
private final Map<Class<?>, Object> objects;
public RunTimeConfigurationProxyBuildItem(final Map<Class<?>, Object> objects) {
this.objects = objects;
}
public Object getProxyObjectFor(Class<?> clazz) {
return objects.get(clazz);
}
}
| RunTimeConfigurationProxyBuildItem |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/processor/SpringTraceUsingPropertyTest.java | {
"start": 939,
"end": 1003
} | class ____ extends SpringTraceTest {
}
| SpringTraceUsingPropertyTest |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/error/ShouldHaveName.java | {
"start": 826,
"end": 1546
} | class ____ extends BasicErrorMessageFactory {
private static final String SHOULD_HAVE_NAME = "%nExpecting%n %s%nto have name:%n %s%nbut had:%n %s";
public static ShouldHaveName shouldHaveName(File actual, String expectedName) {
return new ShouldHaveName(actual, expectedName);
}
private ShouldHaveName(File actual, String expectedName) {
super(SHOULD_HAVE_NAME, actual, expectedName, actual.getName());
}
public static ShouldHaveName shouldHaveName(Path actual, String expectedName) {
return new ShouldHaveName(actual, expectedName);
}
private ShouldHaveName(Path actual, String expectedName) {
super(SHOULD_HAVE_NAME, actual, expectedName, actual.getFileName());
}
}
| ShouldHaveName |
java | apache__camel | components/camel-debezium/camel-debezium-common/camel-debezium-common-component/src/test/java/org/apache/camel/component/debezium/DebeziumEndpointTest.java | {
"start": 1991,
"end": 13732
} | class ____ {
private DebeziumEndpoint debeziumEndpoint;
@Mock
private Processor processor;
@BeforeEach
public void setUp() {
debeziumEndpoint = new DebeziumTestEndpoint(
"", new DebeziumTestComponent(new DefaultCamelContext()),
new FileConnectorEmbeddedDebeziumConfiguration());
}
@Test
void testIfCreatesConsumer() throws Exception {
final Consumer debeziumConsumer = debeziumEndpoint.createConsumer(processor);
assertNotNull(debeziumConsumer);
}
@Test
void testIfFailsToCreateProducer() {
assertThrows(UnsupportedOperationException.class, () -> {
debeziumEndpoint.createProducer();
});
}
@Test
void testIfCreatesExchangeFromSourceCreateRecord() {
final SourceRecord sourceRecord = createCreateRecord();
final Exchange exchange = debeziumEndpoint.createDbzExchange(null, sourceRecord);
final Message inMessage = exchange.getIn();
assertNotNull(exchange);
// assert headers
assertEquals("dummy", inMessage.getHeader(DebeziumConstants.HEADER_IDENTIFIER));
assertEquals(Envelope.Operation.CREATE.code(),
inMessage.getHeader(DebeziumConstants.HEADER_OPERATION));
final Struct key = (Struct) inMessage.getHeader(DebeziumConstants.HEADER_KEY);
assertEquals(12345, key.getInt32("id").intValue());
assertSourceMetadata(inMessage);
assertNotNull(inMessage.getHeader(DebeziumConstants.HEADER_TIMESTAMP));
// assert value
final Struct body = (Struct) inMessage.getBody();
assertNotNull(body);
assertEquals((byte) 1, body.getInt8("id").byteValue());
// assert schema
assertSchema(body.schema());
}
@Test
void testIfCreatesExchangeFromSourceDeleteRecord() {
final SourceRecord sourceRecord = createDeleteRecord();
final Exchange exchange = debeziumEndpoint.createDbzExchange(null, sourceRecord);
final Message inMessage = exchange.getIn();
assertNotNull(exchange);
// assert headers
assertEquals("dummy", inMessage.getHeader(DebeziumConstants.HEADER_IDENTIFIER));
assertEquals(Envelope.Operation.DELETE.code(),
inMessage.getHeader(DebeziumConstants.HEADER_OPERATION));
final Struct key = (Struct) inMessage.getHeader(DebeziumConstants.HEADER_KEY);
assertEquals(12345, key.getInt32("id").intValue());
assertNotNull(inMessage.getHeader(DebeziumConstants.HEADER_BEFORE));
// assert value
final Struct body = (Struct) inMessage.getBody();
assertNull(body); // we expect body to be null since is a delete
}
@Test
void testIfCreatesExchangeFromSourceDeleteRecordWithNull() {
final SourceRecord sourceRecord = createDeleteRecordWithNull();
final Exchange exchange = debeziumEndpoint.createDbzExchange(null, sourceRecord);
final Message inMessage = exchange.getIn();
assertNotNull(exchange);
// assert headers
assertEquals("dummy", inMessage.getHeader(DebeziumConstants.HEADER_IDENTIFIER));
final Struct key = (Struct) inMessage.getHeader(DebeziumConstants.HEADER_KEY);
assertEquals(12345, key.getInt32("id").intValue());
// assert value
final Struct body = (Struct) inMessage.getBody();
assertNull(body);
}
@Test
void testIfCreatesExchangeFromSourceUpdateRecord() {
final SourceRecord sourceRecord = createUpdateRecord();
final Exchange exchange = debeziumEndpoint.createDbzExchange(null, sourceRecord);
final Message inMessage = exchange.getIn();
assertNotNull(exchange);
// assert headers
assertEquals("dummy", inMessage.getHeader(DebeziumConstants.HEADER_IDENTIFIER));
assertEquals(Envelope.Operation.UPDATE.code(),
inMessage.getHeader(DebeziumConstants.HEADER_OPERATION));
final Struct key = (Struct) inMessage.getHeader(DebeziumConstants.HEADER_KEY);
assertEquals(12345, key.getInt32("id").intValue());
assertSourceMetadata(inMessage);
// assert value
final Struct before = (Struct) inMessage.getHeader(DebeziumConstants.HEADER_BEFORE);
final Struct after = (Struct) inMessage.getBody();
assertNotNull(before);
assertNotNull(after);
assertEquals((byte) 1, before.getInt8("id").byteValue());
assertEquals((byte) 2, after.getInt8("id").byteValue());
}
@Test
void testIfCreatesExchangeFromSourceRecordOtherThanStruct() {
final SourceRecord sourceRecord = createStringRecord();
final Exchange exchange = debeziumEndpoint.createDbzExchange(null, sourceRecord);
final Message inMessage = exchange.getIn();
assertNotNull(exchange);
// assert headers
assertEquals("dummy", inMessage.getHeader(DebeziumConstants.HEADER_IDENTIFIER));
assertNull(inMessage.getHeader(DebeziumConstants.HEADER_OPERATION));
// assert value
final String value = (String) inMessage.getBody();
assertEquals(sourceRecord.value(), value);
}
@Test
void testIfHandlesUnknownSchema() {
final SourceRecord sourceRecord = createUnknownUnnamedSchemaRecord();
final Exchange exchange = debeziumEndpoint.createDbzExchange(null, sourceRecord);
final Message inMessage = exchange.getIn();
assertNotNull(exchange);
// assert headers
assertEquals("dummy", inMessage.getHeader(DebeziumConstants.HEADER_IDENTIFIER));
assertNull(inMessage.getHeader(DebeziumConstants.HEADER_OPERATION));
assertNull(inMessage.getHeader(DebeziumConstants.HEADER_KEY));
// assert value
final Struct body = (Struct) inMessage.getBody();
// we have to get value of after with struct, we are strict about this case
assertNull(body);
}
@Test
void testIfCreatesExchangeFromSourceDdlRecord() {
final SourceRecord sourceRecord = createDdlSQLRecord();
final Exchange exchange = debeziumEndpoint.createDbzExchange(null, sourceRecord);
final Message inMessage = exchange.getIn();
assertNotNull(exchange);
// assert headers
assertEquals("dummy", inMessage.getHeader(DebeziumConstants.HEADER_IDENTIFIER));
assertEquals("SET character_set_server=utf8, collation_server=utf8_bin",
inMessage.getHeader(DebeziumConstants.HEADER_DDL_SQL));
}
private SourceRecord createCreateRecord() {
final Schema recordSchema = SchemaBuilder.struct().field("id", SchemaBuilder.int8()).build();
final Schema sourceSchema = SchemaBuilder.struct().field("lsn", SchemaBuilder.int32()).build();
Envelope envelope = Envelope.defineSchema().withName("dummy.Envelope").withRecord(recordSchema)
.withSource(sourceSchema).build();
final Struct after = new Struct(recordSchema);
final Struct source = new Struct(sourceSchema);
after.put("id", (byte) 1);
source.put("lsn", 1234);
final Struct payload = envelope.create(after, source, Instant.now());
return new SourceRecord(
new HashMap<>(), createSourceOffset(), "dummy", createKeySchema(),
createKeyRecord(), envelope.schema(), payload);
}
private SourceRecord createDeleteRecord() {
final Schema recordSchema = SchemaBuilder.struct().field("id", SchemaBuilder.int8()).build();
Envelope envelope = Envelope.defineSchema().withName("dummy.Envelope").withRecord(recordSchema)
.withSource(SchemaBuilder.struct().build()).build();
final Struct before = new Struct(recordSchema);
before.put("id", (byte) 1);
final Struct payload = envelope.delete(before, null, Instant.now());
return new SourceRecord(
new HashMap<>(), createSourceOffset(), "dummy", createKeySchema(),
createKeyRecord(), envelope.schema(), payload);
}
private SourceRecord createDeleteRecordWithNull() {
final Schema recordSchema = SchemaBuilder.struct().field("id", SchemaBuilder.int8()).build();
Envelope.defineSchema().withName("dummy.Envelope").withRecord(recordSchema).withSource(SchemaBuilder.struct().build())
.build();
final Struct before = new Struct(recordSchema);
before.put("id", (byte) 1);
return new SourceRecord(
new HashMap<>(), createSourceOffset(), "dummy", createKeySchema(),
createKeyRecord(), null, null);
}
private SourceRecord createUpdateRecord() {
final Schema recordSchema = SchemaBuilder.struct().field("id", SchemaBuilder.int8()).build();
final Schema sourceSchema = SchemaBuilder.struct().field("lsn", SchemaBuilder.int32()).build();
Envelope envelope = Envelope.defineSchema().withName("dummy.Envelope").withRecord(recordSchema)
.withSource(sourceSchema).build();
final Struct before = new Struct(recordSchema);
final Struct source = new Struct(sourceSchema);
final Struct after = new Struct(recordSchema);
before.put("id", (byte) 1);
after.put("id", (byte) 2);
source.put("lsn", 1234);
final Struct payload = envelope.update(before, after, source, Instant.now());
return new SourceRecord(
new HashMap<>(), createSourceOffset(), "dummy", createKeySchema(),
createKeyRecord(), envelope.schema(), payload);
}
private SourceRecord createDdlSQLRecord() {
final Schema recordSchema = SchemaBuilder.struct().field("ddl", SchemaBuilder.string()).build();
Envelope.defineSchema().withName("dummy.Envelope").withRecord(recordSchema).withSource(SchemaBuilder.struct().build())
.build();
final Struct recordValue = new Struct(recordSchema);
recordValue.put("ddl", "SET character_set_server=utf8, collation_server=utf8_bin");
return new SourceRecord(
new HashMap<>(), createSourceOffset(), "dummy", null,
null, recordValue.schema(), recordValue);
}
private SourceRecord createUnknownUnnamedSchemaRecord() {
final Schema recordSchema = SchemaBuilder.struct().field("id", SchemaBuilder.int8()).build();
final Struct before = new Struct(recordSchema);
before.put("id", (byte) 1);
return new SourceRecord(new HashMap<>(), new HashMap<>(), "dummy", recordSchema, before);
}
private SourceRecord createStringRecord() {
final Schema recordSchema = Schema.STRING_SCHEMA;
return new SourceRecord(new HashMap<>(), createSourceOffset(), "dummy", recordSchema, "test_record");
}
private HashMap<String, ?> createSourceOffset() {
final HashMap<String, Integer> sourceOffsets = new HashMap<>();
sourceOffsets.put("pos", 111);
return sourceOffsets;
}
private Schema createKeySchema() {
return SchemaBuilder.struct().field("id", SchemaBuilder.int32().build());
}
private Struct createKeyRecord() {
final Struct key = new Struct(createKeySchema());
key.put("id", 12345);
return key;
}
private void assertSourceMetadata(final Message inMessage) {
@SuppressWarnings("unchecked")
final Map<String, Object> source = inMessage.getHeader(DebeziumConstants.HEADER_SOURCE_METADATA, Map.class);
assertEquals(1234, source.get("lsn"));
}
private void assertSchema(final Schema schema) {
assertNotNull(schema);
assertFalse(schema.fields().isEmpty());
}
}
| DebeziumEndpointTest |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/error/ShouldContain.java | {
"start": 2245,
"end": 5281
} | class ____ the failed assertion.
* @param actual the actual value in the failed assertion.
* @param expected values expected to be in {@code actual}.
* @param notFound the values in {@code expected} not found in {@code actual}.
* @param comparisonStrategy the {@link ComparisonStrategy} used to evaluate assertion.
* @return the created {@code ErrorMessageFactory}.
*/
public static ErrorMessageFactory shouldContain(Class<?> clazz, Object actual, Object expected, Object notFound,
ComparisonStrategy comparisonStrategy) {
GroupTypeDescription groupTypeDescription = getGroupTypeDescription(clazz);
return new ShouldContain(actual, expected, notFound, comparisonStrategy, groupTypeDescription);
}
/**
* Creates a new <code>{@link ShouldContain}</code>.
* @param actual the actual value in the failed assertion.
* @param expected values expected to be in {@code actual}.
* @param notFound the values in {@code expected} not found in {@code actual}.
* @return the created {@code ErrorMessageFactory}.
*/
public static ErrorMessageFactory shouldContain(Object actual, Object expected, Object notFound) {
return shouldContain(actual, expected, notFound, StandardComparisonStrategy.instance());
}
public static ErrorMessageFactory directoryShouldContain(File actual, List<File> directoryContent, String filterDescription) {
return new ShouldContain(actual, toFileNames(directoryContent), filterDescription);
}
private static List<String> toFileNames(List<File> files) {
return files.stream()
.map(File::getName)
.collect(toList());
}
public static ErrorMessageFactory directoryShouldContain(Path actual, List<Path> directoryContent, String filterDescription) {
return new ShouldContain(actual, toPathNames(directoryContent), filterDescription);
}
private static List<String> toPathNames(List<Path> files) {
return files.stream()
.map(Path::toString)
.collect(toList());
}
private ShouldContain(Object actual, Object expected, Object notFound, ComparisonStrategy comparisonStrategy,
GroupTypeDescription groupTypeDescription) {
super("%nExpecting " + groupTypeDescription.getGroupTypeName()
+ ":%n %s%nto contain:%n %s%nbut could not find the following " + groupTypeDescription.getElementTypeName()
+ ":%n %s%n%s", actual, expected, notFound,
comparisonStrategy);
}
private ShouldContain(Object actual, List<String> directoryContent, String filterDescription) {
// not passing directoryContent and filterDescription as parameter to avoid AssertJ default String formatting
super("%nExpecting directory:%n" +
" %s%n" +
"to contain at least one file matching " + escapePercent(filterDescription) + " but there was none.%n" +
"The directory content was:%n " + escapePercent(directoryContent.toString()),
actual);
}
}
| in |
java | apache__camel | components/camel-openstack/src/generated/java/org/apache/camel/component/openstack/glance/GlanceEndpointConfigurer.java | {
"start": 743,
"end": 3635
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
GlanceEndpoint target = (GlanceEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "apiversion":
case "apiVersion": target.setApiVersion(property(camelContext, java.lang.String.class, value)); return true;
case "config": target.setConfig(property(camelContext, org.openstack4j.core.transport.Config.class, value)); return true;
case "domain": target.setDomain(property(camelContext, java.lang.String.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "operation": target.setOperation(property(camelContext, java.lang.String.class, value)); return true;
case "password": target.setPassword(property(camelContext, java.lang.String.class, value)); return true;
case "project": target.setProject(property(camelContext, java.lang.String.class, value)); return true;
case "username": target.setUsername(property(camelContext, java.lang.String.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "apiversion":
case "apiVersion": return java.lang.String.class;
case "config": return org.openstack4j.core.transport.Config.class;
case "domain": return java.lang.String.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "operation": return java.lang.String.class;
case "password": return java.lang.String.class;
case "project": return java.lang.String.class;
case "username": return java.lang.String.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
GlanceEndpoint target = (GlanceEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "apiversion":
case "apiVersion": return target.getApiVersion();
case "config": return target.getConfig();
case "domain": return target.getDomain();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "operation": return target.getOperation();
case "password": return target.getPassword();
case "project": return target.getProject();
case "username": return target.getUsername();
default: return null;
}
}
}
| GlanceEndpointConfigurer |
java | square__retrofit | retrofit-adapters/rxjava/src/test/java/retrofit2/adapter/rxjava/CancelDisposeTest.java | {
"start": 1165,
"end": 2247
} | interface ____ {
@GET("/")
Observable<String> go();
}
private final OkHttpClient client = new OkHttpClient();
private Service service;
@Before
public void setUp() {
Retrofit retrofit =
new Retrofit.Builder()
.baseUrl(server.url("/"))
.addConverterFactory(new StringConverterFactory())
.addCallAdapterFactory(RxJavaCallAdapterFactory.createAsync())
.callFactory(client)
.build();
service = retrofit.create(Service.class);
}
@Test
public void disposeCancelsCall() {
Subscription subscription = service.go().subscribe();
List<Call> calls = client.dispatcher().runningCalls();
assertEquals(1, calls.size());
subscription.unsubscribe();
assertTrue(calls.get(0).isCanceled());
}
@Test
public void cancelDoesNotDispose() {
Subscription subscription = service.go().subscribe();
List<Call> calls = client.dispatcher().runningCalls();
assertEquals(1, calls.size());
calls.get(0).cancel();
assertFalse(subscription.isUnsubscribed());
}
}
| Service |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/dialect/oracle/ast/stmt/OracleConstraint.java | {
"start": 1460,
"end": 1544
} | enum ____ {
DEFERRED, IMMEDIATE
}
OracleConstraint clone();
}
| Initially |
java | quarkusio__quarkus | integration-tests/oidc/src/test/java/io/quarkus/it/keycloak/ClassLevelTestSecurityLazyAuthTest.java | {
"start": 496,
"end": 1461
} | class ____ {
@Test
public void testClassAnnotationWithDummyUser() {
RestAssured.when().get("test-security").then().body(is("user1"));
}
@Test
@OidcSecurity(claims = {
@Claim(key = "email", value = "user@gmail.com")
})
public void testClassAnnotationWithOidcSecurity() {
// verify information form security context
RestAssured.when().get("test-security").then().body(is("user1"));
// verify information from JWT
RestAssured.when().get("test-security-jwt").then().body(is("user1:viewer:user@gmail.com"));
}
@Test
@TestSecurity(user = "userJwt", roles = "viewer")
@OidcSecurity(claims = {
@Claim(key = "email", value = "user@gmail.com")
})
public void testMethodLevelTestSecurityOverridesClassAnnotation() {
RestAssured.when().get("test-security-jwt").then().body(is("userJwt:viewer:user@gmail.com"));
}
}
| ClassLevelTestSecurityLazyAuthTest |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/cluster/api/sync/NodeSelectionAclCommands.java | {
"start": 1232,
"end": 5859
} | interface ____<K, V> {
/**
* The command shows the available ACL categories if called without arguments.
*
* @return List<AclCategory> a list of ACL categories or
*/
Executions<Set<AclCategory>> aclCat();
/**
* The command shows all the Redis commands in the specified category.
*
* @param category the specified category
* @return List<CommandType> a list of commands inside a given category
*/
Executions<Set<CommandType>> aclCat(AclCategory category);
/**
* Delete all the specified ACL users and terminate all the connections that are authenticated with such users.
*
* @param usernames the specified usernames
* @return Long The number of users that were deleted
*/
Executions<Long> aclDeluser(String... usernames);
/**
* Simulate the execution of a given command by a given user.
*
* @param username the specified username
* @param command the specified command
* @param args the specified args of command
* @return String reply: OK on success.
* @since 6.2
*/
Executions<String> aclDryRun(String username, String command, String... args);
/**
* Simulate the execution of a given command by a given user.
*
* @param username the specified username
* @param command the specified command to inspect
* @return String reply: OK on success.
* @since 6.2
*/
Executions<String> aclDryRun(String username, RedisCommand<K, V, ?> command);
/**
* The command generates a password.
*
* @return String bulk-string-reply 64 bytes string password representing 256 bits of pseudorandom data.
*/
Executions<String> aclGenpass();
/**
* The command generates a password.
*
* @param bits amount of bits
* @return String bulk-string-reply N/4 bytes string password representing N bits of pseudorandom data.
*/
Executions<String> aclGenpass(int bits);
/**
* The command returns all the rules defined for an existing ACL user.
*
* @param username the specified username
* @return Map<String, Object> a map of ACL rule definitions for the user.
*/
Executions<List<Object>> aclGetuser(String username);
/**
* The command shows the currently active ACL rules in the Redis server.
*
* @return List<String> a list of strings.
*/
Executions<List<String>> aclList();
/**
* When Redis is configured to use an ACL file (with the aclfile configuration option), this command will reload the ACLs
* from the file, replacing all the current ACL rules with the ones defined in the file.
*
* @return String simple-string-reply OK or error message.
*/
Executions<String> aclLoad();
/**
* The command shows a list of recent ACL security events.
*
* @return List<Map<K,Object>> list of security events.
*/
Executions<List<Map<String, Object>>> aclLog();
/**
* The command shows a list of recent ACL security events.
*
* @param count max count of events
* @return List<Map<K, Object>> list of security events.
*/
Executions<List<Map<String, Object>>> aclLog(int count);
/**
* The command clears ACL security events.
*
* @return String simple-string-reply OK if the security log was cleared.
*/
Executions<String> aclLogReset();
/**
* When Redis is configured to use an ACL file (with the aclfile configuration option), this command will save the currently
* defined ACLs from the server memory to the ACL file.
*
* @return String simple-string-reply OK or error message.
*/
Executions<String> aclSave();
/**
* Create an ACL user with the specified rules or modify the rules of an existing user.
*
* @param username the specified username
* @param setuserArgs rules
* @return String simple-string-reply OK or error message.
*/
Executions<String> aclSetuser(String username, AclSetuserArgs setuserArgs);
/**
* The command shows a list of all the usernames of the currently configured users in the Redis ACL system.
*
* @return List<K> a list of usernames.
*/
Executions<List<String>> aclUsers();
/**
* The command shows a list of all the usernames of the currently configured users in the Redis ACL system.
*
* @return K bulk-string-reply the username of the current connection.
*/
Executions<String> aclWhoami();
}
| NodeSelectionAclCommands |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java | {
"start": 1583,
"end": 11800
} | enum ____ implements LongBitFormat.Enum {
MODE(null, 16),
GROUP(MODE.BITS, 24),
USER(GROUP.BITS, 24);
final LongBitFormat BITS;
private PermissionStatusFormat(LongBitFormat previous, int length) {
BITS = new LongBitFormat(name(), previous, length, 0);
}
static String getUser(long permission) {
final int n = (int)USER.BITS.retrieve(permission);
String s = SerialNumberManager.USER.getString(n);
assert s != null;
return s;
}
static String getGroup(long permission) {
final int n = (int)GROUP.BITS.retrieve(permission);
return SerialNumberManager.GROUP.getString(n);
}
static short getMode(long permission) {
return (short)MODE.BITS.retrieve(permission);
}
/** Encode the {@link PermissionStatus} to a long. */
static long toLong(PermissionStatus ps) {
long permission = 0L;
final int user = SerialNumberManager.USER.getSerialNumber(
ps.getUserName());
assert user != 0;
permission = USER.BITS.combine(user, permission);
// ideally should assert on group but inodes are created with null
// group and then updated only when added to a directory.
final int group = SerialNumberManager.GROUP.getSerialNumber(
ps.getGroupName());
permission = GROUP.BITS.combine(group, permission);
final int mode = ps.getPermission().toShort();
permission = MODE.BITS.combine(mode, permission);
return permission;
}
static PermissionStatus toPermissionStatus(long id,
SerialNumberManager.StringTable stringTable) {
int uid = (int)USER.BITS.retrieve(id);
int gid = (int)GROUP.BITS.retrieve(id);
return new PermissionStatus(
SerialNumberManager.USER.getString(uid, stringTable),
SerialNumberManager.GROUP.getString(gid, stringTable),
new FsPermission(getMode(id)));
}
@Override
public int getLength() {
return BITS.getLength();
}
}
/** The inode id. */
final private long id;
/**
* The inode name is in java UTF8 encoding;
* The name in HdfsFileStatus should keep the same encoding as this.
* if this encoding is changed, implicitly getFileInfo and listStatus in
* clientProtocol are changed; The decoding at the client
* side should change accordingly.
*/
private byte[] name = null;
/**
* Permission encoded using {@link PermissionStatusFormat}.
* Codes other than {@link #clonePermissionStatus(INodeWithAdditionalFields)}
* and {@link #updatePermissionStatus(PermissionStatusFormat, long)}
* should not modify it.
*/
private long permission = 0L;
/** The last modification time*/
private long modificationTime = 0L;
/** The last access time*/
private long accessTime = 0L;
/** For implementing {@link LinkedElement}. */
private LinkedElement next = null;
/** An array {@link Feature}s. */
private static final Feature[] EMPTY_FEATURE = new Feature[0];
protected Feature[] features = EMPTY_FEATURE;
private INodeWithAdditionalFields(INode parent, long id, byte[] name,
long permission, long modificationTime, long accessTime) {
super(parent);
this.id = id;
this.name = name;
this.permission = permission;
this.modificationTime = modificationTime;
this.accessTime = accessTime;
}
INodeWithAdditionalFields(long id, byte[] name, PermissionStatus permissions,
long modificationTime, long accessTime) {
this(null, id, name, PermissionStatusFormat.toLong(permissions),
modificationTime, accessTime);
}
/** @param other Other node to be copied */
INodeWithAdditionalFields(INodeWithAdditionalFields other) {
this(other.getParentReference() != null ? other.getParentReference()
: other.getParent(), other.getId(), other.getLocalNameBytes(),
other.permission, other.modificationTime, other.accessTime);
}
@Override
public void setNext(LinkedElement next) {
this.next = next;
}
@Override
public LinkedElement getNext() {
return next;
}
/** Get inode id */
@Override
public final long getId() {
return this.id;
}
@Override
public final byte[] getLocalNameBytes() {
return name;
}
@Override
public final void setLocalName(byte[] name) {
this.name = name;
}
/** Clone the {@link PermissionStatus}. */
final void clonePermissionStatus(INodeWithAdditionalFields that) {
this.permission = that.permission;
}
@Override
public final PermissionStatus getPermissionStatus(int snapshotId) {
return new PermissionStatus(getUserName(snapshotId), getGroupName(snapshotId),
getFsPermission(snapshotId));
}
private final void updatePermissionStatus(PermissionStatusFormat f, long n) {
this.permission = f.BITS.combine(n, permission);
}
@Override
final String getUserName(int snapshotId) {
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
return getSnapshotINode(snapshotId).getUserName();
}
return PermissionStatusFormat.getUser(permission);
}
@Override
final void setUser(String user) {
int n = SerialNumberManager.USER.getSerialNumber(user);
updatePermissionStatus(PermissionStatusFormat.USER, n);
}
@Override
final String getGroupName(int snapshotId) {
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
return getSnapshotINode(snapshotId).getGroupName();
}
return PermissionStatusFormat.getGroup(permission);
}
@Override
final void setGroup(String group) {
int n = SerialNumberManager.GROUP.getSerialNumber(group);
updatePermissionStatus(PermissionStatusFormat.GROUP, n);
}
@Override
final FsPermission getFsPermission(int snapshotId) {
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
return getSnapshotINode(snapshotId).getFsPermission();
}
return new FsPermission(getFsPermissionShort());
}
@Override
public final short getFsPermissionShort() {
return PermissionStatusFormat.getMode(permission);
}
@Override
void setPermission(FsPermission permission) {
final short mode = permission.toShort();
updatePermissionStatus(PermissionStatusFormat.MODE, mode);
}
@Override
public long getPermissionLong() {
return permission;
}
@Override
public final AclFeature getAclFeature(int snapshotId) {
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
return getSnapshotINode(snapshotId).getAclFeature();
}
return getFeature(AclFeature.class);
}
@Override
final long getModificationTime(int snapshotId) {
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
return getSnapshotINode(snapshotId).getModificationTime();
}
return this.modificationTime;
}
/** Update modification time if it is larger than the current value. */
@Override
public final INode updateModificationTime(long mtime, int latestSnapshotId) {
Preconditions.checkState(isDirectory());
if (mtime <= modificationTime) {
return this;
}
return setModificationTime(mtime, latestSnapshotId);
}
final void cloneModificationTime(INodeWithAdditionalFields that) {
this.modificationTime = that.modificationTime;
}
@Override
public final void setModificationTime(long modificationTime) {
this.modificationTime = modificationTime;
}
@Override
final long getAccessTime(int snapshotId) {
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
return getSnapshotINode(snapshotId).getAccessTime();
}
return accessTime;
}
/**
* Set last access time of inode.
*/
@Override
public final void setAccessTime(long accessTime) {
this.accessTime = accessTime;
}
protected void addFeature(Feature f) {
int size = features.length;
Feature[] arr = new Feature[size + 1];
if (size != 0) {
System.arraycopy(features, 0, arr, 0, size);
}
arr[size] = f;
features = arr;
}
protected void removeFeature(Feature f) {
int size = features.length;
if (size == 0) {
throwFeatureNotFoundException(f);
}
if (size == 1) {
if (features[0] != f) {
throwFeatureNotFoundException(f);
}
features = EMPTY_FEATURE;
return;
}
Feature[] arr = new Feature[size - 1];
int j = 0;
boolean overflow = false;
for (Feature f1 : features) {
if (f1 != f) {
if (j == size - 1) {
overflow = true;
break;
} else {
arr[j++] = f1;
}
}
}
if (overflow || j != size - 1) {
throwFeatureNotFoundException(f);
}
features = arr;
}
private void throwFeatureNotFoundException(Feature f) {
throw new IllegalStateException(
"Feature " + f.getClass().getSimpleName() + " not found.");
}
protected <T extends Feature> T getFeature(Class<? extends Feature> clazz) {
Preconditions.checkArgument(clazz != null);
final int size = features.length;
for (int i=0; i < size; i++) {
Feature f = features[i];
if (clazz.isAssignableFrom(f.getClass())) {
@SuppressWarnings("unchecked")
T ret = (T) f;
return ret;
}
}
return null;
}
public void removeAclFeature() {
AclFeature f = getAclFeature();
Preconditions.checkNotNull(f);
removeFeature(f);
AclStorage.removeAclFeature(f);
}
public void addAclFeature(AclFeature f) {
AclFeature f1 = getAclFeature();
if (f1 != null)
throw new IllegalStateException("Duplicated ACLFeature");
addFeature(AclStorage.addAclFeature(f));
}
@Override
XAttrFeature getXAttrFeature(int snapshotId) {
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
return getSnapshotINode(snapshotId).getXAttrFeature();
}
return getFeature(XAttrFeature.class);
}
@Override
public void removeXAttrFeature() {
XAttrFeature f = getXAttrFeature();
Preconditions.checkNotNull(f);
removeFeature(f);
}
@Override
public void addXAttrFeature(XAttrFeature f) {
XAttrFeature f1 = getXAttrFeature();
Preconditions.checkState(f1 == null, "Duplicated XAttrFeature");
addFeature(f);
}
public final Feature[] getFeatures() {
return features;
}
}
| PermissionStatusFormat |
java | apache__kafka | core/src/main/java/kafka/server/QuotaFactory.java | {
"start": 1506,
"end": 5774
} | class ____ {
public static final ReplicaQuota UNBOUNDED_QUOTA = new ReplicaQuota() {
@Override
public boolean isThrottled(TopicPartition topicPartition) {
return false;
}
@Override
public boolean isQuotaExceeded() {
return false;
}
@Override
public void record(long value) {
// No-op
}
};
public record QuotaManagers(ClientQuotaManager fetch,
ClientQuotaManager produce,
ClientRequestQuotaManager request,
ControllerMutationQuotaManager controllerMutation,
ReplicationQuotaManager leader,
ReplicationQuotaManager follower,
ReplicationQuotaManager alterLogDirs,
Optional<Plugin<ClientQuotaCallback>> clientQuotaCallbackPlugin) {
public void shutdown() {
fetch.shutdown();
produce.shutdown();
request.shutdown();
controllerMutation.shutdown();
clientQuotaCallbackPlugin.ifPresent(plugin -> Utils.closeQuietly(plugin, "client quota callback plugin"));
}
}
public static QuotaManagers instantiate(
KafkaConfig cfg,
Metrics metrics,
Time time,
String threadNamePrefix,
String role
) {
Optional<Plugin<ClientQuotaCallback>> clientQuotaCallbackPlugin = createClientQuotaCallback(cfg, metrics, role);
return new QuotaManagers(
new ClientQuotaManager(clientConfig(cfg), metrics, QuotaType.FETCH, time, threadNamePrefix, clientQuotaCallbackPlugin),
new ClientQuotaManager(clientConfig(cfg), metrics, QuotaType.PRODUCE, time, threadNamePrefix, clientQuotaCallbackPlugin),
new ClientRequestQuotaManager(clientConfig(cfg), metrics, time, threadNamePrefix, clientQuotaCallbackPlugin),
new ControllerMutationQuotaManager(clientControllerMutationConfig(cfg), metrics, time, threadNamePrefix, clientQuotaCallbackPlugin),
new ReplicationQuotaManager(replicationConfig(cfg), metrics, QuotaType.LEADER_REPLICATION, time),
new ReplicationQuotaManager(replicationConfig(cfg), metrics, QuotaType.FOLLOWER_REPLICATION, time),
new ReplicationQuotaManager(alterLogDirsReplicationConfig(cfg), metrics, QuotaType.ALTER_LOG_DIRS_REPLICATION, time),
clientQuotaCallbackPlugin
);
}
private static Optional<Plugin<ClientQuotaCallback>> createClientQuotaCallback(
KafkaConfig cfg,
Metrics metrics,
String role
) {
ClientQuotaCallback clientQuotaCallback = cfg.getConfiguredInstance(
QuotaConfig.CLIENT_QUOTA_CALLBACK_CLASS_CONFIG, ClientQuotaCallback.class);
return clientQuotaCallback == null ? Optional.empty() : Optional.of(Plugin.wrapInstance(
clientQuotaCallback,
metrics,
QuotaConfig.CLIENT_QUOTA_CALLBACK_CLASS_CONFIG,
"role", role
));
}
private static ClientQuotaManagerConfig clientConfig(KafkaConfig cfg) {
return new ClientQuotaManagerConfig(
cfg.quotaConfig().numQuotaSamples(),
cfg.quotaConfig().quotaWindowSizeSeconds()
);
}
private static ClientQuotaManagerConfig clientControllerMutationConfig(KafkaConfig cfg) {
return new ClientQuotaManagerConfig(
cfg.quotaConfig().numControllerQuotaSamples(),
cfg.quotaConfig().controllerQuotaWindowSizeSeconds()
);
}
private static ReplicationQuotaManagerConfig replicationConfig(KafkaConfig cfg) {
return new ReplicationQuotaManagerConfig(
cfg.quotaConfig().numReplicationQuotaSamples(),
cfg.quotaConfig().replicationQuotaWindowSizeSeconds()
);
}
private static ReplicationQuotaManagerConfig alterLogDirsReplicationConfig(KafkaConfig cfg) {
return new ReplicationQuotaManagerConfig(
cfg.quotaConfig().numAlterLogDirsReplicationQuotaSamples(),
cfg.quotaConfig().alterLogDirsReplicationQuotaWindowSizeSeconds()
);
}
}
| QuotaFactory |
java | micronaut-projects__micronaut-core | inject-java/src/test/groovy/io/micronaut/inject/qualifiers/replaces/defaultimpl/F.java | {
"start": 767,
"end": 793
} | interface ____ extends E {
}
| F |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/plugins/PluginIntrospectorTests.java | {
"start": 2534,
"end": 2756
} | class ____ extends Plugin implements BarActionPlugin {}
assertThat(pluginIntrospector.interfaces(FooPlugin.class), contains("ActionPlugin"));
}
public void testInterfacesInterfaceExtends2() {
| FooPlugin |
java | apache__dubbo | dubbo-metrics/dubbo-metrics-api/src/main/java/org/apache/dubbo/metrics/aggregate/TimeWindowCounter.java | {
"start": 1019,
"end": 2344
} | class ____ {
private final LongAdderSlidingWindow slidingWindow;
public TimeWindowCounter(int bucketNum, long timeWindowSeconds) {
this.slidingWindow = new LongAdderSlidingWindow(bucketNum, TimeUnit.SECONDS.toMillis(timeWindowSeconds));
}
public double get() {
double result = 0.0;
List<LongAdder> windows = this.slidingWindow.values();
for (LongAdder window : windows) {
result += window.sum();
}
return result;
}
public long bucketLivedSeconds() {
return TimeUnit.MILLISECONDS.toSeconds(
this.slidingWindow.values().size() * this.slidingWindow.getPaneIntervalInMs());
}
public long bucketLivedMillSeconds() {
return this.slidingWindow.getIntervalInMs()
- (System.currentTimeMillis() - this.slidingWindow.currentPane().getEndInMs());
}
public void increment() {
this.increment(1L);
}
public void increment(Long step) {
this.slidingWindow.currentPane().getValue().add(step);
}
public void decrement() {
this.decrement(1L);
}
public void decrement(Long step) {
this.slidingWindow.currentPane().getValue().add(-step);
}
/**
* Sliding window of type LongAdder.
*/
private static | TimeWindowCounter |
java | elastic__elasticsearch | libs/logstash-bridge/src/main/java/org/elasticsearch/logstashbridge/core/RefCountingRunnableBridge.java | {
"start": 1689,
"end": 2258
} | class ____ extends StableBridgeAPI.ProxyInternal<RefCountingRunnable> implements RefCountingRunnableBridge {
private ProxyInternal(final RefCountingRunnable delegate) {
super(delegate);
}
@Override
public void close() {
toInternal().close();
}
@Override
public ReleasableBridge acquire() {
@SuppressWarnings("resource")
final Releasable releasable = toInternal().acquire();
return ReleasableBridge.fromInternal(releasable);
}
}
}
| ProxyInternal |
java | quarkusio__quarkus | extensions/tls-registry/cli/src/main/java/io/quarkus/tls/cli/letsencrypt/LetsEncryptHelpers.java | {
"start": 1181,
"end": 12004
} | class ____ {
static System.Logger LOGGER = System.getLogger("lets-encrypt");
public static void writePrivateKeyAndCertificateChainsAsPem(PrivateKey pk, X509Certificate[] chain, File privateKeyFile,
File certificateChainFile) throws Exception {
if (pk == null) {
throw new IllegalArgumentException("The private key cannot be null");
}
if (chain == null || chain.length == 0) {
throw new IllegalArgumentException("The certificate chain cannot be null or empty");
}
CertificateUtils.writePrivateKeyToPem(pk, null, privateKeyFile);
if (chain.length == 1) {
CertificateUtils.writeCertificateToPEM(chain[0], certificateChainFile);
return;
}
// For some reason the method from CertificateUtils distinguishes the first certificate and the rest of the chain
X509Certificate[] restOfTheChain = new X509Certificate[chain.length - 1];
System.arraycopy(chain, 1, restOfTheChain, 0, chain.length - 1);
CertificateUtils.writeCertificateToPEM(chain[0], certificateChainFile, restOfTheChain);
}
public static X509Certificate loadCertificateFromPEM(String pemFilePath) throws IOException, CertificateException {
try (PemReader pemReader = new PemReader(new FileReader(pemFilePath))) {
PemObject pemObject = pemReader.readPemObject();
if (pemObject == null) {
throw new IOException("Invalid PEM file: No PEM content found.");
}
byte[] content = pemObject.getContent();
CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
return (X509Certificate) certificateFactory.generateCertificate(new ByteArrayInputStream(content));
}
}
public static String createAccount(AcmeClient acmeClient,
String letsEncryptPath,
boolean staging,
String contactEmail) {
LOGGER.log(INFO, "\uD83D\uDD35 Creating {0} Let's Encrypt account", (staging ? "staging" : "production"));
AcmeAccount acmeAccount = AcmeAccount.builder()
.setTermsOfServiceAgreed(true)
.setServerUrl("https://acme-v02.api.letsencrypt.org/directory") // TODO Should this be configurable?
.setStagingServerUrl("https://acme-staging-v02.api.letsencrypt.org/directory") // TODO Should this be configurable?
.setContactUrls(new String[] { "mailto:" + contactEmail })
.build();
try {
if (!acmeClient.createAccount(acmeAccount, staging)) {
LOGGER.log(INFO, "\uD83D\uDD35 {0} Let's Encrypt account {1} already exists",
(staging ? "Staging" : "Production"),
contactEmail);
} else {
LOGGER.log(INFO, "\uD83D\uDD35 {0} Let's Encrypt account {1} has been created",
(staging ? "Staging" : "Production"),
contactEmail);
}
} catch (AcmeException ex) {
LOGGER.log(ERROR, "⚠\uFE0F Failed to create Let's Encrypt account");
throw new RuntimeException(ex);
}
JsonObject accountJson = convertAccountToJson(acmeAccount);
saveAccount(letsEncryptPath, accountJson);
return accountJson.encode();
}
private static JsonObject convertAccountToJson(AcmeAccount acmeAccount) {
JsonObject json = new JsonObject();
json.put("account-url", acmeAccount.getAccountUrl());
json.put("contact-url", acmeAccount.getContactUrls()[0]);
if (acmeAccount.getPrivateKey() != null) {
json.put("private-key", new String(Base64.getEncoder().encode(acmeAccount.getPrivateKey().getEncoded()),
StandardCharsets.US_ASCII));
}
if (acmeAccount.getCertificate() != null) {
try {
json.put("certificate", new String(Base64.getEncoder().encode(acmeAccount.getCertificate().getEncoded()),
StandardCharsets.US_ASCII));
} catch (CertificateEncodingException ex) {
LOGGER.log(INFO, "⚠\uFE0F Failed to get encoded certificate data");
throw new RuntimeException(ex);
}
}
if (acmeAccount.getKeyAlgorithmName() != null) {
json.put("key-algorithm", acmeAccount.getKeyAlgorithmName());
}
json.put("key-size", acmeAccount.getKeySize());
return json;
}
private static void saveAccount(String letsEncryptPath, JsonObject accountJson) {
LOGGER.log(DEBUG, "Saving account to {0}", letsEncryptPath);
// If more than one account must be supported, we can save accounts to unique files in .lets-encrypt/accounts
// and require an account alias/id during operations requiring an account
java.nio.file.Path accountPath = Paths.get(letsEncryptPath + "/account.json");
try {
Files.copy(new ByteArrayInputStream(accountJson.encode().getBytes(StandardCharsets.US_ASCII)), accountPath,
StandardCopyOption.REPLACE_EXISTING);
} catch (IOException ex) {
throw new RuntimeException("Failure to save the account", ex);
}
}
public static void issueCertificate(
AcmeClient acmeClient,
File letsEncryptPath,
boolean staging,
String domain,
File certChainPemLoc,
File privateKeyPemLoc) {
AcmeAccount acmeAccount = getAccount(letsEncryptPath);
X509CertificateChainAndSigningKey certChainAndPrivateKey;
try {
certChainAndPrivateKey = acmeClient.obtainCertificateChain(acmeAccount, staging, domain);
} catch (AcmeException t) {
throw new RuntimeException(t.getMessage());
}
LOGGER.log(INFO, "\uD83D\uDD35 Certificate and private key issued, converting them to PEM files");
try {
LetsEncryptHelpers.writePrivateKeyAndCertificateChainsAsPem(certChainAndPrivateKey.getSigningKey(),
certChainAndPrivateKey.getCertificateChain(), privateKeyPemLoc, certChainPemLoc);
} catch (Exception ex) {
throw new RuntimeException("Failure to copy certificate pem");
}
}
private static AcmeAccount getAccount(File letsEncryptPath) {
LOGGER.log(DEBUG, "Getting account from {0}", letsEncryptPath);
JsonObject json = readAccountJson(letsEncryptPath);
AcmeAccount.Builder builder = AcmeAccount.builder().setTermsOfServiceAgreed(true)
.setServerUrl("https://acme-v02.api.letsencrypt.org/directory")
.setStagingServerUrl("https://acme-staging-v02.api.letsencrypt.org/directory");
String keyAlgorithm = json.getString("key-algorithm");
builder.setKeyAlgorithmName(keyAlgorithm);
builder.setKeySize(json.getInteger("key-size"));
if (json.containsKey("private-key") && json.containsKey("certificate")) {
PrivateKey privateKey = getPrivateKey(json.getString("private-key"), keyAlgorithm);
X509Certificate certificate = getCertificate(json.getString("certificate"));
builder.setKey(certificate, privateKey);
}
AcmeAccount acmeAccount = builder.build();
acmeAccount.setContactUrls(new String[] { json.getString("contact-url") });
acmeAccount.setAccountUrl(json.getString("account-url"));
return acmeAccount;
}
private static JsonObject readAccountJson(File letsEncryptPath) {
LOGGER.log(DEBUG, "Reading account information from {0}", letsEncryptPath);
java.nio.file.Path accountPath = Paths.get(letsEncryptPath + "/account.json");
try (FileInputStream fis = new FileInputStream(accountPath.toString())) {
return new JsonObject(new String(fis.readAllBytes(), StandardCharsets.US_ASCII));
} catch (IOException e) {
throw new RuntimeException("Unable to read the account file, you must create account first");
}
}
private static X509Certificate getCertificate(String encodedCert) {
try {
byte[] encodedBytes = Base64.getDecoder().decode(encodedCert);
return (X509Certificate) CertificateFactory.getInstance("X.509")
.generateCertificate(new ByteArrayInputStream(encodedBytes));
} catch (Exception ex) {
throw new RuntimeException("Failure to create a certificate", ex);
}
}
private static PrivateKey getPrivateKey(String encodedKey, String keyAlgorithm) {
try {
KeyFactory f = KeyFactory.getInstance((keyAlgorithm == null || "RSA".equals(keyAlgorithm) ? "RSA" : "EC"));
byte[] encodedBytes = Base64.getDecoder().decode(encodedKey);
PKCS8EncodedKeySpec spec = new PKCS8EncodedKeySpec(encodedBytes);
return f.generatePrivate(spec);
} catch (Exception ex) {
throw new RuntimeException("Failure to create a private key", ex);
}
}
public static void renewCertificate(AcmeClient acmeClient,
File letsEncryptPath,
boolean staging,
String domain,
File certChainPemLoc,
File privateKeyPemLoc) {
LOGGER.log(INFO, "\uD83D\uDD35 Renewing {0} Let's Encrypt certificate chain and private key",
(staging ? "staging" : "production"));
issueCertificate(acmeClient, letsEncryptPath, staging, domain, certChainPemLoc, privateKeyPemLoc);
}
public static void deactivateAccount(AcmeClient acmeClient, File letsEncryptPath, boolean staging) throws IOException {
AcmeAccount acmeAccount = getAccount(letsEncryptPath);
LOGGER.log(INFO, "Deactivating {0} Let's Encrypt account", (staging ? "staging" : "production"));
acmeClient.deactivateAccount(acmeAccount, staging);
LOGGER.log(INFO, "Removing account file from {0}", letsEncryptPath);
java.nio.file.Path accountPath = Paths.get(letsEncryptPath + "/account.json");
Files.deleteIfExists(accountPath);
}
public static void adjustPermissions(File certFile, File keyFile) {
if (!certFile.setReadable(true, false)) {
LOGGER.log(ERROR, "Failed to set certificate file readable");
}
if (!certFile.setWritable(true, true)) {
LOGGER.log(ERROR, "Failed to set certificate file as not writable");
}
if (!keyFile.setReadable(true, false)) {
LOGGER.log(ERROR, "Failed to set key file as readable");
}
if (!keyFile.setWritable(true, true)) {
LOGGER.log(ERROR, "Failed to set key file as not writable");
}
}
}
| LetsEncryptHelpers |
java | apache__kafka | connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java | {
"start": 36952,
"end": 51231
} | class ____ another thread that
// require acquiring a lock. This leads to deadlock. Instead, just copy the info we need and process
// the updates after unlocking.
connectorConfigUpdatesCopy.set(connectorConfigUpdates);
connectorConfigUpdates = new HashSet<>();
}
if (!connectorTargetStateChanges.isEmpty()) {
// Similarly for target state changes which can cause connectors to be restarted
connectorTargetStateChangesCopy.set(connectorTargetStateChanges);
connectorTargetStateChanges = new HashSet<>();
}
if (!taskConfigUpdates.isEmpty()) {
// Similarly for task config updates
taskConfigUpdatesCopy.set(taskConfigUpdates);
taskConfigUpdates = new HashSet<>();
}
} else {
log.trace("Skipping config updates with incremental cooperative rebalancing "
+ "since no config rebalance is required "
+ "and there are no connector config, task config, or target state changes pending");
}
return retValue;
}
private void processConnectorConfigUpdates(Set<String> connectorConfigUpdates) {
// If we only have connector config updates, we can just bounce the updated connectors that are
// currently assigned to this worker.
Set<String> localConnectors = assignment == null ? Set.of() : new HashSet<>(assignment.connectors());
Collection<Callable<Void>> connectorsToStart = new ArrayList<>();
log.trace("Processing connector config updates; "
+ "currently-owned connectors are {}, and to-be-updated connectors are {}",
localConnectors,
connectorConfigUpdates);
for (String connectorName : connectorConfigUpdates) {
if (!localConnectors.contains(connectorName)) {
log.trace("Skipping config update for connector {} as it is not owned by this worker",
connectorName);
continue;
}
boolean remains = configState.contains(connectorName);
log.info("Handling connector-only config update by {} connector {}",
remains ? "restarting" : "stopping", connectorName);
try (TickThreadStage stage = new TickThreadStage("stopping connector " + connectorName)) {
worker.stopAndAwaitConnector(connectorName);
}
// The update may be a deletion, so verify we actually need to restart the connector
if (remains) {
connectorsToStart.add(getConnectorStartingCallable(connectorName));
}
}
String stageDescription = "restarting " + connectorsToStart.size() + " reconfigured connectors";
startAndStop(connectorsToStart, stageDescription);
}
private void processTargetStateChanges(Set<String> connectorTargetStateChanges) {
log.trace("Processing target state updates; "
+ "currently-known connectors are {}, and to-be-updated connectors are {}",
configState.connectors(), connectorTargetStateChanges);
for (String connector : connectorTargetStateChanges) {
TargetState targetState = configState.targetState(connector);
if (!configState.connectors().contains(connector)) {
log.debug("Received target state change for unknown connector: {}", connector);
continue;
}
// we must propagate the state change to the worker so that the connector's
// tasks can transition to the new target state
worker.setTargetState(connector, targetState, (error, newState) -> {
if (error != null) {
log.error("Failed to transition connector to target state", error);
return;
}
// additionally, if the worker is running the connector itself, then we need to
// request reconfiguration to ensure that config changes while paused take effect
if (newState == TargetState.STARTED) {
requestTaskReconfiguration(connector);
}
});
}
}
private void processTaskConfigUpdatesWithIncrementalCooperative(Set<ConnectorTaskId> taskConfigUpdates) {
Set<ConnectorTaskId> localTasks = assignment == null
? Set.of()
: new HashSet<>(assignment.tasks());
log.trace("Processing task config updates with incremental cooperative rebalance protocol; "
+ "currently-owned tasks are {}, and to-be-updated tasks are {}",
localTasks, taskConfigUpdates);
Set<String> connectorsWhoseTasksToStop = taskConfigUpdates.stream()
.map(ConnectorTaskId::connector).collect(Collectors.toSet());
stopReconfiguredTasks(connectorsWhoseTasksToStop);
}
private void stopReconfiguredTasks(Set<String> connectors) {
Set<ConnectorTaskId> localTasks = assignment == null
? Set.of()
: new HashSet<>(assignment.tasks());
List<ConnectorTaskId> tasksToStop = localTasks.stream()
.filter(taskId -> connectors.contains(taskId.connector()))
.collect(Collectors.toList());
if (tasksToStop.isEmpty()) {
// The rest of the method would essentially be a no-op so this isn't strictly necessary,
// but it prevents an unnecessary log message from being emitted
return;
}
log.info("Handling task config update by stopping tasks {}, which will be restarted after rebalance if still assigned to this worker", tasksToStop);
try (TickThreadStage stage = new TickThreadStage("stopping " + tasksToStop.size() + " reconfigured tasks")) {
worker.stopAndAwaitTasks(tasksToStop);
}
tasksToRestart.addAll(tasksToStop);
}
/**
* Perform an orderly shutdown when triggered via {@link #stop()}
*/
// public for testing
public void halt() {
synchronized (this) {
// Clean up any connectors and tasks that are still running.
log.info("Stopping connectors and tasks that are still assigned to this worker.");
worker.stopAndAwaitConnectors();
worker.stopAndAwaitTasks();
// Explicitly fail any outstanding requests so they actually get a response and get an
// understandable reason for their failure.
DistributedHerderRequest request = requests.pollFirst();
while (request != null) {
request.callback().onCompletion(new ConnectException("Worker is shutting down"), null);
request = requests.pollFirst();
}
stopServices();
}
}
@Override
protected void stopServices() {
try {
super.stopServices();
} finally {
closeResources();
}
}
/**
* Close resources managed by this herder but which are not explicitly started.
*/
private void closeResources() {
Utils.closeQuietly(member::stop, "worker group member");
Utils.closeQuietly(herderMetrics::close, "herder metrics");
this.uponShutdown.forEach(closeable -> Utils.closeQuietly(closeable, closeable != null ? closeable.toString() : "<unknown>"));
}
// Timeout for herderExecutor to gracefully terminate is set to a value to accommodate
// reading to the end of the config topic + successfully attempting to stop all connectors and tasks and a buffer of 10s
private long herderExecutorTimeoutMs() {
return this.workerSyncTimeoutMs +
config.getLong(DistributedConfig.TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_CONFIG) +
Worker.CONNECTOR_GRACEFUL_SHUTDOWN_TIMEOUT_MS + 10000;
}
@Override
public void stop() {
log.info("Herder stopping");
stopping.set(true);
member.wakeup();
ThreadUtils.shutdownExecutorServiceQuietly(herderExecutor, herderExecutorTimeoutMs(), TimeUnit.MILLISECONDS);
ThreadUtils.shutdownExecutorServiceQuietly(forwardRequestExecutor, FORWARD_REQUEST_SHUTDOWN_TIMEOUT_MS, TimeUnit.MILLISECONDS);
ThreadUtils.shutdownExecutorServiceQuietly(startAndStopExecutor, START_AND_STOP_SHUTDOWN_TIMEOUT_MS, TimeUnit.MILLISECONDS);
log.info("Herder stopped");
}
@Override
public void healthCheck(Callback<Void> callback) {
addRequest(
() -> {
callback.onCompletion(null, null);
return null;
},
forwardErrorAndTickThreadStages(callback)
);
}
@Override
public void connectors(final Callback<Collection<String>> callback) {
log.trace("Submitting connector listing request");
addRequest(
() -> {
if (!checkRebalanceNeeded(callback))
callback.onCompletion(null, configState.connectors());
return null;
},
forwardErrorAndTickThreadStages(callback)
);
}
@Override
public void connectorInfo(final String connName, final Callback<ConnectorInfo> callback) {
log.trace("Submitting connector info request {}", connName);
addRequest(
() -> {
if (checkRebalanceNeeded(callback))
return null;
if (!configState.contains(connName)) {
callback.onCompletion(
new NotFoundException("Connector " + connName + " not found"), null);
} else {
callback.onCompletion(null, connectorInfo(connName));
}
return null;
},
forwardErrorAndTickThreadStages(callback)
);
}
@Override
protected Map<String, String> rawConfig(String connName) {
return configState.rawConnectorConfig(connName);
}
@Override
public void connectorConfig(String connName, final Callback<Map<String, String>> callback) {
log.trace("Submitting connector config read request {}", connName);
super.connectorConfig(connName, callback);
}
@Override
public void deleteConnectorConfig(final String connName, final Callback<Created<ConnectorInfo>> callback) {
addRequest(
() -> {
log.trace("Handling connector config request {}", connName);
if (!isLeader()) {
callback.onCompletion(new NotLeaderException("Only the leader can delete connector configs.", leaderUrl()), null);
return null;
}
if (!configState.contains(connName)) {
callback.onCompletion(new NotFoundException("Connector " + connName + " not found"), null);
} else {
log.trace("Removing connector config {} {}", connName, configState.connectors());
writeToConfigTopicAsLeader(
"removing the config for connector " + connName + " from the config topic",
() -> configBackingStore.removeConnectorConfig(connName)
);
callback.onCompletion(null, new Created<>(false, null));
}
return null;
},
forwardErrorAndTickThreadStages(callback)
);
}
@Override
protected Map<String, ConfigValue> validateSinkConnectorConfig(SinkConnector connector, ConfigDef configDef, Map<String, String> config) {
Map<String, ConfigValue> result = super.validateSinkConnectorConfig(connector, configDef, config);
validateSinkConnectorGroupId(config, result);
return result;
}
@Override
protected Map<String, ConfigValue> validateSourceConnectorConfig(SourceConnector connector, ConfigDef configDef, Map<String, String> config) {
Map<String, ConfigValue> result = super.validateSourceConnectorConfig(connector, configDef, config);
validateSourceConnectorExactlyOnceSupport(config, result, connector);
validateSourceConnectorTransactionBoundary(config, result, connector);
return result;
}
private void validateSinkConnectorGroupId(Map<String, String> config, Map<String, ConfigValue> validatedConfig) {
String overriddenConsumerGroupIdConfig = CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX + GROUP_ID_CONFIG;
if (config.containsKey(overriddenConsumerGroupIdConfig)) {
String consumerGroupId = config.get(overriddenConsumerGroupIdConfig);
ConfigValue validatedGroupId = validatedConfig.computeIfAbsent(
overriddenConsumerGroupIdConfig,
p -> new ConfigValue(overriddenConsumerGroupIdConfig, consumerGroupId, List.of(), new ArrayList<>())
);
if (workerGroupId.equals(consumerGroupId)) {
validatedGroupId.addErrorMessage("Consumer group " + consumerGroupId +
" conflicts with Connect worker group " + workerGroupId);
}
} else {
ConfigValue validatedName = validatedConfig.get(ConnectorConfig.NAME_CONFIG);
String name = (String) validatedName.value();
if (workerGroupId.equals(SinkUtils.consumerGroupId(name))) {
validatedName.addErrorMessage("Consumer group for sink connector named " + name +
" conflicts with Connect worker group " + workerGroupId);
}
}
}
private void validateSourceConnectorExactlyOnceSupport(
Map<String, String> rawConfig,
Map<String, ConfigValue> validatedConfig,
SourceConnector connector) {
ConfigValue validatedExactlyOnceSupport = validatedConfig.get(SourceConnectorConfig.EXACTLY_ONCE_SUPPORT_CONFIG);
if (validatedExactlyOnceSupport.errorMessages().isEmpty()) {
// Should be safe to parse the | from |
java | apache__camel | components/camel-google/camel-google-drive/src/test/java/org/apache/camel/component/google/drive/DriveRevisionsIT.java | {
"start": 1329,
"end": 1584
} | class ____ com.google.api.services.drive.Drive$Revisions APIs.
*/
@EnabledIf(value = "org.apache.camel.component.google.drive.AbstractGoogleDriveTestSupport#hasCredentials",
disabledReason = "Google Drive credentials were not provided")
public | for |
java | apache__flink | flink-datastream-api/src/main/java/org/apache/flink/datastream/api/function/ApplyPartitionFunction.java | {
"start": 1181,
"end": 1519
} | interface ____<OUT> extends Function {
/**
* The actual method to be applied to each partition.
*
* @param collector to output data.
* @param ctx runtime context in which this function is executed.
*/
void apply(Collector<OUT> collector, PartitionedContext<OUT> ctx) throws Exception;
}
| ApplyPartitionFunction |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/core/MaybeConverter.java | {
"start": 971,
"end": 1300
} | interface ____<@NonNull T, @NonNull R> {
/**
* Applies a function to the upstream {@link Maybe} and returns a converted value of type {@code R}.
*
* @param upstream the upstream {@code Maybe} instance
* @return the converted value
*/
@NonNull
R apply(@NonNull Maybe<T> upstream);
}
| MaybeConverter |
java | square__okhttp | samples/guide/src/main/java/okhttp3/recipes/RequestBodyCompression.java | {
"start": 2692,
"end": 3183
} | class ____ implements Interceptor {
@Override public Response intercept(Chain chain) throws IOException {
Request originalRequest = chain.request();
if (originalRequest.body() == null || originalRequest.header("Content-Encoding") != null) {
return chain.proceed(originalRequest);
}
Request compressedRequest = originalRequest.newBuilder()
.gzip()
.build();
return chain.proceed(compressedRequest);
}
}
}
| GzipRequestInterceptor |
java | google__truth | core/src/test/java/com/google/common/truth/ExpectFailureWithStackTraceTest.java | {
"start": 1553,
"end": 2387
} | class ____ implements TestRule {
final Expect delegate = Expect.create();
@Override
public Statement apply(Statement base, Description description) {
Statement s = delegate.apply(base, description);
return new Statement() {
@Override
public void evaluate() throws Throwable {
AssertionError e = assertThrows(AssertionError.class, () -> s.evaluate());
// Check that error message contains stack traces. Method name should appear twice,
// once for each expect error.
int firstIndex = e.getMessage().indexOf(METHOD_NAME);
assertThat(firstIndex).isGreaterThan(0);
int secondIndex = e.getMessage().indexOf(METHOD_NAME, firstIndex + 1);
assertThat(secondIndex).isGreaterThan(firstIndex);
}
};
}
}
}
| FailingExpect |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/DeadLetterChannelHandleNewExceptionTest.java | {
"start": 973,
"end": 2472
} | class ____ extends ContextTestSupport {
// should not log any exceptions in the log as they are all handled
@Override
public boolean isUseRouteBuilder() {
return false;
}
@Test
public void testDeadLetterChannelHandleNewException() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
errorHandler(deadLetterChannel("mock:dead").deadLetterHandleNewException(true));
from("direct:start").log("Incoming ${body}").throwException(new IllegalArgumentException("Forced"));
}
});
context.start();
getMockEndpoint("mock:dead").expectedMessageCount(1);
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
}
@Test
public void testDeadLetterChannelNotHandleNewException() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
errorHandler(deadLetterChannel("mock:dead").deadLetterHandleNewException(false));
from("direct:start").log("Incoming ${body}").throwException(new IllegalArgumentException("Forced"));
}
});
context.start();
getMockEndpoint("mock:dead").expectedMessageCount(1);
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
}
}
| DeadLetterChannelHandleNewExceptionTest |
java | apache__flink | flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/stream/PartitionCommitter.java | {
"start": 8665,
"end": 9726
} | class ____ implements PartitionCommitPolicy.Context {
private final List<String> partitionValues;
private final Path partitionPath;
private CommitPolicyContextImpl(List<String> partitionValues, Path partitionPath) {
this.partitionValues = partitionValues;
this.partitionPath = partitionPath;
}
@Override
public String catalogName() {
return tableIdentifier.getCatalogName();
}
@Override
public String databaseName() {
return tableIdentifier.getDatabaseName();
}
@Override
public String tableName() {
return tableIdentifier.getObjectName();
}
@Override
public List<String> partitionKeys() {
return partitionKeys;
}
@Override
public List<String> partitionValues() {
return partitionValues;
}
@Override
public Path partitionPath() {
return partitionPath;
}
}
}
| CommitPolicyContextImpl |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/task/OrderedTaskDecorator.java | {
"start": 869,
"end": 1206
} | class ____ implements TaskDecorator, Ordered {
private final int order;
OrderedTaskDecorator() {
this(0);
}
OrderedTaskDecorator(int order) {
this.order = order;
}
@Override
public int getOrder() {
return this.order;
}
@Override
public Runnable decorate(Runnable runnable) {
return runnable;
}
}
| OrderedTaskDecorator |
java | elastic__elasticsearch | plugins/store-smb/src/internalClusterTest/java/org/elasticsearch/index/store/smb/SmbNIOFSTests.java | {
"start": 572,
"end": 823
} | class ____ extends AbstractAzureFsTestCase {
@Override
public Settings indexSettings() {
return Settings.builder().put(super.indexSettings()).put("index.store.type", randomFrom("smb_simple_fs", "smb_nio_fs")).build();
}
}
| SmbNIOFSTests |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/AnnotationBeanNameGeneratorTests.java | {
"start": 6922,
"end": 7017
} | class ____ {
}
@Component("myComponent")
@Service("myComponent")
static | ComponentWithBlankName |
java | hibernate__hibernate-orm | hibernate-spatial/src/test/java/org/hibernate/spatial/integration/Model.java | {
"start": 520,
"end": 1219
} | enum ____ {
JTSMODEL(
JtsGeomEntity.class,
JTS::to,
org.locationtech.jts.geom.Geometry.class
),
GLMODEL(
GeomEntity.class,
geom -> geom,
Geometry.class
);
/**
* Test Entity class
*/
public final Class<?> entityClass;
/**
* How to translate from Geolatte Geometry to the geometry type
* expected by the entity in this model
*/
public final Function<Geometry, Object> from;
/**
* The geometry type in this model
*/
public final Class<?> geometryClass;
Model(
Class<?> entityClass,
Function<Geometry, Object> from,
Class<?> geometryClass
) {
this.entityClass = entityClass;
this.from = from;
this.geometryClass = geometryClass;
}
}
| Model |
java | hibernate__hibernate-orm | hibernate-jcache/src/main/java/org/hibernate/cache/jcache/internal/JCacheAccessImpl.java | {
"start": 448,
"end": 1558
} | class ____ implements DomainDataStorageAccess {
private final Cache underlyingCache;
public JCacheAccessImpl(Cache underlyingCache) {
this.underlyingCache = underlyingCache;
}
public Cache getUnderlyingCache() {
return underlyingCache;
}
@Override
public boolean contains(Object key) {
return underlyingCache.containsKey( key );
}
@Override
public Object getFromCache(Object key, SharedSessionContractImplementor session) {
return underlyingCache.get( key );
}
@Override
public void putIntoCache(Object key, Object value, SharedSessionContractImplementor session) {
underlyingCache.put( key, value );
}
@Override
public void removeFromCache(Object key, SharedSessionContractImplementor session) {
underlyingCache.remove( key );
}
@Override
public void evictData(Object key) {
underlyingCache.remove( key );
}
@Override
public void clearCache(SharedSessionContractImplementor session) {
underlyingCache.clear();
}
@Override
public void evictData() {
underlyingCache.clear();
}
@Override
public void release() {
underlyingCache.close();
}
}
| JCacheAccessImpl |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/script/TermsSetQueryScript.java | {
"start": 3740,
"end": 3980
} | interface ____ {
TermsSetQueryScript newInstance(LeafReaderContext ctx) throws IOException;
}
/**
* A factory to construct stateful {@link TermsSetQueryScript} factories for a specific index.
*/
public | LeafFactory |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/vectors/DenseVectorQuery.java | {
"start": 6199,
"end": 7161
} | class ____ extends Scorer {
private final VectorScorer vectorScorer;
private final DocIdSetIterator iterator;
private final float boost;
DenseVectorScorer(VectorScorer vectorScorer, float boost) {
this.vectorScorer = vectorScorer;
this.iterator = vectorScorer.iterator();
this.boost = boost;
}
@Override
public DocIdSetIterator iterator() {
return vectorScorer.iterator();
}
@Override
public float getMaxScore(int i) throws IOException {
// TODO: can we optimize this at all?
return Float.POSITIVE_INFINITY;
}
@Override
public float score() throws IOException {
assert iterator.docID() != -1;
return vectorScorer.score() * boost;
}
@Override
public int docID() {
return iterator.docID();
}
}
}
| DenseVectorScorer |
java | qos-ch__slf4j | slf4j-api/src/main/java/org/slf4j/MDC.java | {
"start": 2357,
"end": 2438
} | class ____ static.
*
* @author Ceki Gülcü
* @since 1.4.1
*/
public | are |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/selector/BasicContextSelector.java | {
"start": 1202,
"end": 3046
} | class ____ implements ContextSelector {
private final Lazy<LoggerContext> defaultLoggerContext = Lazy.lazy(() -> new LoggerContext("Default"));
@Override
public void shutdown(
final String fqcn, final ClassLoader loader, final boolean currentContext, final boolean allContexts) {
final LoggerContext ctx = getContext(fqcn, loader, currentContext);
if (ctx != null && ctx.isStarted()) {
ctx.stop(DEFAULT_STOP_TIMEOUT, TimeUnit.MILLISECONDS);
}
}
@Override
public boolean hasContext(final String fqcn, final ClassLoader loader, final boolean currentContext) {
final LoggerContext ctx = getContext(fqcn, loader, currentContext);
return ctx != null && ctx.isStarted();
}
@Override
public LoggerContext getContext(final String fqcn, final ClassLoader loader, final boolean currentContext) {
final LoggerContext ctx = ContextAnchor.THREAD_CONTEXT.get();
return ctx != null ? ctx : defaultLoggerContext.get();
}
@Override
public LoggerContext getContext(
final String fqcn, final ClassLoader loader, final boolean currentContext, final URI configLocation) {
final LoggerContext ctx = ContextAnchor.THREAD_CONTEXT.get();
return ctx != null ? ctx : defaultLoggerContext.get();
}
public LoggerContext locateContext(final String name, final String configLocation) {
return defaultLoggerContext.get();
}
@Override
public void removeContext(final LoggerContext context) {
// does not remove anything
}
@Override
public boolean isClassLoaderDependent() {
return false;
}
@Override
public List<LoggerContext> getLoggerContexts() {
return Collections.singletonList(defaultLoggerContext.get());
}
}
| BasicContextSelector |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/JSONReaderScannerTest__entity_stringList.java | {
"start": 328,
"end": 1368
} | class ____ extends TestCase {
public void test_scanInt() throws Exception {
StringBuffer buf = new StringBuffer();
buf.append('[');
for (int i = 0; i < 10; ++i) {
if (i != 0) {
buf.append(',');
}
//1000000000000
//
buf.append("{\"id\":[\"" + i + "\",\"" + (10000 + i) + "\"]}");
}
buf.append(']');
Reader reader = new StringReader(buf.toString());
JSONReaderScanner scanner = new JSONReaderScanner(reader);
DefaultJSONParser parser = new DefaultJSONParser(scanner);
List<VO> array = parser.parseArray(VO.class);
for (int i = 0; i < array.size(); ++i) {
Assert.assertEquals(2, array.get(i).getId().size());
Assert.assertEquals(Integer.toString(i), array.get(i).getId().get(0));
Assert.assertEquals(Integer.toString(10000 + i), array.get(i).getId().get(1));
}
}
public static | JSONReaderScannerTest__entity_stringList |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/join/TestJoinProperties.java | {
"start": 1648,
"end": 5717
} | class ____ {
private static MiniDFSCluster cluster = null;
final static int SOURCES = 3;
final static int ITEMS = (SOURCES + 1) * (SOURCES + 1);
static int[][] source = new int[SOURCES][];
static Path[] src;
static Path base;
@BeforeAll
public static void setUp() throws Exception {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
base = cluster.getFileSystem().makeQualified(new Path("/nested"));
src = generateSources(conf);
}
@AfterAll
public static void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
// Sources from 0 to srcs-2 have IntWritable key and IntWritable value
// src-1 source has IntWritable key and LongWritable value.
private static SequenceFile.Writer[] createWriters(Path testdir,
Configuration conf, int srcs, Path[] src) throws IOException {
for (int i = 0; i < srcs; ++i) {
src[i] = new Path(testdir, Integer.toString(i + 10, 36));
}
SequenceFile.Writer out[] = new SequenceFile.Writer[srcs];
for (int i = 0; i < srcs - 1; ++i) {
out[i] = new SequenceFile.Writer(testdir.getFileSystem(conf), conf,
src[i], IntWritable.class, IntWritable.class);
}
out[srcs - 1] = new SequenceFile.Writer(testdir.getFileSystem(conf), conf,
src[srcs - 1], IntWritable.class, LongWritable.class);
return out;
}
private static String stringify(IntWritable key, Writable val) {
StringBuilder sb = new StringBuilder();
sb.append("(" + key);
sb.append("," + val + ")");
return sb.toString();
}
private static Path[] generateSources(Configuration conf)
throws IOException {
for (int i = 0; i < SOURCES; ++i) {
source[i] = new int[ITEMS];
for (int j = 0; j < ITEMS; ++j) {
source[i][j] = (i + 2) * (j + 1);
}
}
Path[] src = new Path[SOURCES];
SequenceFile.Writer out[] = createWriters(base, conf, SOURCES, src);
IntWritable k = new IntWritable();
for (int i = 0; i < SOURCES; ++i) {
Writable v;
if (i != SOURCES -1) {
v = new IntWritable();
((IntWritable)v).set(i);
} else {
v = new LongWritable();
((LongWritable)v).set(i);
}
for (int j = 0; j < ITEMS; ++j) {
k.set(source[i][j]);
out[i].append(k, v);
}
out[i].close();
}
return src;
}
private String A() {
return CompositeInputFormat.compose(SequenceFileInputFormat.class,
src[0].toString());
}
private String B() {
return CompositeInputFormat.compose(SequenceFileInputFormat.class,
src[1].toString());
}
private String C() {
return CompositeInputFormat.compose(SequenceFileInputFormat.class,
src[2].toString());
}
// construct op(op(A,B),C)
private String constructExpr1(String op) {
StringBuilder sb = new StringBuilder();
sb.append(op + "(" +op +"(");
sb.append(A());
sb.append(",");
sb.append(B());
sb.append("),");
sb.append(C());
sb.append(")");
return sb.toString();
}
// construct op(A,op(B,C))
private String constructExpr2(String op) {
StringBuilder sb = new StringBuilder();
sb.append(op + "(");
sb.append(A());
sb.append(",");
sb.append(op +"(");
sb.append(B());
sb.append(",");
sb.append(C());
sb.append("))");
return sb.toString();
}
// construct op(A, B, C))
private String constructExpr3(String op) {
StringBuilder sb = new StringBuilder();
sb.append(op + "(");
sb.append(A());
sb.append(",");
sb.append(B());
sb.append(",");
sb.append(C());
sb.append(")");
return sb.toString();
}
// construct override(inner(A, B), A)
private String constructExpr4() {
StringBuilder sb = new StringBuilder();
sb.append("override(inner(");
sb.append(A());
sb.append(",");
sb.append(B());
sb.append("),");
sb.append(A());
sb.append(")");
return sb.toString();
}
| TestJoinProperties |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java | {
"start": 1480,
"end": 11433
} | class ____ extends StreamOutput implements org.elasticsearch.xpack.esql.core.util.PlanStreamOutput {
/**
* max number of attributes that can be cached for serialization
* <p>
* TODO should this be a cluster setting...?
*/
protected static final int MAX_SERIALIZED_ATTRIBUTES = 1_000_000;
/**
* Cache of written blocks. We use an {@link IdentityHashMap} for this
* because calculating the {@link Object#hashCode} of a {@link Block}
* is slow. And so is {@link Object#equals}. So, instead we just use
* object identity.
*/
private final Map<Block, BytesReference> cachedBlocks = new IdentityHashMap<>();
/**
* Cache for field attributes.
* Field attributes can be a significant part of the query execution plan, especially
* for queries like `from *`, that can have thousands of output columns.
* Attributes can be shared by many plan nodes (eg. ExcahngeSink output, Project output, EsRelation fields);
* in addition, multiple Attributes can share the same parent field.
* This cache allows to send each attribute only once; from the second occurrence, only an id will be sent
*/
protected final Map<Attribute, Integer> cachedAttributes = new IdentityHashMap<>();
/**
* Cache for EsFields.
*/
protected final Map<EsField, Integer> cachedEsFields = new IdentityHashMap<>();
protected final Map<String, Integer> stringCache = new HashMap<>();
private final StreamOutput delegate;
private int nextCachedBlock = 0;
private final int maxSerializedAttributes;
public PlanStreamOutput(StreamOutput delegate, @Nullable Configuration configuration) throws IOException {
this(delegate, configuration, MAX_SERIALIZED_ATTRIBUTES);
}
public PlanStreamOutput(StreamOutput delegate, @Nullable Configuration configuration, int maxSerializedAttributes) throws IOException {
this.delegate = delegate;
if (configuration != null) {
for (Map.Entry<String, Map<String, Column>> table : configuration.tables().entrySet()) {
for (Map.Entry<String, Column> column : table.getValue().entrySet()) {
cachedBlocks.put(column.getValue().values(), fromConfigKey(table.getKey(), column.getKey()));
}
}
}
this.maxSerializedAttributes = maxSerializedAttributes;
}
@Override
public void writeByte(byte b) throws IOException {
delegate.writeByte(b);
}
@Override
public void writeBytes(byte[] b, int offset, int length) throws IOException {
delegate.writeBytes(b, offset, length);
}
@Override
public void flush() throws IOException {
delegate.flush();
}
@Override
public void close() throws IOException {
delegate.close();
stringCache.clear();
cachedEsFields.clear();
cachedAttributes.clear();
}
@Override
public TransportVersion getTransportVersion() {
return delegate.getTransportVersion();
}
@Override
public void setTransportVersion(TransportVersion version) {
delegate.setTransportVersion(version);
super.setTransportVersion(version);
}
/**
* Write a {@link Block} as part of the plan.
* <p>
* These {@link Block}s are not tracked by {@link BlockFactory} and closing them
* does nothing so they should be small. We do make sure not to send duplicates,
* reusing blocks sent as part of the {@link Configuration#tables()} if
* possible, otherwise sending a {@linkplain Block} inline.
* </p>
*/
public void writeCachedBlock(Block block) throws IOException {
assert block instanceof LongBigArrayBlock == false : "BigArrays not supported because we don't close";
assert block instanceof IntBigArrayBlock == false : "BigArrays not supported because we don't close";
assert block instanceof DoubleBigArrayBlock == false : "BigArrays not supported because we don't close";
assert block instanceof BooleanBigArrayBlock == false : "BigArrays not supported because we don't close";
BytesReference key = cachedBlocks.get(block);
if (key != null) {
key.writeTo(this);
return;
}
writeByte(NEW_BLOCK_KEY);
writeVInt(nextCachedBlock);
cachedBlocks.put(block, fromPreviousKey(nextCachedBlock));
Block.writeTypedBlock(block, this);
nextCachedBlock++;
}
@Override
public boolean writeAttributeCacheHeader(Attribute attribute) throws IOException {
Integer cacheId = attributeIdFromCache(attribute);
if (cacheId != null) {
writeZLong(cacheId);
return false;
}
cacheId = cacheAttribute(attribute);
writeZLong(-1 - cacheId);
return true;
}
private Integer attributeIdFromCache(Attribute attr) {
return cachedAttributes.get(attr);
}
private int cacheAttribute(Attribute attr) {
if (cachedAttributes.containsKey(attr)) {
throw new IllegalArgumentException("Attribute already present in the serialization cache [" + attr + "]");
}
int id = cachedAttributes.size();
if (id >= maxSerializedAttributes) {
throw new InvalidArgumentException("Limit of the number of serialized attributes exceeded [{}]", maxSerializedAttributes);
}
cachedAttributes.put(attr, id);
return id;
}
@Override
public boolean writeEsFieldCacheHeader(EsField field) throws IOException {
Integer cacheId = esFieldIdFromCache(field);
if (cacheId != null) {
writeZLong(cacheId);
return false;
}
cacheId = cacheEsField(field);
writeZLong(-1 - cacheId);
writeCachedString(field.getWriteableName());
return true;
}
/**
* Writes a string caching it, ie. the second time the same string is written, only a small, numeric ID will be sent.
* This should be used only to serialize recurring strings.
*
* Values serialized with this method have to be deserialized with {@link PlanStreamInput#readCachedString()}
*/
@Override
public void writeCachedString(String string) throws IOException {
Integer cacheId = stringCache.get(string);
if (cacheId != null) {
writeZLong(cacheId);
return;
}
cacheId = stringCache.size();
if (cacheId >= maxSerializedAttributes) {
throw new InvalidArgumentException("Limit of the number of serialized strings exceeded [{}]", maxSerializedAttributes);
}
stringCache.put(string, cacheId);
writeZLong(-1 - cacheId);
writeString(string);
}
@Override
public void writeOptionalCachedString(String str) throws IOException {
if (str == null) {
writeBoolean(false);
} else {
writeBoolean(true);
writeCachedString(str);
}
}
private Integer esFieldIdFromCache(EsField field) {
return cachedEsFields.get(field);
}
private int cacheEsField(EsField attr) {
if (cachedEsFields.containsKey(attr)) {
throw new IllegalArgumentException("EsField already present in the serialization cache [" + attr + "]");
}
int id = cachedEsFields.size();
if (id >= maxSerializedAttributes) {
throw new InvalidArgumentException("Limit of the number of serialized EsFields exceeded [{}]", maxSerializedAttributes);
}
cachedEsFields.put(attr, id);
return id;
}
/**
* The byte representing a {@link Block} sent for the first time. The byte
* will be followed by a {@link StreamOutput#writeVInt} encoded identifier
* and then the contents of the {@linkplain Block} will immediately follow
* this byte.
*/
static final byte NEW_BLOCK_KEY = 0;
/**
* The byte representing a {@link Block} that has previously been sent.
* This byte will be followed up a {@link StreamOutput#writeVInt} encoded
* identifier pointing to the block to read.
*/
static final byte FROM_PREVIOUS_KEY = 1;
/**
* The byte representing a {@link Block} that was part of the
* {@link Configuration#tables()} map. It is followed a string for
* the table name and then a string for the column name.
*/
static final byte FROM_CONFIG_KEY = 2;
/**
* Build the key for reading a {@link Block} from the cache of previously
* received {@linkplain Block}s.
*/
static BytesReference fromPreviousKey(int id) throws IOException {
try (BytesStreamOutput key = new BytesStreamOutput()) {
key.writeByte(FROM_PREVIOUS_KEY);
key.writeVInt(id);
return key.bytes();
}
}
/**
* Build the key for reading a {@link Block} from the {@link Configuration}.
* This is important because some operations like {@code LOOKUP} frequently read
* {@linkplain Block}s directly from the configuration.
* <p>
* It'd be possible to implement this by adding all of the Blocks as "previous"
* keys in the constructor and never use this construct at all, but that'd
* require there be a consistent ordering of Blocks there. We could make one,
* but I'm afraid that'd be brittle as we evolve the code. It'd make wire
* compatibility difficult. This signal is much simpler to deal with even though
* it is more bytes over the wire.
* </p>
*/
static BytesReference fromConfigKey(String table, String column) throws IOException {
try (BytesStreamOutput key = new BytesStreamOutput()) {
key.writeByte(FROM_CONFIG_KEY);
key.writeString(table);
key.writeString(column);
return key.bytes();
}
}
}
| PlanStreamOutput |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/test/MockLog.java | {
"start": 7709,
"end": 8357
} | class ____ extends AbstractEventExpectation {
public SeenEventExpectation(String name, String logger, Level level, String message) {
super(name, logger, level, message);
}
@Override
public void assertMatched() {
assertThat("expected to see " + name + " but did not", seenLatch.getCount(), equalTo(0L));
}
@Override
public void awaitMatched(long millis) throws InterruptedException {
assertThat("expected to see " + name + " but did not", seenLatch.await(millis, TimeUnit.MILLISECONDS), equalTo(true));
}
}
public static | SeenEventExpectation |
java | spring-projects__spring-security | web/src/test/java/org/springframework/security/web/authentication/SimpleUrlAuthenticationFailureHandlerTests.java | {
"start": 1158,
"end": 3791
} | class ____ {
@Test
public void error401IsReturnedIfNoUrlIsSet() throws Exception {
SimpleUrlAuthenticationFailureHandler afh = new SimpleUrlAuthenticationFailureHandler();
RedirectStrategy rs = mock(RedirectStrategy.class);
afh.setRedirectStrategy(rs);
assertThat(afh.getRedirectStrategy()).isSameAs(rs);
MockHttpServletRequest request = new MockHttpServletRequest();
MockHttpServletResponse response = new MockHttpServletResponse();
afh.onAuthenticationFailure(request, response, mock(AuthenticationException.class));
assertThat(response.getStatus()).isEqualTo(401);
}
@Test
public void exceptionIsSavedToSessionOnRedirect() throws Exception {
SimpleUrlAuthenticationFailureHandler afh = new SimpleUrlAuthenticationFailureHandler();
afh.setDefaultFailureUrl("/target");
MockHttpServletRequest request = new MockHttpServletRequest();
MockHttpServletResponse response = new MockHttpServletResponse();
AuthenticationException e = mock(AuthenticationException.class);
afh.onAuthenticationFailure(request, response, e);
assertThat(request.getSession().getAttribute(WebAttributes.AUTHENTICATION_EXCEPTION)).isSameAs(e);
assertThat(response.getRedirectedUrl()).isEqualTo("/target");
}
@Test
public void exceptionIsNotSavedIfAllowSessionCreationIsFalse() throws Exception {
SimpleUrlAuthenticationFailureHandler afh = new SimpleUrlAuthenticationFailureHandler("/target");
afh.setAllowSessionCreation(false);
assertThat(afh.isAllowSessionCreation()).isFalse();
MockHttpServletRequest request = new MockHttpServletRequest();
MockHttpServletResponse response = new MockHttpServletResponse();
afh.onAuthenticationFailure(request, response, mock(AuthenticationException.class));
assertThat(request.getSession(false)).isNull();
}
// SEC-462
@Test
public void responseIsForwardedIfUseForwardIsTrue() throws Exception {
SimpleUrlAuthenticationFailureHandler afh = new SimpleUrlAuthenticationFailureHandler("/target");
afh.setUseForward(true);
assertThat(afh.isUseForward()).isTrue();
MockHttpServletRequest request = new MockHttpServletRequest();
MockHttpServletResponse response = new MockHttpServletResponse();
AuthenticationException e = mock(AuthenticationException.class);
afh.onAuthenticationFailure(request, response, e);
assertThat(request.getSession(false)).isNull();
assertThat(response.getRedirectedUrl()).isNull();
assertThat(response.getForwardedUrl()).isEqualTo("/target");
// Request scope should be used for forward
assertThat(request.getAttribute(WebAttributes.AUTHENTICATION_EXCEPTION)).isSameAs(e);
}
}
| SimpleUrlAuthenticationFailureHandlerTests |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/time/JavaInstantGetSecondsGetNanoTest.java | {
"start": 4631,
"end": 5145
} | class ____ {
public static void foo(Instant instant) {
// BUG: Diagnostic contains: JavaInstantGetSecondsGetNano
int nanos = instant.getNano();
}
}
""")
.doTest();
}
@Test
public void getNanoInMethodGetSecondsInClassVariable() {
compilationHelper
.addSourceLines(
"test/TestCase.java",
"""
package test;
import java.time.Instant;
public | TestCase |
java | spring-projects__spring-security | buildSrc/src/main/java/s101/S101PluginExtension.java | {
"start": 1040,
"end": 4088
} | class ____ {
private final Property<String> licenseId;
private final Property<String> repository;
private final Property<String> version;
private final Property<File> installationDirectory;
private final Property<File> configurationDirectory;
private final Property<String> label;
@Input
public Property<String> getLicenseId() {
return this.licenseId;
}
public void setLicenseId(String licenseId) {
this.licenseId.set(licenseId);
}
@InputDirectory
public Property<File> getInstallationDirectory() {
return this.installationDirectory;
}
public void setInstallationDirectory(String installationDirectory) {
this.installationDirectory.set(new File(installationDirectory));
}
@InputDirectory
public Property<File> getConfigurationDirectory() {
return this.configurationDirectory;
}
public void setConfigurationDirectory(String configurationDirectory) {
this.configurationDirectory.set(new File(configurationDirectory));
}
@Input
public Property<String> getLabel() {
return this.label;
}
public void setLabel(String label) {
this.label.set(label);
}
@Input
public Property<String> getRepository() {
return repository;
}
public void setRepository(String repository) {
this.repository.set(repository);
}
@Input
public Property<String> getVersion() {
return this.version;
}
public void setVersion(String version) {
this.version.set(version);
}
public S101PluginExtension(Project project) {
this.licenseId = project.getObjects().property(String.class);
if (project.hasProperty("s101.licenseId")) {
setLicenseId((String) project.findProperty("s101.licenseId"));
}
this.installationDirectory = project.getObjects().property(File.class)
.convention(new File(project.getBuildDir(), "s101"));
this.configurationDirectory = project.getObjects().property(File.class)
.convention(new File(project.getProjectDir(), "s101"));
this.label = project.getObjects().property(String.class);
if (project.hasProperty("s101.label")) {
setLabel((String) project.findProperty("s101.label"));
}
this.repository = project.getObjects().property(String.class);
if (project.hasProperty("s101.repository")) {
setRepository((String) project.findProperty("s101.repository"));
} else {
setRepository("https://structure101.com/binaries/v6");
}
this.version = project.getObjects().property(String.class);
if (project.hasProperty("s101.version")) {
setVersion((String) project.findProperty("s101.version"));
} else {
try (final WebClient webClient = new WebClient()) {
HtmlPage page = webClient.getPage(getRepository().get());
Matcher matcher = null;
for (HtmlAnchor anchor : page.getAnchors()) {
Matcher candidate = Pattern.compile("(structure101-build-java-all-)(.*).zip").matcher(anchor.getHrefAttribute());
if (candidate.find()) {
matcher = candidate;
}
}
if (matcher != null) {
setVersion(matcher.group(2));
}
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
}
}
| S101PluginExtension |
java | apache__flink | flink-filesystems/flink-s3-fs-base/src/test/java/org/apache/flink/fs/s3/common/MinioTestContainerTest.java | {
"start": 1543,
"end": 4380
} | class ____ {
private static final String DEFAULT_BUCKET_NAME = "test-bucket";
@RegisterExtension
private static final EachCallbackWrapper<TestContainerExtension<MinioTestContainer>>
MINIO_EXTENSION =
new EachCallbackWrapper<>(
new TestContainerExtension<>(
() -> new MinioTestContainer(DEFAULT_BUCKET_NAME)));
private static MinioTestContainer getTestContainer() {
return MINIO_EXTENSION.getCustomExtension().getTestContainer();
}
private static AmazonS3 getClient() {
return getTestContainer().getClient();
}
@Test
void testBucketCreation() {
final String bucketName = "other-bucket";
final Bucket otherBucket = getClient().createBucket(bucketName);
assertThat(otherBucket).isNotNull();
assertThat(otherBucket).extracting(Bucket::getName).isEqualTo(bucketName);
assertThat(getClient().listBuckets())
.map(Bucket::getName)
.containsExactlyInAnyOrder(getTestContainer().getDefaultBucketName(), bucketName);
}
@Test
void testPutObject() throws IOException {
final String bucketName = "other-bucket";
getClient().createBucket(bucketName);
final String objectId = "test-object";
final String content = "test content";
getClient().putObject(bucketName, objectId, content);
final BufferedReader reader =
new BufferedReader(
new InputStreamReader(
getClient().getObject(bucketName, objectId).getObjectContent()));
assertThat(reader.readLine()).isEqualTo(content);
}
@Test
void testSetS3ConfigOptions() {
final Configuration config = new Configuration();
getTestContainer().setS3ConfigOptions(config);
assertThat(config.containsKey("s3.endpoint")).isTrue();
assertThat(config.containsKey("s3.path.style.access")).isTrue();
assertThat(config.containsKey("s3.access-key")).isTrue();
assertThat(config.containsKey("s3.secret-key")).isTrue();
}
@Test
void testGetDefaultBucketName() {
assertThat(getTestContainer().getDefaultBucketName()).isEqualTo(DEFAULT_BUCKET_NAME);
}
@Test
void testDefaultBucketCreation() {
assertThat(getClient().listBuckets())
.singleElement()
.extracting(Bucket::getName)
.isEqualTo(getTestContainer().getDefaultBucketName());
}
@Test
void testS3EndpointNeedsToBeSpecifiedBeforeInitializingFileSyste() {
assertThatThrownBy(() -> getTestContainer().initializeFileSystem(new Configuration()))
.isInstanceOf(IllegalArgumentException.class);
}
}
| MinioTestContainerTest |
java | netty__netty | handler/src/test/java/io/netty/handler/ssl/MockAlternativeKeyProvider.java | {
"start": 8135,
"end": 8380
} | class ____ extends MockSignature {
public MockSha1WithRsaSignature() throws NoSuchAlgorithmException, NoSuchProviderException {
super("SHA1withRSA", "SunRsaSign");
}
}
public static final | MockSha1WithRsaSignature |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/lucene/queries/InetAddressRandomBinaryDocValuesRangeQueryTests.java | {
"start": 713,
"end": 2093
} | class ____ extends BaseRandomBinaryDocValuesRangeQueryTestCase {
@Override
protected String fieldName() {
return "ip_range_dv_field";
}
@Override
protected RangeType rangeType() {
return RangeType.IP;
}
@Override
protected Range nextRange(int dimensions) throws Exception {
InetAddress min = nextInetaddress();
byte[] bMin = InetAddressPoint.encode(min);
InetAddress max = nextInetaddress();
byte[] bMax = InetAddressPoint.encode(max);
if (Arrays.compareUnsigned(bMin, 0, bMin.length, bMax, 0, bMin.length) > 0) {
return new IpRange(max, min);
}
return new IpRange(min, max);
}
private InetAddress nextInetaddress() throws UnknownHostException {
byte[] b = random().nextBoolean() ? new byte[4] : new byte[16];
switch (random().nextInt(5)) {
case 0:
return InetAddress.getByAddress(b);
case 1:
Arrays.fill(b, (byte) 0xff);
return InetAddress.getByAddress(b);
case 2:
Arrays.fill(b, (byte) 42);
return InetAddress.getByAddress(b);
default:
random().nextBytes(b);
return InetAddress.getByAddress(b);
}
}
private static | InetAddressRandomBinaryDocValuesRangeQueryTests |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/recursive/assertion/RecursiveAssertionDriver_PrimitiveFieldHandlingTest.java | {
"start": 3572,
"end": 3710
} | class ____ extends ClassWithPrimitiveAndObjectField {
private Object anotherObjectField = new Object();
}
}
| SubClassWithAdditionalField |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/streaming/runtime/partitioner/ShufflePartitionerTest.java | {
"start": 1046,
"end": 1824
} | class ____ extends StreamPartitionerTest {
@Override
StreamPartitioner<Tuple> createPartitioner() {
StreamPartitioner<Tuple> partitioner = new ShufflePartitioner<>();
assertThat(partitioner.isBroadcast()).isFalse();
return partitioner;
}
@Test
void testSelectChannelsInterval() {
assertSelectedChannelWithSetup(0, 1);
streamPartitioner.setup(2);
assertThat(streamPartitioner.selectChannel(serializationDelegate))
.isGreaterThanOrEqualTo(0)
.isLessThan(2);
streamPartitioner.setup(1024);
assertThat(streamPartitioner.selectChannel(serializationDelegate))
.isGreaterThanOrEqualTo(0)
.isLessThan(1024);
}
}
| ShufflePartitionerTest |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/window/groupwindow/triggers/ProcessingTimeTriggers.java | {
"start": 6488,
"end": 8315
} | class ____<W extends Window> extends WindowTrigger<W> {
private static final long serialVersionUID = 2369815941792574642L;
/**
* Creates a new {@code Trigger} like the this, except that it fires repeatedly whenever the
* given {@code Trigger} fires before the processing time has passed the end of the window.
*/
public AfterEndOfWindowNoLate<W> withEarlyFirings(Trigger<W> earlyFirings) {
checkNotNull(earlyFirings);
return new AfterEndOfWindowNoLate<>(earlyFirings);
}
@Override
public void open(TriggerContext ctx) throws Exception {
this.ctx = ctx;
}
@Override
public boolean onElement(Object element, long timestamp, W window) throws Exception {
ctx.registerProcessingTimeTimer(triggerTime(window));
return false;
}
@Override
public boolean onProcessingTime(long time, W window) throws Exception {
return time == triggerTime(window);
}
@Override
public boolean onEventTime(long time, W window) throws Exception {
return false;
}
@Override
public void clear(W window) throws Exception {
ctx.deleteProcessingTimeTimer(triggerTime(window));
}
@Override
public boolean canMerge() {
return true;
}
@Override
public void onMerge(W window, OnMergeContext mergeContext) throws Exception {
ctx.registerProcessingTimeTimer(triggerTime(window));
}
@Override
public String toString() {
return TO_STRING;
}
}
/** A composite {@link Trigger} that consist of AfterEndOfWindow and a early trigger. */
public static final | AfterEndOfWindow |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/ConfigurationClassPostProcessorTests.java | {
"start": 77321,
"end": 77664
} | class ____ {
@Bean @Lazy
public DependingFoo foo(BarArgument bar) {
return new DependingFoo(bar);
}
@Bean
public FooFactory fooFactory() {
return new FooFactory() {
@Override
public DependingFoo createFoo(BarArgument bar) {
return foo(bar);
}
};
}
}
@Configuration
static | BeanArgumentConfigWithSingleton |
java | google__dagger | javatests/dagger/internal/codegen/InjectConstructorFactoryGeneratorTest.java | {
"start": 18010,
"end": 18538
} | interface ____ {}");
daggerCompiler(component, usage, genericClass, packagePrivateFoo, packagePrivateBar)
.compile(subject -> subject.hasErrorCount(0));
}
@Test
public void boundedGenerics_withPackagePrivateTypeArgumentAndIntersectionBounds() {
Source component =
CompilerTests.javaSource(
"other.MyComponent",
"package other;",
"",
"import dagger.Component;",
"import test.Usage;",
"",
"@Component",
" | Bar |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/util/MethodInvoker.java | {
"start": 6046,
"end": 6735
} | class ____ was invalid
*/
protected Class<?> resolveClassName(String className) throws ClassNotFoundException {
return ClassUtils.forName(className, ClassUtils.getDefaultClassLoader());
}
/**
* Find a matching method with the specified name for the specified arguments.
* @return a matching method, or {@code null} if none
* @see #getTargetClass()
* @see #getTargetMethod()
* @see #getArguments()
*/
protected @Nullable Method findMatchingMethod() {
String targetMethod = getTargetMethod();
@Nullable Object[] arguments = getArguments();
int argCount = arguments.length;
Class<?> targetClass = getTargetClass();
Assert.state(targetClass != null, "No target | name |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractBondedFSContract.java | {
"start": 1155,
"end": 1462
} | class ____ bonds to a filesystem
* through the configuration.
*
* It looks for a definition of the test filesystem with the key
* derived from "fs.contract.test.fs.%s" -if found the value
* is converted to a URI and used to create a filesystem. If not -the
* tests are not enabled
*/
public abstract | that |
java | google__guice | core/test/com/google/inject/spi/ProviderMethodsTest.java | {
"start": 10437,
"end": 14122
} | class ____ {
@Inject List<? extends CharSequence> charSequences;
@Inject List<? super Integer> numbers;
@Inject Class<?> type;
}
@Test
public void testProviderMethodDependenciesAreExposed() throws Exception {
Module module =
new AbstractModule() {
@Override
protected void configure() {
bind(Integer.class).toInstance(50);
bindConstant().annotatedWith(Names.named("units")).to("Kg");
}
@Provides
@Named("weight")
String provideWeight(Integer count, @Named("units") String units) {
return count + units;
}
};
Injector injector = Guice.createInjector(module);
ProviderInstanceBinding<?> binding =
(ProviderInstanceBinding<?>)
injector.getBinding(Key.get(String.class, Names.named("weight")));
Method method =
module.getClass().getDeclaredMethod("provideWeight", Integer.class, String.class);
InjectionPoint point = new InjectionPoint(TypeLiteral.get(module.getClass()), method, false);
assertEquals(
ImmutableSet.<Dependency<?>>of(
new Dependency<Integer>(point, Key.get(Integer.class), false, 0),
new Dependency<String>(point, Key.get(String.class, Names.named("units")), false, 1)),
binding.getDependencies());
}
@Test
public void testNonModuleProviderMethods() {
final Object methodsObject =
new Object() {
@Provides
@Named("foo")
String provideFoo() {
return "foo-value";
}
};
Module module =
new AbstractModule() {
@Override
protected void configure() {
install(ProviderMethodsModule.forObject(methodsObject));
}
};
Injector injector = Guice.createInjector(module);
Key<String> key = Key.get(String.class, Names.named("foo"));
assertEquals("foo-value", injector.getInstance(key));
// Test the provider method object itself. This makes sure getInstance works, since GIN uses it
List<Element> elements = Elements.getElements(module);
assertEquals(1, elements.size());
Element element = elements.get(0);
assertTrue(
element + " instanceof ProviderInstanceBinding",
element instanceof ProviderInstanceBinding);
ProviderInstanceBinding<?> binding = (ProviderInstanceBinding<?>) element;
jakarta.inject.Provider<?> provider = binding.getUserSuppliedProvider();
assertTrue(provider instanceof ProviderMethod);
assertEquals(methodsObject, ((ProviderMethod) provider).getInstance());
assertSame(provider, binding.getProviderInstance());
}
@Test
public void testVoidProviderMethods() {
try {
Guice.createInjector(
new AbstractModule() {
@Provides
void provideFoo() {}
});
fail();
} catch (CreationException expected) {
assertContains(
expected.getMessage(),
"Provider methods must return a value. Do not return void.",
"at ProviderMethodsTest$14.provideFoo(ProviderMethodsTest.java:");
}
}
@Test
public void testInjectsJustOneLogger() {
AtomicReference<Logger> loggerRef = new AtomicReference<>();
Injector injector = Guice.createInjector(new FooModule(loggerRef));
assertNull(loggerRef.get());
injector.getInstance(Integer.class);
Logger lastLogger = loggerRef.getAndSet(null);
assertNotNull(lastLogger);
injector.getInstance(Integer.class);
assertSame(lastLogger, loggerRef.get());
assertEquals(FooModule.class.getName(), lastLogger.getName());
}
private static | HasWildcardInjection |
java | apache__camel | components/camel-http/src/test/java/org/apache/camel/component/http/HttpNoConnectionRedeliveryTest.java | {
"start": 1535,
"end": 3861
} | class ____ extends BaseHttpTest {
private HttpServer localServer;
@Override
public void setupResources() throws Exception {
localServer = ServerBootstrap.bootstrap()
.setCanonicalHostName("localhost").setHttpProcessor(getBasicHttpProcessor())
.setConnectionReuseStrategy(getConnectionReuseStrategy()).setResponseFactory(getHttpResponseFactory())
.setSslContext(getSSLContext())
.register("/search", new BasicValidationHandler(GET.name(), null, null, getExpectedContent())).create();
localServer.start();
}
@Override
public void cleanupResources() throws Exception {
if (localServer != null) {
localServer.stop();
}
}
@Test
public void httpConnectionOk() {
Exchange exchange = template.request("direct:start", null);
assertExchange(exchange);
}
@Test
@Disabled
public void httpConnectionNotOk() throws Exception {
// stop server so there are no connection
// and wait for it to terminate
localServer.stop();
localServer.awaitTermination(TimeValue.ofSeconds(5));
Exchange exchange = template.request("direct:start", null);
assertTrue(exchange.isFailed());
ConnectException cause = assertIsInstanceOf(ConnectException.class, exchange.getException());
assertTrue(cause.getMessage().contains("failed"));
assertEquals(true, exchange.getIn().getHeader(Exchange.REDELIVERED));
assertEquals(4, exchange.getIn().getHeader(Exchange.REDELIVERY_COUNTER));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.onException(ConnectException.class)
.maximumRedeliveries(4)
.backOffMultiplier(2)
.redeliveryDelay(100)
.maximumRedeliveryDelay(5000)
.useExponentialBackOff()
.end()
.to("http://localhost:" + localServer.getLocalPort()
+ "/search");
}
};
}
}
| HttpNoConnectionRedeliveryTest |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java | {
"start": 8420,
"end": 11734
/**
 * Worker thread that creates SequenceFile "control files" — one per
 * job-history log file — to be consumed later as map-task inputs.  The static
 * createControlFile(...) entry point partitions the listed log files across a
 * small pool of these daemons; each daemon handles its [start, end) slice.
 *
 * Thread-safety: coordination is done through the volatile counters
 * numFinishedThreads / numRunningThreads and a polling loop rather than
 * Thread.join().  See the NOTE(review) comments below.
 */
class ____ extends SubjectInheritingThread {
// Upper bound on the number of concurrent writer threads.
private static final int NUM_CREATE_THREADS = 10;
// Incremented by each daemon when its work() ends; polled by the coordinator.
// NOTE(review): "volatile int ++" is not atomic — two daemons finishing at the
// same instant can lose an increment, which would hang the wait loop below.
// Consider AtomicInteger; left untouched here.
private static volatile int numFinishedThreads;
// Number of daemons actually started (may be < NUM_CREATE_THREADS when there
// are few input files).
private static volatile int numRunningThreads;
// Listing of the job-history log directory; written once by the coordinator,
// then read by all daemons.
private static FileStatus[] jhLogFiles;
FileSystem fs;
int start;
int end;
// Builds a daemon responsible for jhLogFiles indices [start, end).
FileCreateDaemon(FileSystem fs, int start, int end) {
this.fs = fs;
this.start = start;
this.end = end;
}
// Writes one single-record control file per assigned log file.  Any failure
// is logged and aborts this daemon's remaining slice, but the finished
// counter is still bumped so the coordinator can make progress.
public void work() {
try {
for(int i=start; i < end; i++) {
String name = getFileName(i);
Path controlFile = new Path(INPUT_DIR, "in_file_" + name);
SequenceFile.Writer writer = null;
try {
// Each control file holds a single (logFilePath -> 0) record.
writer = SequenceFile.createWriter(fs, fs.getConf(), controlFile,
Text.class, LongWritable.class,
CompressionType.NONE);
String logFile = jhLogFiles[i].getPath().toString();
writer.append(new Text(logFile), new LongWritable(0));
} catch(Exception e) {
// Normalize everything to IOException for the outer handler.
throw new IOException(e);
} finally {
if (writer != null)
writer.close();
writer = null;
}
}
} catch(IOException ex) {
LOG.error("FileCreateDaemon failed.", ex);
}
// See non-atomicity NOTE(review) on the field declaration.
numFinishedThreads++;
}
// Clears INPUT_DIR, partitions the directory listing into roughly equal
// slices, starts one daemon per slice, then busy-waits for completion.
private static void createControlFile(FileSystem fs, Path jhLogDir
) throws IOException {
fs.delete(INPUT_DIR, true);
jhLogFiles = fs.listStatus(jhLogDir);
numFinishedThreads = 0;
try {
int start = 0;
// Ceiling division: slice size so all files are covered.
int step = jhLogFiles.length / NUM_CREATE_THREADS
+ ((jhLogFiles.length % NUM_CREATE_THREADS) > 0 ? 1 : 0);
FileCreateDaemon[] daemons = new FileCreateDaemon[NUM_CREATE_THREADS];
numRunningThreads = 0;
for(int tIdx=0; tIdx < NUM_CREATE_THREADS && start < jhLogFiles.length; tIdx++) {
int end = Math.min(start + step, jhLogFiles.length);
daemons[tIdx] = new FileCreateDaemon(fs, start, end);
start += step;
numRunningThreads++;
}
for(int tIdx=0; tIdx < numRunningThreads; tIdx++) {
daemons[tIdx].start();
}
} finally {
// Poll until every started daemon reports completion, logging progress.
int prevValue = 0;
while(numFinishedThreads < numRunningThreads) {
if(prevValue < numFinishedThreads) {
LOG.info("Finished " + numFinishedThreads + " threads out of " + numRunningThreads);
prevValue = numFinishedThreads;
}
// NOTE(review): InterruptedException is swallowed without re-interrupting
// the thread (Thread.currentThread().interrupt()); an interrupt is
// effectively ignored here.
try {Thread.sleep(500);} catch (InterruptedException e) {}
}
}
}
}
/**
 * Thin logging wrapper around the FileCreateDaemon pool: records start and
 * end of control-file creation for the given job-history log directory.
 *
 * @param fs       filesystem on which the control files are created
 * @param jhLogDir directory containing the job-history logs to index
 * @throws IOException if control-file creation fails
 */
private static void createControlFile(FileSystem fs, Path jhLogDir) throws IOException {
    LOG.info("creating control file: JH log dir = " + jhLogDir);
    FileCreateDaemon.createControlFile(fs, jhLogDir);
    LOG.info("created control file: JH log dir = " + jhLogDir);
}
/** Builds the synthetic input-file name for index {@code fIdx}. */
private static String getFileName(int fIdx) {
    // String concatenation with an int is equivalent to Integer.toString(fIdx).
    return BASE_INPUT_FILE_NAME + fIdx;
}
/**
 * Splits a token of the form {@code KEY="VALUE"} into its components.
 *
 * @param t raw token text
 * @return array whose first element is KEY and whose second is VALUE
 * @throws IOException declared for caller compatibility; not thrown here
 */
private static String [] getKeyValue(String t) throws IOException {
    // Split on the ="  separator or on a bare closing quote.
    return t.split("=\"*|\"");
}
/**
* JobHistory log record.
*/
private static | FileCreateDaemon |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.