language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityTestPlugin.java | {
"start": 3818,
"end": 7490
} | class ____ implements ActionFilter {
@Override
public int order() {
return Integer.MIN_VALUE;
}
@Override
public <Request extends ActionRequest, Response extends ActionResponse> void apply(
Task task,
String action,
Request request,
ActionListener<Response> listener,
ActionFilterChain<Request, Response> chain
) {
if (action.equals(GetInferenceModelAction.INSTANCE.name())) {
assert request instanceof GetInferenceModelAction.Request;
handleGetInferenceModelActionRequest((GetInferenceModelAction.Request) request, listener);
} else if (action.equals(InferenceAction.INSTANCE.name())) {
assert request instanceof InferenceAction.Request;
handleInferenceActionRequest((InferenceAction.Request) request, listener);
} else {
// For any other action than get model and inference, execute normally
chain.proceed(task, action, request, listener);
}
}
@SuppressWarnings("unchecked")
private <Response extends ActionResponse> void handleGetInferenceModelActionRequest(
GetInferenceModelAction.Request request,
ActionListener<Response> listener
) {
String inferenceEntityId = request.getInferenceEntityId();
Integer topN = null;
Matcher extractTopN = Pattern.compile(".*(task-settings-top-\\d+).*").matcher(inferenceEntityId);
if (extractTopN.find()) {
topN = Integer.parseInt(extractTopN.group(1).replaceAll("\\D", ""));
}
ActionResponse response = new GetInferenceModelAction.Response(
List.of(
new ModelConfigurations(
request.getInferenceEntityId(),
request.getTaskType(),
CohereService.NAME,
new CohereRerankServiceSettings("uri", "model", null, CohereServiceSettings.CohereApiVersion.V2),
topN == null ? new EmptyTaskSettings() : new CohereRerankTaskSettings(topN, null, null)
)
)
);
listener.onResponse((Response) response);
}
@SuppressWarnings("unchecked")
private <Response extends ActionResponse> void handleInferenceActionRequest(
InferenceAction.Request request,
ActionListener<Response> listener
) {
Map<String, Object> taskSettings = request.getTaskSettings();
boolean shouldThrow = (boolean) taskSettings.getOrDefault("throwing", false);
Integer inferenceResultCount = (Integer) taskSettings.get("inferenceResultCount");
if (shouldThrow) {
listener.onFailure(new UnsupportedOperationException("simulated failure"));
} else {
List<RankedDocsResults.RankedDoc> rankedDocsResults = new ArrayList<>();
List<String> inputs = request.getInput();
int resultCount = inferenceResultCount == null ? inputs.size() : inferenceResultCount;
for (int i = 0; i < resultCount; i++) {
rankedDocsResults.add(new RankedDocsResults.RankedDoc(i, Float.parseFloat(inputs.get(i)), inputs.get(i)));
}
ActionResponse response = new InferenceAction.Response(new RankedDocsResults(rankedDocsResults));
listener.onResponse((Response) response);
}
}
}
public static | TestFilter |
java | apache__camel | components/camel-cxf/camel-cxf-soap/src/test/java/org/apache/camel/component/cxf/jaxws/CxfConsumerPayloadXPathTest.java | {
"start": 6489,
"end": 7434
} | class ____ implements Processor {
@Override
public void process(Exchange exchange) throws Exception {
Object obj = exchange.getIn().getBody();
@SuppressWarnings("unchecked")
CxfPayload<SoapHeader> payload = (CxfPayload<SoapHeader>) obj;
Element el = payload.getBody().get(0);
Text textnode = (Text) el.getFirstChild();
StringBuilder b = new StringBuilder();
b.append(textnode.getNodeValue());
textnode = (Text) textnode.getNextSibling();
while (textnode != null) {
//the textnode appears to have siblings!
b.append(textnode.getNodeValue());
textnode = (Text) textnode.getNextSibling();
}
exchange.getMessage().setBody(b.toString());
exchange.getMessage().setHeaders(exchange.getIn().getHeaders());
}
}
private | DomProcessor |
java | apache__logging-log4j2 | log4j-taglib/src/main/java/org/apache/logging/log4j/taglib/IfEnabledTag.java | {
"start": 1013,
"end": 1091
} | class ____ the {@code <log:ifEnabled>} tag.
*
* @since 2.0
*/
public | implements |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java | {
"start": 4220,
"end": 6997
} | class ____ implements Application {
final Dispatcher dispatcher;
final String user;
// flow context is set only if the timeline service v.2 is enabled
private FlowContext flowContext;
final ApplicationId appId;
final Credentials credentials;
Map<ApplicationAccessType, String> applicationACLs;
final ApplicationACLsManager aclsManager;
private final ReadLock readLock;
private final WriteLock writeLock;
private final Context context;
private static final Logger LOG =
LoggerFactory.getLogger(ApplicationImpl.class);
private LogAggregationContext logAggregationContext;
Map<ContainerId, Container> containers =
new ConcurrentHashMap<>();
/**
* The timestamp when the log aggregation has started for this application.
* Used to determine the age of application log files during log aggregation.
* When logAggregationRentention policy is enabled, log files older than
* the retention policy will not be uploaded but scheduled for deletion.
*/
private long applicationLogInitedTimestamp = -1;
private final NMStateStoreService appStateStore;
public ApplicationImpl(Dispatcher dispatcher, String user,
ApplicationId appId, Credentials credentials, Context context) {
this(dispatcher, user, null, appId, credentials, context, -1L);
}
public ApplicationImpl(Dispatcher dispatcher, String user,
FlowContext flowContext, ApplicationId appId, Credentials credentials,
Context context, long recoveredLogInitedTime) {
this.dispatcher = dispatcher;
this.user = user;
this.appId = appId;
this.credentials = credentials;
this.aclsManager = context.getApplicationACLsManager();
Configuration conf = context.getConf();
if (YarnConfiguration.timelineServiceV2Enabled(conf)) {
if (flowContext == null) {
throw new IllegalArgumentException("flow context cannot be null");
}
this.flowContext = flowContext;
if (YarnConfiguration.systemMetricsPublisherEnabled(conf)) {
context.getNMTimelinePublisher().createTimelineClient(appId);
}
}
this.context = context;
this.appStateStore = context.getNMStateStore();
ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
readLock = lock.readLock();
writeLock = lock.writeLock();
stateMachine = stateMachineFactory.make(this);
setAppLogInitedTimestamp(recoveredLogInitedTime);
}
public ApplicationImpl(Dispatcher dispatcher, String user,
FlowContext flowContext, ApplicationId appId,
Credentials credentials, Context context) {
this(dispatcher, user, flowContext, appId, credentials,
context, -1);
}
/**
* Data object that encapsulates the flow context for the application purpose.
*/
public static | ApplicationImpl |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/stream/AsyncCalcTestPrograms.java | {
"start": 10993,
"end": 11224
} | class ____ extends AsyncScalarFunction {
public void eval(CompletableFuture<Long> future, Long l) {
future.complete(l + 1);
}
}
/** Concatenate inputs as strings. */
public static | AsyncJavaFunc0 |
java | google__dagger | hilt-core/main/java/dagger/hilt/InstallIn.java | {
"start": 1279,
"end": 1566
} | class ____ {
* {@literal @}Provides
* static Foo provideFoo() {
* return new Foo();
* }
* }
* </code></pre>
*
* @see <a href="https://dagger.dev/hilt/modules">Hilt Modules</a>
*/
@Retention(CLASS)
@Target({ElementType.TYPE})
@GeneratesRootInput
public @ | FooModule |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/processor/internals/assignment/HighAvailabilityTaskAssignorTest.java | {
"start": 6444,
"end": 89013
} | class ____ {
private AssignmentConfigs getConfigWithoutStandbys(final String rackAwareStrategy) {
return new AssignmentConfigs(
/*acceptableRecoveryLag*/ 100L,
/*maxWarmupReplicas*/ 2,
/*numStandbyReplicas*/ 0,
/*probingRebalanceIntervalMs*/ 60 * 1000L,
/*rackAwareAssignmentTags*/ EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
null,
null,
rackAwareStrategy
);
}
private AssignmentConfigs getConfigWithStandbys(final String rackAwareStrategy) {
return getConfigWithStandbys(1, rackAwareStrategy);
}
private AssignmentConfigs getConfigWithStandbys(final int replicaNum, final String rackAwareStrategy) {
return new AssignmentConfigs(
/*acceptableRecoveryLag*/ 100L,
/*maxWarmupReplicas*/ 2,
/*numStandbyReplicas*/ replicaNum,
/*probingRebalanceIntervalMs*/ 60 * 1000L,
/*rackAwareAssignmentTags*/ EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
null,
null,
rackAwareStrategy
);
}
private final Time time = new MockTime();
static Stream<Arguments> parameter() {
return Stream.of(
Arguments.of(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE, false, 1),
Arguments.of(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC, true, 1),
Arguments.of(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY, true, 4)
);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldBeStickyForActiveAndStandbyTasksWhileWarmingUp(final String rackAwareStrategy,
final boolean enableRackAwareTaskAssignor) {
final Set<TaskId> allTaskIds = Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_1_0, TASK_1_1, TASK_1_2, TASK_2_0, TASK_2_1, TASK_2_2);
final ClientState clientState1 = new ClientState(allTaskIds, emptySet(), allTaskIds.stream().collect(Collectors.toMap(k -> k, k -> 0L)), EMPTY_CLIENT_TAGS, 1,
PID_1
);
final ClientState clientState2 = new ClientState(emptySet(), allTaskIds, allTaskIds.stream().collect(Collectors.toMap(k -> k, k -> 10L)), EMPTY_CLIENT_TAGS, 1,
PID_2
);
final ClientState clientState3 = new ClientState(emptySet(), emptySet(), allTaskIds.stream().collect(Collectors.toMap(k -> k, k -> Long.MAX_VALUE)), EMPTY_CLIENT_TAGS, 1,
PID_3
);
final Map<ProcessId, ClientState> clientStates = mkMap(
mkEntry(PID_1, clientState1),
mkEntry(PID_2, clientState2),
mkEntry(PID_3, clientState3)
);
final AssignmentConfigs configs = new AssignmentConfigs(
11L,
2,
1,
60_000L,
EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
null,
null,
rackAwareStrategy
);
final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1, TASK_0_2)),
mkEntry(new Subtopology(1, null), Set.of(TASK_1_0, TASK_1_1, TASK_1_2)),
mkEntry(new Subtopology(2, null), Set.of(TASK_2_0, TASK_2_1, TASK_2_2))
);
final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);
final boolean unstable = new HighAvailabilityTaskAssignor().assign(
clientStates,
allTaskIds,
allTaskIds,
rackAwareTaskAssignor,
configs
);
assertThat(clientState1, hasAssignedTasks(allTaskIds.size()));
assertThat(clientState2, hasAssignedTasks(allTaskIds.size()));
assertThat(clientState3, hasAssignedTasks(2));
assertThat(unstable, is(true));
verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTaskIds, clientStates, true, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldSkipWarmupsWhenAcceptableLagIsMax(final String rackAwareStrategy,
final boolean enableRackAwareTaskAssignor) {
final Set<TaskId> allTaskIds = Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_1_0, TASK_1_1, TASK_1_2, TASK_2_0, TASK_2_1, TASK_2_2);
final ClientState clientState1 = new ClientState(allTaskIds, emptySet(), allTaskIds.stream().collect(Collectors.toMap(k -> k, k -> 0L)), EMPTY_CLIENT_TAGS, 1,
PID_1
);
final ClientState clientState2 = new ClientState(emptySet(), emptySet(), allTaskIds.stream().collect(Collectors.toMap(k -> k, k -> Long.MAX_VALUE)), EMPTY_CLIENT_TAGS, 1,
PID_2
);
final ClientState clientState3 = new ClientState(emptySet(), emptySet(), allTaskIds.stream().collect(Collectors.toMap(k -> k, k -> Long.MAX_VALUE)), EMPTY_CLIENT_TAGS, 1,
PID_3
);
final Map<ProcessId, ClientState> clientStates = mkMap(
mkEntry(PID_1, clientState1),
mkEntry(PID_2, clientState2),
mkEntry(PID_3, clientState3)
);
final AssignmentConfigs configs = new AssignmentConfigs(
Long.MAX_VALUE,
1,
1,
60_000L,
EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
null,
null,
rackAwareStrategy
);
final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1, TASK_0_2)),
mkEntry(new Subtopology(1, null), Set.of(TASK_1_0, TASK_1_1, TASK_1_2)),
mkEntry(new Subtopology(2, null), Set.of(TASK_2_0, TASK_2_1, TASK_2_2))
);
final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);
final boolean unstable = new HighAvailabilityTaskAssignor().assign(
clientStates,
allTaskIds,
allTaskIds,
rackAwareTaskAssignor,
configs
);
assertThat(clientState1, hasAssignedTasks(6));
assertThat(clientState2, hasAssignedTasks(6));
assertThat(clientState3, hasAssignedTasks(6));
assertThat(unstable, is(false));
verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTaskIds, clientStates, true, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldAssignActiveStatefulTasksEvenlyOverClientsWhereNumberOfClientsIntegralDivisorOfNumberOfTasks(final String rackAwareStrategy,
final boolean enableRackAwareTaskAssignor,
final int maxSkew) {
final Set<TaskId> allTaskIds = Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_1_0, TASK_1_1, TASK_1_2, TASK_2_0, TASK_2_1, TASK_2_2);
final Map<TaskId, Long> lags = allTaskIds.stream().collect(Collectors.toMap(k -> k, k -> 10L));
final ClientState clientState1 = new ClientState(emptySet(), emptySet(), lags, EMPTY_CLIENT_TAGS, 1,
PID_1
);
final ClientState clientState2 = new ClientState(emptySet(), emptySet(), lags, EMPTY_CLIENT_TAGS, 1,
PID_2
);
final ClientState clientState3 = new ClientState(emptySet(), emptySet(), lags, EMPTY_CLIENT_TAGS, 1,
PID_3
);
final Map<ProcessId, ClientState> clientStates = getClientStatesMap(clientState1, clientState2, clientState3);
final AssignmentConfigs configs = new AssignmentConfigs(
0L,
1,
0,
60_000L,
EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
null,
null,
rackAwareStrategy
);
final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1, TASK_0_2)),
mkEntry(new Subtopology(1, null), Set.of(TASK_1_0, TASK_1_1, TASK_1_2)),
mkEntry(new Subtopology(2, null), Set.of(TASK_2_0, TASK_2_1, TASK_2_2))
);
final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);
final boolean unstable = new HighAvailabilityTaskAssignor().assign(
clientStates,
allTaskIds,
allTaskIds,
rackAwareTaskAssignor,
configs
);
assertThat(unstable, is(false));
assertValidAssignment(0, allTaskIds, emptySet(), clientStates, new StringBuilder());
assertBalancedActiveAssignment(clientStates, new StringBuilder());
assertBalancedStatefulAssignment(allTaskIds, clientStates, new StringBuilder());
if (!rackAwareStrategy.equals(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC)) {
// Subtopology is not balanced with min_traffic rack aware assignment
assertBalancedTasks(clientStates, maxSkew);
}
verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTaskIds, clientStates, false, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldAssignActiveStatefulTasksEvenlyOverClientsWhereNumberOfThreadsIntegralDivisorOfNumberOfTasks(final String rackAwareStrategy,
final boolean enableRackAwareTaskAssignor,
final int maxSkew) {
final Set<TaskId> allTaskIds = Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_1_0, TASK_1_1, TASK_1_2, TASK_2_0, TASK_2_1, TASK_2_2);
final Map<TaskId, Long> lags = allTaskIds.stream().collect(Collectors.toMap(k -> k, k -> 10L));
final ClientState clientState1 = new ClientState(emptySet(), emptySet(), lags, EMPTY_CLIENT_TAGS, 3,
PID_1
);
final ClientState clientState2 = new ClientState(emptySet(), emptySet(), lags, EMPTY_CLIENT_TAGS, 3,
PID_2
);
final ClientState clientState3 = new ClientState(emptySet(), emptySet(), lags, EMPTY_CLIENT_TAGS, 3,
PID_3
);
final Map<ProcessId, ClientState> clientStates = getClientStatesMap(clientState1, clientState2, clientState3);
final AssignmentConfigs configs = new AssignmentConfigs(
0L,
1,
0,
60_000L,
EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
null,
null,
rackAwareStrategy
);
final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1, TASK_0_2)),
mkEntry(new Subtopology(1, null), Set.of(TASK_1_0, TASK_1_1, TASK_1_2)),
mkEntry(new Subtopology(2, null), Set.of(TASK_2_0, TASK_2_1, TASK_2_2))
);
final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);
final boolean unstable = new HighAvailabilityTaskAssignor().assign(
clientStates,
allTaskIds,
allTaskIds,
rackAwareTaskAssignor,
configs
);
assertThat(unstable, is(false));
assertValidAssignment(0, allTaskIds, emptySet(), clientStates, new StringBuilder());
assertBalancedActiveAssignment(clientStates, new StringBuilder());
assertBalancedStatefulAssignment(allTaskIds, clientStates, new StringBuilder());
if (!rackAwareStrategy.equals(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC)) {
// Subtopology is not balanced with min_traffic rack aware assignment
assertBalancedTasks(clientStates, maxSkew);
}
verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTaskIds, clientStates, false, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldAssignActiveStatefulTasksEvenlyOverClientsWhereNumberOfClientsNotIntegralDivisorOfNumberOfTasks(final String rackAwareStrategy,
final boolean enableRackAwareTaskAssignor,
final int maxSkew) {
final Set<TaskId> allTaskIds = Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_1_0, TASK_1_1, TASK_1_2, TASK_2_0, TASK_2_1, TASK_2_2);
final Map<TaskId, Long> lags = allTaskIds.stream().collect(Collectors.toMap(k -> k, k -> 10L));
final ClientState clientState1 = new ClientState(emptySet(), emptySet(), lags, EMPTY_CLIENT_TAGS, 1,
PID_1
);
final ClientState clientState2 = new ClientState(emptySet(), emptySet(), lags, EMPTY_CLIENT_TAGS, 1,
PID_2
);
final Map<ProcessId, ClientState> clientStates = getClientStatesMap(clientState1, clientState2);
final AssignmentConfigs configs = new AssignmentConfigs(
0L,
1,
0,
60_000L,
EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
null,
null,
rackAwareStrategy
);
final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1, TASK_0_2)),
mkEntry(new Subtopology(1, null), Set.of(TASK_1_0, TASK_1_1, TASK_1_2)),
mkEntry(new Subtopology(2, null), Set.of(TASK_2_0, TASK_2_1, TASK_2_2))
);
final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);
final boolean unstable = new HighAvailabilityTaskAssignor().assign(
clientStates,
allTaskIds,
allTaskIds,
rackAwareTaskAssignor,
configs
);
assertThat(unstable, is(false));
assertValidAssignment(0, allTaskIds, emptySet(), clientStates, new StringBuilder());
assertBalancedActiveAssignment(clientStates, new StringBuilder());
assertBalancedStatefulAssignment(allTaskIds, clientStates, new StringBuilder());
assertBalancedTasks(clientStates, maxSkew);
verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTaskIds, clientStates, false, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldAssignActiveStatefulTasksEvenlyOverUnevenlyDistributedStreamThreads(final String rackAwareStrategy,
final boolean enableRackAwareTaskAssignor) {
final Set<TaskId> allTaskIds = Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_1_0, TASK_1_1, TASK_1_2);
final Map<TaskId, Long> lags = allTaskIds.stream().collect(Collectors.toMap(k -> k, k -> 10L));
final ClientState clientState1 = new ClientState(emptySet(), emptySet(), lags, EMPTY_CLIENT_TAGS, 1,
PID_1
);
final ClientState clientState2 = new ClientState(emptySet(), emptySet(), lags, EMPTY_CLIENT_TAGS, 2,
PID_2
);
final ClientState clientState3 = new ClientState(emptySet(), emptySet(), lags, EMPTY_CLIENT_TAGS, 3,
PID_3
);
final Map<ProcessId, ClientState> clientStates = getClientStatesMap(clientState1, clientState2, clientState3);
final AssignmentConfigs configs = new AssignmentConfigs(
0L,
1,
0,
60_000L,
EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
null,
null,
rackAwareStrategy
);
final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1, TASK_0_2)),
mkEntry(new Subtopology(1, null), Set.of(TASK_1_0, TASK_1_1, TASK_1_2))
);
final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);
final boolean unstable = new HighAvailabilityTaskAssignor().assign(
clientStates,
allTaskIds,
allTaskIds,
rackAwareTaskAssignor,
configs
);
assertThat(unstable, is(false));
assertValidAssignment(0, allTaskIds, emptySet(), clientStates, new StringBuilder());
assertBalancedActiveAssignment(clientStates, new StringBuilder());
assertBalancedStatefulAssignment(allTaskIds, clientStates, new StringBuilder());
assertThat(clientState1, hasActiveTasks(1));
assertThat(clientState2, hasActiveTasks(2));
assertThat(clientState3, hasActiveTasks(3));
final AssignmentTestUtils.TaskSkewReport taskSkewReport = analyzeTaskAssignmentBalance(clientStates, 1);
if (taskSkewReport.totalSkewedTasks() == 0) {
fail("Expected a skewed task assignment, but was: " + taskSkewReport);
}
verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTaskIds, clientStates, false, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldAssignActiveStatefulTasksEvenlyOverClientsWithMoreClientsThanTasks(final String rackAwareStrategy,
final boolean enableRackAwareTaskAssignor,
final int maxSkew) {
final Set<TaskId> allTaskIds = Set.of(TASK_0_0, TASK_0_1);
final Map<TaskId, Long> lags = allTaskIds.stream().collect(Collectors.toMap(k -> k, k -> 10L));
final ClientState clientState1 = new ClientState(emptySet(), emptySet(), lags, EMPTY_CLIENT_TAGS, 1,
PID_1
);
final ClientState clientState2 = new ClientState(emptySet(), emptySet(), lags, EMPTY_CLIENT_TAGS, 1,
PID_2
);
final ClientState clientState3 = new ClientState(emptySet(), emptySet(), lags, EMPTY_CLIENT_TAGS, 1,
PID_3
);
final Map<ProcessId, ClientState> clientStates = getClientStatesMap(clientState1, clientState2, clientState3);
final AssignmentConfigs configs = new AssignmentConfigs(
0L,
1,
0,
60_000L,
EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
null,
null,
rackAwareStrategy
);
final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1))
);
final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);
final boolean unstable = new HighAvailabilityTaskAssignor().assign(
clientStates,
allTaskIds,
allTaskIds,
rackAwareTaskAssignor,
configs
);
assertThat(unstable, is(false));
assertValidAssignment(0, allTaskIds, emptySet(), clientStates, new StringBuilder());
assertBalancedActiveAssignment(clientStates, new StringBuilder());
assertBalancedStatefulAssignment(allTaskIds, clientStates, new StringBuilder());
assertBalancedTasks(clientStates, maxSkew);
verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTaskIds, clientStates, false, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldAssignActiveStatefulTasksEvenlyOverClientsAndStreamThreadsWithEqualStreamThreadsPerClientAsTasks(final String rackAwareStrategy,
final boolean enableRackAwareTaskAssignor,
final int maxSkew) {
final Set<TaskId> allTaskIds = Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_1_0, TASK_1_1, TASK_1_2, TASK_2_0, TASK_2_1, TASK_2_2);
final Map<TaskId, Long> lags = allTaskIds.stream().collect(Collectors.toMap(k -> k, k -> 10L));
final ClientState clientState1 = new ClientState(emptySet(), emptySet(), lags, EMPTY_CLIENT_TAGS, 9,
PID_1
);
final ClientState clientState2 = new ClientState(emptySet(), emptySet(), lags, EMPTY_CLIENT_TAGS, 9,
PID_2
);
final ClientState clientState3 = new ClientState(emptySet(), emptySet(), lags, EMPTY_CLIENT_TAGS, 9,
PID_3
);
final Map<ProcessId, ClientState> clientStates = getClientStatesMap(clientState1, clientState2, clientState3);
final AssignmentConfigs configs = new AssignmentConfigs(
0L,
1,
0,
60_000L,
EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
null,
null,
rackAwareStrategy
);
final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1, TASK_0_2)),
mkEntry(new Subtopology(1, null), Set.of(TASK_1_0, TASK_1_1, TASK_1_2)),
mkEntry(new Subtopology(2, null), Set.of(TASK_2_0, TASK_2_1, TASK_2_2))
);
final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);
final boolean unstable = new HighAvailabilityTaskAssignor().assign(
clientStates,
allTaskIds,
allTaskIds,
rackAwareTaskAssignor,
configs
);
assertThat(unstable, is(false));
assertValidAssignment(0, allTaskIds, emptySet(), clientStates, new StringBuilder());
assertBalancedActiveAssignment(clientStates, new StringBuilder());
assertBalancedStatefulAssignment(allTaskIds, clientStates, new StringBuilder());
if (!rackAwareStrategy.equals(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC)) {
// Subtopology is not balanced with min_traffic rack aware assignment
assertBalancedTasks(clientStates, maxSkew);
}
verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTaskIds, clientStates, false, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldAssignWarmUpTasksIfStatefulActiveTasksBalancedOverStreamThreadsButNotOverClients(final String rackAwareStrategy,
final boolean enableRackAwareTaskAssignor) {
final Set<TaskId> allTaskIds = Set.of(TASK_0_0, TASK_0_1, TASK_1_0, TASK_1_1);
final Map<TaskId, Long> lagsForCaughtUpClient = allTaskIds.stream().collect(Collectors.toMap(k -> k, k -> 0L));
final Map<TaskId, Long> lagsForNotCaughtUpClient =
allTaskIds.stream().collect(Collectors.toMap(k -> k, k -> Long.MAX_VALUE));
final ClientState caughtUpClientState = new ClientState(allTaskIds, emptySet(), lagsForCaughtUpClient, EMPTY_CLIENT_TAGS, 5,
PID_1
);
final ClientState notCaughtUpClientState1 = new ClientState(emptySet(), emptySet(), lagsForNotCaughtUpClient, EMPTY_CLIENT_TAGS, 5,
PID_2
);
final ClientState notCaughtUpClientState2 = new ClientState(emptySet(), emptySet(), lagsForNotCaughtUpClient, EMPTY_CLIENT_TAGS, 5,
PID_3
);
final Map<ProcessId, ClientState> clientStates =
getClientStatesMap(caughtUpClientState, notCaughtUpClientState1, notCaughtUpClientState2);
final AssignmentConfigs configs = new AssignmentConfigs(
0L,
allTaskIds.size() / 3 + 1,
0,
60_000L,
EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
null,
null,
rackAwareStrategy
);
final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1)),
mkEntry(new Subtopology(1, null), Set.of(TASK_1_0, TASK_1_1))
);
final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);
final boolean unstable = new HighAvailabilityTaskAssignor().assign(
clientStates,
allTaskIds,
allTaskIds,
rackAwareTaskAssignor,
configs
);
assertThat(unstable, is(true));
assertThat(notCaughtUpClientState1.standbyTaskCount(), greaterThanOrEqualTo(allTaskIds.size() / 3));
assertThat(notCaughtUpClientState2.standbyTaskCount(), greaterThanOrEqualTo(allTaskIds.size() / 3));
assertValidAssignment(0, allTaskIds.size() / 3 + 1, allTaskIds, emptySet(), clientStates, new StringBuilder());
verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTaskIds, clientStates, false, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldEvenlyAssignActiveStatefulTasksIfClientsAreWarmedUpToBalanceTaskOverClients(final String rackAwareStrategy,
final boolean enableRackAwareTaskAssignor,
final int maxSkew) {
final Set<TaskId> allTaskIds = Set.of(TASK_0_0, TASK_0_1, TASK_1_0, TASK_1_1);
// If RackAwareTaskAssignor is enabled, TASK_1_1 is assigned ProcessId_2
final TaskId warmupTaskId1 = enableRackAwareTaskAssignor ? TASK_1_1 : TASK_0_1;
// If RackAwareTaskAssignor is enabled, TASK_0_1 is assigned ProcessId_3
final TaskId warmupTaskId2 = enableRackAwareTaskAssignor ? TASK_0_1 : TASK_1_0;
final Set<TaskId> warmedUpTaskIds1 = Set.of(warmupTaskId1);
final Set<TaskId> warmedUpTaskIds2 = Set.of(warmupTaskId2);
final Map<TaskId, Long> lagsForCaughtUpClient = allTaskIds.stream().collect(Collectors.toMap(k -> k, k -> 0L));
final Map<TaskId, Long> lagsForWarmedUpClient1 =
allTaskIds.stream().collect(Collectors.toMap(k -> k, k -> Long.MAX_VALUE));
lagsForWarmedUpClient1.put(warmupTaskId1, 0L);
final Map<TaskId, Long> lagsForWarmedUpClient2 =
allTaskIds.stream().collect(Collectors.toMap(k -> k, k -> Long.MAX_VALUE));
lagsForWarmedUpClient2.put(warmupTaskId2, 0L);
final ClientState caughtUpClientState = new ClientState(allTaskIds, emptySet(), lagsForCaughtUpClient, EMPTY_CLIENT_TAGS, 5,
PID_1
);
final ClientState warmedUpClientState1 = new ClientState(emptySet(), warmedUpTaskIds1, lagsForWarmedUpClient1, EMPTY_CLIENT_TAGS, 5,
PID_2
);
final ClientState warmedUpClientState2 = new ClientState(emptySet(), warmedUpTaskIds2, lagsForWarmedUpClient2, EMPTY_CLIENT_TAGS, 5,
PID_3
);
final Map<ProcessId, ClientState> clientStates =
getClientStatesMap(caughtUpClientState, warmedUpClientState1, warmedUpClientState2);
final AssignmentConfigs configs = new AssignmentConfigs(
0L,
allTaskIds.size() / 3 + 1,
0,
60_000L,
EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
null,
null,
rackAwareStrategy
);
final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1)),
mkEntry(new Subtopology(1, null), Set.of(TASK_1_0, TASK_1_1))
);
final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);
final boolean unstable = new HighAvailabilityTaskAssignor().assign(
clientStates,
allTaskIds,
allTaskIds,
rackAwareTaskAssignor,
new AssignmentConfigs(0L, allTaskIds.size() / 3 + 1, 0, 60_000L, EMPTY_RACK_AWARE_ASSIGNMENT_TAGS)
);
assertThat(unstable, is(false));
assertValidAssignment(0, allTaskIds, emptySet(), clientStates, new StringBuilder());
assertBalancedActiveAssignment(clientStates, new StringBuilder());
assertBalancedStatefulAssignment(allTaskIds, clientStates, new StringBuilder());
assertBalancedTasks(clientStates, maxSkew);
verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTaskIds, clientStates, false, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldAssignActiveStatefulTasksEvenlyOverStreamThreadsButBestEffortOverClients(final String rackAwareStrategy,
final boolean enableRackAwareTaskAssignor) {
final Set<TaskId> allTaskIds = Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_1_0, TASK_1_1, TASK_1_2, TASK_2_0, TASK_2_1, TASK_2_2);
final Map<TaskId, Long> lags = allTaskIds.stream().collect(Collectors.toMap(k -> k, k -> 10L));
final ClientState clientState1 = new ClientState(emptySet(), emptySet(), lags, EMPTY_CLIENT_TAGS, 6,
PID_1
);
final ClientState clientState2 = new ClientState(emptySet(), emptySet(), lags, EMPTY_CLIENT_TAGS, 3,
PID_2
);
final Map<ProcessId, ClientState> clientStates = getClientStatesMap(clientState1, clientState2);
final AssignmentConfigs configs = new AssignmentConfigs(
0L,
1,
0,
60_000L,
EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
null,
null,
rackAwareStrategy
);
final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1, TASK_0_2)),
mkEntry(new Subtopology(1, null), Set.of(TASK_1_0, TASK_1_1, TASK_1_2)),
mkEntry(new Subtopology(2, null), Set.of(TASK_2_0, TASK_2_1, TASK_2_2))
);
final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);
final boolean unstable = new HighAvailabilityTaskAssignor().assign(
clientStates,
allTaskIds,
allTaskIds,
rackAwareTaskAssignor,
configs
);
assertThat(unstable, is(false));
assertValidAssignment(0, allTaskIds, emptySet(), clientStates, new StringBuilder());
assertBalancedActiveAssignment(clientStates, new StringBuilder());
assertBalancedStatefulAssignment(allTaskIds, clientStates, new StringBuilder());
assertThat(clientState1, hasActiveTasks(6));
assertThat(clientState2, hasActiveTasks(3));
verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTaskIds, clientStates, false, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldComputeNewAssignmentIfThereAreUnassignedActiveTasks(final String rackAwareStrategy,
                                                                      final boolean enableRackAwareTaskAssignor,
                                                                      final int maxSkew) {
    // Given: a single client that previously owned only TASK_0_0, while TASK_0_1 is unassigned.
    final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1);
    final ClientState soleClient = new ClientState(singleton(TASK_0_0), emptySet(), singletonMap(TASK_0_0, 0L), EMPTY_CLIENT_TAGS, 1, PID_1);
    final Map<ProcessId, ClientState> clientStates = singletonMap(PID_1, soleClient);
    final AssignmentConfigs configs = getConfigWithoutStandbys(rackAwareStrategy);
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
        mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1))
    );
    final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);

    // When: assigning with only TASK_0_0 considered stateful.
    final boolean probingRebalanceNeeded =
        new HighAvailabilityTaskAssignor().assign(clientStates, allTasks, singleton(TASK_0_0), rackAwareTaskAssignor, configs);

    // Then: the client picks up both tasks immediately; no probing rebalance is required.
    assertThat(probingRebalanceNeeded, is(false));
    assertThat(soleClient, hasActiveTasks(2));
    assertThat(soleClient, hasStandbyTasks(0));
    assertValidAssignment(0, allTasks, emptySet(), clientStates, new StringBuilder());
    assertBalancedActiveAssignment(clientStates, new StringBuilder());
    assertBalancedStatefulAssignment(allTasks, clientStates, new StringBuilder());
    assertBalancedTasks(clientStates, maxSkew);
    verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTasks, clientStates, false, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldComputeNewAssignmentIfThereAreUnassignedStandbyTasks(final String rackAwareStrategy, final boolean enableRackAwareTaskAssignor, final int maxSkew) {
    // Given: one stateful task active on client 1, a second caught-up client with no
    // tasks, and standby replicas enabled — the standby slot is currently unfilled.
    final Set<TaskId> allTasks = Set.of(TASK_0_0);
    final Set<TaskId> statefulTasks = Set.of(TASK_0_0);
    final ClientState activeOwner = new ClientState(singleton(TASK_0_0), emptySet(), singletonMap(TASK_0_0, 0L), EMPTY_CLIENT_TAGS, 1, PID_1);
    final ClientState idleClient = new ClientState(emptySet(), emptySet(), singletonMap(TASK_0_0, 0L), EMPTY_CLIENT_TAGS, 1, PID_2);
    final Map<ProcessId, ClientState> clientStates = mkMap(mkEntry(PID_1, activeOwner), mkEntry(PID_2, idleClient));
    final AssignmentConfigs configs = getConfigWithStandbys(rackAwareStrategy);
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
        mkEntry(new Subtopology(0, null), Set.of(TASK_0_0))
    );
    final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);

    // When: computing the assignment.
    final boolean probingRebalanceNeeded =
        new HighAvailabilityTaskAssignor().assign(clientStates, allTasks, statefulTasks, rackAwareTaskAssignor, configs);

    // Then: the idle client receives the missing standby and no probing rebalance is needed.
    assertThat(clientStates.get(PID_2).standbyTasks(), not(empty()));
    assertThat(probingRebalanceNeeded, is(false));
    assertValidAssignment(1, allTasks, emptySet(), clientStates, new StringBuilder());
    assertBalancedActiveAssignment(clientStates, new StringBuilder());
    assertBalancedStatefulAssignment(allTasks, clientStates, new StringBuilder());
    assertBalancedTasks(clientStates, maxSkew);
    verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTasks, clientStates, true, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldComputeNewAssignmentIfActiveTasksWasNotOnCaughtUpClient(final String rackAwareStrategy, final boolean enableRackAwareTaskAssignor, final int maxSkew) {
    // Given: the stateful task TASK_0_0 previously lived on client 1, which lags behind
    // (500), while client 2 is fully caught up on it (lag 0).
    final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1);
    final Set<TaskId> statefulTasks = Set.of(TASK_0_0);
    final ClientState laggingClient = new ClientState(singleton(TASK_0_0), emptySet(), singletonMap(TASK_0_0, 500L), EMPTY_CLIENT_TAGS, 1, PID_1);
    final ClientState caughtUpClient = new ClientState(singleton(TASK_0_1), emptySet(), singletonMap(TASK_0_0, 0L), EMPTY_CLIENT_TAGS, 1, PID_2);
    final Map<ProcessId, ClientState> clientStates = mkMap(
        mkEntry(PID_1, laggingClient),
        mkEntry(PID_2, caughtUpClient)
    );
    final AssignmentConfigs configs = getConfigWithoutStandbys(rackAwareStrategy);
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
        mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1))
    );
    final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);

    // When: computing the assignment.
    final boolean probingRebalanceNeeded =
        new HighAvailabilityTaskAssignor().assign(clientStates, allTasks, statefulTasks, rackAwareTaskAssignor, configs);

    // Then: the stateful task moves to the caught-up client, and the two clients swap tasks.
    assertThat(clientStates.get(PID_1).activeTasks(), is(singleton(TASK_0_1)));
    assertThat(clientStates.get(PID_2).activeTasks(), is(singleton(TASK_0_0)));
    // A warmup for task 0_0 lands on client1 because it sorts first — not an optimal
    // convergence, but the expected behavior here.
    assertThat(probingRebalanceNeeded, is(true));
    assertValidAssignment(0, 1, allTasks, emptySet(), clientStates, new StringBuilder());
    assertBalancedActiveAssignment(clientStates, new StringBuilder());
    assertBalancedStatefulAssignment(allTasks, clientStates, new StringBuilder());
    assertBalancedTasks(clientStates, maxSkew);
    verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTasks, clientStates, false, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldAssignToMostCaughtUpIfActiveTasksWasNotOnCaughtUpClient(final String rackAwareStrategy, final boolean enableRackAwareTaskAssignor, final int maxSkew) {
    // Given: three idle clients whose lag on the single stateful task strictly decreases
    // (MAX_VALUE > 1000 > 500), so client 3 is the most caught up.
    final Set<TaskId> allTasks = Set.of(TASK_0_0);
    final Set<TaskId> statefulTasks = Set.of(TASK_0_0);
    final ClientState farthestBehind = new ClientState(emptySet(), emptySet(), singletonMap(TASK_0_0, Long.MAX_VALUE), EMPTY_CLIENT_TAGS, 1, PID_1);
    final ClientState somewhatBehind = new ClientState(emptySet(), emptySet(), singletonMap(TASK_0_0, 1000L), EMPTY_CLIENT_TAGS, 1, PID_2);
    final ClientState mostCaughtUp = new ClientState(emptySet(), emptySet(), singletonMap(TASK_0_0, 500L), EMPTY_CLIENT_TAGS, 1, PID_3);
    final Map<ProcessId, ClientState> clientStates = mkMap(
        mkEntry(PID_1, farthestBehind),
        mkEntry(PID_2, somewhatBehind),
        mkEntry(PID_3, mostCaughtUp)
    );
    final AssignmentConfigs configs = getConfigWithStandbys(rackAwareStrategy);
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
        mkEntry(new Subtopology(0, null), Set.of(TASK_0_0))
    );
    final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);

    // When: computing the assignment.
    final boolean probingRebalanceNeeded =
        new HighAvailabilityTaskAssignor().assign(clientStates, allTasks, statefulTasks, rackAwareTaskAssignor, configs);

    // Then: the least-lagging client gets the active task; the others hold replicas.
    assertThat(clientStates.get(PID_1).activeTasks(), is(emptySet()));
    assertThat(clientStates.get(PID_2).activeTasks(), is(emptySet()));
    assertThat(clientStates.get(PID_3).activeTasks(), is(singleton(TASK_0_0)));
    assertThat(clientStates.get(PID_1).standbyTasks(), is(singleton(TASK_0_0))); // warm up
    assertThat(clientStates.get(PID_2).standbyTasks(), is(singleton(TASK_0_0))); // standby
    assertThat(clientStates.get(PID_3).standbyTasks(), is(emptySet()));
    assertThat(probingRebalanceNeeded, is(true));
    assertValidAssignment(1, 1, allTasks, emptySet(), clientStates, new StringBuilder());
    assertBalancedActiveAssignment(clientStates, new StringBuilder());
    assertBalancedStatefulAssignment(allTasks, clientStates, new StringBuilder());
    assertBalancedTasks(clientStates, maxSkew);
    verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTasks, clientStates, true, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldAssignStandbysForStatefulTasks(final String rackAwareStrategy, final boolean enableRackAwareTaskAssignor) {
    // Given: two clients, each caught up on exactly one of the two stateful tasks.
    final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1);
    final Set<TaskId> statefulTasks = Set.of(TASK_0_0, TASK_0_1);
    final ClientState ownerOfTask0 = getMockClientWithPreviousCaughtUpTasks(Set.of(TASK_0_0), statefulTasks, PID_1);
    final ClientState ownerOfTask1 = getMockClientWithPreviousCaughtUpTasks(Set.of(TASK_0_1), statefulTasks, PID_2);
    final AssignmentConfigs configs = getConfigWithStandbys(rackAwareStrategy);
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
        mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1))
    );
    final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);
    final Map<ProcessId, ClientState> clientStates = getClientStatesMap(ownerOfTask0, ownerOfTask1);

    // When: assigning with standby replicas enabled.
    final boolean probingRebalanceNeeded =
        new HighAvailabilityTaskAssignor().assign(clientStates, allTasks, statefulTasks, rackAwareTaskAssignor, configs);

    // Then: each client keeps its active task and hosts the other task's standby.
    assertThat(ownerOfTask0.activeTasks(), equalTo(Set.of(TASK_0_0)));
    assertThat(ownerOfTask1.activeTasks(), equalTo(Set.of(TASK_0_1)));
    assertThat(ownerOfTask0.standbyTasks(), equalTo(Set.of(TASK_0_1)));
    assertThat(ownerOfTask1.standbyTasks(), equalTo(Set.of(TASK_0_0)));
    assertThat(probingRebalanceNeeded, is(false));
    verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTasks, clientStates, true, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldNotAssignStandbysForStatelessTasks(final String rackAwareStrategy, final boolean enableRackAwareTaskAssignor) {
    // Given: two stateless tasks and two empty clients, with standbys configured —
    // standbys must still not be created for stateless tasks.
    final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1);
    final Set<TaskId> statefulTasks = EMPTY_TASKS;
    final ClientState firstClient = getMockClientWithPreviousCaughtUpTasks(EMPTY_TASKS, statefulTasks, PID_1);
    final ClientState secondClient = getMockClientWithPreviousCaughtUpTasks(EMPTY_TASKS, statefulTasks, PID_2);
    final Map<ProcessId, ClientState> clientStates = getClientStatesMap(firstClient, secondClient);
    final AssignmentConfigs configs = getConfigWithStandbys(rackAwareStrategy);
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
        mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1))
    );
    final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);

    // When: computing the assignment.
    final boolean probingRebalanceNeeded =
        new HighAvailabilityTaskAssignor().assign(clientStates, allTasks, statefulTasks, rackAwareTaskAssignor, configs);

    // Then: each client runs one active task and no standbys exist anywhere.
    assertThat(firstClient.activeTaskCount(), equalTo(1));
    assertThat(secondClient.activeTaskCount(), equalTo(1));
    assertHasNoStandbyTasks(firstClient, secondClient);
    assertThat(probingRebalanceNeeded, is(false));
    verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTasks, clientStates, true, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldAssignWarmupReplicasEvenIfNoStandbyReplicasConfigured(final String rackAwareStrategy,
                                                                        final boolean enableRackAwareTaskAssignor) {
    // Given: client 1 is caught up on both stateful tasks, client 2 on neither,
    // and the config requests zero standby replicas.
    final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1);
    final Set<TaskId> statefulTasks = Set.of(TASK_0_0, TASK_0_1);
    final ClientState caughtUpClient = getMockClientWithPreviousCaughtUpTasks(Set.of(TASK_0_0, TASK_0_1), statefulTasks, PID_1);
    final ClientState coldClient = getMockClientWithPreviousCaughtUpTasks(EMPTY_TASKS, statefulTasks, PID_2);
    final Map<ProcessId, ClientState> clientStates = getClientStatesMap(caughtUpClient, coldClient);
    final AssignmentConfigs configs = getConfigWithoutStandbys(rackAwareStrategy);
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
        mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1))
    );
    final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);

    // When: computing the assignment.
    final boolean probingRebalanceNeeded =
        new HighAvailabilityTaskAssignor().assign(clientStates, allTasks, statefulTasks, rackAwareTaskAssignor, configs);

    // Then: actives stay on the caught-up client, while the cold client still gets a
    // warmup replica (reported as a standby) and a probing rebalance is scheduled.
    assertThat(caughtUpClient.activeTasks(), equalTo(Set.of(TASK_0_0, TASK_0_1)));
    assertThat(coldClient.standbyTaskCount(), equalTo(1));
    assertHasNoStandbyTasks(caughtUpClient);
    assertHasNoActiveTasks(coldClient);
    assertThat(probingRebalanceNeeded, is(true));
    verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTasks, clientStates, false, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldNotAssignMoreThanMaxWarmupReplicas(final String rackAwareStrategy,
                                                     final boolean enableRackAwareTaskAssignor) {
    // Given: four stateful tasks all caught up on client 1, an empty client 2,
    // and a config capping warmup replicas at one (no standbys).
    final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3);
    final Set<TaskId> statefulTasks = Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3);
    final ClientState caughtUpClient = getMockClientWithPreviousCaughtUpTasks(Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3), statefulTasks, PID_1);
    final ClientState coldClient = getMockClientWithPreviousCaughtUpTasks(EMPTY_TASKS, statefulTasks, PID_2);
    final Map<ProcessId, ClientState> clientStates = getClientStatesMap(caughtUpClient, coldClient);
    final AssignmentConfigs configs = new AssignmentConfigs(
        /*acceptableRecoveryLag*/ 100L,
        /*maxWarmupReplicas*/ 1,
        /*numStandbyReplicas*/ 0,
        /*probingRebalanceIntervalMs*/ 60 * 1000L,
        /*rackAwareAssignmentTags*/ EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
        null,
        null,
        rackAwareStrategy
    );
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
        mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3))
    );
    final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);

    // When: computing the assignment.
    final boolean probingRebalanceNeeded =
        new HighAvailabilityTaskAssignor().assign(clientStates, allTasks, statefulTasks, rackAwareTaskAssignor, configs);

    // Then: the cold client gets exactly one warmup replica, never more than the cap.
    assertThat(caughtUpClient.activeTasks(), equalTo(Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3)));
    assertThat(coldClient.standbyTaskCount(), equalTo(1));
    assertHasNoStandbyTasks(caughtUpClient);
    assertHasNoActiveTasks(coldClient);
    assertThat(probingRebalanceNeeded, is(true));
    verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTasks, clientStates, false, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldNotAssignWarmupAndStandbyToTheSameClient(final String rackAwareStrategy,
                                                           final boolean enableRackAwareTaskAssignor) {
    // Given: the same warm/cold pair of clients as the max-warmup test, but now
    // one standby replica is configured in addition to the warmup cap of one.
    final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3);
    final Set<TaskId> statefulTasks = Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3);
    final ClientState caughtUpClient = getMockClientWithPreviousCaughtUpTasks(Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3), statefulTasks, PID_1);
    final ClientState coldClient = getMockClientWithPreviousCaughtUpTasks(EMPTY_TASKS, statefulTasks, PID_2);
    final Map<ProcessId, ClientState> clientStates = getClientStatesMap(caughtUpClient, coldClient);
    final AssignmentConfigs configs = new AssignmentConfigs(
        /*acceptableRecoveryLag*/ 100L,
        /*maxWarmupReplicas*/ 1,
        /*numStandbyReplicas*/ 1,
        /*probingRebalanceIntervalMs*/ 60 * 1000L,
        /*rackAwareAssignmentTags*/ EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
        null,
        null,
        rackAwareStrategy
    );
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
        mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3))
    );
    final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);

    // When: computing the assignment.
    final boolean probingRebalanceNeeded =
        new HighAvailabilityTaskAssignor().assign(clientStates, allTasks, statefulTasks, rackAwareTaskAssignor, configs);

    // Then: the cold client carries standbys for all four tasks (a warmup is not
    // stacked on top of an existing standby of the same task on the same client).
    assertThat(caughtUpClient.activeTasks(), equalTo(Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3)));
    assertThat(coldClient.standbyTasks(), equalTo(Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3)));
    assertHasNoStandbyTasks(caughtUpClient);
    assertHasNoActiveTasks(coldClient);
    assertThat(probingRebalanceNeeded, is(true));
    verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTasks, clientStates, true, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldNotAssignAnyStandbysWithInsufficientCapacity(final String rackAwareStrategy,
                                                               final boolean enableRackAwareTaskAssignor) {
    // Given: standbys are configured but only a single client exists, so there is
    // nowhere to place a replica that isn't already hosting the active task.
    final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1);
    final Set<TaskId> statefulTasks = Set.of(TASK_0_0, TASK_0_1);
    final ClientState soleClient = getMockClientWithPreviousCaughtUpTasks(Set.of(TASK_0_0, TASK_0_1), statefulTasks, PID_1);
    final Map<ProcessId, ClientState> clientStates = getClientStatesMap(soleClient);
    final AssignmentConfigs configs = getConfigWithStandbys(rackAwareStrategy);
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
        mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1))
    );
    final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);

    // When: computing the assignment.
    final boolean probingRebalanceNeeded =
        new HighAvailabilityTaskAssignor().assign(clientStates, allTasks, statefulTasks, rackAwareTaskAssignor, configs);

    // Then: the client holds both actives and no standbys are created.
    assertThat(soleClient.activeTasks(), equalTo(Set.of(TASK_0_0, TASK_0_1)));
    assertHasNoStandbyTasks(soleClient);
    assertThat(probingRebalanceNeeded, is(false));
    verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTasks, clientStates, true, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldAssignActiveTasksToNotCaughtUpClientIfNoneExist(final String rackAwareStrategy,
                                                                  final boolean enableRackAwareTaskAssignor) {
    // Given: the only available client is not caught up on any stateful task —
    // actives must still land somewhere.
    final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1);
    final Set<TaskId> statefulTasks = Set.of(TASK_0_0, TASK_0_1);
    final ClientState soleClient = getMockClientWithPreviousCaughtUpTasks(EMPTY_TASKS, statefulTasks, PID_1);
    final Map<ProcessId, ClientState> clientStates = getClientStatesMap(soleClient);
    final AssignmentConfigs configs = getConfigWithStandbys(rackAwareStrategy);
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
        mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1))
    );
    final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);

    // When: computing the assignment.
    final boolean probingRebalanceNeeded =
        new HighAvailabilityTaskAssignor().assign(clientStates, allTasks, statefulTasks, rackAwareTaskAssignor, configs);

    // Then: the lagging client receives both actives; no standbys, no probing rebalance.
    assertThat(soleClient.activeTasks(), equalTo(Set.of(TASK_0_0, TASK_0_1)));
    assertHasNoStandbyTasks(soleClient);
    assertThat(probingRebalanceNeeded, is(false));
    verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTasks, clientStates, true, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldNotAssignMoreThanMaxWarmupReplicasWithStandbys(final String rackAwareStrategy,
                                                                 final boolean enableRackAwareTaskAssignor) {
    // Given: one client caught up on all four stateful tasks, two cold clients,
    // and standby replicas enabled.
    final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3);
    final Set<TaskId> statefulTasks = Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3);
    final ClientState caughtUpClient = getMockClientWithPreviousCaughtUpTasks(statefulTasks, statefulTasks, PID_1);
    final ClientState coldClientA = getMockClientWithPreviousCaughtUpTasks(EMPTY_TASKS, statefulTasks, PID_2);
    final ClientState coldClientB = getMockClientWithPreviousCaughtUpTasks(EMPTY_TASKS, statefulTasks, PID_3);
    final Map<ProcessId, ClientState> clientStates = getClientStatesMap(caughtUpClient, coldClientA, coldClientB);
    final AssignmentConfigs configs = getConfigWithStandbys(rackAwareStrategy);
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
        mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3))
    );
    final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);

    // When: computing the assignment.
    final boolean probingRebalanceNeeded =
        new HighAvailabilityTaskAssignor().assign(clientStates, allTasks, statefulTasks, rackAwareTaskAssignor, configs);

    // Then: the assignment is valid for one standby plus up to two extra replicas,
    // and a probing rebalance is scheduled for the warmups.
    assertValidAssignment(
        1,
        2,
        statefulTasks,
        emptySet(),
        clientStates,
        new StringBuilder()
    );
    assertThat(probingRebalanceNeeded, is(true));
    verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTasks, clientStates, true, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldDistributeStatelessTasksToBalanceTotalTaskLoad(final String rackAwareStrategy,
                                                                 final boolean enableRackAwareTaskAssignor) {
    // Given: four stateful tasks all caught up on client 1, three stateless tasks,
    // and an empty client 2, with standbys enabled.
    final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3, TASK_1_0, TASK_1_1, TASK_1_2);
    final Set<TaskId> statefulTasks = Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3);
    final Set<TaskId> statelessTasks = Set.of(TASK_1_0, TASK_1_1, TASK_1_2);
    final ClientState caughtUpClient = getMockClientWithPreviousCaughtUpTasks(statefulTasks, statefulTasks, PID_1);
    final ClientState coldClient = getMockClientWithPreviousCaughtUpTasks(EMPTY_TASKS, statefulTasks, PID_2);
    final Map<ProcessId, ClientState> clientStates = getClientStatesMap(caughtUpClient, coldClient);
    final AssignmentConfigs configs = getConfigWithStandbys(rackAwareStrategy);
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
        mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3)),
        mkEntry(new Subtopology(1, null), Set.of(TASK_1_0, TASK_1_1, TASK_1_2))
    );
    final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);

    // When: computing the assignment.
    final boolean probingRebalanceNeeded =
        new HighAvailabilityTaskAssignor().assign(clientStates, allTasks, statefulTasks, rackAwareTaskAssignor, configs);

    // Then: the assignment is valid and active/stateful load is balanced overall.
    assertValidAssignment(
        1,
        2,
        statefulTasks,
        statelessTasks,
        clientStates,
        new StringBuilder()
    );
    assertBalancedActiveAssignment(clientStates, new StringBuilder());
    assertBalancedStatefulAssignment(statefulTasks, clientStates, new StringBuilder());
    // Only client1 is caught up on the stateful tasks, so it takes _all_ of them as
    // actives; the stateless tasks then flow to client2, leaving a skewed per-subtopology
    // distribution — which this test expects to observe.
    final AssignmentTestUtils.TaskSkewReport taskSkewReport = analyzeTaskAssignmentBalance(clientStates, 1);
    assertThat(taskSkewReport.toString(), taskSkewReport.skewedSubtopologies(), not(empty()));
    assertThat(probingRebalanceNeeded, is(true));
    verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTasks, clientStates, true, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldDistributeStatefulActiveTasksToAllClients(final String rackAwareStrategy,
                                                            final boolean enableRackAwareTaskAssignor) {
    // Given: nine stateful tasks (all lag 0 everywhere) and three clients with wildly
    // different thread capacities (100 / 50 / 1) — even the tiny client must get work.
    final Set<TaskId> allTasks =
        Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3, TASK_1_0, TASK_1_1, TASK_1_2, TASK_1_3, TASK_2_0); // 9 total
    final Map<TaskId, Long> allTaskLags = allTasks.stream().collect(Collectors.toMap(task -> task, task -> 0L));
    final Set<TaskId> statefulTasks = new HashSet<>(allTasks);
    final ClientState hugeClient = new ClientState(emptySet(), emptySet(), allTaskLags, EMPTY_CLIENT_TAGS, 100);
    final ClientState mediumClient = new ClientState(emptySet(), emptySet(), allTaskLags, EMPTY_CLIENT_TAGS, 50);
    final ClientState tinyClient = new ClientState(emptySet(), emptySet(), allTaskLags, EMPTY_CLIENT_TAGS, 1);
    final Map<ProcessId, ClientState> clientStates = getClientStatesMap(hugeClient, mediumClient, tinyClient);
    final AssignmentConfigs configs = getConfigWithoutStandbys(rackAwareStrategy);
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
        mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3)),
        mkEntry(new Subtopology(1, null), Set.of(TASK_1_0, TASK_1_1, TASK_1_2, TASK_1_3)),
        mkEntry(new Subtopology(2, null), Set.of(TASK_2_0))
    );
    final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);

    // When: computing the assignment.
    final boolean probingRebalanceNeeded =
        new HighAvailabilityTaskAssignor().assign(clientStates, allTasks, statefulTasks, rackAwareTaskAssignor, configs);

    // Then: every client ends up with at least one active task.
    assertThat(hugeClient.activeTasks(), not(empty()));
    assertThat(mediumClient.activeTasks(), not(empty()));
    assertThat(tinyClient.activeTasks(), not(empty()));
    assertThat(probingRebalanceNeeded, is(false));
    verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTasks, clientStates, false, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldReturnFalseIfPreviousAssignmentIsReused(final String rackAwareStrategy,
                                                          final boolean enableRackAwareTaskAssignor) {
    // Given: each client is already caught up on exactly the tasks it previously ran.
    // The caught-up split differs depending on whether the rack-aware assignor is on,
    // so that in both modes the previous assignment is the one the assignor would pick.
    final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3);
    final Set<TaskId> statefulTasks = new HashSet<>(allTasks);
    final Set<TaskId> caughtUpTasks1 = enableRackAwareTaskAssignor ? Set.of(TASK_0_0, TASK_0_3) : Set.of(TASK_0_0, TASK_0_2);
    final Set<TaskId> caughtUpTasks2 = enableRackAwareTaskAssignor ? Set.of(TASK_0_1, TASK_0_2) : Set.of(TASK_0_1, TASK_0_3);
    final ClientState firstClient = getMockClientWithPreviousCaughtUpTasks(caughtUpTasks1, statefulTasks, PID_1);
    final ClientState secondClient = getMockClientWithPreviousCaughtUpTasks(caughtUpTasks2, statefulTasks, PID_2);
    final Map<ProcessId, ClientState> clientStates = getClientStatesMap(firstClient, secondClient);
    final AssignmentConfigs configs = getConfigWithoutStandbys(rackAwareStrategy);
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
        mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3))
    );
    final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);

    // When: computing the assignment.
    final boolean probingRebalanceNeeded =
        new HighAvailabilityTaskAssignor().assign(clientStates, allTasks, statefulTasks, rackAwareTaskAssignor, configs);

    // Then: the previous assignment is kept verbatim and no probing rebalance occurs.
    assertThat(probingRebalanceNeeded, is(false));
    assertThat(firstClient.activeTasks(), equalTo(firstClient.prevActiveTasks()));
    assertThat(secondClient.activeTasks(), equalTo(secondClient.prevActiveTasks()));
    verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTasks, clientStates, false, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldReturnFalseIfNoWarmupTasksAreAssigned(final String rackAwareStrategy,
                                                        final boolean enableRackAwareTaskAssignor) {
    // Given: four purely stateless tasks — no state means no warmups are ever needed.
    final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3);
    final Set<TaskId> statefulTasks = EMPTY_TASKS;
    final ClientState firstClient = getMockClientWithPreviousCaughtUpTasks(EMPTY_TASKS, statefulTasks, PID_1);
    final ClientState secondClient = getMockClientWithPreviousCaughtUpTasks(EMPTY_TASKS, statefulTasks, PID_2);
    final Map<ProcessId, ClientState> clientStates = getClientStatesMap(firstClient, secondClient);
    final AssignmentConfigs configs = getConfigWithoutStandbys(rackAwareStrategy);
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
        mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3))
    );
    final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);

    // When: computing the assignment.
    final boolean probingRebalanceNeeded =
        new HighAvailabilityTaskAssignor().assign(clientStates, allTasks, statefulTasks, rackAwareTaskAssignor, configs);

    // Then: no warmups were created, so no probing rebalance is requested.
    assertThat(probingRebalanceNeeded, is(false));
    assertHasNoStandbyTasks(firstClient, secondClient);
    verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTasks, clientStates, false, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldReturnTrueIfWarmupTasksAreAssigned(final String rackAwareStrategy, final boolean enableRackAwareTaskAssignor) {
    // Given: client 1 is caught up on both stateful tasks while client 2 is cold,
    // so a warmup replica must be created on client 2.
    final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1);
    final Set<TaskId> statefulTasks = Set.of(TASK_0_0, TASK_0_1);
    final ClientState caughtUpClient = getMockClientWithPreviousCaughtUpTasks(allTasks, statefulTasks, PID_1);
    final ClientState coldClient = getMockClientWithPreviousCaughtUpTasks(EMPTY_TASKS, statefulTasks, PID_2);
    final AssignmentConfigs configs = getConfigWithoutStandbys(rackAwareStrategy);
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
        mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1))
    );
    final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);
    final Map<ProcessId, ClientState> clientStates = getClientStatesMap(caughtUpClient, coldClient);

    // When: computing the assignment.
    final boolean probingRebalanceNeeded =
        new HighAvailabilityTaskAssignor().assign(clientStates, allTasks, statefulTasks, rackAwareTaskAssignor, configs);

    // Then: a warmup was placed on the cold client, which forces a probing rebalance.
    assertThat(probingRebalanceNeeded, is(true));
    assertThat(coldClient.standbyTaskCount(), equalTo(1));
    verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTasks, clientStates, false, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldDistributeStatelessTasksEvenlyOverClientsWithEqualStreamThreadsPerClientAsTasksAndNoStatefulTasks(final String rackAwareStrategy,
                                                                                                                    final boolean enableRackAwareTaskAssignor) {
    // Given: seven stateless tasks and three clients that each have as many stream
    // threads (7) as there are tasks in total.
    final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3, TASK_1_0, TASK_1_1, TASK_1_2);
    final Set<TaskId> statefulTasks = EMPTY_TASKS;
    final Set<TaskId> statelessTasks = new HashSet<>(allTasks);
    final Map<TaskId, Long> taskLags = new HashMap<>();
    final ClientState firstClient = new ClientState(emptySet(), emptySet(), taskLags, EMPTY_CLIENT_TAGS, 7);
    final ClientState secondClient = new ClientState(emptySet(), emptySet(), taskLags, EMPTY_CLIENT_TAGS, 7);
    final ClientState thirdClient = new ClientState(emptySet(), emptySet(), taskLags, EMPTY_CLIENT_TAGS, 7);
    final Map<ProcessId, ClientState> clientStates = getClientStatesMap(firstClient, secondClient, thirdClient);
    final AssignmentConfigs configs = new AssignmentConfigs(0L, 1, 0, 60_000L, EMPTY_RACK_AWARE_ASSIGNMENT_TAGS, null, null, rackAwareStrategy);
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
        mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3)),
        mkEntry(new Subtopology(1, null), Set.of(TASK_1_0, TASK_1_1, TASK_1_2))
    );
    final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);

    // When: computing the assignment.
    final boolean probingRebalanceNeeded =
        new HighAvailabilityTaskAssignor().assign(clientStates, allTasks, statefulTasks, rackAwareTaskAssignor, configs);

    // Then: the stateless tasks are spread evenly and no probing rebalance is needed.
    assertValidAssignment(
        0,
        EMPTY_TASKS,
        statelessTasks,
        clientStates,
        new StringBuilder()
    );
    assertBalancedActiveAssignment(clientStates, new StringBuilder());
    assertThat(probingRebalanceNeeded, is(false));
    verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTasks, clientStates, false, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldDistributeStatelessTasksEvenlyOverClientsWithLessStreamThreadsPerClientAsTasksAndNoStatefulTasks(final String rackAwareStrategy,
                                                                                                                   final boolean enableRackAwareTaskAssignor) {
    // Given: seven stateless tasks and three clients with only two stream threads each
    // (6 threads total < 7 tasks), so some thread must run more than one task.
    final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3, TASK_1_0, TASK_1_1, TASK_1_2);
    final Set<TaskId> statefulTasks = EMPTY_TASKS;
    final Set<TaskId> statelessTasks = new HashSet<>(allTasks);
    final Map<TaskId, Long> taskLags = new HashMap<>();
    final ClientState firstClient = new ClientState(emptySet(), emptySet(), taskLags, EMPTY_CLIENT_TAGS, 2);
    final ClientState secondClient = new ClientState(emptySet(), emptySet(), taskLags, EMPTY_CLIENT_TAGS, 2);
    final ClientState thirdClient = new ClientState(emptySet(), emptySet(), taskLags, EMPTY_CLIENT_TAGS, 2);
    final Map<ProcessId, ClientState> clientStates = getClientStatesMap(firstClient, secondClient, thirdClient);
    final AssignmentConfigs configs = new AssignmentConfigs(0L, 1, 0, 60_000L, EMPTY_RACK_AWARE_ASSIGNMENT_TAGS, null, null, rackAwareStrategy);
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
        mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3)),
        mkEntry(new Subtopology(1, null), Set.of(TASK_1_0, TASK_1_1, TASK_1_2))
    );
    final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);

    // When: computing the assignment.
    final boolean probingRebalanceNeeded =
        new HighAvailabilityTaskAssignor().assign(clientStates, allTasks, statefulTasks, rackAwareTaskAssignor, configs);

    // Then: the stateless tasks are spread evenly and no probing rebalance is needed.
    assertValidAssignment(
        0,
        EMPTY_TASKS,
        statelessTasks,
        clientStates,
        new StringBuilder()
    );
    assertBalancedActiveAssignment(clientStates, new StringBuilder());
    assertThat(probingRebalanceNeeded, is(false));
    verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTasks, clientStates, false, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldDistributeStatelessTasksEvenlyOverClientsWithUnevenlyDistributedStreamThreadsAndNoStatefulTasks(final String rackAwareStrategy,
                                                                                                                  final boolean enableRackAwareTaskAssignor) {
    // Given: seven stateless tasks and three clients with uneven thread counts
    // (1 / 2 / 3) — distribution should still come out balanced per client load.
    final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3, TASK_1_0, TASK_1_1, TASK_1_2);
    final Set<TaskId> statefulTasks = EMPTY_TASKS;
    final Set<TaskId> statelessTasks = new HashSet<>(allTasks);
    final Map<TaskId, Long> taskLags = new HashMap<>();
    final ClientState oneThreadClient = new ClientState(emptySet(), emptySet(), taskLags, EMPTY_CLIENT_TAGS, 1);
    final ClientState twoThreadClient = new ClientState(emptySet(), emptySet(), taskLags, EMPTY_CLIENT_TAGS, 2);
    final ClientState threeThreadClient = new ClientState(emptySet(), emptySet(), taskLags, EMPTY_CLIENT_TAGS, 3);
    final Map<ProcessId, ClientState> clientStates = getClientStatesMap(oneThreadClient, twoThreadClient, threeThreadClient);
    final AssignmentConfigs configs = new AssignmentConfigs(0L, 1, 0, 60_000L, EMPTY_RACK_AWARE_ASSIGNMENT_TAGS, null, null, rackAwareStrategy);
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
        mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3)),
        mkEntry(new Subtopology(1, null), Set.of(TASK_1_0, TASK_1_1, TASK_1_2))
    );
    final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);

    // When: computing the assignment.
    final boolean probingRebalanceNeeded =
        new HighAvailabilityTaskAssignor().assign(clientStates, allTasks, statefulTasks, rackAwareTaskAssignor, configs);

    // Then: the stateless tasks are spread evenly and no probing rebalance is needed.
    assertValidAssignment(
        0,
        EMPTY_TASKS,
        statelessTasks,
        clientStates,
        new StringBuilder()
    );
    assertBalancedActiveAssignment(clientStates, new StringBuilder());
    assertThat(probingRebalanceNeeded, is(false));
    verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTasks, clientStates, false, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldDistributeStatelessTasksEvenlyWithPreviousAssignmentAndNoStatefulTasks(final String rackAwareStrategy,
                                                                                         final boolean enableRackAwareTaskAssignor) {
    // All seven stateless tasks previously lived on client1; the assignor must still end up with a
    // valid, balanced spread across the three equally-sized clients.
    final Set<TaskId> allTasks = Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3, TASK_1_0, TASK_1_1, TASK_1_2);
    final Set<TaskId> statefulTasks = EMPTY_TASKS;
    final Set<TaskId> statelessTasks = new HashSet<>(allTasks);

    // client1 carries the full previous assignment; no lags are reported anywhere.
    final Map<TaskId, Long> noLags = new HashMap<>();
    final ClientState client1 = new ClientState(statelessTasks, emptySet(), noLags, EMPTY_CLIENT_TAGS, 3);
    final ClientState client2 = new ClientState(emptySet(), emptySet(), noLags, EMPTY_CLIENT_TAGS, 3);
    final ClientState client3 = new ClientState(emptySet(), emptySet(), noLags, EMPTY_CLIENT_TAGS, 3);
    final Map<ProcessId, ClientState> clientStates = getClientStatesMap(client1, client2, client3);

    // Standbys disabled; the rack-awareness strategy comes from the test parameter.
    final AssignmentConfigs configs = new AssignmentConfigs(
        0L,
        1,
        0,
        60_000L,
        EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
        null,
        null,
        rackAwareStrategy
    );
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup = mkMap(
        mkEntry(new Subtopology(0, null), Set.of(TASK_0_0, TASK_0_1, TASK_0_2, TASK_0_3)),
        mkEntry(new Subtopology(1, null), Set.of(TASK_1_0, TASK_1_1, TASK_1_2))
    );
    final RackAwareTaskAssignor rackAwareTaskAssignor = getRackAwareTaskAssignor(configs, tasksForTopicGroup);

    final boolean probingRebalanceNeeded =
        new HighAvailabilityTaskAssignor().assign(clientStates, allTasks, statefulTasks, rackAwareTaskAssignor, configs);

    assertValidAssignment(0, EMPTY_TASKS, statelessTasks, clientStates, new StringBuilder());
    assertBalancedActiveAssignment(clientStates, new StringBuilder());
    assertThat(probingRebalanceNeeded, is(false));
    verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, allTasks, clientStates, false, enableRackAwareTaskAssignor);
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldAssignRandomInput(final String rackAwareStrategy, final boolean enableRackAwareTaskAssignor, final int maxSkew) {
    // Randomized fixture dimensions for cluster, topology, partitions and clients.
    final int nodeSize = 50;
    final int tpSize = 60;
    final int partitionSize = 3;
    final int clientSize = 50;
    final int replicaCount = 3;
    final int maxCapacity = 3;

    final SortedMap<TaskId, Set<TopicPartition>> taskTopicPartitionMap = getTaskTopicPartitionMap(tpSize, partitionSize, false);
    final AssignmentConfigs assignorConfiguration = getConfigWithStandbys(replicaCount, rackAwareStrategy);
    // Spy so that verifyTaskPlacementWithRackAwareAssignor can inspect the assignor's interactions.
    final RackAwareTaskAssignor rackAwareTaskAssignor = spy(new RackAwareTaskAssignor(
        getRandomCluster(nodeSize, tpSize, partitionSize),
        taskTopicPartitionMap,
        getTaskTopicPartitionMap(tpSize, partitionSize, true),
        getTasksForTopicGroup(tpSize, partitionSize),
        getRandomProcessRacks(clientSize, nodeSize),
        mockInternalTopicManagerForRandomChangelog(nodeSize, tpSize, partitionSize),
        assignorConfiguration,
        time
    ));

    // NOTE(review): unchecked cast assumes the SortedMap implementation exposes a SortedSet key view
    // (true for TreeMap) — confirm against getTaskTopicPartitionMap.
    final SortedSet<TaskId> taskIds = (SortedSet<TaskId>) taskTopicPartitionMap.keySet();
    // Randomly partition all tasks into a stateful and a stateless subset.
    final List<Set<TaskId>> taskSplit = getRandomSubset(taskIds, 2);
    final Set<TaskId> statefulTasks = taskSplit.get(0);
    final Set<TaskId> statelessTasks = taskSplit.get(1);
    final SortedMap<ProcessId, ClientState> clientStateMap =
        getRandomClientState(clientSize, tpSize, partitionSize, maxCapacity, false, statefulTasks);

    new HighAvailabilityTaskAssignor().assign(clientStateMap, taskIds, statefulTasks, rackAwareTaskAssignor, assignorConfiguration);

    assertValidAssignment(replicaCount, statefulTasks, statelessTasks, clientStateMap, new StringBuilder());
    assertBalancedActiveAssignment(clientStateMap, new StringBuilder());
    verifyTaskPlacementWithRackAwareAssignor(rackAwareTaskAssignor, taskIds, clientStateMap, true, enableRackAwareTaskAssignor);
    if (rackAwareStrategy.equals(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY)) {
        assertBalancedTasks(clientStateMap, maxSkew);
    }
}
@ParameterizedTest
@MethodSource("parameter")
public void shouldRemainOriginalAssignmentWithoutTrafficCostForMinCostStrategy(final String rackAwareStrategy,
                                                                               final boolean enableRackAwareTaskAssignor,
                                                                               final int maxSkew) {
    // This test tests that if the traffic cost is 0, we should have same assignment with or without
    // rack aware assignor enabled
    // NOTE(review): enableRackAwareTaskAssignor is never read in this body; presumably it is kept so
    // the signature matches the shared "parameter" @MethodSource — confirm.
    // Randomized fixture dimensions for cluster, topology, partitions and clients.
    final int nodeSize = 50;
    final int tpSize = 60;
    final int partitionSize = 3;
    final int clientSize = 50;
    final int replicaCount = 1;
    final int maxCapacity = 3;
    // Build the fixture once; the same cluster/topology/rack inputs are reused for both assignment
    // runs below so the only difference between the runs is the rack-awareness strategy.
    final SortedMap<TaskId, Set<TopicPartition>> taskTopicPartitionMap = getTaskTopicPartitionMap(
        tpSize, partitionSize, false);
    final Cluster cluster = getRandomCluster(nodeSize, tpSize, partitionSize);
    final Map<TaskId, Set<TopicPartition>> taskChangelogTopicPartitionMap = getTaskTopicPartitionMap(tpSize, partitionSize, true);
    final Map<Subtopology, Set<TaskId>> subtopologySetMap = getTasksForTopicGroup(tpSize, partitionSize);
    final Map<ProcessId, Map<String, Optional<String>>> processRackMap = getRandomProcessRacks(clientSize, nodeSize);
    final InternalTopicManager mockInternalTopicManager = mockInternalTopicManagerForRandomChangelog(nodeSize, tpSize, partitionSize);
    // First run: parameterized strategy with traffic cost 0 and non-overlap cost 10.
    AssignmentConfigs configs = new AssignmentConfigs(
        0L,
        1,
        replicaCount,
        60_000L,
        EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
        0,
        10,
        rackAwareStrategy
    );
    RackAwareTaskAssignor rackAwareTaskAssignor = spy(new RackAwareTaskAssignor(
        cluster,
        taskTopicPartitionMap,
        taskChangelogTopicPartitionMap,
        subtopologySetMap,
        processRackMap,
        mockInternalTopicManager,
        configs,
        time
    ));
    // NOTE(review): unchecked cast assumes the SortedMap implementation exposes a SortedSet key view
    // (true for TreeMap) — confirm against getTaskTopicPartitionMap.
    final SortedSet<TaskId> taskIds = (SortedSet<TaskId>) taskTopicPartitionMap.keySet();
    // Randomly partition all tasks into a stateful and a stateless subset.
    final List<Set<TaskId>> statefulAndStatelessTasks = getRandomSubset(taskIds, 2);
    final Set<TaskId> statefulTasks = statefulAndStatelessTasks.get(0);
    final Set<TaskId> statelessTasks = statefulAndStatelessTasks.get(1);
    final SortedMap<ProcessId, ClientState> clientStateMap = getRandomClientState(clientSize,
        tpSize, partitionSize, maxCapacity, false, statefulTasks);
    new HighAvailabilityTaskAssignor().assign(
        clientStateMap,
        taskIds,
        statefulTasks,
        rackAwareTaskAssignor,
        configs
    );
    assertValidAssignment(1, statefulTasks, statelessTasks, clientStateMap, new StringBuilder());
    // With the strategy disabled there is nothing further to compare against.
    if (rackAwareStrategy.equals(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE)) {
        return;
    }
    if (rackAwareStrategy.equals(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY)) {
        // Original assignment won't be maintained because we calculate the assignment using max flow first
        // in balance subtopology strategy
        assertBalancedTasks(clientStateMap, maxSkew);
        return;
    }
    // Second run: repeat the assignment on a copy of the client states with rack awareness set to
    // NONE; since traffic cost was 0, both runs should land on the same stateful/standby placement.
    final SortedMap<ProcessId, ClientState> clientStateMapCopy = copyClientStateMap(clientStateMap);
    configs = new AssignmentConfigs(
        0L,
        1,
        replicaCount,
        60_000L,
        EMPTY_RACK_AWARE_ASSIGNMENT_TAGS,
        0,
        10,
        StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_NONE
    );
    rackAwareTaskAssignor = spy(new RackAwareTaskAssignor(
        cluster,
        taskTopicPartitionMap,
        taskChangelogTopicPartitionMap,
        subtopologySetMap,
        processRackMap,
        mockInternalTopicManager,
        configs,
        time
    ));
    new HighAvailabilityTaskAssignor().assign(
        clientStateMapCopy,
        taskIds,
        statefulTasks,
        rackAwareTaskAssignor,
        configs
    );
    // Per-client comparison: stateful active tasks and standby tasks must be identical across runs.
    for (final Map.Entry<ProcessId, ClientState> entry : clientStateMap.entrySet()) {
        assertThat(entry.getValue().statefulActiveTasks(), Matchers.equalTo(clientStateMapCopy.get(entry.getKey()).statefulActiveTasks()));
        assertThat(entry.getValue().standbyTasks(), Matchers.equalTo(clientStateMapCopy.get(entry.getKey()).standbyTasks()));
    }
}
private static void assertHasNoActiveTasks(final ClientState... clients) {
    // Every given client must report an empty active-task set.
    for (int i = 0; i < clients.length; i++) {
        assertThat(clients[i].activeTasks(), is(empty()));
    }
}
private static void assertHasNoStandbyTasks(final ClientState... clients) {
    // Every given client must report exactly zero standby tasks.
    for (int i = 0; i < clients.length; i++) {
        assertThat(clients[i], hasStandbyTasks(0));
    }
}
private static ClientState getMockClientWithPreviousCaughtUpTasks(final Set<TaskId> statefulActiveTasks,
                                                                  final Set<TaskId> statefulTasks,
                                                                  final ProcessId processId) {
    // A previously-active task can only be caught up if it is part of the stateful task set.
    if (!statefulTasks.containsAll(statefulActiveTasks)) {
        throw new IllegalArgumentException("Need to initialize stateful tasks set before creating mock clients");
    }
    // Caught-up tasks report zero lag; every other stateful task is maximally behind.
    final Map<TaskId, Long> taskLags = new HashMap<>();
    for (final TaskId task : statefulTasks) {
        taskLags.put(task, statefulActiveTasks.contains(task) ? 0L : Long.MAX_VALUE);
    }
    return new ClientState(statefulActiveTasks, emptySet(), taskLags, EMPTY_CLIENT_TAGS, 1, processId);
}
}
| HighAvailabilityTaskAssignorTest |
java | resilience4j__resilience4j | resilience4j-core/src/main/java/io/github/resilience4j/core/metrics/SlidingTimeWindowMetrics.java | {
"start": 2272,
"end": 6043
} | class ____ implements Metrics {
final PartialAggregation[] partialAggregations;
private final int timeWindowSizeInSeconds;
private final TotalAggregation totalAggregation;
private final Clock clock;
int headIndex;
private final ReentrantLock lock = new ReentrantLock();
/**
* Creates a new {@link SlidingTimeWindowMetrics} with the given clock and window of time.
*
* @param timeWindowSizeInSeconds the window time size in seconds
* @param clock the {@link Clock} to use
*/
public SlidingTimeWindowMetrics(int timeWindowSizeInSeconds, Clock clock) {
this.clock = clock;
this.timeWindowSizeInSeconds = timeWindowSizeInSeconds;
this.partialAggregations = new PartialAggregation[timeWindowSizeInSeconds];
this.headIndex = 0;
long epochSecond = clock.instant().getEpochSecond();
for (int i = 0; i < timeWindowSizeInSeconds; i++) {
partialAggregations[i] = new PartialAggregation(epochSecond);
epochSecond++;
}
this.totalAggregation = new TotalAggregation();
}
@Override
public Snapshot record(long duration, TimeUnit durationUnit, Outcome outcome) {
lock.lock();
try {
totalAggregation.record(duration, durationUnit, outcome);
moveWindowToCurrentEpochSecond(getLatestPartialAggregation())
.record(duration, durationUnit, outcome);
return new SnapshotImpl(totalAggregation);
} finally {
lock.unlock();
}
}
public Snapshot getSnapshot() {
lock.lock();
try {
moveWindowToCurrentEpochSecond(getLatestPartialAggregation());
return new SnapshotImpl(totalAggregation);
} finally {
lock.unlock();
}
}
/**
* Moves the end of the time window to the current epoch second. The latest bucket of the
* circular array is used to calculate how many seconds the window must be moved. The difference
* is calculated by subtracting the epoch second from the latest bucket from the current epoch
* second. If the difference is greater than the time window size, the time window size is
* used.
*
* @param latestPartialAggregation the latest partial aggregation of the circular array
*/
private PartialAggregation moveWindowToCurrentEpochSecond(
PartialAggregation latestPartialAggregation) {
long currentEpochSecond = clock.instant().getEpochSecond();
long differenceInSeconds = currentEpochSecond - latestPartialAggregation.getEpochSecond();
if (differenceInSeconds == 0) {
return latestPartialAggregation;
}
long secondsToMoveTheWindow = Math.min(differenceInSeconds, timeWindowSizeInSeconds);
PartialAggregation currentPartialAggregation;
do {
secondsToMoveTheWindow--;
moveHeadIndexByOne();
currentPartialAggregation = getLatestPartialAggregation();
totalAggregation.removeBucket(currentPartialAggregation);
currentPartialAggregation.reset(currentEpochSecond - secondsToMoveTheWindow);
} while (secondsToMoveTheWindow > 0);
return currentPartialAggregation;
}
/**
* Returns the head partial aggregation of the circular array.
*
* @return the head partial aggregation of the circular array
*/
private PartialAggregation getLatestPartialAggregation() {
return partialAggregations[headIndex];
}
/**
* Moves the headIndex to the next bucket.
*/
void moveHeadIndexByOne() {
this.headIndex = (headIndex + 1) % timeWindowSizeInSeconds;
}
} | SlidingTimeWindowMetrics |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/loading/multiLoad/MultiNaturalIdLoadTest.java | {
"start": 1442,
"end": 6753
} | class ____ {
private final Pattern p = Pattern.compile( "\\(\\?,\\?\\)" );
@BeforeEach
public void setup(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
session.setCacheMode( CacheMode.IGNORE );
for ( int i = 1; i <= 10; i++ ) {
session.persist( new SimpleNaturalIdEntity( i, "Entity" + i ) );
}
for ( int i = 1; i <= 10; i++ ) {
session.persist( new SimpleMutableNaturalIdEntity( i, "MIdEntity" + i ) );
}
for ( int i = 1; i <= 10; i++ ) {
session.persist( new CompositeNaturalIdEntity( i, "Entity" + i, i + "Entity" ) );
}
}
);
}
@AfterEach
public void tearDown(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Test
public void testBasicUnorderedMultiLoad(SessionFactoryScope scope) {
final SQLStatementInspector statementInspector = scope.getCollectingStatementInspector();
scope.inTransaction(
session -> {
statementInspector.getSqlQueries().clear();
List<SimpleNaturalIdEntity> results = session
.byMultipleNaturalId( SimpleNaturalIdEntity.class )
.enableOrderedReturn( false )
.multiLoad( "Entity1","Entity2","Entity3","Entity4","Entity5" );
assertEquals( 5, results.size() );
Iterator<SimpleNaturalIdEntity> it = results.iterator();
for ( int i = 1; i <= 5; i++ ) {
SimpleNaturalIdEntity se= it.next();
if ( i == se.getId() ) {
it.remove();
}
}
assertEquals( 0, results.size() );
final int paramCount = StringHelper.countUnquoted(
statementInspector.getSqlQueries().get( 0 ),
'?'
);
final Dialect dialect = session.getSessionFactory()
.getJdbcServices()
.getDialect();
if ( MultiKeyLoadHelper.supportsSqlArrayType( dialect ) ) {
assertEquals(1, paramCount );
}
else {
assertEquals(5, paramCount );
}
}
);
}
@Test
public void testBasicOrderedMultiLoad(SessionFactoryScope scope) {
final SQLStatementInspector statementInspector = scope.getCollectingStatementInspector();
scope.inTransaction(
session -> {
statementInspector.getSqlQueries().clear();
List<String> ids = List.of("Entity4","Entity2","Entity5","Entity1","Entity3");
List<SimpleNaturalIdEntity> results = session
.byMultipleNaturalId( SimpleNaturalIdEntity.class )
.enableOrderedReturn( true )
.multiLoad( ids );
assertEquals( 5, results.size() );
for ( int i = 0; i < 5; i++ ) {
assertEquals(ids.get(i), results.get(i).getSsn() );
}
final int paramCount = StringHelper.countUnquoted(
statementInspector.getSqlQueries().get( 0 ),
'?'
);
final Dialect dialect = session.getSessionFactory()
.getJdbcServices()
.getDialect();
if ( MultiKeyLoadHelper.supportsSqlArrayType( dialect ) ) {
assertEquals(1, paramCount );
}
else {
assertEquals(5, paramCount );
}
}
);
}
@Test
public void testCompoundNaturalIdUnorderedMultiLoad(SessionFactoryScope scope) {
final SQLStatementInspector statementInspector = scope.getCollectingStatementInspector();
scope.inTransaction(
session -> {
statementInspector.getSqlQueries().clear();
List<String[]> ids = List.of( new String[]{"Entity1", "1Entity"}, new String[]{"Entity2", "2Entity"}, new String[]{"Entity3", "3Entity"} );
List<CompositeNaturalIdEntity> results = session
.byMultipleNaturalId( CompositeNaturalIdEntity.class )
.enableOrderedReturn( true )
.multiLoad( ids );
Iterator<CompositeNaturalIdEntity> it = results.iterator();
for ( int i = 1; i <= 3; i++ ) {
CompositeNaturalIdEntity se= it.next();
if ( i == se.getId() ) {
it.remove();
}
}
assertEquals( 0, results.size() );
}
);
verify( scope.getSessionFactory().getJdbcServices().getDialect(), statementInspector );
}
@Test
public void testCompoundNaturalIdOrderedMultiLoad(SessionFactoryScope scope) {
final SQLStatementInspector statementInspector = scope.getCollectingStatementInspector();
scope.inTransaction(
session -> {
statementInspector.getSqlQueries().clear();
List<String[]> ids = List.of( new String[]{"Entity4", "4Entity"}, new String[]{"Entity2", "2Entity"}, new String[]{"Entity5", "5Entity"} );
List<CompositeNaturalIdEntity> results = session
.byMultipleNaturalId( CompositeNaturalIdEntity.class )
.enableOrderedReturn( true )
.multiLoad( ids );
assertEquals( 3, results.size() );
for ( int i = 0; i < 3; i++ ) {
assertEquals(ids.get(i)[0], results.get(i).getSsn() );
assertEquals(ids.get(i)[1], results.get(i).getSsn2() );
}
}
);
verify( scope.getSessionFactory().getJdbcServices().getDialect(), statementInspector );
}
private void verify( Dialect dialect, SQLStatementInspector statementInspector ) {
if ( dialect.supportsRowValueConstructorSyntaxInInList() ) {
Matcher m = p.matcher( statementInspector.getSqlQueries().get( 0 ) );
int paramCount = 0;
while ( m.find() ) {
paramCount++;
}
assertEquals( 3, paramCount );
}
else {
// DB2Dialect.class, HSQLDialect.class, SQLServerDialect.class, SybaseDialect.class, SpannerDialect. | MultiNaturalIdLoadTest |
java | elastic__elasticsearch | x-pack/plugin/core/src/yamlRestTest/java/org/elasticsearch/license/XPackCoreClientYamlTestSuiteIT.java | {
"start": 821,
"end": 2027
} | class ____ extends ESClientYamlSuiteTestCase {
@ClassRule
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
.setting("xpack.security.enabled", "true")
.setting("xpack.license.self_generated.type", "trial")
.keystore("bootstrap.password", "x-pack-test-password")
.user("x_pack_rest_user", "x-pack-test-password")
.systemProperty("es.queryable_built_in_roles_enabled", "false")
.build();
private static final String BASIC_AUTH_VALUE = basicAuthHeaderValue("x_pack_rest_user", new SecureString("x-pack-test-password"));
public XPackCoreClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
super(testCandidate);
}
@ParametersFactory
public static Iterable<Object[]> parameters() throws Exception {
return ESClientYamlSuiteTestCase.createParameters();
}
@Override
protected Settings restClientSettings() {
return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", BASIC_AUTH_VALUE).build();
}
@Override
protected String getTestRestCluster() {
return cluster.getHttpAddresses();
}
}
| XPackCoreClientYamlTestSuiteIT |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/issues/ContextScopedOnExceptionNotHandledErrorHandlerRefIssueTest.java | {
"start": 1249,
"end": 2685
} | class ____ extends ContextTestSupport {
@Test
public void testOnExceptionErrorHandlerRef() throws Exception {
getMockEndpoint("mock:a").expectedMessageCount(1);
getMockEndpoint("mock:handled").expectedMessageCount(1);
getMockEndpoint("mock:dead").expectedMessageCount(0);
CamelExecutionException e = assertThrows(CamelExecutionException.class,
() -> template.sendBody("direct:start", "Hello World"),
"Should have thrown exception");
IllegalArgumentException cause = assertIsInstanceOf(IllegalArgumentException.class, e.getCause());
assertEquals("Damn", cause.getMessage());
assertMockEndpointsSatisfied();
}
@Override
protected Registry createCamelRegistry() throws Exception {
Registry jndi = super.createCamelRegistry();
jndi.bind("myDLC", new DeadLetterChannelBuilder("mock:dead"));
return jndi;
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
onException(IllegalArgumentException.class).handled(false).to("mock:handled").end();
errorHandler("myDLC");
from("direct:start").to("mock:a").throwException(new IllegalArgumentException("Damn"));
}
};
}
}
| ContextScopedOnExceptionNotHandledErrorHandlerRefIssueTest |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/error/ShouldBeAfterOrEqualTo.java | {
"start": 977,
"end": 2318
} | class ____ extends BasicErrorMessageFactory {
/**
* Creates a new <code>{@link ShouldBeAfterOrEqualTo}</code>.
*
* @param actual the actual value in the failed assertion.
* @param other the value used in the failed assertion to compare the actual value to.
* @param comparisonStrategy the {@link ComparisonStrategy} used to evaluate assertion.
* @return the created {@code ErrorMessageFactory}.
*/
public static ErrorMessageFactory shouldBeAfterOrEqualTo(Object actual, Object other, ComparisonStrategy comparisonStrategy) {
return new ShouldBeAfterOrEqualTo(actual, other, comparisonStrategy);
}
/**
* Creates a new <code>{@link ShouldBeAfterOrEqualTo}</code>.
*
* @param actual the actual value in the failed assertion.
* @param other the value used in the failed assertion to compare the actual value to.
* @return the created {@code ErrorMessageFactory}.
*/
public static ErrorMessageFactory shouldBeAfterOrEqualTo(Object actual, Object other) {
return new ShouldBeAfterOrEqualTo(actual, other, StandardComparisonStrategy.instance());
}
private ShouldBeAfterOrEqualTo(Object actual, Object other, ComparisonStrategy comparisonStrategy) {
super("%nExpecting actual:%n %s%nto be after or equal to:%n %s%n%s", actual, other, comparisonStrategy);
}
}
| ShouldBeAfterOrEqualTo |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/info/GitPropertiesTests.java | {
"start": 1057,
"end": 4724
} | class ____ {
@Test
void basicInfo() {
GitProperties properties = new GitProperties(
createProperties("master", "abcdefghijklmno", "abcdefg", "1457527123"));
assertThat(properties.getBranch()).isEqualTo("master");
assertThat(properties.getCommitId()).isEqualTo("abcdefghijklmno");
assertThat(properties.getShortCommitId()).isEqualTo("abcdefg");
}
@Test
void noInfo() {
GitProperties properties = new GitProperties(new Properties());
assertThat(properties.getBranch()).isNull();
assertThat(properties.getCommitId()).isNull();
assertThat(properties.getShortCommitId()).isNull();
assertThat(properties.getCommitTime()).isNull();
}
@Test
void coerceEpochSecond() {
GitProperties properties = new GitProperties(createProperties("master", "abcdefg", null, "1457527123"));
assertThat(properties.getCommitTime()).isNotNull();
assertThat(properties.get("commit.time")).isEqualTo("1457527123000");
assertThat(properties.getCommitTime().toEpochMilli()).isEqualTo(1457527123000L);
}
@Test
void coerceLegacyDateString() {
GitProperties properties = new GitProperties(
createProperties("master", "abcdefg", null, "2016-03-04T14:36:33+0100"));
assertThat(properties.getCommitTime()).isNotNull();
assertThat(properties.get("commit.time")).isEqualTo("1457098593000");
assertThat(properties.getCommitTime().toEpochMilli()).isEqualTo(1457098593000L);
}
@Test
void coerceDateString() {
GitProperties properties = new GitProperties(
createProperties("master", "abcdefg", null, "2016-03-04T14:36:33+01:00"));
assertThat(properties.getCommitTime()).isNotNull();
assertThat(properties.get("commit.time")).isEqualTo("1457098593000");
assertThat(properties.getCommitTime().toEpochMilli()).isEqualTo(1457098593000L);
}
@Test
void coerceUnsupportedFormat() {
GitProperties properties = new GitProperties(
createProperties("master", "abcdefg", null, "2016-03-04 15:22:24"));
assertThat(properties.getCommitTime()).isNull();
assertThat(properties.get("commit.time")).isEqualTo("2016-03-04 15:22:24");
}
@Test
void shortCommitUsedIfPresent() {
GitProperties properties = new GitProperties(
createProperties("master", "abcdefghijklmno", "abcdefgh", "1457527123"));
assertThat(properties.getCommitId()).isEqualTo("abcdefghijklmno");
assertThat(properties.getShortCommitId()).isEqualTo("abcdefgh");
}
@Test
void shortenCommitIdShorterThan7() {
GitProperties properties = new GitProperties(createProperties("master", "abc", null, "1457527123"));
assertThat(properties.getCommitId()).isEqualTo("abc");
assertThat(properties.getShortCommitId()).isEqualTo("abc");
}
@Test
void shortenCommitIdLongerThan7() {
GitProperties properties = new GitProperties(createProperties("master", "abcdefghijklmno", null, "1457527123"));
assertThat(properties.getCommitId()).isEqualTo("abcdefghijklmno");
assertThat(properties.getShortCommitId()).isEqualTo("abcdefg");
}
@Test
void shouldRegisterHints() {
RuntimeHints runtimeHints = new RuntimeHints();
new GitProperties.GitPropertiesRuntimeHints().registerHints(runtimeHints, getClass().getClassLoader());
assertThat(RuntimeHintsPredicates.resource().forResource("git.properties")).accepts(runtimeHints);
}
private static Properties createProperties(String branch, String commitId, @Nullable String commitIdAbbrev,
String commitTime) {
Properties properties = new Properties();
properties.put("branch", branch);
properties.put("commit.id", commitId);
if (commitIdAbbrev != null) {
properties.put("commit.id.abbrev", commitIdAbbrev);
}
properties.put("commit.time", commitTime);
return properties;
}
}
| GitPropertiesTests |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ConstEnumCounters.java | {
"start": 1140,
"end": 1213
} | class ____ modification on ConstEnumCounters.
*/
public static final | for |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/processor/SpringToDynamicVariableTest.java | {
"start": 1042,
"end": 1307
} | class ____ extends ToDynamicVariableTest {
@Override
protected CamelContext createCamelContext() throws Exception {
return createSpringCamelContext(this, "org/apache/camel/spring/processor/ToDynamicVariableTest.xml");
}
}
| SpringToDynamicVariableTest |
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/tags/UrlTag.java | {
"start": 11944,
"end": 11993
} | enum ____ classifies URLs by type.
*/
private | that |
java | spring-projects__spring-framework | spring-messaging/src/main/java/org/springframework/messaging/converter/JsonbMessageConverter.java | {
"start": 1254,
"end": 3376
} | class ____ extends AbstractJsonMessageConverter {
private Jsonb jsonb;
/**
* Construct a new {@code JsonbMessageConverter} with default configuration.
*/
public JsonbMessageConverter() {
this.jsonb = JsonbBuilder.create();
}
/**
* Construct a new {@code JsonbMessageConverter} with the given configuration.
* @param config the {@code JsonbConfig} for the underlying delegate
*/
public JsonbMessageConverter(JsonbConfig config) {
this.jsonb = JsonbBuilder.create(config);
}
/**
* Construct a new {@code JsonbMessageConverter} with the given delegate.
* @param jsonb the Jsonb instance to use
*/
public JsonbMessageConverter(Jsonb jsonb) {
Assert.notNull(jsonb, "A Jsonb instance is required");
this.jsonb = jsonb;
}
/**
* Set the {@code Jsonb} instance to use.
* If not set, a default {@code Jsonb} instance will be created.
* <p>Setting a custom-configured {@code Jsonb} is one way to take further
* control of the JSON serialization process.
* @see #JsonbMessageConverter(Jsonb)
* @see #JsonbMessageConverter(JsonbConfig)
* @see JsonbBuilder
*/
public void setJsonb(Jsonb jsonb) {
Assert.notNull(jsonb, "A Jsonb instance is required");
this.jsonb = jsonb;
}
/**
* Return the configured {@code Jsonb} instance for this converter.
*/
public Jsonb getJsonb() {
return this.jsonb;
}
@Override
protected Object fromJson(Reader reader, Type resolvedType) {
return getJsonb().fromJson(reader, resolvedType);
}
@Override
protected Object fromJson(String payload, Type resolvedType) {
return getJsonb().fromJson(payload, resolvedType);
}
@Override
protected void toJson(Object payload, Type resolvedType, Writer writer) {
if (resolvedType instanceof ParameterizedType) {
getJsonb().toJson(payload, resolvedType, writer);
}
else {
getJsonb().toJson(payload, writer);
}
}
@Override
protected String toJson(Object payload, Type resolvedType) {
if (resolvedType instanceof ParameterizedType) {
return getJsonb().toJson(payload, resolvedType);
}
else {
return getJsonb().toJson(payload);
}
}
}
| JsonbMessageConverter |
java | elastic__elasticsearch | modules/repository-s3/qa/insecure-credentials/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java | {
"start": 14000,
"end": 14981
} | class ____ extends AmazonS3Wrapper {
final AwsCredentialsProvider credentials;
// The httpClient must be explicitly closed. Closure of the S3Client, which uses the httpClient, will not do so.
private final SdkHttpClient httpClient;
ClientAndCredentials(S3Client delegate, SdkHttpClient httpClient, AwsCredentialsProvider credentials) {
super(delegate);
this.httpClient = httpClient;
this.credentials = credentials;
}
@Override
public String serviceName() {
return "ClientAndCredentials";
}
@Override
public void close() {
super.close();
httpClient.close();
}
}
/**
* A {@link S3Service} wrapper that supports access to a copy of the credentials given to the S3Client.
*/
public static final | ClientAndCredentials |
java | google__auto | value/src/it/functional/src/test/java/com/google/auto/value/gwt/EmptyExtension.java | {
"start": 1306,
"end": 1875
} | class ____ extends AutoValueExtension {
// TODO(emcmanus): it is way too difficult to write a trivial extension. Problems we have here:
// (1) We have to generate a constructor that calls the superclass constructor, which means
// declaring the appropriate constructor parameters and then forwarding them to a super
// call.
// (2) We have to avoid generating variable names that are keywords (we append $ here
// to avoid that).
// (3) We have to concoct appropriate type parameter strings, for example
// final | EmptyExtension |
java | quarkusio__quarkus | extensions/opentelemetry/runtime/src/test/java/io/quarkus/opentelemetry/runtime/exporter/otlp/HttpClientOptionsConsumerTest.java | {
"start": 4793,
"end": 5229
} | class ____ implements TlsConfigurationRegistry {
@Override
public Optional<TlsConfiguration> get(String name) {
return Optional.empty();
}
@Override
public Optional<TlsConfiguration> getDefault() {
return Optional.empty();
}
@Override
public void register(String name, TlsConfiguration configuration) {
}
}
}
| NoopTlsConfigurationRegistry |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/RestGetAnalyticsCollectionAction.java | {
"start": 1168,
"end": 2291
} | class ____ extends EnterpriseSearchBaseRestHandler {
public RestGetAnalyticsCollectionAction(XPackLicenseState licenseState) {
super(licenseState, LicenseUtils.Product.BEHAVIORAL_ANALYTICS);
}
@Override
public String getName() {
return "get_analytics_collection_action";
}
@Override
public List<Route> routes() {
return List.of(
new Route(GET, "/" + EnterpriseSearch.BEHAVIORAL_ANALYTICS_API_ENDPOINT + "/{collection_name}"),
new Route(GET, "/" + EnterpriseSearch.BEHAVIORAL_ANALYTICS_API_ENDPOINT)
);
}
@Override
protected RestChannelConsumer innerPrepareRequest(RestRequest restRequest, NodeClient client) {
GetAnalyticsCollectionAction.Request request = new GetAnalyticsCollectionAction.Request(
RestUtils.getMasterNodeTimeout(restRequest),
Strings.splitStringByCommaToArray(restRequest.param("collection_name"))
);
return channel -> client.execute(GetAnalyticsCollectionAction.INSTANCE, request, new RestToXContentListener<>(channel));
}
}
| RestGetAnalyticsCollectionAction |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForRolloverReadyStepTests.java | {
"start": 2722,
"end": 39781
} | class ____ extends AbstractStepTestCase<WaitForRolloverReadyStep> {
@Override
protected WaitForRolloverReadyStep createRandomInstance() {
Step.StepKey stepKey = randomStepKey();
Step.StepKey nextStepKey = randomStepKey();
ByteSizeUnit maxSizeUnit = randomFrom(ByteSizeUnit.values());
ByteSizeValue maxSize = randomBoolean() ? null : ByteSizeValue.of(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit);
ByteSizeUnit maxPrimaryShardSizeUnit = randomFrom(ByteSizeUnit.values());
ByteSizeValue maxPrimaryShardSize = randomBoolean()
? null
: ByteSizeValue.of(randomNonNegativeLong() / maxPrimaryShardSizeUnit.toBytes(1), maxPrimaryShardSizeUnit);
Long maxDocs = randomBoolean() ? null : randomNonNegativeLong();
TimeValue maxAge = (maxDocs == null && maxSize == null || randomBoolean()) ? randomPositiveTimeValue() : null;
Long maxPrimaryShardDocs = randomBoolean() ? null : randomNonNegativeLong();
ByteSizeUnit minSizeUnit = randomFrom(ByteSizeUnit.values());
ByteSizeValue minSize = randomBoolean() ? null : ByteSizeValue.of(randomNonNegativeLong() / minSizeUnit.toBytes(1), minSizeUnit);
ByteSizeUnit minPrimaryShardSizeUnit = randomFrom(ByteSizeUnit.values());
ByteSizeValue minPrimaryShardSize = randomBoolean()
? null
: ByteSizeValue.of(randomNonNegativeLong() / minPrimaryShardSizeUnit.toBytes(1), minPrimaryShardSizeUnit);
Long minDocs = randomBoolean() ? null : randomNonNegativeLong();
TimeValue minAge = (minDocs == null || randomBoolean()) ? randomPositiveTimeValue() : null;
Long minPrimaryShardDocs = randomBoolean() ? null : randomNonNegativeLong();
return new WaitForRolloverReadyStep(
stepKey,
nextStepKey,
client,
maxSize,
maxPrimaryShardSize,
maxAge,
maxDocs,
maxPrimaryShardDocs,
minSize,
minPrimaryShardSize,
minAge,
minDocs,
minPrimaryShardDocs
);
}
@Override
protected WaitForRolloverReadyStep mutateInstance(WaitForRolloverReadyStep instance) {
Step.StepKey key = instance.getKey();
RolloverConditions configuration = instance.getConditions();
Step.StepKey nextKey = instance.getNextStepKey();
ByteSizeValue maxSize = configuration.getMaxSize();
ByteSizeValue maxPrimaryShardSize = configuration.getMaxPrimaryShardSize();
TimeValue maxAge = configuration.getMaxAge();
Long maxDocs = configuration.getMaxDocs();
Long maxPrimaryShardDocs = configuration.getMaxPrimaryShardDocs();
ByteSizeValue minSize = configuration.getMinSize();
ByteSizeValue minPrimaryShardSize = configuration.getMinPrimaryShardSize();
TimeValue minAge = configuration.getMinAge();
Long minDocs = configuration.getMinDocs();
Long minPrimaryShardDocs = configuration.getMinPrimaryShardDocs();
switch (between(0, 11)) {
case 0 -> key = new Step.StepKey(key.phase(), key.action(), key.name() + randomAlphaOfLength(5));
case 1 -> nextKey = new Step.StepKey(nextKey.phase(), nextKey.action(), nextKey.name() + randomAlphaOfLength(5));
case 2 -> maxSize = randomValueOtherThan(maxSize, () -> {
ByteSizeUnit maxSizeUnit = randomFrom(ByteSizeUnit.values());
return ByteSizeValue.of(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit);
});
case 3 -> maxPrimaryShardSize = randomValueOtherThan(maxPrimaryShardSize, () -> {
ByteSizeUnit maxPrimaryShardSizeUnit = randomFrom(ByteSizeUnit.values());
return ByteSizeValue.of(randomNonNegativeLong() / maxPrimaryShardSizeUnit.toBytes(1), maxPrimaryShardSizeUnit);
});
case 4 -> maxAge = randomValueOtherThan(maxAge, () -> randomPositiveTimeValue());
case 5 -> maxDocs = randomValueOtherThan(maxDocs, ESTestCase::randomNonNegativeLong);
case 6 -> maxPrimaryShardDocs = randomValueOtherThan(maxPrimaryShardDocs, ESTestCase::randomNonNegativeLong);
case 7 -> minSize = randomValueOtherThan(minSize, () -> {
ByteSizeUnit minSizeUnit = randomFrom(ByteSizeUnit.values());
return ByteSizeValue.of(randomNonNegativeLong() / minSizeUnit.toBytes(1), minSizeUnit);
});
case 8 -> minPrimaryShardSize = randomValueOtherThan(minPrimaryShardSize, () -> {
ByteSizeUnit minPrimaryShardSizeUnit = randomFrom(ByteSizeUnit.values());
return ByteSizeValue.of(randomNonNegativeLong() / minPrimaryShardSizeUnit.toBytes(1), minPrimaryShardSizeUnit);
});
case 9 -> minAge = randomValueOtherThan(minAge, () -> randomPositiveTimeValue());
case 10 -> minDocs = randomValueOtherThan(minDocs, ESTestCase::randomNonNegativeLong);
case 11 -> minPrimaryShardDocs = randomValueOtherThan(minPrimaryShardDocs, ESTestCase::randomNonNegativeLong);
default -> throw new AssertionError("Illegal randomisation branch");
}
return new WaitForRolloverReadyStep(
key,
nextKey,
instance.getClient(),
maxSize,
maxPrimaryShardSize,
maxAge,
maxDocs,
maxPrimaryShardDocs,
minSize,
minPrimaryShardSize,
minAge,
minDocs,
minPrimaryShardDocs
);
}
@Override
protected WaitForRolloverReadyStep copyInstance(WaitForRolloverReadyStep instance) {
return new WaitForRolloverReadyStep(instance.getKey(), instance.getNextStepKey(), instance.getClient(), instance.getConditions());
}
private static void assertRolloverIndexRequest(RolloverRequest request, String rolloverTarget, Set<Condition<?>> expectedConditions) {
assertNotNull(request);
assertEquals(1, request.indices().length);
assertEquals(rolloverTarget, request.indices()[0]);
assertEquals(rolloverTarget, request.getRolloverTarget());
assertTrue(request.isDryRun());
assertEquals(expectedConditions.size(), request.getConditions().getConditions().size());
Set<Object> expectedConditionValues = expectedConditions.stream().map(Condition::value).collect(Collectors.toSet());
Set<Object> actualConditionValues = request.getConditionValues().stream().map(Condition::value).collect(Collectors.toSet());
assertEquals(expectedConditionValues, actualConditionValues);
}
private static Set<Condition<?>> getExpectedConditions(WaitForRolloverReadyStep step, boolean maybeAddMinDocs) {
Set<Condition<?>> expectedConditions = new HashSet<>();
RolloverConditions conditions = step.getConditions();
if (conditions.getMaxSize() != null) {
expectedConditions.add(new MaxSizeCondition(conditions.getMaxSize()));
}
if (conditions.getMaxPrimaryShardSize() != null) {
expectedConditions.add(new MaxPrimaryShardSizeCondition(conditions.getMaxPrimaryShardSize()));
}
if (conditions.getMaxAge() != null) {
expectedConditions.add(new MaxAgeCondition(conditions.getMaxAge()));
}
if (conditions.getMaxDocs() != null) {
expectedConditions.add(new MaxDocsCondition(conditions.getMaxDocs()));
}
long maxPrimaryShardDocs;
if (conditions.getMaxPrimaryShardDocs() != null) {
maxPrimaryShardDocs = conditions.getMaxPrimaryShardDocs();
if (maxPrimaryShardDocs > WaitForRolloverReadyStep.MAX_PRIMARY_SHARD_DOCS) {
maxPrimaryShardDocs = WaitForRolloverReadyStep.MAX_PRIMARY_SHARD_DOCS;
}
} else {
maxPrimaryShardDocs = WaitForRolloverReadyStep.MAX_PRIMARY_SHARD_DOCS;
}
expectedConditions.add(new MaxPrimaryShardDocsCondition(maxPrimaryShardDocs));
if (conditions.getMinSize() != null) {
expectedConditions.add(new MinSizeCondition(conditions.getMinSize()));
}
if (conditions.getMinPrimaryShardSize() != null) {
expectedConditions.add(new MinPrimaryShardSizeCondition(conditions.getMinPrimaryShardSize()));
}
if (conditions.getMinAge() != null) {
expectedConditions.add(new MinAgeCondition(conditions.getMinAge()));
}
if (conditions.getMinDocs() != null) {
expectedConditions.add(new MinDocsCondition(conditions.getMinDocs()));
}
if (conditions.getMinPrimaryShardDocs() != null) {
expectedConditions.add(new MinPrimaryShardDocsCondition(conditions.getMinPrimaryShardDocs()));
}
// if no minimum document condition was specified, then a default min_docs: 1 condition will be injected (if desired)
if (maybeAddMinDocs && conditions.getMinDocs() == null && conditions.getMinPrimaryShardDocs() == null) {
expectedConditions.add(new MinDocsCondition(1L));
}
return expectedConditions;
}
public void testEvaluateCondition() {
String alias = randomAlphaOfLength(5);
IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(10))
.putAlias(AliasMetadata.builder(alias).writeIndex(randomFrom(true, null)))
.settings(settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
WaitForRolloverReadyStep step = createRandomInstance();
mockRolloverIndexCall(alias, step, true);
SetOnce<Boolean> conditionsMet = new SetOnce<>();
final var state = projectStateFromProject(ProjectMetadata.builder(randomProjectIdOrDefault()).put(indexMetadata, true));
step.evaluateCondition(state, indexMetadata, new AsyncWaitStep.Listener() {
@Override
public void onResponse(boolean complete, ToXContentObject informationContext) {
conditionsMet.set(complete);
}
@Override
public void onFailure(Exception e) {
throw new AssertionError("Unexpected method call", e);
}
}, MASTER_TIMEOUT);
assertEquals(true, conditionsMet.get());
verify(client).projectClient(state.projectId());
verify(projectClient).admin();
verifyNoMoreInteractions(client);
verify(adminClient, Mockito.only()).indices();
verify(indicesClient, Mockito.only()).rolloverIndex(Mockito.any(), Mockito.any());
}
public void testEvaluateConditionOnDataStreamTarget() {
String dataStreamName = "test-datastream";
long ts = System.currentTimeMillis();
boolean failureStoreIndex = randomBoolean();
IndexMetadata indexMetadata = IndexMetadata.builder(DataStream.getDefaultBackingIndexName(dataStreamName, 1, ts))
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
IndexMetadata failureStoreMetadata = IndexMetadata.builder(DataStream.getDefaultFailureStoreName(dataStreamName, 1, ts))
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
WaitForRolloverReadyStep step = createRandomInstance();
mockRolloverIndexCall(
failureStoreIndex ? dataStreamName + SELECTOR_SEPARATOR + IndexComponentSelector.FAILURES.getKey() : dataStreamName,
step,
true
);
SetOnce<Boolean> conditionsMet = new SetOnce<>();
var state = projectStateFromProject(
ProjectMetadata.builder(randomProjectIdOrDefault())
.put(indexMetadata, true)
.put(failureStoreMetadata, true)
.put(
DataStreamTestHelper.newInstance(
dataStreamName,
List.of(indexMetadata.getIndex()),
List.of(failureStoreMetadata.getIndex())
)
)
);
IndexMetadata indexToOperateOn = failureStoreIndex ? failureStoreMetadata : indexMetadata;
step.evaluateCondition(state, indexToOperateOn, new AsyncWaitStep.Listener() {
@Override
public void onResponse(boolean complete, ToXContentObject informationContext) {
conditionsMet.set(complete);
}
@Override
public void onFailure(Exception e) {
throw new AssertionError("Unexpected method call", e);
}
}, MASTER_TIMEOUT);
assertEquals(true, conditionsMet.get());
verify(client).projectClient(state.projectId());
verify(projectClient).admin();
verifyNoMoreInteractions(client);
verify(adminClient, Mockito.only()).indices();
ArgumentCaptor<RolloverRequest> requestCaptor = ArgumentCaptor.forClass(RolloverRequest.class);
verify(indicesClient, Mockito.only()).rolloverIndex(requestCaptor.capture(), Mockito.any());
RolloverRequest request = requestCaptor.getValue();
if (failureStoreIndex == false) {
assertThat(request.getRolloverTarget(), equalTo(dataStreamName));
} else {
assertThat(
request.getRolloverTarget(),
equalTo(dataStreamName + SELECTOR_SEPARATOR + IndexComponentSelector.FAILURES.getKey())
);
}
}
public void testSkipRolloverIfDataStreamIsAlreadyRolledOver() {
String dataStreamName = "test-datastream";
long ts = System.currentTimeMillis();
boolean failureStoreIndex = randomBoolean();
IndexMetadata firstGenerationIndex = IndexMetadata.builder(DataStream.getDefaultBackingIndexName(dataStreamName, 1, ts))
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
IndexMetadata writeIndex = IndexMetadata.builder(DataStream.getDefaultBackingIndexName(dataStreamName, 2, ts))
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
IndexMetadata firstGenerationFailureIndex = IndexMetadata.builder(DataStream.getDefaultFailureStoreName(dataStreamName, 1, ts))
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
IndexMetadata writeFailureIndex = IndexMetadata.builder(DataStream.getDefaultFailureStoreName(dataStreamName, 2, ts))
.settings(settings(IndexVersion.current()))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
WaitForRolloverReadyStep step = createRandomInstance();
SetOnce<Boolean> conditionsMet = new SetOnce<>();
var state = projectStateFromProject(
ProjectMetadata.builder(randomProjectIdOrDefault())
.put(firstGenerationIndex, true)
.put(writeIndex, true)
.put(firstGenerationFailureIndex, true)
.put(writeFailureIndex, true)
.put(
DataStreamTestHelper.newInstance(
dataStreamName,
List.of(firstGenerationIndex.getIndex(), writeIndex.getIndex()),
List.of(firstGenerationFailureIndex.getIndex(), writeFailureIndex.getIndex())
)
)
);
IndexMetadata indexToOperateOn = failureStoreIndex ? firstGenerationFailureIndex : firstGenerationIndex;
step.evaluateCondition(state, indexToOperateOn, new AsyncWaitStep.Listener() {
@Override
public void onResponse(boolean complete, ToXContentObject informationContext) {
conditionsMet.set(complete);
}
@Override
public void onFailure(Exception e) {
throw new AssertionError("Unexpected method call", e);
}
}, MASTER_TIMEOUT);
assertEquals(true, conditionsMet.get());
verifyNoMoreInteractions(client);
verifyNoMoreInteractions(adminClient);
verifyNoMoreInteractions(indicesClient);
}
private void mockRolloverIndexCall(String rolloverTarget, WaitForRolloverReadyStep step, boolean conditionResult) {
Mockito.doAnswer(invocation -> {
RolloverRequest request = (RolloverRequest) invocation.getArguments()[0];
@SuppressWarnings("unchecked")
ActionListener<RolloverResponse> listener = (ActionListener<RolloverResponse>) invocation.getArguments()[1];
Set<Condition<?>> expectedConditions = getExpectedConditions(step, true);
assertRolloverIndexRequest(request, rolloverTarget, expectedConditions);
Map<String, Boolean> conditionResults = expectedConditions.stream()
.collect(Collectors.toMap(Condition::toString, condition -> conditionResult));
listener.onResponse(new RolloverResponse(null, null, conditionResults, request.isDryRun(), false, false, false, false));
return null;
}).when(indicesClient).rolloverIndex(Mockito.any(), Mockito.any());
}
public void testEvaluateDoesntTriggerRolloverForIndexManuallyRolledOnLifecycleRolloverAlias() {
String rolloverAlias = randomAlphaOfLength(5);
IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(10))
.putAlias(AliasMetadata.builder(rolloverAlias))
.settings(settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, rolloverAlias))
.putRolloverInfo(
new RolloverInfo(rolloverAlias, List.of(new MaxSizeCondition(ByteSizeValue.ofBytes(2L))), System.currentTimeMillis())
)
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
WaitForRolloverReadyStep step = createRandomInstance();
final var state = projectStateFromProject(ProjectMetadata.builder(randomProjectIdOrDefault()).put(indexMetadata, true));
step.evaluateCondition(state, indexMetadata, new AsyncWaitStep.Listener() {
@Override
public void onResponse(boolean complete, ToXContentObject informationContext) {
assertThat(complete, is(true));
}
@Override
public void onFailure(Exception e) {
throw new AssertionError("Unexpected method call", e);
}
}, MASTER_TIMEOUT);
verify(indicesClient, Mockito.never()).rolloverIndex(Mockito.any(), Mockito.any());
}
public void testEvaluateTriggersRolloverForIndexManuallyRolledOnDifferentAlias() {
String rolloverAlias = randomAlphaOfLength(5);
IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(10))
.putAlias(AliasMetadata.builder(rolloverAlias))
.settings(settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, rolloverAlias))
.putRolloverInfo(
new RolloverInfo(
randomAlphaOfLength(5),
List.of(new MaxSizeCondition(ByteSizeValue.ofBytes(2L))),
System.currentTimeMillis()
)
)
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
WaitForRolloverReadyStep step = createRandomInstance();
final var state = projectStateFromProject(ProjectMetadata.builder(randomProjectIdOrDefault()).put(indexMetadata, true));
step.evaluateCondition(state, indexMetadata, new AsyncWaitStep.Listener() {
@Override
public void onResponse(boolean complete, ToXContentObject informationContext) {
assertThat(complete, is(true));
}
@Override
public void onFailure(Exception e) {
throw new AssertionError("Unexpected method call", e);
}
}, MASTER_TIMEOUT);
verify(indicesClient, Mockito.only()).rolloverIndex(Mockito.any(), Mockito.any());
}
public void testPerformActionWriteIndexIsFalse() {
String alias = randomAlphaOfLength(5);
IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(10))
.putAlias(AliasMetadata.builder(alias).writeIndex(false))
.settings(settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
WaitForRolloverReadyStep step = createRandomInstance();
final var state = projectStateFromProject(ProjectMetadata.builder(randomProjectIdOrDefault()).put(indexMetadata, true));
step.evaluateCondition(state, indexMetadata, new AsyncWaitStep.Listener() {
@Override
public void onResponse(boolean complete, ToXContentObject informationContext) {
fail("expecting failure as the write index must be set to true or null");
}
@Override
public void onFailure(Exception e) {
assertThat(
e.getMessage(),
is(
String.format(
Locale.ROOT,
"index [%s] is not the write index for alias [%s]",
indexMetadata.getIndex().getName(),
alias
)
)
);
}
}, MASTER_TIMEOUT);
verify(client, times(0)).admin();
}
public void testPerformActionWithIndexingComplete() {
String alias = randomAlphaOfLength(5);
IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(10))
.putAlias(AliasMetadata.builder(alias).writeIndex(randomFrom(false, null)))
.settings(
settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)
.put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, true)
)
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
WaitForRolloverReadyStep step = createRandomInstance();
SetOnce<Boolean> conditionsMet = new SetOnce<>();
final var state = projectStateFromProject(ProjectMetadata.builder(randomProjectIdOrDefault()).put(indexMetadata, true));
step.evaluateCondition(state, indexMetadata, new AsyncWaitStep.Listener() {
@Override
public void onResponse(boolean complete, ToXContentObject informationContext) {
conditionsMet.set(complete);
}
@Override
public void onFailure(Exception e) {
throw new AssertionError("Unexpected method call", e);
}
}, MASTER_TIMEOUT);
assertEquals(true, conditionsMet.get());
}
public void testPerformActionWithIndexingCompleteStillWriteIndex() {
String alias = randomAlphaOfLength(5);
IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(10))
.putAlias(AliasMetadata.builder(alias).writeIndex(true))
.settings(
settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)
.put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, true)
)
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
WaitForRolloverReadyStep step = createRandomInstance();
SetOnce<Boolean> correctFailureCalled = new SetOnce<>();
final var state = projectStateFromProject(ProjectMetadata.builder(randomProjectIdOrDefault()).put(indexMetadata, true));
step.evaluateCondition(state, indexMetadata, new AsyncWaitStep.Listener() {
@Override
public void onResponse(boolean complete, ToXContentObject informationContext) {
throw new AssertionError("Should have failed with indexing_complete but index is not write index");
}
@Override
public void onFailure(Exception e) {
assertTrue(e instanceof IllegalStateException);
correctFailureCalled.set(true);
}
}, MASTER_TIMEOUT);
assertEquals(true, correctFailureCalled.get());
}
public void testPerformActionNotComplete() {
String alias = randomAlphaOfLength(5);
IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(10))
.putAlias(AliasMetadata.builder(alias))
.settings(settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
WaitForRolloverReadyStep step = createRandomInstance();
mockRolloverIndexCall(alias, step, false);
SetOnce<Boolean> actionCompleted = new SetOnce<>();
final var state = projectStateFromProject(ProjectMetadata.builder(randomProjectIdOrDefault()).put(indexMetadata, true));
step.evaluateCondition(state, indexMetadata, new AsyncWaitStep.Listener() {
@Override
public void onResponse(boolean complete, ToXContentObject informationContext) {
actionCompleted.set(complete);
}
@Override
public void onFailure(Exception e) {
throw new AssertionError("Unexpected method call", e);
}
}, MASTER_TIMEOUT);
assertEquals(false, actionCompleted.get());
verify(client).projectClient(state.projectId());
verify(projectClient).admin();
verifyNoMoreInteractions(client);
verify(adminClient, Mockito.only()).indices();
verify(indicesClient, Mockito.only()).rolloverIndex(Mockito.any(), Mockito.any());
}
public void testPerformActionFailure() {
String alias = randomAlphaOfLength(5);
IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(10))
.putAlias(AliasMetadata.builder(alias))
.settings(settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
Exception exception = new RuntimeException();
WaitForRolloverReadyStep step = createRandomInstance();
Mockito.doAnswer(invocation -> {
RolloverRequest request = (RolloverRequest) invocation.getArguments()[0];
@SuppressWarnings("unchecked")
ActionListener<RolloverResponse> listener = (ActionListener<RolloverResponse>) invocation.getArguments()[1];
Set<Condition<?>> expectedConditions = getExpectedConditions(step, true);
assertRolloverIndexRequest(request, alias, expectedConditions);
listener.onFailure(exception);
return null;
}).when(indicesClient).rolloverIndex(Mockito.any(), Mockito.any());
SetOnce<Boolean> exceptionThrown = new SetOnce<>();
final var state = projectStateFromProject(ProjectMetadata.builder(randomProjectIdOrDefault()).put(indexMetadata, true));
step.evaluateCondition(state, indexMetadata, new AsyncWaitStep.Listener() {
@Override
public void onResponse(boolean complete, ToXContentObject informationContext) {
throw new AssertionError("Unexpected method call");
}
@Override
public void onFailure(Exception e) {
assertSame(exception, e);
exceptionThrown.set(true);
}
}, MASTER_TIMEOUT);
assertEquals(true, exceptionThrown.get());
verify(client).projectClient(state.projectId());
verify(projectClient).admin();
verifyNoMoreInteractions(client);
verify(adminClient, Mockito.only()).indices();
verify(indicesClient, Mockito.only()).rolloverIndex(Mockito.any(), Mockito.any());
}
public void testPerformActionInvalidNullOrEmptyAlias() {
String alias = randomBoolean() ? "" : null;
IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(10))
.settings(settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
WaitForRolloverReadyStep step = createRandomInstance();
SetOnce<Exception> exceptionThrown = new SetOnce<>();
final var state = projectStateFromProject(ProjectMetadata.builder(randomProjectIdOrDefault()).put(indexMetadata, true));
step.evaluateCondition(state, indexMetadata, new AsyncWaitStep.Listener() {
@Override
public void onResponse(boolean complete, ToXContentObject informationContext) {
throw new AssertionError("Unexpected method call");
}
@Override
public void onFailure(Exception e) {
exceptionThrown.set(e);
}
}, MASTER_TIMEOUT);
assertThat(exceptionThrown.get().getClass(), equalTo(IllegalArgumentException.class));
assertThat(
exceptionThrown.get().getMessage(),
equalTo(
String.format(
Locale.ROOT,
"setting [%s] for index [%s] is empty or not defined",
RolloverAction.LIFECYCLE_ROLLOVER_ALIAS,
indexMetadata.getIndex().getName()
)
)
);
}
public void testPerformActionAliasDoesNotPointToIndex() {
String alias = randomAlphaOfLength(5);
IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(10))
.settings(settings(IndexVersion.current()).put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias))
.numberOfShards(randomIntBetween(1, 5))
.numberOfReplicas(randomIntBetween(0, 5))
.build();
WaitForRolloverReadyStep step = createRandomInstance();
SetOnce<Exception> exceptionThrown = new SetOnce<>();
final var state = projectStateFromProject(ProjectMetadata.builder(randomProjectIdOrDefault()).put(indexMetadata, true));
step.evaluateCondition(state, indexMetadata, new AsyncWaitStep.Listener() {
@Override
public void onResponse(boolean complete, ToXContentObject informationContext) {
throw new AssertionError("Unexpected method call");
}
@Override
public void onFailure(Exception e) {
exceptionThrown.set(e);
}
}, MASTER_TIMEOUT);
assertThat(exceptionThrown.get().getClass(), equalTo(IllegalArgumentException.class));
assertThat(
exceptionThrown.get().getMessage(),
equalTo(
String.format(
Locale.ROOT,
"%s [%s] does not point to index [%s]",
RolloverAction.LIFECYCLE_ROLLOVER_ALIAS,
alias,
indexMetadata.getIndex().getName()
)
)
);
}
public void testCreateRolloverRequestRolloverOnlyIfHasDocuments() {
boolean rolloverOnlyIfHasDocuments = randomBoolean();
WaitForRolloverReadyStep step = createRandomInstance();
String rolloverTarget = randomAlphaOfLength(5);
TimeValue masterTimeout = randomPositiveTimeValue();
RolloverRequest request = step.createRolloverRequest(rolloverTarget, masterTimeout, rolloverOnlyIfHasDocuments, false);
assertThat(request.getRolloverTarget(), is(rolloverTarget));
assertThat(request.masterNodeTimeout(), is(masterTimeout));
assertThat(request.isDryRun(), is(true)); // it's always a dry_run
Set<Condition<?>> expectedConditions = getExpectedConditions(step, rolloverOnlyIfHasDocuments);
assertEquals(expectedConditions.size(), request.getConditions().getConditions().size());
Set<Object> expectedConditionValues = expectedConditions.stream().map(Condition::value).collect(Collectors.toSet());
Set<Object> actualConditionValues = request.getConditions()
.getConditions()
.values()
.stream()
.map(Condition::value)
.collect(Collectors.toSet());
assertEquals(expectedConditionValues, actualConditionValues);
}
public void testCreateRolloverRequestRolloverBeyondMaximumPrimaryShardDocCount() {
WaitForRolloverReadyStep step = createRandomInstance();
String rolloverTarget = randomAlphaOfLength(5);
TimeValue masterTimeout = randomPositiveTimeValue();
var c = step.getConditions();
// If beyond MAX_PRIMARY_SHARD_DOCS_FOR_TSDB then expected is always MAX_PRIMARY_SHARD_DOCS_FOR_TSDB
step = new WaitForRolloverReadyStep(
step.getKey(),
step.getNextStepKey(),
step.getClient(),
c.getMaxSize(),
c.getMaxPrimaryShardSize(),
c.getMaxAge(),
c.getMaxDocs(),
randomLongBetween(WaitForRolloverReadyStep.MAX_PRIMARY_SHARD_DOCS, Long.MAX_VALUE),
c.getMinSize(),
c.getMinPrimaryShardSize(),
c.getMinAge(),
c.getMinDocs(),
c.getMinPrimaryShardDocs()
);
RolloverRequest request = step.createRolloverRequest(rolloverTarget, masterTimeout, true, false);
assertThat(request.getRolloverTarget(), is(rolloverTarget));
assertThat(request.masterNodeTimeout(), is(masterTimeout));
assertThat(request.isDryRun(), is(true)); // it's always a dry_run
assertThat(request.getConditions().getMaxPrimaryShardDocs(), equalTo(WaitForRolloverReadyStep.MAX_PRIMARY_SHARD_DOCS));
// If null then expected is always MAX_PRIMARY_SHARD_DOCS_FOR_TSDB
step = new WaitForRolloverReadyStep(
step.getKey(),
step.getNextStepKey(),
step.getClient(),
c.getMaxSize(),
c.getMaxPrimaryShardSize(),
c.getMaxAge(),
c.getMaxDocs(),
null,
c.getMinSize(),
c.getMinPrimaryShardSize(),
c.getMinAge(),
c.getMinDocs(),
c.getMinPrimaryShardDocs()
);
request = step.createRolloverRequest(rolloverTarget, masterTimeout, true, false);
assertThat(request.getRolloverTarget(), is(rolloverTarget));
assertThat(request.masterNodeTimeout(), is(masterTimeout));
assertThat(request.isDryRun(), is(true)); // it's always a dry_run
assertThat(request.getConditions().getMaxPrimaryShardDocs(), equalTo(WaitForRolloverReadyStep.MAX_PRIMARY_SHARD_DOCS));
// If less then WaitForRolloverReadyStep.MAX_PRIMARY_SHARD_DOCS_FOR_TSDB then expected is what has been defined
long maxPrimaryShardDocCount;
step = new WaitForRolloverReadyStep(
step.getKey(),
step.getNextStepKey(),
step.getClient(),
c.getMaxSize(),
c.getMaxPrimaryShardSize(),
c.getMaxAge(),
c.getMaxDocs(),
maxPrimaryShardDocCount = randomLongBetween(1, WaitForRolloverReadyStep.MAX_PRIMARY_SHARD_DOCS - 1),
c.getMinSize(),
c.getMinPrimaryShardSize(),
c.getMinAge(),
c.getMinDocs(),
c.getMinPrimaryShardDocs()
);
request = step.createRolloverRequest(rolloverTarget, masterTimeout, true, false);
assertThat(request.getRolloverTarget(), is(rolloverTarget));
assertThat(request.masterNodeTimeout(), is(masterTimeout));
assertThat(request.isDryRun(), is(true)); // it's always a dry_run
assertThat(request.getConditions().getMaxPrimaryShardDocs(), equalTo(maxPrimaryShardDocCount));
}
}
| WaitForRolloverReadyStepTests |
java | apache__flink | flink-kubernetes/src/test/java/org/apache/flink/kubernetes/KubernetesClientTestBase.java | {
"start": 2755,
"end": 2898
} | class ____ {@link KubernetesClusterDescriptorTest} and {@link
* org.apache.flink.kubernetes.kubeclient.Fabric8FlinkKubeClientTest}.
*/
public | for |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/RxReturnValueIgnoredTest.java | {
"start": 7526,
"end": 7940
} | interface ____ {
@CanIgnoreReturnValue
Observable<Object> getObservable();
@CanIgnoreReturnValue
Single<Object> getSingle();
@CanIgnoreReturnValue
Flowable<Object> getFlowable();
@CanIgnoreReturnValue
Maybe<Object> getMaybe();
}
public static | CanIgnoreMethod |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/creators/ValueInstantiatorTest.java | {
"start": 8172,
"end": 8487
} | class ____ extends SimpleModule
{
public MyModule(Class<?> cls, ValueInstantiator inst)
{
super("Test", Version.unknownVersion());
this.addValueInstantiator(cls, inst);
}
}
@JsonValueInstantiator(AnnotatedBeanDelegatingInstantiator.class)
static | MyModule |
java | apache__camel | components/camel-groovy/src/test/java/org/apache/camel/language/groovy/GroovyResourceTest.java | {
"start": 1053,
"end": 1714
} | class ____ extends CamelTestSupport {
@Test
public void testGroovyResource() throws Exception {
getMockEndpoint("mock:result").expectedBodiesReceived("The result is 6");
template.sendBody("direct:start", 3);
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.transform().groovy("resource:classpath:mygroovy.groovy")
.to("mock:result");
}
};
}
}
| GroovyResourceTest |
java | spring-projects__spring-security | access/src/main/java/org/springframework/security/messaging/access/expression/MessageExpressionVoter.java | {
"start": 1840,
"end": 3493
} | class ____<T> implements AccessDecisionVoter<Message<T>> {
private SecurityExpressionHandler<Message<T>> expressionHandler = new DefaultMessageSecurityExpressionHandler<>();
@Override
public int vote(Authentication authentication, Message<T> message, Collection<ConfigAttribute> attributes) {
Assert.notNull(authentication, "authentication must not be null");
Assert.notNull(message, "message must not be null");
Assert.notNull(attributes, "attributes must not be null");
MessageExpressionConfigAttribute attr = findConfigAttribute(attributes);
if (attr == null) {
return ACCESS_ABSTAIN;
}
EvaluationContext ctx = this.expressionHandler.createEvaluationContext(authentication, message);
ctx = attr.postProcess(ctx, message);
return ExpressionUtils.evaluateAsBoolean(attr.getAuthorizeExpression(), ctx) ? ACCESS_GRANTED : ACCESS_DENIED;
}
private @Nullable MessageExpressionConfigAttribute findConfigAttribute(Collection<ConfigAttribute> attributes) {
for (ConfigAttribute attribute : attributes) {
if (attribute instanceof MessageExpressionConfigAttribute) {
return (MessageExpressionConfigAttribute) attribute;
}
}
return null;
}
@Override
public boolean supports(ConfigAttribute attribute) {
return attribute instanceof MessageExpressionConfigAttribute;
}
@Override
public boolean supports(Class<?> clazz) {
return Message.class.isAssignableFrom(clazz);
}
public void setExpressionHandler(SecurityExpressionHandler<Message<T>> expressionHandler) {
Assert.notNull(expressionHandler, "expressionHandler cannot be null");
this.expressionHandler = expressionHandler;
}
}
| MessageExpressionVoter |
java | micronaut-projects__micronaut-core | core/src/main/java/io/micronaut/core/convert/ConversionServiceProvider.java | {
"start": 825,
"end": 1027
} | interface ____ {
/**
* Provides the conversion service.
*
* @return the conversion service
*/
@NonNull
ConversionService getConversionService();
}
| ConversionServiceProvider |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/spi/ModelineFactory.java | {
"start": 928,
"end": 1383
} | interface ____ {
/**
* Service factory key.
*/
String FACTORY = "dsl-modeline-factory";
/**
* Parses the resources to discover camel-jbang modeline snippets which is parsed and processed.
*
* @param resource the resource with Camel routes such as a yaml, xml or java source file.
* @throws Exception is thrown if error parsing
*/
void parseModeline(Resource resource) throws Exception;
}
| ModelineFactory |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/cache/spi/QueryKey.java | {
"start": 1090,
"end": 4318
} | interface ____ extends Serializable {
}
public static QueryKey from(
String sqlQueryString,
Limit limit,
QueryParameterBindings parameterBindings,
SharedSessionContractImplementor session) {
// todo (6.0) : here is where we should centralize cacheable-or-not
// if this method returns null, the query should be considered un-cacheable
//
// todo (6.0) : should limited (first/max) results be cacheable?
// todo (6.0) : should filtered results be cacheable?
final Limit limitToUse = limit == null ? Limit.NONE : limit;
return new QueryKey(
sqlQueryString,
parameterBindings.generateQueryKeyMemento( session ),
limitToUse.getFirstRow(),
limitToUse.getMaxRows(),
session.getLoadQueryInfluencers().getEnabledFilterNames()
);
}
private final String sqlQueryString;
private final ParameterBindingsMemento parameterBindingsMemento;
private final Integer firstRow;
private final Integer maxRows;
private final String[] enabledFilterNames;
/**
* For performance reasons, the hashCode is cached; however, it is marked transient so that it can be
* recalculated as part of the serialization process which allows distributed query caches to work properly.
*/
private transient int hashCode;
public QueryKey(
String sqlQueryString,
ParameterBindingsMemento parameterBindingsMemento,
Integer firstRow,
Integer maxRows,
Set<String> enabledFilterNames) {
this.sqlQueryString = sqlQueryString;
this.parameterBindingsMemento = parameterBindingsMemento;
this.firstRow = firstRow;
this.maxRows = maxRows;
this.enabledFilterNames = enabledFilterNames.toArray( String[]::new );
this.hashCode = generateHashCode();
}
/**
* Deserialization hook used to re-init the cached hashcode which is needed for proper clustering support.
*
* @param in The object input stream.
*
* @throws IOException Thrown by normal deserialization
* @throws ClassNotFoundException Thrown by normal deserialization
*/
@Serial
private void readObject(java.io.ObjectInputStream in)
throws IOException, ClassNotFoundException {
in.defaultReadObject();
hashCode = generateHashCode();
}
private int generateHashCode() {
int result = 13;
result = 37 * result + sqlQueryString.hashCode();
// Don't include the firstRow and maxRows in the hash
// as these values are rarely useful for query caching
// result = 37 * result + ( firstRow==null ? 0 : firstRow );
// result = 37 * result + ( maxRows==null ? 0 : maxRows );
result = 37 * result + parameterBindingsMemento.hashCode();
result = 37 * result + Arrays.hashCode( enabledFilterNames );
return result;
}
@Override
public boolean equals(Object other) {
return other instanceof QueryKey that
&& Objects.equals( this.sqlQueryString, that.sqlQueryString )
&& Objects.equals( this.firstRow, that.firstRow )
&& Objects.equals( this.maxRows, that.maxRows )
// Set's `#equals` impl does a deep check, so `Objects#equals` is a good check
&& Objects.equals( this.parameterBindingsMemento, that.parameterBindingsMemento )
&& Arrays.equals( this.enabledFilterNames, that.enabledFilterNames );
}
@Override
public int hashCode() {
return hashCode;
}
}
| ParameterBindingsMemento |
java | netty__netty | codec-socks/src/main/java/io/netty/handler/codec/socksx/SocksPortUnificationServerHandler.java | {
"start": 1520,
"end": 3827
} | class ____ extends ByteToMessageDecoder {
private static final InternalLogger logger =
InternalLoggerFactory.getInstance(SocksPortUnificationServerHandler.class);
private final Socks5ServerEncoder socks5encoder;
/**
* Creates a new instance with the default configuration.
*/
public SocksPortUnificationServerHandler() {
this(Socks5ServerEncoder.DEFAULT);
}
/**
* Creates a new instance with the specified {@link Socks5ServerEncoder}.
* This constructor is useful when a user wants to use an alternative {@link Socks5AddressEncoder}.
*/
public SocksPortUnificationServerHandler(Socks5ServerEncoder socks5encoder) {
this.socks5encoder = ObjectUtil.checkNotNull(socks5encoder, "socks5encoder");
}
@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
final int readerIndex = in.readerIndex();
if (in.writerIndex() == readerIndex) {
return;
}
ChannelPipeline p = ctx.pipeline();
final byte versionVal = in.getByte(readerIndex);
SocksVersion version = SocksVersion.valueOf(versionVal);
switch (version) {
case SOCKS4a:
logKnownVersion(ctx, version);
p.addAfter(ctx.name(), null, Socks4ServerEncoder.INSTANCE);
p.addAfter(ctx.name(), null, new Socks4ServerDecoder());
break;
case SOCKS5:
logKnownVersion(ctx, version);
p.addAfter(ctx.name(), null, socks5encoder);
p.addAfter(ctx.name(), null, new Socks5InitialRequestDecoder());
break;
default:
logUnknownVersion(ctx, versionVal);
in.skipBytes(in.readableBytes());
ctx.close();
return;
}
p.remove(this);
}
private static void logKnownVersion(ChannelHandlerContext ctx, SocksVersion version) {
logger.debug("{} Protocol version: {}({})", ctx.channel(), version);
}
private static void logUnknownVersion(ChannelHandlerContext ctx, byte versionVal) {
if (logger.isDebugEnabled()) {
logger.debug("{} Unknown protocol version: {}", ctx.channel(), versionVal & 0xFF);
}
}
}
| SocksPortUnificationServerHandler |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/propertyeditors/CurrencyEditor.java | {
"start": 1068,
"end": 1454
} | class ____ extends PropertyEditorSupport {
@Override
public void setAsText(String text) throws IllegalArgumentException {
if (StringUtils.hasText(text)) {
text = text.trim();
}
setValue(Currency.getInstance(text));
}
@Override
public String getAsText() {
Currency value = (Currency) getValue();
return (value != null ? value.getCurrencyCode() : "");
}
}
| CurrencyEditor |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/array_result_type/ArrayResultTypeTest.java | {
"start": 1102,
"end": 2862
} | class ____ {
private static SqlSessionFactory sqlSessionFactory;
@BeforeAll
static void setUp() throws Exception {
// create an SqlSessionFactory
try (Reader reader = Resources
.getResourceAsReader("org/apache/ibatis/submitted/array_result_type/mybatis-config.xml")) {
sqlSessionFactory = new SqlSessionFactoryBuilder().build(reader);
}
// populate in-memory database
BaseDataTest.runScript(sqlSessionFactory.getConfiguration().getEnvironment().getDataSource(),
"org/apache/ibatis/submitted/array_result_type/CreateDB.sql");
}
@Test
void shouldGetUserArray() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Mapper mapper = sqlSession.getMapper(Mapper.class);
User[] users = mapper.getUsers();
assertEquals("User1", users[0].getName());
assertEquals("User2", users[1].getName());
}
}
@Test
void shouldGetUserArrayXml() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Mapper mapper = sqlSession.getMapper(Mapper.class);
User[] users = mapper.getUsersXml();
assertEquals("User1", users[0].getName());
assertEquals("User2", users[1].getName());
}
}
@Test
void shouldGetSimpleTypeArray() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Mapper mapper = sqlSession.getMapper(Mapper.class);
Integer[] ids = mapper.getUserIds();
assertEquals(Integer.valueOf(1), ids[0]);
}
}
@Test
void shouldGetPrimitiveArray() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Mapper mapper = sqlSession.getMapper(Mapper.class);
int[] ids = mapper.getUserIdsPrimitive();
assertEquals(1, ids[0]);
}
}
}
| ArrayResultTypeTest |
java | apache__camel | components/camel-google/camel-google-mail/src/generated/java/org/apache/camel/component/google/mail/internal/GoogleMailApiName.java | {
"start": 269,
"end": 676
} | enum ____ implements ApiName {
THREADS("threads"),
MESSAGES("messages"),
ATTACHMENTS("attachments"),
LABELS("labels"),
HISTORY("history"),
DRAFTS("drafts"),
USERS("users");
private final String name;
private GoogleMailApiName(String name) {
this.name = name;
}
@Override
public String getName() {
return name;
}
}
| GoogleMailApiName |
java | spring-projects__spring-framework | spring-tx/src/main/java/org/springframework/transaction/jta/UserTransactionAdapter.java | {
"start": 1288,
"end": 1406
} | interface ____ an exact subset of the JTA
* TransactionManager interface. Unfortunately, it does not serve as
* super- | is |
java | apache__flink | flink-core/src/main/java/org/apache/flink/core/fs/FileSystem.java | {
"start": 43986,
"end": 46858
} | class ____
// and initialization errors
ExceptionUtils.rethrowIfFatalErrorOrOOM(t);
LOG.error("Failed to load a file system via services", t);
}
}
}
/**
* Utility loader for the Hadoop file system factory. We treat the Hadoop FS factory in a
* special way, because we use it as a catch all for file systems schemes not supported directly
* in Flink.
*
* <p>This method does a set of eager checks for availability of certain classes, to be able to
* give better error messages.
*/
private static FileSystemFactory loadHadoopFsFactory() {
final ClassLoader cl = FileSystem.class.getClassLoader();
// first, see if the Flink runtime classes are available
final Class<? extends FileSystemFactory> factoryClass;
try {
factoryClass =
Class.forName("org.apache.flink.runtime.fs.hdfs.HadoopFsFactory", false, cl)
.asSubclass(FileSystemFactory.class);
} catch (ClassNotFoundException e) {
LOG.info(
"No Flink runtime dependency present. "
+ "The extended set of supported File Systems via Hadoop is not available.");
return new UnsupportedSchemeFactory(
"Flink runtime classes missing in classpath/dependencies.");
} catch (Exception | LinkageError e) {
LOG.warn("Flink's Hadoop file system factory could not be loaded", e);
return new UnsupportedSchemeFactory(
"Flink's Hadoop file system factory could not be loaded", e);
}
// check (for eager and better exception messages) if the Hadoop classes are available here
try {
Class.forName("org.apache.hadoop.conf.Configuration", false, cl);
Class.forName("org.apache.hadoop.fs.FileSystem", false, cl);
} catch (ClassNotFoundException e) {
LOG.info(
"Hadoop is not in the classpath/dependencies. "
+ "The extended set of supported File Systems via Hadoop is not available.");
return new UnsupportedSchemeFactory("Hadoop is not in the classpath/dependencies.");
}
// Create the factory.
try {
return factoryClass.newInstance();
} catch (Exception | LinkageError e) {
LOG.warn("Flink's Hadoop file system factory could not be created", e);
return new UnsupportedSchemeFactory(
"Flink's Hadoop file system factory could not be created", e);
}
}
// ------------------------------------------------------------------------
/** An identifier of a file system, via its scheme and its authority. */
@Internal
public static final | loading |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java | {
"start": 3259,
"end": 3350
} | class ____ a container for the JNI Win32 native methods used by WSCE.
*/
private static | is |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webflux/src/test/java/org/springframework/cloud/gateway/handler/predicate/VersionRoutePredicateFactoryIntegrationTests.java | {
"start": 2150,
"end": 5686
} | class ____ extends BaseWebClientTests {
@Test
public void versionHeaderWorks() {
testClient.mutate()
.build()
.get()
.uri("/anything/version11plus")
.header("X-API-Version", "1.1.0")
.exchange()
.expectStatus()
.isOk()
.expectHeader()
.valueEquals("X-Matched-Version", "1.1+");
testClient.mutate()
.build()
.get()
.uri("/anything/version11plus")
.header("X-API-Version", "1.5.0")
.exchange()
.expectStatus()
.isOk()
.expectHeader()
.valueEquals("X-Matched-Version", "1.1+");
testClient.mutate()
.build()
.get()
.uri("/anything/version13")
.header("X-API-Version", "1.3.0")
.exchange()
.expectStatus()
.isOk()
.expectHeader()
.valueEquals("X-Matched-Version", "1.3");
testClient.mutate()
.build()
.get()
.uri("/anything/version20plus")
.header("X-API-Version", "2.1.0")
.exchange()
.expectStatus()
.isOk()
.expectHeader()
.valueEquals("X-Matched-Version", "2.0+");
}
@Test
public void versionMediaTypeWorks() {
testClient.mutate()
.build()
.get()
.uri("/anything/version11plus")
.accept(new MediaType(MediaType.APPLICATION_JSON, Map.of("version", "1.1.0")))
.exchange()
.expectStatus()
.isOk()
.expectHeader()
.valueEquals("X-Matched-Version", "1.1+");
}
@Test
public void versionRequestParamWorks() {
testClient.mutate()
.build()
.get()
.uri("/anything/version11plus?apiVersion=1.1.0")
.exchange()
.expectStatus()
.isOk()
.expectHeader()
.valueEquals("X-Matched-Version", "1.1+");
}
@Test
public void customVersionResolverBeanWorks() {
testClient.mutate()
.build()
.get()
.uri("/anything/version11plus?customApiVersionParam=1.1.0")
.exchange()
.expectStatus()
.isOk()
.expectHeader()
.valueEquals("X-Matched-Version", "1.1+");
}
@Test
public void invalidVersionNotFound() {
testClient.mutate()
.build()
.get()
.uri("/anything/version11plus")
.header("X-API-Version", "1.0.0")
.exchange()
.expectStatus()
.isNotFound();
}
@Test
public void toStringFormat() {
Config config = new Config();
config.setVersion("1.1+");
Predicate<ServerWebExchange> predicate = new VersionRoutePredicateFactory(
VersionRoutePredicateFactoryTests.apiVersionStrategy())
.apply(config);
assertThat(predicate.toString()).contains("Version: 1.1+");
}
@Test
public void testConfig() {
try (ValidatorFactory factory = Validation.buildDefaultValidatorFactory()) {
Validator validator = factory.getValidator();
Config config = new Config();
config.setVersion("1.1+");
assertThat(validator.validate(config)).isEmpty();
}
}
@Test
public void testConfigNullField() {
try (ValidatorFactory factory = Validation.buildDefaultValidatorFactory()) {
Validator validator = factory.getValidator();
Config config = new Config();
Set<ConstraintViolation<Config>> validate = validator.validate(config);
assertThat(validate).hasSize(1);
}
}
@Test
public void testConfigBlankField() {
try (ValidatorFactory factory = Validation.buildDefaultValidatorFactory()) {
Validator validator = factory.getValidator();
Config config = new Config();
config.setVersion(" ");
Set<ConstraintViolation<Config>> validate = validator.validate(config);
assertThat(validate).hasSize(1);
}
}
@EnableAutoConfiguration
@SpringBootConfiguration
@Import(DefaultTestConfig.class)
// implements WebFluxConfigurer required
public static | VersionRoutePredicateFactoryIntegrationTests |
java | quarkusio__quarkus | extensions/resteasy-classic/resteasy/deployment/src/test/java/io/quarkus/resteasy/test/security/authzpolicy/AbstractAuthorizationPolicyTest.java | {
"start": 336,
"end": 4550
} | class ____ {
protected static final Class<?>[] TEST_CLASSES = { TestIdentityProvider.class, TestIdentityController.class,
ForbidAllButViewerAuthorizationPolicy.class, ForbidViewerClassLevelPolicyResource.class,
ForbidViewerMethodLevelPolicyResource.class, NoAuthorizationPolicyResource.class,
PermitUserAuthorizationPolicy.class, ClassRolesAllowedMethodAuthZPolicyResource.class,
ClassAuthZPolicyMethodRolesAllowedResource.class, ViewerAugmentingPolicy.class,
AuthorizationPolicyAndPathMatchingPoliciesResource.class };
protected static final String APPLICATION_PROPERTIES = """
quarkus.http.auth.policy.admin-role.roles-allowed=admin
quarkus.http.auth.policy.viewer-role.roles-allowed=viewer
quarkus.http.auth.permission.jax-rs1.paths=/no-authorization-policy/jax-rs-path-matching-http-perm
quarkus.http.auth.permission.jax-rs1.policy=admin-role
quarkus.http.auth.permission.jax-rs1.applies-to=JAXRS
quarkus.http.auth.permission.standard1.paths=/no-authorization-policy/path-matching-http-perm
quarkus.http.auth.permission.standard1.policy=admin-role
quarkus.http.auth.permission.jax-rs2.paths=/authz-policy-and-path-matching-policies/jax-rs-path-matching-http-perm
quarkus.http.auth.permission.jax-rs2.policy=viewer-role
quarkus.http.auth.permission.jax-rs2.applies-to=JAXRS
quarkus.http.auth.permission.standard2.paths=/authz-policy-and-path-matching-policies/path-matching-http-perm
quarkus.http.auth.permission.standard2.policy=viewer-role
""";
@BeforeAll
public static void setupUsers() {
TestIdentityController.resetRoles()
.add("admin", "admin", "admin", "viewer")
.add("user", "user")
.add("viewer", "viewer", "viewer");
}
@Test
public void testNoAuthorizationPolicy() {
// unsecured endpoint
RestAssured.given().auth().preemptive().basic("viewer", "viewer").get("/no-authorization-policy/unsecured")
.then().statusCode(200).body(Matchers.equalTo("viewer"));
// secured with JAX-RS path-matching roles allowed HTTP permission requiring 'admin' role
RestAssured.given().auth().preemptive().basic("user", "user")
.get("/no-authorization-policy/jax-rs-path-matching-http-perm")
.then().statusCode(403);
RestAssured.given().auth().preemptive().basic("admin", "admin")
.get("/no-authorization-policy/jax-rs-path-matching-http-perm")
.then().statusCode(200).body(Matchers.equalTo("admin"));
// secured with path-matching roles allowed HTTP permission requiring 'admin' role
RestAssured.given().auth().preemptive().basic("user", "user").get("/no-authorization-policy/path-matching-http-perm")
.then().statusCode(403);
RestAssured.given().auth().preemptive().basic("admin", "admin").get("/no-authorization-policy/path-matching-http-perm")
.then().statusCode(200).body(Matchers.equalTo("admin"));
// secured with @RolesAllowed("admin")
RestAssured.given().auth().preemptive().basic("user", "user").get("/no-authorization-policy/roles-allowed-annotation")
.then().statusCode(403);
RestAssured.given().auth().preemptive().basic("admin", "admin").get("/no-authorization-policy/roles-allowed-annotation")
.then().statusCode(200).body(Matchers.equalTo("admin"));
}
@Test
public void testMethodLevelAuthorizationPolicy() {
// policy placed on the endpoint directly, requires 'viewer' principal and must not pass anyone else
RestAssured.given().auth().preemptive().basic("admin", "admin").get("/forbid-viewer-method-level-policy")
.then().statusCode(403);
RestAssured.given().auth().preemptive().basic("viewer", "viewer").get("/forbid-viewer-method-level-policy")
.then().statusCode(200).body(Matchers.equalTo("viewer"));
// which means the other endpoint inside same resource | AbstractAuthorizationPolicyTest |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/mapper/NonDynamicFieldMapperTests.java | {
"start": 1523,
"end": 1845
} | class ____ extends FieldMapper {
private static final String NAME = "non_dynamic";
private static final TypeParser PARSER = new TypeParser(
(n, c) -> new Builder(n),
List.of(notFromDynamicTemplates(NAME), notInMultiFields(NAME))
);
private static | NonDynamicFieldMapper |
java | apache__camel | components/camel-csv/src/test/java/org/apache/camel/dataformat/csv/CsvUnmarshalTabDelimiterSpringTest.java | {
"start": 1209,
"end": 2909
} | class ____ extends CamelSpringTestSupport {
@EndpointInject("mock:result")
private MockEndpoint result;
@SuppressWarnings("unchecked")
@Test
void testCsvUnMarshal() throws Exception {
result.expectedMessageCount(1);
template.sendBody("direct:start", "123\tCamel in Action\t1\n124\tActiveMQ in Action\t2");
MockEndpoint.assertIsSatisfied(context);
List<List<String>> body = result.getReceivedExchanges().get(0).getIn().getBody(List.class);
assertEquals(2, body.size());
assertEquals("123", body.get(0).get(0));
assertEquals("Camel in Action", body.get(0).get(1));
assertEquals("1", body.get(0).get(2));
assertEquals("124", body.get(1).get(0));
assertEquals("ActiveMQ in Action", body.get(1).get(1));
assertEquals("2", body.get(1).get(2));
}
@SuppressWarnings("unchecked")
@Test
void testCsvUnMarshalSingleLine() throws Exception {
result.expectedMessageCount(1);
template.sendBody("direct:start", "123\tCamel in Action\t1");
MockEndpoint.assertIsSatisfied(context);
List<List<String>> body = result.getReceivedExchanges().get(0).getIn().getBody(List.class);
assertEquals(1, body.size());
assertEquals("123", body.get(0).get(0));
assertEquals("Camel in Action", body.get(0).get(1));
assertEquals("1", body.get(0).get(2));
}
@Override
protected ClassPathXmlApplicationContext createApplicationContext() {
return new ClassPathXmlApplicationContext(
"org/apache/camel/dataformat/csv/CsvUnmarshalTabDelimiterSpringTest-context.xml");
}
}
| CsvUnmarshalTabDelimiterSpringTest |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Bug_for_field.java | {
"start": 219,
"end": 483
} | class ____ extends TestCase {
public void test_annotation() throws Exception {
VO vo = new VO();
vo.setId(123);
String text = JSON.toJSONString(vo);
System.out.println(text);
}
public static | Bug_for_field |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java | {
"start": 6838,
"end": 7493
} | class ____ extends SourceBlockLoader {
public GeometriesBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) {
super(fetcher, lookup);
}
@Override
public final Builder builder(BlockFactory factory, int expectedCount) {
return factory.bytesRefs(expectedCount);
}
@Override
protected RowStrideReader rowStrideReader(LeafReaderContext context, DocIdSetIterator iter) {
return new Geometries(fetcher, iter);
}
@Override
protected String name() {
return "Geometries";
}
}
private static | GeometriesBlockLoader |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestTests.java | {
"start": 1208,
"end": 6038
} | class ____ extends ESTestCase {
/**
* Make sure that we can set, serialize, and deserialize arbitrary sets
* of metrics.
*/
public void testAddMetrics() throws Exception {
NodesStatsRequest request = new NodesStatsRequest(randomAlphaOfLength(8));
request.indices(randomFrom(CommonStatsFlags.ALL));
List<Metric> metrics = randomSubsetOf(Metric.ALL);
request.addMetrics(metrics);
NodesStatsRequest deserializedRequest = roundTripRequest(request);
assertRequestsEqual(request, deserializedRequest);
}
/**
* Check that we can add a metric.
*/
public void testAddSingleMetric() throws Exception {
NodesStatsRequest request = new NodesStatsRequest();
request.addMetric(randomFrom(Metric.ALL));
NodesStatsRequest deserializedRequest = roundTripRequest(request);
assertRequestsEqual(request, deserializedRequest);
}
/**
* Check that we can remove a metric.
*/
public void testRemoveSingleMetric() throws Exception {
NodesStatsRequest request = new NodesStatsRequest();
request.all();
Metric metric = randomFrom(Metric.ALL);
request.removeMetric(metric);
NodesStatsRequest deserializedRequest = roundTripRequest(request);
assertThat(request.requestedMetrics(), equalTo(deserializedRequest.requestedMetrics()));
assertThat(metric, not(in(request.requestedMetrics())));
}
/**
* Test that a newly constructed NodesStatsRequestObject requests only index metrics.
*/
public void testNodesStatsRequestDefaults() {
NodesStatsRequest defaultNodesStatsRequest = new NodesStatsRequest(randomAlphaOfLength(8));
NodesStatsRequest constructedNodesStatsRequest = new NodesStatsRequest(randomAlphaOfLength(8));
constructedNodesStatsRequest.clear();
constructedNodesStatsRequest.indices(CommonStatsFlags.ALL);
assertRequestsEqual(defaultNodesStatsRequest, constructedNodesStatsRequest);
}
/**
* Test that the {@link NodesStatsRequest#all()} method enables all metrics.
*/
public void testNodesInfoRequestAll() throws Exception {
NodesStatsRequest request = new NodesStatsRequest("node");
request.all();
assertThat(request.indices().getFlags(), equalTo(CommonStatsFlags.ALL.getFlags()));
assertThat(request.requestedMetrics(), equalTo(Metric.ALL));
}
/**
* Test that the {@link NodesStatsRequest#clear()} method removes all metrics.
*/
public void testNodesInfoRequestClear() throws Exception {
NodesStatsRequest request = new NodesStatsRequest("node");
request.clear();
assertThat(request.indices().getFlags(), equalTo(CommonStatsFlags.NONE.getFlags()));
assertThat(request.requestedMetrics(), empty());
}
/**
* Serialize and deserialize a request.
* @param request A request to serialize.
* @return The deserialized, "round-tripped" request.
*/
private static NodesStatsRequest roundTripRequest(NodesStatsRequest request) throws Exception {
try (BytesStreamOutput out = new BytesStreamOutput()) {
request.getNodesStatsRequestParameters().writeTo(out);
try (StreamInput in = out.bytes().streamInput()) {
return new NodesStatsRequest(new NodesStatsRequestParameters(in), request.nodesIds());
}
}
}
private static void assertRequestsEqual(NodesStatsRequest request1, NodesStatsRequest request2) {
assertThat(request1.indices().getFlags(), equalTo(request2.indices().getFlags()));
assertThat(request1.requestedMetrics(), equalTo(request2.requestedMetrics()));
}
public void testGetDescription() {
final var request = new NodesStatsRequest("nodeid1", "nodeid2");
request.clear();
request.addMetrics(Metric.OS, Metric.TRANSPORT);
request.indices(new CommonStatsFlags(CommonStatsFlags.Flag.Store, CommonStatsFlags.Flag.Flush));
final var description = request.getDescription();
assertThat(
description,
allOf(
containsString("nodeid1"),
containsString("nodeid2"),
containsString(Metric.OS.metricName()),
containsString(Metric.TRANSPORT.metricName()),
not(containsString(Metric.SCRIPT.metricName())),
containsString(CommonStatsFlags.Flag.Store.toString()),
containsString(CommonStatsFlags.Flag.Flush.toString()),
not(containsString(CommonStatsFlags.Flag.FieldData.toString()))
)
);
assertEquals(description, request.createTask(1, "", "", TaskId.EMPTY_TASK_ID, Map.of()).getDescription());
}
}
| NodesStatsRequestTests |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/embeddables/MappedSuperclassGenericEmbeddableQueryParamTest.java | {
"start": 2732,
"end": 2866
} | interface ____ {
String getAttributeValue();
void setAttributeValue(String value);
}
@MappedSuperclass
public static | GenericValue |
java | elastic__elasticsearch | libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java | {
"start": 16419,
"end": 16943
} | class ____ { }"));
var classToBytes = InMemoryJavaCompiler.compile(sources);
JarUtils.createJarWithEntries(jar, Map.ofEntries(entry("q/B.class", classToBytes.get("q.B"))));
return jar;
}
private static Path createMockPluginJar(Path home) throws IOException {
Path jar = home.resolve("mock-plugin.jar");
Map<String, CharSequence> sources = Map.ofEntries(
entry("module-info", "module org.example.plugin { exports q; }"),
entry("q.B", "package q; public | B |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/ImportSelectorTests.java | {
"start": 12960,
"end": 13072
} | class ____ {
@Bean
public String a() {
return "a";
}
}
@Configuration
public static | ImportedSelector1 |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/filter/OncePerRequestFilter.java | {
"start": 3179,
"end": 6586
} | class ____ extends GenericFilterBean {
/**
* Suffix that gets appended to the filter name for the
* "already filtered" request attribute.
* @see #getAlreadyFilteredAttributeName
*/
public static final String ALREADY_FILTERED_SUFFIX = ".FILTERED";
/**
* This {@code doFilter} implementation stores a request attribute for
* "already filtered", proceeding without filtering again if the
* attribute is already there.
* @see #getAlreadyFilteredAttributeName
* @see #shouldNotFilter
* @see #doFilterInternal
*/
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain filterChain)
throws ServletException, IOException {
if (!((request instanceof HttpServletRequest httpRequest) && (response instanceof HttpServletResponse httpResponse))) {
throw new ServletException("OncePerRequestFilter only supports HTTP requests");
}
String alreadyFilteredAttributeName = getAlreadyFilteredAttributeName();
boolean hasAlreadyFilteredAttribute = request.getAttribute(alreadyFilteredAttributeName) != null;
if (skipDispatch(httpRequest) || shouldNotFilter(httpRequest)) {
// Proceed without invoking this filter...
filterChain.doFilter(request, response);
}
else if (hasAlreadyFilteredAttribute) {
if (DispatcherType.ERROR.equals(request.getDispatcherType())) {
doFilterNestedErrorDispatch(httpRequest, httpResponse, filterChain);
return;
}
// Proceed without invoking this filter...
filterChain.doFilter(request, response);
}
else {
// Do invoke this filter...
request.setAttribute(alreadyFilteredAttributeName, Boolean.TRUE);
try {
doFilterInternal(httpRequest, httpResponse, filterChain);
}
finally {
// Remove the "already filtered" request attribute for this request.
request.removeAttribute(alreadyFilteredAttributeName);
}
}
}
private boolean skipDispatch(HttpServletRequest request) {
if (isAsyncDispatch(request) && shouldNotFilterAsyncDispatch()) {
return true;
}
if (request.getAttribute(WebUtils.ERROR_REQUEST_URI_ATTRIBUTE) != null && shouldNotFilterErrorDispatch()) {
return true;
}
return false;
}
/**
* The dispatcher type {@code jakarta.servlet.DispatcherType.ASYNC} means a
* filter can be invoked in more than one thread over the course of a single
* request. This method returns {@code true} if the filter is currently
* executing within an asynchronous dispatch.
* @param request the current request
* @since 3.2
* @see WebAsyncManager#hasConcurrentResult()
*/
protected boolean isAsyncDispatch(HttpServletRequest request) {
return DispatcherType.ASYNC.equals(request.getDispatcherType());
}
/**
* Whether request processing is in asynchronous mode meaning that the
* response will not be committed after the current thread is exited.
* @param request the current request
* @since 3.2
* @see WebAsyncManager#isConcurrentHandlingStarted()
*/
protected boolean isAsyncStarted(HttpServletRequest request) {
return WebAsyncUtils.getAsyncManager(request).isConcurrentHandlingStarted();
}
/**
* Return the name of the request attribute that identifies that a request
* is already filtered.
* <p>The default implementation takes the configured name of the concrete filter
* instance and appends ".FILTERED". If the filter is not fully initialized,
* it falls back to its | OncePerRequestFilter |
java | quarkusio__quarkus | extensions/flyway/deployment/src/main/java/io/quarkus/flyway/deployment/FlywayCallbacksLocator.java | {
"start": 897,
"end": 2328
} | class ____ {
private final Collection<String> dataSourceNames;
private final FlywayBuildTimeConfig flywayBuildConfig;
private final CombinedIndexBuildItem combinedIndexBuildItem;
private final BuildProducer<ReflectiveClassBuildItem> reflectiveClassProducer;
private FlywayCallbacksLocator(Collection<String> dataSourceNames, FlywayBuildTimeConfig flywayBuildConfig,
CombinedIndexBuildItem combinedIndexBuildItem, BuildProducer<ReflectiveClassBuildItem> reflectiveClassProducer) {
this.dataSourceNames = dataSourceNames;
this.flywayBuildConfig = flywayBuildConfig;
this.combinedIndexBuildItem = combinedIndexBuildItem;
this.reflectiveClassProducer = reflectiveClassProducer;
}
public static FlywayCallbacksLocator with(Collection<String> dataSourceNames, FlywayBuildTimeConfig flywayBuildConfig,
CombinedIndexBuildItem combinedIndexBuildItem, BuildProducer<ReflectiveClassBuildItem> reflectiveClassProducer) {
return new FlywayCallbacksLocator(dataSourceNames, flywayBuildConfig, combinedIndexBuildItem, reflectiveClassProducer);
}
/**
* Main logic to identify callbacks and return them to be processed by the {@link FlywayProcessor}
*
* @return Map containing the callbacks for each datasource. The datasource name is the map key
* @exception ClassNotFoundException if the {@link Callback} | FlywayCallbacksLocator |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/streaming/runtime/operators/asyncprocessing/ElementOrder.java | {
"start": 1230,
"end": 1724
} | enum ____ {
/**
* Treat the record processing as a whole, meaning that any {@code processElement} call for the
* elements with same key should follow the order of record arrival AND no parallel run is
* allowed.
*/
RECORD_ORDER,
/**
* The {@code processElement} call will be invoked on record arrival, but may be blocked at the
* first async request if there is a preceding same-key record under processing.
*/
FIRST_REQUEST_ORDER,
}
| ElementOrder |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/FormLoginConfigurerTests.java | {
"start": 23145,
"end": 23605
} | class ____ {
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.authorizeHttpRequests((requests) -> requests
.anyRequest().hasRole("USER"))
.formLogin((login) -> login
.loginPage("/authenticate")
.permitAll())
.logout((logout) -> logout
.permitAll());
return http.build();
// @formatter:on
}
}
@Configuration
@EnableWebSecurity
static | FormLoginDefaultsConfig |
java | apache__camel | components/camel-github/src/test/java/org/apache/camel/component/github/producer/CreateIssueProducerTest.java | {
"start": 2593,
"end": 2913
} | class ____ implements Processor {
@Override
public void process(Exchange exchange) {
Message in = exchange.getIn();
Map<String, Object> headers = in.getHeaders();
headers.put(GitHubConstants.GITHUB_ISSUE_TITLE, "Error");
}
}
}
| MockIssueCreateProducerProcessor |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/java/typeutils/TypeExtractorTest.java | {
"start": 81955,
"end": 82730
} | class ____<K, V> implements MapFunction<Edge<K, V>[], V> {
private static final long serialVersionUID = 1L;
@Override
public V map(Edge<K, V>[] value) throws Exception {
return null;
}
}
@SuppressWarnings({"unchecked", "rawtypes"})
@Test
void testInputInference4() {
EdgeMapper4<Boolean, String> em = new EdgeMapper4<Boolean, String>();
TypeInformation<?> ti =
TypeExtractor.getMapReturnTypes(
(MapFunction) em,
TypeInformation.of(new TypeHint<Tuple3<Boolean, Boolean, String>[]>() {}));
assertThat(ti.isBasicType()).isTrue();
assertThat(ti).isEqualTo(BasicTypeInfo.STRING_TYPE_INFO);
}
public static | EdgeMapper4 |
java | apache__camel | core/camel-xml-jaxp/src/main/java/org/apache/camel/converter/jaxp/XmlConverter.java | {
"start": 33488,
"end": 34387
} | class ____ add new kinds of conversion).
*/
@Converter(order = 69)
public DOMSource toDOMSource(Source source, Exchange exchange)
throws ParserConfigurationException, IOException, SAXException, TransformerException {
if (source instanceof DOMSource domSource) {
return domSource;
} else if (source instanceof SAXSource saxSource) {
return toDOMSourceFromSAX(saxSource);
} else if (source instanceof StreamSource streamSource) {
return toDOMSourceFromStream(streamSource, exchange);
} else if (source instanceof StAXSource staxSource) {
return toDOMSourceFromStAX(staxSource);
} else {
return null;
}
}
/**
* Converts the source instance to a {@link SAXSource} or returns null if the conversion is not supported (making it
* easy to derive from this | to |
java | google__dagger | javatests/dagger/internal/codegen/BindsInstanceValidationTest.java | {
"start": 4020,
"end": 4831
} | interface ____ {",
" @BindsInstance void noParams();",
"}");
CompilerTests.daggerCompiler(notAbstract)
.withProcessingOptions(compilerMode.processorOptions())
.compile(
subject -> {
subject.hasErrorCount(1);
subject.hasErrorContaining(
"@BindsInstance methods should have exactly one parameter for the bound type")
.onSource(notAbstract)
.onLine(6);
});
}
@Test
public void bindsInstanceManyParameters() {
Source notAbstract =
CompilerTests.javaSource(
"test.BindsInstanceNoParameter",
"package test;",
"",
"import dagger.BindsInstance;",
"",
" | BindsInstanceNoParameters |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/eventbus/impl/codecs/JsonObjectMessageCodec.java | {
"start": 645,
"end": 1350
} | class ____ implements MessageCodec<JsonObject, JsonObject> {
@Override
public void encodeToWire(Buffer buffer, JsonObject jsonObject) {
Buffer encoded = jsonObject.toBuffer();
buffer.appendInt(encoded.length());
buffer.appendBuffer(encoded);
}
@Override
public JsonObject decodeFromWire(int pos, Buffer buffer) {
int length = buffer.getInt(pos);
pos += 4;
return new JsonObject(buffer.slice(pos, pos + length));
}
@Override
public JsonObject transform(JsonObject jsonObject) {
return jsonObject.copy();
}
@Override
public String name() {
return "jsonobject";
}
@Override
public byte systemCodecID() {
return 13;
}
}
| JsonObjectMessageCodec |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenIT0137EarLifecycleTest.java | {
"start": 946,
"end": 1865
} | class ____ extends AbstractMavenIntegrationTestCase {
/**
* Test default binding of goals for "ear" lifecycle.
*
* @throws Exception in case of failure
*/
@Test
public void testit0137() throws Exception {
File testDir = extractResources("/it0137");
Verifier verifier = newVerifier(testDir.getAbsolutePath());
verifier.deleteDirectory("target");
verifier.setAutoclean(false);
verifier.addCliArgument("deploy");
verifier.execute();
verifier.verifyFilePresent("target/ear-generate-application-xml.txt");
verifier.verifyFilePresent("target/resources-resources.txt");
verifier.verifyFilePresent("target/ear-ear.txt");
verifier.verifyFilePresent("target/install-install.txt");
verifier.verifyFilePresent("target/deploy-deploy.txt");
verifier.verifyErrorFreeLog();
}
}
| MavenIT0137EarLifecycleTest |
java | spring-projects__spring-framework | spring-web/src/testFixtures/java/org/springframework/web/testfixture/server/handler/AbstractResponseStatusExceptionHandlerTests.java | {
"start": 1621,
"end": 1765
} | class ____ unit tests for {@link ResponseStatusExceptionHandler}.
*
* @author Rossen Stoyanchev
* @author Juergen Hoeller
*/
public abstract | for |
java | apache__camel | components/camel-docker/src/test/java/org/apache/camel/component/docker/headers/UnpauseContainerCmdHeaderTest.java | {
"start": 1283,
"end": 2077
} | class ____ extends BaseDockerHeaderTest<UnpauseContainerCmd> {
@Mock
private UnpauseContainerCmd mockObject;
@Test
void unpauseHeaderTest() {
String containerId = "9c09acd48a25";
Map<String, Object> headers = getDefaultParameters();
headers.put(DockerConstants.DOCKER_CONTAINER_ID, containerId);
template.sendBodyAndHeaders("direct:in", "", headers);
Mockito.verify(dockerClient, Mockito.times(1)).unpauseContainerCmd(containerId);
}
@Override
protected void setupMocks() {
Mockito.when(dockerClient.unpauseContainerCmd(anyString())).thenReturn(mockObject);
}
@Override
protected DockerOperation getOperation() {
return DockerOperation.UNPAUSE_CONTAINER;
}
}
| UnpauseContainerCmdHeaderTest |
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/request/rerank/HuggingFaceRerankRequestTests.java | {
"start": 1019,
"end": 3355
} | class ____ extends ESTestCase {
private static final String INPUT = "texts";
private static final String QUERY = "query";
private static final String INFERENCE_ID = "model";
private static final Integer TOP_N = 8;
private static final Boolean RETURN_TEXT = false;
private static final String AUTH_HEADER_VALUE = "foo";
public void testCreateRequest_WithMinimalFieldsSet() throws IOException {
testCreateRequest(null, null);
}
public void testCreateRequest_WithTopN() throws IOException {
testCreateRequest(TOP_N, null);
}
public void testCreateRequest_WithReturnDocuments() throws IOException {
testCreateRequest(null, RETURN_TEXT);
}
private void testCreateRequest(Integer topN, Boolean returnDocuments) throws IOException {
var request = createRequest(topN, returnDocuments);
var httpRequest = request.createHttpRequest();
assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class));
var httpPost = (HttpPost) httpRequest.httpRequestBase();
assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaTypeWithoutParameters()));
assertThat(httpPost.getLastHeader(HttpHeaders.AUTHORIZATION).getValue(), is(AUTH_HEADER_VALUE));
var requestMap = entityAsMap(httpPost.getEntity().getContent());
assertThat(requestMap.get(INPUT), is(List.of(INPUT)));
assertThat(requestMap.get(QUERY), is(QUERY));
// input and query must exist
int itemsCount = 2;
if (topN != null) {
assertThat(requestMap.get("top_n"), is(topN));
itemsCount++;
}
if (returnDocuments != null) {
assertThat(requestMap.get("return_text"), is(returnDocuments));
itemsCount++;
}
assertThat(requestMap, aMapWithSize(itemsCount));
}
private static HuggingFaceRerankRequest createRequest(@Nullable Integer topN, @Nullable Boolean returnDocuments) {
var rerankModel = HuggingFaceRerankModelTests.createModel(randomAlphaOfLength(10), "secret", INFERENCE_ID, topN, returnDocuments);
return new HuggingFaceRerankWithoutAuthRequest(QUERY, List.of(INPUT), rerankModel, topN, returnDocuments);
}
/**
* We use this | HuggingFaceRerankRequestTests |
java | elastic__elasticsearch | benchmarks/src/main/java/org/elasticsearch/benchmark/bytes/BytesArrayReadVLongBenchmark.java | {
"start": 1458,
"end": 2434
} | class ____ {
@Param(value = { "10000000" })
int entries;
private StreamInput streamInput;
@Setup
public void initResults() throws IOException {
final BytesStreamOutput tmp = new BytesStreamOutput();
for (int i = 0; i < entries / 2; i++) {
tmp.writeVLong(i);
}
for (int i = 0; i < entries / 2; i++) {
tmp.writeVLong(Long.MAX_VALUE - i);
}
BytesReference bytesArray = tmp.copyBytes();
if (bytesArray instanceof BytesArray == false) {
throw new AssertionError("expected BytesArray but saw [" + bytesArray.getClass() + "]");
}
this.streamInput = bytesArray.streamInput();
}
@Benchmark
public long readVLong() throws IOException {
long res = 0;
streamInput.reset();
for (int i = 0; i < entries; i++) {
res = res ^ streamInput.readVLong();
}
return res;
}
}
| BytesArrayReadVLongBenchmark |
java | elastic__elasticsearch | libs/x-content/src/main/java/org/elasticsearch/xcontent/ParsedMediaType.java | {
"start": 940,
"end": 7574
} | class ____ {
private final String originalHeaderValue;
private final String type;
private final String subType;
private final Map<String, String> parameters;
// tchar pattern as defined by RFC7230 section 3.2.6
private static final Pattern TCHAR_PATTERN = Pattern.compile("[a-zA-Z0-9!#$%&'*+\\-.\\^_`|~]+");
private ParsedMediaType(String originalHeaderValue, String type, String subType, Map<String, String> parameters) {
this.originalHeaderValue = originalHeaderValue;
this.type = type;
this.subType = subType;
this.parameters = Map.copyOf(parameters);
}
/**
* The parsed mime type without the associated parameters. Will always return lowercase.
*/
public String mediaTypeWithoutParameters() {
return type + "/" + subType;
}
public Map<String, String> getParameters() {
return parameters;
}
/**
* Parses a header value into it's parts.
* follows https://tools.ietf.org/html/rfc7231#section-3.1.1.1
* but allows only single media type. Media ranges will be ignored (treated as not provided)
* Note: parsing can return null, but it will throw exceptions once https://github.com/elastic/elasticsearch/issues/63080 is done
* TODO Do not rely on nulls
*
* @return a {@link ParsedMediaType} if the header could be parsed.
* @throws IllegalArgumentException if the header is malformed
*/
public static ParsedMediaType parseMediaType(String headerValue) {
if (headerValue != null) {
if (isMediaRange(headerValue) || "*/*".equals(headerValue)) {
return null;
}
final String[] elements = headerValue.toLowerCase(Locale.ROOT).split(";");
final String[] splitMediaType = elements[0].split("/");
if ((splitMediaType.length == 2
&& TCHAR_PATTERN.matcher(splitMediaType[0].trim()).matches()
&& TCHAR_PATTERN.matcher(splitMediaType[1].trim()).matches()) == false) {
throw new IllegalArgumentException("invalid media-type [" + headerValue + "]");
}
if (elements.length == 1) {
return new ParsedMediaType(headerValue, splitMediaType[0].trim(), splitMediaType[1].trim(), new HashMap<>());
} else {
Map<String, String> parameters = new HashMap<>();
for (int i = 1; i < elements.length; i++) {
String paramsAsString = elements[i].trim();
if (paramsAsString.isEmpty()) {
continue;
}
// spaces are allowed between parameters, but not between '=' sign
String[] keyValueParam = paramsAsString.split("=");
if (keyValueParam.length != 2 || hasTrailingSpace(keyValueParam[0]) || hasLeadingSpace(keyValueParam[1])) {
throw new IllegalArgumentException("invalid parameters for header [" + headerValue + "]");
}
String parameterName = keyValueParam[0].toLowerCase(Locale.ROOT).trim();
String parameterValue = keyValueParam[1].toLowerCase(Locale.ROOT).trim();
parameters.put(parameterName, parameterValue);
}
return new ParsedMediaType(
headerValue,
splitMediaType[0].trim().toLowerCase(Locale.ROOT),
splitMediaType[1].trim().toLowerCase(Locale.ROOT),
parameters
);
}
}
return null;
}
public static ParsedMediaType parseMediaType(XContentType requestContentType, Map<String, String> parameters) {
ParsedMediaType parsedMediaType = requestContentType.toParsedMediaType();
return new ParsedMediaType(parsedMediaType.originalHeaderValue, parsedMediaType.type, parsedMediaType.subType, parameters);
}
// simplistic check for media ranges. do not validate if this is a correct header
private static boolean isMediaRange(String headerValue) {
return headerValue.contains(",");
}
private static boolean hasTrailingSpace(String s) {
return s.length() == 0 || Character.isWhitespace(s.charAt(s.length() - 1));
}
private static boolean hasLeadingSpace(String s) {
return s.length() == 0 || Character.isWhitespace(s.charAt(0));
}
/**
* Resolves this instance to a MediaType instance defined in given MediaTypeRegistry.
* Performs validation against parameters.
* @param mediaTypeRegistry a registry where a mapping between a raw media type to an instance MediaType is defined
* @return a MediaType instance or null if no media type could be found or if a known parameter do not passes validation
*/
public <T extends MediaType> T toMediaType(MediaTypeRegistry<T> mediaTypeRegistry) {
T someType = mediaTypeRegistry.typeWithSubtypeToMediaType(mediaTypeWithoutParameters());
if (someType != null) {
Map<String, Pattern> registeredParams = mediaTypeRegistry.parametersFor(mediaTypeWithoutParameters());
for (Map.Entry<String, String> givenParamEntry : parameters.entrySet()) {
if (isValidParameter(givenParamEntry.getKey(), givenParamEntry.getValue(), registeredParams) == false) {
return null;
}
}
return someType;
}
return null;
}
private static boolean isValidParameter(String paramName, String value, Map<String, Pattern> registeredParams) {
if (registeredParams.containsKey(paramName)) {
Pattern regex = registeredParams.get(paramName);
return regex.matcher(value).matches();
}
// TODO undefined parameters are allowed until https://github.com/elastic/elasticsearch/issues/63080
return true;
}
@Override
public String toString() {
return originalHeaderValue;
}
public String responseContentTypeHeader() {
return mediaTypeWithoutParameters() + formatParameters(parameters);
}
// used in testing
public String responseContentTypeHeader(Map<String, String> params) {
return mediaTypeWithoutParameters() + formatParameters(params);
}
private static String formatParameters(Map<String, String> params) {
String joined = params.entrySet().stream().map(e -> e.getKey() + "=" + e.getValue()).collect(Collectors.joining(";"));
return joined.isEmpty() ? "" : ";" + joined;
}
}
| ParsedMediaType |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ForecastRequestStats.java | {
"start": 1232,
"end": 4611
} | class ____ implements ToXContentObject, Writeable {
/**
* Result type
*/
public static final String RESULT_TYPE_VALUE = "model_forecast_request_stats";
public static final ParseField RESULTS_FIELD = new ParseField(RESULT_TYPE_VALUE);
public static final ParseField FORECAST_ID = new ParseField("forecast_id");
public static final ParseField START_TIME = new ParseField("forecast_start_timestamp");
public static final ParseField END_TIME = new ParseField("forecast_end_timestamp");
public static final ParseField CREATE_TIME = new ParseField("forecast_create_timestamp");
public static final ParseField EXPIRY_TIME = new ParseField("forecast_expiry_timestamp");
public static final ParseField MESSAGES = new ParseField("forecast_messages");
public static final ParseField PROCESSING_TIME_MS = new ParseField("processing_time_ms");
public static final ParseField PROGRESS = new ParseField("forecast_progress");
public static final ParseField PROCESSED_RECORD_COUNT = new ParseField("processed_record_count");
public static final ParseField STATUS = new ParseField("forecast_status");
public static final ParseField MEMORY_USAGE = new ParseField("forecast_memory_bytes");
public static final ConstructingObjectParser<ForecastRequestStats, Void> STRICT_PARSER = createParser(false);
public static final ConstructingObjectParser<ForecastRequestStats, Void> LENIENT_PARSER = createParser(true);
private static ConstructingObjectParser<ForecastRequestStats, Void> createParser(boolean ignoreUnknownFields) {
ConstructingObjectParser<ForecastRequestStats, Void> parser = new ConstructingObjectParser<>(
RESULT_TYPE_VALUE,
ignoreUnknownFields,
a -> new ForecastRequestStats((String) a[0], (String) a[1])
);
parser.declareString(ConstructingObjectParser.constructorArg(), Job.ID);
parser.declareString(ConstructingObjectParser.constructorArg(), FORECAST_ID);
parser.declareString((modelForecastRequestStats, s) -> {}, Result.RESULT_TYPE);
parser.declareLong(ForecastRequestStats::setRecordCount, PROCESSED_RECORD_COUNT);
parser.declareStringArray(ForecastRequestStats::setMessages, MESSAGES);
parser.declareField(ForecastRequestStats::setTimeStamp, p -> Instant.ofEpochMilli(p.longValue()), Result.TIMESTAMP, ValueType.LONG);
parser.declareField(ForecastRequestStats::setStartTime, p -> Instant.ofEpochMilli(p.longValue()), START_TIME, ValueType.LONG);
parser.declareField(ForecastRequestStats::setEndTime, p -> Instant.ofEpochMilli(p.longValue()), END_TIME, ValueType.LONG);
parser.declareField(ForecastRequestStats::setCreateTime, p -> Instant.ofEpochMilli(p.longValue()), CREATE_TIME, ValueType.LONG);
parser.declareField(ForecastRequestStats::setExpiryTime, p -> Instant.ofEpochMilli(p.longValue()), EXPIRY_TIME, ValueType.LONG);
parser.declareDouble(ForecastRequestStats::setProgress, PROGRESS);
parser.declareLong(ForecastRequestStats::setProcessingTime, PROCESSING_TIME_MS);
parser.declareField(ForecastRequestStats::setStatus, p -> ForecastRequestStatus.fromString(p.text()), STATUS, ValueType.STRING);
parser.declareLong(ForecastRequestStats::setMemoryUsage, MEMORY_USAGE);
return parser;
}
public | ForecastRequestStats |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/entitygraph/EntityGraphWithInheritanceTest.java | {
"start": 5568,
"end": 5718
} | class ____ {
@Id
Long id;
String firstname;
String lastname;
}
@Entity(name = "Student")
@DiscriminatorValue("student")
public static | Person |
java | google__guava | guava/src/com/google/common/primitives/ImmutableDoubleArray.java | {
"start": 3593,
"end": 8938
} | class ____ implements Serializable {
private static final ImmutableDoubleArray EMPTY = new ImmutableDoubleArray(new double[0]);
/** Returns the empty array. */
public static ImmutableDoubleArray of() {
return EMPTY;
}
/** Returns an immutable array containing a single value. */
public static ImmutableDoubleArray of(double e0) {
return new ImmutableDoubleArray(new double[] {e0});
}
/** Returns an immutable array containing the given values, in order. */
public static ImmutableDoubleArray of(double e0, double e1) {
return new ImmutableDoubleArray(new double[] {e0, e1});
}
/** Returns an immutable array containing the given values, in order. */
public static ImmutableDoubleArray of(double e0, double e1, double e2) {
return new ImmutableDoubleArray(new double[] {e0, e1, e2});
}
/** Returns an immutable array containing the given values, in order. */
public static ImmutableDoubleArray of(double e0, double e1, double e2, double e3) {
return new ImmutableDoubleArray(new double[] {e0, e1, e2, e3});
}
/** Returns an immutable array containing the given values, in order. */
public static ImmutableDoubleArray of(double e0, double e1, double e2, double e3, double e4) {
return new ImmutableDoubleArray(new double[] {e0, e1, e2, e3, e4});
}
/** Returns an immutable array containing the given values, in order. */
public static ImmutableDoubleArray of(
double e0, double e1, double e2, double e3, double e4, double e5) {
return new ImmutableDoubleArray(new double[] {e0, e1, e2, e3, e4, e5});
}
// TODO(kevinb): go up to 11?
/**
* Returns an immutable array containing the given values, in order.
*
* <p>The array {@code rest} must not be longer than {@code Integer.MAX_VALUE - 1}.
*/
// Use (first, rest) so that `of(someDoubleArray)` won't compile (they should use copyOf), which
// is okay since we have to copy the just-created array anyway.
public static ImmutableDoubleArray of(double first, double... rest) {
checkArgument(
rest.length <= Integer.MAX_VALUE - 1, "the total number of elements must fit in an int");
double[] array = new double[rest.length + 1];
array[0] = first;
System.arraycopy(rest, 0, array, 1, rest.length);
return new ImmutableDoubleArray(array);
}
/** Returns an immutable array containing the given values, in order. */
public static ImmutableDoubleArray copyOf(double[] values) {
return values.length == 0
? EMPTY
: new ImmutableDoubleArray(Arrays.copyOf(values, values.length));
}
/** Returns an immutable array containing the given values, in order. */
public static ImmutableDoubleArray copyOf(Collection<Double> values) {
return values.isEmpty() ? EMPTY : new ImmutableDoubleArray(Doubles.toArray(values));
}
/**
* Returns an immutable array containing the given values, in order.
*
* <p><b>Performance note:</b> this method delegates to {@link #copyOf(Collection)} if {@code
* values} is a {@link Collection}. Otherwise it creates a {@link #builder} and uses {@link
* Builder#addAll(Iterable)}, with all the performance implications associated with that.
*/
public static ImmutableDoubleArray copyOf(Iterable<Double> values) {
if (values instanceof Collection) {
return copyOf((Collection<Double>) values);
}
return builder().addAll(values).build();
}
/**
* Returns an immutable array containing all the values from {@code stream}, in order.
*
* @since 22.0 (but only since 33.4.0 in the Android flavor)
*/
public static ImmutableDoubleArray copyOf(DoubleStream stream) {
// Note this uses very different growth behavior from copyOf(Iterable) and the builder.
double[] array = stream.toArray();
return (array.length == 0) ? EMPTY : new ImmutableDoubleArray(array);
}
/**
* Returns a new, empty builder for {@link ImmutableDoubleArray} instances, sized to hold up to
* {@code initialCapacity} values without resizing. The returned builder is not thread-safe.
*
* <p><b>Performance note:</b> When feasible, {@code initialCapacity} should be the exact number
* of values that will be added, if that knowledge is readily available. It is better to guess a
* value slightly too high than slightly too low. If the value is not exact, the {@link
* ImmutableDoubleArray} that is built will very likely occupy more memory than strictly
* necessary; to trim memory usage, build using {@code builder.build().trimmed()}.
*/
public static Builder builder(int initialCapacity) {
checkArgument(initialCapacity >= 0, "Invalid initialCapacity: %s", initialCapacity);
return new Builder(initialCapacity);
}
/**
* Returns a new, empty builder for {@link ImmutableDoubleArray} instances, with a default initial
* capacity. The returned builder is not thread-safe.
*
* <p><b>Performance note:</b> The {@link ImmutableDoubleArray} that is built will very likely
* occupy more memory than necessary; to trim memory usage, build using {@code
* builder.build().trimmed()}.
*/
public static Builder builder() {
return new Builder(10);
}
/**
* A builder for {@link ImmutableDoubleArray} instances; obtained using {@link
* ImmutableDoubleArray#builder}.
*/
public static final | ImmutableDoubleArray |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StreamLimiter.java | {
"start": 1017,
"end": 1223
} | interface ____ {
/**
* Set a limit. Calling this function clears any existing limit.
*/
public void setLimit(long limit);
/**
* Disable limit.
*/
public void clearLimit();
} | StreamLimiter |
java | micronaut-projects__micronaut-core | router/src/main/java/io/micronaut/web/router/AnnotatedFilterRouteBuilder.java | {
"start": 1537,
"end": 4544
} | class ____ extends DefaultRouteBuilder implements BeanDefinitionProcessor<Filter> {
private final ServerContextPathProvider contextPathProvider;
/**
* Constructor.
*
* @param executionHandleLocator The execution handler locator
* @param uriNamingStrategy The URI naming strategy
* @param conversionService The conversion service
* @param contextPathProvider The server context path provider
*/
@Inject
public AnnotatedFilterRouteBuilder(
ExecutionHandleLocator executionHandleLocator,
UriNamingStrategy uriNamingStrategy,
ConversionService conversionService,
@Nullable ServerContextPathProvider contextPathProvider) {
super(executionHandleLocator, uriNamingStrategy, conversionService);
this.contextPathProvider = contextPathProvider;
}
@Override
public void process(BeanDefinition<?> beanDefinition, BeanContext beanContext) {
if (HttpClientFilter.class.isAssignableFrom(beanDefinition.getBeanType())) {
// ignore http client filters
return;
}
String[] patterns = getPatterns(beanDefinition);
if (ArrayUtils.isNotEmpty(patterns)) {
HttpMethod[] methods = beanDefinition.enumValues(Filter.class, "methods", HttpMethod.class);
FilterPatternStyle patternStyle = beanDefinition.enumValue(Filter.class, "patternStyle",
FilterPatternStyle.class).orElse(FilterPatternStyle.ANT);
String first = patterns[0];
@SuppressWarnings("unchecked")
FilterRoute filterRoute = addFilter(first, beanContext, (BeanDefinition<? extends HttpFilter>) beanDefinition);
if (patterns.length > 1) {
for (int i = 1; i < patterns.length; i++) {
String pattern = patterns[i];
filterRoute.pattern(pattern);
}
}
if (ArrayUtils.isNotEmpty(methods)) {
filterRoute.methods(methods);
}
filterRoute.patternStyle(patternStyle);
}
}
/**
* @param beanDefinition The bean definition
* @return The array of patterns that should match request URLs for the bean to
* be invoked.
*/
private String[] getPatterns(BeanDefinition<?> beanDefinition) {
String[] values = beanDefinition.stringValues(Filter.class);
String contextPath = contextPathProvider != null ? contextPathProvider.getContextPath() : null;
if (contextPath != null) {
for (int i = 0; i < values.length; i++) {
if (!values[i].startsWith(contextPath)) {
String newValue = StringUtils.prependUri(contextPath, values[i]);
if (newValue.charAt(0) != '/') {
newValue = "/" + newValue;
}
values[i] = newValue;
}
}
}
return values;
}
}
| AnnotatedFilterRouteBuilder |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/builditem/nativeimage/NativeImageAllowIncompleteClasspathAggregateBuildItem.java | {
"start": 260,
"end": 560
} | class ____ extends SimpleBuildItem {
private final boolean allow;
public NativeImageAllowIncompleteClasspathAggregateBuildItem(boolean allow) {
this.allow = allow;
}
public boolean isAllow() {
return allow;
}
}
| NativeImageAllowIncompleteClasspathAggregateBuildItem |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/event/collection/detached/MultipleCollectionRefEntity2.java | {
"start": 472,
"end": 2849
} | class ____ implements org.hibernate.orm.test.event.collection.Entity {
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
@Column(name = "ID", length = 10)
private Long id;
@Column(name = "TEXT", length = 50, nullable = false)
private String text;
@ManyToOne
@JoinColumn(name = "MCE_ID", nullable = false, insertable = false, updatable = false,
foreignKey = @ForeignKey(name = "FK_RE2_MCE"))
private MultipleCollectionEntity multipleCollectionEntity;
@Column(name = "MCE_ID", insertable = false, updatable = false)
private Long multipleCollectionEntityId;
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getText() {
return text;
}
public void setText(String text) {
this.text = text;
}
public MultipleCollectionEntity getMultipleCollectionEntity() {
return multipleCollectionEntity;
}
public void setMultipleCollectionEntity(
MultipleCollectionEntity multipleCollectionEntity) {
this.multipleCollectionEntity = multipleCollectionEntity;
}
public Long getMultipleCollectionEntityId() {
return multipleCollectionEntityId;
}
public void setMultipleCollectionEntityId(Long multipleCollectionEntityId) {
this.multipleCollectionEntityId = multipleCollectionEntityId;
}
@Override
public String toString() {
return "MultipleCollectionRefEntity2 [id=" + id + ", text=" + text
+ ", multipleCollectionEntityId=" + multipleCollectionEntityId
+ "]";
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((text == null) ? 0 : text.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
MultipleCollectionRefEntity2 other = (MultipleCollectionRefEntity2) obj;
if (text == null) {
if (other.text != null)
return false;
} else if (!text.equals(other.text))
return false;
return true;
}
public MultipleCollectionRefEntity2 deepCopy(MultipleCollectionEntity newRef) {
MultipleCollectionRefEntity2 clone = new MultipleCollectionRefEntity2();
clone.setText(this.text);
clone.setId(this.id);
clone.setMultipleCollectionEntity(newRef);
clone.setMultipleCollectionEntityId(newRef.getId());
return clone;
}
}
| MultipleCollectionRefEntity2 |
java | apache__maven | compat/maven-compat/src/test/java/org/apache/maven/project/LegacyLocalRepositoryManager.java | {
"start": 1566,
"end": 5687
} | class ____ implements LocalRepositoryManager {
private final LocalRepository repository;
public LegacyLocalRepositoryManager(File basedir) {
this.repository = new LocalRepository(basedir.getAbsoluteFile(), "legacy");
}
@Override
public LocalRepository getRepository() {
return repository;
}
@Override
public String getPathForLocalArtifact(Artifact artifact) {
StringBuilder path = new StringBuilder(128);
path.append(artifact.getGroupId()).append('/');
path.append(artifact.getExtension()).append("s/");
path.append(artifact.getArtifactId()).append('-').append(artifact.getVersion());
if (!artifact.getClassifier().isEmpty()) {
path.append('-').append(artifact.getClassifier());
}
path.append('.').append(artifact.getExtension());
return path.toString();
}
@Override
public String getPathForRemoteArtifact(Artifact artifact, RemoteRepository repository, String context) {
return getPathForLocalArtifact(artifact);
}
@Override
public String getPathForLocalMetadata(Metadata metadata) {
return getPath(metadata, "local");
}
@Override
public String getPathForRemoteMetadata(Metadata metadata, RemoteRepository repository, String context) {
return getPath(metadata, getRepositoryKey(repository, context));
}
String getRepositoryKey(RemoteRepository repository, String context) {
return repository.getId();
}
private String getPath(Metadata metadata, String repositoryKey) {
StringBuilder path = new StringBuilder(128);
if (!metadata.getGroupId().isEmpty()) {
path.append(metadata.getGroupId().replace('.', '/')).append('/');
if (!metadata.getArtifactId().isEmpty()) {
path.append(metadata.getArtifactId()).append('/');
if (!metadata.getVersion().isEmpty()) {
path.append(metadata.getVersion()).append('/');
}
}
}
path.append(insertRepositoryKey(metadata.getType(), repositoryKey));
return path.toString();
}
private String insertRepositoryKey(String filename, String repositoryKey) {
String result;
int idx = filename.indexOf('.');
if (idx < 0) {
result = filename + '-' + repositoryKey;
} else {
result = filename.substring(0, idx) + '-' + repositoryKey + filename.substring(idx);
}
return result;
}
@Override
public LocalArtifactResult find(RepositorySystemSession session, LocalArtifactRequest request) {
String path = getPathForLocalArtifact(request.getArtifact());
File file = new File(getRepository().getBasedir(), path);
LocalArtifactResult result = new LocalArtifactResult(request);
if (file.isFile()) {
result.setFile(file);
result.setAvailable(true);
}
return result;
}
@Override
public void add(RepositorySystemSession session, LocalArtifactRegistration request) {
// noop
}
@Override
public LocalMetadataResult find(RepositorySystemSession session, LocalMetadataRequest request) {
LocalMetadataResult result = new LocalMetadataResult(request);
String path;
Metadata metadata = request.getMetadata();
String context = request.getContext();
RemoteRepository remote = request.getRepository();
if (remote != null) {
path = getPathForRemoteMetadata(metadata, remote, context);
} else {
path = getPathForLocalMetadata(metadata);
}
File file = new File(getRepository().getBasedir(), path);
if (file.isFile()) {
result.setFile(file);
}
return result;
}
@Override
public void add(RepositorySystemSession session, LocalMetadataRegistration request) {
// noop
}
@Override
public String toString() {
return String.valueOf(getRepository());
}
}
| LegacyLocalRepositoryManager |
java | quarkusio__quarkus | extensions/smallrye-reactive-messaging-kafka/deployment/src/test/java/io/quarkus/smallrye/reactivemessaging/kafka/deployment/DefaultSerdeConfigTest.java | {
"start": 140137,
"end": 140355
} | class ____ implements Serializer<Long>, CustomInterface<Long> {
@Override
public byte[] serialize(String topic, Long data) {
return new byte[0];
}
}
private static | MySerializer |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/localdatetime/LocalDateTimeAssert_isStrictlyBetween_with_String_parameters_Test.java | {
"start": 1024,
"end": 2220
} | class ____ extends AbstractLocalDateTimeAssertBaseTest {
@Override
protected LocalDateTimeAssert invoke_api_method() {
return assertions.isStrictlyBetween(YESTERDAY.toString(), TOMORROW.toString());
}
@Override
protected void verify_internal_effects() {
verify(getComparables(assertions)).assertIsBetween(getInfo(assertions), getActual(assertions), YESTERDAY, TOMORROW, false,
false);
}
@Test
void should_throw_a_DateTimeParseException_if_start_String_parameter_cant_be_converted() {
// GIVEN
String abc = "abc";
// WHEN
Throwable thrown = catchThrowable(() -> assertions.isStrictlyBetween(abc, TOMORROW.toString()));
// THEN
assertThat(thrown).isInstanceOf(DateTimeParseException.class);
}
@Test
void should_throw_a_DateTimeParseException_if_end_String_parameter_cant_be_converted() {
// GIVEN
String abc = "abc";
// WHEN
Throwable thrown = catchThrowable(() -> assertions.isStrictlyBetween(YESTERDAY.toString(), abc));
// THEN
assertThat(thrown).isInstanceOf(DateTimeParseException.class);
}
}
| LocalDateTimeAssert_isStrictlyBetween_with_String_parameters_Test |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/tests/SystemTestUtilTest.java | {
"start": 1175,
"end": 2860
} | class ____ {
private final Map<String, String> expectedParsedMap = new TreeMap<>();
@BeforeEach
public void setUp() {
expectedParsedMap.put("foo", "foo1");
expectedParsedMap.put("bar", "bar1");
expectedParsedMap.put("baz", "baz1");
}
@Test
public void shouldParseCorrectMap() {
final String formattedConfigs = "foo=foo1,bar=bar1,baz=baz1";
final Map<String, String> parsedMap = SystemTestUtil.parseConfigs(formattedConfigs);
final TreeMap<String, String> sortedParsedMap = new TreeMap<>(parsedMap);
assertEquals(sortedParsedMap, expectedParsedMap);
}
@Test
public void shouldThrowExceptionOnNull() {
assertThrows(NullPointerException.class, () -> SystemTestUtil.parseConfigs(null));
}
@Test
public void shouldThrowExceptionIfNotCorrectKeyValueSeparator() {
final String badString = "foo:bar,baz:boo";
assertThrows(IllegalStateException.class, () -> SystemTestUtil.parseConfigs(badString));
}
@Test
public void shouldThrowExceptionIfNotCorrectKeyValuePairSeparator() {
final String badString = "foo=bar;baz=boo";
assertThrows(IllegalStateException.class, () -> SystemTestUtil.parseConfigs(badString));
}
@Test
public void shouldParseSingleKeyValuePairString() {
final Map<String, String> expectedSinglePairMap = new HashMap<>();
expectedSinglePairMap.put("foo", "bar");
final String singleValueString = "foo=bar";
final Map<String, String> parsedMap = SystemTestUtil.parseConfigs(singleValueString);
assertEquals(expectedSinglePairMap, parsedMap);
}
} | SystemTestUtilTest |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/updatemethods/CompanyEntity.java | {
"start": 237,
"end": 645
} | class ____ {
private String name;
private DepartmentEntity department;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public DepartmentEntity getDepartment() {
return department;
}
public void setDepartment(DepartmentEntity department) {
this.department = department;
}
}
| CompanyEntity |
java | netty__netty | transport/src/main/java/io/netty/channel/StacklessClosedChannelException.java | {
"start": 851,
"end": 1543
} | class ____ extends ClosedChannelException {
private static final long serialVersionUID = -2214806025529435136L;
private StacklessClosedChannelException() { }
@Override
public Throwable fillInStackTrace() {
// Suppress a warning since this method doesn't need synchronization
return this;
}
/**
* Creates a new {@link StacklessClosedChannelException} which has the origin of the given {@link Class} and method.
*/
static StacklessClosedChannelException newInstance(Class<?> clazz, String method) {
return ThrowableUtil.unknownStackTrace(new StacklessClosedChannelException(), clazz, method);
}
}
| StacklessClosedChannelException |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/NamespaceHttpAnonymousTests.java | {
"start": 6016,
"end": 6443
} | class ____ {
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.authorizeHttpRequests((requests) -> requests
.requestMatchers("/principal").anonymous()
.anyRequest().denyAll())
.anonymous((anonymous) -> anonymous.principal("AnonymousUsernameConfig"));
return http.build();
// @formatter:on
}
}
@RestController
static | AnonymousUsernameConfig |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/issues/ExceptionPolicyIssueTest.java | {
"start": 970,
"end": 1898
} | class ____ extends ContextTestSupport {
@Test
public void testOnExceptionWithGenericException() throws Exception {
getMockEndpoint("mock:exception").expectedMessageCount(0);
getMockEndpoint("mock:ue").expectedMessageCount(1);
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
onException(MyUnmarshalException.class).handled(true).to("mock:ue");
onException(Exception.class).handled(true).to("mock:exception");
from("direct:start")
.throwException(new MyUnmarshalException("Could not unmarshal", new IllegalArgumentException("Damn")));
}
};
}
private static final | ExceptionPolicyIssueTest |
java | alibaba__nacos | core/src/main/java/com/alibaba/nacos/core/remote/tls/RpcServerSslContextRefresherHolder.java | {
"start": 998,
"end": 1278
} | class ____ responsible for initializing and
* providing instances of the SSL context refresher based on the communication type (SDK or Cluster).
*
* @author liuzunfei
* @version $Id: RpcServerSslContextRefresherHolder.java, v 0.1 2023年03月17日 12:00 PM liuzunfei Exp $
*/
public | is |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/factory/ListableBeanFactory.java | {
"start": 16064,
"end": 18138
} | interface ____ match, or {@code null} for all concrete beans
* @return a Map with the matching beans, containing the bean names as
* keys and the corresponding bean instances as values
* @throws BeansException if a bean could not be created
* @since 1.1.2
* @see FactoryBean#getObjectType
* @see BeanFactoryUtils#beansOfTypeIncludingAncestors(ListableBeanFactory, Class)
*/
<T> Map<String, T> getBeansOfType(@Nullable Class<T> type) throws BeansException;
/**
* Return the bean instances that match the given object type (including
* subclasses), judging from either bean definitions or the value of
* {@code getObjectType} in the case of FactoryBeans.
* <p><b>NOTE: This method introspects top-level beans only.</b> It does <i>not</i>
* check nested beans which might match the specified type as well. Also, it
* <b>suppresses exceptions for beans that are currently in creation in a circular
* reference scenario:</b> typically, references back to the caller of this method.
* <p>Does consider objects created by FactoryBeans if the "allowEagerInit" flag is set,
* which means that FactoryBeans will get initialized. If the object created by the
* FactoryBean doesn't match, the raw FactoryBean itself will be matched against the
* type. If "allowEagerInit" is not set, only raw FactoryBeans will be checked
* (which doesn't require initialization of each FactoryBean).
* <p>Does not consider any hierarchy this factory may participate in.
* Use BeanFactoryUtils' {@code beansOfTypeIncludingAncestors}
* to include beans in ancestor factories too.
* <p>The Map returned by this method should always return bean names and
* corresponding bean instances <i>in the order of definition</i> in the
* backend configuration, as far as possible.
* <p><b>Consider {@link #getBeanNamesForType(Class)} with selective {@link #getBean}
* calls for specific bean names in preference to this Map-based retrieval method.</b>
* Aside from lazy instantiation benefits, this also avoids any exception suppression.
* @param type the | to |
java | apache__logging-log4j2 | log4j-jpa/src/test/java/org/apache/logging/log4j/core/appender/db/jpa/converter/ContextDataJsonAttributeConverterTest.java | {
"start": 1348,
"end": 3258
} | class ____ {
private ContextDataJsonAttributeConverter converter;
@BeforeEach
void setUp() {
this.converter = new ContextDataJsonAttributeConverter();
}
@Test
void testConvert01() {
final StringMap map = new SortedArrayStringMap();
map.putValue("test1", "another1");
map.putValue("key2", "value2");
final String converted = this.converter.convertToDatabaseColumn(map);
assertNotNull(converted, "The converted value should not be null.");
final ReadOnlyStringMap reversed = this.converter.convertToEntityAttribute(converted);
assertNotNull(reversed, "The reversed value should not be null.");
assertEquals(map, reversed, "The reversed value is not correct.");
}
@Test
void testConvert02() {
final StringMap map = new SortedArrayStringMap();
map.putValue("someKey", "coolValue");
map.putValue("anotherKey", "testValue");
map.putValue("myKey", "yourValue");
final String converted = this.converter.convertToDatabaseColumn(map);
assertNotNull(converted, "The converted value should not be null.");
final ReadOnlyStringMap reversed = this.converter.convertToEntityAttribute(converted);
assertNotNull(reversed, "The reversed value should not be null.");
assertEquals(reversed, map, "The reversed value is not correct.");
}
@Test
void testConvertNullToDatabaseColumn() {
assertNull(this.converter.convertToDatabaseColumn(null), "The converted value should be null.");
}
@Test
void testConvertNullOrBlankToEntityAttribute() {
assertNull(this.converter.convertToEntityAttribute(null), "The converted attribute should be null (1).");
assertNull(this.converter.convertToEntityAttribute(""), "The converted attribute should be null (2).");
}
}
| ContextDataJsonAttributeConverterTest |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/mock/MockDriverMBean.java | {
"start": 687,
"end": 1165
} | interface ____ {
long getConnectionCloseCount();
int getMajorVersion();
int getMinorVersion();
boolean jdbcCompliant();
boolean acceptsURL(String url) throws SQLException;
boolean isLogExecuteQueryEnable();
void setLogExecuteQueryEnable(boolean logExecuteQueryEnable);
long getIdleTimeCount();
void setIdleTimeCount(long idleTimeCount);
void closeAllConnections() throws SQLException;
int getConnectionsSize();
}
| MockDriverMBean |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableDoFinallyTest.java | {
"start": 1232,
"end": 14843
} | class ____ extends RxJavaTest implements Action {
int calls;
@Override
public void run() throws Exception {
calls++;
}
@Test
public void normalJust() {
Flowable.just(1)
.doFinally(this)
.test()
.assertResult(1);
assertEquals(1, calls);
}
@Test
public void normalEmpty() {
Flowable.empty()
.doFinally(this)
.test()
.assertResult();
assertEquals(1, calls);
}
@Test
public void normalError() {
Flowable.error(new TestException())
.doFinally(this)
.test()
.assertFailure(TestException.class);
assertEquals(1, calls);
}
@Test
public void normalTake() {
Flowable.range(1, 10)
.doFinally(this)
.take(5)
.test()
.assertResult(1, 2, 3, 4, 5);
assertEquals(1, calls);
}
@Test
public void doubleOnSubscribe() {
TestHelper.checkDoubleOnSubscribeFlowable(new Function<Flowable<Object>, Publisher<Object>>() {
@Override
public Publisher<Object> apply(Flowable<Object> f) throws Exception {
return f.doFinally(FlowableDoFinallyTest.this);
}
});
TestHelper.checkDoubleOnSubscribeFlowable(new Function<Flowable<Object>, Publisher<Object>>() {
@Override
public Publisher<Object> apply(Flowable<Object> f) throws Exception {
return f.doFinally(FlowableDoFinallyTest.this).filter(Functions.alwaysTrue());
}
});
}
@Test
public void syncFused() {
TestSubscriberEx<Integer> ts = new TestSubscriberEx<Integer>().setInitialFusionMode(QueueFuseable.SYNC);
Flowable.range(1, 5)
.doFinally(this)
.subscribe(ts);
ts.assertFusionMode(QueueFuseable.SYNC)
.assertResult(1, 2, 3, 4, 5);
assertEquals(1, calls);
}
@Test
public void syncFusedBoundary() {
TestSubscriberEx<Integer> ts = new TestSubscriberEx<Integer>().setInitialFusionMode(QueueFuseable.SYNC | QueueFuseable.BOUNDARY);
Flowable.range(1, 5)
.doFinally(this)
.subscribe(ts);
ts.assertFusionMode(QueueFuseable.NONE)
.assertResult(1, 2, 3, 4, 5);
assertEquals(1, calls);
}
@Test
public void asyncFused() {
TestSubscriberEx<Integer> ts = new TestSubscriberEx<Integer>().setInitialFusionMode(QueueFuseable.ASYNC);
UnicastProcessor<Integer> up = UnicastProcessor.create();
TestHelper.emit(up, 1, 2, 3, 4, 5);
up
.doFinally(this)
.subscribe(ts);
ts.assertFusionMode(QueueFuseable.ASYNC)
.assertResult(1, 2, 3, 4, 5);
assertEquals(1, calls);
}
@Test
public void asyncFusedBoundary() {
TestSubscriberEx<Integer> ts = new TestSubscriberEx<Integer>().setInitialFusionMode(QueueFuseable.ASYNC | QueueFuseable.BOUNDARY);
UnicastProcessor<Integer> up = UnicastProcessor.create();
TestHelper.emit(up, 1, 2, 3, 4, 5);
up
.doFinally(this)
.subscribe(ts);
ts.assertFusionMode(QueueFuseable.NONE)
.assertResult(1, 2, 3, 4, 5);
assertEquals(1, calls);
}
@Test
public void normalJustConditional() {
Flowable.just(1)
.doFinally(this)
.filter(Functions.alwaysTrue())
.test()
.assertResult(1);
assertEquals(1, calls);
}
@Test
public void normalEmptyConditional() {
Flowable.empty()
.doFinally(this)
.filter(Functions.alwaysTrue())
.test()
.assertResult();
assertEquals(1, calls);
}
@Test
public void normalErrorConditional() {
Flowable.error(new TestException())
.doFinally(this)
.filter(Functions.alwaysTrue())
.test()
.assertFailure(TestException.class);
assertEquals(1, calls);
}
@Test
public void normalTakeConditional() {
Flowable.range(1, 10)
.doFinally(this)
.filter(Functions.alwaysTrue())
.take(5)
.test()
.assertResult(1, 2, 3, 4, 5);
assertEquals(1, calls);
}
@Test
public void syncFusedConditional() {
TestSubscriberEx<Integer> ts = new TestSubscriberEx<Integer>().setInitialFusionMode(QueueFuseable.SYNC);
Flowable.range(1, 5)
.doFinally(this)
.compose(TestHelper.conditional())
.subscribe(ts);
ts.assertFusionMode(QueueFuseable.SYNC)
.assertResult(1, 2, 3, 4, 5);
assertEquals(1, calls);
}
@Test
public void nonFused() {
TestSubscriberEx<Integer> ts = new TestSubscriberEx<Integer>().setInitialFusionMode(QueueFuseable.SYNC);
Flowable.range(1, 5).hide()
.doFinally(this)
.subscribe(ts);
ts.assertFusionMode(QueueFuseable.NONE)
.assertResult(1, 2, 3, 4, 5);
assertEquals(1, calls);
}
@Test
public void nonFusedConditional() {
TestSubscriberEx<Integer> ts = new TestSubscriberEx<Integer>().setInitialFusionMode(QueueFuseable.SYNC);
Flowable.range(1, 5).hide()
.doFinally(this)
.compose(TestHelper.conditional())
.subscribe(ts);
ts.assertFusionMode(QueueFuseable.NONE)
.assertResult(1, 2, 3, 4, 5);
assertEquals(1, calls);
}
@Test
public void syncFusedBoundaryConditional() {
TestSubscriberEx<Integer> ts = new TestSubscriberEx<Integer>().setInitialFusionMode(QueueFuseable.SYNC | QueueFuseable.BOUNDARY);
Flowable.range(1, 5)
.doFinally(this)
.compose(TestHelper.conditional())
.subscribe(ts);
ts.assertFusionMode(QueueFuseable.NONE)
.assertResult(1, 2, 3, 4, 5);
assertEquals(1, calls);
}
@Test
public void asyncFusedConditional() {
TestSubscriberEx<Integer> ts = new TestSubscriberEx<Integer>().setInitialFusionMode(QueueFuseable.ASYNC);
UnicastProcessor<Integer> up = UnicastProcessor.create();
TestHelper.emit(up, 1, 2, 3, 4, 5);
up
.doFinally(this)
.compose(TestHelper.conditional())
.subscribe(ts);
ts.assertFusionMode(QueueFuseable.ASYNC)
.assertResult(1, 2, 3, 4, 5);
assertEquals(1, calls);
}
@Test
public void asyncFusedBoundaryConditional() {
TestSubscriberEx<Integer> ts = new TestSubscriberEx<Integer>().setInitialFusionMode(QueueFuseable.ASYNC | QueueFuseable.BOUNDARY);
UnicastProcessor<Integer> up = UnicastProcessor.create();
TestHelper.emit(up, 1, 2, 3, 4, 5);
up
.doFinally(this)
.compose(TestHelper.conditional())
.subscribe(ts);
ts.assertFusionMode(QueueFuseable.NONE)
.assertResult(1, 2, 3, 4, 5);
assertEquals(1, calls);
}
@Test
public void actionThrows() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
Flowable.just(1)
.doFinally(new Action() {
@Override
public void run() throws Exception {
throw new TestException();
}
})
.test()
.assertResult(1)
.cancel();
TestHelper.assertUndeliverable(errors, 0, TestException.class);
} finally {
RxJavaPlugins.reset();
}
}
@Test
public void actionThrowsConditional() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
Flowable.just(1)
.doFinally(new Action() {
@Override
public void run() throws Exception {
throw new TestException();
}
})
.filter(Functions.alwaysTrue())
.test()
.assertResult(1)
.cancel();
TestHelper.assertUndeliverable(errors, 0, TestException.class);
} finally {
RxJavaPlugins.reset();
}
}
@Test
public void clearIsEmpty() {
Flowable.range(1, 5)
.doFinally(this)
.subscribe(new FlowableSubscriber<Integer>() {
@Override
public void onSubscribe(Subscription s) {
@SuppressWarnings("unchecked")
QueueSubscription<Integer> qs = (QueueSubscription<Integer>)s;
qs.requestFusion(QueueFuseable.ANY);
assertFalse(qs.isEmpty());
try {
assertEquals(1, qs.poll().intValue());
} catch (Throwable ex) {
throw new RuntimeException(ex);
}
assertFalse(qs.isEmpty());
qs.clear();
assertTrue(qs.isEmpty());
qs.cancel();
}
@Override
public void onNext(Integer t) {
}
@Override
public void onError(Throwable t) {
}
@Override
public void onComplete() {
}
});
assertEquals(1, calls);
}
@Test
public void clearIsEmptyConditional() {
Flowable.range(1, 5)
.doFinally(this)
.filter(Functions.alwaysTrue())
.subscribe(new FlowableSubscriber<Integer>() {
@Override
public void onSubscribe(Subscription s) {
@SuppressWarnings("unchecked")
QueueSubscription<Integer> qs = (QueueSubscription<Integer>)s;
qs.requestFusion(QueueFuseable.ANY);
assertFalse(qs.isEmpty());
try {
assertEquals(1, qs.poll().intValue());
} catch (Throwable ex) {
throw new RuntimeException(ex);
}
assertFalse(qs.isEmpty());
qs.clear();
assertTrue(qs.isEmpty());
qs.cancel();
}
@Override
public void onNext(Integer t) {
}
@Override
public void onError(Throwable t) {
}
@Override
public void onComplete() {
}
});
assertEquals(1, calls);
}
@Test
public void eventOrdering() {
final List<String> list = new ArrayList<>();
Flowable.error(new TestException())
.doOnCancel(new Action() {
@Override
public void run() throws Exception {
list.add("cancel");
}
})
.doFinally(new Action() {
@Override
public void run() throws Exception {
list.add("finally");
}
})
.subscribe(
new Consumer<Object>() {
@Override
public void accept(Object v) throws Exception {
list.add("onNext");
}
},
new Consumer<Throwable>() {
@Override
public void accept(Throwable e) throws Exception {
list.add("onError");
}
},
new Action() {
@Override
public void run() throws Exception {
list.add("onComplete");
}
});
assertEquals(Arrays.asList("onError", "finally"), list);
}
@Test
public void eventOrdering2() {
final List<String> list = new ArrayList<>();
Flowable.just(1)
.doOnCancel(new Action() {
@Override
public void run() throws Exception {
list.add("cancel");
}
})
.doFinally(new Action() {
@Override
public void run() throws Exception {
list.add("finally");
}
})
.subscribe(
new Consumer<Object>() {
@Override
public void accept(Object v) throws Exception {
list.add("onNext");
}
},
new Consumer<Throwable>() {
@Override
public void accept(Throwable e) throws Exception {
list.add("onError");
}
},
new Action() {
@Override
public void run() throws Exception {
list.add("onComplete");
}
});
assertEquals(Arrays.asList("onNext", "onComplete", "finally"), list);
}
@Test
public void fusionRejected() {
TestSubscriberEx<Object> ts = new TestSubscriberEx<>();
ts.setInitialFusionMode(QueueFuseable.ANY);
TestHelper.rejectFlowableFusion()
.doFinally(() -> { })
.subscribeWith(ts);
ts.assertFuseable()
.assertFusionMode(QueueFuseable.NONE);
}
@Test
public void fusionRejectedConditional() {
TestSubscriberEx<Object> ts = new TestSubscriberEx<>();
ts.setInitialFusionMode(QueueFuseable.ANY);
TestHelper.rejectFlowableFusion()
.doFinally(() -> { })
.compose(TestHelper.conditional())
.subscribeWith(ts);
ts.assertFuseable()
.assertFusionMode(QueueFuseable.NONE);
}
}
| FlowableDoFinallyTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UnnecessaryOptionalGetTest.java | {
"start": 11575,
"end": 11805
} | class ____ {
private void home() {
Optional<String> op = Optional.of("hello");
op.flatMap(x -> Optional.of(x));
}
}
""")
.doTest();
}
}
| Test |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1700/Issue1766.java | {
"start": 787,
"end": 1284
} | class ____
{
private String name;
@JSONField(format = "yyyy-MM-dd HH:mm:ss")
private Date birthday;
public String getName()
{
return name;
}
public void setName(String name)
{
this.name = name;
}
public Date getBirthday()
{
return birthday;
}
public void setBirthday(Date birthday)
{
this.birthday = birthday;
}
}
}
| User |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/Sinks.java | {
"start": 11709,
"end": 13567
} | interface ____ {
/**
* A {@link Sinks.Empty} which exclusively produces one terminal signal: error or complete.
* It has the following characteristics:
* <ul>
* <li>Multicast</li>
* <li>Backpressure : this sink does not need any demand since it can only signal error or completion</li>
* <li>Replaying: Replay the terminal signal (error or complete).</li>
* </ul>
* Use {@link Sinks.Empty#asMono()} to expose the {@link Mono} view of the sink to downstream consumers.
*/
<T> Sinks.Empty<T> empty();
/**
* A {@link Sinks.One} that works like a conceptual promise: it can be completed
* with or without a value at any time, but only once. This completion is replayed to late subscribers.
* Calling {@link One#emitValue(Object, Sinks.EmitFailureHandler)} (or
* {@link One#tryEmitValue(Object)}) is enough and will implicitly produce
* a {@link Subscriber#onComplete()} signal as well.
* <p>
* Use {@link One#asMono()} to expose the {@link Mono} view of the sink to downstream consumers.
*/
<T> Sinks.One<T> one();
/**
* Help building {@link Sinks.Many} sinks that will broadcast multiple signals to one or more {@link Subscriber}.
* <p>
* Use {@link Many#asFlux()} to expose the {@link Flux} view of the sink to the downstream consumers.
*
* @return {@link ManySpec}
*/
ManySpec many();
/**
* Help building {@link Sinks.ManyWithUpstream} sinks that can also be {@link ManyWithUpstream#subscribeTo(Publisher) subscribed to}
* an upstream {@link Publisher}. This is an advanced use case, see {@link ManyWithUpstream#subscribeTo(Publisher)}.
*
* @return a {@link ManyWithUpstreamUnsafeSpec}
*/
ManyWithUpstreamUnsafeSpec manyWithUpstream();
}
/**
* Provides {@link Sinks.Many} specs for sinks which can emit multiple elements
*/
public | RootSpec |
java | spring-projects__spring-boot | module/spring-boot-http-codec/src/test/java/org/springframework/boot/http/codec/autoconfigure/CodecsAutoConfigurationTests.java | {
"start": 8743,
"end": 8951
} | class ____ {
private final List<CodecCustomizer> codecCustomizers;
private CodecCustomizers(List<CodecCustomizer> codecCustomizers) {
this.codecCustomizers = codecCustomizers;
}
}
}
| CodecCustomizers |
java | quarkusio__quarkus | extensions/flyway/deployment/src/test/java/io/quarkus/flyway/test/FlywayExtensionRepairAtStartTest.java | {
"start": 657,
"end": 2913
} | class ____ {
@RegisterExtension
static final QuarkusDevModeTest config = new QuarkusDevModeTest()
.setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class)
.addClass(FlywayResource.class)
.addAsResource("db/migration/V1.0.0__Quarkus.sql")
.addAsResource("repair-at-start-config.properties", "application.properties"))
.setLogRecordPredicate(r -> true)
.setAllowFailedStart(true);
@Test
@DisplayName("Repair at start works correctly")
public void testRepairUsingDevMode() {
assertThat(RestAssured.get("/flyway/current-version").then().statusCode(200).extract().asString()).isEqualTo("1.0.0");
config.clearLogRecords();
config.modifyResourceFile("db/migration/V1.0.0__Quarkus.sql", s -> s + "\nNONSENSE STATEMENT CHANGING CHECKSUM;");
config.modifyResourceFile("application.properties", s -> s + "\nquarkus.flyway.validate-on-migrate=true");
// trigger application restart
RestAssured.get("/");
await().atMost(30, TimeUnit.SECONDS).untilAsserted(() -> {
assertThat(config.getLogRecords()).anySatisfy(r -> {
assertThat(r.getMessage()).contains("Failed to start application");
assertThat(ExceptionUtil.getRootCause(r.getThrown()).getMessage())
.contains("Migration checksum mismatch for migration version 1.0.0");
});
RestAssured.get("/flyway/current-version").then().statusCode(500);
});
config.clearLogRecords();
config.modifyResourceFile("application.properties", s -> s + "\nquarkus.flyway.repair-at-start=true");
// trigger application restart
RestAssured.get("/");
await().atMost(30, TimeUnit.SECONDS).untilAsserted(() -> {
assertThat(config.getLogRecords()).anySatisfy(
r -> assertThat(r.getMessage()).contains("Successfully repaired schema history table"));
assertThat(RestAssured.get("/flyway/current-version").then().statusCode(200).extract().asString())
.isEqualTo("1.0.0");
});
}
@Path("flyway")
public static | FlywayExtensionRepairAtStartTest |
java | apache__rocketmq | client/src/main/java/org/apache/rocketmq/client/trace/hook/EndTransactionOpenTracingHookImpl.java | {
"start": 1358,
"end": 3114
} | class ____ implements EndTransactionHook {
private Tracer tracer;
public EndTransactionOpenTracingHookImpl(Tracer tracer) {
this.tracer = tracer;
}
@Override
public String hookName() {
return "EndTransactionOpenTracingHook";
}
@Override
public void endTransaction(EndTransactionContext context) {
if (context == null) {
return;
}
Message msg = context.getMessage();
Tracer.SpanBuilder spanBuilder = tracer
.buildSpan(TraceConstants.END_TRANSACTION)
.withTag(Tags.SPAN_KIND, Tags.SPAN_KIND_PRODUCER);
SpanContext spanContext = tracer.extract(Format.Builtin.TEXT_MAP, new TextMapAdapter(msg.getProperties()));
if (spanContext != null) {
spanBuilder.asChildOf(spanContext);
}
Span span = spanBuilder.start();
span.setTag(Tags.PEER_SERVICE, TraceConstants.ROCKETMQ_SERVICE);
span.setTag(Tags.MESSAGE_BUS_DESTINATION, msg.getTopic());
span.setTag(TraceConstants.ROCKETMQ_TAGS, msg.getTags());
span.setTag(TraceConstants.ROCKETMQ_KEYS, msg.getKeys());
span.setTag(TraceConstants.ROCKETMQ_STORE_HOST, context.getBrokerAddr());
span.setTag(TraceConstants.ROCKETMQ_MSG_ID, context.getMsgId());
span.setTag(TraceConstants.ROCKETMQ_MSG_TYPE, MessageType.Trans_msg_Commit.name());
span.setTag(TraceConstants.ROCKETMQ_TRANSACTION_ID, context.getTransactionId());
span.setTag(TraceConstants.ROCKETMQ_TRANSACTION_STATE, context.getTransactionState().name());
span.setTag(TraceConstants.ROCKETMQ_IS_FROM_TRANSACTION_CHECK, context.isFromTransactionCheck());
span.finish();
}
}
| EndTransactionOpenTracingHookImpl |
java | quarkusio__quarkus | integration-tests/picocli/src/test/java/io/quarkus/it/picocli/TestNamedCommand.java | {
"start": 279,
"end": 737
} | class ____ {
@RegisterExtension
static final QuarkusProdModeTest config = createConfig("named-app", EntryCommand.class, NamedCommand.class)
.overrideConfigKey("quarkus.picocli.top-command", "PicocliEntry");
@Test
public void simpleTest() {
Assertions.assertThat(config.getStartupConsoleOutput()).containsOnlyOnce("NamedCommand called!");
Assertions.assertThat(config.getExitCode()).isZero();
}
}
| TestNamedCommand |
java | quarkusio__quarkus | extensions/smallrye-health/runtime/src/main/java/io/quarkus/smallrye/health/runtime/SmallRyeHealthHandler.java | {
"start": 216,
"end": 444
} | class ____ extends SmallRyeHealthHandlerBase {
@Override
protected Uni<SmallRyeHealth> getHealth(SmallRyeHealthReporter reporter, RoutingContext ctx) {
return reporter.getHealthAsync();
}
}
| SmallRyeHealthHandler |
java | hibernate__hibernate-orm | tooling/metamodel-generator/src/test/java/org/hibernate/processor/test/innerclass/InnerInterfaceTest.java | {
"start": 967,
"end": 1059
} | class ____ {
@Id
@GeneratedValue
public Long id;
public String foo;
}
public | MyEntity |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/core/publisher/FluxPublishOnTest.java | {
"start": 45225,
"end": 46103
} | class ____ implements Scheduler {
@Override
public Disposable schedule(Runnable task) {
throw Exceptions.failWithRejected();
}
@Override
public Worker createWorker() {
throw exception();
}
}
@Test
public void scanRunOn() {
Scannable publishOnScannable = Scannable.from(
Flux.just(1).hide()
.publishOn(Schedulers.boundedElastic())
);
Scannable runOnScannable = publishOnScannable.scan(Scannable.Attr.RUN_ON);
assertThat(runOnScannable).isNotNull()
.matches(Scannable::isScanAvailable, "isScanAvailable");
System.out.println(runOnScannable + " isScannable " + runOnScannable.isScanAvailable());
System.out.println(runOnScannable.scan(Scannable.Attr.NAME));
runOnScannable.parents().forEach(System.out::println);
System.out.println(runOnScannable.scan(Scannable.Attr.BUFFERED));
}
}
| FailWorkerScheduler |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/deser/jdk/CollectionDeserializer.java | {
"start": 1131,
"end": 20915
} | class ____
extends ContainerDeserializerBase<Collection<Object>>
{
// // Configuration
/**
* Value deserializer.
*/
protected final ValueDeserializer<Object> _valueDeserializer;
/**
* If element instances have polymorphic type information, this
* is the type deserializer that can handle it
*/
protected final TypeDeserializer _valueTypeDeserializer;
// // Instance construction settings:
protected final ValueInstantiator _valueInstantiator;
/**
* Deserializer that is used iff delegate-based creator is
* to be used for deserializing from JSON Object.
*/
protected final ValueDeserializer<Object> _delegateDeserializer;
// NOTE: no PropertyBasedCreator, as JSON Arrays have no properties
/*
/**********************************************************
/* Life-cycle
/**********************************************************
*/
/**
* Constructor for context-free instances, where we do not yet know
* which property is using this deserializer.
*/
public CollectionDeserializer(JavaType collectionType,
ValueDeserializer<Object> valueDeser,
TypeDeserializer valueTypeDeser, ValueInstantiator valueInstantiator)
{
this(collectionType, valueDeser, valueTypeDeser, valueInstantiator, null, null, null);
}
/**
* Constructor used when creating contextualized instances.
*/
protected CollectionDeserializer(JavaType collectionType,
ValueDeserializer<Object> valueDeser, TypeDeserializer valueTypeDeser,
ValueInstantiator valueInstantiator, ValueDeserializer<Object> delegateDeser,
NullValueProvider nuller, Boolean unwrapSingle)
{
super(collectionType, nuller, unwrapSingle);
_valueDeserializer = valueDeser;
_valueTypeDeserializer = valueTypeDeser;
_valueInstantiator = valueInstantiator;
_delegateDeserializer = delegateDeser;
}
/**
* Copy-constructor that can be used by sub-classes to allow
* copy-on-write styling copying of settings of an existing instance.
*/
protected CollectionDeserializer(CollectionDeserializer src)
{
super(src);
_valueDeserializer = src._valueDeserializer;
_valueTypeDeserializer = src._valueTypeDeserializer;
_valueInstantiator = src._valueInstantiator;
_delegateDeserializer = src._delegateDeserializer;
}
/**
* Fluent-factory method call to construct contextual instance.
*/
@SuppressWarnings("unchecked")
protected CollectionDeserializer withResolved(ValueDeserializer<?> dd,
ValueDeserializer<?> vd, TypeDeserializer vtd,
NullValueProvider nuller, Boolean unwrapSingle)
{
return new CollectionDeserializer(_containerType,
(ValueDeserializer<Object>) vd, vtd,
_valueInstantiator, (ValueDeserializer<Object>) dd,
nuller, unwrapSingle);
}
// Important: do NOT cache if polymorphic values
@Override
public boolean isCachable() {
// 26-Mar-2015, tatu: As per [databind#735], need to be careful
return (_valueDeserializer == null)
&& (_valueTypeDeserializer == null)
&& (_delegateDeserializer == null)
;
}
@Override // since 2.12
public LogicalType logicalType() {
return LogicalType.Collection;
}
/*
/**********************************************************
/* Validation, post-processing (ResolvableDeserializer)
/**********************************************************
*/
/**
* Method called to finalize setup of this deserializer,
* when it is known for which property deserializer is needed
* for.
*/
@Override
public CollectionDeserializer createContextual(DeserializationContext ctxt,
BeanProperty property)
{
// May need to resolve types for delegate-based creators:
ValueDeserializer<Object> delegateDeser = null;
if (_valueInstantiator != null) {
if (_valueInstantiator.canCreateUsingDelegate()) {
JavaType delegateType = _valueInstantiator.getDelegateType(ctxt.getConfig());
if (delegateType == null) {
ctxt.reportBadDefinition(_containerType, String.format(
"Invalid delegate-creator definition for %s: value instantiator (%s) returned true for 'canCreateUsingDelegate()', but null for 'getDelegateType()'",
_containerType,
_valueInstantiator.getClass().getName()));
}
delegateDeser = findDeserializer(ctxt, delegateType, property);
} else if (_valueInstantiator.canCreateUsingArrayDelegate()) {
JavaType delegateType = _valueInstantiator.getArrayDelegateType(ctxt.getConfig());
if (delegateType == null) {
ctxt.reportBadDefinition(_containerType, String.format(
"Invalid delegate-creator definition for %s: value instantiator (%s) returned true for 'canCreateUsingArrayDelegate()', but null for 'getArrayDelegateType()'",
_containerType,
_valueInstantiator.getClass().getName()));
}
delegateDeser = findDeserializer(ctxt, delegateType, property);
}
}
// [databind#1043]: allow per-property allow-wrapping of single overrides:
// 11-Dec-2015, tatu: Should we pass basic `Collection.class`, or more refined? Mostly
// comes down to "List vs Collection" I suppose... for now, pass Collection
Boolean unwrapSingle = findFormatFeature(ctxt, property, Collection.class,
JsonFormat.Feature.ACCEPT_SINGLE_VALUE_AS_ARRAY);
// also, often value deserializer is resolved here:
ValueDeserializer<?> valueDeser = _valueDeserializer;
// May have a content converter
valueDeser = findConvertingContentDeserializer(ctxt, property, valueDeser);
final JavaType vt = _containerType.getContentType();
if (valueDeser == null) {
valueDeser = ctxt.findContextualValueDeserializer(vt, property);
} else { // if directly assigned, probably not yet contextual, so:
valueDeser = ctxt.handleSecondaryContextualization(valueDeser, property, vt);
}
// and finally, type deserializer needs context as well
TypeDeserializer valueTypeDeser = _valueTypeDeserializer;
if (valueTypeDeser != null) {
valueTypeDeser = valueTypeDeser.forProperty(property);
}
NullValueProvider nuller = findContentNullProvider(ctxt, property, valueDeser);
if ((!Objects.equals(unwrapSingle, _unwrapSingle))
|| (nuller != _nullProvider)
|| (delegateDeser != _delegateDeserializer)
|| (valueDeser != _valueDeserializer)
|| (valueTypeDeser != _valueTypeDeserializer)
) {
return withResolved(delegateDeser, valueDeser, valueTypeDeser,
nuller, unwrapSingle);
}
return this;
}
/*
/**********************************************************
/* ContainerDeserializerBase API
/**********************************************************
*/
@Override
public ValueDeserializer<Object> getContentDeserializer() {
return _valueDeserializer;
}
@Override
public ValueInstantiator getValueInstantiator() {
return _valueInstantiator;
}
/*
/**********************************************************
/* ValueDeserializer impl
/**********************************************************
*/
@SuppressWarnings("unchecked")
@Override
public Collection<Object> deserialize(JsonParser p, DeserializationContext ctxt)
throws JacksonException
{
if (_delegateDeserializer != null) {
return (Collection<Object>) _valueInstantiator.createUsingDelegate(ctxt,
_delegateDeserializer.deserialize(p, ctxt));
}
// 16-May-2020, tatu: As per [dataformats-text#199] need to first check for
// possible Array-coercion and only after that String coercion
if (p.isExpectedStartArrayToken()) {
return _deserializeFromArray(p, ctxt, createDefaultInstance(ctxt));
}
// Empty String may be ok; bit tricky to check, however, since
// there is also possibility of "auto-wrapping" of single-element arrays.
// Hence we only accept empty String here.
if (p.hasToken(JsonToken.VALUE_STRING)) {
return _deserializeFromString(p, ctxt, p.getString());
}
return handleNonArray(p, ctxt, createDefaultInstance(ctxt));
}
@SuppressWarnings("unchecked")
protected Collection<Object> createDefaultInstance(DeserializationContext ctxt)
throws JacksonException
{
return (Collection<Object>) _valueInstantiator.createUsingDefault(ctxt);
}
@Override
public Collection<Object> deserialize(JsonParser p, DeserializationContext ctxt,
Collection<Object> result)
throws JacksonException
{
// Ok: must point to START_ARRAY (or equivalent)
if (p.isExpectedStartArrayToken()) {
return _deserializeFromArray(p, ctxt, result);
}
return handleNonArray(p, ctxt, result);
}
@Override
public Object deserializeWithType(JsonParser p, DeserializationContext ctxt,
TypeDeserializer typeDeserializer)
throws JacksonException
{
// In future could check current token... for now this should be enough:
return typeDeserializer.deserializeTypedFromArray(p, ctxt);
}
/**
* Logic extracted to deal with incoming String value.
*
* @since 2.12
*/
@SuppressWarnings("unchecked")
protected Collection<Object> _deserializeFromString(JsonParser p, DeserializationContext ctxt,
String value)
throws JacksonException
{
final Class<?> rawTargetType = handledType();
// 05-Nov-2020, ckozak: As per [jackson-databind#2922] string values may be handled
// using handleNonArray, however empty strings may result in a null or empty collection
// depending on configuration.
// Start by verifying if we got empty/blank string since accessing
// CoercionAction may be costlier than String value we'll almost certainly
// need anyway
if (value.isEmpty()) {
CoercionAction act = ctxt.findCoercionAction(logicalType(), rawTargetType,
CoercionInputShape.EmptyString);
// handleNonArray may successfully deserialize the result (if
// ACCEPT_SINGLE_VALUE_AS_ARRAY is enabled, for example) otherwise it
// is capable of failing just as well as _deserializeFromEmptyString.
if (act != null && act != CoercionAction.Fail) {
return (Collection<Object>) _deserializeFromEmptyString(
p, ctxt, act, rawTargetType, "empty String (\"\")");
}
// note: `CoercionAction.Fail` falls through because we may need to allow
// `ACCEPT_SINGLE_VALUE_AS_ARRAY` handling later on
}
// 26-Mar-2021, tatu: Some day is today; as per [dataformat-xml#460],
// we do need to support blank String too...
else if (_isBlank(value)) {
final CoercionAction act = ctxt.findCoercionFromBlankString(logicalType(), rawTargetType,
CoercionAction.Fail);
if (act != CoercionAction.Fail) {
return (Collection<Object>) _deserializeFromEmptyString(
p, ctxt, act, rawTargetType, "blank String (all whitespace)");
}
// note: `CoercionAction.Fail` falls through because we may need to allow
// `ACCEPT_SINGLE_VALUE_AS_ARRAY` handling later on
}
return handleNonArray(p, ctxt, createDefaultInstance(ctxt));
}
/**
* @since 2.12
*/
protected Collection<Object> _deserializeFromArray(JsonParser p, DeserializationContext ctxt,
Collection<Object> result)
throws JacksonException
{
// [databind#631]: Assign current value, to be accessible by custom serializers
p.assignCurrentValue(result);
// Let's offline handling of values with Object Ids (simplifies code here)
if (_valueDeserializer.getObjectIdReader(ctxt) != null) {
return _deserializeWithObjectId(p, ctxt, result);
}
JsonToken t;
while ((t = p.nextToken()) != JsonToken.END_ARRAY) {
try {
Object value;
if (t == JsonToken.VALUE_NULL) {
if (_skipNullValues) {
continue;
}
value = null;
} else {
value = _deserializeNoNullChecks(p, ctxt);
}
if (value == null) {
value = _nullProvider.getNullValue(ctxt);
// _skipNullValues is checked by _tryToAddNull.
if (value == null) {
_tryToAddNull(p, ctxt, result);
continue;
}
}
result.add(value);
/* 17-Dec-2017, tatu: should not occur at this level...
} catch (UnresolvedForwardReference reference) {
throw DatabindException
.from(p, "Unresolved forward reference but no identity info", reference);
*/
} catch (Exception e) {
boolean wrap = (ctxt == null) || ctxt.isEnabled(DeserializationFeature.WRAP_EXCEPTIONS);
if (!wrap) {
ClassUtil.throwIfRTE(e);
}
throw DatabindException.wrapWithPath(ctxt, e,
new JacksonException.Reference(result, result.size()));
}
}
return result;
}
/**
* Helper method called when current token is no START_ARRAY. Will either
* throw an exception, or try to handle value as if member of implicit
* array, depending on configuration.
*/
@SuppressWarnings("unchecked")
protected final Collection<Object> handleNonArray(JsonParser p, DeserializationContext ctxt,
Collection<Object> result)
throws JacksonException
{
// Implicit arrays from single values?
boolean canWrap = (_unwrapSingle == Boolean.TRUE) ||
((_unwrapSingle == null) &&
ctxt.isEnabled(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY));
if (!canWrap) {
return (Collection<Object>) ctxt.handleUnexpectedToken(_containerType, p);
}
JsonToken t = p.currentToken();
Object value;
try {
if (t == JsonToken.VALUE_NULL) {
// 03-Feb-2017, tatu: Hmmh. I wonder... let's try skipping here, too
if (_skipNullValues) {
return result;
}
value = null;
} else {
value = _deserializeNoNullChecks(p, ctxt);
}
if (value == null) {
value = _nullProvider.getNullValue(ctxt);
// _skipNullValues is checked by _tryToAddNull.
if (value == null) {
_tryToAddNull(p, ctxt, result);
return result;
}
}
} catch (Exception e) {
boolean wrap = ctxt.isEnabled(DeserializationFeature.WRAP_EXCEPTIONS);
if (!wrap) {
ClassUtil.throwIfRTE(e);
}
// note: pass Object.class, not Object[].class, as we need element type for error info
throw DatabindException.wrapWithPath(ctxt, e,
new JacksonException.Reference(Object.class, result.size()));
}
result.add(value);
return result;
}
protected Collection<Object> _deserializeWithObjectId(JsonParser p, DeserializationContext ctxt,
Collection<Object> result)
throws JacksonException
{
// Ok: must point to START_ARRAY (or equivalent)
if (!p.isExpectedStartArrayToken()) {
return handleNonArray(p, ctxt, result);
}
// [databind#631]: Assign current value, to be accessible by custom serializers
p.assignCurrentValue(result);
CollectionReferringAccumulator referringAccumulator =
new CollectionReferringAccumulator(_containerType.getContentType().getRawClass(), result);
JsonToken t;
while ((t = p.nextToken()) != JsonToken.END_ARRAY) {
try {
Object value;
if (t == JsonToken.VALUE_NULL) {
if (_skipNullValues) {
continue;
}
value = null;
} else {
value = _deserializeNoNullChecks(p, ctxt);
}
if (value == null) {
value = _nullProvider.getNullValue(ctxt);
if (value == null && _skipNullValues) {
continue;
}
}
referringAccumulator.add(value);
} catch (UnresolvedForwardReference reference) {
Referring ref = referringAccumulator.handleUnresolvedReference(reference);
reference.getRoid().appendReferring(ref);
} catch (Exception e) {
boolean wrap = (ctxt == null) || ctxt.isEnabled(DeserializationFeature.WRAP_EXCEPTIONS);
if (!wrap) {
ClassUtil.throwIfRTE(e);
}
throw DatabindException.wrapWithPath(ctxt, e,
new JacksonException.Reference(result, result.size()));
}
}
return result;
}
/**
* Deserialize the content of the collection.
* If _valueTypeDeserializer is null, use _valueDeserializer.deserialize; if non-null,
* use _valueDeserializer.deserializeWithType to deserialize value.
* This method only performs deserialization and does not consider _skipNullValues, _nullProvider, etc.
*/
protected Object _deserializeNoNullChecks(JsonParser p,DeserializationContext ctxt)
{
if (_valueTypeDeserializer == null) {
return _valueDeserializer.deserialize(p, ctxt);
}
return _valueDeserializer.deserializeWithType(p, ctxt, _valueTypeDeserializer);
}
/**
* {@code java.util.TreeSet} (and possibly other {@link Collection} types) does not
* allow addition of {@code null} values, so isolate handling here.
*
*/
protected void _tryToAddNull(JsonParser p, DeserializationContext ctxt, Collection<?> set)
{
if (_skipNullValues) {
return;
}
// Ideally we'd have better idea of where nulls are accepted, but first
// let's just produce something better than NPE:
try {
set.add(null);
} catch (NullPointerException e) {
ctxt.handleUnexpectedToken(_valueType, JsonToken.VALUE_NULL, p,
"`java.util.Collection` of type %s does not accept `null` values",
ClassUtil.getTypeDescription(getValueType(ctxt)));
}
}
/**
* Helper | CollectionDeserializer |
java | apache__camel | components/camel-google/camel-google-sheets/src/generated/java/org/apache/camel/component/google/sheets/GoogleSheetsConfigurationConfigurer.java | {
"start": 741,
"end": 6255
} | class ____ extends org.apache.camel.support.component.PropertyConfigurerSupport implements GeneratedPropertyConfigurer, ExtendedPropertyConfigurerGetter {
private static final Map<String, Object> ALL_OPTIONS;
static {
Map<String, Object> map = new CaseInsensitiveMap();
map.put("AccessToken", java.lang.String.class);
map.put("ApiName", org.apache.camel.component.google.sheets.internal.GoogleSheetsApiName.class);
map.put("ApplicationName", java.lang.String.class);
map.put("ClientId", java.lang.String.class);
map.put("ClientSecret", java.lang.String.class);
map.put("Delegate", java.lang.String.class);
map.put("MethodName", java.lang.String.class);
map.put("RefreshToken", java.lang.String.class);
map.put("Scopes", java.lang.String.class);
map.put("ServiceAccountKey", java.lang.String.class);
map.put("SplitResult", boolean.class);
ALL_OPTIONS = map;
}
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
org.apache.camel.component.google.sheets.GoogleSheetsConfiguration target = (org.apache.camel.component.google.sheets.GoogleSheetsConfiguration) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "accesstoken":
case "accessToken": target.setAccessToken(property(camelContext, java.lang.String.class, value)); return true;
case "apiname":
case "apiName": target.setApiName(property(camelContext, org.apache.camel.component.google.sheets.internal.GoogleSheetsApiName.class, value)); return true;
case "applicationname":
case "applicationName": target.setApplicationName(property(camelContext, java.lang.String.class, value)); return true;
case "clientid":
case "clientId": target.setClientId(property(camelContext, java.lang.String.class, value)); return true;
case "clientsecret":
case "clientSecret": target.setClientSecret(property(camelContext, java.lang.String.class, value)); return true;
case "delegate": target.setDelegate(property(camelContext, java.lang.String.class, value)); return true;
case "methodname":
case "methodName": target.setMethodName(property(camelContext, java.lang.String.class, value)); return true;
case "refreshtoken":
case "refreshToken": target.setRefreshToken(property(camelContext, java.lang.String.class, value)); return true;
case "scopes": target.setScopes(property(camelContext, java.lang.String.class, value)); return true;
case "serviceaccountkey":
case "serviceAccountKey": target.setServiceAccountKey(property(camelContext, java.lang.String.class, value)); return true;
case "splitresult":
case "splitResult": target.setSplitResult(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public Map<String, Object> getAllOptions(Object target) {
return ALL_OPTIONS;
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "accesstoken":
case "accessToken": return java.lang.String.class;
case "apiname":
case "apiName": return org.apache.camel.component.google.sheets.internal.GoogleSheetsApiName.class;
case "applicationname":
case "applicationName": return java.lang.String.class;
case "clientid":
case "clientId": return java.lang.String.class;
case "clientsecret":
case "clientSecret": return java.lang.String.class;
case "delegate": return java.lang.String.class;
case "methodname":
case "methodName": return java.lang.String.class;
case "refreshtoken":
case "refreshToken": return java.lang.String.class;
case "scopes": return java.lang.String.class;
case "serviceaccountkey":
case "serviceAccountKey": return java.lang.String.class;
case "splitresult":
case "splitResult": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
org.apache.camel.component.google.sheets.GoogleSheetsConfiguration target = (org.apache.camel.component.google.sheets.GoogleSheetsConfiguration) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "accesstoken":
case "accessToken": return target.getAccessToken();
case "apiname":
case "apiName": return target.getApiName();
case "applicationname":
case "applicationName": return target.getApplicationName();
case "clientid":
case "clientId": return target.getClientId();
case "clientsecret":
case "clientSecret": return target.getClientSecret();
case "delegate": return target.getDelegate();
case "methodname":
case "methodName": return target.getMethodName();
case "refreshtoken":
case "refreshToken": return target.getRefreshToken();
case "scopes": return target.getScopes();
case "serviceaccountkey":
case "serviceAccountKey": return target.getServiceAccountKey();
case "splitresult":
case "splitResult": return target.isSplitResult();
default: return null;
}
}
}
| GoogleSheetsConfigurationConfigurer |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/engine/ThreadPoolMergeExecutorServiceDiskSpaceTests.java | {
"start": 2661,
"end": 5459
} | class ____ extends ESTestCase {
private static TestMockFileStore aFileStore;
private static TestMockFileStore bFileStore;
private static String aPathPart;
private static String bPathPart;
private static int mergeExecutorThreadCount;
private static Settings settings;
private static TestCapturingThreadPool testThreadPool;
private static NodeEnvironment nodeEnvironment;
private static boolean setThreadPoolMergeSchedulerSetting;
@Before
public void setupTestEnv() throws Exception {
aFileStore = new TestMockFileStore("mocka");
bFileStore = new TestMockFileStore("mockb");
FileSystem current = PathUtils.getDefaultFileSystem();
aPathPart = "a-" + randomUUID();
bPathPart = "b-" + randomUUID();
FileSystemProvider mock = new TestMockUsableSpaceFileSystemProvider(current);
PathUtilsForTesting.installMock(mock.getFileSystem(null));
Path path = PathUtils.get(createTempDir().toString());
// use 2 data paths
String[] paths = new String[] { path.resolve(aPathPart).toString(), path.resolve(bPathPart).toString() };
// some tests hold one merge thread blocked, and need at least one other runnable
mergeExecutorThreadCount = randomIntBetween(2, 8);
Settings.Builder settingsBuilder = Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), path)
.putList(Environment.PATH_DATA_SETTING.getKey(), paths)
// the default of "5s" slows down testing
.put(ThreadPoolMergeExecutorService.INDICES_MERGE_DISK_CHECK_INTERVAL_SETTING.getKey(), "50ms")
.put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), mergeExecutorThreadCount);
setThreadPoolMergeSchedulerSetting = randomBoolean();
if (setThreadPoolMergeSchedulerSetting) {
settingsBuilder.put(ThreadPoolMergeScheduler.USE_THREAD_POOL_MERGE_SCHEDULER_SETTING.getKey(), true);
}
settings = settingsBuilder.build();
testThreadPool = new TestCapturingThreadPool("test", settings);
nodeEnvironment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
}
@After
public void removeMockUsableSpaceFS() {
if (setThreadPoolMergeSchedulerSetting) {
assertWarnings(
"[indices.merge.scheduler.use_thread_pool] setting was deprecated in Elasticsearch "
+ "and will be removed in a future release. See the breaking changes documentation for the next major version."
);
}
PathUtilsForTesting.teardown();
aFileStore = null;
bFileStore = null;
testThreadPool.close();
nodeEnvironment.close();
}
static | ThreadPoolMergeExecutorServiceDiskSpaceTests |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/utils/ExecNodeMetadataUtilTest.java | {
"start": 21203,
"end": 23252
} | class ____ extends ExecNodeBase<RowData> {
protected AbstractDummyNode(
ExecNodeContext context,
ReadableConfig persistedConfig,
List<InputProperty> properties,
LogicalType outputType,
String description) {
super(10, context, persistedConfig, properties, outputType, description);
}
@Override
protected Transformation<RowData> translateToPlanInternal(
PlannerBase planner, ExecNodeConfig config) {
return null;
}
}
private static final ConfigOption<Integer> OPTION_1 =
key("option1").intType().defaultValue(-1).withDescription("option1");
private static final ConfigOption<Integer> OPTION_2 =
key("option2").intType().defaultValue(-1).withDescription("option2");
private static final ConfigOption<Integer> OPTION_3 =
key("option3")
.intType()
.defaultValue(-1)
.withDeprecatedKeys("option3-deprecated")
.withDescription("option3");
private static final ConfigOption<Integer> OPTION_4 =
key("option4")
.intType()
.defaultValue(-1)
.withFallbackKeys("option4-fallback")
.withDescription("option4");
private static final ConfigOption<Integer> OPTION_5 =
key("option5")
.intType()
.defaultValue(-1)
.withFallbackKeys("option5-fallback")
.withDeprecatedKeys("option5-deprecated")
.withDescription("option5");
private static final ConfigOption<Integer> OPTION_6 =
key("option6")
.intType()
.defaultValue(-1)
.withDeprecatedKeys("option6-deprecated")
.withFallbackKeys("option6-fallback")
.withDescription("option6");
}
| AbstractDummyNode |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/merge/MergeUnsavedEntitiesTest.java | {
"start": 4350,
"end": 4900
} | class ____ {
@Id
private Long id;
private String name;
@ManyToOne(fetch = FetchType.LAZY)
private Parent parent;
public Child() {
}
public Child(Long id, String name) {
this.id = id;
this.name = name;
}
public Long getId() {
return this.id;
}
public Parent getParent() {
return parent;
}
public void setId(Long id) {
this.id = id;
}
public void setParent(Parent parent) {
this.parent = parent;
}
public String getName() {
return name;
}
}
@Entity(name = "Book")
public static | Child |
java | lettuce-io__lettuce-core | src/test/java/io/lettuce/core/AuthenticationIntegrationTests.java | {
"start": 7986,
"end": 8974
} | class ____ implements CommandListener {
final List<RedisCommand<?, ?, ?>> succeeded = new ArrayList<>();
@Override
public void commandSucceeded(CommandSucceededEvent event) {
synchronized (succeeded) {
succeeded.add(event.getCommand());
}
}
}
private boolean isAuthCommandWithCredentials(RedisCommand<?, ?, ?> command, String username, char[] password) {
if (command.getType() == CommandType.AUTH) {
CommandArgs<?, ?> args = command.getArgs();
return args.toCommandString().contains(username) && args.toCommandString().contains(String.valueOf(password));
}
return false;
}
private SimpleToken testToken(String username, char[] password) {
return new SimpleToken(username, String.valueOf(password), Instant.now().plusMillis(500).toEpochMilli(),
Instant.now().toEpochMilli(), Collections.emptyMap());
}
}
| TestCommandListener |
java | spring-projects__spring-framework | spring-beans/src/test/java/org/springframework/beans/factory/aot/BeanDefinitionMethodGeneratorTests.java | {
"start": 4119,
"end": 14730
} | class ____ {
private final TestGenerationContext generationContext;
private final DefaultListableBeanFactory beanFactory;
private final MockBeanRegistrationsCode beanRegistrationsCode;
private final BeanDefinitionMethodGeneratorFactory methodGeneratorFactory;
BeanDefinitionMethodGeneratorTests() {
this.generationContext = new TestGenerationContext();
this.beanFactory = new DefaultListableBeanFactory();
this.methodGeneratorFactory = new BeanDefinitionMethodGeneratorFactory(
AotServices.factoriesAndBeans(new MockSpringFactoriesLoader(), this.beanFactory));
this.beanRegistrationsCode = new MockBeanRegistrationsCode(this.generationContext);
}
@Test
void generateWithBeanClassSetsOnlyBeanClass() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(TestBean.class);
RegisteredBean registeredBean = registerBean(beanDefinition);
BeanDefinitionMethodGenerator generator = new BeanDefinitionMethodGenerator(
this.methodGeneratorFactory, registeredBean, null,
Collections.emptyList());
MethodReference method = generator.generateBeanDefinitionMethod(
this.generationContext, this.beanRegistrationsCode);
compile(method, (actual, compiled) -> {
SourceFile sourceFile = compiled.getSourceFile(".*BeanDefinitions");
assertThat(sourceFile).contains("Get the bean definition for 'testBean'");
assertThat(sourceFile).contains("new RootBeanDefinition(TestBean.class)");
assertThat(sourceFile).doesNotContain("setTargetType(");
assertThat(sourceFile).contains("setInstanceSupplier(TestBean::new)");
assertThat(actual).isInstanceOf(RootBeanDefinition.class);
});
}
@Test
void generateWithTargetTypeWithNoGenericSetsOnlyBeanClass() {
RootBeanDefinition beanDefinition = new RootBeanDefinition();
beanDefinition.setTargetType(TestBean.class);
RegisteredBean registeredBean = registerBean(beanDefinition);
BeanDefinitionMethodGenerator generator = new BeanDefinitionMethodGenerator(
this.methodGeneratorFactory, registeredBean, null,
Collections.emptyList());
MethodReference method = generator.generateBeanDefinitionMethod(
this.generationContext, this.beanRegistrationsCode);
compile(method, (actual, compiled) -> {
SourceFile sourceFile = compiled.getSourceFile(".*BeanDefinitions");
assertThat(sourceFile).contains("Get the bean definition for 'testBean'");
assertThat(sourceFile).contains("new RootBeanDefinition(TestBean.class)");
assertThat(sourceFile).contains("setInstanceSupplier(TestBean::new)");
assertThat(actual).isInstanceOf(RootBeanDefinition.class);
});
}
@Test
void generateWithTargetTypeUsingGenericsSetsBothBeanClassAndTargetType() {
RootBeanDefinition beanDefinition = new RootBeanDefinition();
beanDefinition.setTargetType(ResolvableType.forClassWithGenerics(GenericBean.class, Integer.class));
RegisteredBean registeredBean = registerBean(beanDefinition);
BeanDefinitionMethodGenerator generator = new BeanDefinitionMethodGenerator(
this.methodGeneratorFactory, registeredBean, null,
Collections.emptyList());
MethodReference method = generator.generateBeanDefinitionMethod(
this.generationContext, this.beanRegistrationsCode);
compile(method, (actual, compiled) -> {
assertThat(actual.getResolvableType().resolve()).isEqualTo(GenericBean.class);
SourceFile sourceFile = compiled.getSourceFile(".*BeanDefinitions");
assertThat(sourceFile).contains("Get the bean definition for 'testBean'");
assertThat(sourceFile).contains("new RootBeanDefinition(GenericBean.class)");
assertThat(sourceFile).contains(
"setTargetType(ResolvableType.forClassWithGenerics(GenericBean.class, Integer.class))");
assertThat(sourceFile).contains("setInstanceSupplier(GenericBean::new)");
assertThat(actual).isInstanceOf(RootBeanDefinition.class);
});
}
@Test
void generateWithBeanClassAndFactoryMethodNameSetsTargetTypeAndBeanClass() {
	// A definition with an explicit bean class AND a factory method should emit
	// both the class-based constructor call and a setTargetType(...) call.
	this.beanFactory.registerBeanDefinition("factory",
			new RootBeanDefinition(SimpleBeanConfiguration.class));
	RootBeanDefinition definition = new RootBeanDefinition(SimpleBean.class);
	definition.setFactoryBeanName("factory");
	definition.setFactoryMethodName("simpleBean");
	BeanDefinitionMethodGenerator methodGenerator = new BeanDefinitionMethodGenerator(
			this.methodGeneratorFactory, registerBean(definition), null, Collections.emptyList());
	MethodReference generatedMethod = methodGenerator.generateBeanDefinitionMethod(
			this.generationContext, this.beanRegistrationsCode);
	compile(generatedMethod, (actual, compiled) -> {
		SourceFile sourceFile = compiled.getSourceFile(".*BeanDefinitions");
		assertThat(sourceFile).contains("Get the bean definition for 'testBean'");
		assertThat(sourceFile).contains("new RootBeanDefinition(SimpleBean.class)");
		assertThat(sourceFile).contains("setTargetType(SimpleBean.class)");
		assertThat(actual).isInstanceOf(RootBeanDefinition.class);
	});
}
@Test
void generateWithTargetTypeAndFactoryMethodNameSetsOnlyBeanClass() {
	// With only a target type (no explicit bean class) plus a factory method,
	// the generated code should use the class-based constructor and must NOT
	// emit a redundant setTargetType(...) call.
	this.beanFactory.registerBeanDefinition("factory",
			new RootBeanDefinition(SimpleBeanConfiguration.class));
	RootBeanDefinition definition = new RootBeanDefinition();
	definition.setTargetType(SimpleBean.class);
	definition.setFactoryBeanName("factory");
	definition.setFactoryMethodName("simpleBean");
	BeanDefinitionMethodGenerator methodGenerator = new BeanDefinitionMethodGenerator(
			this.methodGeneratorFactory, registerBean(definition), null, Collections.emptyList());
	MethodReference generatedMethod = methodGenerator.generateBeanDefinitionMethod(
			this.generationContext, this.beanRegistrationsCode);
	compile(generatedMethod, (actual, compiled) -> {
		SourceFile sourceFile = compiled.getSourceFile(".*BeanDefinitions");
		assertThat(sourceFile).contains("Get the bean definition for 'testBean'");
		assertThat(sourceFile).contains("new RootBeanDefinition(SimpleBean.class)");
		assertThat(sourceFile).doesNotContain("setTargetType(");
		assertThat(actual).isInstanceOf(RootBeanDefinition.class);
	});
}
@Test
void generateWithBeanClassAndTargetTypeDifferentSetsBoth() {
	// When bean class (One) and target type (Implementation) differ, the
	// generated code must carry both so neither piece of type information is lost.
	RootBeanDefinition definition = new RootBeanDefinition(One.class);
	definition.setTargetType(Implementation.class);
	definition.setResolvedFactoryMethod(ReflectionUtils.findMethod(TestHierarchy.class, "oneBean"));
	BeanDefinitionMethodGenerator methodGenerator = new BeanDefinitionMethodGenerator(
			this.methodGeneratorFactory, registerBean(definition), null, Collections.emptyList());
	MethodReference generatedMethod = methodGenerator.generateBeanDefinitionMethod(
			this.generationContext, this.beanRegistrationsCode);
	compile(generatedMethod, (actual, compiled) -> {
		SourceFile sourceFile = compiled.getSourceFile(".*BeanDefinitions");
		assertThat(sourceFile).contains("Get the bean definition for 'testBean'");
		assertThat(sourceFile).contains("new RootBeanDefinition(TestHierarchy.One.class)");
		assertThat(sourceFile).contains("setTargetType(TestHierarchy.Implementation.class)");
		assertThat(actual).isInstanceOf(RootBeanDefinition.class);
	});
}
@Test
// NOTE(review): method name has a typo ("Typ" -> "Type"); kept as-is since the
// name is the test's discoverable identifier.
void generateWithBeanClassAndTargetTypWithGenericSetsBoth() {
	// Bean class (Integer) and a generic target type (GenericBean<Integer>)
	// differ, so the generated code must record both.
	RootBeanDefinition definition = new RootBeanDefinition(Integer.class);
	definition.setTargetType(ResolvableType.forClassWithGenerics(GenericBean.class, Integer.class));
	BeanDefinitionMethodGenerator methodGenerator = new BeanDefinitionMethodGenerator(
			this.methodGeneratorFactory, registerBean(definition), null, Collections.emptyList());
	MethodReference generatedMethod = methodGenerator.generateBeanDefinitionMethod(
			this.generationContext, this.beanRegistrationsCode);
	compile(generatedMethod, (actual, compiled) -> {
		assertThat(actual.getResolvableType().resolve()).isEqualTo(GenericBean.class);
		SourceFile sourceFile = compiled.getSourceFile(".*BeanDefinitions");
		assertThat(sourceFile).contains("Get the bean definition for 'testBean'");
		assertThat(sourceFile).contains("new RootBeanDefinition(Integer.class)");
		assertThat(sourceFile).contains(
				"setTargetType(ResolvableType.forClassWithGenerics(GenericBean.class, Integer.class))");
		assertThat(sourceFile).contains("setInstanceSupplier(GenericBean::new)");
		assertThat(actual).isInstanceOf(RootBeanDefinition.class);
	});
}
@Test
// NOTE(review): method name has a typo ("USe" -> "Use"); kept as-is since the
// name is the test's discoverable identifier.
void generateBeanDefinitionMethodUSeBeanClassNameIfNotReachable() {
	// PackagePrivateTestBean is not visible from the generated package, so the
	// generated code must fall back to the fully-qualified class-name String
	// constructor rather than a direct .class literal.
	RootBeanDefinition definition = new RootBeanDefinition(PackagePrivateTestBean.class);
	definition.setTargetType(TestBean.class);
	BeanDefinitionMethodGenerator methodGenerator = new BeanDefinitionMethodGenerator(
			this.methodGeneratorFactory, registerBean(definition), null, Collections.emptyList());
	MethodReference generatedMethod = methodGenerator.generateBeanDefinitionMethod(
			this.generationContext, this.beanRegistrationsCode);
	compile(generatedMethod, (actual, compiled) -> {
		SourceFile sourceFile = compiled.getSourceFile(".*BeanDefinitions");
		assertThat(sourceFile).contains("Get the bean definition for 'testBean'");
		assertThat(sourceFile).contains("new RootBeanDefinition(\"org.springframework.beans.factory.aot.PackagePrivateTestBean\"");
		assertThat(sourceFile).contains("setTargetType(TestBean.class)");
		assertThat(sourceFile).contains("setInstanceSupplier(TestBean::new)");
		assertThat(actual).isInstanceOf(RootBeanDefinition.class);
	});
}
@Test
void generateBeanDefinitionMethodWhenHasInnerClassTargetMethodGeneratesMethod() {
this.beanFactory.registerBeanDefinition("testBeanConfiguration", new RootBeanDefinition(
InnerBeanConfiguration.Simple.class));
RootBeanDefinition beanDefinition = new RootBeanDefinition(SimpleBean.class);
beanDefinition.setFactoryBeanName("testBeanConfiguration");
beanDefinition.setFactoryMethodName("simpleBean");
RegisteredBean registeredBean = registerBean(beanDefinition);
BeanDefinitionMethodGenerator generator = new BeanDefinitionMethodGenerator(
this.methodGeneratorFactory, registeredBean, null,
Collections.emptyList());
MethodReference method = generator.generateBeanDefinitionMethod(
this.generationContext, this.beanRegistrationsCode);
compile(method, (actual, compiled) -> {
SourceFile sourceFile = compiled.getSourceFile(".*BeanDefinitions");
assertThat(sourceFile.getClassName()).endsWith("InnerBeanConfiguration__BeanDefinitions");
assertThat(sourceFile).contains("public static | BeanDefinitionMethodGeneratorTests |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/util/Loggers.java | {
"start": 13835,
"end": 14220
} | class ____ implements Function<String, Logger> {
@Override
public Logger apply(String name) {
return new JdkLogger(java.util.logging.Logger.getLogger(name));
}
}
/**
* A {@link Logger} that has all levels enabled. error and warn log to System.err
* while all other levels log to System.out (printstreams can be changed via constructor).
*/
static final | JdkLoggerFactory |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.