language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/NodeManagerTestBase.java
|
{
"start": 5991,
"end": 6796
}
|
class ____ extends ContainerManagerImpl {
public boolean signaled = false;
public MyContainerManager(Context context, ContainerExecutor exec,
DeletionService deletionContext, NodeStatusUpdater nodeStatusUpdater,
NodeManagerMetrics metrics,
LocalDirsHandlerService dirsHandler) {
super(context, exec, deletionContext, nodeStatusUpdater,
metrics, dirsHandler);
}
@Override
public void handle(ContainerManagerEvent event) {
if (event.getType() == ContainerManagerEventType.SIGNAL_CONTAINERS) {
signaled = true;
}
}
}
@BeforeEach
public void setUp() throws IOException {
nmLocalDir.mkdirs();
tmpDir.mkdirs();
logsDir.mkdirs();
remoteLogsDir.mkdirs();
conf = createNMConfig();
}
}
|
MyContainerManager
|
java
|
apache__flink
|
flink-table/flink-table-common/src/main/java/org/apache/flink/table/types/logical/BooleanType.java
|
{
"start": 1184,
"end": 2788
}
|
class ____ extends LogicalType {
private static final long serialVersionUID = 1L;
private static final String FORMAT = "BOOLEAN";
private static final Set<String> NULL_OUTPUT_CONVERSION =
conversionSet(Boolean.class.getName());
private static final Set<String> NOT_NULL_INPUT_OUTPUT_CONVERSION =
conversionSet(Boolean.class.getName(), boolean.class.getName());
private static final Class<?> DEFAULT_CONVERSION = Boolean.class;
public BooleanType(boolean isNullable) {
super(isNullable, LogicalTypeRoot.BOOLEAN);
}
public BooleanType() {
this(true);
}
@Override
public LogicalType copy(boolean isNullable) {
return new BooleanType(isNullable);
}
@Override
public String asSerializableString() {
return withNullability(FORMAT);
}
@Override
public boolean supportsInputConversion(Class<?> clazz) {
return NOT_NULL_INPUT_OUTPUT_CONVERSION.contains(clazz.getName());
}
@Override
public boolean supportsOutputConversion(Class<?> clazz) {
if (isNullable()) {
return NULL_OUTPUT_CONVERSION.contains(clazz.getName());
}
return NOT_NULL_INPUT_OUTPUT_CONVERSION.contains(clazz.getName());
}
@Override
public Class<?> getDefaultConversion() {
return DEFAULT_CONVERSION;
}
@Override
public List<LogicalType> getChildren() {
return Collections.emptyList();
}
@Override
public <R> R accept(LogicalTypeVisitor<R> visitor) {
return visitor.visit(this);
}
}
|
BooleanType
|
java
|
spring-projects__spring-security
|
test/src/test/java/org/springframework/security/test/web/servlet/response/Gh3409Tests.java
|
{
"start": 3319,
"end": 3696
}
|
class ____ {
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.authorizeHttpRequests((requests) -> requests
.requestMatchers("/public/**").permitAll()
.anyRequest().authenticated())
.formLogin(withDefaults())
.httpBasic(withDefaults());
return http.build();
// @formatter:on
}
}
}
|
Config
|
java
|
apache__commons-lang
|
src/test/java/org/apache/commons/lang3/event/EventUtilsTest.java
|
{
"start": 3648,
"end": 3978
}
|
class ____ {
private final EventListenerSupport<MultipleEventListener> listeners = EventListenerSupport.create(MultipleEventListener.class);
public void addMultipleEventListener(final MultipleEventListener listener) {
listeners.addListener(listener);
}
}
public static
|
MultipleEventSource
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvtVO/AuditStatusType.java
|
{
"start": 85,
"end": 1161
}
|
enum ____ implements IntEnum<AuditStatusType> {
AUDIT_FAILURE(0, "审核失败", "FAILED"),
AUDIT_SUCCESS(1, "成功", "SUCCEED"),
AUDIT_NO_SUBMIT(2, "未实名认证", "NONAUDIT"),
AUDIT_SUBMIT(3, "审核中", "AUDITING");
private int code;
private String desc;
private String enCode;
private AuditStatusType(int code) {
this.code = code;
}
private AuditStatusType(int code, String desc, String enCode) {
this.code = code;
this.desc = desc;
this.enCode = enCode;
}
public static AuditStatusType valuesOf(String enCode) {
AuditStatusType[] arr$ = values();
int len$ = arr$.length;
for(int i$ = 0; i$ < len$; ++i$) {
AuditStatusType temp = arr$[i$];
if(temp.getEnCode().equals(enCode)) {
return temp;
}
}
return null;
}
public String getDesc() {
return this.desc;
}
public String getEnCode() {
return this.enCode;
}
public int getCode() {
return this.code;
}
}
|
AuditStatusType
|
java
|
spring-projects__spring-framework
|
spring-core/src/test/java/org/springframework/core/annotation/ComposedRepeatableAnnotationsTests.java
|
{
"start": 9866,
"end": 9986
}
|
interface ____ {
// InvalidRepeatable[] value();
}
@Retention(RetentionPolicy.RUNTIME)
@
|
ContainerMissingValueAttribute
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java
|
{
"start": 21982,
"end": 25218
}
|
enum ____ {
NONE, // this field is not collapsable
KEYWORD,
NUMERIC
}
/**
* This method is used to support auto-complete services and implementations
* are expected to find terms beginning with the provided string very quickly.
* If fields cannot look up matching terms quickly they should return null.
* The returned TermEnum should implement next(), term() and doc_freq() methods
* but postings etc are not required.
* @param reader an index reader
* @param prefix the partially complete word the user has typed (can be empty)
* @param caseInsensitive if prefix matches should be case insensitive
* @param searchAfter - usually null. If supplied the TermsEnum result must be positioned after the provided term (used for pagination)
* @return null or an enumeration of matching terms
* @throws IOException Errors accessing data
*/
public TermsEnum getTerms(IndexReader reader, String prefix, boolean caseInsensitive, String searchAfter) throws IOException {
return null;
}
/**
* Validate that this field can be the target of {@link IndexMetadata#INDEX_ROUTING_PATH}.
*/
public void validateMatchedRoutingPath(String routingPath) {
if (hasScriptValues()) {
throw new IllegalArgumentException(
"All fields that match routing_path must be configured with [time_series_dimension: true] "
+ "or flattened fields with a list of dimensions in [time_series_dimensions] and "
+ "without the [script] parameter. ["
+ name()
+ "] has a [script] parameter."
);
}
if (isDimension() == false) {
throw new IllegalArgumentException(
"All fields that match routing_path "
+ "must be configured with [time_series_dimension: true] "
+ "or flattened fields with a list of dimensions in [time_series_dimensions] and "
+ "without the [script] parameter. ["
+ name()
+ "] was not a dimension."
);
}
}
/**
* This method is used to support _field_caps when include_empty_fields is set to
* {@code false}. In that case we return only fields with value in an index. This method
* gets as input FieldInfos and returns if the field is non-empty. This method needs to
* be overwritten where fields don't have footprint in Lucene or their name differs from
* {@link MappedFieldType#name()}
* @param fieldInfos field information
* @return {@code true} if field is present in fieldInfos {@code false} otherwise
*/
public boolean fieldHasValue(FieldInfos fieldInfos) {
return fieldInfos.fieldInfo(name()) != null;
}
/**
* Returns a loader for ESQL or {@code null} if the field doesn't support
* ESQL.
*/
public BlockLoader blockLoader(BlockLoaderContext blContext) {
return null;
}
public boolean supportsBlockLoaderConfig(BlockLoaderFunctionConfig config, FieldExtractPreference preference) {
return false;
}
public
|
CollapseType
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/query/hql/instantiation/InstantiationWithGenericsExpressionTest.java
|
{
"start": 5366,
"end": 5585
}
|
class ____ extends AbstractEntity<Long> {
protected String data;
public String getData() {
return data;
}
public void setData(String data) {
this.data = data;
}
}
@Imported
public static
|
ConcreteEntity
|
java
|
FasterXML__jackson-databind
|
src/main/java/tools/jackson/databind/util/EnumValues.java
|
{
"start": 6069,
"end": 7944
}
|
enum ____ for class "+enumCls.getName());
}
return enumValues;
}
protected static String _findNameToUse(String explicitName, String otherName, boolean toLowerCase) {
// If explicitly named, like @JsonProperty-annotated, then use it
if (explicitName != null) {
return explicitName;
}
// [databind#4788] Since 2.18.2 : EnumFeature.WRITE_ENUMS_TO_LOWERCASE should not
// override @JsonProperty values
if (toLowerCase) {
return otherName.toLowerCase();
}
return otherName;
}
/*
/**********************************************************************
/* Public API
/**********************************************************************
*/
public SerializableString serializedValueFor(Enum<?> key) {
return _textual[key.ordinal()];
}
public Collection<SerializableString> values() {
return Arrays.asList(_textual);
}
/**
* Convenience accessor for getting raw Enum instances.
*/
public List<Enum<?>> enums() {
return Arrays.asList(_values);
}
/**
* Method used for serialization and introspection by core Jackson code.
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
public EnumMap<?,SerializableString> internalMap() {
EnumMap<?,SerializableString> result = _asMap;
if (result == null) {
// Alas, need to create it in a round-about way, due to typing constraints...
Map<Enum<?>,SerializableString> map = new LinkedHashMap<>();
for (Enum<?> en : _values) {
map.put(en, _textual[en.ordinal()]);
}
_asMap = result = new EnumMap(map);
}
return result;
}
public Class<Enum<?>> getEnumClass() { return _enumClass; }
}
|
constants
|
java
|
grpc__grpc-java
|
core/src/main/java/io/grpc/internal/AbstractClientStream.java
|
{
"start": 1589,
"end": 1723
}
|
class ____ {@link ClientStream} implementations.
*
* <p>Must only be called from the sending application thread.
*/
public abstract
|
for
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/UnnecessarySetDefaultTest.java
|
{
"start": 2321,
"end": 3085
}
|
class ____ {
{
NullPointerTester tester = new NullPointerTester();
tester
.setDefault(ImmutableList.class, ImmutableList.of(42))
.setDefault(ImmutableList.class, ImmutableList.of(42));
}
}
""")
.doTest();
}
@Test
public void exhaustive() throws ReflectiveOperationException {
Field f = ArbitraryInstances.class.getDeclaredField("DEFAULTS");
f.setAccessible(true);
ClassToInstanceMap<?> actual = (ClassToInstanceMap<?>) f.get(null);
assertThat(UnnecessarySetDefault.DEFAULTS.keySet())
.containsAnyIn(
actual.keySet().stream().map(Class::getCanonicalName).collect(toImmutableList()));
}
}
|
Test
|
java
|
elastic__elasticsearch
|
x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java
|
{
"start": 4630,
"end": 31508
}
|
class ____ extends BaseFrozenSearchableSnapshotsIntegTestCase {
public void testCreateAndRestorePartialSearchableSnapshot() throws Exception {
final String fsRepoName = randomAlphaOfLength(10);
final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
final String aliasName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
final String restoredIndexName = randomBoolean() ? indexName : randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
final String snapshotName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
createRepository(
fsRepoName,
"fs",
Settings.builder().put("location", randomRepoPath()).put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
);
// Peer recovery always copies .liv files but we do not permit writing to searchable snapshot directories so this doesn't work, but
// we can bypass this by forcing soft deletes to be used. TODO this restriction can be lifted when #55142 is resolved.
final Settings.Builder originalIndexSettings = Settings.builder().put(INDEX_SOFT_DELETES_SETTING.getKey(), true);
if (randomBoolean()) {
// INDEX_CHECK_ON_STARTUP requires expensive processing due to verification the integrity of many important files during
// a shard recovery or relocation. Therefore, it takes lots of time for the files to clean up and the assertShardFolder
// check may not complete in 30s.
originalIndexSettings.put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), "false");
}
assertAcked(prepareCreate(indexName, originalIndexSettings));
assertAcked(indicesAdmin().prepareAliases(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).addAlias(indexName, aliasName));
populateIndex(indexName, 10_000);
final TotalHits originalAllHits;
var originalResponse = internalCluster().client().prepareSearch(indexName).setTrackTotalHits(true).get();
try {
originalAllHits = originalResponse.getHits().getTotalHits();
} finally {
originalResponse.decRef();
}
final TotalHits originalBarHits;
var barResponse = internalCluster().client()
.prepareSearch(indexName)
.setTrackTotalHits(true)
.setQuery(matchQuery("foo", "bar"))
.get();
try {
originalBarHits = barResponse.getHits().getTotalHits();
} finally {
barResponse.decRef();
}
logger.info("--> [{}] in total, of which [{}] match the query", originalAllHits, originalBarHits);
expectThrows(
ResourceNotFoundException.class,
"Searchable snapshot stats on a non snapshot searchable index should fail",
() -> client().execute(SearchableSnapshotsStatsAction.INSTANCE, new SearchableSnapshotsStatsRequest()).actionGet()
);
final SnapshotInfo snapshotInfo = createFullSnapshot(fsRepoName, snapshotName);
ensureGreen(indexName);
assertShardFolders(indexName, false);
assertThat(
clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT)
.clear()
.setMetadata(true)
.setIndices(indexName)
.get()
.getState()
.metadata()
.getProject()
.index(indexName)
.getTimestampRange(),
sameInstance(IndexLongFieldRange.UNKNOWN)
);
final boolean deletedBeforeMount = randomBoolean();
if (deletedBeforeMount) {
assertAcked(indicesAdmin().prepareDelete(indexName));
} else {
assertAcked(indicesAdmin().prepareClose(indexName));
}
logger.info("--> restoring partial index [{}] with cache enabled", restoredIndexName);
Settings.Builder indexSettingsBuilder = Settings.builder().put(SearchableSnapshots.SNAPSHOT_CACHE_ENABLED_SETTING.getKey(), true);
if (randomBoolean()) {
var nonCachedExtensions = randomSubsetOf(Arrays.asList("fdt", "fdx", "nvd", "dvd", "tip", "cfs", "dim"));
indexSettingsBuilder.putList(SearchableSnapshots.SNAPSHOT_CACHE_EXCLUDED_FILE_TYPES_SETTING.getKey(), nonCachedExtensions);
}
if (randomBoolean()) {
indexSettingsBuilder.put(
SearchableSnapshots.SNAPSHOT_UNCACHED_CHUNK_SIZE_SETTING.getKey(),
ByteSizeValue.ofBytes(randomLongBetween(10, 100_000))
);
}
final int expectedReplicas;
if (randomBoolean()) {
expectedReplicas = numberOfReplicas();
indexSettingsBuilder.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, expectedReplicas);
} else {
expectedReplicas = 0;
}
final String indexCheckOnStartup;
if (randomBoolean()) {
indexCheckOnStartup = randomFrom("false", "true", "checksum");
indexSettingsBuilder.put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), indexCheckOnStartup);
} else {
indexCheckOnStartup = "false";
}
final String expectedDataTiersPreference;
expectedDataTiersPreference = MountSearchableSnapshotRequest.Storage.SHARED_CACHE.defaultDataTiersPreference();
indexSettingsBuilder.put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.ZERO);
final AtomicBoolean statsWatcherRunning = new AtomicBoolean(true);
final Thread statsWatcher = new Thread(() -> {
while (statsWatcherRunning.get()) {
final IndicesStatsResponse indicesStatsResponse;
try {
indicesStatsResponse = indicesAdmin().prepareStats(restoredIndexName).clear().setStore(true).get();
} catch (IndexNotFoundException | IndexClosedException e) {
continue;
// ok
}
for (ShardStats shardStats : indicesStatsResponse.getShards()) {
StoreStats store = shardStats.getStats().getStore();
assertThat(shardStats.getShardRouting().toString(), store.reservedSizeInBytes(), equalTo(0L));
assertThat(shardStats.getShardRouting().toString(), store.sizeInBytes(), equalTo(0L));
}
if (indicesStatsResponse.getShards().length > 0) {
assertThat(indicesStatsResponse.getTotal().getStore().reservedSizeInBytes(), equalTo(0L));
assertThat(indicesStatsResponse.getTotal().getStore().sizeInBytes(), equalTo(0L));
}
}
}, "test-stats-watcher");
statsWatcher.start();
final MountSearchableSnapshotRequest req = new MountSearchableSnapshotRequest(
TEST_REQUEST_TIMEOUT,
restoredIndexName,
fsRepoName,
snapshotInfo.snapshotId().getName(),
indexName,
indexSettingsBuilder.build(),
Strings.EMPTY_ARRAY,
true,
MountSearchableSnapshotRequest.Storage.SHARED_CACHE
);
final RestoreSnapshotResponse restoreSnapshotResponse = client().execute(MountSearchableSnapshotAction.INSTANCE, req).get();
assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
final Map<Integer, SnapshotIndexShardStatus> snapshotShards = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, fsRepoName)
.setSnapshots(snapshotInfo.snapshotId().getName())
.get()
.getSnapshots()
.get(0)
.getIndices()
.get(indexName)
.getShards();
ensureGreen(restoredIndexName);
final IndicesStatsResponse indicesStatsResponse = indicesAdmin().prepareStats(restoredIndexName).clear().setStore(true).get();
assertThat(indicesStatsResponse.getShards().length, greaterThan(0));
long totalExpectedSize = 0;
for (ShardStats shardStats : indicesStatsResponse.getShards()) {
StoreStats store = shardStats.getStats().getStore();
final ShardRouting shardRouting = shardStats.getShardRouting();
assertThat(shardRouting.toString(), store.reservedSizeInBytes(), equalTo(0L));
assertThat(shardRouting.toString(), store.sizeInBytes(), equalTo(0L));
// the original shard size from the snapshot
final long originalSize = snapshotShards.get(shardRouting.getId()).getStats().getTotalSize();
assertThat(shardRouting.toString(), store.totalDataSetSizeInBytes(), equalTo(originalSize));
totalExpectedSize += originalSize;
}
final StoreStats store = indicesStatsResponse.getTotal().getStore();
assertThat(store.totalDataSetSizeInBytes(), equalTo(totalExpectedSize));
statsWatcherRunning.set(false);
statsWatcher.join();
final Settings settings = indicesAdmin().prepareGetSettings(TEST_REQUEST_TIMEOUT, restoredIndexName)
.get()
.getIndexToSettings()
.get(restoredIndexName);
assertThat(SearchableSnapshots.SNAPSHOT_SNAPSHOT_NAME_SETTING.get(settings), equalTo(snapshotName));
assertThat(IndexModule.INDEX_STORE_TYPE_SETTING.get(settings), equalTo(SEARCHABLE_SNAPSHOT_STORE_TYPE));
assertThat(IndexModule.INDEX_RECOVERY_TYPE_SETTING.get(settings), equalTo(SNAPSHOT_RECOVERY_STATE_FACTORY_KEY));
assertTrue(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.get(settings));
assertTrue(SearchableSnapshots.SNAPSHOT_SNAPSHOT_ID_SETTING.exists(settings));
assertTrue(SearchableSnapshots.SNAPSHOT_INDEX_ID_SETTING.exists(settings));
assertThat(IndexMetadata.INDEX_AUTO_EXPAND_REPLICAS_SETTING.get(settings).toString(), equalTo("false"));
assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(settings), equalTo(expectedReplicas));
assertThat(DataTier.TIER_PREFERENCE_SETTING.get(settings), equalTo(expectedDataTiersPreference));
assertTrue(SearchableSnapshotsSettings.SNAPSHOT_PARTIAL_SETTING.get(settings));
assertTrue(DiskThresholdDecider.SETTING_IGNORE_DISK_WATERMARKS.get(settings));
assertThat(IndexSettings.INDEX_CHECK_ON_STARTUP.get(settings), equalTo(indexCheckOnStartup));
checkSoftDeletesNotEagerlyLoaded(restoredIndexName);
assertTotalHits(restoredIndexName, originalAllHits, originalBarHits);
assertRecoveryStats(restoredIndexName, false);
// TODO: fix
// assertSearchableSnapshotStats(restoredIndexName, true, nonCachedExtensions);
ensureGreen(restoredIndexName);
assertBusy(() -> assertShardFolders(restoredIndexName, true), 30, TimeUnit.SECONDS);
assertThat(
clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT)
.clear()
.setMetadata(true)
.setIndices(restoredIndexName)
.get()
.getState()
.metadata()
.getProject()
.index(restoredIndexName)
.getTimestampRange(),
sameInstance(IndexLongFieldRange.UNKNOWN)
);
if (deletedBeforeMount) {
assertThat(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, aliasName).get().getAliases().size(), equalTo(0));
assertAcked(indicesAdmin().prepareAliases(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).addAlias(restoredIndexName, aliasName));
} else if (indexName.equals(restoredIndexName) == false) {
assertThat(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, aliasName).get().getAliases().size(), equalTo(1));
assertAcked(
indicesAdmin().prepareAliases(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT)
.addAliasAction(IndicesAliasesRequest.AliasActions.remove().index(indexName).alias(aliasName).mustExist(true))
.addAlias(restoredIndexName, aliasName)
);
}
assertThat(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, aliasName).get().getAliases().size(), equalTo(1));
assertTotalHits(aliasName, originalAllHits, originalBarHits);
final var request = new ClusterAllocationExplainRequest(TEST_REQUEST_TIMEOUT).setIndex(restoredIndexName)
.setShard(0)
.setPrimary(true);
request.includeYesDecisions(true);
final var diskDeciderDecision = safeGet(client().execute(TransportClusterAllocationExplainAction.TYPE, request)).getExplanation()
.getShardAllocationDecision()
.getMoveDecision()
.getCanRemainDecision()
.getDecisions()
.stream()
.filter(d -> d.label().equals(DiskThresholdDecider.NAME))
.findFirst()
.orElseThrow();
assertThat(diskDeciderDecision.type(), equalTo(Decision.Type.YES));
assertThat(
diskDeciderDecision.getExplanation(),
oneOf("disk watermarks are ignored on this index", "there is only a single data node present")
);
internalCluster().fullRestart();
assertTotalHits(restoredIndexName, originalAllHits, originalBarHits);
assertRecoveryStats(restoredIndexName, false);
assertTotalHits(aliasName, originalAllHits, originalBarHits);
// TODO: fix
// assertSearchableSnapshotStats(restoredIndexName, false, nonCachedExtensions);
internalCluster().ensureAtLeastNumDataNodes(2);
final DiscoveryNode dataNode = randomFrom(
clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getDataNodes().values()
);
updateIndexSettings(
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(
IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(),
dataNode.getName()
),
restoredIndexName
);
assertFalse(
clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, restoredIndexName)
.setWaitForNoRelocatingShards(true)
.setWaitForEvents(Priority.LANGUID)
.get()
.isTimedOut()
);
assertTotalHits(restoredIndexName, originalAllHits, originalBarHits);
assertRecoveryStats(restoredIndexName, false);
// TODO: fix
// assertSearchableSnapshotStats(restoredIndexName, false, nonCachedExtensions);
updateIndexSettings(
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
.putNull(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey()),
restoredIndexName
);
assertTotalHits(restoredIndexName, originalAllHits, originalBarHits);
assertRecoveryStats(restoredIndexName, false);
final String clonedIndexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
assertAcked(
executeResize(
ResizeType.CLONE,
restoredIndexName,
clonedIndexName,
Settings.builder()
.putNull(IndexModule.INDEX_STORE_TYPE_SETTING.getKey())
.putNull(IndexModule.INDEX_RECOVERY_TYPE_SETTING.getKey())
.put(DataTier.TIER_PREFERENCE, DataTier.DATA_HOT)
)
);
ensureGreen(clonedIndexName);
assertTotalHits(clonedIndexName, originalAllHits, originalBarHits);
final Settings clonedIndexSettings = indicesAdmin().prepareGetSettings(TEST_REQUEST_TIMEOUT, clonedIndexName)
.get()
.getIndexToSettings()
.get(clonedIndexName);
assertFalse(clonedIndexSettings.hasValue(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()));
assertFalse(clonedIndexSettings.hasValue(SearchableSnapshots.SNAPSHOT_SNAPSHOT_NAME_SETTING.getKey()));
assertFalse(clonedIndexSettings.hasValue(SearchableSnapshots.SNAPSHOT_SNAPSHOT_ID_SETTING.getKey()));
assertFalse(clonedIndexSettings.hasValue(SearchableSnapshots.SNAPSHOT_INDEX_ID_SETTING.getKey()));
assertFalse(clonedIndexSettings.hasValue(IndexModule.INDEX_RECOVERY_TYPE_SETTING.getKey()));
assertAcked(indicesAdmin().prepareDelete(restoredIndexName));
assertThat(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, aliasName).get().getAliases().size(), equalTo(0));
assertAcked(indicesAdmin().prepareAliases(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).addAlias(clonedIndexName, aliasName));
assertTotalHits(aliasName, originalAllHits, originalBarHits);
}
public void testRequestCacheOnFrozen() throws Exception {
assertAcked(
indicesAdmin().prepareCreate("test-index")
.setMapping("f", "type=date")
.setSettings(indexSettings(1, 0).put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true))
);
indexRandom(
true,
prepareIndex("test-index").setSource("f", "2014-03-10T00:00:00.000Z"),
prepareIndex("test-index").setSource("f", "2014-05-13T00:00:00.000Z")
);
ensureSearchable("test-index");
createRepository("repo", "fs", Settings.builder().put("location", randomRepoPath()));
createFullSnapshot("repo", "snap");
assertAcked(indicesAdmin().prepareDelete("test-index"));
logger.info("--> restoring index [{}]", "test-index");
Settings.Builder indexSettingsBuilder = Settings.builder().put(SearchableSnapshots.SNAPSHOT_CACHE_ENABLED_SETTING.getKey(), true);
final MountSearchableSnapshotRequest req = new MountSearchableSnapshotRequest(
TEST_REQUEST_TIMEOUT,
"test-index",
"repo",
"snap",
"test-index",
indexSettingsBuilder.build(),
Strings.EMPTY_ARRAY,
true,
MountSearchableSnapshotRequest.Storage.SHARED_CACHE
);
final RestoreSnapshotResponse restoreSnapshotResponse = client().execute(MountSearchableSnapshotAction.INSTANCE, req).get();
assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
ensureSearchable("test-index");
// use a fixed client for the searches, as clients randomize timeouts, which leads to different cache entries
Client client = client();
assertNoFailuresAndResponse(
client.prepareSearch("test-index")
.setSize(0)
.setSearchType(SearchType.QUERY_THEN_FETCH)
.addAggregation(
dateHistogram("histo").field("f")
.timeZone(ZoneId.of("+01:00"))
.minDocCount(0)
.calendarInterval(DateHistogramInterval.MONTH)
),
r1 -> {
assertRequestCacheState(client(), "test-index", 0, 1);
// The cached is actually used
assertThat(
indicesAdmin().prepareStats("test-index")
.setRequestCache(true)
.get()
.getTotal()
.getRequestCache()
.getMemorySizeInBytes(),
greaterThan(0L)
);
for (int i = 0; i < 10; ++i) {
final int idx = i;
assertNoFailuresAndResponse(
client.prepareSearch("test-index")
.setSize(0)
.setSearchType(SearchType.QUERY_THEN_FETCH)
.addAggregation(
dateHistogram("histo").field("f")
.timeZone(ZoneId.of("+01:00"))
.minDocCount(0)
.calendarInterval(DateHistogramInterval.MONTH)
),
r2 -> {
assertRequestCacheState(client(), "test-index", idx + 1, 1);
Histogram h1 = r1.getAggregations().get("histo");
Histogram h2 = r2.getAggregations().get("histo");
final List<? extends Histogram.Bucket> buckets1 = h1.getBuckets();
final List<? extends Histogram.Bucket> buckets2 = h2.getBuckets();
assertEquals(buckets1.size(), buckets2.size());
for (int j = 0; j < buckets1.size(); ++j) {
final Histogram.Bucket b1 = buckets1.get(j);
final Histogram.Bucket b2 = buckets2.get(j);
assertEquals(b1.getKey(), b2.getKey());
assertEquals(b1.getDocCount(), b2.getDocCount());
}
}
);
}
}
);
// shut down shard and check that cache entries are actually removed
indicesAdmin().prepareClose("test-index").get();
ensureGreen("test-index");
for (IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) {
IndicesRequestCache indicesRequestCache = IndicesRequestCacheUtils.getRequestCache(indicesService);
IndicesRequestCacheUtils.cleanCache(indicesRequestCache);
for (String key : IndicesRequestCacheUtils.cachedKeys(indicesRequestCache)) {
assertThat(key, not(containsString("test-index")));
}
}
}
public void testRefreshPartiallyMountedIndex() throws Exception {
internalCluster().ensureAtLeastNumDataNodes(2);
final var index = "index";
createIndex(index, 1, 0);
populateIndex(index, 1_000);
final var repository = "repository";
createRepository(repository, FsRepository.TYPE, Settings.builder().put("location", randomRepoPath()));
final var snapshot = "repository";
createFullSnapshot(repository, snapshot);
assertAcked(indicesAdmin().prepareDelete(index));
final var partialIndex = "partial-" + index;
mountSnapshot(
repository,
snapshot,
index,
partialIndex,
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, randomInt(1)).build(),
MountSearchableSnapshotRequest.Storage.SHARED_CACHE
);
ensureGreen(partialIndex);
// before the fix this would have failed
var refreshResult = indicesAdmin().prepareRefresh(partialIndex).execute().actionGet();
assertNoFailures(refreshResult);
}
public void testTierPreferenceCannotBeRemovedForFrozenIndex() throws Exception {
final String fsRepoName = randomAlphaOfLength(10);
final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
final String aliasName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
final String restoredIndexName = randomBoolean() ? indexName : randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
final String snapshotName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
createRepository(fsRepoName, FsRepository.TYPE);
final Settings.Builder originalIndexSettings = Settings.builder().put(INDEX_SOFT_DELETES_SETTING.getKey(), true);
assertAcked(prepareCreate(indexName, originalIndexSettings));
assertAcked(indicesAdmin().prepareAliases(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).addAlias(indexName, aliasName));
populateIndex(indexName, 100);
final SnapshotInfo snapshotInfo = createFullSnapshot(fsRepoName, snapshotName);
ensureGreen(indexName);
assertShardFolders(indexName, false);
assertAcked(indicesAdmin().prepareClose(indexName));
logger.info("--> restoring partial index [{}] with cache enabled", restoredIndexName);
Settings.Builder indexSettingsBuilder = Settings.builder().put(SearchableSnapshots.SNAPSHOT_CACHE_ENABLED_SETTING.getKey(), true);
indexSettingsBuilder.put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.ZERO)
.putNull(DataTier.TIER_PREFERENCE);
final MountSearchableSnapshotRequest req = new MountSearchableSnapshotRequest(
TEST_REQUEST_TIMEOUT,
restoredIndexName,
fsRepoName,
snapshotInfo.snapshotId().getName(),
indexName,
indexSettingsBuilder.build(),
Strings.EMPTY_ARRAY,
true,
MountSearchableSnapshotRequest.Storage.SHARED_CACHE
);
final RestoreSnapshotResponse restoreSnapshotResponse = client().execute(MountSearchableSnapshotAction.INSTANCE, req).get();
assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
ensureGreen(restoredIndexName);
UpdateSettingsRequestBuilder settingsRequest = indicesAdmin().prepareUpdateSettings(restoredIndexName);
settingsRequest.setSettings(Settings.builder().putNull(DataTier.TIER_PREFERENCE));
indicesAdmin().updateSettings(settingsRequest.request()).actionGet();
// we're expecting the tier preference to not be explicitly set in the settings (as we nullified it) but
// the index to still have the default value of `data_frozen`
GetSettingsResponse getSettingsResponse = indicesAdmin().prepareGetSettings(TEST_REQUEST_TIMEOUT, restoredIndexName).get();
final Settings settings = getSettingsResponse.getIndexToSettings().get(restoredIndexName);
assertThat(settings.get(DataTier.TIER_PREFERENCE), nullValue());
assertThat(DataTier.TIER_PREFERENCE_SETTING.get(settings), is("data_frozen"));
}
private static void assertRequestCacheState(Client client, String index, long expectedHits, long expectedMisses) {
RequestCacheStats requestCacheStats = client.admin()
.indices()
.prepareStats(index)
.setRequestCache(true)
.get()
.getTotal()
.getRequestCache();
// Check the hit count and miss count together so if they are not
// correct we can see both values
assertEquals(
Arrays.asList(expectedHits, expectedMisses, 0L),
Arrays.asList(requestCacheStats.getHitCount(), requestCacheStats.getMissCount(), requestCacheStats.getEvictions())
);
}
}
|
FrozenSearchableSnapshotsIntegTests
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/window/groupwindow/triggers/ProcessingTimeTriggers.java
|
{
"start": 2145,
"end": 2272
}
|
class ____ {
private static final String TO_STRING = "ProcessingTime.afterEndOfWindow()";
/** This
|
ProcessingTimeTriggers
|
java
|
quarkusio__quarkus
|
test-framework/junit5-component/src/test/java/io/quarkus/test/component/InjectSpyFailureTest.java
|
{
"start": 292,
"end": 752
}
|
class ____ {
@RegisterExtension
static final QuarkusComponentTestExtension extension = QuarkusComponentTestExtension.builder()
.addComponentClasses(Delta.class)
.buildShouldFail()
.build();
@InjectSpy
Delta delta;
@Test
public void testStartFailure() {
Throwable failure = extension.getBuildFailure();
assertTrue(failure instanceof IllegalStateException);
}
}
|
InjectSpyFailureTest
|
java
|
grpc__grpc-java
|
api/src/context/java/io/grpc/Context.java
|
{
"start": 24484,
"end": 25908
}
|
class ____ extends Context implements Closeable {
private final Deadline deadline;
private final Context uncancellableSurrogate;
private ArrayList<ExecutableListener> listeners;
// parentListener is initialized when listeners is initialized (only if there is a
// cancellable ancestor), and uninitialized when listeners is uninitialized.
private CancellationListener parentListener;
private Throwable cancellationCause;
private ScheduledFuture<?> pendingDeadline;
private boolean cancelled;
/**
* Create a cancellable context that does not have a deadline.
*/
private CancellableContext(Context parent) {
super(parent, parent.keyValueEntries);
deadline = parent.getDeadline();
// Create a surrogate that inherits from this to attach so that you cannot retrieve a
// cancellable context from Context.current()
uncancellableSurrogate = new Context(this, keyValueEntries);
}
/**
* Create a cancellable context that has a deadline.
*/
private CancellableContext(Context parent, Deadline deadline) {
super(parent, parent.keyValueEntries);
this.deadline = deadline;
this.uncancellableSurrogate = new Context(this, keyValueEntries);
}
private void setUpDeadlineCancellation(Deadline deadline, ScheduledExecutorService scheduler) {
if (!deadline.isExpired()) {
final
|
CancellableContext
|
java
|
reactor__reactor-core
|
reactor-core/src/main/java/reactor/core/publisher/InnerConsumer.java
|
{
"start": 907,
"end": 978
}
|
interface ____<I>
extends CoreSubscriber<I>, Scannable {
}
|
InnerConsumer
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NMAuditLogger.java
|
{
"start": 1448,
"end": 7747
}
|
class ____ {
static final String SUCCESS = "SUCCESS";
static final String FAILURE = "FAILURE";
static final String KEY_VAL_SEPARATOR = "=";
static final char PAIR_SEPARATOR = '\t';
// Some commonly used descriptions
public static final String START_CONTAINER = "Start Container Request";
public static final String STOP_CONTAINER = "Stop Container Request";
public static final String START_CONTAINER_REINIT =
"Container ReInitialization - Started";
public static final String FINISH_CONTAINER_REINIT =
"Container ReInitialization - Finished";
public static final String FINISH_SUCCESS_CONTAINER = "Container Finished - Succeeded";
public static final String FINISH_FAILED_CONTAINER = "Container Finished - Failed";
public static final String FINISH_KILLED_CONTAINER = "Container Finished - Killed";
}
/**
* A helper api for creating an audit log for a successful event.
*/
static String createSuccessLog(String user, String operation, String target,
ApplicationId appId, ContainerId containerId) {
StringBuilder b = new StringBuilder();
start(Keys.USER, user, b);
addRemoteIP(b);
add(Keys.OPERATION, operation, b);
add(Keys.TARGET, target ,b);
add(Keys.RESULT, AuditConstants.SUCCESS, b);
if (appId != null) {
add(Keys.APPID, appId.toString(), b);
}
if (containerId != null) {
add(Keys.CONTAINERID, containerId.toString(), b);
}
return b.toString();
}
/**
* Create a readable and parseable audit log string for a successful event.
*
* @param user User who made the service request.
* @param operation Operation requested by the user
* @param target The target on which the operation is being performed.
* @param appId Application Id in which operation was performed.
* @param containerId Container Id in which operation was performed.
*
* <br><br>
* Note that the {@link NMAuditLogger} uses tabs ('\t') as a key-val delimiter
* and hence the value fields should not contains tabs ('\t').
*/
public static void logSuccess(String user, String operation, String target,
ApplicationId appId, ContainerId containerId) {
if (LOG.isInfoEnabled()) {
LOG.info(createSuccessLog(user, operation, target, appId, containerId));
}
}
/**
* Create a readable and parseable audit log string for a successful event.
*
* @param user User who made the service request.
* @param operation Operation requested by the user
* @param target The target on which the operation is being performed.
*
* <br><br>
* Note that the {@link NMAuditLogger} uses tabs ('\t') as a key-val delimiter
* and hence the value fields should not contains tabs ('\t').
*/
public static void logSuccess(String user, String operation, String target) {
if (LOG.isInfoEnabled()) {
LOG.info(createSuccessLog(user, operation, target, null, null));
}
}
/**
* A helper api for creating an audit log for a failure event.
* This is factored out for testing purpose.
*/
static String createFailureLog(String user, String operation, String target,
String description, ApplicationId appId, ContainerId containerId) {
StringBuilder b = new StringBuilder();
start(Keys.USER, user, b);
addRemoteIP(b);
add(Keys.OPERATION, operation, b);
add(Keys.TARGET, target ,b);
add(Keys.RESULT, AuditConstants.FAILURE, b);
add(Keys.DESCRIPTION, description, b);
if (appId != null) {
add(Keys.APPID, appId.toString(), b);
}
if (containerId != null) {
add(Keys.CONTAINERID, containerId.toString(), b);
}
return b.toString();
}
/**
* Create a readable and parseable audit log string for a failed event.
*
* @param user User who made the service request.
* @param operation Operation requested by the user.
* @param target The target on which the operation is being performed.
* @param description Some additional information as to why the operation
* failed.
* @param appId ApplicationId in which operation was performed.
* @param containerId Container Id in which operation was performed.
*
* <br><br>
* Note that the {@link NMAuditLogger} uses tabs ('\t') as a key-val delimiter
* and hence the value fields should not contains tabs ('\t').
*/
public static void logFailure(String user, String operation, String target,
String description, ApplicationId appId, ContainerId containerId) {
if (LOG.isWarnEnabled()) {
LOG.warn(createFailureLog(user, operation, target, description, appId, containerId));
}
}
/**
* Create a readable and parseable audit log string for a failed event.
*
* @param user User who made the service request.
* @param operation Operation requested by the user.
* @param target The target on which the operation is being performed.
* @param description Some additional information as to why the operation
* failed.
*
* <br><br>
* Note that the {@link NMAuditLogger} uses tabs ('\t') as a key-val delimiter
* and hence the value fields should not contains tabs ('\t').
*/
public static void logFailure(String user, String operation,
String target, String description) {
if (LOG.isWarnEnabled()) {
LOG.warn(createFailureLog(user, operation, target, description, null, null));
}
}
/**
* A helper api to add remote IP address
*/
static void addRemoteIP(StringBuilder b) {
InetAddress ip = Server.getRemoteIp();
// ip address can be null for testcases
if (ip != null) {
add(Keys.IP, ip.getHostAddress(), b);
}
}
/**
* Adds the first key-val pair to the passed builder in the following format
* key=value
*/
static void start(Keys key, String value, StringBuilder b) {
b.append(key.name()).append(AuditConstants.KEY_VAL_SEPARATOR).append(value);
}
/**
* Appends the key-val pair to the passed builder in the following format
* <pair-delim>key=value
*/
static void add(Keys key, String value, StringBuilder b) {
b.append(AuditConstants.PAIR_SEPARATOR).append(key.name())
.append(AuditConstants.KEY_VAL_SEPARATOR).append(value);
}
}
|
AuditConstants
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/deser/builder/BuilderWithUnwrappedTest.java
|
{
"start": 2627,
"end": 3255
}
|
class ____ {
private final long id;
private final Name name;
private final int age;
private final boolean alive;
Animal(Builder builder) {
id = builder.id;
name = builder.name;
age = builder.age;
alive = builder.alive;
}
long getId() {
return id;
}
Name getName() {
return name;
}
int getAge() {
return age;
}
boolean isAlive() {
return alive;
}
@JsonPOJOBuilder(withPrefix = "set")
final static
|
Animal
|
java
|
spring-projects__spring-boot
|
module/spring-boot-jackson/src/test/java/org/springframework/boot/jackson/autoconfigure/JacksonAutoConfigurationTests.java
|
{
"start": 35088,
"end": 35681
}
|
class ____ {
@Bean
JacksonModule jacksonModule() {
SimpleModule module = new SimpleModule();
module.addSerializer(Foo.class, new ValueSerializer<>() {
@Override
public void serialize(Foo value, JsonGenerator jgen, SerializationContext context) {
jgen.writeStartObject();
jgen.writeStringProperty("foo", "bar");
jgen.writeEndObject();
}
});
return module;
}
@Bean
@Primary
JsonMapper jsonMapper() {
return JsonMapper.builder().addModule(jacksonModule()).build();
}
}
@Configuration(proxyBeanMethods = false)
static
|
DoubleModulesConfig
|
java
|
assertj__assertj-core
|
assertj-core/src/main/java/org/assertj/core/api/recursive/comparison/AbstractRecursiveComparisonIntrospectionStrategy.java
|
{
"start": 1119,
"end": 2719
}
|
class ____ implements RecursiveComparisonIntrospectionStrategy {
private boolean ignoreTransientFields = false;
public void ignoreTransientFields() {
ignoreTransientFields = true;
}
@Override
public boolean shouldIgnoreTransientFields() {
return ignoreTransientFields;
}
private Set<Field> getDeclaredFieldsIncludingInherited(Class<?> clazz) {
requireNonNull(clazz, "expecting Class parameter not to be null");
Set<Field> declaredFields = getDeclaredFields(clazz);
// get fields declared in superClass
Class<?> superClass = clazz.getSuperclass();
while (!isInJavaLangPackage(superClass)) {
declaredFields.addAll(getDeclaredFields(superClass));
superClass = superClass.getSuperclass();
}
return declaredFields;
}
protected Set<String> getFieldsNames(Class<?> clazz) {
return getDeclaredFieldsIncludingInherited(clazz).stream()
.map(Field::getName)
.collect(toSet());
}
private Set<Field> getDeclaredFields(Class<?> clazz) {
Field[] declaredFields = clazz.getDeclaredFields();
Predicate<Field> fieldPredicate = field -> !(field.isSynthetic() || Modifier.isStatic(field.getModifiers()));
if (ignoreTransientFields) {
fieldPredicate = fieldPredicate.and(field -> !Modifier.isTransient(field.getModifiers()));
}
return stream(declaredFields).filter(fieldPredicate)
.collect(toCollection(LinkedHashSet::new));
}
}
|
AbstractRecursiveComparisonIntrospectionStrategy
|
java
|
google__auto
|
value/src/test/java/com/google/auto/value/extension/toprettystring/ToPrettyStringTest.java
|
{
"start": 23532,
"end": 23962
}
|
class ____ {
abstract int i();
@ToPrettyString
abstract String customName();
}
@Test
public void withCustomName() {
WithCustomName valueType = new AutoValue_ToPrettyStringTest_WithCustomName(1);
assertThat(valueType.customName())
.isEqualTo(
"WithCustomName {" // force newline
+ "\n i = 1,"
+ "\n}");
}
@AutoValue
abstract static
|
WithCustomName
|
java
|
spring-projects__spring-boot
|
core/spring-boot/src/test/java/org/springframework/boot/context/config/ConfigDataLocationResolversTests.java
|
{
"start": 10296,
"end": 10610
}
|
class ____ extends TestResolver {
private final DeferredLogFactory deferredLogFactory;
TestLogResolver(DeferredLogFactory deferredLogFactory) {
this.deferredLogFactory = deferredLogFactory;
}
DeferredLogFactory getDeferredLogFactory() {
return this.deferredLogFactory;
}
}
static
|
TestLogResolver
|
java
|
google__error-prone
|
test_helpers/src/test/java/com/google/errorprone/CompilationTestHelperTest.java
|
{
"start": 1523,
"end": 1821
}
|
class ____ {
private final CompilationTestHelper compilationHelper =
CompilationTestHelper.newInstance(ReturnTreeChecker.class, getClass());
@Test
public void fileWithNoBugMarkersAndNoErrorsShouldPass() {
compilationHelper.addSourceLines("Test.java", "public
|
CompilationTestHelperTest
|
java
|
apache__maven
|
impl/maven-di/src/test/java/org/apache/maven/di/impl/TypeUtilsTest.java
|
{
"start": 7222,
"end": 7273
}
|
class ____<A, B, C, D, E, F, G, H, I> {}
|
TestClass
|
java
|
quarkusio__quarkus
|
core/deployment/src/main/java/io/quarkus/deployment/ExtensionLoader.java
|
{
"start": 4768,
"end": 4885
}
|
class ____ load build steps, runtime recorders, and configuration roots from a given extension class.
*/
public final
|
to
|
java
|
quarkusio__quarkus
|
extensions/arc/deployment/src/test/java/io/quarkus/arc/test/profile/IfBuildProfileAllAnyTest.java
|
{
"start": 6024,
"end": 6337
}
|
class ____ implements IfBuildProfileBean {
@Override
public String profile() {
return "allOf-test-build,anyOf-any";
}
}
// Active
@ApplicationScoped
@IfBuildProfile(allOf = { "test", "build" }, anyOf = { "any", "dev" })
public static
|
AllOfTestBuildAnyOfAnyBean
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/atomic/longadder/LongAdderAssert_isNotCloseTo_with_Long_and_Offset_Test.java
|
{
"start": 1010,
"end": 1482
}
|
class ____ extends LongAdderAssertBaseTest {
private final long value = 10L;
private final Offset<Long> offset = offset(5L);
@Override
protected LongAdderAssert invoke_api_method() {
return assertions.isNotCloseTo(value, offset);
}
@Override
protected void verify_internal_effects() {
verify(longs).assertIsNotCloseTo(getInfo(assertions), getActual(assertions).longValue(), value, offset);
}
}
|
LongAdderAssert_isNotCloseTo_with_Long_and_Offset_Test
|
java
|
apache__maven
|
api/maven-api-cli/src/main/java/org/apache/maven/api/cli/mvn/MavenOptions.java
|
{
"start": 1516,
"end": 8152
}
|
interface ____ extends Options {
/**
* Returns the path to an alternate POM file.
*
* @return an {@link Optional} containing the path to the alternate POM file, or empty if not specified
*/
@Nonnull
Optional<String> alternatePomFile();
/**
* Indicates whether Maven should operate in non-recursive mode (i.e., not build child modules).
*
* @return an {@link Optional} containing true if non-recursive mode is enabled, false if disabled, or empty if not specified
*/
@Nonnull
Optional<Boolean> nonRecursive();
/**
* Indicates whether Maven should force a check for updated snapshots on remote repositories.
*
* @return an {@link Optional} containing true if snapshot updates should be forced, false if not, or empty if not specified
*/
@Nonnull
Optional<Boolean> updateSnapshots();
/**
* Returns the list of profiles to activate.
*
* @return an {@link Optional} containing the list of profile names to activate, or empty if not specified
*/
@Nonnull
Optional<List<String>> activatedProfiles();
/**
* Indicates whether Maven should suppress SNAPSHOT updates.
*
* @return an {@link Optional} containing true if SNAPSHOT updates should be suppressed, false if not, or empty if not specified
*/
@Nonnull
Optional<Boolean> suppressSnapshotUpdates();
/**
* Indicates whether Maven should use strict checksum verification.
*
* @return an {@link Optional} containing true if strict checksum verification is enabled, false if not, or empty if not specified
*/
@Nonnull
Optional<Boolean> strictChecksums();
/**
* Indicates whether Maven should use relaxed checksum verification.
*
* @return an {@link Optional} containing true if relaxed checksum verification is enabled, false if not, or empty if not specified
*/
@Nonnull
Optional<Boolean> relaxedChecksums();
/**
* Indicates whether Maven should stop at the first failure in a multi-module build.
*
* @return an {@link Optional} containing true if Maven should stop at the first failure, false if not, or empty if not specified
*/
@Nonnull
Optional<Boolean> failFast();
/**
* Indicates whether Maven should run all builds but defer error reporting to the end.
*
* @return an {@link Optional} containing true if error reporting should be deferred to the end, false if not, or empty if not specified
*/
@Nonnull
Optional<Boolean> failAtEnd();
/**
* Indicates whether Maven should never fail the build, regardless of project result.
*
* @return an {@link Optional} containing true if the build should never fail, false if it should fail normally, or empty if not specified
*/
@Nonnull
Optional<Boolean> failNever();
/**
* Indicates whether Maven should resume from the last failed project in a previous build.
*
* @return an {@link Optional} containing true if Maven should resume from the last failure, false if not, or empty if not specified
*/
@Nonnull
Optional<Boolean> resume();
/**
* Returns the project to resume the build from.
*
* @return an {@link Optional} containing the project name to resume from, or empty if not specified
*/
@Nonnull
Optional<String> resumeFrom();
/**
* Returns the list of specified reactor projects to build instead of all projects.
*
* @return an {@link Optional} containing the list of project names to build, or empty if not specified
*/
@Nonnull
Optional<List<String>> projects();
/**
* Indicates whether Maven should also build the specified projects' dependencies.
*
* @return an {@link Optional} containing true if dependencies should also be built, false if not, or empty if not specified
*/
@Nonnull
Optional<Boolean> alsoMake();
/**
* Indicates whether Maven should also build the specified projects' dependents.
*
* @return an {@link Optional} containing true if dependents should also be built, false if not, or empty if not specified
*/
@Nonnull
Optional<Boolean> alsoMakeDependents();
/**
* Returns the number of threads used for parallel builds.
*
* @return an {@link Optional} containing the number of threads (or "1C" for one thread per CPU core), or empty if not specified
*/
@Nonnull
Optional<String> threads();
/**
* Returns the id of the build strategy to use.
*
* @return an {@link Optional} containing the id of the build strategy, or empty if not specified
*/
@Nonnull
Optional<String> builder();
/**
* Indicates whether Maven should not display transfer progress when downloading or uploading.
*
* @return an {@link Optional} containing true if transfer progress should not be displayed, false if it should, or empty if not specified
*/
@Nonnull
Optional<Boolean> noTransferProgress();
/**
* Indicates whether Maven should cache the "not found" status of artifacts that were not found in remote repositories.
*
* @return an {@link Optional} containing true if "not found" status should be cached, false if not, or empty if not specified
*/
@Nonnull
Optional<Boolean> cacheArtifactNotFound();
/**
* Indicates whether Maven should use strict artifact descriptor policy.
*
* @return an {@link Optional} containing true if strict artifact descriptor policy should be used, false if not, or empty if not specified
*/
@Nonnull
Optional<Boolean> strictArtifactDescriptorPolicy();
/**
* Indicates whether Maven should ignore transitive repositories.
*
* @return an {@link Optional} containing true if transitive repositories should be ignored, false if not, or empty if not specified
*/
@Nonnull
Optional<Boolean> ignoreTransitiveRepositories();
/**
* Specifies "@file"-like file, to load up command line from. It may contain goals as well. Format is one parameter
* per line (similar to {@code maven.conf}) and {@code '#'} (hash) marked comment lines are allowed. Goals, if
* present, are appended, to those specified on CLI input, if any.
*/
Optional<String> atFile();
/**
* Returns the list of goals and phases to execute.
*
* @return an {@link Optional} containing the list of goals and phases to execute, or empty if not specified
*/
@Nonnull
Optional<List<String>> goals();
}
|
MavenOptions
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java
|
{
"start": 2991,
"end": 43568
}
|
class ____ implements Iterable<RoutingNode> {
private static final Logger logger = LogManager.getLogger(RoutingNodes.class);
public static final String RESET_FAILED_ALLOCATION_COUNTER_LOG_MSG =
"Resetting failure counter for %d shard(s) that have reached their max allocation retires (%s)";
public static final String RESET_FAILED_RELOCATION_COUNTER_LOG_MSG =
"Resetting failure counter for %d shard(s) that have reached their max relocation retries (%s)";
private static final int MAX_SHARDS_IN_LOG_MSG = 20;
private final Map<String, RoutingNode> nodesToShards;
private final UnassignedShards unassignedShards;
private final Map<ShardId, List<ShardRouting>> assignedShards;
private final boolean readOnly;
private int inactivePrimaryCount = 0;
private int inactiveShardCount = 0;
private int relocatingShards = 0;
private int relocatingFrozenShards = 0;
private final Map<String, Set<String>> attributeValuesByAttribute;
private final Map<String, Recoveries> recoveriesPerNode;
private Map<DiscoveryNode, DesiredBalanceMetrics.NodeWeightStats> balanceWeightStatsPerNode;
/**
* Creates an immutable instance from the {@link RoutingTable} and {@link DiscoveryNodes} found in a cluster state. Used to initialize
* the routing nodes in {@link ClusterState#getRoutingNodes()}. This method should not be used directly, use
* {@link ClusterState#getRoutingNodes()} instead.
*/
public static RoutingNodes immutable(GlobalRoutingTable routingTable, DiscoveryNodes discoveryNodes) {
return new RoutingNodes(routingTable, discoveryNodes, true);
}
public static RoutingNodes mutable(GlobalRoutingTable routingTable, DiscoveryNodes discoveryNodes) {
return new RoutingNodes(routingTable, discoveryNodes, false);
}
public void setBalanceWeightStatsPerNode(Map<DiscoveryNode, DesiredBalanceMetrics.NodeWeightStats> weightStatsPerNode) {
this.balanceWeightStatsPerNode = weightStatsPerNode;
}
public Map<DiscoveryNode, DesiredBalanceMetrics.NodeWeightStats> getBalanceWeightStatsPerNode() {
return balanceWeightStatsPerNode;
}
private RoutingNodes(GlobalRoutingTable routingTable, DiscoveryNodes discoveryNodes, boolean readOnly) {
this.readOnly = readOnly;
this.recoveriesPerNode = new HashMap<>();
final int indexCount = routingTable.totalIndexCount();
this.assignedShards = Maps.newMapWithExpectedSize(indexCount);
this.unassignedShards = new UnassignedShards(this);
this.attributeValuesByAttribute = Collections.synchronizedMap(new HashMap<>());
balanceWeightStatsPerNode = Maps.newMapWithExpectedSize(discoveryNodes.getDataNodes().size());
nodesToShards = Maps.newMapWithExpectedSize(discoveryNodes.getDataNodes().size());
// fill in the nodeToShards with the "live" nodes
var dataNodes = discoveryNodes.getDataNodes().keySet();
// best guess for the number of shards per data node
final int sizeGuess = dataNodes.isEmpty() ? indexCount : 2 * indexCount / dataNodes.size();
for (var node : discoveryNodes.getDataNodes().keySet()) {
nodesToShards.put(node, new RoutingNode(node, discoveryNodes.get(node), sizeGuess));
}
// fill in the inverse of node -> shards allocated
// also fill replicaSet information
final Function<String, RoutingNode> createRoutingNode = k -> new RoutingNode(k, discoveryNodes.get(k), sizeGuess);
for (RoutingTable projectRouting : routingTable) {
for (IndexRoutingTable indexRoutingTable : projectRouting.indicesRouting().values()) {
for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) {
IndexShardRoutingTable indexShard = indexRoutingTable.shard(shardId);
assert indexShard.primary != null;
for (int copy = 0; copy < indexShard.size(); copy++) {
final ShardRouting shard = indexShard.shard(copy);
// to get all the shards belonging to an index, including the replicas,
// we define a replica set and keep track of it. A replica set is identified
// by the ShardId, as this is common for primary and replicas.
// A replica Set might have one (and not more) replicas with the state of RELOCATING.
if (shard.assignedToNode()) {
// LinkedHashMap to preserve order
nodesToShards.computeIfAbsent(shard.currentNodeId(), createRoutingNode).addWithoutValidation(shard);
assignedShardsAdd(shard);
if (shard.relocating()) {
relocatingShards++;
if (isDedicatedFrozenNode(shard.currentNodeId())) {
relocatingFrozenShards++;
}
ShardRouting targetShardRouting = shard.getTargetRelocatingShard();
addInitialRecovery(targetShardRouting, indexShard.primary);
// LinkedHashMap to preserve order.
// Add the counterpart shard with relocatingNodeId reflecting the source from which it's relocating from.
nodesToShards.computeIfAbsent(shard.relocatingNodeId(), createRoutingNode)
.addWithoutValidation(targetShardRouting);
assignedShardsAdd(targetShardRouting);
} else if (shard.initializing()) {
if (shard.primary()) {
inactivePrimaryCount++;
}
inactiveShardCount++;
addInitialRecovery(shard, indexShard.primary);
}
} else {
unassignedShards.add(shard);
}
}
}
}
}
assert invariant();
}
private boolean invariant() {
nodesToShards.values().forEach(RoutingNode::invariant);
return true;
}
private RoutingNodes(RoutingNodes routingNodes) {
// we should not call this on mutable instances, it's still expensive to create the copy and callers should instead mutate a single
// instance
assert routingNodes.readOnly : "tried to create a mutable copy from a mutable instance";
this.readOnly = false;
this.nodesToShards = Maps.copyOf(routingNodes.nodesToShards, RoutingNode::copy);
this.assignedShards = Maps.copyOf(routingNodes.assignedShards, ArrayList::new);
this.unassignedShards = routingNodes.unassignedShards.copyFor(this);
this.inactivePrimaryCount = routingNodes.inactivePrimaryCount;
this.inactiveShardCount = routingNodes.inactiveShardCount;
this.relocatingShards = routingNodes.relocatingShards;
this.relocatingFrozenShards = routingNodes.relocatingFrozenShards;
this.attributeValuesByAttribute = Collections.synchronizedMap(Maps.copyOf(routingNodes.attributeValuesByAttribute, HashSet::new));
this.recoveriesPerNode = Maps.copyOf(routingNodes.recoveriesPerNode, Recoveries::copy);
}
/**
* @return a mutable copy of this instance
*/
public RoutingNodes mutableCopy() {
return new RoutingNodes(this);
}
private void addRecovery(ShardRouting routing) {
updateRecoveryCounts(routing, true, findAssignedPrimaryIfPeerRecovery(routing));
}
private void removeRecovery(ShardRouting routing) {
updateRecoveryCounts(routing, false, findAssignedPrimaryIfPeerRecovery(routing));
}
private void addInitialRecovery(ShardRouting routing, ShardRouting initialPrimaryShard) {
updateRecoveryCounts(routing, true, initialPrimaryShard);
}
private void updateRecoveryCounts(final ShardRouting routing, final boolean increment, @Nullable final ShardRouting primary) {
final int howMany = increment ? 1 : -1;
assert routing.initializing() : "routing must be initializing: " + routing;
// TODO: check primary == null || primary.active() after all tests properly add ReplicaAfterPrimaryActiveAllocationDecider
assert primary == null || primary.assignedToNode() : "shard is initializing but its primary is not assigned to a node";
Recoveries.getOrAdd(recoveriesPerNode, routing.currentNodeId()).addIncoming(howMany);
if (routing.recoverySource().getType() == RecoverySource.Type.PEER) {
// add/remove corresponding outgoing recovery on node with primary shard
if (primary == null) {
throw new IllegalStateException("shard [" + routing + "] is peer recovering but primary is unassigned");
}
Recoveries.getOrAdd(recoveriesPerNode, primary.currentNodeId()).addOutgoing(howMany);
if (increment == false && routing.primary() && routing.relocatingNodeId() != null) {
// primary is done relocating, move non-primary recoveries from old primary to new primary
int numRecoveringReplicas = 0;
for (ShardRouting assigned : assignedShards(routing.shardId())) {
if (assigned.primary() == false
&& assigned.initializing()
&& assigned.recoverySource().getType() == RecoverySource.Type.PEER) {
numRecoveringReplicas++;
}
}
recoveriesPerNode.get(routing.relocatingNodeId()).addOutgoing(-numRecoveringReplicas);
recoveriesPerNode.get(routing.currentNodeId()).addOutgoing(numRecoveringReplicas);
}
}
}
public int getIncomingRecoveries(String nodeId) {
return recoveriesPerNode.getOrDefault(nodeId, Recoveries.EMPTY).getIncoming();
}
public int getOutgoingRecoveries(String nodeId) {
return recoveriesPerNode.getOrDefault(nodeId, Recoveries.EMPTY).getOutgoing();
}
@Nullable
private ShardRouting findAssignedPrimaryIfPeerRecovery(ShardRouting routing) {
ShardRouting primary = null;
if (routing.recoverySource() != null && routing.recoverySource().getType() == RecoverySource.Type.PEER) {
List<ShardRouting> shardRoutings = assignedShards.get(routing.shardId());
if (shardRoutings != null) {
for (ShardRouting shardRouting : shardRoutings) {
if (shardRouting.primary()) {
if (shardRouting.active()) {
return shardRouting;
} else if (primary == null) {
primary = shardRouting;
} else if (primary.relocatingNodeId() != null) {
primary = shardRouting;
}
}
}
}
}
return primary;
}
public Set<String> getAllNodeIds() {
return Collections.unmodifiableSet(nodesToShards.keySet());
}
@Override
public Iterator<RoutingNode> iterator() {
return Collections.unmodifiableCollection(nodesToShards.values()).iterator();
}
public Stream<RoutingNode> stream() {
return nodesToShards.values().stream();
}
public Iterator<RoutingNode> mutableIterator() {
ensureMutable();
return nodesToShards.values().iterator();
}
public UnassignedShards unassigned() {
return this.unassignedShards;
}
public RoutingNode node(String nodeId) {
return nodesToShards.get(nodeId);
}
public Set<String> getAttributeValues(String attributeName) {
return attributeValuesByAttribute.computeIfAbsent(
attributeName,
ignored -> stream().map(r -> r.node().getAttributes().get(attributeName)).filter(Objects::nonNull).collect(Collectors.toSet())
);
}
/**
* Returns <code>true</code> iff this {@link RoutingNodes} instance has any unassigned primaries even if the
* primaries are marked as temporarily ignored.
*/
public boolean hasUnassignedPrimaries() {
return unassignedShards.getNumPrimaries() + unassignedShards.getNumIgnoredPrimaries() > 0;
}
/**
* Returns <code>true</code> iff this {@link RoutingNodes} instance has any unassigned shards even if the
* shards are marked as temporarily ignored.
* @see UnassignedShards#isEmpty()
* @see UnassignedShards#isIgnoredEmpty()
*/
public boolean hasUnassignedShards() {
return unassignedShards.isEmpty() == false || unassignedShards.isIgnoredEmpty() == false;
}
public boolean hasInactivePrimaries() {
return inactivePrimaryCount > 0;
}
public boolean hasInactiveReplicas() {
return inactiveShardCount > inactivePrimaryCount;
}
public boolean hasInactiveShards() {
return inactiveShardCount > 0;
}
public int getRelocatingShardCount() {
return relocatingShards;
}
public boolean isDedicatedFrozenNode(String nodeId) {
RoutingNode node = nodesToShards.get(nodeId);
if (node != null && node.node() != null && node.node().isDedicatedFrozenNode()) {
return true;
}
return false;
}
public int getRelocatingFrozenShardCount() {
return relocatingFrozenShards;
}
/**
* Returns all shards that are not in the state UNASSIGNED with the same shard
* ID as the given shard.
*/
public List<ShardRouting> assignedShards(ShardId shardId) {
final List<ShardRouting> replicaSet = assignedShards.get(shardId);
return replicaSet == null ? EMPTY : Collections.unmodifiableList(replicaSet);
}
@Nullable
public ShardRouting getByAllocationId(ShardId shardId, String allocationId) {
final List<ShardRouting> replicaSet = assignedShards.get(shardId);
if (replicaSet == null) {
return null;
}
for (ShardRouting shardRouting : replicaSet) {
if (shardRouting.allocationId().getId().equals(allocationId)) {
return shardRouting;
}
}
return null;
}
/**
* Returns the active primary shard for the given shard id or <code>null</code> if
* no primary is found or the primary is not active.
*/
public ShardRouting activePrimary(ShardId shardId) {
for (ShardRouting shardRouting : assignedShards(shardId)) {
if (shardRouting.primary() && shardRouting.active()) {
return shardRouting;
}
}
return null;
}
    /**
     * Returns one active and promotable replica shard for the given shard id or <code>null</code> if no active replica is found.
     *
     * Since replicas could possibly be on nodes with an older version of ES than the primary is, this will return replicas on the highest
     * version of ES.
     */
    public ShardRouting activePromotableReplicaWithHighestVersion(ShardId shardId) {
        // It's possible for replicaNodeVersion to be null, when disassociating dead nodes
        // that have been removed, the shards are failed, and part of the shard failing
        // calls this method with an out-of-date RoutingNodes, where the version might not
        // be accessible. Therefore, we need to protect against the version being null
        // (meaning the node will be going away).
        return assignedShards(shardId).stream()
            .filter(shr -> shr.primary() == false && shr.active())
            .filter(shr -> node(shr.currentNodeId()) != null)
            .filter(ShardRouting::isPromotableToPrimary)
            .max(
                Comparator.comparing(
                    shr -> node(shr.currentNodeId()).node(),
                    // nullsFirst: copies on nodes whose DiscoveryNode is missing lose the max comparison
                    Comparator.nullsFirst(Comparator.comparing(DiscoveryNode::getVersion))
                )
            )
            .orElse(null);
    }
    /**
     * Returns <code>true</code> iff all expected copies (one primary plus the configured replicas) of the
     * given shard are active. Otherwise <code>false</code>.
     *
     * @param shardId the shard to check
     * @param project project metadata used to look up the configured replica count of the shard's index
     */
    public boolean allShardsActive(ShardId shardId, ProjectMetadata project) {
        final List<ShardRouting> shards = assignedShards(shardId);
        final int shardCopies = project.getIndexSafe(shardId.getIndex()).getNumberOfReplicas() + 1;
        if (shards.size() < shardCopies) {
            return false; // if we are empty nothing is active if we have less than total at least one is unassigned
        }
        int active = 0;
        for (ShardRouting shard : shards) {
            if (shard.active()) {
                active++;
            }
        }
        assert active <= shardCopies;
        return active == shardCopies;
    }
@Override
public String toString() {
StringBuilder sb = new StringBuilder("routing_nodes:\n");
for (RoutingNode routingNode : this) {
sb.append(routingNode.prettyPrint());
}
sb.append("---- unassigned\n");
for (ShardRouting shardEntry : unassignedShards) {
sb.append("--------").append(shardEntry.shortSummary()).append('\n');
}
return sb.toString();
}
    /**
     * Moves a shard from unassigned to initialize state
     *
     * @param unassignedShard the unassigned shard to initialize
     * @param nodeId the node to initialize the shard on
     * @param existingAllocationId allocation id to use. If null, a fresh allocation id is generated.
     * @param expectedSize the expected shard size in bytes
     * @param routingChangesObserver notified of the initialization
     * @return the initialized shard
     */
    public ShardRouting initializeShard(
        ShardRouting unassignedShard,
        String nodeId,
        @Nullable String existingAllocationId,
        long expectedSize,
        RoutingChangesObserver routingChangesObserver
    ) {
        ensureMutable();
        assert unassignedShard.unassigned() : "expected an unassigned shard " + unassignedShard;
        ShardRouting initializedShard = unassignedShard.initialize(nodeId, existingAllocationId, expectedSize);
        node(nodeId).add(initializedShard);
        // an initializing shard counts as inactive until it is started
        inactiveShardCount++;
        if (initializedShard.primary()) {
            inactivePrimaryCount++;
        }
        addRecovery(initializedShard);
        assignedShardsAdd(initializedShard);
        routingChangesObserver.shardInitialized(unassignedShard, initializedShard);
        return initializedShard;
    }
    /**
     * Relocate a shard to another node, adding the target initializing
     * shard as well as assigning it.
     *
     * @param startedShard the shard to relocate
     * @param nodeId the node to relocate to
     * @param expectedShardSize the expected shard size in bytes
     * @param reason human-readable reason for the relocation, passed to the observer
     * @param changes notified that the relocation started
     * @return pair of source relocating and target initializing shards.
     */
    public Tuple<ShardRouting, ShardRouting> relocateShard(
        ShardRouting startedShard,
        String nodeId,
        long expectedShardSize,
        String reason,
        RoutingChangesObserver changes
    ) {
        ensureMutable();
        relocatingShards++;
        // relocations onto dedicated frozen nodes are tracked separately
        if (isDedicatedFrozenNode(nodeId)) {
            relocatingFrozenShards++;
        }
        ShardRouting source = startedShard.relocate(nodeId, expectedShardSize);
        ShardRouting target = source.getTargetRelocatingShard();
        updateAssigned(startedShard, source);
        node(target.currentNodeId()).add(target);
        assignedShardsAdd(target);
        addRecovery(target);
        changes.relocationStarted(startedShard, target, reason);
        return Tuple.tuple(source, target);
    }
    /**
     * Applies the relevant logic to start an initializing shard.
     *
     * Moves the initializing shard to started. If the shard is a relocation target, also removes the relocation source.
     *
     * If the started shard is a primary relocation target, this also reinitializes currently initializing replicas as their
     * recovery source changes
     *
     * @param initializingShard the shard to start; must be in INITIALIZING state
     * @param routingChangesObserver notified of the start and of any follow-up replica changes
     * @param startedExpectedShardSize expected size to record on the started shard
     * @return the started shard
     */
    public ShardRouting startShard(
        ShardRouting initializingShard,
        RoutingChangesObserver routingChangesObserver,
        long startedExpectedShardSize
    ) {
        ensureMutable();
        ShardRouting startedShard = started(initializingShard, startedExpectedShardSize);
        routingChangesObserver.shardStarted(initializingShard, startedShard);
        if (initializingShard.relocatingNodeId() != null) {
            // relocation target has been started, remove relocation source
            RoutingNode relocationSourceNode = node(initializingShard.relocatingNodeId());
            ShardRouting relocationSourceShard = relocationSourceNode.getByShardId(initializingShard.shardId());
            assert relocationSourceShard.isRelocationSourceOf(initializingShard);
            assert relocationSourceShard.getTargetRelocatingShard() == initializingShard
                : "relocation target mismatch, expected: "
                    + initializingShard
                    + " but was: "
                    + relocationSourceShard.getTargetRelocatingShard();
            remove(relocationSourceShard);
            routingChangesObserver.relocationCompleted(relocationSourceShard);
            // if this is a primary shard with ongoing replica recoveries, reinitialize them as their recovery source changed
            if (startedShard.primary()) {
                List<ShardRouting> assignedShards = assignedShards(startedShard.shardId());
                // copy list to prevent ConcurrentModificationException
                for (ShardRouting routing : new ArrayList<>(assignedShards)) {
                    if (routing.initializing() && routing.primary() == false) {
                        if (routing.isRelocationTarget()) {
                            // find the relocation source
                            ShardRouting sourceShard = getByAllocationId(routing.shardId(), routing.allocationId().getRelocationId());
                            // cancel relocation and start relocation to same node again
                            ShardRouting startedReplica = cancelRelocation(sourceShard);
                            remove(routing);
                            routingChangesObserver.shardFailed(
                                routing,
                                new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, "primary changed")
                            );
                            relocateShard(
                                startedReplica,
                                sourceShard.relocatingNodeId(),
                                sourceShard.getExpectedShardSize(),
                                "restarting relocation",
                                routingChangesObserver
                            );
                        } else {
                            // plain initializing replica: reinitialize it in place with a fresh allocation id
                            ShardRouting reinitializedReplica = reinitReplica(routing);
                            routingChangesObserver.initializedReplicaReinitialized(routing, reinitializedReplica);
                        }
                    }
                }
            }
        }
        return startedShard;
    }
    /**
     * Applies the relevant logic to handle a cancelled or failed shard.
     *
     * Moves the shard to unassigned or completely removes the shard (if relocation target).
     *
     * - If shard is a primary, this also fails initializing replicas.
     * - If shard is an active primary, this also promotes an active replica to primary (if such a replica exists).
     * - If shard is a relocating primary, this also removes the primary relocation target shard.
     * - If shard is a relocating replica, this promotes the replica relocation target to a full initializing replica, removing the
     *   relocation source information. This is possible as peer recovery is always done from the primary.
     * - If shard is a (primary or replica) relocation target, this also clears the relocation information on the source shard.
     *
     * @param failedShard the assigned shard to fail
     * @param unassignedInfo information describing why the shard failed
     * @param routingChangesObserver notified of every routing change this failure causes
     */
    public void failShard(ShardRouting failedShard, UnassignedInfo unassignedInfo, RoutingChangesObserver routingChangesObserver) {
        ensureMutable();
        assert failedShard.assignedToNode() : "only assigned shards can be failed";
        assert getByAllocationId(failedShard.shardId(), failedShard.allocationId().getId()) == failedShard
            : "shard routing to fail does not exist in routing table, expected: "
                + failedShard
                + " but was: "
                + getByAllocationId(failedShard.shardId(), failedShard.allocationId().getId());
        // if this is a primary, fail initializing replicas first (otherwise we move RoutingNodes into an inconsistent state)
        if (failedShard.primary()) {
            List<ShardRouting> assignedShards = assignedShards(failedShard.shardId());
            if (assignedShards.isEmpty() == false) {
                // copy list to prevent ConcurrentModificationException
                for (ShardRouting routing : new ArrayList<>(assignedShards)) {
                    if (routing.primary() == false && routing.initializing()) {
                        // re-resolve replica as earlier iteration could have changed source/target of replica relocation
                        ShardRouting replicaShard = getByAllocationId(routing.shardId(), routing.allocationId().getId());
                        assert replicaShard != null : "failed to re-resolve " + routing + " when failing replicas";
                        UnassignedInfo primaryFailedUnassignedInfo = new UnassignedInfo(
                            UnassignedInfo.Reason.PRIMARY_FAILED,
                            "primary failed while replica initializing",
                            null,
                            0,
                            unassignedInfo.unassignedTimeNanos(),
                            unassignedInfo.unassignedTimeMillis(),
                            false,
                            AllocationStatus.NO_ATTEMPT,
                            Collections.emptySet(),
                            routing.currentNodeId()
                        );
                        // recurse: fail each initializing replica before failing the primary itself
                        failShard(replicaShard, primaryFailedUnassignedInfo, routingChangesObserver);
                    }
                }
            }
        }
        if (failedShard.relocating()) {
            // find the shard that is initializing on the target node
            ShardRouting targetShard = getByAllocationId(failedShard.shardId(), failedShard.allocationId().getRelocationId());
            assert targetShard.isRelocationTargetOf(failedShard);
            if (failedShard.primary()) {
                // cancel and remove target shard
                remove(targetShard);
                routingChangesObserver.shardFailed(targetShard, unassignedInfo);
            } else {
                // promote to initializing shard without relocation source and ensure that removed relocation source
                // is not added back as unassigned shard
                removeRelocationSource(targetShard);
                routingChangesObserver.relocationSourceRemoved(targetShard);
            }
        }
        // fail actual shard
        if (failedShard.initializing()) {
            if (failedShard.relocatingNodeId() == null) {
                if (failedShard.primary()) {
                    // promote active replica to primary if active replica exists (only the case for shadow replicas)
                    unassignPrimaryAndPromoteActiveReplicaIfExists(failedShard, unassignedInfo, routingChangesObserver);
                } else {
                    // initializing shard that is not relocation target, just move to unassigned
                    moveToUnassigned(failedShard, unassignedInfo);
                }
            } else {
                // The shard is a target of a relocating shard. In that case we only need to remove the target shard and cancel the source
                // relocation. No shard is left unassigned
                ShardRouting sourceShard = getByAllocationId(failedShard.shardId(), failedShard.allocationId().getRelocationId());
                assert sourceShard.isRelocationSourceOf(failedShard);
                cancelRelocation(sourceShard);
                remove(failedShard);
            }
        } else {
            assert failedShard.active();
            if (failedShard.primary()) {
                // promote active replica to primary if active replica exists
                unassignPrimaryAndPromoteActiveReplicaIfExists(failedShard, unassignedInfo, routingChangesObserver);
            } else {
                if (failedShard.relocating()) {
                    remove(failedShard);
                } else {
                    moveToUnassigned(failedShard, unassignedInfo);
                }
            }
        }
        routingChangesObserver.shardFailed(failedShard, unassignedInfo);
        assert node(failedShard.currentNodeId()).getByShardId(failedShard.shardId()) == null
            : "failedShard " + failedShard + " was matched but wasn't removed";
    }
    /**
     * Unassigns the failed primary. If an active promotable replica exists it is demoted/promoted in its place;
     * otherwise all remaining (unpromotable) replicas are moved to unassigned as well.
     */
    private void unassignPrimaryAndPromoteActiveReplicaIfExists(
        ShardRouting failedPrimary,
        UnassignedInfo unassignedInfo,
        RoutingChangesObserver routingChangesObserver
    ) {
        assert failedPrimary.primary();
        ShardRouting replicaToPromote = activePromotableReplicaWithHighestVersion(failedPrimary.shardId());
        if (replicaToPromote == null) {
            moveToUnassigned(failedPrimary, unassignedInfo);
            // no promotable replica: remaining assigned copies are unpromotable and must be unassigned too
            for (ShardRouting unpromotableReplica : List.copyOf(assignedShards(failedPrimary.shardId()))) {
                assert unpromotableReplica.primary() == false : unpromotableReplica;
                assert unpromotableReplica.isPromotableToPrimary() == false : unpromotableReplica;
                moveToUnassigned(
                    unpromotableReplica,
                    new UnassignedInfo(
                        UnassignedInfo.Reason.UNPROMOTABLE_REPLICA,
                        unassignedInfo.message(),
                        unassignedInfo.failure(),
                        0,
                        unassignedInfo.unassignedTimeNanos(),
                        unassignedInfo.unassignedTimeMillis(),
                        false, // TODO debatable, but do we want to delay reassignment of unpromotable replicas tho?
                        AllocationStatus.NO_ATTEMPT,
                        Set.of(),
                        unpromotableReplica.currentNodeId()
                    )
                );
            }
        } else {
            movePrimaryToUnassignedAndDemoteToReplica(failedPrimary, unassignedInfo);
            promoteReplicaToPrimary(replicaToPromote, routingChangesObserver);
        }
    }
    /** Promotes the given active replica to primary and notifies the observer of the promotion. */
    private void promoteReplicaToPrimary(ShardRouting activeReplica, RoutingChangesObserver routingChangesObserver) {
        // if the activeReplica was relocating before this call to failShard, its relocation was cancelled earlier when we
        // failed initializing replica shards (and moved replica relocation source back to started)
        assert activeReplica.started() : "replica relocation should have been cancelled: " + activeReplica;
        promoteActiveReplicaShardToPrimary(activeReplica);
        routingChangesObserver.replicaPromoted(activeReplica);
    }
    /**
     * Mark a shard as started and adjusts internal statistics.
     *
     * @param shard the initializing shard to start
     * @param expectedShardSize expected size to record on the started shard
     * @return the started shard
     */
    private ShardRouting started(ShardRouting shard, long expectedShardSize) {
        assert shard.initializing() : "expected an initializing shard " + shard;
        if (shard.relocatingNodeId() == null) {
            // if this is not a target shard for relocation, we need to update statistics
            inactiveShardCount--;
            if (shard.primary()) {
                inactivePrimaryCount--;
            }
        }
        removeRecovery(shard);
        ShardRouting startedShard = shard.moveToStarted(expectedShardSize);
        updateAssigned(shard, startedShard);
        return startedShard;
    }
    /**
     * Cancels the relocation of a shard; the given shard must currently be relocating.
     *
     * @return the shard after cancelling relocation
     */
    private ShardRouting cancelRelocation(ShardRouting shard) {
        relocatingShards--;
        // keep the frozen-node relocation counter in sync with the global one
        if (isDedicatedFrozenNode(shard.currentNodeId())) {
            relocatingFrozenShards--;
        }
        ShardRouting cancelledShard = shard.cancelRelocation();
        updateAssigned(shard, cancelledShard);
        return cancelledShard;
    }
    /**
     * moves the assigned replica shard to primary.
     *
     * @param replicaShard the active replica shard to be promoted to primary
     * @return the resulting primary shard
     */
    private ShardRouting promoteActiveReplicaShardToPrimary(ShardRouting replicaShard) {
        assert replicaShard.active() : "non-active shard cannot be promoted to primary: " + replicaShard;
        assert replicaShard.primary() == false : "primary shard cannot be promoted to primary: " + replicaShard;
        ShardRouting primaryShard = replicaShard.moveActiveReplicaToPrimary();
        updateAssigned(replicaShard, primaryShard);
        return primaryShard;
    }
private static final List<ShardRouting> EMPTY = Collections.emptyList();
    /**
     * Cancels the given shard from the Routing nodes internal statistics and cancels
     * the relocation if the shard is relocating.
     */
    private void remove(ShardRouting shard) {
        assert shard.unassigned() == false : "only assigned shards can be removed here (" + shard + ")";
        node(shard.currentNodeId()).remove(shard);
        if (shard.initializing() && shard.relocatingNodeId() == null) {
            // a plain initializing shard was counted as inactive; undo that
            inactiveShardCount--;
            assert inactiveShardCount >= 0;
            if (shard.primary()) {
                inactivePrimaryCount--;
            }
        } else if (shard.relocating()) {
            shard = cancelRelocation(shard);
        }
        assignedShardsRemove(shard);
        if (shard.initializing()) {
            removeRecovery(shard);
        }
    }
    /**
     * Removes relocation source of an initializing non-primary shard. This allows the replica shard to continue recovery from
     * the primary even though its non-primary relocation source has failed.
     *
     * @return the shard routing with the relocation source information removed
     */
    private ShardRouting removeRelocationSource(ShardRouting shard) {
        assert shard.isRelocationTarget() : "only relocation target shards can have their relocation source removed (" + shard + ")";
        ShardRouting relocationMarkerRemoved = shard.removeRelocationSource();
        updateAssigned(shard, relocationMarkerRemoved);
        inactiveShardCount++; // relocation targets are not counted as inactive shards whereas initializing shards are
        return relocationMarkerRemoved;
    }
    /** Registers an assigned shard in the per-shard-id lookup map; the shard must not already be present. */
    private void assignedShardsAdd(ShardRouting shard) {
        assert shard.unassigned() == false : "unassigned shard " + shard + " cannot be added to list of assigned shards";
        List<ShardRouting> shards = assignedShards.computeIfAbsent(shard.shardId(), k -> new ArrayList<>());
        assert assertInstanceNotInList(shard, shards) : "shard " + shard + " cannot appear twice in list of assigned shards";
        shards.add(shard);
    }
private static boolean assertInstanceNotInList(ShardRouting shard, List<ShardRouting> shards) {
for (ShardRouting s : shards) {
assert s != shard;
}
return true;
}
    /** Removes the given shard instance (matched by identity) from the per-shard-id lookup map. */
    private void assignedShardsRemove(ShardRouting shard) {
        final List<ShardRouting> replicaSet = assignedShards.get(shard.shardId());
        if (replicaSet != null) {
            final Iterator<ShardRouting> iterator = replicaSet.iterator();
            while (iterator.hasNext()) {
                // yes we check identity here
                if (shard == iterator.next()) {
                    iterator.remove();
                    return;
                }
            }
        }
        assert false : "No shard found to remove";
    }
    /** Reinitializes an initializing, non-relocation-target replica and swaps it into the assigned-shards bookkeeping. */
    private ShardRouting reinitReplica(ShardRouting shard) {
        assert shard.primary() == false : "shard must be a replica: " + shard;
        assert shard.initializing() : "can only reinitialize an initializing replica: " + shard;
        assert shard.isRelocationTarget() == false : "replication target cannot be reinitialized: " + shard;
        ShardRouting reinitializedShard = shard.reinitializeReplicaShard();
        updateAssigned(shard, reinitializedShard);
        return reinitializedShard;
    }
    /**
     * Replaces {@code oldShard} with {@code newShard} both on its routing node and in the per-shard-id
     * lookup map. Both routings must share the same shard id and node.
     */
    private void updateAssigned(ShardRouting oldShard, ShardRouting newShard) {
        assert oldShard.shardId().equals(newShard.shardId())
            : "can only update " + oldShard + " by shard with same shard id but was " + newShard;
        assert oldShard.unassigned() == false && newShard.unassigned() == false
            : "only assigned shards can be updated in list of assigned shards (prev: " + oldShard + ", new: " + newShard + ")";
        assert oldShard.currentNodeId().equals(newShard.currentNodeId())
            : "shard to update " + oldShard + " can only update " + oldShard + " by shard assigned to same node but was " + newShard;
        node(oldShard.currentNodeId()).update(oldShard, newShard);
        List<ShardRouting> shardsWithMatchingShardId = assignedShards.computeIfAbsent(oldShard.shardId(), k -> new ArrayList<>());
        int previousShardIndex = shardsWithMatchingShardId.indexOf(oldShard);
        assert previousShardIndex >= 0 : "shard to update " + oldShard + " does not exist in list of assigned shards";
        shardsWithMatchingShardId.set(previousShardIndex, newShard);
    }
    /** Removes an assigned shard from the routing table and re-adds it as unassigned with the given info. */
    private ShardRouting moveToUnassigned(ShardRouting shard, UnassignedInfo unassignedInfo) {
        assert shard.unassigned() == false : "only assigned shards can be moved to unassigned (" + shard + ")";
        remove(shard);
        ShardRouting unassigned = shard.moveToUnassigned(unassignedInfo);
        unassignedShards.add(unassigned);
        return unassigned;
    }
    /**
     * Moves assigned primary to unassigned and demotes it to a replica.
     * Used in conjunction with {@link #promoteActiveReplicaShardToPrimary} when an active replica is promoted to primary.
     *
     * @return the resulting unassigned replica routing
     */
    private ShardRouting movePrimaryToUnassignedAndDemoteToReplica(ShardRouting shard, UnassignedInfo unassignedInfo) {
        assert shard.unassigned() == false : "only assigned shards can be moved to unassigned (" + shard + ")";
        assert shard.primary() : "only primary can be demoted to replica (" + shard + ")";
        remove(shard);
        ShardRouting unassigned = shard.moveToUnassigned(unassignedInfo).moveUnassignedFromPrimary();
        unassignedShards.add(unassigned);
        return unassigned;
    }
/**
* Returns the number of routing nodes
*/
public int size() {
return nodesToShards.size();
}
/**
* @return collection of {@link ShardRouting}s, keyed by shard ID.
*/
public Map<ShardId, List<ShardRouting>> getAssignedShards() {
return Collections.unmodifiableMap(assignedShards);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
RoutingNodes that = (RoutingNodes) o;
return readOnly == that.readOnly
&& inactivePrimaryCount == that.inactivePrimaryCount
&& inactiveShardCount == that.inactiveShardCount
&& relocatingShards == that.relocatingShards
&& relocatingFrozenShards == that.relocatingFrozenShards
&& nodesToShards.equals(that.nodesToShards)
&& unassignedShards.equals(that.unassignedShards)
&& assignedShards.equals(that.assignedShards)
&& attributeValuesByAttribute.equals(that.attributeValuesByAttribute)
&& recoveriesPerNode.equals(that.recoveriesPerNode);
}
    // Hashes exactly the fields compared in equals(), as required by the equals/hashCode contract.
    @Override
    public int hashCode() {
        return Objects.hash(
            nodesToShards,
            unassignedShards,
            assignedShards,
            readOnly,
            inactivePrimaryCount,
            inactiveShardCount,
            relocatingShards,
            relocatingFrozenShards,
            attributeValuesByAttribute,
            recoveriesPerNode
        );
    }
public static final
|
RoutingNodes
|
java
|
apache__flink
|
flink-core-api/src/test/java/org/apache/flink/api/common/watermark/WatermarkDeclarationsTest.java
|
{
"start": 998,
"end": 4645
}
|
class ____ {
private static final String DEFAULT_WATERMARK_IDENTIFIER = "default";
@Test
void testCreatedLongWatermarkDeclarationDefaultValue() {
LongWatermarkDeclaration watermarkDeclaration =
WatermarkDeclarations.newBuilder(DEFAULT_WATERMARK_IDENTIFIER).typeLong().build();
assertThat(watermarkDeclaration.getIdentifier()).isEqualTo(DEFAULT_WATERMARK_IDENTIFIER);
assertThat(watermarkDeclaration.getCombinationPolicy().getWatermarkCombinationFunction())
.isEqualTo(WatermarkCombinationFunction.NumericWatermarkCombinationFunction.MIN);
assertThat(watermarkDeclaration.getCombinationPolicy().isCombineWaitForAllChannels())
.isEqualTo(false);
assertThat(watermarkDeclaration.getDefaultHandlingStrategy())
.isEqualTo(WatermarkHandlingStrategy.FORWARD);
}
@Test
void testCreatedBoolWatermarkDeclarationDefaultValue() {
BoolWatermarkDeclaration watermarkDeclaration =
WatermarkDeclarations.newBuilder(DEFAULT_WATERMARK_IDENTIFIER).typeBool().build();
assertThat(watermarkDeclaration.getIdentifier()).isEqualTo(DEFAULT_WATERMARK_IDENTIFIER);
assertThat(watermarkDeclaration.getCombinationPolicy().getWatermarkCombinationFunction())
.isEqualTo(WatermarkCombinationFunction.BoolWatermarkCombinationFunction.AND);
assertThat(watermarkDeclaration.getCombinationPolicy().isCombineWaitForAllChannels())
.isEqualTo(false);
assertThat(watermarkDeclaration.getDefaultHandlingStrategy())
.isEqualTo(WatermarkHandlingStrategy.FORWARD);
}
@Test
void testBuildLongWatermarkDeclaration() {
LongWatermarkDeclaration watermarkDeclaration =
WatermarkDeclarations.newBuilder(DEFAULT_WATERMARK_IDENTIFIER)
.typeLong()
.combineFunctionMax()
.combineWaitForAllChannels(true)
.defaultHandlingStrategyIgnore()
.build();
assertThat(watermarkDeclaration.getIdentifier()).isEqualTo(DEFAULT_WATERMARK_IDENTIFIER);
assertThat(watermarkDeclaration.getCombinationPolicy().getWatermarkCombinationFunction())
.isEqualTo(WatermarkCombinationFunction.NumericWatermarkCombinationFunction.MAX);
assertThat(watermarkDeclaration.getCombinationPolicy().isCombineWaitForAllChannels())
.isEqualTo(true);
assertThat(watermarkDeclaration.getDefaultHandlingStrategy())
.isEqualTo(WatermarkHandlingStrategy.IGNORE);
}
@Test
void testBuildBoolWatermarkDeclaration() {
BoolWatermarkDeclaration watermarkDeclaration =
WatermarkDeclarations.newBuilder(DEFAULT_WATERMARK_IDENTIFIER)
.typeBool()
.combineFunctionOR()
.combineWaitForAllChannels(true)
.defaultHandlingStrategyIgnore()
.build();
assertThat(watermarkDeclaration.getIdentifier()).isEqualTo(DEFAULT_WATERMARK_IDENTIFIER);
assertThat(watermarkDeclaration.getCombinationPolicy().getWatermarkCombinationFunction())
.isEqualTo(WatermarkCombinationFunction.BoolWatermarkCombinationFunction.OR);
assertThat(watermarkDeclaration.getCombinationPolicy().isCombineWaitForAllChannels())
.isEqualTo(true);
assertThat(watermarkDeclaration.getDefaultHandlingStrategy())
.isEqualTo(WatermarkHandlingStrategy.IGNORE);
}
}
|
WatermarkDeclarationsTest
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/join/stream/utils/JoinInputSideSpec.java
|
{
"start": 1355,
"end": 4781
}
|
class ____ implements Serializable {
private static final long serialVersionUID = 3178408082297179959L;
private final boolean inputSideHasUniqueKey;
private final boolean joinKeyContainsUniqueKey;
@Nullable private final InternalTypeInfo<RowData> uniqueKeyType;
@Nullable private final KeySelector<RowData, RowData> uniqueKeySelector;
private JoinInputSideSpec(
boolean joinKeyContainsUniqueKey,
@Nullable InternalTypeInfo<RowData> uniqueKeyType,
@Nullable KeySelector<RowData, RowData> uniqueKeySelector) {
this.inputSideHasUniqueKey = uniqueKeyType != null && uniqueKeySelector != null;
this.joinKeyContainsUniqueKey = joinKeyContainsUniqueKey;
this.uniqueKeyType = uniqueKeyType;
this.uniqueKeySelector = uniqueKeySelector;
}
/** Returns true if the input has unique key, otherwise false. */
public boolean hasUniqueKey() {
return inputSideHasUniqueKey;
}
/** Returns true if the join key contains the unique key of the input. */
public boolean joinKeyContainsUniqueKey() {
return joinKeyContainsUniqueKey;
}
/**
* Returns the {@link TypeInformation} of the unique key. Returns null if the input hasn't
* unique key.
*/
@Nullable
public InternalTypeInfo<RowData> getUniqueKeyType() {
return uniqueKeyType;
}
/**
* Returns the {@link KeySelector} to extract unique key from the input row. Returns null if the
* input hasn't unique key.
*/
@Nullable
public KeySelector<RowData, RowData> getUniqueKeySelector() {
return uniqueKeySelector;
}
/**
* Creates a {@link JoinInputSideSpec} that the input has an unique key.
*
* @param uniqueKeyType type information of the unique key
* @param uniqueKeySelector key selector to extract unique key from the input row
*/
public static JoinInputSideSpec withUniqueKey(
InternalTypeInfo<RowData> uniqueKeyType,
KeySelector<RowData, RowData> uniqueKeySelector) {
checkNotNull(uniqueKeyType);
checkNotNull(uniqueKeySelector);
return new JoinInputSideSpec(false, uniqueKeyType, uniqueKeySelector);
}
/**
* Creates a {@link JoinInputSideSpec} that input has an unique key and the unique key is
* contained by the join key.
*
* @param uniqueKeyType type information of the unique key
* @param uniqueKeySelector key selector to extract unique key from the input row
*/
public static JoinInputSideSpec withUniqueKeyContainedByJoinKey(
InternalTypeInfo<RowData> uniqueKeyType,
KeySelector<RowData, RowData> uniqueKeySelector) {
checkNotNull(uniqueKeyType);
checkNotNull(uniqueKeySelector);
return new JoinInputSideSpec(true, uniqueKeyType, uniqueKeySelector);
}
/** Creates a {@link JoinInputSideSpec} that input hasn't any unique keys. */
public static JoinInputSideSpec withoutUniqueKey() {
return new JoinInputSideSpec(false, null, null);
}
@Override
public String toString() {
if (inputSideHasUniqueKey) {
if (joinKeyContainsUniqueKey) {
return "JoinKeyContainsUniqueKey";
} else {
return "HasUniqueKey";
}
} else {
return "NoUniqueKey";
}
}
}
|
JoinInputSideSpec
|
java
|
quarkusio__quarkus
|
independent-projects/tools/codestarts/src/main/java/io/quarkus/devtools/codestarts/CodestartCatalog.java
|
{
"start": 78,
"end": 249
}
|
interface ____<T extends CodestartProjectInput> {
Collection<Codestart> getCodestarts();
CodestartProjectDefinition createProject(T projectInput);
}
|
CodestartCatalog
|
java
|
apache__flink
|
flink-streaming-java/src/main/java/org/apache/flink/streaming/util/serialize/FlinkChillPackageRegistrar.java
|
{
"start": 1673,
"end": 3160
}
|
class ____ implements ChillSerializerRegistrar {
private static final int FIRST_REGISTRATION_ID = 73;
@Override
public int getNextRegistrationId() {
return 85;
}
@Override
public void registerSerializers(Kryo kryo) {
//noinspection ArraysAsListWithZeroOrOneArgument
new RegistrationHelper(FIRST_REGISTRATION_ID, kryo)
.register(
Arrays.asList("").getClass(),
new DefaultSerializers.ArraysAsListSerializer())
.register(BitSet.class, new DefaultSerializers.BitSetSerializer())
.register(PriorityQueue.class, new DefaultSerializers.PriorityQueueSerializer())
.register(Pattern.class, new DefaultSerializers.PatternSerializer())
.register(Date.class, new DefaultSerializers.DateSerializer())
.register(Time.class, new DefaultSerializers.DateSerializer())
.register(Timestamp.class, new DefaultSerializers.TimestampSerializer())
.register(URI.class, new DefaultSerializers.URISerializer())
.register(InetSocketAddress.class, new InetSocketAddressSerializer())
.register(UUID.class, new DefaultSerializers.UUIDSerializer())
.register(Locale.class, new DefaultSerializers.LocaleSerializer())
.register(SimpleDateFormat.class, new JavaSerializer());
}
private static final
|
FlinkChillPackageRegistrar
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/stat/JdbcStatManagerMBean.java
|
{
"start": 741,
"end": 986
}
|
interface ____ {
TabularData getDataSourceList() throws JMException;
TabularData getSqlList() throws JMException;
TabularData getConnectionList() throws JMException;
void reset();
long getResetCount();
}
|
JdbcStatManagerMBean
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResultTests.java
|
{
"start": 1145,
"end": 3441
}
|
class ____ extends AbstractXContentSerializingTestCase<SearchProfileDfsPhaseResult> {
static SearchProfileDfsPhaseResult createTestItem() {
return new SearchProfileDfsPhaseResult(
randomBoolean() ? null : ProfileResultTests.createTestItem(1),
randomBoolean() ? null : randomList(1, 10, QueryProfileShardResultTests::createTestItem)
);
}
@Override
protected SearchProfileDfsPhaseResult createTestInstance() {
return createTestItem();
}
@Override
protected SearchProfileDfsPhaseResult mutateInstance(SearchProfileDfsPhaseResult instance) {
return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
}
@Override
protected Reader<SearchProfileDfsPhaseResult> instanceReader() {
return SearchProfileDfsPhaseResult::new;
}
@Override
protected SearchProfileDfsPhaseResult doParseInstance(XContentParser parser) throws IOException {
return SearchResponseUtils.parseProfileDfsPhaseResult(parser);
}
public void testCombineQueryProfileShardResults() {
assertThat(new SearchProfileDfsPhaseResult(null, null).combineQueryProfileShardResults(), is(nullValue()));
List<QueryProfileShardResult> resultList = randomList(5, 5, QueryProfileShardResultTests::createTestItem);
SearchProfileDfsPhaseResult result = new SearchProfileDfsPhaseResult(null, resultList);
QueryProfileShardResult queryProfileShardResult = result.combineQueryProfileShardResults();
assertThat(
queryProfileShardResult.getRewriteTime(),
equalTo(resultList.stream().mapToLong(QueryProfileShardResult::getRewriteTime).sum())
);
assertThat(
queryProfileShardResult.getCollectorResult().getTime(),
equalTo(resultList.stream().map(QueryProfileShardResult::getCollectorResult).mapToLong(CollectorResult::getTime).sum())
);
assertThat(queryProfileShardResult.getCollectorResult().getProfiledChildren().size(), equalTo(resultList.size()));
assertThat(
queryProfileShardResult.getQueryResults().size(),
equalTo((int) resultList.stream().mapToLong(q -> q.getQueryResults().size()).sum())
);
}
}
|
SearchProfileDfsPhaseResultTests
|
java
|
apache__kafka
|
core/src/test/java/kafka/server/share/SharePartitionCacheTest.java
|
{
"start": 1251,
"end": 5144
}
|
class ____ {
private static final String GROUP_ID = "test-group";
private static final Uuid TOPIC_ID = Uuid.randomUuid();
private static final TopicIdPartition TOPIC_ID_PARTITION = new TopicIdPartition(TOPIC_ID, new TopicPartition("test-topic", 1));
private static final SharePartitionKey SHARE_PARTITION_KEY = new SharePartitionKey(GROUP_ID, TOPIC_ID_PARTITION);
private SharePartitionCache cache;
@BeforeEach
public void setUp() {
cache = new SharePartitionCache();
}
@Test
public void testComputeIfAbsent() {
// Test computeIfAbsent when key doesn't exist
SharePartition sharePartition = Mockito.mock(SharePartition.class);
SharePartition newPartition = cache.computeIfAbsent(SHARE_PARTITION_KEY, key -> sharePartition);
assertEquals(sharePartition, newPartition);
assertEquals(sharePartition, cache.get(SHARE_PARTITION_KEY));
assertEquals(1, cache.groups().size());
// Test computeIfAbsent when key exists
SharePartition anotherPartition = Mockito.mock(SharePartition.class);
SharePartition existingPartition = cache.computeIfAbsent(SHARE_PARTITION_KEY, key -> anotherPartition);
assertEquals(sharePartition, existingPartition);
assertEquals(sharePartition, cache.get(SHARE_PARTITION_KEY));
assertEquals(1, cache.groups().size());
}
@Test
public void testRemoveGroup() {
// Add partitions for multiple groups
String group1 = "group1";
String group2 = "group2";
TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("test-topic1", 1));
TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("test-topic2", 2));
TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("test-topic3", 3));
// Group1 with 2 partitions.
SharePartitionKey key1 = new SharePartitionKey(group1, tp1);
SharePartitionKey key2 = new SharePartitionKey(group1, tp2);
// Group2 with 1 partition.
SharePartitionKey key3 = new SharePartitionKey(group2, tp3);
SharePartition sp1 = Mockito.mock(SharePartition.class);
SharePartition sp2 = Mockito.mock(SharePartition.class);
SharePartition sp3 = Mockito.mock(SharePartition.class);
// Test computeIfAbsent adds to group map
cache.computeIfAbsent(key1, k -> sp1);
cache.computeIfAbsent(key2, k -> sp2);
cache.computeIfAbsent(key3, k -> sp3);
// Verify partitions are in the cache.
assertEquals(3, cache.size());
assertTrue(cache.containsKey(key1));
assertTrue(cache.containsKey(key2));
assertTrue(cache.containsKey(key3));
// Verify groups are in the group map.
assertEquals(2, cache.groups().size());
assertTrue(cache.groups().containsKey(group1));
assertTrue(cache.groups().containsKey(group2));
// Verify topic partitions are in the group map.
assertEquals(2, cache.groups().get(group1).size());
assertEquals(1, cache.groups().get(group2).size());
assertEquals(1, cache.groups().get(group1).stream().filter(tp -> tp.equals(tp1)).count());
assertEquals(1, cache.groups().get(group1).stream().filter(tp -> tp.equals(tp2)).count());
assertEquals(1, cache.groups().get(group2).stream().filter(tp -> tp.equals(tp3)).count());
// Remove one group and verify only its partitions are removed.
cache.topicIdPartitionsForGroup(group1).forEach(
topicIdPartition -> cache.remove(new SharePartitionKey(group1, topicIdPartition)));
assertEquals(1, cache.size());
assertTrue(cache.containsKey(key3));
assertEquals(1, cache.groups().size());
assertTrue(cache.groups().containsKey(group2));
}
}
|
SharePartitionCacheTest
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/processor/EnrichVariableTest.java
|
{
"start": 973,
"end": 3583
}
|
class ____ extends ContextTestSupport {
@Test
public void testSend() throws Exception {
getMockEndpoint("mock:before").expectedBodiesReceived("World");
getMockEndpoint("mock:before").expectedVariableReceived("hello", "Camel");
getMockEndpoint("mock:result").expectedBodiesReceived("Bye Camel");
getMockEndpoint("mock:result").expectedVariableReceived("hello", "Camel");
template.sendBody("direct:send", "World");
assertMockEndpointsSatisfied();
}
@Test
public void testReceive() throws Exception {
getMockEndpoint("mock:after").expectedBodiesReceived("World");
getMockEndpoint("mock:after").expectedVariableReceived("bye", "Bye World");
getMockEndpoint("mock:result").expectedBodiesReceived("Bye World");
getMockEndpoint("mock:result").expectedVariableReceived("bye", "Bye World");
template.sendBody("direct:receive", "World");
assertMockEndpointsSatisfied();
}
@Test
public void testSendAndReceive() throws Exception {
getMockEndpoint("mock:before").expectedBodiesReceived("World");
getMockEndpoint("mock:before").expectedVariableReceived("hello", "Camel");
getMockEndpoint("mock:result").expectedBodiesReceived("World");
getMockEndpoint("mock:result").expectedVariableReceived("bye", "Bye Camel");
template.sendBody("direct:sendAndReceive", "World");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:send")
.setVariable("hello", simple("Camel"))
.to("mock:before")
.enrich().constant("direct:foo").variableSend("hello")
.to("mock:result");
from("direct:receive")
.enrich().constant("direct:foo").variableReceive("bye")
.to("mock:after")
.setBody(simple("${variable:bye}"))
.to("mock:result");
from("direct:sendAndReceive")
.setVariable("hello", simple("Camel"))
.to("mock:before")
.enrich().constant("direct:foo").variableSend("hello").variableReceive("bye")
.to("mock:result");
from("direct:foo")
.transform().simple("Bye ${body}");
}
};
}
}
|
EnrichVariableTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/util/BytesHelperTest.java
|
{
"start": 504,
"end": 1412
}
|
class ____ {
@Test
public void testAsLongNullArray() {
assertEquals(0, BytesHelper.asLong(null, 0));
}
@Test()
@ExpectedException(IllegalArgumentException.class)
public void testAsLongArrayTooSmall() {
byte[] src = new byte[16];
assertEquals(0, BytesHelper.asLong(src, 9));
}
@Test
public void testAsLong() {
byte[] src = new byte[] {-92, -120, -59, -64, 97, 55, -41, -55, 64, -43, 20, 109, -7, -95, 77, -115};
assertEquals(-6590800624601278519L, BytesHelper.asLong(src, 0));
assertEquals(4671662651038846349L, BytesHelper.asLong(src, 8));
}
@Test
public void testfromLong() {
byte[] expected = new byte[] {-92, -120, -59, -64, 97, 55, -41, -55, 64, -43, 20, 109, -7, -95, 77, -115};
byte[] dest = new byte[16];
BytesHelper.fromLong(-6590800624601278519L, dest, 0);
BytesHelper.fromLong(4671662651038846349L, dest, 8);
assertArrayEquals(expected, dest);
}
}
|
BytesHelperTest
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/script/BytesRefProducer.java
|
{
"start": 752,
"end": 811
}
|
interface ____ {
BytesRef toBytesRef();
}
|
BytesRefProducer
|
java
|
spring-projects__spring-security
|
core/src/main/java/org/springframework/security/authentication/password/CompromisedPasswordChecker.java
|
{
"start": 852,
"end": 1326
}
|
interface ____ {
/**
* Check whether the password is compromised. If password is null, then the return
* value must be false for {@link CompromisedPasswordDecision#isCompromised()} since a
* null password represents no password (e.g. the user leverages Passkeys instead).
* @param password the password to check
* @return a non-null {@link CompromisedPasswordDecision}
*/
CompromisedPasswordDecision check(@Nullable String password);
}
|
CompromisedPasswordChecker
|
java
|
quarkusio__quarkus
|
extensions/oidc-token-propagation-reactive/deployment/src/main/java/io/quarkus/oidc/token/propagation/reactive/deployment/OidcTokenPropagationReactiveBuildTimeConfig.java
|
{
"start": 357,
"end": 1109
}
|
interface ____ {
/**
* If the OIDC Token Reactive Propagation is enabled.
*/
@WithDefault("true")
boolean enabled();
/**
* Whether the token propagation is enabled during the `SecurityIdentity` augmentation.
*
* For example, you may need to use a REST client from `SecurityIdentityAugmentor`
* to propagate the current token to acquire additional roles for the `SecurityIdentity`.
*
* Note, this feature relies on a duplicated context. More information about Vert.x duplicated
* context can be found in xref:duplicated-context.adoc[this guide].
*
* @asciidoclet
*/
@WithDefault("false")
boolean enabledDuringAuthentication();
}
|
OidcTokenPropagationReactiveBuildTimeConfig
|
java
|
quarkusio__quarkus
|
integration-tests/maven/src/test/resources-filtered/projects/multimodule-parent-dep/level2/submodule/src/main/java/org/acme/level2/submodule/Level2Service.java
|
{
"start": 185,
"end": 360
}
|
class ____ {
@Inject
Level1Service level1Service;
public String getGreetingFromLevel1() {
return level1Service.getGreetingFromLevel0();
}
}
|
Level2Service
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ADataBlocks.java
|
{
"start": 10598,
"end": 11569
}
|
class ____ the state but does no writing.
* @param buffer buffer
* @param offset offset
* @param length length of write
* @return number of bytes written
* @throws IOException trouble
*/
int write(byte[] buffer, int offset, int length) throws IOException {
verifyState(Writing);
checkArgument(buffer != null, "Null buffer");
checkArgument(length >= 0, "length is negative");
checkArgument(offset >= 0, "offset is negative");
checkArgument(
!(buffer.length - offset < length),
"buffer shorter than amount of data to write");
return 0;
}
/**
* Flush the output.
* Only valid in the state {@code Writing}.
* In the base class, this is a no-op
* @throws IOException any IO problem.
*/
void flush() throws IOException {
verifyState(Writing);
}
/**
* Switch to the upload state and return a stream for uploading.
* Base
|
verifies
|
java
|
apache__flink
|
flink-tests/src/test/java/org/apache/flink/test/runtime/NetworkStackThroughputITCase.java
|
{
"start": 8411,
"end": 15525
}
|
class ____ implements IOReadableWritable {
private static final int RECORD_SIZE = 128;
private final byte[] buf = new byte[RECORD_SIZE];
public SpeedTestRecord() {
for (int i = 0; i < RECORD_SIZE; ++i) {
this.buf[i] = (byte) (i % 128);
}
}
@Override
public void write(DataOutputView out) throws IOException {
out.write(this.buf);
}
@Override
public void read(DataInputView in) throws IOException {
in.readFully(this.buf);
}
}
// ------------------------------------------------------------------------
@Test
public void testThroughput() throws Exception {
Object[][] configParams =
new Object[][] {
new Object[] {1, false, false, false, 4, 2},
new Object[] {1, true, false, false, 4, 2},
new Object[] {1, true, true, false, 4, 2},
new Object[] {1, true, false, true, 4, 2},
new Object[] {2, true, false, false, 4, 2},
new Object[] {4, true, false, false, 4, 2},
new Object[] {4, true, false, false, 8, 4},
};
for (Object[] p : configParams) {
final int dataVolumeGb = (Integer) p[0];
final boolean useForwarder = (Boolean) p[1];
final boolean isSlowSender = (Boolean) p[2];
final boolean isSlowReceiver = (Boolean) p[3];
final int parallelism = (Integer) p[4];
final int numSlotsPerTaskManager = (Integer) p[5];
if (parallelism % numSlotsPerTaskManager != 0) {
throw new RuntimeException(
"The test case defines a parallelism that is not a multiple of the slots per task manager.");
}
final int numTaskManagers = parallelism / numSlotsPerTaskManager;
final MiniClusterWithClientResource cluster =
new MiniClusterWithClientResource(
new MiniClusterResourceConfiguration.Builder()
.setNumberTaskManagers(numTaskManagers)
.setNumberSlotsPerTaskManager(numSlotsPerTaskManager)
.build());
cluster.before();
try {
System.out.println(
String.format(
"Running test with parameters: dataVolumeGB=%s, useForwarder=%s, isSlowSender=%s, isSlowReceiver=%s, parallelism=%s, numSlotsPerTM=%s",
dataVolumeGb,
useForwarder,
isSlowSender,
isSlowReceiver,
parallelism,
numSlotsPerTaskManager));
testProgram(
cluster,
dataVolumeGb,
useForwarder,
isSlowSender,
isSlowReceiver,
parallelism);
} finally {
cluster.after();
}
}
}
private void testProgram(
final MiniClusterWithClientResource cluster,
final int dataVolumeGb,
final boolean useForwarder,
final boolean isSlowSender,
final boolean isSlowReceiver,
final int parallelism)
throws Exception {
final ClusterClient<?> client = cluster.getClusterClient();
final JobGraph jobGraph =
createJobGraph(
dataVolumeGb, useForwarder, isSlowSender, isSlowReceiver, parallelism);
final JobResult jobResult =
client.submitJob(jobGraph).thenCompose(client::requestJobResult).get();
Assert.assertFalse(jobResult.getSerializedThrowable().isPresent());
final long dataVolumeMbit = dataVolumeGb * 8192;
final long runtimeSecs =
TimeUnit.SECONDS.convert(jobResult.getNetRuntime(), TimeUnit.MILLISECONDS);
final int mbitPerSecond = (int) (((double) dataVolumeMbit) / runtimeSecs);
LOG.info(
String.format(
"Test finished with throughput of %d MBit/s (runtime [secs]: %d, "
+ "data volume [gb/mbits]: %d/%d)",
mbitPerSecond, runtimeSecs, dataVolumeGb, dataVolumeMbit));
}
private JobGraph createJobGraph(
int dataVolumeGb,
boolean useForwarder,
boolean isSlowSender,
boolean isSlowReceiver,
int numSubtasks) {
SlotSharingGroup sharingGroup = new SlotSharingGroup();
final List<JobVertex> jobVertices = new ArrayList<>();
JobVertex producer = new JobVertex("Speed Test Producer");
producer.setSlotSharingGroup(sharingGroup);
producer.setInvokableClass(SpeedTestProducer.class);
producer.setParallelism(numSubtasks);
producer.getConfiguration()
.set(getIntConfigOption(DATA_VOLUME_GB_CONFIG_KEY), dataVolumeGb);
producer.getConfiguration()
.set(getBooleanConfigOption(IS_SLOW_SENDER_CONFIG_KEY), isSlowSender);
jobVertices.add(producer);
JobVertex forwarder = null;
if (useForwarder) {
forwarder = new JobVertex("Speed Test Forwarder");
forwarder.setSlotSharingGroup(sharingGroup);
forwarder.setInvokableClass(SpeedTestForwarder.class);
forwarder.setParallelism(numSubtasks);
jobVertices.add(forwarder);
}
JobVertex consumer = new JobVertex("Speed Test Consumer");
consumer.setSlotSharingGroup(sharingGroup);
consumer.setInvokableClass(SpeedTestConsumer.class);
consumer.setParallelism(numSubtasks);
consumer.getConfiguration()
.set(getBooleanConfigOption(IS_SLOW_RECEIVER_CONFIG_KEY), isSlowReceiver);
jobVertices.add(consumer);
if (useForwarder) {
connectNewDataSetAsInput(
forwarder,
producer,
DistributionPattern.ALL_TO_ALL,
ResultPartitionType.PIPELINED);
connectNewDataSetAsInput(
consumer,
forwarder,
DistributionPattern.ALL_TO_ALL,
ResultPartitionType.PIPELINED);
} else {
connectNewDataSetAsInput(
consumer,
producer,
DistributionPattern.ALL_TO_ALL,
ResultPartitionType.PIPELINED);
}
return JobGraphTestUtils.streamingJobGraph(jobVertices.toArray(new JobVertex[0]));
}
public static void main(String[] args) throws Exception {
new NetworkStackThroughputITCase().testThroughput();
System.out.println("Done.");
}
}
|
SpeedTestRecord
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/issues/RouteScopedErrorHandlerAndOnExceptionTest.java
|
{
"start": 1437,
"end": 4065
}
|
class ____ extends ContextTestSupport {
@Test
public void testOnException() throws Exception {
RouteDefinition route = context.getRouteDefinitions().get(0);
AdviceWith.adviceWith(route, context, new AdviceWithRouteBuilder() {
@Override
public void configure() {
interceptSendToEndpoint("seda:*").skipSendToOriginalEndpoint().throwException(new ConnectException("Forced"));
}
});
getMockEndpoint("mock:local").expectedMessageCount(0);
getMockEndpoint("mock:seda").expectedMessageCount(0);
// we fail all redeliveries so after that we send to mock:exhausted
getMockEndpoint("mock:exhausted").expectedMessageCount(1);
CamelExecutionException e = assertThrows(CamelExecutionException.class,
() -> template.sendBody("direct:start", "Hello World"),
"Should thrown an exception");
ConnectException cause = assertIsInstanceOf(ConnectException.class, e.getCause());
assertEquals("Forced", cause.getMessage());
assertMockEndpointsSatisfied();
}
@Test
public void testErrorHandler() throws Exception {
RouteDefinition route = context.getRouteDefinitions().get(0);
AdviceWith.adviceWith(route, context, new AdviceWithRouteBuilder() {
@Override
public void configure() {
interceptSendToEndpoint("seda:*").skipSendToOriginalEndpoint()
.throwException(new FileNotFoundException("Forced"));
}
});
getMockEndpoint("mock:local").expectedMessageCount(1);
getMockEndpoint("mock:seda").expectedMessageCount(0);
getMockEndpoint("mock:exhausted").expectedMessageCount(0);
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").errorHandler(deadLetterChannel("mock:local").maximumRedeliveries(2).redeliveryDelay(0))
// no redelivery delay for faster unit tests
.onException(ConnectException.class).maximumRedeliveries(5).redeliveryDelay(0).logRetryAttempted(true)
.retryAttemptedLogLevel(LoggingLevel.WARN)
// send to mock when we are exhausted
.to("mock:exhausted").end().to("seda:foo");
}
};
}
}
|
RouteScopedErrorHandlerAndOnExceptionTest
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/jsontype/SealedTypesWithTypedDeserializationTest.java
|
{
"start": 2834,
"end": 2940
}
|
class ____ permits DummyImpl {
protected DummyBase(boolean foo) { }
}
static final
|
DummyBase
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/TestInstancePreConstructCallbackTests.java
|
{
"start": 14250,
"end": 14573
}
|
class ____ extends AbstractTestInstancePreConstructCallback {
InstancePreConstructCallbackRecordingFoo() {
super("foo");
}
@Override
public ExtensionContextScope getTestInstantiationExtensionContextScope(ExtensionContext rootContext) {
return TEST_METHOD;
}
}
static
|
InstancePreConstructCallbackRecordingFoo
|
java
|
apache__camel
|
components/camel-jms/src/test/java/org/apache/camel/component/jms/JmsInOnlyPooledExchangeTest.java
|
{
"start": 1895,
"end": 5130
}
|
class ____ extends AbstractJMSTest {
@Order(2)
@RegisterExtension
public static CamelContextExtension camelContextExtension = new TransientCamelContextExtension();
private static final String JMS_QUEUE_NAME = "activemq:queue:JmsInOnlyPooledExchangeTest";
private static final String MOCK_RESULT = "mock:result";
protected CamelContext context;
protected ProducerTemplate template;
protected ConsumerTemplate consumer;
@Test
public void testSynchronous() throws Exception {
final String expectedBody = "Hello World";
MockEndpoint mock = getMockEndpoint(MOCK_RESULT);
mock.expectedMessageCount(1);
mock.expectedBodiesReceived(expectedBody);
template.sendBody(JMS_QUEUE_NAME, expectedBody);
mock.assertIsSatisfied();
Awaitility.waitAtMost(2, TimeUnit.SECONDS).untilAsserted(() -> {
PooledObjectFactory.Statistics stat
= context.getCamelContextExtension().getExchangeFactoryManager().getStatistics();
assertEquals(1, stat.getCreatedCounter());
assertEquals(0, stat.getAcquiredCounter());
assertEquals(1, stat.getReleasedCounter());
assertEquals(0, stat.getDiscardedCounter());
});
}
@Test
public void testTwoSynchronous() throws Exception {
MockEndpoint mock = getMockEndpoint(MOCK_RESULT);
mock.expectedBodiesReceived("Hello World", "Bye World");
template.sendBody(JMS_QUEUE_NAME, "Hello World");
template.sendBody(JMS_QUEUE_NAME, "Bye World");
mock.assertIsSatisfied();
Awaitility.waitAtMost(2, TimeUnit.SECONDS).untilAsserted(() -> {
PooledObjectFactory.Statistics stat
= context.getCamelContextExtension().getExchangeFactoryManager().getStatistics();
assertEquals(1, stat.getCreatedCounter());
assertEquals(1, stat.getAcquiredCounter());
assertEquals(2, stat.getReleasedCounter());
assertEquals(0, stat.getDiscardedCounter());
});
}
protected String getComponentName() {
return "activemq";
}
@ContextFixture
public void configurePooling(CamelContext context) {
ExtendedCamelContext ecc = context.getCamelContextExtension();
ecc.setExchangeFactory(new PooledExchangeFactory());
ecc.setProcessorExchangeFactory(new PooledProcessorExchangeFactory());
ecc.getExchangeFactory().setStatisticsEnabled(true);
ecc.getProcessorExchangeFactory().setStatisticsEnabled(true);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from(JMS_QUEUE_NAME)
.to(MOCK_RESULT);
}
};
}
@Override
public CamelContextExtension getCamelContextExtension() {
return camelContextExtension;
}
@BeforeEach
void setUpRequirements() {
context = camelContextExtension.getContext();
template = camelContextExtension.getProducerTemplate();
consumer = camelContextExtension.getConsumerTemplate();
}
}
|
JmsInOnlyPooledExchangeTest
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/ArrayEqualsTest.java
|
{
"start": 5925,
"end": 8122
}
|
class ____ {
public void neitherArray() {
Object a = new Object();
Object b = new Object();
if (a.equals(b)) {
System.out.println("Objects are equal!");
} else {
System.out.println("Objects are not equal!");
}
if (Objects.equal(a, b)) {
System.out.println("Objects are equal!");
} else {
System.out.println("Objects are not equal!");
}
}
public void firstArray() {
Object[] a = new Object[3];
Object b = new Object();
if (a.equals(b)) {
System.out.println("arrays are equal!");
} else {
System.out.println("arrays are not equal!");
}
if (Objects.equal(a, b)) {
System.out.println("Objects are equal!");
} else {
System.out.println("Objects are not equal!");
}
}
public void secondArray() {
Object a = new Object();
Object[] b = new Object[3];
if (a.equals(b)) {
System.out.println("arrays are equal!");
} else {
System.out.println("arrays are not equal!");
}
if (Objects.equal(a, b)) {
System.out.println("Objects are equal!");
} else {
System.out.println("Objects are not equal!");
}
}
}\
""")
.doTest();
}
@Test
public void java7NegativeCase() {
compilationHelper
.addSourceLines(
"ArrayEqualsNegativeCases2.java",
"""
package com.google.errorprone.bugpatterns.testdata;
import java.util.Objects;
/**
* Tests that only run with Java 7 and above.
*
* @author eaftan@google.com (Eddie Aftandilian)
*/
public
|
ArrayEqualsNegativeCases
|
java
|
elastic__elasticsearch
|
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessor.java
|
{
"start": 1599,
"end": 9485
}
|
class ____ extends AbstractProcessor {
public static final String TYPE = "set_security_user";
private static final Logger logger = LogManager.getLogger(SetSecurityUserProcessor.class);
private final SecurityContext securityContext;
private final Settings settings;
private final String field;
private final Set<Property> properties;
public SetSecurityUserProcessor(
String tag,
String description,
SecurityContext securityContext,
Settings settings,
String field,
Set<Property> properties
) {
super(tag, description);
this.securityContext = securityContext;
this.settings = Objects.requireNonNull(settings, "settings object cannot be null");
if (XPackSettings.SECURITY_ENABLED.get(settings) == false) {
logger.warn(
"Creating processor [{}] (tag [{}]) on field [{}] but authentication is not currently enabled on this cluster "
+ " - this processor is likely to fail at runtime if it is used",
TYPE,
tag,
field
);
} else if (this.securityContext == null) {
throw new IllegalArgumentException("Authentication is allowed on this cluster state, but there is no security context");
}
this.field = field;
this.properties = properties;
}
@Override
public IngestDocument execute(IngestDocument ingestDocument) throws Exception {
Authentication authentication = null;
User user = null;
if (this.securityContext != null) {
authentication = securityContext.getAuthentication();
if (authentication != null) {
user = authentication.getEffectiveSubject().getUser();
}
}
if (user == null) {
logger.debug(
"Failed to find active user. SecurityContext=[{}] Authentication=[{}] User=[{}]",
securityContext,
authentication,
user
);
if (XPackSettings.SECURITY_ENABLED.get(settings)) {
// This shouldn't happen. If authentication is allowed (and active), then there _should_ always be an authenticated user.
// If we ever see this error message, then one of our assumptions are wrong.
throw new IllegalStateException(
"There is no authenticated user - the [" + TYPE + "] processor requires an authenticated user"
);
} else {
throw new IllegalStateException(
"Security (authentication) is not enabled on this cluster, so there is no active user - "
+ "the ["
+ TYPE
+ "] processor cannot be used without security"
);
}
}
Object fieldValue = ingestDocument.getFieldValue(field, Object.class, true);
@SuppressWarnings("unchecked")
Map<String, Object> userObject = fieldValue instanceof Map ? (Map<String, Object>) fieldValue : new HashMap<>();
for (Property property : properties) {
switch (property) {
case USERNAME:
if (user.principal() != null) {
userObject.put("username", user.principal());
}
break;
case FULL_NAME:
if (user.fullName() != null) {
userObject.put("full_name", user.fullName());
}
break;
case EMAIL:
if (user.email() != null) {
userObject.put("email", user.email());
}
break;
case ROLES:
if (user.roles() != null && user.roles().length != 0) {
userObject.put("roles", Arrays.asList(user.roles()));
}
break;
case METADATA:
if (user.metadata() != null && user.metadata().isEmpty() == false) {
userObject.put("metadata", user.metadata());
}
break;
case API_KEY:
if (authentication.isApiKey()) {
final String apiKey = "api_key";
final Object existingApiKeyField = userObject.get(apiKey);
@SuppressWarnings("unchecked")
final Map<String, Object> apiKeyField = existingApiKeyField instanceof Map
? (Map<String, Object>) existingApiKeyField
: new HashMap<>();
if (authentication.getAuthenticatingSubject().getMetadata().containsKey(AuthenticationField.API_KEY_NAME_KEY)) {
apiKeyField.put(
"name",
authentication.getAuthenticatingSubject().getMetadata().get(AuthenticationField.API_KEY_NAME_KEY)
);
}
if (authentication.getAuthenticatingSubject().getMetadata().containsKey(AuthenticationField.API_KEY_ID_KEY)) {
apiKeyField.put(
"id",
authentication.getAuthenticatingSubject().getMetadata().get(AuthenticationField.API_KEY_ID_KEY)
);
}
final Map<String, Object> apiKeyMetadata = ApiKeyService.getApiKeyMetadata(authentication);
if (false == apiKeyMetadata.isEmpty()) {
apiKeyField.put("metadata", apiKeyMetadata);
}
if (false == apiKeyField.isEmpty()) {
userObject.put(apiKey, apiKeyField);
}
}
break;
case REALM:
final String realmKey = "realm";
final Object existingRealmField = userObject.get(realmKey);
@SuppressWarnings("unchecked")
final Map<String, Object> realmField = existingRealmField instanceof Map
? (Map<String, Object>) existingRealmField
: new HashMap<>();
final Object realmName = ApiKeyService.getCreatorRealmName(authentication);
if (realmName != null) {
realmField.put("name", realmName);
}
final Object realmType = ApiKeyService.getCreatorRealmType(authentication);
if (realmType != null) {
realmField.put("type", realmType);
}
if (false == realmField.isEmpty()) {
userObject.put(realmKey, realmField);
}
break;
case AUTHENTICATION_TYPE:
if (authentication.getAuthenticationType() != null) {
userObject.put("authentication_type", authentication.getAuthenticationType().toString());
}
break;
default:
throw new UnsupportedOperationException("unsupported property [" + property + "]");
}
}
ingestDocument.setFieldValue(field, userObject);
return ingestDocument;
}
@Override
public String getType() {
return TYPE;
}
String getField() {
return field;
}
Set<Property> getProperties() {
return properties;
}
public static final
|
SetSecurityUserProcessor
|
java
|
mockito__mockito
|
mockito-core/src/test/java/org/mockitousage/PlaygroundTest.java
|
{
"start": 446,
"end": 2429
}
|
class ____ {
public final Object withLong(long y) {
return "";
}
public Object foo() {
return "";
}
}
Foo mock;
@Mock IMethods mockTwo;
@Test
public void spyInAction() {}
@Test
public void partialMockInAction() {
// mock = mock(Foo.class, withSettings()
// .defaultBehavior(CALLS_REAL_METHODS);
// mock = mock(Foo.class, withSettings()
// .defaultMockAnswer(CALLS_REAL_METHODS);
// mock = mock(Foo.class, withSettings()
// .defaultAnswer(CALLS_REAL_METHODS);
// mock = mock(Foo.class, CALLS_REAL_METHODS);
// mock = mock(Foo.class, withSettings()
// .defaultBehavior(CALLS_REAL_METHODS)
// .createUsingDefaultConstructor();
//
// mock = mock(Foo.class, withSettings()
// .defaultBehavior(CALLS_REAL_METHODS)
// .createPassingArguments("some arg", 1);
//
// spy = spy(Foo.class, "some arg", 1);
//
// .withName("foo")
// .withDefaultBehavior(RETURNS_SMART_NULLS)
// .withInterfaces(Bar.class);
//
// mock = mock(Foo.class)
// .name("foo")
// .defaultBehavior(RETURNS_SMART_NULLS)
// .interfaces(Bar.class);
//
// mock = mock(Foo.class)
// .named("foo")
// .byDefault(RETURNS_SMART_NULLS)
// .alsoImplements(Bar.class, Bar2.class);
//
// mock = mock(Foo.class)
// hasName("foo");
// when(mock.getStuff()).thenReturn("aha!");
// when(mock.doSomeThing()).thenCallRealMethod();
//
// mock.doSomeThing();
}
//
|
Boo
|
java
|
FasterXML__jackson-databind
|
src/main/java/tools/jackson/databind/KeyDeserializer.java
|
{
"start": 1712,
"end": 1862
}
|
class ____ to be used as the marker for
* annotation {@link tools.jackson.databind.annotation.JsonDeserialize}.
*/
public abstract static
|
is
|
java
|
micronaut-projects__micronaut-core
|
core-processor/src/main/java/io/micronaut/inject/ast/ReflectParameterElement.java
|
{
"start": 1158,
"end": 3380
}
|
class ____ implements ParameterElement {
private final ClassElement classElement;
private final String name;
private MutableAnnotationMetadata annotationMetadata = new MutableAnnotationMetadata();
ReflectParameterElement(ClassElement classElement, String name) {
this.classElement = classElement;
this.name = name;
}
@Override
public boolean isPrimitive() {
return classElement.isPrimitive();
}
@Override
public boolean isArray() {
return classElement.isArray();
}
@Override
public int getArrayDimensions() {
return classElement.getArrayDimensions();
}
@NonNull
@Override
public ClassElement getType() {
return classElement;
}
@NonNull
@Override
public String getName() {
return name;
}
@Override
public boolean isProtected() {
return false;
}
@Override
public boolean isPublic() {
return true;
}
@NonNull
@Override
public Object getNativeType() {
return classElement.getNativeType();
}
@NonNull
@Override
public AnnotationMetadata getAnnotationMetadata() {
return this.annotationMetadata;
}
@NonNull
@Override
public <T extends Annotation> Element annotate(@NonNull String annotationType, @NonNull Consumer<AnnotationValueBuilder<T>> consumer) {
if (annotationMetadata == AnnotationMetadata.EMPTY_METADATA) {
final MutableAnnotationMetadata mutableAnnotationMetadata = new MutableAnnotationMetadata();
this.annotationMetadata = mutableAnnotationMetadata;
AnnotationValueBuilder<T> builder = AnnotationValue.builder(annotationType);
consumer.accept(builder);
mutableAnnotationMetadata.addDeclaredAnnotation(annotationType, builder.build().getValues());
} else {
AnnotationValueBuilder<T> builder = AnnotationValue.builder(annotationType);
consumer.accept(builder);
this.annotationMetadata = MutableAnnotationMetadata.mutateMember(annotationMetadata, annotationType, builder.build().getValues());
}
return this;
}
}
|
ReflectParameterElement
|
java
|
apache__camel
|
components/camel-rocketmq/src/main/java/org/apache/camel/component/rocketmq/reply/ReplyHolder.java
|
{
"start": 991,
"end": 2398
}
|
class ____ {
private final Exchange exchange;
private final AsyncCallback callback;
private final String messageKey;
private final MessageExt messageExt;
private long timeout;
public ReplyHolder(Exchange exchange, AsyncCallback callback, String messageKey, MessageExt messageExt) {
this.exchange = exchange;
this.callback = callback;
this.messageExt = messageExt;
this.messageKey = messageKey;
}
public ReplyHolder(Exchange exchange, AsyncCallback callback, String messageKey, long timeout) {
this(exchange, callback, messageKey, null);
this.timeout = timeout;
}
public boolean isTimeout() {
return messageExt == null;
}
public Exchange getExchange() {
return exchange;
}
public AsyncCallback getCallback() {
return callback;
}
public MessageExt getMessageExt() {
return messageExt;
}
public String getMessageKey() {
return messageKey;
}
public long getTimeout() {
return timeout;
}
@Override
public String toString() {
return "ReplyHolder{" +
"exchange=" + exchange +
", callback=" + callback +
", messageKey='" + messageKey + '\'' +
", messageExt=" + messageExt +
", timeout=" + timeout +
'}';
}
}
|
ReplyHolder
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FencedException.java
|
{
"start": 1027,
"end": 1196
}
|
class ____ extends IOException {
private static final long serialVersionUID = 1L;
public FencedException(String errorMsg) {
super(errorMsg);
}
}
|
FencedException
|
java
|
elastic__elasticsearch
|
modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/JoinAggregationBuilders.java
|
{
"start": 609,
"end": 1154
}
|
class ____ {
/**
* Create a new {@link SingleBucketAggregation} aggregation with the given name.
*/
public static ChildrenAggregationBuilder children(String name, String childType) {
return new ChildrenAggregationBuilder(name, childType);
}
/**
* Create a new {@link SingleBucketAggregation} aggregation with the given name.
*/
public static ParentAggregationBuilder parent(String name, String childType) {
return new ParentAggregationBuilder(name, childType);
}
}
|
JoinAggregationBuilders
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/query/results/ResultSetMapping.java
|
{
"start": 1355,
"end": 3179
}
|
interface ____ extends JdbcValuesMappingProducer {
/**
* An identifier for the mapping
*/
String getMappingIdentifier();
/**
* Indicates whether the mapping is dynamic per {@link ResultSetMapping}
*/
boolean isDynamic();
/**
* The number of result builders currently associated with this mapping
*/
int getNumberOfResultBuilders();
/**
* The result builders currently associated with this mapping
*/
List<ResultBuilder> getResultBuilders();
/**
* Visit each result builder
*/
void visitResultBuilders(BiConsumer<Integer, ResultBuilder> resultBuilderConsumer);
/**
* Visit the "legacy" fetch builders.
* <p/>
* Historically these mappings in Hibernate were defined such that results and fetches are
* unaware of each other. So while {@link ResultBuilder} encapsulates the fetches (see
* {@link ResultBuilder#visitFetchBuilders}), fetches defined in the legacy way are unassociated
* to their "parent".
*/
void visitLegacyFetchBuilders(Consumer<LegacyFetchBuilder> resultBuilderConsumer);
/**
* Add a builder
*/
void addResultBuilder(ResultBuilder resultBuilder);
/**
* Add a legacy fetch builder
*/
void addLegacyFetchBuilder(LegacyFetchBuilder fetchBuilder);
/**
* Create a memento from this mapping.
*/
NamedResultSetMappingMemento toMemento(String name);
static ResultSetMapping resolveResultSetMapping(String name, SessionFactoryImplementor sessionFactory) {
return resolveResultSetMapping( name, false, sessionFactory );
}
static ResultSetMapping resolveResultSetMapping(String name, boolean isDynamic, SessionFactoryImplementor sessionFactory) {
return sessionFactory.getJdbcValuesMappingProducerProvider()
.buildResultSetMapping( name, isDynamic, sessionFactory );
}
@Override
ResultSetMapping cacheKeyInstance();
}
|
ResultSetMapping
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/rules/physical/stream/PushCalcPastChangelogNormalizeRuleTest.java
|
{
"start": 1617,
"end": 10832
}
|
class ____ extends TableTestBase {
private StreamTableTestUtil util;
private static TableDescriptor sourceDescriptor;
private static TableDescriptor sourceDescriptorWithTwoPrimaryKeys;
@BeforeAll
static void setup() {
sourceDescriptor =
TableFactoryHarness.newBuilder()
.schema(
Schema.newBuilder()
.column("f0", INT())
.column("f1", INT().notNull())
.column("f2", STRING())
.column("f3", BIGINT().notNull())
.primaryKey("f1")
.build())
.unboundedScanSource(ChangelogMode.upsert())
.build();
sourceDescriptorWithTwoPrimaryKeys =
TableFactoryHarness.newBuilder()
.schema(
Schema.newBuilder()
.column("f0", STRING())
.column("f1", INT().notNull())
.column("f2", BIGINT().notNull())
.column("f3", STRING())
.column("f4", BIGINT().notNull())
.column("f5", BIGINT().notNull())
.column("f6", BIGINT().notNull())
.column("f7", BIGINT().notNull())
.primaryKey("f1", "f2")
.build())
.unboundedScanSource(ChangelogMode.upsert())
.build();
}
@BeforeEach
void before() {
util = streamTestUtil(TableConfig.getDefault());
}
@Test
void testWithSinglePrimaryKeyFilter() {
util.tableEnv().createTable("T", sourceDescriptor);
util.verifyRelPlan("SELECT * FROM T WHERE f1 < 1");
}
@Test
void testPrimaryKeySeveralSameSources() {
util.tableEnv().createTable("T", sourceDescriptor);
// Shouldn't be pushed down since there is no common filter for ChangelogNormalize with the
// same source
// verifyExecPlan is intended here as it will show whether the node is reused or not
util.verifyExecPlan(
"SELECT * FROM T WHERE f1 < 1\n"
+ "UNION SELECT * FROM T WHERE f1 < 3\n"
+ "INTERSECT SELECT * FROM T WHERE f1 > 0");
}
@Test
void testPrimaryKeySeveralSameSourcesWithPartialPushDown() {
util.tableEnv().createTable("T", sourceDescriptor);
// Here filter should be partially pushed down
// verifyExecPlan is intended here as it will show whether the node is reused or not
util.verifyExecPlan(
"SELECT * FROM T WHERE f1 < 1 AND f1 > 0\n"
+ " UNION SELECT * FROM T WHERE f1 < 3 AND f1 > 0\n"
+ " INTERSECT SELECT * FROM T WHERE f1 > 0 AND f1 < 10");
}
@Test
void testPrimaryKeySeveralSameSourcesWithFullPushDown() {
util.tableEnv().createTable("T", sourceDescriptor);
// Here filter should be fully pushed down
// verifyExecPlan is intended here as it will show whether the node is reused or not
util.verifyExecPlan(
"SELECT * FROM T WHERE f1 < 1 AND f1 > 0\n"
+ " UNION SELECT * FROM T WHERE f1 < 1 AND f1 > 0\n"
+ " INTERSECT SELECT * FROM T WHERE f1 < 1 AND f1 > 0");
}
@Test
void testPrimaryKeySeveralDifferentSources() {
util.tableEnv().createTable("T", sourceDescriptor);
util.tableEnv().createTable("T2", sourceDescriptor);
util.tableEnv().createTable("T3", sourceDescriptor);
// verifyExecPlan is intended here as it will show whether the node is reused or not
util.verifyExecPlan(
"SELECT * FROM T WHERE f1 < 1 AND f1 > 0\n"
+ " UNION SELECT * FROM T2 WHERE f1 < 1 AND f1 > 0\n"
+ " INTERSECT SELECT * FROM T3 WHERE f1 < 1 AND f1 > 0");
}
@Test
void testNonPrimaryKeySeveralSameSources() {
util.tableEnv().createTable("T", sourceDescriptor);
// verifyExecPlan is intended here as it will show whether the node is reused or not
util.verifyExecPlan(
"SELECT * FROM T WHERE f3 < 1\n"
+ "UNION SELECT * FROM T WHERE f3 < 3\n"
+ "INTERSECT SELECT * FROM T WHERE f3 > 0");
}
@Test
void testNonPrimaryKeySeveralSameSourcesPartialPushedDown() {
util.tableEnv().createTable("T", sourceDescriptor);
// verifyExecPlan is intended here as it will show whether the node is reused or not
util.verifyExecPlan(
"SELECT * FROM T WHERE f3 < 1 AND f3 > 0\n"
+ " UNION SELECT * FROM T WHERE f3 < 3 AND f3 > 0\n"
+ " INTERSECT SELECT * FROM T WHERE f3 > 0 AND f3 < 10");
}
@Test
void testNonPrimaryKeySeveralSameSourcesWithFullPushDown() {
util.tableEnv().createTable("T", sourceDescriptor);
// Here filter should be fully pushed down
// verifyExecPlan is intended here as it will show whether the node is reused or not
util.verifyExecPlan(
"SELECT * FROM T WHERE f3 < 1 AND f3 > 0\n"
+ " UNION SELECT * FROM T WHERE f3 < 1 AND f3 > 0\n"
+ " INTERSECT SELECT * FROM T WHERE f3 < 1 AND f3 > 0");
}
@Test
void testNonPrimaryKeySeveralDifferentSources() {
util.tableEnv().createTable("T", sourceDescriptor);
util.tableEnv().createTable("T2", sourceDescriptor);
util.tableEnv().createTable("T3", sourceDescriptor);
// verifyExecPlan is intended here as it will show whether the node is reused or not
util.verifyExecPlan(
"SELECT * FROM T WHERE f3 < 1 AND f3 > 0\n"
+ " UNION SELECT * FROM T2 WHERE f3 < 1 AND f3 > 0\n"
+ " INTERSECT SELECT * FROM T3 WHERE f3 < 1 AND f3 > 0");
}
@Test
void testNonPrimaryKeySameSourcesAndSargNotPushedDown() {
util.tableEnv().createTable("T", sourceDescriptor);
// Here IS NOT NULL filter should be pushed down, SARG should stay in Calc
// verifyExecPlan is intended here as it will show whether the node is reused or not
util.verifyExecPlan(
"SELECT * FROM T WHERE f0 < 10 AND f0 > 1 AND f0 IS NOT NULL\n"
+ " UNION SELECT * FROM T WHERE f0 < 2 AND f0 > 0 AND f0 IS NOT NULL\n"
+ " INTERSECT SELECT * FROM T WHERE f0 < 4 AND f0 > 2 AND f0 IS NOT NULL");
}
@Test
void testWithMultipleFilters() {
util.tableEnv().createTable("T", sourceDescriptor);
// Only the first filter (f1 < 10) can be pushed
util.verifyRelPlan(
"SELECT f1, SUM(f1) AS `sum` FROM T WHERE f1 < 10 AND (f1 > 3 OR f2 IS NULL) GROUP BY f1");
}
@Test
void testWithMultiplePrimaryKeyColumns() {
util.tableEnv().createTable("T", sourceDescriptorWithTwoPrimaryKeys);
util.verifyRelPlan("SELECT f0, f1 FROM T WHERE (f1 < 1 OR f2 > 10) AND f0 IS NOT NULL");
}
@Test
void testOnlyProjection() {
util.tableEnv().createTable("T", sourceDescriptor);
util.verifyRelPlan("SELECT f1, f2 FROM T");
}
@Test
void testFilterAndProjection() {
util.tableEnv().createTable("T", sourceDescriptorWithTwoPrimaryKeys);
util.verifyRelPlan("SELECT f1, f5 FROM T WHERE (f1 < 1 OR f2 > 10) AND f3 IS NOT NULL");
}
@Test
void testPartialPrimaryKeyFilterAndProjection() {
util.tableEnv().createTable("T", sourceDescriptorWithTwoPrimaryKeys);
util.verifyRelPlan("SELECT f1, f5 FROM T WHERE f1 < 1 AND f3 IS NOT NULL");
}
@Test
void testPartialPushDownWithTrimmedFieldsAndDifferentProjection() {
util.tableEnv().createTable("T", sourceDescriptorWithTwoPrimaryKeys);
// verifyExecPlan is intended here as it will show whether the node is reused or not
util.verifyExecPlan(
"SELECT f3 FROM T WHERE f2 < 1 AND f2 > 0\n"
+ " UNION SELECT f3 FROM T WHERE f2 < 3 AND f2 > 0\n"
+ " INTERSECT SELECT f3 FROM T WHERE f2 > 0 AND f2 < 10");
}
@Test
void testPartialPushDownWithTrimmedFields() {
util.tableEnv().createTable("T", sourceDescriptorWithTwoPrimaryKeys);
// verifyExecPlan is intended here as it will show whether the node is reused or not
util.verifyExecPlan(
"SELECT f2 FROM T WHERE f2 < 1 AND f2 > 0\n"
+ " UNION SELECT f2 FROM T WHERE f2 < 3 AND f2 > 0\n"
+ " INTERSECT SELECT f2 FROM T WHERE f2 > 0 AND f2 < 10");
}
}
|
PushCalcPastChangelogNormalizeRuleTest
|
java
|
apache__flink
|
flink-metrics/flink-metrics-otel/src/test/java/org/apache/flink/metrics/otel/OpenTelemetryTestBase.java
|
{
"start": 6850,
"end": 7761
}
|
class ____ extends BaseConsumer<Slf4jLevelLogConsumer> {
private final Logger logger;
public Slf4jLevelLogConsumer(Logger logger) {
this.logger = logger;
}
@Override
public void accept(OutputFrame outputFrame) {
final OutputFrame.OutputType outputType = outputFrame.getType();
final String utf8String = outputFrame.getUtf8StringWithoutLineEnding();
String lowerCase = utf8String.toLowerCase();
if (lowerCase.contains("error") || lowerCase.contains("exception")) {
logger.error("{}: {}", outputType, utf8String);
} else if (lowerCase.contains("warn") || lowerCase.contains("fail")) {
logger.warn("{}: {}", outputType, utf8String);
} else {
logger.info("{}: {}", outputType, utf8String);
}
}
}
}
|
Slf4jLevelLogConsumer
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/query/count/CountTest.java
|
{
"start": 5331,
"end": 5441
}
|
class ____ {
@Id String ssn;
String name;
}
@Entity(name="Publisher")
@Table(name = "pubs")
static
|
Author
|
java
|
apache__dubbo
|
dubbo-registry/dubbo-registry-api/src/main/java/org/apache/dubbo/registry/client/ServiceDiscoveryRegistry.java
|
{
"start": 4846,
"end": 10897
}
|
interface ____ to the same app list can do subscribe or unsubscribe at the same moment.
And the lock should be destroyed when listener destroying its corresponding instance listener.
* */
private final ConcurrentMap<String, Lock> appSubscriptionLocks = new ConcurrentHashMap<>();
public ServiceDiscoveryRegistry(URL registryURL, ApplicationModel applicationModel) {
super(registryURL);
this.serviceDiscovery = createServiceDiscovery(registryURL);
this.serviceNameMapping =
(AbstractServiceNameMapping) ServiceNameMapping.getDefaultExtension(registryURL.getScopeModel());
super.applicationModel = applicationModel;
}
// Currently, for test purpose
protected ServiceDiscoveryRegistry(
URL registryURL, ServiceDiscovery serviceDiscovery, ServiceNameMapping serviceNameMapping) {
super(registryURL);
this.serviceDiscovery = serviceDiscovery;
this.serviceNameMapping = (AbstractServiceNameMapping) serviceNameMapping;
}
public ServiceDiscovery getServiceDiscovery() {
return serviceDiscovery;
}
/**
* Create the {@link ServiceDiscovery} from the registry {@link URL}
*
* @param registryURL the {@link URL} to connect the registry
* @return non-null
*/
protected ServiceDiscovery createServiceDiscovery(URL registryURL) {
return getServiceDiscovery(registryURL
.addParameter(INTERFACE_KEY, ServiceDiscovery.class.getName())
.removeParameter(REGISTRY_TYPE_KEY));
}
/**
* Get the instance {@link ServiceDiscovery} from the registry {@link URL} using
* {@link ServiceDiscoveryFactory} SPI
*
* @param registryURL the {@link URL} to connect the registry
* @return
*/
private ServiceDiscovery getServiceDiscovery(URL registryURL) {
ServiceDiscoveryFactory factory = getExtension(registryURL);
return factory.getServiceDiscovery(registryURL);
}
@Override
protected boolean shouldRegister(URL providerURL) {
String side = providerURL.getSide();
boolean should = PROVIDER_SIDE.equals(side); // Only register the Provider.
if (!should && logger.isDebugEnabled()) {
logger.debug(String.format("The URL[%s] should not be registered.", providerURL));
}
if (!acceptable(providerURL)) {
logger.info("URL " + providerURL + " will not be registered to Registry. Registry " + this.getUrl()
+ " does not accept service of this protocol type.");
return false;
}
return should;
}
protected boolean shouldSubscribe(URL subscribedURL) {
return !shouldRegister(subscribedURL);
}
@Override
public final void register(URL url) {
if (!shouldRegister(url)) { // Should Not Register
return;
}
doRegister(url);
}
@Override
public void doRegister(URL url) {
// fixme, add registry-cluster is not necessary anymore
url = addRegistryClusterKey(url);
serviceDiscovery.register(url);
}
@Override
public final void unregister(URL url) {
if (!shouldRegister(url)) {
return;
}
doUnregister(url);
}
@Override
public void doUnregister(URL url) {
// fixme, add registry-cluster is not necessary anymore
url = addRegistryClusterKey(url);
serviceDiscovery.unregister(url);
}
@Override
public final void subscribe(URL url, NotifyListener listener) {
if (!shouldSubscribe(url)) { // Should Not Subscribe
return;
}
doSubscribe(url, listener);
}
@Override
public void doSubscribe(URL url, NotifyListener listener) {
url = addRegistryClusterKey(url);
serviceDiscovery.subscribe(url, listener);
Set<String> mappingByUrl = ServiceNameMapping.getMappingByUrl(url);
String key = ServiceNameMapping.buildMappingKey(url);
if (mappingByUrl == null) {
Lock mappingLock = serviceNameMapping.getMappingLock(key);
try {
mappingLock.lock();
mappingByUrl = serviceNameMapping.getMapping(url);
try {
DefaultMappingListener mappingListener = new DefaultMappingListener(url, mappingByUrl, listener);
mappingByUrl = serviceNameMapping.getAndListen(this.getUrl(), url, mappingListener);
// update the initial mapping apps we started to listen, to make sure it reflects the real value
// used do subscription before any event.
// it's protected by the mapping lock, so it won't override the event value.
mappingListener.updateInitialApps(mappingByUrl);
synchronized (mappingListeners) {
ConcurrentHashMapUtils.computeIfAbsent(
mappingListeners, url.getProtocolServiceKey(), (k) -> new ConcurrentHashSet<>())
.add(mappingListener);
}
} catch (Exception e) {
logger.warn(
INTERNAL_ERROR,
"",
"",
"Cannot find app mapping for service " + url.getServiceInterface() + ", will not migrate.",
e);
}
if (CollectionUtils.isEmpty(mappingByUrl)) {
logger.info(
"[METADATA_REGISTER] No interface-apps mapping found in local cache, stop subscribing, will automatically wait for mapping listener callback: "
+ url);
// if (check) {
// throw new IllegalStateException("Should has at least one way to know which
// services this
|
mapping
|
java
|
micronaut-projects__micronaut-core
|
http-client-core/src/main/java/io/micronaut/http/client/loadbalance/ServiceInstanceListLoadBalancerFactory.java
|
{
"start": 1078,
"end": 1502
}
|
class ____ {
/**
* Creates a {@link LoadBalancer} from the given {@link ServiceInstanceList}.
*
* @param serviceInstanceList The {@link ServiceInstanceList}
* @return The {@link LoadBalancer}
*/
public LoadBalancer create(ServiceInstanceList serviceInstanceList) {
return new ServiceInstanceListRoundRobinLoadBalancer(serviceInstanceList);
}
}
|
ServiceInstanceListLoadBalancerFactory
|
java
|
google__guava
|
android/guava-tests/test/com/google/common/reflect/InvokableTest.java
|
{
"start": 25172,
"end": 25504
}
|
class ____ {}
Constructor<?> constructor = Local.class.getDeclaredConstructors()[0];
assertEquals(0, Invokable.from(constructor).getParameters().size());
}
}
public void testLocalClassInStaticInitializer() {
LocalClassInStaticInitializer unused = new LocalClassInStaticInitializer();
}
private static
|
Local
|
java
|
google__auto
|
value/src/test/java/com/google/auto/value/processor/AutoValueCompilationTest.java
|
{
"start": 6380,
"end": 7093
}
|
class ____ {",
" @SuppressWarnings(\"mutable\")",
" public abstract int[] ints();",
" public abstract Arrays arrays();",
"",
" public static Baz create(int[] ints, Arrays arrays) {",
" return new AutoValue_Baz(ints, arrays);",
" }",
"}");
JavaFileObject expectedOutput =
JavaFileObjects.forSourceLines(
"foo.bar.AutoValue_Baz",
"package foo.bar;",
"",
"import java.util.Arrays;",
GeneratedImport.importGeneratedAnnotationType(),
"",
"@Generated(\"" + AutoValueProcessor.class.getName() + "\")",
"final
|
Baz
|
java
|
apache__camel
|
components/camel-jetty/src/test/java/org/apache/camel/component/jetty/rest/producer/JettyRestProducerPutTest.java
|
{
"start": 1057,
"end": 1968
}
|
class ____ extends BaseJettyTest {
@Test
public void testJettyProducerPut() throws Exception {
getMockEndpoint("mock:input").expectedMessageCount(1);
fluentTemplate.withBody("Donald Duck").withHeader("id", "123").to("direct:start").send();
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
// configure to use localhost with the given port
restConfiguration().component("jetty").producerComponent("http").host("localhost").port(getPort());
from("direct:start").to("rest:put:users/{id}");
// use the rest DSL to define the rest services
rest("/users/").put("{id}").to("mock:input");
}
};
}
}
|
JettyRestProducerPutTest
|
java
|
apache__camel
|
components/camel-jacksonxml/src/test/java/org/apache/camel/component/jacksonxml/JacksonJAXBAnnotationTest.java
|
{
"start": 1109,
"end": 2531
}
|
class ____ extends CamelTestSupport {
@Test
public void testMarshalJAXBObject() throws Exception {
TestJAXBPojo in = new TestJAXBPojo();
in.setName("Camel");
MockEndpoint mock = getMockEndpoint("mock:reversePojo");
mock.expectedMessageCount(1);
mock.message(0).body().isInstanceOf(TestJAXBPojo.class);
mock.message(0).body().isEqualTo(in);
Object marshalled = template.requestBody("direct:inPojo", in);
String marshalledAsString = context.getTypeConverter().convertTo(String.class, marshalled);
assertEquals("<XMLPojo><PojoName>Camel</PojoName></XMLPojo>", marshalledAsString);
template.sendBody("direct:backPojo", marshalled);
mock.assertIsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
JacksonXMLDataFormat format = new JacksonXMLDataFormat();
from("direct:in").marshal(format);
from("direct:back").unmarshal(format).to("mock:reverse");
JacksonXMLDataFormat formatPojo = new JacksonXMLDataFormat(TestJAXBPojo.class);
from("direct:inPojo").marshal(formatPojo);
from("direct:backPojo").unmarshal(formatPojo).to("mock:reversePojo");
}
};
}
}
|
JacksonJAXBAnnotationTest
|
java
|
spring-projects__spring-framework
|
spring-messaging/src/main/java/org/springframework/messaging/tcp/reactor/TcpMessageCodec.java
|
{
"start": 1039,
"end": 1495
}
|
interface ____<P> {
/**
* Decode the input {@link ByteBuffer} into one or more {@link Message Messages}.
* @param buffer the input buffer to decode from
* @return 0 or more decoded messages
*/
List<Message<P>> decode(ByteBuffer buffer);
/**
* Encode the given {@link Message} to the output {@link ByteBuffer}.
* @param message the message to encode
* @return the encoded buffer
*/
ByteBuffer encode(Message<P> message);
}
|
TcpMessageCodec
|
java
|
apache__logging-log4j2
|
log4j-api/src/main/java/org/apache/logging/log4j/util/internal/SerializationUtil.java
|
{
"start": 7514,
"end": 7602
}
|
class ____ '" + name + "'");
}
}
private SerializationUtil() {}
}
|
signature
|
java
|
elastic__elasticsearch
|
test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSection.java
|
{
"start": 923,
"end": 4022
}
|
class ____ implements Comparable<ClientYamlTestSection> {
public static ClientYamlTestSection parse(XContentParser parser) throws IOException {
ParserUtils.advanceToFieldName(parser);
XContentLocation sectionLocation = parser.getTokenLocation();
String sectionName = parser.currentName();
List<ExecutableSection> executableSections = new ArrayList<>();
try {
parser.nextToken();
PrerequisiteSection prerequisiteSection = PrerequisiteSection.parseIfNext(parser);
while (parser.currentToken() != XContentParser.Token.END_ARRAY) {
ParserUtils.advanceToFieldName(parser);
executableSections.add(ExecutableSection.parse(parser));
}
if (parser.nextToken() != XContentParser.Token.END_OBJECT) {
throw new IllegalArgumentException(
"malformed section ["
+ sectionName
+ "] expected ["
+ XContentParser.Token.END_OBJECT
+ "] but was ["
+ parser.currentToken()
+ "]"
);
}
parser.nextToken();
return new ClientYamlTestSection(sectionLocation, sectionName, prerequisiteSection, executableSections);
} catch (Exception e) {
throw new ParsingException(parser.getTokenLocation(), "Error parsing test named [" + sectionName + "]", e);
}
}
private final XContentLocation location;
private final String name;
private final PrerequisiteSection prerequisiteSection;
private final List<ExecutableSection> executableSections;
public ClientYamlTestSection(
XContentLocation location,
String name,
PrerequisiteSection prerequisiteSection,
List<ExecutableSection> executableSections
) {
this.location = location;
this.name = name;
this.prerequisiteSection = Objects.requireNonNull(prerequisiteSection, "skip section cannot be null");
this.executableSections = Collections.unmodifiableList(executableSections);
}
public XContentLocation getLocation() {
return location;
}
public String getName() {
return name;
}
public PrerequisiteSection getPrerequisiteSection() {
return prerequisiteSection;
}
public List<ExecutableSection> getExecutableSections() {
return executableSections;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ClientYamlTestSection that = (ClientYamlTestSection) o;
if (name != null ? name.equals(that.name) == false : that.name != null) return false;
return true;
}
@Override
public int hashCode() {
return name != null ? name.hashCode() : 0;
}
@Override
public int compareTo(ClientYamlTestSection o) {
return name.compareTo(o.getName());
}
}
|
ClientYamlTestSection
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/query/results/internal/complete/DelayedFetchBuilderBasicPart.java
|
{
"start": 736,
"end": 2704
}
|
class ____
implements CompleteFetchBuilder, FetchBuilderBasicValued, ModelPartReferenceBasic {
private final NavigablePath navigablePath;
private final BasicValuedModelPart referencedModelPart;
private final boolean isEnhancedForLazyLoading;
public DelayedFetchBuilderBasicPart(
NavigablePath navigablePath,
BasicValuedModelPart referencedModelPart,
boolean isEnhancedForLazyLoading) {
this.navigablePath = navigablePath;
this.referencedModelPart = referencedModelPart;
this.isEnhancedForLazyLoading = isEnhancedForLazyLoading;
}
@Override
public FetchBuilder cacheKeyInstance() {
return this;
}
@Override
public NavigablePath getNavigablePath() {
return navigablePath;
}
@Override
public BasicValuedModelPart getReferencedPart() {
return referencedModelPart;
}
@Override
public BasicFetch<?> buildFetch(
FetchParent parent,
NavigablePath fetchPath,
JdbcValuesMetadata jdbcResultsMetadata,
DomainResultCreationState domainResultCreationState) {
return new BasicFetch<>(
-1,
parent,
fetchPath,
referencedModelPart,
null,
FetchTiming.DELAYED,
isEnhancedForLazyLoading,
domainResultCreationState,
false,
false
);
}
@Override
public List<String> getColumnAliases() {
return Collections.emptyList();
}
@Override
public boolean equals(Object o) {
if ( this == o ) {
return true;
}
if ( o == null || getClass() != o.getClass() ) {
return false;
}
final DelayedFetchBuilderBasicPart that = (DelayedFetchBuilderBasicPart) o;
return isEnhancedForLazyLoading == that.isEnhancedForLazyLoading
&& navigablePath.equals( that.navigablePath )
&& referencedModelPart.equals( that.referencedModelPart );
}
@Override
public int hashCode() {
int result = navigablePath.hashCode();
result = 31 * result + referencedModelPart.hashCode();
result = 31 * result + ( isEnhancedForLazyLoading ? 1 : 0 );
return result;
}
}
|
DelayedFetchBuilderBasicPart
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/response/AlibabaCloudSearchCompletionResponseEntity.java
|
{
"start": 859,
"end": 3289
}
|
class ____ extends AlibabaCloudSearchResponseEntity {
private static final String FAILED_TO_FIND_FIELD_TEMPLATE =
"Failed to find required field [%s] in AlibabaCloud Search completion response";
/**
* Parses the AlibabaCloud Search embedding json response.
* For a request like:
*
* <pre>
* <code>
* {
* "messages": [
* {
* "role": "system",
* "content": "你是一个机器人助手"
* },
* {
* "role": "user",
* "content": "河南的省会是哪里"
* },
* {
* "role": "assistant",
* "content": "郑州"
* },
* {
* "role": "user",
* "content": "那里有什么好玩的"
* }
* ],
* "stream": false
* }
* </code>
* </pre>
*
* The response would look like:
*
* <pre>
* <code>
* {
* "request_id": "450fcb80-f796-46c1-8d69-e1e86d29aa9f",
* "latency": 564.903929,
* "result": {
* "text":"郑州是一个历史文化悠久且现代化的城市,有很多好玩的地方。以下是一些推荐的旅游景点:
* 嵩山少林寺:作为少林武术的发源地,嵩山少林寺一直以来都是游客向往的地方。在这里,你可以欣赏到精彩的武术表演,领略少林功夫的魅力。
* 黄河游览区:黄河是中华民族的母亲河,而在郑州,你可以乘坐游船观赏黄河的多种风情,感受大河之美。
* 郑州动物园:这是一个适合全家游玩的景点,拥有各种珍稀动物,如大熊猫、金丝猴等,让孩子们近距离接触动物,增长见识。
* 郑州博物馆:如果你对历史文化感兴趣,那么郑州博物馆是一个不错的选择。这里收藏了大量珍贵的文物,展示了郑州地区的历史变迁和文化传承。
* 郑州世纪公园:这是一个大型的城市公园,拥有美丽的湖泊、花园和休闲设施。在这里,你可以进行散步、慢跑等户外活动,享受大自然的宁静与和谐。
* 以上只是郑州众多好玩地方的一部分,实际上郑州还有很多其他值得一游的景点。希望你在郑州的旅行能够愉快!"
* }
* "usage": {
* "output_tokens": 6320,
* "input_tokens": 35,
* "total_tokens": 6355,
* }
*
* }
* </code>
* </pre>
*/
public static ChatCompletionResults fromResponse(Request request, HttpResult response) throws IOException {
return fromResponse(request, response, jsonParser -> {
positionParserAtTokenAfterField(jsonParser, "text", FAILED_TO_FIND_FIELD_TEMPLATE);
XContentParser.Token contentToken = jsonParser.currentToken();
ensureExpectedToken(XContentParser.Token.VALUE_STRING, contentToken, jsonParser);
String content = jsonParser.text();
return new ChatCompletionResults(List.of(new ChatCompletionResults.Result(content)));
});
}
}
|
AlibabaCloudSearchCompletionResponseEntity
|
java
|
apache__spark
|
examples/src/main/java/org/apache/spark/examples/ml/JavaPrefixSpanExample.java
|
{
"start": 1286,
"end": 2364
}
|
class ____ {
public static void main(String[] args) {
SparkSession spark = SparkSession
.builder()
.appName("JavaPrefixSpanExample")
.getOrCreate();
// $example on$
List<Row> data = Arrays.asList(
RowFactory.create(Arrays.asList(Arrays.asList(1, 2), Arrays.asList(3))),
RowFactory.create(Arrays.asList(Arrays.asList(1), Arrays.asList(3, 2), Arrays.asList(1,2))),
RowFactory.create(Arrays.asList(Arrays.asList(1, 2), Arrays.asList(5))),
RowFactory.create(Arrays.asList(Arrays.asList(6)))
);
StructType schema = new StructType(new StructField[]{ new StructField(
"sequence", new ArrayType(new ArrayType(DataTypes.IntegerType, true), true),
false, Metadata.empty())
});
Dataset<Row> sequenceDF = spark.createDataFrame(data, schema);
PrefixSpan prefixSpan = new PrefixSpan().setMinSupport(0.5).setMaxPatternLength(5);
// Finding frequent sequential patterns
prefixSpan.findFrequentSequentialPatterns(sequenceDF).show();
// $example off$
spark.stop();
}
}
|
JavaPrefixSpanExample
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/hooks/TestMasterHook.java
|
{
"start": 1329,
"end": 2758
}
|
class ____ implements MasterTriggerRestoreHook<String> {
private static final String DEFAULT_STATE = "default";
private final String id;
private int restoreCount = 0;
private boolean failOnRestore = false;
private TestMasterHook(String id) {
this.id = checkNotNull(id);
}
public static TestMasterHook fromId(String id) {
return new TestMasterHook(id);
}
@Override
public String getIdentifier() {
return id;
}
@Override
public CompletableFuture<String> triggerCheckpoint(
final long checkpointId, final long timestamp, final Executor executor) {
return CompletableFuture.completedFuture(DEFAULT_STATE);
}
@Override
public void restoreCheckpoint(final long checkpointId, @Nullable final String checkpointData)
throws Exception {
restoreCount++;
if (failOnRestore) {
throw new Exception("Failing mast hook state restore on purpose.");
}
}
@Override
public SimpleVersionedSerializer<String> createCheckpointDataSerializer() {
return new CheckpointCoordinatorTestingUtils.StringSerializer();
}
public int getRestoreCount() {
return restoreCount;
}
public void enableFailOnRestore() {
this.failOnRestore = true;
}
public void disableFailOnRestore() {
this.failOnRestore = false;
}
}
|
TestMasterHook
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/config/ClassPathResourceContextConfigurationAppCtxTests.java
|
{
"start": 1341,
"end": 1929
}
|
class ____ extends CoreContextConfigurationAppCtxTests {
/**
* Classpath-based resource path for the application context configuration
* for {@link CoreContextConfigurationAppCtxTests}: {@value}
*
* @see CoreContextConfigurationAppCtxTests#DEFAULT_CONTEXT_RESOURCE_PATH
* @see ResourceUtils#CLASSPATH_URL_PREFIX
*/
public static final String CLASSPATH_CONTEXT_RESOURCE_PATH = ResourceUtils.CLASSPATH_URL_PREFIX +
CoreContextConfigurationAppCtxTests.DEFAULT_CONTEXT_RESOURCE_PATH;
/* all tests are in the parent class. */
}
|
ClassPathResourceContextConfigurationAppCtxTests
|
java
|
apache__camel
|
core/camel-management/src/test/java/org/apache/camel/management/ManagedRouteStopAndStartCleanupTest.java
|
{
"start": 1447,
"end": 5222
}
|
class ____ extends ManagedRouteStopAndStartTest {
@Override
@Test
public void testStopAndStartRoute() throws Exception {
MBeanServer mbeanServer = getMBeanServer();
ObjectName on = getRouteObjectName(mbeanServer);
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedBodiesReceived("Hello World");
template.sendBodyAndHeader(fileUri(), "Hello World", Exchange.FILE_NAME, "hello.txt");
assertMockEndpointsSatisfied();
// should be started
String state = (String) mbeanServer.getAttribute(on, "State");
assertEquals(ServiceStatus.Started.name(), state, "Should be started");
// need a bit time to let JMX update
await().atMost(1, TimeUnit.SECONDS).untilAsserted(() -> {
// should have 1 completed exchange
Long completed = (Long) mbeanServer.getAttribute(on, "ExchangesCompleted");
assertEquals(1, completed.longValue());
});
// should be 1 consumer and 2 processors
Set<ObjectName> set = mbeanServer.queryNames(new ObjectName("*:type=consumers,*"), null);
assertEquals(1, set.size(), "Should be 1 consumer");
set = mbeanServer.queryNames(new ObjectName("*:type=processors,*"), null);
assertEquals(2, set.size(), "Should be 2 processors");
// stop
log.info(">>>>>>>>>>>>>>>>>> invoking stop <<<<<<<<<<<<<<<<<<<<<");
mbeanServer.invoke(on, "stop", null, null);
log.info(">>>>>>>>>>>>>>>>>> invoking stop DONE <<<<<<<<<<<<<<<<<<<<<");
state = (String) mbeanServer.getAttribute(on, "State");
assertEquals(ServiceStatus.Stopped.name(), state, "Should be stopped");
// should be 0 consumer and 1 processor
set = mbeanServer.queryNames(new ObjectName("*:type=consumers,*"), null);
assertEquals(0, set.size(), "Should be 0 consumer");
set = mbeanServer.queryNames(new ObjectName("*:type=processors,*"), null);
assertEquals(2, set.size(), "Should be 2 processor");
mock.reset();
mock.expectedBodiesReceived("Bye World");
// wait 2 seconds while route is stopped to verify that file was not consumed
mock.setResultWaitTime(2000);
template.sendBodyAndHeader(fileUri(), "Bye World", Exchange.FILE_NAME, "bye.txt");
// route is stopped so we do not get the file
mock.assertIsNotSatisfied();
// prepare mock for starting route
mock.reset();
mock.expectedBodiesReceived("Bye World");
// start
log.info(">>>>>>>>>>>>>>>>> invoking start <<<<<<<<<<<<<<<<<<");
mbeanServer.invoke(on, "start", null, null);
log.info(">>>>>>>>>>>>>>>>> invoking start DONE <<<<<<<<<<<<<<<<<<");
state = (String) mbeanServer.getAttribute(on, "State");
assertEquals(ServiceStatus.Started.name(), state, "Should be started");
// should be 1 consumer and 1 processor
set = mbeanServer.queryNames(new ObjectName("*:type=consumers,*"), null);
assertEquals(1, set.size(), "Should be 1 consumer");
set = mbeanServer.queryNames(new ObjectName("*:type=processors,*"), null);
assertEquals(2, set.size(), "Should be 2 processors");
// this time the file is consumed
mock.assertIsSatisfied();
// need a bit time to let JMX update
await().atMost(1, TimeUnit.SECONDS).until(() -> {
Long num = (Long) mbeanServer.getAttribute(on, "ExchangesCompleted");
return num == 2;
});
// should have 2 completed exchange
Long completed = (Long) mbeanServer.getAttribute(on, "ExchangesCompleted");
assertEquals(2, completed.longValue());
}
}
|
ManagedRouteStopAndStartCleanupTest
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/PageSizeUtil.java
|
{
"start": 3350,
"end": 3478
}
|
class ____ not
* implicitly try to resolve the unsafe class.
*/
@SuppressWarnings("all")
private static final
|
does
|
java
|
quarkusio__quarkus
|
extensions/security/deployment/src/test/java/io/quarkus/security/test/permissionsallowed/checker/ReactivePermissionCheckerTest.java
|
{
"start": 1078,
"end": 3320
}
|
class ____ {
private static final AuthData USER_WITH_AUGMENTORS = new AuthData(USER, true);
private static final AuthData ADMIN_WITH_AUGMENTORS = new AuthData(ADMIN, true);
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(IdentityMock.class, AuthData.class, SecurityTestUtils.class));
@Inject
ReactivePermissionCheckerSecuredBean bean;
@Test
public void testCheckerAcceptingOnlySecurityIdentity() {
assertSuccess(() -> bean.securityIdentityOnly(), "securityIdentityOnly", ADMIN_WITH_AUGMENTORS);
assertFailureFor(() -> bean.securityIdentityOnly(), ForbiddenException.class, USER_WITH_AUGMENTORS);
assertFailureFor(() -> bean.securityIdentityOnly(), ForbiddenException.class, ADMIN);
}
@Test
public void testCheckerAcceptingSecuredMethodArguments() {
assertSuccess(() -> bean.securedMethodArguments(1, 2, 3), "securedMethodArguments", ADMIN_WITH_AUGMENTORS);
assertFailureFor(() -> bean.securedMethodArguments(1, 2, 3), UnauthorizedException.class, ANONYMOUS);
assertFailureFor(() -> bean.securedMethodArguments(1, 2, 3), ForbiddenException.class, USER_WITH_AUGMENTORS);
assertFailureFor(() -> bean.securedMethodArguments(1, 2, 3), ForbiddenException.class, ADMIN);
assertFailureFor(() -> bean.securedMethodArguments(9, 2, 3), ForbiddenException.class, ADMIN_WITH_AUGMENTORS);
assertFailureFor(() -> bean.securedMethodArguments(1, 9, 3), ForbiddenException.class, ADMIN_WITH_AUGMENTORS);
assertFailureFor(() -> bean.securedMethodArguments(1, 2, 9), ForbiddenException.class, ADMIN_WITH_AUGMENTORS);
}
@Test
public void testPermissionCheckerRuntimeExceptionHandling() {
assertSuccess(() -> bean.permissionCheckFailingForUser(), "permissionCheckFailingForUser", ADMIN_WITH_AUGMENTORS);
assertFailureFor(() -> bean.permissionCheckFailingForUser(), UnauthorizedException.class, ANONYMOUS);
assertFailureFor(() -> bean.permissionCheckFailingForUser(), ForbiddenException.class, USER_WITH_AUGMENTORS);
}
@ApplicationScoped
public static
|
ReactivePermissionCheckerTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/GetRolesIT.java
|
{
"start": 1443,
"end": 7161
}
|
class ____ extends SecurityInBasicRestTestCase {
private static final String ADMIN_USER = "admin_user";
private static final SecureString ADMIN_PASSWORD = new SecureString("admin-password".toCharArray());
protected static final String READ_SECURITY_USER = "read_security_user";
private static final SecureString READ_SECURITY_PASSWORD = new SecureString("read-security-password".toCharArray());
@Before
public void initialize() {
new ReservedRolesStore();
}
@ClassRule
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
.distribution(DistributionType.DEFAULT)
.nodes(2)
.setting("xpack.security.enabled", "true")
.setting("xpack.license.self_generated.type", "basic")
.rolesFile(Resource.fromClasspath("roles.yml"))
.user(ADMIN_USER, ADMIN_PASSWORD.toString(), User.ROOT_USER_ROLE, true)
.user(READ_SECURITY_USER, READ_SECURITY_PASSWORD.toString(), "read_security_user_role", false)
.build();
@Override
protected Settings restAdminSettings() {
String token = basicAuthHeaderValue(ADMIN_USER, ADMIN_PASSWORD);
return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build();
}
@Override
protected Settings restClientSettings() {
String token = basicAuthHeaderValue(READ_SECURITY_USER, READ_SECURITY_PASSWORD);
return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build();
}
@Override
protected String getTestRestCluster() {
return cluster.getHttpAddresses();
}
public void testGetAllRolesNoNative() throws Exception {
// Test get roles API with operator admin_user
getAllRolesAndAssert(adminClient(), ReservedRolesStore.names());
// Test get roles API with read_security_user
getAllRolesAndAssert(client(), ReservedRolesStore.names());
}
public void testGetAllRolesWithNative() throws Exception {
createRole("custom_role", "Test custom native role.", Map.of("owner", "test"));
Set<String> expectedRoles = new HashSet<>(ReservedRolesStore.names());
expectedRoles.add("custom_role");
// Test get roles API with operator admin_user
getAllRolesAndAssert(adminClient(), expectedRoles);
// Test get roles API with read_security_user
getAllRolesAndAssert(client(), expectedRoles);
}
public void testGetReservedOnly() throws Exception {
createRole("custom_role", "Test custom native role.", Map.of("owner", "test"));
Set<String> rolesToGet = new HashSet<>();
rolesToGet.add("custom_role");
rolesToGet.addAll(randomSet(1, 5, () -> randomFrom(ReservedRolesStore.names())));
getRolesAndAssert(adminClient(), rolesToGet);
getRolesAndAssert(client(), rolesToGet);
}
public void testGetNativeOnly() throws Exception {
createRole("custom_role1", "Test custom native role.", Map.of("owner", "test1"));
createRole("custom_role2", "Test custom native role.", Map.of("owner", "test2"));
Set<String> rolesToGet = Set.of("custom_role1", "custom_role2");
getRolesAndAssert(adminClient(), rolesToGet);
getRolesAndAssert(client(), rolesToGet);
}
public void testGetMixedRoles() throws Exception {
createRole("custom_role", "Test custom native role.", Map.of("owner", "test"));
Set<String> rolesToGet = new HashSet<>();
rolesToGet.add("custom_role");
rolesToGet.addAll(randomSet(1, 5, () -> randomFrom(ReservedRolesStore.names())));
getRolesAndAssert(adminClient(), rolesToGet);
getRolesAndAssert(client(), rolesToGet);
}
public void testNonExistentRole() {
var e = expectThrows(
ResponseException.class,
() -> client().performRequest(new Request("GET", "/_security/role/non_existent_role"))
);
assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(404));
}
private void createRole(String roleName, String description, Map<String, Object> metadata) throws IOException {
Request request = new Request("POST", "/_security/role/" + roleName);
Map<String, Object> requestMap = new HashMap<>();
if (description != null) {
requestMap.put(RoleDescriptor.Fields.DESCRIPTION.getPreferredName(), description);
}
if (metadata != null) {
requestMap.put(RoleDescriptor.Fields.METADATA.getPreferredName(), metadata);
}
BytesReference source = BytesReference.bytes(jsonBuilder().map(requestMap));
request.setJsonEntity(source.utf8ToString());
Response response = adminClient().performRequest(request);
assertOK(response);
Map<String, Object> responseMap = responseAsMap(response);
assertTrue(ObjectPath.eval("role.created", responseMap));
}
private void getAllRolesAndAssert(RestClient client, Set<String> expectedRoles) throws IOException {
final Response response = client.performRequest(new Request("GET", "/_security/role"));
assertOK(response);
final Map<String, Object> responseMap = responseAsMap(response);
assertThat(responseMap.keySet(), equalTo(expectedRoles));
}
private void getRolesAndAssert(RestClient client, Set<String> rolesToGet) throws IOException {
final Response response = client.performRequest(new Request("GET", "/_security/role/" + String.join(",", rolesToGet)));
assertOK(response);
final Map<String, Object> responseMap = responseAsMap(response);
assertThat(responseMap.keySet(), equalTo(rolesToGet));
}
}
|
GetRolesIT
|
java
|
apache__camel
|
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/AtomComponentBuilderFactory.java
|
{
"start": 1344,
"end": 1753
}
|
interface ____ {
/**
* Atom (camel-atom)
* Poll Atom RSS feeds.
*
* Category: document
* Since: 1.2
* Maven coordinates: org.apache.camel:camel-atom
*
* @return the dsl builder
*/
static AtomComponentBuilder atom() {
return new AtomComponentBuilderImpl();
}
/**
* Builder for the Atom component.
*/
|
AtomComponentBuilderFactory
|
java
|
apache__camel
|
components/camel-jacksonxml/src/test/java/org/apache/camel/component/jacksonxml/JacksonObjectListSplitTest.java
|
{
"start": 1777,
"end": 2080
}
|
class ____ for unmarshal the jason file
JacksonXMLDataFormat format = new JacksonXMLDataFormat(DummyObject.class);
format.useList();
from("direct:start").unmarshal(format).split(body()).to("mock:result");
}
};
}
public static
|
type
|
java
|
alibaba__nacos
|
config/src/main/java/com/alibaba/nacos/config/server/constant/Constants.java
|
{
"start": 844,
"end": 11285
}
|
class ____ {
public static final String CLIENT_VERSION_HEADER = "Client-Version";
public static final String CLIENT_VERSION = "3.0.0";
public static final String DEFAULT_GROUP = "DEFAULT_GROUP";
public static final String DATASOURCE_PLATFORM_PROPERTY_STATE = "datasource_platform";
public static final String CONFIG_RENTENTION_DAYS_PROPERTY_STATE = "config_retention_days";
/**
* Config file directory in server side.
*/
public static final String BASE_DIR = "config-data";
/**
* Back up file directory in server side.
*/
public static final String CONFIG_BAK_DIR = System.getProperty("user.home", "/home/admin") + "/nacos/bak_data";
public static final String APPNAME = "AppName";
public static final String UNKNOWN_APP = "UnknownApp";
public static final String DEFAULT_DOMAINNAME = "commonconfig.config-host.taobao.com";
public static final String DAILY_DOMAINNAME = "commonconfig.taobao.net";
public static final String NULL = "";
public static final String DATAID = "dataId";
public static final String GROUP = "group";
public static final String LAST_MODIFIED = "Last-Modified";
public static final String ACCEPT_ENCODING = "Accept-Encoding";
public static final String CONTENT_ENCODING = "Content-Encoding";
public static final String PROBE_MODIFY_REQUEST = "Listening-Configs";
public static final String PROBE_MODIFY_RESPONSE = "Probe-Modify-Response";
public static final String PROBE_MODIFY_RESPONSE_NEW = "Probe-Modify-Response-New";
public static final String USE_ZIP = "true";
public static final String CONTENT_MD5 = "Content-MD5";
public static final String CONFIG_VERSION = "Config-Version";
public static final String IF_MODIFIED_SINCE = "If-Modified-Since";
public static final String SPACING_INTERVAL = "client-spacing-interval";
/**
* Interval for async update address(unit: second).
*/
public static final int ASYNC_UPDATE_ADDRESS_INTERVAL = 300;
/**
* Interval for polling(unit: second).
*/
public static final int POLLING_INTERVAL_TIME = 15;
/**
* Unit: millisecond.
*/
public static final int ONCE_TIMEOUT = 2000;
/**
* Unit: millisecond.
*/
public static final int CONN_TIMEOUT = 2000;
/**
* Unit: millisecond.
*/
public static final int SO_TIMEOUT = 60000;
/**
* Unit: millisecond.
*/
public static final int RECV_WAIT_TIMEOUT = ONCE_TIMEOUT * 5;
public static final String BASE_PATH = "/v1/cs";
public static final String BASE_V2_PATH = "/v2/cs";
public static final String BASE_ADMIN_V3_PATH = "/v3/admin/cs";
public static final String OPS_CONTROLLER_PATH = BASE_PATH + "/ops";
public static final String OPS_CONTROLLER_V3_ADMIN_PATH = BASE_ADMIN_V3_PATH + "/ops";
public static final String CAPACITY_CONTROLLER_PATH = BASE_PATH + "/capacity";
public static final String CAPACITY_CONTROLLER_V3_ADMIN_PATH = BASE_ADMIN_V3_PATH + "/capacity";
public static final String COMMUNICATION_CONTROLLER_PATH = BASE_PATH + "/communication";
public static final String CONFIG_CONTROLLER_PATH = BASE_PATH + "/configs";
public static final String CONFIG_CONTROLLER_V2_PATH = BASE_V2_PATH + "/config";
public static final String CONFIG_ADMIN_V3_PATH = BASE_ADMIN_V3_PATH + "/config";
public static final String HEALTH_CONTROLLER_PATH = BASE_PATH + "/health";
public static final String HISTORY_CONTROLLER_PATH = BASE_PATH + "/history";
public static final String HISTORY_CONTROLLER_V2_PATH = BASE_V2_PATH + "/history";
public static final String HISTORY_ADMIN_V3_PATH = BASE_ADMIN_V3_PATH + "/history";
public static final String LISTENER_CONTROLLER_PATH = BASE_PATH + "/listener";
public static final String LISTENER_CONTROLLER_V3_ADMIN_PATH = BASE_ADMIN_V3_PATH + "/listener";
public static final String NAMESPACE_CONTROLLER_PATH = BASE_PATH + "/namespaces";
public static final String METRICS_CONTROLLER_PATH = BASE_PATH + "/metrics";
public static final String METRICS_CONTROLLER_V3_ADMIN_PATH = BASE_ADMIN_V3_PATH + "/metrics";
public static final String CONFIG_V3_CLIENT_API_PATH = "/v3/client/cs/config";
public static final String ENCODE = "UTF-8";
public static final String PERSIST_ENCODE = getPersistEncode();
public static final String ENCODE_GBK = "GBK";
public static final String ENCODE_UTF8 = "UTF-8";
public static final String MAP_FILE = "map-file.js";
public static final int FLOW_CONTROL_THRESHOLD = 20;
public static final int FLOW_CONTROL_SLOT = 10;
public static final int FLOW_CONTROL_INTERVAL = 1000;
public static final String LINE_SEPARATOR = Character.toString((char) 1);
public static final String WORD_SEPARATOR = Character.toString((char) 2);
public static final String NACOS_LINE_SEPARATOR = "\r\n";
/**
* Total time of threshold value when getting data from network(unit: millisecond).
*/
public static final long TOTALTIME_FROM_SERVER = 10000;
/**
* Invalid total time of threshold value when getting data from network(unit: millisecond).
*/
public static final long TOTALTIME_INVALID_THRESHOLD = 60000;
/**
* When exception or error occurs.
*/
public static final int BATCH_OP_ERROR = -1;
/**
* State code of single data when batch operation.
*/
public static final String BATCH_OP_ERROR_IO_MSG = "get config dump error";
public static final String BATCH_OP_ERROR_CONFLICT_MSG = "config get conflicts";
/**
* Batch query when data existent.
*/
public static final int BATCH_QUERY_EXISTS = 1;
public static final String BATCH_QUERY_EXISTS_MSG = "config exits";
/**
* Batch query when data non-existent.
*/
public static final int BATCH_QUERY_NONEXISTS = 2;
public static final String BATCH_QUERY_NONEEXISTS_MSG = "config not exits";
/**
* Batch adding successfully.
*/
public static final int BATCH_ADD_SUCCESS = 3;
/**
* Batch updating successfully.
*/
public static final int BATCH_UPDATE_SUCCESS = 4;
public static final int MAX_UPDATE_FAIL_COUNT = 5;
public static final int MAX_UPDATEALL_FAIL_COUNT = 5;
public static final int MAX_REMOVE_FAIL_COUNT = 5;
public static final int MAX_REMOVEALL_FAIL_COUNT = 5;
public static final int MAX_NOTIFY_COUNT = 5;
public static final int MAX_ADDACK_COUNT = 5;
/**
* First version of data.
*/
public static final int FIRST_VERSION = 1;
/**
* Poison version when data is deleted.
*/
public static final int POISON_VERSION = -1;
/**
* Temporary version when disk file is full.
*/
public static final int TEMP_VERSION = 0;
/**
* Plain sequence of getting data: backup file -> server -> local file.
*/
public static final int GETCONFIG_LOCAL_SERVER_SNAPSHOT = 1;
/**
* Plain sequence of getting data: backup file -> local file -> server.
*/
public static final int GETCONFIG_LOCAL_SNAPSHOT_SERVER = 2;
public static final String CLIENT_APPNAME_HEADER = "Client-AppName";
public static final String CLIENT_REQUEST_TS_HEADER = "Client-RequestTS";
public static final String CLIENT_REQUEST_TOKEN_HEADER = "Client-RequestToken";
/**
* Client, identity for sdk request to server.
*/
public static final String REQUEST_IDENTITY = "Request-Identity";
/**
* Forward to leader node.
*/
public static final String FORWARD_LEADER = "Forward-Leader";
/**
* Acl result information.
*/
public static final String ACL_RESPONSE = "ACL-Response";
public static final int ATOMIC_MAX_SIZE = 1000;
public static final int DATA_IN_BODY_VERSION = 204;
/**
* Configure the dump event name.
*/
public static final String EXTEND_INFO_CONFIG_DUMP_EVENT = ConfigDumpEvent.class.getName();
/**
* Configure the dump event-list name.
*/
public static final String EXTEND_INFOS_CONFIG_DUMP_EVENT = ConfigDumpEvent.class.getName() + "@@many";
public static final String CONFIG_EXPORT_ITEM_FILE_SEPARATOR = "/";
public static final String CONFIG_EXPORT_METADATA = ".meta.yml";
public static final String CONFIG_EXPORT_METADATA_NEW = ".metadata.yml";
public static final int LIMIT_ERROR_CODE = 429;
public static final String NACOS_PLUGIN_DATASOURCE_LOG_STATE = "plugin_datasource_log_enabled";
public static final String CONFIG_SEARCH_BLUR = "blur";
public static final String CONFIG_SEARCH_ACCURATE = "accurate";
/**
* Gray rule.
*/
public static final String GRAY_RULE_TYPE = "type";
public static final String GRAY_RULE_EXPR = "expr";
public static final String GRAY_RULE_VERSION = "version";
public static final String GRAY_RULE_PRIORITY = "priority";
/**
* default nacos encode.
*/
public static final String DEFAULT_NACOS_ENCODE = "UTF-8";
public static final String NACOS_PERSIST_ENCODE_KEY = "nacosPersistEncodingKey";
/**
* config publish type.
*/
public static final String FORMAL = "formal";
public static final String GRAY = "gray";
/**
* request source type.
*/
public static final String HTTP = "http";
public static final String RPC = "rpc";
/**
* Separator.
*/
public static final String COLON = ":";
static String getPersistEncode() {
String persistEncode = System.getenv(NACOS_PERSIST_ENCODE_KEY);
if (StringUtils.isBlank(persistEncode)) {
persistEncode = System.getProperty(NACOS_PERSIST_ENCODE_KEY);
if (StringUtils.isBlank(persistEncode)) {
persistEncode = DEFAULT_NACOS_ENCODE;
}
}
return persistEncode;
}
}
|
Constants
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/namingstrategy/components/ComponentNamingStrategyJoinColumnTest.java
|
{
"start": 987,
"end": 2363
}
|
class ____ {
@Test
public void testNamingComponentPath() {
final StandardServiceRegistry ssr = ServiceRegistryUtil.serviceRegistry();
try {
final MetadataSources ms = new MetadataSources( ssr )
.addAnnotatedClass( BaseEntity.class )
.addAnnotatedClass( CollectionWrapper.class )
.addAnnotatedClass( CollectionItem.class )
.addAnnotatedClass( ToOneEntity.class );
final Metadata metadata = ms.getMetadataBuilder()
.applyImplicitNamingStrategy( ImplicitNamingStrategyComponentPathImpl.INSTANCE )
.build();
final org.hibernate.mapping.Collection collection = metadata.getCollectionBinding(
BaseEntity.class.getName() + '.' + "collectionWrapper.items"
);
final org.hibernate.mapping.Table table = collection.getCollectionTable();
assertThat( table.getName() ).isEqualTo( "BaseEntity_collectionWrapper_items" );
assertThat( collection.getOwner().getKey().getColumnSpan() ).isEqualTo( 1 );
assertThat( collection.getKey().getColumns().get( 0 ).getName() ).isEqualTo( "BaseEntity_id" );
assertThat( table.getColumns().stream().map( Column::getName ) ).contains(
"BaseEntity_id",
"collectionWrapper_items_name",
"collectionWrapper_items_toOne_id"
);
}
finally {
StandardServiceRegistryBuilder.destroy( ssr );
}
}
@Entity(name = "BaseEntity")
static
|
ComponentNamingStrategyJoinColumnTest
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/bugs/_909/ValuesMapper.java
|
{
"start": 484,
"end": 713
}
|
class ____ {
private String values;
public String getValues() {
return values;
}
public void setValues(String values) {
this.values = values;
}
}
|
ValuesHolder
|
java
|
junit-team__junit5
|
platform-tests/src/test/java/org/junit/platform/commons/support/ModifierSupportTests.java
|
{
"start": 6809,
"end": 6878
}
|
class ____ {
public void publicMethod() {
}
}
private
|
PublicClass
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/TypeEqualsCheckerTest.java
|
{
"start": 3169,
"end": 4156
}
|
class ____ extends BugChecker implements ClassTreeMatcher {
@Override
public Description matchClass(ClassTree tree, VisitorState state) {
Symbol sym = getSymbol(tree);
Types types = state.getTypes();
ClassSymbol owner = sym.enclClass();
for (Type s : types.closure(owner.type)) {
// BUG: Diagnostic contains: TypeEquals
if (s.equals(owner.type)) {
return Description.NO_MATCH;
}
// BUG: Diagnostic contains: TypeEquals
if (Objects.equals(s, owner.type)) {
return Description.NO_MATCH;
}
}
return Description.NO_MATCH;
}
}
""")
.addModules(
"jdk.compiler/com.sun.tools.javac.code", "jdk.compiler/com.sun.tools.javac.util")
.doTest();
}
}
|
ExampleChecker
|
java
|
spring-projects__spring-framework
|
spring-webflux/src/test/java/org/springframework/web/reactive/result/method/annotation/ProtobufIntegrationTests.java
|
{
"start": 1821,
"end": 5275
}
|
class ____ extends AbstractRequestMappingIntegrationTests {
static final Msg TEST_MSG =
Msg.newBuilder().setFoo("Foo").setBlah(SecondMsg.newBuilder().setBlah(123).build()).build();
private WebClient webClient;
@Override
protected void startServer(HttpServer httpServer) throws Exception {
super.startServer(httpServer);
this.webClient = WebClient.create("http://localhost:" + this.port);
}
@Override
protected ApplicationContext initApplicationContext() {
return new AnnotationConfigApplicationContext(TestConfiguration.class);
}
@ParameterizedHttpServerTest
void value(HttpServer httpServer) throws Exception {
startServer(httpServer);
Mono<ResponseEntity<Msg>> result = this.webClient.get()
.uri("/message")
.retrieve()
.toEntity(Msg.class);
StepVerifier.create(result)
.consumeNextWith(entity -> {
HttpHeaders headers = entity.getHeaders();
assertThat(headers.getContentType().getParameters().containsKey("delimited")).isFalse();
assertThat(headers.getFirst("X-Protobuf-Schema")).isEqualTo("sample.proto");
assertThat(headers.getFirst("X-Protobuf-Message")).isEqualTo("Msg");
assertThat(entity.getBody()).isEqualTo(TEST_MSG);
})
.verifyComplete();
}
@ParameterizedHttpServerTest
void values(HttpServer httpServer) throws Exception {
startServer(httpServer);
Mono<ResponseEntity<List<Msg>>> result = this.webClient.get()
.uri("/messages")
.retrieve()
.toEntityList(Msg.class);
StepVerifier.create(result)
.consumeNextWith(entity -> {
HttpHeaders headers = entity.getHeaders();
assertThat(headers.getContentType().getParameters().get("delimited")).isEqualTo("true");
assertThat(headers.getFirst("X-Protobuf-Schema")).isEqualTo("sample.proto");
assertThat(headers.getFirst("X-Protobuf-Message")).isEqualTo("Msg");
assertThat(entity.getBody()).containsExactly(TEST_MSG, TEST_MSG, TEST_MSG);
})
.verifyComplete();
}
@ParameterizedHttpServerTest
void streaming(HttpServer httpServer) throws Exception {
startServer(httpServer);
Flux<Msg> result = this.webClient.get()
.uri("/message-stream")
.exchangeToFlux(response -> {
assertThat(response.headers().contentType().get().getParameters().get("delimited")).isEqualTo("true");
assertThat(response.headers().header("X-Protobuf-Schema")).containsExactly("sample.proto");
assertThat(response.headers().header("X-Protobuf-Message")).containsExactly("Msg");
return response.bodyToFlux(Msg.class);
});
StepVerifier.create(result)
.expectNext(Msg.newBuilder().setFoo("Foo").setBlah(SecondMsg.newBuilder().setBlah(0).build()).build())
.expectNext(Msg.newBuilder().setFoo("Foo").setBlah(SecondMsg.newBuilder().setBlah(1).build()).build())
.thenCancel()
.verify();
}
@ParameterizedHttpServerTest
void empty(HttpServer httpServer) throws Exception {
startServer(httpServer);
Mono<Msg> result = this.webClient.get()
.uri("/empty")
.retrieve()
.bodyToMono(Msg.class);
StepVerifier.create(result)
.verifyComplete();
}
@ParameterizedHttpServerTest
void defaultInstance(HttpServer httpServer) throws Exception {
startServer(httpServer);
Mono<Msg> result = this.webClient.get()
.uri("/default-instance")
.retrieve()
.bodyToMono(Msg.class);
StepVerifier.create(result)
.verifyComplete();
}
@RestController
@SuppressWarnings("unused")
static
|
ProtobufIntegrationTests
|
java
|
quarkusio__quarkus
|
extensions/oidc/runtime/src/main/java/io/quarkus/oidc/runtime/OidcTenantConfig.java
|
{
"start": 11881,
"end": 12910
}
|
enum ____ {
/**
* Clear cache
*/
CACHE("cache"),
/**
* Clear client hints.
*/
CLIENT_HINTS("clientHints"),
/**
* Clear cookies.
*/
COOKIES("cookies"),
/**
* Clear execution contexts
*/
EXECUTION_CONTEXTS("executionContexts"),
/**
* Clear storage
*/
STORAGE("storage"),
/**
* Clear all types of data
*/
WILDCARD("*");
private String directive;
private ClearSiteData(String directive) {
this.directive = directive;
}
public String directive() {
return "\"" + directive + "\"";
}
}
/**
* Clear-Site-Data header directives
*/
Optional<Set<ClearSiteData>> clearSiteData();
|
ClearSiteData
|
java
|
quarkusio__quarkus
|
extensions/tls-registry/deployment/src/test/java/io/quarkus/tls/NamedP12TrustStoreWithCredentialsProviderWithAliasTest.java
|
{
"start": 1145,
"end": 2541
}
|
class ____ {
private static final String configuration = """
quarkus.tls.foo.trust-store.p12.path=target/certs/test-credentials-provider-alias-truststore.p12
quarkus.tls.foo.trust-store.p12.alias=my-alias
quarkus.tls.foo.trust-store.credentials-provider.name=tls
""";
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest().setArchiveProducer(
() -> ShrinkWrap.create(JavaArchive.class)
.addClass(MyCredentialProvider.class)
.add(new StringAsset(configuration), "application.properties"));
@Inject
TlsConfigurationRegistry certificates;
@Test
void test() throws KeyStoreException, CertificateParsingException {
TlsConfiguration def = certificates.get("foo").orElseThrow();
assertThat(def.getTrustStoreOptions()).isNotNull();
assertThat(def.getTrustStore()).isNotNull();
X509Certificate certificate = (X509Certificate) def.getTrustStore().getCertificate("my-alias");
assertThat(certificate).isNotNull();
assertThat(certificate.getSubjectAlternativeNames()).anySatisfy(l -> {
assertThat(l.get(0)).isEqualTo(2);
assertThat(l.get(1)).isEqualTo("dns:acme.org");
});
}
@ApplicationScoped
public static
|
NamedP12TrustStoreWithCredentialsProviderWithAliasTest
|
java
|
spring-projects__spring-boot
|
buildSrc/src/main/java/org/springframework/boot/build/bom/bomr/InteractiveUpgradeResolver.java
|
{
"start": 1297,
"end": 3899
}
|
class ____ implements UpgradeResolver {
private final UserInputHandler userInputHandler;
private final LibraryUpdateResolver libraryUpdateResolver;
InteractiveUpgradeResolver(UserInputHandler userInputHandler, LibraryUpdateResolver libraryUpdateResolver) {
this.userInputHandler = userInputHandler;
this.libraryUpdateResolver = libraryUpdateResolver;
}
@Override
public List<Upgrade> resolveUpgrades(Collection<Library> librariesToUpgrade, Collection<Library> libraries) {
Map<String, Library> librariesByName = new HashMap<>();
for (Library library : libraries) {
librariesByName.put(library.getName(), library);
}
try {
return this.libraryUpdateResolver.findLibraryUpdates(librariesToUpgrade, librariesByName)
.stream()
.map(this::resolveUpgrade)
.filter(Objects::nonNull)
.toList();
}
catch (UpgradesInterruptedException ex) {
return Collections.emptyList();
}
}
private Upgrade resolveUpgrade(LibraryWithVersionOptions libraryWithVersionOptions) {
Library library = libraryWithVersionOptions.getLibrary();
List<VersionOption> versionOptions = libraryWithVersionOptions.getVersionOptions();
if (versionOptions.isEmpty()) {
return null;
}
VersionOption defaultOption = defaultOption(library);
VersionOption selected = selectOption(defaultOption, library, versionOptions);
return (selected.equals(defaultOption)) ? null : selected.upgrade(library);
}
private VersionOption defaultOption(Library library) {
VersionAlignment alignment = library.getVersionAlignment();
Set<String> alignedVersions = (alignment != null) ? alignment.resolve() : null;
if (alignedVersions != null && alignedVersions.size() == 1) {
DependencyVersion alignedVersion = DependencyVersion.parse(alignedVersions.iterator().next());
if (alignedVersion.equals(library.getVersion().getVersion())) {
return new VersionOption.AlignedVersionOption(alignedVersion, alignment);
}
}
return new VersionOption(library.getVersion().getVersion());
}
private VersionOption selectOption(VersionOption defaultOption, Library library,
List<VersionOption> versionOptions) {
VersionOption selected = this.userInputHandler.askUser((questions) -> {
String question = library.getNameAndVersion();
List<VersionOption> options = new ArrayList<>();
options.add(defaultOption);
options.addAll(versionOptions);
return questions.selectOption(question, options, defaultOption);
}).get();
if (this.userInputHandler.interrupted()) {
throw new UpgradesInterruptedException();
}
return selected;
}
static
|
InteractiveUpgradeResolver
|
java
|
alibaba__nacos
|
config/src/main/java/com/alibaba/nacos/config/server/utils/SystemConfig.java
|
{
"start": 980,
"end": 2279
}
|
class ____ {
public static final String LOCAL_IP = getHostAddress();
private static final Logger LOGGER = LoggerFactory.getLogger(SystemConfig.class);
private static String getHostAddress() {
String address = System.getProperty("nacos.server.ip");
if (StringUtils.isNotEmpty(address)) {
return address;
} else {
address = InternetAddressUtil.localHostIp();
}
try {
Enumeration<NetworkInterface> en = NetworkInterface.getNetworkInterfaces();
while (en.hasMoreElements()) {
NetworkInterface ni = en.nextElement();
Enumeration<InetAddress> ads = ni.getInetAddresses();
while (ads.hasMoreElements()) {
InetAddress ip = ads.nextElement();
// Compatible group does not regulate 11 network segments
if (!ip.isLoopbackAddress() && ip.getHostAddress().indexOf(":") == -1
/* && ip.isSiteLocalAddress() */) {
return ip.getHostAddress();
}
}
}
} catch (Exception e) {
LOGGER.error("get local host address error", e);
}
return address;
}
}
|
SystemConfig
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/query/DateTimeParameterTest.java
|
{
"start": 3742,
"end": 4265
}
|
class ____ {
@Id
public Integer id;
public String someString;
@Temporal(TemporalType.DATE)
public Date someDate;
@Temporal(TemporalType.TIME)
public Date someTime;
@Temporal(TemporalType.TIMESTAMP)
public Date someTimestamp;
public Thing() {
}
public Thing(Integer id, String someString, Date someDate, Date someTime, Date someTimestamp) {
this.id = id;
this.someString = someString;
this.someDate = someDate;
this.someTime = someTime;
this.someTimestamp = someTimestamp;
}
}
}
|
Thing
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
|
{
"start": 2898,
"end": 6037
}
|
class ____ extends CookieHandler {
private HttpCookie authCookie;
private Map<String, List<String>> cookieHeaders = Collections.emptyMap();
@Override
public synchronized Map<String, List<String>> get(URI uri,
Map<String, List<String>> requestHeaders) throws IOException {
// call getter so it will reset headers if token is expiring.
getAuthCookie();
return cookieHeaders;
}
@Override
public void put(URI uri, Map<String, List<String>> responseHeaders) {
List<String> headers = responseHeaders.get("Set-Cookie");
if (headers == null) {
headers = responseHeaders.get("set-cookie");
}
if (headers != null) {
for (String header : headers) {
List<HttpCookie> cookies;
try {
cookies = HttpCookie.parse(header);
} catch (IllegalArgumentException iae) {
// don't care. just skip malformed cookie headers.
// When header is empty - "Cannot parse cookie header, header = ,
// reason = Empty cookie header string"
LOG.debug("Cannot parse cookie header, header = {}, reason = {} ",
header, iae.getMessage());
continue;
}
for (HttpCookie cookie : cookies) {
if (AUTH_COOKIE.equals(cookie.getName())) {
setAuthCookie(cookie);
}
}
}
}
}
// return the auth cookie if still valid.
private synchronized HttpCookie getAuthCookie() {
if (authCookie != null && authCookie.hasExpired()) {
setAuthCookie(null);
}
return authCookie;
}
private synchronized void setAuthCookie(HttpCookie cookie) {
final HttpCookie oldCookie = authCookie;
// will redefine if new cookie is valid.
authCookie = null;
cookieHeaders = Collections.emptyMap();
boolean valid = cookie != null && !cookie.getValue().isEmpty() &&
!cookie.hasExpired();
if (valid) {
// decrease lifetime to avoid using a cookie soon to expire.
// allows authenticators to pre-emptively reauthenticate to
// prevent clients unnecessarily receiving a 401.
long maxAge = cookie.getMaxAge();
if (maxAge != -1) {
cookie.setMaxAge(maxAge * 9/10);
valid = !cookie.hasExpired();
}
}
if (valid) {
// v0 cookies value aren't quoted by default but tomcat demands
// quoting.
if (cookie.getVersion() == 0) {
String value = cookie.getValue();
if (!value.startsWith("\"")) {
value = "\"" + value + "\"";
cookie.setValue(value);
}
}
authCookie = cookie;
cookieHeaders = new HashMap<>();
cookieHeaders.put("Cookie", Arrays.asList(cookie.toString()));
}
}
private void setAuthCookieValue(String value) {
HttpCookie c = null;
if (value != null) {
c = new HttpCookie(AUTH_COOKIE, value);
}
setAuthCookie(c);
}
}
/**
* Client side authentication token.
*/
public static
|
AuthCookieHandler
|
java
|
elastic__elasticsearch
|
modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/TokenCountFieldMapperIntegrationIT.java
|
{
"start": 1887,
"end": 11602
}
|
class ____ extends ESIntegTestCase {
@ParametersFactory
public static Iterable<Object[]> buildParameters() {
List<Object[]> parameters = new ArrayList<>();
for (boolean storeCountedFields : new boolean[] { true, false }) {
for (boolean loadCountedFields : new boolean[] { true, false }) {
parameters.add(new Object[] { storeCountedFields, loadCountedFields });
}
}
return parameters;
}
private final boolean storeCountedFields;
private final boolean loadCountedFields;
public TokenCountFieldMapperIntegrationIT(
@Name("storeCountedFields") boolean storeCountedFields,
@Name("loadCountedFields") boolean loadCountedFields
) {
this.storeCountedFields = storeCountedFields;
this.loadCountedFields = loadCountedFields;
}
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Arrays.asList(MapperExtrasPlugin.class);
}
/**
* It is possible to get the token count in a search response.
*/
public void testSearchReturnsTokenCount() throws IOException {
init();
assertResponse(searchById("single"), resp -> assertSearchReturns(resp, "single"));
assertResponse(searchById("bulk1"), resp -> assertSearchReturns(resp, "bulk1"));
assertResponse(searchById("bulk2"), resp -> assertSearchReturns(resp, "bulk2"));
assertResponse(searchById("multi"), resp -> assertSearchReturns(resp, "multi"));
assertResponse(searchById("multibulk1"), resp -> assertSearchReturns(resp, "multibulk1"));
assertResponse(searchById("multibulk2"), resp -> assertSearchReturns(resp, "multibulk2"));
}
/**
* It is possible to search by token count.
*/
public void testSearchByTokenCount() throws IOException {
init();
assertResponse(searchByNumericRange(4, 4), response -> assertSearchReturns(response, "single"));
assertResponse(searchByNumericRange(10, 10), response -> assertSearchReturns(response, "multibulk2"));
assertResponse(searchByNumericRange(7, 10), response -> assertSearchReturns(response, "multi", "multibulk1", "multibulk2"));
assertResponse(
searchByNumericRange(1, 10),
response -> assertSearchReturns(response, "single", "bulk1", "bulk2", "multi", "multibulk1", "multibulk2")
);
assertResponse(searchByNumericRange(12, 12), this::assertSearchReturns);
}
/**
* It is possible to search by token count.
*/
public void testFacetByTokenCount() throws IOException {
init();
String facetField = randomFrom(Arrays.asList("foo.token_count", "foo.token_count_unstored", "foo.token_count_with_doc_values"));
assertResponse(searchByNumericRange(1, 10).addAggregation(AggregationBuilders.terms("facet").field(facetField)), result -> {
assertSearchReturns(result, "single", "bulk1", "bulk2", "multi", "multibulk1", "multibulk2");
assertThat(result.getAggregations().asList().size(), equalTo(1));
Terms terms = (Terms) result.getAggregations().asList().get(0);
assertThat(terms.getBuckets().size(), equalTo(9));
});
}
private void init() throws IOException {
Settings.Builder settings = Settings.builder();
settings.put(indexSettings());
settings.put("index.analysis.analyzer.mock_english.tokenizer", "standard");
settings.put("index.analysis.analyzer.mock_english.filter", "stop");
prepareCreate("test").setSettings(settings)
.setMapping(
jsonBuilder().startObject()
.startObject("_doc")
.startObject("properties")
.startObject("foo")
.field("type", "text")
.field("store", storeCountedFields)
.field("analyzer", "simple")
.startObject("fields")
.startObject("token_count")
.field("type", "token_count")
.field("analyzer", "standard")
.field("store", true)
.endObject()
.startObject("token_count_unstored")
.field("type", "token_count")
.field("analyzer", "standard")
.endObject()
.startObject("token_count_with_doc_values")
.field("type", "token_count")
.field("analyzer", "standard")
.field("doc_values", true)
.endObject()
.startObject("token_count_without_position_increments")
.field("type", "token_count")
.field("analyzer", "mock_english")
.field("enable_position_increments", false)
.field("store", true)
.endObject()
.endObject()
.endObject()
.endObject()
.endObject()
.endObject()
)
.get();
ensureGreen();
assertEquals(DocWriteResponse.Result.CREATED, prepareIndex("single", "I have four terms").get().getResult());
BulkResponse bulk = client().prepareBulk()
.add(prepareIndex("bulk1", "bulk three terms"))
.add(prepareIndex("bulk2", "this has five bulk terms"))
.get();
assertFalse(bulk.buildFailureMessage(), bulk.hasFailures());
assertEquals(
DocWriteResponse.Result.CREATED,
prepareIndex("multi", "two terms", "wow now I have seven lucky terms").get().getResult()
);
bulk = client().prepareBulk()
.add(prepareIndex("multibulk1", "one", "oh wow now I have eight unlucky terms"))
.add(prepareIndex("multibulk2", "six is a bunch of terms", "ten! ten terms is just crazy! too many too count!"))
.get();
assertFalse(bulk.buildFailureMessage(), bulk.hasFailures());
assertThat(refresh().getFailedShards(), equalTo(0));
}
private IndexRequestBuilder prepareIndex(String id, String... texts) throws IOException {
return prepareIndex("test").setId(id).setSource("foo", texts);
}
private SearchRequestBuilder searchById(String id) {
return prepareTokenCountFieldMapperSearch().setQuery(QueryBuilders.termQuery("_id", id));
}
private SearchRequestBuilder searchByNumericRange(int low, int high) {
return prepareTokenCountFieldMapperSearch().setQuery(
QueryBuilders.rangeQuery(
randomFrom(Arrays.asList("foo.token_count", "foo.token_count_unstored", "foo.token_count_with_doc_values"))
).gte(low).lte(high)
);
}
private SearchRequestBuilder prepareTokenCountFieldMapperSearch() {
SearchRequestBuilder request = prepareSearch("test");
request.addStoredField("foo.token_count");
request.addStoredField("foo.token_count_without_position_increments");
if (loadCountedFields) {
request.addStoredField("foo");
}
return request;
}
private void assertSearchReturns(SearchResponse result, String... ids) {
assertThat(result.getHits().getTotalHits().value(), equalTo((long) ids.length));
assertThat(result.getHits().getHits().length, equalTo(ids.length));
List<String> foundIds = new ArrayList<>();
for (SearchHit hit : result.getHits()) {
foundIds.add(hit.getId());
}
assertThat(foundIds, containsInAnyOrder(ids));
for (SearchHit hit : result.getHits()) {
String id = hit.getId();
if (id.equals("single")) {
assertSearchHit(hit, new int[] { 4 }, new int[] { 4 });
} else if (id.equals("bulk1")) {
assertSearchHit(hit, new int[] { 3 }, new int[] { 3 });
} else if (id.equals("bulk2")) {
assertSearchHit(hit, new int[] { 5 }, new int[] { 4 });
} else if (id.equals("multi")) {
assertSearchHit(hit, new int[] { 2, 7 }, new int[] { 2, 7 });
} else if (id.equals("multibulk1")) {
assertSearchHit(hit, new int[] { 1, 8 }, new int[] { 1, 8 });
} else if (id.equals("multibulk2")) {
assertSearchHit(hit, new int[] { 6, 10 }, new int[] { 3, 9 });
} else {
throw new ElasticsearchException("Unexpected response!");
}
}
}
private void assertSearchHit(SearchHit hit, int[] standardTermCounts, int[] englishTermCounts) {
assertThat(hit.field("foo.token_count"), not(nullValue()));
assertThat(hit.field("foo.token_count").getValues().size(), equalTo(standardTermCounts.length));
for (int i = 0; i < standardTermCounts.length; i++) {
assertThat(hit.field("foo.token_count").getValues().get(i), equalTo(standardTermCounts[i]));
}
assertThat(hit.field("foo.token_count_without_position_increments"), not(nullValue()));
assertThat(hit.field("foo.token_count_without_position_increments").getValues().size(), equalTo(englishTermCounts.length));
for (int i = 0; i < englishTermCounts.length; i++) {
assertThat(hit.field("foo.token_count_without_position_increments").getValues().get(i), equalTo(englishTermCounts[i]));
}
if (loadCountedFields && storeCountedFields) {
assertThat(hit.field("foo").getValues().size(), equalTo(standardTermCounts.length));
}
}
}
|
TokenCountFieldMapperIntegrationIT
|
java
|
google__guice
|
core/test/com/google/inject/ImplicitBindingTest.java
|
{
"start": 13551,
"end": 14260
}
|
class ____ {}
public void testImplicitJdkBindings_publicCxtor() {
Injector injector = Guice.createInjector();
// String has a public nullary constructor, so Guice will call it.
assertEquals("", injector.getInstance(String.class));
}
public void testRecursiveLoadWithOptionals() {
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bind(A1.class);
}
});
assertThat(injector.getExistingBinding(Key.get(D1.class))).isNull();
assertThat(injector.getExistingBinding(Key.get(Unresolved.class))).isNull();
}
static
|
EnumWithImplementedByEnum
|
java
|
elastic__elasticsearch
|
x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java
|
{
"start": 6405,
"end": 9249
}
|
class ____ extends AbstractShapeGeometryFieldType<Geometry> implements ShapeQueryable {
private final boolean isSyntheticSource;
public ShapeFieldType(
String name,
boolean indexed,
boolean hasDocValues,
Orientation orientation,
Parser<Geometry> parser,
boolean isSyntheticSource,
Map<String, String> meta
) {
super(name, IndexType.points(indexed, hasDocValues), false, parser, orientation, meta);
this.isSyntheticSource = isSyntheticSource;
}
@Override
public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext) {
failIfNoDocValues();
return (a, b) -> new CartesianShapeIndexFieldData(
name(),
CartesianShapeValuesSourceType.instance(),
ShapeFieldMapper.CartesianShapeDocValuesField::new
);
}
@Override
public Query shapeQuery(Geometry shape, String fieldName, ShapeRelation relation, SearchExecutionContext context) {
failIfNotIndexedNorDocValuesFallback(context);
// CONTAINS queries are not supported by VECTOR strategy for indices created before version 7.5.0 (Lucene 8.3.0);
if (relation == ShapeRelation.CONTAINS && context.indexVersionCreated().before(IndexVersions.V_7_5_0)) {
throw new QueryShardException(
context,
ShapeRelation.CONTAINS + " query relation not supported for Field [" + fieldName + "]."
);
}
try {
return XYQueriesUtils.toXYShapeQuery(shape, fieldName, relation, indexType());
} catch (IllegalArgumentException e) {
throw new QueryShardException(context, "Exception creating query on Field [" + fieldName + "] " + e.getMessage(), e);
}
}
@Override
public String typeName() {
return CONTENT_TYPE;
}
@Override
protected Function<List<Geometry>, List<Object>> getFormatter(String format) {
return GeometryFormatterFactory.getFormatter(format, Function.identity());
}
@Override
public BlockLoader blockLoader(BlockLoaderContext blContext) {
if (blContext.fieldExtractPreference() == FieldExtractPreference.EXTRACT_SPATIAL_BOUNDS) {
return new CartesianBoundsBlockLoader(name());
}
// Multi fields don't have fallback synthetic source.
if (isSyntheticSource && blContext.parentField(name()) == null) {
return blockLoaderFromFallbackSyntheticSource(blContext);
}
return blockLoaderFromSource(blContext);
}
static
|
ShapeFieldType
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/observers/ResumeSingleObserver.java
|
{
"start": 1080,
"end": 1718
}
|
class ____<T> implements SingleObserver<T> {
final AtomicReference<Disposable> parent;
final SingleObserver<? super T> downstream;
public ResumeSingleObserver(AtomicReference<Disposable> parent, SingleObserver<? super T> downstream) {
this.parent = parent;
this.downstream = downstream;
}
@Override
public void onSubscribe(Disposable d) {
DisposableHelper.replace(parent, d);
}
@Override
public void onSuccess(T value) {
downstream.onSuccess(value);
}
@Override
public void onError(Throwable e) {
downstream.onError(e);
}
}
|
ResumeSingleObserver
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/bean/override/mockito/MockitoBeanWithResetIntegrationTests.java
|
{
"start": 3046,
"end": 3395
}
|
class ____ implements FactoryBean<FailingExampleService> {
@Override
public @Nullable FailingExampleService getObject() {
return new FailingExampleService();
}
@Override
public @Nullable Class<?> getObjectType() {
return FailingExampleService.class;
}
}
@Configuration(proxyBeanMethods = false)
static
|
FailingExampleServiceFactory
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/http/HttpServerChannel.java
|
{
"start": 604,
"end": 827
}
|
interface ____ extends CloseableChannel {
/**
* Returns the local address for this channel.
*
* @return the local address of this channel.
*/
InetSocketAddress getLocalAddress();
}
|
HttpServerChannel
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/RefEndpointBuilderFactory.java
|
{
"start": 1943,
"end": 7722
}
|
interface ____
extends
EndpointConsumerBuilder {
default RefEndpointConsumerBuilder basic() {
return (RefEndpointConsumerBuilder) this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedRefEndpointConsumerBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedRefEndpointConsumerBuilder bridgeErrorHandler(String bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedRefEndpointConsumerBuilder exceptionHandler(org.apache.camel.spi.ExceptionHandler exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedRefEndpointConsumerBuilder exceptionHandler(String exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option is a: <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedRefEndpointConsumerBuilder exchangePattern(org.apache.camel.ExchangePattern exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option will be converted to a
* <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedRefEndpointConsumerBuilder exchangePattern(String exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
}
/**
* Builder for endpoint producers for the Ref component.
*/
public
|
AdvancedRefEndpointConsumerBuilder
|
java
|
reactor__reactor-core
|
reactor-core/src/test/java/reactor/core/publisher/MonoPeekAfterTest.java
|
{
"start": 1341,
"end": 9099
}
|
class ____ {
private static final Logger LOG = Loggers.getLogger(MonoPeekAfterTest.class);
@Test
public void onSuccessNormal() {
LongAdder invoked = new LongAdder();
AtomicBoolean hasNull = new AtomicBoolean();
Mono<Integer> mono = Flux
.range(1, 10)
.reduce((a, b) -> a + b)
.hide()
.doOnSuccess(v -> {
if (v == null) hasNull.set(true);
invoked.increment();
});
StepVerifier.create(mono)
.expectFusion(Fuseable.ANY, Fuseable.NONE)
.expectNext(55)
.expectComplete()
.verify();
assertThat(hasNull.get()).as("unexpected call to onSuccess with null").isFalse();
assertThat(invoked.intValue()).isEqualTo(1);
}
@Test
public void onSuccessNormalConditional() {
LongAdder invoked = new LongAdder();
AtomicBoolean hasNull = new AtomicBoolean();
Mono<Integer> mono = Flux
.range(1, 10)
.reduce((a, b) -> a + b)
.hide()
.filter(v -> true)
.doOnSuccess(v -> {
if (v == null) hasNull.set(true);
invoked.increment();
});
StepVerifier.create(mono)
.expectFusion(Fuseable.ANY, Fuseable.NONE)
.expectNext(55)
.expectComplete()
.verify();
assertThat(hasNull.get()).as("unexpected call to onSuccess with null").isFalse();
assertThat(invoked.intValue()).isEqualTo(1);
}
@Test
public void onAfterTerminateNormalConditional() {
LongAdder invoked = new LongAdder();
Mono<Integer> mono = Flux
.range(1, 10)
.reduce((a, b) -> a + b)
.hide()
.filter(v -> true)
.doAfterTerminate(invoked::increment);
StepVerifier.create(mono)
.expectFusion(Fuseable.ANY, Fuseable.NONE)
.expectNext(55)
.expectComplete()
.verify();
assertThat(invoked.intValue()).isEqualTo(1);
}
@Test
void onAfterTerminateFuseableNegotiatedNone() {
LongAdder invoked = new LongAdder();
Mono<Integer> mono = Flux
.range(1, 10)
.reduce((a, b) -> a + b)
.doAfterTerminate(invoked::increment);
StepVerifier.create(mono.log())
.expectFusion(Fuseable.ANY, Fuseable.NONE)
.expectNext(55)
.expectComplete()
.verify();
assertThat(invoked.intValue()).isEqualTo(1);
}
@Test
public void onAfterTerminateFuseableConditionalNegotiatedNone() {
LongAdder invoked = new LongAdder();
Mono<Integer> mono = Flux
.range(1, 10)
.reduce((a, b) -> a + b)
.filter(v -> true)
.doAfterTerminate(invoked::increment);
StepVerifier.create(mono)
.expectFusion(Fuseable.ANY, Fuseable.NONE)
.expectNext(55)
.expectComplete()
.verify();
assertThat(invoked.intValue()).isEqualTo(1);
}
@Test
public void onSuccessCallbackFailureInterruptsOnNext() {
LongAdder invoked = new LongAdder();
StepVerifier.create(Mono.just("foo")
.doOnSuccess(s -> {
invoked.increment();
throw new IllegalArgumentException(s);
}))
.expectErrorMessage("foo")
.verify();
assertThat(invoked.intValue()).isEqualTo(1);
}
@Test
public void onSuccessNotCalledOnError() {
LongAdder invoked = new LongAdder();
IllegalArgumentException err = new IllegalArgumentException("boom");
StepVerifier.create(Mono.error(err)
.doOnSuccess(v -> invoked.increment()))
.expectErrorMessage("boom")
.verify();
assertThat(invoked.intValue()).isEqualTo(0);
}
@Test
public void afterTerminateForOnError() {
LongAdder invoked = new LongAdder();
IllegalArgumentException err = new IllegalArgumentException("boom");
StepVerifier.create(Mono.<String>error(err)
.doAfterTerminate(invoked::increment))
.expectErrorMessage("boom")
.verify();
assertThat(invoked.intValue()).isEqualTo(1);
}
@Test
public void onSuccessForEmpty() {
LongAdder invoked = new LongAdder();
AtomicReference<String> value = new AtomicReference<>();
StepVerifier.create(Mono.<String>empty()
.doOnSuccess(v -> {
invoked.increment();
value.set(v);
}))
.expectComplete()
.verify();
assertThat(invoked.intValue()).isEqualTo(1);
assertThat(value).hasValue(null);
}
@Test
public void afterTerminateForEmpty() {
LongAdder invoked = new LongAdder();
StepVerifier.create(Mono.<String>empty()
.doAfterTerminate(() -> {
invoked.increment();
}))
.expectComplete()
.verify();
assertThat(invoked.intValue()).isEqualTo(1);
}
@Test
public void testCallbacksNoFusion() {
AtomicReference<Integer> successInvocation = new AtomicReference<>();
AtomicReference<Throwable> errorInvocation = new AtomicReference<>();
AtomicReference<Integer> afterTerminateInvocation = new AtomicReference<>();
Mono<Integer> source = Flux
.range(1, 10)
.reduce((a, b) -> a + b)
.hide();
Mono<Integer> mono = new MonoPeekTerminal<>(source,
successInvocation::set,
errorInvocation::set,
(v, t) -> {
afterTerminateInvocation.set(v);
errorInvocation.set(t);
});
StepVerifier.create(mono.log())
.expectFusion(Fuseable.NONE)
.expectNext(55)
.expectComplete()
.verify();
assertThat((Object) successInvocation.get()).isEqualTo(55);
assertThat((Object) afterTerminateInvocation.get()).isEqualTo(55);
assertThat(errorInvocation).hasValue(null);
}
@Test
void testCallbacksWithAfterTerminateNegotiatesFusionNone() {
AtomicReference<Integer> successInvocation = new AtomicReference<>();
AtomicReference<Integer> afterTerminateInvocation = new AtomicReference<>();
AtomicReference<Throwable> errorInvocation = new AtomicReference<>();
Mono<Integer> source = Mono.fromDirect(Flux.range(55, 1));
Mono<Integer> mono = new MonoPeekTerminal<>(source,
successInvocation::set,
errorInvocation::set,
(v, t) -> {
afterTerminateInvocation.set(v);
errorInvocation.set(t);
});
StepVerifier.create(mono)
.expectFusion(Fuseable.SYNC, Fuseable.NONE)
.expectNext(55)
.expectComplete()
.verify();
assertThat((Object) successInvocation.get()).isEqualTo(55);
assertThat((Object) afterTerminateInvocation.get()).isEqualTo(55);
assertThat(errorInvocation).hasValue(null);
}
@Test
void testCallbacksFusionSync() {
AtomicReference<Integer> successInvocation = new AtomicReference<>();
AtomicReference<Throwable> errorInvocation = new AtomicReference<>();
Mono<Integer> source = Mono.fromDirect(Flux.range(55, 1));
Mono<Integer> mono = new MonoPeekTerminal<>(source,
successInvocation::set,
errorInvocation::set,
null); //afterTerminate forces the negotiation of fusion mode NONE
StepVerifier.create(mono)
.expectFusion(Fuseable.SYNC)
.expectNext(55)
.expectComplete()
.verify();
assertThat((Object) successInvocation.get()).isEqualTo(55);
assertThat(errorInvocation).hasValue(null);
}
@Test
public void should_reduce_to_10_events() {
for (int i = 0; i < 20; i++) {
AtomicInteger count = new AtomicInteger();
Flux.range(0, 10)
.flatMap(x -> Flux.range(0, 2)
.map(y -> FluxPeekFuseableTest.blockingOp(x, y))
.subscribeOn(Schedulers.parallel())
.reduce((l, r) -> l + "_" + r)
.doOnSuccess(s -> {
LOG.debug("success " + x + ": " + s);
count.incrementAndGet();
}))
.blockLast();
assertThat(count).hasValue(10);
}
}
}
|
MonoPeekAfterTest
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.