language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
elastic__elasticsearch
|
modules/lang-painless/src/test/java/org/elasticsearch/painless/DynamicTypeTests.java
|
{
"start": 1922,
"end": 11381
}
|
class ____ extends DynF {
public char letter() {
return 'G';
}
public int value() {
return 1;
}
}
public void testDynamicTypeResolution() {
assertEquals('D', exec("DynamicTypeTests.DynI i = new DynamicTypeTests.DynD(); return i.letter()"));
assertEquals('E', exec("DynamicTypeTests.DynI i = new DynamicTypeTests.DynE(); return i.letter()"));
assertEquals('E', exec("DynamicTypeTests.DynI i = new DynamicTypeTests.DynF(); return i.letter()"));
assertEquals('G', exec("DynamicTypeTests.DynI i = new DynamicTypeTests.DynG(); return i.letter()"));
IllegalArgumentException iae = expectScriptThrows(
IllegalArgumentException.class,
() -> exec("DynamicTypeTests.DynI i = new DynamicTypeTests.DynD(); return i.value()")
);
assertTrue(iae.getMessage().contains("dynamic method") && iae.getMessage().contains("not found"));
iae = expectScriptThrows(
IllegalArgumentException.class,
() -> exec("DynamicTypeTests.DynI i = new DynamicTypeTests.DynE(); return i.value()")
);
assertTrue(iae.getMessage().contains("dynamic method") && iae.getMessage().contains("not found"));
iae = expectScriptThrows(
IllegalArgumentException.class,
() -> exec("DynamicTypeTests.DynI i = new DynamicTypeTests.DynF(); return i.value()")
);
assertTrue(iae.getMessage().contains("dynamic method") && iae.getMessage().contains("not found"));
assertEquals(1, exec("DynamicTypeTests.DynI i = new DynamicTypeTests.DynG(); return i.value()"));
iae = expectScriptThrows(
IllegalArgumentException.class,
() -> exec("DynamicTypeTests.DynA a = new DynamicTypeTests.DynD(); return a.letter()")
);
assertTrue(iae.getMessage().contains("member method") && iae.getMessage().contains("not found"));
iae = expectScriptThrows(
IllegalArgumentException.class,
() -> exec("DynamicTypeTests.DynA a = new DynamicTypeTests.DynE(); return a.letter()")
);
assertTrue(iae.getMessage().contains("member method") && iae.getMessage().contains("not found"));
iae = expectScriptThrows(
IllegalArgumentException.class,
() -> exec("DynamicTypeTests.DynA a = new DynamicTypeTests.DynF(); return a.letter()")
);
assertTrue(iae.getMessage().contains("member method") && iae.getMessage().contains("not found"));
iae = expectScriptThrows(
IllegalArgumentException.class,
() -> exec("DynamicTypeTests.DynA a = new DynamicTypeTests.DynG(); return a.letter()")
);
assertTrue(iae.getMessage().contains("member method") && iae.getMessage().contains("not found"));
iae = expectScriptThrows(
IllegalArgumentException.class,
() -> exec("DynamicTypeTests.DynA a = new DynamicTypeTests.DynD(); return a.value()")
);
assertTrue(iae.getMessage().contains("member method") && iae.getMessage().contains("not found"));
iae = expectScriptThrows(
IllegalArgumentException.class,
() -> exec("DynamicTypeTests.DynA a = new DynamicTypeTests.DynE(); return a.value()")
);
assertTrue(iae.getMessage().contains("member method") && iae.getMessage().contains("not found"));
iae = expectScriptThrows(
IllegalArgumentException.class,
() -> exec("DynamicTypeTests.DynA a = new DynamicTypeTests.DynF(); return a.value()")
);
assertTrue(iae.getMessage().contains("member method") && iae.getMessage().contains("not found"));
iae = expectScriptThrows(
IllegalArgumentException.class,
() -> exec("DynamicTypeTests.DynA a = new DynamicTypeTests.DynG(); return a.value()")
);
assertTrue(iae.getMessage().contains("member method") && iae.getMessage().contains("not found"));
assertEquals('D', exec("DynamicTypeTests.DynB b = new DynamicTypeTests.DynD(); return b.letter()"));
assertEquals('E', exec("DynamicTypeTests.DynB b = new DynamicTypeTests.DynE(); return b.letter()"));
assertEquals('E', exec("DynamicTypeTests.DynB b = new DynamicTypeTests.DynF(); return b.letter()"));
assertEquals('G', exec("DynamicTypeTests.DynB b = new DynamicTypeTests.DynG(); return b.letter()"));
iae = expectScriptThrows(
IllegalArgumentException.class,
() -> exec("DynamicTypeTests.DynB b = new DynamicTypeTests.DynD(); return b.value()")
);
assertTrue(iae.getMessage().contains("dynamic method") && iae.getMessage().contains("not found"));
iae = expectScriptThrows(
IllegalArgumentException.class,
() -> exec("DynamicTypeTests.DynB b = new DynamicTypeTests.DynE(); return b.value()")
);
assertTrue(iae.getMessage().contains("dynamic method") && iae.getMessage().contains("not found"));
iae = expectScriptThrows(
IllegalArgumentException.class,
() -> exec("DynamicTypeTests.DynB b = new DynamicTypeTests.DynF(); return b.value()")
);
assertTrue(iae.getMessage().contains("dynamic method") && iae.getMessage().contains("not found"));
assertEquals(1, exec("DynamicTypeTests.DynB b = new DynamicTypeTests.DynG(); return b.value()"));
iae = expectScriptThrows(
IllegalArgumentException.class,
() -> exec("DynamicTypeTests.DynC c = new DynamicTypeTests.DynE(); return c.letter()")
);
assertTrue(iae.getMessage().contains("member method") && iae.getMessage().contains("not found"));
iae = expectScriptThrows(
IllegalArgumentException.class,
() -> exec("DynamicTypeTests.DynC c = new DynamicTypeTests.DynF(); return c.letter()")
);
assertTrue(iae.getMessage().contains("member method") && iae.getMessage().contains("not found"));
iae = expectScriptThrows(
IllegalArgumentException.class,
() -> exec("DynamicTypeTests.DynC c = new DynamicTypeTests.DynG(); return c.letter()")
);
assertTrue(iae.getMessage().contains("member method") && iae.getMessage().contains("not found"));
iae = expectScriptThrows(
IllegalArgumentException.class,
() -> exec("DynamicTypeTests.DynC c = new DynamicTypeTests.DynE(); return c.value()")
);
assertTrue(iae.getMessage().contains("member method") && iae.getMessage().contains("not found"));
iae = expectScriptThrows(
IllegalArgumentException.class,
() -> exec("DynamicTypeTests.DynC c = new DynamicTypeTests.DynF(); return c.value()")
);
assertTrue(iae.getMessage().contains("member method") && iae.getMessage().contains("not found"));
iae = expectScriptThrows(
IllegalArgumentException.class,
() -> exec("DynamicTypeTests.DynC c = new DynamicTypeTests.DynG(); return c.value()")
);
assertTrue(iae.getMessage().contains("member method") && iae.getMessage().contains("not found"));
assertEquals('D', exec("DynamicTypeTests.DynD d = new DynamicTypeTests.DynD(); return d.letter()"));
iae = expectScriptThrows(
IllegalArgumentException.class,
() -> exec("DynamicTypeTests.DynD d = new DynamicTypeTests.DynD(); return d.value()")
);
assertTrue(iae.getMessage().contains("member method") && iae.getMessage().contains("not found"));
assertEquals('E', exec("DynamicTypeTests.DynE e = new DynamicTypeTests.DynE(); return e.letter()"));
assertEquals('E', exec("DynamicTypeTests.DynE e = new DynamicTypeTests.DynF(); return e.letter()"));
assertEquals('G', exec("DynamicTypeTests.DynE e = new DynamicTypeTests.DynG(); return e.letter()"));
iae = expectScriptThrows(
IllegalArgumentException.class,
() -> exec("DynamicTypeTests.DynE e = new DynamicTypeTests.DynE(); return e.value()")
);
assertTrue(iae.getMessage().contains("dynamic method") && iae.getMessage().contains("not found"));
iae = expectScriptThrows(
IllegalArgumentException.class,
() -> exec("DynamicTypeTests.DynE e = new DynamicTypeTests.DynF(); return e.value()")
);
assertTrue(iae.getMessage().contains("dynamic method") && iae.getMessage().contains("not found"));
assertEquals(1, exec("DynamicTypeTests.DynE e = new DynamicTypeTests.DynG(); return e.value()"));
assertEquals('E', exec("DynamicTypeTests.DynF f = new DynamicTypeTests.DynF(); return f.letter()"));
assertEquals('G', exec("DynamicTypeTests.DynF f = new DynamicTypeTests.DynG(); return f.letter()"));
iae = expectScriptThrows(
IllegalArgumentException.class,
() -> exec("DynamicTypeTests.DynF f = new DynamicTypeTests.DynF(); return f.value()")
);
assertTrue(iae.getMessage().contains("dynamic method") && iae.getMessage().contains("not found"));
assertEquals(1, exec("DynamicTypeTests.DynF f = new DynamicTypeTests.DynG(); return f.value()"));
assertEquals('G', exec("DynamicTypeTests.DynG g = new DynamicTypeTests.DynG(); return g.letter()"));
assertEquals(1, exec("DynamicTypeTests.DynG g = new DynamicTypeTests.DynG(); return g.value()"));
}
}
|
DynG
|
java
|
spring-projects__spring-data-jpa
|
spring-data-jpa/src/test/java/org/springframework/data/jpa/domain/sample/EmployeeWithName.java
|
{
"start": 834,
"end": 1366
}
|
class ____ {
@Id
@GeneratedValue //
private Integer id;
private String name;
public EmployeeWithName(String name) {
this();
this.name = name;
}
protected EmployeeWithName() {}
public Integer getId() {
return this.id;
}
public String getName() {
return this.name;
}
public void setId(Integer id) {
this.id = id;
}
public void setName(String name) {
this.name = name;
}
public String toString() {
return "EmployeeWithName(id=" + this.getId() + ", name=" + this.getName() + ")";
}
}
|
EmployeeWithName
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/StringFieldTest.java
|
{
"start": 1293,
"end": 1504
}
|
class ____ {
private String value;
public String getValue() {
return value;
}
public void setValue(String value) {
this.value = value;
}
}
}
|
V0
|
java
|
elastic__elasticsearch
|
x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsLicenseIntegTests.java
|
{
"start": 2817,
"end": 10555
}
|
class ____ extends BaseFrozenSearchableSnapshotsIntegTestCase {
private static final String repoName = "test-repo";
private static final String indexName = "test-index";
private static final String snapshotName = "test-snapshot";
@Before
public void createAndMountSearchableSnapshot() throws Exception {
createRepository(repoName, "fs");
createIndex(indexName);
createFullSnapshot(repoName, snapshotName);
assertAcked(indicesAdmin().prepareDelete(indexName));
final MountSearchableSnapshotRequest req = new MountSearchableSnapshotRequest(
TEST_REQUEST_TIMEOUT,
indexName,
repoName,
snapshotName,
indexName,
Settings.EMPTY,
Strings.EMPTY_ARRAY,
true,
randomFrom(MountSearchableSnapshotRequest.Storage.values())
);
final RestoreSnapshotResponse restoreSnapshotResponse = client().execute(MountSearchableSnapshotAction.INSTANCE, req).get();
assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
ensureGreen(indexName);
assertAcked(
client().execute(TransportDeleteLicenseAction.TYPE, new AcknowledgedRequest.Plain(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT))
.get()
);
assertAcked(
client().execute(PostStartBasicAction.INSTANCE, new PostStartBasicRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT)).get()
);
ensureClusterSizeConsistency();
ensureClusterStateConsistency();
}
public void testMountRequiresLicense() {
final MountSearchableSnapshotRequest req = new MountSearchableSnapshotRequest(
TEST_REQUEST_TIMEOUT,
indexName + "-extra",
repoName,
snapshotName,
indexName,
Settings.EMPTY,
Strings.EMPTY_ARRAY,
randomBoolean(),
randomFrom(MountSearchableSnapshotRequest.Storage.values())
);
final ActionFuture<RestoreSnapshotResponse> future = client().execute(MountSearchableSnapshotAction.INSTANCE, req);
final Throwable cause = ExceptionsHelper.unwrap(expectThrows(Exception.class, future::get), ElasticsearchSecurityException.class);
assertThat(cause, notNullValue());
assertThat(cause, instanceOf(ElasticsearchSecurityException.class));
assertThat(cause.getMessage(), containsString("current license is non-compliant for [searchable-snapshots]"));
}
public void testStatsRequiresLicense() throws ExecutionException, InterruptedException {
final ActionFuture<SearchableSnapshotsStatsResponse> future = client().execute(
SearchableSnapshotsStatsAction.INSTANCE,
new SearchableSnapshotsStatsRequest(indexName)
);
final SearchableSnapshotsStatsResponse response = future.get();
assertThat(response.getTotalShards(), greaterThan(0));
assertThat(response.getSuccessfulShards(), equalTo(0));
for (DefaultShardOperationFailedException shardFailure : response.getShardFailures()) {
final Throwable cause = ExceptionsHelper.unwrap(shardFailure.getCause(), ElasticsearchSecurityException.class);
assertThat(cause, notNullValue());
assertThat(cause, instanceOf(ElasticsearchSecurityException.class));
assertThat(cause.getMessage(), containsString("current license is non-compliant for [searchable-snapshots]"));
}
}
public void testClearCacheRequiresLicense() throws ExecutionException, InterruptedException {
final ActionFuture<BroadcastResponse> future = client().execute(
ClearSearchableSnapshotsCacheAction.INSTANCE,
new ClearSearchableSnapshotsCacheRequest(indexName)
);
final BroadcastResponse response = future.get();
assertThat(response.getTotalShards(), greaterThan(0));
assertThat(response.getSuccessfulShards(), equalTo(0));
for (DefaultShardOperationFailedException shardFailure : response.getShardFailures()) {
final Throwable cause = ExceptionsHelper.unwrap(shardFailure.getCause(), ElasticsearchSecurityException.class);
assertThat(cause, notNullValue());
assertThat(cause, instanceOf(ElasticsearchSecurityException.class));
assertThat(cause.getMessage(), containsString("current license is non-compliant for [searchable-snapshots]"));
}
}
public void testShardAllocationOnInvalidLicense() throws Exception {
// check that shards have been failed as part of invalid license
assertBusy(
() -> assertEquals(
ClusterHealthStatus.RED,
clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, indexName).get().getIndices().get(indexName).getStatus()
)
);
waitNoPendingTasksOnAll();
ensureClusterStateConsistency();
// add a valid license again
// This is a bit of a hack in tests, as we can't readd a trial license
// We force this by clearing the existing basic license first
updateClusterState(
currentState -> ClusterState.builder(currentState)
.metadata(Metadata.builder(currentState.metadata()).removeCustom(LicensesMetadata.TYPE).build())
.build()
);
waitNoPendingTasksOnAll();
ensureClusterStateConsistency();
PostStartTrialRequest request = new PostStartTrialRequest(TEST_REQUEST_TIMEOUT).setType(License.LicenseType.TRIAL.getTypeName())
.acknowledge(true);
final PostStartTrialResponse response = client().execute(PostStartTrialAction.INSTANCE, request).get();
assertThat(
response.getStatus(),
oneOf(
PostStartTrialResponse.Status.UPGRADED_TO_TRIAL,
// The LicenceService automatically generates a license of {@link LicenceService#SELF_GENERATED_LICENSE_TYPE} type
// if there is no license found in the cluster state (see {@link LicenceService#registerOrUpdateSelfGeneratedLicense).
// Since this test explicitly removes the LicensesMetadata from cluster state it is possible that the self generated
// license is created before the PostStartTrialRequest is acked.
PostStartTrialResponse.Status.TRIAL_ALREADY_ACTIVATED
)
);
// check if cluster goes green again after valid license has been put in place
ensureGreen(indexName);
}
public void testCachesStatsRequiresLicense() throws Exception {
final ActionFuture<TransportSearchableSnapshotsNodeCachesStatsAction.NodesCachesStatsResponse> future = client().execute(
TransportSearchableSnapshotsNodeCachesStatsAction.TYPE,
new TransportSearchableSnapshotsNodeCachesStatsAction.NodesRequest(Strings.EMPTY_ARRAY)
);
final TransportSearchableSnapshotsNodeCachesStatsAction.NodesCachesStatsResponse response = future.get();
assertThat(response.failures().size(), equalTo(internalCluster().numDataNodes()));
assertTrue(response.hasFailures());
for (FailedNodeException nodeException : response.failures()) {
final Throwable cause = ExceptionsHelper.unwrap(nodeException.getCause(), ElasticsearchSecurityException.class);
assertThat(cause, notNullValue());
assertThat(cause, instanceOf(ElasticsearchSecurityException.class));
assertThat(cause.getMessage(), containsString("current license is non-compliant for [searchable-snapshots]"));
}
}
}
|
SearchableSnapshotsLicenseIntegTests
|
java
|
apache__maven
|
impl/maven-di/src/main/java/org/apache/maven/di/impl/Binding.java
|
{
"start": 6214,
"end": 7160
}
|
class ____<T> extends Binding<T> {
final TupleConstructorN<T> constructor;
final Dependency<?>[] args;
BindingToConstructor(
Key<? extends T> key, TupleConstructorN<T> constructor, Dependency<?>[] dependencies, int priority) {
super(key, new HashSet<>(Arrays.asList(dependencies)), null, priority);
this.constructor = constructor;
this.args = dependencies;
}
@Override
public Supplier<T> compile(Function<Dependency<?>, Supplier<?>> compiler) {
return () -> {
Object[] args =
Stream.of(this.args).map(compiler).map(Supplier::get).toArray();
return constructor.create(args);
};
}
@Override
public String toString() {
return "BindingToConstructor[" + getOriginalKey() + "]" + getDependencies();
}
}
}
|
BindingToConstructor
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetTransformAction.java
|
{
"start": 5149,
"end": 5271
}
|
class ____ extends AbstractGetResourcesResponse<TransformConfig> implements ToXContentObject {
public static
|
Response
|
java
|
apache__camel
|
components/camel-huawei/camel-huaweicloud-frs/src/test/java/org/apache/camel/component/huaweicloud/frs/mock/FrsClientMock.java
|
{
"start": 2168,
"end": 4588
}
|
class ____ extends FrsClient {
public FrsClientMock(HcClient hcClient) {
super(null);
}
@Override
public DetectFaceByBase64Response detectFaceByBase64(DetectFaceByBase64Request request) {
return new DetectFaceByBase64Response().withFaces(MockResult.getFaceDetectionResult());
}
@Override
public DetectFaceByFileResponse detectFaceByFile(DetectFaceByFileRequest request) {
return new DetectFaceByFileResponse().withFaces(MockResult.getFaceDetectionResult());
}
@Override
public DetectFaceByUrlResponse detectFaceByUrl(DetectFaceByUrlRequest request) {
return new DetectFaceByUrlResponse().withFaces(MockResult.getFaceDetectionResult());
}
@Override
public CompareFaceByBase64Response compareFaceByBase64(CompareFaceByBase64Request request) {
return new CompareFaceByBase64Response().withImage1Face(MockResult.getCompareFaceResult())
.withImage2Face(MockResult.getCompareFaceResult()).withSimilarity(1.0);
}
@Override
public CompareFaceByUrlResponse compareFaceByUrl(CompareFaceByUrlRequest request) {
return new CompareFaceByUrlResponse().withImage1Face(MockResult.getCompareFaceResult())
.withImage2Face(MockResult.getCompareFaceResult()).withSimilarity(1.0);
}
@Override
public CompareFaceByFileResponse compareFaceByFile(CompareFaceByFileRequest request) {
return new CompareFaceByFileResponse().withImage1Face(MockResult.getCompareFaceResult())
.withImage2Face(MockResult.getCompareFaceResult()).withSimilarity(1.0);
}
@Override
public DetectLiveByBase64Response detectLiveByBase64(DetectLiveByBase64Request request) {
return new DetectLiveByBase64Response().withVideoResult(MockResult.getLiveDetectResult())
.withWarningList(Collections.emptyList());
}
@Override
public DetectLiveByUrlResponse detectLiveByUrl(DetectLiveByUrlRequest request) {
return new DetectLiveByUrlResponse().withVideoResult(MockResult.getLiveDetectResult())
.withWarningList(Collections.emptyList());
}
@Override
public DetectLiveByFileResponse detectLiveByFile(DetectLiveByFileRequest request) {
return new DetectLiveByFileResponse().withVideoResult(MockResult.getLiveDetectResult())
.withWarningList(Collections.emptyList());
}
}
|
FrsClientMock
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/observable/ObservableWindowTimed.java
|
{
"start": 5241,
"end": 9847
}
|
class ____<T>
extends AbstractWindowObserver<T>
implements Runnable {
private static final long serialVersionUID = 1155822639622580836L;
final Scheduler scheduler;
UnicastSubject<T> window;
final SequentialDisposable timer;
static final Object NEXT_WINDOW = new Object();
final Runnable windowRunnable;
WindowExactUnboundedObserver(Observer<? super Observable<T>> actual, long timespan, TimeUnit unit,
Scheduler scheduler, int bufferSize) {
super(actual, timespan, unit, bufferSize);
this.scheduler = scheduler;
this.timer = new SequentialDisposable();
this.windowRunnable = new WindowRunnable();
}
@Override
void createFirstWindow() {
if (!downstreamCancelled.get()) {
windowCount.getAndIncrement();
window = UnicastSubject.create(bufferSize, windowRunnable);
emitted = 1;
ObservableWindowSubscribeIntercept<T> intercept = new ObservableWindowSubscribeIntercept<>(window);
downstream.onNext(intercept);
timer.replace(scheduler.schedulePeriodicallyDirect(this, timespan, timespan, unit));
if (intercept.tryAbandon()) {
window.onComplete();
}
}
}
@Override
public void run() {
queue.offer(NEXT_WINDOW);
drain();
}
@Override
void drain() {
if (getAndIncrement() != 0) {
return;
}
final SimplePlainQueue<Object> queue = this.queue;
final Observer<? super Observable<T>> downstream = this.downstream;
UnicastSubject<T> window = this.window;
int missed = 1;
for (;;) {
if (upstreamCancelled) {
queue.clear();
window = null;
this.window = null;
} else {
boolean isDone = done;
Object o = queue.poll();
boolean isEmpty = o == null;
if (isDone && isEmpty) {
Throwable ex = error;
if (ex != null) {
if (window != null) {
window.onError(ex);
}
downstream.onError(ex);
} else {
if (window != null) {
window.onComplete();
}
downstream.onComplete();
}
cleanupResources();
upstreamCancelled = true;
continue;
}
else if (!isEmpty) {
if (o == NEXT_WINDOW) {
if (window != null) {
window.onComplete();
window = null;
this.window = null;
}
if (downstreamCancelled.get()) {
timer.dispose();
} else {
emitted++;
windowCount.getAndIncrement();
window = UnicastSubject.create(bufferSize, windowRunnable);
this.window = window;
ObservableWindowSubscribeIntercept<T> intercept = new ObservableWindowSubscribeIntercept<>(window);
downstream.onNext(intercept);
if (intercept.tryAbandon()) {
window.onComplete();
}
}
} else if (window != null) {
@SuppressWarnings("unchecked")
T item = (T)o;
window.onNext(item);
}
continue;
}
}
missed = addAndGet(-missed);
if (missed == 0) {
break;
}
}
}
@Override
void cleanupResources() {
timer.dispose();
}
final
|
WindowExactUnboundedObserver
|
java
|
apache__camel
|
components/camel-salesforce/camel-salesforce-component/src/main/java/org/apache/camel/component/salesforce/api/dto/AbstractQueryRecordsBase.java
|
{
"start": 954,
"end": 1788
}
|
class ____<T extends AbstractSObjectBase> extends AbstractDTOBase {
private Boolean done;
private int totalSize;
private String nextRecordsUrl;
private List<T> records;
public Boolean getDone() {
return done;
}
public void setDone(Boolean done) {
this.done = done;
}
public int getTotalSize() {
return totalSize;
}
public void setTotalSize(int totalSize) {
this.totalSize = totalSize;
}
public String getNextRecordsUrl() {
return nextRecordsUrl;
}
public void setNextRecordsUrl(String nextRecordsUrl) {
this.nextRecordsUrl = nextRecordsUrl;
}
public List<T> getRecords() {
return records;
}
public void setRecords(List<T> records) {
this.records = records;
}
}
|
AbstractQueryRecordsBase
|
java
|
google__guava
|
android/guava-tests/test/com/google/common/collect/AbstractImmutableSetTest.java
|
{
"start": 18493,
"end": 18850
}
|
class ____<E> extends ForwardingList<E> {
final Iterator<ImmutableList<E>> infiniteCandidates;
MutatedOnQueryList(Iterable<ImmutableList<E>> infiniteCandidates) {
this.infiniteCandidates = infiniteCandidates.iterator();
}
@Override
protected List<E> delegate() {
return infiniteCandidates.next();
}
}
}
|
MutatedOnQueryList
|
java
|
spring-projects__spring-boot
|
core/spring-boot/src/main/java/org/springframework/boot/json/JsonWriter.java
|
{
"start": 23914,
"end": 29602
}
|
interface ____<T extends @Nullable Object> {
/**
* Represents a skipped value.
*/
Object SKIP = new Object();
/**
* Extract the value from the given instance.
* @param instance the source instance
* @return the extracted value or {@link #SKIP}
*/
@Nullable T extract(@Nullable Object instance);
/**
* Only extract when the given predicate matches.
* @param predicate the predicate to test
* @return a new {@link ValueExtractor}
*/
default ValueExtractor<T> when(Predicate<? super @Nullable T> predicate) {
return (instance) -> test(extract(instance), predicate);
}
@SuppressWarnings("unchecked")
private @Nullable T test(@Nullable T extracted, Predicate<? super @Nullable T> predicate) {
return (!skip(extracted) && predicate.test(extracted)) ? extracted : (T) SKIP;
}
/**
* Adapt the extracted value.
* @param <R> the result type
* @param extractor the extractor to use
* @return a new {@link ValueExtractor}
*/
default <R> ValueExtractor<R> as(Extractor<T, R> extractor) {
return (instance) -> apply(extract(instance), extractor);
}
@SuppressWarnings("unchecked")
private <R> @Nullable R apply(@Nullable T value, Extractor<T, R> extractor) {
if (skip(value)) {
return (R) SKIP;
}
return (value != null) ? extractor.extract(value) : null;
}
/**
* Create a new {@link ValueExtractor} based on the given {@link Function}.
* @param <S> the source type
* @param <T> the extracted type
* @param extractor the extractor to use
* @return a new {@link ValueExtractor} instance
*/
@SuppressWarnings("unchecked")
static <S, T> ValueExtractor<T> of(Extractor<S, T> extractor) {
return (instance) -> {
if (instance == null) {
return null;
}
return (skip(instance)) ? (T) SKIP : extractor.extract((S) instance);
};
}
/**
* Return if the extracted value should be skipped.
* @param <T> the value type
* @param extracted the value to test
* @return if the value is to be skipped
*/
static <T> boolean skip(@Nullable T extracted) {
return extracted == SKIP;
}
}
}
/**
* A path used to identify a specific JSON member. Paths can be represented as strings
* in form {@code "my.json[1].item"} where elements are separated by {@code '.' } or
* {@code [<index>]}. Reserved characters are escaped using {@code '\'}.
*
* @param parent the parent of this path
* @param name the name of the member or {@code null} if the member is indexed. Path
* names are provided as they were defined when the member was added and do not
* include any {@link NameProcessor name processing}.
* @param index the index of the member or {@link MemberPath#UNINDEXED}
*/
record MemberPath(@Nullable MemberPath parent, @Nullable String name, int index) {
private static final String[] ESCAPED = { "\\", ".", "[", "]" };
public MemberPath {
Assert.isTrue((name != null && index < 0) || (name == null && index >= 0),
"'name' and 'index' cannot be mixed");
}
/**
* Indicates that the member has no index.
*/
public static final int UNINDEXED = -1;
/**
* The root of all member paths.
*/
static final MemberPath ROOT = new MemberPath(null, "", UNINDEXED);
/**
* Create a new child from this path with the specified index.
* @param index the index of the child
* @return a new {@link MemberPath} instance
*/
public MemberPath child(int index) {
return new MemberPath(this, null, index);
}
/**
* Create a new child from this path with the specified name.
* @param name the name of the child
* @return a new {@link MemberPath} instance
*/
public MemberPath child(String name) {
return (!StringUtils.hasLength(name)) ? this : new MemberPath(this, name, UNINDEXED);
}
@Override
public String toString() {
return toString(true);
}
/**
* Return a string representation of the path without any escaping.
* @return the unescaped string representation
*/
public String toUnescapedString() {
return toString(false);
}
private String toString(boolean escape) {
StringBuilder string = new StringBuilder((this.parent != null) ? this.parent.toString(escape) : "");
if (this.index >= 0) {
string.append("[").append(this.index).append("]");
}
else {
string.append((!string.isEmpty()) ? "." : "").append((!escape) ? this.name : escape(this.name));
}
return string.toString();
}
private @Nullable String escape(@Nullable String name) {
if (name == null) {
return null;
}
for (String escape : ESCAPED) {
name = name.replace(escape, "\\" + escape);
}
return name;
}
/**
* Create a new {@link MemberPath} instance from the given string.
* @param value the path value
* @return a new {@link MemberPath} instance
*/
public static MemberPath of(String value) {
MemberPath path = MemberPath.ROOT;
StringBuilder buffer = new StringBuilder();
boolean escape = false;
for (char ch : value.toCharArray()) {
if (!escape && ch == '\\') {
escape = true;
}
else if (!escape && (ch == '.' || ch == '[')) {
path = path.child(buffer.toString());
buffer.setLength(0);
}
else if (!escape && ch == ']') {
path = path.child(Integer.parseUnsignedInt(buffer.toString()));
buffer.setLength(0);
}
else {
buffer.append(ch);
escape = false;
}
}
path = path.child(buffer.toString());
return path;
}
}
/**
* Interface that can be used to extract name/value pairs from an element.
*
* @param <E> the element type
*/
|
ValueExtractor
|
java
|
assertj__assertj-core
|
assertj-core/src/main/java/org/assertj/core/api/InstanceOfAssertFactory.java
|
{
"start": 1265,
"end": 4214
}
|
class ____<T, ASSERT extends AbstractAssert<?, ?>> implements AssertFactory<Object, ASSERT> {
private final Type type;
private final Class<T> rawClass;
private final AssertFactory<T, ASSERT> delegate;
/**
* Instantiates a new {@code InstanceOfAssertFactory} for a given type.
*
* @param type the {@link Class} instance of the given type
* @param delegate the {@link AssertFactory} to decorate
*/
public InstanceOfAssertFactory(Class<T> type, AssertFactory<T, ASSERT> delegate) {
this(type, type, delegate);
}
/**
* Instantiates a new {@code InstanceOfAssertFactory} for a given type
* with type arguments, usually representing a {@link ParameterizedType}.
*
* @param rawClass the raw {@link Class} instance of the given type
* @param typeArguments the {@link Type} arguments of the given type
* @param delegate the {@link AssertFactory} to decorate
* @since 3.26.0
*/
public InstanceOfAssertFactory(Class<T> rawClass, Type[] typeArguments, AssertFactory<T, ASSERT> delegate) {
this(new SyntheticParameterizedType(rawClass, typeArguments), rawClass, delegate);
}
private InstanceOfAssertFactory(Type type, Class<T> rawClass, AssertFactory<T, ASSERT> delegate) {
this.type = requireNonNull(type, shouldNotBeNull("type")::create);
this.rawClass = requireNonNull(rawClass, shouldNotBeNull("rawClass")::create);
this.delegate = requireNonNull(delegate, shouldNotBeNull("delegate")::create);
}
/**
* Creates the custom {@link Assert} instance for the given value.
* <p>
* Before invoking the delegate, the factory casts the value to the raw
* {@link Class} defined during instantiation.
*
* @param actual the input value for the {@code Assert} instance
* @return the custom {@code Assert} instance for the given value
*/
@Override
public ASSERT createAssert(Object actual) {
return delegate.createAssert(rawClass.cast(actual));
}
/**
* Creates the custom {@link Assert} instance for the value provided by the
* given {@link ValueProvider}.
* <p>
* The given {@code ValueProvider} can execute type-aware logic before
* providing the required value, like type validation or value conversion.
* <p>
* This implementation requests a value compatible with the {@link Type}
* defined during instantiation and casts the provided value to the
* corresponding raw {@link Class} before invoking the delegate.
*
* @param valueProvider the value provider for the {@code Assert} instance
* @return the custom {@code Assert} instance for the provided value
* @since 3.26.0
*/
@Override
public ASSERT createAssert(ValueProvider<?> valueProvider) {
Object actual = valueProvider.apply(type);
return createAssert(actual);
}
@Override
public String toString() {
return "InstanceOfAssertFactory for " + type.getTypeName();
}
private static final
|
InstanceOfAssertFactory
|
java
|
elastic__elasticsearch
|
modules/lang-painless/src/test/java/org/elasticsearch/painless/ScoreScriptTests.java
|
{
"start": 1177,
"end": 3078
}
|
class ____ extends ESSingleNodeTestCase {

    /**
     * Test that needTermStats() is reported correctly depending on whether _termStats is used.
     */
    public void testNeedsTermStats() {
        IndexService index = createIndex("test", Settings.EMPTY, "type", "d", "type=double");

        // Build a Painless engine whose score context whitelists the _termStats API.
        List<Whitelist> whitelists = new ArrayList<>(PAINLESS_BASE_WHITELIST);
        whitelists.add(WhitelistLoader.loadFromResourceFiles(PainlessPlugin.class, "org.elasticsearch.script.score.txt"));
        Map<ScriptContext<?>, List<Whitelist>> contexts = new HashMap<>();
        contexts.put(ScoreScript.CONTEXT, whitelists);
        PainlessScriptEngine service = new PainlessScriptEngine(Settings.EMPTY, contexts);
        SearchExecutionContext searchExecutionContext = index.newSearchExecutionContext(0, 0, null, () -> 0, null, emptyMap());

        // Scripts that never mention _termStats must not request term statistics...
        assertFalse(compiledScriptNeedsTermStats(service, searchExecutionContext, "1.2"));
        assertFalse(compiledScriptNeedsTermStats(service, searchExecutionContext, "doc['d'].value"));
        // ...while any script referencing _termStats must.
        assertTrue(compiledScriptNeedsTermStats(service, searchExecutionContext, "1/_termStats.totalTermFreq().getAverage()"));
        assertTrue(compiledScriptNeedsTermStats(service, searchExecutionContext, "doc['d'].value * _termStats.docFreq().getSum()"));
    }

    /** Compiles {@code script} for the score context and reports whether its leaf factory needs term stats. */
    private static boolean compiledScriptNeedsTermStats(
        PainlessScriptEngine service,
        SearchExecutionContext searchExecutionContext,
        String script
    ) {
        ScoreScript.Factory factory = service.compile(null, script, ScoreScript.CONTEXT, Collections.emptyMap());
        ScoreScript.LeafFactory leafFactory = factory.newFactory(Collections.emptyMap(), searchExecutionContext.lookup());
        return leafFactory.needs_termStats();
    }
}
|
ScoreScriptTests
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java
|
{
"start": 38548,
"end": 39715
}
|
/**
 * Acked, batched cluster-state update task carrying the parameters needed to set the
 * rollover-on-write flag for a data stream (or its failure store) within a project.
 * The task itself is an immutable parameter holder; the accessors below expose its state.
 */
class ____ extends AckedBatchedClusterStateUpdateTask {
    private final ProjectId projectId;
    private final String dataStreamName;
    private final boolean rolloverOnWrite;
    private final boolean targetFailureStore;
    SetRolloverOnWriteTask(
        ProjectId projectId,
        String dataStreamName,
        boolean rolloverOnWrite,
        boolean targetFailureStore,
        TimeValue ackTimeout,
        ActionListener<AcknowledgedResponse> listener
    ) {
        // Ack timeout and listener are handled entirely by the superclass.
        super(ackTimeout, listener);
        this.projectId = projectId;
        this.dataStreamName = dataStreamName;
        this.rolloverOnWrite = rolloverOnWrite;
        this.targetFailureStore = targetFailureStore;
    }
    /** The project the data stream belongs to. */
    public ProjectId projectId() {
        return projectId;
    }
    /** The name of the data stream whose flag is being set. */
    public String getDataStreamName() {
        return dataStreamName;
    }
    /** The desired value of the rollover-on-write flag. */
    public boolean rolloverOnWrite() {
        return rolloverOnWrite;
    }
    /** Whether the flag targets the failure store rather than the data stream itself. */
    public boolean targetFailureStore() {
        return targetFailureStore;
    }
}
static
|
SetRolloverOnWriteTask
|
java
|
quarkusio__quarkus
|
integration-tests/hibernate-validator/src/main/java/io/quarkus/it/hibernate/validator/HibernateValidatorTestResource.java
|
{
"start": 2116,
"end": 12641
}
|
class ____
extends HibernateValidatorTestResourceSuperclass
implements HibernateValidatorTestResourceGenericInterface<Integer>, HibernateValidatorTestResourceInterface {
@Inject
Validator validator;
@Inject
GreetingService greetingService;
@Inject
EnhancedGreetingService enhancedGreetingService;
@Inject
ValidationServiceBasedOnXmlConstraints validationServiceBasedOnXmlConstraints;
@Inject
ZipCodeService zipCodeResource;
@Inject
EntityManager em;
public void testValidationOutsideOfResteasyContext(@Observes StartupEvent startupEvent) {
validator.validate(new MyOtherBean(null));
}
@GET
@Path("/basic-features")
@Produces(MediaType.TEXT_PLAIN)
public String testBasicFeatures() {
    // Validates two MyBean instances — one built from values expected to violate
    // constraints, one from values expected to pass — and reports both outcomes.
    ResultBuilder result = new ResultBuilder();
    Map<String, List<String>> invalidCategorizedEmails = new HashMap<>();
    invalidCategorizedEmails.put("a", Collections.singletonList("b"));
    result.append(formatViolations(validator.validate(new MyBean(
            "Bill Jones",
            "b",
            Collections.singletonList("c"),
            -4d,
            invalidCategorizedEmails))));
    Map<String, List<String>> validCategorizedEmails = new HashMap<>();
    validCategorizedEmails.put("Professional", Collections.singletonList("bill.jones@example.com"));
    result.append(formatViolations(validator.validate(new MyBean(
            "Bill Jones",
            "bill.jones@example.com",
            Collections.singletonList("biji@example.com"),
            5d,
            validCategorizedEmails))));
    return result.build();
}
@GET
@Path("/custom-class-level-constraint")
@Produces(MediaType.TEXT_PLAIN)
public String testCustomClassLevelConstraint() {
ResultBuilder result = new ResultBuilder();
result.append(formatViolations(validator.validate(new MyOtherBean(null))));
result.append(formatViolations(validator.validate(new MyOtherBean("name"))));
return result.build();
}
@GET
@Path("/cdi-bean-method-validation")
@Produces(MediaType.TEXT_PLAIN)
public String testCDIBeanMethodValidation() {
ResultBuilder result = new ResultBuilder();
greetingService.greeting("test");
result.append(formatViolations(Collections.emptySet()));
try {
greetingService.greeting(null);
} catch (ConstraintViolationException e) {
result.append(formatViolations(e.getConstraintViolations()));
}
return result.build();
}
@GET
@Path("/cdi-bean-method-validation-uncaught")
@Produces(MediaType.TEXT_PLAIN)
public String testCDIBeanMethodValidationUncaught() {
return greetingService.greeting(null);
}
@GET
@Path("/rest-end-point-validation/{id}/")
@Produces({ MediaType.TEXT_PLAIN, MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public String testRestEndPointValidation(@Digits(integer = 5, fraction = 0) @PathParam("id") String id) {
return id;
}
@GET
@Path("/rest-end-point-return-value-validation/{returnValue}/")
@Produces(MediaType.TEXT_PLAIN)
@Digits(integer = 5, fraction = 0)
public String testRestEndPointReturnValueValidation(@PathParam("returnValue") String returnValue) {
return returnValue;
}
// all JAX-RS annotations are defined in the interface
@Override
public String testRestEndPointInterfaceValidation(String id) {
return id;
}
// all JAX-RS annotations are defined in the interface
@Override
@SomeInterceptorBindingAnnotation
public String testRestEndPointInterfaceValidationWithAnnotationOnImplMethod(String id) {
return id;
}
// all JAX-RS annotations are defined in the superclass
@Override
@SomeInterceptorBindingAnnotation
public String testRestEndPointInterfaceValidationWithAnnotationOnOverriddenMethod(String id) {
return id;
}
@GET
@Path("/rest-end-point-generic-method-validation/{id}")
@Produces(MediaType.TEXT_PLAIN)
@Override
public Integer testRestEndpointGenericMethodValidation(@Digits(integer = 5, fraction = 0) @PathParam("id") Integer id) {
return id;
}
@GET
@Path("/no-produces/{id}/")
public Response noProduces(@Digits(integer = 5, fraction = 0) @PathParam("id") String id) {
return Response.accepted().build();
}
@GET
@Path("/injection")
@Produces(MediaType.TEXT_PLAIN)
public String testInjection() {
ResultBuilder result = new ResultBuilder();
result.append(formatViolations(validator.validate(new BeanWithInjectedConstraintValidatorConstraint(MyService.VALID))));
result.append(formatViolations(validator.validate(new BeanWithInjectedConstraintValidatorConstraint("Invalid value"))));
result.append(formatViolations(
validator.validate(new BeanWithInjectedRuntimeConstraintValidatorConstraint("any text is valid"))));
result.append(formatViolations(
validator.validate(new BeanWithInjectedRuntimeConstraintValidatorConstraint("numbers 12345 don't work"))));
return result.build();
}
@GET
@Path("/test-inherited-implements-constraints")
@Produces(MediaType.TEXT_PLAIN)
public String testInheritedImplementsConstraints() {
ResultBuilder result = new ResultBuilder();
zipCodeResource.echoZipCode("12345");
result.append(formatViolations(Collections.emptySet()));
try {
zipCodeResource.echoZipCode("1234");
} catch (ConstraintViolationException e) {
result.append(formatViolations(e.getConstraintViolations()));
}
return result.build();
}
@GET
@Path("/test-inherited-extends-constraints")
@Produces(MediaType.TEXT_PLAIN)
public String testInheritedExtendsConstraints() {
ResultBuilder result = new ResultBuilder();
enhancedGreetingService.greeting("test");
result.append(formatViolations(Collections.emptySet()));
try {
enhancedGreetingService.greeting(null);
} catch (ConstraintViolationException e) {
result.append(formatViolations(e.getConstraintViolations()));
}
return result.build();
}
@GET
@Path("/test-validation-message-locale/{id}/")
@Produces(MediaType.TEXT_PLAIN)
public Response testValidationMessageLocale(
@Pattern(regexp = "A.*", message = "{pattern.message}") @PathParam("id") String id) {
return Response.accepted().build();
}
@POST
@Path("/test-manual-validation-message-locale")
@Produces(MediaType.TEXT_PLAIN)
public String testManualValidationMessageLocale(MyLocaleTestBean test) {
    // Manually validates the posted bean and reports the outcome.
    // Note: formatViolations() already renders "passed" for an empty violation
    // set, so the former empty-set branch was redundant and has been removed —
    // behavior is unchanged.
    Set<ConstraintViolation<MyLocaleTestBean>> violations = validator.validate(test);
    ResultBuilder result = new ResultBuilder();
    result.append(formatViolations(violations));
    return result.build();
}
@GET
@Path("/test-hibernate-orm-integration")
@Produces(MediaType.TEXT_PLAIN)
@Transactional
public String testHibernateOrmIntegration() {
em.persist(new TestEntity());
return "FAILED";
}
@POST
@Path("/rest-end-point-validation-groups/")
@Produces(MediaType.TEXT_PLAIN)
@Consumes(MediaType.APPLICATION_JSON)
public String testRestEndPointValidationGroups_Post(
@Valid @ConvertGroup(to = ValidationGroups.Post.class) MyBeanWithGroups bean) {
return "passed";
}
@PUT
@Path("/rest-end-point-validation-groups/")
@Produces(MediaType.TEXT_PLAIN)
@Consumes(MediaType.APPLICATION_JSON)
public String testRestEndPointValidationGroups_Put(
@Valid @ConvertGroup(to = ValidationGroups.Put.class) MyBeanWithGroups bean) {
return "passed";
}
@GET
@Path("/rest-end-point-validation-groups/{id}/")
@Produces(MediaType.APPLICATION_JSON)
@Consumes(MediaType.APPLICATION_JSON)
@Valid
@ConvertGroup(to = ValidationGroups.Get.class)
public MyBeanWithGroups testRestEndPointValidationGroups_Get(@PathParam("id") long id,
@QueryParam("simulateDeleted") boolean simulateDeleted,
@QueryParam("simulateNullName") boolean simulateNullName) {
MyBeanWithGroups result = new MyBeanWithGroups();
result.setId(id);
result.setName(simulateNullName ? null : "someName");
result.setDeleted(simulateDeleted);
return result;
}
@DELETE
@Path("/rest-end-point-validation-groups/{id}/")
@Produces(MediaType.APPLICATION_JSON)
@Consumes(MediaType.APPLICATION_JSON)
@Valid
@ConvertGroup(to = ValidationGroups.Delete.class)
public MyBeanWithGroups testRestEndPointValidationGroups_Delete(@PathParam("id") long id,
@QueryParam("simulateDeleted") boolean simulateDeleted,
@QueryParam("simulateNullName") boolean simulateNullName) {
MyBeanWithGroups result = new MyBeanWithGroups();
result.setId(id);
result.setName(simulateNullName ? null : "someName");
result.setDeleted(simulateDeleted);
return result;
}
@GET
@Path("/rest-end-point-clock-based-constraints")
@Produces(MediaType.TEXT_PLAIN)
public String testClockBasedConstraints() {
ResultBuilder result = new ResultBuilder();
result.append(formatViolations(validator.validate(new Task())));
return result.build();
}
@GET
@Path("/constraints-defined-in-xml")
@Produces(MediaType.TEXT_PLAIN)
public String testConstraintsDefinedInXml() {
ResultBuilder result = new ResultBuilder();
result.append(formatViolations(validationServiceBasedOnXmlConstraints.validateSomeMyXmlBean()));
return result.build();
}
/**
 * Renders a violation set as a stable, human-readable summary: "passed" when
 * empty, otherwise "failed: " followed by sorted "path (message)" entries.
 */
private String formatViolations(Set<? extends ConstraintViolation<?>> violations) {
    if (violations.isEmpty()) {
        return "passed";
    }
    // Sort the rendered entries so the response is deterministic regardless
    // of the set's iteration order.
    List<String> descriptions = violations.stream()
            .map(violation -> violation.getPropertyPath() + " (" + violation.getMessage() + ")")
            .sorted()
            .collect(Collectors.toList());
    return "failed: " + String.join(", ", descriptions);
}
public static
|
HibernateValidatorTestResource
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/iterative/concurrent/BrokerTest.java
|
{
"start": 3425,
"end": 4217
}
|
class ____ implements Callable<StringPair> {

    private final Broker<String> broker;
    private final String key;
    private final Random random;

    IterationTail(Broker<String> broker, Integer key) {
        this.broker = broker;
        this.key = String.valueOf(key);
        this.random = new Random();
    }

    /**
     * Sleeps a short random time to shuffle scheduling, then retrieves and
     * removes the value handed over for this key, failing if none was set.
     */
    @Override
    public StringPair call() throws Exception {
        Thread.sleep(random.nextInt(10));
        String handedOverValue = broker.getAndRemove(key);
        Preconditions.checkNotNull(handedOverValue);
        return new StringPair(key, handedOverValue);
    }
}
}
|
IterationTail
|
java
|
google__truth
|
core/src/main/java/com/google/common/truth/GraphMatching.java
|
{
"start": 3091,
"end": 14064
}
|
class ____<U, V> {
// Adjacency of the bipartite graph: each LHS vertex maps to its RHS neighbours.
private final Multimap<U, V> graph;
/**
 * Factory method which returns an instance ready to perform the algorithm over the bipartite
 * graph described by the given multimap.
 */
static <U, V> HopcroftKarp<U, V> overBipartiteGraph(Multimap<U, V> graph) {
    return new HopcroftKarp<>(graph);
}
// Private: instances are obtained only via the overBipartiteGraph factory.
private HopcroftKarp(Multimap<U, V> graph) {
    this.graph = graph;
}
/** Performs the algorithm, and returns a bimap describing the matching found. */
ImmutableBiMap<U, V> perform() {
    // Start from the empty matching and repeatedly augment it; each BFS+DFS
    // round strictly grows the matching until no augmenting path remains.
    BiMap<U, V> matching = HashBiMap.create();
    while (true) {
        // Perform the BFS as described below. This finds the length of the shortest augmenting path
        // and a guide which locates all the augmenting paths of that length.
        Map<U, Integer> layers = new HashMap<>();
        Integer freeRhsVertexLayer = breadthFirstSearch(matching, layers);
        if (freeRhsVertexLayer == null) {
            // The BFS failed, i.e. we found no augmenting paths. So we're done.
            break;
        }
        // Perform the DFS and update the matching as described below starting from each free LHS
        // vertex. This finds a disjoint set of augmenting paths of the shortest length and updates
        // the matching by computing the symmetric difference with that set.
        for (U lhs : graph.keySet()) {
            if (!matching.containsKey(lhs)) {
                depthFirstSearch(matching, layers, freeRhsVertexLayer, lhs);
            }
        }
    }
    return ImmutableBiMap.copyOf(matching);
}
/**
* Performs the Breadth-First Search phase of the algorithm. Specifically, treats the bipartite
* graph as a directed graph where every unmatched edge (i.e. every edge not in the current
* matching) is directed from the LHS vertex to the RHS vertex and every matched edge is
* directed from the RHS vertex to the LHS vertex, and performs a BFS which starts from all of
* the free LHS vertices (i.e. the LHS vertices which are not in the current matching) and stops
* either at the end of a layer where a free RHS vertex is found or when the search is exhausted
* if no free RHS vertex is found. Keeps track of which layer of the BFS each LHS vertex was
* found in (for those LHS vertices visited during the BFS), so the free LHS vertices are in
* layer 1, those reachable by following an unmatched edge from any free LHS vertex to any
* non-free RHS vertex and then the matched edge back to a LHS vertex are in layer 2, etc. Note
* that every path in a successful search starts with a free LHS vertex and ends with a free RHS
* vertex, with every intermediate vertex being non-free.
*
* @param matching A bimap describing the matching to be used for the BFS, which is not modified
* by this method
* @param layers A map to be filled with the layer of each LHS vertex visited during the BFS,
* which should be empty when passed into this method and will be modified by this method
* @return The number of the layer in which the first free RHS vertex was found, if any, and
* {@code null} if the BFS was exhausted without finding any free RHS vertex
*/
private @Nullable Integer breadthFirstSearch(BiMap<U, V> matching, Map<U, Integer> layers) {
    // FIFO queue of LHS vertices still to be expanded, in layer order.
    Queue<U> queue = new ArrayDeque<>();
    Integer freeRhsVertexLayer = null;
    // Enqueue all free LHS vertices and assign them to layer 1.
    for (U lhs : graph.keySet()) {
        if (!matching.containsKey(lhs)) {
            layers.put(lhs, 1);
            queue.add(lhs);
        }
    }
    // Now proceed with the BFS.
    while (!queue.isEmpty()) {
        U lhs = queue.remove();
        int layer = checkNotNull(layers.get(lhs));
        // If the BFS has proceeded past a layer in which a free RHS vertex was found, stop.
        if (freeRhsVertexLayer != null && layer > freeRhsVertexLayer) {
            break;
        }
        // We want to consider all the unmatched edges from the current LHS vertex to the RHS, and
        // then all the matched edges from those RHS vertices back to the LHS, to find the next
        // layer of LHS vertices. We actually iterate over all edges, both matched and unmatched,
        // from the current LHS vertex: we'll just do nothing for matched edges.
        for (V rhs : graph.get(lhs)) {
            if (!matching.containsValue(rhs)) {
                // We found a free RHS vertex. Record the layer at which we found it. Since the RHS
                // vertex is free, there is no matched edge to follow. (Note that the edge from the LHS
                // to the RHS must be unmatched, because a matched edge cannot lead to a free vertex.)
                if (freeRhsVertexLayer == null) {
                    freeRhsVertexLayer = layer;
                }
            } else {
                // We found an RHS vertex with a matched vertex back to the LHS. If we haven't visited
                // that new LHS vertex yet, add it to the next layer. (If the edge from the LHS to the
                // RHS was matched then the matched edge from the RHS to the LHS will lead back to the
                // current LHS vertex, which has definitely been visited, so we correctly do nothing.)
                U nextLhs = checkNotNull(matching.inverse().get(rhs));
                if (!layers.containsKey(nextLhs)) {
                    layers.put(nextLhs, layer + 1);
                    queue.add(nextLhs);
                }
            }
        }
    }
    // null means the search was exhausted without reaching any free RHS vertex.
    return freeRhsVertexLayer;
}
/**
* Performs the Depth-First Search phase of the algorithm. The DFS is guided by the BFS phase,
* i.e. it only uses paths which were used in the BFS. That means the steps in the DFS proceed
* from an LHS vertex via an unmatched edge to an RHS vertex and from an RHS vertex via a
* matched edge to an LHS vertex only if that LHS vertex is one layer deeper in the BFS than the
* previous one. It starts from the specified LHS vertex and stops either when it finds one of
* the free RHS vertices located by the BFS or when the search is exhausted. If a free RHS
* vertex is found then all the unmatched edges in the search path and added to the matching and
* all the matched edges in the search path are removed from the matching; in other words, the
* direction (which is determined by the matched/unmatched status) of every edge in the search
* path is flipped. Note several properties of this update to the matching:
*
* <ul>
* <li>Because the search path must contain one more unmatched than matched edges, the effect
* of this modification is to increase the size of the matching by one.
* <li>This modification results in the free LHS vertex at the start of the path and the free
* RHS vertex at the end of the path becoming non-free, while the intermediate non-free
* vertices stay non-free.
* <li>None of the edges used in this search path may be used in any further DFS. They cannot
* be used in the same direction as they were in this DFS because their directions are
* flipped; and they cannot be used in their new directions because we only use edges
* leading to the next layer of the BFS and, after flipping the directions, these edges
* now lead to the previous layer.
* <li>As a consequence of the previous property, repeated invocations of this method will
* find only paths which were used in the BFS and which were not used in any previous DFS
* (i.e. the set of edges used in the paths found by repeated DFSes are disjoint).
* </ul>
*
* @param matching A bimap describing the matching to be used for the BFS, which will be
* modified by this method as described above
* @param layers A map giving the layer of each LHS vertex visited during the BFS, which will
* not be modified by this method
* @param freeRhsVertexLayer The number of the layer in which the first free RHS vertex was
* found
* @param lhs The LHS vertex from which to start the DFS
* @return Whether or not the DFS was successful
*/
@CanIgnoreReturnValue
private boolean depthFirstSearch(
    BiMap<U, V> matching, Map<U, Integer> layers, int freeRhsVertexLayer, U lhs) {
    // Note that this differs from the method described in the text of the wikipedia article (at
    // time of writing) in two ways. Firstly, we proceed from a free LHS vertex to a free RHS
    // vertex in the target layer instead of the other way around, which makes no difference.
    // Secondly, we update the matching using the path found from each DFS after it is found,
    // rather than using all the paths at the end of the phase. As explained above, the effect of
    // this is that we automatically find only the disjoint set of paths, as required. This is,
    // fact, the approach taken in the pseudocode of the wikipedia article (at time of writing).
    int layer = checkNotNull(layers.get(lhs));
    if (layer > freeRhsVertexLayer) {
        // We've gone past the target layer, so we're not going to find what we're looking for.
        return false;
    }
    // Consider every edge from this LHS vertex.
    for (V rhs : graph.get(lhs)) {
        if (!matching.containsValue(rhs)) {
            // We found a free RHS vertex. (This must have been in the target layer because, by
            // definition, no free RHS vertex is reachable in any earlier layer, and because we stop
            // when we get past that layer.) We add the unmatched edge used to get here to the
            // matching, and remove any previous matched edge leading to the LHS vertex.
            matching.forcePut(lhs, rhs);
            return true;
        } else {
            // We found a non-free RHS vertex. Follow the matched edge from that RHS vertex to find
            // the next LHS vertex.
            U nextLhs = checkNotNull(matching.inverse().get(rhs));
            if (layers.containsKey(nextLhs) && layers.get(nextLhs) == layer + 1) {
                // The next LHS vertex is in the next layer of the BFS, so we can use this path for our
                // DFS. Recurse into the DFS.
                if (depthFirstSearch(matching, layers, freeRhsVertexLayer, nextLhs)) {
                    // The DFS succeeded, and we're reversing back up the search path. At each stage we
                    // put the unmatched edge from the LHS to the RHS into the matching, and remove any
                    // matched edge previously leading to the LHS. The combined effect of all the
                    // modifications made while reversing all the way back up the search path is to update
                    // the matching as described in the javadoc.
                    matching.forcePut(lhs, rhs);
                    return true;
                }
            }
        }
    }
    // Search exhausted from this vertex without reaching a free RHS vertex.
    return false;
}
}
}
|
HopcroftKarp
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/web/servlet/samples/standalone/resulthandlers/PrintingResultHandlerSmokeTests.java
|
{
"start": 2019,
"end": 2646
}
|
class ____ {
// Not intended to be executed with the build.
// Comment out class-level @Disabled to see the output.
@Test
public void testPrint() throws Exception {
    StringWriter capturedOutput = new StringWriter();

    // Exercise every print/log variant side by side: SLF4J logging, stdout,
    // stderr, and a caller-supplied Writer.
    standaloneSetup(new SimpleController())
            .build()
            .perform(get("/").content("Hello Request".getBytes()))
            .andDo(log())
            .andDo(print())
            .andDo(print(System.err))
            .andDo(print(capturedOutput));

    // Dump what was captured in the Writer for visual inspection.
    System.out.println();
    System.out.println("===============================================================");
    System.out.println(capturedOutput);
}
@Controller
private static
|
PrintingResultHandlerSmokeTests
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/operators/sort/NormalizedKeySorter.java
|
{
"start": 1540,
"end": 22480
}
|
class ____<T> implements InMemorySorter<T> {
private static final Logger LOG = LoggerFactory.getLogger(NormalizedKeySorter.class);
private static final int OFFSET_LEN = 8;
private static final int DEFAULT_MAX_NORMALIZED_KEY_LEN = 16;
private static final int MAX_NORMALIZED_KEY_LEN_PER_ELEMENT = 8;
private static final int MIN_REQUIRED_BUFFERS = 3;
private static final int LARGE_RECORD_THRESHOLD = 10 * 1024 * 1024;
private static final long LARGE_RECORD_TAG = 1L << 63;
private static final long POINTER_MASK = LARGE_RECORD_TAG - 1;
// ------------------------------------------------------------------------
// Members
// ------------------------------------------------------------------------
private final byte[] swapBuffer;
private final TypeSerializer<T> serializer;
private final TypeComparator<T> comparator;
private final SimpleCollectingOutputView recordCollector;
private final RandomAccessInputView recordBuffer;
private final RandomAccessInputView recordBufferForComparison;
private MemorySegment currentSortIndexSegment;
private final ArrayList<MemorySegment> freeMemory;
private final ArrayList<MemorySegment> sortIndex;
private final ArrayList<MemorySegment> recordBufferSegments;
private long currentDataBufferOffset;
private long sortIndexBytes;
private int currentSortIndexOffset;
private int numRecords;
private final int numKeyBytes;
private final int indexEntrySize;
private final int indexEntriesPerSegment;
private final int lastIndexEntryOffset;
private final int segmentSize;
private final int totalNumBuffers;
private final boolean normalizedKeyFullyDetermines;
private final boolean useNormKeyUninverted;
// -------------------------------------------------------------------------
// Constructors / Destructors
// -------------------------------------------------------------------------
public NormalizedKeySorter(
TypeSerializer<T> serializer,
TypeComparator<T> comparator,
List<MemorySegment> memory) {
this(serializer, comparator, memory, DEFAULT_MAX_NORMALIZED_KEY_LEN);
}
public NormalizedKeySorter(
TypeSerializer<T> serializer,
TypeComparator<T> comparator,
List<MemorySegment> memory,
int maxNormalizedKeyBytes) {
if (serializer == null || comparator == null || memory == null) {
throw new NullPointerException();
}
if (maxNormalizedKeyBytes < 0) {
throw new IllegalArgumentException(
"Maximal number of normalized key bytes must not be negative.");
}
this.serializer = serializer;
this.comparator = comparator;
this.useNormKeyUninverted = !comparator.invertNormalizedKey();
// check the size of the first buffer and record it. all further buffers must have the same
// size.
// the size must also be a power of 2
this.totalNumBuffers = memory.size();
if (this.totalNumBuffers < MIN_REQUIRED_BUFFERS) {
throw new IllegalArgumentException(
"Normalized-Key sorter requires at least "
+ MIN_REQUIRED_BUFFERS
+ " memory buffers.");
}
this.segmentSize = memory.get(0).size();
this.freeMemory = new ArrayList<MemorySegment>(memory);
// create the buffer collections
this.sortIndex = new ArrayList<MemorySegment>(16);
this.recordBufferSegments = new ArrayList<MemorySegment>(16);
// the views for the record collections
this.recordCollector =
new SimpleCollectingOutputView(
this.recordBufferSegments,
new ListMemorySegmentSource(this.freeMemory),
this.segmentSize);
this.recordBuffer = new RandomAccessInputView(this.recordBufferSegments, this.segmentSize);
this.recordBufferForComparison =
new RandomAccessInputView(this.recordBufferSegments, this.segmentSize);
// set up normalized key characteristics
if (this.comparator.supportsNormalizedKey()) {
// compute the max normalized key length
int numPartialKeys;
try {
numPartialKeys = this.comparator.getFlatComparators().length;
} catch (Throwable t) {
numPartialKeys = 1;
}
int maxLen =
Math.min(
maxNormalizedKeyBytes,
MAX_NORMALIZED_KEY_LEN_PER_ELEMENT * numPartialKeys);
this.numKeyBytes = Math.min(this.comparator.getNormalizeKeyLen(), maxLen);
this.normalizedKeyFullyDetermines =
!this.comparator.isNormalizedKeyPrefixOnly(this.numKeyBytes);
} else {
this.numKeyBytes = 0;
this.normalizedKeyFullyDetermines = false;
}
// compute the index entry size and limits
this.indexEntrySize = this.numKeyBytes + OFFSET_LEN;
this.indexEntriesPerSegment = this.segmentSize / this.indexEntrySize;
this.lastIndexEntryOffset = (this.indexEntriesPerSegment - 1) * this.indexEntrySize;
this.swapBuffer = new byte[this.indexEntrySize];
// set to initial state
this.currentSortIndexSegment = nextMemorySegment();
this.sortIndex.add(this.currentSortIndexSegment);
}
@Override
public int recordSize() {
return indexEntrySize;
}
@Override
public int recordsPerSegment() {
return indexEntriesPerSegment;
}
// -------------------------------------------------------------------------
// Memory Segment
// -------------------------------------------------------------------------
/**
* Resets the sort buffer back to the state where it is empty. All contained data is discarded.
*/
@Override
public void reset() {
// reset all offsets
this.numRecords = 0;
this.currentSortIndexOffset = 0;
this.currentDataBufferOffset = 0;
this.sortIndexBytes = 0;
// return all memory
this.freeMemory.addAll(this.sortIndex);
this.freeMemory.addAll(this.recordBufferSegments);
this.sortIndex.clear();
this.recordBufferSegments.clear();
// grab first buffers
this.currentSortIndexSegment = nextMemorySegment();
this.sortIndex.add(this.currentSortIndexSegment);
this.recordCollector.reset();
}
/**
* Checks whether the buffer is empty.
*
* @return True, if no record is contained, false otherwise.
*/
@Override
public boolean isEmpty() {
return this.numRecords == 0;
}
@Override
public void dispose() {
this.freeMemory.clear();
this.recordBufferSegments.clear();
this.sortIndex.clear();
}
@Override
public long getCapacity() {
return ((long) this.totalNumBuffers) * this.segmentSize;
}
@Override
public long getOccupancy() {
return this.currentDataBufferOffset + this.sortIndexBytes;
}
// -------------------------------------------------------------------------
// Retrieving and Writing
// -------------------------------------------------------------------------
@Override
public T getRecord(int logicalPosition) throws IOException {
return getRecordFromBuffer(readPointer(logicalPosition));
}
@Override
public T getRecord(T reuse, int logicalPosition) throws IOException {
return getRecordFromBuffer(reuse, readPointer(logicalPosition));
}
/**
* Writes a given record to this sort buffer. The written record will be appended and take the
* last logical position.
*
* @param record The record to be written.
* @return True, if the record was successfully written, false, if the sort buffer was full.
* @throws IOException Thrown, if an error occurred while serializing the record into the
* buffers.
*/
@Override
public boolean write(T record) throws IOException {
    // check whether we need a new memory segment for the sort index
    if (this.currentSortIndexOffset > this.lastIndexEntryOffset) {
        if (memoryAvailable()) {
            this.currentSortIndexSegment = nextMemorySegment();
            this.sortIndex.add(this.currentSortIndexSegment);
            this.currentSortIndexOffset = 0;
            this.sortIndexBytes += this.segmentSize;
        } else {
            // no free segments left — signal "sort buffer full" to the caller
            return false;
        }
    }
    // serialize the record into the data buffers
    try {
        this.serializer.serialize(record, this.recordCollector);
    } catch (EOFException e) {
        // the data buffers ran out of space mid-serialization — buffer full
        return false;
    }
    final long newOffset = this.recordCollector.getCurrentOffset();
    final boolean shortRecord =
            newOffset - this.currentDataBufferOffset < LARGE_RECORD_THRESHOLD;
    if (!shortRecord && LOG.isDebugEnabled()) {
        LOG.debug("Put a large record ( >" + LARGE_RECORD_THRESHOLD + " into the sort buffer");
    }
    // add the pointer and the normalized key; large records are flagged by
    // setting LARGE_RECORD_TAG (the pointer's top bit, masked off on read)
    this.currentSortIndexSegment.putLong(
            this.currentSortIndexOffset,
            shortRecord
                    ? this.currentDataBufferOffset
                    : (this.currentDataBufferOffset | LARGE_RECORD_TAG));
    if (this.numKeyBytes != 0) {
        this.comparator.putNormalizedKey(
                record,
                this.currentSortIndexSegment,
                this.currentSortIndexOffset + OFFSET_LEN,
                this.numKeyBytes);
    }
    // commit: advance index offset, remember the new data end, count the record
    this.currentSortIndexOffset += this.indexEntrySize;
    this.currentDataBufferOffset = newOffset;
    this.numRecords++;
    return true;
}
// ------------------------------------------------------------------------
// Access Utilities
// ------------------------------------------------------------------------
/**
 * Resolves the data-buffer pointer stored in the sort index for the given logical record
 * position. The large-record tag bit is masked out, so the result is always a plain offset.
 *
 * @param logicalPosition the logical position of the record, in [0, numRecords)
 * @return the offset of the record in the data buffers
 * @throws IndexOutOfBoundsException if the position is negative or not less than numRecords
 */
private long readPointer(int logicalPosition) {
    if (logicalPosition < 0 || logicalPosition >= this.numRecords) {
        // Include the offending values so out-of-bounds accesses are diagnosable.
        throw new IndexOutOfBoundsException(
                "Position " + logicalPosition + " is out of bounds [0, " + this.numRecords + ").");
    }
    final int bufferNum = logicalPosition / this.indexEntriesPerSegment;
    final int segmentOffset = logicalPosition % this.indexEntriesPerSegment;
    return (this.sortIndex.get(bufferNum).getLong(segmentOffset * this.indexEntrySize))
            & POINTER_MASK;
}
private T getRecordFromBuffer(T reuse, long pointer) throws IOException {
this.recordBuffer.setReadPosition(pointer);
return this.serializer.deserialize(reuse, this.recordBuffer);
}
    /**
     * Deserializes the record stored at the given data-buffer offset into a newly created object.
     *
     * @param pointer The offset of the record in the data buffer (must already be mask-cleared).
     * @return The deserialized record.
     * @throws IOException Thrown, if deserialization from the buffer fails.
     */
    private T getRecordFromBuffer(long pointer) throws IOException {
        this.recordBuffer.setReadPosition(pointer);
        return this.serializer.deserialize(this.recordBuffer);
    }
    /**
     * Compares the two records at the given data-buffer offsets in their serialized form.
     *
     * <p>Two distinct read views are used so both records can be streamed through the comparator
     * concurrently without interfering with each other's read position.
     *
     * @param pointer1 Offset of the first record in the data buffer.
     * @param pointer2 Offset of the second record in the data buffer.
     * @return A negative, zero, or positive value per the comparator contract.
     */
    private int compareRecords(long pointer1, long pointer2) {
        this.recordBuffer.setReadPosition(pointer1);
        this.recordBufferForComparison.setReadPosition(pointer2);
        try {
            return this.comparator.compareSerialized(
                    this.recordBuffer, this.recordBufferForComparison);
        } catch (IOException ioex) {
            // Comparison happens in-memory; an I/O failure here indicates corrupt buffer state.
            throw new RuntimeException("Error comparing two records.", ioex);
        }
    }
private boolean memoryAvailable() {
return !this.freeMemory.isEmpty();
}
    /**
     * Takes the next free memory segment out of the free list.
     *
     * <p>Removes from the end of the list, which is O(1) for an array-backed list.
     */
    private MemorySegment nextMemorySegment() {
        return this.freeMemory.remove(this.freeMemory.size() - 1);
    }
// -------------------------------------------------------------------------
// Indexed Sorting
// -------------------------------------------------------------------------
@Override
public int compare(int i, int j) {
final int segmentNumberI = i / this.indexEntriesPerSegment;
final int segmentOffsetI = (i % this.indexEntriesPerSegment) * this.indexEntrySize;
final int segmentNumberJ = j / this.indexEntriesPerSegment;
final int segmentOffsetJ = (j % this.indexEntriesPerSegment) * this.indexEntrySize;
return compare(segmentNumberI, segmentOffsetI, segmentNumberJ, segmentOffsetJ);
}
    /**
     * Compares two index entries given by (segment number, byte offset) coordinates.
     *
     * <p>First compares the fixed-size normalized keys stored directly after the 8-byte pointer
     * prefix of each entry. Only when the normalized keys tie and do not fully determine the
     * order is the full serialized-record comparison performed.
     */
    @Override
    public int compare(
            int segmentNumberI, int segmentOffsetI, int segmentNumberJ, int segmentOffsetJ) {
        final MemorySegment segI = this.sortIndex.get(segmentNumberI);
        final MemorySegment segJ = this.sortIndex.get(segmentNumberJ);
        // Byte-compare the normalized keys; OFFSET_LEN skips the pointer prefix.
        int val =
                segI.compare(
                        segJ,
                        segmentOffsetI + OFFSET_LEN,
                        segmentOffsetJ + OFFSET_LEN,
                        this.numKeyBytes);
        if (val != 0 || this.normalizedKeyFullyDetermines) {
            // Normalized keys may be stored inverted; undo the inversion if needed.
            return this.useNormKeyUninverted ? val : -val;
        }
        // Tie on a non-deciding prefix: fall back to comparing the serialized records.
        final long pointerI = segI.getLong(segmentOffsetI) & POINTER_MASK;
        final long pointerJ = segJ.getLong(segmentOffsetJ) & POINTER_MASK;
        return compareRecords(pointerI, pointerJ);
    }
@Override
public void swap(int i, int j) {
final int segmentNumberI = i / this.indexEntriesPerSegment;
final int segmentOffsetI = (i % this.indexEntriesPerSegment) * this.indexEntrySize;
final int segmentNumberJ = j / this.indexEntriesPerSegment;
final int segmentOffsetJ = (j % this.indexEntriesPerSegment) * this.indexEntrySize;
swap(segmentNumberI, segmentOffsetI, segmentNumberJ, segmentOffsetJ);
}
    /**
     * Swaps the two index entries at the given segment/offset coordinates, using the
     * pre-allocated swap buffer as scratch space.
     */
    @Override
    public void swap(
            int segmentNumberI, int segmentOffsetI, int segmentNumberJ, int segmentOffsetJ) {
        final MemorySegment segI = this.sortIndex.get(segmentNumberI);
        final MemorySegment segJ = this.sortIndex.get(segmentNumberJ);
        segI.swapBytes(this.swapBuffer, segJ, segmentOffsetI, segmentOffsetJ, this.indexEntrySize);
    }
    /** Returns the number of records currently held in this sort buffer. */
    @Override
    public int size() {
        return this.numRecords;
    }
// -------------------------------------------------------------------------
/**
* Gets an iterator over all records in this buffer in their logical order.
*
* @return An iterator returning the records in their logical order.
*/
@Override
public final MutableObjectIterator<T> getIterator() {
return new MutableObjectIterator<T>() {
private final int size = size();
private int current = 0;
private int currentSegment = 0;
private int currentOffset = 0;
private MemorySegment currentIndexSegment = sortIndex.get(0);
@Override
public T next(T target) {
if (this.current < this.size) {
this.current++;
if (this.currentOffset > lastIndexEntryOffset) {
this.currentOffset = 0;
this.currentIndexSegment = sortIndex.get(++this.currentSegment);
}
long pointer =
this.currentIndexSegment.getLong(this.currentOffset) & POINTER_MASK;
this.currentOffset += indexEntrySize;
try {
return getRecordFromBuffer(target, pointer);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
} else {
return null;
}
}
@Override
public T next() {
if (this.current < this.size) {
this.current++;
if (this.currentOffset > lastIndexEntryOffset) {
this.currentOffset = 0;
this.currentIndexSegment = sortIndex.get(++this.currentSegment);
}
long pointer = this.currentIndexSegment.getLong(this.currentOffset);
this.currentOffset += indexEntrySize;
try {
return getRecordFromBuffer(pointer);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
} else {
return null;
}
}
};
}
// ------------------------------------------------------------------------
// Writing to a DataOutputView
// ------------------------------------------------------------------------
    /**
     * Writes the records in this buffer in their logical order to the given output.
     *
     * @param output The output view to write the records to.
     * @throws IOException Thrown, if an I/O exception occurred writing to the output view.
     */
    @Override
    public void writeToOutput(ChannelWriterOutputView output) throws IOException {
        // Delegate with no large-record handler: every record goes to the regular output.
        writeToOutput(output, null);
    }
    /**
     * Writes the records in this buffer in their logical order to the given output, routing
     * records whose index entry carries the large-record tag to the supplied handler when one
     * is given.
     *
     * @param output The output view that receives the regular (small) records.
     * @param largeRecordsOutput Sink for large records; may be {@code null}.
     * @throws IOException Thrown, if an I/O exception occurred writing to the output view.
     */
    @Override
    public void writeToOutput(
            ChannelWriterOutputView output, LargeRecordHandler<T> largeRecordsOutput)
            throws IOException {
        if (LOG.isDebugEnabled()) {
            if (largeRecordsOutput == null) {
                LOG.debug("Spilling sort buffer without large record handling.");
            } else {
                LOG.debug("Spilling sort buffer with large record handling.");
            }
        }
        final int numRecords = this.numRecords;
        int currentMemSeg = 0;
        int currentRecord = 0;
        while (currentRecord < numRecords) {
            final MemorySegment currentIndexSegment = this.sortIndex.get(currentMemSeg++);
            // go through all records in the memory segment
            for (int offset = 0;
                    currentRecord < numRecords && offset <= this.lastIndexEntryOffset;
                    currentRecord++, offset += this.indexEntrySize) {
                // Raw index entry: a negative value means the large-record tag bit is set.
                final long pointer = currentIndexSegment.getLong(offset);
                // small records go into the regular spill file, large records into the special code
                // path
                if (pointer >= 0 || largeRecordsOutput == null) {
                    // NOTE(review): the pointer is used unmasked here; this assumes no entry is
                    // tagged when largeRecordsOutput is null — confirm that invariant with callers.
                    this.recordBuffer.setReadPosition(pointer);
                    this.serializer.copy(this.recordBuffer, output);
                } else {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Spilling large record to large record fetch file.");
                    }
                    // Strip the tag bit to recover the real offset, deserialize the record fully,
                    // and hand it to the dedicated large-record path.
                    this.recordBuffer.setReadPosition(pointer & POINTER_MASK);
                    T record = this.serializer.deserialize(this.recordBuffer);
                    largeRecordsOutput.addRecord(record);
                }
            }
        }
    }
    /**
     * Writes a subset of the records in this buffer in their logical order to the given output.
     *
     * @param output The output view to write the records to.
     * @param start The logical start position of the subset.
     * @param num The number of elements to write.
     * @throws IOException Thrown, if an I/O exception occurred writing to the output view.
     */
    @Override
    public void writeToOutput(final ChannelWriterOutputView output, final int start, int num)
            throws IOException {
        // Translate the logical start position into (segment, byte offset) coordinates.
        int currentMemSeg = start / this.indexEntriesPerSegment;
        int offset = (start % this.indexEntriesPerSegment) * this.indexEntrySize;
        while (num > 0) {
            final MemorySegment currentIndexSegment = this.sortIndex.get(currentMemSeg++);
            // check whether we have a full or partially full segment
            if (num >= this.indexEntriesPerSegment && offset == 0) {
                // full segment: copy every entry without per-record bound checks on num
                for (; offset <= this.lastIndexEntryOffset; offset += this.indexEntrySize) {
                    final long pointer = currentIndexSegment.getLong(offset) & POINTER_MASK;
                    this.recordBuffer.setReadPosition(pointer);
                    this.serializer.copy(this.recordBuffer, output);
                }
                num -= this.indexEntriesPerSegment;
            } else {
                // partially filled segment: stop when either num or the segment is exhausted
                for (;
                        num > 0 && offset <= this.lastIndexEntryOffset;
                        num--, offset += this.indexEntrySize) {
                    final long pointer = currentIndexSegment.getLong(offset) & POINTER_MASK;
                    this.recordBuffer.setReadPosition(pointer);
                    this.serializer.copy(this.recordBuffer, output);
                }
            }
            // Subsequent segments are always read from their beginning.
            offset = 0;
        }
    }
}
|
NormalizedKeySorter
|
java
|
apache__maven
|
impl/maven-cli/src/test/java/org/apache/maven/cling/invoker/mvnup/goals/ApplyTest.java
|
{
"start": 4313,
"end": 4986
}
|
class ____ {
@Test
@DisplayName("should inherit behavior from AbstractUpgradeGoal")
void shouldInheritBehaviorFromAbstractUpgradeGoal() {
// This test verifies that Apply inherits the model version logic from AbstractUpgradeGoal
// The actual logic is tested in AbstractUpgradeGoalTest
// Here we just verify that Apply is properly configured as a subclass
assertInstanceOf(AbstractUpgradeGoal.class, applyGoal, "Apply should extend AbstractUpgradeGoal");
assertTrue(applyGoal.shouldSaveModifications(), "Apply should save modifications unlike Check goal");
}
}
}
|
IntegrationTests
|
java
|
spring-projects__spring-framework
|
spring-core/src/test/java/org/springframework/aot/nativex/FileNativeConfigurationWriterTests.java
|
{
"start": 1732,
"end": 6874
}
|
class ____ {
@TempDir
static Path tempDir;
@Test
void emptyConfig() {
Path empty = tempDir.resolve("empty");
FileNativeConfigurationWriter generator = new FileNativeConfigurationWriter(empty);
generator.write(new RuntimeHints());
assertThat(empty.toFile().listFiles()).isNull();
}
@Test
void serializationConfig() throws IOException, JSONException {
FileNativeConfigurationWriter generator = new FileNativeConfigurationWriter(tempDir);
RuntimeHints hints = new RuntimeHints();
SerializationHints serializationHints = hints.serialization();
serializationHints.registerType(Integer.class);
serializationHints.registerType(Long.class);
generator.write(hints);
assertEquals("""
{
"serialization": [
{ "type": "java.lang.Integer" },
{ "type": "java.lang.Long" }
]
}
""");
}
@Test
void proxyConfig() throws IOException, JSONException {
FileNativeConfigurationWriter generator = new FileNativeConfigurationWriter(tempDir);
RuntimeHints hints = new RuntimeHints();
ProxyHints proxyHints = hints.proxies();
proxyHints.registerJdkProxy(Function.class);
proxyHints.registerJdkProxy(Function.class, Consumer.class);
generator.write(hints);
assertEquals("""
{
"reflection": [
{ type: {"proxy": [ "java.util.function.Function" ] } },
{ type: {"proxy": [ "java.util.function.Function", "java.util.function.Consumer" ] } }
]
}
""");
}
@Test
void reflectionConfig() throws IOException, JSONException {
FileNativeConfigurationWriter generator = new FileNativeConfigurationWriter(tempDir);
RuntimeHints hints = new RuntimeHints();
ReflectionHints reflectionHints = hints.reflection();
reflectionHints.registerType(StringDecoder.class, builder -> builder
.onReachableType(String.class)
.withMembers(MemberCategory.ACCESS_PUBLIC_FIELDS, MemberCategory.ACCESS_DECLARED_FIELDS,
MemberCategory.INVOKE_PUBLIC_CONSTRUCTORS, MemberCategory.INVOKE_DECLARED_CONSTRUCTORS,
MemberCategory.INVOKE_PUBLIC_METHODS, MemberCategory.INVOKE_DECLARED_METHODS)
.withField("DEFAULT_CHARSET")
.withField("defaultCharset")
.withMethod("setDefaultCharset", TypeReference.listOf(Charset.class), ExecutableMode.INVOKE));
generator.write(hints);
assertEquals("""
{
"reflection": [
{
"type": "org.springframework.core.codec.StringDecoder",
"condition": { "typeReached": "java.lang.String" },
"allPublicFields": true,
"allDeclaredFields": true,
"allPublicConstructors": true,
"allDeclaredConstructors": true,
"allPublicMethods": true,
"allDeclaredMethods": true,
"fields": [
{ "name": "DEFAULT_CHARSET" },
{ "name": "defaultCharset" }
],
"methods": [
{ "name": "setDefaultCharset", "parameterTypes": [ "java.nio.charset.Charset" ] }
]
}
]
}
""");
}
@Test
void jniConfig() throws IOException, JSONException {
// same format as reflection so just test basic file generation
FileNativeConfigurationWriter generator = new FileNativeConfigurationWriter(tempDir);
RuntimeHints hints = new RuntimeHints();
ReflectionHints jniHints = hints.jni();
jniHints.registerType(StringDecoder.class, builder -> builder.onReachableType(String.class));
generator.write(hints);
assertEquals("""
{
"jni": [
{
"type": "org.springframework.core.codec.StringDecoder",
"condition": { "typeReached": "java.lang.String" }
}
]
}""");
}
@Test
void resourceConfig() throws IOException, JSONException {
FileNativeConfigurationWriter generator = new FileNativeConfigurationWriter(tempDir);
RuntimeHints hints = new RuntimeHints();
ResourceHints resourceHints = hints.resources();
resourceHints.registerPattern("com/example/test.properties");
resourceHints.registerPattern("com/example/another.properties");
generator.write(hints);
assertEquals("""
{
"resources": [
{"glob": "com/example/test.properties"},
{"glob": "/"},
{"glob": "com"},
{"glob": "com/example"},
{"glob": "com/example/another.properties"}
]
}""");
}
@Test
void namespace() {
String groupId = "foo.bar";
String artifactId = "baz";
String filename = "reachability-metadata.json";
FileNativeConfigurationWriter generator = new FileNativeConfigurationWriter(tempDir, groupId, artifactId);
RuntimeHints hints = new RuntimeHints();
ResourceHints resourceHints = hints.resources();
resourceHints.registerPattern("com/example/test.properties");
generator.write(hints);
Path jsonFile = tempDir.resolve("META-INF").resolve("native-image").resolve(groupId).resolve(artifactId).resolve(filename);
assertThat(jsonFile.toFile()).exists();
}
private void assertEquals(String expectedString) throws IOException, JSONException {
Path jsonFile = tempDir.resolve("META-INF").resolve("native-image").resolve("reachability-metadata.json");
String content = Files.readString(jsonFile);
JSONAssert.assertEquals(expectedString, content, JSONCompareMode.NON_EXTENSIBLE);
}
}
|
FileNativeConfigurationWriterTests
|
java
|
spring-projects__spring-framework
|
spring-r2dbc/src/test/java/org/springframework/r2dbc/core/AbstractTransactionalDatabaseClientIntegrationTests.java
|
{
"start": 1826,
"end": 6158
}
|
class ____ {
private ConnectionFactory connectionFactory;
AnnotationConfigApplicationContext context;
DatabaseClient databaseClient;
R2dbcTransactionManager transactionManager;
TransactionalOperator rxtx;
@BeforeEach
public void before() {
connectionFactory = createConnectionFactory();
context = new AnnotationConfigApplicationContext();
context.getBeanFactory().registerResolvableDependency(ConnectionFactory.class, connectionFactory);
context.register(Config.class);
context.refresh();
Mono.from(connectionFactory.create())
.flatMapMany(connection -> Flux.from(connection.createStatement("DROP TABLE legoset").execute())
.flatMap(Result::getRowsUpdated)
.onErrorComplete()
.thenMany(connection.createStatement(getCreateTableStatement()).execute())
.flatMap(Result::getRowsUpdated).thenMany(connection.close())).as(StepVerifier::create).verifyComplete();
databaseClient = DatabaseClient.create(connectionFactory);
transactionManager = new R2dbcTransactionManager(connectionFactory);
rxtx = TransactionalOperator.create(transactionManager);
}
@AfterEach
public void tearDown() {
context.close();
}
/**
* Create a {@link ConnectionFactory} to be used in this test.
* @return the {@link ConnectionFactory} to be used in this test.
*/
protected abstract ConnectionFactory createConnectionFactory();
/**
* Return the CREATE TABLE statement for table {@code legoset} with the following three columns:
* <ul>
* <li>id integer (primary key), not null</li>
* <li>name varchar(255), nullable</li>
* <li>manual integer, nullable</li>
* </ul>
*
* @return the CREATE TABLE statement for table {@code legoset} with three columns.
*/
protected abstract String getCreateTableStatement();
/**
* Get a parameterized {@code INSERT INTO legoset} statement setting id, name, and manual values.
*/
protected String getInsertIntoLegosetStatement() {
return "INSERT INTO legoset (id, name, manual) VALUES(:id, :name, :manual)";
}
@Test
void executeInsertInTransaction() {
Flux<Long> longFlux = databaseClient
.sql(getInsertIntoLegosetStatement())
.bind(0, 42055)
.bind(1, "SCHAUFELRADBAGGER")
.bindNull(2, Integer.class)
.fetch().rowsUpdated().flux().as(rxtx::transactional);
longFlux.as(StepVerifier::create)
.expectNext(1L)
.verifyComplete();
databaseClient
.sql("SELECT id FROM legoset")
.fetch()
.first()
.as(StepVerifier::create)
.assertNext(actual -> assertThat(actual).hasEntrySatisfying("id", numberOf(42055)))
.verifyComplete();
}
@Test
void shouldRollbackTransaction() {
Mono<Object> integerFlux = databaseClient.sql(getInsertIntoLegosetStatement())
.bind(0, 42055)
.bind(1, "SCHAUFELRADBAGGER")
.bindNull(2, Integer.class)
.fetch().rowsUpdated()
.then(Mono.error(new IllegalStateException("failed")))
.as(rxtx::transactional);
integerFlux.as(StepVerifier::create)
.expectError(IllegalStateException.class)
.verify();
databaseClient
.sql("SELECT id FROM legoset")
.fetch()
.first()
.as(StepVerifier::create)
.verifyComplete();
}
@Test
void shouldRollbackTransactionUsingTransactionalOperator() {
DatabaseClient databaseClient = DatabaseClient.create(connectionFactory);
TransactionalOperator transactionalOperator = TransactionalOperator
.create(new R2dbcTransactionManager(connectionFactory), new DefaultTransactionDefinition());
Flux<Integer> integerFlux = databaseClient.sql(getInsertIntoLegosetStatement())
.bind(0, 42055)
.bind(1, "SCHAUFELRADBAGGER")
.bindNull(2, Integer.class)
.fetch().rowsUpdated()
.thenMany(Mono.fromSupplier(() -> {
throw new IllegalStateException("failed");
}));
integerFlux.as(transactionalOperator::transactional)
.as(StepVerifier::create)
.expectError(IllegalStateException.class)
.verify();
databaseClient
.sql("SELECT id FROM legoset")
.fetch()
.first()
.as(StepVerifier::create)
.verifyComplete();
}
private Condition<? super Object> numberOf(int expected) {
return new Condition<>(object -> object instanceof Number num &&
num.intValue() == expected, "Number %d", expected);
}
@Configuration(proxyBeanMethods = false)
static
|
AbstractTransactionalDatabaseClientIntegrationTests
|
java
|
quarkusio__quarkus
|
extensions/mongodb-client/deployment/src/test/java/io/quarkus/mongodb/MongoCommandListenerTest.java
|
{
"start": 624,
"end": 1416
}
|
class ____ extends MongoTestBase {
@Inject
MongoClient client;
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.setArchiveProducer(
() -> ShrinkWrap.create(JavaArchive.class).addClasses(MongoTestBase.class, MockCommandListener.class))
.withConfigurationResource("default-mongoclient.properties");
@AfterEach
void cleanup() {
if (client != null) {
client.close();
}
}
@Test
void testClientInitialization() {
assertThat(client.listDatabaseNames().first()).isNotEmpty();
assertThat(MockCommandListener.EVENTS, hasSize(1));
assertThat(MockCommandListener.EVENTS, hasItems(equalTo("listDatabases")));
}
}
|
MongoCommandListenerTest
|
java
|
apache__camel
|
components/camel-sql/src/test/java/org/apache/camel/processor/aggregate/jdbc/ClusteredJdbcAggregateRecoverTest.java
|
{
"start": 1155,
"end": 4276
}
|
class ____ extends AbstractClusteredJdbcAggregationTestSupport {
private static AtomicInteger counter = new AtomicInteger();
@Override
void configureJdbcAggregationRepository() {
repo = applicationContext.getBean("repo5", ClusteredJdbcAggregationRepository.class);
repobis = applicationContext.getBean("repo6", ClusteredJdbcAggregationRepository.class);
// enable recovery
repo.setUseRecovery(true);
// check faster
repo.setRecoveryInterval(500, TimeUnit.MILLISECONDS);
repo.setRecoveryByInstance(true);
repo.setInstanceId("INSTANCE1");
repobis.setUseRecovery(true);
repobis.setRecoveryInterval(50, TimeUnit.MILLISECONDS);
repobis.setRecoveryByInstance(true);
repobis.setInstanceId("INSTANCE2");
}
@Test
public void testJdbcAggregateRecover() throws Exception {
// should fail the first 2 times and then recover
getMockEndpoint("mock:aggregated").expectedMessageCount(3);
getMockEndpoint("mock:result").expectedBodiesReceived("ABCDE");
// should be marked as redelivered
getMockEndpoint("mock:result").message(0).header(Exchange.REDELIVERED).isEqualTo(Boolean.TRUE);
// on the 2nd redelivery attempt we success
getMockEndpoint("mock:result").message(0).header(Exchange.REDELIVERY_COUNTER).isEqualTo(2);
template.sendBodyAndHeader("direct:start", "A", "id", 123);
template.sendBodyAndHeader("direct:start", "B", "id", 123);
template.sendBodyAndHeader("direct:start", "C", "id", 123);
template.sendBodyAndHeader("direct:start", "D", "id", 123);
template.sendBodyAndHeader("direct:start", "E", "id", 123);
MockEndpoint.assertIsSatisfied(context, 30, TimeUnit.SECONDS);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
configureJdbcAggregationRepository();
from("direct:start").aggregate(header("id"), new MyAggregationStrategy()).completionSize(5)
.aggregationRepository(repo)
.log("aggregated exchange id ${exchangeId} with ${body}").to("mock:aggregated").delay(1000)
// simulate errors the first two times
.process(new Processor() {
public void process(Exchange exchange) {
int count = counter.incrementAndGet();
if (count <= 2) {
throw new IllegalArgumentException("Damn");
}
}
}).to("mock:result").end();
from("direct:tutu").aggregate(header("id"), new MyAggregationStrategy()).completionSize(5).aggregationRepository(repobis)
.log("aggregated exchange id ${exchangeId} with ${body}").log("recover bis!!!!!!!!!!!!!!!!!").end();
}
};
}
}
|
ClusteredJdbcAggregateRecoverTest
|
java
|
quarkusio__quarkus
|
extensions/vertx/deployment/src/test/java/io/quarkus/vertx/CodecRegistrationTest.java
|
{
"start": 9236,
"end": 9455
}
|
class ____ {
private final String name;
CustomType3(String name) {
this.name = name;
}
public String getName() {
return name;
}
}
static
|
CustomType3
|
java
|
apache__flink
|
flink-kubernetes/src/test/java/org/apache/flink/kubernetes/kubeclient/resources/KubernetesLeaderElectorTest.java
|
{
"start": 1271,
"end": 2344
}
|
class ____ extends KubernetesTestBase {
private String lockIdentity;
private KubernetesConfigMap leaderConfigMap;
private static final String CONFIGMAP_NAME = "test-config-map";
public void onSetup() {
lockIdentity = UUID.randomUUID().toString();
leaderConfigMap = new TestingFlinkKubeClient.MockKubernetesConfigMap(CONFIGMAP_NAME);
}
@Test
void testNoAnnotation() {
assertThat(KubernetesLeaderElector.hasLeadership(leaderConfigMap, lockIdentity)).isFalse();
}
@Test
void testAnnotationNotMatch() {
leaderConfigMap.getAnnotations().put(LEADER_ANNOTATION_KEY, "wrong lock");
assertThat(KubernetesLeaderElector.hasLeadership(leaderConfigMap, lockIdentity)).isFalse();
}
@Test
void testAnnotationMatched() {
leaderConfigMap
.getAnnotations()
.put(LEADER_ANNOTATION_KEY, "other information " + lockIdentity);
assertThat(KubernetesLeaderElector.hasLeadership(leaderConfigMap, lockIdentity)).isTrue();
}
}
|
KubernetesLeaderElectorTest
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/TempDirectoryPreconditionTests.java
|
{
"start": 4648,
"end": 4762
}
|
class ____ {
final @TempDir Path path = Path.of(".");
@Test
void test() {
}
}
}
|
FinalInstanceFieldTestCase
|
java
|
quarkusio__quarkus
|
extensions/opentelemetry/runtime/src/main/java/io/quarkus/opentelemetry/runtime/tracing/intrumentation/resteasy/OpenTelemetryClassicThreadContext.java
|
{
"start": 315,
"end": 998
}
|
class ____ implements ThreadContext<Map<String, Object>> {
@Override
public Map<String, Object> capture() {
Map<String, Object> context = new HashMap<>();
context.put("context", Context.current());
return context;
}
@Override
public void push(final Map<String, Object> context) {
Context current = (Context) context.get("context");
Scope scope = current.makeCurrent();
context.put("scope", scope);
}
@Override
public void reset(final Map<String, Object> context) {
Scope scope = (Scope) context.get("scope");
scope.close();
context.clear();
}
}
|
OpenTelemetryClassicThreadContext
|
java
|
apache__camel
|
components/camel-jira/src/main/java/org/apache/camel/component/jira/producer/WatcherProducer.java
|
{
"start": 1449,
"end": 3314
}
|
class ____ extends DefaultProducer {
public WatcherProducer(JiraEndpoint endpoint) {
super(endpoint);
}
@Override
public void process(Exchange exchange) {
String issueKey = exchange.getIn().getHeader(ISSUE_KEY, String.class);
String watchersAdd = exchange.getIn().getHeader(ISSUE_WATCHERS_ADD, String.class);
String watchersRemove = exchange.getIn().getHeader(ISSUE_WATCHERS_REMOVE, String.class);
if (issueKey == null) {
throw new IllegalArgumentException(
"Missing exchange input header named 'IssueKey', it should specify the issue key to add/remove watchers to.");
}
JiraRestClient client = ((JiraEndpoint) getEndpoint()).getClient();
boolean hasWatchersToAdd = ObjectHelper.isNotEmpty(watchersAdd);
boolean hasWatchersToRemove = ObjectHelper.isNotEmpty(watchersRemove);
if (hasWatchersToAdd || hasWatchersToRemove) {
IssueRestClient issueClient = client.getIssueClient();
Issue issue = issueClient.getIssue(issueKey).claim();
if (hasWatchersToAdd) {
String[] watArr = watchersAdd.split(",");
for (String s : watArr) {
String watcher = s.trim();
if (!watcher.isEmpty()) {
issueClient.addWatcher(issue.getWatchers().getSelf(), watcher);
}
}
}
if (hasWatchersToRemove) {
String[] watArr = watchersRemove.split(",");
for (String s : watArr) {
String watcher = s.trim();
if (!watcher.isEmpty()) {
issueClient.removeWatcher(issue.getWatchers().getSelf(), watcher);
}
}
}
}
}
}
|
WatcherProducer
|
java
|
google__auto
|
value/src/it/functional/src/test/java/com/google/auto/value/AutoValueTest.java
|
{
"start": 122363,
"end": 124835
}
|
class ____ extends Giant.Builder {
abstract Builder x32(int x);
abstract Builder x33(int x);
abstract Giant33 build();
}
}
@Test
public void testGiant31() {
Giant31.Builder builder = Giant31.builder();
builder.setFirst30();
builder.x31(31);
Giant31 giant = builder.build();
assertThat(giant.x1()).isEqualTo(1);
assertThat(giant.x31()).isEqualTo(31);
builder = Giant31.builder();
builder.setFirst30();
try {
builder.build();
fail();
} catch (IllegalStateException expected) {
if (omitIdentifiers) {
assertThat(expected).hasMessageThat().isNull();
} else {
assertThat(expected).hasMessageThat().contains("x31");
assertThat(expected).hasMessageThat().doesNotContain("x30");
}
}
}
@Test
public void testGiant32() {
Giant32.Builder builder = Giant32.builder();
builder.setFirst30();
builder.x31(31);
builder.x32(32);
Giant32 giant = builder.build();
assertThat(giant.x1()).isEqualTo(1);
assertThat(giant.x31()).isEqualTo(31);
builder = Giant32.builder();
builder.setFirst30();
try {
builder.build();
fail();
} catch (IllegalStateException expected) {
if (omitIdentifiers) {
assertThat(expected).hasMessageThat().isNull();
} else {
assertThat(expected).hasMessageThat().contains("x31");
assertThat(expected).hasMessageThat().contains("x32");
assertThat(expected).hasMessageThat().doesNotContain("x30");
}
}
}
@Test
public void testGiant33() {
Giant33.Builder builder = Giant33.builder();
builder.setFirst30();
builder.x31(31);
builder.x32(32);
builder.x33(33);
Giant33 giant = builder.build();
assertThat(giant.x1()).isEqualTo(1);
assertThat(giant.x31()).isEqualTo(31);
assertThat(giant.x32()).isEqualTo(32);
assertThat(giant.x33()).isEqualTo(33);
builder = Giant33.builder();
builder.setFirst30();
try {
builder.build();
fail();
} catch (IllegalStateException expected) {
if (omitIdentifiers) {
assertThat(expected).hasMessageThat().isNull();
} else {
assertThat(expected).hasMessageThat().contains("x31");
assertThat(expected).hasMessageThat().contains("x32");
assertThat(expected).hasMessageThat().contains("x33");
assertThat(expected).hasMessageThat().doesNotContain("x30");
}
}
}
}
|
Builder
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/common/geo/SpatialStrategyTests.java
|
{
"start": 801,
"end": 2726
}
|
class ____ extends ESTestCase {
public void testValidOrdinals() {
assertThat(SpatialStrategy.TERM.ordinal(), equalTo(0));
assertThat(SpatialStrategy.RECURSIVE.ordinal(), equalTo(1));
}
public void testwriteTo() throws Exception {
try (BytesStreamOutput out = new BytesStreamOutput()) {
SpatialStrategy.TERM.writeTo(out);
try (StreamInput in = out.bytes().streamInput()) {
assertThat(in.readVInt(), equalTo(0));
}
}
try (BytesStreamOutput out = new BytesStreamOutput()) {
SpatialStrategy.RECURSIVE.writeTo(out);
try (StreamInput in = out.bytes().streamInput()) {
assertThat(in.readVInt(), equalTo(1));
}
}
}
public void testReadFrom() throws Exception {
try (BytesStreamOutput out = new BytesStreamOutput()) {
out.writeVInt(0);
try (StreamInput in = out.bytes().streamInput()) {
assertThat(SpatialStrategy.readFromStream(in), equalTo(SpatialStrategy.TERM));
}
}
try (BytesStreamOutput out = new BytesStreamOutput()) {
out.writeVInt(1);
try (StreamInput in = out.bytes().streamInput()) {
assertThat(SpatialStrategy.readFromStream(in), equalTo(SpatialStrategy.RECURSIVE));
}
}
}
public void testInvalidReadFrom() throws Exception {
try (BytesStreamOutput out = new BytesStreamOutput()) {
out.writeVInt(randomIntBetween(2, Integer.MAX_VALUE));
try (StreamInput in = out.bytes().streamInput()) {
SpatialStrategy.readFromStream(in);
fail("Expected IOException");
} catch (IOException e) {
assertThat(e.getMessage(), containsString("Unknown SpatialStrategy ordinal ["));
}
}
}
}
|
SpatialStrategyTests
|
java
|
spring-projects__spring-data-jpa
|
spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/projections/ProjectionJoinIntegrationTests.java
|
{
"start": 2556,
"end": 2749
}
|
interface ____ extends CrudRepository<User, Integer> {
<T> T findById(int id, Class<T> projectionClass);
}
@Table(name = "ProjectionJoinIntegrationTests_User")
@Entity
static
|
UserRepository
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/converted/converter/ConvertedPrimitiveAttributeAsFunctionArgumentTest.java
|
{
"start": 3429,
"end": 3745
}
|
class ____ implements AttributeConverter<Integer, String> {
@Override
public String convertToDatabaseColumn(Integer integer) {
return integer.toString();
}
@Override
public Integer convertToEntityAttribute(String s) {
return Integer.parseInt( s );
}
}
@Converter
public static
|
IntToStringConverter
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/internal/object2darrays/Object2DArrays_assertEmpty_Test.java
|
{
"start": 1022,
"end": 1275
}
|
class ____ extends Object2DArraysBaseTest {
@Test
void should_delegate_to_Arrays2D() {
// WHEN
object2dArrays.assertEmpty(info, actual);
// THEN
verify(arrays2d).assertEmpty(info, failures, actual);
}
}
|
Object2DArrays_assertEmpty_Test
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/polardb2/PolarDB2DbTypeTest.java
|
{
"start": 360,
"end": 5309
}
|
class ____ extends TestCase {
private String jdbcUrl;
private String jdbcUrl1;
private String jdbcUrl2;
private String user;
private String password;
private String validateQuery;
private DruidDataSource dataSource;
protected void setUp() throws Exception {
jdbcUrl1 = "jdbc:polardb://a.b.c.d:5432/polardb";
jdbcUrl2 = "jdbc:polardb2://a.b.c.d:5432/polardb";
user = "polardb";
password = "polardb";
validateQuery = "select 1";
}
private void configDataSource() throws Exception {
dataSource.setUrl(jdbcUrl);
dataSource.setUsername(user);
dataSource.setPassword(password);
dataSource.setValidationQuery(validateQuery);
dataSource.setFilters("stat");
}
/**
* Init datasource without setting DbType and druid will get DbType
* from jdbc url prefix "jdbc:polardb2".
*/
public void testDefaultDbType() throws Exception {
dataSource = new DruidDataSource();
jdbcUrl = jdbcUrl2;
configDataSource();
dataSource.init();
Assert.assertTrue(JdbcConstants.POLARDB2.equals(DbType.of(dataSource.getDbType())));
Assert.assertTrue(JdbcConstants.POLARDB2_DRIVER.equals(dataSource.getDriverClassName()));
Connection conn = dataSource.getConnection();
PreparedStatement stmt = conn.prepareStatement(validateQuery);
ResultSet rs = stmt.executeQuery();
while (rs.next()) {
System.out.println("result: " + rs.getInt(1));
}
JdbcUtils.close(rs);
JdbcUtils.close(stmt);
JdbcUtils.close(conn);
dataSource.close();
}
/**
* Init datasource without setting DbType and druid will get DbType
* from jdbc url. url with prefix "jdbc:polardb" is recognized as PolarDB-Oracle 1.0
* for backward compatibility. If the user want to use SQL firewall in this
* case, set DbType to PolarDB-Oracle 2.0 explicitly.
*/
public void testSetDbType() throws Exception {
/*
* Case 1: set DbType and driver name after initializing data source.
* DbType could be changed after initializing, while driver name not.
*/
dataSource = new DruidDataSource();
// JDBC url in PolarDB-Oracle 1.0 format
jdbcUrl = jdbcUrl1;
configDataSource();
dataSource.init();
// Driver and DbType are set to PolarDB-Oracle 1.0 automatically
Assert.assertTrue(JdbcConstants.POLARDB.equals(DbType.of(dataSource.getDbType())));
Assert.assertTrue(JdbcConstants.POLARDB_DRIVER.equals(dataSource.getDriverClassName()));
boolean conn_failed = false;
try {
Connection conn = dataSource.getConnection(1000);
} catch (Exception e) {
// Fail to connect to PolarDB-Oracle 2.0 with PolarDB-Oracle 1.0 driver
conn_failed = true;
e.printStackTrace();
System.out.println("failed to connect to PolarDB-Oracle 2.0 with PolarDB-Oracle 1.0 DbType and driver");
}
Assert.assertTrue(conn_failed);
// Set new DbType with string
dataSource.setDbType("polardb2");
Assert.assertTrue(JdbcConstants.POLARDB2.equals(DbType.of(dataSource.getDbType())));
// Reset
dataSource.setDbType("polardb");
Assert.assertTrue(JdbcConstants.POLARDB.equals(DbType.of(dataSource.getDbType())));
// Set new DbType with const
dataSource.setDbType(DbType.polardb2);
Assert.assertTrue(JdbcConstants.POLARDB2.equals(DbType.of(dataSource.getDbType())));
// Failed to set driver name after init
// dataSource.setDriverClassName(JdbcConstants.POLARDB2_DRIVER);
Assert.assertTrue(JdbcConstants.POLARDB_DRIVER.equals(dataSource.getDriverClassName()));
dataSource.clone();
/*
* Case 2: set DbType and driver name after initializing data source.
*/
dataSource = new DruidDataSource();
// JDBC url in PolarDB-Oracle 1.0 format
jdbcUrl = jdbcUrl1;
configDataSource();
// Set DbType and driver to PolarDB-Oracle 2.0 before init data source
dataSource.setDbType(DbType.polardb2);
dataSource.setDriverClassName(JdbcConstants.POLARDB2_DRIVER);
dataSource.init();
Assert.assertTrue(JdbcConstants.POLARDB2.equals(DbType.of(dataSource.getDbType())));
Assert.assertTrue(JdbcConstants.POLARDB2_DRIVER.equals(dataSource.getDriverClassName()));
Connection conn = dataSource.getConnection();
PreparedStatement stmt = conn.prepareStatement(validateQuery);
ResultSet rs = stmt.executeQuery();
while (rs.next()) {
System.out.println("result: " + rs.getInt(1));
}
JdbcUtils.close(rs);
JdbcUtils.close(stmt);
JdbcUtils.close(conn);
dataSource.close();
}
}
|
PolarDB2DbTypeTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/GetPipelineResponseTests.java
|
{
"start": 598,
"end": 1773
}
|
class ____ extends AbstractWireSerializingTestCase<GetPipelineResponse> {
@Override
protected Writeable.Reader<GetPipelineResponse> instanceReader() {
return GetPipelineResponse::new;
}
@Override
protected GetPipelineResponse createTestInstance() {
final int numPipelines = randomIntBetween(1, 10);
final Map<String, BytesReference> map = Maps.newMapWithExpectedSize(numPipelines);
for (int i = 0; i < numPipelines; i++) {
final String name = randomAlphaOfLengthBetween(2, 10);
final BytesReference ref = new BytesArray(randomByteArrayOfLength(randomIntBetween(1, 16)));
map.put(name, ref);
}
return new GetPipelineResponse(map);
}
@Override
protected GetPipelineResponse mutateInstance(GetPipelineResponse instance) {
Map<String, BytesReference> map = Maps.newMapWithExpectedSize(instance.pipelines().size() + 1);
map.putAll(instance.pipelines());
map.put(randomAlphaOfLengthBetween(2, 10), new BytesArray(randomByteArrayOfLength(randomIntBetween(1, 16))));
return new GetPipelineResponse(map);
}
}
|
GetPipelineResponseTests
|
java
|
apache__camel
|
components/camel-grpc/src/main/java/org/apache/camel/component/grpc/server/GrpcRequestAbstractStreamObserver.java
|
{
"start": 1177,
"end": 1843
}
|
class ____ implements StreamObserver<Object> {
protected final GrpcEndpoint endpoint;
protected final GrpcConsumer consumer;
protected Exchange exchange;
protected StreamObserver<Object> responseObserver;
protected Map<String, Object> headers;
protected GrpcRequestAbstractStreamObserver(GrpcEndpoint endpoint, GrpcConsumer consumer,
StreamObserver<Object> responseObserver, Map<String, Object> headers) {
this.endpoint = endpoint;
this.consumer = consumer;
this.responseObserver = responseObserver;
this.headers = headers;
}
}
|
GrpcRequestAbstractStreamObserver
|
java
|
google__guava
|
android/guava-testlib/test/com/google/common/collect/testing/features/FeatureUtilTest.java
|
{
"start": 6568,
"end": 6962
}
|
class ____ {}
TesterRequirements requirements = buildTesterRequirements(Tester.class);
assertThat(requirements.getPresentFeatures()).containsExactly(IMPLIES_FOO, FOO);
assertThat(requirements.getAbsentFeatures()).containsExactly(IMPLIES_IMPLIES_FOO);
}
public void testBuildTesterRequirements_class_present_method_present() throws Exception {
@Require(IMPLIES_BAR)
|
Tester
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/PutQueryRuleAction.java
|
{
"start": 1410,
"end": 1674
}
|
class ____ {
public static final String NAME = "cluster:admin/xpack/query_rules/rule/put";
public static final ActionType<Response> INSTANCE = new ActionType<>(NAME);
private PutQueryRuleAction() {/* no instances */}
public static
|
PutQueryRuleAction
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/query/hql/ExistsSubqueryForeignKeyTest.java
|
{
"start": 1140,
"end": 2467
}
|
class ____ {
@Test
public void testWhereClause(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final Long result = session.createQuery(
"select count(*) from Document d join d.owner o "
+ "where exists(select p.id from Person p where p.id = o.id) group by o.id",
Long.class
).getSingleResult();
assertThat( result ).isEqualTo( 1L );
} );
}
@Test
public void testSelectCaseWhen(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final Tuple result = session.createQuery(
"select case when exists(select p.id from Person p where p.id = o.id) then 1 else 0 end,"
+ "count(*) from Document d join d.owner o group by o.id",
Tuple.class
).getSingleResult();
assertThat( result.get( 0, Integer.class ) ).isEqualTo( 1 );
assertThat( result.get( 1, Long.class ) ).isEqualTo( 1L );
} );
}
@BeforeAll
public void setUp(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final Person person1 = new Person( 1L, "person_1" );
session.persist( person1 );
session.persist( new Document( 1L, "doc_1", person1 ) );
} );
}
@AfterAll
public void tearDown(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncateMappedObjects();
}
@Entity(name = "Person")
static
|
ExistsSubqueryForeignKeyTest
|
java
|
spring-projects__spring-framework
|
spring-core-test/src/main/java/org/springframework/aot/agent/HintType.java
|
{
"start": 1173,
"end": 2171
}
|
enum ____ {
/**
* Reflection hint, as described by {@link org.springframework.aot.hint.ReflectionHints}.
*/
REFLECTION(ReflectionHints.class),
/**
* Resource pattern hint, as described by {@link org.springframework.aot.hint.ResourceHints#resourcePatternHints()}.
*/
RESOURCE_PATTERN(ResourcePatternHint.class),
/**
* Resource bundle hint, as described by {@link org.springframework.aot.hint.ResourceHints#resourceBundleHints()}.
*/
RESOURCE_BUNDLE(ResourceBundleHint.class),
/**
* Java serialization hint, as described by {@link org.springframework.aot.hint.JavaSerializationHint}.
*/
JAVA_SERIALIZATION(JavaSerializationHint.class),
/**
* JDK proxies hint, as described by {@link org.springframework.aot.hint.ProxyHints#jdkProxyHints()}.
*/
JDK_PROXIES(JdkProxyHint.class);
private final Class<?> hintClass;
HintType(Class<?> hintClass) {
this.hintClass = hintClass;
}
public String hintClassName() {
return this.hintClass.getSimpleName();
}
}
|
HintType
|
java
|
apache__maven
|
compat/maven-resolver-provider/src/main/java/org/apache/maven/repository/internal/DefaultVersionResolver.java
|
{
"start": 20185,
"end": 20648
}
|
class ____ {
final String version;
final String repoId;
final Class<?> repoClass;
Record(String version, ArtifactRepository repository) {
this.version = version;
if (repository != null) {
repoId = repository.getId();
repoClass = repository.getClass();
} else {
repoId = null;
repoClass = null;
}
}
}
}
|
Record
|
java
|
assertj__assertj-core
|
assertj-core/src/main/java/org/assertj/core/error/ShouldHaveCause.java
|
{
"start": 871,
"end": 3581
}
|
class ____ extends BasicErrorMessageFactory {
public static ErrorMessageFactory shouldHaveCause(Throwable actual, Throwable expectedCause) {
checkArgument(expectedCause != null, "expected cause should not be null");
// actual has no cause
if (actual.getCause() == null) return new ShouldHaveCause(expectedCause);
// same message => different type
if (Objects.equals(actual.getCause().getMessage(), expectedCause.getMessage()))
return new ShouldHaveCause(actual, expectedCause.getClass());
// same type => different message
if (Objects.equals(actual.getCause().getClass(), expectedCause.getClass()))
return new ShouldHaveCause(actual, expectedCause.getMessage());
return new ShouldHaveCause(actual, expectedCause);
}
public static ErrorMessageFactory shouldHaveCause(Throwable actualCause) {
return new BasicErrorMessageFactory("Expecting actual throwable to have a cause but it did not, actual was:%n%s",
actualCause);
}
private ShouldHaveCause(Throwable actual, Throwable expectedCause) {
super("%n" +
"Expecting a cause with type:%n" +
" %s%n" +
"and message:%n" +
" %s%n" +
"but type was:%n" +
" %s%n" +
"and message was:%n" +
" %s.%n%n" +
"Throwable that failed the check:%n" +
escapePercent(getStackTrace(actual)),
expectedCause.getClass().getName(), expectedCause.getMessage(),
actual.getCause().getClass().getName(), actual.getCause().getMessage());
}
private ShouldHaveCause(Throwable expectedCause) {
super("%n" +
"Expecting a cause with type:%n" +
" %s%n" +
"and message:%n" +
" %s%n" +
"but actualCause had no cause.",
expectedCause.getClass().getName(), expectedCause.getMessage());
}
private ShouldHaveCause(Throwable actual, Class<? extends Throwable> expectedCauseClass) {
super("%n" +
"Expecting a cause with type:%n" +
" %s%n" +
"but type was:%n" +
" %s.%n%n" +
"Throwable that failed the check:%n" +
escapePercent(getStackTrace(actual)),
expectedCauseClass.getName(), actual.getCause().getClass().getName());
}
private ShouldHaveCause(Throwable actual, String expectedCauseMessage) {
super("%n" +
"Expecting a cause with message:%n" +
" %s%n" +
"but message was:%n" +
" %s.%n%n" +
"Throwable that failed the check:%n" +
escapePercent(getStackTrace(actual)),
expectedCauseMessage, actual.getCause().getMessage());
}
}
|
ShouldHaveCause
|
java
|
spring-projects__spring-framework
|
spring-messaging/src/main/java/org/springframework/messaging/tcp/reactor/ReactorNettyTcpConnection.java
|
{
"start": 1187,
"end": 2546
}
|
class ____<P> implements TcpConnection<P> {
private final NettyInbound inbound;
private final NettyOutbound outbound;
private final ReactorNettyCodec<P> codec;
private final Sinks.Empty<Void> completionSink;
public ReactorNettyTcpConnection(NettyInbound inbound, NettyOutbound outbound,
ReactorNettyCodec<P> codec, Sinks.Empty<Void> completionSink) {
this.inbound = inbound;
this.outbound = outbound;
this.codec = codec;
this.completionSink = completionSink;
}
@Override
public CompletableFuture<Void> sendAsync(Message<P> message) {
ByteBuf byteBuf = this.outbound.alloc().buffer();
this.codec.encode(message, byteBuf);
return this.outbound.send(Mono.just(byteBuf))
.then()
.toFuture();
}
@Override
public void onReadInactivity(Runnable runnable, long inactivityDuration) {
this.inbound.withConnection(conn -> conn.onReadIdle(inactivityDuration, runnable));
}
@Override
public void onWriteInactivity(Runnable runnable, long inactivityDuration) {
this.inbound.withConnection(conn -> conn.onWriteIdle(inactivityDuration, runnable));
}
@Override
public void close() {
// Ignore result: concurrent attempts to complete are ok
this.completionSink.tryEmitEmpty();
}
@Override
public String toString() {
return "ReactorNettyTcpConnection[inbound=" + this.inbound + "]";
}
}
|
ReactorNettyTcpConnection
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/event/ResourceReleaseEvent.java
|
{
"start": 1037,
"end": 1366
}
|
class ____ extends ResourceEvent {
private final ContainerId container;
public ResourceReleaseEvent(LocalResourceRequest rsrc,
ContainerId container) {
super(rsrc, ResourceEventType.RELEASE);
this.container = container;
}
public ContainerId getContainer() {
return container;
}
}
|
ResourceReleaseEvent
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/oracle/create/OracleCreateIndexTest11.java
|
{
"start": 967,
"end": 6331
}
|
class ____ extends OracleTest {
public void test_0() throws Exception {
String sql = //
"CREATE INDEX sales_ix ON composite_sales(time_id, prod_id)\n" +
" STORAGE (INITIAL 1M MAXEXTENTS UNLIMITED)\n" +
" LOCAL\n" +
" (PARTITION q1_1998,\n" +
" PARTITION q2_1998,\n" +
" PARTITION q3_1998,\n" +
" PARTITION q4_1998,\n" +
" PARTITION q1_1999,\n" +
" PARTITION q2_1999,\n" +
" PARTITION q3_1999,\n" +
" PARTITION q4_1999,\n" +
" PARTITION q1_2000,\n" +
" PARTITION q2_2000\n" +
" (SUBPARTITION pq2001, SUBPARTITION pq2002, \n" +
" SUBPARTITION pq2003, SUBPARTITION pq2004,\n" +
" SUBPARTITION pq2005, SUBPARTITION pq2006, \n" +
" SUBPARTITION pq2007, SUBPARTITION pq2008),\n" +
" PARTITION q3_2000\n" +
" (SUBPARTITION c1 TABLESPACE tbs_02, \n" +
" SUBPARTITION c2 TABLESPACE tbs_02, \n" +
" SUBPARTITION c3 TABLESPACE tbs_02,\n" +
" SUBPARTITION c4 TABLESPACE tbs_02,\n" +
" SUBPARTITION c5 TABLESPACE tbs_02),\n" +
" PARTITION q4_2000\n" +
" (SUBPARTITION pq4001 TABLESPACE tbs_03, \n" +
" SUBPARTITION pq4002 TABLESPACE tbs_03,\n" +
" SUBPARTITION pq4003 TABLESPACE tbs_03,\n" +
" SUBPARTITION pq4004 TABLESPACE tbs_03)\n" +
");";
List<SQLStatement> statementList = SQLUtils.parseStatements(sql, JdbcConstants.ORACLE);
SQLStatement stmt = statementList.get(0);
print(statementList);
assertEquals(1, statementList.size());
SchemaStatVisitor visitor = SQLUtils.createSchemaStatVisitor(JdbcConstants.ORACLE);
stmt.accept(visitor);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
System.out.println("coditions : " + visitor.getConditions());
System.out.println("relationships : " + visitor.getRelationships());
System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals("CREATE INDEX sales_ix ON composite_sales(time_id, prod_id)\n" +
"STORAGE (\n" +
"\tINITIAL 1M\n" +
"\tMAXEXTENTS UNLIMITED\n" +
")\n" +
"LOCAL (\n" +
"\tPARTITION q1_1998,\n" +
"\tPARTITION q2_1998,\n" +
"\tPARTITION q3_1998,\n" +
"\tPARTITION q4_1998,\n" +
"\tPARTITION q1_1999,\n" +
"\tPARTITION q2_1999,\n" +
"\tPARTITION q3_1999,\n" +
"\tPARTITION q4_1999,\n" +
"\tPARTITION q1_2000,\n" +
"\tPARTITION q2_2000 (\n" +
"\t\tSUBPARTITION pq2001,\n" +
"\t\tSUBPARTITION pq2002,\n" +
"\t\tSUBPARTITION pq2003,\n" +
"\t\tSUBPARTITION pq2004,\n" +
"\t\tSUBPARTITION pq2005,\n" +
"\t\tSUBPARTITION pq2006,\n" +
"\t\tSUBPARTITION pq2007,\n" +
"\t\tSUBPARTITION pq2008\n" +
"\t),\n" +
"\tPARTITION q3_2000 (\n" +
"\t\tSUBPARTITION c1 TABLESPACE tbs_02,\n" +
"\t\tSUBPARTITION c2 TABLESPACE tbs_02,\n" +
"\t\tSUBPARTITION c3 TABLESPACE tbs_02,\n" +
"\t\tSUBPARTITION c4 TABLESPACE tbs_02,\n" +
"\t\tSUBPARTITION c5 TABLESPACE tbs_02\n" +
"\t),\n" +
"\tPARTITION q4_2000 (\n" +
"\t\tSUBPARTITION pq4001 TABLESPACE tbs_03,\n" +
"\t\tSUBPARTITION pq4002 TABLESPACE tbs_03,\n" +
"\t\tSUBPARTITION pq4003 TABLESPACE tbs_03,\n" +
"\t\tSUBPARTITION pq4004 TABLESPACE tbs_03\n" +
"\t)\n" +
");",
SQLUtils.toSQLString(stmt, JdbcConstants.ORACLE));
assertEquals(1, visitor.getTables().size());
assertTrue(visitor.getTables().containsKey(new TableStat.Name("composite_sales")));
assertEquals(2, visitor.getColumns().size());
// assertTrue(visitor.getColumns().contains(new TableStat.Column("xwarehouses", "sales_rep_id")));
// assertTrue(visitor.getColumns().contains(new TableStat.Column("pivot_table", "YEAR")));
// assertTrue(visitor.getColumns().contains(new TableStat.Column("pivot_table", "order_mode")));
}
}
|
OracleCreateIndexTest11
|
java
|
apache__commons-lang
|
src/main/java/org/apache/commons/lang3/time/GmtTimeZone.java
|
{
"start": 1003,
"end": 3459
}
|
class ____ extends TimeZone {
private static final int MILLISECONDS_PER_MINUTE = 60 * 1000;
private static final int MINUTES_PER_HOUR = 60;
private static final int HOURS_PER_DAY = 24;
// Serializable!
static final long serialVersionUID = 1L;
private static StringBuilder twoDigits(final StringBuilder sb, final int n) {
return sb.append((char) ('0' + n / 10)).append((char) ('0' + n % 10));
}
private final int offset;
private final String zoneId;
GmtTimeZone(final boolean negate, final int hours, final int minutes) {
if (hours >= HOURS_PER_DAY) {
throw new IllegalArgumentException(hours + " hours out of range");
}
if (minutes >= MINUTES_PER_HOUR) {
throw new IllegalArgumentException(minutes + " minutes out of range");
}
final int milliseconds = (minutes + hours * MINUTES_PER_HOUR) * MILLISECONDS_PER_MINUTE;
offset = negate ? -milliseconds : milliseconds;
// @formatter:off
zoneId = twoDigits(twoDigits(new StringBuilder(9)
.append(TimeZones.GMT_ID)
.append(negate ? '-' : '+'), hours)
.append(':'), minutes)
.toString();
// @formatter:on
}
@Override
public boolean equals(final Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof GmtTimeZone)) {
return false;
}
final GmtTimeZone other = (GmtTimeZone) obj;
return offset == other.offset && Objects.equals(zoneId, other.zoneId);
}
@Override
public String getID() {
return zoneId;
}
@Override
public int getOffset(final int era, final int year, final int month, final int day, final int dayOfWeek, final int milliseconds) {
return offset;
}
@Override
public int getRawOffset() {
return offset;
}
@Override
public int hashCode() {
return Objects.hash(offset, zoneId);
}
@Override
public boolean inDaylightTime(final Date date) {
return false;
}
@Override
public void setRawOffset(final int offsetMillis) {
throw new UnsupportedOperationException();
}
@Override
public String toString() {
return "[GmtTimeZone id=\"" + zoneId + "\",offset=" + offset + ']';
}
@Override
public boolean useDaylightTime() {
return false;
}
}
|
GmtTimeZone
|
java
|
apache__maven
|
compat/maven-plugin-api/src/main/java/org/apache/maven/plugin/descriptor/Requirement.java
|
{
"start": 909,
"end": 1664
}
|
class ____ implements Cloneable {
private final String role;
private final String roleHint;
public Requirement(String role) {
this.role = role;
this.roleHint = null;
}
public Requirement(String role, String roleHint) {
this.role = role;
this.roleHint = roleHint;
}
public String getRole() {
return role;
}
public String getRoleHint() {
return roleHint;
}
/**
* Creates a shallow copy of this requirement.
*/
@Override
public Requirement clone() {
try {
return (Requirement) super.clone();
} catch (CloneNotSupportedException e) {
throw new UnsupportedOperationException(e);
}
}
}
|
Requirement
|
java
|
apache__kafka
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/ErrorLoggingCallback.java
|
{
"start": 1070,
"end": 2287
}
|
class ____ implements Callback {
private static final Logger log = LoggerFactory.getLogger(ErrorLoggingCallback.class);
private final String topic;
private final byte[] key;
private final int valueLength;
private final boolean logAsString;
private byte[] value;
public ErrorLoggingCallback(String topic, byte[] key, byte[] value, boolean logAsString) {
this.topic = topic;
this.key = key;
if (logAsString) {
this.value = value;
}
this.valueLength = value == null ? -1 : value.length;
this.logAsString = logAsString;
}
public void onCompletion(RecordMetadata metadata, Exception e) {
if (e != null) {
String keyString = (key == null) ? "null" :
logAsString ? new String(key, StandardCharsets.UTF_8) : key.length + " bytes";
String valueString = (valueLength == -1) ? "null" :
logAsString ? new String(value, StandardCharsets.UTF_8) : valueLength + " bytes";
log.error("Error when sending message to topic {} with key: {}, value: {} with error:",
topic, keyString, valueString, e);
}
}
}
|
ErrorLoggingCallback
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/DefaultCompletedCheckpointStoreUtils.java
|
{
"start": 1616,
"end": 6862
}
|
class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(DefaultCompletedCheckpointStoreUtils.class);
private DefaultCompletedCheckpointStoreUtils() {
// No-op.
}
/**
* Extracts maximum number of retained checkpoints configuration from the passed {@link
* Configuration}. The default value is used as a fallback if the passed value is a value larger
* than {@code 0}.
*
* @param config The configuration that is accessed.
* @param logger The {@link Logger} used for exposing the warning if the configured value is
* invalid.
* @return The maximum number of retained checkpoints based on the passed {@code Configuration}.
*/
public static int getMaximumNumberOfRetainedCheckpoints(Configuration config, Logger logger) {
final int maxNumberOfCheckpointsToRetain =
config.get(CheckpointingOptions.MAX_RETAINED_CHECKPOINTS);
if (maxNumberOfCheckpointsToRetain <= 0) {
// warning and use 1 as the default value if the setting in
// state.checkpoints.max-retained-checkpoints is not greater than 0.
logger.warn(
"The setting for '{} : {}' is invalid. Using default value of {}",
CheckpointingOptions.MAX_RETAINED_CHECKPOINTS.key(),
maxNumberOfCheckpointsToRetain,
CheckpointingOptions.MAX_RETAINED_CHECKPOINTS.defaultValue());
return CheckpointingOptions.MAX_RETAINED_CHECKPOINTS.defaultValue();
}
return maxNumberOfCheckpointsToRetain;
}
/**
* Fetch all {@link CompletedCheckpoint completed checkpoints} from an {@link StateHandleStore
* external store}. This method is intended for retrieving an initial state of {@link
* DefaultCompletedCheckpointStore}.
*
* @param checkpointStateHandleStore Completed checkpoints in external store.
* @param completedCheckpointStoreUtil Utilities for completed checkpoint store.
* @param <R> Type of {@link ResourceVersion}
* @return Immutable collection of {@link CompletedCheckpoint completed checkpoints}.
* @throws Exception If we're not able to fetch checkpoints for some reason.
*/
public static <R extends ResourceVersion<R>>
Collection<CompletedCheckpoint> retrieveCompletedCheckpoints(
StateHandleStore<CompletedCheckpoint, R> checkpointStateHandleStore,
CheckpointStoreUtil completedCheckpointStoreUtil)
throws Exception {
LOG.info("Recovering checkpoints from {}.", checkpointStateHandleStore);
// Get all there is first.
final List<Tuple2<RetrievableStateHandle<CompletedCheckpoint>, String>> initialCheckpoints =
checkpointStateHandleStore.getAllAndLock();
// Sort checkpoints by name.
initialCheckpoints.sort(Comparator.comparing(o -> o.f1));
final int numberOfInitialCheckpoints = initialCheckpoints.size();
LOG.info(
"Found {} checkpoints in {}.",
numberOfInitialCheckpoints,
checkpointStateHandleStore);
final List<CompletedCheckpoint> retrievedCheckpoints =
new ArrayList<>(numberOfInitialCheckpoints);
LOG.info("Trying to fetch {} checkpoints from storage.", numberOfInitialCheckpoints);
for (Tuple2<RetrievableStateHandle<CompletedCheckpoint>, String> checkpointStateHandle :
initialCheckpoints) {
retrievedCheckpoints.add(
checkNotNull(
retrieveCompletedCheckpoint(
completedCheckpointStoreUtil, checkpointStateHandle)));
}
return Collections.unmodifiableList(retrievedCheckpoints);
}
private static CompletedCheckpoint retrieveCompletedCheckpoint(
CheckpointStoreUtil completedCheckpointStoreUtil,
Tuple2<RetrievableStateHandle<CompletedCheckpoint>, String> stateHandle)
throws FlinkException {
final long checkpointId = completedCheckpointStoreUtil.nameToCheckpointID(stateHandle.f1);
LOG.info("Trying to retrieve checkpoint {}.", checkpointId);
try {
return stateHandle.f0.retrieveState();
} catch (ClassNotFoundException exception) {
throw new FlinkException(
String.format(
"Could not retrieve checkpoint %d from state handle under %s. This indicates that you are trying to recover from state written by an older Flink version which is not compatible. Try cleaning the state handle store.",
checkpointId, stateHandle.f1),
exception);
} catch (IOException exception) {
throw new FlinkException(
String.format(
"Could not retrieve checkpoint %d from state handle under %s. This indicates that the retrieved state handle is broken. Try cleaning the state handle store.",
checkpointId, stateHandle.f1),
exception);
}
}
}
|
DefaultCompletedCheckpointStoreUtils
|
java
|
bumptech__glide
|
library/test/src/test/java/com/bumptech/glide/load/resource/gif/GifFrameLoaderTest.java
|
{
"start": 1990,
"end": 13417
}
|
class ____ {
@Rule public TearDownGlide tearDownGlide = new TearDownGlide();
@Mock private GifFrameLoader.FrameCallback callback;
@Mock private GifDecoder gifDecoder;
@Mock private Handler handler;
@Mock private Transformation<Bitmap> transformation;
@Mock private RequestManager requestManager;
private GifFrameLoader loader;
private RequestBuilder<Bitmap> requestBuilder;
private Bitmap firstFrame;
@SuppressWarnings("unchecked")
@Before
public void setUp() {
MockitoAnnotations.initMocks(this);
when(handler.obtainMessage(anyInt(), isA(DelayTarget.class))).thenReturn(mock(Message.class));
firstFrame = Bitmap.createBitmap(100, 100, Bitmap.Config.ARGB_8888);
ByteBuffer byteBuffer = ByteBuffer.allocate(10);
when(gifDecoder.getData()).thenReturn(byteBuffer);
requestBuilder = mock(RequestBuilder.class, new ReturnsSelfAnswer());
loader = createGifFrameLoader(handler);
}
@NonNull
private GifFrameLoader createGifFrameLoader(Handler handler) {
Glide glide = getGlideSingleton();
GifFrameLoader result =
new GifFrameLoader(
glide.getBitmapPool(),
requestManager,
gifDecoder,
handler,
requestBuilder,
transformation,
firstFrame);
result.subscribe(callback);
return result;
}
private static Glide getGlideSingleton() {
return Glide.get(ApplicationProvider.getApplicationContext());
}
@SuppressWarnings("unchecked")
@Test
public void testSetFrameTransformationSetsTransformationOnRequestBuilder() {
verify(requestBuilder, times(2)).apply(isA(RequestOptions.class));
Transformation<Bitmap> transformation = mock(Transformation.class);
loader.setFrameTransformation(transformation, firstFrame);
verify(requestBuilder, times(3)).apply(isA(RequestOptions.class));
}
@Test(expected = NullPointerException.class)
public void testSetFrameTransformationThrowsIfGivenNullTransformation() {
loader.setFrameTransformation(null, null);
}
@Test
public void testReturnsSizeFromGifDecoderAndCurrentFrame() {
int decoderByteSize = 123456;
when(gifDecoder.getByteSize()).thenReturn(decoderByteSize);
assertThat(loader.getSize()).isEqualTo(decoderByteSize + Util.getBitmapByteSize(firstFrame));
}
@Test
public void testStartGetsNextFrameIfNotStartedAndWithNoLoadPending() {
verify(requestBuilder).into(aTarget());
}
@Test
public void testGetNextFrameIncrementsSignatureAndAdvancesDecoderBeforeStartingLoad() {
InOrder order = inOrder(gifDecoder, requestBuilder);
order.verify(gifDecoder).advance();
order.verify(requestBuilder).apply(isA(RequestOptions.class));
order.verify(requestBuilder).into(aTarget());
}
@Test
public void testGetCurrentFrameReturnsFirstFrameWHenNoLoadHasCompleted() {
assertThat(loader.getCurrentFrame()).isEqualTo(firstFrame);
}
@Test
public void testGetCurrentFrameReturnsCurrentBitmapAfterLoadHasCompleted() {
final Bitmap result = Bitmap.createBitmap(100, 200, Bitmap.Config.ARGB_8888);
DelayTarget target = mock(DelayTarget.class);
when(target.getResource()).thenReturn(result);
loader.onFrameReady(target);
assertEquals(result, loader.getCurrentFrame());
}
@Test
public void testStartDoesNotStartIfAlreadyRunning() {
loader.subscribe(mock(FrameCallback.class));
verify(requestBuilder, times(1)).into(aTarget());
}
@Test
public void testGetNextFrameDoesNotStartLoadIfLoaderIsNotRunning() {
verify(requestBuilder, times(1)).into(aTarget());
loader.unsubscribe(callback);
loader.onFrameReady(mock(DelayTarget.class));
verify(requestBuilder, times(1)).into(aTarget());
}
@Test
public void testGetNextFrameDoesNotStartLoadIfLoadIsInProgress() {
loader.unsubscribe(callback);
loader.subscribe(callback);
verify(requestBuilder, times(1)).into(aTarget());
}
@Test
public void testGetNextFrameDoesStartLoadIfRestartedAndNoLoadIsInProgress() {
loader.unsubscribe(callback);
loader.onFrameReady(mock(DelayTarget.class));
loader.subscribe(callback);
verify(requestBuilder, times(2)).into(aTarget());
}
@Test
public void testGetNextFrameDoesStartLoadAfterLoadCompletesIfStarted() {
loader.onFrameReady(mock(DelayTarget.class));
verify(requestBuilder, times(2)).into(aTarget());
}
@Test
public void testOnFrameReadyClearsPreviousFrame() {
// Force the loader to create a real Handler.
loader = createGifFrameLoader(null);
DelayTarget previous = newDelayTarget();
Request previousRequest = mock(Request.class);
previous.setRequest(previousRequest);
previous.onResourceReady(
Bitmap.createBitmap(100, 100, Bitmap.Config.ARGB_8888), /* transition= */ null);
DelayTarget current = mock(DelayTarget.class);
when(current.getResource()).thenReturn(Bitmap.createBitmap(100, 100, Bitmap.Config.RGB_565));
loader.onFrameReady(previous);
loader.onFrameReady(current);
verify(requestManager).clear(eq(previous));
}
@Test
public void testOnFrameReadyWithNullResourceDoesNotClearPreviousFrame() {
// Force the loader to create a real Handler by passing null.
loader = createGifFrameLoader(null);
DelayTarget previous = newDelayTarget();
Request previousRequest = mock(Request.class);
previous.setRequest(previousRequest);
previous.onResourceReady(Bitmap.createBitmap(100, 100, Bitmap.Config.ARGB_8888), null);
DelayTarget current = mock(DelayTarget.class);
when(current.getResource()).thenReturn(null);
loader.onFrameReady(previous);
loader.onFrameReady(current);
verify(previousRequest, never()).clear();
}
@Test
public void testDelayTargetSendsMessageWithHandlerDelayed() {
long targetTime = 1234;
DelayTarget delayTarget = new DelayTarget(handler, 1, targetTime);
delayTarget.onResourceReady(
Bitmap.createBitmap(100, 100, Bitmap.Config.ARGB_8888), null
/*glideAnimation*/ );
verify(handler).sendMessageAtTime(isA(Message.class), eq(targetTime));
}
@Test
public void testDelayTargetSetsResourceOnResourceReady() {
DelayTarget delayTarget = new DelayTarget(handler, 1, 1);
Bitmap expected = Bitmap.createBitmap(100, 200, Bitmap.Config.RGB_565);
delayTarget.onResourceReady(expected, null /*glideAnimation*/);
assertEquals(expected, delayTarget.getResource());
}
@Test
public void testClearsCompletedLoadOnFrameReadyIfCleared() {
// Force the loader to create a real Handler by passing null;
loader = createGifFrameLoader(null);
loader.clear();
DelayTarget delayTarget = newDelayTarget();
Request request = mock(Request.class);
delayTarget.setRequest(request);
loader.onFrameReady(delayTarget);
verify(requestManager).clear(eq(delayTarget));
}
@Test
public void
testDoesNotReturnResourceForCompletedFrameInGetCurrentFrameIfLoadCompletesWhileCleared() {
loader.clear();
DelayTarget delayTarget = mock(DelayTarget.class);
Bitmap bitmap = Bitmap.createBitmap(100, 100, Bitmap.Config.ARGB_8888);
when(delayTarget.getResource()).thenReturn(bitmap);
loader.onFrameReady(delayTarget);
assertNull(loader.getCurrentFrame());
}
@Test
public void onFrameReady_whenNotRunning_doesNotClearPreviouslyLoadedImage() {
loader = createGifFrameLoader(/* handler= */ null);
DelayTarget loaded = mock(DelayTarget.class);
when(loaded.getResource()).thenReturn(Bitmap.createBitmap(100, 100, Bitmap.Config.ARGB_8888));
loader.onFrameReady(loaded);
loader.unsubscribe(callback);
DelayTarget nextFrame = mock(DelayTarget.class);
when(nextFrame.getResource())
.thenReturn(Bitmap.createBitmap(100, 100, Bitmap.Config.ARGB_8888));
loader.onFrameReady(nextFrame);
verify(requestManager, never()).clear(loaded);
}
@Test
public void onFrameReady_whenNotRunning_clearsPendingFrameOnClear() {
  // clear() must release both the displayed frame and any frame pending while stopped.
  loader = createGifFrameLoader(/* handler= */ null);
  DelayTarget loaded = mock(DelayTarget.class);
  when(loaded.getResource()).thenReturn(Bitmap.createBitmap(100, 100, Bitmap.Config.ARGB_8888));
  loader.onFrameReady(loaded);
  loader.unsubscribe(callback);
  DelayTarget nextFrame = mock(DelayTarget.class);
  when(nextFrame.getResource())
      .thenReturn(Bitmap.createBitmap(100, 100, Bitmap.Config.ARGB_8888));
  loader.onFrameReady(nextFrame);
  loader.clear();
  verify(requestManager).clear(loaded);
  verify(requestManager).clear(nextFrame);
}
@Test
public void onFrameReady_whenNotRunning_clearsOldFrameOnStart() {
  // Restarting (subscribe) after a pending frame arrived must clear the stale old frame.
  loader = createGifFrameLoader(/* handler= */ null);
  DelayTarget loaded = mock(DelayTarget.class);
  when(loaded.getResource()).thenReturn(Bitmap.createBitmap(100, 100, Bitmap.Config.ARGB_8888));
  loader.onFrameReady(loaded);
  loader.unsubscribe(callback);
  DelayTarget nextFrame = mock(DelayTarget.class);
  when(nextFrame.getResource())
      .thenReturn(Bitmap.createBitmap(100, 100, Bitmap.Config.ARGB_8888));
  loader.onFrameReady(nextFrame);
  loader.subscribe(callback);
  verify(requestManager).clear(loaded);
}
@Test
public void onFrameReady_whenNotRunning_callsFrameReadyWithNewFrameOnStart() {
  // On restart, the pending frame becomes current and the callback fires a second time.
  loader = createGifFrameLoader(/* handler= */ null);
  DelayTarget loaded = mock(DelayTarget.class);
  when(loaded.getResource()).thenReturn(Bitmap.createBitmap(100, 100, Bitmap.Config.ARGB_8888));
  loader.onFrameReady(loaded);
  loader.unsubscribe(callback);
  DelayTarget nextFrame = mock(DelayTarget.class);
  Bitmap expected = Bitmap.createBitmap(200, 200, Bitmap.Config.ARGB_8888);
  when(nextFrame.getResource()).thenReturn(expected);
  loader.onFrameReady(nextFrame);
  verify(callback, times(1)).onFrameReady();
  loader.subscribe(callback);
  verify(callback, times(2)).onFrameReady();
  assertThat(loader.getCurrentFrame()).isEqualTo(expected);
}
@Test
public void onFrameReady_whenInvisible_setVisibleLater() {
  loader = createGifFrameLoader(/* handler= */ null);
  // The target is invisible at this point.
  loader.unsubscribe(callback);
  loader.setNextStartFromFirstFrame();
  DelayTarget loaded = mock(DelayTarget.class);
  when(loaded.getResource()).thenReturn(Bitmap.createBitmap(100, 100, Bitmap.Config.ARGB_8888));
  loader.onFrameReady(loaded);
  // NOTE(review): this test has no verify/assert; it only checks that subscribing after an
  // invisible onFrameReady does not throw. Consider adding an explicit verification.
  loader.subscribe(callback);
}
@Test
public void startFromFirstFrame_withPendingFrame_clearsPendingFrame() {
  // setNextStartFromFirstFrame() must discard a pending frame so restart begins at frame 0.
  loader = createGifFrameLoader(/* handler= */ null);
  DelayTarget loaded = mock(DelayTarget.class);
  when(loaded.getResource()).thenReturn(Bitmap.createBitmap(100, 100, Bitmap.Config.ARGB_8888));
  loader.onFrameReady(loaded);
  loader.unsubscribe(callback);
  DelayTarget nextFrame = mock(DelayTarget.class);
  Bitmap expected = Bitmap.createBitmap(200, 200, Bitmap.Config.ARGB_8888);
  when(nextFrame.getResource()).thenReturn(expected);
  loader.onFrameReady(nextFrame);
  loader.setNextStartFromFirstFrame();
  verify(requestManager).clear(nextFrame);
  loader.subscribe(callback);
  verify(callback, times(1)).onFrameReady();
}
// Convenience factory: a DelayTarget for frame index 0 with an immediate target time.
private DelayTarget newDelayTarget() {
  return new DelayTarget(handler, /* index= */ 0, /* targetTime= */ 0);
}
// Typed Mockito matcher for any Target<Bitmap>; the unchecked cast is confined here.
@SuppressWarnings("unchecked")
private static Target<Bitmap> aTarget() {
  return isA(Target.class);
}
}
|
GifFrameLoaderTest
|
java
|
apache__camel
|
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/UndertowComponentBuilderFactory.java
|
{
"start": 1929,
"end": 10510
}
|
/**
 * Builder (DSL) for configuring the Undertow component: each default method stores one
 * component option via {@code doSetProperty} and returns {@code this} for chaining.
 * NOTE(review): this file appears auto-generated; manual edits may be overwritten.
 */
interface ____ extends ComponentBuilder<UndertowComponent> {
    /**
     * Allows for bridging the consumer to the Camel routing Error Handler,
     * which mean any exceptions (if possible) occurred while the Camel
     * consumer is trying to pickup incoming messages, or the likes, will
     * now be processed as a message and handled by the routing Error
     * Handler. Important: This is only possible if the 3rd party component
     * allows Camel to be alerted if an exception was thrown. Some
     * components handle this internally only, and therefore
     * bridgeErrorHandler is not possible. In other situations we may
     * improve the Camel component to hook into the 3rd party component and
     * make this possible for future releases. By default the consumer will
     * use the org.apache.camel.spi.ExceptionHandler to deal with
     * exceptions, that will be logged at WARN or ERROR level and ignored.
     *
     * The option is a: <code>boolean</code> type.
     *
     * Default: false
     * Group: consumer
     *
     * @param bridgeErrorHandler the value to set
     * @return the dsl builder
     */
    default UndertowComponentBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
        doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
        return this;
    }
    /**
     * If enabled and an Exchange failed processing on the consumer side the
     * response's body won't contain the exception's stack trace.
     *
     * The option is a: <code>boolean</code> type.
     *
     * Default: false
     * Group: consumer
     *
     * @param muteException the value to set
     * @return the dsl builder
     */
    default UndertowComponentBuilder muteException(boolean muteException) {
        doSetProperty("muteException", muteException);
        return this;
    }
    /**
     * Whether the producer should be started lazy (on the first message).
     * By starting lazy you can use this to allow CamelContext and routes to
     * startup in situations where a producer may otherwise fail during
     * starting and cause the route to fail being started. By deferring this
     * startup to be lazy then the startup failure can be handled during
     * routing messages via Camel's routing error handlers. Beware that when
     * the first message is processed then creating and starting the
     * producer may take a little time and prolong the total processing time
     * of the processing.
     *
     * The option is a: <code>boolean</code> type.
     *
     * Default: false
     * Group: producer
     *
     * @param lazyStartProducer the value to set
     * @return the dsl builder
     */
    default UndertowComponentBuilder lazyStartProducer(boolean lazyStartProducer) {
        doSetProperty("lazyStartProducer", lazyStartProducer);
        return this;
    }
    /**
     * Whether autowiring is enabled. This is used for automatic autowiring
     * options (the option must be marked as autowired) by looking up in the
     * registry to find if there is a single instance of matching type,
     * which then gets configured on the component. This can be used for
     * automatic configuring JDBC data sources, JMS connection factories,
     * AWS Clients, etc.
     *
     * The option is a: <code>boolean</code> type.
     *
     * Default: true
     * Group: advanced
     *
     * @param autowiredEnabled the value to set
     * @return the dsl builder
     */
    default UndertowComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
        doSetProperty("autowiredEnabled", autowiredEnabled);
        return this;
    }
    /**
     * To configure common options, such as thread pools.
     *
     * The option is a:
     * <code>org.apache.camel.component.undertow.UndertowHostOptions</code> type.
     *
     * Group: advanced
     *
     * @param hostOptions the value to set
     * @return the dsl builder
     */
    default UndertowComponentBuilder hostOptions(org.apache.camel.component.undertow.UndertowHostOptions hostOptions) {
        doSetProperty("hostOptions", hostOptions);
        return this;
    }
    /**
     * To use a custom HttpBinding to control the mapping between Camel
     * message and HttpClient.
     *
     * The option is a:
     * <code>org.apache.camel.component.undertow.UndertowHttpBinding</code> type.
     *
     * Group: advanced
     *
     * @param undertowHttpBinding the value to set
     * @return the dsl builder
     */
    default UndertowComponentBuilder undertowHttpBinding(org.apache.camel.component.undertow.UndertowHttpBinding undertowHttpBinding) {
        doSetProperty("undertowHttpBinding", undertowHttpBinding);
        return this;
    }
    /**
     * Configuration used by UndertowSecurityProvider. Comma separated list
     * of allowed roles.
     *
     * The option is a: <code>java.lang.String</code> type.
     *
     * Group: security
     *
     * @param allowedRoles the value to set
     * @return the dsl builder
     */
    default UndertowComponentBuilder allowedRoles(java.lang.String allowedRoles) {
        doSetProperty("allowedRoles", allowedRoles);
        return this;
    }
    /**
     * Configuration used by UndertowSecurityProvider. Security
     * configuration object for use from UndertowSecurityProvider.
     * Configuration is UndertowSecurityProvider specific. Each provider
     * decides, whether it accepts configuration.
     *
     * The option is a: <code>java.lang.Object</code> type.
     *
     * Group: security
     *
     * @param securityConfiguration the value to set
     * @return the dsl builder
     */
    default UndertowComponentBuilder securityConfiguration(java.lang.Object securityConfiguration) {
        doSetProperty("securityConfiguration", securityConfiguration);
        return this;
    }
    /**
     * Security provider allows plug in the provider, which will be used to
     * secure requests. SPI approach could be used too (component then finds
     * security provider using SPI).
     *
     * The option is a:
     * <code>org.apache.camel.component.undertow.spi.UndertowSecurityProvider</code> type.
     *
     * Group: security
     *
     * @param securityProvider the value to set
     * @return the dsl builder
     */
    default UndertowComponentBuilder securityProvider(org.apache.camel.component.undertow.spi.UndertowSecurityProvider securityProvider) {
        doSetProperty("securityProvider", securityProvider);
        return this;
    }
    /**
     * To configure security using SSLContextParameters.
     *
     * The option is a:
     * <code>org.apache.camel.support.jsse.SSLContextParameters</code> type.
     *
     * Group: security
     *
     * @param sslContextParameters the value to set
     * @return the dsl builder
     */
    default UndertowComponentBuilder sslContextParameters(org.apache.camel.support.jsse.SSLContextParameters sslContextParameters) {
        doSetProperty("sslContextParameters", sslContextParameters);
        return this;
    }
    /**
     * Enable usage of global SSL context parameters.
     *
     * The option is a: <code>boolean</code> type.
     *
     * Default: false
     * Group: security
     *
     * @param useGlobalSslContextParameters the value to set
     * @return the dsl builder
     */
    default UndertowComponentBuilder useGlobalSslContextParameters(boolean useGlobalSslContextParameters) {
        doSetProperty("useGlobalSslContextParameters", useGlobalSslContextParameters);
        return this;
    }
}
|
UndertowComponentBuilder
|
java
|
micronaut-projects__micronaut-core
|
http-server-tck/src/main/java/io/micronaut/http/server/tck/tests/constraintshandler/ControllerConstraintHandlerTest.java
|
{
"start": 2112,
"end": 7336
}
|
class ____ {
public static final String SPEC_NAME = "ControllerConstraintHandlerTest";
// Shared assertion: expects HTTP 418 (I'm a teapot) with a body that still contains the
// submitted "secret"/"password" text, as produced by the custom on-error handler routes.
private static final HttpResponseAssertion TEAPOT_ASSERTION = HttpResponseAssertion.builder()
    .status(HttpStatus.I_AM_A_TEAPOT)
    .assertResponse(response -> {
        Optional<String> json = response.getBody(Argument.of(String.class));
        assertTrue(json.isPresent());
        assertTrue(json.get().contains("secret"));
        assertTrue(json.get().contains("password"));
    })
    .build();
@Test
void testPojoWithNullable() throws IOException {
    // Valid payload passes; invalid/blank usernames trigger constraint violations, and the
    // on-error-method routes answer with the teapot response instead of 400.
    asserts(SPEC_NAME,
        HttpRequest.POST("/constraints-via-handler", "{\"username\":\"tim@micronaut.example\",\"password\":\"secret\"}"),
        (server, request) -> AssertionUtils.assertDoesNotThrow(server, request, HttpResponseAssertion.builder()
            .status(HttpStatus.OK)
            .build()));
    asserts(SPEC_NAME,
        HttpRequest.POST("/constraints-via-handler/with-at-nullable", "{\"username\":\"invalidemail\",\"password\":\"secret\"}"),
        (server, request) -> AssertionUtils.assertThrows(server, request, constraintAssertion("must be a well-formed email address")));
    asserts(SPEC_NAME,
        HttpRequest.POST("/constraints-via-handler/with-at-nullable", "{\"username\":\"\",\"password\":\"secret\"}"),
        (server, request) -> AssertionUtils.assertThrows(server, request, constraintAssertion("must not be blank\"")));
    asserts(SPEC_NAME,
        HttpRequest.POST("/constraints-via-on-error-method/with-at-nullable", "{\"username\":\"\",\"password\":\"secret\"}"),
        (server, request) -> AssertionUtils.assertThrows(server, request, TEAPOT_ASSERTION));
    asserts(SPEC_NAME,
        HttpRequest.POST("/constraints-via-on-error-method/with-at-nullable", "{\"password\":\"secret\"}"),
        (server, request) -> AssertionUtils.assertThrows(server, request, TEAPOT_ASSERTION));
}
@Test
void testWithPojoWithoutAnnotations() throws IOException {
    // Same checks against the base routes (no @Nullable/@NonNull variant in the path).
    // NOTE(review): the first two asserts are identical; possibly one was meant to differ.
    asserts(SPEC_NAME,
        HttpRequest.POST("/constraints-via-handler", "{\"username\":\"invalidemail\",\"password\":\"secret\"}"),
        (server, request) -> AssertionUtils.assertThrows(server, request, constraintAssertion("must be a well-formed email address")));
    asserts(SPEC_NAME,
        HttpRequest.POST("/constraints-via-handler", "{\"username\":\"invalidemail\",\"password\":\"secret\"}"),
        (server, request) -> AssertionUtils.assertThrows(server, request, constraintAssertion("must be a well-formed email address")));
    asserts(SPEC_NAME,
        HttpRequest.POST("/constraints-via-handler", "{\"username\":\"\",\"password\":\"secret\"}"),
        (server, request) -> AssertionUtils.assertThrows(server, request, constraintAssertion("must not be blank\"")));
    asserts(SPEC_NAME,
        HttpRequest.POST("/constraints-via-on-error-method", "{\"username\":\"\",\"password\":\"secret\"}"),
        (server, request) -> AssertionUtils.assertThrows(server, request, TEAPOT_ASSERTION));
    asserts(SPEC_NAME,
        HttpRequest.POST("/constraints-via-on-error-method", "{\"password\":\"secret\"}"),
        (server, request) -> AssertionUtils.assertThrows(server, request, TEAPOT_ASSERTION));
}
@Test
void testPojoWithNonNullAnnotation() throws IOException {
    // Same checks against the /with-non-null routes.
    asserts(SPEC_NAME,
        HttpRequest.POST("/constraints-via-handler/with-non-null", "{\"username\":\"invalidemail\",\"password\":\"secret\"}"),
        (server, request) -> AssertionUtils.assertThrows(server, request, constraintAssertion("must be a well-formed email address")));
    asserts(SPEC_NAME,
        HttpRequest.POST("/constraints-via-handler/with-non-null", "{\"username\":\"\",\"password\":\"secret\"}"),
        (server, request) -> AssertionUtils.assertThrows(server, request, constraintAssertion("must not be blank\"")));
    asserts(SPEC_NAME,
        HttpRequest.POST("/constraints-via-on-error-method/with-non-null", "{\"username\":\"\",\"password\":\"secret\"}"),
        (server, request) -> AssertionUtils.assertThrows(server, request, TEAPOT_ASSERTION));
    asserts(SPEC_NAME,
        HttpRequest.POST("/constraints-via-on-error-method/with-non-null", "{\"password\":\"secret\"}"),
        (server, request) -> AssertionUtils.assertThrows(server, request, TEAPOT_ASSERTION));
}
// Builds an assertion for a 400 BAD_REQUEST whose body contains the given violation message.
private static HttpResponseAssertion constraintAssertion(String expectedMessage) {
    return HttpResponseAssertion.builder()
        .status(HttpStatus.BAD_REQUEST)
        .assertResponse(response -> {
            Optional<String> json = response.getBody(Argument.of(String.class));
            assertTrue(json.isPresent(), "response.getBody(Argument.of(String.class)) should be present");
            assertTrue(json.get().contains(expectedMessage), "Body '" + json.get() + "' should contain '" + expectedMessage + "'");
        }).build();
}
@Controller("/constraints-via-handler")
@Requires(property = "spec.name", value = SPEC_NAME)
static
|
ControllerConstraintHandlerTest
|
java
|
quarkusio__quarkus
|
core/deployment/src/main/java/io/quarkus/deployment/builditem/ShutdownContextBuildItem.java
|
{
"start": 298,
"end": 857
}
|
/**
 * Build item that represents the runtime {@link ShutdownContext} as a bytecode-recorder
 * proxy (keyed by the ShutdownContext class name). The add*ShutdownTask methods are
 * deliberately unusable at deployment time and always throw.
 */
class ____ extends SimpleBuildItem
        implements ShutdownContext, BytecodeRecorderImpl.ReturnedProxy {
    @Override
    public String __returned$proxy$key() {
        // Key under which the recorder resolves the real runtime value.
        return ShutdownContext.class.getName();
    }

    @Override
    public boolean __static$$init() {
        return true;
    }

    @Override
    public void addShutdownTask(Runnable runnable) {
        // Deployment-time proxy: tasks can only be registered on the real runtime context.
        throw new IllegalStateException();
    }

    @Override
    public void addLastShutdownTask(Runnable runnable) {
        throw new IllegalStateException();
    }
}
|
ShutdownContextBuildItem
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/boot/models/annotations/internal/CollectionTypeRegistrationAnnotation.java
|
{
"start": 662,
"end": 2876
}
|
/**
 * Concrete {@link CollectionTypeRegistration} implementation used by the boot-model layer.
 * Instances can be created empty (for XML/dynamic use), from a JDK annotation, or from a
 * Jandex-style attribute map.
 */
class ____ implements CollectionTypeRegistration {
	private org.hibernate.metamodel.CollectionClassification classification;
	private java.lang.Class<? extends org.hibernate.usertype.UserCollectionType> type;
	private org.hibernate.annotations.Parameter[] parameters;

	/**
	 * Used in creating dynamic annotation instances (e.g. from XML)
	 */
	public CollectionTypeRegistrationAnnotation(ModelsContext modelContext) {
		// classification/type stay null until explicitly set; parameters default to empty.
		this.parameters = new org.hibernate.annotations.Parameter[0];
	}

	/**
	 * Used in creating annotation instances from JDK variant
	 */
	public CollectionTypeRegistrationAnnotation(
			CollectionTypeRegistration annotation,
			ModelsContext modelContext) {
		this.classification = annotation.classification();
		this.type = annotation.type();
		this.parameters = extractJdkValue( annotation, COLLECTION_TYPE_REGISTRATION, "parameters", modelContext );
	}

	/**
	 * Used in creating annotation instances from Jandex variant
	 */
	@SuppressWarnings("unchecked") // attribute map stores the raw Class; cast is the established contract
	public CollectionTypeRegistrationAnnotation(
			Map<String, Object> attributeValues,
			ModelsContext modelContext) {
		this.classification = (org.hibernate.metamodel.CollectionClassification) attributeValues.get( "classification" );
		this.type = (Class<? extends org.hibernate.usertype.UserCollectionType>) attributeValues.get( "type" );
		this.parameters = (org.hibernate.annotations.Parameter[]) attributeValues.get( "parameters" );
	}

	@Override
	public Class<? extends Annotation> annotationType() {
		return CollectionTypeRegistration.class;
	}

	@Override
	public org.hibernate.metamodel.CollectionClassification classification() {
		return classification;
	}

	public void classification(org.hibernate.metamodel.CollectionClassification value) {
		this.classification = value;
	}

	@Override
	public java.lang.Class<? extends org.hibernate.usertype.UserCollectionType> type() {
		return type;
	}

	public void type(java.lang.Class<? extends org.hibernate.usertype.UserCollectionType> value) {
		this.type = value;
	}

	@Override
	public org.hibernate.annotations.Parameter[] parameters() {
		return parameters;
	}

	public void parameters(org.hibernate.annotations.Parameter[] value) {
		this.parameters = value;
	}
}
|
CollectionTypeRegistrationAnnotation
|
java
|
apache__camel
|
core/camel-core-reifier/src/main/java/org/apache/camel/reifier/ToDynamicReifier.java
|
{
"start": 1429,
"end": 4470
}
|
/**
 * Reifier turning a {@code toD} (dynamic to) definition into a {@link SendDynamicProcessor}.
 * The endpoint URI may come from an endpoint-producer builder or from the raw definition URI,
 * and is evaluated per-exchange via an {@link Expression}.
 */
class ____<T extends ToDynamicDefinition> extends ProcessorReifier<T> {
    public ToDynamicReifier(Route route, ProcessorDefinition<?> definition) {
        super(route, (T) definition);
    }

    @Override
    public Processor createProcessor() throws Exception {
        String uri;
        Expression exp;
        // Prefer a type-safe endpoint builder when configured; otherwise require a non-empty URI.
        if (definition.getEndpointProducerBuilder() != null) {
            uri = definition.getEndpointProducerBuilder().getRawUri();
            exp = definition.getEndpointProducerBuilder().expr(camelContext);
        } else {
            uri = StringHelper.notEmpty(definition.getUri(), "uri", this);
            exp = createExpression(uri);
        }
        // route templates should pre parse uri as they have dynamic values as part of their template parameters
        RouteDefinition rd = ProcessorDefinitionHelper.getRoute(definition);
        if (rd != null && rd.isTemplate() != null && rd.isTemplate()) {
            uri = EndpointHelper.resolveEndpointUriPropertyPlaceholders(camelContext, uri);
        }
        SendDynamicProcessor processor = new SendDynamicProcessor(uri, exp);
        processor.setDisabled(isDisabled(camelContext, definition));
        processor.setCamelContext(camelContext);
        processor.setPattern(parse(ExchangePattern.class, definition.getPattern()));
        processor.setVariableSend(parseString(definition.getVariableSend()));
        processor.setVariableReceive(parseString(definition.getVariableReceive()));
        Integer num = parseInt(definition.getCacheSize());
        if (num != null) {
            processor.setCacheSize(num);
        }
        // The remaining options only override the processor defaults when explicitly configured.
        if (definition.getIgnoreInvalidEndpoint() != null) {
            processor.setIgnoreInvalidEndpoint(parseBoolean(definition.getIgnoreInvalidEndpoint(), false));
        }
        if (definition.getAllowOptimisedComponents() != null) {
            processor.setAllowOptimisedComponents(parseBoolean(definition.getAllowOptimisedComponents(), true));
        }
        if (definition.getAutoStartComponents() != null) {
            processor.setAutoStartupComponents(parseBoolean(definition.getAutoStartComponents(), true));
        }
        return processor;
    }

    /**
     * Creates the per-exchange expression for the URI. A "language:lang:expr" prefix selects an
     * explicit language; otherwise "simple" is used when the URI contains simple functions,
     * falling back to "constant".
     */
    protected Expression createExpression(String uri) {
        // make sure to parse property placeholders
        uri = EndpointHelper.resolveEndpointUriPropertyPlaceholders(camelContext, uri);
        // we use simple/constant language by default, but you can configure a different language
        String language = null;
        if (uri.startsWith("language:")) {
            String value = StringHelper.after(uri, "language:");
            language = StringHelper.before(value, ":");
            uri = StringHelper.after(value, ":");
        }
        if (language == null) {
            // only use simple language if needed
            language = LanguageSupport.hasSimpleFunction(uri) ? "simple" : "constant";
        }
        Language lan = camelContext.resolveLanguage(language);
        return lan.createExpression(uri);
    }
}
|
ToDynamicReifier
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/OneToManyJoinedInheritanceAndDiscriminatorTest.java
|
{
"start": 4767,
"end": 5520
}
|
/**
 * Distributor-side {@code Company} subtype owning the inverse side of the
 * one-to-many to {@code DistributorComputerSystem}.
 */
class ____ extends Company {
	@OneToMany( mappedBy = "owner" )
	private List<DistributorComputerSystem> computerSystems = new ArrayList<>();

	public DistributorCompany() {
	}

	public DistributorCompany(long id) {
		super( id );
	}

	// Maintains both sides of the bidirectional association.
	public void addComputerSystem(DistributorComputerSystem computerSystem) {
		computerSystems.add( computerSystem );
		computerSystem.setOwner( this );
	}

	public List<DistributorComputerSystem> getComputerSystems() {
		return computerSystems;
	}
}
@SuppressWarnings({"FieldCanBeLocal", "unused"})
@Entity( name = "ComputerSystem" )
@Table( name = "computer_system" )
@Inheritance( strategy = InheritanceType.JOINED )
@DiscriminatorColumn( name = "disc_col" )
public static abstract
|
DistributorCompany
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
|
{
"start": 2488,
"end": 2574
}
|
class ____ the block recovery work commands.
*/
@InterfaceAudience.Private
public
|
handles
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/embeddable/EmbeddableWithIdenticallyNamedAssociationTest.java
|
{
"start": 4901,
"end": 5246
}
|
class ____ {
	@OneToOne
	@JoinColumn(name = "emb_entityA_id")
	private EntityA entityA;

	/**
	 * Null-safe description of this embeddable. The previous implementation dereferenced
	 * {@code entityA} unconditionally (NPE when unset) and emitted a stray leading comma
	 * ("EmbeddableB{, entityA=...").
	 */
	@Override
	public String toString() {
		return "EmbeddableB{" +
				"entityA=" + (entityA == null ? null : entityA.getId()) +
				'}';
	}

	public EntityA getEntityA() {
		return entityA;
	}

	public void setEntityA(EntityA a) {
		this.entityA = a;
	}
}
}
|
EmbeddableB
|
java
|
junit-team__junit5
|
junit-platform-engine/src/main/java/org/junit/platform/engine/EngineDiscoveryListener.java
|
{
"start": 689,
"end": 1048
}
|
interface ____ empty <em>default</em> implementations.
* Concrete implementations may therefore override one or more of these methods
* to be notified of the selected events.
*
* @since 1.6
* @see EngineDiscoveryRequest#getDiscoveryListener()
* @see org.junit.platform.launcher.LauncherDiscoveryListener
*/
@API(status = STABLE, since = "1.10")
public
|
have
|
java
|
apache__maven
|
its/core-it-support/core-it-plugins/maven-it-plugin-project/src/main/java/org/apache/maven/plugin/coreit/AbstractPomMojo.java
|
{
"start": 1269,
"end": 2422
}
|
/**
 * Base mojo with helpers to dump key properties of a {@link MavenProject}
 * into a {@link Properties} file for integration-test verification.
 */
class ____ extends AbstractMojo {

    /**
     * The project builder.
     */
    @Component
    protected MavenProjectBuilder builder;

    // Records id, name, description and (when present) the artifact id of the project
    // under the given key prefix.
    protected void dump(Properties props, String key, MavenProject project) {
        put(props, key + "project.id", project.getId());
        put(props, key + "project.name", project.getName());
        put(props, key + "project.description", project.getDescription());
        if (project.getArtifact() != null) {
            put(props, key + "artifact.id", project.getArtifact().getId());
        }
    }

    // Stores the value's string form, silently skipping nulls.
    protected void put(Properties props, String key, Object value) {
        if (value == null) {
            return;
        }
        props.setProperty(key, value.toString());
    }

    // Writes the collected properties, creating parent directories as needed.
    protected void store(Properties props, File file) throws MojoExecutionException {
        try {
            file.getParentFile().mkdirs();
            try (FileOutputStream out = new FileOutputStream(file)) {
                props.store(out, "[MAVEN-CORE-IT-LOG]");
            }
        } catch (IOException e) {
            throw new MojoExecutionException("Failed to dump POMs: " + e.getMessage(), e);
        }
    }
}
|
AbstractPomMojo
|
java
|
spring-projects__spring-boot
|
core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/condition/ConditionalOnCloudPlatformTests.java
|
{
"start": 1110,
"end": 2097
}
|
class ____ {
private final ApplicationContextRunner contextRunner = new ApplicationContextRunner();
@Test
void outcomeWhenCloudfoundryPlatformNotPresentShouldNotMatch() {
	// Without VCAP_APPLICATION the CF-conditional bean must not be registered.
	this.contextRunner.withUserConfiguration(CloudFoundryPlatformConfig.class)
		.run((context) -> assertThat(context).doesNotHaveBean("foo"));
}
@Test
void outcomeWhenCloudfoundryPlatformPresentShouldMatch() {
	// Setting VCAP_APPLICATION makes the Cloud Foundry condition match.
	this.contextRunner.withUserConfiguration(CloudFoundryPlatformConfig.class)
		.withPropertyValues("VCAP_APPLICATION:---")
		.run((context) -> assertThat(context).hasBean("foo"));
}
@Test
void outcomeWhenCloudfoundryPlatformPresentAndMethodTargetShouldMatch() {
	// Same as above but the annotation is placed on the @Bean method instead of the class.
	this.contextRunner.withUserConfiguration(CloudFoundryPlatformOnMethodConfig.class)
		.withPropertyValues("VCAP_APPLICATION:---")
		.run((context) -> assertThat(context).hasBean("foo"));
}
@Configuration(proxyBeanMethods = false)
@ConditionalOnCloudPlatform(CloudPlatform.CLOUD_FOUNDRY)
static
|
ConditionalOnCloudPlatformTests
|
java
|
spring-cloud__spring-cloud-gateway
|
spring-cloud-gateway-server-webflux/src/main/java/org/springframework/cloud/gateway/filter/factory/AbstractChangeRequestUriGatewayFilterFactory.java
|
{
"start": 1281,
"end": 2144
}
|
/**
 * Base filter factory that lets subclasses rewrite the request URI: when
 * {@link #determineRequestUri} yields a URI it is stored under
 * {@code GATEWAY_REQUEST_URL_ATTR} before the chain continues.
 */
class ____<T> extends AbstractGatewayFilterFactory<T> {

	private final int order;

	public AbstractChangeRequestUriGatewayFilterFactory(Class<T> clazz, int order) {
		super(clazz);
		this.order = order;
	}

	// Default order: just after the route-to-URL filter, so the rewritten URI wins.
	public AbstractChangeRequestUriGatewayFilterFactory(Class<T> clazz) {
		this(clazz, RouteToRequestUrlFilter.ROUTE_TO_URL_FILTER_ORDER + 1);
	}

	/**
	 * Determines the replacement request URI; empty means "leave the request unchanged".
	 */
	protected abstract Optional<URI> determineRequestUri(ServerWebExchange exchange, T config);

	@Override
	public GatewayFilter apply(T config) {
		return new OrderedGatewayFilter((exchange, chain) -> {
			Optional<URI> uri = this.determineRequestUri(exchange, config);
			uri.ifPresent(u -> {
				Map<String, Object> attributes = exchange.getAttributes();
				attributes.put(GATEWAY_REQUEST_URL_ATTR, u);
			});
			return chain.filter(exchange);
		}, this.order);
	}
}
|
AbstractChangeRequestUriGatewayFilterFactory
|
java
|
spring-projects__spring-security
|
config/src/main/java/org/springframework/security/config/annotation/web/configurers/oauth2/server/resource/DPoPAuthenticationConfigurer.java
|
{
"start": 6935,
"end": 8704
}
|
/**
 * Extracts a {@link DPoPAuthenticationToken} from a request carrying an
 * {@code Authorization: DPoP <token>} header together with exactly one DPoP proof header.
 * Returns {@code null} when the request does not use the DPoP scheme at all.
 */
class ____ implements AuthenticationConverter {

	private static final Pattern AUTHORIZATION_PATTERN = Pattern.compile("^DPoP (?<token>[a-zA-Z0-9-._~+/]+=*)$",
			Pattern.CASE_INSENSITIVE);

	@Override
	public Authentication convert(HttpServletRequest request) {
		List<String> authorizationList = Collections.list(request.getHeaders(HttpHeaders.AUTHORIZATION));
		if (CollectionUtils.isEmpty(authorizationList)) {
			return null;
		}
		// Multiple Authorization headers are ambiguous -> invalid_request.
		if (authorizationList.size() != 1) {
			OAuth2Error error = new OAuth2Error(OAuth2ErrorCodes.INVALID_REQUEST,
					"Found multiple Authorization headers.", null);
			throw new OAuth2AuthenticationException(error);
		}
		String authorization = authorizationList.get(0);
		// Not a DPoP request; let other converters handle it.
		if (!StringUtils.startsWithIgnoreCase(authorization, OAuth2AccessToken.TokenType.DPOP.getValue())) {
			return null;
		}
		// DPoP scheme claimed but token is not well-formed -> invalid_token.
		Matcher matcher = AUTHORIZATION_PATTERN.matcher(authorization);
		if (!matcher.matches()) {
			OAuth2Error error = new OAuth2Error(OAuth2ErrorCodes.INVALID_TOKEN, "DPoP access token is malformed.",
					null);
			throw new OAuth2AuthenticationException(error);
		}
		String accessToken = matcher.group("token");
		// Exactly one DPoP proof header is required alongside the access token.
		List<String> dPoPProofList = Collections
			.list(request.getHeaders(OAuth2AccessToken.TokenType.DPOP.getValue()));
		if (CollectionUtils.isEmpty(dPoPProofList) || dPoPProofList.size() != 1) {
			OAuth2Error error = new OAuth2Error(OAuth2ErrorCodes.INVALID_REQUEST,
					"DPoP proof is missing or invalid.", null);
			throw new OAuth2AuthenticationException(error);
		}
		String dPoPProof = dPoPProofList.get(0);
		return new DPoPAuthenticationToken(accessToken, dPoPProof, request.getMethod(),
				request.getRequestURL().toString());
	}
}
private static final
|
DPoPAuthenticationConverter
|
java
|
apache__camel
|
core/camel-main/src/test/java/org/apache/camel/main/MainIoCNewRouteBuilderTest.java
|
{
"start": 2419,
"end": 2832
}
|
/** Simple bean exposing a mutable name that defaults to "Tiger". */
class ____ {

    // Backing field for the bean name; renamed from 'name' for clarity.
    private String beanName = "Tiger";

    public String getName() {
        return beanName;
    }

    public void setName(String name) {
        this.beanName = name;
    }
}
@Override
public void configure(CamelContext camel) {
camel.getGlobalOptions().put("foo", "123");
}
}
public static
|
MyCoolBean
|
java
|
FasterXML__jackson-core
|
src/test/java/tools/jackson/core/unittest/constraints/DeeplyNestedContentReadTest.java
|
{
"start": 513,
"end": 1980
}
|
/**
 * Verifies that documents nested one level beyond
 * {@code StreamReadConstraints.DEFAULT_MAX_DEPTH} fail with a
 * {@link StreamConstraintsException} across all streaming parser modes.
 */
class ____
    extends JacksonCoreTestBase
{
    private final JsonFactory JSON_F = newStreamFactory();

    private final int MAX_NESTING = StreamReadConstraints.DEFAULT_MAX_DEPTH;

    public void testDeepNestingStreaming() throws Exception
    {
        // only needs to be one more
        final String DOC = createDeepNestedDoc(MAX_NESTING + 1);
        for (int mode : ALL_STREAMING_MODES) {
            try (JsonParser p = createParser(JSON_F, mode, DOC)) {
                _testDeepNesting(p);
            }
        }
    }

    // Drains the parser and asserts the constraint failure message mentions the depths involved.
    private void _testDeepNesting(JsonParser p) throws Exception
    {
        try {
            while (p.nextToken() != null) { }
            fail("expected StreamConstraintsException");
        } catch (StreamConstraintsException e) {
            assertThat(e.getMessage())
                .startsWith("Document nesting depth ("+
                        (MAX_NESTING+1)+") exceeds the maximum allowed ("+
                        MAX_NESTING+", from `StreamReadConstraints.getMaxNestingDepth()`)");
        }
    }

    // Builds [ {"a": [ {"a": [ ... "val" ... ]} ]} ] with 2*depth+1 nesting levels
    // (each loop iteration opens both an object and an array).
    private String createDeepNestedDoc(final int depth) {
        StringBuilder sb = new StringBuilder();
        sb.append("[");
        for (int i = 0; i < depth; i++) {
            sb.append("{ \"a\": [");
        }
        sb.append(" \"val\" ");
        for (int i = 0; i < depth; i++) {
            sb.append("]}");
        }
        sb.append("]");
        return sb.toString();
    }
}
|
DeeplyNestedContentReadTest
|
java
|
apache__rocketmq
|
client/src/main/java/org/apache/rocketmq/client/consumer/store/OffsetSerializeWrapper.java
|
{
"start": 1172,
"end": 1573
}
|
/**
 * Serialization wrapper for a consumer's offset table, mapping each
 * {@link MessageQueue} to its consumed offset.
 */
class ____ extends RemotingSerializable {
    // Concurrent map because offsets are updated while serialization may read the table.
    private ConcurrentMap<MessageQueue, AtomicLong> offsetTable =
        new ConcurrentHashMap<>();

    public ConcurrentMap<MessageQueue, AtomicLong> getOffsetTable() {
        return offsetTable;
    }

    public void setOffsetTable(ConcurrentMap<MessageQueue, AtomicLong> offsetTable) {
        this.offsetTable = offsetTable;
    }
}
|
OffsetSerializeWrapper
|
java
|
micronaut-projects__micronaut-core
|
test-suite/src/test/java/io/micronaut/docs/http/server/response/textplain/TextPlainController.java
|
{
"start": 621,
"end": 1740
}
|
/**
 * Documentation-snippet controller showing text/plain rendering of non-String values.
 * The {@code tag::}/{@code end::} comments are Asciidoctor include markers — keep them intact.
 */
class ____ {
    //end::classopening[]
    @Get("/boolean")
    @Produces(MediaType.TEXT_PLAIN) // <1>
    String bool() {
        return Boolean.TRUE.toString(); // <2>
    }

    @Get("/boolean/mono")
    @Produces(MediaType.TEXT_PLAIN) // <1>
    @SingleResult
    Publisher<String> monoBool() {
        return Mono.just(Boolean.TRUE.toString()); // <2>
    }

    @Get("/boolean/flux")
    @Produces(MediaType.TEXT_PLAIN)
    @SingleResult
    Publisher<String> fluxBool() {
        return Flux.just(Boolean.TRUE.toString());
    }

    @Get("/bigdecimal")
    @Produces(MediaType.TEXT_PLAIN) // <1>
    String bigDecimal() {
        return BigDecimal.valueOf(Long.MAX_VALUE).toString(); // <2>
    }

    //tag::method[]
    @Get("/date")
    @Produces(MediaType.TEXT_PLAIN) // <1>
    String date() {
        return new Calendar.Builder().setDate(2023,7,4).build().toString(); // <2>
    }
    //end::method[]

    @Get("/person")
    @Produces(MediaType.TEXT_PLAIN) // <1>
    String person() {
        return new Person("Dean Wette", 65).toString(); // <2>
    }
    //tag::classclosing[]
}
//end::classclosing[]
//end::classclosing[]
|
TextPlainController
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/output/KeyListOutput.java
|
{
"start": 1100,
"end": 2090
}
|
/**
 * {@link CommandOutput} collecting decoded keys into a {@link List}, with streaming
 * support: each key is pushed through the configured {@link Subscriber} as it arrives.
 */
class ____<K, V> extends CommandOutput<K, V, List<K>> implements StreamingOutput<K> {

    private boolean initialized;

    private Subscriber<K> subscriber;

    public KeyListOutput(RedisCodec<K, V> codec) {
        super(codec, Collections.emptyList());
        setSubscriber(ListSubscriber.instance());
    }

    @Override
    public void set(ByteBuffer bytes) {

        if (bytes == null) {
            return;
        }

        // bytes is non-null past the guard above; the previous redundant
        // "bytes == null ? null : ..." conditional has been removed.
        subscriber.onNext(output, codec.decodeKey(bytes));
    }

    @Override
    public void multi(int count) {

        // Allocate the backing list once, sized for the announced element count.
        if (!initialized) {
            output = OutputFactory.newList(count);
            initialized = true;
        }
    }

    @Override
    public void setSubscriber(Subscriber<K> subscriber) {
        LettuceAssert.notNull(subscriber, "Subscriber must not be null");
        this.subscriber = subscriber;
    }

    @Override
    public Subscriber<K> getSubscriber() {
        return subscriber;
    }

}
|
KeyListOutput
|
java
|
square__retrofit
|
retrofit-adapters/rxjava2/src/test/java/retrofit2/adapter/rxjava2/CompletableThrowingTest.java
|
{
"start": 4051,
"end": 4577
}
|
class ____ implements CompletableObserver {
private final CompletableObserver delegate;
ForwardingCompletableObserver(CompletableObserver delegate) {
this.delegate = delegate;
}
@Override
public void onSubscribe(Disposable disposable) {
delegate.onSubscribe(disposable);
}
@Override
public void onComplete() {
delegate.onComplete();
}
@Override
public void onError(Throwable throwable) {
delegate.onError(throwable);
}
}
}
|
ForwardingCompletableObserver
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java
|
{
"start": 24943,
"end": 25962
}
|
class ____ {
private final boolean hasContexts;
private final List<CompletionInputMetadata> list;
private CompletionInputMetadata single;
CompletionInputMetadataContainer(boolean hasContexts) {
this.hasContexts = hasContexts;
this.list = hasContexts ? new ArrayList<>() : null;
}
void add(CompletionInputMetadata cm) {
if (hasContexts) {
list.add(cm);
} else {
if (single == null || single.weight < cm.weight) {
single = cm;
}
}
}
List<CompletionInputMetadata> getValues() {
assert hasContexts;
return list;
}
CompletionInputMetadata getValue() {
assert hasContexts == false;
return single;
}
int getWeight() {
assert hasContexts == false;
return single.weight;
}
}
static
|
CompletionInputMetadataContainer
|
java
|
apache__camel
|
components/camel-aws/camel-aws2-timestream/src/main/java/org/apache/camel/component/aws2/timestream/client/Timestream2ClientFactory.java
|
{
"start": 1145,
"end": 1221
}
|
class ____ return the correct type of AWS Timestream client.
*/
public final
|
to
|
java
|
apache__camel
|
core/camel-api/src/main/java/org/apache/camel/spi/RouteIdAware.java
|
{
"start": 1018,
"end": 1234
}
|
interface ____ {
/**
* Gets the route id
*/
String getRouteId();
/**
* Sets the route id
*
* @param routeId the route id
*/
void setRouteId(String routeId);
}
|
RouteIdAware
|
java
|
bumptech__glide
|
integration/sqljournaldiskcache/src/main/java/com/bumptech/glide/integration/sqljournaldiskcache/EvictionManager.java
|
{
"start": 255,
"end": 5452
}
|
class ____ {
private static final String TAG = "Evictor";
// You must restart the app after enabling these logs for the change to take affect.
// We cache isLoggable to avoid the performance hit of checking repeatedly.
private static final boolean LOG_DEBUG = Log.isLoggable(TAG, Log.DEBUG);
private static final boolean LOG_VERBOSE = Log.isLoggable(TAG, Log.VERBOSE);
// The maximum amount we can go over our cache size before triggering evictions, currently 25mb.
private static final long MAXIMUM_EVICTION_SLOP = 25 * 1024 * 1024;
private final Handler evictionHandler;
private final JournaledLruDiskCache diskCache;
private final File cacheDirectory;
private final FileSystem fileSystem;
private final Journal journal;
private final Looper workLooper;
private final Clock clock;
private final long evictionSlopBytes;
private final long staleEvictionThresholdMs;
@GuardedBy("this")
private long maximumSizeBytes;
EvictionManager(
JournaledLruDiskCache diskCache,
File cacheDirectory,
FileSystem fileSystem,
Journal journal,
Looper workLooper,
long maximumSizeBytes,
float slopMultiplier,
long staleEvictionThresholdMs,
Clock clock) {
this.diskCache = diskCache;
this.cacheDirectory = cacheDirectory;
this.fileSystem = fileSystem;
this.journal = journal;
this.workLooper = workLooper;
this.maximumSizeBytes = maximumSizeBytes;
this.clock = clock;
this.staleEvictionThresholdMs = staleEvictionThresholdMs;
evictionSlopBytes =
Math.min(Math.round(maximumSizeBytes * slopMultiplier), MAXIMUM_EVICTION_SLOP);
evictionHandler = new Handler(workLooper, new EvictionCallback());
}
/**
* Sets maximumSizeBytes to a new size.
*
* <p>Must be called on a background thread.
*
* <p>Decreasing the maximumSizeBytes may schedule an eviction if the current cache size exceeds
* the new maximumSizeBytes. Evictions will be scheduled and executed asynchronously. Therefore,
* the eviction will happen based on the latest maximum cache size, not the maximum size at
* scheduling.
*/
synchronized void setMaximumSizeBytes(long newMaxSizeBytes) {
long originalMaxBytes = maximumSizeBytes;
maximumSizeBytes = newMaxSizeBytes;
if (newMaxSizeBytes < originalMaxBytes) {
maybeScheduleEviction(newMaxSizeBytes);
}
}
private synchronized long getMaximumSizeBytes() {
return maximumSizeBytes;
}
/**
* Schedules a journal eviction on a work thread if the journal size currently exceeds the allowed
* cache size.
*/
void maybeScheduleEviction() {
maybeScheduleEviction(getMaximumSizeBytes());
}
private void maybeScheduleEviction(long maximumSizeBytes) {
if (isEvictionRequired(maximumSizeBytes)) {
evictionHandler.obtainMessage(MessageIds.EVICT).sendToTarget();
}
}
private boolean isEvictionRequired(long maximumSizeBytes) {
return journal.getCurrentSizeBytes() > evictionSlopBytes + maximumSizeBytes;
}
private void evictOnWorkThread() {
if (!Looper.myLooper().equals(workLooper)) {
throw new IllegalStateException(
"Cannot call evictOnWorkThread on thread: " + Thread.currentThread().getName());
}
long maximumSizeBytes = getMaximumSizeBytes();
long staleDateMs = clock.currentTimeMillis() - staleEvictionThresholdMs;
List<String> staleEntriesKeys = journal.getStaleEntries(staleDateMs);
// Writes may queue up a number of eviction messages. After the first one runs, eviction may no
// longer be necessary, so we simply ignore the message.
if (!isEvictionRequired(maximumSizeBytes) && staleEntriesKeys.isEmpty()) {
if (LOG_VERBOSE) {
Log.v(TAG, "Ignoring eviction, not needed");
}
return;
}
if (LOG_DEBUG) {
Log.d(TAG, "Starting eviction on work thread");
}
int successfullyDeletedCount = 0;
int triedToDeleteEntries = staleEntriesKeys.size();
if (!staleEntriesKeys.isEmpty()) {
successfullyDeletedCount += diskCache.delete(staleEntriesKeys).size();
}
long targetSize = maximumSizeBytes - evictionSlopBytes;
if (isEvictionRequired(maximumSizeBytes)) {
long bytesToEvict = journal.getCurrentSizeBytes() - targetSize;
List<String> leastRecentlyUsedKeys = journal.getLeastRecentlyUsed(bytesToEvict);
triedToDeleteEntries += leastRecentlyUsedKeys.size();
successfullyDeletedCount += diskCache.delete(leastRecentlyUsedKeys).size();
}
if (triedToDeleteEntries == 0) {
throw new IllegalStateException("Failed to find entries to evict.");
}
if (LOG_DEBUG) {
Log.d(
TAG,
"Ran eviction"
+ ", tried to delete: "
+ triedToDeleteEntries
+ " entries"
+ ", actually deleted: "
+ successfullyDeletedCount
+ " entries"
+ ", target journal : "
+ targetSize
+ ", journal size: "
+ journal.getCurrentSizeBytes()
+ ", file size: "
+ fileSystem.getDirectorySize(cacheDirectory));
}
}
private
|
EvictionManager
|
java
|
google__guava
|
android/guava-tests/test/com/google/common/cache/AbstractLoadingCacheTest.java
|
{
"start": 1189,
"end": 5148
}
|
class ____ extends TestCase {
public void testGetUnchecked_checked() {
Exception cause = new Exception();
AtomicReference<Object> valueRef = new AtomicReference<>();
LoadingCache<Object, Object> cache =
new AbstractLoadingCache<Object, Object>() {
@Override
public Object get(Object key) throws ExecutionException {
Object v = valueRef.get();
if (v == null) {
throw new ExecutionException(cause);
}
return v;
}
@Override
public @Nullable Object getIfPresent(Object key) {
return valueRef.get();
}
};
UncheckedExecutionException expected =
assertThrows(UncheckedExecutionException.class, () -> cache.getUnchecked(new Object()));
assertThat(expected).hasCauseThat().isEqualTo(cause);
Object newValue = new Object();
valueRef.set(newValue);
assertThat(cache.getUnchecked(new Object())).isSameInstanceAs(newValue);
}
public void testGetUnchecked_unchecked() {
RuntimeException cause = new RuntimeException();
AtomicReference<Object> valueRef = new AtomicReference<>();
LoadingCache<Object, Object> cache =
new AbstractLoadingCache<Object, Object>() {
@Override
public Object get(Object key) throws ExecutionException {
Object v = valueRef.get();
if (v == null) {
throw new ExecutionException(cause);
}
return v;
}
@Override
public @Nullable Object getIfPresent(Object key) {
return valueRef.get();
}
};
UncheckedExecutionException expected =
assertThrows(UncheckedExecutionException.class, () -> cache.getUnchecked(new Object()));
assertThat(expected).hasCauseThat().isEqualTo(cause);
Object newValue = new Object();
valueRef.set(newValue);
assertThat(cache.getUnchecked(new Object())).isSameInstanceAs(newValue);
}
public void testGetUnchecked_error() {
Error cause = new Error();
AtomicReference<Object> valueRef = new AtomicReference<>();
LoadingCache<Object, Object> cache =
new AbstractLoadingCache<Object, Object>() {
@Override
public Object get(Object key) throws ExecutionException {
Object v = valueRef.get();
if (v == null) {
throw new ExecutionError(cause);
}
return v;
}
@Override
public @Nullable Object getIfPresent(Object key) {
return valueRef.get();
}
};
ExecutionError expected =
assertThrows(ExecutionError.class, () -> cache.getUnchecked(new Object()));
assertThat(expected).hasCauseThat().isEqualTo(cause);
Object newValue = new Object();
valueRef.set(newValue);
assertThat(cache.getUnchecked(new Object())).isSameInstanceAs(newValue);
}
public void testGetUnchecked_otherThrowable() {
Throwable cause = new Throwable();
AtomicReference<Object> valueRef = new AtomicReference<>();
LoadingCache<Object, Object> cache =
new AbstractLoadingCache<Object, Object>() {
@Override
public Object get(Object key) throws ExecutionException {
Object v = valueRef.get();
if (v == null) {
throw new ExecutionException(cause);
}
return v;
}
@Override
public @Nullable Object getIfPresent(Object key) {
return valueRef.get();
}
};
UncheckedExecutionException expected =
assertThrows(UncheckedExecutionException.class, () -> cache.getUnchecked(new Object()));
assertThat(expected).hasCauseThat().isEqualTo(cause);
Object newValue = new Object();
valueRef.set(newValue);
assertThat(cache.getUnchecked(new Object())).isSameInstanceAs(newValue);
}
}
|
AbstractLoadingCacheTest
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/taskmanager/Task.java
|
{
"start": 77989,
"end": 80997
}
|
class ____ implements Runnable {
/** The executing task thread that we wait for to terminate. */
private final Thread executorThread;
/** The TaskManager to notify if cancellation does not happen in time. */
private final TaskManagerActions taskManager;
/** The timeout for cancellation. */
private final long timeoutMillis;
private final TaskInfo taskInfo;
private final JobID jobID;
TaskCancelerWatchDog(
TaskInfo taskInfo,
Thread executorThread,
TaskManagerActions taskManager,
long timeoutMillis,
JobID jobID) {
checkArgument(timeoutMillis > 0);
this.taskInfo = taskInfo;
this.executorThread = executorThread;
this.taskManager = taskManager;
this.timeoutMillis = timeoutMillis;
this.jobID = jobID;
}
@Override
public void run() {
try (MdcCloseable ign = MdcUtils.withContext(MdcUtils.asContextData(jobID))) {
Deadline timeout = Deadline.fromNow(Duration.ofMillis(timeoutMillis));
while (executorThread.isAlive() && timeout.hasTimeLeft()) {
try {
executorThread.join(Math.max(1, timeout.timeLeft().toMillis()));
} catch (InterruptedException ignored) {
// we don't react to interrupted exceptions, simply fall through the loop
}
}
if (executorThread.isAlive()) {
logTaskThreadStackTrace(
executorThread,
taskInfo.getTaskNameWithSubtasks(),
timeoutMillis,
"notifying TM");
String msg =
"Task did not exit gracefully within "
+ (timeoutMillis / 1000)
+ " + seconds.";
taskManager.notifyFatalError(msg, new FlinkRuntimeException(msg));
}
} catch (Throwable t) {
throw new FlinkRuntimeException("Error in Task Cancellation Watch Dog", t);
}
}
}
public static void logTaskThreadStackTrace(
Thread thread, String taskName, long timeoutMs, String action) {
StackTraceElement[] stack = thread.getStackTrace();
StringBuilder stackTraceStr = new StringBuilder();
for (StackTraceElement e : stack) {
stackTraceStr.append(e).append('\n');
}
LOG.warn(
"Task '{}' did not react to cancelling signal - {}; it is stuck for {} seconds in method:\n {}",
taskName,
action,
timeoutMs / 1000,
stackTraceStr);
}
/** Various operation of notify checkpoint. */
public
|
TaskCancelerWatchDog
|
java
|
lettuce-io__lettuce-core
|
src/test/java/io/lettuce/core/dynamic/codec/ParameterWrappersUnitTests.java
|
{
"start": 4381,
"end": 4896
}
|
interface ____ {
String range(Range<String> range);
String value(Value<String> range);
String keyValue(KeyValue<Integer, String> range);
String array(String[] values);
String byteArray(byte[] values);
String withWrappers(Range<String> range, io.lettuce.core.Value<Number> value,
io.lettuce.core.KeyValue<Integer, Long> keyValue);
String withList(List<String> map);
String withMap(Map<Integer, String> map);
}
}
|
CommandMethods
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/event/service/spi/JpaBootstrapSensitive.java
|
{
"start": 367,
"end": 451
}
|
interface ____ {
void wasJpaBootstrap(boolean wasJpaBootstrap);
}
|
JpaBootstrapSensitive
|
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/TestRPCFactories.java
|
{
"start": 3725,
"end": 5460
}
|
class ____ {
@Test
public void test() {
testPbServerFactory();
testPbClientFactory();
}
private void testPbServerFactory() {
InetSocketAddress addr = new InetSocketAddress(0);
Configuration conf = new Configuration();
MRClientProtocol instance = new MRClientProtocolTestImpl();
Server server = null;
try {
server =
RpcServerFactoryPBImpl.get().getServer(
MRClientProtocol.class, instance, addr, conf, null, 1);
server.start();
} catch (YarnRuntimeException e) {
e.printStackTrace();
fail("Failed to crete server");
} finally {
server.stop();
}
}
private void testPbClientFactory() {
InetSocketAddress addr = new InetSocketAddress(0);
System.err.println(addr.getHostName() + addr.getPort());
Configuration conf = new Configuration();
MRClientProtocol instance = new MRClientProtocolTestImpl();
Server server = null;
try {
server =
RpcServerFactoryPBImpl.get().getServer(
MRClientProtocol.class, instance, addr, conf, null, 1);
server.start();
System.err.println(server.getListenerAddress());
System.err.println(NetUtils.getConnectAddress(server));
MRClientProtocol client = null;
try {
client = (MRClientProtocol) RpcClientFactoryPBImpl.get().getClient(MRClientProtocol.class, 1, NetUtils.getConnectAddress(server), conf);
} catch (YarnRuntimeException e) {
e.printStackTrace();
fail("Failed to crete client");
}
} catch (YarnRuntimeException e) {
e.printStackTrace();
fail("Failed to crete server");
} finally {
server.stop();
}
}
public
|
TestRPCFactories
|
java
|
apache__dubbo
|
dubbo-compatible/src/main/java/com/alibaba/dubbo/rpc/InvokerListener.java
|
{
"start": 933,
"end": 1509
}
|
interface ____ extends org.apache.dubbo.rpc.InvokerListener {
void referred(com.alibaba.dubbo.rpc.Invoker<?> invoker) throws com.alibaba.dubbo.rpc.RpcException;
void destroyed(com.alibaba.dubbo.rpc.Invoker<?> invoker);
@Override
default void referred(Invoker<?> invoker) throws RpcException {
this.referred(new com.alibaba.dubbo.rpc.Invoker.CompatibleInvoker<>(invoker));
}
@Override
default void destroyed(Invoker<?> invoker) {
this.destroyed(new com.alibaba.dubbo.rpc.Invoker.CompatibleInvoker<>(invoker));
}
}
|
InvokerListener
|
java
|
quarkusio__quarkus
|
extensions/jfr/runtime/src/main/java/io/quarkus/jfr/runtime/OTelIdProducer.java
|
{
"start": 176,
"end": 459
}
|
class ____ implements IdProducer {
@Inject
Span span;
@Override
public String getTraceId() {
return span.getSpanContext().getTraceId();
}
@Override
public String getSpanId() {
return span.getSpanContext().getSpanId();
}
}
|
OTelIdProducer
|
java
|
quarkusio__quarkus
|
extensions/cache/deployment/src/test/java/io/quarkus/cache/test/deployment/DeploymentExceptionsTest.java
|
{
"start": 1258,
"end": 3747
}
|
class ____ {
private static final String UNKNOWN_CACHE_1 = "unknown-cache-1";
private static final String UNKNOWN_CACHE_2 = "unknown-cache-2";
private static final String UNKNOWN_CACHE_3 = "unknown-cache-3";
@RegisterExtension
static final QuarkusUnitTest TEST = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar.addClasses(TestResource.class, TestBean.class))
.assertException(t -> {
assertEquals(DeploymentException.class, t.getClass());
assertEquals(8, t.getSuppressed().length);
assertVoidReturnTypeTargetException(t, "showThrowVoidReturnTypeTargetException");
assertClassTargetException(t, TestResource.class, 1);
assertClassTargetException(t, TestBean.class, 2);
assertKeyGeneratorConstructorException(t, KeyGen1.class);
assertKeyGeneratorConstructorException(t, KeyGen2.class);
assertKeyGeneratorConstructorException(t, KeyGen3.class);
assertKeyGeneratorConstructorException(t, KeyGen4.class);
});
private static void assertVoidReturnTypeTargetException(Throwable t, String expectedMethodName) {
assertEquals(1, filterSuppressed(t, VoidReturnTypeTargetException.class)
.filter(s -> expectedMethodName.equals(s.getMethodInfo().name())).count());
}
private static void assertClassTargetException(Throwable t, Class<?> expectedClassName, long expectedCount) {
assertEquals(expectedCount, filterSuppressed(t, ClassTargetException.class)
.filter(s -> expectedClassName.getName().equals(s.getClassName().toString())).count());
}
private static void assertKeyGeneratorConstructorException(Throwable t, Class<?> expectedClassName) {
assertEquals(1, filterSuppressed(t, KeyGeneratorConstructorException.class)
.filter(s -> expectedClassName.getName().equals(s.getClassInfo().name().toString())).count());
}
private static <T extends RuntimeException> Stream<T> filterSuppressed(Throwable t, Class<T> filterClass) {
return stream(t.getSuppressed()).filter(filterClass::isInstance).map(filterClass::cast);
}
@Test
public void shouldNotBeInvoked() {
fail("This method should not be invoked");
}
@Path("/test")
// Single annotation test.
@CacheInvalidate(cacheName = "should-throw-class-target-exception")
static
|
DeploymentExceptionsTest
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCompressorStream.java
|
{
"start": 1033,
"end": 2271
}
|
class ____ extends CompressorStream {
private static FileOutputStream fop = null;
private static File file = null;
static {
try {
file = new File("tmp.txt");
fop = new FileOutputStream(file);
if (!file.exists()) {
file.createNewFile();
}
} catch (IOException e) {
System.out.println("Error while creating a new file " + e.getMessage());
}
}
public TestCompressorStream() {
super(fop);
}
/**
* Overriding {@link CompressorStream#finish()} method in order
* to reproduce test case
*/
public void finish() throws IOException {
throw new IOException();
}
/**
* In {@link CompressorStream#close()}, if
* {@link CompressorStream#finish()} throws an IOEXception, outputStream
* object was not getting closed.
*/
@Test
public void testClose() {
TestCompressorStream testCompressorStream = new TestCompressorStream();
try {
testCompressorStream.close();
}
catch(IOException e) {
System.out.println("Expected IOException");
}
assertTrue(
((CompressorStream)testCompressorStream).closed, "closed shoud be true");
//cleanup after test case
file.delete();
}
}
|
TestCompressorStream
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/interceptors/aroundconstruct/AroundConstructWithParameterChangeTest.java
|
{
"start": 2002,
"end": 2251
}
|
class ____ {
@AroundConstruct
void aroundConstruct(InvocationContext ctx) throws Exception {
ctx.setParameters(new Object[] { new MyDependency("from interceptor") });
ctx.proceed();
}
}
}
|
MyInterceptor
|
java
|
spring-projects__spring-boot
|
module/spring-boot-micrometer-metrics/src/dockerTest/java/org/springframework/boot/micrometer/metrics/testcontainers/otlp/GrafanaOpenTelemetryMetricsContainerConnectionDetailsFactoryIntegrationTests.java
|
{
"start": 4276,
"end": 4370
}
|
class ____ {
@Bean
Clock customClock() {
return Clock.SYSTEM;
}
}
}
|
TestConfiguration
|
java
|
FasterXML__jackson-core
|
src/test/java/tools/jackson/core/unittest/read/loc/LocationOfError1173Test.java
|
{
"start": 7982,
"end": 8096
}
|
interface ____
{
JsonParser createParser(String input) throws Exception;
}
static
|
ParserGenerator
|
java
|
apache__camel
|
components/camel-quartz/src/test/java/org/apache/camel/component/quartz/QuartzAddRoutesAfterCamelContextStartedTest.java
|
{
"start": 1049,
"end": 1803
}
|
class ____ extends BaseQuartzTest {
@Test
public void testAddRoutes() throws Exception {
// camel context should already be started
assertTrue(context.getStatus().isStarted());
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(2);
// add the quartz router after CamelContext has been started
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("quartz://myGroup/myTimerName?trigger.repeatInterval=100&trigger.repeatCount=1").to("mock:result");
}
});
// it should also work
MockEndpoint.assertIsSatisfied(context);
}
}
|
QuartzAddRoutesAfterCamelContextStartedTest
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
|
{
"start": 8401,
"end": 15847
}
|
class
____ translateInterruptedException(exception, innerCause, message);
}
if (isMessageTranslatableToEOF(exception)) {
// call considered an sign of connectivity failure
return (EOFException)new EOFException(message).initCause(exception);
}
// if the exception came from the auditor, hand off translation
// to it.
IOException ioe = maybeTranslateAuditException(path, exception);
if (ioe != null) {
return ioe;
}
ioe = maybeTranslateCredentialException(path, exception);
if (ioe != null) {
return ioe;
}
// network problems covered by an IOE inside the exception chain.
ioe = maybeExtractIOException(path, exception, message);
if (ioe != null) {
return ioe;
}
// timeout issues
// ApiCallAttemptTimeoutException: a single HTTP request attempt failed.
// ApiCallTimeoutException: a request with any configured retries failed.
// The ApiCallTimeoutException exception should be the only one seen in
// the S3A code, but for due diligence both are handled and mapped to
// our own AWSApiCallTimeoutException.
if (exception instanceof ApiCallTimeoutException
|| exception instanceof ApiCallAttemptTimeoutException) {
// An API call to an AWS service timed out.
// This is a subclass of ConnectTimeoutException so
// all retry logic for that exception is handled without
// having to look down the stack for a
return new AWSApiCallTimeoutException(message, exception);
}
// no custom handling.
return new AWSClientIOException(message, exception);
} else {
// "error response returned by an S3 or other service."
// These contain more details and should be translated based
// on the HTTP status code and other details.
IOException ioe;
AwsServiceException ase = (AwsServiceException) exception;
// this exception is non-null if the service exception is an s3 one
S3Exception s3Exception = ase instanceof S3Exception
? (S3Exception) ase
: null;
int status = ase.statusCode();
// error details, may be null
final AwsErrorDetails errorDetails = ase.awsErrorDetails();
// error code, will be null if errorDetails is null
String errorCode = "";
if (errorDetails != null) {
errorCode = errorDetails.errorCode();
message = message + ":" + errorCode;
}
// big switch on the HTTP status code.
switch (status) {
case SC_301_MOVED_PERMANENTLY:
case SC_307_TEMPORARY_REDIRECT:
if (s3Exception != null) {
message = String.format("Received permanent redirect response to "
+ "region %s. This likely indicates that the S3 region "
+ "configured in %s does not match the AWS region containing " + "the bucket.",
s3Exception.awsErrorDetails().sdkHttpResponse().headers().get(BUCKET_REGION_HEADER),
AWS_REGION);
ioe = new AWSRedirectException(message, s3Exception);
} else {
ioe = new AWSRedirectException(message, ase);
}
break;
case SC_400_BAD_REQUEST:
ioe = new AWSBadRequestException(message, ase);
break;
// permissions
case SC_401_UNAUTHORIZED:
case SC_403_FORBIDDEN:
ioe = new AccessDeniedException(path, null, message);
ioe.initCause(ase);
break;
// the object isn't there
case SC_404_NOT_FOUND:
if (isUnknownBucket(ase)) {
// this is a missing bucket
ioe = new UnknownStoreException(path, message, ase);
} else {
// a normal unknown object.
// Can also be raised by third-party stores when aborting an unknown multipart upload
ioe = new FileNotFoundException(message);
ioe.initCause(ase);
}
break;
// Caused by duplicate create bucket call.
case SC_409_CONFLICT:
ioe = new AWSBadRequestException(message, ase);
break;
// this also surfaces sometimes and is considered to
// be ~ a not found exception.
case SC_410_GONE:
ioe = new FileNotFoundException(message);
ioe.initCause(ase);
break;
// errors which stores can return from requests which
// the store does not support.
case SC_405_METHOD_NOT_ALLOWED:
case SC_415_UNSUPPORTED_MEDIA_TYPE:
case SC_501_NOT_IMPLEMENTED:
ioe = new AWSUnsupportedFeatureException(message, ase);
break;
// precondition failure: the object is there, but the precondition
// (e.g. etag) didn't match. Assume remote file change during
// rename or status passed in to openfile had an etag which didn't match.
// See the SC_200 handler for the treatment of the S3 Express failure
// variant.
case SC_412_PRECONDITION_FAILED:
ioe = new RemoteFileChangedException(path, message, "", ase);
break;
// out of range. This may happen if an object is overwritten with
// a shorter one while it is being read or openFile() was invoked
// passing a FileStatus or file length less than that of the object.
// although the HTTP specification says that the response should
// include a range header specifying the actual range available,
// this isn't picked up here.
case SC_416_RANGE_NOT_SATISFIABLE:
ioe = new RangeNotSatisfiableEOFException(message, ase);
break;
// this has surfaced as a "no response from server" message.
// so rare we haven't replicated it.
// Treating as an idempotent proxy error.
case SC_443_NO_RESPONSE:
case SC_444_NO_RESPONSE:
ioe = new AWSNoResponseException(message, ase);
break;
// throttling
case SC_429_TOO_MANY_REQUESTS_GCS: // google cloud through this connector
case SC_503_SERVICE_UNAVAILABLE: // AWS
ioe = new AWSServiceThrottledException(message, ase);
break;
// gateway timeout
case SC_504_GATEWAY_TIMEOUT:
ioe = new AWSApiCallTimeoutException(message, ase);
break;
// internal error
case SC_500_INTERNAL_SERVER_ERROR:
ioe = new AWSStatus500Exception(message, ase);
break;
case SC_200_OK:
if (exception instanceof MultiObjectDeleteException) {
// failure during a bulk delete
return ((MultiObjectDeleteException) exception)
.translateException(message);
}
if (PRECONDITION_FAILED.equals(errorCode)) {
// S3 Express stores report conflict in conditional writes
// as a 200 + an error code of "PreconditionFailed".
// This is mapped to RemoteFileChangedException for consistency
// with SC_412_PRECONDITION_FAILED handling.
return new RemoteFileChangedException(path,
operation,
exception.getMessage(),
exception);
}
// other 200: FALL THROUGH
default:
// no specifically handled exit code.
// convert all unknown 500+ errors to a 500 exception
if (status > SC_500_INTERNAL_SERVER_ERROR) {
ioe = new AWSStatus500Exception(message, ase);
break;
}
// Choose an IOE subclass based on the
|
return
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/commit/ResilientCommitByRename.java
|
{
"start": 1717,
"end": 3987
}
|
interface ____ extends IOStatisticsSource {
/**
* Rename source file to dest path *Exactly*; no subdirectory games here.
* if the method does not raise an exception,then
* the data at dest is the data which was at source.
*
* Requirements
*
* <pre>
* exists(FS, source) else raise FileNotFoundException
* source != dest else raise PathIOException
* not exists(FS, dest)
* isDir(FS, dest.getParent)
* </pre>
* <ol>
* <li>source != dest else raise PathIOException</li>
* <li>source must exist else raise FileNotFoundException</li>
* <li>source must exist and be a file</li>
* <li>dest must not exist; </li>
* <li>dest.getParent() must be a dir</li>
* <li>if sourceEtag is non-empty, it MAY be used to qualify/validate the rename.</li>
* </ol>
*
* The outcome of the operation is undefined if source is not a file, dest exists,
* dest.getParent() doesn't exist/is a file.
* That is: implementations SHOULD assume that the code calling this method has
* set up the destination directory tree and is only invoking this call on a file.
* Accordingly: <i>implementations MAY skip validation checks</i>
*
* Post Conditions on a successful operation:
* <pre>
* FS' where:
* not exists(FS', source)
* and exists(FS', dest)
* and data(FS', dest) == data (FS, source)
* </pre>
* This is exactly the same outcome as `FileSystem.rename()` when the same preconditions
* are met. This API call simply restricts the operation to file rename with strict
* conditions, (no need to be 'clever' about dest path calculation) and the ability
* to pass in etags, modtimes and file status values.
*
* @param source path to source file
* @param dest destination of rename.
* @param sourceEtag etag of source file. may be null or empty
* @return true if recovery was needed.
* @throws FileNotFoundException source file not found
* @throws PathIOException failure, including source and dest being the same path
* @throws IOException any other exception
*/
Pair<Boolean, Duration> commitSingleFileByRename(
Path source,
Path dest,
@Nullable String sourceEtag) throws IOException;
}
|
ResilientCommitByRename
|
java
|
alibaba__nacos
|
core/src/main/java/com/alibaba/nacos/core/auth/AuthFilter.java
|
{
"start": 1105,
"end": 2333
}
|
class ____ extends AbstractWebAuthFilter {
private final NacosAuthConfig authConfig;
private final InnerApiAuthEnabled innerApiAuthEnabled;
public AuthFilter(NacosAuthConfig authConfig, ControllerMethodsCache methodsCache,
InnerApiAuthEnabled innerApiAuthEnabled) {
super(authConfig, methodsCache);
this.authConfig = authConfig;
this.innerApiAuthEnabled = innerApiAuthEnabled;
}
@Override
protected boolean isAuthEnabled() {
return authConfig.isAuthEnabled();
}
@Override
protected boolean isMatchFilter(Secured secured) {
// ADMIN API use {@link AuthAdminFilter} to handle
return !ApiType.ADMIN_API.equals(secured.apiType());
}
@Override
protected ServerIdentityResult checkServerIdentity(HttpServletRequest request, Secured secured) {
// During Upgrading, Old Nacos server might not with server identity for some Inner API, follow old version logic.
if (ApiType.INNER_API.equals(secured.apiType()) && !innerApiAuthEnabled.isEnabled()) {
return ServerIdentityResult.success();
}
return super.checkServerIdentity(request, secured);
}
}
|
AuthFilter
|
java
|
micronaut-projects__micronaut-core
|
management/src/main/java/io/micronaut/management/endpoint/routes/RoutesEndpoint.java
|
{
"start": 1172,
"end": 2012
}
|
class ____ {
private final Router router;
private final RouteDataCollector<Object> routeDataCollector;
/**
* @param router The {@link Router}
* @param routeDataCollector The {@link RouteDataCollector}
*/
public RoutesEndpoint(Router router, RouteDataCollector<Object> routeDataCollector) {
this.router = router;
this.routeDataCollector = routeDataCollector;
}
/**
* @return The routes data representing the routes.
*/
@Read
@SingleResult
public Object getRoutes() {
Stream<UriRouteInfo<?, ?>> uriRoutes = router.uriRoutes()
.sorted(Comparator.comparing((UriRouteInfo<?, ?> r) -> r.getUriMatchTemplate().toPathString()).thenComparing(UriRouteInfo::getHttpMethodName));
return routeDataCollector.getData(uriRoutes);
}
}
|
RoutesEndpoint
|
java
|
reactor__reactor-core
|
reactor-core/src/main/java/reactor/core/publisher/OnNextFailureStrategy.java
|
{
"start": 1606,
"end": 6580
}
|
interface ____ extends BiFunction<Throwable, Object, Throwable>,
BiPredicate<Throwable, Object> {
/**
* The key that can be used to store an {@link OnNextFailureStrategy} in a {@link Context}.
*/
String KEY_ON_NEXT_ERROR_STRATEGY = "reactor.onNextError.localStrategy";
// BiFunction view of the strategy: process the error/value pair with an
// empty Context (used when no sequence Context is available).
@Override
default @Nullable Throwable apply(Throwable throwable, @Nullable Object o) {
return process(throwable, o, Context.empty());
}
@Override
boolean test(Throwable throwable, @Nullable Object o);
/**
* Process an error and the optional value that caused it (when applicable) in
* preparation for sequence resume, so that the error is not completely swallowed.
* <p>
* If the strategy cannot resume this kind of error (ie. {@link #test(Throwable, Object)}
* returns false), return the original error. Any exception in the processing will be
* caught and returned. If the strategy was able to process the error correctly,
* returns null.
*
* @param error the error being recovered from.
* @param value the value causing the error, null if not applicable.
* @param context the {@link Context} associated with the recovering sequence.
* @return null if the error was processed for resume, a {@link Throwable} to propagate
* otherwise.
* @see #test(Throwable, Object)
*/
@Nullable Throwable process(Throwable error, @Nullable Object value, Context context);
/**
* A strategy that never let any error resume.
*/
static OnNextFailureStrategy stop() {
return STOP;
}
/**
* A strategy that let all error resume. When processing, the error is passed to the
* {@link Operators#onErrorDropped(Throwable, Context)} hook and the incriminating
* source value (if available) is passed to the {@link Operators#onNextDropped(Object, Context)}
* hook, allowing the sequence to continue with further values.
*/
static OnNextFailureStrategy resumeDrop() {
return RESUME_DROP;
}
/**
* A strategy that let some error resume. When processing, the error is passed to the
* {@link Operators#onErrorDropped(Throwable, Context)} hook and the incriminating
* source value (if available) is passed to the {@link Operators#onNextDropped(Object, Context)}
* hook, allowing the sequence to continue with further values.
* <p>
* Any exception thrown by the predicate is thrown as is.
*
* @param causePredicate the predicate to match in order to resume from an error.
*/
static OnNextFailureStrategy resumeDropIf(Predicate<Throwable> causePredicate) {
return new ResumeDropStrategy(causePredicate);
}
/**
* A strategy that let all errors resume. When processing, the error and the
* incriminating source value are passed to custom {@link Consumer Consumers}.
* <p>
* Any exception thrown by the consumers will suppress the original error and be
* returned for propagation. If the original error is fatal then it is thrown
* upon processing it (see {@link Exceptions#throwIfFatal(Throwable)}).
*
* @param errorConsumer the {@link BiConsumer<Throwable, Object>} to process the recovered errors with.
* It must deal with potential {@code null}s.
* @return a new {@link OnNextFailureStrategy} that allows resuming the sequence.
*/
static OnNextFailureStrategy resume(BiConsumer<Throwable, Object> errorConsumer) {
return new ResumeStrategy(null, errorConsumer);
}
/**
* A strategy that let some errors resume. When processing, the error and the
* incriminating source value are passed to custom {@link Consumer Consumers}.
* <p>
* Any exception thrown by the predicate is thrown as is. Any exception thrown by
* the consumers will suppress the original error and be returned for propagation.
* Even if the original error is fatal, if it passes the predicate then it can
* be processed and recovered from (see {@link Exceptions#throwIfFatal(Throwable)}).
*
* @param causePredicate the {@link Predicate} to use to determine if a failure
* should be recovered from.
* @param errorConsumer the {@link BiConsumer<Throwable, Object>} to process the recovered errors with.
* It must deal with potential {@code null}s.
* @return a new {@link OnNextFailureStrategy} that allows resuming the sequence.
*/
static OnNextFailureStrategy resumeIf(
Predicate<Throwable> causePredicate,
BiConsumer<Throwable, Object> errorConsumer) {
return new ResumeStrategy(causePredicate, errorConsumer);
}
//==== IMPLEMENTATIONS ====
/**
 * Shared strategy instance that never resumes: {@code test} always returns
 * {@code false}, and {@code process} rethrows fatal errors, otherwise wraps
 * the error in an {@link IllegalStateException} (original suppressed).
 */
OnNextFailureStrategy STOP = new OnNextFailureStrategy() {
@Override
public boolean test(Throwable error, @Nullable Object value) {
return false;
}
@Override
public Throwable process(Throwable error, @Nullable Object value, Context context) {
Exceptions.throwIfFatal(error);
Throwable iee = new IllegalStateException("STOP strategy cannot process errors");
iee.addSuppressed(error);
return iee;
}
};
OnNextFailureStrategy RESUME_DROP = new ResumeDropStrategy(null);
final
|
OnNextFailureStrategy
|
java
|
google__gson
|
gson/src/test/java/com/google/gson/internal/ConstructorConstructorTest.java
|
{
"start": 3648,
"end": 4834
}
|
/**
 * {@link ArrayList} subclass whose only constructor takes an argument,
 * so it has no accessible no-args constructor.
 */
class ____<E> extends ArrayList<E> {
// Removes default no-args constructor
@SuppressWarnings("unused")
CustomList(Void v) {}
}
/**
 * Tests that creation of custom {@code Collection} subclasses without no-args constructor should
 * not use default JDK types (which would cause {@link ClassCastException}).
 *
 * <p>Currently this test is rather contrived because the instances created using Unsafe are not
 * usable because their fields are not properly initialized, but assume that user has custom
 * classes which would be functional.
 */
@Test
public void testCustomCollectionCreation() {
Class<?>[] typesUnderTest = {
CustomSortedSet.class, CustomSet.class, CustomQueue.class, CustomList.class,
};
for (Class<?> type : typesUnderTest) {
// Ask the constructor machinery to instantiate e.g. CustomList<Integer>.
Object created =
constructorConstructor.get(TypeToken.getParameterized(type, Integer.class)).construct();
String failureMessage =
"Failed for " + type + "; created instance of " + created.getClass();
assertWithMessage(failureMessage).that(created).isInstanceOf(type);
}
}
private static
|
CustomList
|
java
|
alibaba__nacos
|
persistence/src/test/java/com/alibaba/nacos/persistence/configuration/condition/ConditionStandaloneEmbedStorageTest.java
|
{
"start": 1206,
"end": 2985
}
|
/**
 * Unit test for {@code ConditionStandaloneEmbedStorage}: the condition must
 * match only when the server runs standalone AND uses embedded storage.
 */
class ____ {

@Mock
ConditionContext context;

@Mock
AnnotatedTypeMetadata metadata;

private ConditionStandaloneEmbedStorage conditionStandaloneEmbedStorage;

@BeforeEach
void init() {
conditionStandaloneEmbedStorage = new ConditionStandaloneEmbedStorage();
}

@Test
void testMatches() {
// Use try-with-resources so the static mocks are always deregistered, even
// when an assertion fails; manual close() would be skipped on failure and
// leak the mocked statics into other tests on the same thread.
try (MockedStatic<DatasourceConfiguration> propertyUtilMockedStatic = Mockito.mockStatic(DatasourceConfiguration.class);
MockedStatic<EnvUtil> envUtilMockedStatic = Mockito.mockStatic(EnvUtil.class)) {
// embedded + standalone -> matches
propertyUtilMockedStatic.when(DatasourceConfiguration::isEmbeddedStorage).thenReturn(true);
envUtilMockedStatic.when(EnvUtil::getStandaloneMode).thenReturn(true);
assertTrue(conditionStandaloneEmbedStorage.matches(context, metadata));
// embedded + clustered -> no match
propertyUtilMockedStatic.when(DatasourceConfiguration::isEmbeddedStorage).thenReturn(true);
envUtilMockedStatic.when(EnvUtil::getStandaloneMode).thenReturn(false);
assertFalse(conditionStandaloneEmbedStorage.matches(context, metadata));
// external storage + standalone -> no match
propertyUtilMockedStatic.when(DatasourceConfiguration::isEmbeddedStorage).thenReturn(false);
envUtilMockedStatic.when(EnvUtil::getStandaloneMode).thenReturn(true);
assertFalse(conditionStandaloneEmbedStorage.matches(context, metadata));
// external storage + clustered -> no match
propertyUtilMockedStatic.when(DatasourceConfiguration::isEmbeddedStorage).thenReturn(false);
envUtilMockedStatic.when(EnvUtil::getStandaloneMode).thenReturn(false);
assertFalse(conditionStandaloneEmbedStorage.matches(context, metadata));
}
}
}
|
ConditionStandaloneEmbedStorageTest
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/headers/UriUserInfoTest.java
|
{
"start": 2423,
"end": 2684
}
|
interface ____ {
// No Authorization header is set by the client itself for this call.
@GET
String call();
// Authorization header supplied declaratively via @ClientHeaderParam.
@ClientHeaderParam(name = "Authorization", value = "whatever")
@GET
String call2();
// Authorization header supplied explicitly by the caller per invocation.
@GET
String call3(@HeaderParam("Authorization") String authorization);
}
}
|
Client
|
java
|
jhy__jsoup
|
src/test/java/org/jsoup/parser/AttributeParseTest.java
|
{
"start": 425,
"end": 4045
}
|
/**
 * Tests for jsoup's HTML attribute parsing: quoting styles, boolean/empty
 * attributes, entity unescaping, and edge cases in attribute names.
 */
class ____ {
@Test public void parsesRoughAttributeString() {
String html = "<a id=\"123\" class=\"baz = 'bar'\" style = 'border: 2px'qux zim foo = 12 mux=18 />";
// should be: <id=123>, <class=baz = 'bar'>, <qux=>, <zim=>, <foo=12>, <mux.=18>
Element el = Jsoup.parse(html).getElementsByTag("a").get(0);
Attributes attr = el.attributes();
assertEquals(7, attr.size());
assertEquals("123", attr.get("id"));
assertEquals("baz = 'bar'", attr.get("class"));
assertEquals("border: 2px", attr.get("style"));
assertEquals("", attr.get("qux"));
assertEquals("", attr.get("zim"));
assertEquals("12", attr.get("foo"));
assertEquals("18", attr.get("mux"));
}
@Test public void handlesNewLinesAndReturns() {
String html = "<a\r\nfoo='bar\r\nqux'\r\nbar\r\n=\r\ntwo>One</a>";
Element el = Jsoup.parse(html).select("a").first();
assertEquals(2, el.attributes().size());
assertEquals("bar\r\nqux", el.attr("foo")); // currently preserves newlines in quoted attributes. todo confirm if should.
assertEquals("two", el.attr("bar"));
}
@Test public void parsesEmptyString() {
String html = "<a />";
Element el = Jsoup.parse(html).getElementsByTag("a").get(0);
Attributes attr = el.attributes();
assertEquals(0, attr.size());
}
@Test public void canStartWithEq() {
String html = "<a =empty />";
// TODO this is the weirdest thing in the spec - why not consider this an attribute with an empty name, not where name is '='?
// am I reading it wrong? https://html.spec.whatwg.org/multipage/parsing.html#before-attribute-name-state
Element el = Jsoup.parse(html).getElementsByTag("a").get(0);
Attributes attr = el.attributes();
assertEquals(1, attr.size());
assertTrue(attr.hasKey("=empty"));
assertEquals("", attr.get("=empty"));
}
// Entity references without a trailing ';' must not be unescaped inside attributes.
@Test public void strictAttributeUnescapes() {
String html = "<a id=1 href='?foo=bar&mid<=true'>One</a> <a id=2 href='?foo=bar<qux&lg=1'>Two</a>";
Elements els = Jsoup.parse(html).select("a");
assertEquals("?foo=bar&mid<=true", els.first().attr("href"));
assertEquals("?foo=bar<qux&lg=1", els.last().attr("href"));
}
@Test public void moreAttributeUnescapes() {
String html = "<a href='&wr_id=123&mid-size=true&ok=&wr'>Check</a>";
Elements els = Jsoup.parse(html).select("a");
assertEquals("&wr_id=123&mid-size=true&ok=&wr", els.first().attr("href"));
}
@Test public void parsesBooleanAttributes() {
String html = "<a normal=\"123\" boolean empty=\"\"></a>";
Element el = Jsoup.parse(html).select("a").first();
assertEquals("123", el.attr("normal"));
assertEquals("", el.attr("boolean"));
assertEquals("", el.attr("empty"));
List<Attribute> attributes = el.attributes().asList();
assertEquals(3, attributes.size(), "There should be 3 attribute present");
assertEquals(html, el.outerHtml()); // vets boolean syntax
}
// HTML parser drops the leading '/', XML parser keeps self-closing syntax.
@Test public void dropsSlashFromAttributeName() {
String html = "<img /onerror='doMyJob'/>";
Document doc = Jsoup.parse(html);
assertFalse(doc.select("img[onerror]").isEmpty(), "SelfClosingStartTag ignores last character");
assertEquals("<img onerror=\"doMyJob\">", doc.body().html());
doc = Jsoup.parse(html, "", Parser.xmlParser());
assertEquals("<img onerror=\"doMyJob\" />", doc.html());
}
}
|
AttributeParseTest
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
|
{
"start": 9771,
"end": 95198
}
|
interface ____ the Namenode. */
private FileSystem nnFS;
/** File in the Router. */
private String routerFile;
/** File in the Namenode. */
private String nnFile;
@BeforeAll
public static void globalSetUp() throws Exception {
// Start routers with only an RPC service
Configuration routerConf = new RouterConfigBuilder()
.metrics()
.rpc()
.build();
// We decrease the DN cache times to make the test faster
routerConf.setTimeDuration(
RBFConfigKeys.DN_REPORT_CACHE_EXPIRE, 1, TimeUnit.SECONDS);
setUp(routerConf);
}
/**
 * Start a mini DFS federation (NUM_SUBCLUSTERS nameservices, NUM_DNS
 * datanodes each) with routers on top, register all NNs with all routers,
 * and shorten DN heartbeat/expiry so DN death is detected quickly in tests.
 *
 * @param routerConf extra configuration applied to the routers.
 * @throws Exception if the cluster or routers fail to start.
 */
public static void setUp(Configuration routerConf) throws Exception {
Configuration namenodeConf = new Configuration();
namenodeConf.setBoolean(DFSConfigKeys.HADOOP_CALLER_CONTEXT_ENABLED_KEY,
true);
namenodeConf.set(HADOOP_CALLER_CONTEXT_MAX_SIZE_KEY, "256");
// It's very easy to become overloaded for some specific dn in this small
// cluster, which will cause the EC file block allocation failure. To avoid
// this issue, we disable considerLoad option.
namenodeConf.setBoolean(DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false);
namenodeConf.setBoolean(DFS_NAMENODE_AUDIT_LOG_WITH_REMOTE_PORT_KEY, true);
cluster = new MiniRouterDFSCluster(false, NUM_SUBCLUSTERS);
cluster.setNumDatanodesPerNameservice(NUM_DNS);
cluster.addNamenodeOverrides(namenodeConf);
cluster.setIndependentDNs();
Configuration conf = new Configuration();
// Setup proxy users.
conf.set("hadoop.proxyuser.testRealUser.groups", "*");
conf.set("hadoop.proxyuser.testRealUser.hosts", "*");
String loginUser = UserGroupInformation.getLoginUser().getUserName();
conf.set(String.format("hadoop.proxyuser.%s.groups", loginUser), "*");
conf.set(String.format("hadoop.proxyuser.%s.hosts", loginUser), "*");
// Enable IP proxy users.
conf.set(DFSConfigKeys.DFS_NAMENODE_IP_PROXY_USERS, "placeholder");
conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 5);
cluster.addNamenodeOverrides(conf);
// Start NNs and DNs and wait until ready
cluster.startCluster();
cluster.addRouterOverrides(routerConf);
cluster.startRouters();
// Register and verify all NNs with all routers
cluster.registerNamenodes();
cluster.waitNamenodeRegistration();
// We decrease the DN heartbeat expire interval to make them dead faster
cluster.getCluster().getNamesystem(0).getBlockManager()
.getDatanodeManager().setHeartbeatInterval(1);
cluster.getCluster().getNamesystem(1).getBlockManager()
.getDatanodeManager().setHeartbeatInterval(1);
cluster.getCluster().getNamesystem(0).getBlockManager()
.getDatanodeManager().setHeartbeatExpireInterval(3000);
cluster.getCluster().getNamesystem(1).getBlockManager()
.getDatanodeManager().setHeartbeatExpireInterval(3000);
}
@AfterEach
public void cleanup() {
// clear client context
CallerContext.setCurrent(null);
}
@AfterAll
public static void tearDown() {
cluster.shutdown();
}
@BeforeEach
public void testSetup() throws Exception {
// Create mock locations
cluster.installMockLocations();
// Delete all files via the NNs and verify
cluster.deleteAllFiles();
// Create test fixtures on NN
cluster.createTestDirectoriesNamenode();
// Wait to ensure NN has fully created its test directories
Thread.sleep(100);
// Random router for this test
RouterContext rndRouter = cluster.getRandomRouter();
this.setRouter(rndRouter);
// Pick a namenode for this test
String ns0 = cluster.getNameservices().get(0);
this.setNs(ns0);
this.setNamenode(cluster.getNamenode(ns0, null));
// Create a test file on the NN
Random rnd = new Random();
String randomFile = "testfile-" + rnd.nextInt();
this.nnFile =
cluster.getNamenodeTestDirectoryForNS(ns) + "/" + randomFile;
this.routerFile =
cluster.getFederatedTestDirectoryForNS(ns) + "/" + randomFile;
createFile(nnFS, nnFile, 32);
verifyFileExists(nnFS, nnFile);
}
@Test
public void testRpcService() throws IOException {
Router testRouter = new Router();
List<String> nss = cluster.getNameservices();
String ns0 = nss.get(0);
Configuration routerConfig = cluster.generateRouterConfiguration(ns0, null);
RouterRpcServer server = new RouterRpcServer(routerConfig, testRouter,
testRouter.getNamenodeResolver(), testRouter.getSubclusterResolver());
server.init(routerConfig);
assertEquals(STATE.INITED, server.getServiceState());
server.start();
assertEquals(STATE.STARTED, server.getServiceState());
server.stop();
assertEquals(STATE.STOPPED, server.getServiceState());
server.close();
testRouter.close();
}
protected MiniRouterDFSCluster getCluster() {
return TestRouterRpc.cluster;
}
protected RouterContext getRouterContext() {
return this.router;
}
protected void setRouter(RouterContext r)
throws IOException, URISyntaxException {
this.router = r;
this.routerProtocol = r.getClient().getNamenode();
this.routerFS = r.getFileSystem();
this.routerNamenodeProtocol = NameNodeProxies.createProxy(router.getConf(),
router.getFileSystem().getUri(), NamenodeProtocol.class).getProxy();
}
protected FileSystem getRouterFileSystem() {
return this.routerFS;
}
protected FileSystem getNamenodeFileSystem() {
return this.nnFS;
}
protected ClientProtocol getRouterProtocol() {
return this.routerProtocol;
}
protected ClientProtocol getNamenodeProtocol() {
return this.nnProtocol;
}
protected NamenodeContext getNamenode() {
return this.namenode;
}
protected void setNamenodeFile(String filename) {
this.nnFile = filename;
}
protected String getNamenodeFile() {
return this.nnFile;
}
protected void setRouterFile(String filename) {
this.routerFile = filename;
}
protected String getRouterFile() {
return this.routerFile;
}
protected void setNamenode(NamenodeContext nn)
throws IOException, URISyntaxException {
this.namenode = nn;
this.nnProtocol = nn.getClient().getNamenode();
this.nnFS = nn.getFileSystem();
// Namenode from the default namespace
String ns0 = cluster.getNameservices().get(0);
NamenodeContext nn0 = cluster.getNamenode(ns0, null);
this.nnNamenodeProtocol = NameNodeProxies.createProxy(nn0.getConf(),
nn0.getFileSystem().getUri(), NamenodeProtocol.class).getProxy();
// Namenode from the other namespace
String ns1 = cluster.getNameservices().get(1);
NamenodeContext nn1 = cluster.getNamenode(ns1, null);
this.nnNamenodeProtocol1 = NameNodeProxies.createProxy(nn1.getConf(),
nn1.getFileSystem().getUri(), NamenodeProtocol.class).getProxy();
}
protected String getNs() {
return this.ns;
}
protected void setNs(String nameservice) {
this.ns = nameservice;
}
/**
 * Invoke the same method on two {@link ClientProtocol} implementations
 * (typically the Router and a Namenode) and verify both return the same
 * value, and if they throw, fail with the same exception cause class.
 *
 * @param protocol1 first protocol to invoke.
 * @param protocol2 second protocol to invoke.
 * @param m method invoked reflectively on both protocols.
 * @param paramList arguments passed to the method.
 */
protected static void compareResponses(
ClientProtocol protocol1, ClientProtocol protocol2,
Method m, Object[] paramList) {
Object return1 = null;
Exception exception1 = null;
try {
return1 = m.invoke(protocol1, paramList);
} catch (Exception ex) {
exception1 = ex;
}
Object return2 = null;
Exception exception2 = null;
try {
return2 = m.invoke(protocol2, paramList);
} catch (Exception ex) {
exception2 = ex;
}
assertEquals(return1, return2);
if (exception1 == null && exception2 == null) {
return;
}
// Fail with a clear message instead of an NPE below when only one of the
// two protocols threw an exception.
assertTrue(exception1 != null && exception2 != null,
"Only one protocol threw an exception: "
+ (exception1 != null ? exception1 : exception2));
assertEquals(
exception1.getCause().getClass(),
exception2.getCause().getClass());
}
@Test
public void testProxyListFiles() throws IOException, InterruptedException,
URISyntaxException, NoSuchMethodException, SecurityException {
// Verify that the root listing is a union of the mount table destinations
// and the files stored at all nameservices mounted at the root (ns0 + ns1)
//
// / -->
// /ns0 (from mount table)
// /ns1 (from mount table)
// all items in / of ns0 (default NS)
// Collect the mount table entries from the root mount point
Set<String> requiredPaths = new TreeSet<>();
FileSubclusterResolver fileResolver =
router.getRouter().getSubclusterResolver();
for (String mount : fileResolver.getMountPoints("/")) {
requiredPaths.add(mount);
}
// Collect all files/dirs on the root path of the default NS
String defaultNs = cluster.getNameservices().get(0);
NamenodeContext nn = cluster.getNamenode(defaultNs, null);
FileStatus[] iterator = nn.getFileSystem().listStatus(new Path("/"));
for (FileStatus file : iterator) {
requiredPaths.add(file.getPath().getName());
}
// Fetch listing
DirectoryListing listing =
routerProtocol.getListing("/", HdfsFileStatus.EMPTY_NAME, false);
Iterator<String> requiredPathsIterator = requiredPaths.iterator();
// Match each path returned and verify order returned
for(HdfsFileStatus f : listing.getPartialListing()) {
String fileName = requiredPathsIterator.next();
String currentFile = f.getFullPath(new Path("/")).getName();
assertEquals(currentFile, fileName);
}
// Verify the total number of results found/matched
assertEquals(requiredPaths.size(), listing.getPartialListing().length);
// List a path that doesn't exist and validate error response with NN
// behavior.
Method m = ClientProtocol.class.getMethod(
"getListing", String.class, byte[].class, boolean.class);
String badPath = "/unknownlocation/unknowndir";
compareResponses(routerProtocol, nnProtocol, m,
new Object[] {badPath, HdfsFileStatus.EMPTY_NAME, false});
}
@Test
public void testProxyListFilesLargeDir() throws IOException {
// Call listStatus against a dir with many files
// Create a parent point as well as a subfolder mount
// /parent
// ns0 -> /parent
// /parent/file-7
// ns0 -> /parent/file-7
// /parent/file-0
// ns0 -> /parent/file-0
for (RouterContext rc : cluster.getRouters()) {
MockResolver resolver =
(MockResolver) rc.getRouter().getSubclusterResolver();
resolver.addLocation("/parent", ns, "/parent");
// file-0 is only in mount table
resolver.addLocation("/parent/file-0", ns, "/parent/file-0");
// file-7 is both in mount table and in file system
resolver.addLocation("/parent/file-7", ns, "/parent/file-7");
}
// Test the case when there is no subcluster path and only mount point
FileStatus[] result = routerFS.listStatus(new Path("/parent"));
assertEquals(2, result.length);
// this makes sure file[0-8] is added in order
assertEquals("file-0", result[0].getPath().getName());
assertEquals("file-7", result[1].getPath().getName());
// Create files and test full listing in order
NamenodeContext nn = cluster.getNamenode(ns, null);
FileSystem nnFileSystem = nn.getFileSystem();
for (int i = 1; i < 9; i++) {
createFile(nnFileSystem, "/parent/file-"+i, 32);
}
result = routerFS.listStatus(new Path("/parent"));
assertEquals(9, result.length);
// this makes sure file[0-8] is added in order
for (int i = 0; i < 9; i++) {
assertEquals("file-"+i, result[i].getPath().getName());
}
// Add file-9 and now this listing will be added from mount point
for (RouterContext rc : cluster.getRouters()) {
MockResolver resolver =
(MockResolver) rc.getRouter().getSubclusterResolver();
resolver.addLocation("/parent/file-9", ns, "/parent/file-9");
}
assertFalse(verifyFileExists(nnFileSystem, "/parent/file-9"));
result = routerFS.listStatus(new Path("/parent"));
// file-9 will be added by mount point
assertEquals(10, result.length);
for (int i = 0; i < 10; i++) {
assertEquals("file-"+i, result[i].getPath().getName());
}
}
@Test
public void testProxyListFilesWithConflict()
throws IOException, InterruptedException {
// Add a directory to the namespace that conflicts with a mount point
NamenodeContext nn = cluster.getNamenode(ns, null);
FileSystem nnFs = nn.getFileSystem();
addDirectory(nnFs, cluster.getFederatedTestDirectoryForNS(ns));
FileSystem routerFs = router.getFileSystem();
int initialCount = countContents(routerFs, "/");
// Root file system now for NS X:
// / ->
// /ns0 (mount table)
// /ns1 (mount table)
// /target-ns0 (the target folder for the NS0 mapped to /
// /nsX (local directory that duplicates mount table)
int newCount = countContents(routerFs, "/");
assertEquals(initialCount, newCount);
// Verify that each root path is readable and contains one test directory
assertEquals(1, countContents(routerFs, cluster.getFederatedPathForNS(ns)));
// Verify that real folder for the ns contains a single test directory
assertEquals(1, countContents(nnFs, cluster.getNamenodePathForNS(ns)));
}
/**
 * Create {@code filename} via the given router, rename it to
 * {@code renamedFile}, and check the outcome against the expectation.
 * Cleans up the remaining file either way.
 *
 * @param testRouter router used for the operations.
 * @param filename source path to create and rename.
 * @param renamedFile destination path of the rename.
 * @param exceptionExpected whether the rename is expected to fail.
 * @throws IOException if setup or cleanup fails.
 */
protected void testRename(RouterContext testRouter, String filename,
String renamedFile, boolean exceptionExpected) throws IOException {
createFile(testRouter.getFileSystem(), filename, 32);
// verify
verifyFileExists(testRouter.getFileSystem(), filename);
// rename
boolean exceptionThrown = false;
try {
DFSClient client = testRouter.getClient();
ClientProtocol clientProtocol = client.getNamenode();
clientProtocol.rename(filename, renamedFile);
} catch (Exception ex) {
exceptionThrown = true;
}
// Check the outcome before cleaning up, consistent with testRename2:
// this also catches an unexpected failure in the success case, which the
// previous structure only reported indirectly.
assertEquals(exceptionExpected, exceptionThrown);
if (exceptionExpected) {
// Error was expected; the source file is still there, delete it.
FileContext fileContext = testRouter.getFileContext();
assertTrue(fileContext.delete(new Path(filename), true));
} else {
// verify the rename took effect
assertTrue(verifyFileExists(testRouter.getFileSystem(), renamedFile));
// delete the renamed file
FileContext fileContext = testRouter.getFileContext();
assertTrue(fileContext.delete(new Path(renamedFile), true));
}
}
protected void testRename2(RouterContext testRouter, String filename,
String renamedFile, boolean exceptionExpected) throws IOException {
createFile(testRouter.getFileSystem(), filename, 32);
// verify
verifyFileExists(testRouter.getFileSystem(), filename);
// rename
boolean exceptionThrown = false;
try {
DFSClient client = testRouter.getClient();
ClientProtocol clientProtocol = client.getNamenode();
clientProtocol.rename2(filename, renamedFile, new Options.Rename[] {});
} catch (Exception ex) {
exceptionThrown = true;
}
assertEquals(exceptionExpected, exceptionThrown);
if (exceptionExpected) {
// Error was expected
FileContext fileContext = testRouter.getFileContext();
assertTrue(fileContext.delete(new Path(filename), true));
} else {
// verify
assertTrue(verifyFileExists(testRouter.getFileSystem(), renamedFile));
// delete
FileContext fileContext = testRouter.getFileContext();
assertTrue(fileContext.delete(new Path(renamedFile), true));
}
}
@Test
public void testProxyRenameFiles() throws IOException, InterruptedException {
Thread.sleep(5000);
List<String> nss = cluster.getNameservices();
String ns0 = nss.get(0);
String ns1 = nss.get(1);
// Rename within the same namespace
// /ns0/testdir/testrename -> /ns0/testdir/testrename-append
String filename =
cluster.getFederatedTestDirectoryForNS(ns0) + "/testrename";
String renamedFile = filename + "-append";
testRename(router, filename, renamedFile, false);
testRename2(router, filename, renamedFile, false);
// Rename a file to a destination that is in a different namespace (fails)
filename = cluster.getFederatedTestDirectoryForNS(ns0) + "/testrename";
renamedFile = cluster.getFederatedTestDirectoryForNS(ns1) + "/testrename";
testRename(router, filename, renamedFile, true);
testRename2(router, filename, renamedFile, true);
}
/**
 * Change a file's owner/group through the Router and verify the change is
 * visible directly on the Namenode; then check the Router's error response
 * for a bad path matches the Namenode's.
 */
@Test
public void testProxyChownFiles() throws Exception {
String newUsername = "TestUser";
String newGroup = "TestGroup";
// change owner via the Router
routerProtocol.setOwner(routerFile, newUsername, newGroup);
// Verify with NN. JUnit convention: expected value first, actual second,
// so a failure message reads correctly (the original had them swapped).
FileStatus file = getFileStatus(namenode.getFileSystem(), nnFile);
assertEquals(newUsername, file.getOwner());
assertEquals(newGroup, file.getGroup());
// Bad request and validate router response matches NN response.
Method m = ClientProtocol.class.getMethod("setOwner", String.class,
String.class, String.class);
String badPath = "/unknownlocation/unknowndir";
compareResponses(routerProtocol, nnProtocol, m,
new Object[] {badPath, newUsername, newGroup});
}
/**
 * The Router's aggregated stats must converge to the sum of the per-NN
 * stats; retry via waitFor because the mini cluster lags briefly.
 */
@Test
public void testProxyGetStats() throws Exception {
// Some of the statistics are out of sync because of the mini cluster
Supplier<Boolean> check = () -> {
try {
long[] combinedData = routerProtocol.getStats();
long[] individualData = getAggregateStats();
int len = Math.min(combinedData.length, individualData.length);
for (int i = 0; i < len; i++) {
if (combinedData[i] != individualData[i]) {
LOG.error("Stats for {} don't match: {} != {}",
i, combinedData[i], individualData[i]);
return false;
}
}
return true;
} catch (Exception e) {
LOG.error("Cannot get stats: {}", e.getMessage());
return false;
}
};
GenericTestUtils.waitFor(check, 500, 5 * 1000);
}
/**
* Get the sum of each subcluster statistics.
* @return Aggregated statistics.
* @throws Exception If it cannot get the stats from the Router or Namenode.
*/
private long[] getAggregateStats() throws Exception {
long[] individualData = new long[10];
for (String nameservice : cluster.getNameservices()) {
NamenodeContext n = cluster.getNamenode(nameservice, null);
DFSClient client = n.getClient();
ClientProtocol clientProtocol = client.getNamenode();
long[] data = clientProtocol.getStats();
for (int i = 0; i < data.length; i++) {
individualData[i] += data[i];
}
}
return individualData;
}
@Test
public void testProxyGetDatanodeReport() throws Exception {
DatanodeInfo[] combinedData =
routerProtocol.getDatanodeReport(DatanodeReportType.ALL);
assertEquals(0, routerProtocol.getSlowDatanodeReport().length);
final Map<Integer, String> routerDNMap = new TreeMap<>();
for (DatanodeInfo dn : combinedData) {
String subcluster = dn.getNetworkLocation().split("/")[1];
routerDNMap.put(dn.getXferPort(), subcluster);
}
final Map<Integer, String> nnDNMap = new TreeMap<>();
for (String nameservice : cluster.getNameservices()) {
NamenodeContext n = cluster.getNamenode(nameservice, null);
DFSClient client = n.getClient();
ClientProtocol clientProtocol = client.getNamenode();
DatanodeInfo[] data =
clientProtocol.getDatanodeReport(DatanodeReportType.ALL);
for (int i = 0; i < data.length; i++) {
// Collect unique DNs based on their xfer port
DatanodeInfo info = data[i];
nnDNMap.put(info.getXferPort(), nameservice);
}
}
assertEquals(nnDNMap, routerDNMap);
}
@Test
public void testProxyGetDatanodeStorageReport()
throws IOException, InterruptedException, URISyntaxException {
DatanodeStorageReport[] combinedData =
routerProtocol.getDatanodeStorageReport(DatanodeReportType.ALL);
Set<String> individualData = new HashSet<>();
for (String nameservice : cluster.getNameservices()) {
NamenodeContext n = cluster.getNamenode(nameservice, null);
DFSClient client = n.getClient();
ClientProtocol clientProtocol = client.getNamenode();
DatanodeStorageReport[] data =
clientProtocol.getDatanodeStorageReport(DatanodeReportType.ALL);
for (DatanodeStorageReport report : data) {
// Determine unique DN instances
DatanodeInfo dn = report.getDatanodeInfo();
individualData.add(dn.toString());
}
}
assertEquals(combinedData.length, individualData.size());
}
@Test
public void testProxyMkdir() throws Exception {
// Check the initial folders
FileStatus[] filesInitial = routerFS.listStatus(new Path("/"));
// Create a directory via the router at the root level
String dirPath = "/testdir";
FsPermission permission = new FsPermission("705");
routerProtocol.mkdirs(dirPath, permission, false);
// Verify the root listing has the item via the router
FileStatus[] files = routerFS.listStatus(new Path("/"));
assertEquals(filesInitial.length + 1, files.length, Arrays.toString(files) + " should be " +
Arrays.toString(filesInitial) + " + " + dirPath);
assertTrue(verifyFileExists(routerFS, dirPath));
// Verify the directory is present in only 1 Namenode
int foundCount = 0;
for (NamenodeContext n : cluster.getNamenodes()) {
if (verifyFileExists(n.getFileSystem(), dirPath)) {
foundCount++;
}
}
assertEquals(1, foundCount);
assertTrue(deleteFile(routerFS, dirPath));
// Validate router failure response matches NN failure response.
Method m = ClientProtocol.class.getMethod("mkdirs", String.class,
FsPermission.class, boolean.class);
String badPath = "/unknownlocation/unknowndir";
compareResponses(routerProtocol, nnProtocol, m,
new Object[] {badPath, permission, false});
}
@Test
public void testProxyChmodFiles() throws Exception {
FsPermission permission = new FsPermission("444");
// change permissions
routerProtocol.setPermission(routerFile, permission);
// Validate permissions NN
FileStatus file = getFileStatus(namenode.getFileSystem(), nnFile);
assertEquals(permission, file.getPermission());
// Validate router failure response matches NN failure response.
Method m = ClientProtocol.class.getMethod(
"setPermission", String.class, FsPermission.class);
String badPath = "/unknownlocation/unknowndir";
compareResponses(routerProtocol, nnProtocol, m,
new Object[] {badPath, permission});
}
@Test
public void testProxySetReplication() throws Exception {
// The test file starts with a single replica (checked via the NN).
FileStatus before = getFileStatus(nnFS, nnFile);
assertEquals(1, before.getReplication());
// Raise the replication factor through the router and confirm the
// namenode observed the change.
routerProtocol.setReplication(routerFile, (short) 2);
FileStatus after = getFileStatus(nnFS, nnFile);
assertEquals(2, after.getReplication());
// A path that cannot be resolved must fail identically through the
// router and through the namenode.
Method setReplication = ClientProtocol.class.getMethod(
"setReplication", String.class, short.class);
String unknownPath = "/unknownlocation/unknowndir";
compareResponses(routerProtocol, nnProtocol, setReplication,
new Object[] {unknownPath, (short) 2});
}
@Test
public void testProxyTruncateFile() throws Exception {
// The test file must have content before the truncate.
FileStatus before = getFileStatus(nnFS, nnFile);
assertTrue(before.getLen() > 0);
// Truncate the file to zero bytes through the router and verify the
// namenode sees the empty file.
routerProtocol.truncate(routerFile, 0, "testclient");
FileStatus after = getFileStatus(nnFS, nnFile);
assertEquals(0, after.getLen());
// A path that cannot be resolved must fail identically through the
// router and through the namenode.
Method truncate = ClientProtocol.class.getMethod(
"truncate", String.class, long.class, String.class);
String unknownPath = "/unknownlocation/unknowndir";
compareResponses(routerProtocol, nnProtocol, truncate,
new Object[] {unknownPath, (long) 0, "testclient"});
}
@Test
public void testAllowDisallowSnapshots() throws Exception {
// Create a directory via the router at the root level
String dirPath = "/testdir";
String filePath1 = "/sample";
FsPermission permission = new FsPermission("705");
routerProtocol.mkdirs(dirPath, permission, false);
createFile(routerFS, filePath1, 32);
// Check that initially doesn't allow snapshots
NamenodeContext nnContext = cluster.getNamenodes().get(0);
NameNode nn = nnContext.getNamenode();
FSNamesystem fsn = NameNodeAdapter.getNamesystem(nn);
FSDirectory fsdir = fsn.getFSDirectory();
INodeDirectory dirNode = fsdir.getINode4Write(dirPath).asDirectory();
assertFalse(dirNode.isSnapshottable());
// Allow snapshots and verify the folder allows them
// (use dirPath instead of repeating the "/testdir" literal)
routerProtocol.allowSnapshot(dirPath);
dirNode = fsdir.getINode4Write(dirPath).asDirectory();
assertTrue(dirNode.isSnapshottable());
// Disallow snapshot on dir and verify does not allow snapshots anymore
routerProtocol.disallowSnapshot(dirPath);
dirNode = fsdir.getINode4Write(dirPath).asDirectory();
assertFalse(dirNode.isSnapshottable());
// Cleanup both the directory and the sample file created above so the
// test leaves no residue behind
routerProtocol.delete(dirPath, true);
routerProtocol.delete(filePath1, true);
}
/**
 * Exercise the full snapshot lifecycle (allow, create, rename, delete)
 * through the router on a dedicated mount point, verifying each step via
 * the router filesystem.
 */
@Test
public void testManageSnapshot() throws Exception {
final String mountPoint = "/mntsnapshot";
final String snapshotFolder = mountPoint + "/folder";
LOG.info("Setup a mount point for snapshots: {}", mountPoint);
Router r = router.getRouter();
MockResolver resolver = (MockResolver) r.getSubclusterResolver();
String ns0 = cluster.getNameservices().get(0);
resolver.addLocation(mountPoint, ns0, "/");
FsPermission permission = new FsPermission("777");
routerProtocol.mkdirs(snapshotFolder, permission, false);
try {
// Populate the folder so the snapshot has content.
for (int i = 1; i <= 9; i++) {
String folderPath = snapshotFolder + "/subfolder" + i;
routerProtocol.mkdirs(folderPath, permission, false);
}
LOG.info("Create the snapshot: {}", snapshotFolder);
routerProtocol.allowSnapshot(snapshotFolder);
String snapshotName =
routerProtocol.createSnapshot(snapshotFolder, "snap");
assertEquals(snapshotFolder + "/.snapshot/snap", snapshotName);
assertTrue(
verifyFileExists(routerFS, snapshotFolder + "/.snapshot/snap"));
LOG.info("Rename the snapshot and check it changed");
routerProtocol.renameSnapshot(snapshotFolder, "snap", "newsnap");
assertFalse(
verifyFileExists(routerFS, snapshotFolder + "/.snapshot/snap"));
assertTrue(
verifyFileExists(routerFS, snapshotFolder + "/.snapshot/newsnap"));
// Deleting under the old (renamed-away) name must fail.
LambdaTestUtils.intercept(SnapshotException.class,
"Cannot delete snapshot snap from path " + snapshotFolder + ":",
() -> routerFS.deleteSnapshot(new Path(snapshotFolder), "snap"));
LOG.info("Delete the snapshot and check it is not there");
routerProtocol.deleteSnapshot(snapshotFolder, "newsnap");
assertFalse(
verifyFileExists(routerFS, snapshotFolder + "/.snapshot/newsnap"));
} finally {
// Cleanup
assertTrue(routerProtocol.delete(snapshotFolder, true));
assertTrue(resolver.removeLocation(mountPoint, ns0, "/"));
}
}
/**
 * Verify snapshot listing and snapshot-diff reporting through the router:
 * two snapshots are taken around a directory creation, so the diff between
 * them must contain exactly that change.
 */
@Test
public void testGetSnapshotListing() throws IOException {
// Create a directory via the router and allow snapshots
final String snapshotPath = "/testGetSnapshotListing";
final String childDir = snapshotPath + "/subdir";
FsPermission permission = new FsPermission("705");
routerProtocol.mkdirs(snapshotPath, permission, false);
routerProtocol.allowSnapshot(snapshotPath);
// Create two snapshots, with childDir created between them so the
// snapshots differ by exactly one directory.
final String snapshot1 = "snap1";
final String snapshot2 = "snap2";
routerProtocol.createSnapshot(snapshotPath, snapshot1);
routerProtocol.mkdirs(childDir, permission, false);
routerProtocol.createSnapshot(snapshotPath, snapshot2);
// Check for listing through the Router
SnapshottableDirectoryStatus[] dirList =
routerProtocol.getSnapshottableDirListing();
assertEquals(1, dirList.length);
SnapshottableDirectoryStatus snapshotDir0 = dirList[0];
assertEquals(snapshotPath, snapshotDir0.getFullPath().toString());
// check for snapshot listing through the Router
SnapshotStatus[] snapshots = routerProtocol.
getSnapshotListing(snapshotPath);
assertEquals(2, snapshots.length);
assertEquals(SnapshotTestHelper.getSnapshotRoot(
new Path(snapshotPath), snapshot1),
snapshots[0].getFullPath());
assertEquals(SnapshotTestHelper.getSnapshotRoot(
new Path(snapshotPath), snapshot2),
snapshots[1].getFullPath());
// Check for difference report in two snapshot
SnapshotDiffReport diffReport = routerProtocol.getSnapshotDiffReport(
snapshotPath, snapshot1, snapshot2);
assertEquals(2, diffReport.getDiffList().size());
// Check for difference in two snapshot
byte[] startPath = {};
SnapshotDiffReportListing diffReportListing =
routerProtocol.getSnapshotDiffReportListing(
snapshotPath, snapshot1, snapshot2, startPath, -1);
assertEquals(1, diffReportListing.getModifyList().size());
assertEquals(1, diffReportListing.getCreateList().size());
// Cleanup
routerProtocol.deleteSnapshot(snapshotPath, snapshot1);
routerProtocol.deleteSnapshot(snapshotPath, snapshot2);
routerProtocol.disallowSnapshot(snapshotPath);
}
@Test
public void testProxyGetBlockLocations() throws Exception {
// The test file fits in one block, so the router should report exactly
// one located block for it.
LocatedBlocks blocks =
routerProtocol.getBlockLocations(routerFile, 0, 1024);
assertEquals(1, blocks.getLocatedBlocks().size());
// A path that cannot be resolved must fail identically through the
// router and through the namenode.
Method getBlockLocations = ClientProtocol.class.getMethod(
"getBlockLocations", String.class, long.class, long.class);
String unknownPath = "/unknownlocation/unknowndir";
compareResponses(routerProtocol, nnProtocol,
getBlockLocations, new Object[] {unknownPath, (long) 0, (long) 0});
}
/**
 * Set a storage policy through the router and verify the change through
 * the namenode.
 */
@Test
public void testProxyStoragePolicy() throws Exception {
// Query initial policy via NN
HdfsFileStatus status = namenode.getClient().getFileInfo(nnFile);
// Set a random policy via router. The RNG is created once, outside the
// loop, and we keep drawing until we pick a policy that can be changed
// after file creation (i.e. not copy-on-create).
BlockStoragePolicy[] policies = namenode.getClient().getStoragePolicies();
Random rand = new Random();
BlockStoragePolicy policy = policies[0];
while (policy.isCopyOnCreateFile()) {
policy = policies[rand.nextInt(policies.length)];
}
routerProtocol.setStoragePolicy(routerFile, policy.getName());
// Verify policy via NN; assertEquals reports both values on failure,
// unlike the previous assertTrue(a == b).
HdfsFileStatus newStatus = namenode.getClient().getFileInfo(nnFile);
assertEquals(policy.getId(), newStatus.getStoragePolicy());
assertTrue(newStatus.getStoragePolicy() != status.getStoragePolicy());
// Validate router failure response matches NN failure response.
Method m = ClientProtocol.class.getMethod("setStoragePolicy", String.class,
String.class);
String badPath = "/unknownlocation/unknowndir";
compareResponses(routerProtocol, nnProtocol,
m, new Object[] {badPath, "badpolicy"});
}
/**
 * Get, set and unset a storage policy through the router, checking each
 * state against the namenode. The default policy (HOT) must be restored
 * after the unset.
 */
@Test
public void testProxyGetAndUnsetStoragePolicy() throws Exception {
String file = "/testGetStoragePolicy";
String nnFilePath = cluster.getNamenodeTestDirectoryForNS(ns) + file;
String routerFilePath = cluster.getFederatedTestDirectoryForNS(ns) + file;
createFile(routerFS, routerFilePath, 32);
// Get storage policy via router
BlockStoragePolicy policy = routerProtocol.getStoragePolicy(routerFilePath);
// Verify default policy is HOT
assertEquals(HdfsConstants.HOT_STORAGE_POLICY_NAME, policy.getName());
assertEquals(HdfsConstants.HOT_STORAGE_POLICY_ID, policy.getId());
// Get storage policies via router
BlockStoragePolicy[] policies = routerProtocol.getStoragePolicies();
BlockStoragePolicy[] nnPolicies = namenode.getClient().getStoragePolicies();
// Verify policies returned by router are the same as those returned by NN
assertArrayEquals(nnPolicies, policies);
// Pick a non copy-on-create policy, because if copyOnCreateFile is set
// the policy cannot be changed after file creation. The RNG is created
// once, outside the retry loop.
Random rand = new Random();
BlockStoragePolicy newPolicy = policies[0];
while (newPolicy.isCopyOnCreateFile()) {
newPolicy = policies[rand.nextInt(policies.length)];
}
routerProtocol.setStoragePolicy(routerFilePath, newPolicy.getName());
// Get storage policy via router
policy = routerProtocol.getStoragePolicy(routerFilePath);
// Verify the newly set policy
assertEquals(newPolicy.getName(), policy.getName());
assertEquals(newPolicy.getId(), policy.getId());
// Verify policy via NN
BlockStoragePolicy nnPolicy =
namenode.getClient().getStoragePolicy(nnFilePath);
assertEquals(nnPolicy.getName(), policy.getName());
assertEquals(nnPolicy.getId(), policy.getId());
// Unset storage policy via router
routerProtocol.unsetStoragePolicy(routerFilePath);
// Get storage policy: the default (HOT) must be back in effect
policy = routerProtocol.getStoragePolicy(routerFilePath);
assertEquals(HdfsConstants.HOT_STORAGE_POLICY_NAME, policy.getName());
assertEquals(HdfsConstants.HOT_STORAGE_POLICY_ID, policy.getId());
// Verify policy via NN
nnPolicy = namenode.getClient().getStoragePolicy(nnFilePath);
assertEquals(nnPolicy.getName(), policy.getName());
assertEquals(nnPolicy.getId(), policy.getId());
}
@Test
public void testListStoragePolicies() throws IOException, URISyntaxException {
MockResolver resolver =
(MockResolver) router.getRouter().getSubclusterResolver();
try {
// With a default namespace configured, the router must return the
// same policies as the namenode.
BlockStoragePolicy[] expected = namenode.getClient().getStoragePolicies();
assertArrayEquals(expected, routerProtocol.getStoragePolicies());
// With the default namespace disabled, the call should still succeed
// and return the same policies.
resolver.setDisableNamespace(true);
assertArrayEquals(expected, routerProtocol.getStoragePolicies());
} finally {
resolver.setDisableNamespace(false);
}
}
@Test
public void testGetServerDefaults() throws IOException, URISyntaxException {
MockResolver resolver =
(MockResolver) router.getRouter().getSubclusterResolver();
try {
// With a default namespace configured, the router must report the
// same server defaults (block size) as the namenode.
FsServerDefaults nnDefaults = namenode.getClient().getServerDefaults();
assertEquals(nnDefaults.getBlockSize(),
routerProtocol.getServerDefaults().getBlockSize());
// With the default namespace disabled, the call should still succeed
// and report the same block size.
resolver.setDisableNamespace(true);
assertEquals(nnDefaults.getBlockSize(),
routerProtocol.getServerDefaults().getBlockSize());
} finally {
resolver.setDisableNamespace(false);
}
}
@Test
public void testProxyGetPreferedBlockSize() throws Exception {
// The preferred block size reported by the router must match the one
// reported directly by the namenode.
long nnBlockSize = nnProtocol.getPreferredBlockSize(nnFile);
long routerBlockSize = routerProtocol.getPreferredBlockSize(routerFile);
assertEquals(routerBlockSize, nnBlockSize);
// A path that cannot be resolved must fail identically through the
// router and through the namenode.
Method getPreferredBlockSize = ClientProtocol.class.getMethod(
"getPreferredBlockSize", String.class);
String unknownPath = "/unknownlocation/unknowndir";
compareResponses(
routerProtocol, nnProtocol, getPreferredBlockSize,
new Object[] {unknownPath});
}
/**
 * Attempt to concat {@code source} into {@code target} through the router
 * and check the outcome.
 *
 * @param source Path of the source file to concat.
 * @param target Path of the target file.
 * @param failureExpected Whether the concat is expected to fail.
 * @param verifyException Whether to check the exception message.
 * @param msg Expected text in the exception (used when verifyException).
 */
private void testConcat(
String source, String target, boolean failureExpected, boolean verifyException, String msg) {
boolean failure = false;
try {
// Concat test file with fill block length file via router
routerProtocol.concat(target, new String[] {source});
} catch (IOException ex) {
failure = true;
if (verifyException) {
assertExceptionContains(msg, ex);
}
}
assertEquals(failureExpected, failure);
}
/**
 * Attempt to concat {@code source} into {@code target} through the router
 * and only check whether it fails. Delegates to the five-argument overload
 * without verifying the exception message, removing the duplicated body.
 */
private void testConcat(
String source, String target, boolean failureExpected) {
testConcat(source, target, failureExpected, false, null);
}
/**
 * Verify concat through the router: it must fail across namespaces,
 * succeed within one namespace, handle an empty target, and surface the
 * namenode's error for an empty source file.
 */
@Test
public void testProxyConcatFile() throws Exception {
// Create a stub file in the primary ns
String sameNameservice = ns;
String existingFile =
cluster.getFederatedTestDirectoryForNS(sameNameservice) +
"_concatfile";
int existingFileSize = 32;
createFile(routerFS, existingFile, existingFileSize);
// Identify an alternate nameservice that doesn't match the existing file
String alternateNameservice = null;
for (String n : cluster.getNameservices()) {
if (!n.equals(sameNameservice)) {
alternateNameservice = n;
break;
}
}
// Create new files, must be a full block to use concat. One file is in the
// same namespace as the target file, the other is in a different namespace.
String altRouterFile =
cluster.getFederatedTestDirectoryForNS(alternateNameservice) +
"_newfile";
String sameRouterFile =
cluster.getFederatedTestDirectoryForNS(sameNameservice) +
"_newfile";
createFile(routerFS, altRouterFile, DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
createFile(routerFS, sameRouterFile, DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
// Concat in different namespaces, fails
testConcat(existingFile, altRouterFile, true);
// Concat in same namespaces, succeeds
testConcat(existingFile, sameRouterFile, false);
// Check target file length: concat appends the source onto the target
FileStatus status = getFileStatus(routerFS, sameRouterFile);
assertEquals(
existingFileSize + DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT,
status.getLen());
// Validate router failure response matches NN failure response.
Method m = ClientProtocol.class.getMethod(
"concat", String.class, String[].class);
String badPath = "/unknownlocation/unknowndir";
compareResponses(routerProtocol, nnProtocol, m,
new Object[] {badPath, new String[] {routerFile}});
// Test when concat trg is a empty file
// (recreate the source first: the earlier concat consumed it)
createFile(routerFS, existingFile, existingFileSize);
String sameRouterEmptyFile =
cluster.getFederatedTestDirectoryForNS(sameNameservice) +
"_newemptyfile";
createFile(routerFS, sameRouterEmptyFile, 0);
// Concat in same namespaces, succeeds
testConcat(existingFile, sameRouterEmptyFile, false);
FileStatus mergedStatus = getFileStatus(routerFS, sameRouterEmptyFile);
assertEquals(existingFileSize, mergedStatus.getLen());
// Test when concat srclist has some empty file, namenode will throw IOException.
String srcEmptyFile = cluster.getFederatedTestDirectoryForNS(sameNameservice) + "_srcEmptyFile";
createFile(routerFS, srcEmptyFile, 0);
String targetFile = cluster.getFederatedTestDirectoryForNS(sameNameservice) + "_targetFile";
createFile(routerFS, targetFile, existingFileSize);
// Concat in same namespaces, succeeds
testConcat(srcEmptyFile, targetFile, true, true,
"org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.HadoopIllegalArgumentException): "
+ "concat: source file " + srcEmptyFile + " is invalid or empty or underConstruction");
}
/**
 * Append data through the router and verify the new file size through the
 * namenode; also check that append on an unresolvable path fails the same
 * way through router and namenode.
 */
@Test
public void testProxyAppend() throws Exception {
// Append a test string via router
EnumSet<CreateFlag> createFlag = EnumSet.of(CreateFlag.APPEND);
DFSClient routerClient = getRouterContext().getClient();
HdfsDataOutputStream stream =
routerClient.append(routerFile, 1024, createFlag, null, null);
stream.writeBytes(TEST_STRING);
stream.close();
// Verify file size via NN: the file already had content, so the length
// must now exceed the appended string alone
FileStatus status = getFileStatus(nnFS, nnFile);
assertTrue(status.getLen() > TEST_STRING.length());
// Validate router failure response matches NN failure response.
Method m = ClientProtocol.class.getMethod("append", String.class,
String.class, EnumSetWritable.class);
String badPath = "/unknownlocation/unknowndir";
EnumSetWritable<CreateFlag> createFlagWritable =
new EnumSetWritable<CreateFlag>(createFlag);
compareResponses(routerProtocol, nnProtocol, m,
new Object[] {badPath, "testClient", createFlagWritable});
}
/**
 * Drive the low-level create/addBlock/getAdditionalDatanode protocol calls
 * through the router and check a datanode is returned for the new block.
 */
@Test
public void testProxyGetAdditionalDatanode()
throws IOException, InterruptedException, URISyntaxException {
// Use primitive APIs to open a file, add a block, and get datanode location
EnumSet<CreateFlag> createFlag = EnumSet.of(CreateFlag.CREATE);
String clientName = getRouterContext().getClient().getClientName();
String newRouterFile = routerFile + "_additionalDatanode";
HdfsFileStatus status = routerProtocol.create(
newRouterFile, new FsPermission("777"), clientName,
new EnumSetWritable<CreateFlag>(createFlag), true, (short) 1,
(long) 1024, CryptoProtocolVersion.supported(), null, null);
// Add a block via router (requires client to have same lease)
LocatedBlock block = routerProtocol.addBlock(
newRouterFile, clientName, null, null,
status.getFileId(), null, null);
// Ask for one additional datanode for the block, excluding none
DatanodeInfo[] exclusions = DatanodeInfo.EMPTY_ARRAY;
LocatedBlock newBlock = routerProtocol.getAdditionalDatanode(
newRouterFile, status.getFileId(), block.getBlock(),
block.getLocations(), block.getStorageIDs(), exclusions, 1, clientName);
assertNotNull(newBlock);
}
/**
 * Create a file through the router as a different (remote) user and check
 * via the namenode that the file is owned by that user.
 */
@Test
public void testProxyCreateFileAlternateUser()
throws IOException, URISyntaxException, InterruptedException {
// Create via Router
String routerDir = cluster.getFederatedTestDirectoryForNS(ns);
String namenodeDir = cluster.getNamenodeTestDirectoryForNS(ns);
String newRouterFile = routerDir + "/unknownuser";
String newNamenodeFile = namenodeDir + "/unknownuser";
String username = "unknownuser";
// Allow all user access to dir so the alternate user can create the file
namenode.getFileContext().setPermission(
new Path(namenodeDir), new FsPermission("777"));
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(username);
DFSClient client = getRouterContext().getClient(ugi);
client.create(newRouterFile, true);
// Fetch via NN and check user.
// assertEquals takes (expected, actual): the expected owner goes first.
FileStatus status = getFileStatus(nnFS, newNamenodeFile);
assertEquals(username, status.getOwner());
}
/**
 * An unauthorized user must get the same exception type through the router
 * as when talking to the namenode directly.
 */
@Test
public void testProxyGetFileInfoAcessException() throws IOException {
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser("unknownuser");
// List files from the NN and trap the exception
Exception nnFailure = null;
try {
String testFile = cluster.getNamenodeTestFileForNS(ns);
namenode.getClient(ugi).getLocatedBlocks(testFile, 0);
} catch (Exception e) {
nnFailure = e;
}
assertNotNull(nnFailure);
// List files from the router and trap the exception
Exception routerFailure = null;
try {
String testFile = cluster.getFederatedTestFileForNS(ns);
getRouterContext().getClient(ugi).getLocatedBlocks(testFile, 0);
} catch (Exception e) {
routerFailure = e;
}
assertNotNull(routerFailure);
// assertEquals takes (expected, actual): the NN failure is the
// reference behavior, so its class goes first.
assertEquals(nnFailure.getClass(), routerFailure.getClass());
}
/**
 * versionRequest through the router must match the default namenode; when
 * no default namespace is set, the answer must match one of the namenodes.
 */
@Test
public void testProxyVersionRequest() throws Exception {
MockResolver resolver =
(MockResolver) router.getRouter().getSubclusterResolver();
try {
// Check with default namespace specified.
NamespaceInfo rVersion = routerNamenodeProtocol.versionRequest();
NamespaceInfo nnVersion = nnNamenodeProtocol.versionRequest();
NamespaceInfo nnVersion1 = nnNamenodeProtocol1.versionRequest();
compareVersion(rVersion, nnVersion);
// Check with default namespace unspecified.
resolver.setDisableNamespace(true);
// Verify the NamespaceInfo is of nn0 or nn1
// (the block pool ID identifies which namenode answered)
boolean isNN0 =
rVersion.getBlockPoolID().equals(nnVersion.getBlockPoolID());
compareVersion(rVersion, isNN0 ? nnVersion : nnVersion1);
} finally {
resolver.setDisableNamespace(false);
}
}
/**
 * Assert that the namespace information returned by the router matches the
 * namenode's, field by field.
 *
 * @param actual NamespaceInfo obtained through the router.
 * @param expected NamespaceInfo obtained directly from the namenode.
 */
private void compareVersion(NamespaceInfo actual, NamespaceInfo expected) {
assertEquals(expected.getBlockPoolID(), actual.getBlockPoolID());
assertEquals(expected.getNamespaceID(), actual.getNamespaceID());
assertEquals(expected.getClusterID(), actual.getClusterID());
assertEquals(expected.getLayoutVersion(), actual.getLayoutVersion());
assertEquals(expected.getCTime(), actual.getCTime());
}
@Test
public void testProxyGetBlockKeys() throws Exception {
MockResolver resolver =
(MockResolver) router.getRouter().getSubclusterResolver();
try {
// With a default namespace configured, the router must return the
// same block keys as the namenode.
ExportedBlockKeys routerKeys = routerNamenodeProtocol.getBlockKeys();
ExportedBlockKeys namenodeKeys = nnNamenodeProtocol.getBlockKeys();
compareBlockKeys(routerKeys, namenodeKeys);
// With the default namespace disabled, the call should still succeed
// and the keys should still match.
resolver.setDisableNamespace(true);
routerKeys = routerNamenodeProtocol.getBlockKeys();
compareBlockKeys(routerKeys, namenodeKeys);
} finally {
resolver.setDisableNamespace(false);
}
}
/**
 * Assert that the block keys returned by the router match the namenode's,
 * field by field.
 *
 * @param actual Block keys obtained through the router.
 * @param expected Block keys obtained directly from the namenode.
 */
private void compareBlockKeys(ExportedBlockKeys actual,
ExportedBlockKeys expected) {
assertEquals(expected.getCurrentKey(), actual.getCurrentKey());
assertEquals(expected.getKeyUpdateInterval(), actual.getKeyUpdateInterval());
assertEquals(expected.getTokenLifetime(), actual.getTokenLifetime());
}
/**
 * getBlocks for a datanode through the router must report the same block
 * IDs as the namenode does.
 */
@Test
public void testProxyGetBlocks() throws Exception {
// Get datanodes
DatanodeInfo[] dns =
routerProtocol.getDatanodeReport(DatanodeReportType.ALL);
DatanodeInfo dn0 = dns[0];
// Verify that checking that datanode works
BlocksWithLocations routerBlockLocations =
routerNamenodeProtocol.getBlocks(dn0, 1024, 0, 0,
null);
BlocksWithLocations nnBlockLocations =
nnNamenodeProtocol.getBlocks(dn0, 1024, 0, 0,
null);
// Router and NN must agree on the block list for the same datanode
BlockWithLocations[] routerBlocks = routerBlockLocations.getBlocks();
BlockWithLocations[] nnBlocks = nnBlockLocations.getBlocks();
assertEquals(nnBlocks.length, routerBlocks.length);
for (int i = 0; i < routerBlocks.length; i++) {
assertEquals(
nnBlocks[i].getBlock().getBlockId(),
routerBlocks[i].getBlock().getBlockId());
}
}
/**
 * getTransactionID through the router must match the default namenode;
 * with no default namespace the result must come from one of the two NNs.
 */
@Test
public void testProxyGetTransactionID() throws IOException {
MockResolver resolver =
(MockResolver) router.getRouter().getSubclusterResolver();
try {
// Check with default namespace specified.
long routerTransactionID = routerNamenodeProtocol.getTransactionID();
long nnTransactionID = nnNamenodeProtocol.getTransactionID();
long nnTransactionID1 = nnNamenodeProtocol1.getTransactionID();
assertEquals(nnTransactionID, routerTransactionID);
// Check with default namespace unspecified.
resolver.setDisableNamespace(true);
// Verify the transaction ID is of nn0 or nn1
routerTransactionID = routerNamenodeProtocol.getTransactionID();
assertThat(routerTransactionID).isIn(nnTransactionID, nnTransactionID1);
} finally {
resolver.setDisableNamespace(false);
}
}
/**
 * getMostRecentCheckpointTxId through the router must match the default
 * namenode; with no default namespace the call must still succeed.
 */
@Test
public void testProxyGetMostRecentCheckpointTxId() throws IOException {
MockResolver resolver =
(MockResolver) router.getRouter().getSubclusterResolver();
try {
// Check with default namespace specified.
long routerCheckPointId =
routerNamenodeProtocol.getMostRecentCheckpointTxId();
long nnCheckPointId = nnNamenodeProtocol.getMostRecentCheckpointTxId();
assertEquals(nnCheckPointId, routerCheckPointId);
// Check with default namespace unspecified: only verify the call does
// not throw. The result was previously stored in a dead local; either
// namenode may be chosen, so the value itself is not asserted.
resolver.setDisableNamespace(true);
routerNamenodeProtocol.getMostRecentCheckpointTxId();
} finally {
resolver.setDisableNamespace(false);
}
}
/**
 * Safemode state queried through the router must agree with the namenode,
 * and SAFEMODE_ENTER/LEAVE through the router must toggle the state.
 * The call order is significant: each GET checks the preceding transition.
 */
@Test
public void testProxySetSafemode() throws Exception {
// Router and NN must agree on the current safemode state,
// both with isChecked=false and isChecked=true
boolean routerSafemode =
routerProtocol.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET, false);
boolean nnSafemode =
nnProtocol.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET, false);
assertEquals(nnSafemode, routerSafemode);
routerSafemode =
routerProtocol.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET, true);
nnSafemode =
nnProtocol.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET, true);
assertEquals(nnSafemode, routerSafemode);
// Initially not in safemode; ENTER turns it on, LEAVE turns it off
assertFalse(routerProtocol.setSafeMode(
HdfsConstants.SafeModeAction.SAFEMODE_GET, false));
assertTrue(routerProtocol.setSafeMode(
HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false));
assertTrue(routerProtocol.setSafeMode(
HdfsConstants.SafeModeAction.SAFEMODE_GET, false));
assertFalse(routerProtocol.setSafeMode(
HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false));
assertFalse(routerProtocol.setSafeMode(
HdfsConstants.SafeModeAction.SAFEMODE_GET, false));
}
@Test
public void testProxyRestoreFailedStorage() throws Exception {
// "check" only queries the restore-failed-storage flag; the router must
// report the same value as the namenode.
boolean viaRouter = routerProtocol.restoreFailedStorage("check");
boolean viaNamenode = nnProtocol.restoreFailedStorage("check");
assertEquals(viaNamenode, viaRouter);
}
/**
 * Write to a file through the router and verify that renewing the client
 * lease proxies exactly one additional RPC to the namenode.
 *
 * @param dfs Filesystem used to create or append the file.
 * @param rpcMetrics Router RPC metrics used to count proxied operations.
 * @param testPath File to write to.
 * @param createFlag If true the file is created, otherwise appended to.
 */
private void testRenewLeaseInternal(DistributedFileSystem dfs,
FederationRPCMetrics rpcMetrics, Path testPath, boolean createFlag)
throws Exception {
// try-with-resources replaces the manual null-checked finally block and
// guarantees the stream is closed even if an assertion fails.
try (FSDataOutputStream outputStream =
createFlag ? dfs.create(testPath) : dfs.append(testPath)) {
outputStream.write("hello world. \n".getBytes());
long proxyOpBeforeRenewLease = rpcMetrics.getProxyOps();
assertTrue(dfs.getClient().renewLease());
long proxyOpAfterRenewLease = rpcMetrics.getProxyOps();
// renewLease must account for exactly one proxied operation
assertEquals((proxyOpBeforeRenewLease + 1), proxyOpAfterRenewLease);
}
}
/**
 * Renew the lease for an erasure-coded file written through a router mount
 * point and verify the EC policy is applied to the new file.
 */
@Test
public void testRenewLeaseForECFile() throws Exception {
String ecName = "RS-6-3-1024k";
FederationRPCMetrics metrics = router.getRouterRpcServer().getRPCMetrics();
// Install a mount point to a different path to check
MockResolver resolver =
(MockResolver)router.getRouter().getSubclusterResolver();
String ns0 = cluster.getNameservices().get(0);
resolver.addLocation("/testRenewLease0", ns0, "/testRenewLease0");
// Stop LeaseRenewer so the lease is only renewed by the explicit call
// inside testRenewLeaseInternal
DistributedFileSystem routerDFS = (DistributedFileSystem) routerFS;
routerDFS.getClient().getLeaseRenewer().interruptAndJoin();
Path testECPath = new Path("/testRenewLease0/ecDirectory/test_ec.txt");
routerDFS.mkdirs(testECPath.getParent());
routerDFS.setErasureCodingPolicy(
testECPath.getParent(), ecName);
testRenewLeaseInternal(routerDFS, metrics, testECPath, true);
// The file must have inherited the directory's EC policy
ErasureCodingPolicy ecPolicy = routerDFS.getErasureCodingPolicy(testECPath);
assertNotNull(ecPolicy);
assertEquals(ecName, ecPolicy.getName());
}
/**
 * Renew the lease for a replicated file written through a router mount
 * point, both when creating the file and when appending to it.
 */
@Test
public void testRenewLeaseForReplicaFile() throws Exception {
FederationRPCMetrics metrics = router.getRouterRpcServer().getRPCMetrics();
// Install a mount point to a different path to check
MockResolver resolver =
(MockResolver)router.getRouter().getSubclusterResolver();
String ns0 = cluster.getNameservices().get(0);
resolver.addLocation("/testRenewLease0", ns0, "/testRenewLease0");
// Stop LeaseRenewer so the lease is only renewed by the explicit call
// inside testRenewLeaseInternal
DistributedFileSystem routerDFS = (DistributedFileSystem) routerFS;
routerDFS.getClient().getLeaseRenewer().interruptAndJoin();
// Test Replica File: first create, then append
Path testPath = new Path("/testRenewLease0/test_replica.txt");
testRenewLeaseInternal(routerDFS, metrics, testPath, true);
testRenewLeaseInternal(routerDFS, metrics, testPath, false);
}
/**
 * With open streams in two different nameservices, one renewLease call
 * through the router must proxy one RPC per nameservice (two in total).
 */
@Test
public void testRenewLeaseWithMultiStream() throws Exception {
FederationRPCMetrics metrics = router.getRouterRpcServer().getRPCMetrics();
// Install a mount point to a different path to check
MockResolver resolver =
(MockResolver)router.getRouter().getSubclusterResolver();
String ns0 = cluster.getNameservices().get(0);
String ns1 = cluster.getNameservices().get(1);
resolver.addLocation("/testRenewLease0", ns0, "/testRenewLease0");
resolver.addLocation("/testRenewLease1", ns1, "/testRenewLease1");
// Stop LeaseRenewer so the lease is only renewed by the explicit call below
DistributedFileSystem routerDFS = (DistributedFileSystem) routerFS;
routerDFS.getClient().getLeaseRenewer().interruptAndJoin();
Path newTestPath0 = new Path("/testRenewLease0/test1.txt");
Path newTestPath1 = new Path("/testRenewLease1/test1.txt");
try (FSDataOutputStream outStream1 = routerDFS.create(newTestPath0);
FSDataOutputStream outStream2 = routerDFS.create(newTestPath1)) {
outStream1.write("hello world \n".getBytes());
outStream2.write("hello world \n".getBytes());
long proxyOpBeforeRenewLease2 = metrics.getProxyOps();
assertTrue(routerDFS.getClient().renewLease());
long proxyOpAfterRenewLease2 = metrics.getProxyOps();
// One proxied renewLease per nameservice with an open stream
assertEquals((proxyOpBeforeRenewLease2 + 2), proxyOpAfterRenewLease2);
}
}
/**
 * mkdirs on a mount point whose only backing namespace is disabled must
 * fail with NoLocationException.
 */
@Test
public void testMkdirWithDisableNameService() throws Exception {
MockResolver resolver = (MockResolver)router.getRouter().getSubclusterResolver();
String ns0 = cluster.getNameservices().get(0);
resolver.addLocation("/mnt", ns0, "/");
// Disable the namespace backing the mount point
MockResolver activeNamenodeResolver = (MockResolver)router.getRouter().getNamenodeResolver();
activeNamenodeResolver.disableNamespace(ns0);
try {
FsPermission permission = new FsPermission("777");
RouterRpcServer rpcServer = router.getRouter().getRpcServer();
LambdaTestUtils.intercept(NoLocationException.class,
() -> rpcServer.mkdirs("/mnt/folder0/folder1", permission, true));
} finally {
// Re-enable the namespace for subsequent tests
activeNamenodeResolver.clearDisableNamespaces();
}
}
/**
 * Exceptions raised in a subcluster must be rewritten so that the paths in
 * the message refer to the mount-point view, not the subcluster path.
 */
@Test
public void testProxyExceptionMessages() throws IOException {
// Install a mount point to a different path to check
MockResolver resolver =
(MockResolver)router.getRouter().getSubclusterResolver();
String ns0 = cluster.getNameservices().get(0);
resolver.addLocation("/mnt", ns0, "/");
try {
FsPermission permission = new FsPermission("777");
routerProtocol.mkdirs("/mnt/folder0/folder1", permission, false);
fail("mkdirs for non-existing parent folder should have failed");
} catch (IOException ioe) {
// Message must name the mounted path, not the subcluster path
assertExceptionContains("/mnt/folder0", ioe,
"Wrong path in exception for mkdirs");
}
try {
FsPermission permission = new FsPermission("777");
routerProtocol.setPermission("/mnt/testfile.txt", permission);
fail("setPermission for non-existing file should have failed");
} catch (IOException ioe) {
assertExceptionContains("/mnt/testfile.txt", ioe,
"Wrong path in exception for setPermission");
}
try {
FsPermission permission = new FsPermission("777");
routerProtocol.mkdirs("/mnt/folder0/folder1", permission, false);
// Non-recursive delete of a non-empty folder must fail
routerProtocol.delete("/mnt/folder0", false);
fail("delete for non-existing file should have failed");
} catch (IOException ioe) {
assertExceptionContains("/mnt/folder0", ioe,
"Wrong path in exception for delete");
}
resolver.cleanRegistrations();
// Check corner cases of the message rewriting itself
assertEquals(
"Parent directory doesn't exist: /ns1/a/a/b",
RouterRpcClient.processExceptionMsg(
"Parent directory doesn't exist: /a/a/b", "/a", "/ns1/a"));
}
/**
 * Create a file in each namespace, then find its first block and mark one
 * of the replicas as corrupt through
 * BlockManager#findAndMarkBlockAsCorrupt.
 *
 * After every NameNode has received the corrupt replica report,
 * replicatedBlockStats.getCorruptBlocks() through the router should equal
 * the sum of the corrupt blocks of all namespaces.
 */
@Test
public void testGetReplicatedBlockStats() throws Exception {
String testFile = "/test-file";
for (String nsid : cluster.getNameservices()) {
NamenodeContext context = cluster.getNamenode(nsid, null);
NameNode nameNode = context.getNamenode();
FSNamesystem namesystem = nameNode.getNamesystem();
BlockManager bm = namesystem.getBlockManager();
FileSystem fileSystem = context.getFileSystem();
// create a test file
createFile(fileSystem, testFile, 1024);
// mark a replica as corrupt (requires the BM write lock)
LocatedBlock block = NameNodeAdapter
.getBlockLocations(nameNode, testFile, 0, 1024).get(0);
namesystem.writeLock(RwLockMode.BM);
bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
"STORAGE_ID", "TEST");
namesystem.writeUnlock(RwLockMode.BM, "findAndMarkBlockAsCorrupt");
BlockManagerTestUtil.updateState(bm);
// wait until the corrupt replica is reflected in this namesystem
DFSTestUtil.waitCorruptReplicas(fileSystem, namesystem,
new Path(testFile), block.getBlock(), 1);
// save the getReplicatedBlockStats result
ReplicatedBlockStats stats =
context.getClient().getNamenode().getReplicatedBlockStats();
assertEquals(1, stats.getCorruptBlocks());
}
// The router aggregates the stats across all nameservices
ReplicatedBlockStats routerStat = routerProtocol.getReplicatedBlockStats();
assertEquals(cluster.getNameservices().size(), routerStat.getCorruptBlocks(),
"There should be 1 corrupt blocks for each NN");
}
/**
 * End-to-end check of erasure coding administration through the Router:
 * list policies and codecs, set a policy on a directory, add, disable,
 * enable and remove a custom policy, and compare the aggregated EC block
 * group stats against the per-Namenode values.
 */
@Test
public void testErasureCoding() throws Exception {

  // Fix: log-message typo ("erasurce" -> "erasure").
  LOG.info("List the available erasure coding policies");
  ErasureCodingPolicyInfo[] policies = checkErasureCodingPolicies();
  for (ErasureCodingPolicyInfo policy : policies) {
    LOG.info(" {}", policy);
  }

  LOG.info("List the erasure coding codecs");
  Map<String, String> codecsRouter = routerProtocol.getErasureCodingCodecs();
  Map<String, String> codecsNamenode = nnProtocol.getErasureCodingCodecs();
  assertTrue(Maps.difference(codecsRouter, codecsNamenode).areEqual());
  for (Entry<String, String> entry : codecsRouter.entrySet()) {
    LOG.info(" {}: {}", entry.getKey(), entry.getValue());
  }

  LOG.info("Create a testing directory via the router at the root level");
  String dirPath = "/testec";
  String filePath1 = dirPath + "/testfile1";
  FsPermission permission = new FsPermission("755");
  routerProtocol.mkdirs(dirPath, permission, false);

  createFile(routerFS, filePath1, 32);
  assertTrue(verifyFileExists(routerFS, filePath1));
  DFSClient file1Protocol = getFileDFSClient(filePath1);

  LOG.info("The policy for the new file should not be set");
  assertNull(routerProtocol.getErasureCodingPolicy(filePath1));
  assertNull(file1Protocol.getErasureCodingPolicy(filePath1));

  String policyName = "RS-6-3-1024k";
  LOG.info("Set policy \"{}\" for \"{}\"", policyName, dirPath);
  routerProtocol.setErasureCodingPolicy(dirPath, policyName);

  String filePath2 = dirPath + "/testfile2";
  LOG.info("Create {} in the path with the new EC policy", filePath2);
  createFile(routerFS, filePath2, 32);
  assertTrue(verifyFileExists(routerFS, filePath2));
  DFSClient file2Protocol = getFileDFSClient(filePath2);

  LOG.info("Check that the policy is set for {}", filePath2);
  ErasureCodingPolicy policyRouter1 =
      routerProtocol.getErasureCodingPolicy(filePath2);
  ErasureCodingPolicy policyNamenode1 =
      file2Protocol.getErasureCodingPolicy(filePath2);
  assertNotNull(policyRouter1);
  assertEquals(policyName, policyRouter1.getName());
  assertEquals(policyName, policyNamenode1.getName());

  LOG.info("Create a new erasure coding policy");
  String newPolicyName = "RS-6-3-128k";
  ECSchema ecSchema = new ECSchema(ErasureCodeConstants.RS_CODEC_NAME, 6, 3);
  ErasureCodingPolicy ecPolicy = new ErasureCodingPolicy(
      newPolicyName,
      ecSchema,
      128 * 1024,
      (byte) -1);
  ErasureCodingPolicy[] newPolicies = new ErasureCodingPolicy[] {
      ecPolicy
  };
  AddErasureCodingPolicyResponse[] responses =
      routerProtocol.addErasureCodingPolicies(newPolicies);
  assertEquals(1, responses.length);
  assertTrue(responses[0].isSucceed());
  routerProtocol.disableErasureCodingPolicy(newPolicyName);

  LOG.info("The new policy should be there and disabled");
  policies = checkErasureCodingPolicies();
  boolean found = false;
  for (ErasureCodingPolicyInfo policy : policies) {
    // Fix: parameterized logging; the original concatenated the "{}"
    // placeholder with the policy instead of passing it as an argument.
    LOG.info(" {}", policy);
    if (policy.getPolicy().getName().equals(newPolicyName)) {
      found = true;
      assertEquals(ErasureCodingPolicyState.DISABLED, policy.getState());
      break;
    }
  }
  assertTrue(found);

  LOG.info("Set the test folder to use the new policy");
  routerProtocol.enableErasureCodingPolicy(newPolicyName);
  routerProtocol.setErasureCodingPolicy(dirPath, newPolicyName);

  LOG.info("Create a file in the path with the new EC policy");
  String filePath3 = dirPath + "/testfile3";
  createFile(routerFS, filePath3, 32);
  assertTrue(verifyFileExists(routerFS, filePath3));
  DFSClient file3Protocol = getFileDFSClient(filePath3);

  ErasureCodingPolicy policyRouterFile3 =
      routerProtocol.getErasureCodingPolicy(filePath3);
  assertEquals(newPolicyName, policyRouterFile3.getName());
  ErasureCodingPolicy policyNamenodeFile3 =
      file3Protocol.getErasureCodingPolicy(filePath3);
  assertEquals(newPolicyName, policyNamenodeFile3.getName());

  LOG.info("Remove the policy and check the one for the test folder");
  routerProtocol.removeErasureCodingPolicy(newPolicyName);
  // Removing a policy does not change the policy already applied to files.
  ErasureCodingPolicy policyRouter3 =
      routerProtocol.getErasureCodingPolicy(filePath3);
  assertEquals(newPolicyName, policyRouter3.getName());
  ErasureCodingPolicy policyNamenode3 =
      file3Protocol.getErasureCodingPolicy(filePath3);
  assertEquals(newPolicyName, policyNamenode3.getName());

  LOG.info("Check the stats");
  ECBlockGroupStats statsRouter = routerProtocol.getECBlockGroupStats();
  ECBlockGroupStats statsNamenode = getNamenodeECBlockGroupStats();
  assertEquals(statsNamenode, statsRouter);
}
/**
 * Collect the EC block group statistics from every Namenode in the mini
 * cluster and combine them into a single aggregated value.
 * @return Aggregated EC stats from all namenodes.
 * @throws Exception If the stats cannot be fetched from any Namenode.
 */
private ECBlockGroupStats getNamenodeECBlockGroupStats() throws Exception {
  final List<ECBlockGroupStats> perNamenodeStats = new ArrayList<>();
  for (NamenodeContext context : cluster.getNamenodes()) {
    perNamenodeStats.add(
        context.getClient().getNamenode().getECBlockGroupStats());
  }
  return ECBlockGroupStats.merge(perNamenodeStats);
}
/**
 * Rolling the edit log through the Router returns a transaction id that
 * must match what the Router then reports as the current edit log txid.
 */
@Test
public void testGetCurrentTXIDandRollEdits() throws IOException {
  final Long txidAfterRoll = routerProtocol.rollEdits();
  final Long reportedTxid = routerProtocol.getCurrentEditLogTxid();
  assertEquals(txidAfterRoll, reportedTxid);
}
/**
 * saveNamespace is only permitted while the Namenodes are in safe mode;
 * enter safe mode on both subclusters, save through the Router, and leave.
 */
@Test
public void testSaveNamespace() throws IOException {
  DistributedFileSystem nnFs0 = cluster.getCluster().getFileSystem(0);
  DistributedFileSystem nnFs1 = cluster.getCluster().getFileSystem(1);
  nnFs0.setSafeMode(SafeModeAction.ENTER);
  nnFs1.setSafeMode(SafeModeAction.ENTER);
  // The Router fans the request out to every subcluster.
  boolean saved = routerProtocol.saveNamespace(0, 0);
  assertTrue(saved);
  nnFs0.setSafeMode(SafeModeAction.LEAVE);
  nnFs1.setSafeMode(SafeModeAction.LEAVE);
}
/*
 * This case tests NameNodeMetrics for 3 purposes:
 * 1. NameNodeMetrics should be cached, since the cost of gathering the
 * metrics is expensive
 * 2. The metrics cache should be updated regularly
 * 3. Without any subcluster available, we should return an empty list
 */
@Test
public void testNamenodeMetrics() throws Exception {
final NamenodeBeanMetrics metrics =
router.getRouter().getNamenodeMetrics();
final String jsonString0 = metrics.getLiveNodes();
// We should have the nodes in all the subclusters
JSONObject jsonObject = new JSONObject(jsonString0);
assertEquals(NUM_SUBCLUSTERS * NUM_DNS, jsonObject.names().length());
JSONObject jsonObjectNn =
new JSONObject(cluster.getRandomNamenode().getNamenode().getNamesystem().getLiveNodes());
// The DN report served by the NN and by the Router should be the same
String randomDn = (String) jsonObjectNn.names().get(0);
JSONObject randomReportNn = jsonObjectNn.getJSONObject(randomDn);
JSONObject randomReportRouter = jsonObject.getJSONObject(randomDn);
JSONArray keys = randomReportNn.names();
for (int i = 0; i < keys.length(); i++) {
String key = keys.getString(i);
// Skip the 2 keys that always return -1
if (key.equals("blockScheduled") || key.equals("volfails")) {
continue;
}
assertEquals(randomReportRouter.get(key), randomReportNn.get(key));
}
// We should be caching this information: an immediate second call must
// return the exact same string
String jsonString1 = metrics.getLiveNodes();
assertEquals(jsonString0, jsonString1);
// We wait until the cached value is updated
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
return !jsonString0.equals(metrics.getLiveNodes());
}
}, 500, 5 * 1000);
// The cache should be updated now
final String jsonString2 = metrics.getLiveNodes();
assertNotEquals(jsonString0, jsonString2);
// Without any subcluster available, we should return an empty list
MockResolver resolver =
(MockResolver) router.getRouter().getNamenodeResolver();
resolver.cleanRegistrations();
resolver.setDisableRegistration(true);
try {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
return !jsonString2.equals(metrics.getLiveNodes());
}
}, 500, 5 * 1000);
assertEquals("{}", metrics.getLiveNodes());
} finally {
// Reset the registrations again so later tests see a healthy cluster
resolver.setDisableRegistration(false);
cluster.registerNamenodes();
cluster.waitNamenodeRegistration();
}
}
/**
 * Without a State Store, the RBFMetrics methods that depend on it must
 * degrade gracefully to empty JSON/array results instead of failing.
 */
@Test
public void testRBFMetricsMethodsRelayOnStateStore() {
// This test cluster runs without a State Store.
assertNull(router.getRouter().getStateStore());
RBFMetrics metrics = router.getRouter().getMetrics();
assertEquals("{}", metrics.getNamenodes());
assertEquals("[]", metrics.getMountTable());
assertEquals("{}", metrics.getRouters());
assertEquals(0, metrics.getNumNamenodes());
assertEquals(0, metrics.getNumExpiredNamenodes());
// These 2 methods rely on {@link RBFMetrics#getNamespaceInfo()}
assertEquals("[]", metrics.getClusterId());
assertEquals("[]", metrics.getBlockPoolId());
// These methods rely on
// {@link RBFMetrics#getActiveNamenodeRegistration()}
assertEquals("{}", metrics.getNameservices());
assertEquals(0, metrics.getNumLiveNodes());
}
/**
 * No datanode is in maintenance in this cluster, so the bean metrics must
 * report an empty map of entering-maintenance nodes.
 */
@Test
public void testNamenodeMetricsEnteringMaintenanceNodes() throws IOException {
final NamenodeBeanMetrics metrics =
router.getRouter().getNamenodeMetrics();
assertEquals("{}", metrics.getEnteringMaintenanceNodes());
}
/**
 * Exercise the cache administration RPCs through the Router: add/modify/
 * remove a cache pool, then add/list/modify/remove a cache directive.
 */
@Test
public void testCacheAdmin() throws Exception {
DistributedFileSystem routerDFS = (DistributedFileSystem) routerFS;
// Verify cache pool commands.
CachePoolInfo cpInfo = new CachePoolInfo("Check");
cpInfo.setOwnerName("Owner");
// Add a cache pool.
routerProtocol.addCachePool(cpInfo);
RemoteIterator<CachePoolEntry> iter = routerDFS.listCachePools();
assertTrue(iter.hasNext());
// Modify a cache pool.
CachePoolInfo info = iter.next().getInfo();
assertEquals("Owner", info.getOwnerName());
cpInfo.setOwnerName("new Owner");
routerProtocol.modifyCachePool(cpInfo);
iter = routerDFS.listCachePools();
assertTrue(iter.hasNext());
info = iter.next().getInfo();
assertEquals("new Owner", info.getOwnerName());
// Remove a cache pool.
routerProtocol.removeCachePool("Check");
iter = routerDFS.listCachePools();
assertFalse(iter.hasNext());
// Verify cache directive commands.
cpInfo.setOwnerName("Owner");
routerProtocol.addCachePool(cpInfo);
routerDFS.mkdirs(new Path("/ns1/dir"));
// Add a cache directive.
CacheDirectiveInfo cacheDir = new CacheDirectiveInfo.Builder()
.setPath(new Path("/ns1/dir"))
.setReplication((short) 1)
.setPool("Check")
.build();
long id = routerDFS.addCacheDirective(cacheDir);
CacheDirectiveInfo filter =
new CacheDirectiveInfo.Builder().setPath(new Path("/ns1/dir")).build();
assertTrue(routerDFS.listCacheDirectives(filter).hasNext());
// List cache directive.
assertEquals("Check",
routerDFS.listCacheDirectives(filter).next().getInfo().getPool());
cacheDir = new CacheDirectiveInfo.Builder().setReplication((short) 2)
.setId(id).setPath(new Path("/ns1/dir")).build();
// Modify cache directive.
routerDFS.modifyCacheDirective(cacheDir);
assertEquals((short) 2, (short) routerDFS.listCacheDirectives(filter).next()
.getInfo().getReplication());
// Remove the cache directive and check it is gone.
routerDFS.removeCacheDirective(id);
assertFalse(routerDFS.listCacheDirectives(filter).hasNext());
}
/**
 * Register a test user with two groups and verify the Router RPC server
 * resolves the same group list for that user.
 */
@Test
public void testgetGroupsForUser() throws Exception {
  String[] expectedGroups = new String[] {"bar", "group2"};
  UserGroupInformation.createUserForTesting("user",
      new String[] {"bar", "group2"});
  String[] actualGroups =
      router.getRouter().getRpcServer().getGroupsForUser("user");
  assertArrayEquals(expectedGroups, actualGroups);
}
/**
 * Verify that the datanode report served by the Router RPC server is
 * cached, and that the cache is refreshed when a datanode is stopped and
 * again when it is restarted.
 */
@Test
public void testGetCachedDatanodeReport() throws Exception {
  RouterRpcServer rpcServer = router.getRouter().getRpcServer();
  final DatanodeInfo[] datanodeReport =
      rpcServer.getCachedDatanodeReport(DatanodeReportType.LIVE);

  // We should have 12 nodes in total
  assertEquals(12, datanodeReport.length);

  // We should be caching this information
  DatanodeInfo[] datanodeReport1 =
      rpcServer.getCachedDatanodeReport(DatanodeReportType.LIVE);
  assertArrayEquals(datanodeReport1, datanodeReport);

  // Stop one datanode
  MiniDFSCluster miniDFSCluster = getCluster().getCluster();
  DataNodeProperties dnprop = miniDFSCluster.stopDataNode(0);

  // We wait until the cached value is updated
  GenericTestUtils.waitFor(() -> {
    DatanodeInfo[] dn = null;
    try {
      dn = rpcServer.getCachedDatanodeReport(DatanodeReportType.LIVE);
    } catch (IOException ex) {
      // Fix: keep the cause in the log instead of dropping it.
      LOG.error("Error on getCachedDatanodeReport", ex);
    }
    // Arrays.equals tolerates a null report from a failed fetch.
    return !Arrays.equals(datanodeReport, dn);
  }, 500, 5 * 1000);

  // The cache should be updated now
  final DatanodeInfo[] datanodeReport2 =
      rpcServer.getCachedDatanodeReport(DatanodeReportType.LIVE);
  assertEquals(datanodeReport.length - 1, datanodeReport2.length);

  // Restart the DN we just stopped
  miniDFSCluster.restartDataNode(dnprop);
  miniDFSCluster.waitActive();

  GenericTestUtils.waitFor(() -> {
    DatanodeInfo[] dn = null;
    try {
      dn = rpcServer.getCachedDatanodeReport(DatanodeReportType.LIVE);
    } catch (IOException ex) {
      LOG.error("Error on getCachedDatanodeReport", ex);
    }
    // Fix: guard against a null report to avoid an NPE while polling
    // (the original dereferenced dn.length unconditionally).
    return dn != null && datanodeReport.length == dn.length;
  }, 100, 10 * 1000);

  // The cache should be updated now
  final DatanodeInfo[] datanodeReport3 =
      rpcServer.getCachedDatanodeReport(DatanodeReportType.LIVE);
  assertEquals(datanodeReport.length, datanodeReport3.length);
}
/**
 * Fetch the erasure coding policies from both the Router and the Namenode
 * and verify that they match.
 * @return The erasure coding policies reported by the Router (sorted).
 */
private ErasureCodingPolicyInfo[] checkErasureCodingPolicies()
    throws IOException {
  ErasureCodingPolicyInfo[] fromRouter =
      routerProtocol.getErasureCodingPolicies();
  assertNotNull(fromRouter);
  ErasureCodingPolicyInfo[] fromNamenode =
      nnProtocol.getErasureCodingPolicies();
  // Sort both sides with the same comparator so ordering differences do
  // not cause a spurious mismatch.
  Arrays.sort(fromRouter, EC_POLICY_CMP);
  Arrays.sort(fromNamenode, EC_POLICY_CMP);
  assertArrayEquals(fromRouter, fromNamenode);
  return fromRouter;
}
/**
 * Locate the Namenode that holds a particular file and return a DFSClient
 * talking to it.
 * @param path Path of the file to look up.
 * @return DFSClient to the Namenode holding the file, or null if no
 *         Namenode reports the file.
 */
private DFSClient getFileDFSClient(final String path) {
  for (String nsId : cluster.getNameservices()) {
    LOG.info("Checking {} for {}", nsId, path);
    NamenodeContext namenode = cluster.getNamenode(nsId, null);
    try {
      DFSClient client = namenode.getClient();
      if (client.getFileInfo(path) != null) {
        return client;
      }
    } catch (Exception ignore) {
      // Best effort: a nameservice that cannot answer simply does not
      // hold the file.
    }
  }
  return null;
}
/**
 * A caller context set by the client must survive the hop through the
 * Router and appear in the Namenode audit log together with the fields the
 * Router appends (client ip, client id, client call id).
 */
@Test
public void testMkdirsWithCallerContext() throws IOException {
GenericTestUtils.LogCapturer auditlog =
GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
// Current callerContext is null
assertNull(CallerContext.getCurrent());
// Set client context
CallerContext.setCurrent(
new CallerContext.Builder("clientContext").build());
// Create a directory via the router
String dirPath = "/test_dir_with_callercontext";
FsPermission permission = new FsPermission("755");
routerProtocol.mkdirs(dirPath, permission, false);
// The audit log should contains "callerContext=clientIp:...,clientContext"
final String logOutput = auditlog.getOutput();
assertTrue(logOutput.contains("clientIp:"));
assertTrue(logOutput.contains("clientContext"));
assertTrue(logOutput.contains("clientId"));
assertTrue(logOutput.contains("clientCallId"));
assertTrue(verifyFileExists(routerFS, dirPath));
}
/**
 * When a proxy user issues calls through the Router, the real user must be
 * propagated in the caller context so the Namenode audit log records who
 * actually made the request, along with the proxy-user port.
 */
@Test
public void testRealUserPropagationInCallerContext()
throws IOException, InterruptedException {
GenericTestUtils.LogCapturer auditlog =
GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
// Current callerContext is null
assertNull(CallerContext.getCurrent());
UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
UserGroupInformation realUser = UserGroupInformation
.createUserForTesting("testRealUser", new String[]{"group"});
UserGroupInformation proxyUser = UserGroupInformation
.createProxyUser("testProxyUser", realUser);
// Issue a call through the Router as the proxy user.
FileSystem proxyFs = proxyUser.doAs(
(PrivilegedExceptionAction<FileSystem>) () -> router.getFileSystem());
proxyFs.listStatus(new Path("/"));

final String logOutput = auditlog.getOutput();
// Login user, which is used as the router's user, is different from the realUser.
assertNotEquals(loginUser.getUserName(), realUser.getUserName());
// Login user is used in the audit log's ugi field.
assertTrue(
logOutput.contains(String.format("ugi=%s (auth:PROXY) via %s (auth:SIMPLE)",
proxyUser.getUserName(),
loginUser.getUserName())), "The login user is the proxyUser in the UGI field");
// Real user is added to the caller context.
assertTrue(logOutput.contains(String.format("realUser:%s", realUser.getUserName())),
"The audit log should contain the real user.");
assertTrue(logOutput.contains(PROXY_USER_PORT),
"The audit log should contain the proxyuser port.");
}
/**
 * Setting the balancer bandwidth through the Router must eventually be
 * reflected on the datanodes; the value travels asynchronously, so poll.
 */
@Test
public void testSetBalancerBandwidth() throws Exception {
  long defaultBandwidth =
      DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT;
  final long targetBandwidth = defaultBandwidth * 2;
  routerProtocol.setBalancerBandwidth(targetBandwidth);
  ArrayList<DataNode> datanodes = cluster.getCluster().getDataNodes();
  GenericTestUtils.waitFor(
      () -> datanodes.get(0).getBalancerBandwidth() == targetBandwidth,
      100, 60 * 1000);
}
/**
 * The Router must append the true client ip/port to the caller context and
 * must not pass through clientIp/clientPort values that the client itself
 * placed in its context.
 */
@Test
public void testAddClientIpPortToCallerContext() throws IOException {
GenericTestUtils.LogCapturer auditLog =
GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);

// 1. ClientIp and ClientPort are not set on the client.
// Set client context.
CallerContext.setCurrent(
new CallerContext.Builder("clientContext").build());

// Create a directory via the router.
String dirPath = "/test";
routerProtocol.mkdirs(dirPath, new FsPermission("755"), false);

// The audit log should contains "clientIp:" and "clientPort:".
assertTrue(auditLog.getOutput().contains("clientIp:"));
assertTrue(auditLog.getOutput().contains("clientPort:"));
assertTrue(verifyFileExists(routerFS, dirPath));
auditLog.clearOutput();

// 2. ClientIp and ClientPort are set on the client.
// Reset client context.
CallerContext.setCurrent(
new CallerContext.Builder(
"clientContext,clientIp:1.1.1.1,clientPort:1234").build());

// Issue another call via the router.
routerProtocol.getFileInfo(dirPath);

// The audit log should not contain the original clientIp and clientPort
// set by client.
assertFalse(auditLog.getOutput().contains("clientIp:1.1.1.1"));
assertFalse(auditLog.getOutput().contains("clientPort:1234"));
}
/**
 * The Router must append its own clientId/clientCallId to the caller
 * context and must not pass through values that the client itself placed
 * in its context.
 */
@Test
public void testAddClientIdAndCallIdToCallerContext() throws IOException {
GenericTestUtils.LogCapturer auditLog =
GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);

// 1. ClientId and ClientCallId are not set on the client.
// Set client context.
CallerContext.setCurrent(
new CallerContext.Builder("clientContext").build());

// Create a directory via the router.
String dirPath = "/test";
routerProtocol.mkdirs(dirPath, new FsPermission("755"), false);

// The audit log should contains "clientId:" and "clientCallId:".
assertTrue(auditLog.getOutput().contains("clientId:"));
assertTrue(auditLog.getOutput().contains("clientCallId:"));
assertTrue(verifyFileExists(routerFS, dirPath));
auditLog.clearOutput();

// 2. ClientId and ClientCallId are set on the client.
// Reset client context.
CallerContext.setCurrent(
new CallerContext.Builder(
"clientContext,clientId:mockClientId,clientCallId:4321").build());

// Issue another call via the router.
routerProtocol.getFileInfo(dirPath);

// The audit log should not contain the original clientId and clientCallId
// set by client.
assertFalse(auditLog.getOutput().contains("clientId:mockClientId"));
assertFalse(auditLog.getOutput().contains("clientCallId:4321"));
}
/**
 * getContentSummary through the Router must account for snapshots: after
 * deleting files under a snapshotted directory, the deleted entries still
 * show up in the snapshot file/directory counts.
 */
@Test
public void testContentSummaryWithSnapshot() throws Exception {
DistributedFileSystem routerDFS = (DistributedFileSystem) routerFS;
Path dirPath = new Path("/testdir");
Path subdirPath = new Path(dirPath, "subdir");
Path filePath1 = new Path(dirPath, "file");
Path filePath2 = new Path(subdirPath, "file2");

// Create directories.
routerDFS.mkdirs(dirPath);
routerDFS.mkdirs(subdirPath);

// Create files.
createFile(routerDFS, filePath1.toString(), 32);
createFile(routerDFS, filePath2.toString(), 16);

// Allow & Create snapshot.
routerDFS.allowSnapshot(dirPath);
routerDFS.createSnapshot(dirPath, "s1");

try {
// Check content summary, snapshot count should be 0
ContentSummary contentSummary = routerDFS.getContentSummary(dirPath);
assertEquals(0, contentSummary.getSnapshotDirectoryCount());
assertEquals(0, contentSummary.getSnapshotFileCount());

// Delete the file & subdir(Total 2 files deleted & 1 directory)
routerDFS.delete(filePath1, true);
routerDFS.delete(subdirPath, true);

// Get the Content Summary
contentSummary = routerDFS.getContentSummary(dirPath);
assertEquals(1, contentSummary.getSnapshotDirectoryCount());
assertEquals(2, contentSummary.getSnapshotFileCount());
} finally {
// Cleanup
routerDFS.deleteSnapshot(dirPath, "s1");
routerDFS.disallowSnapshot(dirPath);
routerDFS.delete(dirPath, true);
}
}
/**
 * Toggling RBFMetrics#setEnableGetDNUsage controls whether getNodeUsage()
 * proxies datanode-report RPCs to the subclusters: when disabled it returns
 * a zeroed report without issuing any proxy ops.
 */
@Test
public void testDisableNodeUsageInRBFMetrics() throws JSONException {
RBFMetrics rbfMetrics = router.getRouter().getMetrics();
FederationRPCMetrics federationRPCMetrics = router.getRouter().getRpcServer().getRPCMetrics();

long proxyOpBefore = federationRPCMetrics.getProxyOps();
String nodeUsageEnable = router.getRouter().getMetrics().getNodeUsage();
assertNotNull(nodeUsageEnable);
// Each enabled usage refresh issues two proxied RPCs here.
long proxyOpAfterWithEnable = federationRPCMetrics.getProxyOps();
assertEquals(proxyOpBefore + 2, proxyOpAfterWithEnable);

rbfMetrics.setEnableGetDNUsage(false);
String nodeUsageDisable = rbfMetrics.getNodeUsage();
assertNotNull(nodeUsageDisable);
// Disabled: no additional proxy ops are issued.
long proxyOpAfterWithDisable = federationRPCMetrics.getProxyOps();
assertEquals(proxyOpAfterWithEnable, proxyOpAfterWithDisable);
// The disabled report is all zeros.
JSONObject jsonObject = new JSONObject(nodeUsageDisable);
JSONObject json = jsonObject.getJSONObject("nodeUsage");
assertEquals("0.00%", json.get("min"));
assertEquals("0.00%", json.get("median"));
assertEquals("0.00%", json.get("max"));
assertEquals("0.00%", json.get("stdDev"));

rbfMetrics.setEnableGetDNUsage(true);
String nodeUsageWithReEnable = rbfMetrics.getNodeUsage();
assertNotNull(nodeUsageWithReEnable);
// Re-enabled: proxy ops resume.
long proxyOpAfterWithReEnable = federationRPCMetrics.getProxyOps();
assertEquals(proxyOpAfterWithDisable + 2, proxyOpAfterWithReEnable);
}
/**
 * getListing through the Router must return entries in the same order as
 * the backing Namenode, including names with characters that sort
 * differently ('ß', '%').
 */
@Test
public void testGetListingOrder() throws Exception {
  String ns1 = getCluster().getNameservices().get(1);
  String destBasePath = cluster.getNamenodeTestDirectoryForNS(ns1);
  final String testPath1 = destBasePath + "/ßtestGetListingOrder";
  final String testPath2 = destBasePath + "/%testGetListingOrder";
  final FileSystem fileSystem1 = getCluster().
      getNamenode(ns1, null).getFileSystem();

  try {
    // Create the test file in ns1.
    createFile(fileSystem1, testPath1, 32);
    createFile(fileSystem1, testPath2, 32);

    // The expected order is whatever the Namenode itself returns.
    NamenodeContext nn = cluster.getNamenode(ns1, null);
    FileStatus[] fileStatuses =
        nn.getFileSystem().listStatus(new Path(destBasePath));
    List<String> requiredPaths = Arrays.stream(fileStatuses)
        .map(fileStatus -> fileStatus.getPath().getName())
        .collect(Collectors.toList());
    Iterator<String> requiredPathsIterator = requiredPaths.iterator();

    // Fetch listing through the Router.
    DirectoryListing listing =
        routerProtocol.getListing(cluster.getFederatedTestDirectoryForNS(ns1),
            HdfsFileStatus.EMPTY_NAME, false);
    assertEquals(requiredPaths.size(), listing.getPartialListing().length);

    // Match each path returned and verify order returned.
    for (HdfsFileStatus f : listing.getPartialListing()) {
      String expectedName = requiredPathsIterator.next();
      String actualName = f.getFullPath(new Path("/")).getName();
      // Fix: JUnit expects (expected, actual); the original call had the
      // arguments reversed, producing misleading failure messages.
      assertEquals(expectedName, actualName);
    }
  } finally {
    fileSystem1.delete(new Path(testPath1), true);
    fileSystem1.delete(new Path(testPath2), true);
  }
}
/**
 * CallerContext uses an InheritableThreadLocal, so a context set on one
 * client thread must not leak into the Router's async handler threads and
 * reappear on later, unrelated calls.
 */
@Test
public void testCallerContextNotResetByAsyncHandler() throws IOException {
GenericTestUtils.LogCapturer auditLog =
GenericTestUtils.LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
String dirPath = "/test";

// The reason we start this child thread is that CallContext use InheritableThreadLocal.
SubjectInheritingThread t1 = new SubjectInheritingThread(() -> {
// Set flag async:true.
CallerContext.setCurrent(
new CallerContext.Builder("async:true").build());

// Issue some RPCs via the router to populate the CallerContext of async handler thread.
for (int i = 0; i < 10; i++) {
try {
routerProtocol.mkdirs(dirPath, new FsPermission("755"), false);
assertTrue(verifyFileExists(routerFS, dirPath));
routerProtocol.delete(dirPath, true);
assertFalse(verifyFileExists(routerFS, dirPath));
} catch (Exception e) {
throw new RuntimeException(e);
}
}

// The audit log should contains async:true.
assertTrue(auditLog.getOutput().contains("async:true"));
auditLog.clearOutput();
assertFalse(auditLog.getOutput().contains("async:true"));
});

t1.start();
try {
t1.join();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}

// A later call from this (context-free) thread must not pick up the
// stale context from the handler threads.
routerProtocol.getFileInfo(dirPath);
// The audit log should not contain async:true.
assertFalse(auditLog.getOutput().contains("async:true"));
}
/**
 * The RouterRpcClient must expose a non-null executor service for
 * concurrent invocations once the Router is running.
 */
@Test
public void testConcurrentCallExecutorInitial() {
assertNotNull(router.getRouterRpcClient().getExecutorService());
}
}
|
to
|
java
|
quarkusio__quarkus
|
extensions/oidc-token-propagation/runtime/src/main/java/io/quarkus/oidc/token/propagation/runtime/OidcTokenPropagationConfig.java
|
{
"start": 401,
"end": 2628
}
|
/**
 * Configuration for propagating OIDC tokens from injected MP RestClient
 * implementations, optionally exchanging or re-securing them first.
 */
interface ____ {
/**
 * Enable either AccessTokenRequestFilter or JsonWebTokenRequestFilter for all the injected MP RestClient implementations.
 *
 * AccessTokenRequestFilter can propagate both opaque (binary) and JsonWebToken tokens but it cannot modify
 * and secure the updated JsonWebToken tokens.
 * JsonWebTokenRequestFilter can only propagate JsonWebToken tokens but it can also modify and secure them again.
 * Enable the 'jsonWebToken' property to have JsonWebTokenRequestFilter registered.
 *
 * Alternatively, instead of using this property for registering these filters with all the injected MP RestClient
 * implementations, both filters can be registered as MP RestClient providers with the specific MP RestClient
 * implementations.
 */
@WithDefault("false")
boolean registerFilter();
/**
 * Enable JsonWebTokenRequestFilter instead of AccessTokenRequestFilter for all the injected MP RestClient implementations.
 * This filter can propagate as well as modify and secure the updated JsonWebToken tokens.
 *
 * Note this property is ignored unless the 'registerFilter' property is enabled.
 */
@WithDefault("false")
boolean jsonWebToken();
/**
 * Secure the injected and possibly modified JsonWebToken.
 * For example, a JsonWebToken produced and signed by an OpenId Connect provider can be re-signed using a new private key.
 *
 * Note this property is injected into JsonWebTokenRequestFilter.
 */
@WithDefault("false")
boolean secureJsonWebToken();
/**
 * Exchange the current token with the OpenId Connect Provider for a new token using either
 * "urn:ietf:params:oauth:grant-type:token-exchange" or "urn:ietf:params:oauth:grant-type:jwt-bearer" token grant
 * before propagating it.
 *
 * Note this property is injected into AccessTokenRequestFilter.
 */
@WithDefault("false")
boolean exchangeToken();
/**
 * Name of the configured OidcClient.
 *
 * Note this property is injected into AccessTokenRequestFilter and is only used if the `exchangeToken` property is enabled.
 */
Optional<String> clientName();
}
|
OidcTokenPropagationConfig
|
java
|
spring-projects__spring-framework
|
spring-webflux/src/test/java/org/springframework/web/reactive/result/condition/CompositeRequestConditionTests.java
|
{
"start": 1215,
"end": 5240
}
|
class ____ {

	private ParamsRequestCondition param1;
	private ParamsRequestCondition param2;
	private ParamsRequestCondition param3;

	private HeadersRequestCondition header1;
	private HeadersRequestCondition header2;
	private HeadersRequestCondition header3;


	@BeforeEach
	void setup() {
		// Two simple conditions of each kind, plus their pairwise combination.
		this.param1 = new ParamsRequestCondition("param1");
		this.param2 = new ParamsRequestCondition("param2");
		this.param3 = this.param1.combine(this.param2);

		this.header1 = new HeadersRequestCondition("header1");
		this.header2 = new HeadersRequestCondition("header2");
		this.header3 = this.header1.combine(this.header2);
	}


	@Test
	void combine() {
		// Combining two composites combines their conditions pairwise.
		CompositeRequestCondition first = new CompositeRequestCondition(this.param1, this.header1);
		CompositeRequestCondition second = new CompositeRequestCondition(this.param2, this.header2);
		CompositeRequestCondition expected = new CompositeRequestCondition(this.param3, this.header3);

		assertThat(first.combine(second)).isEqualTo(expected);
	}

	@Test
	void combineEmpty() {
		// An empty composite acts as the identity for combine().
		CompositeRequestCondition empty = new CompositeRequestCondition();
		CompositeRequestCondition nonEmpty = new CompositeRequestCondition(this.param1);

		assertThat(empty.combine(empty)).isSameAs(empty);
		assertThat(nonEmpty.combine(empty)).isSameAs(nonEmpty);
		assertThat(empty.combine(nonEmpty)).isSameAs(nonEmpty);
	}

	@Test
	void combineDifferentLength() {
		// Composites holding a different number of conditions cannot combine.
		CompositeRequestCondition shorter = new CompositeRequestCondition(this.param1);
		CompositeRequestCondition longer = new CompositeRequestCondition(this.param1, this.header1);
		assertThatIllegalArgumentException().isThrownBy(() -> shorter.combine(longer));
	}

	@Test
	void match() {
		MockServerHttpRequest request = MockServerHttpRequest.get("/path?param1=paramValue1").build();
		MockServerWebExchange exchange = MockServerWebExchange.from(request);

		RequestCondition<?> methods = new RequestMethodsRequestCondition(RequestMethod.GET, RequestMethod.POST);
		RequestCondition<?> matchingMethods = new RequestMethodsRequestCondition(RequestMethod.GET);

		// Each wrapped condition is narrowed to its matching subset.
		CompositeRequestCondition actual = new CompositeRequestCondition(this.param1, methods);
		CompositeRequestCondition expected = new CompositeRequestCondition(this.param1, matchingMethods);

		assertThat(actual.getMatchingCondition(exchange)).isEqualTo(expected);
	}

	@Test
	void noMatch() {
		// The request has no "param1", so the composite does not match.
		CompositeRequestCondition condition = new CompositeRequestCondition(this.param1);
		MockServerWebExchange exchange = MockServerWebExchange.from(MockServerHttpRequest.get("/"));
		assertThat(condition.getMatchingCondition(exchange)).isNull();
	}

	@Test
	void matchEmpty() {
		// An empty composite matches any request and returns itself.
		CompositeRequestCondition empty = new CompositeRequestCondition();
		MockServerWebExchange exchange = MockServerWebExchange.from(MockServerHttpRequest.get("/"));
		assertThat(empty.getMatchingCondition(exchange)).isSameAs(empty);
	}

	@Test
	void compare() {
		CompositeRequestCondition single = new CompositeRequestCondition(this.param1);
		CompositeRequestCondition combined = new CompositeRequestCondition(this.param3);
		MockServerWebExchange exchange = MockServerWebExchange.from(MockServerHttpRequest.get("/"));

		// The composite with the more specific (combined) condition wins.
		assertThat(single.compareTo(combined, exchange)).isEqualTo(1);
		assertThat(combined.compareTo(single, exchange)).isEqualTo(-1);
	}

	@Test
	void compareEmpty() {
		CompositeRequestCondition empty = new CompositeRequestCondition();
		CompositeRequestCondition nonEmpty = new CompositeRequestCondition(this.param1);
		MockServerWebExchange exchange = MockServerWebExchange.from(MockServerHttpRequest.get("/"));

		// Empty composites are equal to each other and sort after non-empty ones.
		assertThat(empty.compareTo(empty, exchange)).isEqualTo(0);
		assertThat(nonEmpty.compareTo(empty, exchange)).isEqualTo(-1);
		assertThat(empty.compareTo(nonEmpty, exchange)).isEqualTo(1);
	}

	@Test
	void compareDifferentLength() {
		// Composites holding a different number of conditions cannot be compared.
		CompositeRequestCondition shorter = new CompositeRequestCondition(this.param1);
		CompositeRequestCondition longer = new CompositeRequestCondition(this.param1, this.header1);
		assertThatIllegalArgumentException().isThrownBy(() ->
				shorter.compareTo(longer, MockServerWebExchange.from(MockServerHttpRequest.get("/"))));
	}

}
|
CompositeRequestConditionTests
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/decorators/delegate/DelegateSubtypeTest.java
|
{
"start": 745,
"end": 829
}
|
// Minimal contract with a single ping() operation, implemented by the
// bean declared below.
interface ____ {
int ping();
}
@ApplicationScoped
public static
|
A
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.