language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/network/api/EndOfSegmentEvent.java | {
"start": 1222,
"end": 2160
} | class ____ extends RuntimeEvent {
/** The singleton instance of this event. */
public static final EndOfSegmentEvent INSTANCE = new EndOfSegmentEvent();
private EndOfSegmentEvent() {}
@Override
public void write(DataOutputView out) throws IOException {
throw new UnsupportedOperationException("This method should never be called");
}
@Override
public void read(DataInputView in) throws IOException {
throw new UnsupportedOperationException("This method should never be called");
}
// ------------------------------------------------------------------------
@Override
public int hashCode() {
return 1965146672;
}
@Override
public boolean equals(Object obj) {
return obj != null && obj.getClass() == EndOfSegmentEvent.class;
}
@Override
public String toString() {
return getClass().getSimpleName();
}
}
| EndOfSegmentEvent |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/lazytoone/LanyProxylessManyToOneTests.java | {
"start": 9089,
"end": 9765
} | class ____ {
@Id
private Integer id;
@ManyToOne( fetch = LAZY )
private Customer customer;
private BigDecimal amount;
public Order() {
}
public Order(Integer id, Customer customer, BigDecimal amount) {
this.id = id;
this.customer = customer;
this.amount = amount;
}
public Integer getId() {
return id;
}
private void setId(Integer id) {
this.id = id;
}
public Customer getCustomer() {
return customer;
}
public void setCustomer(Customer customer) {
this.customer = customer;
}
public BigDecimal getAmount() {
return amount;
}
public void setAmount(BigDecimal amount) {
this.amount = amount;
}
}
}
| Order |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/ThreadSafeCheckerTest.java | {
"start": 23640,
"end": 24100
} | class ____ extends Super {}
""")
.doTest();
}
// Report errors in compilation order, and detect transitive errors even if immediate
// supertype is unannotated.
@Test
public void transitive() {
compilationHelper
.addSourceLines(
"threadsafety/I.java",
"""
package threadsafety;
import com.google.errorprone.annotations.ThreadSafe;
@ThreadSafe
| Test |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/JmsEndpointBuilderFactory.java | {
"start": 26813,
"end": 28033
} | class ____ is good enough as
* subscription name). Note that shared subscriptions may also be
* durable, so this flag can (and often will) be combined with
* subscriptionDurable as well. Only makes sense when listening to a
* topic (pub-sub domain), therefore this method switches the
* pubSubDomain flag as well. Requires a JMS 2.0 compatible message
* broker.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param subscriptionShared the value to set
* @return the dsl builder
*/
default JmsEndpointConsumerBuilder subscriptionShared(boolean subscriptionShared) {
doSetProperty("subscriptionShared", subscriptionShared);
return this;
}
/**
* Set whether to make the subscription shared. The shared subscription
* name to be used can be specified through the subscriptionName
* property. Default is false. Set this to true to register a shared
* subscription, typically in combination with a subscriptionName value
* (unless your message listener | name |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/TestSQLFederationStateStore.java | {
"start": 5427,
"end": 7507
} | class ____ extends FederationStateStoreBaseTest {
public static final Logger LOG =
LoggerFactory.getLogger(TestSQLFederationStateStore.class);
private static final String HSQLDB_DRIVER = "org.hsqldb.jdbc.JDBCDataSource";
private static final String DATABASE_URL = "jdbc:hsqldb:mem:state";
private static final String DATABASE_USERNAME = "SA";
private static final String DATABASE_PASSWORD = "";
private SQLFederationStateStore sqlFederationStateStore = null;
@Override
protected FederationStateStore createStateStore() {
YarnConfiguration conf = new YarnConfiguration();
conf.set(YarnConfiguration.FEDERATION_STATESTORE_SQL_JDBC_CLASS,
HSQLDB_DRIVER);
conf.set(YarnConfiguration.FEDERATION_STATESTORE_SQL_USERNAME,
DATABASE_USERNAME);
conf.set(YarnConfiguration.FEDERATION_STATESTORE_SQL_PASSWORD,
DATABASE_PASSWORD);
conf.set(YarnConfiguration.FEDERATION_STATESTORE_SQL_URL,
DATABASE_URL + System.currentTimeMillis());
conf.setInt(YarnConfiguration.FEDERATION_STATESTORE_MAX_APPLICATIONS, 10);
conf.setInt(YarnConfiguration.FEDERATION_STATESTORE_SQL_MAXCONNECTIONS, 10);
super.setConf(conf);
sqlFederationStateStore = new HSQLDBFederationStateStore();
return sqlFederationStateStore;
}
@Test
public void testSqlConnectionsCreatedCount() throws YarnException {
FederationStateStore stateStore = getStateStore();
SubClusterId subClusterId = SubClusterId.newInstance("SC");
ApplicationId appId = ApplicationId.newInstance(1, 1);
SubClusterInfo subClusterInfo = createSubClusterInfo(subClusterId);
stateStore.registerSubCluster(
SubClusterRegisterRequest.newInstance(subClusterInfo));
assertEquals(subClusterInfo, querySubClusterInfo(subClusterId));
addApplicationHomeSC(appId, subClusterId);
assertEquals(subClusterId, queryApplicationHomeSC(appId));
// Verify if connection is created only once at statestore init
assertEquals(1,
FederationStateStoreClientMetrics.getNumConnections());
}
| TestSQLFederationStateStore |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/event/spi/AbstractPreDatabaseOperationEvent.java | {
"start": 376,
"end": 981
} | class ____ extends AbstractDatabaseOperationEvent {
/**
* Constructs an event containing the pertinent information.
*
* @param source The session from which the event originated.
* @param entity The entity to be involved in the database operation.
* @param id The entity id to be involved in the database operation.
* @param persister The entity's persister.
*/
public AbstractPreDatabaseOperationEvent(
SharedSessionContractImplementor source,
Object entity,
Object id,
EntityPersister persister) {
super( source, entity, id, persister );
}
}
| AbstractPreDatabaseOperationEvent |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/HSClientProtocolPBClientImpl.java | {
"start": 1190,
"end": 1643
} | class ____ extends MRClientProtocolPBClientImpl
implements HSClientProtocol {
public HSClientProtocolPBClientImpl(long clientVersion,
InetSocketAddress addr, Configuration conf) throws IOException {
super();
RPC.setProtocolEngine(conf, HSClientProtocolPB.class,
ProtobufRpcEngine2.class);
proxy = (HSClientProtocolPB)RPC.getProxy(
HSClientProtocolPB.class, clientVersion, addr, conf);
}
} | HSClientProtocolPBClientImpl |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/checkreturnvalue/CanIgnoreReturnValueSuggesterTest.java | {
"start": 1716,
"end": 2180
} | class ____ {
private String name;
@CanIgnoreReturnValue
public Client setName(String name) {
this.name = name;
return this;
}
}
""")
.doTest();
}
@Test
public void b362106953_returnThis() {
helper
.addInputLines(
"Client.java",
"""
package com.google.frobber;
public final | Client |
java | google__guava | android/guava/src/com/google/common/collect/ImmutableRangeSet.java | {
"start": 10879,
"end": 18817
} | class ____<C extends Comparable>
extends ImmutableList<Range<C>> {
private final ImmutableList<Range<C>> ranges;
// True if the "positive" range set is empty or bounded below.
private final boolean positiveBoundedBelow;
// True if the "positive" range set is empty or bounded above.
private final boolean positiveBoundedAbove;
private final int size;
ComplementRanges(ImmutableList<Range<C>> ranges) {
this.ranges = ranges;
this.positiveBoundedBelow = ranges.get(0).hasLowerBound();
this.positiveBoundedAbove = Iterables.getLast(ranges).hasUpperBound();
int size = ranges.size() - 1;
if (positiveBoundedBelow) {
size++;
}
if (positiveBoundedAbove) {
size++;
}
this.size = size;
}
@Override
public int size() {
return size;
}
@Override
public Range<C> get(int index) {
checkElementIndex(index, size);
Cut<C> lowerBound;
if (positiveBoundedBelow) {
lowerBound = (index == 0) ? Cut.belowAll() : ranges.get(index - 1).upperBound;
} else {
lowerBound = ranges.get(index).upperBound;
}
Cut<C> upperBound;
if (positiveBoundedAbove && index == size - 1) {
upperBound = Cut.aboveAll();
} else {
upperBound = ranges.get(index + (positiveBoundedBelow ? 0 : 1)).lowerBound;
}
return Range.create(lowerBound, upperBound);
}
@Override
boolean isPartialView() {
return true;
}
// redeclare to help optimizers with b/310253115
@SuppressWarnings("RedundantOverride")
@Override
@J2ktIncompatible // serialization
Object writeReplace() {
return super.writeReplace();
}
}
@Override
public ImmutableRangeSet<C> complement() {
if (complement != null) {
return complement;
} else if (ranges.isEmpty()) {
return all();
} else if (ranges.size() == 1 && ranges.get(0).equals(Range.all())) {
return of();
} else {
return lazyComplement();
}
}
@LazyInit @RetainedWith private transient @Nullable ImmutableRangeSet<C> lazyComplement;
private ImmutableRangeSet<C> lazyComplement() {
ImmutableRangeSet<C> result = lazyComplement;
return result == null
? lazyComplement =
new ImmutableRangeSet<>(new ComplementRanges<>(ranges), /* complement= */ this)
: result;
}
/**
* Returns a new range set consisting of the union of this range set and {@code other}.
*
* <p>This is essentially the same as {@code TreeRangeSet.create(this).addAll(other)} except it
* returns an {@code ImmutableRangeSet}.
*
* @since 21.0
*/
public ImmutableRangeSet<C> union(RangeSet<C> other) {
return unionOf(Iterables.concat(asRanges(), other.asRanges()));
}
/**
* Returns a new range set consisting of the intersection of this range set and {@code other}.
*
* <p>This is essentially the same as {@code
* TreeRangeSet.create(this).removeAll(other.complement())} except it returns an {@code
* ImmutableRangeSet}.
*
* @since 21.0
*/
public ImmutableRangeSet<C> intersection(RangeSet<C> other) {
RangeSet<C> copy = TreeRangeSet.create(this);
copy.removeAll(other.complement());
return copyOf(copy);
}
/**
* Returns a new range set consisting of the difference of this range set and {@code other}.
*
* <p>This is essentially the same as {@code TreeRangeSet.create(this).removeAll(other)} except it
* returns an {@code ImmutableRangeSet}.
*
* @since 21.0
*/
public ImmutableRangeSet<C> difference(RangeSet<C> other) {
RangeSet<C> copy = TreeRangeSet.create(this);
copy.removeAll(other);
return copyOf(copy);
}
/**
* Returns a list containing the nonempty intersections of {@code range} with the ranges in this
* range set.
*/
private ImmutableList<Range<C>> intersectRanges(Range<C> range) {
if (ranges.isEmpty() || range.isEmpty()) {
return ImmutableList.of();
} else if (range.encloses(span())) {
return ranges;
}
int fromIndex;
if (range.hasLowerBound()) {
fromIndex =
SortedLists.binarySearch(
ranges,
Range::upperBound,
range.lowerBound,
KeyPresentBehavior.FIRST_AFTER,
KeyAbsentBehavior.NEXT_HIGHER);
} else {
fromIndex = 0;
}
int toIndex;
if (range.hasUpperBound()) {
toIndex =
SortedLists.binarySearch(
ranges,
Range::lowerBound,
range.upperBound,
KeyPresentBehavior.FIRST_PRESENT,
KeyAbsentBehavior.NEXT_HIGHER);
} else {
toIndex = ranges.size();
}
int length = toIndex - fromIndex;
if (length == 0) {
return ImmutableList.of();
} else {
return new ImmutableList<Range<C>>() {
@Override
public int size() {
return length;
}
@Override
public Range<C> get(int index) {
checkElementIndex(index, length);
if (index == 0 || index == length - 1) {
return ranges.get(index + fromIndex).intersection(range);
} else {
return ranges.get(index + fromIndex);
}
}
@Override
boolean isPartialView() {
return true;
}
// redeclare to help optimizers with b/310253115
@SuppressWarnings("RedundantOverride")
@Override
@J2ktIncompatible
@GwtIncompatible
Object writeReplace() {
return super.writeReplace();
}
};
}
}
/** Returns a view of the intersection of this range set with the given range. */
@Override
public ImmutableRangeSet<C> subRangeSet(Range<C> range) {
if (!isEmpty()) {
Range<C> span = span();
if (range.encloses(span)) {
return this;
} else if (range.isConnected(span)) {
return new ImmutableRangeSet<>(intersectRanges(range));
}
}
return of();
}
/**
* Returns an {@link ImmutableSortedSet} containing the same values in the given domain
* {@linkplain RangeSet#contains contained} by this range set.
*
* <p><b>Note:</b> {@code a.asSet(d).equals(b.asSet(d))} does not imply {@code a.equals(b)}! For
* example, {@code a} and {@code b} could be {@code [2..4]} and {@code (1..5)}, or the empty
* ranges {@code [3..3)} and {@code [4..4)}.
*
* <p><b>Warning:</b> Be extremely careful what you do with the {@code asSet} view of a large
* range set (such as {@code ImmutableRangeSet.of(Range.greaterThan(0))}). Certain operations on
* such a set can be performed efficiently, but others (such as {@link Set#hashCode} or {@link
* Collections#frequency}) can cause major performance problems.
*
* <p>The returned set's {@link Object#toString} method returns a shorthand form of the set's
* contents, such as {@code "[1..100]}"}.
*
* @throws IllegalArgumentException if neither this range nor the domain has a lower bound, or if
* neither has an upper bound
*/
public ImmutableSortedSet<C> asSet(DiscreteDomain<C> domain) {
checkNotNull(domain);
if (isEmpty()) {
return ImmutableSortedSet.of();
}
Range<C> span = span().canonical(domain);
if (!span.hasLowerBound()) {
// according to the spec of canonical, neither this ImmutableRangeSet nor
// the range have a lower bound
throw new IllegalArgumentException(
"Neither the DiscreteDomain nor this range set are bounded below");
} else if (!span.hasUpperBound()) {
try {
domain.maxValue();
} catch (NoSuchElementException e) {
throw new IllegalArgumentException(
"Neither the DiscreteDomain nor this range set are bounded above");
}
}
return new AsSet(domain);
}
private final | ComplementRanges |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/ser/GenericTypeSerializationTest.java | {
"start": 10313,
"end": 10649
} | class ____ implements Indexed<String> {
final UUID value;
@JsonCreator(mode = JsonCreator.Mode.DELEGATING)
TestIndexed(UUID value) {
this.value = value;
}
@Override
public String index() {
return value.toString();
}
}
public static final | TestIndexed |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java | {
"start": 2580,
"end": 3453
} | class ____ {
public static final Logger LOG =
LoggerFactory.getLogger(BlockScanner.class);
/**
* The DataNode that this scanner is associated with.
*/
private final DataNode datanode;
/**
* Maps Storage IDs to VolumeScanner objects.
*/
private final TreeMap<String, VolumeScanner> scanners =
new TreeMap<String, VolumeScanner>();
/**
* The scanner configuration.
*/
private Conf conf;
/**
* Timeout duration in milliseconds waiting for {@link VolumeScanner} to stop
* inside {@link #removeAllVolumeScanners}.
*/
private long joinVolumeScannersTimeOutMs;
@VisibleForTesting
void setConf(Conf conf) {
this.conf = conf;
for (Entry<String, VolumeScanner> entry : scanners.entrySet()) {
entry.getValue().setConf(conf);
}
}
/**
* The cached scanner configuration.
*/
static | BlockScanner |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/arch/Processor.java | {
"start": 1061,
"end": 1365
} | enum ____ the architecture of
* a microprocessor. The architecture represents the bit value
* of the microprocessor.
* The following architectures are defined:
* <ul>
* <li>32-bit</li>
* <li>64-bit</li>
* <li>Unknown</li>
* </ul>
*/
public | defines |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/LiteProtoToStringTest.java | {
"start": 4075,
"end": 4713
} | class ____ {
static void i(String s) {}
static void d(String s) {}
static void v(String s) {}
}
private void test(GeneratedMessageLite message) {
Log.i(message.toString());
Log.d(message.toString());
Log.v(message.toString());
}
}
""")
.doTest();
}
@Test
public void androidLogAtWarning_error() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import com.google.protobuf.GeneratedMessageLite;
| Log |
java | quarkusio__quarkus | integration-tests/kubernetes/quarkus-standard-way/src/test/java/io/quarkus/it/kubernetes/OpenshiftWithUberJarDeploymentConfigTest.java | {
"start": 792,
"end": 3523
} | class ____ {
@RegisterExtension
static final QuarkusProdModeTest config = new QuarkusProdModeTest()
.withApplicationRoot((jar) -> jar.addClasses(GreetingResource.class))
.setApplicationName("openshift-uberjar")
.setApplicationVersion("0.1-SNAPSHOT")
.setRun(true)
.setLogFileName("k8s.log")
.withConfigurationResource("openshift-with-uberjar.properties")
.overrideConfigKey("quarkus.openshift.deployment-kind", "deployment-config")
.setForcedDependencies(
List.of(
Dependency.of("io.quarkus", "quarkus-openshift", Version.getVersion())));
@ProdBuildResults
private ProdModeTestResults prodModeTestResults;
@LogFile
private Path logfile;
@Test
public void assertApplicationRuns() {
assertThat(logfile).isRegularFile().hasFileName("k8s.log");
TestUtil.assertLogFileContents(logfile, "kubernetes", "uberjar");
given().when().get("/greeting").then().statusCode(200).body(is("hello"));
}
@Test
public void assertGeneratedResources() throws IOException {
Path kubernetesDir = prodModeTestResults.getBuildDir().resolve("kubernetes");
assertThat(kubernetesDir).isDirectoryContaining(p -> p.getFileName().endsWith("openshift.json"))
.isDirectoryContaining(p -> p.getFileName().endsWith("openshift.yml"));
List<HasMetadata> openshiftList = DeserializationUtil.deserializeAsList(kubernetesDir.resolve("openshift.yml"));
assertThat(openshiftList).filteredOn(h -> "DeploymentConfig".equals(h.getKind())).singleElement().satisfies(h -> {
assertThat(h.getMetadata()).satisfies(m -> {
assertThat(m.getName()).isEqualTo("openshift-uberjar");
});
assertThat(h).extracting("spec").extracting("template").extracting("spec").isInstanceOfSatisfying(PodSpec.class,
podSpec -> {
assertThat(podSpec.getContainers()).singleElement().satisfies(container -> {
assertThat(container.getEnv())
.filteredOn(new Condition<EnvVar>(e -> e.getName().equals("APP_JAVA_LIB"),
"Environment variable JAVA_APP_LIB"))
.hasSize(0);
assertThat(container.getArgs())
.filteredOn(new Condition<String>(e -> e.equals("-cp"), "Classpath arguments"))
.hasSize(0);
});
});
});
}
}
| OpenshiftWithUberJarDeploymentConfigTest |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java | {
"start": 22771,
"end": 42364
} | class ____
extends LocalWrapperScriptBuilder {
private final String containerIdStr;
/**
* Create an instance for the given container and working directory.
*
* @param containerIdStr the container ID
* @param containerWorkDir the container's working directory
*/
public WindowsLocalWrapperScriptBuilder(String containerIdStr,
Path containerWorkDir) {
super(containerWorkDir);
this.containerIdStr = containerIdStr;
}
@Override
public void writeLocalWrapperScript(Path launchDst, Path pidFile,
PrintStream pout) {
// TODO: exit code script for Windows
// On Windows, the pid is the container ID, so that it can also serve as
// the name of the job object created by winutils for task management.
// Write to temp file followed by atomic move.
String normalizedPidFile = new File(pidFile.toString()).getPath();
pout.println("@echo " + containerIdStr + " > " + normalizedPidFile +
".tmp");
pout.println("@move /Y " + normalizedPidFile + ".tmp " +
normalizedPidFile);
pout.println("@call " + launchDst.toString());
}
}
@Override
public boolean signalContainer(ContainerSignalContext ctx)
throws IOException {
String user = ctx.getUser();
String pid = ctx.getPid();
Signal signal = ctx.getSignal();
LOG.debug("Sending signal {} to pid {} as user {}",
signal.getValue(), pid, user);
if (!containerIsAlive(pid)) {
return false;
}
try {
killContainer(pid, signal);
} catch (IOException e) {
if (!containerIsAlive(pid)) {
return false;
}
throw e;
}
return true;
}
/**
* No-op for reaping containers within the DefaultContainerExecutor.
*
* @param ctx Encapsulates information necessary for reaping containers.
* @return true given no operations are needed.
*/
@Override
public boolean reapContainer(ContainerReapContext ctx) {
return true;
}
@Override
public boolean isContainerAlive(ContainerLivenessContext ctx)
throws IOException {
String pid = ctx.getPid();
return containerIsAlive(pid);
}
/**
* Returns true if the process with the specified pid is alive.
*
* @param pid String pid
* @return boolean true if the process is alive
* @throws IOException if the command to test process liveliness fails
*/
@VisibleForTesting
public static boolean containerIsAlive(String pid) throws IOException {
try {
new ShellCommandExecutor(Shell.getCheckProcessIsAliveCommand(pid))
.execute();
// successful execution means process is alive
return true;
}
catch (ExitCodeException e) {
// failure (non-zero exit code) means process is not alive
return false;
}
}
/**
* Send a specified signal to the specified pid
*
* @param pid the pid of the process [group] to signal.
* @param signal signal to send
* @throws IOException if the command to kill the process fails
*/
protected void killContainer(String pid, Signal signal) throws IOException {
new ShellCommandExecutor(Shell.getSignalKillCommand(signal.getValue(), pid))
.execute();
}
@Override
public void deleteAsUser(DeletionAsUserContext ctx)
throws IOException, InterruptedException {
Path subDir = ctx.getSubDir();
List<Path> baseDirs = ctx.getBasedirs();
if (baseDirs == null || baseDirs.size() == 0) {
LOG.info("Deleting absolute path : {}", subDir);
if (!lfs.delete(subDir, true)) {
//Maybe retry
LOG.warn("delete returned false for path: [{}]", subDir);
}
return;
}
for (Path baseDir : baseDirs) {
Path del = subDir == null ? baseDir : new Path(baseDir, subDir);
LOG.info("Deleting path : {}", del);
try {
if (!lfs.delete(del, true)) {
LOG.warn("delete returned false for path: [{}]", del);
}
} catch (FileNotFoundException e) {
continue;
}
}
}
@Override
public void symLink(String target, String symlink) throws IOException {
FileUtil.symLink(target, symlink);
}
/**
* Permissions for user dir.
* $local.dir/usercache/$user
*/
static final short USER_PERM = (short)0750;
/**
* Permissions for user appcache dir.
* $local.dir/usercache/$user/appcache
*/
static final short APPCACHE_PERM = (short)0710;
/**
* Permissions for user filecache dir.
* $local.dir/usercache/$user/filecache
*/
static final short FILECACHE_PERM = (short)0710;
/**
* Permissions for user app dir.
* $local.dir/usercache/$user/appcache/$appId
*/
static final short APPDIR_PERM = (short)0710;
private long getDiskFreeSpace(Path base) throws IOException {
return lfs.getFsStatus(base).getRemaining();
}
private Path getApplicationDir(Path base, String user, String appId) {
return new Path(getAppcacheDir(base, user), appId);
}
private Path getUserCacheDir(Path base, String user) {
return new Path(new Path(base, ContainerLocalizer.USERCACHE), user);
}
private Path getAppcacheDir(Path base, String user) {
return new Path(getUserCacheDir(base, user),
ContainerLocalizer.APPCACHE);
}
private Path getFileCacheDir(Path base, String user) {
return new Path(getUserCacheDir(base, user),
ContainerLocalizer.FILECACHE);
}
/**
* Return a randomly chosen application directory from a list of local storage
* directories. The probability of selecting a directory is proportional to
* its size.
*
* @param localDirs the target directories from which to select
* @param user the user who owns the application
* @param appId the application ID
* @return the selected directory
* @throws IOException if no application directories for the user can be
* found
*/
protected Path getWorkingDir(List<String> localDirs, String user,
String appId) throws IOException {
long totalAvailable = 0L;
long[] availableOnDisk = new long[localDirs.size()];
int i = 0;
// randomly choose the app directory
// the chance of picking a directory is proportional to
// the available space on the directory.
// firstly calculate the sum of all available space on these directories
for (String localDir : localDirs) {
Path curBase = getApplicationDir(new Path(localDir), user, appId);
long space = 0L;
try {
space = getDiskFreeSpace(curBase);
} catch (IOException e) {
LOG.warn("Unable to get Free Space for {}", curBase, e);
}
availableOnDisk[i++] = space;
totalAvailable += space;
}
// throw an IOException if totalAvailable is 0.
if (totalAvailable <= 0L) {
throw new IOException("Not able to find a working directory for " + user);
}
// make probability to pick a directory proportional to
// the available space on the directory.
long randomPosition = RandomUtils.nextLong() % totalAvailable;
int dir = pickDirectory(randomPosition, availableOnDisk);
return getApplicationDir(new Path(localDirs.get(dir)), user, appId);
}
/**
* Picks a directory based on the input random number and
* available size at each dir.
*/
@Private
@VisibleForTesting
int pickDirectory(long randomPosition, final long[] availableOnDisk) {
int dir = 0;
// skip zero available space directory,
// because totalAvailable is greater than 0 and randomPosition
// is less than totalAvailable, we can find a valid directory
// with nonzero available space.
while (availableOnDisk[dir] == 0L) {
dir++;
}
while (randomPosition >= availableOnDisk[dir]) {
randomPosition -= availableOnDisk[dir++];
}
return dir;
}
/**
* Use the {@link #lfs} {@link FileContext} to create the target directory.
*
* @param dirPath the target directory
* @param perms the target permissions for the target directory
* @param createParent whether the parent directories should also be created
* @param user the user as whom the target directory should be created.
* Used only on secure Windows hosts.
* @throws IOException if there's a failure performing a file operation
* @see WindowsSecureContainerExecutor
*/
protected void createDir(Path dirPath, FsPermission perms,
boolean createParent, String user) throws IOException {
lfs.mkdir(dirPath, perms, createParent);
if (!perms.equals(perms.applyUMask(lfs.getUMask()))) {
lfs.setPermission(dirPath, perms);
}
}
/**
* Initialize the local directories for a particular user.
* <ul>.mkdir
* <li>$local.dir/usercache/$user</li>
* </ul>
*
* @param localDirs the target directories to create
* @param user the user whose local cache directories should be initialized
* @throws IOException if there's an issue initializing the user local
* directories
*/
void createUserLocalDirs(List<String> localDirs, String user)
throws IOException {
boolean userDirStatus = false;
FsPermission userperms = new FsPermission(USER_PERM);
for (String localDir : localDirs) {
// create $local.dir/usercache/$user and its immediate parent
try {
createDir(getUserCacheDir(new Path(localDir), user), userperms, true,
user);
} catch (IOException e) {
LOG.warn("Unable to create the user directory : {}", localDir, e);
continue;
}
userDirStatus = true;
}
if (!userDirStatus) {
throw new IOException("Not able to initialize user directories "
+ "in any of the configured local directories for user " + user);
}
}
/**
* Initialize the local cache directories for a particular user.
* <ul>
* <li>$local.dir/usercache/$user</li>
* <li>$local.dir/usercache/$user/appcache</li>
* <li>$local.dir/usercache/$user/filecache</li>
* </ul>
*
* @param localDirs the target directories to create
* @param user the user whose local cache directories should be initialized
* @throws IOException if there's an issue initializing the cache
* directories
*/
void createUserCacheDirs(List<String> localDirs, String user)
throws IOException {
LOG.info("Initializing user {}", user);
boolean appcacheDirStatus = false;
boolean distributedCacheDirStatus = false;
FsPermission appCachePerms = new FsPermission(APPCACHE_PERM);
FsPermission fileperms = new FsPermission(FILECACHE_PERM);
for (String localDir : localDirs) {
// create $local.dir/usercache/$user/appcache
Path localDirPath = new Path(localDir);
final Path appDir = getAppcacheDir(localDirPath, user);
try {
createDir(appDir, appCachePerms, true, user);
appcacheDirStatus = true;
} catch (IOException e) {
LOG.warn("Unable to create app cache directory : {}", appDir, e);
}
// create $local.dir/usercache/$user/filecache
final Path distDir = getFileCacheDir(localDirPath, user);
try {
createDir(distDir, fileperms, true, user);
distributedCacheDirStatus = true;
} catch (IOException e) {
LOG.warn("Unable to create file cache directory : {}", distDir, e);
}
}
if (!appcacheDirStatus) {
throw new IOException("Not able to initialize app-cache directories "
+ "in any of the configured local directories for user " + user);
}
if (!distributedCacheDirStatus) {
throw new IOException(
"Not able to initialize distributed-cache directories "
+ "in any of the configured local directories for user "
+ user);
}
}
/**
* Initialize the local directories for a particular user.
* <ul>
* <li>$local.dir/usercache/$user/appcache/$appid</li>
* </ul>
*
* @param localDirs the target directories to create
* @param user the user whose local cache directories should be initialized
* @param appId the application ID
* @throws IOException if there's an issue initializing the application
* directories
*/
void createAppDirs(List<String> localDirs, String user, String appId)
throws IOException {
boolean initAppDirStatus = false;
FsPermission appperms = new FsPermission(APPDIR_PERM);
for (String localDir : localDirs) {
Path fullAppDir = getApplicationDir(new Path(localDir), user, appId);
// create $local.dir/usercache/$user/appcache/$appId
try {
createDir(fullAppDir, appperms, true, user);
initAppDirStatus = true;
} catch (IOException e) {
LOG.warn("Unable to create app directory {}",
fullAppDir, e);
}
}
if (!initAppDirStatus) {
throw new IOException("Not able to initialize app directories "
+ "in any of the configured local directories for app "
+ appId.toString());
}
}
/**
* Create application log directories on all disks.
*
* @param appId the application ID
* @param logDirs the target directories to create
* @param user the user whose local cache directories should be initialized
* @throws IOException if there's an issue initializing the application log
* directories
*/
void createAppLogDirs(String appId, List<String> logDirs, String user)
throws IOException {
boolean appLogDirStatus = false;
FsPermission appLogDirPerms = new
FsPermission(getLogDirPermissions());
for (String rootLogDir : logDirs) {
// create $log.dir/$appid
Path appLogDir = new Path(rootLogDir, appId);
try {
createDir(appLogDir, appLogDirPerms, true, user);
} catch (IOException e) {
LOG.warn("Unable to create the app-log directory : {}", appLogDir, e);
continue;
}
appLogDirStatus = true;
}
if (!appLogDirStatus) {
throw new IOException("Not able to initialize app-log directories "
+ "in any of the configured local directories for app " + appId);
}
}
/**
 * Create application log directories on all disks.
 *
 * @param appId the application ID
 * @param containerId the container ID
 * @param logDirs the target directories to create
 * @param user the user as whom the directories should be created.
 * Used only on secure Windows hosts.
 * @throws IOException if there's an issue initializing the container log
 * directories
 */
void createContainerLogDirs(String appId, String containerId,
    List<String> logDirs, String user) throws IOException {
  boolean containerLogDirStatus = false;
  FsPermission containerLogDirPerms = new
      FsPermission(getLogDirPermissions());
  for (String rootLogDir : logDirs) {
    // create $log.dir/$appid/$containerid
    Path appLogDir = new Path(rootLogDir, appId);
    Path containerLogDir = new Path(appLogDir, containerId);
    try {
      createDir(containerLogDir, containerLogDirPerms, true, user);
    } catch (IOException e) {
      // Log the directory that actually failed to be created;
      // previously this logged the parent app-log directory instead.
      LOG.warn("Unable to create the container-log directory : {}",
          containerLogDir, e);
      continue;
    }
    containerLogDirStatus = true;
  }
  if (!containerLogDirStatus) {
    throw new IOException(
        "Not able to initialize container-log directories "
            + "in any of the configured local directories for container "
            + containerId);
  }
}
/**
 * Return the default container log directory permissions, reading them
 * from configuration on first use and caching the result.
 *
 * @return the default container log directory permissions
 */
@VisibleForTesting
public String getLogDirPermissions() {
  String cached = this.logDirPermissions;
  if (cached == null) {
    cached = getConf().get(
        YarnConfiguration.NM_DEFAULT_CONTAINER_EXECUTOR_LOG_DIRS_PERMISSIONS,
        YarnConfiguration.NM_DEFAULT_CONTAINER_EXECUTOR_LOG_DIRS_PERMISSIONS_DEFAULT);
    this.logDirPermissions = cached;
  }
  return cached;
}
/**
 * Clear the internal variable for repeatable testing.
 */
@VisibleForTesting
public void clearLogDirPermissions() {
  // Drop the cached value so the next getLogDirPermissions() call
  // re-reads the configuration.
  this.logDirPermissions = null;
}
/**
 * Exec into a running container. Not supported by this executor: no
 * interactive shell is created and no streams are produced.
 *
 * @param ctx Encapsulates information necessary for exec containers.
 * @return always {@code null}; the input/output stream pair of an
 * interactive shell is never created by this implementation.
 * @throws ContainerExecutionException never thrown here; declared to
 * satisfy the overridden contract.
 */
@Override
public IOStreamPair execContainer(ContainerExecContext ctx)
    throws ContainerExecutionException {
  return null;
}
/**
 * Return the list of paths of given local directories.
 *
 * @param dirs the directory names to convert
 * @return the list of paths of given local directories, in input order
 */
private static List<Path> getPaths(List<String> dirs) {
  // Presize the list; iterate directly instead of indexed get(i) calls.
  List<Path> paths = new ArrayList<>(dirs.size());
  for (String dir : dirs) {
    paths.add(new Path(dir));
  }
  return paths;
}
/**
 * Updating the YARN sysfs is not supported by this executor.
 *
 * @throws ServiceStateException always, since no implementation is
 * available
 */
@Override
public void updateYarnSysFS(Context ctx, String user,
    String appId, String spec) throws IOException {
  throw new ServiceStateException("Implementation unavailable");
}
/**
 * Reacquire a running container, first recovering its NUMA resource
 * binding when NUMA awareness is enabled. The NUMA allocation is released
 * in the finally block regardless of whether recovery or reacquisition
 * succeeds.
 */
@Override
public int reacquireContainer(ContainerReacquisitionContext ctx)
    throws IOException, InterruptedException {
  try {
    if (numaResourceAllocator != null) {
      numaResourceAllocator.recoverNumaResource(ctx.getContainerId());
    }
    return super.reacquireContainer(ctx);
  } finally {
    // Always release NUMA resources, even on failure above.
    postComplete(ctx.getContainerId());
  }
}
/**
 * clean up and release of resources.
 *
 * @param containerId containerId of running container
 */
public void postComplete(final ContainerId containerId) {
  // Nothing to release when NUMA awareness is disabled.
  if (numaResourceAllocator == null) {
    return;
  }
  try {
    numaResourceAllocator.releaseNumaResource(containerId);
  } catch (ResourceHandlerException e) {
    // Best-effort release: log and continue.
    LOG.warn("NumaResource release failed for " +
        "containerId: {}. Exception: ", containerId, e);
  }
}
/**
 * @param resourceAllocation NonNull NumaResourceAllocation object reference
 * @return Array of numa specific commands
 */
String[] getNumaCommands(NumaResourceAllocation resourceAllocation) {
  String memNodes = String.join(",", resourceAllocation.getMemNodes());
  String cpuNodes = String.join(",", resourceAllocation.getCpuNodes());
  // numactl invocation: binary, memory interleave list, cpu bind list.
  return new String[] {
      numactl,
      "--interleave=" + memNodes,
      "--cpunodebind=" + cpuNodes
  };
}
/**
 * Concatenate two command arrays.
 *
 * @param firstStringArray Array of String
 * @param secondStringArray Array of String
 * @return combined array of string where first elements are from
 * firstStringArray and later are the elements from secondStringArray; if
 * either side is null or empty the other side is returned as-is (which may
 * itself be null)
 */
String[] concatStringCommands(String[] firstStringArray, String[] secondStringArray) {
  // An absent or empty side contributes nothing; return the other side
  // unchanged. This also covers the case where both sides are null
  // (previously handled by a redundant separate branch).
  if (firstStringArray == null || firstStringArray.length == 0) {
    return secondStringArray;
  }
  if (secondStringArray == null || secondStringArray.length == 0) {
    return firstStringArray;
  }
  String[] ret = new String[firstStringArray.length + secondStringArray.length];
  System.arraycopy(firstStringArray, 0, ret, 0, firstStringArray.length);
  System.arraycopy(secondStringArray, 0, ret, firstStringArray.length,
      secondStringArray.length);
  return ret;
}
/**
 * Replace the NUMA resource allocator used for recover/release
 * operations. Visible for testing only.
 */
@VisibleForTesting
public void setNumaResourceAllocator(NumaResourceAllocator numaResourceAllocator) {
  this.numaResourceAllocator = numaResourceAllocator;
}
/**
 * Override the numactl command used as the first token of the NUMA
 * command array built by getNumaCommands(). Visible for testing only.
 */
@VisibleForTesting
public void setNumactl(String numactl) {
  this.numactl = numactl;
}
}
| WindowsLocalWrapperScriptBuilder |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/engine/descriptor/subpackage/ClassWithStaticInnerTestCases.java | {
"start": 539,
"end": 678
} | class ____ {
@Test
void test1() {
}
}
@SuppressWarnings({ "unused", "JUnitMalformedDeclaration" })
private static | ShouldBeDiscovered |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/jmx/export/annotation/AnnotationTestBeanFactory.java | {
"start": 775,
"end": 1329
} | class ____ implements FactoryBean<FactoryCreatedAnnotationTestBean> {
private final FactoryCreatedAnnotationTestBean instance = new FactoryCreatedAnnotationTestBean();
public AnnotationTestBeanFactory() {
this.instance.setName("FACTORY");
}
@Override
public FactoryCreatedAnnotationTestBean getObject() {
return this.instance;
}
@Override
public Class<? extends AnnotationTestBean> getObjectType() {
return FactoryCreatedAnnotationTestBean.class;
}
@Override
public boolean isSingleton() {
return true;
}
}
| AnnotationTestBeanFactory |
java | elastic__elasticsearch | modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java | {
"start": 4756,
"end": 5204
} | class ____ extends AbstractClient {
MockClient(Settings settings, ThreadPool threadPool) {
super(settings, threadPool, TestProjectResolvers.alwaysThrow());
}
@Override
protected <Request extends ActionRequest, Response extends ActionResponse> void doExecute(
ActionType<Response> action,
Request request,
ActionListener<Response> listener
) {}
}
}
| MockClient |
java | junit-team__junit5 | junit-platform-commons/src/main/java/org/junit/platform/commons/support/ModifierSupport.java | {
"start": 1410,
"end": 1460
} | class ____ {@code public}.
*
* @param clazz the | is |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/ToDynamicVariableHeadersTest.java | {
"start": 973,
"end": 3925
} | class ____ extends ContextTestSupport {
@Test
public void testSend() throws Exception {
getMockEndpoint("mock:before").expectedBodiesReceived("World");
getMockEndpoint("mock:before").expectedVariableReceived("hello", "Camel");
getMockEndpoint("mock:result").expectedBodiesReceived("Bye Camel");
getMockEndpoint("mock:result").expectedVariableReceived("hello", "Camel");
getMockEndpoint("mock:result").message(0).header("echo").isEqualTo("CamelCamel");
template.sendBodyAndHeader("direct:send", "World", "where", "foo");
assertMockEndpointsSatisfied();
}
@Test
public void testReceive() throws Exception {
getMockEndpoint("mock:after").expectedBodiesReceived("World");
getMockEndpoint("mock:after").expectedVariableReceived("bye", "Bye World");
getMockEndpoint("mock:result").expectedBodiesReceived("Bye World");
getMockEndpoint("mock:result").expectedVariableReceived("bye", "Bye World");
getMockEndpoint("mock:result").message(0).header("echo").isNull();
template.sendBodyAndHeader("direct:receive", "World", "where", "foo");
assertMockEndpointsSatisfied();
}
@Test
public void testSendAndReceive() throws Exception {
getMockEndpoint("mock:before").expectedBodiesReceived("World");
getMockEndpoint("mock:before").expectedVariableReceived("hello", "Camel");
getMockEndpoint("mock:result").expectedBodiesReceived("World");
getMockEndpoint("mock:result").expectedVariableReceived("bye", "Bye Camel");
getMockEndpoint("mock:result").message(0).header("echo").isNull();
template.sendBodyAndHeader("direct:sendAndReceive", "World", "where", "foo");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:send")
.setVariable("hello", simple("Camel"))
.to("mock:before")
.toD("direct:${header.where}", "hello", null)
.to("mock:result");
from("direct:receive")
.toD("direct:${header.where}", null, "bye")
.to("mock:after")
.setBody(simple("${variable:bye}"))
.to("mock:result");
from("direct:sendAndReceive")
.setVariable("hello", simple("Camel"))
.to("mock:before")
.toD("direct:${header.where}", "hello", "bye")
.to("mock:result");
from("direct:foo")
.setHeader("echo", simple("${body}${body}"))
.transform().simple("Bye ${body}");
}
};
}
}
| ToDynamicVariableHeadersTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/batch/BatchEntityWithDisabledProxyTest.java | {
"start": 3469,
"end": 3910
} | class ____ {
@Id
Long id;
String name;
public Order() {
}
public Order(Long id, String name) {
this.id = id;
this.name = name;
}
@OneToMany
List<Product> products = new ArrayList<>();
public List<Product> getProducts() {
return products;
}
public void addProduct(Product product) {
this.products.add( product );
}
}
@Entity(name = "Product")
@BatchSize(size = 512)
@Cacheable
public static | Order |
java | apache__maven | impl/maven-testing/src/test/java/org/apache/maven/api/di/testing/SimpleDITest.java | {
"start": 2374,
"end": 2722
} | class
____(context.getRequiredTestInstance()).thenReturn(new TestClass()); // Valid instance
MavenDIExtension.context = context;
assertThrows(
IllegalStateException.class,
extension::setupContainer,
"Should throw IllegalStateException for null test class");
}
static | when |
java | google__dagger | javatests/dagger/functional/binds/NeedsFactory.java | {
"start": 718,
"end": 937
} | class ____ {
@Inject
NeedsFactory(
@SuppressWarnings("unused") NeedsFactory_SomethingFactory somethingFactory,
@SuppressWarnings("unused") SomethingFactoryImpl somethingFactoryImpl) {}
public | NeedsFactory |
java | spring-projects__spring-framework | spring-aop/src/main/java/org/springframework/aop/aspectj/InstantiationModelAwarePointcutAdvisor.java | {
"start": 1019,
"end": 1318
} | interface ____ extends PointcutAdvisor {
/**
* Return whether this advisor is lazily initializing its underlying advice.
*/
boolean isLazy();
/**
* Return whether this advisor has already instantiated its advice.
*/
boolean isAdviceInstantiated();
}
| InstantiationModelAwarePointcutAdvisor |
java | apache__camel | components/camel-quickfix/src/main/java/org/apache/camel/component/quickfixj/MessageCorrelator.java | {
"start": 1189,
"end": 2810
} | class ____ implements QuickfixjEventListener {
public static final long DEFAULT_CORRELATION_TIMEOUT = 1000L;
private final List<MessageCorrelationRule> rules = new CopyOnWriteArrayList<>();
public Callable<Message> getReply(Exchange exchange) {
MessagePredicate messageCriteria = (MessagePredicate) exchange.getProperty(QuickfixjProducer.CORRELATION_CRITERIA_KEY);
final MessageCorrelationRule correlationRule = new MessageCorrelationRule(exchange, messageCriteria);
rules.add(correlationRule);
final long timeout = exchange.getProperty(
QuickfixjProducer.CORRELATION_TIMEOUT_KEY,
DEFAULT_CORRELATION_TIMEOUT, Long.class);
return new Callable<Message>() {
@Override
public Message call() throws Exception {
if (!correlationRule.getLatch().await(timeout, TimeUnit.MILLISECONDS)) {
throw new ExchangeTimedOutException(correlationRule.getExchange(), timeout);
}
return correlationRule.getReplyMessage();
}
};
}
@Override
public void onEvent(QuickfixjEventCategory eventCategory, SessionID sessionID, Message message) throws Exception {
if (message != null) {
for (MessageCorrelationRule rule : rules) {
if (rule.getMessageCriteria().evaluate(message)) {
rule.setReplyMessage(message);
rules.remove(rule);
rule.getLatch().countDown();
}
}
}
}
private static | MessageCorrelator |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/streaming/runtime/SinkV2ITCase.java | {
"start": 4092,
"end": 17951
} | class ____ extends AbstractTestBase {
private static final Logger LOG = LoggerFactory.getLogger(SinkV2ITCase.class);
static final List<Integer> SOURCE_DATA =
Arrays.asList(
895, 127, 148, 161, 148, 662, 822, 491, 275, 122, 850, 630, 682, 765, 434, 970,
714, 795, 288, 422);
static final List<Record<Integer>> EXPECTED_COMMITTED_DATA_IN_STREAMING_MODE =
SOURCE_DATA.stream()
// source send data two times
.flatMap(
x ->
Collections.nCopies(2, new Record<>(x, null, Long.MIN_VALUE))
.stream())
.collect(Collectors.toList());
static final List<Record<Integer>> EXPECTED_COMMITTED_DATA_IN_BATCH_MODE =
SOURCE_DATA.stream()
.map(x -> new Record<>(x, null, Long.MIN_VALUE))
.collect(Collectors.toList());
@RegisterExtension
static final SharedObjectsExtension SHARED_OBJECTS = SharedObjectsExtension.create();
@Test
public void writerAndCommitterExecuteInStreamingMode() throws Exception {
final StreamExecutionEnvironment env = buildStreamEnv();
SharedReference<Queue<Committer.CommitRequest<Record<Integer>>>> committed =
SHARED_OBJECTS.add(new ConcurrentLinkedQueue<>());
final Source<Integer, ?, ?> source = createStreamingSource();
env.fromSource(source, WatermarkStrategy.noWatermarks(), "source")
// Introduce the keyBy to assert unaligned checkpoint is enabled on the source ->
// sink writer edge
.keyBy((KeySelector<Integer, Integer>) value -> value)
.sinkTo(
TestSinkV2.<Integer>newBuilder()
.setCommitter(
new TrackingCommitter(committed), RecordSerializer::new)
.build());
executeAndVerifyStreamGraph(env);
assertThat(committed.get())
.extracting(Committer.CommitRequest::getCommittable)
.containsExactlyInAnyOrderElementsOf(EXPECTED_COMMITTED_DATA_IN_STREAMING_MODE);
}
@Test
public void writerAndPrecommitToplogyAndCommitterExecuteInStreamingMode() throws Exception {
final StreamExecutionEnvironment env = buildStreamEnv();
SharedReference<Queue<Committer.CommitRequest<Record<Integer>>>> committed =
SHARED_OBJECTS.add(new ConcurrentLinkedQueue<>());
final Source<Integer, ?, ?> source = createStreamingSource();
env.fromSource(source, WatermarkStrategy.noWatermarks(), "source")
// Introduce the keyBy to assert unaligned checkpoint is enabled on the source ->
// sink writer edge
.keyBy((KeySelector<Integer, Integer>) value -> value)
.sinkTo(
TestSinkV2.<Integer>newBuilder()
.setCommitter(
new TrackingCommitter(committed), RecordSerializer::new)
.setWithPreCommitTopology(SinkV2ITCase::flipValue)
.build());
executeAndVerifyStreamGraph(env);
assertThat(committed.get())
.extracting(Committer.CommitRequest::getCommittable)
.containsExactlyInAnyOrderElementsOf(
EXPECTED_COMMITTED_DATA_IN_STREAMING_MODE.stream()
.map(SinkV2ITCase::flipValue)
.collect(Collectors.toList()));
}
private static Record<Integer> flipValue(Record<Integer> r) {
return r.withValue(-r.getValue());
}
@ParameterizedTest
@CsvSource({"1, 2", "2, 1", "1, 1"})
public void writerAndCommitterExecuteInStreamingModeWithScaling(
int initialParallelism,
int scaledParallelism,
@TempDir File checkpointDir,
@InjectMiniCluster MiniCluster miniCluster,
@InjectClusterClient ClusterClient<?> clusterClient)
throws Exception {
SharedReference<Queue<Committer.CommitRequest<Record<Integer>>>> committed =
SHARED_OBJECTS.add(new ConcurrentLinkedQueue<>());
final TrackingCommitter trackingCommitter = new TrackingCommitter(committed);
final Configuration config = createConfigForScalingTest(checkpointDir, initialParallelism);
// first run
final JobID jobID =
runStreamingWithScalingTest(
config,
initialParallelism,
trackingCommitter,
true,
miniCluster,
clusterClient);
// second run
config.set(StateRecoveryOptions.SAVEPOINT_PATH, getCheckpointPath(miniCluster, jobID));
config.set(CoreOptions.DEFAULT_PARALLELISM, scaledParallelism);
runStreamingWithScalingTest(
config, initialParallelism, trackingCommitter, false, miniCluster, clusterClient);
assertThat(committed.get())
.extracting(Committer.CommitRequest::getCommittable)
.containsExactlyInAnyOrderElementsOf(
duplicate(EXPECTED_COMMITTED_DATA_IN_STREAMING_MODE));
}
private static List<Record<Integer>> duplicate(List<Record<Integer>> values) {
return IntStream.range(0, 2)
.boxed()
.flatMap(i -> values.stream())
.collect(Collectors.toList());
}
@Test
public void writerAndCommitterExecuteInBatchMode() throws Exception {
final StreamExecutionEnvironment env = buildBatchEnv();
SharedReference<Queue<Committer.CommitRequest<Record<Integer>>>> committed =
SHARED_OBJECTS.add(new ConcurrentLinkedQueue<>());
final DataGeneratorSource<Integer> source =
new DataGeneratorSource<>(
l -> SOURCE_DATA.get(l.intValue()),
SOURCE_DATA.size(),
IntegerTypeInfo.INT_TYPE_INFO);
env.fromSource(source, WatermarkStrategy.noWatermarks(), "source")
// Introduce the rebalance to assert unaligned checkpoint is enabled on the source
// -> sink writer edge
.rebalance()
.sinkTo(
TestSinkV2.<Integer>newBuilder()
.setCommitter(
new TrackingCommitter(committed), RecordSerializer::new)
.build());
executeAndVerifyStreamGraph(env);
assertThat(committed.get())
.extracting(Committer.CommitRequest::getCommittable)
.containsExactlyInAnyOrderElementsOf(EXPECTED_COMMITTED_DATA_IN_BATCH_MODE);
}
@Test
public void writerAndPrecommitToplogyAndCommitterExecuteInBatchMode() throws Exception {
final StreamExecutionEnvironment env = buildBatchEnv();
SharedReference<Queue<Committer.CommitRequest<Record<Integer>>>> committed =
SHARED_OBJECTS.add(new ConcurrentLinkedQueue<>());
final DataGeneratorSource<Integer> source =
new DataGeneratorSource<>(
l -> SOURCE_DATA.get(l.intValue()),
SOURCE_DATA.size(),
IntegerTypeInfo.INT_TYPE_INFO);
env.fromSource(source, WatermarkStrategy.noWatermarks(), "source")
// Introduce the rebalance to assert unaligned checkpoint is enabled on the source
// -> sink writer edge
.rebalance()
.sinkTo(
TestSinkV2.<Integer>newBuilder()
.setCommitter(
new TrackingCommitter(committed), RecordSerializer::new)
.setWithPreCommitTopology(SinkV2ITCase::flipValue)
.build());
executeAndVerifyStreamGraph(env);
assertThat(committed.get())
.extracting(Committer.CommitRequest::getCommittable)
.containsExactlyInAnyOrderElementsOf(
EXPECTED_COMMITTED_DATA_IN_BATCH_MODE.stream()
.map(SinkV2ITCase::flipValue)
.collect(Collectors.toList()));
}
private StreamExecutionEnvironment buildStreamEnv() {
final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setRuntimeMode(RuntimeExecutionMode.STREAMING);
env.enableCheckpointing(100);
return env;
}
private Configuration createConfigForScalingTest(File checkpointDir, int parallelism) {
final Configuration config = new Configuration();
config.set(CoreOptions.DEFAULT_PARALLELISM, parallelism);
config.set(StateBackendOptions.STATE_BACKEND, "hashmap");
config.set(CheckpointingOptions.CHECKPOINTS_DIRECTORY, checkpointDir.toURI().toString());
config.set(
CheckpointingOptions.EXTERNALIZED_CHECKPOINT_RETENTION,
ExternalizedCheckpointRetention.RETAIN_ON_CANCELLATION);
config.set(CheckpointingOptions.MAX_RETAINED_CHECKPOINTS, 2000);
config.set(RestartStrategyOptions.RESTART_STRATEGY, "disable");
return config;
}
private StreamExecutionEnvironment buildStreamEnvWithCheckpointDir(Configuration config) {
final StreamExecutionEnvironment env =
StreamExecutionEnvironment.getExecutionEnvironment(config);
env.setRuntimeMode(RuntimeExecutionMode.STREAMING);
env.enableCheckpointing(100);
return env;
}
private JobID runStreamingWithScalingTest(
Configuration config,
int parallelism,
TrackingCommitter trackingCommitter,
boolean shouldMapperFail,
MiniCluster miniCluster,
ClusterClient<?> clusterClient)
throws Exception {
final StreamExecutionEnvironment env = buildStreamEnvWithCheckpointDir(config);
final Source<Integer, ?, ?> source = createStreamingSource();
env.fromSource(source, WatermarkStrategy.noWatermarks(), "source")
.rebalance()
.map(
new FailingCheckpointMapper(
SHARED_OBJECTS.add(new AtomicBoolean(!shouldMapperFail))))
.sinkTo(
TestSinkV2.<Integer>newBuilder()
.setCommitter(trackingCommitter, RecordSerializer::new)
.setWithPostCommitTopology(true)
.build());
final JobID jobId = clusterClient.submitJob(env.getStreamGraph().getJobGraph()).get();
clusterClient.requestJobResult(jobId).get();
return jobId;
}
private String getCheckpointPath(MiniCluster miniCluster, JobID secondJobId)
throws InterruptedException, ExecutionException, FlinkJobNotFoundException {
final Optional<String> completedCheckpoint =
CommonTestUtils.getLatestCompletedCheckpointPath(secondJobId, miniCluster);
assertThat(completedCheckpoint).isPresent();
return completedCheckpoint.get();
}
private StreamExecutionEnvironment buildBatchEnv() {
final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setRuntimeMode(RuntimeExecutionMode.BATCH);
return env;
}
private void executeAndVerifyStreamGraph(StreamExecutionEnvironment env) throws Exception {
StreamGraph streamGraph = env.getStreamGraph();
assertNoUnalignedCheckpointInSink(streamGraph);
assertUnalignedCheckpointInNonSink(streamGraph);
env.execute(streamGraph);
}
private void assertNoUnalignedCheckpointInSink(StreamGraph streamGraph) {
// all the out edges between sink nodes should not support unaligned checkpoints
assertThat(streamGraph.getStreamNodes())
.filteredOn(t -> t.getOperatorName().contains("Sink"))
.flatMap(StreamNode::getOutEdges)
.allMatch(e -> !e.supportsUnalignedCheckpoints())
.isNotEmpty();
}
private void assertUnalignedCheckpointInNonSink(StreamGraph streamGraph) {
// All connections are rebalance between source and source, so all the out edges of nodes
// upstream of the sink should support unaligned checkpoints
assertThat(streamGraph.getStreamNodes())
.filteredOn(t -> !t.getOperatorName().contains("Sink"))
.flatMap(StreamNode::getOutEdges)
.allMatch(StreamEdge::supportsUnalignedCheckpoints)
.isNotEmpty();
}
/**
* A stream source that: 1) emits a list of elements without allowing checkpoints, 2) then waits
* for two more checkpoints to complete, 3) then re-emits the same elements before 4) waiting
* for another two checkpoints and 5) exiting.
*/
private Source<Integer, ?, ?> createStreamingSource() {
RateLimiterStrategy<NumberSequenceSource.NumberSequenceSplit> rateLimiterStrategy =
parallelism -> new BurstingRateLimiter(SOURCE_DATA.size() / 4, 2);
return new DataGeneratorSource<>(
l -> SOURCE_DATA.get(l.intValue() % SOURCE_DATA.size()),
SOURCE_DATA.size() * 2L,
rateLimiterStrategy,
IntegerTypeInfo.INT_TYPE_INFO);
}
private static | SinkV2ITCase |
java | junit-team__junit5 | junit-platform-console/src/main/java/org/junit/platform/console/output/TreeNode.java | {
"start": 794,
"end": 2665
} | class ____ {
private final String caption;
private final long creation;
long duration;
private @Nullable String reason;
private @Nullable TestIdentifier identifier;
private @Nullable TestExecutionResult result;
final Queue<ReportEntry> reports = new ConcurrentLinkedQueue<>();
final Queue<FileEntry> files = new ConcurrentLinkedQueue<>();
final Queue<TreeNode> children = new ConcurrentLinkedQueue<>();
boolean visible;
TreeNode(String caption) {
this.caption = caption;
this.creation = System.currentTimeMillis();
this.visible = false;
}
TreeNode(TestIdentifier identifier) {
this(createCaption(identifier.getDisplayName()));
this.identifier = identifier;
this.visible = true;
}
TreeNode(TestIdentifier identifier, String reason) {
this(identifier);
this.reason = reason;
}
TreeNode addChild(TreeNode node) {
children.add(node);
return this;
}
TreeNode addReportEntry(ReportEntry reportEntry) {
reports.add(reportEntry);
return this;
}
TreeNode addFileEntry(FileEntry file) {
files.add(file);
return this;
}
TreeNode setResult(TestExecutionResult result) {
this.result = result;
this.duration = System.currentTimeMillis() - creation;
return this;
}
public String caption() {
return caption;
}
Optional<String> reason() {
return Optional.ofNullable(reason);
}
Optional<TestExecutionResult> result() {
return Optional.ofNullable(result);
}
Optional<TestIdentifier> identifier() {
return Optional.ofNullable(identifier);
}
@SuppressWarnings("DataFlowIssue")
static String createCaption(String displayName) {
boolean normal = displayName.length() <= 80;
String caption = normal ? displayName : displayName.substring(0, 80) + "...";
String whites = StringUtils.replaceWhitespaceCharacters(caption, " ");
return StringUtils.replaceIsoControlCharacters(whites, ".");
}
}
| TreeNode |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/bootstrap/BootstrapContextClosedEvent.java | {
"start": 1028,
"end": 1724
} | class ____ extends ApplicationEvent {
private final ConfigurableApplicationContext applicationContext;
BootstrapContextClosedEvent(BootstrapContext source, ConfigurableApplicationContext applicationContext) {
super(source);
this.applicationContext = applicationContext;
}
/**
* Return the {@link BootstrapContext} that was closed.
* @return the bootstrap context
*/
public BootstrapContext getBootstrapContext() {
return (BootstrapContext) this.source;
}
/**
* Return the prepared application context.
* @return the application context
*/
public ConfigurableApplicationContext getApplicationContext() {
return this.applicationContext;
}
}
| BootstrapContextClosedEvent |
java | apache__camel | components/camel-mllp/src/test/java/org/apache/camel/test/tcp/JavaSocketManualTests.java | {
"start": 1838,
"end": 1945
} | class ____ be deleted in the future
*/
@Disabled(value = "Tests validating Java Socket behaviours")
public | may |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/internal/strings/Strings_assertIsXmlEqualCase_Test.java | {
"start": 1592,
"end": 4623
} | class ____ extends StringsBaseTest {
@Test
void should_pass_if_both_Strings_are_XML_equals() {
String actual = "<rss version=\"2.0\"><channel> <title>Java Tutorials and Examples 1</title> <language>en-us</language></channel></rss>";
String expected = String.format("<rss version=\"2.0\">%n"
+ "<channel><title>Java Tutorials and Examples 1</title><language>en-us</language></channel>%n"
+ "</rss>");
strings.assertXmlEqualsTo(someInfo(), actual, expected);
}
@Test
void should_fail_if_actual_is_null() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> strings.assertXmlEqualsTo(someInfo(), null,
"<jedi>yoda</jedi>"))
.withMessage(actualIsNull());
}
@Test
void should_fail_if_expected_is_null() {
assertThatNullPointerException().isThrownBy(() -> strings.assertXmlEqualsTo(someInfo(), "<jedi>yoda</jedi>", null))
.withMessage("The char sequence to look for should not be null");
}
@Test
void should_fail_if_both_Strings_are_not_XML_equals() {
String actual = "<rss version=\"2.0\"><channel><title>Java Tutorials</title></channel></rss>";
String expected = "<rss version=\"2.0\"><channel><title>Java Tutorials and Examples</title></channel></rss>";
AssertionInfo info = someInfo();
Throwable error = catchThrowable(() -> strings.assertXmlEqualsTo(info, actual, expected));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldBeEqual(xmlPrettyFormat(actual), xmlPrettyFormat(expected), info.representation()));
}
@Test
void should_pass_if_both_Strings_are_XML_equals_case_insensitively() {
String actual = "<rss version=\"2.0\"><Channel><title>Java Tutorials</title></Channel></rss>";
String expected = "<rss version=\"2.0\"><channel><TITLE>JAVA Tutorials</TITLE></channel></rss>";
stringsWithCaseInsensitiveComparisonStrategy.assertXmlEqualsTo(someInfo(), actual, expected);
}
@Test
void should_fail_if_both_Strings_are_not_XML_equal_regardless_of_case() {
AssertionInfo info = someInfo();
String actual = "<rss version=\"2.0\"><channel><title>Java Tutorials</title></channel></rss>";
String expected = "<rss version=\"2.0\"><channel><title>Java Tutorials and Examples</title></channel></rss>";
Throwable error = catchThrowable(() -> stringsWithCaseInsensitiveComparisonStrategy.assertXmlEqualsTo(someInfo(), actual,
expected));
assertThat(error).isInstanceOf(AssertionError.class);
verify(failures).failure(info, shouldBeEqual(xmlPrettyFormat(actual), xmlPrettyFormat(expected),
info.representation()));
}
}
| Strings_assertIsXmlEqualCase_Test |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/action/support/ListenerTimeoutsTests.java | {
"start": 1079,
"end": 4314
} | class ____ extends ESTestCase {
private final TimeValue timeout = TimeValue.timeValueMillis(10);
private DeterministicTaskQueue taskQueue;
private ThreadPool threadPool;
private Executor timeoutExecutor;
@Before
public void setUp() throws Exception {
super.setUp();
taskQueue = new DeterministicTaskQueue();
threadPool = taskQueue.getThreadPool();
timeoutExecutor = threadPool.generic();
}
public void testListenerTimeout() {
AtomicBoolean success = new AtomicBoolean(false);
AtomicReference<Exception> exception = new AtomicReference<>();
ActionListener<Void> listener = wrap(success, exception);
ActionListener<Void> wrapped = ListenerTimeouts.wrapWithTimeout(threadPool, listener, timeout, timeoutExecutor, "test");
assertTrue(taskQueue.hasDeferredTasks());
taskQueue.advanceTime();
taskQueue.runAllRunnableTasks();
wrapped.onResponse(null);
wrapped.onFailure(new IOException("incorrect exception"));
assertFalse(success.get());
assertThat(exception.get(), instanceOf(ElasticsearchTimeoutException.class));
}
public void testFinishNormallyBeforeTimeout() {
AtomicBoolean success = new AtomicBoolean(false);
AtomicReference<Exception> exception = new AtomicReference<>();
ActionListener<Void> listener = wrap(success, exception);
ActionListener<Void> wrapped = ListenerTimeouts.wrapWithTimeout(threadPool, listener, timeout, timeoutExecutor, "test");
wrapped.onResponse(null);
wrapped.onFailure(new IOException("boom"));
wrapped.onResponse(null);
assertTrue(taskQueue.hasDeferredTasks());
taskQueue.advanceTime();
taskQueue.runAllRunnableTasks();
assertTrue(success.get());
assertNull(exception.get());
}
public void testFinishExceptionallyBeforeTimeout() {
AtomicBoolean success = new AtomicBoolean(false);
AtomicReference<Exception> exception = new AtomicReference<>();
ActionListener<Void> listener = wrap(success, exception);
ActionListener<Void> wrapped = ListenerTimeouts.wrapWithTimeout(threadPool, listener, timeout, timeoutExecutor, "test");
wrapped.onFailure(new IOException("boom"));
assertTrue(taskQueue.hasDeferredTasks());
taskQueue.advanceTime();
taskQueue.runAllRunnableTasks();
assertFalse(success.get());
assertThat(exception.get(), instanceOf(IOException.class));
}
private ActionListener<Void> wrap(AtomicBoolean success, AtomicReference<Exception> exception) {
return new ActionListener<Void>() {
private final AtomicBoolean completed = new AtomicBoolean();
@Override
public void onResponse(Void aVoid) {
assertTrue(completed.compareAndSet(false, true));
assertTrue(success.compareAndSet(false, true));
}
@Override
public void onFailure(Exception e) {
assertTrue(completed.compareAndSet(false, true));
assertTrue(exception.compareAndSet(null, e));
}
};
}
}
| ListenerTimeoutsTests |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/context/TestExecutionListeners.java | {
"start": 1053,
"end": 1680
} | class ____ configure which {@link TestExecutionListener TestExecutionListeners}
* should be registered with a {@link TestContextManager}.
*
* <p>{@code @TestExecutionListeners} is used to register listeners for a
* particular test class, its subclasses, and its nested classes. If you wish to
* register a listener globally, you should register it via the automatic discovery
* mechanism described in {@link TestExecutionListener}.
*
* <p>This annotation may be used as a <em>meta-annotation</em> to create custom
* <em>composed annotations</em>. In addition, this annotation will be inherited
* from an enclosing test | to |
java | netty__netty | handler/src/test/java/io/netty/handler/ssl/util/BouncyCastleUtilTest.java | {
"start": 1240,
"end": 3452
} | class ____ {
@Test
public void testBouncyCastleProviderLoaded() {
// tests org.bouncycastle.jce.provider.BouncyCastleProvider is detected as available
// because provider with matching name is present in 'java.security.Security'
assertTrue(BouncyCastleUtil.isBcProvAvailable());
assertTrue(BouncyCastleUtil.isBcPkixAvailable());
assertTrue(BouncyCastleUtil.isBcTlsAvailable());
Provider bcProvider = BouncyCastleUtil.getBcProviderJce();
assertNotNull(bcProvider);
assertNotNull(BouncyCastleUtil.getBcProviderJsse());
BouncyCastleUtil.reset();
Provider bouncyCastleProvider = new BouncyCastleProvider();
Security.addProvider(bouncyCastleProvider);
assertTrue(BouncyCastleUtil.isBcProvAvailable());
bcProvider = BouncyCastleUtil.getBcProviderJce();
assertSame(bouncyCastleProvider, bcProvider);
Security.removeProvider(bouncyCastleProvider.getName());
BouncyCastleUtil.reset();
}
@Test
public void testBouncyCastleFipsProviderLoaded() {
// tests org.bouncycastle.jcajce.provider.BouncyCastleFipsProvider is detected as available
// because provider with matching name is present in 'java.security.Security'
assertTrue(BouncyCastleUtil.isBcProvAvailable());
Provider bcProvider = BouncyCastleUtil.getBcProviderJce();
assertInstanceOf(BouncyCastleProvider.class, bcProvider);
BouncyCastleUtil.reset();
// we don't expect to have both BC and BCFIPS available, but BouncyCastleProvider is on the classpath
// hence we need to add a fake BouncyCastleFipsProvider provider
Provider fakeBouncyCastleFipsProvider = new Provider("BCFIPS", 1.000205,
"BouncyCastle Security Provider (FIPS edition) v1.0.2.5") { };
Security.addProvider(fakeBouncyCastleFipsProvider);
assertTrue(BouncyCastleUtil.isBcProvAvailable());
bcProvider = BouncyCastleUtil.getBcProviderJce();
assertSame(fakeBouncyCastleFipsProvider, bcProvider);
Security.removeProvider(fakeBouncyCastleFipsProvider.getName());
BouncyCastleUtil.reset();
}
}
| BouncyCastleUtilTest |
java | quarkusio__quarkus | integration-tests/spring-web/src/test/java/io/quarkus/it/spring/web/openapi/OpenApiPathWithSegmentsPMT.java | {
"start": 224,
"end": 1563
} | class ____ {
private static final String OPEN_API_PATH = "/path/with/segments";
@RegisterExtension
static QuarkusProdModeTest runner = new QuarkusProdModeTest()
.withApplicationRoot((jar) -> jar
.addClasses(OpenApiController.class)
.addAsResource("test-roles.properties")
.addAsResource("test-users.properties"))
.overrideConfigKey("quarkus.smallrye-openapi.path", OPEN_API_PATH)
.setRun(true);
@Test
public void testOpenApiPathAccessResource() {
RestAssured.given().header("Accept", "application/yaml")
.when().get(OPEN_API_PATH)
.then().header("Content-Type", "application/yaml;charset=UTF-8");
RestAssured.given().queryParam("format", "YAML")
.when().get(OPEN_API_PATH)
.then().header("Content-Type", "application/yaml;charset=UTF-8");
RestAssured.given().header("Accept", "application/json")
.when().get(OPEN_API_PATH)
.then().header("Content-Type", "application/json;charset=UTF-8");
RestAssured.given().queryParam("format", "JSON")
.when().get(OPEN_API_PATH)
.then().header("Content-Type", "application/json;charset=UTF-8");
}
}
| OpenApiPathWithSegmentsPMT |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/env/OriginTrackedYamlLoader.java | {
"start": 5613,
"end": 5873
} | class ____ extends Resolver {
@Override
public void addImplicitResolver(Tag tag, Pattern regexp, String first, int limit) {
if (tag == Tag.TIMESTAMP) {
return;
}
super.addImplicitResolver(tag, regexp, first, limit);
}
}
}
| NoTimestampResolver |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/error/ShouldBeFinite.java | {
"start": 657,
"end": 924
} | class ____ extends BasicErrorMessageFactory {
public static ErrorMessageFactory shouldBeFinite(Number actual) {
return new ShouldBeFinite(actual);
}
private ShouldBeFinite(Number actual) {
super("%nExpecting %s to be finite", actual);
}
}
| ShouldBeFinite |
java | micronaut-projects__micronaut-core | inject/src/main/java/io/micronaut/context/annotation/DefaultImplementation.java | {
"start": 1632,
"end": 1851
} | class ____ allow the replacement of an implementation that isn't
* accessible due to visibility restrictions.</p>
*
* <p>For example:</p>
*
* <pre class="code">
* @DefaultImplementation(MyImpl.class)
* public | to |
java | elastic__elasticsearch | x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryTests.java | {
"start": 1794,
"end": 12994
} | class ____ extends ESTestCase {
private ThreadPool threadPool;
@Before
public void init() throws Exception {
threadPool = new TestThreadPool("SessionFactoryTests thread pool");
}
@After
public void shutdown() throws InterruptedException {
terminate(threadPool);
}
public void testConnectionFactoryReturnsCorrectLDAPConnectionOptionsWithDefaultSettings() throws Exception {
final RealmConfig.RealmIdentifier realmIdentifier = new RealmConfig.RealmIdentifier("ldap", "conn_settings");
final Environment environment = TestEnvironment.newEnvironment(
Settings.builder()
.put("path.home", createTempDir())
.put(getFullSettingKey(realmIdentifier, RealmSettings.ORDER_SETTING), 0)
.build()
);
RealmConfig realmConfig = new RealmConfig(realmIdentifier, environment.settings(), environment, new ThreadContext(Settings.EMPTY));
LDAPConnectionOptions options = SessionFactory.connectionOptions(realmConfig, new SSLService(environment), logger);
assertThat(options.followReferrals(), is(equalTo(true)));
assertThat(options.allowConcurrentSocketFactoryUse(), is(equalTo(true)));
assertThat(options.getConnectTimeoutMillis(), is(equalTo(5000)));
assertThat(options.getResponseTimeoutMillis(), is(equalTo(5000L)));
assertThat(options.getSSLSocketVerifier(), is(instanceOf(HostNameSSLSocketVerifier.class)));
}
public void testSessionFactoryWithResponseTimeout() throws Exception {
final RealmConfig.RealmIdentifier realmId = new RealmConfig.RealmIdentifier("ldap", "response_settings");
final Path pathHome = createTempDir();
{
Settings settings = Settings.builder()
.put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_RESPONSE_SETTING), "10s")
.put(getFullSettingKey(realmId, RealmSettings.ORDER_SETTING), 0)
.put("path.home", pathHome)
.build();
final Environment environment = TestEnvironment.newEnvironment(settings);
RealmConfig realmConfig = new RealmConfig(realmId, settings, environment, new ThreadContext(settings));
LDAPConnectionOptions options = SessionFactory.connectionOptions(realmConfig, new SSLService(settings, environment), logger);
assertThat(options.getResponseTimeoutMillis(), is(equalTo(10000L)));
}
{
Settings settings = Settings.builder()
.put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_TCP_READ_SETTING), "7s")
.put(getFullSettingKey(realmId, RealmSettings.ORDER_SETTING), 0)
.put("path.home", pathHome)
.build();
final Environment environment = TestEnvironment.newEnvironment(settings);
RealmConfig realmConfig = new RealmConfig(realmId, settings, environment, new ThreadContext(settings));
LDAPConnectionOptions options = SessionFactory.connectionOptions(realmConfig, new SSLService(settings, environment), logger);
assertThat(options.getResponseTimeoutMillis(), is(equalTo(7000L)));
assertSettingDeprecationsAndWarnings(
new Setting<?>[] {
SessionFactorySettings.TIMEOUT_TCP_READ_SETTING.apply("ldap").getConcreteSettingForNamespace("response_settings") }
);
}
{
Settings settings = Settings.builder()
.put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_RESPONSE_SETTING), "11s")
.put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_TCP_READ_SETTING), "6s")
.put(getFullSettingKey(realmId, RealmSettings.ORDER_SETTING), 0)
.put("path.home", pathHome)
.build();
final Environment environment = TestEnvironment.newEnvironment(settings);
RealmConfig realmConfig = new RealmConfig(realmId, settings, environment, new ThreadContext(settings));
IllegalArgumentException ex = expectThrows(
IllegalArgumentException.class,
() -> SessionFactory.connectionOptions(realmConfig, new SSLService(settings, environment), logger)
);
assertThat(
ex.getMessage(),
is(
"[xpack.security.authc.realms.ldap.response_settings.timeout.tcp_read] and [xpack.security"
+ ".authc.realms.ldap.response_settings.timeout.response] may not be used at the same time"
)
);
}
{
Settings settings = Settings.builder()
.put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_LDAP_SETTING), "750ms")
.put(getFullSettingKey(realmId, RealmSettings.ORDER_SETTING), 0)
.put("path.home", pathHome)
.build();
final Environment environment = TestEnvironment.newEnvironment(settings);
RealmConfig realmConfig = new RealmConfig(realmId, settings, environment, new ThreadContext(settings));
LDAPConnectionOptions options = SessionFactory.connectionOptions(realmConfig, new SSLService(settings, environment), logger);
assertThat(options.getResponseTimeoutMillis(), is(equalTo(750L)));
}
}
public void testConnectionFactoryReturnsCorrectLDAPConnectionOptions() throws Exception {
final RealmConfig.RealmIdentifier realmId = new RealmConfig.RealmIdentifier("ldap", "conn_settings");
final Path pathHome = createTempDir();
Settings settings = Settings.builder()
.put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_TCP_CONNECTION_SETTING), "10ms")
.put(getFullSettingKey(realmId, SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING), "false")
.put(getFullSettingKey(realmId, SessionFactorySettings.TIMEOUT_RESPONSE_SETTING), "20ms")
.put(getFullSettingKey(realmId, SessionFactorySettings.FOLLOW_REFERRALS_SETTING), "false")
.put(getFullSettingKey(realmId, RealmSettings.ORDER_SETTING), 0)
.put("path.home", pathHome)
.build();
Environment environment = TestEnvironment.newEnvironment(settings);
RealmConfig realmConfig = new RealmConfig(realmId, settings, environment, new ThreadContext(settings));
LDAPConnectionOptions options = SessionFactory.connectionOptions(realmConfig, new SSLService(environment), logger);
assertThat(options.followReferrals(), is(equalTo(false)));
assertThat(options.allowConcurrentSocketFactoryUse(), is(equalTo(true)));
assertThat(options.getConnectTimeoutMillis(), is(equalTo(10)));
assertThat(options.getResponseTimeoutMillis(), is(equalTo(20L)));
assertThat(options.getSSLSocketVerifier(), is(instanceOf(TrustAllSSLSocketVerifier.class)));
assertWarnings(
"the setting [xpack.security.authc.realms.ldap.conn_settings.hostname_verification] has been deprecated and will be "
+ "removed in a future version. use [xpack.security.authc.realms.ldap.conn_settings.ssl.verification_mode] instead"
);
settings = Settings.builder()
.put(getFullSettingKey(realmId, SSLConfigurationSettings.VERIFICATION_MODE_SETTING_REALM), SslVerificationMode.CERTIFICATE)
.put("path.home", pathHome)
.put(getFullSettingKey(realmId, RealmSettings.ORDER_SETTING), 0)
.build();
realmConfig = new RealmConfig(realmId, settings, environment, new ThreadContext(settings));
options = SessionFactory.connectionOptions(realmConfig, new SSLService(TestEnvironment.newEnvironment(settings)), logger);
assertThat(options.getSSLSocketVerifier(), is(instanceOf(TrustAllSSLSocketVerifier.class)));
// Can't run in FIPS with verification_mode none, disable this check instead of duplicating the test case
if (inFipsJvm() == false) {
settings = Settings.builder()
.put(getFullSettingKey(realmId, SSLConfigurationSettings.VERIFICATION_MODE_SETTING_REALM), SslVerificationMode.NONE)
.put("path.home", pathHome)
.put(getFullSettingKey(realmId, RealmSettings.ORDER_SETTING), 0)
.build();
environment = TestEnvironment.newEnvironment(settings);
realmConfig = new RealmConfig(realmId, settings, environment, new ThreadContext(settings));
options = SessionFactory.connectionOptions(realmConfig, new SSLService(environment), logger);
assertThat(options.getSSLSocketVerifier(), is(instanceOf(TrustAllSSLSocketVerifier.class)));
}
settings = Settings.builder()
.put(getFullSettingKey(realmId, SSLConfigurationSettings.VERIFICATION_MODE_SETTING_REALM), SslVerificationMode.FULL)
.put("path.home", pathHome)
.put(getFullSettingKey(realmId, RealmSettings.ORDER_SETTING), 0)
.build();
environment = TestEnvironment.newEnvironment(settings);
realmConfig = new RealmConfig(realmId, settings, environment, new ThreadContext(settings));
options = SessionFactory.connectionOptions(realmConfig, new SSLService(environment), logger);
assertThat(options.getSSLSocketVerifier(), is(instanceOf(HostNameSSLSocketVerifier.class)));
}
public void testSessionFactoryDoesNotSupportUnauthenticated() {
assertThat(createSessionFactory().supportsUnauthenticatedSession(), is(false));
}
public void testUnauthenticatedSessionThrowsUnsupportedOperationException() throws Exception {
UnsupportedOperationException e = expectThrows(
UnsupportedOperationException.class,
() -> createSessionFactory().unauthenticatedSession(randomAlphaOfLength(5), new PlainActionFuture<>())
);
assertThat(e.getMessage(), containsString("unauthenticated sessions"));
}
private SessionFactory createSessionFactory() {
Settings global = Settings.builder().put("path.home", createTempDir()).build();
final RealmConfig.RealmIdentifier realmIdentifier = new RealmConfig.RealmIdentifier("ldap", "_name");
final RealmConfig realmConfig = new RealmConfig(
realmIdentifier,
Settings.builder()
.put(getFullSettingKey(realmIdentifier, SessionFactorySettings.URLS_SETTING), "ldap://localhost:389")
.put(global)
.put(getFullSettingKey(realmIdentifier, RealmSettings.ORDER_SETTING), 0)
.build(),
TestEnvironment.newEnvironment(global),
new ThreadContext(Settings.EMPTY)
);
return new SessionFactory(realmConfig, null, threadPool) {
@Override
public void session(String user, SecureString password, ActionListener<LdapSession> listener) {
listener.onResponse(null);
}
@Override
public void reload(Settings settings) {
// no-op
}
};
}
}
| SessionFactoryTests |
java | quarkusio__quarkus | extensions/liquibase/liquibase/deployment/src/test/java/io/quarkus/liquibase/test/LiquibaseExtensionMigrateAtStartJsonChangeLogTest.java | {
"start": 444,
"end": 1595
} | class ____ {
// Quarkus built object
@Inject
LiquibaseFactory liquibaseFactory;
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addAsResource("db/json/changeLog.json")
.addAsResource("db/json/create-tables.json")
.addAsResource("db/json/test/test.json")
.addAsResource("migrate-at-start-json-config.properties", "application.properties"));
@Test
@DisplayName("Migrates at start with change log config correctly")
public void testLiquibaseConfigInjection() throws Exception {
try (Liquibase liquibase = liquibaseFactory.createLiquibase()) {
List<ChangeSetStatus> status = liquibase.getChangeSetStatuses(liquibaseFactory.createContexts(),
liquibaseFactory.createLabels());
assertNotNull(status);
assertEquals(2, status.size());
assertFalse(status.get(0).getWillRun());
assertFalse(status.get(1).getWillRun());
}
}
}
| LiquibaseExtensionMigrateAtStartJsonChangeLogTest |
java | netty__netty | example/src/main/java/io/netty/example/spdy/server/SpdyOrHttpHandler.java | {
"start": 1501,
"end": 2976
} | class ____ extends ApplicationProtocolNegotiationHandler {
private static final int MAX_CONTENT_LENGTH = 1024 * 100;
protected SpdyOrHttpHandler() {
super(ApplicationProtocolNames.HTTP_1_1);
}
@Override
protected void configurePipeline(ChannelHandlerContext ctx, String protocol) throws Exception {
if (ApplicationProtocolNames.SPDY_3_1.equals(protocol)) {
configureSpdy(ctx, SpdyVersion.SPDY_3_1);
return;
}
if (ApplicationProtocolNames.HTTP_1_1.equals(protocol)) {
configureHttp1(ctx);
return;
}
throw new IllegalStateException("unknown protocol: " + protocol);
}
private static void configureSpdy(ChannelHandlerContext ctx, SpdyVersion version) throws Exception {
ChannelPipeline p = ctx.pipeline();
p.addLast(new SpdyFrameCodec(version));
p.addLast(new SpdySessionHandler(version, true));
p.addLast(new SpdyHttpEncoder(version));
p.addLast(new SpdyHttpDecoder(version, MAX_CONTENT_LENGTH));
p.addLast(new SpdyHttpResponseStreamIdHandler());
p.addLast(new SpdyServerHandler());
}
private static void configureHttp1(ChannelHandlerContext ctx) throws Exception {
ChannelPipeline p = ctx.pipeline();
p.addLast(new HttpServerCodec());
p.addLast(new HttpObjectAggregator(MAX_CONTENT_LENGTH));
p.addLast(new SpdyServerHandler());
}
}
| SpdyOrHttpHandler |
java | apache__flink | flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/assigners/LocalityAwareSplitAssigner.java | {
"start": 9481,
"end": 13640
} | class ____ {
/** list of all input splits. */
private final LinkedList<SplitWithInfo> splits = new LinkedList<>();
/** The current minimum local count. We look for splits with this local count. */
private int minLocalCount = -1;
/** The second smallest count observed so far. */
private int nextMinLocalCount = -1;
/** number of elements we need to inspect for the minimum local count. */
private int elementCycleCount = 0;
LocatableSplitChooser() {}
LocatableSplitChooser(Collection<SplitWithInfo> splits) {
for (SplitWithInfo split : splits) {
addInputSplit(split);
}
}
/** Adds a single input split. */
void addInputSplit(SplitWithInfo split) {
int localCount = split.getLocalCount();
if (minLocalCount == -1) {
// first split to add
this.minLocalCount = localCount;
this.elementCycleCount = 1;
this.splits.offerFirst(split);
} else if (localCount < minLocalCount) {
// split with new min local count
this.nextMinLocalCount = this.minLocalCount;
this.minLocalCount = localCount;
// all other splits have more local host than this one
this.elementCycleCount = 1;
splits.offerFirst(split);
} else if (localCount == minLocalCount) {
this.elementCycleCount++;
this.splits.offerFirst(split);
} else {
if (localCount < nextMinLocalCount) {
nextMinLocalCount = localCount;
}
splits.offerLast(split);
}
}
/**
* Retrieves a LocatableInputSplit with minimum local count. InputSplits which have already
* been assigned (i.e., which are not contained in the provided set) are filtered out. The
* returned input split is NOT removed from the provided set.
*
* @param unassignedSplits Set of unassigned input splits.
* @return An input split with minimum local count or null if all splits have been assigned.
*/
@Nullable
SplitWithInfo getNextUnassignedMinLocalCountSplit(Set<SplitWithInfo> unassignedSplits) {
if (splits.size() == 0) {
return null;
}
do {
elementCycleCount--;
// take first split of the list
SplitWithInfo split = splits.pollFirst();
if (unassignedSplits.contains(split)) {
int localCount = split.getLocalCount();
// still unassigned, check local count
if (localCount > minLocalCount) {
// re-insert at end of the list and continue to look for split with smaller
// local count
splits.offerLast(split);
// check and update second smallest local count
if (nextMinLocalCount == -1 || split.getLocalCount() < nextMinLocalCount) {
nextMinLocalCount = split.getLocalCount();
}
split = null;
}
} else {
// split was already assigned
split = null;
}
if (elementCycleCount == 0) {
// one full cycle, but no split with min local count found
// update minLocalCnt and element cycle count for next pass over the splits
minLocalCount = nextMinLocalCount;
nextMinLocalCount = -1;
elementCycleCount = splits.size();
}
if (split != null) {
// found a split to assign
return split;
}
} while (elementCycleCount > 0);
// no split left
return null;
}
}
}
| LocatableSplitChooser |
java | spring-projects__spring-boot | build-plugin/spring-boot-gradle-plugin/src/test/java/org/springframework/boot/gradle/plugin/OnlyDependencyManagementIntegrationTests.java | {
"start": 1074,
"end": 1414
} | class ____ {
@SuppressWarnings("NullAway.Init")
GradleBuild gradleBuild;
@TestTemplate
void dependencyManagementCanBeConfiguredUsingCoordinatesConstant() {
assertThat(this.gradleBuild.build("dependencyManagement").getOutput())
.contains("org.springframework.boot:spring-boot-starter ");
}
}
| OnlyDependencyManagementIntegrationTests |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/customexceptions/InvalidConditionalMappersTest.java | {
"start": 655,
"end": 1480
} | class ____ {
@RegisterExtension
static QuarkusUnitTest test = new QuarkusUnitTest()
.setArchiveProducer(new Supplier<>() {
@Override
public JavaArchive get() {
return ShrinkWrap.create(JavaArchive.class)
.addClasses(TestResource.class, Mappers.class);
}
}).assertException(t -> {
String message = t.getMessage();
assertTrue(message.contains("@ServerExceptionMapper"));
assertTrue(message.contains("request"));
assertTrue(message.contains(Mappers.class.getName()));
});
@Test
public void test() {
fail("Should never have been called");
}
@Path("test")
public static | InvalidConditionalMappersTest |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java | {
"start": 7793,
"end": 25562
} | interface ____ map between the global and subcluster name spaces
this.subclusterResolver = newFileSubclusterResolver(this.conf, this);
if (this.subclusterResolver == null) {
throw new IOException("Cannot find subcluster resolver");
}
if (conf.getBoolean(
RBFConfigKeys.DFS_ROUTER_RPC_ENABLE,
RBFConfigKeys.DFS_ROUTER_RPC_ENABLE_DEFAULT)) {
// Create RPC server
this.rpcServer = createRpcServer();
addService(this.rpcServer);
this.setRpcServerAddress(rpcServer.getRpcAddress());
}
checkRouterId();
if (conf.getBoolean(
RBFConfigKeys.DFS_ROUTER_ADMIN_ENABLE,
RBFConfigKeys.DFS_ROUTER_ADMIN_ENABLE_DEFAULT)) {
// Create admin server
this.adminServer = createAdminServer();
addService(this.adminServer);
}
if (conf.getBoolean(
RBFConfigKeys.DFS_ROUTER_HTTP_ENABLE,
RBFConfigKeys.DFS_ROUTER_HTTP_ENABLE_DEFAULT)) {
// Create HTTP server
this.httpServer = createHttpServer();
addService(this.httpServer);
}
boolean isRouterHeartbeatEnabled = conf.getBoolean(
RBFConfigKeys.DFS_ROUTER_HEARTBEAT_ENABLE,
RBFConfigKeys.DFS_ROUTER_HEARTBEAT_ENABLE_DEFAULT);
boolean isNamenodeHeartbeatEnable = conf.getBoolean(
RBFConfigKeys.DFS_ROUTER_NAMENODE_HEARTBEAT_ENABLE,
isRouterHeartbeatEnabled);
if (isNamenodeHeartbeatEnable) {
// Create status updater for each monitored Namenode
this.namenodeHeartbeatServices = createNamenodeHeartbeatServices();
for (NamenodeHeartbeatService heartbeatService :
this.namenodeHeartbeatServices) {
addService(heartbeatService);
}
if (this.namenodeHeartbeatServices.isEmpty()) {
LOG.error("Heartbeat is enabled but there are no namenodes to monitor");
}
}
if (isRouterHeartbeatEnabled) {
// Periodically update the router state
this.routerHeartbeatService = new RouterHeartbeatService(this);
addService(this.routerHeartbeatService);
}
// Router metrics system
if (conf.getBoolean(
RBFConfigKeys.DFS_ROUTER_METRICS_ENABLE,
RBFConfigKeys.DFS_ROUTER_METRICS_ENABLE_DEFAULT)) {
DefaultMetricsSystem.initialize("Router");
this.metrics = new RouterMetricsService(this);
addService(this.metrics);
// JVM pause monitor
this.pauseMonitor = new JvmPauseMonitor();
this.pauseMonitor.init(conf);
}
// Initial quota relevant service
if (conf.getBoolean(RBFConfigKeys.DFS_ROUTER_QUOTA_ENABLE,
RBFConfigKeys.DFS_ROUTER_QUOTA_ENABLED_DEFAULT)) {
this.quotaManager = new RouterQuotaManager();
this.quotaUpdateService = new RouterQuotaUpdateService(this);
addService(this.quotaUpdateService);
}
// Safemode service to refuse RPC calls when the router is out of sync
if (conf.getBoolean(
RBFConfigKeys.DFS_ROUTER_SAFEMODE_ENABLE,
RBFConfigKeys.DFS_ROUTER_SAFEMODE_ENABLE_DEFAULT)) {
// Create safemode monitoring service
this.safemodeService = new RouterSafemodeService(this);
addService(this.safemodeService);
}
/*
* Refresh mount table cache immediately after adding, modifying or deleting
* the mount table entries. If this service is not enabled mount table cache
* are refreshed periodically by StateStoreCacheUpdateService
*/
if (conf.getBoolean(RBFConfigKeys.MOUNT_TABLE_CACHE_UPDATE,
RBFConfigKeys.MOUNT_TABLE_CACHE_UPDATE_DEFAULT)) {
// There is no use of starting refresh service if state store and admin
// servers are not enabled
String disabledDependentServices = getDisabledDependentServices();
/*
* disabledDependentServices null means all dependent services are
* enabled.
*/
if (disabledDependentServices == null) {
MountTableRefresherService refreshService =
new MountTableRefresherService(this);
addService(refreshService);
LOG.info("Service {} is enabled.",
MountTableRefresherService.class.getSimpleName());
} else {
LOG.warn(
"Service {} not enabled: dependent service(s) {} not enabled.",
MountTableRefresherService.class.getSimpleName(),
disabledDependentServices);
}
}
super.serviceInit(conf);
// Set quota manager in mount store to update quota usage in mount table.
if (stateStore != null) {
MountTableStore mountstore =
this.stateStore.getRegisteredRecordStore(MountTableStore.class);
mountstore.setQuotaManager(this.quotaManager);
}
}
/**
* Set the router id if not set to prevent RouterHeartbeatService
* update state store with a null router id.
*/
private void checkRouterId() {
if (this.routerId == null) {
InetSocketAddress confRpcAddress = conf.getSocketAddr(
RBFConfigKeys.DFS_ROUTER_RPC_BIND_HOST_KEY,
RBFConfigKeys.DFS_ROUTER_RPC_ADDRESS_KEY,
RBFConfigKeys.DFS_ROUTER_RPC_ADDRESS_DEFAULT,
RBFConfigKeys.DFS_ROUTER_RPC_PORT_DEFAULT);
setRpcServerAddress(confRpcAddress);
}
}
private String getDisabledDependentServices() {
if (this.stateStore == null && this.adminServer == null) {
return StateStoreService.class.getSimpleName() + ","
+ RouterAdminServer.class.getSimpleName();
} else if (this.stateStore == null) {
return StateStoreService.class.getSimpleName();
} else if (this.adminServer == null) {
return RouterAdminServer.class.getSimpleName();
}
return null;
}
/**
* Returns the hostname for this Router. If the hostname is not
* explicitly configured in the given config, then it is determined.
*
* @param config configuration
* @return the hostname (NB: may not be a FQDN)
* @throws UnknownHostException if the hostname cannot be determined
*/
private static String getHostName(Configuration config)
throws UnknownHostException {
String name = config.get(DFS_ROUTER_KERBEROS_PRINCIPAL_HOSTNAME_KEY);
if (name == null) {
name = InetAddress.getLocalHost().getHostName();
}
return name;
}
@Override
protected void serviceStart() throws Exception {
if (this.safemodeService == null) {
// Router is running now
updateRouterState(RouterServiceState.RUNNING);
}
if (this.pauseMonitor != null) {
this.pauseMonitor.start();
JvmMetrics jvmMetrics = this.metrics.getJvmMetrics();
if (jvmMetrics != null) {
jvmMetrics.setPauseMonitor(pauseMonitor);
}
}
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
// Update state
updateRouterState(RouterServiceState.SHUTDOWN);
// JVM pause monitor
if (this.pauseMonitor != null) {
this.pauseMonitor.stop();
}
super.serviceStop();
}
/**
* Shutdown the router.
*/
public void shutDown() {
new SubjectInheritingThread() {
@Override
public void work() {
Router.this.stop();
}
}.start();
}
/////////////////////////////////////////////////////////
// RPC Server
/////////////////////////////////////////////////////////
/**
* Create a new Router RPC server to proxy ClientProtocol requests.
*
* @return New Router RPC Server.
* @throws IOException If the router RPC server was not started.
*/
public RouterRpcServer createRpcServer() throws IOException {
return new RouterRpcServer(this.conf, this, this.getNamenodeResolver(),
this.getSubclusterResolver());
}
/**
* Get the Router RPC server.
*
* @return Router RPC server.
*/
public RouterRpcServer getRpcServer() {
return this.rpcServer;
}
/**
* Set the current RPC socket for the router.
*
* @param address RPC address.
*/
protected void setRpcServerAddress(InetSocketAddress address) {
this.rpcAddress = address;
// Use the RPC address as our unique router Id
if (this.rpcAddress != null) {
try {
String hostname = InetAddress.getLocalHost().getHostName();
setRouterId(hostname + ":" + this.rpcAddress.getPort());
} catch (UnknownHostException ex) {
LOG.error("Cannot set unique router ID, address not resolvable {}",
this.rpcAddress);
}
}
}
/**
* Get the current RPC socket address for the router.
*
* @return InetSocketAddress
*/
public InetSocketAddress getRpcServerAddress() {
return this.rpcAddress;
}
/////////////////////////////////////////////////////////
// Admin server
/////////////////////////////////////////////////////////
/**
* Create a new router admin server to handle the router admin interface.
*
* @return RouterAdminServer
* @throws IOException If the admin server was not successfully started.
*/
protected RouterAdminServer createAdminServer() throws IOException {
return new RouterAdminServer(this.conf, this);
}
/**
* Set the current Admin socket for the router.
*
* @param address Admin RPC address.
*/
protected void setAdminServerAddress(InetSocketAddress address) {
this.adminAddress = address;
}
/**
* Get the current Admin socket address for the router.
*
* @return InetSocketAddress Admin address.
*/
public InetSocketAddress getAdminServerAddress() {
return adminAddress;
}
/////////////////////////////////////////////////////////
// HTTP server
/////////////////////////////////////////////////////////
/**
* Create an HTTP server for this Router.
*
* @return HTTP server for this Router.
*/
protected RouterHttpServer createHttpServer() {
return new RouterHttpServer(this);
}
/**
* Get the current HTTP socket address for the router.
*
* @return InetSocketAddress HTTP address.
*/
public InetSocketAddress getHttpServerAddress() {
if (httpServer != null) {
return httpServer.getHttpAddress();
}
return null;
}
@Override
public void verifyToken(DelegationTokenIdentifier tokenId, byte[] password)
throws IOException {
getRpcServer().getRouterSecurityManager().verifyToken(tokenId, password);
}
/////////////////////////////////////////////////////////
// Namenode heartbeat monitors
/////////////////////////////////////////////////////////
/**
* Create each of the services that will monitor a Namenode.
*
* @return List of heartbeat services.
*/
protected Collection<NamenodeHeartbeatService>
createNamenodeHeartbeatServices() {
Map<String, NamenodeHeartbeatService> ret = new HashMap<>();
if (conf.getBoolean(
RBFConfigKeys.DFS_ROUTER_MONITOR_LOCAL_NAMENODE,
RBFConfigKeys.DFS_ROUTER_MONITOR_LOCAL_NAMENODE_DEFAULT)) {
// Create a local heartbeat service
NamenodeHeartbeatService localHeartbeatService =
createLocalNamenodeHeartbeatService();
if (localHeartbeatService != null) {
String nnDesc = localHeartbeatService.getNamenodeDesc();
ret.put(nnDesc, localHeartbeatService);
}
}
// Create heartbeat services for a list specified by the admin
Collection<String> namenodes = this.conf.getTrimmedStringCollection(
RBFConfigKeys.DFS_ROUTER_MONITOR_NAMENODE);
for (String namenode : namenodes) {
String[] namenodeSplit = namenode.split("\\.");
String nsId = null;
String nnId = null;
if (namenodeSplit.length == 2) {
nsId = namenodeSplit[0];
nnId = namenodeSplit[1];
} else if (namenodeSplit.length == 1) {
nsId = namenode;
} else {
LOG.error("Wrong Namenode to monitor: {}", namenode);
}
if (nsId != null) {
String configKeyWithHost =
RBFConfigKeys.DFS_ROUTER_MONITOR_NAMENODE_RESOLUTION_ENABLED + "." + nsId;
boolean resolveNeeded = conf.getBoolean(configKeyWithHost,
RBFConfigKeys.DFS_ROUTER_MONITOR_NAMENODE_RESOLUTION_ENABLED_DEFAULT);
if (nnId != null && resolveNeeded) {
DomainNameResolver dnr = DomainNameResolverFactory.newInstance(
conf, nsId, RBFConfigKeys.DFS_ROUTER_MONITOR_NAMENODE_RESOLVER_IMPL);
Map<String, InetSocketAddress> hosts = Maps.newLinkedHashMap();
Map<String, InetSocketAddress> resolvedHosts =
DFSUtilClient.getResolvedAddressesForNnId(conf, nsId, nnId, dnr,
null, DFS_NAMENODE_RPC_ADDRESS_KEY,
DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
hosts.putAll(resolvedHosts);
for (InetSocketAddress isa : hosts.values()) {
NamenodeHeartbeatService heartbeatService =
createNamenodeHeartbeatService(nsId, nnId, isa.getHostName());
if (heartbeatService != null) {
ret.put(heartbeatService.getNamenodeDesc(), heartbeatService);
}
}
} else {
NamenodeHeartbeatService heartbeatService =
createNamenodeHeartbeatService(nsId, nnId);
if (heartbeatService != null) {
ret.put(heartbeatService.getNamenodeDesc(), heartbeatService);
}
}
}
}
return ret.values();
}
/**
* Create a new status updater for the local Namenode.
*
* @return Updater of the status for the local Namenode.
*/
@VisibleForTesting
public NamenodeHeartbeatService createLocalNamenodeHeartbeatService() {
// Detect NN running in this machine
String nsId = DFSUtil.getNamenodeNameServiceId(conf);
if (nsId == null) {
LOG.error("Cannot find local nameservice id");
return null;
}
String nnId = null;
if (HAUtil.isHAEnabled(conf, nsId)) {
nnId = HAUtil.getNameNodeId(conf, nsId);
if (nnId == null) {
LOG.error("Cannot find namenode id for local {}", nsId);
return null;
}
}
return createNamenodeHeartbeatService(nsId, nnId);
}
/**
* Create a heartbeat monitor for a particular Namenode.
*
* @param nsId Identifier of the nameservice to monitor.
* @param nnId Identifier of the namenode (HA) to monitor.
* @return Updater of the status for the specified Namenode.
*/
protected NamenodeHeartbeatService createNamenodeHeartbeatService(
String nsId, String nnId) {
LOG.info("Creating heartbeat service for Namenode {} in {}", nnId, nsId);
NamenodeHeartbeatService ret = new NamenodeHeartbeatService(
namenodeResolver, nsId, nnId);
return ret;
}
protected NamenodeHeartbeatService createNamenodeHeartbeatService(
String nsId, String nnId, String resolvedHost) {
LOG.info("Creating heartbeat service for" +
" Namenode {}, resolved host {}, in {}", nnId, resolvedHost, nsId);
NamenodeHeartbeatService ret = new NamenodeHeartbeatService(
namenodeResolver, nsId, nnId, resolvedHost);
return ret;
}
/////////////////////////////////////////////////////////
// Router State Management
/////////////////////////////////////////////////////////
/**
* Update the router state and heartbeat to the state store.
*
* @param newState The new router state.
*/
public void updateRouterState(RouterServiceState newState) {
this.state = newState;
if (this.routerHeartbeatService != null) {
this.routerHeartbeatService.updateStateAsync();
}
}
/**
* Get the status of the router.
*
* @return Status of the router.
*/
public RouterServiceState getRouterState() {
return this.state;
}
/**
* Compare router state.
*
* @param routerState the router service state.
* @return true if the given router state is same as the state maintained by the router object.
*/
public boolean isRouterState(RouterServiceState routerState) {
return routerState.equals(this.state);
}
/////////////////////////////////////////////////////////
// Submodule getters
/////////////////////////////////////////////////////////
/**
* Get the State Store service.
*
* @return State Store service.
*/
public StateStoreService getStateStore() {
return this.stateStore;
}
/**
* Get the metrics system for the Router.
*
* @return Router metrics.
*/
public RouterMetrics getRouterMetrics() {
if (this.metrics != null) {
return this.metrics.getRouterMetrics();
}
return null;
}
/**
* Get the metrics system for the Router Client.
*
* @return Router Client metrics.
*/
public RouterClientMetrics getRouterClientMetrics() {
if (this.metrics != null) {
return this.metrics.getRouterClientMetrics();
}
return null;
}
/**
* Get the federation metrics.
*
* @return Federation metrics.
*/
public RBFMetrics getMetrics() {
if (this.metrics != null) {
return this.metrics.getRBFMetrics();
}
return null;
}
/**
* Get the namenode metrics.
*
* @return the namenode metrics.
* @throws IOException if the namenode metrics are not initialized.
*/
public NamenodeBeanMetrics getNamenodeMetrics() throws IOException {
if (this.metrics == null) {
throw new IOException("Namenode metrics is not initialized");
}
return this.metrics.getNamenodeMetrics();
}
/**
* Get the subcluster resolver for files.
*
* @return Subcluster resolver for files.
*/
public FileSubclusterResolver getSubclusterResolver() {
return this.subclusterResolver;
}
/**
* Get the namenode resolver for a subcluster.
*
* @return The namenode resolver for a subcluster.
*/
public ActiveNamenodeResolver getNamenodeResolver() {
return this.namenodeResolver;
}
/**
* Get the state store | to |
java | spring-projects__spring-framework | spring-websocket/src/main/java/org/springframework/web/socket/server/standard/ServerEndpointRegistration.java | {
"start": 1931,
"end": 2215
} | class ____ extends
* {@link jakarta.websocket.server.ServerEndpointConfig.Configurator} to make it easier
* to override methods for customizing the handshake process.
*
* @author Rossen Stoyanchev
* @author Juergen Hoeller
* @since 4.0
* @see ServerEndpointExporter
*/
public | also |
java | quarkusio__quarkus | integration-tests/jaxp/src/test/java/io/quarkus/it/jaxp/JaxpIT.java | {
"start": 114,
"end": 149
} | class ____ extends JaxpTest {
}
| JaxpIT |
java | spring-projects__spring-boot | module/spring-boot-zipkin/src/test/java/org/springframework/boot/zipkin/autoconfigure/ZipkinAutoConfigurationTests.java | {
"start": 5068,
"end": 5288
} | class ____ implements HttpEndpointSupplier.Factory {
@Override
public HttpEndpointSupplier create(String endpoint) {
return new CustomHttpEndpointSupplier(endpoint);
}
}
static | CustomHttpEndpointSupplierFactory |
java | ReactiveX__RxJava | src/jmh/java/io/reactivex/rxjava3/core/StrictPerf.java | {
"start": 1017,
"end": 1657
} | class ____ {
@Param({ "1", "10", "100", "1000", "10000", "100000", "1000000" })
public int count;
@Param({ "1", "10", "100", "1000", "10000" })
public int cpu;
Flowable<Integer> source;
@Setup
public void setup() {
Integer[] array = new Integer[count];
Arrays.fill(array, 777);
source = Flowable.fromArray(array);
}
@Benchmark
public void internal(Blackhole bh) {
source.subscribe(new InternalConsumer(bh, cpu));
}
@Benchmark
public void external(Blackhole bh) {
source.subscribe(new ExternalConsumer(bh, cpu));
}
static final | StrictPerf |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/load/Transformation.java | {
"start": 229,
"end": 561
} | class ____ performing an arbitrary transformation on a resource that implements {@link
* #equals(Object)} and {@link #hashCode()}} to identify the transformation in the memory cache and
* {@link #updateDiskCacheKey(java.security.MessageDigest)}} to identify the transformation in disk
* caches.
*
* <p>Using the fully qualified | for |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/state/internals/WindowStoreFetchTest.java | {
"start": 2882,
"end": 13091
} | enum ____ { InMemory, RocksDB, Timed }
private static final String STORE_NAME = "store";
private static final int DATA_SIZE = 5;
private static final long WINDOW_SIZE = 500L;
private static final long RETENTION_MS = 10000L;
private StoreType storeType;
private boolean enableLogging;
private boolean enableCaching;
private boolean forward;
private LinkedList<KeyValue<Windowed<String>, Long>> expectedRecords;
private LinkedList<KeyValue<String, String>> records;
private Properties streamsConfig;
private String low;
private String high;
private String middle;
private String innerLow;
private String innerHigh;
private String innerLowBetween;
private String innerHighBetween;
public void setup(final StoreType storeType,
final boolean enableLogging,
final boolean enableCaching,
final boolean forward) {
this.storeType = storeType;
this.enableLogging = enableLogging;
this.enableCaching = enableCaching;
this.forward = forward;
this.records = new LinkedList<>();
this.expectedRecords = new LinkedList<>();
final int m = DATA_SIZE / 2;
for (int i = 0; i < DATA_SIZE; i++) {
final String key = "key-" + i * 2;
final String value = "val-" + i * 2;
final KeyValue<String, String> r = new KeyValue<>(key, value);
records.add(r);
records.add(r);
// expected the count of each key is 2
final long windowStartTime = i < m ? 0 : WINDOW_SIZE;
expectedRecords.add(new KeyValue<>(new Windowed<>(key, new TimeWindow(windowStartTime, windowStartTime + WINDOW_SIZE)), 2L));
high = key;
if (low == null) {
low = key;
}
if (i == m) {
middle = key;
}
if (i == 1) {
innerLow = key;
final int index = i * 2 - 1;
innerLowBetween = "key-" + index;
}
if (i == DATA_SIZE - 2) {
innerHigh = key;
final int index = i * 2 + 1;
innerHighBetween = "key-" + index;
}
}
assertNotNull(low);
assertNotNull(high);
assertNotNull(middle);
assertNotNull(innerLow);
assertNotNull(innerHigh);
assertNotNull(innerLowBetween);
assertNotNull(innerHighBetween);
}
public static Stream<Arguments> data() {
final List<StoreType> types = Arrays.asList(StoreType.InMemory, StoreType.RocksDB, StoreType.Timed);
final List<Boolean> logging = Arrays.asList(true, false);
final List<Boolean> caching = Arrays.asList(true, false);
final List<Boolean> forward = Arrays.asList(true, false);
return buildParameters(types, logging, caching, forward);
}
@BeforeEach
public void setup() {
streamsConfig = mkProperties(mkMap(
mkEntry(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath())
));
}
@ParameterizedTest
@MethodSource("data")
public void testStoreConfig(final StoreType storeType, final boolean enableLogging, final boolean enableCaching, final boolean forward) {
setup(storeType, enableLogging, enableCaching, forward);
final Materialized<String, Long, WindowStore<Bytes, byte[]>> stateStoreConfig = getStoreConfig(this.storeType, this.enableLogging, this.enableCaching);
//Create topology: table from input topic
final StreamsBuilder builder = new StreamsBuilder();
final KStream<String, String> stream = builder.stream("input", Consumed.with(Serdes.String(), Serdes.String()));
stream.
groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
.windowedBy(TimeWindows.ofSizeWithNoGrace(ofMillis(WINDOW_SIZE)))
.count(stateStoreConfig)
.toStream()
.to("output");
final Topology topology = builder.build();
try (final TopologyTestDriver driver = new TopologyTestDriver(topology)) {
//get input topic and stateStore
final TestInputTopic<String, String> input = driver
.createInputTopic("input", new StringSerializer(), new StringSerializer());
final WindowStore<String, Long> stateStore = driver.getWindowStore(STORE_NAME);
//write some data
final int medium = DATA_SIZE / 2 * 2;
for (int i = 0; i < records.size(); i++) {
final KeyValue<String, String> kv = records.get(i);
final long windowStartTime = i < medium ? 0 : WINDOW_SIZE;
input.pipeInput(kv.key, kv.value, windowStartTime + i);
}
// query the state store
try (final KeyValueIterator<Windowed<String>, Long> scanIterator = this.forward ?
stateStore.fetchAll(0, Long.MAX_VALUE) :
stateStore.backwardFetchAll(0, Long.MAX_VALUE)) {
final Iterator<KeyValue<Windowed<String>, Long>> dataIterator = this.forward ?
expectedRecords.iterator() :
expectedRecords.descendingIterator();
TestUtils.checkEquals(scanIterator, dataIterator);
}
try (final KeyValueIterator<Windowed<String>, Long> scanIterator = this.forward ?
stateStore.fetch(null, null, 0, Long.MAX_VALUE) :
stateStore.backwardFetch(null, null, 0, Long.MAX_VALUE)) {
final Iterator<KeyValue<Windowed<String>, Long>> dataIterator = this.forward ?
expectedRecords.iterator() :
expectedRecords.descendingIterator();
TestUtils.checkEquals(scanIterator, dataIterator);
}
testRange(stateStore, innerLow, innerHigh, this.forward);
testRange(stateStore, null, middle, this.forward);
testRange(stateStore, middle, null, this.forward);
testRange(stateStore, null, innerHighBetween, this.forward);
testRange(stateStore, innerLowBetween, null, this.forward);
}
}
private List<KeyValue<Windowed<String>, Long>> filterList(final KeyValueIterator<Windowed<String>, Long> iterator, final String from, final String to) {
final Predicate<KeyValue<Windowed<String>, Long>> pred = elem -> {
if (from != null && elem.key.key().compareTo(from) < 0) {
return false;
}
if (to != null && elem.key.key().compareTo(to) > 0) {
return false;
}
return elem != null;
};
return Utils.toList(iterator, pred);
}
private void testRange(final WindowStore<String, Long> store, final String from, final String to, final boolean forward) {
try (final KeyValueIterator<Windowed<String>, Long> resultIterator = forward ? store.fetch(from, to, 0, Long.MAX_VALUE) : store.backwardFetch(from, to, 0, Long.MAX_VALUE);
final KeyValueIterator<Windowed<String>, Long> expectedIterator = forward ? store.fetchAll(0, Long.MAX_VALUE) : store.backwardFetchAll(0, Long.MAX_VALUE)) {
final List<KeyValue<Windowed<String>, Long>> result = Utils.toList(resultIterator);
final List<KeyValue<Windowed<String>, Long>> expected = filterList(expectedIterator, from, to);
assertThat(result, is(expected));
}
}
private static Stream<Arguments> buildParameters(final List<StoreType> types,
final List<Boolean> logging,
final List<Boolean> caching,
final List<Boolean> forward) {
final Stream.Builder<Arguments> builder = Stream.builder();
for (final StoreType type : types) {
for (final boolean log : logging) {
for (final boolean cache : caching) {
for (final boolean f : forward) {
builder.add(Arguments.of(type, log, cache, f));
}
}
}
}
return builder.build();
}
private Materialized<String, Long, WindowStore<Bytes, byte[]>> getStoreConfig(final StoreType type, final boolean cachingEnabled, final boolean loggingEnabled) {
final Supplier<WindowBytesStoreSupplier> createStore = () -> {
if (type == StoreType.InMemory) {
return Stores.inMemoryWindowStore(STORE_NAME, Duration.ofMillis(RETENTION_MS),
Duration.ofMillis(WINDOW_SIZE),
false);
} else if (type == StoreType.RocksDB) {
return Stores.persistentWindowStore(STORE_NAME, Duration.ofMillis(RETENTION_MS),
Duration.ofMillis(WINDOW_SIZE),
false);
} else if (type == StoreType.Timed) {
return Stores.persistentTimestampedWindowStore(STORE_NAME, Duration.ofMillis(RETENTION_MS),
Duration.ofMillis(WINDOW_SIZE),
false);
} else {
return Stores.inMemoryWindowStore(STORE_NAME, Duration.ofMillis(RETENTION_MS),
Duration.ofMillis(WINDOW_SIZE),
false);
}
};
final WindowBytesStoreSupplier stateStoreSupplier = createStore.get();
final Materialized<String, Long, WindowStore<Bytes, byte[]>> stateStoreConfig = Materialized
.<String, Long>as(stateStoreSupplier)
.withKeySerde(Serdes.String())
.withValueSerde(Serdes.Long());
if (cachingEnabled) {
stateStoreConfig.withCachingEnabled();
} else {
stateStoreConfig.withCachingDisabled();
}
if (loggingEnabled) {
stateStoreConfig.withLoggingEnabled(new HashMap<>());
} else {
stateStoreConfig.withLoggingDisabled();
}
return stateStoreConfig;
}
}
| StoreType |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestLocalRunner.java | {
"start": 2208,
"end": 2604
} | class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(TestLocalRunner.class);
private static int INPUT_SIZES[] =
new int[] { 50000, 500, 500, 20, 5000, 500};
private static int OUTPUT_SIZES[] =
new int[] { 1, 500, 500, 500, 500, 500};
private static int SLEEP_INTERVALS[] =
new int[] { 10000, 15, 15, 20, 250, 60 };
private static | TestLocalRunner |
java | resilience4j__resilience4j | resilience4j-circuitbreaker/src/main/java/io/github/resilience4j/circuitbreaker/internal/CircuitBreakerStateMachine.java | {
"start": 30704,
"end": 32446
} | class ____ implements CircuitBreakerState {
private final CircuitBreakerMetrics circuitBreakerMetrics;
DisabledState() {
this.circuitBreakerMetrics = CircuitBreakerMetrics
.forDisabled(getCircuitBreakerConfig());
}
/**
* Returns always true, because the CircuitBreaker is disabled.
*
* @return always true, because the CircuitBreaker is disabled.
*/
@Override
public boolean tryAcquirePermission() {
return true;
}
/**
* Does not throw an exception, because the CircuitBreaker is disabled.
*/
@Override
public void acquirePermission() {
// noOp
}
@Override
public void releasePermission() {
// noOp
}
@Override
public void onError(long duration, TimeUnit durationUnit, Throwable throwable) {
// noOp
}
@Override
public void onSuccess(long duration, TimeUnit durationUnit) {
// noOp
}
@Override
public void handlePossibleTransition(CircuitBreakerConfig.TransitionCheckResult result) {
// noOp
}
@Override
public int attempts() {
return 0;
}
/**
* Get the state of the CircuitBreaker
*/
@Override
public CircuitBreaker.State getState() {
return CircuitBreaker.State.DISABLED;
}
/**
* Get metrics of the CircuitBreaker
*/
@Override
public CircuitBreakerMetrics getMetrics() {
return circuitBreakerMetrics;
}
}
private | DisabledState |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/batch/BatchingInheritanceDeleteTest.java | {
"start": 4424,
"end": 4627
} | class ____ {
@Id
@GeneratedValue(strategy = GenerationType.AUTO)
private long id;
public long getId() {
return id;
}
public void setId(final long id) {
this.id = id;
}
}
}
| AbstractFoo |
java | apache__camel | components/camel-zookeeper/src/main/java/org/apache/camel/component/zookeeper/ZooKeeperConnectionManager.java | {
"start": 1641,
"end": 2463
} | class ____ implements ZookeeperConnectionStrategy {
private ConnectionHolder holder;
private ZooKeeperConfiguration configuration;
DefaultZookeeperConnectionStrategy(ZooKeeperEndpoint endpoint) {
this.configuration = endpoint.getConfiguration();
LOG.debug("Creating connection to ZooKeeper: {}", configuration);
holder = new ConnectionHolder(configuration);
}
@Override
public ConnectionHolder getConnection() {
return holder;
}
@Override
public void shutdown() {
LOG.debug("Shutting down connection to ZooKeeper: {}", configuration);
holder.closeConnection();
}
}
public void shutdown() {
strategy.shutdown();
}
}
| DefaultZookeeperConnectionStrategy |
java | spring-projects__spring-security | config/src/integration-test/java/org/springframework/security/config/annotation/rsocket/JwtITests.java | {
"start": 2874,
"end": 5404
} | class ____ {
@Autowired
RSocketMessageHandler handler;
@Autowired
SecuritySocketAcceptorInterceptor interceptor;
@Autowired
ServerController controller;
@Autowired
ReactiveJwtDecoder decoder;
private CloseableChannel server;
private RSocketRequester requester;
@BeforeEach
public void setup() {
// @formatter:off
this.server = RSocketServer.create()
.payloadDecoder(PayloadDecoder.ZERO_COPY)
.interceptors((registry) -> registry.forSocketAcceptor(this.interceptor)
)
.acceptor(this.handler.responder())
.bind(TcpServerTransport.create("localhost", 0))
.block();
// @formatter:on
}
@AfterEach
public void dispose() {
this.requester.rsocket().dispose();
this.server.dispose();
this.controller.payloads.clear();
}
@Test
public void routeWhenBearerThenAuthorized() {
BearerTokenMetadata credentials = new BearerTokenMetadata("token");
given(this.decoder.decode(any())).willReturn(Mono.just(jwt()));
// @formatter:off
this.requester = requester()
.setupMetadata(credentials.getToken(), BearerTokenMetadata.BEARER_AUTHENTICATION_MIME_TYPE)
.connectTcp(this.server.address().getHostName(), this.server.address().getPort())
.block();
String hiRob = this.requester.route("secure.retrieve-mono")
.data("rob")
.retrieveMono(String.class)
.block();
// @formatter:on
assertThat(hiRob).isEqualTo("Hi rob");
}
@Test
public void routeWhenAuthenticationBearerThenAuthorized() {
MimeType authenticationMimeType = MimeTypeUtils
.parseMimeType(WellKnownMimeType.MESSAGE_RSOCKET_AUTHENTICATION.getString());
BearerTokenMetadata credentials = new BearerTokenMetadata("token");
given(this.decoder.decode(any())).willReturn(Mono.just(jwt()));
// @formatter:off
this.requester = requester().setupMetadata(credentials, authenticationMimeType)
.connectTcp(this.server.address().getHostName(), this.server.address().getPort())
.block();
String hiRob = this.requester.route("secure.retrieve-mono")
.data("rob")
.retrieveMono(String.class).block();
// @formatter:on
assertThat(hiRob).isEqualTo("Hi rob");
}
private Jwt jwt() {
return TestJwts.jwt()
.claim(IdTokenClaimNames.ISS, "https://issuer.example.com")
.claim(IdTokenClaimNames.SUB, "rob")
.claim(IdTokenClaimNames.AUD, Arrays.asList("client-id"))
.build();
}
private RSocketRequester.Builder requester() {
return RSocketRequester.builder().rsocketStrategies(this.handler.getRSocketStrategies());
}
@Configuration
@EnableRSocketSecurity
static | JwtITests |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/identifier/CompositeIdGenerationTypeTest.java | {
"start": 3692,
"end": 4098
} | class ____ {
@Id
private Long id;
@Id
@UuidGenerator
private String uuid;
private String name;
public SingleIdClass() {
}
public SingleIdClass(Long id, String name) {
this.id = id;
this.name = name;
}
public Long getId() {
return id;
}
public String getUuid() {
return uuid;
}
public String getName() {
return name;
}
}
@Embeddable
static | SingleIdClass |
java | apache__kafka | group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/TopicIds.java | {
"start": 5556,
"end": 8605
} | class ____ implements Iterator<Uuid> {
final Iterator<String> iterator;
final TopicResolver resolver;
private Uuid next = null;
private TopicIdIterator(
Iterator<String> iterator,
TopicResolver resolver
) {
this.iterator = Objects.requireNonNull(iterator);
this.resolver = Objects.requireNonNull(resolver);
}
@Override
public boolean hasNext() {
if (next != null) return true;
Uuid result = null;
do {
if (!iterator.hasNext()) {
return false;
}
String next = iterator.next();
Uuid topicId = resolver.id(next);
if (topicId != null) {
result = topicId;
}
} while (result == null);
next = result;
return true;
}
@Override
public Uuid next() {
if (!hasNext()) throw new NoSuchElementException();
Uuid result = next;
next = null;
return result;
}
}
@Override
public Iterator<Uuid> iterator() {
return new TopicIdIterator(topicNames.iterator(), resolver);
}
@Override
public Object[] toArray() {
throw new UnsupportedOperationException();
}
@Override
public <T> T[] toArray(T[] a) {
throw new UnsupportedOperationException();
}
@Override
public boolean add(Uuid o) {
throw new UnsupportedOperationException();
}
@Override
public boolean remove(Object o) {
throw new UnsupportedOperationException();
}
@Override
public boolean addAll(Collection<? extends Uuid> c) {
throw new UnsupportedOperationException();
}
@Override
public void clear() {
throw new UnsupportedOperationException();
}
@Override
public boolean removeAll(Collection<?> c) {
throw new UnsupportedOperationException();
}
@Override
public boolean retainAll(Collection<?> c) {
throw new UnsupportedOperationException();
}
@Override
public boolean containsAll(Collection<?> c) {
for (Object o : c) {
if (!contains(o)) return false;
}
return true;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
TopicIds uuids = (TopicIds) o;
if (!Objects.equals(topicNames, uuids.topicNames)) return false;
return Objects.equals(resolver.image(), uuids.resolver.image());
}
@Override
public int hashCode() {
int result = topicNames.hashCode();
result = 31 * result + resolver.image().hashCode();
return result;
}
@Override
public String toString() {
return "TopicIds(topicNames=" + topicNames +
", resolver=" + resolver +
')';
}
}
| TopicIdIterator |
java | spring-projects__spring-framework | spring-expression/src/test/java/org/springframework/expression/spel/SpelDocumentationTests.java | {
"start": 9381,
"end": 9910
} | class ____ {
@Test
void methodInvocation() {
// string literal, evaluates to "bc"
String c = parser.parseExpression("'abc'.substring(1, 3)").getValue(String.class);
assertThat(c).isEqualTo("bc");
StandardEvaluationContext societyContext = new StandardEvaluationContext();
societyContext.setRootObject(new IEEE());
// evaluates to true
boolean isMember = parser.parseExpression("isMember('Mihajlo Pupin')").getValue(societyContext, Boolean.class);
assertThat(isMember).isTrue();
}
}
@Nested
| Methods |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bytecode/enhancement/cache/AbstractManyToOneNoProxyTest.java | {
"start": 3497,
"end": 3883
} | class ____ {
Long id;
String name;
public Actor() {
}
@Id
@GeneratedValue
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
@Entity
@BatchSize(size = 512)
@DiscriminatorValue(value = "USER")
public static | Actor |
java | spring-projects__spring-security | oauth2/oauth2-authorization-server/src/main/java/org/springframework/security/oauth2/server/authorization/oidc/http/converter/OidcProviderConfigurationHttpMessageConverter.java | {
"start": 5765,
"end": 8308
} | class ____
implements Converter<Map<String, Object>, OidcProviderConfiguration> {
private static final ClaimConversionService CLAIM_CONVERSION_SERVICE = ClaimConversionService
.getSharedInstance();
private static final TypeDescriptor OBJECT_TYPE_DESCRIPTOR = TypeDescriptor.valueOf(Object.class);
private static final TypeDescriptor STRING_TYPE_DESCRIPTOR = TypeDescriptor.valueOf(String.class);
private static final TypeDescriptor URL_TYPE_DESCRIPTOR = TypeDescriptor.valueOf(URL.class);
private final ClaimTypeConverter claimTypeConverter;
private OidcProviderConfigurationConverter() {
Converter<Object, ?> collectionStringConverter = getConverter(
TypeDescriptor.collection(Collection.class, STRING_TYPE_DESCRIPTOR));
Converter<Object, ?> urlConverter = getConverter(URL_TYPE_DESCRIPTOR);
Map<String, Converter<Object, ?>> claimConverters = new HashMap<>();
claimConverters.put(OidcProviderMetadataClaimNames.ISSUER, urlConverter);
claimConverters.put(OidcProviderMetadataClaimNames.AUTHORIZATION_ENDPOINT, urlConverter);
claimConverters.put(OidcProviderMetadataClaimNames.TOKEN_ENDPOINT, urlConverter);
claimConverters.put(OidcProviderMetadataClaimNames.TOKEN_ENDPOINT_AUTH_METHODS_SUPPORTED,
collectionStringConverter);
claimConverters.put(OidcProviderMetadataClaimNames.JWKS_URI, urlConverter);
claimConverters.put(OidcProviderMetadataClaimNames.USER_INFO_ENDPOINT, urlConverter);
claimConverters.put(OidcProviderMetadataClaimNames.RESPONSE_TYPES_SUPPORTED, collectionStringConverter);
claimConverters.put(OidcProviderMetadataClaimNames.GRANT_TYPES_SUPPORTED, collectionStringConverter);
claimConverters.put(OidcProviderMetadataClaimNames.SUBJECT_TYPES_SUPPORTED, collectionStringConverter);
claimConverters.put(OidcProviderMetadataClaimNames.ID_TOKEN_SIGNING_ALG_VALUES_SUPPORTED,
collectionStringConverter);
claimConverters.put(OidcProviderMetadataClaimNames.SCOPES_SUPPORTED, collectionStringConverter);
this.claimTypeConverter = new ClaimTypeConverter(claimConverters);
}
@Override
public OidcProviderConfiguration convert(Map<String, Object> source) {
Map<String, Object> parsedClaims = this.claimTypeConverter.convert(source);
return OidcProviderConfiguration.withClaims(parsedClaims).build();
}
private static Converter<Object, ?> getConverter(TypeDescriptor targetDescriptor) {
return (source) -> CLAIM_CONVERSION_SERVICE.convert(source, OBJECT_TYPE_DESCRIPTOR, targetDescriptor);
}
}
}
| OidcProviderConfigurationConverter |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/jsontype/PolymorphicTypeValidator.java | {
"start": 7556,
"end": 7746
} | class ____ partial implementation (with all validation calls returning
* {@link Validity#INDETERMINATE}) and convenience methods for indicating failure reasons.
* Use of this base | with |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/factories/TestFunctionDefinitionFactory.java | {
"start": 1285,
"end": 1650
} | class ____ implements FunctionDefinitionFactory {
@Override
public FunctionDefinition createFunctionDefinition(
String name, CatalogFunction catalogFunction, Context context) {
return UserDefinedFunctionHelper.instantiateFunction(
context.getClassLoader(), null, name, catalogFunction);
}
}
| TestFunctionDefinitionFactory |
java | apache__flink | flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/operations/utils/ColumnOperationUtils.java | {
"start": 4935,
"end": 5382
} | class ____ extends ApiExpressionDefaultVisitor<String> {
@Override
public String visit(UnresolvedReferenceExpression unresolvedReference) {
return unresolvedReference.getName();
}
@Override
protected String defaultMethod(Expression expression) {
throw new ValidationException("Unexpected drop column expression: " + expression);
}
}
private static | DropColumnsExtractor |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/ast/expr/SQLDbLinkExpr.java | {
"start": 910,
"end": 3687
} | class ____ extends SQLExprImpl implements SQLName, SQLExpr, SQLReplaceable {
private SQLExpr expr;
private String dbLink;
private long dbLinkHashCode64;
private long hashCode64;
public SQLDbLinkExpr() {
}
public String getSimpleName() {
return dbLink;
}
public SQLExpr getExpr() {
return this.expr;
}
public void setExpr(SQLExpr expr) {
this.expr = expr;
}
public String getDbLink() {
return this.dbLink;
}
public void setDbLink(String dbLink) {
this.dbLink = dbLink;
}
@Override
protected void accept0(SQLASTVisitor visitor) {
if (visitor.visit(this)) {
acceptChild(visitor, this.expr);
}
visitor.endVisit(this);
}
@Override
public boolean replace(SQLExpr expr, SQLExpr target) {
if (this.expr == expr) {
setExpr(target);
return true;
}
return false;
}
@Override
public List<SQLObject> getChildren() {
return Collections.<SQLObject>singletonList(this.expr);
}
@Override
public int hashCode() {
long value = hashCode64();
return (int) (value ^ (value >>> 32));
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
SQLDbLinkExpr other = (SQLDbLinkExpr) obj;
return this.hashCode64() == other.hashCode64();
}
public SQLDbLinkExpr clone() {
SQLDbLinkExpr x = new SQLDbLinkExpr();
if (expr != null) {
x.setExpr(expr.clone());
}
x.dbLink = dbLink;
return x;
}
public long nameHashCode64() {
if (dbLinkHashCode64 == 0
&& dbLink != null) {
dbLinkHashCode64 = FnvHash.hashCode64(dbLink);
}
return dbLinkHashCode64;
}
@Override
public long hashCode64() {
if (hashCode64 == 0) {
long hash;
if (expr instanceof SQLName) {
hash = ((SQLName) expr).hashCode64();
hash ^= '@';
hash *= FnvHash.PRIME;
} else if (expr == null) {
hash = FnvHash.BASIC;
} else {
hash = FnvHash.fnv1a_64_lower(expr.toString());
hash ^= '@';
hash *= FnvHash.PRIME;
}
hash = FnvHash.hashCode64(hash, dbLink);
hashCode64 = hash;
}
return hashCode64;
}
@Override
public SQLColumnDefinition getResolvedColumn() {
return null;
}
}
| SQLDbLinkExpr |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutorResourceSpec.java | {
"start": 1351,
"end": 3198
} | class ____ {
private final CPUResource cpuCores;
private final MemorySize taskHeapSize;
private final MemorySize taskOffHeapSize;
private final MemorySize networkMemSize;
private final MemorySize managedMemorySize;
private final Map<String, ExternalResource> extendedResources;
public TaskExecutorResourceSpec(
CPUResource cpuCores,
MemorySize taskHeapSize,
MemorySize taskOffHeapSize,
MemorySize networkMemSize,
MemorySize managedMemorySize,
Collection<ExternalResource> extendedResources) {
this.cpuCores = cpuCores;
this.taskHeapSize = taskHeapSize;
this.taskOffHeapSize = taskOffHeapSize;
this.networkMemSize = networkMemSize;
this.managedMemorySize = managedMemorySize;
this.extendedResources =
Preconditions.checkNotNull(extendedResources).stream()
.filter(resource -> !resource.isZero())
.collect(Collectors.toMap(ExternalResource::getName, Function.identity()));
Preconditions.checkArgument(
this.extendedResources.size() == extendedResources.size(),
"Duplicate resource name encountered in external resources.");
}
public CPUResource getCpuCores() {
return cpuCores;
}
public MemorySize getTaskHeapSize() {
return taskHeapSize;
}
public MemorySize getTaskOffHeapSize() {
return taskOffHeapSize;
}
public MemorySize getNetworkMemSize() {
return networkMemSize;
}
public MemorySize getManagedMemorySize() {
return managedMemorySize;
}
public Map<String, ExternalResource> getExtendedResources() {
return Collections.unmodifiableMap(extendedResources);
}
}
| TaskExecutorResourceSpec |
java | apache__spark | sql/connect/client/jdbc/src/main/java/org/apache/spark/sql/connect/client/jdbc/SparkConnectDriver.java | {
"start": 922,
"end": 1165
} | class ____ extends NonRegisteringSparkConnectDriver {
static {
try {
DriverManager.registerDriver(new SparkConnectDriver());
} catch (SQLException rethrow) {
throw new RuntimeException(rethrow);
}
}
}
| SparkConnectDriver |
java | elastic__elasticsearch | x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java | {
"start": 41962,
"end": 44550
} | class ____ extends FoldingRule<PivotExec> {
@Override
protected PhysicalPlan rule(PivotExec plan) {
if (plan.child() instanceof EsQueryExec exec) {
Pivot p = plan.pivot();
EsQueryExec fold = FoldAggregate.fold(
new AggregateExec(plan.source(), exec, new ArrayList<>(p.groupingSet()), combine(p.groupingSet(), p.aggregates())),
exec
);
// replace the aggregate extractors with pivot specific extractors
// these require a reference to the pivoting column in order to compare the value
// due to the Pivot structure - the column is the last entry in the grouping set
QueryContainer query = fold.queryContainer();
List<QueryContainer.FieldInfo> fields = new ArrayList<>(query.fields());
int startingIndex = fields.size() - p.aggregates().size() - 1;
// pivot grouping
QueryContainer.FieldInfo groupField = fields.remove(startingIndex);
AttributeMap<Literal> values = p.valuesToLiterals();
for (int i = startingIndex; i < fields.size(); i++) {
QueryContainer.FieldInfo field = fields.remove(i);
for (Map.Entry<Attribute, Literal> entry : values.entrySet()) {
fields.add(
new QueryContainer.FieldInfo(
new PivotColumnRef(groupField.extraction(), field.extraction(), entry.getValue().value()),
Expressions.id(entry.getKey()),
entry.getKey()
)
);
}
i += values.size();
}
return fold.with(
new QueryContainer(
query.query(),
query.aggs(),
fields,
query.aliases(),
query.pseudoFunctions(),
query.scalarFunctions(),
query.sort(),
query.limit(),
query.shouldTrackHits(),
query.shouldIncludeFrozen(),
values.size(),
query.allowPartialSearchResults()
)
);
}
return plan;
}
}
//
// local
//
private static | FoldPivot |
java | elastic__elasticsearch | x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/pagerduty/IncidentEventTests.java | {
"start": 985,
"end": 6273
} | class ____ extends ESTestCase {
public void testPagerDutyXContent() throws IOException {
String serviceKey = randomAlphaOfLength(3);
boolean attachPayload = randomBoolean();
Payload payload = null;
if (attachPayload) {
payload = new Payload.Simple(Collections.singletonMap(randomAlphaOfLength(3), randomAlphaOfLength(3)));
}
String watchId = randomAlphaOfLength(3);
String description = randomAlphaOfLength(3);
String eventType = randomAlphaOfLength(3);
String incidentKey = rarely() ? null : randomAlphaOfLength(3);
String client = rarely() ? null : randomAlphaOfLength(3);
String clientUrl = rarely() ? null : randomAlphaOfLength(3);
String account = rarely() ? null : randomAlphaOfLength(3);
IncidentEventContext[] contexts = null;
List<IncidentEventContext> links = new ArrayList<>();
List<IncidentEventContext> images = new ArrayList<>();
if (randomBoolean()) {
int numContexts = randomIntBetween(0, 3);
contexts = new IncidentEventContext[numContexts];
for (int i = 0; i < numContexts; i++) {
if (randomBoolean()) {
contexts[i] = IncidentEventContext.link("href", "text");
links.add(contexts[i]);
} else {
contexts[i] = IncidentEventContext.image("src", "href", "alt");
images.add(contexts[i]);
}
}
}
HttpProxy proxy = rarely() ? null : HttpProxy.NO_PROXY;
IncidentEvent event = new IncidentEvent(
description,
eventType,
incidentKey,
client,
clientUrl,
account,
attachPayload,
contexts,
proxy
);
XContentBuilder jsonBuilder = jsonBuilder();
jsonBuilder.startObject(); // since its a snippet
event.buildAPIXContent(jsonBuilder, ToXContent.EMPTY_PARAMS, serviceKey, payload, watchId);
jsonBuilder.endObject();
XContentParser parser = createParser(jsonBuilder);
parser.nextToken();
ObjectPath objectPath = ObjectPath.createFromXContent(jsonBuilder.contentType().xContent(), BytesReference.bytes(jsonBuilder));
String actualServiceKey = objectPath.evaluate(IncidentEvent.Fields.ROUTING_KEY.getPreferredName());
String actualWatchId = objectPath.evaluate(
IncidentEvent.Fields.PAYLOAD.getPreferredName() + "." + IncidentEvent.Fields.SOURCE.getPreferredName()
);
if (actualWatchId == null) {
actualWatchId = "watcher"; // hardcoded if the SOURCE is null
}
String actualDescription = objectPath.evaluate(
IncidentEvent.Fields.PAYLOAD.getPreferredName() + "." + IncidentEvent.Fields.SUMMARY.getPreferredName()
);
String actualEventType = objectPath.evaluate(IncidentEvent.Fields.EVENT_ACTION.getPreferredName());
String actualIncidentKey = objectPath.evaluate(IncidentEvent.Fields.DEDUP_KEY.getPreferredName());
String actualClient = objectPath.evaluate(IncidentEvent.Fields.CLIENT.getPreferredName());
String actualClientUrl = objectPath.evaluate(IncidentEvent.Fields.CLIENT_URL.getPreferredName());
String actualSeverity = objectPath.evaluate(
IncidentEvent.Fields.PAYLOAD.getPreferredName() + "." + IncidentEvent.Fields.SEVERITY.getPreferredName()
);
Map<String, Object> payloadDetails = objectPath.evaluate("payload.custom_details.payload");
Payload actualPayload = null;
if (payloadDetails != null) {
actualPayload = new Payload.Simple(payloadDetails);
}
List<IncidentEventContext> actualLinks = new ArrayList<>();
List<Map<String, String>> linkMap = objectPath.evaluate(IncidentEvent.Fields.LINKS.getPreferredName());
if (linkMap != null) {
for (Map<String, String> iecValue : linkMap) {
actualLinks.add(IncidentEventContext.link(iecValue.get("href"), iecValue.get("text")));
}
}
List<IncidentEventContext> actualImages = new ArrayList<>();
List<Map<String, String>> imgMap = objectPath.evaluate(IncidentEvent.Fields.IMAGES.getPreferredName());
if (imgMap != null) {
for (Map<String, String> iecValue : imgMap) {
actualImages.add(IncidentEventContext.image(iecValue.get("src"), iecValue.get("href"), iecValue.get("alt")));
}
}
// assert the actuals were the same as expected
assertThat(serviceKey, equalTo(actualServiceKey));
assertThat(eventType, equalTo(actualEventType));
assertThat(incidentKey, equalTo(actualIncidentKey));
assertThat(description, equalTo(actualDescription));
assertThat(watchId, equalTo(actualWatchId));
assertThat("critical", equalTo(actualSeverity));
assertThat(client, equalTo(actualClient));
assertThat(clientUrl, equalTo(actualClientUrl));
assertThat(links, equalTo(actualLinks));
assertThat(images, equalTo(actualImages));
assertThat(payload, equalTo(actualPayload));
}
}
| IncidentEventTests |
java | mybatis__mybatis-3 | src/test/java/org/apache/ibatis/submitted/annotion_many_one_add_columnprefix/RoleDao.java | {
"start": 917,
"end": 1187
} | interface ____ {
@Select("select * from role")
// @formatter:off
@Results(id = "roleMap1", value = {
@Result(id = true, column = "id", property = "id"),
@Result(column = "name", property = "name")
})
// @formatter:on
List<Role> findAll();
}
| RoleDao |
java | quarkusio__quarkus | extensions/devui/runtime/src/main/java/io/quarkus/devui/runtime/jsonrpc/JsonRpcResponse.java | {
"start": 845,
"end": 1270
} | class ____ {
public final int code;
public final String message;
public Error(int code, String message) {
this.code = code;
this.message = message;
}
@Override
public String toString() {
return "Error{" +
"code=" + code +
", message='" + message + '\'' +
'}';
}
}
}
| Error |
java | spring-projects__spring-boot | module/spring-boot-jooq/src/main/java/org/springframework/boot/jooq/autoconfigure/ExceptionTranslatorExecuteListener.java | {
"start": 1436,
"end": 2188
} | interface ____ extends ExecuteListener {
/**
* Default {@link ExceptionTranslatorExecuteListener} suitable for most applications.
*/
ExceptionTranslatorExecuteListener DEFAULT = new DefaultExceptionTranslatorExecuteListener();
/**
* Creates a new {@link ExceptionTranslatorExecuteListener} backed by an
* {@link SQLExceptionTranslator}.
* @param translatorFactory factory function used to create the
* {@link SQLExceptionTranslator}
* @return a new {@link ExceptionTranslatorExecuteListener} instance
*/
static ExceptionTranslatorExecuteListener of(Function<ExecuteContext, SQLExceptionTranslator> translatorFactory) {
return new DefaultExceptionTranslatorExecuteListener(translatorFactory);
}
}
| ExceptionTranslatorExecuteListener |
java | apache__camel | components/camel-openapi-java/src/main/java/org/apache/camel/openapi/RestModelConverters.java | {
"start": 1575,
"end": 3143
} | class ____ {
private static final ModelConverters MODEL31_CONVERTERS;
static {
MODEL31_CONVERTERS = ModelConverters.getInstance(true);
MODEL31_CONVERTERS.addConverter(new ClassNameExtensionModelResolver(new FqnModelResolver(true)));
}
private static final ModelConverters MODEL30_CONVERTERS;
static {
MODEL30_CONVERTERS = ModelConverters.getInstance();
MODEL30_CONVERTERS.addConverter(new ClassNameExtensionModelResolver(new FqnModelResolver()));
}
private final boolean openapi31;
public RestModelConverters(boolean openapi31) {
this.openapi31 = openapi31;
}
public List<? extends Schema<?>> readClass(OpenAPI oasDocument, Class<?> clazz) {
if (clazz.equals(java.io.File.class)) {
// File is a special type in OAS2 / OAS3 (no model)
return null;
} else {
return readClassOpenApi3(clazz);
}
}
private List<? extends Schema<?>> readClassOpenApi3(Class<?> clazz) {
String name = clazz.getName();
if (!name.contains(".")) {
return null;
}
ModelConverters modelConverters = openapi31 ? MODEL31_CONVERTERS : MODEL30_CONVERTERS;
Map<String, Schema> swaggerModel = modelConverters.readAll(clazz);
List<Schema<?>> modelSchemas = new java.util.ArrayList<>();
swaggerModel.forEach((key, schema) -> {
schema.setName(key);
modelSchemas.add(schema);
});
return modelSchemas;
}
private static | RestModelConverters |
java | apache__avro | lang/java/perf/src/main/java/org/apache/avro/perf/test/basic/ArrayTest.java | {
"start": 1314,
"end": 2521
} | class ____ {
@Benchmark
@OperationsPerInvocation(BasicState.BATCH_SIZE)
public void encode(final TestStateEncode state) throws Exception {
final Encoder e = state.encoder;
final int items = state.getBatchSize() / 4;
e.writeArrayStart();
e.setItemCount(1);
e.startItem();
e.writeArrayStart();
e.setItemCount(items);
for (int i = 0; i < state.getBatchSize(); i += 4) {
e.startItem();
e.writeFloat(state.testData[i + 0]);
e.writeFloat(state.testData[i + 1]);
e.writeFloat(state.testData[i + 2]);
e.writeFloat(state.testData[i + 3]);
}
e.writeArrayEnd();
e.writeArrayEnd();
}
@Benchmark
@OperationsPerInvocation(BasicState.BATCH_SIZE)
public float decode(final TestStateDecode state) throws Exception {
final Decoder d = state.decoder;
float total = 0.0f;
d.readArrayStart();
for (long i = d.readArrayStart(); i != 0; i = d.arrayNext()) {
for (long j = 0; j < i; j++) {
total += d.readFloat();
total += d.readFloat();
total += d.readFloat();
total += d.readFloat();
}
}
d.arrayNext();
return total;
}
@State(Scope.Thread)
public static | ArrayTest |
java | quarkusio__quarkus | extensions/observability-devservices/runtime/src/main/java/io/quarkus/observability/runtime/DevResourceShutdownRecorder.java | {
"start": 210,
"end": 363
} | class ____ {
public void shutdown(ShutdownContext context) {
context.addLastShutdownTask(DevResources::stop);
}
}
| DevResourceShutdownRecorder |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/commit/ITestAbfsManifestStoreOperations.java | {
"start": 3816,
"end": 6804
} | class ____ via Configuration")
.isInstanceOf(AbfsManifestStoreOperations.class);
final FileStatus st = operations.getFileStatus(path);
final String etag = operations.getEtag(st);
Assertions.assertThat(etag)
.describedAs("Etag of %s", st)
.isNotBlank();
LOG.info("etag of empty file is \"{}\"", etag);
final FileStatus[] statuses = fs.listStatus(path);
Assertions.assertThat(statuses)
.describedAs("List(%s)", path)
.hasSize(1);
final FileStatus lsStatus = statuses[0];
Assertions.assertThat(operations.getEtag(lsStatus))
.describedAs("etag of list status (%s) compared to HEAD value of %s", lsStatus, st)
.isEqualTo(etag);
}
@Test
public void testEtagsOfDifferentDataDifferent() throws Throwable {
describe("Verify that two different blocks of data written have different tags");
final Path path = methodPath();
final FileSystem fs = getFileSystem();
Path src = new Path(path, "src");
ContractTestUtils.createFile(fs, src, true,
"data1234".getBytes(StandardCharsets.UTF_8));
final ManifestStoreOperations operations = createManifestStoreOperations();
final FileStatus srcStatus = operations.getFileStatus(src);
final String srcTag = operations.getEtag(srcStatus);
LOG.info("etag of file 1 is \"{}\"", srcTag);
// now overwrite with data of same length
// (ensure that path or length aren't used exclusively as tag)
ContractTestUtils.createFile(fs, src, true,
"1234data".getBytes(StandardCharsets.UTF_8));
// validate
final String tag2 = operations.getEtag(operations.getFileStatus(src));
LOG.info("etag of file 2 is \"{}\"", tag2);
Assertions.assertThat(tag2)
.describedAs("etag of updated file")
.isNotEqualTo(srcTag);
}
@Test
public void testEtagConsistencyAcrossRename() throws Throwable {
describe("Verify that when a file is renamed, the etag remains unchanged");
final Path path = methodPath();
final FileSystem fs = getFileSystem();
Path src = new Path(path, "src");
Path dest = new Path(path, "dest");
ContractTestUtils.createFile(fs, src, true,
"sample data".getBytes(StandardCharsets.UTF_8));
final ManifestStoreOperations operations = createManifestStoreOperations();
final FileStatus srcStatus = operations.getFileStatus(src);
final String srcTag = operations.getEtag(srcStatus);
LOG.info("etag of short file is \"{}\"", srcTag);
Assertions.assertThat(srcTag)
.describedAs("Etag of %s", srcStatus)
.isNotBlank();
// rename
operations.commitFile(new FileEntry(src, dest, 0, srcTag));
// validate
FileStatus destStatus = operations.getFileStatus(dest);
final String destTag = operations.getEtag(destStatus);
Assertions.assertThat(destTag)
.describedAs("etag of list status (%s) compared to HEAD value of %s", destStatus, srcStatus)
.isEqualTo(srcTag);
}
}
| loaded |
java | netty__netty | handler/src/test/java/io/netty/handler/flow/FlowControlHandlerTest.java | {
"start": 2445,
"end": 24342
} | class ____ {
private static EventLoopGroup GROUP;
@BeforeAll
public static void init() {
GROUP = new MultiThreadIoEventLoopGroup(NioIoHandler.newFactory());
}
@AfterAll
public static void destroy() {
GROUP.shutdownGracefully();
}
/**
* The {@link OneByteToThreeStringsDecoder} decodes this {@code byte[]} into three messages.
*/
private static ByteBuf newOneMessage() {
return Unpooled.wrappedBuffer(new byte[]{ 1 });
}
private static Channel newServer(final boolean autoRead, final ChannelHandler... handlers) {
assertTrue(handlers.length >= 1);
ServerBootstrap serverBootstrap = new ServerBootstrap();
serverBootstrap.group(GROUP)
.channel(NioServerSocketChannel.class)
.childOption(ChannelOption.AUTO_READ, autoRead)
.childHandler(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) {
ChannelPipeline pipeline = ch.pipeline();
pipeline.addLast(new OneByteToThreeStringsDecoder());
pipeline.addLast(handlers);
}
});
return serverBootstrap.bind(0)
.syncUninterruptibly()
.channel();
}
private static Channel newClient(SocketAddress server) {
Bootstrap bootstrap = new Bootstrap();
bootstrap.group(GROUP)
.channel(NioSocketChannel.class)
.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 1000)
.handler(new ChannelInboundHandlerAdapter() {
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
fail("In this test the client is never receiving a message from the server.");
}
});
return bootstrap.connect(server)
.syncUninterruptibly()
.channel();
}
/**
* This test demonstrates the default behavior if auto reading
* is turned on from the get-go and you're trying to turn it off
* once you've received your first message.
*
* NOTE: This test waits for the client to disconnect which is
* interpreted as the signal that all {@code byte}s have been
* transferred to the server.
*/
@Test
public void testAutoReadingOn() throws Exception {
final CountDownLatch latch = new CountDownLatch(3);
ChannelInboundHandlerAdapter handler = new ChannelInboundHandlerAdapter() {
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
ReferenceCountUtil.release(msg);
// We're turning off auto reading in the hope that no
// new messages are being sent but that is not true.
ctx.channel().config().setAutoRead(false);
latch.countDown();
}
};
Channel server = newServer(true, handler);
Channel client = newClient(server.localAddress());
try {
client.writeAndFlush(newOneMessage())
.syncUninterruptibly();
// We received three messages even through auto reading
// was turned off after we received the first message.
assertTrue(latch.await(1L, SECONDS));
} finally {
client.close();
server.close();
}
}
/**
* This test demonstrates the default behavior if auto reading
* is turned off from the get-go and you're calling read() in
* the hope that only one message will be returned.
*
* NOTE: This test waits for the client to disconnect which is
* interpreted as the signal that all {@code byte}s have been
* transferred to the server.
*/
@Test
public void testAutoReadingOff() throws Exception {
final Exchanger<Channel> peerRef = new Exchanger<Channel>();
final CountDownLatch latch = new CountDownLatch(3);
ChannelInboundHandlerAdapter handler = new ChannelInboundHandlerAdapter() {
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
peerRef.exchange(ctx.channel(), 1L, SECONDS);
ctx.fireChannelActive();
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
ReferenceCountUtil.release(msg);
latch.countDown();
}
};
Channel server = newServer(false, handler);
Channel client = newClient(server.localAddress());
try {
// The client connection on the server side
Channel peer = peerRef.exchange(null, 1L, SECONDS);
// Write the message
client.writeAndFlush(newOneMessage())
.syncUninterruptibly();
// Read the message
peer.read();
// We received all three messages but hoped that only one
// message was read because auto reading was off and we
// invoked the read() method only once.
assertTrue(latch.await(1L, SECONDS));
} finally {
client.close();
server.close();
}
}
/**
* The {@link FlowControlHandler} will simply pass-through all messages
* if auto reading is on and remains on.
*/
@Test
public void testFlowAutoReadOn() throws Exception {
final CountDownLatch latch = new CountDownLatch(3);
final Exchanger<Channel> peerRef = new Exchanger<Channel>();
ChannelInboundHandlerAdapter handler = new ChannelDuplexHandler() {
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
peerRef.exchange(ctx.channel(), 1L, SECONDS);
super.channelActive(ctx);
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
ReferenceCountUtil.release(msg);
latch.countDown();
}
};
final FlowControlHandler flow = new FlowControlHandler();
Channel server = newServer(true, flow, handler);
Channel client = newClient(server.localAddress());
try {
// The client connection on the server side
Channel peer = peerRef.exchange(null, 1L, SECONDS);
// Write the message
client.writeAndFlush(newOneMessage())
.syncUninterruptibly();
// We should receive 3 messages
assertTrue(latch.await(1L, SECONDS));
assertTrue(peer.eventLoop().submit(new Callable<Boolean>() {
@Override
public Boolean call() {
return flow.isQueueEmpty();
}
}).get());
} finally {
client.close();
server.close();
}
}
/**
* The {@link FlowControlHandler} will pass down messages one by one
* if {@link ChannelConfig#setAutoRead(boolean)} is being toggled.
*/
@Test
public void testFlowToggleAutoRead() throws Exception {
final Exchanger<Channel> peerRef = new Exchanger<Channel>();
final CountDownLatch msgRcvLatch1 = new CountDownLatch(1);
final CountDownLatch msgRcvLatch2 = new CountDownLatch(1);
final CountDownLatch msgRcvLatch3 = new CountDownLatch(1);
final CountDownLatch setAutoReadLatch1 = new CountDownLatch(1);
final CountDownLatch setAutoReadLatch2 = new CountDownLatch(1);
ChannelInboundHandlerAdapter handler = new ChannelInboundHandlerAdapter() {
private int msgRcvCount;
private int expectedMsgCount;
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
peerRef.exchange(ctx.channel(), 1L, SECONDS);
ctx.fireChannelActive();
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws InterruptedException {
ReferenceCountUtil.release(msg);
// Disable auto reading after each message
ctx.channel().config().setAutoRead(false);
if (msgRcvCount++ != expectedMsgCount) {
return;
}
switch (msgRcvCount) {
case 1:
msgRcvLatch1.countDown();
if (setAutoReadLatch1.await(1L, SECONDS)) {
++expectedMsgCount;
}
break;
case 2:
msgRcvLatch2.countDown();
if (setAutoReadLatch2.await(1L, SECONDS)) {
++expectedMsgCount;
}
break;
default:
msgRcvLatch3.countDown();
break;
}
}
};
final FlowControlHandler flow = new FlowControlHandler();
Channel server = newServer(true, flow, handler);
Channel client = newClient(server.localAddress());
try {
// The client connection on the server side
Channel peer = peerRef.exchange(null, 1L, SECONDS);
client.writeAndFlush(newOneMessage())
.syncUninterruptibly();
// channelRead(1)
assertTrue(msgRcvLatch1.await(1L, SECONDS));
// channelRead(2)
peer.config().setAutoRead(true);
setAutoReadLatch1.countDown();
assertTrue(msgRcvLatch1.await(1L, SECONDS));
// channelRead(3)
peer.config().setAutoRead(true);
setAutoReadLatch2.countDown();
assertTrue(msgRcvLatch3.await(1L, SECONDS));
assertTrue(peer.eventLoop().submit(new Callable<Boolean>() {
@Override
public Boolean call() {
return flow.isQueueEmpty();
}
}).get());
} finally {
client.close();
server.close();
}
}
/**
* The {@link FlowControlHandler} will pass down messages one by one
* if auto reading is off and the user is calling {@code read()} on
* their own.
*/
@Test
public void testFlowAutoReadOff() throws Exception {
final Exchanger<Channel> peerRef = new Exchanger<Channel>();
final CountDownLatch msgRcvLatch1 = new CountDownLatch(1);
final CountDownLatch msgRcvLatch2 = new CountDownLatch(2);
final CountDownLatch msgRcvLatch3 = new CountDownLatch(3);
ChannelInboundHandlerAdapter handler = new ChannelDuplexHandler() {
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
ctx.fireChannelActive();
peerRef.exchange(ctx.channel(), 1L, SECONDS);
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
msgRcvLatch1.countDown();
msgRcvLatch2.countDown();
msgRcvLatch3.countDown();
}
};
final FlowControlHandler flow = new FlowControlHandler();
Channel server = newServer(false, flow, handler);
Channel client = newClient(server.localAddress());
try {
// The client connection on the server side
Channel peer = peerRef.exchange(null, 1L, SECONDS);
// Write the message
client.writeAndFlush(newOneMessage())
.syncUninterruptibly();
// channelRead(1)
peer.read();
assertTrue(msgRcvLatch1.await(1L, SECONDS));
// channelRead(2)
peer.read();
assertTrue(msgRcvLatch2.await(1L, SECONDS));
// channelRead(3)
peer.read();
assertTrue(msgRcvLatch3.await(1L, SECONDS));
assertTrue(peer.eventLoop().submit(new Callable<Boolean>() {
@Override
public Boolean call() {
return flow.isQueueEmpty();
}
}).get());
} finally {
client.close();
server.close();
}
}
/**
* The {@link FlowControlHandler} will not pass read events onto the
* pipeline when the user is calling {@code read()} on their own if the
* queue is not empty and auto-reading is turned off for the channel.
*/
@Test
public void testFlowAutoReadOffAndQueueNonEmpty() throws Exception {
final Exchanger<Channel> peerRef = new Exchanger<Channel>();
final CountDownLatch msgRcvLatch1 = new CountDownLatch(1);
final CountDownLatch msgRcvLatch2 = new CountDownLatch(2);
final CountDownLatch msgRcvLatch3 = new CountDownLatch(3);
ChannelInboundHandlerAdapter handler = new ChannelDuplexHandler() {
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
ctx.fireChannelActive();
peerRef.exchange(ctx.channel(), 1L, SECONDS);
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
msgRcvLatch1.countDown();
msgRcvLatch2.countDown();
msgRcvLatch3.countDown();
}
};
final FlowControlHandler flow = new FlowControlHandler();
Channel server = newServer(false, flow, handler);
Channel client = newClient(server.localAddress());
try {
// The client connection on the server side
Channel peer = peerRef.exchange(null, 1L, SECONDS);
// Write the first message
client.writeAndFlush(newOneMessage())
.syncUninterruptibly();
// channelRead(1)
peer.read();
assertTrue(msgRcvLatch1.await(1L, SECONDS));
assertFalse(peer.eventLoop().submit(new Callable<Boolean>() {
@Override
public Boolean call() {
return flow.isQueueEmpty();
}
}).get());
// Write the second message
client.writeAndFlush(newOneMessage())
.syncUninterruptibly();
// channelRead(2)
peer.read();
assertTrue(msgRcvLatch2.await(1L, SECONDS));
// channelRead(3)
peer.read();
assertTrue(msgRcvLatch3.await(1L, SECONDS));
assertTrue(peer.eventLoop().submit(new Callable<Boolean>() {
@Override
public Boolean call() {
return flow.isQueueEmpty();
}
}).get());
} finally {
client.close();
server.close();
}
}
@Test
public void testReentranceNotCausesNPE() throws Throwable {
final Exchanger<Channel> peerRef = new Exchanger<Channel>();
final CountDownLatch latch = new CountDownLatch(3);
final AtomicReference<Throwable> causeRef = new AtomicReference<Throwable>();
ChannelInboundHandlerAdapter handler = new ChannelDuplexHandler() {
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
ctx.fireChannelActive();
peerRef.exchange(ctx.channel(), 1L, SECONDS);
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
latch.countDown();
ctx.read();
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
causeRef.set(cause);
}
};
final FlowControlHandler flow = new FlowControlHandler();
Channel server = newServer(false, flow, handler);
Channel client = newClient(server.localAddress());
try {
// The client connection on the server side
Channel peer = peerRef.exchange(null, 1L, SECONDS);
// Write the message
client.writeAndFlush(newOneMessage())
.syncUninterruptibly();
// channelRead(1)
peer.read();
assertTrue(latch.await(1L, SECONDS));
assertTrue(peer.eventLoop().submit(new Callable<Boolean>() {
@Override
public Boolean call() {
return flow.isQueueEmpty();
}
}).get());
Throwable cause = causeRef.get();
if (cause != null) {
throw cause;
}
} finally {
client.close();
server.close();
}
}
@Test
public void testSwallowedReadComplete() throws Exception {
final long delayMillis = 100;
final Queue<IdleStateEvent> userEvents = new LinkedBlockingQueue<IdleStateEvent>();
final EmbeddedChannel channel = new EmbeddedChannel(false, false,
new FlowControlHandler(),
new IdleStateHandler(delayMillis, 0, 0, MILLISECONDS),
new ChannelInboundHandlerAdapter() {
@Override
public void channelActive(ChannelHandlerContext ctx) {
ctx.fireChannelActive();
ctx.read();
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
ctx.fireChannelRead(msg);
ctx.read();
}
@Override
public void channelReadComplete(ChannelHandlerContext ctx) {
ctx.fireChannelReadComplete();
ctx.read();
}
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
if (evt instanceof IdleStateEvent) {
userEvents.add((IdleStateEvent) evt);
}
ctx.fireUserEventTriggered(evt);
}
}
);
channel.config().setAutoRead(false);
assertFalse(channel.config().isAutoRead());
channel.register();
// Reset read timeout by some message
assertTrue(channel.writeInbound(Unpooled.EMPTY_BUFFER));
channel.flushInbound();
assertEquals(Unpooled.EMPTY_BUFFER, channel.readInbound());
// Emulate 'no more messages in NIO channel' on the next read attempt.
channel.flushInbound();
assertNull(channel.readInbound());
Thread.sleep(delayMillis + 20L);
channel.runPendingTasks();
assertEquals(IdleStateEvent.FIRST_READER_IDLE_STATE_EVENT, userEvents.poll());
assertFalse(channel.finish());
}
@Test
public void testRemoveFlowControl() throws Exception {
final Exchanger<Channel> peerRef = new Exchanger<Channel>();
final CountDownLatch latch = new CountDownLatch(3);
ChannelInboundHandlerAdapter handler = new ChannelDuplexHandler() {
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
peerRef.exchange(ctx.channel(), 1L, SECONDS);
//do the first read
ctx.read();
super.channelActive(ctx);
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
latch.countDown();
super.channelRead(ctx, msg);
}
};
final FlowControlHandler flow = new FlowControlHandler() {
private int num;
@Override
public void channelRead(final ChannelHandlerContext ctx, Object msg) throws Exception {
super.channelRead(ctx, msg);
++num;
if (num >= 3) {
//We have received 3 messages. Remove myself later
final ChannelHandler handler = this;
ctx.channel().eventLoop().execute(new Runnable() {
@Override
public void run() {
ctx.pipeline().remove(handler);
}
});
}
}
};
ChannelInboundHandlerAdapter tail = new ChannelInboundHandlerAdapter() {
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
//consume this msg
ReferenceCountUtil.release(msg);
}
};
Channel server = newServer(false /* no auto read */, flow, handler, tail);
Channel client = newClient(server.localAddress());
try {
// The client connection on the server side
Channel peer = peerRef.exchange(null, 1L, SECONDS);
// Write one message
client.writeAndFlush(newOneMessage()).sync();
// We should receive 3 messages
assertTrue(latch.await(1L, SECONDS));
assertTrue(peer.eventLoop().submit(new Callable<Boolean>() {
@Override
public Boolean call() {
return flow.isQueueEmpty();
}
}).get());
} finally {
client.close();
server.close();
}
}
/**
* This is a fictional message decoder. It decodes each {@code byte}
* into three strings.
*/
private static final | FlowControlHandlerTest |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java | {
"start": 66764,
"end": 137021
} | interface ____ {
String run(int exitcode, String... options) throws IOException;
}
@Test
@Timeout(value = 30)
public void testRemoteException() throws Exception {
UserGroupInformation tmpUGI =
UserGroupInformation.createUserForTesting("tmpname", new String[] {"mygroup"});
PrintStream bak = null;
try {
Path p = new Path("/foo");
dfs.mkdirs(p);
dfs.setPermission(p, new FsPermission((short)0700));
bak = System.err;
tmpUGI.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
FsShell fshell = new FsShell(dfs.getConf());
ByteArrayOutputStream out = new ByteArrayOutputStream();
PrintStream tmp = new PrintStream(out);
System.setErr(tmp);
String[] args = new String[2];
args[0] = "-ls";
args[1] = "/foo";
int ret = ToolRunner.run(fshell, args);
assertEquals(1, ret, "returned should be 1");
String str = out.toString();
assertTrue(
str.indexOf("Permission denied") != -1, "permission denied printed");
out.reset();
return null;
}
});
} finally {
if (bak != null) {
System.setErr(bak);
}
}
}
  /**
   * Exercises "-get" against healthy and corrupted block replicas: a plain
   * get must reproduce the local file's contents, a get of a file whose
   * replicas were corrupted must fail on checksum verification, and
   * "-get -ignoreCrc" must still return the (corrupted) bytes.
   */
  @Test
  @Timeout(value = 30)
  public void testGet() throws IOException {
    GenericTestUtils.setLogLevel(FSInputChecker.LOG, Level.TRACE);
    final String fname = "testGet.txt";
    Path root = new Path("/test/get");
    final Path remotef = new Path(root, fname);
    final Configuration conf = new HdfsConfiguration();
    // Set short retry timeouts so this test runs faster
    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
    // Each run() executes "-get [options] remotef <fresh local dst>",
    // asserts the shell exit code, and returns the fetched contents on
    // success (null otherwise).
    TestGetRunner runner = new TestGetRunner() {
      private int count = 0;
      private final FsShell shell = new FsShell(conf);
      public String run(int exitcode, String... options) throws IOException {
        // Unique destination per invocation to avoid overwrite failures.
        String dst = new File(TEST_ROOT_DIR, fname + ++count)
            .getAbsolutePath();
        String[] args = new String[options.length + 3];
        args[0] = "-get";
        args[args.length - 2] = remotef.toString();
        args[args.length - 1] = dst;
        for (int i = 0; i < options.length; i++) {
          args[i + 1] = options[i];
        }
        show("args=" + Arrays.asList(args));
        try {
          assertEquals(exitcode, shell.run(args));
        } catch (Exception e) {
          assertTrue(false, StringUtils.stringifyException(e));
        }
        return exitcode == 0 ? DFSTestUtil.readFile(new File(dst)) : null;
      }
    };
    File localf = createLocalFile(new File(TEST_ROOT_DIR, fname));
    MiniDFSCluster cluster = null;
    DistributedFileSystem dfs = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true)
          .build();
      dfs = cluster.getFileSystem();
      mkdir(dfs, root);
      dfs.copyFromLocalFile(false, false, new Path(localf.getPath()), remotef);
      String localfcontent = DFSTestUtil.readFile(localf);
      // Sanity: gets from a healthy cluster match the local contents.
      assertEquals(localfcontent, runner.run(0));
      assertEquals(localfcontent, runner.run(0, "-ignoreCrc"));
      // find block files to modify later
      List<MaterializedReplica> replicas = getMaterializedReplicas(cluster);
      // Shut down miniCluster and then corrupt the block files by overwriting a
      // portion with junk data. We must shut down the miniCluster so that threads
      // in the data node do not hold locks on the block files while we try to
      // write into them. Particularly on Windows, the data node's use of the
      // FileChannel.transferTo method can cause block files to be memory mapped
      // in read-only mode during the transfer to a client, and this causes a
      // locking conflict. The call to shutdown the miniCluster blocks until all
      // DataXceiver threads exit, preventing this problem.
      dfs.close();
      cluster.shutdown();
      show("replicas=" + replicas);
      corrupt(replicas, localfcontent);
      // Start the miniCluster again, but do not reformat, so prior files remain.
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(false)
          .build();
      dfs = cluster.getFileSystem();
      // Checksum verification must now fail (exit 1, no output) ...
      assertEquals(null, runner.run(1));
      // ... but -ignoreCrc returns the bytes as stored: corrupt() bumped
      // only the first byte, everything after it is unchanged.
      String corruptedcontent = runner.run(0, "-ignoreCrc");
      assertEquals(localfcontent.substring(1), corruptedcontent.substring(1));
      assertEquals(localfcontent.charAt(0) + 1, corruptedcontent.charAt(0));
    } finally {
      if (null != dfs) {
        try {
          dfs.close();
        } catch (Exception e) {
        }
      }
      if (null != cluster) {
        cluster.shutdown();
      }
      localf.delete();
    }
  }
/**
* Test -stat [format] <path>... prints statistics about the file/directory
* at <path> in the specified format.
*/
@Test
@Timeout(value = 30)
public void testStat() throws Exception {
final SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
final Path testDir1 = new Path("testStat", "dir1");
dfs.mkdirs(testDir1);
final Path testFile2 = new Path(testDir1, "file2");
DFSTestUtil.createFile(dfs, testFile2, 2 * BLOCK_SIZE, (short) 3, 0);
final FileStatus status1 = dfs.getFileStatus(testDir1);
final String mtime1 = fmt.format(new Date(status1.getModificationTime()));
final String atime1 = fmt.format(new Date(status1.getAccessTime()));
long now = Time.now();
dfs.setTimes(testFile2, now + 3000, now + 6000);
final FileStatus status2 = dfs.getFileStatus(testFile2);
final String mtime2 = fmt.format(new Date(status2.getModificationTime()));
final String atime2 = fmt.format(new Date(status2.getAccessTime()));
final ByteArrayOutputStream out = new ByteArrayOutputStream();
System.setOut(new PrintStream(out));
doFsStat(dfs.getConf(), null);
out.reset();
doFsStat(dfs.getConf(), null, testDir1);
assertEquals(out.toString(), String.format("%s%n", mtime1),
"Unexpected -stat output: " + out);
out.reset();
doFsStat(dfs.getConf(), null, testDir1, testFile2);
assertEquals(out.toString(), String.format("%s%n%s%n", mtime1, mtime2),
"Unexpected -stat output: " + out);
doFsStat(dfs.getConf(), "%F %u:%g %b %y %n");
out.reset();
doFsStat(dfs.getConf(), "%F %a %A %u:%g %b %y %n", testDir1);
assertTrue(out.toString().contains(mtime1), out.toString());
assertTrue(out.toString().contains("directory"), out.toString());
assertTrue(out.toString().contains(status1.getGroup()), out.toString());
assertTrue(out.toString().contains(status1.getPermission().toString()), out.toString());
int n = status1.getPermission().toShort();
int octal = (n>>>9&1)*1000 + (n>>>6&7)*100 + (n>>>3&7)*10 + (n&7);
assertTrue(out.toString().contains(String.valueOf(octal)), out.toString());
out.reset();
doFsStat(dfs.getConf(), "%F %a %A %u:%g %b %x %y %n", testDir1, testFile2);
n = status2.getPermission().toShort();
octal = (n>>>9&1)*1000 + (n>>>6&7)*100 + (n>>>3&7)*10 + (n&7);
assertTrue(out.toString().contains(mtime1), out.toString());
assertTrue(out.toString().contains(atime1), out.toString());
assertTrue(out.toString().contains("regular file"), out.toString());
assertTrue(out.toString().contains(status2.getPermission().toString()), out.toString());
assertTrue(out.toString().contains(String.valueOf(octal)), out.toString());
assertTrue(out.toString().contains(mtime2), out.toString());
assertTrue(out.toString().contains(atime2), out.toString());
}
private static void doFsStat(Configuration conf, String format, Path... files)
throws Exception {
if (files == null || files.length == 0) {
final String[] argv = (format == null ? new String[] {"-stat"} :
new String[] {"-stat", format});
assertEquals(-1, ToolRunner.run(new FsShell(conf), argv),
"Should have failed with missing arguments");
} else {
List<String> argv = new LinkedList<>();
argv.add("-stat");
if (format != null) {
argv.add(format);
}
for (Path f : files) {
argv.add(f.toString());
}
int ret = ToolRunner.run(new FsShell(conf), argv.toArray(new String[0]));
assertEquals(0, ret, argv + " returned non-zero status " + ret);
}
}
  /**
   * Verifies recursive listing ("-lsr"): it succeeds for a user that can
   * read everything, and for a user denied access to a subdirectory it
   * exits non-zero while still listing the entries it can read ("zzz").
   */
  @Test
  @Timeout(value = 30)
  public void testLsr() throws Exception {
    final Configuration conf = dfs.getConf();
    final String root = createTree(dfs, "lsr");
    dfs.mkdirs(new Path(root, "zzz"));
    runLsr(new FsShell(conf), root, 0);
    final Path sub = new Path(root, "sub");
    // Make "sub" unreadable to everyone so the second listing hits an error.
    dfs.setPermission(sub, new FsPermission((short)0));
    final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    final String tmpusername = ugi.getShortUserName() + "1";
    UserGroupInformation tmpUGI = UserGroupInformation.createUserForTesting(
        tmpusername, new String[] {tmpusername});
    String results = tmpUGI.doAs(new PrivilegedExceptionAction<String>() {
      @Override
      public String run() throws Exception {
        // Expect exit code 1 because "sub" is not accessible to this user.
        return runLsr(new FsShell(conf), root, 1);
      }
    });
    assertTrue(results.contains("zzz"));
  }
private static String runLsr(final FsShell shell, String root, int returnvalue
) throws Exception {
System.out.println("root=" + root + ", returnvalue=" + returnvalue);
final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
final PrintStream out = new PrintStream(bytes);
final PrintStream oldOut = System.out;
final PrintStream oldErr = System.err;
System.setOut(out);
System.setErr(out);
final String results;
try {
assertEquals(returnvalue, shell.run(new String[]{"-lsr", root}));
results = bytes.toString();
} finally {
System.setOut(oldOut);
System.setErr(oldErr);
IOUtils.closeStream(out);
}
System.out.println("results:\n" + results);
return results;
}
/**
* default setting is file:// which is not a DFS
* so DFSAdmin should throw and catch InvalidArgumentException
* and return -1 exit code.
* @throws Exception
*/
@Test
@Timeout(value = 30)
public void testInvalidShell() throws Exception {
Configuration conf = new Configuration(); // default FS (non-DFS)
DFSAdmin admin = new DFSAdmin();
admin.setConf(conf);
int res = admin.run(new String[] {"-refreshNodes"});
assertEquals(res, -1, "expected to fail -1");
}
  // Preserve Copy Option is -ptopxa (timestamps, ownership, permission, XATTR,
  // ACLs)
  /**
   * Verifies that each "-cp -p..." flag combination preserves exactly the
   * attribute groups named by its letters on a file copy:
   * t=timestamps, o=ownership, p=permission, x=XAttrs, a=ACLs
   * (-pa also implies preserving permissions).
   */
  @Test
  @Timeout(value = 120)
  public void testCopyCommandsWithPreserveOption() throws Exception {
    FsShell shell = null;
    final String testdir = "/tmp/TestDFSShell-testCopyCommandsWithPreserveOption-"
        + counter.getAndIncrement();
    final Path hdfsTestDir = new Path(testdir);
    try {
      dfs.mkdirs(hdfsTestDir);
      Path src = new Path(hdfsTestDir, "srcfile");
      dfs.create(src).close();
      // Give the source a non-trivial ACL so preservation is observable.
      dfs.setAcl(src, Lists.newArrayList(
          aclEntry(ACCESS, USER, ALL),
          aclEntry(ACCESS, USER, "foo", ALL),
          aclEntry(ACCESS, GROUP, READ_EXECUTE),
          aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE),
          aclEntry(ACCESS, OTHER, EXECUTE)));
      // Snapshot source attributes before copying, to compare against each target.
      FileStatus status = dfs.getFileStatus(src);
      final long mtime = status.getModificationTime();
      final long atime = status.getAccessTime();
      final String owner = status.getOwner();
      final String group = status.getGroup();
      final FsPermission perm = status.getPermission();
      dfs.setXAttr(src, USER_A1, USER_A1_VALUE);
      dfs.setXAttr(src, TRUSTED_A1, TRUSTED_A1_VALUE);
      shell = new FsShell(dfs.getConf());
      // -p : times/ownership/permission preserved; no xattrs, no ACLs.
      Path target1 = new Path(hdfsTestDir, "targetfile1");
      String[] argv = new String[] { "-cp", "-p", src.toUri().toString(),
          target1.toUri().toString() };
      int ret = ToolRunner.run(shell, argv);
      assertEquals(SUCCESS, ret, "cp -p is not working");
      FileStatus targetStatus = dfs.getFileStatus(target1);
      assertEquals(mtime, targetStatus.getModificationTime());
      assertEquals(atime, targetStatus.getAccessTime());
      assertEquals(owner, targetStatus.getOwner());
      assertEquals(group, targetStatus.getGroup());
      FsPermission targetPerm = targetStatus.getPermission();
      assertTrue(perm.equals(targetPerm));
      Map<String, byte[]> xattrs = dfs.getXAttrs(target1);
      assertTrue(xattrs.isEmpty());
      List<AclEntry> acls = dfs.getAclStatus(target1).getEntries();
      assertTrue(acls.isEmpty());
      assertFalse(targetStatus.hasAcl());
      // -ptop : same as -p (t, o, p explicit); still no xattrs or ACLs.
      Path target2 = new Path(hdfsTestDir, "targetfile2");
      argv = new String[] { "-cp", "-ptop", src.toUri().toString(),
          target2.toUri().toString() };
      ret = ToolRunner.run(shell, argv);
      assertEquals(SUCCESS, ret, "cp -ptop is not working");
      targetStatus = dfs.getFileStatus(target2);
      assertEquals(mtime, targetStatus.getModificationTime());
      assertEquals(atime, targetStatus.getAccessTime());
      assertEquals(owner, targetStatus.getOwner());
      assertEquals(group, targetStatus.getGroup());
      targetPerm = targetStatus.getPermission();
      assertTrue(perm.equals(targetPerm));
      xattrs = dfs.getXAttrs(target2);
      assertTrue(xattrs.isEmpty());
      acls = dfs.getAclStatus(target2).getEntries();
      assertTrue(acls.isEmpty());
      assertFalse(targetStatus.hasAcl());
      // -ptopx : additionally preserves both xattrs; still no ACLs.
      Path target3 = new Path(hdfsTestDir, "targetfile3");
      argv = new String[] { "-cp", "-ptopx", src.toUri().toString(),
          target3.toUri().toString() };
      ret = ToolRunner.run(shell, argv);
      assertEquals(SUCCESS, ret, "cp -ptopx is not working");
      targetStatus = dfs.getFileStatus(target3);
      assertEquals(mtime, targetStatus.getModificationTime());
      assertEquals(atime, targetStatus.getAccessTime());
      assertEquals(owner, targetStatus.getOwner());
      assertEquals(group, targetStatus.getGroup());
      targetPerm = targetStatus.getPermission();
      assertTrue(perm.equals(targetPerm));
      xattrs = dfs.getXAttrs(target3);
      assertEquals(xattrs.size(), 2);
      assertArrayEquals(USER_A1_VALUE, xattrs.get(USER_A1));
      assertArrayEquals(TRUSTED_A1_VALUE, xattrs.get(TRUSTED_A1));
      acls = dfs.getAclStatus(target3).getEntries();
      assertTrue(acls.isEmpty());
      assertFalse(targetStatus.hasAcl());
      // -ptopa : preserves ACLs instead of xattrs.
      Path target4 = new Path(hdfsTestDir, "targetfile4");
      argv = new String[] { "-cp", "-ptopa", src.toUri().toString(),
          target4.toUri().toString() };
      ret = ToolRunner.run(shell, argv);
      assertEquals(SUCCESS, ret, "cp -ptopa is not working");
      targetStatus = dfs.getFileStatus(target4);
      assertEquals(mtime, targetStatus.getModificationTime());
      assertEquals(atime, targetStatus.getAccessTime());
      assertEquals(owner, targetStatus.getOwner());
      assertEquals(group, targetStatus.getGroup());
      targetPerm = targetStatus.getPermission();
      assertTrue(perm.equals(targetPerm));
      xattrs = dfs.getXAttrs(target4);
      assertTrue(xattrs.isEmpty());
      acls = dfs.getAclStatus(target4).getEntries();
      assertFalse(acls.isEmpty());
      assertTrue(targetStatus.hasAcl());
      assertEquals(dfs.getAclStatus(src), dfs.getAclStatus(target4));
      // -ptoa (verify -pa option will preserve permissions also)
      Path target5 = new Path(hdfsTestDir, "targetfile5");
      argv = new String[] { "-cp", "-ptoa", src.toUri().toString(),
          target5.toUri().toString() };
      ret = ToolRunner.run(shell, argv);
      assertEquals(SUCCESS, ret, "cp -ptoa is not working");
      targetStatus = dfs.getFileStatus(target5);
      assertEquals(mtime, targetStatus.getModificationTime());
      assertEquals(atime, targetStatus.getAccessTime());
      assertEquals(owner, targetStatus.getOwner());
      assertEquals(group, targetStatus.getGroup());
      targetPerm = targetStatus.getPermission();
      assertTrue(perm.equals(targetPerm));
      xattrs = dfs.getXAttrs(target5);
      assertTrue(xattrs.isEmpty());
      acls = dfs.getAclStatus(target5).getEntries();
      assertFalse(acls.isEmpty());
      assertTrue(targetStatus.hasAcl());
      assertEquals(dfs.getAclStatus(src), dfs.getAclStatus(target5));
    } finally {
      if (null != shell) {
        shell.close();
      }
    }
  }
  /**
   * Verifies xattr handling of "-cp" for every combination of raw
   * (/.reserved/raw) and non-raw source/destination paths, for both file
   * and directory sources, including relative /.reserved/raw paths.
   * Raw xattrs must only be preserved when BOTH sides are raw.
   */
  @Test
  @Timeout(value = 120)
  public void testCopyCommandsWithRawXAttrs() throws Exception {
    FsShell shell = null;
    final String testdir = "/tmp/TestDFSShell-testCopyCommandsWithRawXAttrs-"
        + counter.getAndIncrement();
    final Path hdfsTestDir = new Path(testdir);
    final Path rawHdfsTestDir = new Path("/.reserved/raw" + testdir);
    try {
      dfs.mkdirs(hdfsTestDir);
      final Path src = new Path(hdfsTestDir, "srcfile");
      final String rawSrcBase = "/.reserved/raw" + testdir;
      final Path rawSrc = new Path(rawSrcBase, "srcfile");
      dfs.create(src).close();
      final Path srcDir = new Path(hdfsTestDir, "srcdir");
      final Path rawSrcDir = new Path("/.reserved/raw" + testdir, "srcdir");
      dfs.mkdirs(srcDir);
      final Path srcDirFile = new Path(srcDir, "srcfile");
      final Path rawSrcDirFile =
          new Path("/.reserved/raw" + srcDirFile);
      dfs.create(srcDirFile).close();
      // Tag every source object (via its raw path) with one user xattr
      // and one raw xattr; raw xattrs can only be set on raw paths.
      final Path[] paths = { rawSrc, rawSrcDir, rawSrcDirFile };
      final String[] xattrNames = { USER_A1, RAW_A1 };
      final byte[][] xattrVals = { USER_A1_VALUE, RAW_A1_VALUE };
      for (int i = 0; i < paths.length; i++) {
        for (int j = 0; j < xattrNames.length; j++) {
          dfs.setXAttr(paths[i], xattrNames[j], xattrVals[j]);
        }
      }
      shell = new FsShell(dfs.getConf());
      /* Check that a file as the source path works ok. */
      doTestCopyCommandsWithRawXAttrs(shell, dfs, src, hdfsTestDir, false);
      doTestCopyCommandsWithRawXAttrs(shell, dfs, rawSrc, hdfsTestDir, false);
      doTestCopyCommandsWithRawXAttrs(shell, dfs, src, rawHdfsTestDir, false);
      doTestCopyCommandsWithRawXAttrs(shell, dfs, rawSrc, rawHdfsTestDir, true);
      /* Use a relative /.reserved/raw path. */
      final Path savedWd = dfs.getWorkingDirectory();
      try {
        dfs.setWorkingDirectory(new Path(rawSrcBase));
        final Path relRawSrc = new Path("../srcfile");
        final Path relRawHdfsTestDir = new Path("..");
        doTestCopyCommandsWithRawXAttrs(shell, dfs, relRawSrc,
            relRawHdfsTestDir, true);
      } finally {
        // Restore the working directory for the rest of the test.
        dfs.setWorkingDirectory(savedWd);
      }
      /* Check that a directory as the source path works ok. */
      doTestCopyCommandsWithRawXAttrs(shell, dfs, srcDir, hdfsTestDir, false);
      doTestCopyCommandsWithRawXAttrs(shell, dfs, rawSrcDir, hdfsTestDir, false);
      doTestCopyCommandsWithRawXAttrs(shell, dfs, srcDir, rawHdfsTestDir, false);
      doTestCopyCommandsWithRawXAttrs(shell, dfs, rawSrcDir, rawHdfsTestDir,
          true);
      /* Use relative in an absolute path. */
      final String relRawSrcDir = "./.reserved/../.reserved/raw/../raw" +
          testdir + "/srcdir";
      final String relRawDstDir = "./.reserved/../.reserved/raw/../raw" +
          testdir;
      doTestCopyCommandsWithRawXAttrs(shell, dfs, new Path(relRawSrcDir),
          new Path(relRawDstDir), true);
    } finally {
      if (null != shell) {
        shell.close();
      }
      dfs.delete(hdfsTestDir, true);
    }
  }
  /**
   * Runs "-cp" from {@code src} into {@code hdfsTestDir} with "-p", "-px"
   * and no flags, checking xattr propagation. When exactly one side of the
   * copy is under /.reserved/raw the copy must fail; otherwise raw xattrs
   * are expected iff {@code expectRaw}, and user xattrs only with "-px".
   */
  private void doTestCopyCommandsWithRawXAttrs(FsShell shell, FileSystem fs,
      Path src, Path hdfsTestDir, boolean expectRaw) throws Exception {
    Path target;
    boolean srcIsRaw;
    if (src.isAbsolute()) {
      srcIsRaw = src.toString().contains("/.reserved/raw");
    } else {
      // Relative sources are resolved against the working directory first.
      srcIsRaw = new Path(fs.getWorkingDirectory(), src).
          toString().contains("/.reserved/raw");
    }
    final boolean destIsRaw = hdfsTestDir.toString().contains("/.reserved/raw");
    // XOR: a raw<->non-raw copy is illegal in either direction.
    final boolean srcDestMismatch = srcIsRaw ^ destIsRaw;
    // -p (possibly preserve raw if src & dst are both /.r/r */
    if (srcDestMismatch) {
      doCopyAndTest(shell, hdfsTestDir, src, "-p", ERROR);
    } else {
      target = doCopyAndTest(shell, hdfsTestDir, src, "-p", SUCCESS);
      checkXAttrs(fs, target, expectRaw, false);
    }
    // -px (possibly preserve raw, always preserve non-raw xattrs. */
    if (srcDestMismatch) {
      doCopyAndTest(shell, hdfsTestDir, src, "-px", ERROR);
    } else {
      target = doCopyAndTest(shell, hdfsTestDir, src, "-px", SUCCESS);
      checkXAttrs(fs, target, expectRaw, true);
    }
    // no args (possibly preserve raw, never preserve non-raw xattrs. */
    if (srcDestMismatch) {
      doCopyAndTest(shell, hdfsTestDir, src, null, ERROR);
    } else {
      target = doCopyAndTest(shell, hdfsTestDir, src, null, SUCCESS);
      checkXAttrs(fs, target, expectRaw, false);
    }
  }
private Path doCopyAndTest(FsShell shell, Path dest, Path src,
String cpArgs, int expectedExitCode) throws Exception {
final Path target = new Path(dest, "targetfile" +
counter.getAndIncrement());
final String[] argv = cpArgs == null ?
new String[] { "-cp", src.toUri().toString(),
target.toUri().toString() } :
new String[] { "-cp", cpArgs, src.toUri().toString(),
target.toUri().toString() };
final int ret = ToolRunner.run(shell, argv);
assertEquals(expectedExitCode, ret, "cp -p is not working");
return target;
}
private void checkXAttrs(FileSystem fs, Path target, boolean expectRaw,
boolean expectVanillaXAttrs) throws Exception {
final Map<String, byte[]> xattrs = fs.getXAttrs(target);
int expectedCount = 0;
if (expectRaw) {
assertArrayEquals(RAW_A1_VALUE, xattrs.get(RAW_A1), "raw.a1 has incorrect value");
expectedCount++;
}
if (expectVanillaXAttrs) {
assertArrayEquals(USER_A1_VALUE, xattrs.get(USER_A1), "user.a1 has incorrect value");
expectedCount++;
}
assertEquals(expectedCount, xattrs.size(), "xattrs size mismatch");
}
  // verify cp -ptopxa option will preserve directory attributes.
  /**
   * Same flag matrix as {@link #testCopyCommandsWithPreserveOption()} but
   * with a DIRECTORY source (including a default ACL entry and the sticky
   * bit), verifying recursive copies preserve directory attributes.
   */
  @Test
  @Timeout(value = 120)
  public void testCopyCommandsToDirectoryWithPreserveOption()
      throws Exception {
    FsShell shell = null;
    final String testdir =
        "/tmp/TestDFSShell-testCopyCommandsToDirectoryWithPreserveOption-"
        + counter.getAndIncrement();
    final Path hdfsTestDir = new Path(testdir);
    try {
      dfs.mkdirs(hdfsTestDir);
      Path srcDir = new Path(hdfsTestDir, "srcDir");
      dfs.mkdirs(srcDir);
      // Non-trivial ACL (note the DEFAULT entry, only valid on directories).
      dfs.setAcl(srcDir, Lists.newArrayList(
          aclEntry(ACCESS, USER, ALL),
          aclEntry(ACCESS, USER, "foo", ALL),
          aclEntry(ACCESS, GROUP, READ_EXECUTE),
          aclEntry(DEFAULT, GROUP, "bar", READ_EXECUTE),
          aclEntry(ACCESS, OTHER, EXECUTE)));
      // set sticky bit
      dfs.setPermission(srcDir,
          new FsPermission(ALL, READ_EXECUTE, EXECUTE, true));
      // Create a file in srcDir to check if modification time of
      // srcDir to be preserved after copying the file.
      // If cp -p command is to preserve modification time and then copy child
      // (srcFile), modification time will not be preserved.
      Path srcFile = new Path(srcDir, "srcFile");
      dfs.create(srcFile).close();
      // Snapshot source directory attributes for comparison.
      FileStatus status = dfs.getFileStatus(srcDir);
      final long mtime = status.getModificationTime();
      final long atime = status.getAccessTime();
      final String owner = status.getOwner();
      final String group = status.getGroup();
      final FsPermission perm = status.getPermission();
      dfs.setXAttr(srcDir, USER_A1, USER_A1_VALUE);
      dfs.setXAttr(srcDir, TRUSTED_A1, TRUSTED_A1_VALUE);
      shell = new FsShell(dfs.getConf());
      // -p : times/ownership/permission preserved; no xattrs, no ACLs.
      Path targetDir1 = new Path(hdfsTestDir, "targetDir1");
      String[] argv = new String[] { "-cp", "-p", srcDir.toUri().toString(),
          targetDir1.toUri().toString() };
      int ret = ToolRunner.run(shell, argv);
      assertEquals(SUCCESS, ret, "cp -p is not working");
      FileStatus targetStatus = dfs.getFileStatus(targetDir1);
      assertEquals(mtime, targetStatus.getModificationTime());
      assertEquals(atime, targetStatus.getAccessTime());
      assertEquals(owner, targetStatus.getOwner());
      assertEquals(group, targetStatus.getGroup());
      FsPermission targetPerm = targetStatus.getPermission();
      assertTrue(perm.equals(targetPerm));
      Map<String, byte[]> xattrs = dfs.getXAttrs(targetDir1);
      assertTrue(xattrs.isEmpty());
      List<AclEntry> acls = dfs.getAclStatus(targetDir1).getEntries();
      assertTrue(acls.isEmpty());
      assertFalse(targetStatus.hasAcl());
      // -ptop : same as -p with flags spelled out explicitly.
      Path targetDir2 = new Path(hdfsTestDir, "targetDir2");
      argv = new String[] { "-cp", "-ptop", srcDir.toUri().toString(),
          targetDir2.toUri().toString() };
      ret = ToolRunner.run(shell, argv);
      assertEquals(SUCCESS, ret, "cp -ptop is not working");
      targetStatus = dfs.getFileStatus(targetDir2);
      assertEquals(mtime, targetStatus.getModificationTime());
      assertEquals(atime, targetStatus.getAccessTime());
      assertEquals(owner, targetStatus.getOwner());
      assertEquals(group, targetStatus.getGroup());
      targetPerm = targetStatus.getPermission();
      assertTrue(perm.equals(targetPerm));
      xattrs = dfs.getXAttrs(targetDir2);
      assertTrue(xattrs.isEmpty());
      acls = dfs.getAclStatus(targetDir2).getEntries();
      assertTrue(acls.isEmpty());
      assertFalse(targetStatus.hasAcl());
      // -ptopx : additionally preserves both xattrs; still no ACLs.
      Path targetDir3 = new Path(hdfsTestDir, "targetDir3");
      argv = new String[] { "-cp", "-ptopx", srcDir.toUri().toString(),
          targetDir3.toUri().toString() };
      ret = ToolRunner.run(shell, argv);
      assertEquals(SUCCESS, ret, "cp -ptopx is not working");
      targetStatus = dfs.getFileStatus(targetDir3);
      assertEquals(mtime, targetStatus.getModificationTime());
      assertEquals(atime, targetStatus.getAccessTime());
      assertEquals(owner, targetStatus.getOwner());
      assertEquals(group, targetStatus.getGroup());
      targetPerm = targetStatus.getPermission();
      assertTrue(perm.equals(targetPerm));
      xattrs = dfs.getXAttrs(targetDir3);
      assertEquals(xattrs.size(), 2);
      assertArrayEquals(USER_A1_VALUE, xattrs.get(USER_A1));
      assertArrayEquals(TRUSTED_A1_VALUE, xattrs.get(TRUSTED_A1));
      acls = dfs.getAclStatus(targetDir3).getEntries();
      assertTrue(acls.isEmpty());
      assertFalse(targetStatus.hasAcl());
      // -ptopa : preserves ACLs instead of xattrs.
      Path targetDir4 = new Path(hdfsTestDir, "targetDir4");
      argv = new String[] { "-cp", "-ptopa", srcDir.toUri().toString(),
          targetDir4.toUri().toString() };
      ret = ToolRunner.run(shell, argv);
      assertEquals(SUCCESS, ret, "cp -ptopa is not working");
      targetStatus = dfs.getFileStatus(targetDir4);
      assertEquals(mtime, targetStatus.getModificationTime());
      assertEquals(atime, targetStatus.getAccessTime());
      assertEquals(owner, targetStatus.getOwner());
      assertEquals(group, targetStatus.getGroup());
      targetPerm = targetStatus.getPermission();
      assertTrue(perm.equals(targetPerm));
      xattrs = dfs.getXAttrs(targetDir4);
      assertTrue(xattrs.isEmpty());
      acls = dfs.getAclStatus(targetDir4).getEntries();
      assertFalse(acls.isEmpty());
      assertTrue(targetStatus.hasAcl());
      assertEquals(dfs.getAclStatus(srcDir), dfs.getAclStatus(targetDir4));
      // -ptoa (verify -pa option will preserve permissions also)
      Path targetDir5 = new Path(hdfsTestDir, "targetDir5");
      argv = new String[] { "-cp", "-ptoa", srcDir.toUri().toString(),
          targetDir5.toUri().toString() };
      ret = ToolRunner.run(shell, argv);
      assertEquals(SUCCESS, ret, "cp -ptoa is not working");
      targetStatus = dfs.getFileStatus(targetDir5);
      assertEquals(mtime, targetStatus.getModificationTime());
      assertEquals(atime, targetStatus.getAccessTime());
      assertEquals(owner, targetStatus.getOwner());
      assertEquals(group, targetStatus.getGroup());
      targetPerm = targetStatus.getPermission();
      assertTrue(perm.equals(targetPerm));
      xattrs = dfs.getXAttrs(targetDir5);
      assertTrue(xattrs.isEmpty());
      acls = dfs.getAclStatus(targetDir5).getEntries();
      assertFalse(acls.isEmpty());
      assertTrue(targetStatus.hasAcl());
      assertEquals(dfs.getAclStatus(srcDir), dfs.getAclStatus(targetDir5));
    } finally {
      if (shell != null) {
        shell.close();
      }
    }
  }
  // Verify cp -pa option will preserve both ACL and sticky bit.
  /**
   * Verifies sticky-bit handling of "-cp": plain "-p" preserves the sticky
   * bit (part of the permission) but not the ACL, while "-ptopa" preserves
   * both the sticky bit and the full ACL.
   */
  @Test
  @Timeout(value = 120)
  public void testCopyCommandsPreserveAclAndStickyBit() throws Exception {
    FsShell shell = null;
    final String testdir =
        "/tmp/TestDFSShell-testCopyCommandsPreserveAclAndStickyBit-"
        + counter.getAndIncrement();
    final Path hdfsTestDir = new Path(testdir);
    try {
      dfs.mkdirs(hdfsTestDir);
      Path src = new Path(hdfsTestDir, "srcfile");
      dfs.create(src).close();
      // Non-trivial ACL so preservation (or its absence) is observable.
      dfs.setAcl(src, Lists.newArrayList(
          aclEntry(ACCESS, USER, ALL),
          aclEntry(ACCESS, USER, "foo", ALL),
          aclEntry(ACCESS, GROUP, READ_EXECUTE),
          aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE),
          aclEntry(ACCESS, OTHER, EXECUTE)));
      // set sticky bit
      dfs.setPermission(src,
          new FsPermission(ALL, READ_EXECUTE, EXECUTE, true));
      // Snapshot source attributes for comparison against each copy.
      FileStatus status = dfs.getFileStatus(src);
      final long mtime = status.getModificationTime();
      final long atime = status.getAccessTime();
      final String owner = status.getOwner();
      final String group = status.getGroup();
      final FsPermission perm = status.getPermission();
      shell = new FsShell(dfs.getConf());
      // -p preserves sticky bit and doesn't preserve ACL
      Path target1 = new Path(hdfsTestDir, "targetfile1");
      String[] argv = new String[] { "-cp", "-p", src.toUri().toString(),
          target1.toUri().toString() };
      int ret = ToolRunner.run(shell, argv);
      assertEquals(SUCCESS, ret, "cp is not working");
      FileStatus targetStatus = dfs.getFileStatus(target1);
      assertEquals(mtime, targetStatus.getModificationTime());
      assertEquals(atime, targetStatus.getAccessTime());
      assertEquals(owner, targetStatus.getOwner());
      assertEquals(group, targetStatus.getGroup());
      FsPermission targetPerm = targetStatus.getPermission();
      assertTrue(perm.equals(targetPerm));
      List<AclEntry> acls = dfs.getAclStatus(target1).getEntries();
      assertTrue(acls.isEmpty());
      assertFalse(targetStatus.hasAcl());
      // -ptopa preserves both sticky bit and ACL
      Path target2 = new Path(hdfsTestDir, "targetfile2");
      argv = new String[] { "-cp", "-ptopa", src.toUri().toString(),
          target2.toUri().toString() };
      ret = ToolRunner.run(shell, argv);
      assertEquals(SUCCESS, ret, "cp -ptopa is not working");
      targetStatus = dfs.getFileStatus(target2);
      assertEquals(mtime, targetStatus.getModificationTime());
      assertEquals(atime, targetStatus.getAccessTime());
      assertEquals(owner, targetStatus.getOwner());
      assertEquals(group, targetStatus.getGroup());
      targetPerm = targetStatus.getPermission();
      assertTrue(perm.equals(targetPerm));
      acls = dfs.getAclStatus(target2).getEntries();
      assertFalse(acls.isEmpty());
      assertTrue(targetStatus.hasAcl());
      assertEquals(dfs.getAclStatus(src), dfs.getAclStatus(target2));
    } finally {
      if (null != shell) {
        shell.close();
      }
    }
  }
// force Copy Option is -f
@Test
@Timeout(value = 30)
public void testCopyCommandsWithForceOption() throws Exception {
FsShell shell = null;
final File localFile = new File(TEST_ROOT_DIR, "testFileForPut");
final String localfilepath = new Path(localFile.getAbsolutePath()).toUri().toString();
final String testdir = "/tmp/TestDFSShell-testCopyCommandsWithForceOption-"
+ counter.getAndIncrement();
final Path hdfsTestDir = new Path(testdir);
try {
dfs.mkdirs(hdfsTestDir);
localFile.createNewFile();
writeFile(dfs, new Path(testdir, "testFileForPut"));
shell = new FsShell();
// Tests for put
String[] argv = new String[] { "-put", "-f", localfilepath, testdir };
int res = ToolRunner.run(shell, argv);
assertEquals(SUCCESS, res, "put -f is not working");
argv = new String[] { "-put", localfilepath, testdir };
res = ToolRunner.run(shell, argv);
assertEquals(ERROR, res, "put command itself is able to overwrite the file");
// Tests for copyFromLocal
argv = new String[] { "-copyFromLocal", "-f", localfilepath, testdir };
res = ToolRunner.run(shell, argv);
assertEquals(SUCCESS, res, "copyFromLocal -f is not working");
argv = new String[] { "-copyFromLocal", localfilepath, testdir };
res = ToolRunner.run(shell, argv);
assertEquals(ERROR, res,
"copyFromLocal command itself is able to overwrite the file");
// Tests for cp
argv = new String[] { "-cp", "-f", localfilepath, testdir };
res = ToolRunner.run(shell, argv);
assertEquals(SUCCESS, res, "cp -f is not working");
argv = new String[] { "-cp", localfilepath, testdir };
res = ToolRunner.run(shell, argv);
assertEquals(ERROR, res, "cp command itself is able to overwrite the file");
} finally {
if (null != shell)
shell.close();
if (localFile.exists())
localFile.delete();
}
}
/* [refs HDFS-5033]
*
* return a "Permission Denied" message instead of "No such file or Directory"
* when trying to put/copyFromLocal a file that doesn't have read access
*
*/
@Test
@Timeout(value = 30)
public void testCopyFromLocalWithPermissionDenied() throws Exception {
FsShell shell = null;
PrintStream bak = null;
final File localFile = new File(TEST_ROOT_DIR, "testFileWithNoReadPermissions");
final String localfilepath = new Path(localFile.getAbsolutePath()).toUri().toString();
final String testdir = "/tmp/TestDFSShell-CopyFromLocalWithPermissionDenied-"
+ counter.getAndIncrement();
final Path hdfsTestDir = new Path(testdir);
try {
dfs.mkdirs(hdfsTestDir);
localFile.createNewFile();
localFile.setReadable(false);
writeFile(dfs, new Path(testdir, "testFileForPut"));
shell = new FsShell();
// capture system error messages, snarfed from testErrOutPut()
bak = System.err;
ByteArrayOutputStream out = new ByteArrayOutputStream();
PrintStream tmp = new PrintStream(out);
System.setErr(tmp);
// Tests for put
String[] argv = new String[] { "-put", localfilepath, testdir };
int res = ToolRunner.run(shell, argv);
assertEquals(ERROR, res, "put is working");
String returned = out.toString();
assertTrue((returned.lastIndexOf("Permission denied") != -1),
" outputs Permission denied error message");
// Tests for copyFromLocal
argv = new String[] { "-copyFromLocal", localfilepath, testdir };
res = ToolRunner.run(shell, argv);
assertEquals(ERROR, res, "copyFromLocal -f is working");
returned = out.toString();
assertTrue((returned.lastIndexOf("Permission denied") != -1),
" outputs Permission denied error message");
} finally {
if (bak != null) {
System.setErr(bak);
}
if (null != shell)
shell.close();
if (localFile.exists())
localFile.delete();
dfs.delete(hdfsTestDir, true);
}
}
  /**
   * Test -setrep with a replication factor that is too low. We have to test
   * this here because the mini-miniCluster used with testHDFSConf.xml uses a
   * replication factor of 1 (for good reason).
   */
  @Test
  @Timeout(value = 30)
  public void testSetrepLow() throws Exception {
    Configuration conf = new Configuration();
    // Require at least two replicas so "-setrep 1" must be rejected.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, 2);
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    MiniDFSCluster cluster = builder.numDataNodes(2).format(true).build();
    FsShell shell = new FsShell(conf);
    cluster.waitActive();
    final String testdir = "/tmp/TestDFSShell-testSetrepLow";
    final Path hdfsFile = new Path(testdir, "testFileForSetrepLow");
    final PrintStream origOut = System.out;
    final PrintStream origErr = System.err;
    try {
      final FileSystem fs = cluster.getFileSystem();
      assertTrue(fs.mkdirs(new Path(testdir)), "Unable to create test directory");
      fs.create(hdfsFile, true).close();
      // Capture the command output so we can examine it
      final ByteArrayOutputStream bao = new ByteArrayOutputStream();
      final PrintStream capture = new PrintStream(bao);
      System.setOut(capture);
      System.setErr(capture);
      final String[] argv = new String[] { "-setrep", "1", hdfsFile.toString() };
      try {
        assertEquals(1, shell.run(argv),
            "Command did not return the expected exit code");
      } finally {
        // Restore the real streams before asserting on the captured text.
        System.setOut(origOut);
        System.setErr(origErr);
      }
      assertTrue(bao.toString().startsWith(
          "setrep: Requested replication factor of 1 is less than " +
              "the required minimum of 2 for /tmp/TestDFSShell-" +
              "testSetrepLow/testFileForSetrepLow"),
          "Error message is not the expected error message"
              + bao.toString());
    } finally {
      shell.close();
      cluster.shutdown();
    }
  }
// setrep for file and directory.
@Test
@Timeout(value = 30)
public void testSetrep() throws Exception {
FsShell shell = null;
final String testdir1 = "/tmp/TestDFSShell-testSetrep-" + counter.getAndIncrement();
final String testdir2 = testdir1 + "/nestedDir";
final Path hdfsFile1 = new Path(testdir1, "testFileForSetrep");
final Path hdfsFile2 = new Path(testdir2, "testFileForSetrep");
final Short oldRepFactor = new Short((short) 2);
final Short newRepFactor = new Short((short) 3);
try {
String[] argv;
assertThat(dfs.mkdirs(new Path(testdir2))).isEqualTo(true);
shell = new FsShell(dfs.getConf());
dfs.create(hdfsFile1, true).close();
dfs.create(hdfsFile2, true).close();
// Tests for setrep on a file.
argv = new String[] { "-setrep", newRepFactor.toString(), hdfsFile1.toString() };
assertThat(shell.run(argv)).isEqualTo(SUCCESS);
assertThat(dfs.getFileStatus(hdfsFile1).getReplication()).isEqualTo(newRepFactor);
assertThat(dfs.getFileStatus(hdfsFile2).getReplication()).isEqualTo(oldRepFactor);
// Tests for setrep
// Tests for setrep on a directory and make sure it is applied recursively.
argv = new String[] { "-setrep", newRepFactor.toString(), testdir1 };
assertThat(shell.run(argv)).isEqualTo(SUCCESS);
assertThat(dfs.getFileStatus(hdfsFile1).getReplication()).isEqualTo(newRepFactor);
assertThat(dfs.getFileStatus(hdfsFile2).getReplication()).isEqualTo(newRepFactor);
} finally {
if (shell != null) {
shell.close();
}
}
}
/**
* Delete a file optionally configuring trash on the server and client.
*/
private void deleteFileUsingTrash(
boolean serverTrash, boolean clientTrash) throws Exception {
// Run a miniCluster, optionally with trash enabled on the server
Configuration serverConf = new HdfsConfiguration();
if (serverTrash) {
serverConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
}
MiniDFSCluster cluster = new MiniDFSCluster.Builder(serverConf)
.numDataNodes(1).format(true).build();
Configuration clientConf = new Configuration(serverConf);
// Create a client, optionally with trash enabled
if (clientTrash) {
clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
} else {
clientConf.setLong(FS_TRASH_INTERVAL_KEY, 0);
}
FsShell shell = new FsShell(clientConf);
FileSystem fs = null;
try {
// Create and delete a file
fs = cluster.getFileSystem();
// Use a separate tmp dir for each invocation.
final String testdir = "/tmp/TestDFSShell-deleteFileUsingTrash-" +
counter.getAndIncrement();
writeFile(fs, new Path(testdir, "foo"));
final String testFile = testdir + "/foo";
final String trashFile = shell.getCurrentTrashDir() + "/" + testFile;
String[] argv = new String[] { "-rm", testFile };
int res = ToolRunner.run(shell, argv);
assertEquals(0, res, "rm failed");
if (serverTrash) {
// If the server config was set we should use it unconditionally
assertTrue(fs.exists(new Path(trashFile)), "File not in trash");
} else if (clientTrash) {
// If the server config was not set but the client config was
// set then we should use it
assertTrue(fs.exists(new Path(trashFile)), "File not in trashed");
} else {
// If neither was set then we should not have trashed the file
assertFalse(fs.exists(new Path(testFile)), "File was not removed");
assertFalse(fs.exists(new Path(trashFile)), "File was trashed");
}
} finally {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
@Timeout(value = 300)
public void testAppendToFile() throws Exception {
final int inputFileLength = 1024 * 1024;
File testRoot = new File(TEST_ROOT_DIR, "testAppendtoFileDir");
testRoot.mkdirs();
File file1 = new File(testRoot, "file1");
File file2 = new File(testRoot, "file2");
createLocalFileWithRandomData(inputFileLength, file1);
createLocalFileWithRandomData(inputFileLength, file2);
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
try {
FileSystem dfs = cluster.getFileSystem();
assertTrue(dfs instanceof DistributedFileSystem, "Not a HDFS: " + dfs.getUri());
// Run appendToFile once, make sure that the target file is
// created and is of the right size.
Path remoteFile = new Path("/remoteFile");
FsShell shell = new FsShell();
shell.setConf(conf);
String[] argv = new String[] {
"-appendToFile", file1.toString(), file2.toString(), remoteFile.toString() };
int res = ToolRunner.run(shell, argv);
assertThat(res).isEqualTo(0);
assertThat(dfs.getFileStatus(remoteFile).getLen()).isEqualTo((long) inputFileLength * 2);
// Run the command once again and make sure that the target file
// size has been doubled.
res = ToolRunner.run(shell, argv);
assertThat(res).isEqualTo(0);
assertThat(dfs.getFileStatus(remoteFile).getLen()).isEqualTo((long) inputFileLength * 4);
} finally {
cluster.shutdown();
}
}
@Test
@Timeout(value = 300)
public void testAppendToFileBadArgs() throws Exception {
final int inputFileLength = 1024 * 1024;
File testRoot = new File(TEST_ROOT_DIR, "testAppendToFileBadArgsDir");
testRoot.mkdirs();
File file1 = new File(testRoot, "file1");
createLocalFileWithRandomData(inputFileLength, file1);
// Run appendToFile with insufficient arguments.
FsShell shell = new FsShell();
shell.setConf(dfs.getConf());
String[] argv = new String[] {
"-appendToFile", file1.toString() };
int res = ToolRunner.run(shell, argv);
assertThat(res).isNotEqualTo(0);
// Mix stdin with other input files. Must fail.
Path remoteFile = new Path("/remoteFile");
argv = new String[] {
"-appendToFile", file1.toString(), "-", remoteFile.toString() };
res = ToolRunner.run(shell, argv);
assertThat(res).isNotEqualTo(0);
}
@SuppressWarnings("checkstyle:MethodLength")
@Test
@Timeout(value = 300)
public void testAppendToFileWithOptionN() throws Exception {
final int inputFileLength = 1024 * 1024;
File testRoot = new File(TEST_ROOT_DIR, "testAppendToFileWithOptionN");
testRoot.mkdirs();
File file1 = new File(testRoot, "file1");
createLocalFileWithRandomData(inputFileLength, file1);
Configuration conf = new HdfsConfiguration();
try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(6).build()) {
cluster.waitActive();
FileSystem hdfs = cluster.getFileSystem();
assertTrue(hdfs instanceof DistributedFileSystem, "Not a HDFS: " + hdfs.getUri());
// Run appendToFile with option n by replica policy once, make sure that the target file is
// created and is of the right size and block number is correct.
String dir = "/replica";
boolean mkdirs = hdfs.mkdirs(new Path(dir));
assertTrue(mkdirs, "Mkdir fail");
Path remoteFile = new Path(dir + "/remoteFile");
FsShell shell = new FsShell();
shell.setConf(conf);
String[] argv = new String[] {
"-appendToFile", "-n", file1.toString(), remoteFile.toString() };
int res = ToolRunner.run(shell, argv);
assertEquals(0, res, "Run appendToFile command fail");
FileStatus fileStatus = hdfs.getFileStatus(remoteFile);
assertEquals(inputFileLength, fileStatus.getLen(),
"File size should be " + inputFileLength);
BlockLocation[] fileBlockLocations =
hdfs.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());
assertEquals(1, fileBlockLocations.length, "Block Num should be 1");
// Run appendToFile with option n by replica policy again and
// make sure that the target file size has been doubled and block number has been doubled.
res = ToolRunner.run(shell, argv);
assertEquals(0, res, "Run appendToFile command fail");
fileStatus = hdfs.getFileStatus(remoteFile);
assertEquals(inputFileLength * 2, fileStatus.getLen(),
"File size should be " + inputFileLength * 2);
fileBlockLocations = hdfs.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());
assertEquals(2, fileBlockLocations.length, "Block Num should be 2");
// Before run appendToFile with option n by ec policy, set ec policy for the dir.
dir = "/ecPolicy";
final String ecPolicyName = "RS-6-3-1024k";
mkdirs = hdfs.mkdirs(new Path(dir));
assertTrue(mkdirs, "Mkdir fail");
((DistributedFileSystem) hdfs).setErasureCodingPolicy(new Path(dir), ecPolicyName);
ErasureCodingPolicy erasureCodingPolicy =
((DistributedFileSystem) hdfs).getErasureCodingPolicy(new Path(dir));
assertEquals(ecPolicyName, erasureCodingPolicy.getName(), "Set ec policy fail");
// Run appendToFile with option n by ec policy once, make sure that the target file is
// created and is of the right size and block group number is correct.
remoteFile = new Path(dir + "/remoteFile");
argv = new String[] {
"-appendToFile", "-n", file1.toString(), remoteFile.toString() };
res = ToolRunner.run(shell, argv);
assertEquals(0, res, "Run appendToFile command fail");
fileStatus = hdfs.getFileStatus(remoteFile);
assertEquals(inputFileLength, fileStatus.getLen(),
"File size should be " + inputFileLength);
fileBlockLocations = hdfs.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());
assertEquals(1, fileBlockLocations.length, "Block Group Num should be 1");
// Run appendToFile without option n by ec policy again and make sure that
// append on EC file without new block must fail.
argv = new String[] {
"-appendToFile", file1.toString(), remoteFile.toString() };
res = ToolRunner.run(shell, argv);
assertTrue(res != 0, "Run appendToFile command must fail");
// Run appendToFile with option n by ec policy again and
// make sure that the target file size has been doubled
// and block group number has been doubled.
argv = new String[] {
"-appendToFile", "-n", file1.toString(), remoteFile.toString() };
res = ToolRunner.run(shell, argv);
assertEquals(0, res, "Run appendToFile command fail");
fileStatus = hdfs.getFileStatus(remoteFile);
assertEquals(inputFileLength * 2, fileStatus.getLen(),
"File size should be " + inputFileLength * 2);
fileBlockLocations = hdfs.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());
assertEquals(2, fileBlockLocations.length, "Block Group Num should be 2");
}
}
@Test
@Timeout(value = 30)
public void testSetXAttrPermission() throws Exception {
UserGroupInformation user = UserGroupInformation.
createUserForTesting("user", new String[] {"mygroup"});
PrintStream bak = null;
try {
Path p = new Path("/foo");
dfs.mkdirs(p);
bak = System.err;
final FsShell fshell = new FsShell(dfs.getConf());
final ByteArrayOutputStream out = new ByteArrayOutputStream();
System.setErr(new PrintStream(out));
// No permission to write xattr
dfs.setPermission(p, new FsPermission((short) 0700));
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
int ret = ToolRunner.run(fshell, new String[]{
"-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
assertEquals(1, ret, "Returned should be 1");
String str = out.toString();
assertTrue(str.indexOf("Permission denied") != -1,
"Permission denied printed");
out.reset();
return null;
}
});
int ret = ToolRunner.run(fshell, new String[]{
"-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
assertEquals(0, ret, "Returned should be 0");
out.reset();
// No permission to read and remove
dfs.setPermission(p, new FsPermission((short) 0750));
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
// Read
int ret = ToolRunner.run(fshell, new String[]{
"-getfattr", "-n", "user.a1", "/foo"});
assertEquals(1, ret, "Returned should be 1");
String str = out.toString();
assertTrue(str.indexOf("Permission denied") != -1,
"Permission denied printed");
out.reset();
// Remove
ret = ToolRunner.run(fshell, new String[]{
"-setfattr", "-x", "user.a1", "/foo"});
assertEquals(1, ret, "Returned should be 1");
str = out.toString();
assertTrue(str.indexOf("Permission denied") != -1,
"Permission denied printed");
out.reset();
return null;
}
});
} finally {
if (bak != null) {
System.setErr(bak);
}
}
}
/* HDFS-6413 xattr names erroneously handled as case-insensitive */
@Test
@Timeout(value = 30)
public void testSetXAttrCaseSensitivity() throws Exception {
PrintStream bak = null;
try {
Path p = new Path("/mydir");
dfs.mkdirs(p);
bak = System.err;
final FsShell fshell = new FsShell(dfs.getConf());
final ByteArrayOutputStream out = new ByteArrayOutputStream();
System.setOut(new PrintStream(out));
doSetXattr(out, fshell,
new String[] {"-setfattr", "-n", "User.Foo", "/mydir"},
new String[] {"-getfattr", "-d", "/mydir"},
new String[] {"user.Foo"},
new String[] {});
doSetXattr(out, fshell,
new String[] {"-setfattr", "-n", "user.FOO", "/mydir"},
new String[] {"-getfattr", "-d", "/mydir"},
new String[] {"user.Foo", "user.FOO"},
new String[] {});
doSetXattr(out, fshell,
new String[] {"-setfattr", "-n", "USER.foo", "/mydir"},
new String[] {"-getfattr", "-d", "/mydir"},
new String[] {"user.Foo", "user.FOO", "user.foo"},
new String[] {});
doSetXattr(out, fshell,
new String[] {"-setfattr", "-n", "USER.fOo", "-v", "myval", "/mydir"},
new String[] {"-getfattr", "-d", "/mydir"},
new String[] {"user.Foo", "user.FOO", "user.foo", "user.fOo=\"myval\""},
new String[] {"user.Foo=", "user.FOO=", "user.foo="});
doSetXattr(out, fshell,
new String[] {"-setfattr", "-x", "useR.foo", "/mydir"},
new String[] {"-getfattr", "-d", "/mydir"},
new String[] {"user.Foo", "user.FOO"},
new String[] {"foo"});
doSetXattr(out, fshell,
new String[] {"-setfattr", "-x", "USER.FOO", "/mydir"},
new String[] {"-getfattr", "-d", "/mydir"},
new String[] {"user.Foo"},
new String[] {"FOO"});
doSetXattr(out, fshell,
new String[] {"-setfattr", "-x", "useR.Foo", "/mydir"},
new String[] {"-getfattr", "-n", "User.Foo", "/mydir"},
new String[] {},
new String[] {"Foo"});
} finally {
if (bak != null) {
System.setOut(bak);
}
}
}
private void doSetXattr(ByteArrayOutputStream out, FsShell fshell,
String[] setOp, String[] getOp, String[] expectArr,
String[] dontExpectArr) throws Exception {
int ret = ToolRunner.run(fshell, setOp);
out.reset();
ret = ToolRunner.run(fshell, getOp);
final String str = out.toString();
for (int i = 0; i < expectArr.length; i++) {
final String expect = expectArr[i];
final StringBuilder sb = new StringBuilder
("Incorrect results from getfattr. Expected: ");
sb.append(expect).append(" Full Result: ");
sb.append(str);
assertTrue(str.indexOf(expect) != -1, sb.toString());
}
for (int i = 0; i < dontExpectArr.length; i++) {
String dontExpect = dontExpectArr[i];
final StringBuilder sb = new StringBuilder
("Incorrect results from getfattr. Didn't Expect: ");
sb.append(dontExpect).append(" Full Result: ");
sb.append(str);
assertTrue(str.indexOf(dontExpect) == -1, sb.toString());
}
out.reset();
}
/**
*
* Test to make sure that user namespace xattrs can be set only if path has
* access and for sticky directorries, only owner/privileged user can write.
* Trusted namespace xattrs can be set only with privileged users.
*
* As user1: Create a directory (/foo) as user1, chown it to user1 (and
* user1's group), grant rwx to "other".
*
* As user2: Set an xattr (should pass with path access).
*
* As user1: Set an xattr (should pass).
*
* As user2: Read the xattr (should pass). Remove the xattr (should pass with
* path access).
*
* As user1: Read the xattr (should pass). Remove the xattr (should pass).
*
* As user1: Change permissions only to owner
*
* As User2: Set an Xattr (Should fail set with no path access) Remove an
* Xattr (Should fail with no path access)
*
* As SuperUser: Set an Xattr with Trusted (Should pass)
*/
@SuppressWarnings("checkstyle:MethodLength")
@Test
@Timeout(value = 30)
public void testSetXAttrPermissionAsDifferentOwner() throws Exception {
final String root = "/testSetXAttrPermissionAsDifferentOwner";
final String USER1 = "user1";
final String GROUP1 = "supergroup";
final UserGroupInformation user1 = UserGroupInformation.
createUserForTesting(USER1, new String[] {GROUP1});
final UserGroupInformation user2 = UserGroupInformation.
createUserForTesting("user2", new String[] {"mygroup2"});
final UserGroupInformation SUPERUSER = UserGroupInformation.getCurrentUser();
PrintStream bak = null;
try {
dfs.mkdirs(new Path(root));
dfs.setOwner(new Path(root), USER1, GROUP1);
bak = System.err;
final FsShell fshell = new FsShell(dfs.getConf());
final ByteArrayOutputStream out = new ByteArrayOutputStream();
System.setErr(new PrintStream(out));
//Test 1. Let user1 be owner for /foo
user1.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final int ret = ToolRunner.run(fshell, new String[]{
"-mkdir", root + "/foo"});
assertEquals(0, ret, "Return should be 0");
out.reset();
return null;
}
});
//Test 2. Give access to others
user1.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
// Give access to "other"
final int ret = ToolRunner.run(fshell, new String[]{
"-chmod", "707", root + "/foo"});
assertEquals(0, ret, "Return should be 0");
out.reset();
return null;
}
});
// Test 3. Should be allowed to write xattr if there is a path access to
// user (user2).
user2.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final int ret = ToolRunner.run(fshell, new String[]{
"-setfattr", "-n", "user.a1", "-v", "1234", root + "/foo"});
assertEquals(0, ret, "Returned should be 0");
out.reset();
return null;
}
});
//Test 4. There should be permission to write xattr for
// the owning user with write permissions.
user1.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final int ret = ToolRunner.run(fshell, new String[]{
"-setfattr", "-n", "user.a1", "-v", "1234", root + "/foo"});
assertEquals(0, ret, "Returned should be 0");
out.reset();
return null;
}
});
// Test 5. There should be permission to read non-owning user (user2) if
// there is path access to that user and also can remove.
user2.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
// Read
int ret = ToolRunner.run(fshell, new String[] { "-getfattr", "-n",
"user.a1", root + "/foo" });
assertEquals(0, ret, "Returned should be 0");
out.reset();
// Remove
ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-x",
"user.a1", root + "/foo" });
assertEquals(0, ret, "Returned should be 0");
out.reset();
return null;
}
});
// Test 6. There should be permission to read/remove for
// the owning user with path access.
user1.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
return null;
}
});
// Test 7. Change permission to have path access only to owner(user1)
user1.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
// Give access to "other"
final int ret = ToolRunner.run(fshell, new String[]{
"-chmod", "700", root + "/foo"});
assertEquals(0, ret, "Return should be 0");
out.reset();
return null;
}
});
// Test 8. There should be no permissions to set for
// the non-owning user with no path access.
user2.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
// set
int ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-n",
"user.a2", root + "/foo" });
assertEquals(1, ret, "Returned should be 1");
final String str = out.toString();
assertTrue(str.indexOf("Permission denied") != -1,
"Permission denied printed");
out.reset();
return null;
}
});
// Test 9. There should be no permissions to remove for
// the non-owning user with no path access.
user2.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
// set
int ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-x",
"user.a2", root + "/foo" });
assertEquals(1, ret, "Returned should be 1");
final String str = out.toString();
assertTrue(str.indexOf("Permission denied") != -1,
"Permission denied printed");
out.reset();
return null;
}
});
// Test 10. Superuser should be allowed to set with trusted namespace
SUPERUSER.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
// set
int ret = ToolRunner.run(fshell, new String[] { "-setfattr", "-n",
"trusted.a3", root + "/foo" });
assertEquals(0, ret, "Returned should be 0");
out.reset();
return null;
}
});
} finally {
if (bak != null) {
System.setErr(bak);
}
}
}
/*
* 1. Test that CLI throws an exception and returns non-0 when user does
* not have permission to read an xattr.
* 2. Test that CLI throws an exception and returns non-0 when a non-existent
* xattr is requested.
*/
@Test
@Timeout(value = 120)
public void testGetFAttrErrors() throws Exception {
final UserGroupInformation user = UserGroupInformation.
createUserForTesting("user", new String[] {"mygroup"});
PrintStream bakErr = null;
try {
final Path p = new Path("/testGetFAttrErrors");
dfs.mkdirs(p);
bakErr = System.err;
final FsShell fshell = new FsShell(dfs.getConf());
final ByteArrayOutputStream out = new ByteArrayOutputStream();
System.setErr(new PrintStream(out));
// No permission for "other".
dfs.setPermission(p, new FsPermission((short) 0700));
{
final int ret = ToolRunner.run(fshell, new String[] {
"-setfattr", "-n", "user.a1", "-v", "1234", p.toString()});
assertEquals(0, ret, "Returned should be 0");
out.reset();
}
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
int ret = ToolRunner.run(fshell, new String[] {
"-getfattr", "-n", "user.a1", p.toString()});
String str = out.toString();
assertTrue(str.indexOf("1234") == -1,
"xattr value was incorrectly returned");
out.reset();
return null;
}
});
{
final int ret = ToolRunner.run(fshell, new String[]{
"-getfattr", "-n", "user.nonexistent", p.toString()});
String str = out.toString();
assertTrue(str.indexOf("getfattr: " + XAttrNotFoundException.DEFAULT_EXCEPTION_MSG)
>= 0, "xattr value was incorrectly returned");
out.reset();
}
} finally {
if (bakErr != null) {
System.setErr(bakErr);
}
}
}
/**
* Test that the server trash configuration is respected when
* the client configuration is not set.
*/
@Test
@Timeout(value = 30)
public void testServerConfigRespected() throws Exception {
deleteFileUsingTrash(true, false);
}
/**
* Test that server trash configuration is respected even when the
* client configuration is set.
*/
@Test
@Timeout(value = 30)
public void testServerConfigRespectedWithClient() throws Exception {
deleteFileUsingTrash(true, true);
}
/**
* Test that the client trash configuration is respected when
* the server configuration is not set.
*/
@Test
@Timeout(value = 30)
public void testClientConfigRespected() throws Exception {
deleteFileUsingTrash(false, true);
}
/**
* Test that trash is disabled by default.
*/
@Test
@Timeout(value = 30)
public void testNoTrashConfig() throws Exception {
deleteFileUsingTrash(false, false);
}
@Test
@Timeout(value = 30)
public void testListReserved() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs = cluster.getFileSystem();
FsShell shell = new FsShell();
shell.setConf(conf);
FileStatus test = fs.getFileStatus(new Path("/.reserved"));
assertEquals(FSDirectory.DOT_RESERVED_STRING, test.getPath().getName());
// Listing /.reserved/ should show 2 items: raw and .inodes
FileStatus[] stats = fs.listStatus(new Path("/.reserved"));
assertEquals(2, stats.length);
assertEquals(FSDirectory.DOT_INODES_STRING, stats[0].getPath().getName());
assertEquals(conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY),
stats[0].getGroup());
assertEquals("raw", stats[1].getPath().getName());
assertEquals(conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY),
stats[1].getGroup());
// Listing / should not show /.reserved
stats = fs.listStatus(new Path("/"));
assertEquals(0, stats.length);
// runCmd prints error into System.err, thus verify from there.
PrintStream syserr = System.err;
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
PrintStream ps = new PrintStream(baos);
System.setErr(ps);
try {
runCmd(shell, "-ls", "/.reserved");
assertEquals(0, baos.toString().length());
runCmd(shell, "-ls", "/.reserved/raw/.reserved");
assertTrue(baos.toString().contains("No such file or directory"));
} finally {
System.setErr(syserr);
cluster.shutdown();
}
}
@Test
@Timeout(value = 30)
public void testMkdirReserved() throws IOException {
try {
dfs.mkdirs(new Path("/.reserved"));
fail("Can't mkdir /.reserved");
} catch (Exception e) {
// Expected, HadoopIllegalArgumentException thrown from remote
assertTrue(e.getMessage().contains("\".reserved\" is reserved"));
}
}
@Test
@Timeout(value = 30)
public void testRmReserved() throws IOException {
try {
dfs.delete(new Path("/.reserved"), true);
fail("Can't delete /.reserved");
} catch (Exception e) {
// Expected, InvalidPathException thrown from remote
assertTrue(e.getMessage().contains("Invalid path name /.reserved"));
}
}
@Test //(timeout = 30000)
public void testCopyReserved() throws IOException {
final File localFile = new File(TEST_ROOT_DIR, "testFileForPut");
localFile.createNewFile();
final String localfilepath =
new Path(localFile.getAbsolutePath()).toUri().toString();
try {
dfs.copyFromLocalFile(new Path(localfilepath), new Path("/.reserved"));
fail("Can't copyFromLocal to /.reserved");
} catch (Exception e) {
// Expected, InvalidPathException thrown from remote
assertTrue(e.getMessage().contains("Invalid path name /.reserved"));
}
final String testdir = GenericTestUtils.getTempPath(
"TestDFSShell-testCopyReserved");
final Path hdfsTestDir = new Path(testdir);
writeFile(dfs, new Path(testdir, "testFileForPut"));
final Path src = new Path(hdfsTestDir, "srcfile");
dfs.create(src).close();
assertTrue(dfs.exists(src));
// runCmd prints error into System.err, thus verify from there.
PrintStream syserr = System.err;
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
PrintStream ps = new PrintStream(baos);
System.setErr(ps);
try {
FsShell shell = new FsShell(dfs.getConf());
runCmd(shell, "-cp", src.toString(), "/.reserved");
assertTrue(baos.toString().contains("Invalid path name /.reserved"));
} finally {
System.setErr(syserr);
}
}
@Test
@Timeout(value = 30)
public void testChmodReserved() throws IOException {
// runCmd prints error into System.err, thus verify from there.
PrintStream syserr = System.err;
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
PrintStream ps = new PrintStream(baos);
System.setErr(ps);
try {
FsShell shell = new FsShell(dfs.getConf());
runCmd(shell, "-chmod", "777", "/.reserved");
assertTrue(baos.toString().contains("Invalid path name /.reserved"));
} finally {
System.setErr(syserr);
}
}
@Test
@Timeout(value = 30)
public void testChownReserved() throws IOException {
// runCmd prints error into System.err, thus verify from there.
PrintStream syserr = System.err;
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
PrintStream ps = new PrintStream(baos);
System.setErr(ps);
try {
FsShell shell = new FsShell(dfs.getConf());
runCmd(shell, "-chown", "user1", "/.reserved");
assertTrue(baos.toString().contains("Invalid path name /.reserved"));
} finally {
System.setErr(syserr);
}
}
@Test
@Timeout(value = 30)
public void testSymLinkReserved() throws IOException {
try {
dfs.createSymlink(new Path("/.reserved"), new Path("/rl1"), false);
fail("Can't create symlink to /.reserved");
} catch (Exception e) {
// Expected, InvalidPathException thrown from remote
assertTrue(e.getMessage().contains("Invalid target name: /.reserved"));
}
}
@Test
@Timeout(value = 30)
public void testSnapshotReserved() throws IOException {
final Path reserved = new Path("/.reserved");
try {
dfs.allowSnapshot(reserved);
fail("Can't allow snapshot on /.reserved");
} catch (FileNotFoundException e) {
assertTrue(e.getMessage().contains("Directory does not exist"));
}
try {
dfs.createSnapshot(reserved, "snap");
fail("Can't create snapshot on /.reserved");
} catch (FileNotFoundException e) {
assertTrue(e.getMessage().contains("Directory/File does not exist"));
}
}
}
| TestGetRunner |
java | apache__kafka | trogdor/src/main/java/org/apache/kafka/trogdor/fault/NetworkPartitionFaultWorker.java | {
"start": 1370,
"end": 3453
} | class ____ implements TaskWorker {
private static final Logger log = LoggerFactory.getLogger(NetworkPartitionFaultWorker.class);
private final String id;
private final List<Set<String>> partitionSets;
private WorkerStatusTracker status;
public NetworkPartitionFaultWorker(String id, List<Set<String>> partitionSets) {
this.id = id;
this.partitionSets = partitionSets;
}
@Override
public void start(Platform platform, WorkerStatusTracker status,
KafkaFutureImpl<String> errorFuture) throws Exception {
log.info("Activating NetworkPartitionFault {}.", id);
this.status = status;
this.status.update(new TextNode("creating network partition " + id));
runIptablesCommands(platform, "-A");
this.status.update(new TextNode("created network partition " + id));
}
@Override
public void stop(Platform platform) throws Exception {
log.info("Deactivating NetworkPartitionFault {}.", id);
this.status.update(new TextNode("removing network partition " + id));
runIptablesCommands(platform, "-D");
this.status.update(new TextNode("removed network partition " + id));
}
private void runIptablesCommands(Platform platform, String iptablesAction) throws Exception {
Node curNode = platform.curNode();
Topology topology = platform.topology();
TreeSet<String> toBlock = new TreeSet<>();
for (Set<String> partitionSet : partitionSets) {
if (!partitionSet.contains(curNode.name())) {
toBlock.addAll(partitionSet);
}
}
for (String nodeName : toBlock) {
Node node = topology.node(nodeName);
InetAddress addr = InetAddress.getByName(node.hostname());
platform.runCommand(new String[] {
"sudo", "iptables", iptablesAction, "INPUT", "-p", "tcp", "-s",
addr.getHostAddress(), "-j", "DROP", "-m", "comment", "--comment", nodeName
});
}
}
}
| NetworkPartitionFaultWorker |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/action/GetLifecycleResponseTests.java | {
"start": 1558,
"end": 4566
} | class ____ extends ESTestCase {
protected Response createTestInstance() {
String randomPrefix = randomAlphaOfLength(5);
List<LifecyclePolicyResponseItem> responseItems = new ArrayList<>();
for (int i = 0; i < randomIntBetween(0, 2); i++) {
responseItems.add(
new LifecyclePolicyResponseItem(
randomTestLifecyclePolicy(randomPrefix + i),
randomNonNegativeLong(),
randomAlphaOfLength(8),
randomUsage()
)
);
}
return new Response(responseItems);
}
@SuppressWarnings("unchecked")
public void testToXContent() throws IOException {
Response response = createTestInstance();
XContentBuilder builder = jsonBuilder().prettyPrint();
response.toXContentChunked(EMPTY_PARAMS).forEachRemaining(xcontent -> {
try {
xcontent.toXContent(builder, EMPTY_PARAMS);
} catch (IOException e) {
logger.error(e.getMessage(), e);
fail(e.getMessage());
}
});
Map<String, Object> xContentMap = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2();
assertThat(xContentMap.size(), is(response.getPolicies().size()));
for (LifecyclePolicyResponseItem policy : response.getPolicies()) {
Map<String, Object> policyAsXContent = (Map<String, Object>) xContentMap.get(policy.getLifecyclePolicy().getName());
assertThat(policyAsXContent, notNullValue());
assertThat(policyAsXContent.get("version"), is(policy.getVersion()));
assertThat(policyAsXContent.get("modified_date"), is(policy.getModifiedDate()));
assertThat(policyAsXContent.get("policy"), notNullValue());
}
}
public void testChunkCount() {
Response response = createTestInstance();
// we have 2 chunks surrounding the policies - one for { and } respectively
// we have one chunk / policy
AbstractChunkedSerializingTestCase.assertChunkCount(response, ignored -> 2 + response.getPolicies().size());
}
protected NamedWriteableRegistry getNamedWriteableRegistry() {
return new NamedWriteableRegistry(
List.of(
new NamedWriteableRegistry.Entry(LifecycleAction.class, MockAction.NAME, MockAction::new),
new NamedWriteableRegistry.Entry(LifecycleType.class, TestLifecycleType.TYPE, in -> TestLifecycleType.INSTANCE)
)
);
}
public static ItemUsage randomUsage() {
return new ItemUsage(randomStringList(), randomStringList(), randomStringList());
}
@Nullable
private static List<String> randomStringList() {
if (randomBoolean()) {
return null;
} else {
return randomList(0, 1, () -> randomAlphaOfLengthBetween(2, 10));
}
}
}
| GetLifecycleResponseTests |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/SinkEmptyMulticast.java | {
"start": 5235,
"end": 5519
} | interface ____<T> extends InnerProducer<T> {
//API must be compatible with Operators.MonoInnerProducerBase
void error(Throwable t);
void complete(T value);
void complete();
boolean isCancelled();
}
//VoidInner is optimized for not storing request / value
final static | Inner |
java | google__dagger | dagger-compiler/main/java/dagger/internal/codegen/xprocessing/XTypeElements.java | {
"start": 8800,
"end": 9550
} | class ____<T1 extends Bar, T2 extends List<T1>>`, we want the
// bounds for `T2` to show up as `List<T1>` and not as `List<T1 extends Bar>`.
TypeVariableName typeVariableName = (TypeVariableName) typeName;
return typeVariableName.bounds.isEmpty() || depth != 1
? typeVariableName.name
: String.format(
"%s extends %s",
typeVariableName.name,
typeVariableName.bounds.stream()
.map(bound -> toStableString(bound, visited, depth + 1))
.collect(joining(" & ")));
} else {
// For all other types (e.g. primitive types) just use the TypeName's toString()
return typeName.toString();
}
}
private XTypeElements() {}
}
| Foo |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySchedulerMixedModeAbsoluteAndWeight.java | {
"start": 2976,
"end": 3892
} | class ____
extends JerseyTestBase {
private static final String EXPECTED_FILE_TMPL = "webapp/mixed-%s-%s.json";
public TestRMWebServicesCapacitySchedulerMixedModeAbsoluteAndWeight() {
backupSchedulerConfigFileInTarget();
}
private MockRM rm;
private Configuration conf;
private RMWebServices rmWebServices;
@AfterAll
public static void afterClass() {
restoreSchedulerConfigFileInTarget();
}
@Override
protected Application configure() {
ResourceConfig config = new ResourceConfig();
config.register(RMWebServices.class);
config.register(new JerseyBinder());
config.register(GenericExceptionHandler.class);
config.register(TestRMWebServicesAppsModification.TestRMCustomAuthFilter.class);
config.register(new JettisonFeature()).register(JAXBContextResolver.class);
return config;
}
private | TestRMWebServicesCapacitySchedulerMixedModeAbsoluteAndWeight |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/java/typeutils/LambdaExtractionTest.java | {
"start": 4455,
"end": 4650
} | class ____ implements MapFunction<String, Integer> {
@Override
public Integer map(String value) {
return Integer.parseInt(value);
}
}
private | StaticMapper |
java | assertj__assertj-core | assertj-core/src/main/java/org/assertj/core/api/AutoCloseableSoftAssertionsProvider.java | {
"start": 803,
"end": 1108
} | interface ____ extends SoftAssertionsProvider, AutoCloseable {
/**
* Allows the SoftAssertionsProvider to be used as an {@link AutoCloseable} by calling {@link #assertAll()}.
*
* @see #assertAll
*/
@Override
default void close() {
assertAll();
}
}
| AutoCloseableSoftAssertionsProvider |
java | apache__dubbo | dubbo-config/dubbo-config-api/src/main/java/org/apache/dubbo/config/metadata/MetadataServiceURLParamsMetadataCustomizer.java | {
"start": 2265,
"end": 4877
} | class ____ implements ServiceInstanceCustomizer {
@Override
public void customize(ServiceInstance serviceInstance, ApplicationModel applicationModel) {
Map<String, String> metadata = serviceInstance.getMetadata();
String propertyName = resolveMetadataPropertyName(serviceInstance);
String propertyValue = resolveMetadataPropertyValue(applicationModel);
if (!isBlank(propertyName) && !isBlank(propertyValue)) {
metadata.put(propertyName, propertyValue);
}
String version = resolveMetadataServiceVersion(applicationModel);
metadata.put(METADATA_SERVICE_VERSION_NAME, version);
}
public static String resolveMetadataServiceVersion(ApplicationModel applicationModel) {
boolean needExportV2 = MetadataServiceVersionUtils.needExportV2(applicationModel);
String version;
if (needExportV2) {
version = MetadataServiceDelegationV2.VERSION;
} else {
version = MetadataServiceDelegation.VERSION;
}
return version;
}
private String resolveMetadataPropertyName(ServiceInstance serviceInstance) {
return METADATA_SERVICE_URL_PARAMS_PROPERTY_NAME;
}
private String resolveMetadataPropertyValue(ApplicationModel applicationModel) {
ModuleServiceRepository serviceRepository =
applicationModel.getInternalModule().getServiceRepository();
String key;
if (MetadataServiceVersionUtils.needExportV2(applicationModel)) {
key = BaseServiceMetadata.buildServiceKey(
MetadataServiceV2.class.getName(),
applicationModel.getApplicationName(),
MetadataServiceDelegationV2.VERSION);
} else {
// If MetadataService and MetadataServiceV2 are both exported, use v1 path for capacity.
// Client will use version and protocol to judge if it needs to refer v2 path.
key = BaseServiceMetadata.buildServiceKey(
MetadataService.class.getName(), applicationModel.getApplicationName(), MetadataService.VERSION);
}
ProviderModel providerModel = serviceRepository.lookupExportedService(key);
String metadataValue = "";
if (providerModel != null) {
List<URL> metadataURLs = providerModel.getServiceUrls();
if (CollectionUtils.isNotEmpty(metadataURLs)) {
metadataValue = getMetadataServiceParameter(metadataURLs.get(0));
}
}
return metadataValue;
}
}
| MetadataServiceURLParamsMetadataCustomizer |
java | apache__maven | its/core-it-suite/src/test/resources/mng-7474-session-scope/plugin/src/main/java/org/apache/maven/its/mng7474/TestComponent.java | {
"start": 1826,
"end": 2002
} | class ____ {
public static List<TestComponent> allInstances = new CopyOnWriteArrayList<>();
public TestComponent() {
allInstances.add(this);
}
}
| TestComponent |
java | apache__camel | components/camel-disruptor/src/main/java/org/apache/camel/component/disruptor/DisruptorNotStartedException.java | {
"start": 1004,
"end": 1456
} | class ____ extends Exception {
private static final long serialVersionUID = 1L;
public DisruptorNotStartedException() {
}
public DisruptorNotStartedException(String message) {
super(message);
}
public DisruptorNotStartedException(String message, Throwable cause) {
super(message, cause);
}
public DisruptorNotStartedException(Throwable cause) {
super(cause);
}
}
| DisruptorNotStartedException |
java | spring-projects__spring-security | oauth2/oauth2-client/src/main/java/org/springframework/security/oauth2/client/jackson2/OAuth2AuthorizationRequestDeserializer.java | {
"start": 1734,
"end": 3766
} | class ____ extends JsonDeserializer<OAuth2AuthorizationRequest> {
private static final StdConverter<JsonNode, AuthorizationGrantType> AUTHORIZATION_GRANT_TYPE_CONVERTER = new StdConverters.AuthorizationGrantTypeConverter();
@Override
public OAuth2AuthorizationRequest deserialize(JsonParser parser, DeserializationContext context)
throws IOException {
ObjectMapper mapper = (ObjectMapper) parser.getCodec();
JsonNode root = mapper.readTree(parser);
return deserialize(parser, mapper, root);
}
private OAuth2AuthorizationRequest deserialize(JsonParser parser, ObjectMapper mapper, JsonNode root)
throws JsonParseException {
AuthorizationGrantType authorizationGrantType = AUTHORIZATION_GRANT_TYPE_CONVERTER
.convert(JsonNodeUtils.findObjectNode(root, "authorizationGrantType"));
Builder builder = getBuilder(parser, authorizationGrantType);
builder.authorizationUri(JsonNodeUtils.findStringValue(root, "authorizationUri"));
builder.clientId(JsonNodeUtils.findStringValue(root, "clientId"));
builder.redirectUri(JsonNodeUtils.findStringValue(root, "redirectUri"));
builder.scopes(JsonNodeUtils.findValue(root, "scopes", JsonNodeUtils.STRING_SET, mapper));
builder.state(JsonNodeUtils.findStringValue(root, "state"));
builder.additionalParameters(
JsonNodeUtils.findValue(root, "additionalParameters", JsonNodeUtils.STRING_OBJECT_MAP, mapper));
builder.authorizationRequestUri(JsonNodeUtils.findStringValue(root, "authorizationRequestUri"));
builder.attributes(JsonNodeUtils.findValue(root, "attributes", JsonNodeUtils.STRING_OBJECT_MAP, mapper));
return builder.build();
}
private OAuth2AuthorizationRequest.Builder getBuilder(JsonParser parser,
AuthorizationGrantType authorizationGrantType) throws JsonParseException {
if (AuthorizationGrantType.AUTHORIZATION_CODE.equals(authorizationGrantType)) {
return OAuth2AuthorizationRequest.authorizationCode();
}
throw new JsonParseException(parser, "Invalid authorizationGrantType");
}
}
| OAuth2AuthorizationRequestDeserializer |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/pool/ExeptionSorterTest.java | {
"start": 268,
"end": 921
} | class ____ extends TestCase {
private DruidDataSource dataSource;
protected void setUp() throws Exception {
dataSource = new DruidDataSource();
dataSource.setDriver(new MyDriver());
}
protected void tearDown() throws Exception {
JdbcUtils.close(dataSource);
}
public void test_dataSource() throws Exception {
Method method = DruidDataSource.class.getDeclaredMethod("initExceptionSorter");
method.setAccessible(true);
method.invoke(dataSource);
assertEquals(dataSource.getExceptionSorter().getClass(), MySqlExceptionSorter.class);
}
public static | ExeptionSorterTest |
java | apache__flink | flink-table/flink-table-common/src/test/java/org/apache/flink/table/types/extraction/TypeInferenceExtractorTest.java | {
"start": 101687,
"end": 101943
} | class ____ implements Procedure {
public double[] call(Object procedureContext, Object... o) {
return new double[] {0.0};
}
}
@ProcedureHint(output = @DataTypeHint("STRING"))
private static | ExtractWithInputHintProcedure |
java | apache__rocketmq | store/src/main/java/org/apache/rocketmq/store/ha/HAConnectionState.java | {
"start": 849,
"end": 1189
} | enum ____ {
/**
* Ready to start connection.
*/
READY,
/**
* CommitLog consistency checking.
*/
HANDSHAKE,
/**
* Synchronizing data.
*/
TRANSFER,
/**
* Temporarily stop transferring.
*/
SUSPEND,
/**
* Connection shutdown.
*/
SHUTDOWN,
}
| HAConnectionState |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/bytearray/ByteArrayAssert_usingDefaultComparator_Test.java | {
"start": 1161,
"end": 1788
} | class ____ extends ByteArrayAssertBaseTest {
private ByteArrays arraysBefore;
@BeforeEach
void before() {
arraysBefore = getArrays(assertions);
}
@Override
protected ByteArrayAssert invoke_api_method() {
return assertions.usingComparator(alwaysEqual())
.usingDefaultComparator();
}
@Override
protected void verify_internal_effects() {
assertThat(getObjects(assertions).getComparator()).isNull();
assertThat(getObjects(assertions)).isSameAs(Objects.instance());
assertThat(getArrays(assertions)).isSameAs(arraysBefore);
}
}
| ByteArrayAssert_usingDefaultComparator_Test |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/convert/converter/Converter.java | {
"start": 889,
"end": 1174
} | interface ____ thread-safe and can be shared.
*
* <p>Implementations may additionally implement {@link ConditionalConverter}.
*
* @author Keith Donald
* @author Josh Cummings
* @since 3.0
* @param <S> the source type
* @param <T> the target type
*/
@FunctionalInterface
public | are |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/support/http/UserAgentAndroid_4_0_3_Test_2.java | {
"start": 757,
"end": 2108
} | class ____ extends TestCase {
public void test_mac_firefox() throws Exception {
WebAppStat stat = new WebAppStat("");
stat.computeUserAgent("Mozilla/5.0 (Linux; U; Android 4.0.3; de-ch; HTC Sensation Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30");
assertEquals(0, stat.getBrowserChromeCount());
assertEquals(0, stat.getBrowserFirefoxCount());
assertEquals(0, stat.getBrowserOperaCount());
assertEquals(0, stat.getBrowserSafariCount());
assertEquals(0, stat.getBrowserIECount());
assertEquals(0, stat.getBrowserIE10Count());
assertEquals(1, stat.getDeviceAndroidCount());
assertEquals(0, stat.getDeviceIphoneCount());
assertEquals(0, stat.getDeviceWindowsPhoneCount());
assertEquals(1, stat.getOSLinuxCount());
assertEquals(1, stat.getOSAndroidCount());
assertEquals(1, stat.getOSAndroid40Count());
assertEquals(0, stat.getOSLinuxUbuntuCount());
assertEquals(0, stat.getOSMacOSXCount());
assertEquals(0, stat.getOSWindowsCount());
assertEquals(0, stat.getOSWindows8Count());
assertEquals(0, stat.getOSSymbianCount());
assertEquals(0, stat.getOSFreeBSDCount());
assertEquals(0, stat.getOSOpenBSDCount());
}
}
| UserAgentAndroid_4_0_3_Test_2 |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/CombiIT.java | {
"start": 1585,
"end": 5866
} | class ____ extends ESIntegTestCase {
/**
* Making sure that if there are multiple aggregations, working on the same field, yet require different
* value source type, they can all still work. It used to fail as we used to cache the ValueSource by the
* field name. If the cached value source was of type "bytes" and another aggregation on the field required to see
* it as "numeric", it didn't work. Now we cache the Value Sources by a custom key (field name + ValueSource type)
* so there's no conflict there.
*/
public void testMultipleAggsOnSameField_WithDifferentRequiredValueSourceType() throws Exception {
createIndex("idx");
IndexRequestBuilder[] builders = new IndexRequestBuilder[randomInt(30)];
Map<Integer, Integer> values = new HashMap<>();
long missingValues = 0;
for (int i = 0; i < builders.length; i++) {
String name = "name_" + randomIntBetween(1, 10);
if (rarely()) {
missingValues++;
builders[i] = prepareIndex("idx").setSource(jsonBuilder().startObject().field("name", name).endObject());
} else {
int value = randomIntBetween(1, 10);
values.put(value, values.getOrDefault(value, 0) + 1);
builders[i] = prepareIndex("idx").setSource(
jsonBuilder().startObject().field("name", name).field("value", value).endObject()
);
}
}
indexRandom(true, builders);
ensureSearchable();
final long finalMissingValues = missingValues;
SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values());
assertNoFailuresAndResponse(
prepareSearch("idx").addAggregation(missing("missing_values").field("value"))
.addAggregation(terms("values").field("value").collectMode(aggCollectionMode)),
response -> {
InternalAggregations aggs = response.getAggregations();
SingleBucketAggregation missing = aggs.get("missing_values");
assertNotNull(missing);
assertThat(missing.getDocCount(), equalTo(finalMissingValues));
Terms terms = aggs.get("values");
assertNotNull(terms);
List<? extends Terms.Bucket> buckets = terms.getBuckets();
assertThat(buckets.size(), equalTo(values.size()));
for (Terms.Bucket bucket : buckets) {
values.remove(((Number) bucket.getKey()).intValue());
}
assertTrue(values.isEmpty());
}
);
}
/**
* Some top aggs (eg. date_/histogram) that are executed on unmapped fields, will generate an estimate count of buckets - zero.
* when the sub aggregator is then created, it will take this estimation into account. This used to cause
* and an ArrayIndexOutOfBoundsException...
*/
public void testSubAggregationForTopAggregationOnUnmappedField() throws Exception {
prepareCreate("idx").setMapping(
jsonBuilder().startObject()
.startObject("_doc")
.startObject("properties")
.startObject("name")
.field("type", "keyword")
.endObject()
.startObject("value")
.field("type", "integer")
.endObject()
.endObject()
.endObject()
.endObject()
).get();
ensureSearchable("idx");
SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values());
assertNoFailuresAndResponse(
prepareSearch("idx").addAggregation(
histogram("values").field("value1").interval(1).subAggregation(terms("names").field("name").collectMode(aggCollectionMode))
),
response -> {
assertThat(response.getHits().getTotalHits().value(), Matchers.equalTo(0L));
Histogram values = response.getAggregations().get("values");
assertThat(values, notNullValue());
assertThat(values.getBuckets().isEmpty(), is(true));
}
);
}
}
| CombiIT |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/test/LambdaMatchers.java | {
"start": 5380,
"end": 8432
} | class ____<T, U> extends TypeSafeMatcher<T[]> {
private final String transformDescription;
private final Matcher<U[]> matcher;
private final Function<T, U> transform;
private ArrayTransformMatcher(String transformDescription, Matcher<U[]> matcher, Function<T, U> transform) {
this.transformDescription = transformDescription;
this.matcher = matcher;
this.transform = transform;
}
@Override
@SuppressWarnings("unchecked")
protected boolean matchesSafely(T[] item) {
U[] us = null;
for (int i = 0; i < item.length; i++) {
U u;
try {
u = transform.apply(item[i]); // this might not actually be a T
} catch (ClassCastException e) {
throw new AssertionError(e);
}
if (us == null) {
// now we actually have a U, we can create an array of the correct type
us = (U[]) Array.newInstance(u.getClass(), item.length);
}
us[i] = u;
}
return matcher.matches(us);
}
@Override
@SuppressWarnings("unchecked")
protected void describeMismatchSafely(T[] item, Description description) {
U[] us = null;
for (int i = 0; i < item.length; i++) {
U u;
try {
u = transform.apply(item[i]); // this might not actually be a T
} catch (ClassCastException e) {
description.appendValue(i)
.appendText(" at index " + i)
.appendText(" is not of the correct type (")
.appendText(e.getMessage())
.appendText(")");
return;
}
if (us == null) {
// now we actually have a U, we can create an array of the correct type
us = (U[]) Array.newInstance(u.getClass(), item.length);
}
us[i] = u;
}
description.appendText(transformDescription).appendText(" ");
matcher.describeMismatch(us, description);
}
@Override
public void describeTo(Description description) {
description.appendText("array with ").appendText(transformDescription).appendText(" matching ").appendDescriptionOf(matcher);
}
}
public static <T, U> Matcher<T[]> transformedArrayItemsMatch(Function<T, U> function, Matcher<U[]> matcher) {
return new ArrayTransformMatcher<>("transformed items", matcher, function);
}
public static <T, U> Matcher<T[]> transformedArrayItemsMatch(
String transformDescription,
Function<T, U> function,
Matcher<U[]> matcher
) {
return new ArrayTransformMatcher<>(transformDescription, matcher, function);
}
private static | ArrayTransformMatcher |
java | apache__kafka | connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SamplingTestPlugin.java | {
"start": 965,
"end": 1050
} | class ____ plugins so we can sample information about their initialization
*/
public | for |
java | apache__dubbo | dubbo-config/dubbo-config-spring/src/test/java/org/apache/dubbo/config/spring/JavaConfigBeanTest.java | {
"start": 5995,
"end": 7171
} | class ____ {
@Bean("dubbo-demo-application")
public ApplicationConfig applicationConfig() {
ApplicationConfig applicationConfig = new ApplicationConfig();
applicationConfig.setName("dubbo-demo-application");
return applicationConfig;
}
@Bean(MY_PROTOCOL_ID)
public ProtocolConfig protocolConfig() {
ProtocolConfig protocolConfig = new ProtocolConfig();
protocolConfig.setName("rest");
protocolConfig.setPort(1234);
return protocolConfig;
}
@Bean(MY_REGISTRY_ID)
public RegistryConfig registryConfig() {
RegistryConfig registryConfig = new RegistryConfig();
registryConfig.setAddress("N/A");
return registryConfig;
}
@Bean
public ConsumerConfig consumerConfig() {
ConsumerConfig consumer = new ConsumerConfig();
consumer.setTimeout(1000);
consumer.setGroup("demo");
consumer.setCheck(false);
consumer.setRetries(2);
return consumer;
}
}
@Configuration
static | TestConfiguration |
java | apache__camel | components/camel-kafka/src/main/java/org/apache/camel/component/kafka/PollExceptionStrategy.java | {
"start": 1268,
"end": 2054
} | interface ____ {
/**
* Reset any error flags set by a previous error condition
*/
default void reset() {
}
/**
* This method provides an "answer" to whether the consumer can continue polling or not. This is specific to each
* polling exception strategy and must be implemented accordingly
*
* @return true if polling should continue or false otherwise
*/
boolean canContinue();
/**
* Controls how to handle the exception while polling from Kafka.
*
* @param exception the caused exception which typically would be a {@link org.apache.kafka.common.KafkaException}
* @return how to handle the exception
*/
void handle(long partitionLastOffset, Exception exception);
}
| PollExceptionStrategy |
java | elastic__elasticsearch | x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/PyTorchStateStreamerIT.java | {
"start": 1427,
"end": 5500
} | class ____ extends MlSingleNodeTestCase {
public void testRestoreState() throws IOException, InterruptedException {
int numChunks = 5;
int chunkSize = 100;
int modelSize = numChunks * chunkSize;
String modelId = "test-state-streamer-restore";
List<byte[]> chunks = new ArrayList<>(numChunks);
for (int i = 0; i < numChunks; i++) {
chunks.add(randomByteArrayOfLength(chunkSize));
}
List<TrainedModelDefinitionDoc> docs = createModelDefinitionDocs(chunks, modelId);
putModelDefinition(docs);
ByteArrayOutputStream outputStream = new ByteArrayOutputStream(modelSize);
PyTorchStateStreamer stateStreamer = new PyTorchStateStreamer(
client(),
client().threadPool().executor(MachineLearning.UTILITY_THREAD_POOL_NAME),
xContentRegistry()
);
AtomicReference<Boolean> onSuccess = new AtomicReference<>();
AtomicReference<Exception> onFailure = new AtomicReference<>();
blockingCall(
listener -> stateStreamer.writeStateToStream(modelId, InferenceIndexConstants.LATEST_INDEX_NAME, outputStream, listener),
onSuccess,
onFailure
);
byte[] writtenData = outputStream.toByteArray();
// the first 4 bytes are the model size
int writtenSize = ByteBuffer.wrap(writtenData, 0, 4).getInt();
assertEquals(modelSize, writtenSize);
byte[] writtenChunk = new byte[chunkSize];
for (int i = 0; i < numChunks; i++) {
System.arraycopy(writtenData, i * chunkSize + 4, writtenChunk, 0, chunkSize);
assertArrayEquals(chunks.get(i), writtenChunk);
}
}
private List<TrainedModelDefinitionDoc> createModelDefinitionDocs(List<byte[]> binaryChunks, String modelId) {
int totalLength = binaryChunks.stream().map(arr -> arr.length).reduce(0, Integer::sum);
List<TrainedModelDefinitionDoc> docs = new ArrayList<>();
for (int i = 0; i < binaryChunks.size(); i++) {
String encodedData = new String(Base64.getEncoder().encode(binaryChunks.get(i)), StandardCharsets.UTF_8);
docs.add(
new TrainedModelDefinitionDoc.Builder().setDocNum(i)
.setCompressedString(encodedData)
.setCompressionVersion(TrainedModelConfig.CURRENT_DEFINITION_COMPRESSION_VERSION)
.setTotalDefinitionLength(totalLength)
.setDefinitionLength(encodedData.length())
.setEos(i == binaryChunks.size() - 1)
.setModelId(modelId)
.build()
);
}
return docs;
}
private void putModelDefinition(List<TrainedModelDefinitionDoc> docs) throws IOException {
BulkRequestBuilder bulkRequestBuilder = client().prepareBulk();
for (int i = 0; i < docs.size(); i++) {
TrainedModelDefinitionDoc doc = docs.get(i);
try (XContentBuilder xContentBuilder = doc.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS)) {
IndexRequestBuilder indexRequestBuilder = prepareIndex(InferenceIndexConstants.LATEST_INDEX_NAME).setSource(xContentBuilder)
.setId(TrainedModelDefinitionDoc.docId(doc.getModelId(), i));
bulkRequestBuilder.add(indexRequestBuilder);
}
}
BulkResponse bulkResponse = bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get();
if (bulkResponse.hasFailures()) {
int failures = 0;
for (BulkItemResponse itemResponse : bulkResponse) {
if (itemResponse.isFailed()) {
failures++;
logger.error("Item response failure [{}]", itemResponse.getFailureMessage());
}
}
fail("Bulk response contained " + failures + " failures");
}
logger.debug("Indexed [{}] documents", docs.size());
}
}
| PyTorchStateStreamerIT |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.