language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/network/SslTransportLayer.java | {
"start": 40665,
"end": 48032
} | class ____ in a few cases:
// a) If there are no matching ciphers or TLS version or the private key is invalid, client will be
// unable to process the server message and an SSLException is thrown:
// javax.net.ssl.SSLException: Unrecognized SSL message, plaintext connection?
// b) If server closes the connection gracefully during handshake, client may receive close_notify
// and and an SSLException is thrown:
// javax.net.ssl.SSLException: Received close_notify during handshake
// We want to handle a) as a non-retriable SslAuthenticationException and b) as a retriable IOException.
// To do this we need to rely on the exception string. Since it is safer to throw a retriable exception
// when we are not sure, we will treat only the first exception string as a handshake exception.
private void maybeProcessHandshakeFailure(SSLException sslException, boolean flush, IOException ioException) throws IOException {
if (sslException instanceof SSLHandshakeException || sslException instanceof SSLProtocolException ||
sslException instanceof SSLPeerUnverifiedException || sslException instanceof SSLKeyException ||
sslException.getMessage().contains("Unrecognized SSL message") ||
sslException.getMessage().contains("Received fatal alert: "))
handshakeFailure(sslException, flush);
else if (ioException == null)
throw sslException;
else {
log.debug("SSLException while unwrapping data after IOException, original IOException will be propagated", sslException);
throw ioException;
}
}
// If handshake has already failed, throw the authentication exception.
private void maybeThrowSslAuthenticationException() {
if (handshakeException != null)
throw handshakeException;
}
/**
* Perform handshake wrap after an SSLException or any IOException.
*
* If `doWrite=false`, we are processing IOException after peer has disconnected, so we
* cannot send any more data. We perform any pending wraps so that we can unwrap any
* peer data that is already available.
*
* If `doWrite=true`, we are processing SSLException, we perform wrap and flush
* any data to notify the peer of the handshake failure.
*
* Returns true if no more wrap is required and any data is flushed or discarded.
*/
private boolean handshakeWrapAfterFailure(boolean doWrite) {
try {
log.trace("handshakeWrapAfterFailure status {} doWrite {}", handshakeStatus, doWrite);
while (handshakeStatus == HandshakeStatus.NEED_WRAP && (!doWrite || flush(netWriteBuffer))) {
if (!doWrite)
clearWriteBuffer();
handshakeWrap(doWrite);
}
} catch (Exception e) {
log.debug("Failed to wrap and flush all bytes before closing channel", e);
clearWriteBuffer();
}
if (!doWrite)
clearWriteBuffer();
return !netWriteBuffer.hasRemaining();
}
private void clearWriteBuffer() {
if (netWriteBuffer.hasRemaining())
log.debug("Discarding write buffer {} since peer has disconnected", netWriteBuffer);
netWriteBuffer.position(0);
netWriteBuffer.limit(0);
}
@Override
public boolean isMute() {
return key.isValid() && (key.interestOps() & SelectionKey.OP_READ) == 0;
}
@Override
public boolean hasBytesBuffered() {
return hasBytesBuffered;
}
// Update `hasBytesBuffered` status. If any bytes were read from the network or
// if data was returned from read, `hasBytesBuffered` is set to true if any buffered
// data is still remaining. If not, `hasBytesBuffered` is set to false since no progress
// can be made until more data is available to read from the network.
private void updateBytesBuffered(boolean madeProgress) {
if (madeProgress)
hasBytesBuffered = netReadBuffer.position() != 0 || appReadBuffer.position() != 0;
else
hasBytesBuffered = false;
}
@Override
public long transferFrom(FileChannel fileChannel, long position, long count) throws IOException {
if (state == State.CLOSING)
throw closingException();
if (state != State.READY)
return 0;
if (!flush(netWriteBuffer))
return 0;
long channelSize = fileChannel.size();
if (position > channelSize)
return 0;
int totalBytesToWrite = (int) Math.min(Math.min(count, channelSize - position), Integer.MAX_VALUE);
if (fileChannelBuffer == null) {
// Pick a size that allows for reasonably efficient disk reads, keeps the memory overhead per connection
// manageable and can typically be drained in a single `write` call. The `netWriteBuffer` is typically 16k
// and the socket send buffer is 100k by default, so 32k is a good number given the mentioned trade-offs.
int transferSize = 32768;
// Allocate a direct buffer to avoid one heap to heap buffer copy. SSLEngine copies the source
// buffer (fileChannelBuffer) to the destination buffer (netWriteBuffer) and then encrypts in-place.
// FileChannel.read() to a heap buffer requires a copy from a direct buffer to a heap buffer, which is not
// useful here.
fileChannelBuffer = ByteBuffer.allocateDirect(transferSize);
// The loop below drains any remaining bytes from the buffer before reading from disk, so we ensure there
// are no remaining bytes in the empty buffer
fileChannelBuffer.position(fileChannelBuffer.limit());
}
int totalBytesWritten = 0;
long pos = position;
try {
while (totalBytesWritten < totalBytesToWrite) {
if (!fileChannelBuffer.hasRemaining()) {
fileChannelBuffer.clear();
int bytesRemaining = totalBytesToWrite - totalBytesWritten;
if (bytesRemaining < fileChannelBuffer.limit())
fileChannelBuffer.limit(bytesRemaining);
int bytesRead = fileChannel.read(fileChannelBuffer, pos);
if (bytesRead <= 0)
break;
fileChannelBuffer.flip();
}
int networkBytesWritten = write(fileChannelBuffer);
totalBytesWritten += networkBytesWritten;
// In the case of a partial write we only return the written bytes to the caller. As a result, the
// `position` passed in the next `transferFrom` call won't include the bytes remaining in
// `fileChannelBuffer`. By draining `fileChannelBuffer` first, we ensure we update `pos` before
// we invoke `fileChannel.read`.
if (fileChannelBuffer.hasRemaining())
break;
pos += networkBytesWritten;
}
return totalBytesWritten;
} catch (IOException e) {
if (totalBytesWritten > 0)
return totalBytesWritten;
throw e;
}
}
}
| SSLException |
java | google__guava | android/guava/src/com/google/common/collect/Lists.java | {
"start": 24578,
"end": 26225
} | class ____<
F extends @Nullable Object, T extends @Nullable Object>
extends AbstractSequentialList<T> implements Serializable {
final List<F> fromList;
final Function<? super F, ? extends T> function;
TransformingSequentialList(List<F> fromList, Function<? super F, ? extends T> function) {
this.fromList = checkNotNull(fromList);
this.function = checkNotNull(function);
}
/**
* The default implementation inherited is based on iteration and removal of each element which
* can be overkill. That's why we forward this call directly to the backing list.
*/
@Override
protected void removeRange(int fromIndex, int toIndex) {
fromList.subList(fromIndex, toIndex).clear();
}
@Override
public int size() {
return fromList.size();
}
@Override
public boolean isEmpty() {
return fromList.isEmpty();
}
@Override
public ListIterator<T> listIterator(int index) {
return new TransformedListIterator<F, T>(fromList.listIterator(index)) {
@Override
@ParametricNullness
T transform(@ParametricNullness F from) {
return function.apply(from);
}
};
}
@GwtIncompatible @J2ktIncompatible private static final long serialVersionUID = 0;
}
/**
* Implementation of a transforming random access list. We try to make as many of these methods
* pass-through to the source list as possible so that the performance characteristics of the
* source list and transformed list are similar.
*
* @see Lists#transform
*/
private static final | TransformingSequentialList |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/time/PeriodGetTemporalUnitTest.java | {
"start": 877,
"end": 1278
} | class ____ {
private final CompilationTestHelper helper =
CompilationTestHelper.newInstance(PeriodGetTemporalUnit.class, getClass());
@Test
public void periodGetTemporalUnit() {
helper
.addSourceLines(
"TestClass.java",
"""
import java.time.Period;
import java.time.temporal.ChronoUnit;
public | PeriodGetTemporalUnitTest |
java | quarkusio__quarkus | integration-tests/narayana-jta/src/test/java/io/quarkus/narayana/jta/TransactionJdbcObjectStoreTest.java | {
"start": 322,
"end": 451
} | class ____ extends BaseTransactionTest {
@Test
public void test() {
runTest();
}
}
| TransactionJdbcObjectStoreTest |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/suite/engine/SuiteLauncherDiscoveryRequestBuilderTests.java | {
"start": 12965,
"end": 13160
} | class ____ {
}
@SelectMethod(typeName = "org.junit.platform.suite.engine.SuiteLauncherDiscoveryRequestBuilderTests$OneParameterTestCase", name = "testMethod", parameterTypes = int.class)
| SuiteD |
java | spring-projects__spring-boot | module/spring-boot-web-server/src/test/java/org/springframework/boot/web/server/servlet/context/config/ExampleServletWebServerApplicationConfiguration.java | {
"start": 1182,
"end": 1420
} | class ____ {
@Bean
public MockServletWebServerFactory webServerFactory() {
return new MockServletWebServerFactory();
}
@Bean
public Servlet servlet() {
return new MockServlet();
}
}
| ExampleServletWebServerApplicationConfiguration |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/type/AspectJTypeFilterTests.java | {
"start": 1186,
"end": 6831
} | class ____ {
@Test
void namePatternMatches() throws Exception {
assertMatch("example.type.AspectJTypeFilterTestsTypes$SomeClass",
"example.type.AspectJTypeFilterTestsTypes.SomeClass");
assertMatch("example.type.AspectJTypeFilterTestsTypes$SomeClass",
"*");
assertMatch("example.type.AspectJTypeFilterTestsTypes$SomeClass",
"*..SomeClass");
assertMatch("example.type.AspectJTypeFilterTestsTypes$SomeClass",
"example..SomeClass");
}
@Test
void namePatternNoMatches() throws Exception {
assertNoMatch("example.type.AspectJTypeFilterTestsTypes$SomeClass",
"example.type.AspectJTypeFilterTestsTypes.SomeClassX");
}
@Test
void subclassPatternMatches() throws Exception {
assertMatch("example.type.AspectJTypeFilterTestsTypes$SomeClassExtendingSomeClass",
"example.type.AspectJTypeFilterTestsTypes.SomeClass+");
assertMatch("example.type.AspectJTypeFilterTestsTypes$SomeClassExtendingSomeClass",
"*+");
assertMatch("example.type.AspectJTypeFilterTestsTypes$SomeClassExtendingSomeClass",
"java.lang.Object+");
assertMatch("example.type.AspectJTypeFilterTestsTypes$SomeClassImplementingSomeInterface",
"example.type.AspectJTypeFilterTestsTypes.SomeInterface+");
assertMatch("example.type.AspectJTypeFilterTestsTypes$SomeClassImplementingSomeInterface",
"*+");
assertMatch("example.type.AspectJTypeFilterTestsTypes$SomeClassImplementingSomeInterface",
"java.lang.Object+");
assertMatch("example.type.AspectJTypeFilterTestsTypes$SomeClassExtendingSomeClassExtendingSomeClassAndImplementingSomeInterface",
"example.type.AspectJTypeFilterTestsTypes.SomeInterface+");
assertMatch("example.type.AspectJTypeFilterTestsTypes$SomeClassExtendingSomeClassExtendingSomeClassAndImplementingSomeInterface",
"example.type.AspectJTypeFilterTestsTypes.SomeClassExtendingSomeClass+");
assertMatch("example.type.AspectJTypeFilterTestsTypes$SomeClassExtendingSomeClassExtendingSomeClassAndImplementingSomeInterface",
"example.type.AspectJTypeFilterTestsTypes.SomeClass+");
assertMatch("example.type.AspectJTypeFilterTestsTypes$SomeClassExtendingSomeClassExtendingSomeClassAndImplementingSomeInterface",
"*+");
assertMatch("example.type.AspectJTypeFilterTestsTypes$SomeClassExtendingSomeClassExtendingSomeClassAndImplementingSomeInterface",
"java.lang.Object+");
}
@Test
void subclassPatternNoMatches() throws Exception {
assertNoMatch("example.type.AspectJTypeFilterTestsTypes$SomeClassExtendingSomeClass",
"java.lang.String+");
}
@Test
void annotationPatternMatches() throws Exception {
assertMatch("example.type.AspectJTypeFilterTestsTypes$SomeClassAnnotatedWithComponent",
"@org.springframework.core.testfixture.stereotype.Component *..*");
assertMatch("example.type.AspectJTypeFilterTestsTypes$SomeClassAnnotatedWithComponent",
"@* *..*");
assertMatch("example.type.AspectJTypeFilterTestsTypes$SomeClassAnnotatedWithComponent",
"@*..* *..*");
assertMatch("example.type.AspectJTypeFilterTestsTypes$SomeClassAnnotatedWithComponent",
"@*..*Component *..*");
assertMatch("example.type.AspectJTypeFilterTestsTypes$SomeClassAnnotatedWithComponent",
"@org.springframework.core.testfixture.stereotype.Component *..*Component");
assertMatch("example.type.AspectJTypeFilterTestsTypes$SomeClassAnnotatedWithComponent",
"@org.springframework.core.testfixture.stereotype.Component *");
}
@Test
void annotationPatternNoMatches() throws Exception {
assertNoMatch("example.type.AspectJTypeFilterTestsTypes$SomeClassAnnotatedWithComponent",
"@org.springframework.stereotype.Repository *..*");
}
@Test
void compositionPatternMatches() throws Exception {
assertMatch("example.type.AspectJTypeFilterTestsTypes$SomeClass",
"!*..SomeOtherClass");
assertMatch("example.type.AspectJTypeFilterTestsTypes$SomeClassExtendingSomeClassExtendingSomeClassAndImplementingSomeInterface",
"example.type.AspectJTypeFilterTestsTypes.SomeInterface+ " +
"&& example.type.AspectJTypeFilterTestsTypes.SomeClass+ " +
"&& example.type.AspectJTypeFilterTestsTypes.SomeClassExtendingSomeClass+");
assertMatch("example.type.AspectJTypeFilterTestsTypes$SomeClassExtendingSomeClassExtendingSomeClassAndImplementingSomeInterface",
"example.type.AspectJTypeFilterTestsTypes.SomeInterface+ " +
"|| example.type.AspectJTypeFilterTestsTypes.SomeClass+ " +
"|| example.type.AspectJTypeFilterTestsTypes.SomeClassExtendingSomeClass+");
}
@Test
void compositionPatternNoMatches() throws Exception {
assertNoMatch("example.type.AspectJTypeFilterTestsTypes$SomeClass",
"*..Bogus && example.type.AspectJTypeFilterTestsTypes.SomeClass");
}
private void assertMatch(String type, String typePattern) throws Exception {
MetadataReaderFactory metadataReaderFactory = new SimpleMetadataReaderFactory();
MetadataReader metadataReader = metadataReaderFactory.getMetadataReader(type);
AspectJTypeFilter filter = new AspectJTypeFilter(typePattern, getClass().getClassLoader());
assertThat(filter.match(metadataReader, metadataReaderFactory)).isTrue();
ClassloadingAssertions.assertClassNotLoaded(type);
}
private void assertNoMatch(String type, String typePattern) throws Exception {
MetadataReaderFactory metadataReaderFactory = new SimpleMetadataReaderFactory();
MetadataReader metadataReader = metadataReaderFactory.getMetadataReader(type);
AspectJTypeFilter filter = new AspectJTypeFilter(typePattern, getClass().getClassLoader());
assertThat(filter.match(metadataReader, metadataReaderFactory)).isFalse();
ClassloadingAssertions.assertClassNotLoaded(type);
}
}
| AspectJTypeFilterTests |
java | google__dagger | dagger-compiler/main/java/dagger/internal/codegen/binding/LegacyBindingGraphFactory.java | {
"start": 34715,
"end": 39718
} | class ____ {
private final Set<Object> cycleChecker = new HashSet<>();
/**
* Returns {@code true} if any of the bindings resolved for {@code key} are multibindings with
* contributions declared within this component's modules or optional bindings with present
* values declared within this component's modules, or if any of its unscoped dependencies
* depend on such bindings.
*
* <p>We don't care about scoped dependencies because they will never depend on bindings from
* subcomponents.
*
* @throws IllegalArgumentException if {@link #getPreviouslyResolvedBindings(Key)} is empty
*/
private boolean requiresResolution(Key key) {
// Don't recur infinitely if there are valid cycles in the dependency graph.
// http://b/23032377
if (!cycleChecker.add(key)) {
return false;
}
return reentrantComputeIfAbsent(
keyDependsOnLocalBindingsCache, key, this::requiresResolutionUncached);
}
/**
* Returns {@code true} if {@code binding} is unscoped (or has {@link Reusable @Reusable}
* scope) and depends on multibindings with contributions declared within this component's
* modules, or if any of its unscoped or {@link Reusable @Reusable} scoped dependencies depend
* on such local multibindings.
*
* <p>We don't care about non-reusable scoped dependencies because they will never depend on
* multibindings with contributions from subcomponents.
*/
private boolean requiresResolution(Binding binding) {
if (!cycleChecker.add(binding)) {
return false;
}
return reentrantComputeIfAbsent(
bindingDependsOnLocalBindingsCache, binding, this::requiresResolutionUncached);
}
private boolean requiresResolutionUncached(Key key) {
checkArgument(
getPreviouslyResolvedBindings(key).isPresent(),
"no previously resolved bindings in %s for %s",
Resolver.this,
key);
LegacyResolvedBindings previouslyResolvedBindings =
getPreviouslyResolvedBindings(key).get();
if (hasLocalBindings(previouslyResolvedBindings)) {
return true;
}
for (Binding binding : previouslyResolvedBindings.bindings()) {
if (requiresResolution(binding)) {
return true;
}
}
return false;
}
private boolean requiresResolutionUncached(Binding binding) {
if ((!binding.scope().isPresent() || binding.scope().get().isReusable())
// TODO(beder): Figure out what happens with production subcomponents.
&& !binding.kind().equals(BindingKind.PRODUCTION)) {
for (DependencyRequest dependency : binding.dependencies()) {
if (requiresResolution(dependency.key())) {
return true;
}
}
}
return false;
}
}
private boolean hasLocalBindings(LegacyResolvedBindings resolvedBindings) {
return hasLocalMultibindingContributions(resolvedBindings.key())
|| hasLocalOptionalBindingContribution(resolvedBindings);
}
/**
* Returns {@code true} if there is at least one multibinding contribution declared within
* this component's modules that matches the key.
*/
private boolean hasLocalMultibindingContributions(Key requestKey) {
return !declarations.multibindingContributions(requestKey).isEmpty()
|| !declarations.delegateMultibindingContributions(requestKey).isEmpty();
}
/**
* Returns {@code true} if there is a contribution in this component for an {@code
* Optional<Foo>} key that has not been contributed in a parent.
*/
private boolean hasLocalOptionalBindingContribution(LegacyResolvedBindings resolvedBindings) {
return hasLocalOptionalBindingContribution(
resolvedBindings.key(), resolvedBindings.bindings());
}
private boolean hasLocalOptionalBindingContribution(
Key key, ImmutableSet<? extends Binding> previouslyResolvedBindings) {
if (previouslyResolvedBindings.stream()
.map(Binding::kind)
.anyMatch(isEqual(OPTIONAL))) {
return hasLocalExplicitBindings(keyFactory.unwrapOptional(key).get());
} else {
// If a parent contributes a @Provides Optional<Foo> binding and a child has a
// @BindsOptionalOf Foo method, the two should conflict, even if there is no binding for
// Foo on its own
return !getOptionalBindingDeclarations(key).isEmpty();
}
}
/**
* Returns {@code true} if there is at least one explicit binding that matches the given key.
*/
private boolean hasLocalExplicitBindings(Key requestKey) {
return !declarations.bindings(requestKey).isEmpty()
|| !declarations.delegates(requestKey).isEmpty();
}
}
}
| RequiresResolutionChecker |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java | {
"start": 95436,
"end": 115115
} | class ____ implements DockerCommandPlugin {
private final String volume;
private final String driver;
public MockDockerCommandPlugin(String volume, String driver) {
this.volume = volume;
this.driver = driver;
}
@Override
public void updateDockerRunCommand(DockerRunCommand dockerRunCommand,
Container container) throws ContainerExecutionException {
dockerRunCommand.setVolumeDriver("driver-1");
dockerRunCommand.addReadOnlyMountLocation("/source/path",
"/destination/path", true);
}
@Override
public DockerVolumeCommand getCreateDockerVolumeCommand(Container container)
throws ContainerExecutionException {
return new DockerVolumeCommand("create").setVolumeName(volume)
.setDriverName(driver);
}
@Override
public DockerVolumeCommand getCleanupDockerVolumesCommand(
Container container) throws ContainerExecutionException {
return null;
}
}
private void testDockerCommandPluginWithVolumesOutput(
String dockerVolumeListOutput, boolean expectFail)
throws PrivilegedOperationException, ContainerExecutionException,
IOException {
mockExecutor = Mockito
.mock(PrivilegedOperationExecutor.class);
DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
mockExecutor, mockCGroupsHandler);
when(mockExecutor
.executePrivilegedOperation(any(), any(PrivilegedOperation.class),
any(), any(), anyBoolean(), anyBoolean())).thenReturn(
null, dockerVolumeListOutput);
Context mockNMContext = createMockNMContext();
ResourcePluginManager rpm = mock(ResourcePluginManager.class);
Map<String, ResourcePlugin> pluginsMap = new HashMap<>();
ResourcePlugin plugin1 = mock(ResourcePlugin.class);
// Create the docker command plugin logic, which will set volume driver
DockerCommandPlugin dockerCommandPlugin = new MockDockerCommandPlugin(
"volume1", "local");
when(plugin1.getDockerCommandPluginInstance()).thenReturn(
dockerCommandPlugin);
ResourcePlugin plugin2 = mock(ResourcePlugin.class);
pluginsMap.put("plugin1", plugin1);
pluginsMap.put("plugin2", plugin2);
when(rpm.getNameToPlugins()).thenReturn(pluginsMap);
when(mockNMContext.getResourcePluginManager()).thenReturn(rpm);
runtime.initialize(conf, mockNMContext);
ContainerRuntimeContext containerRuntimeContext = builder.build();
try {
runtime.prepareContainer(containerRuntimeContext);
runtime.launchContainer(containerRuntimeContext);
checkVolumeCreateCommand();
} catch (ContainerExecutionException e) {
if (expectFail) {
// Expected
return;
} else{
fail("Should successfully prepareContainers" + e);
}
}
if (expectFail) {
fail("Should fail because output is illegal");
}
}
@ParameterizedTest(name = "https={0}")
@MethodSource("data")
public void testDockerCommandPluginCheckVolumeAfterCreation(boolean pHttps)
throws Exception {
initHttps(pHttps);
// For following tests, we expect to have volume1,local in output
// Failure cases
testDockerCommandPluginWithVolumesOutput(
"DRIVER VOLUME NAME\n", true);
testDockerCommandPluginWithVolumesOutput("", true);
testDockerCommandPluginWithVolumesOutput("volume1", true);
testDockerCommandPluginWithVolumesOutput(
"DRIVER VOLUME NAME\n" +
"nvidia-docker nvidia_driver_375.66\n", true);
testDockerCommandPluginWithVolumesOutput(
"DRIVER VOLUME NAME\n" +
" volume1\n", true);
testDockerCommandPluginWithVolumesOutput("local", true);
testDockerCommandPluginWithVolumesOutput("volume2,local", true);
testDockerCommandPluginWithVolumesOutput(
"DRIVER VOLUME NAME\n" +
"local volume2\n", true);
testDockerCommandPluginWithVolumesOutput("volum1,something", true);
testDockerCommandPluginWithVolumesOutput(
"DRIVER VOLUME NAME\n" +
"something volume1\n", true);
testDockerCommandPluginWithVolumesOutput("volum1,something\nvolum2,local",
true);
// Success case
testDockerCommandPluginWithVolumesOutput(
"DRIVER VOLUME NAME\n" +
"nvidia-docker nvidia_driver_375.66\n" +
"local volume1\n", false);
testDockerCommandPluginWithVolumesOutput(
"volume_xyz,nvidia\nvolume1,local\n\n", false);
testDockerCommandPluginWithVolumesOutput(" volume1, local \n", false);
testDockerCommandPluginWithVolumesOutput(
"volume_xyz,\tnvidia\n volume1,\tlocal\n\n", false);
}
@ParameterizedTest(name = "https={0}")
@MethodSource("data")
public void testDockerCommandPlugin(boolean pHttps) throws Exception {
initHttps(pHttps);
DockerLinuxContainerRuntime runtime =
new DockerLinuxContainerRuntime(mockExecutor, mockCGroupsHandler);
when(mockExecutor
.executePrivilegedOperation(any(), any(PrivilegedOperation.class),
any(), any(), anyBoolean(), anyBoolean())).thenReturn(
null, "volume1,local");
Context mockNMContext = createMockNMContext();
ResourcePluginManager rpm = mock(ResourcePluginManager.class);
Map<String, ResourcePlugin> pluginsMap = new HashMap<>();
ResourcePlugin plugin1 = mock(ResourcePlugin.class);
// Create the docker command plugin logic, which will set volume driver
DockerCommandPlugin dockerCommandPlugin = new MockDockerCommandPlugin(
"volume1", "local");
when(plugin1.getDockerCommandPluginInstance()).thenReturn(
dockerCommandPlugin);
ResourcePlugin plugin2 = mock(ResourcePlugin.class);
pluginsMap.put("plugin1", plugin1);
pluginsMap.put("plugin2", plugin2);
when(rpm.getNameToPlugins()).thenReturn(pluginsMap);
when(mockNMContext.getResourcePluginManager()).thenReturn(rpm);
runtime.initialize(conf, mockNMContext);
ContainerRuntimeContext containerRuntimeContext = builder.build();
runtime.prepareContainer(containerRuntimeContext);
runtime.launchContainer(containerRuntimeContext);
checkVolumeCreateCommand();
List<String> dockerCommands = readDockerCommands(3);
int expected = 14;
int counter = 0;
assertEquals(expected, dockerCommands.size());
assertEquals("[docker-command-execution]",
dockerCommands.get(counter++));
assertEquals(" cap-add=SYS_CHROOT,NET_BIND_SERVICE",
dockerCommands.get(counter++));
assertEquals(" cap-drop=ALL", dockerCommands.get(counter++));
assertEquals(" detach=true", dockerCommands.get(counter++));
assertEquals(" docker-command=run", dockerCommands.get(counter++));
assertEquals(" group-add=" + String.join(",", groups),
dockerCommands.get(counter++));
assertEquals(" image=busybox:latest", dockerCommands.get(counter++));
assertEquals(
" launch-command=bash,/test_container_work_dir/launch_container.sh",
dockerCommands.get(counter++));
assertEquals(" mounts="
+ "/test_container_log_dir:/test_container_log_dir:rw,"
+ "/test_application_local_dir:/test_application_local_dir:rw,"
+ "/test_filecache_dir:/test_filecache_dir:ro,"
+ "/test_user_filecache_dir:/test_user_filecache_dir:ro,"
+ "/source/path:/destination/path:ro",
dockerCommands.get(counter++));
assertEquals(
" name=container_e11_1518975676334_14532816_01_000001",
dockerCommands.get(counter++));
assertEquals(" net=host", dockerCommands.get(counter++));
assertEquals(" user=" + uidGidPair, dockerCommands.get(counter++));
// Verify volume-driver is set to expected value.
assertEquals(" volume-driver=driver-1",
dockerCommands.get(counter++));
assertEquals(" workdir=/test_container_work_dir",
dockerCommands.get(counter));
}
@ParameterizedTest(name = "https={0}")
@MethodSource("data")
public void testDockerCapabilities(boolean pHttps) throws ContainerExecutionException {
initHttps(pHttps);
DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
mockExecutor, mockCGroupsHandler);
try {
conf.setStrings(YarnConfiguration.NM_DOCKER_CONTAINER_CAPABILITIES,
"none", "CHOWN", "DAC_OVERRIDE");
runtime.initialize(conf, nmContext);
fail("Initialize didn't fail with invalid capabilities " +
"'none', 'CHOWN', 'DAC_OVERRIDE'");
} catch (ContainerExecutionException e) {
}
try {
conf.setStrings(YarnConfiguration.NM_DOCKER_CONTAINER_CAPABILITIES,
"CHOWN", "DAC_OVERRIDE", "NONE");
runtime.initialize(conf, nmContext);
fail("Initialize didn't fail with invalid capabilities " +
"'CHOWN', 'DAC_OVERRIDE', 'NONE'");
} catch (ContainerExecutionException e) {
}
conf.setStrings(YarnConfiguration.NM_DOCKER_CONTAINER_CAPABILITIES,
"NONE");
runtime.initialize(conf, nmContext);
assertEquals(0, runtime.getCapabilities().size());
conf.setStrings(YarnConfiguration.NM_DOCKER_CONTAINER_CAPABILITIES,
"none");
runtime.initialize(conf, nmContext);
assertEquals(0, runtime.getCapabilities().size());
conf.setStrings(YarnConfiguration.NM_DOCKER_CONTAINER_CAPABILITIES,
"CHOWN", "DAC_OVERRIDE");
runtime.initialize(conf, nmContext);
Iterator<String> it = runtime.getCapabilities().iterator();
assertEquals("CHOWN", it.next());
assertEquals("DAC_OVERRIDE", it.next());
}
@ParameterizedTest(name = "https={0}")
@MethodSource("data")
public void testLaunchContainerWithDockerTokens(boolean pHttps)
throws ContainerExecutionException, PrivilegedOperationException, IOException {
initHttps(pHttps);
// Get the credentials object with the Tokens.
Credentials credentials = DockerClientConfigHandler.readCredentialsFromConfigFile(
new Path(getDockerClientConfigFile().toURI()), conf, appId);
DataOutputBuffer dob = new DataOutputBuffer();
credentials.writeTokenStorageToStream(dob);
ByteBuffer tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
testLaunchContainer(tokens, null);
}
@ParameterizedTest(name = "https={0}")
@MethodSource("data")
public void testLaunchContainerWithAdditionalDockerClientConfig(boolean pHttps)
throws ContainerExecutionException, PrivilegedOperationException, IOException {
initHttps(pHttps);
testLaunchContainer(null, getDockerClientConfigFile());
}
public void testLaunchContainer(ByteBuffer tokens, File dockerConfigFile)
throws ContainerExecutionException, PrivilegedOperationException,
IOException {
if (dockerConfigFile != null) {
// load the docker client config file from system environment
env.put(DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_CLIENT_CONFIG,
dockerConfigFile.getPath());
}
if (tokens != null) {
// Configure the runtime and launch the container
when(context.getTokens()).thenReturn(tokens);
}
DockerLinuxContainerRuntime runtime =
new DockerLinuxContainerRuntime(mockExecutor, mockCGroupsHandler);
runtime.initialize(conf, nmContext);
Set<PosixFilePermission> perms =
PosixFilePermissions.fromString("rwxr-xr--");
FileAttribute<Set<PosixFilePermission>> attr =
PosixFilePermissions.asFileAttribute(perms);
Path outDir = new Path(
Files.createTempDirectory("docker-client-config-out", attr).toUri()
.getPath() + "/launch_container.sh");
builder.setExecutionAttribute(NM_PRIVATE_CONTAINER_SCRIPT_PATH, outDir);
runtime.launchContainer(builder.build());
PrivilegedOperation op = capturePrivilegedOperation();
assertEquals(
PrivilegedOperation.OperationType.LAUNCH_DOCKER_CONTAINER,
op.getOperationType());
List<String> args = op.getArguments();
int expectedArgs = (https) ? 15 : 13;
int argsCounter = 0;
assertEquals(expectedArgs, args.size());
assertEquals(runAsUser, args.get(argsCounter++));
assertEquals(user, args.get(argsCounter++));
assertEquals(Integer.toString(
PrivilegedOperation.RunAsUserCommand.LAUNCH_DOCKER_CONTAINER
.getValue()), args.get(argsCounter++));
assertEquals(appId, args.get(argsCounter++));
assertEquals(containerId, args.get(argsCounter++));
assertEquals(containerWorkDir.toString(), args.get(argsCounter++));
assertEquals(outDir.toUri().getPath(), args.get(argsCounter++));
assertEquals(nmPrivateTokensPath.toUri().getPath(),
args.get(argsCounter++));
if (https) {
assertEquals("--https", args.get(argsCounter++));
assertEquals(nmPrivateKeystorePath.toUri().toString(),
args.get(argsCounter++));
assertEquals(nmPrivateTruststorePath.toUri().toString(),
args.get(argsCounter++));
} else {
assertEquals("--http", args.get(argsCounter++));
}
assertEquals(pidFilePath.toString(), args.get(argsCounter++));
assertEquals(localDirs.get(0), args.get(argsCounter++));
assertEquals(logDirs.get(0), args.get(argsCounter++));
String dockerCommandFile = args.get(argsCounter++);
List<String> dockerCommands = Files
.readAllLines(Paths.get(dockerCommandFile), StandardCharsets.UTF_8);
int expected = 14;
int counter = 0;
assertEquals(expected, dockerCommands.size());
assertEquals("[docker-command-execution]",
dockerCommands.get(counter++));
assertEquals(" cap-add=SYS_CHROOT,NET_BIND_SERVICE",
dockerCommands.get(counter++));
assertEquals(" cap-drop=ALL", dockerCommands.get(counter++));
assertEquals(" detach=true", dockerCommands.get(counter++));
assertEquals(" docker-command=run", dockerCommands.get(counter++));
assertEquals(" docker-config=" + outDir.getParent(),
dockerCommands.get(counter++));
assertEquals(" group-add=" + String.join(",", groups),
dockerCommands.get(counter++));
assertEquals(" image=busybox:latest",
dockerCommands.get(counter++));
assertEquals(
" launch-command=bash,/test_container_work_dir/launch_container.sh",
dockerCommands.get(counter++));
assertEquals(" mounts="
+ "/test_container_log_dir:/test_container_log_dir:rw,"
+ "/test_application_local_dir:/test_application_local_dir:rw,"
+ "/test_filecache_dir:/test_filecache_dir:ro,"
+ "/test_user_filecache_dir:/test_user_filecache_dir:ro",
dockerCommands.get(counter++));
assertEquals(
" name=container_e11_1518975676334_14532816_01_000001",
dockerCommands.get(counter++));
assertEquals(" net=host", dockerCommands.get(counter++));
assertEquals(" user=" + uidGidPair, dockerCommands.get(counter++));
assertEquals(" workdir=/test_container_work_dir",
dockerCommands.get(counter++));
}
private File getDockerClientConfigFile() throws IOException {
// Write the JSOn to a temp file.
File file = File.createTempFile("docker-client-config", "runtime-test");
file.deleteOnExit();
BufferedWriter bw = new BufferedWriter(new FileWriter(file));
bw.write(TestDockerClientConfigHandler.JSON);
bw.close();
return file;
}
@ParameterizedTest(name = "https={0}")
@MethodSource("data")
public void testDockerContainerRelaunch(boolean pHttps)
throws ContainerExecutionException, PrivilegedOperationException,
IOException {
initHttps(pHttps);
DockerLinuxContainerRuntime runtime =
new DockerLinuxContainerRuntime(mockExecutor, mockCGroupsHandler);
when(mockExecutor
.executePrivilegedOperation(any(), any(PrivilegedOperation.class),
any(), any(), anyBoolean(), anyBoolean())).thenReturn(
DockerCommandExecutor.DockerContainerStatus.STOPPED.getName());
runtime.initialize(conf, nmContext);
runtime.relaunchContainer(builder.build());
List<String> dockerCommands = readDockerCommands(2);
int expected = 3;
int counter = 0;
assertEquals(expected, dockerCommands.size());
assertEquals("[docker-command-execution]",
dockerCommands.get(counter++));
assertEquals(" docker-command=start",
dockerCommands.get(counter++));
assertEquals(
" name=container_e11_1518975676334_14532816_01_000001",
dockerCommands.get(counter));
}
@ParameterizedTest(name = "https={0}")
@MethodSource("data")
public void testLaunchContainersWithSpecificDockerRuntime(boolean pHttps)
throws ContainerExecutionException, PrivilegedOperationException,
IOException {
initHttps(pHttps);
DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
mockExecutor, mockCGroupsHandler);
runtime.initialize(conf, nmContext);
env.put(DockerLinuxContainerRuntime
.ENV_DOCKER_CONTAINER_DOCKER_RUNTIME, "runc");
runtime.launchContainer(builder.build());
List<String> dockerCommands = readDockerCommands();
assertEquals(14, dockerCommands.size());
assertEquals(" runtime=runc", dockerCommands.get(11));
}
@ParameterizedTest(name = "https={0}")
@MethodSource("data")
@SuppressWarnings("unchecked")
public void testContainerLaunchWithAllowedRuntimes(boolean pHttps)
throws ContainerExecutionException, IOException,
PrivilegedOperationException {
initHttps(pHttps);
DockerLinuxContainerRuntime runtime =
new DockerLinuxContainerRuntime(mockExecutor, mockCGroupsHandler);
runtime.initialize(conf, nmContext);
String disallowedRuntime = "runc2";
try {
env.put(DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_DOCKER_RUNTIME,
disallowedRuntime);
runtime.launchContainer(builder.build());
fail("Runtime was expected to be disallowed: " +
disallowedRuntime);
} catch (ContainerExecutionException e) {
LOG.info("Caught expected exception: " + e);
}
String allowedRuntime = "runc";
env.put(DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_DOCKER_RUNTIME,
allowedRuntime);
//this should cause no failures.
runtime.launchContainer(builder.build());
List<String> dockerCommands = readDockerCommands();
//This is the expected docker invocation for this case
assertEquals(14, dockerCommands.size());
assertEquals(" runtime=runc", dockerCommands.get(11));
}
private static void verifyStopCommand(List<String> dockerCommands,
String signal) {
assertEquals(4, dockerCommands.size());
assertEquals("[docker-command-execution]", dockerCommands.get(0));
assertEquals(" docker-command=kill", dockerCommands.get(1));
assertEquals(" name=container_e11_1518975676334_14532816_01_000001",
dockerCommands.get(2));
assertEquals(" signal=" + signal, dockerCommands.get(3));
}
private List<String> readDockerCommands() throws IOException,
PrivilegedOperationException {
return readDockerCommands(1);
}
private List<String> readDockerCommands(int invocations) throws IOException,
PrivilegedOperationException {
PrivilegedOperation op = (invocations == 1)
? capturePrivilegedOperationAndVerifyArgs()
: capturePrivilegedOperation(invocations);
List<String> args = op.getArguments();
String dockerCommandFile = args.get((https) ? 14 : 12);
List<String> dockerCommands = Files.readAllLines(
Paths.get(dockerCommandFile), StandardCharsets.UTF_8);
return dockerCommands;
}
}
| MockDockerCommandPlugin |
java | elastic__elasticsearch | x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/literal/interval/Intervals.java | {
"start": 9576,
"end": 15312
} | class ____ {
private static final char PLUS = '+', MINUS = '-';
private final List<TimeUnit> units;
private final List<Token> tokens;
private final String name;
Parser(List<TimeUnit> units, List<Token> tokens, String name) {
this.units = units;
this.tokens = tokens;
this.name = name;
}
TemporalAmount parse(Source source, String string) {
int unitIndex = 0;
int startToken = 0;
int endToken = 0;
long[] values = new long[units.size()];
boolean negate = false;
// first check if there's a sign
char maybeSign = string.charAt(0);
if (PLUS == maybeSign) {
startToken = 1;
} else if (MINUS == maybeSign) {
startToken = 1;
negate = true;
}
// take each token and use it to consume a part of the string
// validate each token and that the whole string is consumed
for (Token token : tokens) {
endToken = startToken;
if (startToken >= string.length()) {
// consumed the string, bail out
if (token.optional) {
break;
}
throw new ParsingException(
source,
invalidIntervalMessage(string) + ": incorrect format, expecting {}",
Strings.collectionToDelimitedString(tokens, "")
);
}
// char token
if (token.ch != 0) {
char found = string.charAt(startToken);
if (found != token.ch) {
throw new ParsingException(
source,
invalidIntervalMessage(string) + ": expected [{}] (at [{}]) but found [{}]",
token.ch,
startToken,
found
);
}
startToken++;
}
// number char
else {
// go through the group the digits
for (; endToken < string.length() && Character.isDigit(string.charAt(endToken)); endToken++) {
}
if (endToken == startToken) {
throw new ParsingException(
source,
invalidIntervalMessage(string) + ": expected digit (at [{}]) but found [{}]",
endToken,
string.charAt(endToken)
);
}
String number = string.substring(startToken, endToken);
try {
long v = StringUtils.parseLong(number);
if (token.maxValue > 0 && v > token.maxValue) {
throw new ParsingException(
source,
invalidIntervalMessage(string)
+ ": [{}] unit has illegal value [{}], expected a positive number up to [{}]",
units.get(unitIndex).name(),
v,
token.maxValue
);
}
if (v < 0) {
throw new ParsingException(
source,
invalidIntervalMessage(string) + ": negative value [{}] not allowed (negate the entire interval instead)",
v
);
}
if (units.get(unitIndex) == TimeUnit.MILLISECOND && number.length() < 3) {
// normalize the number past DOT to millis
v *= number.length() < 2 ? 100 : 10;
}
values[unitIndex++] = v;
} catch (QlIllegalArgumentException siae) {
throw new ParsingException(source, invalidIntervalMessage(string), siae.getMessage());
}
startToken = endToken;
}
}
if (endToken <= string.length() - 1) {
throw new ParsingException(
source,
invalidIntervalMessage(string) + ": unexpected trailing characters found [{}]",
string.substring(endToken)
);
}
TemporalAmount interval = units.get(0) == TimeUnit.YEAR || units.get(0) == TimeUnit.MONTH ? Period.ZERO : Duration.ZERO;
for (int i = 0; i < values.length; i++) {
TemporalAmount ta = of(source, values[i], units.get(i));
interval = ta instanceof Period ? ((Period) ta).plus(interval) : ((Duration) ta).plus((Duration) interval);
}
if (negate) {
interval = negate(interval);
}
return interval;
}
private String invalidIntervalMessage(String interval) {
return "Invalid [" + name + "] value [" + interval + "]";
}
@Override
public String toString() {
return name;
}
}
public static TemporalAmount negate(TemporalAmount interval) {
// negated is not present on TemporalAmount though present in both Period and Duration so handle each | Parser |
java | google__dagger | javatests/dagger/internal/codegen/MultibindingTest.java | {
"start": 11905,
"end": 12295
} | interface ____ {",
" Set<String> set();",
"}");
Source childModule =
CompilerTests.javaSource(
"test.ChildModule",
"package test;",
"",
"import dagger.Module;",
"import dagger.multibindings.IntoSet;",
"import dagger.Provides;",
"",
"@Module",
" | Child |
java | elastic__elasticsearch | modules/lang-painless/src/test/java/org/elasticsearch/painless/action/ContextInfoTests.java | {
"start": 743,
"end": 8370
} | class ____ extends AbstractXContentSerializingTestCase<PainlessContextInfo> {
@Override
protected PainlessContextInfo doParseInstance(XContentParser parser) {
return PainlessContextInfo.fromXContent(parser);
}
@Override
protected PainlessContextInfo createTestInstance() {
int classesSize = randomIntBetween(20, 100);
List<PainlessContextClassInfo> classes = new ArrayList<>();
for (int clazz = 0; clazz < classesSize; ++clazz) {
int constructorsSize = randomInt(4);
List<PainlessContextConstructorInfo> constructors = new ArrayList<>(constructorsSize);
for (int constructor = 0; constructor < constructorsSize; ++constructor) {
int parameterSize = randomInt(12);
List<String> parameters = new ArrayList<>(parameterSize);
for (int parameter = 0; parameter < parameterSize; ++parameter) {
parameters.add(randomAlphaOfLengthBetween(1, 20));
}
constructors.add(new PainlessContextConstructorInfo(randomAlphaOfLength(randomIntBetween(4, 10)), parameters));
}
;
int staticMethodsSize = randomInt(4);
List<PainlessContextMethodInfo> staticMethods = new ArrayList<>(staticMethodsSize);
for (int staticMethod = 0; staticMethod < staticMethodsSize; ++staticMethod) {
int parameterSize = randomInt(12);
List<String> parameters = new ArrayList<>(parameterSize);
for (int parameter = 0; parameter < parameterSize; ++parameter) {
parameters.add(randomAlphaOfLengthBetween(1, 20));
}
staticMethods.add(
new PainlessContextMethodInfo(
randomAlphaOfLength(randomIntBetween(4, 10)),
randomAlphaOfLength(randomIntBetween(4, 10)),
randomAlphaOfLength(randomIntBetween(4, 10)),
parameters
)
);
}
int methodsSize = randomInt(10);
List<PainlessContextMethodInfo> methods = new ArrayList<>(methodsSize);
for (int method = 0; method < methodsSize; ++method) {
int parameterSize = randomInt(12);
List<String> parameters = new ArrayList<>(parameterSize);
for (int parameter = 0; parameter < parameterSize; ++parameter) {
parameters.add(randomAlphaOfLengthBetween(1, 20));
}
methods.add(
new PainlessContextMethodInfo(
randomAlphaOfLength(randomIntBetween(4, 10)),
randomAlphaOfLength(randomIntBetween(4, 10)),
randomAlphaOfLength(randomIntBetween(4, 10)),
parameters
)
);
}
int staticFieldsSize = randomInt(10);
List<PainlessContextFieldInfo> staticFields = new ArrayList<>();
for (int staticField = 0; staticField < staticFieldsSize; ++staticField) {
staticFields.add(
new PainlessContextFieldInfo(
randomAlphaOfLength(randomIntBetween(4, 10)),
randomAlphaOfLength(randomIntBetween(4, 10)),
randomAlphaOfLength(randomIntBetween(4, 10))
)
);
}
int fieldsSize = randomInt(4);
List<PainlessContextFieldInfo> fields = new ArrayList<>();
for (int field = 0; field < fieldsSize; ++field) {
fields.add(
new PainlessContextFieldInfo(
randomAlphaOfLength(randomIntBetween(4, 10)),
randomAlphaOfLength(randomIntBetween(4, 10)),
randomAlphaOfLength(randomIntBetween(4, 10))
)
);
}
classes.add(
new PainlessContextClassInfo(
randomAlphaOfLength(randomIntBetween(3, 200)),
randomBoolean(),
constructors,
staticMethods,
methods,
fields,
staticFields
)
);
}
int importedMethodsSize = randomInt(4);
List<PainlessContextMethodInfo> importedMethods = new ArrayList<>(importedMethodsSize);
for (int importedMethod = 0; importedMethod < importedMethodsSize; ++importedMethod) {
int parameterSize = randomInt(12);
List<String> parameters = new ArrayList<>(parameterSize);
for (int parameter = 0; parameter < parameterSize; ++parameter) {
parameters.add(randomAlphaOfLengthBetween(1, 20));
}
importedMethods.add(
new PainlessContextMethodInfo(
randomAlphaOfLength(randomIntBetween(4, 10)),
randomAlphaOfLength(randomIntBetween(4, 10)),
randomAlphaOfLength(randomIntBetween(4, 10)),
parameters
)
);
}
int classBindingsSize = randomInt(3);
List<PainlessContextClassBindingInfo> classBindings = new ArrayList<>(classBindingsSize);
for (int classBinding = 0; classBinding < classBindingsSize; ++classBinding) {
int parameterSize = randomIntBetween(2, 5);
int readOnly = randomIntBetween(1, parameterSize - 1);
List<String> parameters = new ArrayList<>(parameterSize);
for (int parameter = 0; parameter < parameterSize; ++parameter) {
parameters.add(randomAlphaOfLengthBetween(1, 20));
}
classBindings.add(
new PainlessContextClassBindingInfo(
randomAlphaOfLength(randomIntBetween(4, 10)),
randomAlphaOfLength(randomIntBetween(4, 10)),
randomAlphaOfLength(randomIntBetween(4, 10)),
readOnly,
parameters
)
);
}
int instanceBindingsSize = randomInt(3);
List<PainlessContextInstanceBindingInfo> instanceBindings = new ArrayList<>(classBindingsSize);
for (int instanceBinding = 0; instanceBinding < instanceBindingsSize; ++instanceBinding) {
int parameterSize = randomInt(12);
List<String> parameters = new ArrayList<>(parameterSize);
for (int parameter = 0; parameter < parameterSize; ++parameter) {
parameters.add(randomAlphaOfLengthBetween(1, 20));
}
instanceBindings.add(
new PainlessContextInstanceBindingInfo(
randomAlphaOfLength(randomIntBetween(4, 10)),
randomAlphaOfLength(randomIntBetween(4, 10)),
randomAlphaOfLength(randomIntBetween(4, 10)),
parameters
)
);
}
return new PainlessContextInfo(randomAlphaOfLength(20), classes, importedMethods, classBindings, instanceBindings);
}
@Override
protected PainlessContextInfo mutateInstance(PainlessContextInfo instance) {
return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
}
@Override
protected Writeable.Reader<PainlessContextInfo> instanceReader() {
return PainlessContextInfo::new;
}
}
| ContextInfoTests |
java | apache__flink | flink-datastream-api/src/main/java/org/apache/flink/datastream/api/stream/GlobalStream.java | {
"start": 3379,
"end": 3547
} | interface ____<T>
extends GlobalStream<T>, ProcessConfigurable<ProcessConfigurableAndGlobalStream<T>> {}
/**
* This | ProcessConfigurableAndGlobalStream |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCommandFactory.java | {
"start": 3301,
"end": 3522
} | class ____ {
public static void registerCommands(CommandFactory factory) {
factory.addClass(TestCommand1.class, "tc1");
factory.addClass(TestCommand2.class, "tc2", "tc2.1");
}
}
static | TestRegistrar |
java | jhy__jsoup | src/main/java/org/jsoup/select/Evaluator.java | {
"start": 29728,
"end": 30680
} | class ____ instead.");
}
}
@Override
public boolean matches(Element root, Element element) {
if (element instanceof PseudoTextElement)
return true;
List<TextNode> textNodes = element.textNodes();
for (TextNode textNode : textNodes) {
PseudoTextElement pel = new PseudoTextElement(
org.jsoup.parser.Tag.valueOf(element.tagName(), element.tag().namespace(), ParseSettings.preserveCase), element.baseUri(), element.attributes());
textNode.replaceWith(pel);
pel.appendChild(textNode);
}
return false;
}
@Override protected int cost() {
return -1; // forces first evaluation, which prepares the DOM for later evaluator matches
}
@Override
public String toString() {
return ":matchText";
}
}
}
| TextNode |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/SourceLoader.java | {
"start": 16068,
"end": 20672
} | class ____ implements SourceLoader {
final SourceLoader sourceLoader;
final SyntheticVectorsLoader patchLoader;
SyntheticVectors(@Nullable SourceFilter sourceFilter, SyntheticVectorsLoader patchLoader) {
this.sourceLoader = sourceFilter == null ? FROM_STORED_SOURCE : new Stored(sourceFilter);
this.patchLoader = patchLoader;
}
@Override
public boolean reordersFieldValues() {
return false;
}
@Override
public Set<String> requiredStoredFields() {
return sourceLoader.requiredStoredFields();
}
@Override
public Leaf leaf(LeafReader reader, int[] docIdsInLeaf) throws IOException {
var sourceLeaf = sourceLoader.leaf(reader, docIdsInLeaf);
var patchLeaf = patchLoader.leaf(reader.getContext());
return new Leaf() {
@Override
public Source source(LeafStoredFieldLoader storedFields, int docId) throws IOException {
Source source = sourceLeaf.source(storedFields, docId);
if (patchLeaf == null) {
return source;
}
List<SyntheticVectorPatch> patches = new ArrayList<>();
patchLeaf.load(docId, patches);
if (patches.size() == 0) {
return source;
}
return applySyntheticVectors(source, patches);
}
@Override
public void write(LeafStoredFieldLoader storedFields, int docId, XContentBuilder b) throws IOException {
throw new IllegalStateException("This operation is not allowed in the current context");
}
};
}
}
/**
* Applies a list of {@link SyntheticVectorPatch} instances to the given {@link Source}.
*
* @param originalSource the original source object
* @param patches the list of patches to apply
* @return a new {@link Source} with the patches applied
*/
static Source applySyntheticVectors(Source originalSource, List<SyntheticVectorPatch> patches) {
Map<String, Object> newMap = originalSource.source();
// Make sure we have a mutable map, empty implies `Map.of()`
if (newMap.isEmpty()) {
newMap = new LinkedHashMap<>();
}
applyPatches("", newMap, patches);
return Source.fromMap(newMap, originalSource.sourceContentType());
}
/**
* Recursively applies synthetic vector patches to a nested map.
*
* @param rootPath the current root path for nested structures
* @param map the map to apply patches to
* @param patches the list of patches to apply
*/
private static void applyPatches(String rootPath, Map<String, Object> map, List<SyntheticVectorPatch> patches) {
for (SyntheticVectorPatch patch : patches) {
if (patch instanceof LeafSyntheticVectorPath leaf) {
String key = extractRelativePath(rootPath, leaf.fullPath());
XContentMapValues.insertValue(key, map, leaf.value(), false);
} else if (patch instanceof NestedSyntheticVectorPath nested) {
String nestedPath = extractRelativePath(rootPath, nested.fullPath());
List<Map<?, ?>> nestedMaps = XContentMapValues.extractNestedSources(nestedPath, map);
for (SyntheticVectorPatch childPatch : nested.children()) {
if (childPatch instanceof NestedOffsetSyntheticVectorPath offsetPatch) {
Map<String, Object> nestedMap = XContentMapValues.nodeMapValue(nestedMaps.get(offsetPatch.offset()), nestedPath);
applyPatches(nested.fullPath(), nestedMap, offsetPatch.children());
} else {
throw new IllegalStateException(
"Unexpected child patch type of " + patch.getClass().getSimpleName() + " in nested structure."
);
}
}
} else {
throw new IllegalStateException("Unknown patch type: " + patch.getClass().getSimpleName());
}
}
}
private static String extractRelativePath(String rootPath, String fullPath) {
return rootPath.isEmpty() ? fullPath : fullPath.substring(rootPath.length() + 1);
}
/**
* Represents a patch to be applied to a source structure.
*/
sealed | SyntheticVectors |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/metamodel/mapping/internal/ToOneAttributeMapping.java | {
"start": 54635,
"end": 54779
} | class ____ {
...
@ManyToOne(fetch = FetchType.EAGER)
private EntityB entityB;
}
@Entity(name = "EntityB")
public static | EntityA |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/checkpointing/UnalignedCheckpointRescaleITCase.java | {
"start": 33790,
"end": 35118
} | class ____ extends RichMapFunction<Long, Long>
implements CheckpointedFunction {
private static final long serialVersionUID = -1098571965968341646L;
private final LongCounter numInputCounter = new LongCounter();
private ListState<Long> state;
@Override
public void open(OpenContext openContext) throws Exception {
super.open(openContext);
getRuntimeContext().addAccumulator(NUM_INPUTS, numInputCounter);
}
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
ListStateDescriptor<Long> descriptor =
new ListStateDescriptor<>("num-inputs", Types.LONG);
state = context.getOperatorStateStore().getListState(descriptor);
for (Long numInputs : state.get()) {
numInputCounter.add(numInputs);
}
}
@Override
public void snapshotState(FunctionSnapshotContext context) throws Exception {
state.update(Collections.singletonList(numInputCounter.getLocalValue()));
}
@Override
public Long map(Long value) throws Exception {
numInputCounter.add(1L);
return checkHeader(value);
}
}
private static | InputCountFunction |
java | micronaut-projects__micronaut-core | benchmarks/src/jmh/java/io/micronaut/http/server/stack/FullHttpStackBenchmark.java | {
"start": 3179,
"end": 6331
} | class ____ {
@Param({"MICRONAUT"/*, "PURE_NETTY"*/})
StackFactory stack = StackFactory.MICRONAUT;
AutoCloseable ctx;
EmbeddedChannel channel;
ByteBuf requestBytes;
ByteBuf responseBytes;
@Setup
public void setUp() {
if (checkFtlThread && !(Thread.currentThread() instanceof FastThreadLocalThread)) {
throw new IllegalStateException("Should run on a netty FTL thread");
}
Stack stack = this.stack.openChannel();
ctx = stack.closeable;
channel = stack.serverChannel;
channel.freezeTime();
EmbeddedChannel clientChannel = new EmbeddedChannel();
clientChannel.pipeline().addLast(new HttpClientCodec());
clientChannel.pipeline().addLast(new HttpObjectAggregator(1000));
FullHttpRequest request = new DefaultFullHttpRequest(
HttpVersion.HTTP_1_1,
HttpMethod.POST,
"/search/find",
Unpooled.wrappedBuffer("{\"haystack\": [\"xniomb\", \"seelzp\", \"nzogdq\", \"omblsg\", \"idgtlm\", \"ydonzo\"], \"needle\": \"idg\"}".getBytes(StandardCharsets.UTF_8))
);
request.headers().add(HttpHeaderNames.CONTENT_LENGTH, request.content().readableBytes());
request.headers().add(HttpHeaderNames.CONTENT_TYPE, HttpHeaderValues.APPLICATION_JSON);
request.headers().add(HttpHeaderNames.ACCEPT, HttpHeaderValues.APPLICATION_JSON);
clientChannel.writeOutbound(request);
clientChannel.flushOutbound();
requestBytes = NettyUtil.readAllOutboundContiguous(clientChannel);
// sanity check: run req/resp once and see that the response is correct
responseBytes = exchange();
clientChannel.writeInbound(responseBytes.retainedDuplicate());
FullHttpResponse response = clientChannel.readInbound();
//System.out.println(response);
//System.out.println(response.content().toString(StandardCharsets.UTF_8));
Assertions.assertEquals(HttpResponseStatus.OK, response.status());
Assertions.assertEquals("application/json", response.headers().get(HttpHeaderNames.CONTENT_TYPE));
Assertions.assertNull(response.headers().get(HttpHeaderNames.CONNECTION));
String expectedResponseBody = "{\"listIndex\":4,\"stringIndex\":0}";
Assertions.assertEquals(expectedResponseBody, response.content().toString(StandardCharsets.UTF_8));
Assertions.assertEquals(expectedResponseBody.length(), response.headers().getInt(HttpHeaderNames.CONTENT_LENGTH));
response.release();
}
private ByteBuf exchange() {
channel.writeInbound(requestBytes.retainedDuplicate());
channel.runPendingTasks();
return NettyUtil.readAllOutboundComposite(channel);
}
@TearDown
public void tearDown() throws Exception {
ctx.close();
requestBytes.release();
responseBytes.release();
}
}
public | Holder |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/subquery/SubqueryMultipleLeftJoinsTest.java | {
"start": 1320,
"end": 4743
} | class ____ {
private static final long ENTITY_WITH_ASSOCIATION_ID_1 = 1L;
private static final long ENTITY_WITH_ASSOCIATION_ID_2 = 2L;
private static final long ANOTHER_ENTITY_ID_1 = 3L;
private static final long ANOTHER_ENTITY_ID_2 = 4L;
private static final long AGAIN_ANOTHER_ENTITY_ID = 5L;
@BeforeAll
public void setUp(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final AgainAnotherEntity againAnotherEntity = new AgainAnotherEntity(
AGAIN_ANOTHER_ENTITY_ID,
"again"
);
session.persist( againAnotherEntity );
final AnotherEntity anotherEntity1 = new AnotherEntity(
ANOTHER_ENTITY_ID_1,
"another 1",
true,
null
);
session.persist( anotherEntity1 );
final AnotherEntity anotherEntity2 = new AnotherEntity(
ANOTHER_ENTITY_ID_2,
"another 2",
false,
againAnotherEntity
);
session.persist( anotherEntity2 );
session.persist( new MyEntity( ENTITY_WITH_ASSOCIATION_ID_1, "without association", anotherEntity1 ) );
session.persist( new MyEntity( ENTITY_WITH_ASSOCIATION_ID_2, "with association", anotherEntity2 ) );
session.persist( new MyUnrelatedEntity( ENTITY_WITH_ASSOCIATION_ID_1, "unrelated 1" ) );
session.persist( new MyUnrelatedEntity( ENTITY_WITH_ASSOCIATION_ID_2, "unrelated 2" ) );
} );
}
@AfterAll
public void tearDown(SessionFactoryScope scope) {
scope.inTransaction( session -> {
session.createMutationQuery( "delete from MyUnrelatedEntity" ).executeUpdate();
session.createMutationQuery( "delete from MyEntity" ).executeUpdate();
session.createMutationQuery( "delete from AnotherEntity" ).executeUpdate();
session.createMutationQuery( "delete from AgainAnotherEntity" ).executeUpdate();
} );
}
@Test
public void subqueryWithLeftJoinsCriteriaApi(SessionFactoryScope sessionFactoryScope) {
sessionFactoryScope.inTransaction( session -> {
final CriteriaBuilder cb = session.getCriteriaBuilder();
final CriteriaQuery<MyUnrelatedEntity> cq = cb.createQuery( MyUnrelatedEntity.class );
final Root<MyUnrelatedEntity> root = cq.from( MyUnrelatedEntity.class );
final Subquery<Long> subquery = cq.subquery( Long.class );
final Root<MyEntity> myEntityRoot = subquery.from( MyEntity.class );
final Join<MyEntity, AnotherEntity> anotherEntityJoin = myEntityRoot.join(
"otherEntity",
JoinType.LEFT
);
final Join<AnotherEntity, AgainAnotherEntity> againAnotherEntityJoin = anotherEntityJoin.join(
"otherEntity",
JoinType.LEFT
);
subquery.select( myEntityRoot.get( "id" ) ).where( cb.and(
cb.equal( anotherEntityJoin.get( "aString" ), "another 1" ),
cb.or(
cb.and(
cb.equal( anotherEntityJoin.get( "aBoolean" ), false ),
cb.equal( againAnotherEntityJoin.get( "aString" ), "again" )
),
cb.and(
// This should be true since "another 1" has no association and the join is LEFT
cb.equal( anotherEntityJoin.get( "aBoolean" ), true ),
cb.isNull( againAnotherEntityJoin.get( "aString" ) )
)
)
) );
final MyUnrelatedEntity result = session.createQuery(
cq.select( root ).where( root.get( "id" ).in( subquery ) )
).getSingleResult();
assertThat( result.getId() ).isEqualTo( 1L );
assertThat( result.getaString() ).isEqualTo( "unrelated 1" );
} );
}
@Entity( name = "MyUnrelatedEntity" )
public static | SubqueryMultipleLeftJoinsTest |
java | google__guava | android/guava/src/com/google/common/collect/ForwardingSetMultimap.java | {
"start": 1487,
"end": 2280
} | class ____<K extends @Nullable Object, V extends @Nullable Object>
extends ForwardingMultimap<K, V> implements SetMultimap<K, V> {
/** Constructor for use by subclasses. */
public ForwardingSetMultimap() {}
@Override
protected abstract SetMultimap<K, V> delegate();
@Override
public Set<Entry<K, V>> entries() {
return delegate().entries();
}
@Override
public Set<V> get(@ParametricNullness K key) {
return delegate().get(key);
}
@CanIgnoreReturnValue
@Override
public Set<V> removeAll(@Nullable Object key) {
return delegate().removeAll(key);
}
@CanIgnoreReturnValue
@Override
public Set<V> replaceValues(@ParametricNullness K key, Iterable<? extends V> values) {
return delegate().replaceValues(key, values);
}
}
| ForwardingSetMultimap |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/nullness/RedundantNullCheckTest.java | {
"start": 14995,
"end": 15575
} | class ____ {
void process() {
if (AnnotatedLibNullable.getString() == null) {
/* This check should NOT be redundant */
}
}
}
""")
.doTest();
}
@Test
public void positive_methodCall_fromNonAnnotatedLib_returnNonNull_inNullMarkedScope() {
compilationHelper
.addSourceLines(
"NonAnnotatedLibNonNull.java", // Not @NullMarked
"""
package mylib;
import org.jspecify.annotations.NonNull;
public | Test |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-jackson/deployment/src/test/java/io/quarkus/resteasy/reactive/jackson/deployment/test/response/RestResponseTest.java | {
"start": 410,
"end": 1338
} | class ____ {
@RegisterExtension
static QuarkusUnitTest test = new QuarkusUnitTest()
.setArchiveProducer(new Supplier<>() {
@Override
public JavaArchive get() {
return ShrinkWrap.create(JavaArchive.class)
.addClasses(RestResponseResource.class, JsonSomething.class);
}
});
@Test
public void test() {
RestAssured.get("/json")
.then().statusCode(200)
.and().body(Matchers.equalTo("{\"firstName\":\"Stef\",\"lastName\":\"Epardaud\"}"))
.and().contentType("application/json");
RestAssured.get("/rest-response-json")
.then().statusCode(200)
.and().body(Matchers.equalTo("{\"firstName\":\"Stef\",\"lastName\":\"Epardaud\"}"))
.and().contentType("application/json");
}
}
| RestResponseTest |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/asm/ClassVisitor.java | {
"start": 3676,
"end": 3786
} | class ____ to which this visitor must delegate method calls. May be {@literal null}.
*
* @return the | visitor |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/observers/EmptyCompletableObserverTest.java | {
"start": 783,
"end": 1026
} | class ____ extends RxJavaTest {
@Test
public void defaultShouldReportNoCustomOnError() {
EmptyCompletableObserver o = new EmptyCompletableObserver();
assertFalse(o.hasCustomOnError());
}
}
| EmptyCompletableObserverTest |
java | apache__maven | compat/maven-resolver-provider/src/main/java/org/apache/maven/repository/internal/RemoteSnapshotMetadata.java | {
"start": 1592,
"end": 5671
} | class ____ extends MavenSnapshotMetadata {
public static final String DEFAULT_SNAPSHOT_TIMESTAMP_FORMAT = "yyyyMMdd.HHmmss";
public static final TimeZone DEFAULT_SNAPSHOT_TIME_ZONE = TimeZone.getTimeZone("Etc/UTC");
private final Map<String, SnapshotVersion> versions = new LinkedHashMap<>();
private final Integer buildNumber;
RemoteSnapshotMetadata(Artifact artifact, Date timestamp, Integer buildNumber) {
super(createRepositoryMetadata(artifact), null, timestamp);
this.buildNumber = buildNumber;
}
private RemoteSnapshotMetadata(Metadata metadata, Path path, Date timestamp, Integer buildNumber) {
super(metadata, path, timestamp);
this.buildNumber = buildNumber;
}
@Deprecated
@Override
public MavenMetadata setFile(File file) {
return new RemoteSnapshotMetadata(metadata, file.toPath(), timestamp, buildNumber);
}
@Override
public MavenMetadata setPath(Path path) {
return new RemoteSnapshotMetadata(metadata, path, timestamp, buildNumber);
}
public String getExpandedVersion(Artifact artifact) {
String key = getKey(artifact.getClassifier(), artifact.getExtension());
return versions.get(key).getVersion();
}
@Override
protected void merge(Metadata recessive) {
Snapshot snapshot;
String lastUpdated;
if (metadata.getVersioning() == null) {
DateFormat utcDateFormatter = new SimpleDateFormat(DEFAULT_SNAPSHOT_TIMESTAMP_FORMAT);
utcDateFormatter.setCalendar(new GregorianCalendar());
utcDateFormatter.setTimeZone(DEFAULT_SNAPSHOT_TIME_ZONE);
snapshot = new Snapshot();
snapshot.setBuildNumber(buildNumber != null ? buildNumber : getBuildNumber(recessive) + 1);
snapshot.setTimestamp(utcDateFormatter.format(timestamp));
Versioning versioning = new Versioning();
versioning.setSnapshot(snapshot);
versioning.setLastUpdatedTimestamp(timestamp);
lastUpdated = versioning.getLastUpdated();
metadata.setVersioning(versioning);
} else {
snapshot = metadata.getVersioning().getSnapshot();
lastUpdated = metadata.getVersioning().getLastUpdated();
}
for (Artifact artifact : artifacts) {
String version = artifact.getVersion();
if (version.endsWith(SNAPSHOT)) {
String qualifier = snapshot.getTimestamp() + '-' + snapshot.getBuildNumber();
version = version.substring(0, version.length() - SNAPSHOT.length()) + qualifier;
}
SnapshotVersion sv = new SnapshotVersion();
sv.setClassifier(artifact.getClassifier());
sv.setExtension(artifact.getExtension());
sv.setVersion(version);
sv.setUpdated(lastUpdated);
versions.put(getKey(sv.getClassifier(), sv.getExtension()), sv);
}
artifacts.clear();
Versioning versioning = recessive.getVersioning();
if (versioning != null) {
for (SnapshotVersion sv : versioning.getSnapshotVersions()) {
String key = getKey(sv.getClassifier(), sv.getExtension());
if (!versions.containsKey(key)) {
versions.put(key, sv);
}
}
}
metadata.getVersioning().setSnapshotVersions(new ArrayList<>(versions.values()));
// just carry-on as-is
if (!recessive.getPlugins().isEmpty()) {
metadata.setPlugins(new ArrayList<>(recessive.getPlugins()));
}
}
private static int getBuildNumber(Metadata metadata) {
int number = 0;
Versioning versioning = metadata.getVersioning();
if (versioning != null) {
Snapshot snapshot = versioning.getSnapshot();
if (snapshot != null && snapshot.getBuildNumber() > 0) {
number = snapshot.getBuildNumber();
}
}
return number;
}
}
| RemoteSnapshotMetadata |
java | apache__commons-lang | src/test/java/org/apache/commons/lang3/EnumUtilsTest.java | {
"start": 1949,
"end": 33221
} | class ____ extends AbstractLangTest {
private void assertArrayEquals(final long[] actual, final long... expected) {
Assertions.assertArrayEquals(expected, actual);
}
@Test
void testConstructable() {
// enforce public constructor
new EnumUtils();
}
@Test
void testGenerateBitVector() {
assertEquals(0L, EnumUtils.generateBitVector(Traffic.class, EnumSet.noneOf(Traffic.class)));
assertEquals(1L, EnumUtils.generateBitVector(Traffic.class, EnumSet.of(Traffic.RED)));
assertEquals(2L, EnumUtils.generateBitVector(Traffic.class, EnumSet.of(Traffic.AMBER)));
assertEquals(4L, EnumUtils.generateBitVector(Traffic.class, EnumSet.of(Traffic.GREEN)));
assertEquals(3L, EnumUtils.generateBitVector(Traffic.class, EnumSet.of(Traffic.RED, Traffic.AMBER)));
assertEquals(5L, EnumUtils.generateBitVector(Traffic.class, EnumSet.of(Traffic.RED, Traffic.GREEN)));
assertEquals(6L, EnumUtils.generateBitVector(Traffic.class, EnumSet.of(Traffic.AMBER, Traffic.GREEN)));
assertEquals(7L,
EnumUtils.generateBitVector(Traffic.class, EnumSet.of(Traffic.RED, Traffic.AMBER, Traffic.GREEN)));
// 64 values Enum (to test whether no int<->long jdk conversion issue exists)
assertEquals(1L << 31, EnumUtils.generateBitVector(Enum64.class, EnumSet.of(Enum64.A31)));
assertEquals(1L << 32, EnumUtils.generateBitVector(Enum64.class, EnumSet.of(Enum64.A32)));
assertEquals(1L << 63, EnumUtils.generateBitVector(Enum64.class, EnumSet.of(Enum64.A63)));
assertEquals(Long.MIN_VALUE, EnumUtils.generateBitVector(Enum64.class, EnumSet.of(Enum64.A63)));
}
@Test
void testGenerateBitVector_longClass() {
assertIllegalArgumentException(
() -> EnumUtils.generateBitVector(TooMany.class, EnumSet.of(TooMany.A1)));
}
@Test
void testGenerateBitVector_longClassWithArray() {
assertIllegalArgumentException(() -> EnumUtils.generateBitVector(TooMany.class, TooMany.A1));
}
@SuppressWarnings("unchecked")
@Test
void testGenerateBitVector_nonEnumClass() {
@SuppressWarnings("rawtypes")
final Class rawType = Object.class;
@SuppressWarnings("rawtypes")
final List rawList = new ArrayList();
assertIllegalArgumentException(() -> EnumUtils.generateBitVector(rawType, rawList));
}
@SuppressWarnings("unchecked")
@Test
void testGenerateBitVector_nonEnumClassWithArray() {
@SuppressWarnings("rawtypes")
final Class rawType = Object.class;
assertIllegalArgumentException(() -> EnumUtils.generateBitVector(rawType));
}
@Test
void testGenerateBitVector_nullArray() {
assertNullPointerException(() -> EnumUtils.generateBitVector(Traffic.class, (Traffic[]) null));
}
@Test
void testGenerateBitVector_nullArrayElement() {
assertIllegalArgumentException(
() -> EnumUtils.generateBitVector(Traffic.class, Traffic.RED, null));
}
@Test
void testGenerateBitVector_nullClass() {
assertNullPointerException(() -> EnumUtils.generateBitVector(null, EnumSet.of(Traffic.RED)));
}
@Test
void testGenerateBitVector_nullClassWithArray() {
assertNullPointerException(() -> EnumUtils.generateBitVector(null, Traffic.RED));
}
@Test
void testGenerateBitVector_nullElement() {
assertNullPointerException(
() -> EnumUtils.generateBitVector(Traffic.class, Arrays.asList(Traffic.RED, null)));
}
@Test
void testGenerateBitVector_nullIterable() {
assertNullPointerException(
() -> EnumUtils.generateBitVector(Traffic.class, (Iterable<Traffic>) null));
}
@Test
void testGenerateBitVectorFromArray() {
assertEquals(0L, EnumUtils.generateBitVector(Traffic.class));
assertEquals(1L, EnumUtils.generateBitVector(Traffic.class, Traffic.RED));
assertEquals(2L, EnumUtils.generateBitVector(Traffic.class, Traffic.AMBER));
assertEquals(4L, EnumUtils.generateBitVector(Traffic.class, Traffic.GREEN));
assertEquals(3L, EnumUtils.generateBitVector(Traffic.class, Traffic.RED, Traffic.AMBER));
assertEquals(5L, EnumUtils.generateBitVector(Traffic.class, Traffic.RED, Traffic.GREEN));
assertEquals(6L, EnumUtils.generateBitVector(Traffic.class, Traffic.AMBER, Traffic.GREEN));
assertEquals(7L, EnumUtils.generateBitVector(Traffic.class, Traffic.RED, Traffic.AMBER, Traffic.GREEN));
// gracefully handles duplicates:
assertEquals(7L,
EnumUtils.generateBitVector(Traffic.class, Traffic.RED, Traffic.AMBER, Traffic.GREEN, Traffic.GREEN));
// 64 values Enum (to test whether no int<->long jdk conversion issue exists)
assertEquals(1L << 31, EnumUtils.generateBitVector(Enum64.class, Enum64.A31));
assertEquals(1L << 32, EnumUtils.generateBitVector(Enum64.class, Enum64.A32));
assertEquals(1L << 63, EnumUtils.generateBitVector(Enum64.class, Enum64.A63));
assertEquals(Long.MIN_VALUE, EnumUtils.generateBitVector(Enum64.class, Enum64.A63));
}
@Test
void testGenerateBitVectors() {
assertArrayEquals(EnumUtils.generateBitVectors(Traffic.class, EnumSet.noneOf(Traffic.class)), 0L);
assertArrayEquals(EnumUtils.generateBitVectors(Traffic.class, EnumSet.of(Traffic.RED)), 1L);
assertArrayEquals(EnumUtils.generateBitVectors(Traffic.class, EnumSet.of(Traffic.AMBER)), 2L);
assertArrayEquals(EnumUtils.generateBitVectors(Traffic.class, EnumSet.of(Traffic.GREEN)), 4L);
assertArrayEquals(EnumUtils.generateBitVectors(Traffic.class, EnumSet.of(Traffic.RED, Traffic.AMBER)), 3L);
assertArrayEquals(EnumUtils.generateBitVectors(Traffic.class, EnumSet.of(Traffic.RED, Traffic.GREEN)), 5L);
assertArrayEquals(EnumUtils.generateBitVectors(Traffic.class, EnumSet.of(Traffic.AMBER, Traffic.GREEN)), 6L);
assertArrayEquals(
EnumUtils.generateBitVectors(Traffic.class, EnumSet.of(Traffic.RED, Traffic.AMBER, Traffic.GREEN)), 7L);
// 64 values Enum (to test whether no int<->long jdk conversion issue exists)
assertArrayEquals(EnumUtils.generateBitVectors(Enum64.class, EnumSet.of(Enum64.A31)), 1L << 31);
assertArrayEquals(EnumUtils.generateBitVectors(Enum64.class, EnumSet.of(Enum64.A32)), 1L << 32);
assertArrayEquals(EnumUtils.generateBitVectors(Enum64.class, EnumSet.of(Enum64.A63)), 1L << 63);
assertArrayEquals(EnumUtils.generateBitVectors(Enum64.class, EnumSet.of(Enum64.A63)), Long.MIN_VALUE);
// More than 64 values Enum
assertArrayEquals(EnumUtils.generateBitVectors(TooMany.class, EnumSet.of(TooMany.M2)), 1L, 0L);
assertArrayEquals(EnumUtils.generateBitVectors(TooMany.class, EnumSet.of(TooMany.L2, TooMany.M2)), 1L,
1L << 63);
}
@SuppressWarnings("unchecked")
@Test
void testGenerateBitVectors_nonEnumClass() {
@SuppressWarnings("rawtypes")
final Class rawType = Object.class;
@SuppressWarnings("rawtypes")
final List rawList = new ArrayList();
assertIllegalArgumentException(() -> EnumUtils.generateBitVectors(rawType, rawList));
}
@SuppressWarnings("unchecked")
@Test
void testGenerateBitVectors_nonEnumClassWithArray() {
@SuppressWarnings("rawtypes")
final Class rawType = Object.class;
assertIllegalArgumentException(() -> EnumUtils.generateBitVectors(rawType));
}
@Test
void testGenerateBitVectors_nullArray() {
assertNullPointerException(() -> EnumUtils.generateBitVectors(Traffic.class, (Traffic[]) null));
}
@Test
void testGenerateBitVectors_nullArrayElement() {
assertIllegalArgumentException(
() -> EnumUtils.generateBitVectors(Traffic.class, Traffic.RED, null));
}
@Test
void testGenerateBitVectors_nullClass() {
assertNullPointerException(() -> EnumUtils.generateBitVectors(null, EnumSet.of(Traffic.RED)));
}
@Test
void testGenerateBitVectors_nullClassWithArray() {
assertNullPointerException(() -> EnumUtils.generateBitVectors(null, Traffic.RED));
}
@Test
void testGenerateBitVectors_nullElement() {
assertNullPointerException(
() -> EnumUtils.generateBitVectors(Traffic.class, Arrays.asList(Traffic.RED, null)));
}
@Test
void testGenerateBitVectors_nullIterable() {
assertNullPointerException(() -> EnumUtils.generateBitVectors(null, (Iterable<Traffic>) null));
}
@Test
void testGenerateBitVectorsFromArray() {
assertArrayEquals(EnumUtils.generateBitVectors(Traffic.class), 0L);
assertArrayEquals(EnumUtils.generateBitVectors(Traffic.class, Traffic.RED), 1L);
assertArrayEquals(EnumUtils.generateBitVectors(Traffic.class, Traffic.AMBER), 2L);
assertArrayEquals(EnumUtils.generateBitVectors(Traffic.class, Traffic.GREEN), 4L);
assertArrayEquals(EnumUtils.generateBitVectors(Traffic.class, Traffic.RED, Traffic.AMBER), 3L);
assertArrayEquals(EnumUtils.generateBitVectors(Traffic.class, Traffic.RED, Traffic.GREEN), 5L);
assertArrayEquals(EnumUtils.generateBitVectors(Traffic.class, Traffic.AMBER, Traffic.GREEN), 6L);
assertArrayEquals(EnumUtils.generateBitVectors(Traffic.class, Traffic.RED, Traffic.AMBER, Traffic.GREEN), 7L);
// gracefully handles duplicates:
assertArrayEquals(
EnumUtils.generateBitVectors(Traffic.class, Traffic.RED, Traffic.AMBER, Traffic.GREEN, Traffic.GREEN), 7L);
// 64 values Enum (to test whether no int<->long jdk conversion issue exists)
assertArrayEquals(EnumUtils.generateBitVectors(Enum64.class, Enum64.A31), 1L << 31);
assertArrayEquals(EnumUtils.generateBitVectors(Enum64.class, Enum64.A32), 1L << 32);
assertArrayEquals(EnumUtils.generateBitVectors(Enum64.class, Enum64.A63), 1L << 63);
assertArrayEquals(EnumUtils.generateBitVectors(Enum64.class, Enum64.A63), Long.MIN_VALUE);
// More than 64 values Enum
assertArrayEquals(EnumUtils.generateBitVectors(TooMany.class, TooMany.M2), 1L, 0L);
assertArrayEquals(EnumUtils.generateBitVectors(TooMany.class, TooMany.L2, TooMany.M2), 1L, 1L << 63);
}
@Test
void testGetEnum() {
assertEquals(Traffic.RED, EnumUtils.getEnum(Traffic.class, "RED"));
assertEquals(Traffic.AMBER, EnumUtils.getEnum(Traffic.class, "AMBER"));
assertEquals(Traffic.GREEN, EnumUtils.getEnum(Traffic.class, "GREEN"));
assertNull(EnumUtils.getEnum(Traffic.class, "PURPLE"));
assertNull(EnumUtils.getEnum(Traffic.class, null));
}
@Test
void testGetEnum_defaultEnum() {
assertEquals(Traffic.RED, EnumUtils.getEnum(Traffic.class, "RED", Traffic.AMBER));
assertEquals(Traffic.AMBER, EnumUtils.getEnum(Traffic.class, "AMBER", Traffic.GREEN));
assertEquals(Traffic.GREEN, EnumUtils.getEnum(Traffic.class, "GREEN", Traffic.RED));
assertEquals(Traffic.AMBER, EnumUtils.getEnum(Traffic.class, "PURPLE", Traffic.AMBER));
assertEquals(Traffic.GREEN, EnumUtils.getEnum(Traffic.class, "PURPLE", Traffic.GREEN));
assertEquals(Traffic.RED, EnumUtils.getEnum(Traffic.class, "PURPLE", Traffic.RED));
assertEquals(Traffic.AMBER, EnumUtils.getEnum(Traffic.class, null, Traffic.AMBER));
assertEquals(Traffic.GREEN, EnumUtils.getEnum(Traffic.class, null, Traffic.GREEN));
assertEquals(Traffic.RED, EnumUtils.getEnum(Traffic.class, null, Traffic.RED));
assertNull(EnumUtils.getEnum(Traffic.class, "PURPLE", null));
assertEquals(Traffic.AMBER, EnumUtils.getEnum(null, "RED", Traffic.AMBER));
}
/**
* Tests raw type.
*/
@SuppressWarnings("unchecked")
@Test
void testGetEnum_nonEnumClass() {
@SuppressWarnings("rawtypes")
final Class rawType = Object.class;
assertNull(EnumUtils.getEnum(rawType, "rawType"));
}
@Test
void testGetEnum_nullClass() {
assertNull(EnumUtils.getEnum((Class<Traffic>) null, "PURPLE"));
}
@Test
void testGetEnumIgnoreCase() {
assertEquals(Traffic.RED, EnumUtils.getEnumIgnoreCase(Traffic.class, "red"));
assertEquals(Traffic.AMBER, EnumUtils.getEnumIgnoreCase(Traffic.class, "Amber"));
assertEquals(Traffic.GREEN, EnumUtils.getEnumIgnoreCase(Traffic.class, "grEEn"));
assertNull(EnumUtils.getEnumIgnoreCase(Traffic.class, "purple"));
assertNull(EnumUtils.getEnumIgnoreCase(Traffic.class, null));
}
@Test
void testGetEnumIgnoreCase_defaultEnum() {
assertEquals(Traffic.RED, EnumUtils.getEnumIgnoreCase(Traffic.class, "red", Traffic.AMBER));
assertEquals(Traffic.AMBER, EnumUtils.getEnumIgnoreCase(Traffic.class, "Amber", Traffic.GREEN));
assertEquals(Traffic.GREEN, EnumUtils.getEnumIgnoreCase(Traffic.class, "grEEn", Traffic.RED));
assertEquals(Traffic.AMBER, EnumUtils.getEnumIgnoreCase(Traffic.class, "PURPLE", Traffic.AMBER));
assertEquals(Traffic.GREEN, EnumUtils.getEnumIgnoreCase(Traffic.class, "purple", Traffic.GREEN));
assertEquals(Traffic.RED, EnumUtils.getEnumIgnoreCase(Traffic.class, "pUrPlE", Traffic.RED));
assertEquals(Traffic.AMBER, EnumUtils.getEnumIgnoreCase(Traffic.class, null, Traffic.AMBER));
assertEquals(Traffic.GREEN, EnumUtils.getEnumIgnoreCase(Traffic.class, null, Traffic.GREEN));
assertEquals(Traffic.RED, EnumUtils.getEnumIgnoreCase(Traffic.class, null, Traffic.RED));
assertNull(EnumUtils.getEnumIgnoreCase(Traffic.class, "PURPLE", null));
assertNull(EnumUtils.getEnumIgnoreCase(null, "PURPLE", null));
}
/**
* Tests raw type.
*/
@SuppressWarnings("unchecked")
@Test
void testGetEnumIgnoreCase_nonEnumClass() {
@SuppressWarnings("rawtypes")
final Class rawType = Object.class;
assertNull(EnumUtils.getEnumIgnoreCase(rawType, "rawType"));
}
@Test
void testGetEnumIgnoreCase_nullClass() {
assertNull(EnumUtils.getEnumIgnoreCase((Class<Traffic>) null, "PURPLE"));
}
@Test
void testGetEnumList() {
final List<Traffic> test = EnumUtils.getEnumList(Traffic.class);
assertEquals(3, test.size());
assertEquals(Traffic.RED, test.get(0));
assertEquals(Traffic.AMBER, test.get(1));
assertEquals(Traffic.GREEN, test.get(2));
}
@Test
void testGetEnumMap() {
final Map<String, Traffic> test = EnumUtils.getEnumMap(Traffic.class);
final Map<String, Traffic> expected = new HashMap<>();
expected.put("RED", Traffic.RED);
expected.put("AMBER", Traffic.AMBER);
expected.put("GREEN", Traffic.GREEN);
assertEquals(expected, test, "getEnumMap not created correctly");
assertEquals(3, test.size());
assertTrue(test.containsKey("RED"));
assertEquals(Traffic.RED, test.get("RED"));
assertTrue(test.containsKey("AMBER"));
assertEquals(Traffic.AMBER, test.get("AMBER"));
assertTrue(test.containsKey("GREEN"));
assertEquals(Traffic.GREEN, test.get("GREEN"));
assertFalse(test.containsKey("PURPLE"));
}
@Test
void testGetEnumMap_keyFunction() {
final Map<Integer, Month> test = EnumUtils.getEnumMap(Month.class, Month::getId);
final Map<Integer, Month> expected = new HashMap<>();
expected.put(1, Month.JAN);
expected.put(2, Month.FEB);
expected.put(3, Month.MAR);
expected.put(4, Month.APR);
expected.put(5, Month.MAY);
expected.put(6, Month.JUN);
expected.put(7, Month.JUL);
expected.put(8, Month.AUG);
expected.put(9, Month.SEP);
expected.put(10, Month.OCT);
expected.put(11, Month.NOV);
expected.put(12, Month.DEC);
assertEquals(expected, test, "getEnumMap not created correctly");
assertEquals(12, test.size());
assertFalse(test.containsKey(0));
assertTrue(test.containsKey(1));
assertEquals(Month.JAN, test.get(1));
assertTrue(test.containsKey(2));
assertEquals(Month.FEB, test.get(2));
assertTrue(test.containsKey(3));
assertEquals(Month.MAR, test.get(3));
assertTrue(test.containsKey(4));
assertEquals(Month.APR, test.get(4));
assertTrue(test.containsKey(5));
assertEquals(Month.MAY, test.get(5));
assertTrue(test.containsKey(6));
assertEquals(Month.JUN, test.get(6));
assertTrue(test.containsKey(7));
assertEquals(Month.JUL, test.get(7));
assertTrue(test.containsKey(8));
assertEquals(Month.AUG, test.get(8));
assertTrue(test.containsKey(9));
assertEquals(Month.SEP, test.get(9));
assertTrue(test.containsKey(10));
assertEquals(Month.OCT, test.get(10));
assertTrue(test.containsKey(11));
assertEquals(Month.NOV, test.get(11));
assertTrue(test.containsKey(12));
assertEquals(Month.DEC, test.get(12));
assertFalse(test.containsKey(13));
}
@Test
void testGetEnumSystemProperty() {
final String key = getClass().getName();
System.setProperty(key, Traffic.RED.toString());
try {
assertEquals(Traffic.RED, EnumUtils.getEnumSystemProperty(Traffic.class, key, null));
assertEquals(Traffic.RED, EnumUtils.getEnumSystemProperty(Traffic.class, "?", Traffic.RED));
assertEquals(Traffic.RED, EnumUtils.getEnumSystemProperty(null, null, Traffic.RED));
assertEquals(Traffic.RED, EnumUtils.getEnumSystemProperty(null, "?", Traffic.RED));
assertEquals(Traffic.RED, EnumUtils.getEnumSystemProperty(Traffic.class, null, Traffic.RED));
} finally {
System.getProperties().remove(key);
}
}
@Test
void testGetFirstEnumIgnoreCase_defaultEnum() {
final Function<Traffic2, String> f = Traffic2::getLabel;
assertEquals(Traffic2.RED, EnumUtils.getFirstEnumIgnoreCase(Traffic2.class, "***red***", f, Traffic2.AMBER));
assertEquals(Traffic2.AMBER, EnumUtils.getFirstEnumIgnoreCase(Traffic2.class, "**Amber**", f, Traffic2.GREEN));
assertEquals(Traffic2.GREEN, EnumUtils.getFirstEnumIgnoreCase(Traffic2.class, "*grEEn*", f, Traffic2.RED));
assertEquals(Traffic2.AMBER, EnumUtils.getFirstEnumIgnoreCase(Traffic2.class, "PURPLE", f, Traffic2.AMBER));
assertEquals(Traffic2.GREEN, EnumUtils.getFirstEnumIgnoreCase(Traffic2.class, "purple", f, Traffic2.GREEN));
assertEquals(Traffic2.RED, EnumUtils.getFirstEnumIgnoreCase(Traffic2.class, "pUrPlE", f, Traffic2.RED));
assertEquals(Traffic2.AMBER, EnumUtils.getFirstEnumIgnoreCase(Traffic2.class, null, f, Traffic2.AMBER));
assertEquals(Traffic2.GREEN, EnumUtils.getFirstEnumIgnoreCase(Traffic2.class, null, f, Traffic2.GREEN));
assertEquals(Traffic2.RED, EnumUtils.getFirstEnumIgnoreCase(Traffic2.class, null, f, Traffic2.RED));
assertNull(EnumUtils.getFirstEnumIgnoreCase(Traffic2.class, "PURPLE", f, null));
assertNull(EnumUtils.getFirstEnumIgnoreCase(null, "PURPLE", f, null));
}
@Test
void testGetFirstEnumToIntFunction() {
final ToIntFunction<Traffic2> f = Traffic2::getValue;
assertEquals(Traffic2.RED, EnumUtils.getFirstEnum(Traffic2.class, 1, f, Traffic2.AMBER));
assertEquals(Traffic2.AMBER, EnumUtils.getFirstEnum(Traffic2.class, 2, f, Traffic2.GREEN));
assertEquals(Traffic2.GREEN, EnumUtils.getFirstEnum(Traffic2.class, 3, f, Traffic2.RED));
assertEquals(Traffic2.AMBER, EnumUtils.getFirstEnum(Traffic2.class, 4, f, Traffic2.AMBER));
assertEquals(Traffic2.GREEN, EnumUtils.getFirstEnum(Traffic2.class, 5, f, Traffic2.GREEN));
assertEquals(Traffic2.RED, EnumUtils.getFirstEnum(Traffic2.class, 6, f, Traffic2.RED));
assertEquals(Traffic2.AMBER, EnumUtils.getFirstEnum(Traffic2.class, 0, f, Traffic2.AMBER));
assertEquals(Traffic2.GREEN, EnumUtils.getFirstEnum(Traffic2.class, -1, f, Traffic2.GREEN));
assertEquals(Traffic2.RED, EnumUtils.getFirstEnum(Traffic2.class, 0, f, Traffic2.RED));
assertNull(EnumUtils.getFirstEnum(Traffic2.class, 7, f, null));
// Edge cases for 1st argument
assertEquals(Traffic2.AMBER, EnumUtils.getFirstEnum(null, 1, f, Traffic2.AMBER));
assertEquals(Traffic2.AMBER, EnumUtils.getFirstEnum((Class) String.class, 1, f, Traffic2.AMBER));
}
@Test
void testIsValidEnum() {
assertTrue(EnumUtils.isValidEnum(Traffic.class, "RED"));
assertTrue(EnumUtils.isValidEnum(Traffic.class, "AMBER"));
assertTrue(EnumUtils.isValidEnum(Traffic.class, "GREEN"));
assertFalse(EnumUtils.isValidEnum(Traffic.class, "PURPLE"));
assertFalse(EnumUtils.isValidEnum(Traffic.class, null));
}
@Test
void testIsValidEnum_nullClass() {
assertFalse(EnumUtils.isValidEnum(null, "PURPLE"));
}
@Test
void testIsValidEnumIgnoreCase() {
assertTrue(EnumUtils.isValidEnumIgnoreCase(Traffic.class, "red"));
assertTrue(EnumUtils.isValidEnumIgnoreCase(Traffic.class, "Amber"));
assertTrue(EnumUtils.isValidEnumIgnoreCase(Traffic.class, "grEEn"));
assertFalse(EnumUtils.isValidEnumIgnoreCase(Traffic.class, "purple"));
assertFalse(EnumUtils.isValidEnumIgnoreCase(Traffic.class, null));
}
@Test
void testIsValidEnumIgnoreCase_nullClass() {
assertFalse(EnumUtils.isValidEnumIgnoreCase(null, "PURPLE"));
}
@Test
void testProcessBitVector() {
assertEquals(EnumSet.noneOf(Traffic.class), EnumUtils.processBitVector(Traffic.class, 0L));
assertEquals(EnumSet.of(Traffic.RED), EnumUtils.processBitVector(Traffic.class, 1L));
assertEquals(EnumSet.of(Traffic.AMBER), EnumUtils.processBitVector(Traffic.class, 2L));
assertEquals(EnumSet.of(Traffic.RED, Traffic.AMBER), EnumUtils.processBitVector(Traffic.class, 3L));
assertEquals(EnumSet.of(Traffic.GREEN), EnumUtils.processBitVector(Traffic.class, 4L));
assertEquals(EnumSet.of(Traffic.RED, Traffic.GREEN), EnumUtils.processBitVector(Traffic.class, 5L));
assertEquals(EnumSet.of(Traffic.AMBER, Traffic.GREEN), EnumUtils.processBitVector(Traffic.class, 6L));
assertEquals(EnumSet.of(Traffic.RED, Traffic.AMBER, Traffic.GREEN),
EnumUtils.processBitVector(Traffic.class, 7L));
// 64 values Enum (to test whether no int<->long jdk conversion issue exists)
assertEquals(EnumSet.of(Enum64.A31), EnumUtils.processBitVector(Enum64.class, 1L << 31));
assertEquals(EnumSet.of(Enum64.A32), EnumUtils.processBitVector(Enum64.class, 1L << 32));
assertEquals(EnumSet.of(Enum64.A63), EnumUtils.processBitVector(Enum64.class, 1L << 63));
assertEquals(EnumSet.of(Enum64.A63), EnumUtils.processBitVector(Enum64.class, Long.MIN_VALUE));
}
@Test
void testProcessBitVector_longClass() {
assertIllegalArgumentException(() -> EnumUtils.processBitVector(TooMany.class, 0L));
}
@Test
void testProcessBitVector_nullClass() {
final Class<Traffic> empty = null;
assertNullPointerException(() -> EnumUtils.processBitVector(empty, 0L));
}
@Test
void testProcessBitVectors() {
assertEquals(EnumSet.noneOf(Traffic.class), EnumUtils.processBitVectors(Traffic.class, 0L));
assertEquals(EnumSet.of(Traffic.RED), EnumUtils.processBitVectors(Traffic.class, 1L));
assertEquals(EnumSet.of(Traffic.AMBER), EnumUtils.processBitVectors(Traffic.class, 2L));
assertEquals(EnumSet.of(Traffic.RED, Traffic.AMBER), EnumUtils.processBitVectors(Traffic.class, 3L));
assertEquals(EnumSet.of(Traffic.GREEN), EnumUtils.processBitVectors(Traffic.class, 4L));
assertEquals(EnumSet.of(Traffic.RED, Traffic.GREEN), EnumUtils.processBitVectors(Traffic.class, 5L));
assertEquals(EnumSet.of(Traffic.AMBER, Traffic.GREEN), EnumUtils.processBitVectors(Traffic.class, 6L));
assertEquals(EnumSet.of(Traffic.RED, Traffic.AMBER, Traffic.GREEN),
EnumUtils.processBitVectors(Traffic.class, 7L));
assertEquals(EnumSet.noneOf(Traffic.class), EnumUtils.processBitVectors(Traffic.class, 0L, 0L));
assertEquals(EnumSet.of(Traffic.RED), EnumUtils.processBitVectors(Traffic.class, 0L, 1L));
assertEquals(EnumSet.of(Traffic.AMBER), EnumUtils.processBitVectors(Traffic.class, 0L, 2L));
assertEquals(EnumSet.of(Traffic.RED, Traffic.AMBER), EnumUtils.processBitVectors(Traffic.class, 0L, 3L));
assertEquals(EnumSet.of(Traffic.GREEN), EnumUtils.processBitVectors(Traffic.class, 0L, 4L));
assertEquals(EnumSet.of(Traffic.RED, Traffic.GREEN), EnumUtils.processBitVectors(Traffic.class, 0L, 5L));
assertEquals(EnumSet.of(Traffic.AMBER, Traffic.GREEN), EnumUtils.processBitVectors(Traffic.class, 0L, 6L));
assertEquals(EnumSet.of(Traffic.RED, Traffic.AMBER, Traffic.GREEN),
EnumUtils.processBitVectors(Traffic.class, 0L, 7L));
// demonstrate tolerance of irrelevant high-order digits:
assertEquals(EnumSet.noneOf(Traffic.class), EnumUtils.processBitVectors(Traffic.class, 666L, 0L));
assertEquals(EnumSet.of(Traffic.RED), EnumUtils.processBitVectors(Traffic.class, 666L, 1L));
assertEquals(EnumSet.of(Traffic.AMBER), EnumUtils.processBitVectors(Traffic.class, 666L, 2L));
assertEquals(EnumSet.of(Traffic.RED, Traffic.AMBER), EnumUtils.processBitVectors(Traffic.class, 666L, 3L));
assertEquals(EnumSet.of(Traffic.GREEN), EnumUtils.processBitVectors(Traffic.class, 666L, 4L));
assertEquals(EnumSet.of(Traffic.RED, Traffic.GREEN), EnumUtils.processBitVectors(Traffic.class, 666L, 5L));
assertEquals(EnumSet.of(Traffic.AMBER, Traffic.GREEN), EnumUtils.processBitVectors(Traffic.class, 666L, 6L));
assertEquals(EnumSet.of(Traffic.RED, Traffic.AMBER, Traffic.GREEN),
EnumUtils.processBitVectors(Traffic.class, 666L, 7L));
// 64 values Enum (to test whether no int<->long jdk conversion issue exists)
assertEquals(EnumSet.of(Enum64.A31), EnumUtils.processBitVectors(Enum64.class, 1L << 31));
assertEquals(EnumSet.of(Enum64.A32), EnumUtils.processBitVectors(Enum64.class, 1L << 32));
assertEquals(EnumSet.of(Enum64.A63), EnumUtils.processBitVectors(Enum64.class, 1L << 63));
assertEquals(EnumSet.of(Enum64.A63), EnumUtils.processBitVectors(Enum64.class, Long.MIN_VALUE));
}
@Test
void testProcessBitVectors_longClass() {
assertEquals(EnumSet.noneOf(TooMany.class), EnumUtils.processBitVectors(TooMany.class, 0L));
assertEquals(EnumSet.of(TooMany.A), EnumUtils.processBitVectors(TooMany.class, 1L));
assertEquals(EnumSet.of(TooMany.B), EnumUtils.processBitVectors(TooMany.class, 2L));
assertEquals(EnumSet.of(TooMany.A, TooMany.B), EnumUtils.processBitVectors(TooMany.class, 3L));
assertEquals(EnumSet.of(TooMany.C), EnumUtils.processBitVectors(TooMany.class, 4L));
assertEquals(EnumSet.of(TooMany.A, TooMany.C), EnumUtils.processBitVectors(TooMany.class, 5L));
assertEquals(EnumSet.of(TooMany.B, TooMany.C), EnumUtils.processBitVectors(TooMany.class, 6L));
assertEquals(EnumSet.of(TooMany.A, TooMany.B, TooMany.C), EnumUtils.processBitVectors(TooMany.class, 7L));
assertEquals(EnumSet.noneOf(TooMany.class), EnumUtils.processBitVectors(TooMany.class, 0L, 0L));
assertEquals(EnumSet.of(TooMany.A), EnumUtils.processBitVectors(TooMany.class, 0L, 1L));
assertEquals(EnumSet.of(TooMany.B), EnumUtils.processBitVectors(TooMany.class, 0L, 2L));
assertEquals(EnumSet.of(TooMany.A, TooMany.B), EnumUtils.processBitVectors(TooMany.class, 0L, 3L));
assertEquals(EnumSet.of(TooMany.C), EnumUtils.processBitVectors(TooMany.class, 0L, 4L));
assertEquals(EnumSet.of(TooMany.A, TooMany.C), EnumUtils.processBitVectors(TooMany.class, 0L, 5L));
assertEquals(EnumSet.of(TooMany.B, TooMany.C), EnumUtils.processBitVectors(TooMany.class, 0L, 6L));
assertEquals(EnumSet.of(TooMany.A, TooMany.B, TooMany.C), EnumUtils.processBitVectors(TooMany.class, 0L, 7L));
assertEquals(EnumSet.of(TooMany.A, TooMany.B, TooMany.C), EnumUtils.processBitVectors(TooMany.class, 0L, 7L));
assertEquals(EnumSet.of(TooMany.M2), EnumUtils.processBitVectors(TooMany.class, 1L, 0L));
assertEquals(EnumSet.of(TooMany.A, TooMany.M2), EnumUtils.processBitVectors(TooMany.class, 1L, 1L));
assertEquals(EnumSet.of(TooMany.B, TooMany.M2), EnumUtils.processBitVectors(TooMany.class, 1L, 2L));
assertEquals(EnumSet.of(TooMany.A, TooMany.B, TooMany.M2), EnumUtils.processBitVectors(TooMany.class, 1L, 3L));
assertEquals(EnumSet.of(TooMany.C, TooMany.M2), EnumUtils.processBitVectors(TooMany.class, 1L, 4L));
assertEquals(EnumSet.of(TooMany.A, TooMany.C, TooMany.M2), EnumUtils.processBitVectors(TooMany.class, 1L, 5L));
assertEquals(EnumSet.of(TooMany.B, TooMany.C, TooMany.M2), EnumUtils.processBitVectors(TooMany.class, 1L, 6L));
assertEquals(EnumSet.of(TooMany.A, TooMany.B, TooMany.C, TooMany.M2),
EnumUtils.processBitVectors(TooMany.class, 1L, 7L));
assertEquals(EnumSet.of(TooMany.A, TooMany.B, TooMany.C, TooMany.M2),
EnumUtils.processBitVectors(TooMany.class, 1L, 7L));
// demonstrate tolerance of irrelevant high-order digits:
assertEquals(EnumSet.of(TooMany.M2), EnumUtils.processBitVectors(TooMany.class, 9L, 0L));
assertEquals(EnumSet.of(TooMany.A, TooMany.M2), EnumUtils.processBitVectors(TooMany.class, 9L, 1L));
assertEquals(EnumSet.of(TooMany.B, TooMany.M2), EnumUtils.processBitVectors(TooMany.class, 9L, 2L));
assertEquals(EnumSet.of(TooMany.A, TooMany.B, TooMany.M2), EnumUtils.processBitVectors(TooMany.class, 9L, 3L));
assertEquals(EnumSet.of(TooMany.C, TooMany.M2), EnumUtils.processBitVectors(TooMany.class, 9L, 4L));
assertEquals(EnumSet.of(TooMany.A, TooMany.C, TooMany.M2), EnumUtils.processBitVectors(TooMany.class, 9L, 5L));
assertEquals(EnumSet.of(TooMany.B, TooMany.C, TooMany.M2), EnumUtils.processBitVectors(TooMany.class, 9L, 6L));
assertEquals(EnumSet.of(TooMany.A, TooMany.B, TooMany.C, TooMany.M2),
EnumUtils.processBitVectors(TooMany.class, 9L, 7L));
assertEquals(EnumSet.of(TooMany.A, TooMany.B, TooMany.C, TooMany.M2),
EnumUtils.processBitVectors(TooMany.class, 9L, 7L));
}
@Test
void testProcessBitVectors_nullClass() {
final Class<Traffic> empty = null;
assertNullPointerException(() -> EnumUtils.processBitVectors(empty, 0L));
}
@Test
void testStream() {
assertEquals(7, EnumUtils.stream(TimeUnit.class).count());
Assertions.assertArrayEquals(TimeUnit.values(), EnumUtils.stream(TimeUnit.class).toArray(TimeUnit[]::new));
assertEquals(0, EnumUtils.stream(Object.class).count());
assertEquals(0, EnumUtils.stream(null).count());
}
}
| EnumUtilsTest |
java | google__guice | core/test/com/google/inject/ProvisionListenerTest.java | {
"start": 1763,
"end": 18548
} | class ____ extends TestCase {
public void testExceptionInListenerBeforeProvisioning() {
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bindListener(Matchers.any(), new FailBeforeProvision());
}
});
try {
injector.getInstance(Foo.class);
fail();
} catch (ProvisionException pe) {
assertEquals(1, pe.getErrorMessages().size());
assertContains(
pe.getMessage(),
"Error notifying ProvisionListener ProvisionListenerTest$FailBeforeProvision of"
+ " ProvisionListenerTest$Foo.",
"Reason: RuntimeException: boo",
"while locating ProvisionListenerTest$Foo");
assertEquals("boo", pe.getCause().getMessage());
}
}
public void testExceptionInListenerAfterProvisioning() {
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bindListener(Matchers.any(), new FailAfterProvision());
}
});
try {
injector.getInstance(Foo.class);
fail();
} catch (ProvisionException pe) {
assertEquals(1, pe.getErrorMessages().size());
assertContains(
pe.getMessage(),
"1) Error notifying ProvisionListener ProvisionListenerTest$FailAfterProvision of"
+ " ProvisionListenerTest$Foo.",
"Reason: RuntimeException: boo",
"while locating ProvisionListenerTest$Foo");
assertEquals("boo", pe.getCause().getMessage());
}
}
public void testExceptionInProvisionExplicitlyCalled() {
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bindListener(Matchers.any(), new JustProvision());
}
});
try {
injector.getInstance(FooBomb.class);
fail();
} catch (ProvisionException pe) {
assertEquals(1, pe.getErrorMessages().size());
assertContains(
pe.getMessage(),
"1) [Guice/ErrorInjectingConstructor]: RuntimeException: Retry, Abort, Fail",
" at ProvisionListenerTest$FooBomb",
" while locating ProvisionListenerTest$FooBom");
assertEquals("Retry, Abort, Fail", pe.getCause().getMessage());
}
}
public void testExceptionInProvisionAutomaticallyCalled() {
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bindListener(Matchers.any(), new NoProvision());
}
});
try {
injector.getInstance(FooBomb.class);
fail();
} catch (ProvisionException pe) {
assertEquals(1, pe.getErrorMessages().size());
assertContains(
pe.getMessage(),
"1) [Guice/ErrorInjectingConstructor]: RuntimeException: Retry, Abort, Fail",
" at ProvisionListenerTest$FooBomb",
" while locating ProvisionListenerTest$FooBom");
assertEquals("Retry, Abort, Fail", pe.getCause().getMessage());
}
}
public void testExceptionInFieldProvision() throws Exception {
final CountAndCaptureExceptionListener listener = new CountAndCaptureExceptionListener();
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bindListener(
new AbstractMatcher<Binding<?>>() {
@Override
public boolean matches(Binding<?> binding) {
return binding.getKey().getRawType().equals(DependsOnFooBombInField.class);
}
},
listener);
}
});
assertEquals(0, listener.beforeProvision);
String expectedMsg = null;
try {
injector.getInstance(DependsOnFooBombInField.class);
fail();
} catch (ProvisionException expected) {
assertEquals(1, expected.getErrorMessages().size());
expectedMsg = Iterables.getOnlyElement(expected.getErrorMessages()).getMessage();
assertContains(
expected.getMessage(),
"1) [Guice/ErrorInjectingConstructor]: RuntimeException: Retry, Abort, Fail",
"at ProvisionListenerTest$FooBomb",
"while locating ProvisionListenerTest$DependsOnFooBombInField");
assertContains(
listener.capture.get().getMessage(),
"1) [Guice/ErrorInjectingConstructor]: RuntimeException: Retry, Abort, Fail",
"at ProvisionListenerTest$FooBomb");
// The message that is captures by the provision listener does not show what is depending on
// the thing being listened to.
assertThat(listener.capture.get().getMessage())
.doesNotContain(" while locating ProvisionListenerTestDependsOnFooBombInField");
}
assertEquals(1, listener.beforeProvision);
assertEquals(
expectedMsg,
Iterables.getOnlyElement(((ProvisionException) listener.capture.get()).getErrorMessages())
.getMessage());
assertEquals(0, listener.afterProvision);
}
public void testExceptionInCxtorProvision() throws Exception {
final CountAndCaptureExceptionListener listener = new CountAndCaptureExceptionListener();
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bindListener(
new AbstractMatcher<Binding<?>>() {
@Override
public boolean matches(Binding<?> binding) {
return binding.getKey().getRawType().equals(DependsOnFooBombInCxtor.class);
}
},
listener);
}
});
assertEquals(0, listener.beforeProvision);
String expectedMsg = null;
try {
injector.getInstance(DependsOnFooBombInCxtor.class);
fail();
} catch (ProvisionException expected) {
assertEquals(1, expected.getErrorMessages().size());
expectedMsg = Iterables.getOnlyElement(expected.getErrorMessages()).getMessage();
assertContains(
expected.getMessage(),
"1) [Guice/ErrorInjectingConstructor]: RuntimeException: Retry, Abort, Fail",
"at ProvisionListenerTest$FooBomb.<init>(ProvisionListenerTest.java:",
"at ProvisionListenerTest$DependsOnFooBombInCxtor.<init>(ProvisionListenerTest.java",
"while locating ProvisionListenerTest$DependsOnFooBombInCxtor");
assertContains(
listener.capture.get().getMessage(),
"1) [Guice/ErrorInjectingConstructor]: RuntimeException: Retry, Abort, Fail",
"at ProvisionListenerTest$FooBomb.<init>(ProvisionListenerTest.java:");
// The message that is captures by the provision listener does not show what is depending on
// the thing being listened to.
assertThat(listener.capture.get().getMessage())
.doesNotContain("while locating ProvisionListenerTest$DependsOnFooBombInField");
}
assertEquals(1, listener.beforeProvision);
assertEquals(
expectedMsg,
Iterables.getOnlyElement(((ProvisionException) listener.capture.get()).getErrorMessages())
.getMessage());
assertEquals(0, listener.afterProvision);
}
public void testListenerCallsProvisionTwice() {
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bindListener(Matchers.any(), new ProvisionTwice());
}
});
try {
injector.getInstance(Foo.class);
fail();
} catch (ProvisionException pe) {
assertEquals(1, pe.getErrorMessages().size());
assertContains(
pe.getMessage(),
"1) Error notifying ProvisionListener ProvisionListenerTest$ProvisionTwice of"
+ " ProvisionListenerTest$Foo.",
"Reason: IllegalStateException: Already provisioned in this listener.",
"while locating ProvisionListenerTest$Foo");
assertEquals("Already provisioned in this listener.", pe.getCause().getMessage());
}
}
public void testCachedInScopePreventsProvisionNotify() {
final Counter count1 = new Counter();
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bindListener(Matchers.any(), count1);
bind(Foo.class).in(Scopes.SINGLETON);
}
});
Foo foo = injector.getInstance(Foo.class);
assertNotNull(foo);
assertEquals(1, count1.count);
// not notified the second time because nothing is provisioned
// (it's cached in the scope)
count1.count = 0;
assertSame(foo, injector.getInstance(Foo.class));
assertEquals(0, count1.count);
}
public void testCombineAllBindListenerCalls() {
final Counter count1 = new Counter();
final Counter count2 = new Counter();
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bindListener(Matchers.any(), count1);
bindListener(Matchers.any(), count2);
}
});
assertNotNull(injector.getInstance(Foo.class));
assertEquals(1, count1.count);
assertEquals(1, count2.count);
}
public void testNotifyEarlyListenersIfFailBeforeProvision() {
final Counter count1 = new Counter();
final Counter count2 = new Counter();
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bindListener(Matchers.any(), count1, new FailBeforeProvision(), count2);
}
});
try {
injector.getInstance(Foo.class);
fail();
} catch (ProvisionException pe) {
assertEquals(1, pe.getErrorMessages().size());
assertContains(
pe.getMessage(),
"1) Error notifying ProvisionListener ProvisionListenerTest$FailBeforeProvision of "
+ "ProvisionListenerTest$Foo.",
"Reason: RuntimeException: boo",
"while locating ProvisionListenerTest$Foo");
assertEquals("boo", pe.getCause().getMessage());
assertEquals(1, count1.count);
assertEquals(0, count2.count);
}
}
public void testNotifyLaterListenersIfFailAfterProvision() {
final Counter count1 = new Counter();
final Counter count2 = new Counter();
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bindListener(Matchers.any(), count1, new FailAfterProvision(), count2);
}
});
try {
injector.getInstance(Foo.class);
fail();
} catch (ProvisionException pe) {
assertEquals(1, pe.getErrorMessages().size());
assertContains(
pe.getMessage(),
"1) Error notifying ProvisionListener ProvisionListenerTest$FailAfterProvision of "
+ "ProvisionListenerTest$Foo.",
"Reason: RuntimeException: boo",
"while locating ProvisionListenerTest$Foo");
assertEquals("boo", pe.getCause().getMessage());
assertEquals(1, count1.count);
assertEquals(1, count2.count);
}
}
public void testNotifiedKeysOfAllBindTypes() {
final Capturer capturer = new Capturer();
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bindListener(Matchers.any(), capturer);
bind(Foo.class).annotatedWith(named("pk")).toProvider(FooP.class);
try {
bind(Foo.class)
.annotatedWith(named("cxtr"))
.toConstructor(Foo.class.getDeclaredConstructor());
} catch (Exception ex) {
throw new RuntimeException(ex);
}
bind(LinkedFoo.class).to(Foo.class);
bind(Interface.class).toInstance(new Implementation());
bindConstant().annotatedWith(named("constant")).to("MyConstant");
}
@Provides
@Named("pi")
Foo provideFooBar() {
return new Foo();
}
});
// toInstance & constant bindings are notified in random order, at the very beginning.
assertEquals(
ImmutableSet.of(Key.get(Interface.class), Key.get(String.class, named("constant"))),
capturer.getAsSetAndClear());
// simple binding
assertNotNull(injector.getInstance(Foo.class));
assertEquals(of(Key.get(Foo.class)), capturer.getAndClear());
// provider key binding -- notifies about provider & the object, always
assertNotNull(injector.getInstance(Key.get(Foo.class, named("pk"))));
assertEquals(of(Key.get(FooP.class), Key.get(Foo.class, named("pk"))), capturer.getAndClear());
assertNotNull(injector.getInstance(Key.get(Foo.class, named("pk"))));
assertEquals(of(Key.get(FooP.class), Key.get(Foo.class, named("pk"))), capturer.getAndClear());
// JIT provider key binding -- notifies about provider & the object, always
assertNotNull(injector.getInstance(JitFoo2.class));
assertEquals(of(Key.get(JitFoo2P.class), Key.get(JitFoo2.class)), capturer.getAndClear());
assertNotNull(injector.getInstance(JitFoo2.class));
assertEquals(of(Key.get(JitFoo2P.class), Key.get(JitFoo2.class)), capturer.getAndClear());
// provider instance binding -- just the object (not the provider)
assertNotNull(injector.getInstance(Key.get(Foo.class, named("pi"))));
assertEquals(of(Key.get(Foo.class, named("pi"))), capturer.getAndClear());
// toConstructor binding
assertNotNull(injector.getInstance(Key.get(Foo.class, named("cxtr"))));
assertEquals(of(Key.get(Foo.class, named("cxtr"))), capturer.getAndClear());
// linked binding -- notifies about the target (that's what's provisioned), not the link
assertNotNull(injector.getInstance(LinkedFoo.class));
assertEquals(of(Key.get(Foo.class)), capturer.getAndClear());
// JIT linked binding -- notifies about the target (that's what's provisioned), not the link
assertNotNull(injector.getInstance(JitFoo.class));
assertEquals(of(Key.get(Foo.class)), capturer.getAndClear());
}
public void testSingletonMatcher() {
final Counter counter = new Counter();
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bindListener(
new AbstractMatcher<Binding<?>>() {
@Override
public boolean matches(Binding<?> t) {
return Scopes.isSingleton(t);
}
},
counter);
}
});
assertEquals(0, counter.count);
// no increment for getting Many.
injector.getInstance(Many.class);
assertEquals(0, counter.count);
// but an increment for getting Sole, since it's a singleton.
injector.getInstance(Sole.class);
assertEquals(1, counter.count);
}
public void testCallingBindingDotGetProviderDotGet() {
Injector injector =
Guice.createInjector(
new AbstractModule() {
@Override
protected void configure() {
bindListener(
Matchers.any(),
new ProvisionListener() {
@Override
public <T> void onProvision(ProvisionInvocation<T> provision) {
provision.getBinding().getProvider().get(); // AGH!
}
});
}
});
try {
injector.getInstance(Sole.class);
fail();
} catch (ProvisionException expected) {
// We don't really care what kind of error you get, we only care you get an error.
}
try {
injector.getInstance(Many.class);
fail();
} catch (ProvisionException expected) {
// We don't really care what kind of error you get, we only care you get an error.
}
}
| ProvisionListenerTest |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/scripting/groovy/GroovyClassLoadingTests.java | {
"start": 1419,
"end": 2142
} | class ____ { def myMethod() { \"bar\" } }");
context.registerBeanDefinition("testBean", new RootBeanDefinition(class1));
Object testBean1 = context.getBean("testBean");
Method method1 = class1.getDeclaredMethod("myMethod", new Class<?>[0]);
Object result1 = ReflectionUtils.invokeMethod(method1, testBean1);
assertThat(result1).isEqualTo("foo");
context.removeBeanDefinition("testBean");
context.registerBeanDefinition("testBean", new RootBeanDefinition(class2));
Object testBean2 = context.getBean("testBean");
Method method2 = class2.getDeclaredMethod("myMethod", new Class<?>[0]);
Object result2 = ReflectionUtils.invokeMethod(method2, testBean2);
assertThat(result2).isEqualTo("bar");
}
}
| TestBean |
java | spring-projects__spring-security | crypto/src/main/java/org/springframework/security/crypto/password/AbstractValidatingPasswordEncoder.java | {
"start": 740,
"end": 1808
} | class ____ implements PasswordEncoder {
@Override
public final @Nullable String encode(@Nullable CharSequence rawPassword) {
if (rawPassword == null) {
return null;
}
return encodeNonNullPassword(rawPassword.toString());
}
protected abstract String encodeNonNullPassword(String rawPassword);
@Override
public final boolean matches(@Nullable CharSequence rawPassword, @Nullable String encodedPassword) {
if (rawPassword == null || rawPassword.length() == 0 || encodedPassword == null
|| encodedPassword.length() == 0) {
return false;
}
return matchesNonNull(rawPassword.toString(), encodedPassword);
}
protected abstract boolean matchesNonNull(String rawPassword, String encodedPassword);
@Override
public final boolean upgradeEncoding(@Nullable String encodedPassword) {
if (encodedPassword == null || encodedPassword.length() == 0) {
return false;
}
return upgradeEncodingNonNull(encodedPassword);
}
protected boolean upgradeEncodingNonNull(String encodedPassword) {
return false;
}
}
| AbstractValidatingPasswordEncoder |
java | apache__flink | flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/catalog/CatalogManager.java | {
"start": 79578,
"end": 95005
} | interface ____ {
void execute(Catalog catalog, ObjectPath path) throws Exception;
}
private void execute(
ModifyCatalog command,
ObjectIdentifier objectIdentifier,
boolean ignoreNoCatalog,
String commandName) {
Optional<Catalog> catalog = getCatalog(objectIdentifier.getCatalogName());
if (catalog.isPresent()) {
try {
command.execute(catalog.get(), objectIdentifier.toObjectPath());
} catch (TableAlreadyExistException
| TableNotExistException
| ModelNotExistException
| ModelAlreadyExistException
| DatabaseNotExistException e) {
throw new ValidationException(getErrorMessage(objectIdentifier, commandName), e);
} catch (Exception e) {
throw new TableException(getErrorMessage(objectIdentifier, commandName), e);
}
} else if (!ignoreNoCatalog) {
throw new ValidationException(
String.format("Catalog %s does not exist.", objectIdentifier.getCatalogName()));
}
}
private String getErrorMessage(ObjectIdentifier objectIdentifier, String commandName) {
return String.format("Could not execute %s in path %s", commandName, objectIdentifier);
}
/** Resolves a {@link CatalogBaseTable} to a validated {@link ResolvedCatalogBaseTable}. */
public ResolvedCatalogBaseTable<?> resolveCatalogBaseTable(CatalogBaseTable baseTable) {
Preconditions.checkNotNull(schemaResolver, "Schema resolver is not initialized.");
if (baseTable instanceof CatalogTable) {
return resolveCatalogTable((CatalogTable) baseTable);
} else if (baseTable instanceof CatalogMaterializedTable) {
return resolveCatalogMaterializedTable((CatalogMaterializedTable) baseTable);
} else if (baseTable instanceof CatalogView) {
return resolveCatalogView((CatalogView) baseTable);
}
throw new IllegalArgumentException(
"Unknown kind of catalog base table: " + baseTable.getClass());
}
/** Resolves a {@link CatalogTable} to a validated {@link ResolvedCatalogTable}. */
public ResolvedCatalogTable resolveCatalogTable(CatalogTable table) {
Preconditions.checkNotNull(schemaResolver, "Schema resolver is not initialized.");
if (table instanceof ResolvedCatalogTable) {
return (ResolvedCatalogTable) table;
}
final ResolvedSchema resolvedSchema = table.getUnresolvedSchema().resolve(schemaResolver);
// Validate distribution keys are included in physical columns
final List<String> physicalColumns =
resolvedSchema.getColumns().stream()
.filter(Column::isPhysical)
.map(Column::getName)
.collect(Collectors.toList());
final Consumer<TableDistribution> distributionValidation =
distribution -> {
distribution
.getBucketKeys()
.forEach(
bucketKey -> {
if (!physicalColumns.contains(bucketKey)) {
throw new ValidationException(
String.format(
"Invalid bucket key '%s'. A bucket key for a distribution must "
+ "reference a physical column in the schema. "
+ "Available columns are: %s",
bucketKey, physicalColumns));
}
});
distribution
.getBucketCount()
.ifPresent(
c -> {
if (c <= 0) {
throw new ValidationException(
String.format(
"Invalid bucket count '%s'. The number of "
+ "buckets for a distributed table must be at least 1.",
c));
}
});
};
table.getDistribution().ifPresent(distributionValidation);
table.getPartitionKeys()
.forEach(
partitionKey -> {
if (!physicalColumns.contains(partitionKey)) {
throw new ValidationException(
String.format(
"Invalid partition key '%s'. A partition key must "
+ "reference a physical column in the schema. "
+ "Available columns are: %s",
partitionKey, physicalColumns));
}
});
return new ResolvedCatalogTable(table, resolvedSchema);
}
/**
* Resolves a {@link CatalogMaterializedTable} to a validated {@link
* ResolvedCatalogMaterializedTable}.
*/
public ResolvedCatalogMaterializedTable resolveCatalogMaterializedTable(
CatalogMaterializedTable table) {
Preconditions.checkNotNull(schemaResolver, "Schema resolver is not initialized.");
if (table instanceof ResolvedCatalogMaterializedTable) {
return (ResolvedCatalogMaterializedTable) table;
}
final ResolvedSchema resolvedSchema = table.getUnresolvedSchema().resolve(schemaResolver);
final MaterializedTableEnrichmentResult enrichmentResult =
this.materializedTableEnricher.enrich(table);
IntervalFreshness freshness = enrichmentResult.getFreshness();
RefreshMode resolvedRefreshMode = enrichmentResult.getRefreshMode();
// Validate partition keys are included in physical columns
final List<String> physicalColumns =
resolvedSchema.getColumns().stream()
.filter(Column::isPhysical)
.map(Column::getName)
.collect(Collectors.toList());
table.getPartitionKeys()
.forEach(
partitionKey -> {
if (!physicalColumns.contains(partitionKey)) {
throw new ValidationException(
String.format(
"Invalid partition key '%s'. A partition key must "
+ "reference a physical column in the schema. "
+ "Available columns are: %s",
partitionKey, physicalColumns));
}
});
return new ResolvedCatalogMaterializedTable(
table, resolvedSchema, resolvedRefreshMode, freshness);
}
/** Resolves a {@link CatalogView} to a validated {@link ResolvedCatalogView}. */
public ResolvedCatalogView resolveCatalogView(CatalogView view) {
Preconditions.checkNotNull(schemaResolver, "Schema resolver is not initialized.");
if (view instanceof ResolvedCatalogView) {
return (ResolvedCatalogView) view;
}
if (view instanceof QueryOperationCatalogView) {
final QueryOperation queryOperation =
((QueryOperationCatalogView) view).getQueryOperation();
return new ResolvedCatalogView(view, queryOperation.getResolvedSchema());
}
final ResolvedSchema resolvedSchema = view.getUnresolvedSchema().resolve(schemaResolver);
final List<Operation> parse;
try {
parse = parser.parse(view.getExpandedQuery());
} catch (Throwable e) {
// in case of a failure during parsing, let the lower layers fail
return new ResolvedCatalogView(view, resolvedSchema);
}
if (parse.size() != 1 || !(parse.get(0) instanceof QueryOperation)) {
// parsing a view should result in a single query operation
// if it is not what we expect, we let the lower layers fail
return new ResolvedCatalogView(view, resolvedSchema);
} else {
final QueryOperation operation = (QueryOperation) parse.get(0);
final ResolvedSchema querySchema = operation.getResolvedSchema();
if (querySchema.getColumns().size() != resolvedSchema.getColumns().size()) {
// in case the query does not match the number of expected columns, let the lower
// layers fail
return new ResolvedCatalogView(view, resolvedSchema);
}
final ResolvedSchema renamedQuerySchema =
new ResolvedSchema(
IntStream.range(0, resolvedSchema.getColumnCount())
.mapToObj(
i ->
querySchema
.getColumn(i)
.get()
.rename(
resolvedSchema
.getColumnNames()
.get(i)))
.collect(Collectors.toList()),
resolvedSchema.getWatermarkSpecs(),
resolvedSchema.getPrimaryKey().orElse(null),
resolvedSchema.getIndexes());
return new ResolvedCatalogView(
// pass a view that has the query parsed and
// validated already
new QueryOperationCatalogView(operation, view), renamedQuerySchema);
}
}
/**
* Create a database.
*
* @param catalogName Name of the catalog for database
* @param databaseName Name of the database to be created
* @param database The database definition
* @param ignoreIfExists Flag to specify behavior when a database with the given name already
* exists: if set to false, throw a DatabaseAlreadyExistException, if set to true, do
* nothing.
* @throws DatabaseAlreadyExistException if the given database already exists and ignoreIfExists
* is false
* @throws CatalogException in case of any runtime exception
*/
public void createDatabase(
String catalogName,
String databaseName,
CatalogDatabase database,
boolean ignoreIfExists)
throws DatabaseAlreadyExistException, CatalogException {
Catalog catalog = getCatalogOrThrowException(catalogName);
catalog.createDatabase(databaseName, database, ignoreIfExists);
catalogModificationListeners.forEach(
listener ->
listener.onEvent(
CreateDatabaseEvent.createEvent(
CatalogContext.createContext(catalogName, catalog),
databaseName,
database,
ignoreIfExists)));
}
/**
* Drop a database.
*
* @param catalogName Name of the catalog for database.
* @param databaseName Name of the database to be dropped.
* @param ignoreIfNotExists Flag to specify behavior when the database does not exist: if set to
* false, throw an exception, if set to true, do nothing.
* @param cascade Flag to specify behavior when the database contains table or function: if set
* to true, delete all tables and functions in the database and then delete the database, if
* set to false, throw an exception.
* @throws DatabaseNotExistException if the given database does not exist
* @throws DatabaseNotEmptyException if the given database is not empty and isRestrict is true
* @throws CatalogException in case of any runtime exception
*/
public void dropDatabase(
String catalogName, String databaseName, boolean ignoreIfNotExists, boolean cascade)
throws DatabaseNotExistException, DatabaseNotEmptyException, CatalogException {
if (Objects.equals(currentCatalogName, catalogName)
&& Objects.equals(currentDatabaseName, databaseName)) {
throw new ValidationException("Cannot drop a database which is currently in use.");
}
Catalog catalog = getCatalogOrError(catalogName);
catalog.dropDatabase(databaseName, ignoreIfNotExists, cascade);
catalogModificationListeners.forEach(
listener ->
listener.onEvent(
DropDatabaseEvent.createEvent(
CatalogContext.createContext(catalogName, catalog),
databaseName,
ignoreIfNotExists,
cascade)));
}
/**
* Modify an existing database.
*
* @param catalogName Name of the catalog for database
* @param databaseName Name of the database to be dropped
* @param newDatabase The new database definition
* @param ignoreIfNotExists Flag to specify behavior when the given database does not exist: if
* set to false, throw an exception, if set to true, do nothing.
* @throws DatabaseNotExistException if the given database does not exist
* @throws CatalogException in case of any runtime exception
*/
public void alterDatabase(
String catalogName,
String databaseName,
CatalogDatabase newDatabase,
boolean ignoreIfNotExists)
throws DatabaseNotExistException, CatalogException {
Catalog catalog = getCatalogOrError(catalogName);
catalog.alterDatabase(databaseName, newDatabase, ignoreIfNotExists);
catalogModificationListeners.forEach(
listener ->
listener.onEvent(
AlterDatabaseEvent.createEvent(
CatalogContext.createContext(catalogName, catalog),
databaseName,
newDatabase,
ignoreIfNotExists)));
}
}
| ModifyCatalog |
java | FasterXML__jackson-core | src/test/java/tools/jackson/core/unittest/read/NonStandardAllowRSTest.java | {
"start": 627,
"end": 3112
} | class ____
extends JacksonCoreTestBase
{
@Test
void recordSeparatorEnabled() throws Exception {
doRecordSeparationTest(true);
}
@Test
void recordSeparatorDisabled() throws Exception {
doRecordSeparationTest(false);
}
// Testing record separation for all parser implementations
private void doRecordSeparationTest(boolean recordSeparation) throws Exception {
String contents = "{\"key\":true}\u001E";
JsonFactory factory = JsonFactory.builder()
.configure(JsonReadFeature.ALLOW_RS_CONTROL_CHAR, recordSeparation)
.build();
try (JsonParser parser = factory.createParser(ObjectReadContext.empty(), contents)) {
verifyRecordSeparation(parser, recordSeparation);
}
try (JsonParser parser = factory.createParser(ObjectReadContext.empty(), new StringReader(contents))) {
verifyRecordSeparation(parser, recordSeparation);
}
try (JsonParser parser = factory.createParser(ObjectReadContext.empty(), contents.getBytes(StandardCharsets.UTF_8))) {
verifyRecordSeparation(parser, recordSeparation);
}
try (NonBlockingByteArrayJsonParser parser = (NonBlockingByteArrayJsonParser)
factory.createNonBlockingByteArrayParser(ObjectReadContext.empty())) {
byte[] data = contents.getBytes(StandardCharsets.UTF_8);
parser.feedInput(data, 0, data.length);
parser.endOfInput();
verifyRecordSeparation(parser, recordSeparation);
}
}
private void verifyRecordSeparation(JsonParser parser, boolean recordSeparation) throws Exception {
try {
assertToken(JsonToken.START_OBJECT, parser.nextToken());
String field1 = parser.nextName();
assertEquals("key", field1);
assertToken(JsonToken.VALUE_TRUE, parser.nextToken());
assertToken(JsonToken.END_OBJECT, parser.nextToken());
parser.nextToken(); // RS token
if (!recordSeparation) {
fail("Should have thrown an exception");
}
} catch (StreamReadException e) {
if (!recordSeparation) {
verifyException(e, "Illegal character ((CTRL-CHAR");
verifyException(e, "consider enabling `JsonReadFeature.ALLOW_RS_CONTROL_CHAR`");
} else {
throw e;
}
}
}
}
| NonStandardAllowRSTest |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java | {
"start": 13893,
"end": 73401
} | interface ____ {
public void checkChooseStorageTypes(BlockStoragePolicy p, short replication,
List<StorageType> chosen, StorageType... expected);
/** Basic case: pass only replication and chosen */
static final CheckChooseStorageTypes Basic = new CheckChooseStorageTypes() {
@Override
public void checkChooseStorageTypes(BlockStoragePolicy p, short replication,
List<StorageType> chosen, StorageType... expected) {
final List<StorageType> types = p.chooseStorageTypes(replication, chosen);
assertStorageTypes(types, expected);
}
};
/** With empty unavailables and isNewBlock=true */
static final CheckChooseStorageTypes EmptyUnavailablesAndNewBlock
= new CheckChooseStorageTypes() {
@Override
public void checkChooseStorageTypes(BlockStoragePolicy p,
short replication, List<StorageType> chosen, StorageType... expected) {
final List<StorageType> types = p.chooseStorageTypes(replication,
chosen, none, true);
assertStorageTypes(types, expected);
}
};
/** With empty unavailables and isNewBlock=false */
static final CheckChooseStorageTypes EmptyUnavailablesAndNonNewBlock
= new CheckChooseStorageTypes() {
@Override
public void checkChooseStorageTypes(BlockStoragePolicy p,
short replication, List<StorageType> chosen, StorageType... expected) {
final List<StorageType> types = p.chooseStorageTypes(replication,
chosen, none, false);
assertStorageTypes(types, expected);
}
};
/** With both DISK and ARCHIVE unavailables and isNewBlock=true */
static final CheckChooseStorageTypes BothUnavailableAndNewBlock
= new CheckChooseStorageTypes() {
@Override
public void checkChooseStorageTypes(BlockStoragePolicy p,
short replication, List<StorageType> chosen, StorageType... expected) {
final List<StorageType> types = p.chooseStorageTypes(replication,
chosen, disk_archive, true);
assertStorageTypes(types, expected);
}
};
/** With both DISK and ARCHIVE unavailable and isNewBlock=false */
static final CheckChooseStorageTypes BothUnavailableAndNonNewBlock
= new CheckChooseStorageTypes() {
@Override
public void checkChooseStorageTypes(BlockStoragePolicy p,
short replication, List<StorageType> chosen, StorageType... expected) {
final List<StorageType> types = p.chooseStorageTypes(replication,
chosen, disk_archive, false);
assertStorageTypes(types, expected);
}
};
/** With ARCHIVE unavailable and isNewBlock=true */
static final CheckChooseStorageTypes ArchivalUnavailableAndNewBlock
= new CheckChooseStorageTypes() {
@Override
public void checkChooseStorageTypes(BlockStoragePolicy p,
short replication, List<StorageType> chosen, StorageType... expected) {
final List<StorageType> types = p.chooseStorageTypes(replication,
chosen, archive, true);
assertStorageTypes(types, expected);
}
};
/** With ARCHIVE unavailable and isNewBlock=true */
static final CheckChooseStorageTypes ArchivalUnavailableAndNonNewBlock
= new CheckChooseStorageTypes() {
@Override
public void checkChooseStorageTypes(BlockStoragePolicy p,
short replication, List<StorageType> chosen, StorageType... expected) {
final List<StorageType> types = p.chooseStorageTypes(replication,
chosen, archive, false);
assertStorageTypes(types, expected);
}
};
}
@Test
public void testChooseStorageTypes() {
run(CheckChooseStorageTypes.Basic);
run(CheckChooseStorageTypes.EmptyUnavailablesAndNewBlock);
run(CheckChooseStorageTypes.EmptyUnavailablesAndNonNewBlock);
}
/**
 * Exercises {@code chooseStorageTypes} for the HOT, WARM, COLD and
 * ALL_NVDIMM policies over every already-chosen combination of up to three
 * DISK/ARCHIVE replicas (replication factor 3) and verifies the additional
 * storage types each policy asks for.
 *
 * @param method checker that supplies a particular unavailable-set /
 *               isNewBlock combination to the policy under test
 */
private static void run(CheckChooseStorageTypes method) {
  final BlockStoragePolicy hot = POLICY_SUITE.getPolicy(HOT);
  final BlockStoragePolicy warm = POLICY_SUITE.getPolicy(WARM);
  final BlockStoragePolicy cold = POLICY_SUITE.getPolicy(COLD);
  final BlockStoragePolicy allnvdimm = POLICY_SUITE.getPolicy(ALLNVDIMM);
  final short replication = 3;
  { // chosen = {} : each policy must supply all 3 replicas.
    final List<StorageType> chosen = Lists.newArrayList();
    method.checkChooseStorageTypes(hot, replication, chosen,
        StorageType.DISK, StorageType.DISK, StorageType.DISK);
    method.checkChooseStorageTypes(warm, replication, chosen,
        StorageType.DISK, StorageType.ARCHIVE, StorageType.ARCHIVE);
    method.checkChooseStorageTypes(cold, replication, chosen,
        StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
    method.checkChooseStorageTypes(allnvdimm, replication, chosen,
        StorageType.NVDIMM, StorageType.NVDIMM, StorageType.NVDIMM);
  }
  { // chosen = {DISK}
    final List<StorageType> chosen = Arrays.asList(StorageType.DISK);
    method.checkChooseStorageTypes(hot, replication, chosen,
        StorageType.DISK, StorageType.DISK);
    method.checkChooseStorageTypes(warm, replication, chosen,
        StorageType.ARCHIVE, StorageType.ARCHIVE);
    method.checkChooseStorageTypes(cold, replication, chosen,
        StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
    method.checkChooseStorageTypes(allnvdimm, replication, chosen,
        StorageType.NVDIMM, StorageType.NVDIMM, StorageType.NVDIMM);
  }
  { // chosen = {ARCHIVE}
    final List<StorageType> chosen = Arrays.asList(StorageType.ARCHIVE);
    method.checkChooseStorageTypes(hot, replication, chosen,
        StorageType.DISK, StorageType.DISK, StorageType.DISK);
    method.checkChooseStorageTypes(warm, replication, chosen,
        StorageType.DISK, StorageType.ARCHIVE);
    method.checkChooseStorageTypes(cold, replication, chosen,
        StorageType.ARCHIVE, StorageType.ARCHIVE);
    method.checkChooseStorageTypes(allnvdimm, replication, chosen,
        StorageType.NVDIMM, StorageType.NVDIMM, StorageType.NVDIMM);
  }
  { // chosen = {DISK, DISK}
    final List<StorageType> chosen = Arrays.asList(
        StorageType.DISK, StorageType.DISK);
    method.checkChooseStorageTypes(hot, replication, chosen,
        StorageType.DISK);
    method.checkChooseStorageTypes(warm, replication, chosen,
        StorageType.ARCHIVE, StorageType.ARCHIVE);
    method.checkChooseStorageTypes(cold, replication, chosen,
        StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
    method.checkChooseStorageTypes(allnvdimm, replication, chosen,
        StorageType.NVDIMM, StorageType.NVDIMM, StorageType.NVDIMM);
  }
  { // chosen = {DISK, ARCHIVE}
    final List<StorageType> chosen = Arrays.asList(
        StorageType.DISK, StorageType.ARCHIVE);
    method.checkChooseStorageTypes(hot, replication, chosen,
        StorageType.DISK, StorageType.DISK);
    method.checkChooseStorageTypes(warm, replication, chosen,
        StorageType.ARCHIVE);
    method.checkChooseStorageTypes(cold, replication, chosen,
        StorageType.ARCHIVE, StorageType.ARCHIVE);
    method.checkChooseStorageTypes(allnvdimm, replication, chosen,
        StorageType.NVDIMM, StorageType.NVDIMM, StorageType.NVDIMM);
  }
  { // chosen = {ARCHIVE, ARCHIVE}
    final List<StorageType> chosen = Arrays.asList(
        StorageType.ARCHIVE, StorageType.ARCHIVE);
    method.checkChooseStorageTypes(hot, replication, chosen,
        StorageType.DISK, StorageType.DISK, StorageType.DISK);
    method.checkChooseStorageTypes(warm, replication, chosen,
        StorageType.DISK);
    method.checkChooseStorageTypes(cold, replication, chosen,
        StorageType.ARCHIVE);
    method.checkChooseStorageTypes(allnvdimm, replication, chosen,
        StorageType.NVDIMM, StorageType.NVDIMM, StorageType.NVDIMM);
  }
  { // chosen = {DISK, DISK, DISK} : HOT is fully satisfied already.
    final List<StorageType> chosen = Arrays.asList(
        StorageType.DISK, StorageType.DISK, StorageType.DISK);
    method.checkChooseStorageTypes(hot, replication, chosen);
    method.checkChooseStorageTypes(warm, replication, chosen,
        StorageType.ARCHIVE, StorageType.ARCHIVE);
    method.checkChooseStorageTypes(cold, replication, chosen,
        StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
    method.checkChooseStorageTypes(allnvdimm, replication, chosen,
        StorageType.NVDIMM, StorageType.NVDIMM, StorageType.NVDIMM);
  }
  { // chosen = {DISK, DISK, ARCHIVE}
    final List<StorageType> chosen = Arrays.asList(
        StorageType.DISK, StorageType.DISK, StorageType.ARCHIVE);
    method.checkChooseStorageTypes(hot, replication, chosen,
        StorageType.DISK);
    method.checkChooseStorageTypes(warm, replication, chosen,
        StorageType.ARCHIVE);
    method.checkChooseStorageTypes(cold, replication, chosen,
        StorageType.ARCHIVE, StorageType.ARCHIVE);
    method.checkChooseStorageTypes(allnvdimm, replication, chosen,
        StorageType.NVDIMM, StorageType.NVDIMM, StorageType.NVDIMM);
  }
  { // chosen = {DISK, ARCHIVE, ARCHIVE} : WARM is fully satisfied already.
    final List<StorageType> chosen = Arrays.asList(
        StorageType.DISK, StorageType.ARCHIVE, StorageType.ARCHIVE);
    method.checkChooseStorageTypes(hot, replication, chosen,
        StorageType.DISK, StorageType.DISK);
    method.checkChooseStorageTypes(warm, replication, chosen);
    method.checkChooseStorageTypes(cold, replication, chosen,
        StorageType.ARCHIVE);
    method.checkChooseStorageTypes(allnvdimm, replication, chosen,
        StorageType.NVDIMM, StorageType.NVDIMM, StorageType.NVDIMM);
  }
  { // chosen = {ARCHIVE, ARCHIVE, ARCHIVE} : COLD is fully satisfied already.
    final List<StorageType> chosen = Arrays.asList(
        StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
    method.checkChooseStorageTypes(hot, replication, chosen,
        StorageType.DISK, StorageType.DISK, StorageType.DISK);
    method.checkChooseStorageTypes(warm, replication, chosen,
        StorageType.DISK);
    method.checkChooseStorageTypes(cold, replication, chosen);
    method.checkChooseStorageTypes(allnvdimm, replication, chosen,
        StorageType.NVDIMM, StorageType.NVDIMM, StorageType.NVDIMM);
  }
}
@Test
public void testChooseStorageTypesWithBothUnavailable() {
  // Exercise both the new-block and non-new-block code paths.
  runWithBothUnavailable(CheckChooseStorageTypes.BothUnavailableAndNewBlock);
  runWithBothUnavailable(CheckChooseStorageTypes.BothUnavailableAndNonNewBlock);
}
/**
 * With both DISK and ARCHIVE unavailable, no policy can place anything:
 * for every split of 0..3 already-chosen replicas between DISK and
 * ARCHIVE, the expected additional storage-type list is empty.
 */
private static void runWithBothUnavailable(CheckChooseStorageTypes method) {
  final short replication = 3;
  final BlockStoragePolicy[] policies = {
      POLICY_SUITE.getPolicy(HOT),
      POLICY_SUITE.getPolicy(WARM),
      POLICY_SUITE.getPolicy(COLD)
  };
  for (int total = 0; total <= 3; total++) {
    for (int nDisk = 0; nDisk <= total; nDisk++) {
      final List<StorageType> chosen = asList(nDisk, total - nDisk);
      // No expected types: nothing can be chosen for any policy.
      for (BlockStoragePolicy policy : policies) {
        method.checkChooseStorageTypes(policy, replication, chosen);
      }
    }
  }
}
/**
 * With DISK unavailable and a new block, HOT (whose only medium is DISK)
 * can never place anything, while WARM/COLD fall back to ARCHIVE.
 */
@Test
public void testChooseStorageTypesWithDiskUnavailableAndNewBlock() {
  final BlockStoragePolicy hot = POLICY_SUITE.getPolicy(HOT);
  final BlockStoragePolicy warm = POLICY_SUITE.getPolicy(WARM);
  final BlockStoragePolicy cold = POLICY_SUITE.getPolicy(COLD);
  final short replication = 3;
  final EnumSet<StorageType> unavailables = disk;
  final boolean isNewBlock = true;
  { // chosen = {}
    final List<StorageType> chosen = Lists.newArrayList();
    checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
    checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
    checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
  }
  { // chosen = {DISK}
    final List<StorageType> chosen = Arrays.asList(StorageType.DISK);
    checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
    checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE);
    checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
  }
  { // chosen = {ARCHIVE}
    final List<StorageType> chosen = Arrays.asList(StorageType.ARCHIVE);
    checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
    checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE);
    checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE);
  }
  { // chosen = {DISK, DISK}
    final List<StorageType> chosen = Arrays.asList(
        StorageType.DISK, StorageType.DISK);
    checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
    checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE);
    checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
  }
  { // chosen = {DISK, ARCHIVE}
    final List<StorageType> chosen = Arrays.asList(
        StorageType.DISK, StorageType.ARCHIVE);
    checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
    checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE);
    checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE);
  }
  { // chosen = {ARCHIVE, ARCHIVE}
    final List<StorageType> chosen = Arrays.asList(
        StorageType.ARCHIVE, StorageType.ARCHIVE);
    checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
    checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE);
    checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE);
  }
  { // chosen = {DISK, DISK, DISK}
    final List<StorageType> chosen = Arrays.asList(
        StorageType.DISK, StorageType.DISK, StorageType.DISK);
    checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
    checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE);
    checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
  }
  { // chosen = {DISK, DISK, ARCHIVE}
    final List<StorageType> chosen = Arrays.asList(
        StorageType.DISK, StorageType.DISK, StorageType.ARCHIVE);
    checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
    checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE);
    checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE);
  }
  { // chosen = {DISK, ARCHIVE, ARCHIVE}
    final List<StorageType> chosen = Arrays.asList(
        StorageType.DISK, StorageType.ARCHIVE, StorageType.ARCHIVE);
    checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
    checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock);
    checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE);
  }
  { // chosen = {ARCHIVE, ARCHIVE, ARCHIVE} : nothing left to add anywhere.
    final List<StorageType> chosen = Arrays.asList(
        StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
    checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
    checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock);
    checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock);
  }
}
@Test
public void testChooseStorageTypesWithArchiveUnavailable() {
  // Exercise both the new-block and non-new-block code paths.
  runWithArchiveUnavailable(CheckChooseStorageTypes.ArchivalUnavailableAndNewBlock);
  runWithArchiveUnavailable(CheckChooseStorageTypes.ArchivalUnavailableAndNonNewBlock);
}
/**
 * With ARCHIVE unavailable, COLD (whose only medium is ARCHIVE) can never
 * place anything, while HOT/WARM fall back to DISK.
 *
 * @param method checker bound to the ARCHIVE-unavailable configuration
 */
private static void runWithArchiveUnavailable(CheckChooseStorageTypes method) {
  final BlockStoragePolicy hot = POLICY_SUITE.getPolicy(HOT);
  final BlockStoragePolicy warm = POLICY_SUITE.getPolicy(WARM);
  final BlockStoragePolicy cold = POLICY_SUITE.getPolicy(COLD);
  final short replication = 3;
  { // chosen = {}
    final List<StorageType> chosen = Lists.newArrayList();
    method.checkChooseStorageTypes(hot, replication, chosen,
        StorageType.DISK, StorageType.DISK, StorageType.DISK);
    method.checkChooseStorageTypes(warm, replication, chosen,
        StorageType.DISK, StorageType.DISK, StorageType.DISK);
    method.checkChooseStorageTypes(cold, replication, chosen);
  }
  { // chosen = {DISK}
    final List<StorageType> chosen = Arrays.asList(StorageType.DISK);
    method.checkChooseStorageTypes(hot, replication, chosen,
        StorageType.DISK, StorageType.DISK);
    method.checkChooseStorageTypes(warm, replication, chosen,
        StorageType.DISK, StorageType.DISK);
    method.checkChooseStorageTypes(cold, replication, chosen);
  }
  { // chosen = {ARCHIVE}
    final List<StorageType> chosen = Arrays.asList(StorageType.ARCHIVE);
    method.checkChooseStorageTypes(hot, replication, chosen,
        StorageType.DISK, StorageType.DISK, StorageType.DISK);
    method.checkChooseStorageTypes(warm, replication, chosen,
        StorageType.DISK, StorageType.DISK);
    method.checkChooseStorageTypes(cold, replication, chosen);
  }
  { // chosen = {DISK, DISK}
    final List<StorageType> chosen = Arrays.asList(
        StorageType.DISK, StorageType.DISK);
    method.checkChooseStorageTypes(hot, replication, chosen,
        StorageType.DISK);
    method.checkChooseStorageTypes(warm, replication, chosen,
        StorageType.DISK);
    method.checkChooseStorageTypes(cold, replication, chosen);
  }
  { // chosen = {DISK, ARCHIVE}
    final List<StorageType> chosen = Arrays.asList(
        StorageType.DISK, StorageType.ARCHIVE);
    method.checkChooseStorageTypes(hot, replication, chosen,
        StorageType.DISK, StorageType.DISK);
    method.checkChooseStorageTypes(warm, replication, chosen,
        StorageType.DISK);
    method.checkChooseStorageTypes(cold, replication, chosen);
  }
  { // chosen = {ARCHIVE, ARCHIVE}
    final List<StorageType> chosen = Arrays.asList(
        StorageType.ARCHIVE, StorageType.ARCHIVE);
    method.checkChooseStorageTypes(hot, replication, chosen,
        StorageType.DISK, StorageType.DISK, StorageType.DISK);
    method.checkChooseStorageTypes(warm, replication, chosen,
        StorageType.DISK);
    method.checkChooseStorageTypes(cold, replication, chosen);
  }
  { // chosen = {DISK, DISK, DISK} : HOT and WARM need nothing more.
    final List<StorageType> chosen = Arrays.asList(
        StorageType.DISK, StorageType.DISK, StorageType.DISK);
    method.checkChooseStorageTypes(hot, replication, chosen);
    method.checkChooseStorageTypes(warm, replication, chosen);
    method.checkChooseStorageTypes(cold, replication, chosen);
  }
  { // chosen = {DISK, DISK, ARCHIVE}
    final List<StorageType> chosen = Arrays.asList(
        StorageType.DISK, StorageType.DISK, StorageType.ARCHIVE);
    method.checkChooseStorageTypes(hot, replication, chosen,
        StorageType.DISK);
    method.checkChooseStorageTypes(warm, replication, chosen);
    method.checkChooseStorageTypes(cold, replication, chosen);
  }
  { // chosen = {DISK, ARCHIVE, ARCHIVE}
    final List<StorageType> chosen = Arrays.asList(
        StorageType.DISK, StorageType.ARCHIVE, StorageType.ARCHIVE);
    method.checkChooseStorageTypes(hot, replication, chosen,
        StorageType.DISK, StorageType.DISK);
    method.checkChooseStorageTypes(warm, replication, chosen);
    method.checkChooseStorageTypes(cold, replication, chosen);
  }
  { // chosen = {ARCHIVE, ARCHIVE, ARCHIVE}
    final List<StorageType> chosen = Arrays.asList(
        StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
    method.checkChooseStorageTypes(hot, replication, chosen,
        StorageType.DISK, StorageType.DISK, StorageType.DISK);
    method.checkChooseStorageTypes(warm, replication, chosen,
        StorageType.DISK);
    method.checkChooseStorageTypes(cold, replication, chosen);
  }
}
/**
 * With DISK unavailable and an existing (non-new) block, even HOT may use
 * its replication fallback, so missing replicas land on ARCHIVE.
 */
@Test
public void testChooseStorageTypesWithDiskUnavailableAndNonNewBlock() {
  final BlockStoragePolicy hot = POLICY_SUITE.getPolicy(HOT);
  final BlockStoragePolicy warm = POLICY_SUITE.getPolicy(WARM);
  final BlockStoragePolicy cold = POLICY_SUITE.getPolicy(COLD);
  final short replication = 3;
  final EnumSet<StorageType> unavailables = disk;
  final boolean isNewBlock = false;
  { // chosen = {}
    final List<StorageType> chosen = Lists.newArrayList();
    checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
    checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
    checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
  }
  { // chosen = {DISK}
    final List<StorageType> chosen = Arrays.asList(StorageType.DISK);
    checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE);
    checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE);
    checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
  }
  { // chosen = {ARCHIVE}
    final List<StorageType> chosen = Arrays.asList(StorageType.ARCHIVE);
    checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE);
    checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE);
    checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE);
  }
  { // chosen = {DISK, DISK}
    final List<StorageType> chosen = Arrays.asList(
        StorageType.DISK, StorageType.DISK);
    checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE);
    checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE);
    checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
  }
  { // chosen = {DISK, ARCHIVE}
    final List<StorageType> chosen = Arrays.asList(
        StorageType.DISK, StorageType.ARCHIVE);
    checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE);
    checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE);
    checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE);
  }
  { // chosen = {ARCHIVE, ARCHIVE}
    final List<StorageType> chosen = Arrays.asList(
        StorageType.ARCHIVE, StorageType.ARCHIVE);
    checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE);
    checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE);
    checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE);
  }
  { // chosen = {DISK, DISK, DISK}
    final List<StorageType> chosen = Arrays.asList(
        StorageType.DISK, StorageType.DISK, StorageType.DISK);
    checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
    checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE);
    checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
  }
  { // chosen = {DISK, DISK, ARCHIVE}
    final List<StorageType> chosen = Arrays.asList(
        StorageType.DISK, StorageType.DISK, StorageType.ARCHIVE);
    checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
    checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE);
    checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE, StorageType.ARCHIVE);
  }
  { // chosen = {DISK, ARCHIVE, ARCHIVE}
    final List<StorageType> chosen = Arrays.asList(
        StorageType.DISK, StorageType.ARCHIVE, StorageType.ARCHIVE);
    checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
    checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock);
    checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
        StorageType.ARCHIVE);
  }
  { // chosen = {ARCHIVE, ARCHIVE, ARCHIVE} : nothing left to add anywhere.
    final List<StorageType> chosen = Arrays.asList(
        StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
    checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
    checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock);
    checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock);
  }
}
/**
 * Invokes {@code chooseStorageTypes} with an explicit unavailable set and
 * asserts the returned types match {@code expected} (order-insensitive).
 */
static void checkChooseStorageTypes(BlockStoragePolicy p, short replication,
    List<StorageType> chosen, EnumSet<StorageType> unavailables,
    boolean isNewBlock, StorageType... expected) {
  final List<StorageType> actual =
      p.chooseStorageTypes(replication, chosen, unavailables, isNewBlock);
  assertStorageTypes(actual, expected);
}
/** Order-insensitive comparison of a computed storage-type list. */
static void assertStorageTypes(List<StorageType> computed, StorageType... expected) {
  final StorageType[] actual = computed.toArray(StorageType.EMPTY_ARRAY);
  assertStorageTypes(actual, expected);
}
/**
 * Order-insensitive comparison of storage-type arrays.
 * Sorts clones rather than the arguments themselves: the original sorted the
 * caller's arrays in place, silently reordering any array the caller reuses
 * (e.g. the shared {@code before}/{@code after} fixtures).
 */
static void assertStorageTypes(StorageType[] computed, StorageType... expected) {
  final StorageType[] expectedSorted = expected.clone();
  final StorageType[] computedSorted = computed.clone();
  Arrays.sort(expectedSorted);
  Arrays.sort(computedSorted);
  assertArrayEquals(expectedSorted, computedSorted);
}
/**
 * Verifies {@code chooseExcess} over every split of 0..6 replicas between
 * DISK and ARCHIVE: each policy should give up exactly the replicas beyond
 * what its preferred media layout needs for replication factor 3.
 */
@Test
public void testChooseExcess() {
  final BlockStoragePolicy hot = POLICY_SUITE.getPolicy(HOT);
  final BlockStoragePolicy warm = POLICY_SUITE.getPolicy(WARM);
  final BlockStoragePolicy cold = POLICY_SUITE.getPolicy(COLD);
  final short replication = 3;
  for(int n = 0; n <= 6; n++) {
    for(int d = 0; d <= n; d++) {
      final int a = n - d;
      final List<StorageType> chosen = asList(d, a);
      { // HOT keeps up to `replication` DISKs; every ARCHIVE is excess.
        final int nDisk = Math.max(0, d - replication);
        final int nArchive = a;
        final StorageType[] expected = newStorageTypes(nDisk, nArchive);
        checkChooseExcess(hot, replication, chosen, expected);
      }
      { // WARM keeps 1 DISK and (replication - 1) ARCHIVEs.
        final int nDisk = Math.max(0, d - 1);
        final int nArchive = Math.max(0, a - replication + 1);
        final StorageType[] expected = newStorageTypes(nDisk, nArchive);
        checkChooseExcess(warm, replication, chosen, expected);
      }
      { // COLD keeps up to `replication` ARCHIVEs; every DISK is excess.
        final int nDisk = d;
        final int nArchive = Math.max(0, a - replication );
        final StorageType[] expected = newStorageTypes(nDisk, nArchive);
        checkChooseExcess(cold, replication, chosen, expected);
      }
    }
  }
}
/** Asserts the excess replicas a policy chooses to delete (order-insensitive). */
static void checkChooseExcess(BlockStoragePolicy p, short replication,
    List<StorageType> chosen, StorageType... expected) {
  assertStorageTypes(p.chooseExcess(replication, chosen), expected);
}
/**
 * Asserts that each listed file/directory carries the expected storage
 * policy id, position by position.
 *
 * @param stats directory listing entries (actual values)
 * @param policies expected storage-policy ids, parallel to {@code stats}
 */
private void checkDirectoryListing(HdfsFileStatus[] stats, byte... policies) {
  // assertEquals takes (expected, actual); the original passed them in the
  // opposite order, which made failure messages report the values swapped.
  assertEquals(policies.length, stats.length);
  for (int i = 0; i < stats.length; i++) {
    assertEquals(policies[i], stats[i].getStoragePolicy());
  }
}
/**
 * End-to-end check of set/get storage policy on a mini cluster:
 * rejects invalid policy names and paths, inherits a directory policy,
 * lets a file override it, and survives both an edit-log replay and an
 * fsimage checkpoint across namenode restarts.
 */
@Test
public void testSetStoragePolicy() throws Exception {
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  final DistributedFileSystem fs = cluster.getFileSystem();
  try {
    final Path dir = new Path("/testSetStoragePolicy");
    final Path fooFile = new Path(dir, "foo");
    final Path barDir = new Path(dir, "bar");
    final Path barFile1= new Path(barDir, "f1");
    final Path barFile2= new Path(barDir, "f2");
    DFSTestUtil.createFile(fs, fooFile, FILE_LEN, REPLICATION, 0L);
    DFSTestUtil.createFile(fs, barFile1, FILE_LEN, REPLICATION, 0L);
    DFSTestUtil.createFile(fs, barFile2, FILE_LEN, REPLICATION, 0L);
    // An unknown policy name must be rejected by the namenode.
    final String invalidPolicyName = "INVALID-POLICY";
    try {
      fs.setStoragePolicy(fooFile, invalidPolicyName);
      fail("Should throw a HadoopIllegalArgumentException");
    } catch (RemoteException e) {
      GenericTestUtils.assertExceptionContains(invalidPolicyName, e);
    }
    // check storage policy
    HdfsFileStatus[] dirList = fs.getClient().listPaths(dir.toString(),
        HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
    HdfsFileStatus[] barList = fs.getClient().listPaths(barDir.toString(),
        HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
    checkDirectoryListing(dirList, BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
        BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
    checkDirectoryListing(barList, BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
        BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
    // Setting or reading a policy on a non-existent path must fail.
    final Path invalidPath = new Path("/invalidPath");
    try {
      fs.setStoragePolicy(invalidPath, HdfsConstants.WARM_STORAGE_POLICY_NAME);
      fail("Should throw a FileNotFoundException");
    } catch (FileNotFoundException e) {
      GenericTestUtils.assertExceptionContains(invalidPath.toString(), e);
    }
    try {
      fs.getStoragePolicy(invalidPath);
      fail("Should throw a FileNotFoundException");
    } catch (FileNotFoundException e) {
      GenericTestUtils.assertExceptionContains(invalidPath.toString(), e);
    }
    fs.setStoragePolicy(fooFile, HdfsConstants.COLD_STORAGE_POLICY_NAME);
    fs.setStoragePolicy(barDir, HdfsConstants.WARM_STORAGE_POLICY_NAME);
    fs.setStoragePolicy(barFile2, HdfsConstants.HOT_STORAGE_POLICY_NAME);
    assertEquals(HdfsConstants.COLD_STORAGE_POLICY_NAME,
        fs.getStoragePolicy(fooFile).getName(),
        "File storage policy should be COLD");
    assertEquals(HdfsConstants.WARM_STORAGE_POLICY_NAME,
        fs.getStoragePolicy(barDir).getName(),
        "File storage policy should be WARM");
    assertEquals(HdfsConstants.HOT_STORAGE_POLICY_NAME,
        fs.getStoragePolicy(barFile2).getName(),
        "File storage policy should be HOT");
    dirList = fs.getClient().listPaths(dir.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing();
    barList = fs.getClient().listPaths(barDir.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing();
    checkDirectoryListing(dirList, WARM, COLD); // bar is warm, foo is cold
    checkDirectoryListing(barList, WARM, HOT);
    // restart namenode to make sure the editlog is correct
    cluster.restartNameNode(true);
    dirList = fs.getClient().listPaths(dir.toString(),
        HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
    barList = fs.getClient().listPaths(barDir.toString(),
        HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
    checkDirectoryListing(dirList, WARM, COLD); // bar is warm, foo is cold
    checkDirectoryListing(barList, WARM, HOT);
    // restart namenode with checkpoint to make sure the fsimage is correct
    fs.setSafeMode(SafeModeAction.ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.LEAVE);
    cluster.restartNameNode(true);
    dirList = fs.getClient().listPaths(dir.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing();
    barList = fs.getClient().listPaths(barDir.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing();
    checkDirectoryListing(dirList, WARM, COLD); // bar is warm, foo is cold
    checkDirectoryListing(barList, WARM, HOT);
  } finally {
    cluster.shutdown();
  }
}
/**
 * Sets a policy through a raw {@link DFSClient} and reads it back,
 * verifying the round trip through the namenode RPC interface.
 */
@Test
public void testGetStoragePolicy() throws Exception {
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  final DistributedFileSystem fs = cluster.getFileSystem();
  try {
    final Path dir = new Path("/testGetStoragePolicy");
    final Path fooFile = new Path(dir, "foo");
    DFSTestUtil.createFile(fs, fooFile, FILE_LEN, REPLICATION, 0L);
    // try-with-resources: the original never closed the client, leaking
    // its RPC proxy for the remainder of the test run.
    try (DFSClient client = new DFSClient(cluster.getNameNode(0)
        .getNameNodeAddress(), conf)) {
      client.setStoragePolicy("/testGetStoragePolicy/foo",
          HdfsConstants.COLD_STORAGE_POLICY_NAME);
      String policyName = client.getStoragePolicy("/testGetStoragePolicy/foo")
          .getName();
      assertEquals(HdfsConstants.COLD_STORAGE_POLICY_NAME, policyName,
          "File storage policy should be COLD");
    }
  } finally {
    cluster.shutdown();
  }
}
/**
 * Verifies that storage policies interact with snapshots as intended:
 * listing a snapshot path always reports the CURRENT policy of the inode
 * (policies are not frozen into snapshots), and policy state survives
 * deletion of the live file/directory while snapshot references remain.
 */
@Test
public void testSetStoragePolicyWithSnapshot() throws Exception {
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  final DistributedFileSystem fs = cluster.getFileSystem();
  try {
    final Path dir = new Path("/testSetStoragePolicyWithSnapshot");
    final Path fooDir = new Path(dir, "foo");
    final Path fooFile1= new Path(fooDir, "f1");
    final Path fooFile2= new Path(fooDir, "f2");
    DFSTestUtil.createFile(fs, fooFile1, FILE_LEN, REPLICATION, 0L);
    DFSTestUtil.createFile(fs, fooFile2, FILE_LEN, REPLICATION, 0L);
    // Directory policy is inherited by both files.
    fs.setStoragePolicy(fooDir, HdfsConstants.WARM_STORAGE_POLICY_NAME);
    HdfsFileStatus[] dirList = fs.getClient().listPaths(dir.toString(),
        HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
    checkDirectoryListing(dirList, WARM);
    HdfsFileStatus[] fooList = fs.getClient().listPaths(fooDir.toString(),
        HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
    checkDirectoryListing(fooList, WARM, WARM);
    // take snapshot
    SnapshotTestHelper.createSnapshot(fs, dir, "s1");
    // change the storage policy of fooFile1
    fs.setStoragePolicy(fooFile1, HdfsConstants.COLD_STORAGE_POLICY_NAME);
    fooList = fs.getClient().listPaths(fooDir.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing();
    checkDirectoryListing(fooList, COLD, WARM);
    // check the policy for /dir/.snapshot/s1/foo/f1. Note we always return
    // the latest storage policy for a file/directory.
    Path s1f1 = SnapshotTestHelper.getSnapshotPath(dir, "s1", "foo/f1");
    DirectoryListing f1Listing = fs.getClient().listPaths(s1f1.toString(),
        HdfsFileStatus.EMPTY_NAME);
    checkDirectoryListing(f1Listing.getPartialListing(), COLD);
    // delete f1
    fs.delete(fooFile1, true);
    fooList = fs.getClient().listPaths(fooDir.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing();
    checkDirectoryListing(fooList, WARM);
    // check the policy for /dir/.snapshot/s1/foo/f1 again after the deletion
    checkDirectoryListing(fs.getClient().listPaths(s1f1.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing(), COLD);
    // change the storage policy of foo dir
    fs.setStoragePolicy(fooDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
    // /dir/foo is now hot
    dirList = fs.getClient().listPaths(dir.toString(),
        HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
    checkDirectoryListing(dirList, HOT);
    // /dir/foo/f2 is hot
    fooList = fs.getClient().listPaths(fooDir.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing();
    checkDirectoryListing(fooList, HOT);
    // check storage policy of snapshot path
    Path s1 = SnapshotTestHelper.getSnapshotRoot(dir, "s1");
    Path s1foo = SnapshotTestHelper.getSnapshotPath(dir, "s1", "foo");
    checkDirectoryListing(fs.getClient().listPaths(s1.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing(), HOT);
    // /dir/.snapshot/.s1/foo/f1 and /dir/.snapshot/.s1/foo/f2 should still
    // follow the latest
    checkDirectoryListing(fs.getClient().listPaths(s1foo.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing(), COLD, HOT);
    // delete foo
    fs.delete(fooDir, true);
    checkDirectoryListing(fs.getClient().listPaths(s1.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing(), HOT);
    checkDirectoryListing(fs.getClient().listPaths(s1foo.toString(),
        HdfsFileStatus.EMPTY_NAME).getPartialListing(), COLD, HOT);
  } finally {
    cluster.shutdown();
  }
}
/**
 * Builds a per-datanode storage layout where every node has one DISK and
 * one ARCHIVE volume (a distinct array instance per node).
 */
private static StorageType[][] genStorageTypes(int numDataNodes) {
  final StorageType[][] layout = new StorageType[numDataNodes][];
  for (int node = 0; node < numDataNodes; node++) {
    layout[node] = new StorageType[]{StorageType.DISK, StorageType.ARCHIVE};
  }
  return layout;
}
/**
 * Verifies the block count, the replica count of every block, and that the
 * multiset of replica storage types matches {@code types} exactly — each
 * expected entry is consumed at most once.
 */
private void checkLocatedBlocks(HdfsLocatedFileStatus status, int blockNum,
    int replicaNum, StorageType... types) {
  final List<StorageType> remaining = Lists.newArrayList(types);
  final LocatedBlocks blocks = status.getLocatedBlocks();
  assertEquals(blockNum, blocks.getLocatedBlocks().size());
  for (LocatedBlock block : blocks.getLocatedBlocks()) {
    assertEquals(replicaNum, block.getStorageTypes().length);
    for (StorageType replicaType : block.getStorageTypes()) {
      // remove() returns false if the replica's type was not expected
      // (or was already matched), which fails the assertion.
      assertTrue(remaining.remove(replicaType));
    }
  }
  assertTrue(remaining.isEmpty());
}
/**
 * Creates a file under the given policy, raises its replication from 3 to
 * 5, then lowers it back, verifying the replica storage-type layout at
 * each step.
 *
 * @param policyName policy to set on the parent directory
 * @param policyId   expected policy id in directory listings
 * @param before     expected replica types at replication 3
 * @param after      expected replica types at replication 5
 * NOTE(review): the fixed Thread.sleep(1000) waits make this timing
 * dependent and potentially flaky on slow hosts — consider
 * GenericTestUtils.waitFor-style polling instead; left unchanged here.
 */
private void testChangeFileRep(String policyName, byte policyId,
    StorageType[] before,
    StorageType[] after) throws Exception {
  final int numDataNodes = 5;
  final StorageType[][] types = genStorageTypes(numDataNodes);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numDataNodes).storageTypes(types).build();
  cluster.waitActive();
  final DistributedFileSystem fs = cluster.getFileSystem();
  try {
    final Path dir = new Path("/test");
    fs.mkdirs(dir);
    fs.setStoragePolicy(dir, policyName);
    final Path foo = new Path(dir, "foo");
    DFSTestUtil.createFile(fs, foo, FILE_LEN, REPLICATION, 0L);
    HdfsFileStatus[] status = fs.getClient().listPaths(foo.toString(),
        HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
    checkDirectoryListing(status, policyId);
    HdfsLocatedFileStatus fooStatus = (HdfsLocatedFileStatus) status[0];
    checkLocatedBlocks(fooStatus, 1, 3, before);
    // change the replication factor to 5
    fs.setReplication(foo, (short) numDataNodes);
    Thread.sleep(1000);
    for (DataNode dn : cluster.getDataNodes()) {
      DataNodeTestUtils.triggerHeartbeat(dn);
    }
    Thread.sleep(1000);
    status = fs.getClient().listPaths(foo.toString(),
        HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
    checkDirectoryListing(status, policyId);
    fooStatus = (HdfsLocatedFileStatus) status[0];
    checkLocatedBlocks(fooStatus, 1, numDataNodes, after);
    // change the replication factor back to 3
    fs.setReplication(foo, REPLICATION);
    Thread.sleep(1000);
    for (DataNode dn : cluster.getDataNodes()) {
      DataNodeTestUtils.triggerHeartbeat(dn);
    }
    Thread.sleep(1000);
    // Block reports let the namenode see the deleted excess replicas.
    for (DataNode dn : cluster.getDataNodes()) {
      DataNodeTestUtils.triggerBlockReport(dn);
    }
    Thread.sleep(1000);
    status = fs.getClient().listPaths(foo.toString(),
        HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
    checkDirectoryListing(status, policyId);
    fooStatus = (HdfsLocatedFileStatus) status[0];
    checkLocatedBlocks(fooStatus, 1, REPLICATION, before);
  } finally {
    cluster.shutdown();
  }
}
/**
 * Consider a File with Hot storage policy. Increase replication factor of
 * that file from 3 to 5. Make sure all replications are created in DISKS.
 */
@Test
public void testChangeHotFileRep() throws Exception {
  testChangeFileRep(HdfsConstants.HOT_STORAGE_POLICY_NAME, HOT,
      new StorageType[]{StorageType.DISK, StorageType.DISK,
          StorageType.DISK},
      new StorageType[]{StorageType.DISK, StorageType.DISK, StorageType.DISK,
          StorageType.DISK, StorageType.DISK});
}
/**
 * Consider a File with Warm storage policy. Increase replication factor of
 * that file from 3 to 5. Make sure all replicas are created in DISKS
 * and ARCHIVE.
 */
@Test
public void testChangeWarmRep() throws Exception {
  testChangeFileRep(HdfsConstants.WARM_STORAGE_POLICY_NAME, WARM,
      new StorageType[]{StorageType.DISK, StorageType.ARCHIVE,
          StorageType.ARCHIVE},
      new StorageType[]{StorageType.DISK, StorageType.ARCHIVE,
          StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE});
}
/**
 * A file under the COLD storage policy has its replication factor raised
 * from 3 to 5; every replica, old and new, must be placed on ARCHIVE
 * storage.
 */
@Test
public void testChangeColdRep() throws Exception {
  final StorageType[] replicasBefore =
      {StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE};
  final StorageType[] replicasAfter =
      {StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE,
          StorageType.ARCHIVE, StorageType.ARCHIVE};
  testChangeFileRep(HdfsConstants.COLD_STORAGE_POLICY_NAME, COLD,
      replicasBefore, replicasAfter);
}
@Test
public void testChooseTargetWithTopology() throws Exception {
// Two ad-hoc policies over the same three storage types, differing only in
// preference order: policy1 prefers SSD first, policy2 prefers DISK first.
BlockStoragePolicy policy1 = new BlockStoragePolicy((byte) 9, "TEST1",
new StorageType[]{StorageType.SSD, StorageType.DISK,
StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});
BlockStoragePolicy policy2 = new BlockStoragePolicy((byte) 11, "TEST2",
new StorageType[]{StorageType.DISK, StorageType.SSD,
StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});
// Three DNs spread over two racks, each carrying a different storage type.
final String[] racks = {"/d1/r1", "/d1/r2", "/d1/r2"};
final String[] hosts = {"host1", "host2", "host3"};
final StorageType[] types = {StorageType.DISK, StorageType.SSD,
StorageType.ARCHIVE};
final DatanodeStorageInfo[] storages = DFSTestUtil
.createDatanodeStorageInfos(3, racks, hosts, types);
final DatanodeDescriptor[] dataNodes = DFSTestUtil
.toDatanodeDescriptor(storages);
// Boot a real NameNode (port 0 = ephemeral) on a freshly formatted name dir.
FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
new File(baseDir, "name").getPath());
DFSTestUtil.formatNameNode(conf);
NameNode namenode = new NameNode(conf);
try {
final BlockManager bm = namenode.getNamesystem().getBlockManager();
BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
// Register the fake DNs with the topology so chooseTarget can see them.
for (DatanodeDescriptor datanode : dataNodes) {
cluster.add(datanode);
}
// Regardless of the policy's storage-type preference order, all 3 nodes
// should be chosen as targets for a 3-replica block.
DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
new HashSet<Node>(), 0, policy1, null);
System.out.println(Arrays.asList(targets));
assertEquals(3, targets.length);
targets = replicator.chooseTarget("/foo", 3,
dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
new HashSet<Node>(), 0, policy2, null);
System.out.println(Arrays.asList(targets));
assertEquals(3, targets.length);
} finally {
if (namenode != null) {
namenode.stop();
}
}
}
@Test
public void testChooseSsdOverDisk() throws Exception {
// Policy that prefers SSD, then DISK, then ARCHIVE.
BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1",
new StorageType[]{StorageType.SSD, StorageType.DISK,
StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});
// All three DNs sit on the SAME rack and start with DISK storage only.
final String[] racks = {"/d1/r1", "/d1/r1", "/d1/r1"};
final String[] hosts = {"host1", "host2", "host3"};
final StorageType[] disks = {StorageType.DISK, StorageType.DISK, StorageType.DISK};
final DatanodeStorageInfo[] diskStorages
= DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, disks);
final DatanodeDescriptor[] dataNodes
= DFSTestUtil.toDatanodeDescriptor(diskStorages);
// Additionally attach one SSD storage to every DN, so each node offers
// both DISK and SSD.
for(int i = 0; i < dataNodes.length; i++) {
BlockManagerTestUtil.updateStorage(dataNodes[i],
new DatanodeStorage("ssd" + i, DatanodeStorage.State.NORMAL,
StorageType.SSD));
}
// Boot a real NameNode (port 0 = ephemeral) on a freshly formatted name dir.
FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
new File(baseDir, "name").getPath());
DFSTestUtil.formatNameNode(conf);
NameNode namenode = new NameNode(conf);
try {
final BlockManager bm = namenode.getNamesystem().getBlockManager();
BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
for (DatanodeDescriptor datanode : dataNodes) {
cluster.add(datanode);
}
DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
new HashSet<Node>(), 0, policy, null);
System.out.println(policy.getName() + ": " + Arrays.asList(targets));
// Although 3 replicas were requested, only 2 targets are expected here,
// with SSD chosen ahead of DISK per the policy's preference order.
// NOTE(review): presumably the single-rack topology limits placement to
// 2 targets — confirm against BlockPlacementPolicyDefault semantics.
assertEquals(2, targets.length);
assertEquals(StorageType.SSD, targets[0].getStorageType());
assertEquals(StorageType.DISK, targets[1].getStorageType());
} finally {
if (namenode != null) {
namenode.stop();
}
}
}
/**
 * Consider a file with the All_SSD storage policy.
 * 1. Choose 3 DISK DNs for the pipeline because there are not enough SSD
 *    DNs at the beginning.
 * 2. One of the DISK DNs fails and a new DN must be chosen for the existing
 *    pipeline {@link DataStreamer addDatanode2ExistingPipeline()}.
 * Make sure the number of target DNs is 3.
 * See HDFS-16182.
 */
@Test
public void testAddDatanode2ExistingPipelineInSsd() throws Exception {
  BlockStoragePolicy policy = POLICY_SUITE.getPolicy(ALLSSD);
  // Seven DNs, each on its own rack, all starting with DISK storage only.
  final String[] racks = {"/d1/r1", "/d2/r2", "/d3/r3", "/d4/r4", "/d5/r5",
      "/d6/r6", "/d7/r7"};
  final String[] hosts = {"host1", "host2", "host3", "host4", "host5",
      "host6", "host7"};
  final StorageType[] disks = {StorageType.DISK, StorageType.DISK, StorageType.DISK};
  final DatanodeStorageInfo[] diskStorages
      = DFSTestUtil.createDatanodeStorageInfos(7, racks, hosts, disks);
  final DatanodeDescriptor[] dataNodes
      = DFSTestUtil.toDatanodeDescriptor(diskStorages);
  // Attach one SSD storage to every DN. BUGFIX: the original
  // "ssd" + i + 1 concatenated i and the literal 1 as strings, producing
  // IDs "ssd01", "ssd11", ... instead of the intended "ssd1".."ssd7";
  // parenthesize so the arithmetic happens before concatenation.
  for (int i = 0; i < dataNodes.length; i++) {
    BlockManagerTestUtil.updateStorage(dataNodes[i],
        new DatanodeStorage("ssd" + (i + 1), DatanodeStorage.State.NORMAL,
            StorageType.SSD));
  }
  // Boot a real NameNode (port 0 = ephemeral) on a freshly formatted dir.
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(conf);
  NameNode namenode = new NameNode(conf);
  try {
    final BlockManager bm = namenode.getNamesystem().getBlockManager();
    BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
    NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
    for (DatanodeDescriptor datanode : dataNodes) {
      cluster.add(datanode);
    }
    // chosenDs hold DISK storages to simulate not enough SSD storage when
    // the pipeline was first built.
    List<DatanodeStorageInfo> chosenDs = new ArrayList<>();
    chosenDs.add(diskStorages[0]);
    chosenDs.add(diskStorages[1]);
    // Ask for 1 additional target for the existing 2-node pipeline; the
    // combined target count must be 3.
    DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 1,
        null, chosenDs, true,
        new HashSet<Node>(), 0, policy, null);
    assertEquals(3, targets.length);
  } finally {
    if (namenode != null) {
      namenode.stop();
    }
  }
}
@Test
public void testGetFileStoragePolicyAfterRestartNN() throws Exception {
  // Regression test for HDFS-8219: a file must keep reporting its parent
  // directory's storage policy after a NameNode restart replays the edits.
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(REPLICATION)
      .storageTypes(
          new StorageType[] {StorageType.DISK, StorageType.ARCHIVE})
      .build();
  cluster.waitActive();
  final DistributedFileSystem fs = cluster.getFileSystem();
  try {
    final String file = "/testScheduleWithinSameNode/file";
    Path dir = new Path("/testScheduleWithinSameNode");
    fs.mkdirs(dir);
    // 2. Set dir policy.
    fs.setStoragePolicy(dir, "COLD");
    // 3. Create file.
    final FSDataOutputStream out = fs.create(new Path(file));
    out.writeChars("testScheduleWithinSameNode");
    out.close();
    // 4. Change dir policy after the file already exists.
    fs.setStoragePolicy(dir, "HOT");
    HdfsFileStatus status = fs.getClient().getFileInfo(file);
    // 5. The file's effective policy should be the parent's policy.
    // Use assertEquals (not assertTrue on ==) so a failure reports the
    // actual policy id instead of just "expected true".
    assertEquals(HOT, status.getStoragePolicy(),
        "File storage policy should be HOT");
    // 6. Restart the NameNode to force reloading of the edit logs.
    cluster.restartNameNode(true);
    // 7. The policy must survive the restart.
    status = fs.getClient().getFileInfo(file);
    assertEquals(HOT, status.getStoragePolicy(),
        "File storage policy should be HOT");
  } finally {
    cluster.shutdown();
  }
}
/**
 * Verify that {@link FileSystem#getAllStoragePolicies} returns all
 * known storage policies for DFS.
 *
 * @throws IOException if cluster operations fail
 */
@Test
public void testGetAllStoragePoliciesFromFs() throws IOException {
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(REPLICATION)
      .storageTypes(
          new StorageType[] {StorageType.DISK, StorageType.ARCHIVE})
      .build();
  try {
    cluster.waitActive();
    // Get policy names via {@link FileSystem#getAllStoragePolicies}.
    Set<String> policyNamesSet1 = new HashSet<>();
    for (BlockStoragePolicySpi policy :
        cluster.getFileSystem().getAllStoragePolicies()) {
      policyNamesSet1.add(policy.getName());
    }
    // Get policy names from the default BlockStoragePolicySuite.
    BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
    Set<String> policyNamesSet2 = new HashSet<>();
    for (BlockStoragePolicy policy : suite.getAllPolicies()) {
      policyNamesSet2.add(policy.getName());
    }
    // Ensure both sources expose exactly the same set of policies.
    // assertEquals on the sets (instead of two Sets.difference checks)
    // prints both sets on failure, making mismatches easy to diagnose.
    assertEquals(policyNamesSet2, policyNamesSet1);
  } finally {
    cluster.shutdown();
  }
}
@Test
public void testStorageType() {
  final EnumMap<StorageType, Integer> map = new EnumMap<>(StorageType.class);
  // Insert the keys in reverse declaration order; an EnumMap must still
  // iterate in the enum's natural (declaration) order.
  map.put(StorageType.NVDIMM, 1);
  map.put(StorageType.ARCHIVE, 1);
  map.put(StorageType.DISK, 1);
  map.put(StorageType.SSD, 1);
  map.put(StorageType.RAM_DISK, 1);
  final StorageType[] declarationOrder = {StorageType.RAM_DISK,
      StorageType.SSD, StorageType.DISK, StorageType.ARCHIVE,
      StorageType.NVDIMM};
  // Key-set iteration follows declaration order.
  final Iterator<StorageType> keys = map.keySet().iterator();
  for (StorageType expected : declarationOrder) {
    assertEquals(expected, keys.next());
  }
  // Entry-set iteration follows the same order.
  final Iterator<Map.Entry<StorageType, Integer>> entries
      = map.entrySet().iterator();
  for (StorageType expected : declarationOrder) {
    assertEquals(expected, entries.next().getKey());
  }
}
@Test
public void testStorageTypeCheckAccess() {
  // Each case: (requested types, allowed types, expected access).
  // NOTE: three exact duplicate invocations present in the original
  // ({DISK,SSD}/{SSD} twice and {RAM_DISK,SSD,ARCHIVE}/{DISK} three
  // times) have been removed; the remaining set covers the same cases.
  testStorageTypeCheckAccessResult(new StorageType[]{StorageType.DEFAULT},
      new StorageType[]{StorageType.DEFAULT}, true);
  testStorageTypeCheckAccessResult(StorageType.EMPTY_ARRAY,
      StorageType.EMPTY_ARRAY, false);
  testStorageTypeCheckAccessResult(new StorageType[]{StorageType.DISK},
      StorageType.EMPTY_ARRAY, false);
  testStorageTypeCheckAccessResult(StorageType.EMPTY_ARRAY,
      new StorageType[]{StorageType.RAM_DISK}, true);
  testStorageTypeCheckAccessResult(new StorageType[]{StorageType.DISK},
      new StorageType[]{StorageType.DISK}, true);
  testStorageTypeCheckAccessResult(new StorageType[]{StorageType.DISK},
      new StorageType[]{StorageType.DISK, StorageType.DISK, StorageType.DISK},
      false);
  testStorageTypeCheckAccessResult(
      new StorageType[]{StorageType.DISK, StorageType.DISK, StorageType.DISK},
      new StorageType[]{StorageType.DISK, StorageType.DISK, StorageType.DISK},
      true);
  testStorageTypeCheckAccessResult(
      new StorageType[]{StorageType.RAM_DISK, StorageType.SSD},
      new StorageType[]{StorageType.DISK, StorageType.RAM_DISK,
          StorageType.SSD},
      false);
  testStorageTypeCheckAccessResult(
      new StorageType[]{StorageType.DISK, StorageType.SSD},
      new StorageType[]{StorageType.SSD},
      true);
  testStorageTypeCheckAccessResult(new StorageType[]{StorageType.DISK},
      new StorageType[]{StorageType.RAM_DISK}, false);
  testStorageTypeCheckAccessResult(new StorageType[]{StorageType.DISK},
      new StorageType[]{StorageType.RAM_DISK, StorageType.SSD,
          StorageType.ARCHIVE},
      false);
  testStorageTypeCheckAccessResult(new StorageType[]{StorageType.RAM_DISK,
      StorageType.SSD, StorageType.ARCHIVE},
      new StorageType[]{StorageType.DISK}, false);
  testStorageTypeCheckAccessResult(new StorageType[]{StorageType.RAM_DISK},
      new StorageType[]{StorageType.DISK}, false);
  // NVDIMM participates in access checks like any other storage type.
  testStorageTypeCheckAccessResult(
      new StorageType[]{StorageType.DISK, StorageType.NVDIMM},
      new StorageType[]{StorageType.NVDIMM}, true);
  testStorageTypeCheckAccessResult(
      new StorageType[]{StorageType.RAM_DISK, StorageType.NVDIMM,
          StorageType.ARCHIVE},
      new StorageType[]{StorageType.SSD}, false);
}
/**
 * Runs {@link BlockTokenSecretManager#checkAccess} for the given
 * requested/allowed StorageType arrays and fails the test when the
 * observed grant/deny outcome differs from {@code expAccess}.
 */
private void testStorageTypeCheckAccessResult(StorageType[] requested,
    StorageType[] allowed, boolean expAccess) {
  boolean granted = true;
  try {
    BlockTokenSecretManager.checkAccess(requested, allowed, "StorageTypes");
  } catch (SecretManager.InvalidToken e) {
    granted = false;
  }
  if (granted && !expAccess) {
    fail("No expected access with allowed StorageTypes "
        + Arrays.toString(allowed) + " and requested StorageTypes "
        + Arrays.toString(requested));
  } else if (!granted && expAccess) {
    fail("Expected access with allowed StorageTypes "
        + Arrays.toString(allowed) + " and requested StorageTypes "
        + Arrays.toString(requested));
  }
}
@Test
public void testStorageIDCheckAccess() {
  // Each row: {requested storage IDs, allowed storage IDs, expected access}.
  final Object[][] cases = {
      {new String[]{"DN1-Storage1"},
          new String[]{"DN1-Storage1"}, true},
      {new String[]{"DN1-Storage1", "DN2-Storage1"},
          new String[]{"DN1-Storage1"}, true},
      {new String[]{"DN1-Storage1", "DN2-Storage1"},
          new String[]{"DN1-Storage1", "DN1-Storage2"}, false},
      {new String[]{"DN1-Storage1", "DN1-Storage2"},
          new String[]{"DN1-Storage1"}, true},
      {new String[]{"DN1-Storage1", "DN1-Storage2"},
          new String[]{"DN2-Storage1"}, false},
      {new String[]{"DN1-Storage2", "DN2-Storage2"},
          new String[]{"DN1-Storage1", "DN2-Storage1"}, false},
      {new String[0], new String[0], false},
      {new String[0], new String[]{"DN1-Storage1"}, true},
      {new String[]{"DN1-Storage1"}, new String[0], false},
  };
  for (Object[] c : cases) {
    testStorageIDCheckAccessResult((String[]) c[0], (String[]) c[1],
        (Boolean) c[2]);
  }
}
/**
 * Runs {@link BlockTokenSecretManager#checkAccess} for the given
 * requested/allowed storage-ID arrays and fails the test when the
 * observed grant/deny outcome differs from {@code expAccess}.
 */
private void testStorageIDCheckAccessResult(String[] requested,
    String[] allowed, boolean expAccess) {
  boolean granted = true;
  try {
    BlockTokenSecretManager.checkAccess(requested, allowed, "StorageIDs");
  } catch (SecretManager.InvalidToken e) {
    granted = false;
  }
  if (granted && !expAccess) {
    fail("No expected access with allowed StorageIDs"
        + Arrays.toString(allowed) + " and requested StorageIDs"
        + Arrays.toString(requested));
  } else if (!granted && expAccess) {
    fail("Expected access with allowed StorageIDs "
        + Arrays.toString(allowed) + " and requested StorageIDs"
        + Arrays.toString(requested));
  }
}
@Test
public void testCreateDefaultPoliciesFromConf() {
  // Without any configuration, the default suite's default policy is HOT.
  BlockStoragePolicySuite defaultSuite =
      BlockStoragePolicySuite.createDefaultSuite();
  assertEquals(HdfsConstants.StoragePolicy.HOT.value(),
      defaultSuite.getDefaultPolicy().getId());
  // An explicit DFS_STORAGE_DEFAULT_POLICY setting must be honored.
  Configuration overrideConf = new Configuration();
  overrideConf.setEnum(DFSConfigKeys.DFS_STORAGE_DEFAULT_POLICY,
      HdfsConstants.StoragePolicy.ONE_SSD);
  BlockStoragePolicySuite configuredSuite =
      BlockStoragePolicySuite.createDefaultSuite(overrideConf);
  assertEquals(HdfsConstants.StoragePolicy.ONE_SSD.value(),
      configuredSuite.getDefaultPolicy().getId());
}
@Test
public void testCreateFileWithConfiguredDefaultPolicies()
    throws IOException {
  // Configure the cluster-wide default storage policy to WARM.
  Configuration warmConf = new HdfsConfiguration();
  warmConf.set(DFSConfigKeys.DFS_STORAGE_DEFAULT_POLICY,
      HdfsConstants.StoragePolicy.WARM.name());
  MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(warmConf)
      .numDataNodes(0).build();
  try {
    miniCluster.waitActive();
    final Path fooFile = new Path("/foo");
    FileSystem dfs = miniCluster.getFileSystem();
    DFSTestUtil.createFile(dfs, fooFile, 0, REPLICATION, 0L);
    // A file created without an explicit policy must inherit the
    // configured default (WARM).
    String actualPolicy = dfs.getStoragePolicy(fooFile).getName();
    assertEquals(HdfsConstants.StoragePolicy.WARM.name(), actualPolicy);
  } finally {
    miniCluster.shutdown();
  }
}
}
| CheckChooseStorageTypes |
java | google__dagger | javatests/dagger/internal/codegen/ComponentCreatorTest.java | {
"start": 41211,
"end": 41366
} | interface ____ {",
" <T> SimpleComponent build();",
" }",
"",
" @Component.Builder",
" | Parent |
java | google__guava | android/guava/src/com/google/common/escape/CharEscaper.java | {
"start": 1913,
"end": 6859
} | class ____ extends Escaper {
/** Constructor for use by subclasses. */
protected CharEscaper() {}
/**
* Returns the escaped form of a given literal string.
*
* @param string the literal string to be escaped
* @return the escaped form of {@code string}
* @throws NullPointerException if {@code string} is null
*/
@Override
public String escape(String string) {
checkNotNull(string); // GWT specific check (do not optimize)
// Inlineable fast-path loop which hands off to escapeSlow() only if needed
int length = string.length();
for (int index = 0; index < length; index++) {
if (escape(string.charAt(index)) != null) {
return escapeSlow(string, index);
}
}
return string;
}
/**
* Returns the escaped form of the given character, or {@code null} if this character does not
* need to be escaped. If an empty array is returned, this effectively strips the input character
* from the resulting text.
*
* <p>If the character does not need to be escaped, this method should return {@code null}, rather
* than a one-character array containing the character itself. This enables the escaping algorithm
* to perform more efficiently.
*
* <p>An escaper is expected to be able to deal with any {@code char} value, so this method should
* not throw any exceptions.
*
* @param c the character to escape if necessary
* @return the replacement characters, or {@code null} if no escaping was needed
*/
protected abstract char @Nullable [] escape(char c);
/**
* Returns the escaped form of a given literal string, starting at the given index. This method is
* called by the {@link #escape(String)} method when it discovers that escaping is required. It is
* protected to allow subclasses to override the fastpath escaping function to inline their
* escaping test. See {@link CharEscaperBuilder} for an example usage.
*
* @param s the literal string to be escaped
* @param index the index to start escaping from
* @return the escaped form of {@code string}
* @throws NullPointerException if {@code string} is null
*/
protected final String escapeSlow(String s, int index) {
int slen = s.length();
// Get a destination buffer and setup some loop variables.
char[] dest = Platform.charBufferFromThreadLocal();
int destSize = dest.length;
int destIndex = 0;
int lastEscape = 0;
// Loop through the rest of the string, replacing when needed into the
// destination buffer, which gets grown as needed as well.
for (; index < slen; index++) {
// Get a replacement for the current character.
char[] r = escape(s.charAt(index));
// If no replacement is needed, just continue.
if (r == null) {
continue;
}
int rlen = r.length;
int charsSkipped = index - lastEscape;
// This is the size needed to add the replacement, not the full size
// needed by the string. We only regrow when we absolutely must, and
// when we do grow, grow enough to avoid excessive growing. Grow.
int sizeNeeded = destIndex + charsSkipped + rlen;
if (destSize < sizeNeeded) {
destSize = sizeNeeded + DEST_PAD_MULTIPLIER * (slen - index);
dest = growBuffer(dest, destIndex, destSize);
}
// If we have skipped any characters, we need to copy them now.
if (charsSkipped > 0) {
s.getChars(lastEscape, index, dest, destIndex);
destIndex += charsSkipped;
}
// Copy the replacement string into the dest buffer as needed.
if (rlen > 0) {
System.arraycopy(r, 0, dest, destIndex, rlen);
destIndex += rlen;
}
lastEscape = index + 1;
}
// Copy leftover characters if there are any.
int charsLeft = slen - lastEscape;
if (charsLeft > 0) {
int sizeNeeded = destIndex + charsLeft;
if (destSize < sizeNeeded) {
// Regrow and copy, expensive! No padding as this is the final copy.
dest = growBuffer(dest, destIndex, sizeNeeded);
}
s.getChars(lastEscape, slen, dest, destIndex);
destIndex = sizeNeeded;
}
return new String(dest, 0, destIndex);
}
/**
* Helper method to grow the character buffer as needed, this only happens once in a while so it's
* ok if it's in a method call. If the index passed in is 0 then no copying will be done.
*/
private static char[] growBuffer(char[] dest, int index, int size) {
if (size < 0) { // overflow - should be OutOfMemoryError but GWT/j2cl don't support it
throw new AssertionError("Cannot increase internal buffer any further");
}
char[] copy = new char[size];
if (index > 0) {
System.arraycopy(dest, 0, copy, 0, index);
}
return copy;
}
/** The multiplier for padding to use when growing the escape buffer. */
private static final int DEST_PAD_MULTIPLIER = 2;
}
| CharEscaper |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/util/ConcurrentLruCache.java | {
"start": 13420,
"end": 14011
} | class ____<K, V> extends AtomicReference<CacheEntry<V>> {
final K key;
@Nullable Node<K, V> prev;
@Nullable Node<K, V> next;
Node(K key, CacheEntry<V> cacheEntry) {
super(cacheEntry);
this.key = key;
}
public @Nullable Node<K, V> getPrevious() {
return this.prev;
}
public void setPrevious(@Nullable Node<K, V> prev) {
this.prev = prev;
}
public @Nullable Node<K, V> getNext() {
return this.next;
}
public void setNext(@Nullable Node<K, V> next) {
this.next = next;
}
V getValue() {
return get().value;
}
}
private static final | Node |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java | {
"start": 3399,
"end": 9851
} | class ____ {
private final TransportAddress transportAddress;
ConnectionAttempt(TransportAddress transportAddress) {
this.transportAddress = transportAddress;
}
void run(ActionListener<ProbeConnectionResult> listener) {
SubscribableListener.newForked(this::openProbeConnection)
.andThen(this::handshakeProbeConnection)
.andThen(this::openFullConnection)
.addListener(listener);
}
private void openProbeConnection(ActionListener<Transport.Connection> listener) {
// We could skip this if the transportService were already connected to the given address, but the savings would be minimal so
// we open a new connection anyway.
logger.trace("[{}] opening probe connection", transportAddress);
transportService.openConnection(
new DiscoveryNode(
"",
transportAddress.toString(),
UUIDs.randomBase64UUID(Randomness.get()), // generated deterministically for reproducible tests
transportAddress.address().getHostString(),
transportAddress.getAddress(),
transportAddress,
emptyMap(),
emptySet(),
new VersionInformation(
Version.CURRENT.minimumCompatibilityVersion(),
IndexVersions.MINIMUM_COMPATIBLE,
IndexVersions.MINIMUM_READONLY_COMPATIBLE,
IndexVersion.current()
)
),
handshakeConnectionProfile,
ActionListener.assertOnce(listener)
);
}
private void handshakeProbeConnection(ActionListener<DiscoveryNode> listener, Transport.Connection connection) {
logger.trace("[{}] opened probe connection", transportAddress);
final var probeHandshakeTimeout = handshakeConnectionProfile.getHandshakeTimeout();
transportService.handshake(connection, probeHandshakeTimeout, ActionListener.assertOnce(new ActionListener<>() {
@Override
public void onResponse(DiscoveryNode remoteNode) {
// success means (amongst other things) that the cluster names match
logger.trace("[{}] handshake successful: {}", transportAddress, remoteNode);
IOUtils.closeWhileHandlingException(connection);
listener.onResponse(remoteNode);
}
@Override
public void onFailure(Exception e) {
// We opened a connection and successfully performed a low-level handshake, so we were definitely talking to an
// Elasticsearch node, but the high-level handshake failed indicating some kind of mismatched configurations (e.g.
// cluster name) that the user should address.
logger.warn(() -> "handshake to [" + transportAddress + "] failed", e);
IOUtils.closeWhileHandlingException(connection);
listener.onFailure(e);
}
}));
}
private void openFullConnection(ActionListener<ProbeConnectionResult> listener, DiscoveryNode remoteNode) {
if (remoteNode.equals(transportService.getLocalNode())) {
throw new ConnectTransportException(
remoteNode,
String.format(
Locale.ROOT,
"successfully discovered local node %s at [%s]",
remoteNode.descriptionWithoutAttributes(),
transportAddress
)
);
}
if (remoteNode.isMasterNode() == false) {
throw new ConnectTransportException(
remoteNode,
String.format(
Locale.ROOT,
"""
successfully discovered master-ineligible node %s at [%s]; to suppress this message, remove address [%s] from \
your discovery configuration or ensure that traffic to this address is routed only to master-eligible nodes""",
remoteNode.descriptionWithoutAttributes(),
transportAddress,
transportAddress
)
);
}
transportService.connectToNode(remoteNode, ActionListener.assertOnce(new ActionListener<>() {
@Override
public void onResponse(Releasable connectionReleasable) {
logger.trace("[{}] completed full connection with [{}]", transportAddress, remoteNode);
listener.onResponse(new ProbeConnectionResult(remoteNode, connectionReleasable));
}
@Override
public void onFailure(Exception e) {
// We opened a connection and successfully performed a handshake, so we're definitely talking to a master-eligible node
// with a matching cluster name and a good version, but the attempt to open a full connection to its publish address
// failed; a common reason is that the remote node is listening on 0.0.0.0 but has made an inappropriate choice for its
// publish address.
logger.warn(
() -> format(
"""
Successfully discovered master-eligible node [%s] at address [%s] but could not connect to it at its \
publish address of [%s]. Each node in a cluster must be accessible at its publish address by all other \
nodes in the cluster. See %s for more information.""",
remoteNode.descriptionWithoutAttributes(),
transportAddress,
remoteNode.getAddress(),
ReferenceDocs.NETWORK_BINDING_AND_PUBLISHING
),
e
);
listener.onFailure(e);
}
}));
}
}
}
| ConnectionAttempt |
java | micronaut-projects__micronaut-core | core/src/main/java/io/micronaut/core/reflect/InstantiationUtils.java | {
"start": 8454,
"end": 9609
} | class ____ any exceptions as {@link InstantiationException}.
*
* @param type The type
* @param <T> The generic type
* @return The instantiated instance
* @throws InstantiationException When an error occurs
*/
public static <T> T instantiate(Class<T> type) {
try {
return BeanIntrospector.SHARED.findIntrospection(type).map(BeanIntrospection::instantiate).orElseGet(() -> {
try {
Logger log = ClassUtils.REFLECTION_LOGGER;
if (log.isDebugEnabled()) {
log.debug("Reflectively instantiating type: {}", type);
}
return type.getDeclaredConstructor().newInstance();
} catch (Throwable e) {
throw new InstantiationException("Could not instantiate type [" + type.getName() + "]: " + e.getMessage(), e);
}
});
} catch (Throwable e) {
throw new InstantiationException("Could not instantiate type [" + type.getName() + "]: " + e.getMessage(), e);
}
}
/**
* Instantiate the given | rethrowing |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_3300/Issue3376.java | {
"start": 782,
"end": 1261
} | class ____ {
private final long offset;
private final long timestamp;
public Model(long offset, long timestamp) {
this.offset = offset;
this.timestamp = timestamp;
}
/**
* 这种 类似的 get 方法不正规,没办法确定那个方法才算是获取参数的接口,可以参考例子 3
*/
public long timestamp() {
return timestamp;
}
public long offset() {
return this.offset;
}
}
public static | Model |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/recursive/comparison/fields/RecursiveComparisonAssert_for_maps_Test.java | {
"start": 1301,
"end": 6232
} | class ____ extends WithComparingFieldsIntrospectionStrategyBaseTest {
@Test
// verify we don't need to cast actual to an Object as before when only Object assertions provided
// usingRecursiveComparison(configuration)
void should_be_directly_usable_with_maps() {
// GIVEN
Person sheldon = new Person("Sheldon");
Person leonard = new Person("Leonard");
Person raj = new Person("Rajesh");
PersonDto sheldonDto = new PersonDto("Sheldon");
PersonDto leonardDto = new PersonDto("Leonard");
PersonDto rajDto = new PersonDto("Rajesh");
Map<String, Person> actual = mapOf(entry(sheldon.name, sheldon),
entry(leonard.name, leonard),
entry(raj.name, raj));
Map<String, PersonDto> expected = mapOf(entry(sheldonDto.name, sheldonDto),
entry(leonardDto.name, leonardDto),
entry(rajDto.name, rajDto));
// WHEN/THEN no need to cast actual to an Object as before (since only object assertions provided
// usingRecursiveComparison(configuration)
then(actual).usingRecursiveComparison(recursiveComparisonConfiguration)
.ignoringAllOverriddenEquals()
.isEqualTo(expected);
}
@Test
public void should_honor_ignored_fields() {
// GIVEN
Map<String, Object> mapA = ImmutableMap.of("foo", "bar", "description", "foobar", "submap",
ImmutableMap.of("subFoo", "subBar", "description", "subFooBar"));
Map<String, Object> mapB = ImmutableMap.of("foo", "bar", "description", "barfoo", "submap",
ImmutableMap.of("subFoo", "subBar", "description", "subBarFoo"));
// THEN
then(mapA).usingRecursiveComparison(recursiveComparisonConfiguration)
.ignoringFields("description", "submap.description")
.isEqualTo(mapB);
then(mapA).usingRecursiveComparison(recursiveComparisonConfiguration)
.ignoringFieldsMatchingRegexes(".*description")
.isEqualTo(mapB);
}
@Test
public void should_honor_ignored_fields_with_sorted_maps() {
// GIVEN
Map<String, Object> mapA = ImmutableSortedMap.of("foo", "bar", "description", "foobar", "submap",
ImmutableSortedMap.of("subFoo", "subBar", "description", "subFooBar"));
Map<String, Object> mapB = ImmutableSortedMap.of("foo", "bar", "description", "barfoo", "submap",
ImmutableSortedMap.of("subFoo", "subBar", "description", "subBarFoo"));
// THEN
then(mapA).usingRecursiveComparison(recursiveComparisonConfiguration)
.ignoringFields("description", "submap.description")
.isEqualTo(mapB);
then(mapA).usingRecursiveComparison(recursiveComparisonConfiguration)
.ignoringFieldsMatchingRegexes(".*description")
.isEqualTo(mapB);
}
@Test
public void should_report_missing_keys_as_missing_fields() {
// GIVEN
Map<String, Object> mapA = ImmutableSortedMap.of("foo", "bar", "desc", "foobar", "submap",
ImmutableSortedMap.of("subFoo", "subBar", "description", "subFooBar"));
Map<String, Object> mapB = ImmutableSortedMap.of("fu", "bar", "description", "foobar", "submap",
ImmutableSortedMap.of("subFu", "subBar", "description", "subFuBar"));
// WHEN
var assertionError = expectAssertionError(() -> assertThat(mapA).usingRecursiveComparison(recursiveComparisonConfiguration)
.isEqualTo(mapB));
// THEN
then(assertionError).hasMessageContainingAll(format("map key difference:%n"
+ "- actual key : \"foo\"%n"
+ "- expected key: \"fu\""),
format("map key difference:%n"
+ "- actual key : \"desc\"%n"
+ "- expected key: \"description\""),
format("map key difference:%n"
+ "- actual key : \"subFoo\"%n"
+ "- expected key: \"subFu\""),
format("field/property 'submap.description' differ:%n"
+ "- actual value : \"subFooBar\"%n"
+ "- expected value: \"subFuBar\"")
);
}
}
| RecursiveComparisonAssert_for_maps_Test |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/MultipleJoinFetchesDifferentAliasTest.java | {
"start": 4770,
"end": 5039
} | class ____ {
private Point start;
private Point end;
public StartAndEndModel(Point start, Point end) {
this.start = start;
this.end = end;
}
public Point getStart() {
return start;
}
public Point getEnd() {
return end;
}
}
}
| StartAndEndModel |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/MulticastRedeliverTest.java | {
"start": 1153,
"end": 4694
} | class ____ extends ContextTestSupport {
private static int counter;
@Test
public void testOk() throws Exception {
getMockEndpoint("mock:a").expectedMessageCount(1);
getMockEndpoint("mock:b").expectedMessageCount(1);
template.sendBody("direct:test1", "Hello World");
assertMockEndpointsSatisfied();
}
@Test
public void testThrowExceptionAtA() throws Exception {
counter = 0;
getMockEndpoint("mock:a").expectedMessageCount(1);
getMockEndpoint("mock:b").expectedMessageCount(0);
try {
template.sendBody("direct:test2", "Hello World");
fail("Should have thrown exception");
} catch (CamelExecutionException e) {
assertEquals("Forced", e.getCause().getCause().getMessage());
}
assertMockEndpointsSatisfied();
// first call + 3 redeliveries
assertEquals(1 + 3, counter);
}
@Test
public void testThrowExceptionAtB() throws Exception {
counter = 0;
getMockEndpoint("mock:a").expectedMessageCount(1);
getMockEndpoint("mock:b").expectedMessageCount(1);
getMockEndpoint("mock:c").expectedMessageCount(0);
try {
template.sendBody("direct:test3", "Hello World");
fail("Should have thrown exception");
} catch (CamelExecutionException e) {
assertEquals("Forced", e.getCause().getCause().getMessage());
}
assertMockEndpointsSatisfied();
// first call + 3 redeliveries
assertEquals(1 + 3, counter);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
// try to redeliver up till 3 times
errorHandler(defaultErrorHandler().maximumRedeliveries(3).redeliveryDelay(0));
from("direct:test1").multicast().stopOnException().to("mock:a").to("mock:b");
from("direct:test2").multicast().stopOnException().to("mock:a").to("direct:a").to("mock:b");
from("direct:test3").multicast().stopOnException().to("mock:a").to("mock:b").to("direct:b").to("mock:c");
from("direct:a").process(new Processor() {
@Override
public void process(Exchange exchange) {
// should be same input body
assertEquals("Hello World", exchange.getIn().getBody());
assertFalse(exchange.hasOut(), "Should not have OUT");
assertNull(exchange.getException());
counter++;
throw new IllegalArgumentException("Forced");
}
});
from("direct:b").process(new Processor() {
@Override
public void process(Exchange exchange) {
// should be same input body
assertEquals("Hello World", exchange.getIn().getBody());
assertFalse(exchange.hasOut(), "Should not have OUT");
assertNull(exchange.getException());
// mutate OUT body
exchange.getMessage().setBody("Bye World");
counter++;
throw new IllegalArgumentException("Forced");
}
});
}
};
}
}
| MulticastRedeliverTest |
java | apache__camel | test-infra/camel-test-infra-jetty/src/test/java/org/apache/camel/test/infra/jetty/services/JettyService.java | {
"start": 968,
"end": 1134
} | interface ____ extends TestService {
/**
* Gets the port used to run the service
*
* @return the port number
*/
int getPort();
}
| JettyService |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/scheduling/concurrent/DecoratedThreadPoolTaskExecutorTests.java | {
"start": 922,
"end": 1414
} | class ____ extends AbstractSchedulingTaskExecutorTests {
@Override
protected AsyncTaskExecutor buildExecutor() {
ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
executor.setTaskDecorator(runnable ->
new DelegatingErrorHandlingRunnable(runnable, TaskUtils.LOG_AND_PROPAGATE_ERROR_HANDLER));
executor.setThreadNamePrefix(this.threadNamePrefix);
executor.setMaxPoolSize(1);
executor.afterPropertiesSet();
return executor;
}
}
| DecoratedThreadPoolTaskExecutorTests |
java | apache__maven | impl/maven-cli/src/main/java/org/apache/maven/cling/invoker/mvn/MavenInvoker.java | {
"start": 3550,
"end": 28904
} | class ____ extends LookupInvoker<MavenContext> {
public MavenInvoker(Lookup protoLookup, @Nullable Consumer<LookupContext> contextConsumer) {
super(protoLookup, contextConsumer);
}
@Override
protected MavenContext createContext(InvokerRequest invokerRequest) {
return new MavenContext(
invokerRequest, true, (MavenOptions) invokerRequest.options().orElse(null));
}
@Override
protected int execute(MavenContext context) throws Exception {
MavenExecutionRequest request = prepareMavenExecutionRequest();
toolchains(context, request);
populateRequest(context, context.lookup, request);
return doExecute(context, request);
}
protected MavenExecutionRequest prepareMavenExecutionRequest() throws Exception {
// explicitly fill in "defaults"?
DefaultMavenExecutionRequest mavenExecutionRequest = new DefaultMavenExecutionRequest();
mavenExecutionRequest.setRepositoryCache(new DefaultRepositoryCache());
mavenExecutionRequest.setInteractiveMode(true);
mavenExecutionRequest.setCacheTransferError(false);
mavenExecutionRequest.setIgnoreInvalidArtifactDescriptor(true);
mavenExecutionRequest.setIgnoreMissingArtifactDescriptor(true);
mavenExecutionRequest.setRecursive(true);
mavenExecutionRequest.setReactorFailureBehavior(MavenExecutionRequest.REACTOR_FAIL_FAST);
mavenExecutionRequest.setStartInstant(MonotonicClock.now());
mavenExecutionRequest.setLoggingLevel(MavenExecutionRequest.LOGGING_LEVEL_INFO);
mavenExecutionRequest.setDegreeOfConcurrency(1);
mavenExecutionRequest.setBuilderId("singlethreaded");
return mavenExecutionRequest;
}
@Override
protected void lookup(MavenContext context) throws Exception {
if (context.maven == null) {
super.lookup(context);
context.maven = context.lookup.lookup(Maven.class);
}
}
@Override
protected void postCommands(MavenContext context) throws Exception {
super.postCommands(context);
Logger logger = context.logger;
if (context.options().relaxedChecksums().orElse(false)) {
logger.info("Disabling strict checksum verification on all artifact downloads.");
} else if (context.options().strictChecksums().orElse(false)) {
logger.info("Enabling strict checksum verification on all artifact downloads.");
}
}
protected void toolchains(MavenContext context, MavenExecutionRequest request) throws Exception {
Path userToolchainsFile = null;
if (context.options().altUserToolchains().isPresent()) {
userToolchainsFile =
context.cwd.resolve(context.options().altUserToolchains().get());
if (!Files.isRegularFile(userToolchainsFile)) {
throw new FileNotFoundException(
"The specified user toolchains file does not exist: " + userToolchainsFile);
}
} else {
String userToolchainsFileStr =
context.protoSession.getEffectiveProperties().get(Constants.MAVEN_USER_TOOLCHAINS);
if (userToolchainsFileStr != null) {
userToolchainsFile = context.cwd.resolve(userToolchainsFileStr);
}
}
Path installationToolchainsFile = null;
if (context.options().altInstallationToolchains().isPresent()) {
installationToolchainsFile = context.cwd.resolve(
context.options().altInstallationToolchains().get());
if (!Files.isRegularFile(installationToolchainsFile)) {
throw new FileNotFoundException(
"The specified installation toolchains file does not exist: " + installationToolchainsFile);
}
} else {
String installationToolchainsFileStr =
context.protoSession.getEffectiveProperties().get(Constants.MAVEN_INSTALLATION_TOOLCHAINS);
if (installationToolchainsFileStr != null) {
installationToolchainsFile = context.installationDirectory
.resolve(installationToolchainsFileStr)
.normalize();
}
}
request.setInstallationToolchainsFile(
installationToolchainsFile != null ? installationToolchainsFile.toFile() : null);
request.setUserToolchainsFile(userToolchainsFile != null ? userToolchainsFile.toFile() : null);
ToolchainsBuilderRequest toolchainsRequest = ToolchainsBuilderRequest.builder()
.session(context.protoSession)
.installationToolchainsSource(
installationToolchainsFile != null && Files.isRegularFile(installationToolchainsFile)
? Sources.fromPath(installationToolchainsFile)
: null)
.userToolchainsSource(
userToolchainsFile != null && Files.isRegularFile(userToolchainsFile)
? Sources.fromPath(userToolchainsFile)
: null)
.build();
context.eventSpyDispatcher.onEvent(toolchainsRequest);
context.logger.debug("Reading installation toolchains from '" + installationToolchainsFile + "'");
context.logger.debug("Reading user toolchains from '" + userToolchainsFile + "'");
ToolchainsBuilderResult toolchainsResult =
context.lookup.lookup(ToolchainsBuilder.class).build(toolchainsRequest);
context.eventSpyDispatcher.onEvent(toolchainsResult);
context.effectiveToolchains = toolchainsResult.getEffectiveToolchains();
if (toolchainsResult.getProblems().hasWarningProblems()) {
int totalProblems = toolchainsResult.getProblems().totalProblemsReported();
context.logger.info("");
context.logger.info(String.format(
"%s %s encountered while building the effective toolchains (use -e to see details)",
totalProblems, (totalProblems == 1) ? "problem was" : "problems were"));
if (context.options().showErrors().orElse(false)) {
for (BuilderProblem problem :
toolchainsResult.getProblems().problems().toList()) {
context.logger.warn(problem.getMessage() + " @ " + problem.getLocation());
}
}
context.logger.info("");
}
}
@Override
protected void populateRequest(MavenContext context, Lookup lookup, MavenExecutionRequest request)
throws Exception {
super.populateRequest(context, lookup, request);
if (context.invokerRequest.rootDirectory().isEmpty()) {
// maven requires this to be set; so default it (and see below at POM)
request.setMultiModuleProjectDirectory(
context.invokerRequest.topDirectory().toFile());
request.setRootDirectory(context.invokerRequest.topDirectory());
}
request.setToolchains(
Optional.ofNullable(context.effectiveToolchains).map(PersistedToolchains::getToolchains).stream()
.flatMap(List::stream)
.map(ToolchainModel::new)
.collect(Collectors.groupingBy(ToolchainModel::getType)));
request.setNoSnapshotUpdates(context.options().suppressSnapshotUpdates().orElse(false));
request.setGoals(context.options().goals().orElse(List.of()));
request.setReactorFailureBehavior(determineReactorFailureBehaviour(context));
request.setRecursive(!context.options().nonRecursive().orElse(!request.isRecursive()));
request.setOffline(context.options().offline().orElse(request.isOffline()));
request.setUpdateSnapshots(context.options().updateSnapshots().orElse(false));
request.setGlobalChecksumPolicy(determineGlobalChecksumPolicy(context));
Path pom = determinePom(context, lookup);
if (pom != null) {
request.setPom(pom.toFile());
if (pom.getParent() != null) {
request.setBaseDirectory(pom.getParent().toFile());
}
// project present, but we could not determine rootDirectory: extra work needed
if (context.invokerRequest.rootDirectory().isEmpty()) {
Path rootDirectory = CliUtils.findMandatoryRoot(context.invokerRequest.topDirectory());
request.setMultiModuleProjectDirectory(rootDirectory.toFile());
request.setRootDirectory(rootDirectory);
}
}
request.setTransferListener(determineTransferListener(
context, context.options().noTransferProgress().orElse(false)));
request.setExecutionListener(determineExecutionListener(context));
request.setResumeFrom(context.options().resumeFrom().orElse(null));
request.setResume(context.options().resume().orElse(false));
request.setMakeBehavior(determineMakeBehavior(context));
request.setCacheNotFound(context.options().cacheArtifactNotFound().orElse(true));
request.setCacheTransferError(false);
if (context.options().strictArtifactDescriptorPolicy().orElse(false)) {
request.setIgnoreMissingArtifactDescriptor(false);
request.setIgnoreInvalidArtifactDescriptor(false);
} else {
request.setIgnoreMissingArtifactDescriptor(true);
request.setIgnoreInvalidArtifactDescriptor(true);
}
request.setIgnoreTransitiveRepositories(
context.options().ignoreTransitiveRepositories().orElse(false));
performProjectActivation(context, request.getProjectActivation());
performProfileActivation(context, request.getProfileActivation());
//
// Builder, concurrency and parallelism
//
// We preserve the existing methods for builder selection which is to look for various inputs in the threading
// configuration. We don't have an easy way to allow a pluggable builder to provide its own configuration
// parameters but this is sufficient for now. Ultimately we want components like Builders to provide a way to
// extend the command line to accept its own configuration parameters.
//
if (context.options().threads().isPresent()) {
int degreeOfConcurrency =
calculateDegreeOfConcurrency(context.options().threads().get());
if (degreeOfConcurrency > 1) {
request.setBuilderId("multithreaded");
request.setDegreeOfConcurrency(degreeOfConcurrency);
}
}
//
// Allow the builder to be overridden by the user if requested. The builders are now pluggable.
//
if (context.options().builder().isPresent()) {
request.setBuilderId(context.options().builder().get());
}
}
protected Path determinePom(MavenContext context, Lookup lookup) {
Path current = context.cwd.get();
if (context.options().alternatePomFile().isPresent()) {
current = context.cwd.resolve(context.options().alternatePomFile().get());
}
ModelProcessor modelProcessor =
lookup.lookupOptional(ModelProcessor.class).orElse(null);
if (modelProcessor != null) {
return modelProcessor.locateExistingPom(current);
} else {
return Files.isRegularFile(current) ? current : null;
}
}
protected String determineReactorFailureBehaviour(MavenContext context) {
if (context.options().failFast().isPresent()) {
return MavenExecutionRequest.REACTOR_FAIL_FAST;
} else if (context.options().failAtEnd().isPresent()) {
return MavenExecutionRequest.REACTOR_FAIL_AT_END;
} else if (context.options().failNever().isPresent()) {
return MavenExecutionRequest.REACTOR_FAIL_NEVER;
} else {
return MavenExecutionRequest.REACTOR_FAIL_FAST;
}
}
protected String determineGlobalChecksumPolicy(MavenContext context) {
if (context.options().strictChecksums().orElse(false)) {
return MavenExecutionRequest.CHECKSUM_POLICY_FAIL;
} else if (context.options().relaxedChecksums().orElse(false)) {
return MavenExecutionRequest.CHECKSUM_POLICY_WARN;
} else {
return null;
}
}
protected ExecutionListener determineExecutionListener(MavenContext context) {
ExecutionListener listener = new ExecutionEventLogger(context.invokerRequest.messageBuilderFactory());
if (context.eventSpyDispatcher != null) {
listener = context.eventSpyDispatcher.chainListener(listener);
}
return new LoggingExecutionListener(listener, determineBuildEventListener(context));
}
protected TransferListener determineTransferListener(MavenContext context, boolean noTransferProgress) {
boolean quiet = context.options().quiet().orElse(false);
boolean logFile = context.options().logFile().isPresent();
boolean quietCI = context.invokerRequest.ciInfo().isPresent()
&& !context.options().forceInteractive().orElse(false);
TransferListener delegate;
if (quiet || noTransferProgress || quietCI) {
delegate = new QuietMavenTransferListener();
} else if (context.interactive && !logFile) {
if (context.simplexTransferListener == null) {
SimplexTransferListener simplex = new SimplexTransferListener(new ConsoleMavenTransferListener(
context.invokerRequest.messageBuilderFactory(),
context.terminal.writer(),
context.invokerRequest.effectiveVerbose()));
context.closeables.add(simplex);
context.simplexTransferListener = simplex;
}
delegate = context.simplexTransferListener;
} else {
delegate = new Slf4jMavenTransferListener();
}
return new MavenTransferListener(delegate, determineBuildEventListener(context));
}
protected String determineMakeBehavior(MavenContext context) {
if (context.options().alsoMake().isPresent()
&& context.options().alsoMakeDependents().isEmpty()) {
return MavenExecutionRequest.REACTOR_MAKE_UPSTREAM;
} else if (context.options().alsoMake().isEmpty()
&& context.options().alsoMakeDependents().isPresent()) {
return MavenExecutionRequest.REACTOR_MAKE_DOWNSTREAM;
} else if (context.options().alsoMake().isPresent()
&& context.options().alsoMakeDependents().isPresent()) {
return MavenExecutionRequest.REACTOR_MAKE_BOTH;
} else {
return null;
}
}
protected void performProjectActivation(MavenContext context, ProjectActivation projectActivation) {
if (context.options().projects().isPresent()
&& !context.options().projects().get().isEmpty()) {
List<String> optionValues = context.options().projects().get();
for (final String optionValue : optionValues) {
for (String token : optionValue.split(",")) {
String selector = token.trim();
boolean active = true;
if (!selector.isEmpty()) {
if (selector.charAt(0) == '-' || selector.charAt(0) == '!') {
active = false;
selector = selector.substring(1);
} else if (token.charAt(0) == '+') {
selector = selector.substring(1);
}
}
boolean optional = false;
if (!selector.isEmpty() && selector.charAt(0) == '?') {
optional = true;
selector = selector.substring(1);
}
projectActivation.addProjectActivation(selector, active, optional);
}
}
}
}
protected void performProfileActivation(MavenContext context, ProfileActivation profileActivation) {
if (context.options().activatedProfiles().isPresent()
&& !context.options().activatedProfiles().get().isEmpty()) {
List<String> optionValues = context.options().activatedProfiles().get();
for (final String optionValue : optionValues) {
for (String token : optionValue.split(",")) {
String profileId = token.trim();
boolean active = true;
if (!profileId.isEmpty()) {
if (profileId.charAt(0) == '-' || profileId.charAt(0) == '!') {
active = false;
profileId = profileId.substring(1);
} else if (token.charAt(0) == '+') {
profileId = profileId.substring(1);
}
}
boolean optional = false;
if (!profileId.isEmpty() && profileId.charAt(0) == '?') {
optional = true;
profileId = profileId.substring(1);
}
profileActivation.addProfileActivation(profileId, active, optional);
}
}
}
}
protected int doExecute(MavenContext context, MavenExecutionRequest request) throws Exception {
context.eventSpyDispatcher.onEvent(request);
MavenExecutionResult result;
try {
result = context.maven.execute(request);
context.eventSpyDispatcher.onEvent(result);
} finally {
context.eventSpyDispatcher.close();
}
if (result.hasExceptions()) {
ExceptionHandler handler = new DefaultExceptionHandler();
Map<String, String> references = new LinkedHashMap<>();
List<MavenProject> failedProjects = new ArrayList<>();
for (Throwable exception : result.getExceptions()) {
ExceptionSummary summary = handler.handleException(exception);
logSummary(context, summary, references, "");
if (exception instanceof LifecycleExecutionException lifecycleExecutionException) {
failedProjects.add(lifecycleExecutionException.getProject());
}
}
context.logger.error("");
if (!context.options().showErrors().orElse(false)) {
context.logger.error("To see the full stack trace of the errors, re-run Maven with the '"
+ MessageUtils.builder().strong("-e") + "' switch");
}
if (!context.invokerRequest.effectiveVerbose()) {
context.logger.error("Re-run Maven using the '"
+ MessageUtils.builder().strong("-X") + "' switch to enable verbose output");
}
if (!references.isEmpty()) {
context.logger.error("");
context.logger.error("For more information about the errors and possible solutions"
+ ", please read the following articles:");
for (Map.Entry<String, String> entry : references.entrySet()) {
context.logger.error(MessageUtils.builder().strong(entry.getValue()) + " " + entry.getKey());
}
}
if (result.canResume()) {
logBuildResumeHint(context, "mvn [args] -r");
} else if (!failedProjects.isEmpty()) {
List<MavenProject> sortedProjects = result.getTopologicallySortedProjects();
// Sort the failedProjects list in the topologically sorted order.
failedProjects.sort(comparing(sortedProjects::indexOf));
MavenProject firstFailedProject = failedProjects.get(0);
if (!firstFailedProject.equals(sortedProjects.get(0))) {
String resumeFromSelector = getResumeFromSelector(sortedProjects, firstFailedProject);
logBuildResumeHint(context, "mvn [args] -rf " + resumeFromSelector);
}
}
if (context.options().failNever().orElse(false)) {
context.logger.info("Build failures were ignored.");
return 0;
} else {
return 1;
}
} else {
return 0;
}
}
protected void logBuildResumeHint(MavenContext context, String resumeBuildHint) {
context.logger.error("");
context.logger.error("After correcting the problems, you can resume the build with the command");
context.logger.error(
MessageUtils.builder().a(" ").strong(resumeBuildHint).toString());
}
/**
* A helper method to determine the value to resume the build with {@code -rf} taking into account the edge case
* where multiple modules in the reactor have the same artifactId.
* <p>
* {@code -rf :artifactId} will pick up the first module which matches, but when multiple modules in the reactor
* have the same artifactId, effective failed module might be later in build reactor.
* This means that developer will either have to type groupId or wait for build execution of all modules which
* were fine, but they are still before one which reported errors.
* <p>Then the returned value is {@code groupId:artifactId} when there is a name clash and
* {@code :artifactId} if there is no conflict.
* This method is made package-private for testing purposes.
*
* @param mavenProjects Maven projects which are part of build execution.
* @param firstFailedProject The first project which has failed.
* @return Value for -rf flag to resume build exactly from place where it failed ({@code :artifactId} in general
* and {@code groupId:artifactId} when there is a name clash).
*/
protected String getResumeFromSelector(List<MavenProject> mavenProjects, MavenProject firstFailedProject) {
boolean hasOverlappingArtifactId = mavenProjects.stream()
.filter(project -> firstFailedProject.getArtifactId().equals(project.getArtifactId()))
.count()
> 1;
if (hasOverlappingArtifactId) {
return firstFailedProject.getGroupId() + ":" + firstFailedProject.getArtifactId();
}
return ":" + firstFailedProject.getArtifactId();
}
protected static final Pattern NEXT_LINE = Pattern.compile("\r?\n");
protected static final Pattern LAST_ANSI_SEQUENCE = Pattern.compile("(\u001B\\[[;\\d]*[ -/]*[@-~])[^\u001B]*$");
protected static final String ANSI_RESET = "\u001B\u005Bm";
protected void logSummary(
MavenContext context, ExceptionSummary summary, Map<String, String> references, String indent) {
String referenceKey = "";
if (summary.getReference() != null && !summary.getReference().isEmpty()) {
referenceKey =
references.computeIfAbsent(summary.getReference(), k -> "[Help " + (references.size() + 1) + "]");
}
String msg = summary.getMessage();
if (!referenceKey.isEmpty()) {
if (msg.indexOf('\n') < 0) {
msg += " -> " + MessageUtils.builder().strong(referenceKey);
} else {
msg += "\n-> " + MessageUtils.builder().strong(referenceKey);
}
}
String[] lines = NEXT_LINE.split(msg);
String currentColor = "";
for (int i = 0; i < lines.length; i++) {
// add eventual current color inherited from previous line
String line = currentColor + lines[i];
// look for last ANSI escape sequence to check if nextColor
Matcher matcher = LAST_ANSI_SEQUENCE.matcher(line);
String nextColor = "";
if (matcher.find()) {
nextColor = matcher.group(1);
if (ANSI_RESET.equals(nextColor)) {
// last ANSI escape code is reset: no next color
nextColor = "";
}
}
// effective line, with indent and reset if end is colored
line = indent + line + ("".equals(nextColor) ? "" : ANSI_RESET);
if ((i == lines.length - 1)
&& (context.options().showErrors().orElse(false)
|| (summary.getException() instanceof InternalErrorException))) {
context.logger.error(line, summary.getException());
} else {
context.logger.error(line);
}
currentColor = nextColor;
}
indent += " ";
for (ExceptionSummary child : summary.getChildren()) {
logSummary(context, child, references, indent);
}
}
}
| MavenInvoker |
java | quarkusio__quarkus | extensions/arc/deployment/src/test/java/io/quarkus/arc/test/startup/StartupAnnotationTest.java | {
"start": 5498,
"end": 6247
} | class ____ {
@Startup(Integer.MAX_VALUE - 10)
@Produces
String produceString() {
LOG.add("produce_string");
return "ok";
}
void disposeString(@Disposes String val) {
LOG.add("dispose_string");
}
@Startup(Integer.MAX_VALUE - 20)
@Produces
Long produceLong() {
LOG.add("produce_long");
return 1l;
}
void disposeLong(@Disposes Long val) {
LOG.add("dispose_long");
}
@PostConstruct
void init() {
LOG.add("producer_pc");
}
@PreDestroy
void destroy() {
LOG.add("producer_pd");
}
}
static | ProducerStartMe |
java | apache__avro | lang/java/ipc/src/main/java/org/apache/avro/ipc/RPCContext.java | {
"start": 1392,
"end": 6706
} | class ____ {
private HandshakeRequest handshakeRequest;
private HandshakeResponse handshakeResponse;
protected Map<String, ByteBuffer> requestCallMeta, responseCallMeta;
protected Object response;
protected Exception error;
private Message message;
List<ByteBuffer> requestPayload;
List<ByteBuffer> responsePayload;
/** Set the handshake request of this RPC. */
public void setHandshakeRequest(HandshakeRequest handshakeRequest) {
this.handshakeRequest = handshakeRequest;
}
/** Get the handshake request of this RPC. */
public HandshakeRequest getHandshakeRequest() {
return this.handshakeRequest;
}
/** Set the handshake response of this RPC. */
public void setHandshakeResponse(HandshakeResponse handshakeResponse) {
this.handshakeResponse = handshakeResponse;
}
/** Get the handshake response of this RPC. */
public HandshakeResponse getHandshakeResponse() {
return this.handshakeResponse;
}
/**
* This is an access method for the handshake state provided by the client to
* the server.
*
* @return a map representing handshake state from the client to the server
*/
public Map<String, ByteBuffer> requestHandshakeMeta() {
if (handshakeRequest.getMeta() == null)
handshakeRequest.setMeta(new HashMap<>());
return handshakeRequest.getMeta();
}
void setRequestHandshakeMeta(Map<String, ByteBuffer> newmeta) {
handshakeRequest.setMeta(newmeta);
}
/**
* This is an access method for the handshake state provided by the server back
* to the client
*
* @return a map representing handshake state from the server to the client
*/
public Map<String, ByteBuffer> responseHandshakeMeta() {
if (handshakeResponse.getMeta() == null)
handshakeResponse.setMeta(new HashMap<>());
return handshakeResponse.getMeta();
}
void setResponseHandshakeMeta(Map<String, ByteBuffer> newmeta) {
handshakeResponse.setMeta(newmeta);
}
/**
* This is an access method for the per-call state provided by the client to the
* server.
*
* @return a map representing per-call state from the client to the server
*/
public Map<String, ByteBuffer> requestCallMeta() {
if (requestCallMeta == null) {
requestCallMeta = new HashMap<>();
}
return requestCallMeta;
}
void setRequestCallMeta(Map<String, ByteBuffer> newmeta) {
requestCallMeta = newmeta;
}
/**
* This is an access method for the per-call state provided by the server back
* to the client.
*
* @return a map representing per-call state from the server to the client
*/
public Map<String, ByteBuffer> responseCallMeta() {
if (responseCallMeta == null) {
responseCallMeta = new HashMap<>();
}
return responseCallMeta;
}
void setResponseCallMeta(Map<String, ByteBuffer> newmeta) {
responseCallMeta = newmeta;
}
void setResponse(Object response) {
this.response = response;
this.error = null;
}
/**
* The response object generated at the server, if it exists. If an exception
* was generated, this will be null.
*
* @return the response created by this RPC, no null if an exception was
* generated
*/
public Object response() {
return response;
}
void setError(Exception error) {
this.response = null;
this.error = error;
}
/**
* The exception generated at the server, or null if no such exception has
* occurred
*
* @return the exception generated at the server, or null if no such exception
*/
public Exception error() {
return error;
}
/**
* Indicates whether an exception was generated at the server
*
* @return true is an exception was generated at the server, or false if not
*/
public boolean isError() {
return error != null;
}
/** Sets the {@link Message} corresponding to this RPC */
public void setMessage(Message message) {
this.message = message;
}
/**
* Returns the {@link Message} corresponding to this RPC
*
* @return this RPC's {@link Message}
*/
public Message getMessage() {
return message;
}
/**
* Sets the serialized payload of the request in this RPC. Will not include
* handshake or meta-data.
*/
public void setRequestPayload(List<ByteBuffer> payload) {
this.requestPayload = payload;
}
/**
* Returns the serialized payload of the request in this RPC. Will only be
* generated from a Requestor and will not include handshake or meta-data. If
* the request payload has not been set yet, returns null.
*
* @return this RPC's request payload.
*/
public List<ByteBuffer> getRequestPayload() {
return this.requestPayload;
}
/**
* Returns the serialized payload of the response in this RPC. Will only be
* generated from a Responder and will not include handshake or meta-data. If
* the response payload has not been set yet, returns null.
*
* @return this RPC's response payload.
*/
public List<ByteBuffer> getResponsePayload() {
return this.responsePayload;
}
/**
* Sets the serialized payload of the response in this RPC. Will not include
* handshake or meta-data.
*/
public void setResponsePayload(List<ByteBuffer> payload) {
this.responsePayload = payload;
}
}
| RPCContext |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/IncompatibleModifiersCheckerTest.java | {
"start": 3195,
"end": 3703
} | class ____ {
// BUG: Diagnostic contains: The annotation '@NotPrivateOrFinal' has specified that it should not
// be used together with the following modifiers: [final]
@NotPrivateOrFinal public final int n = 0;
}
""")
.doTest();
}
@Test
public void annotationWithIncompatibleModifierOnMethodFails() {
compilationHelper
.addSourceLines(
"test/IncompatibleModifiersTestCase.java",
"""
package test;
import test.NotPrivateOrFinal;
public | IncompatibleModifiersTestCase |
java | quarkusio__quarkus | extensions/hibernate-search-backend-elasticsearch-common/runtime/src/main/java/io/quarkus/hibernate/search/backend/elasticsearch/common/runtime/HibernateSearchBackendElasticsearchRuntimeConfig.java | {
"start": 6293,
"end": 6477
} | interface ____ {
/**
* Configuration for the behavior on shard failure.
*/
QueryShardFailureConfig shardFailure();
}
@ConfigGroup
| QueryConfig |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/cglib/proxy/ProxyRefDispatcher.java | {
"start": 840,
"end": 1225
} | interface ____ extends Callback {
/**
* Return the object which the original method invocation should
* be dispatched. This method is called for <b>every</b> method invocation.
* @param proxy a reference to the proxy (generated) object
* @return an object that can invoke the method
*/
Object loadObject(Object proxy) throws Exception;
}
| ProxyRefDispatcher |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/spatial/SpatialExtentGeoPointSourceValuesAggregator.java | {
"start": 1577,
"end": 2395
} | class ____ extends SpatialExtentLongitudeWrappingAggregator {
// TODO support non-longitude wrapped geo shapes.
public static SpatialExtentStateWrappedLongitudeState initSingle() {
return new SpatialExtentStateWrappedLongitudeState();
}
public static SpatialExtentGroupingStateWrappedLongitudeState initGrouping() {
return new SpatialExtentGroupingStateWrappedLongitudeState();
}
public static void combine(SpatialExtentStateWrappedLongitudeState current, BytesRef bytes) {
current.add(SpatialAggregationUtils.decode(bytes));
}
public static void combine(SpatialExtentGroupingStateWrappedLongitudeState current, int groupId, BytesRef bytes) {
current.add(groupId, SpatialAggregationUtils.decode(bytes));
}
}
| SpatialExtentGeoPointSourceValuesAggregator |
java | elastic__elasticsearch | x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/CheckLicenseTests.java | {
"start": 6699,
"end": 7915
} | class ____ extends Limit implements LicenseAware {
private final LicensedFeature licensedFeature;
public LicensedLimit(Source source, Expression limit, LogicalPlan child, LicensedFeature licensedFeature) {
super(source, limit, child);
this.licensedFeature = licensedFeature;
}
@Override
public boolean licenseCheck(XPackLicenseState state) {
return checkLicense(state, licensedFeature);
}
@Override
public Limit replaceChild(LogicalPlan newChild) {
return new LicensedLimit(source(), limit(), newChild, licensedFeature);
}
@Override
protected NodeInfo<Limit> info() {
return NodeInfo.create(this, LicensedLimit::new, limit(), child(), licensedFeature);
}
@Override
public String sourceText() {
return "LicensedLimit";
}
}
private static boolean checkLicense(XPackLicenseState state, LicensedFeature licensedFeature) {
return licensedFeature instanceof LicensedFeature.Momentary momentary
? momentary.check(state)
: licensedFeature.checkWithoutTracking(state);
}
}
| LicensedLimit |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/KafkaEndpointBuilderFactory.java | {
"start": 223949,
"end": 229662
} | class ____ {
/**
* The internal instance of the builder used to access to all the
* methods representing the name of headers.
*/
private static final KafkaHeaderNameBuilder INSTANCE = new KafkaHeaderNameBuilder();
/**
* Explicitly specify the partition.
*
* The option is a: {@code Integer} type.
*
* Group: producer
*
* @return the name of the header {@code kafka.PARTITION_KEY}.
*/
public String kafkaPartitionKey() {
return "kafka.PARTITION_KEY";
}
/**
* The partition where the message was stored.
*
* The option is a: {@code Integer} type.
*
* Group: consumer
*
* @return the name of the header {@code kafka.PARTITION}.
*/
public String kafkaPartition() {
return "kafka.PARTITION";
}
/**
* Producer: The key of the message in order to ensure that all related
* message goes in the same partition. Consumer: The key of the message
* if configured.
*
* The option is a: {@code Object} type.
*
* Required: true
* Group: common
*
* @return the name of the header {@code kafka.KEY}.
*/
public String kafkaKey() {
return "kafka.KEY";
}
/**
* The topic from where the message originated.
*
* The option is a: {@code String} type.
*
* Group: consumer
*
* @return the name of the header {@code kafka.TOPIC}.
*/
public String kafkaTopic() {
return "kafka.TOPIC";
}
/**
* The topic to which send the message (override and takes precedence),
* and the header is not preserved.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code kafka.OVERRIDE_TOPIC}.
*/
public String kafkaOverrideTopic() {
return "kafka.OVERRIDE_TOPIC";
}
/**
* The offset of the message.
*
* The option is a: {@code Long} type.
*
* Group: consumer
*
* @return the name of the header {@code kafka.OFFSET}.
*/
public String kafkaOffset() {
return "kafka.OFFSET";
}
/**
* The record headers.
*
* The option is a: {@code org.apache.kafka.common.header.Headers} type.
*
* Group: consumer
*
* @return the name of the header {@code kafka.HEADERS}.
*/
public String kafkaHeaders() {
return "kafka.HEADERS";
}
/**
* Whether or not it's the last record before commit (only available if
* autoCommitEnable endpoint parameter is false).
*
* The option is a: {@code Boolean} type.
*
* Group: consumer
*
* @return the name of the header {@code
* kafka.LAST_RECORD_BEFORE_COMMIT}.
*/
public String kafkaLastRecordBeforeCommit() {
return "kafka.LAST_RECORD_BEFORE_COMMIT";
}
/**
* Indicates the last record within the current poll request (only
* available if autoCommitEnable endpoint parameter is false or
* allowManualCommit is true).
*
* The option is a: {@code Boolean} type.
*
* Group: consumer
*
* @return the name of the header {@code kafka.LAST_POLL_RECORD}.
*/
public String kafkaLastPollRecord() {
return "kafka.LAST_POLL_RECORD";
}
/**
* The timestamp of the message.
*
* The option is a: {@code Long} type.
*
* Group: consumer
*
* @return the name of the header {@code kafka.TIMESTAMP}.
*/
public String kafkaTimestamp() {
return "kafka.TIMESTAMP";
}
/**
* The ProducerRecord also has an associated timestamp. If the user did
* provide a timestamp, the producer will stamp the record with the
* provided timestamp and the header is not preserved.
*
* The option is a: {@code Long} type.
*
* Group: producer
*
* @return the name of the header {@code kafka.OVERRIDE_TIMESTAMP}.
*/
public String kafkaOverrideTimestamp() {
return "kafka.OVERRIDE_TIMESTAMP";
}
/**
* The metadata (only configured if recordMetadata endpoint parameter is
* true).
*
* The option is a: {@code List<RecordMetadata>} type.
*
* Group: producer
*
* @return the name of the header {@code kafka.RECORD_META}.
*/
public String kafkaRecordMeta() {
return "kafka.RECORD_META";
}
/**
* Can be used for forcing manual offset commit when using Kafka
* consumer.
*
* The option is a: {@code
* org.apache.camel.component.kafka.consumer.KafkaManualCommit} type.
*
* Group: consumer
*
* @return the name of the header {@code KafkaManualCommit}.
*/
public String kafkaManualCommit() {
return "CamelKafkaManualCommit";
}
}
static KafkaEndpointBuilder endpointBuilder(String componentName, String path) {
| KafkaHeaderNameBuilder |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UnnecessaryBoxedAssignmentTest.java | {
"start": 3879,
"end": 5839
} | class ____ {
void negative_void() {
return;
}
boolean positive_booleanPrimitive(boolean aBoolean) {
return aBoolean;
}
Boolean positive_booleanWrapped(boolean aBoolean) {
Boolean b = aBoolean;
return aBoolean;
}
Boolean negative_booleanString(String aString) {
Boolean b = Boolean.valueOf(aString);
return Boolean.valueOf(aString);
}
byte positive_bytePrimitive(byte aByte) {
return aByte;
}
Byte positive_byteWrapped(byte aByte) {
Byte b = aByte;
return aByte;
}
Byte negative_byteString(String aString) {
Byte b = Byte.valueOf(aString);
return Byte.valueOf(aString);
}
int positive_integerPrimitive(int aInteger) {
return aInteger;
}
Integer positive_integerWrapped(int aInteger) {
Integer i = aInteger;
return aInteger;
}
Integer negative_integerString(String aString) {
Integer i = Integer.valueOf(aString);
return Integer.valueOf(aString);
}
Long negative_integerWrapped(int aInteger) {
Long aLong = Long.valueOf(aInteger);
return Long.valueOf(aInteger);
}
Integer positive_wrappedAgain(int aInteger) {
Integer a = aInteger;
a = aInteger;
return a;
}
void negative_methodReference() {
Function<String, Boolean> toBoolean = Boolean::valueOf;
}
}\
""")
.doTest();
}
}
| UnnecessaryBoxedAssignmentCases |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/entrypoint/component/AbstractUserClassPathJobGraphRetriever.java | {
"start": 1322,
"end": 2389
} | class ____ implements JobGraphRetriever {
/** User classpaths in relative form to the working directory. */
@Nonnull private final Collection<URL> userClassPaths;
protected AbstractUserClassPathJobGraphRetriever(@Nullable File jobDir) throws IOException {
if (jobDir == null) {
userClassPaths = Collections.emptyList();
} else {
final Path workingDirectory = FileUtils.getCurrentWorkingDirectory();
final Collection<URL> relativeJarURLs =
FileUtils.listFilesInDirectory(jobDir.toPath(), FileUtils::isJarFile).stream()
.map(path -> FileUtils.relativizePath(workingDirectory, path))
.map(FunctionUtils.uncheckedFunction(FileUtils::toURL))
.collect(Collectors.toList());
this.userClassPaths = Collections.unmodifiableCollection(relativeJarURLs);
}
}
protected Collection<URL> getUserClassPaths() {
return userClassPaths;
}
}
| AbstractUserClassPathJobGraphRetriever |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java | {
"start": 1645,
"end": 4702
} | class ____ extends RunListener {
protected final Logger logger = LogManager.getLogger(ESTestCase.class);
@Override
public void testStarted(Description description) throws Exception {
logger.trace("Test {} started", description.getDisplayName());
}
@Override
public void testFinished(Description description) throws Exception {
logger.trace("Test {} finished", description.getDisplayName());
}
/**
* Are we in the integ test phase?
*/
static boolean inVerifyPhase() {
return Booleans.parseBoolean(System.getProperty("tests.verify.phase", "false"));
}
@Override
public void testFailure(Failure failure) throws Exception {
// Ignore assumptions.
if (failure.getException() instanceof AssumptionViolatedException) {
return;
}
final String gradlew = Constants.WINDOWS ? "gradlew" : "./gradlew";
final StringBuilder b = new StringBuilder("REPRODUCE WITH: " + gradlew + " ");
String task = System.getProperty("tests.task");
boolean isBwcTest = Booleans.parseBoolean(System.getProperty("tests.bwc", "false"))
|| System.getProperty("tests.bwc.main.version") != null
|| System.getProperty("tests.bwc.refspec.main") != null;
// append Gradle test runner test filter string
b.append("\"" + task + "\"");
if (isBwcTest) {
// Use "legacy" method for bwc tests so that it applies globally to all upstream bwc test tasks
b.append(" -Dtests.class=\"");
} else {
b.append(" --tests \"");
}
b.append(failure.getDescription().getClassName());
final String methodName = failure.getDescription().getMethodName();
if (methodName != null) {
// fallback to system property filter when tests contain "."
if (methodName.contains(".") || isBwcTest) {
b.append("\" -Dtests.method=\"");
b.append(methodName);
} else {
b.append(".");
b.append(methodName);
}
}
b.append("\"");
GradleMessageBuilder gradleMessageBuilder = new GradleMessageBuilder(b);
gradleMessageBuilder.appendAllOpts(failure.getDescription());
if (isRestApiCompatibilityTest()) {
b.append(System.lineSeparator());
b.append(
"This is a Rest Api Compatibility Test. "
+ "See the developers guide for details how to troubleshoot - "
+ "https://github.com/elastic/elasticsearch/blob/main/REST_API_COMPATIBILITY.md"
);
}
printToErr(b.toString());
}
private static boolean isRestApiCompatibilityTest() {
return Booleans.parseBoolean(System.getProperty("tests.restCompat", "false"));
}
@SuppressForbidden(reason = "printing repro info")
private static void printToErr(String s) {
System.err.println(s);
}
protected static | ReproduceInfoPrinter |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/enumeratedvalue/EnumAndColumnDefinitionTest.java | {
"start": 2520,
"end": 2955
} | enum ____ value has been saved
assertThat( selectAnotherMyEnum2 ).isEqualTo( "A" );
}
);
scope.inSession(
session -> {
var testEntity = session.find( TestEntity.class, id );
assertThat( testEntity.myEnum ).isEqualTo( enumValue );
assertThat( testEntity.anotherMyEnum ).isEqualTo( anotherEnumValue );
assertThat( testEntity.anotherMyEnum2 ).isEqualTo( anotherEnumValue2 );
}
);
}
public | string |
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/LocaleResolver.java | {
"start": 2186,
"end": 3002
} | interface ____ {
/**
* Resolve the current locale via the given request.
* <p>Can return a default locale as fallback in any case.
* @param request the request to resolve the locale for
* @return the current locale (never {@code null})
*/
Locale resolveLocale(HttpServletRequest request);
/**
* Set the current locale to the given one.
* @param request the request to be used for locale modification
* @param response the response to be used for locale modification
* @param locale the new locale, or {@code null} to clear the locale
* @throws UnsupportedOperationException if the LocaleResolver
* implementation does not support dynamic changing of the locale
*/
void setLocale(HttpServletRequest request, @Nullable HttpServletResponse response, @Nullable Locale locale);
}
| LocaleResolver |
java | google__guava | android/guava/src/com/google/common/cache/CacheBuilderSpec.java | {
"start": 16079,
"end": 17386
} | class ____ implements ValueParser {
protected abstract void parseDuration(CacheBuilderSpec spec, long duration, TimeUnit unit);
@Override
public void parse(CacheBuilderSpec spec, String key, @Nullable String value) {
if (isNullOrEmpty(value)) {
throw new IllegalArgumentException("value of key " + key + " omitted");
}
try {
char lastChar = value.charAt(value.length() - 1);
TimeUnit timeUnit;
switch (lastChar) {
case 'd':
timeUnit = DAYS;
break;
case 'h':
timeUnit = HOURS;
break;
case 'm':
timeUnit = MINUTES;
break;
case 's':
timeUnit = SECONDS;
break;
default:
throw new IllegalArgumentException(
format("key %s invalid unit: was %s, must end with one of [dhms]", key, value));
}
long duration = Long.parseLong(value.substring(0, value.length() - 1));
parseDuration(spec, duration, timeUnit);
} catch (NumberFormatException e) {
throw new IllegalArgumentException(
format("key %s value set to %s, must be integer", key, value));
}
}
}
/** Parse expireAfterAccess */
private static final | DurationParser |
java | mockito__mockito | mockito-core/src/test/java/org/mockitousage/basicapi/MocksCreationTest.java | {
"start": 3361,
"end": 3421
} | interface ____ {}
@SomeAnnotation
static | SomeAnnotation |
java | apache__kafka | server-common/src/test/java/org/apache/kafka/queue/KafkaEventQueueTest.java | {
"start": 2008,
"end": 14281
} | class ____<T> implements EventQueue.Event {
private final CompletableFuture<T> future;
private final Supplier<T> supplier;
FutureEvent(CompletableFuture<T> future, Supplier<T> supplier) {
this.future = future;
this.supplier = supplier;
}
@Override
public void run() {
T value = supplier.get();
future.complete(value);
}
@Override
public void handleException(Throwable e) {
future.completeExceptionally(e);
}
}
private LogContext logContext;
@AfterAll
public static void tearDown() throws InterruptedException {
TestUtils.waitForCondition(
() -> Thread.getAllStackTraces().keySet().stream()
.map(Thread::getName)
.noneMatch(t -> t.endsWith(KafkaEventQueue.EVENT_HANDLER_THREAD_SUFFIX)),
"Thread leak detected"
);
}
@BeforeEach
public void setUp(TestInfo testInfo) {
logContext = new LogContext("[KafkaEventQueue test=" + testInfo.getDisplayName() + "]");
}
@Test
public void testCreateAndClose() throws Exception {
KafkaEventQueue queue =
new KafkaEventQueue(Time.SYSTEM, logContext, "testCreateAndClose");
queue.close();
}
@Test
public void testHandleEvents() throws Exception {
try (KafkaEventQueue queue = new KafkaEventQueue(Time.SYSTEM, logContext, "testHandleEvents")) {
AtomicInteger numEventsExecuted = new AtomicInteger(0);
CompletableFuture<Integer> future1 = new CompletableFuture<>();
queue.prepend(new FutureEvent<>(future1, () -> {
assertEquals(1, numEventsExecuted.incrementAndGet());
return 1;
}));
CompletableFuture<Integer> future2 = new CompletableFuture<>();
queue.appendWithDeadline(Time.SYSTEM.nanoseconds() + TimeUnit.SECONDS.toNanos(60),
new FutureEvent<>(future2, () -> {
assertEquals(2, numEventsExecuted.incrementAndGet());
return 2;
}));
CompletableFuture<Integer> future3 = new CompletableFuture<>();
queue.append(new FutureEvent<>(future3, () -> {
assertEquals(3, numEventsExecuted.incrementAndGet());
return 3;
}));
assertEquals(Integer.valueOf(1), future1.get());
assertEquals(Integer.valueOf(3), future3.get());
assertEquals(Integer.valueOf(2), future2.get());
CompletableFuture<Integer> future4 = new CompletableFuture<>();
queue.appendWithDeadline(Time.SYSTEM.nanoseconds() + TimeUnit.SECONDS.toNanos(60),
new FutureEvent<>(future4, () -> {
assertEquals(4, numEventsExecuted.incrementAndGet());
return 4;
}));
future4.get();
}
}
@Test
public void testTimeouts() throws Exception {
try (KafkaEventQueue queue = new KafkaEventQueue(Time.SYSTEM, logContext, "testTimeouts")) {
AtomicInteger numEventsExecuted = new AtomicInteger(0);
CompletableFuture<Integer> future1 = new CompletableFuture<>();
queue.append(new FutureEvent<>(future1, () -> {
assertEquals(1, numEventsExecuted.incrementAndGet());
return 1;
}));
CompletableFuture<Integer> future2 = new CompletableFuture<>();
queue.append(new FutureEvent<>(future2, () -> {
assertEquals(2, numEventsExecuted.incrementAndGet());
Time.SYSTEM.sleep(1);
return 2;
}));
CompletableFuture<Integer> future3 = new CompletableFuture<>();
queue.appendWithDeadline(Time.SYSTEM.nanoseconds() + 1,
new FutureEvent<>(future3, () -> {
numEventsExecuted.incrementAndGet();
return 3;
}));
CompletableFuture<Integer> future4 = new CompletableFuture<>();
queue.append(new FutureEvent<>(future4, () -> {
numEventsExecuted.incrementAndGet();
return 4;
}));
assertEquals(Integer.valueOf(1), future1.get());
assertEquals(Integer.valueOf(2), future2.get());
assertEquals(Integer.valueOf(4), future4.get());
assertEquals(TimeoutException.class,
assertThrows(ExecutionException.class,
() -> future3.get()).getCause().getClass());
assertEquals(3, numEventsExecuted.get());
}
}
@Test
public void testScheduleDeferred() throws Exception {
try (KafkaEventQueue queue = new KafkaEventQueue(Time.SYSTEM, logContext, "testAppendDeferred")) {
// Wait for the deferred event to happen after the non-deferred event.
// It may not happen every time, so we keep trying until it does.
AtomicLong counter = new AtomicLong(0);
CompletableFuture<Boolean> future1;
do {
counter.addAndGet(1);
future1 = new CompletableFuture<>();
queue.scheduleDeferred(null,
__ -> OptionalLong.of(Time.SYSTEM.nanoseconds() + 1000000),
new FutureEvent<>(future1, () -> counter.get() % 2 == 0));
CompletableFuture<Long> future2 = new CompletableFuture<>();
queue.append(new FutureEvent<>(future2, () -> counter.addAndGet(1)));
future2.get();
} while (!future1.get());
}
}
private static final long ONE_HOUR_NS = TimeUnit.NANOSECONDS.convert(1, HOURS);
@Test
public void testScheduleDeferredWithTagReplacement() throws Exception {
try (KafkaEventQueue queue = new KafkaEventQueue(Time.SYSTEM, logContext, "testScheduleDeferredWithTagReplacement")) {
AtomicInteger ai = new AtomicInteger(0);
CompletableFuture<Integer> future1 = new CompletableFuture<>();
queue.scheduleDeferred("foo",
__ -> OptionalLong.of(Time.SYSTEM.nanoseconds() + ONE_HOUR_NS),
new FutureEvent<>(future1, () -> ai.addAndGet(1000)));
CompletableFuture<Integer> future2 = new CompletableFuture<>();
queue.scheduleDeferred("foo", prev -> OptionalLong.of(prev.orElse(0) - ONE_HOUR_NS),
new FutureEvent<>(future2, () -> ai.addAndGet(1)));
assertFalse(future1.isDone());
assertEquals(Integer.valueOf(1), future2.get());
assertEquals(1, ai.get());
}
}
@Test
public void testDeferredIsQueuedAfterTriggering() throws Exception {
MockTime time = new MockTime(0, 100000, 1);
try (KafkaEventQueue queue = new KafkaEventQueue(time, logContext, "testDeferredIsQueuedAfterTriggering")) {
AtomicInteger count = new AtomicInteger(0);
List<CompletableFuture<Integer>> futures = List.of(
new CompletableFuture<>(),
new CompletableFuture<>(),
new CompletableFuture<>());
queue.scheduleDeferred("foo", __ -> OptionalLong.of(2L),
new FutureEvent<>(futures.get(0), () -> count.getAndIncrement()));
queue.append(new FutureEvent<>(futures.get(1), () -> count.getAndAdd(1)));
assertEquals(Integer.valueOf(0), futures.get(1).get());
time.sleep(1);
queue.append(new FutureEvent<>(futures.get(2), () -> count.getAndAdd(1)));
assertEquals(Integer.valueOf(1), futures.get(0).get());
assertEquals(Integer.valueOf(2), futures.get(2).get());
}
}
@Test
public void testShutdownBeforeDeferred() throws Exception {
try (KafkaEventQueue queue = new KafkaEventQueue(Time.SYSTEM, logContext, "testShutdownBeforeDeferred")) {
final AtomicInteger count = new AtomicInteger(0);
CompletableFuture<Integer> future = new CompletableFuture<>();
queue.scheduleDeferred("myDeferred",
__ -> OptionalLong.of(Time.SYSTEM.nanoseconds() + HOURS.toNanos(1)),
new FutureEvent<>(future, () -> count.getAndAdd(1)));
queue.beginShutdown("testShutdownBeforeDeferred");
assertEquals(RejectedExecutionException.class, assertThrows(ExecutionException.class, () -> future.get()).getCause().getClass());
assertEquals(0, count.get());
}
}
@Test
public void testRejectedExecutionException() throws Exception {
KafkaEventQueue queue = new KafkaEventQueue(Time.SYSTEM, logContext,
"testRejectedExecutionException");
queue.close();
CompletableFuture<Void> future = new CompletableFuture<>();
queue.append(new EventQueue.Event() {
@Override
public void run() {
future.complete(null);
}
@Override
public void handleException(Throwable e) {
future.completeExceptionally(e);
}
});
assertEquals(RejectedExecutionException.class, assertThrows(
ExecutionException.class, () -> future.get()).getCause().getClass());
}
@Test
public void testSize() throws Exception {
try (KafkaEventQueue queue = new KafkaEventQueue(Time.SYSTEM, logContext, "testEmpty")) {
assertTrue(queue.isEmpty());
CompletableFuture<Void> future = new CompletableFuture<>();
queue.append(() -> future.get());
assertFalse(queue.isEmpty());
assertEquals(1, queue.size());
queue.append(() -> future.get());
assertEquals(2, queue.size());
future.complete(null);
TestUtils.waitForCondition(() -> queue.isEmpty(), "Failed to see the queue become empty.");
queue.scheduleDeferred("later",
__ -> OptionalLong.of(Time.SYSTEM.nanoseconds() + HOURS.toNanos(1)),
() -> {
});
assertFalse(queue.isEmpty());
queue.scheduleDeferred("soon",
__ -> OptionalLong.of(Time.SYSTEM.nanoseconds() + TimeUnit.MILLISECONDS.toNanos(1)),
() -> {
});
assertFalse(queue.isEmpty());
queue.cancelDeferred("later");
queue.cancelDeferred("soon");
TestUtils.waitForCondition(() -> queue.isEmpty(), "Failed to see the queue become empty.");
assertTrue(queue.isEmpty());
}
}
/**
* Test that we continue handling events after Event#handleException itself throws an exception.
*/
@Test
public void testHandleExceptionThrowingAnException() throws Exception {
try (KafkaEventQueue queue = new KafkaEventQueue(Time.SYSTEM, logContext, "testHandleExceptionThrowingAnException")) {
CompletableFuture<Void> initialFuture = new CompletableFuture<>();
queue.append(() -> initialFuture.get());
AtomicInteger counter = new AtomicInteger(0);
queue.append(new EventQueue.Event() {
@Override
public void run() {
counter.incrementAndGet();
throw new IllegalStateException("First exception");
}
@Override
public void handleException(Throwable e) {
if (e instanceof IllegalStateException) {
counter.incrementAndGet();
throw new RuntimeException("Second exception");
}
}
});
queue.append(() -> counter.incrementAndGet());
assertEquals(3, queue.size());
initialFuture.complete(null);
TestUtils.waitForCondition(() -> counter.get() == 3,
"Failed to see all events execute as planned.");
}
}
private static | FutureEvent |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/deletedetached/DeleteDetachedJpaComplianceTest.java | {
"start": 781,
"end": 2304
} | class ____ {
@Test
void testComposite(EntityManagerFactoryScope scope) {
RestaurantWithCompositeKey restaurant = new RestaurantWithCompositeKey();
restaurant.name = "Some stuff about the thing";
scope.inTransaction( s -> s.persist( restaurant ) );
scope.inTransaction( s -> {
RestaurantWithCompositeKey otherRestaurant = s.find(
RestaurantWithCompositeKey.class,
new RestaurantPK( restaurant.regionId, restaurant.restaurantId )
);
assertNotNull( otherRestaurant );
assertThrows(IllegalArgumentException.class,
() -> s.remove( restaurant ),
"Given entity is not associated with the persistence context"
);
} );
scope.inTransaction( s -> {
assertNotNull( s.find(
RestaurantWithCompositeKey.class,
new RestaurantPK( restaurant.regionId, restaurant.restaurantId )
) );
} );
}
@Test
void testRegular(EntityManagerFactoryScope scope) {
Restaurant restaurant = new Restaurant();
restaurant.name = "Some stuff about the thing";
scope.inTransaction( s -> s.persist( restaurant ) );
scope.inTransaction( s -> {
Restaurant otherRestaurant = s.find( Restaurant.class, restaurant.restaurantId );
assertNotNull( otherRestaurant );
assertThrows(IllegalArgumentException.class,
() -> s.remove( restaurant ),
"Given entity is not associated with the persistence context"
);
} );
scope.inTransaction( s -> {
assertNotNull( s.find( Restaurant.class, restaurant.restaurantId ) );
} );
}
@Entity
static | DeleteDetachedJpaComplianceTest |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/batch/BatchExecExecutionOrderEnforcer.java | {
"start": 3105,
"end": 7584
} | class ____ extends ExecNodeBase<RowData>
implements BatchExecNode<RowData> {
public BatchExecExecutionOrderEnforcer(
ReadableConfig tableConfig,
List<InputProperty> inputProperties,
LogicalType outputType,
String description) {
super(
ExecNodeContext.newNodeId(),
ExecNodeContext.newContext(BatchExecExecutionOrderEnforcer.class),
ExecNodeContext.newPersistedConfig(
BatchExecExecutionOrderEnforcer.class, tableConfig),
inputProperties,
outputType,
description);
}
@Override
public boolean supportFusionCodegen() {
return true;
}
@Override
protected OpFusionCodegenSpecGenerator translateToFusionCodegenSpecInternal(
PlannerBase planner, ExecNodeConfig config, CodeGeneratorContext parentCtx) {
OpFusionCodegenSpecGenerator leftInput =
getInputEdges().get(0).translateToFusionCodegenSpec(planner, parentCtx);
OpFusionCodegenSpecGenerator rightInput =
getInputEdges().get(1).translateToFusionCodegenSpec(planner, parentCtx);
OpFusionCodegenSpecGenerator runtimeFilterGenerator =
new TwoInputOpFusionCodegenSpecGenerator(
leftInput,
rightInput,
0L,
(RowType) getOutputType(),
new ExecutionOrderEnforcerFusionCodegenSpec(
new CodeGeneratorContext(
config,
planner.getFlinkContext().getClassLoader(),
parentCtx)));
leftInput.addOutput(1, runtimeFilterGenerator);
rightInput.addOutput(2, runtimeFilterGenerator);
return runtimeFilterGenerator;
}
@Override
@SuppressWarnings("unchecked")
protected Transformation<RowData> translateToPlanInternal(
PlannerBase planner, ExecNodeConfig config) {
Transformation<RowData> dynamicFilteringInputTransform =
(Transformation<RowData>) getInputEdges().get(0).translateToPlan(planner);
Transformation<RowData> sourceTransform =
(Transformation<RowData>) getInputEdges().get(1).translateToPlan(planner);
// set dynamic filtering data listener id
BatchExecDynamicFilteringDataCollector dynamicFilteringDataCollector =
(BatchExecDynamicFilteringDataCollector)
ignoreExchange(getInputEdges().get(0).getSource());
BatchExecTableSourceScan tableSourceScan =
(BatchExecTableSourceScan) getInputEdges().get(1).getSource();
((SourceTransformation<?, ?, ?>) sourceTransform)
.setCoordinatorListeningID(tableSourceScan.getDynamicFilteringDataListenerID());
((DynamicFilteringDataCollectorOperatorFactory)
((OneInputTransformation<?, ?>)
dynamicFilteringDataCollector.translateToPlan(planner))
.getOperatorFactory())
.registerDynamicFilteringDataListenerID(
tableSourceScan.getDynamicFilteringDataListenerID());
final CodeGenOperatorFactory<RowData> operatorFactory =
ExecutionOrderEnforcerCodeGenerator.gen(
new CodeGeneratorContext(
config, planner.getFlinkContext().getClassLoader()),
(RowType) getInputEdges().get(0).getOutputType(),
(RowType) getInputEdges().get(1).getOutputType());
return ExecNodeUtil.createTwoInputTransformation(
dynamicFilteringInputTransform,
sourceTransform,
createTransformationName(config),
createTransformationDescription(config),
operatorFactory,
InternalTypeInfo.of(getOutputType()),
sourceTransform.getParallelism(),
0,
false);
}
private static ExecNode<?> ignoreExchange(ExecNode<?> execNode) {
if (execNode instanceof BatchExecExchange) {
return execNode.getInputEdges().get(0).getSource();
} else {
return execNode;
}
}
}
| BatchExecExecutionOrderEnforcer |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/issues/CharlesSplitUseBeanAsErrorHandlerIssueTest.java | {
"start": 2982,
"end": 3111
} | class ____ {
public void logError(String body) {
logged = body;
}
}
public static | MyLoggerBean |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/sql/ast/tree/from/CorrelatedTableGroup.java | {
"start": 788,
"end": 5783
} | class ____ extends AbstractTableGroup {
private final TableGroup correlatedTableGroup;
private final QuerySpec querySpec;
private final Consumer<Predicate> joinPredicateConsumer;
public CorrelatedTableGroup(
TableGroup correlatedTableGroup,
SqlAliasBase sqlAliasBase,
QuerySpec querySpec,
Consumer<Predicate> joinPredicateConsumer,
SessionFactoryImplementor sessionFactory) {
super(
true,
correlatedTableGroup.getNavigablePath(),
(TableGroupProducer) correlatedTableGroup.getExpressionType(),
null,
sqlAliasBase,
sessionFactory
);
this.correlatedTableGroup = correlatedTableGroup;
this.querySpec = querySpec;
this.joinPredicateConsumer = joinPredicateConsumer;
}
public TableGroup getCorrelatedTableGroup() {
return correlatedTableGroup;
}
@Override
public void addTableGroupJoin(TableGroupJoin join) {
assert !getTableGroupJoins().contains( join );
assert join.getJoinType() == SqlAstJoinType.INNER;
querySpec.getFromClause().addRoot( join.getJoinedGroup() );
registerPredicate( join );
super.addTableGroupJoin( join );
}
@Override
public void prependTableGroupJoin(NavigablePath navigablePath, TableGroupJoin join) {
throw new UnsupportedOperationException();
}
@Override
public void addNestedTableGroupJoin(TableGroupJoin join) {
assert !getTableGroupJoins().contains( join );
assert join.getJoinType() == SqlAstJoinType.INNER;
querySpec.getFromClause().addRoot( join.getJoinedGroup() );
registerPredicate( join );
super.addNestedTableGroupJoin( join );
}
private void registerPredicate(TableGroupJoin join) {
if ( join.getPredicate() != null ) {
joinPredicateConsumer.accept( join.getPredicate() );
}
else if ( join.getJoinedGroup() instanceof LazyTableGroup lazyTableGroup ) {
// Wait for the table group to get initialized before consuming the predicate
lazyTableGroup.setTableGroupInitializerCallback(
tableGroup -> joinPredicateConsumer.accept( join.getPredicate() )
);
}
}
@Override
public TableReference getTableReference(
NavigablePath navigablePath,
ValuedModelPart modelPart,
String tableExpression,
boolean resolve) {
final TableReference tableReference = correlatedTableGroup.getTableReference(
navigablePath,
modelPart,
tableExpression,
resolve
);
if ( tableReference != null ) {
return tableReference;
}
for ( TableGroupJoin tableGroupJoin : getNestedTableGroupJoins() ) {
final TableReference groupTableReference = tableGroupJoin.getJoinedGroup()
.getPrimaryTableReference()
.getTableReference( navigablePath, modelPart, tableExpression, resolve );
if ( groupTableReference != null ) {
return groupTableReference;
}
}
for ( TableGroupJoin tableGroupJoin : getTableGroupJoins() ) {
final TableReference groupTableReference = tableGroupJoin.getJoinedGroup()
.getPrimaryTableReference()
.getTableReference( navigablePath, modelPart, tableExpression, resolve );
if ( groupTableReference != null ) {
return groupTableReference;
}
}
return null;
}
@Override
public TableReference getTableReference(
NavigablePath navigablePath,
String tableExpression,
boolean resolve) {
final TableReference tableReference = correlatedTableGroup.getTableReference(
navigablePath,
tableExpression,
resolve
);
if ( tableReference != null ) {
return tableReference;
}
for ( TableGroupJoin tableGroupJoin : getNestedTableGroupJoins() ) {
final TableReference groupTableReference = tableGroupJoin.getJoinedGroup()
.getPrimaryTableReference()
.getTableReference( navigablePath, tableExpression, resolve );
if ( groupTableReference != null ) {
return groupTableReference;
}
}
for ( TableGroupJoin tableGroupJoin : getTableGroupJoins() ) {
final TableReference groupTableReference = tableGroupJoin.getJoinedGroup()
.getPrimaryTableReference()
.getTableReference( navigablePath, tableExpression, resolve );
if ( groupTableReference != null ) {
return groupTableReference;
}
}
return null;
}
@Override
public void applyAffectedTableNames(Consumer<String> nameCollector) {
getPrimaryTableReference().applyAffectedTableNames( nameCollector );
}
@Override
public TableReference getPrimaryTableReference() {
return correlatedTableGroup.getPrimaryTableReference();
}
@Override
public List<TableReferenceJoin> getTableReferenceJoins() {
return Collections.emptyList();
}
public Consumer<Predicate> getJoinPredicateConsumer() {
return joinPredicateConsumer;
}
@Override
public TableGroup findCompatibleJoinedGroup(TableGroupJoinProducer joinProducer, SqlAstJoinType requestedJoinType) {
final TableGroup compatibleJoinedGroup = super.findCompatibleJoinedGroup( joinProducer, requestedJoinType );
return compatibleJoinedGroup == null ? correlatedTableGroup.findCompatibleJoinedGroup(
joinProducer,
requestedJoinType
) : compatibleJoinedGroup;
}
}
| CorrelatedTableGroup |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querylog/EsqlQueryLog.java | {
"start": 1385,
"end": 5558
} | class ____ {
public static final String ELASTICSEARCH_QUERYLOG_PREFIX = "elasticsearch.querylog";
public static final String ELASTICSEARCH_QUERYLOG_ERROR_MESSAGE = ELASTICSEARCH_QUERYLOG_PREFIX + ".error.message";
public static final String ELASTICSEARCH_QUERYLOG_ERROR_TYPE = ELASTICSEARCH_QUERYLOG_PREFIX + ".error.type";
public static final String ELASTICSEARCH_QUERYLOG_TOOK = ELASTICSEARCH_QUERYLOG_PREFIX + ".took";
public static final String ELASTICSEARCH_QUERYLOG_TOOK_MILLIS = ELASTICSEARCH_QUERYLOG_PREFIX + ".took_millis";
public static final String ELASTICSEARCH_QUERYLOG_PLANNING_TOOK = ELASTICSEARCH_QUERYLOG_PREFIX + ".planning.took";
public static final String ELASTICSEARCH_QUERYLOG_PLANNING_TOOK_MILLIS = ELASTICSEARCH_QUERYLOG_PREFIX + ".planning.took_millis";
public static final String ELASTICSEARCH_QUERYLOG_SUCCESS = ELASTICSEARCH_QUERYLOG_PREFIX + ".success";
public static final String ELASTICSEARCH_QUERYLOG_SEARCH_TYPE = ELASTICSEARCH_QUERYLOG_PREFIX + ".search_type";
public static final String ELASTICSEARCH_QUERYLOG_QUERY = ELASTICSEARCH_QUERYLOG_PREFIX + ".query";
public static final String LOGGER_NAME = "esql.querylog";
private static final Logger queryLogger = LogManager.getLogger(LOGGER_NAME);
private final SlowLogFields additionalFields;
private volatile long queryWarnThreshold;
private volatile long queryInfoThreshold;
private volatile long queryDebugThreshold;
private volatile long queryTraceThreshold;
private volatile boolean includeUser;
public EsqlQueryLog(ClusterSettings settings, SlowLogFieldProvider slowLogFieldProvider) {
settings.initializeAndWatch(ESQL_QUERYLOG_THRESHOLD_WARN_SETTING, this::setQueryWarnThreshold);
settings.initializeAndWatch(ESQL_QUERYLOG_THRESHOLD_INFO_SETTING, this::setQueryInfoThreshold);
settings.initializeAndWatch(ESQL_QUERYLOG_THRESHOLD_DEBUG_SETTING, this::setQueryDebugThreshold);
settings.initializeAndWatch(ESQL_QUERYLOG_THRESHOLD_TRACE_SETTING, this::setQueryTraceThreshold);
settings.initializeAndWatch(ESQL_QUERYLOG_INCLUDE_USER_SETTING, this::setIncludeUser);
this.additionalFields = slowLogFieldProvider.create();
}
public void onQueryPhase(Result esqlResult, String query) {
if (esqlResult == null) {
return; // TODO review, it happens in some tests, not sure if it's a thing also in prod
}
long tookInNanos = esqlResult.executionInfo().overallTook().nanos();
log(() -> Message.of(esqlResult, query, includeUser ? additionalFields.queryFields() : Map.of()), tookInNanos);
}
public void onQueryFailure(String query, Exception ex, long tookInNanos) {
log(() -> Message.of(query, tookInNanos, ex, includeUser ? additionalFields.queryFields() : Map.of()), tookInNanos);
}
private void log(Supplier<ESLogMessage> logProducer, long tookInNanos) {
if (queryWarnThreshold >= 0 && tookInNanos > queryWarnThreshold) {
queryLogger.warn(logProducer.get());
} else if (queryInfoThreshold >= 0 && tookInNanos > queryInfoThreshold) {
queryLogger.info(logProducer.get());
} else if (queryDebugThreshold >= 0 && tookInNanos > queryDebugThreshold) {
queryLogger.debug(logProducer.get());
} else if (queryTraceThreshold >= 0 && tookInNanos > queryTraceThreshold) {
queryLogger.trace(logProducer.get());
}
}
public void setQueryWarnThreshold(TimeValue queryWarnThreshold) {
this.queryWarnThreshold = queryWarnThreshold.nanos();
}
public void setQueryInfoThreshold(TimeValue queryInfoThreshold) {
this.queryInfoThreshold = queryInfoThreshold.nanos();
}
public void setQueryDebugThreshold(TimeValue queryDebugThreshold) {
this.queryDebugThreshold = queryDebugThreshold.nanos();
}
public void setQueryTraceThreshold(TimeValue queryTraceThreshold) {
this.queryTraceThreshold = queryTraceThreshold.nanos();
}
public void setIncludeUser(boolean includeUser) {
this.includeUser = includeUser;
}
static final | EsqlQueryLog |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/state/internals/ListValueStoreBuilder.java | {
"start": 1143,
"end": 2636
} | class ____<K, V> extends AbstractStoreBuilder<K, V, KeyValueStore<K, V>> {
private final KeyValueBytesStoreSupplier storeSupplier;
public ListValueStoreBuilder(final KeyValueBytesStoreSupplier storeSupplier,
final Serde<K> keySerde,
final Serde<V> valueSerde,
final Time time) {
super(storeSupplier.name(), keySerde, valueSerde, time);
Objects.requireNonNull(storeSupplier, "storeSupplier can't be null");
Objects.requireNonNull(storeSupplier.metricsScope(), "storeSupplier's metricsScope can't be null");
this.storeSupplier = storeSupplier;
}
@Override
public KeyValueStore<K, V> build() {
return new MeteredKeyValueStore<>(
maybeWrapCaching(maybeWrapLogging(new ListValueStore(storeSupplier.get()))),
storeSupplier.metricsScope(),
time,
keySerde,
valueSerde);
}
private KeyValueStore<Bytes, byte[]> maybeWrapCaching(final KeyValueStore<Bytes, byte[]> inner) {
if (!enableCaching) {
return inner;
}
return new CachingKeyValueStore(inner, false);
}
private KeyValueStore<Bytes, byte[]> maybeWrapLogging(final KeyValueStore<Bytes, byte[]> inner) {
if (!enableLogging) {
return inner;
}
return new ChangeLoggingListValueBytesStore(inner);
}
}
| ListValueStoreBuilder |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-csrf/deployment/src/test/java/io/quarkus/csrf/reactive/ProgrammaticCsrfTest.java | {
"start": 1207,
"end": 8311
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(TestResource.class, ProgrammaticCsrfConfig.class)
.addAsResource("templates/csrfToken.html"))
.setForcedDependencies(List.of(Dependency.of("io.quarkus", "quarkus-security", Version.getVersion())));
private final static String COOKIE_NAME = "custom-csrf-token";
private final static String HEADER_NAME = "CUSTOM-X-CSRF-TOKEN";
@Test
public void testCsrfBuilder() {
var fullConfig = (RestCsrfConfig) CSRF.builder()
.tokenHeaderName("tokenHeader1")
.formFieldName("formField1")
.cookieName("cookie1")
.cookieForceSecure()
.cookieHttpOnly(false)
.cookiePath("path1")
.requireFormUrlEncoded(false)
.tokenSize(1234)
.tokenSignatureKey("12345".repeat(10))
.cookieMaxAge(Duration.ofHours(5))
.cookieDomain("Domain1")
.createTokenPath("tokenPath1")
.build();
Assertions.assertEquals("tokenHeader1", fullConfig.tokenHeaderName());
Assertions.assertEquals("formField1", fullConfig.formFieldName());
Assertions.assertEquals("cookie1", fullConfig.cookieName());
Assertions.assertTrue(fullConfig.cookieForceSecure());
Assertions.assertFalse(fullConfig.cookieHttpOnly());
Assertions.assertEquals("path1", fullConfig.cookiePath());
Assertions.assertFalse(fullConfig.requireFormUrlEncoded());
Assertions.assertEquals(1234, fullConfig.tokenSize());
Assertions.assertEquals("12345".repeat(10), fullConfig.tokenSignatureKey().get());
Assertions.assertEquals(Duration.ofHours(5), fullConfig.cookieMaxAge());
Assertions.assertEquals("Domain1", fullConfig.cookieDomain().get());
Assertions.assertEquals(Set.of("tokenPath1"), fullConfig.createTokenPath().get());
var defaultConfig = (RestCsrfConfig) CSRF.builder().build();
Assertions.assertEquals("X-CSRF-TOKEN", defaultConfig.tokenHeaderName());
Assertions.assertEquals("csrf-token", defaultConfig.formFieldName());
Assertions.assertEquals("csrf-token", defaultConfig.cookieName());
Assertions.assertFalse(defaultConfig.cookieForceSecure());
Assertions.assertTrue(defaultConfig.cookieHttpOnly());
Assertions.assertEquals("/", defaultConfig.cookiePath());
Assertions.assertTrue(defaultConfig.requireFormUrlEncoded());
Assertions.assertEquals(16, defaultConfig.tokenSize());
Assertions.assertTrue(defaultConfig.verifyToken());
Assertions.assertTrue(defaultConfig.tokenSignatureKey().isEmpty());
Assertions.assertEquals(Duration.ofHours(2), defaultConfig.cookieMaxAge());
Assertions.assertTrue(defaultConfig.cookieDomain().isEmpty());
Assertions.assertTrue(defaultConfig.createTokenPath().isEmpty());
}
@Test
public void testForm() {
String token = when()
.get("/csrfTokenForm")
.then()
.statusCode(200)
.cookie(COOKIE_NAME)
.extract()
.cookie(COOKIE_NAME);
EncoderConfig encoderConfig = EncoderConfig.encoderConfig().encodeContentTypeAs("multipart/form-data",
ContentType.TEXT);
RestAssuredConfig restAssuredConfig = RestAssured.config().encoderConfig(encoderConfig);
//no token
given()
.cookie(COOKIE_NAME, token)
.config(restAssuredConfig)
.formParam("name", "testName")
.contentType(ContentType.URLENC)
.when()
.post("csrfTokenForm")
.then()
.statusCode(400);
//wrong token
given()
.cookie(COOKIE_NAME, token)
.config(restAssuredConfig)
.formParam(COOKIE_NAME, "WRONG")
.formParam("name", "testName")
.contentType(ContentType.URLENC)
.when()
.post("csrfTokenForm")
.then()
.statusCode(400);
//valid token
given()
.cookie(COOKIE_NAME, token)
.config(restAssuredConfig)
.formParam(COOKIE_NAME, token)
.formParam("name", "testName")
.contentType(ContentType.URLENC)
.when()
.post("csrfTokenForm")
.then()
.statusCode(200)
.body(Matchers.equalTo("testName"));
}
@Test
public void testNoBody() {
String token = when().get("/csrfTokenForm")
.then().statusCode(200).cookie(COOKIE_NAME)
.extract().cookie(COOKIE_NAME);
// no token
given()
.cookie(COOKIE_NAME, token)
.when()
.post("csrfTokenPost")
.then()
.statusCode(400);
//wrong token
given()
.cookie(COOKIE_NAME, token)
.header(HEADER_NAME, "WRONG")
.when()
.post("csrfTokenPost")
.then()
.statusCode(400);
//valid token
given()
.cookie(COOKIE_NAME, token)
.header(HEADER_NAME, token)
.when()
.post("csrfTokenPost")
.then()
.statusCode(200)
.body(Matchers.equalTo("no user"));
}
@Test
public void testWithBody() {
String token = when()
.get("/csrfTokenForm")
.then()
.statusCode(200)
.cookie(COOKIE_NAME)
.extract()
.cookie(COOKIE_NAME);
// no token
given()
.cookie(COOKIE_NAME, token)
.body("testName")
.contentType(ContentType.TEXT)
.when()
.post("csrfTokenPostBody")
.then()
.statusCode(400);
//wrong token
given()
.cookie(COOKIE_NAME, token)
.header(HEADER_NAME, "WRONG")
.body("testName")
.contentType(ContentType.TEXT)
.when()
.post("csrfTokenPostBody")
.then()
.statusCode(400);
//valid token => This test fails but should work
given()
.cookie(COOKIE_NAME, token)
.header(HEADER_NAME, token)
.body("testName")
.contentType(ContentType.TEXT)
.when()
.post("csrfTokenPostBody")
.then()
.statusCode(200)
.body(Matchers.equalTo("testName"));
}
@Path("")
public static | ProgrammaticCsrfTest |
java | google__guava | guava-testlib/test/com/google/common/testing/NullPointerTesterTest.java | {
"start": 6881,
"end": 7025
} | class ____ {
@Keep
public static void christenPoodle(String name) {
checkArgument(name != null);
}
}
private static | ThrowsIae |
java | mapstruct__mapstruct | processor/src/main/java/org/mapstruct/ap/internal/model/NoArgumentConstructor.java | {
"start": 467,
"end": 1070
} | class ____ extends ModelElement implements Constructor {
private final String name;
private final Set<SupportingConstructorFragment> fragments;
public NoArgumentConstructor(String name, Set<SupportingConstructorFragment> fragments) {
this.name = name;
this.fragments = fragments;
}
@Override
public Set<Type> getImportTypes() {
return Collections.emptySet();
}
@Override
public String getName() {
return name;
}
public Set<SupportingConstructorFragment> getFragments() {
return fragments;
}
}
| NoArgumentConstructor |
java | junit-team__junit5 | junit-jupiter-params/src/main/java/org/junit/jupiter/params/ParameterizedInvocationConstants.java | {
"start": 665,
"end": 4604
} | class ____ {
/**
* Placeholder for the {@linkplain org.junit.jupiter.api.TestInfo#getDisplayName
* display name} of a {@code @ParameterizedTest} method: <code>{displayName}</code>
*
* @since 5.3
* @see ParameterizedClass#name()
* @see ParameterizedTest#name()
*/
public static final String DISPLAY_NAME_PLACEHOLDER = "{displayName}";
/**
* Placeholder for the current invocation index of a {@code @ParameterizedTest}
* method (1-based): <code>{index}</code>
*
* @since 5.3
* @see ParameterizedClass#name()
* @see ParameterizedTest#name()
* @see #DEFAULT_DISPLAY_NAME
*/
public static final String INDEX_PLACEHOLDER = "{index}";
/**
* Placeholder for the complete, comma-separated arguments list of the
* current invocation of a {@code @ParameterizedTest} method:
* <code>{arguments}</code>
*
* @since 5.3
* @see ParameterizedClass#name()
* @see ParameterizedTest#name()
*/
public static final String ARGUMENTS_PLACEHOLDER = "{arguments}";
/**
* Placeholder for the complete, comma-separated named arguments list
* of the current invocation of a {@code @ParameterizedTest} method:
* <code>{argumentsWithNames}</code>
*
* <p>Argument names will be retrieved via the {@link java.lang.reflect.Parameter#getName()}
* API if the byte code contains parameter names — for example, if
* the code was compiled with the {@code -parameters} command line argument
* for {@code javac}.
*
* @since 5.6
* @see ParameterizedClass#name()
* @see ParameterizedTest#name()
* @see #ARGUMENT_SET_NAME_OR_ARGUMENTS_WITH_NAMES_PLACEHOLDER
*/
public static final String ARGUMENTS_WITH_NAMES_PLACEHOLDER = "{argumentsWithNames}";
/**
* Placeholder for the name of the argument set for the current invocation
* of a {@code @ParameterizedTest} method: <code>{argumentSetName}</code>.
*
* <p>This placeholder can be used when the current set of arguments was created via
* {@link org.junit.jupiter.params.provider.Arguments#argumentSet(String, Object...)
* argumentSet()}.
*
* @since 5.11
* @see ParameterizedClass#name()
* @see ParameterizedTest#name()
* @see #ARGUMENT_SET_NAME_OR_ARGUMENTS_WITH_NAMES_PLACEHOLDER
* @see org.junit.jupiter.params.provider.Arguments#argumentSet(String, Object...)
*/
@API(status = MAINTAINED, since = "5.13.3")
public static final String ARGUMENT_SET_NAME_PLACEHOLDER = "{argumentSetName}";
/**
* Placeholder for either {@link #ARGUMENT_SET_NAME_PLACEHOLDER} or
* {@link #ARGUMENTS_WITH_NAMES_PLACEHOLDER}, depending on whether the
* current set of arguments was created via
* {@link org.junit.jupiter.params.provider.Arguments#argumentSet(String, Object...)
* argumentSet()}: <code>{argumentSetNameOrArgumentsWithNames}</code>.
*
* @since 5.11
* @see ParameterizedClass#name()
* @see ParameterizedTest#name()
* @see #ARGUMENT_SET_NAME_PLACEHOLDER
* @see #ARGUMENTS_WITH_NAMES_PLACEHOLDER
* @see #DEFAULT_DISPLAY_NAME
* @see org.junit.jupiter.params.provider.Arguments#argumentSet(String, Object...)
*/
@API(status = MAINTAINED, since = "5.13.3")
public static final String ARGUMENT_SET_NAME_OR_ARGUMENTS_WITH_NAMES_PLACEHOLDER = "{argumentSetNameOrArgumentsWithNames}";
/**
* Default display name pattern for the current invocation of a
* {@code @ParameterizedTest} method: {@value}
*
* <p>Note that the default pattern does <em>not</em> include the
* {@linkplain #DISPLAY_NAME_PLACEHOLDER display name} of the
* {@code @ParameterizedTest} method.
*
* @since 5.3
* @see ParameterizedClass#name()
* @see ParameterizedTest#name()
* @see #DISPLAY_NAME_PLACEHOLDER
* @see #INDEX_PLACEHOLDER
* @see #ARGUMENT_SET_NAME_OR_ARGUMENTS_WITH_NAMES_PLACEHOLDER
*/
public static final String DEFAULT_DISPLAY_NAME = ParameterizedInvocationNameFormatter.DEFAULT_DISPLAY_NAME_PATTERN;
private ParameterizedInvocationConstants() {
}
}
| ParameterizedInvocationConstants |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/query/sqm/tree/jpa/AbstractJpaTupleElement.java | {
"start": 685,
"end": 1764
} | class ____<T>
extends AbstractSqmNode
implements SqmVisitableNode, JpaTupleElement<T> {
private @Nullable SqmBindableType<T> expressibleType;
private @Nullable String alias;
protected AbstractJpaTupleElement(@Nullable SqmBindableType<? super T> expressibleType, NodeBuilder criteriaBuilder) {
super( criteriaBuilder );
setExpressibleType( expressibleType );
}
protected void copyTo(AbstractJpaTupleElement<T> target, SqmCopyContext context) {
target.alias = alias;
}
@Override
public @Nullable String getAlias() {
return alias;
}
/**
* Protected access to set the alias.
*/
protected void setAlias(@Nullable String alias) {
this.alias = alias;
}
public @Nullable SqmBindableType<T> getNodeType() {
return expressibleType;
}
protected final void setExpressibleType(
// This is fine, since this method is final
@UnknownInitialization AbstractJpaTupleElement<T> this,
@Nullable SqmBindableType<?> expressibleType) {
//noinspection unchecked
this.expressibleType = (SqmBindableType<T>) expressibleType;
}
}
| AbstractJpaTupleElement |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PostDataActionRequestTests.java | {
"start": 591,
"end": 1768
} | class ____ extends AbstractWireSerializingTestCase<PostDataAction.Request> {
@Override
protected PostDataAction.Request createTestInstance() {
PostDataAction.Request request = new PostDataAction.Request(randomAlphaOfLengthBetween(1, 20));
if (randomBoolean()) {
request.setResetStart(randomAlphaOfLengthBetween(1, 20));
}
if (randomBoolean()) {
request.setResetEnd(randomAlphaOfLengthBetween(1, 20));
}
if (randomBoolean()) {
request.setDataDescription(new DataDescription(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)));
}
if (randomBoolean()) {
request.setContent(new BytesArray(new byte[0]), randomFrom(XContentType.values()));
}
return request;
}
@Override
protected PostDataAction.Request mutateInstance(PostDataAction.Request instance) {
return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
}
@Override
protected Writeable.Reader<PostDataAction.Request> instanceReader() {
return PostDataAction.Request::new;
}
}
| PostDataActionRequestTests |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/cdi/JpaQueryRewriterWithCdiIntegrationTests.java | {
"start": 6208,
"end": 7725
} | interface ____ extends JpaRepository<User, Integer>, QueryRewriter {
@Query(value = "select original_user_alias.* from SD_USER original_user_alias", nativeQuery = true,
queryRewriter = TestQueryRewriter.class)
List<User> findByNativeQuery(String param);
@Query(value = "select original_user_alias from User original_user_alias", queryRewriter = TestQueryRewriter.class)
List<User> findByNonNativeQuery(String param);
@Query(value = "select original_user_alias from User original_user_alias", queryRewriter = TestQueryRewriter.class)
List<User> findByNonNativeSortedQuery(String param, Sort sort);
@Query(value = "select original_user_alias from User original_user_alias", queryRewriter = TestQueryRewriter.class)
List<User> findByNonNativePagedQuery(String param, Pageable pageable);
@Query(value = "select original_user_alias.* from SD_USER original_user_alias", nativeQuery = true)
List<User> findByNativeQueryWithNoRewrite(String param);
@Query(value = "select original_user_alias from User original_user_alias")
List<User> findByNonNativeQueryWithNoRewrite(String param);
@Query(value = "select original_user_alias.* from SD_USER original_user_alias", nativeQuery = true,
queryRewriter = UserRepositoryWithRewriter.class)
List<User> findByNativeQueryUsingRepository(String param);
@Override
default String rewrite(String query, Sort sort) {
return replaceAlias(query, sort);
}
}
/**
* Stand-alone {@link QueryRewriter}.
*/
static | UserRepositoryWithRewriter |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/CheckpointFailureManagerTest.java | {
"start": 7050,
"end": 7557
} | class ____ implements CheckpointFailureManager.FailJobCallback {
private int invokeCounter = 0;
@Override
public void failJob(Throwable cause) {
invokeCounter++;
}
@Override
public void failJobDueToTaskFailure(
final Throwable cause, final ExecutionAttemptID executionAttemptID) {
invokeCounter++;
}
public int getInvokeCounter() {
return invokeCounter;
}
}
}
| TestFailJobCallback |
java | apache__flink | flink-core-api/src/main/java/org/apache/flink/api/java/tuple/builder/Tuple20Builder.java | {
"start": 1268,
"end": 2037
} | class ____ {@link Tuple20}.
*
* @param <T0> The type of field 0
* @param <T1> The type of field 1
* @param <T2> The type of field 2
* @param <T3> The type of field 3
* @param <T4> The type of field 4
* @param <T5> The type of field 5
* @param <T6> The type of field 6
* @param <T7> The type of field 7
* @param <T8> The type of field 8
* @param <T9> The type of field 9
* @param <T10> The type of field 10
* @param <T11> The type of field 11
* @param <T12> The type of field 12
* @param <T13> The type of field 13
* @param <T14> The type of field 14
* @param <T15> The type of field 15
* @param <T16> The type of field 16
* @param <T17> The type of field 17
* @param <T18> The type of field 18
* @param <T19> The type of field 19
*/
@Public
public | for |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/search/SearchResponse.java | {
"start": 39170,
"end": 39597
} | class ____ create
* a new Cluster object using the "copyFrom" Cluster passed in and set only
* changed values.
*
* Since the clusterAlias, indexExpression and skipUnavailable fields are
* never changed once set, this Builder provides no setter method for them.
* All other fields can be set and override the value in the "copyFrom" Cluster.
*/
public static | to |
java | apache__hadoop | hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java | {
"start": 12544,
"end": 12694
} | class ____ extends RuntimeException {
public DuplicateFileException(String message) {
super(message);
}
}
static | DuplicateFileException |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/java/typeutils/PojoTypeExtractionTest.java | {
"start": 5162,
"end": 5414
} | class ____ {
public String abc;
private int field;
public int getField() {
return this.field;
}
public void setField(int f) {
this.field = f;
}
}
public static | BeanStylePojo |
java | apache__camel | components/camel-drill/src/generated/java/org/apache/camel/component/drill/DrillEndpointConfigurer.java | {
"start": 732,
"end": 2959
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
DrillEndpoint target = (DrillEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "clusterid":
case "clusterId": target.setClusterId(property(camelContext, java.lang.String.class, value)); return true;
case "directory": target.setDirectory(property(camelContext, java.lang.String.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "mode": target.setMode(property(camelContext, org.apache.camel.component.drill.DrillConnectionMode.class, value)); return true;
case "port": target.setPort(property(camelContext, java.lang.Integer.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "clusterid":
case "clusterId": return java.lang.String.class;
case "directory": return java.lang.String.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "mode": return org.apache.camel.component.drill.DrillConnectionMode.class;
case "port": return java.lang.Integer.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
DrillEndpoint target = (DrillEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "clusterid":
case "clusterId": return target.getClusterId();
case "directory": return target.getDirectory();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "mode": return target.getMode();
case "port": return target.getPort();
default: return null;
}
}
}
| DrillEndpointConfigurer |
java | google__gson | gson/src/test/java/com/google/gson/functional/VersioningTest.java | {
"start": 1039,
"end": 3515
} | class ____ {
private static final int A = 0;
private static final int B = 1;
private static final int C = 2;
private static final int D = 3;
private static Gson gsonWithVersion(double version) {
return new GsonBuilder().setVersion(version).create();
}
@Test
public void testVersionedUntilSerialization() {
Version1 target = new Version1();
Gson gson = gsonWithVersion(1.29);
String json = gson.toJson(target);
assertThat(json).contains("\"a\":" + A);
gson = gsonWithVersion(1.3);
json = gson.toJson(target);
assertThat(json).doesNotContain("\"a\":" + A);
gson = gsonWithVersion(1.31);
json = gson.toJson(target);
assertThat(json).doesNotContain("\"a\":" + A);
}
@Test
public void testVersionedUntilDeserialization() {
String json = "{\"a\":3,\"b\":4,\"c\":5}";
Gson gson = gsonWithVersion(1.29);
Version1 version1 = gson.fromJson(json, Version1.class);
assertThat(version1.a).isEqualTo(3);
gson = gsonWithVersion(1.3);
version1 = gson.fromJson(json, Version1.class);
assertThat(version1.a).isEqualTo(A);
gson = gsonWithVersion(1.31);
version1 = gson.fromJson(json, Version1.class);
assertThat(version1.a).isEqualTo(A);
}
@Test
public void testVersionedClassesSerialization() {
Gson gson = gsonWithVersion(1.0);
String json1 = gson.toJson(new Version1());
String json2 = gson.toJson(new Version1_1());
assertThat(json2).isEqualTo(json1);
}
@Test
public void testVersionedClassesDeserialization() {
Gson gson = gsonWithVersion(1.0);
String json = "{\"a\":3,\"b\":4,\"c\":5}";
Version1 version1 = gson.fromJson(json, Version1.class);
assertThat(version1.a).isEqualTo(3);
assertThat(version1.b).isEqualTo(4);
@SuppressWarnings("MemberName")
Version1_1 version1_1 = gson.fromJson(json, Version1_1.class);
assertThat(version1_1.a).isEqualTo(3);
assertThat(version1_1.b).isEqualTo(4);
assertThat(version1_1.c).isEqualTo(C);
}
@Test
public void testIgnoreLaterVersionClassSerialization() {
Gson gson = gsonWithVersion(1.0);
assertThat(gson.toJson(new Version1_2())).isEqualTo("null");
}
@Test
public void testIgnoreLaterVersionClassDeserialization() {
Gson gson = gsonWithVersion(1.0);
String json = "{\"a\":3,\"b\":4,\"c\":5,\"d\":6}";
@SuppressWarnings("MemberName")
Version1_2 version1_2 = gson.fromJson(json, Version1_2.class);
// Since the | VersioningTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/schematools/FallbackSchemaManagementToolTests.java | {
"start": 5194,
"end": 5589
} | class ____ {
@Id
private Integer id;
@Basic
private String name;
private SimpleEntity() {
// for use by Hibernate
}
public SimpleEntity(Integer id, String name) {
this.id = id;
this.name = name;
}
public Integer getId() {
return id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
}
| SimpleEntity |
java | quarkusio__quarkus | integration-tests/maven/src/test/resources-filtered/projects/multijar-module/beans/src/test/java/org/acme/testlib/TestMessage.java | {
"start": 60,
"end": 254
} | class ____ implements Printer.Message {
private final String msg;
public TestMessage(String msg) {
this.msg = msg;
}
@Override
public String getMessage() {
return msg;
}
}
| TestMessage |
java | apache__camel | core/camel-base-engine/src/main/java/org/apache/camel/impl/engine/DefaultValidatorRegistry.java | {
"start": 1289,
"end": 2486
} | class ____ extends AbstractDynamicRegistry<ValidatorKey, Validator>
implements ValidatorRegistry {
public DefaultValidatorRegistry(CamelContext context) {
super(context, CamelContextHelper.getMaximumValidatorCacheSize(context));
}
@Override
public Validator resolveValidator(ValidatorKey key) {
Validator answer = get(key);
if (answer == null && ObjectHelper.isNotEmpty(key.getType().getName())) {
answer = get(new ValidatorKey(new DataType(key.getType().getScheme())));
}
return answer;
}
@Override
public boolean isStatic(DataType type) {
return isStatic(new ValidatorKey(type));
}
@Override
public boolean isDynamic(DataType type) {
return isDynamic(new ValidatorKey(type));
}
@Override
public String toString() {
return "ValidatorRegistry for " + context.getName() + " [capacity: " + maxCacheSize + "]";
}
@Override
public Validator put(ValidatorKey key, Validator obj) {
// ensure validator is started before its being used
ServiceHelper.startService(obj);
return super.put(key, obj);
}
}
| DefaultValidatorRegistry |
java | spring-projects__spring-framework | spring-orm/src/main/java/org/springframework/orm/jpa/JpaTransactionManager.java | {
"start": 6236,
"end": 26951
} | class ____ extends AbstractPlatformTransactionManager
implements ResourceTransactionManager, BeanFactoryAware, InitializingBean {
private @Nullable EntityManagerFactory entityManagerFactory;
private @Nullable String persistenceUnitName;
private final Map<String, Object> jpaPropertyMap = new HashMap<>();
private @Nullable DataSource dataSource;
private JpaDialect jpaDialect = new DefaultJpaDialect();
private @Nullable Consumer<EntityManager> entityManagerInitializer;
/**
* Create a new JpaTransactionManager instance.
* <p>An EntityManagerFactory has to be set to be able to use it.
* @see #setEntityManagerFactory
*/
public JpaTransactionManager() {
}
/**
* Create a new JpaTransactionManager instance.
* @param emf the EntityManagerFactory to manage transactions for
*/
public JpaTransactionManager(EntityManagerFactory emf) {
this.entityManagerFactory = emf;
afterPropertiesSet();
}
/**
* Set the EntityManagerFactory that this instance should manage transactions for.
* <p>Alternatively, specify the persistence unit name of the target EntityManagerFactory.
* By default, a default EntityManagerFactory will be retrieved by finding a
* single unique bean of type EntityManagerFactory in the containing BeanFactory.
* @see #setPersistenceUnitName
*/
public void setEntityManagerFactory(@Nullable EntityManagerFactory emf) {
this.entityManagerFactory = emf;
}
/**
* Return the EntityManagerFactory that this instance should manage transactions for.
*/
public @Nullable EntityManagerFactory getEntityManagerFactory() {
return this.entityManagerFactory;
}
/**
* Obtain the EntityManagerFactory for actual use.
* @return the EntityManagerFactory (never {@code null})
* @throws IllegalStateException in case of no EntityManagerFactory set
* @since 5.0
*/
protected final EntityManagerFactory obtainEntityManagerFactory() {
EntityManagerFactory emf = getEntityManagerFactory();
Assert.state(emf != null, "No EntityManagerFactory set");
return emf;
}
/**
* Set the name of the persistence unit to manage transactions for.
* <p>This is an alternative to specifying the EntityManagerFactory by direct reference,
* resolving it by its persistence unit name instead. If no EntityManagerFactory and
* no persistence unit name have been specified, a default EntityManagerFactory will
* be retrieved by finding a single unique bean of type EntityManagerFactory.
* @see #setEntityManagerFactory
*/
public void setPersistenceUnitName(@Nullable String persistenceUnitName) {
this.persistenceUnitName = persistenceUnitName;
}
/**
* Return the name of the persistence unit to manage transactions for, if any.
*/
public @Nullable String getPersistenceUnitName() {
return this.persistenceUnitName;
}
/**
 * Specify JPA properties, to be passed into
 * {@code EntityManagerFactory.createEntityManager(Map)} (if any).
 * <p>Can be populated with a String "value" (parsed via PropertiesEditor)
 * or a "props" element in XML bean definitions.
 * @see jakarta.persistence.EntityManagerFactory#createEntityManager(java.util.Map)
 */
public void setJpaProperties(@Nullable Properties jpaProperties) {
	// Merges into (rather than replaces) the existing property map; a null argument is a no-op.
	CollectionUtils.mergePropertiesIntoMap(jpaProperties, this.jpaPropertyMap);
}
/**
 * Specify JPA properties as a Map, to be passed into
 * {@code EntityManagerFactory.createEntityManager(Map)} (if any).
 * <p>Entries are merged into any previously configured properties.
 * Can be populated with a "map" or "props" element in XML bean definitions.
 * @see jakarta.persistence.EntityManagerFactory#createEntityManager(java.util.Map)
 */
public void setJpaPropertyMap(@Nullable Map<String, ?> jpaProperties) {
	if (jpaProperties == null) {
		return;
	}
	this.jpaPropertyMap.putAll(jpaProperties);
}
/**
 * Allow {@code Map} access to the JPA properties to be passed to the persistence
 * provider, with the option to add or override specific entries.
 * <p>Useful for specifying entries directly, for example via {@code jpaPropertyMap[myKey]}.
 */
public Map<String, Object> getJpaPropertyMap() {
	// Intentionally exposes the live internal map so callers can add/override entries.
	return this.jpaPropertyMap;
}
/**
 * Set the JDBC DataSource that this instance should manage transactions for.
 * The DataSource should match the one used by the JPA EntityManagerFactory:
 * for example, you could specify the same JNDI DataSource for both.
 * <p>If the EntityManagerFactory uses a known DataSource as its connection factory,
 * the DataSource will be autodetected: You can still explicitly specify the
 * DataSource, but you don't need to in this case.
 * <p>A transactional JDBC Connection for this DataSource will be provided to
 * application code accessing this DataSource directly via DataSourceUtils
 * or JdbcTemplate. The Connection will be taken from the JPA EntityManager.
 * <p>Note that you need to use a JPA dialect for a specific JPA implementation
 * to allow for exposing JPA transactions as JDBC transactions.
 * <p>The DataSource specified here should be the target DataSource to manage
 * transactions for, not a TransactionAwareDataSourceProxy. Only data access
 * code may work with TransactionAwareDataSourceProxy, while the transaction
 * manager needs to work on the underlying target DataSource. If there's
 * nevertheless a TransactionAwareDataSourceProxy passed in, it will be
 * unwrapped to extract its target DataSource.
 * @see EntityManagerFactoryInfo#getDataSource()
 * @see #setJpaDialect
 * @see org.springframework.jdbc.datasource.TransactionAwareDataSourceProxy
 * @see org.springframework.jdbc.datasource.DataSourceUtils
 * @see org.springframework.jdbc.core.JdbcTemplate
 */
public void setDataSource(@Nullable DataSource dataSource) {
	// Unwrap a TransactionAwareDataSourceProxy: transactions must be managed on the
	// underlying target DataSource, else data access code won't see properly
	// exposed transactions (i.e. transactions for the target DataSource).
	this.dataSource = (dataSource instanceof TransactionAwareDataSourceProxy proxy ?
			proxy.getTargetDataSource() : dataSource);
}
/**
 * Return the JDBC DataSource that this instance manages transactions for,
 * or {@code null} if none was set or autodetected.
 */
public @Nullable DataSource getDataSource() {
	return this.dataSource;
}
/**
 * Set the JPA dialect to use for this transaction manager.
 * Used for vendor-specific transaction management and JDBC connection exposure.
 * <p>If the EntityManagerFactory uses a known JpaDialect, it will be autodetected:
 * You can still explicitly specify the DataSource, but you don't need to in this case.
 * <p>The dialect object can be used to retrieve the underlying JDBC connection
 * and thus allows for exposing JPA transactions as JDBC transactions.
 * <p>Passing {@code null} resets the dialect to a plain {@link DefaultJpaDialect}.
 * @see EntityManagerFactoryInfo#getJpaDialect()
 * @see JpaDialect#beginTransaction
 * @see JpaDialect#getJdbcConnection
 */
public void setJpaDialect(@Nullable JpaDialect jpaDialect) {
	if (jpaDialect != null) {
		this.jpaDialect = jpaDialect;
	}
	else {
		this.jpaDialect = new DefaultJpaDialect();
	}
}
/**
 * Return the JPA dialect to use for this transaction manager.
 * Never {@code null}: defaults to a DefaultJpaDialect (see setJpaDialect).
 */
public JpaDialect getJpaDialect() {
	return this.jpaDialect;
}
/**
 * Specify a callback for customizing every {@code EntityManager} resource
 * created for a new transaction managed by this {@code JpaTransactionManager}.
 * <p>This is an alternative to a factory-level {@code EntityManager} customizer
 * and to a {@code JpaVendorAdapter}-level {@code postProcessEntityManager}
 * callback, enabling specific customizations of transactional resources.
 * <p>The callback is invoked from {@link #createEntityManagerForTransaction()}
 * right after the EntityManager has been created.
 * @since 5.3
 * @see #createEntityManagerForTransaction()
 * @see AbstractEntityManagerFactoryBean#setEntityManagerInitializer
 * @see JpaVendorAdapter#postProcessEntityManager
 */
public void setEntityManagerInitializer(Consumer<EntityManager> entityManagerInitializer) {
	this.entityManagerInitializer = entityManagerInitializer;
}
/**
 * Retrieves an EntityManagerFactory by persistence unit name, if none set explicitly.
 * Falls back to a default EntityManagerFactory bean if no persistence unit specified.
 * <p>No-op if an EntityManagerFactory has already been set directly.
 * @throws IllegalStateException if lookup is required but the BeanFactory is not listable
 * @see #setPersistenceUnitName
 */
@Override
public void setBeanFactory(BeanFactory beanFactory) throws BeansException {
	if (getEntityManagerFactory() != null) {
		// Explicitly configured factory takes precedence; nothing to resolve.
		return;
	}
	if (beanFactory instanceof ListableBeanFactory lbf) {
		setEntityManagerFactory(EntityManagerFactoryUtils.findEntityManagerFactory(lbf, getPersistenceUnitName()));
	}
	else {
		throw new IllegalStateException("Cannot retrieve EntityManagerFactory by persistence unit name " +
				"in a non-listable BeanFactory: " + beanFactory);
	}
}
/**
 * Eagerly initialize the JPA dialect, creating a default one
 * for the specified EntityManagerFactory if none set.
 * Auto-detect the EntityManagerFactory's DataSource, if any.
 * @throws IllegalArgumentException if neither an EntityManagerFactory
 * nor a persistence unit name has been configured
 */
@Override
public void afterPropertiesSet() {
	EntityManagerFactory emf = getEntityManagerFactory();
	if (emf == null) {
		throw new IllegalArgumentException("'entityManagerFactory' or 'persistenceUnitName' is required");
	}
	if (emf instanceof EntityManagerFactoryInfo emfInfo) {
		// Autodetect DataSource and JpaDialect from the Spring-managed factory, if exposed.
		DataSource detectedDataSource = emfInfo.getDataSource();
		if (detectedDataSource != null) {
			setDataSource(detectedDataSource);
		}
		JpaDialect detectedDialect = emfInfo.getJpaDialect();
		if (detectedDialect != null) {
			setJpaDialect(detectedDialect);
		}
	}
}
@Override
public Object getResourceFactory() {
	// The EntityManagerFactory is the key under which transactional resources
	// are bound via TransactionSynchronizationManager.
	return obtainEntityManagerFactory();
}
@Override
protected Object doGetTransaction() {
	// Build the transaction object, picking up any thread-bound EntityManager
	// and (if a DataSource is configured) any thread-bound JDBC Connection.
	JpaTransactionObject txObject = new JpaTransactionObject();
	txObject.setSavepointAllowed(isNestedTransactionAllowed());
	EntityManagerHolder emHolder = (EntityManagerHolder)
			TransactionSynchronizationManager.getResource(obtainEntityManagerFactory());
	if (emHolder != null) {
		if (logger.isDebugEnabled()) {
			logger.debug("Found thread-bound EntityManager [" + emHolder.getEntityManager() +
					"] for JPA transaction");
		}
		// Pre-bound holder: mark as not new so it won't be closed on cleanup.
		txObject.setEntityManagerHolder(emHolder, false);
	}
	if (getDataSource() != null) {
		ConnectionHolder conHolder = (ConnectionHolder)
				TransactionSynchronizationManager.getResource(getDataSource());
		txObject.setConnectionHolder(conHolder);
	}
	return txObject;
}
@Override
protected boolean isExistingTransaction(Object transaction) {
	// A transaction is "existing" if the JPA transaction object already carries one.
	JpaTransactionObject txObject = (JpaTransactionObject) transaction;
	return txObject.hasTransaction();
}
@Override
protected void doBegin(Object transaction, TransactionDefinition definition) {
	JpaTransactionObject txObject = (JpaTransactionObject) transaction;
	// Reject a pre-bound JDBC Connection that is not synchronized with a transaction:
	// that indicates a competing DataSourceTransactionManager managing the same DataSource.
	if (txObject.hasConnectionHolder() && !txObject.getConnectionHolder().isSynchronizedWithTransaction()) {
		throw new IllegalTransactionStateException(
				"Pre-bound JDBC Connection found! JpaTransactionManager does not support " +
				"running within DataSourceTransactionManager if told to manage the DataSource itself. " +
				"It is recommended to use a single JpaTransactionManager for all transactions " +
				"on a single DataSource, no matter whether JPA or JDBC access.");
	}
	try {
		// Open a fresh EntityManager unless we can reuse an unsynchronized pre-bound one.
		if (!txObject.hasEntityManagerHolder() ||
				txObject.getEntityManagerHolder().isSynchronizedWithTransaction()) {
			EntityManager newEm = createEntityManagerForTransaction();
			if (logger.isDebugEnabled()) {
				logger.debug("Opened new EntityManager [" + newEm + "] for JPA transaction");
			}
			txObject.setEntityManagerHolder(new EntityManagerHolder(newEm), true);
		}
		EntityManager em = txObject.getEntityManagerHolder().getEntityManager();
		// Delegate to JpaDialect for actual transaction begin.
		int timeoutToUse = determineTimeout(definition);
		Object transactionData = getJpaDialect().beginTransaction(em,
				new JpaTransactionDefinition(definition, timeoutToUse, txObject.isNewEntityManagerHolder()));
		txObject.setTransactionData(transactionData);
		txObject.setReadOnly(definition.isReadOnly());
		// Register transaction timeout.
		if (timeoutToUse != TransactionDefinition.TIMEOUT_DEFAULT) {
			txObject.getEntityManagerHolder().setTimeoutInSeconds(timeoutToUse);
		}
		// Register the JPA EntityManager's JDBC Connection for the DataSource, if set,
		// so that plain JDBC code (DataSourceUtils/JdbcTemplate) joins this transaction.
		if (getDataSource() != null) {
			ConnectionHandle conHandle = getJpaDialect().getJdbcConnection(em, definition.isReadOnly());
			if (conHandle != null) {
				ConnectionHolder conHolder = new ConnectionHolder(conHandle);
				if (timeoutToUse != TransactionDefinition.TIMEOUT_DEFAULT) {
					conHolder.setTimeoutInSeconds(timeoutToUse);
				}
				if (logger.isDebugEnabled()) {
					logger.debug("Exposing JPA transaction as JDBC [" + conHandle + "]");
				}
				TransactionSynchronizationManager.bindResource(getDataSource(), conHolder);
				txObject.setConnectionHolder(conHolder);
			}
			else {
				if (logger.isDebugEnabled()) {
					logger.debug("Not exposing JPA transaction [" + em + "] as JDBC transaction because " +
							"JpaDialect [" + getJpaDialect() + "] does not support JDBC Connection retrieval");
				}
			}
		}
		// Bind the entity manager holder to the thread.
		if (txObject.isNewEntityManagerHolder()) {
			TransactionSynchronizationManager.bindResource(
					obtainEntityManagerFactory(), txObject.getEntityManagerHolder());
		}
		txObject.getEntityManagerHolder().setSynchronizedWithTransaction(true);
	}
	catch (TransactionException ex) {
		// Begin failed: close a newly opened EntityManager before propagating.
		closeEntityManagerAfterFailedBegin(txObject);
		throw ex;
	}
	catch (Throwable ex) {
		closeEntityManagerAfterFailedBegin(txObject);
		throw new CannotCreateTransactionException("Could not open JPA EntityManager for transaction", ex);
	}
}
/**
 * Create a JPA EntityManager to be used for a transaction.
 * <p>The default implementation checks whether the EntityManagerFactory
 * is a Spring proxy and delegates to
 * {@link EntityManagerFactoryInfo#createNativeEntityManager}
 * if possible which in turns applies
 * {@link JpaVendorAdapter#postProcessEntityManager(EntityManager)}.
 * Any configured {@code entityManagerInitializer} callback is applied last.
 * @see jakarta.persistence.EntityManagerFactory#createEntityManager()
 */
protected EntityManager createEntityManagerForTransaction() {
	EntityManagerFactory emf = obtainEntityManagerFactory();
	Map<String, Object> properties = getJpaPropertyMap();
	EntityManager entityManager;
	if (emf instanceof EntityManagerFactoryInfo emfInfo) {
		// Spring-managed factory: create a native (unproxied) EntityManager.
		entityManager = emfInfo.createNativeEntityManager(properties);
	}
	else if (CollectionUtils.isEmpty(properties)) {
		entityManager = emf.createEntityManager();
	}
	else {
		entityManager = emf.createEntityManager(properties);
	}
	Consumer<EntityManager> initializer = this.entityManagerInitializer;
	if (initializer != null) {
		initializer.accept(entityManager);
	}
	return entityManager;
}
/**
 * Close the current transaction's EntityManager.
 * Called after a transaction begin attempt failed.
 * <p>Only acts on a newly opened EntityManager; a pre-bound one is left alone.
 * @param txObject the current transaction
 */
protected void closeEntityManagerAfterFailedBegin(JpaTransactionObject txObject) {
	if (txObject.isNewEntityManagerHolder()) {
		EntityManager em = txObject.getEntityManagerHolder().getEntityManager();
		try {
			// Roll back a partially started JPA transaction, if any.
			if (em.getTransaction().isActive()) {
				em.getTransaction().rollback();
			}
		}
		catch (Throwable ex) {
			// Best-effort rollback: log and proceed to close regardless.
			logger.debug("Could not rollback EntityManager after failed transaction begin", ex);
		}
		finally {
			EntityManagerFactoryUtils.closeEntityManager(em);
		}
		// Detach the holder so cleanup won't touch the closed EntityManager again.
		txObject.setEntityManagerHolder(null, false);
	}
}
@Override
protected Object doSuspend(Object transaction) {
	JpaTransactionObject txObject = (JpaTransactionObject) transaction;
	// Detach holders from the transaction object, then unbind them from the thread;
	// both are captured in a SuspendedResourcesHolder for later doResume.
	txObject.setEntityManagerHolder(null, false);
	EntityManagerHolder entityManagerHolder = (EntityManagerHolder)
			TransactionSynchronizationManager.unbindResource(obtainEntityManagerFactory());
	txObject.setConnectionHolder(null);
	ConnectionHolder connectionHolder = null;
	// The JDBC Connection is only bound when a DataSource is configured and exposed.
	if (getDataSource() != null && TransactionSynchronizationManager.hasResource(getDataSource())) {
		connectionHolder = (ConnectionHolder) TransactionSynchronizationManager.unbindResource(getDataSource());
	}
	return new SuspendedResourcesHolder(entityManagerHolder, connectionHolder);
}
@Override
protected void doResume(@Nullable Object transaction, Object suspendedResources) {
	// Re-bind the resources captured by doSuspend to the current thread.
	SuspendedResourcesHolder suspendedHolder = (SuspendedResourcesHolder) suspendedResources;
	TransactionSynchronizationManager.bindResource(
			obtainEntityManagerFactory(), suspendedHolder.getEntityManagerHolder());
	ConnectionHolder suspendedConnection = suspendedHolder.getConnectionHolder();
	if (suspendedConnection != null && getDataSource() != null) {
		TransactionSynchronizationManager.bindResource(getDataSource(), suspendedConnection);
	}
}
/**
 * This implementation returns "true": a JPA commit will properly handle
 * transactions that have been marked rollback-only at a global level.
 * <p>The provider then throws a RollbackException from commit, which
 * {@code doCommit} translates accordingly.
 */
@Override
protected boolean shouldCommitOnGlobalRollbackOnly() {
	return true;
}
@Override
protected void doCommit(DefaultTransactionStatus status) {
	JpaTransactionObject txObject = (JpaTransactionObject) status.getTransaction();
	if (status.isDebug()) {
		logger.debug("Committing JPA transaction on EntityManager [" +
				txObject.getEntityManagerHolder().getEntityManager() + "]");
	}
	try {
		EntityTransaction tx = txObject.getEntityManagerHolder().getEntityManager().getTransaction();
		tx.commit();
	}
	catch (RollbackException ex) {
		// Commit refused (e.g. transaction marked rollback-only): try to translate
		// the underlying cause to a Spring DataAccessException via the dialect.
		if (ex.getCause() instanceof RuntimeException runtimeException) {
			DataAccessException dae = getJpaDialect().translateExceptionIfPossible(runtimeException);
			if (dae != null) {
				throw dae;
			}
		}
		throw new TransactionSystemException("Could not commit JPA transaction", ex);
	}
	catch (RuntimeException ex) {
		// Presumably failed to flush changes to database.
		throw DataAccessUtils.translateIfNecessary(ex, getJpaDialect());
	}
}
@Override
protected void doRollback(DefaultTransactionStatus status) {
	JpaTransactionObject txObject = (JpaTransactionObject) status.getTransaction();
	if (status.isDebug()) {
		logger.debug("Rolling back JPA transaction on EntityManager [" +
				txObject.getEntityManagerHolder().getEntityManager() + "]");
	}
	try {
		EntityTransaction tx = txObject.getEntityManagerHolder().getEntityManager().getTransaction();
		// Guard against a transaction that never became (or is no longer) active.
		if (tx.isActive()) {
			tx.rollback();
		}
	}
	catch (PersistenceException ex) {
		// Prefer a translated DataAccessException over a generic system exception.
		DataAccessException dae = getJpaDialect().translateExceptionIfPossible(ex);
		if (dae != null) {
			throw dae;
		}
		throw new TransactionSystemException("Could not roll back JPA transaction", ex);
	}
	finally {
		if (!txObject.isNewEntityManagerHolder()) {
			// Clear all pending inserts/updates/deletes in the EntityManager.
			// Necessary for pre-bound EntityManagers, to avoid inconsistent state.
			txObject.getEntityManagerHolder().getEntityManager().clear();
		}
	}
}
@Override
protected void doSetRollbackOnly(DefaultTransactionStatus status) {
	// Mark the underlying JPA transaction rollback-only; commit will then be refused.
	JpaTransactionObject jpaTxObject = (JpaTransactionObject) status.getTransaction();
	if (status.isDebug()) {
		logger.debug("Setting JPA transaction on EntityManager [" +
				jpaTxObject.getEntityManagerHolder().getEntityManager() + "] rollback-only");
	}
	jpaTxObject.setRollbackOnly();
}
@Override
protected void doCleanupAfterCompletion(Object transaction) {
	JpaTransactionObject txObject = (JpaTransactionObject) transaction;
	// Remove the entity manager holder from the thread, if still there.
	// (Could have been removed by EntityManagerFactoryUtils in order
	// to replace it with an unsynchronized EntityManager).
	if (txObject.isNewEntityManagerHolder()) {
		TransactionSynchronizationManager.unbindResourceIfPossible(obtainEntityManagerFactory());
	}
	txObject.getEntityManagerHolder().clear();
	// Remove the JDBC connection holder from the thread, if exposed.
	if (getDataSource() != null && txObject.hasConnectionHolder()) {
		TransactionSynchronizationManager.unbindResource(getDataSource());
		ConnectionHandle conHandle = txObject.getConnectionHolder().getConnectionHandle();
		if (conHandle != null) {
			try {
				// Give the dialect a chance to release the connection it exposed in doBegin.
				getJpaDialect().releaseJdbcConnection(conHandle,
						txObject.getEntityManagerHolder().getEntityManager());
			}
			catch (Throwable ex) {
				// Just log it, to keep a transaction-related exception.
				logger.error("Failed to release JDBC connection after transaction", ex);
			}
		}
	}
	getJpaDialect().cleanupTransaction(txObject.getTransactionData());
	// Remove the entity manager holder from the thread.
	if (txObject.isNewEntityManagerHolder()) {
		logger.debug("Closing JPA EntityManager after transaction");
		// closeAll also closes any extended EntityManagers tracked by the holder.
		txObject.getEntityManagerHolder().closeAll();
	}
	else {
		logger.debug("Not closing pre-bound JPA EntityManager after transaction");
	}
}
/**
* JPA transaction object, representing a EntityManagerHolder.
* Used as transaction object by JpaTransactionManager.
*/
private | JpaTransactionManager |
java | apache__flink | flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/src/AbstractFileSource.java | {
"start": 10236,
"end": 10706
} | class ____<T> extends AbstractFileSourceBuilder<T, SubBuilder<T>> {
* ...
* }
* }</pre>
*
* <p>That way, all return values from builder method defined here are typed to the sub-class
* type and support fluent chaining.
*
* <p>We don't make the publicly visible builder generic with a SELF type, because it leads to
* generic signatures that can look complicated and confusing.
*/
protected abstract static | SubBuilder |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/models/annotations/internal/JoinTableJpaAnnotation.java | {
"start": 1636,
"end": 8983
} | class ____ implements JoinTable, CommonTableDetails {
private String name;
private String catalog;
private String schema;
private jakarta.persistence.JoinColumn[] joinColumns;
private jakarta.persistence.JoinColumn[] inverseJoinColumns;
private jakarta.persistence.ForeignKey foreignKey;
private jakarta.persistence.ForeignKey inverseForeignKey;
private jakarta.persistence.UniqueConstraint[] uniqueConstraints;
private jakarta.persistence.Index[] indexes;
private jakarta.persistence.CheckConstraint[] check;
private String comment;
private String options;
/**
* Used in creating dynamic annotation instances (e.g. from XML)
*/
public JoinTableJpaAnnotation(ModelsContext modelContext) {
this.name = "";
this.catalog = "";
this.schema = "";
this.joinColumns = new jakarta.persistence.JoinColumn[0];
this.inverseJoinColumns = new jakarta.persistence.JoinColumn[0];
this.foreignKey = JpaAnnotations.FOREIGN_KEY.createUsage( modelContext );
this.inverseForeignKey = JpaAnnotations.FOREIGN_KEY.createUsage( modelContext );
this.uniqueConstraints = new jakarta.persistence.UniqueConstraint[0];
this.indexes = new jakarta.persistence.Index[0];
this.check = new jakarta.persistence.CheckConstraint[0];
this.comment = "";
this.options = "";
}
/**
* Used in creating annotation instances from JDK variant
*/
public JoinTableJpaAnnotation(JoinTable annotation, ModelsContext modelContext) {
this.name = annotation.name();
this.catalog = annotation.catalog();
this.schema = annotation.schema();
this.joinColumns = extractJdkValue( annotation, JpaAnnotations.JOIN_TABLE, "joinColumns", modelContext );
this.inverseJoinColumns = extractJdkValue(
annotation,
JpaAnnotations.JOIN_TABLE,
"inverseJoinColumns",
modelContext
);
this.foreignKey = extractJdkValue( annotation, JpaAnnotations.JOIN_TABLE, "foreignKey", modelContext );
this.inverseForeignKey = extractJdkValue(
annotation,
JpaAnnotations.JOIN_TABLE,
"inverseForeignKey",
modelContext
);
this.uniqueConstraints = extractJdkValue(
annotation,
JpaAnnotations.JOIN_TABLE,
"uniqueConstraints",
modelContext
);
this.indexes = extractJdkValue( annotation, JpaAnnotations.JOIN_TABLE, "indexes", modelContext );
this.check = extractJdkValue( annotation, JpaAnnotations.JOIN_TABLE, "check", modelContext );
this.comment = annotation.comment();
this.options = annotation.options();
}
/**
* Used in creating annotation instances from Jandex variant
*/
public JoinTableJpaAnnotation(Map<String, Object> attributeValues, ModelsContext modelContext) {
this.name = (String) attributeValues.get( "name" );
this.catalog = (String) attributeValues.get( "catalog" );
this.schema = (String) attributeValues.get( "schema" );
this.joinColumns = (jakarta.persistence.JoinColumn[]) attributeValues.get( "joinColumns" );
this.inverseJoinColumns = (jakarta.persistence.JoinColumn[]) attributeValues.get( "inverseJoinColumns" );
this.foreignKey = (jakarta.persistence.ForeignKey) attributeValues.get( "foreignKey" );
this.inverseForeignKey = (jakarta.persistence.ForeignKey) attributeValues.get( "inverseForeignKey" );
this.uniqueConstraints = (jakarta.persistence.UniqueConstraint[]) attributeValues.get( "uniqueConstraints" );
this.indexes = (jakarta.persistence.Index[]) attributeValues.get( "indexes" );
this.check = (jakarta.persistence.CheckConstraint[]) attributeValues.get( "check" );
this.comment = (String) attributeValues.get( "comment" );
this.options = (String) attributeValues.get( "options" );
}
@Override
public Class<? extends Annotation> annotationType() {
return JoinTable.class;
}
@Override
public String name() {
return name;
}
public void name(String value) {
this.name = value;
}
@Override
public String catalog() {
return catalog;
}
public void catalog(String value) {
this.catalog = value;
}
@Override
public String schema() {
return schema;
}
public void schema(String value) {
this.schema = value;
}
@Override
public jakarta.persistence.JoinColumn[] joinColumns() {
return joinColumns;
}
public void joinColumns(jakarta.persistence.JoinColumn[] value) {
this.joinColumns = value;
}
@Override
public jakarta.persistence.JoinColumn[] inverseJoinColumns() {
return inverseJoinColumns;
}
public void inverseJoinColumns(jakarta.persistence.JoinColumn[] value) {
this.inverseJoinColumns = value;
}
@Override
public jakarta.persistence.ForeignKey foreignKey() {
return foreignKey;
}
public void foreignKey(jakarta.persistence.ForeignKey value) {
this.foreignKey = value;
}
@Override
public jakarta.persistence.ForeignKey inverseForeignKey() {
return inverseForeignKey;
}
public void inverseForeignKey(jakarta.persistence.ForeignKey value) {
this.inverseForeignKey = value;
}
@Override
public jakarta.persistence.UniqueConstraint[] uniqueConstraints() {
return uniqueConstraints;
}
public void uniqueConstraints(jakarta.persistence.UniqueConstraint[] value) {
this.uniqueConstraints = value;
}
@Override
public jakarta.persistence.Index[] indexes() {
return indexes;
}
public void indexes(jakarta.persistence.Index[] value) {
this.indexes = value;
}
@Override
public jakarta.persistence.CheckConstraint[] check() {
return check;
}
public void check(jakarta.persistence.CheckConstraint[] value) {
this.check = value;
}
@Override
public String comment() {
return comment;
}
public void comment(String value) {
this.comment = value;
}
@Override
public String options() {
return options;
}
public void options(String value) {
this.options = value;
}
public void apply(JaxbJoinTableImpl jaxbJoinTable, XmlDocumentContext xmlDocumentContext) {
applyOptionalString( jaxbJoinTable.getName(), this::name );
applyCatalog( jaxbJoinTable, this, xmlDocumentContext );
applySchema( jaxbJoinTable, this, xmlDocumentContext );
applyOptionalString( jaxbJoinTable.getComment(), this::comment );
applyOptionalString( jaxbJoinTable.getOptions(), this::options );
check( collectCheckConstraints( jaxbJoinTable.getCheckConstraints(), xmlDocumentContext ) );
indexes( collectIndexes( jaxbJoinTable.getIndexes(), xmlDocumentContext ) );
uniqueConstraints( collectUniqueConstraints( jaxbJoinTable.getUniqueConstraints(), xmlDocumentContext ) );
final List<JaxbJoinColumnImpl> joinColumns = jaxbJoinTable.getJoinColumn();
if ( CollectionHelper.isNotEmpty( joinColumns ) ) {
joinColumns( JoinColumnProcessing.transformJoinColumnList(
joinColumns,
xmlDocumentContext
) );
}
final List<JaxbJoinColumnImpl> inverseJoinColumns = jaxbJoinTable.getInverseJoinColumn();
if ( CollectionHelper.isNotEmpty( inverseJoinColumns ) ) {
inverseJoinColumns( JoinColumnProcessing.transformJoinColumnList(
inverseJoinColumns,
xmlDocumentContext
) );
}
if ( jaxbJoinTable.getForeignKey() != null ) {
foreignKey( ForeignKeyProcessing.createNestedForeignKeyAnnotation(
jaxbJoinTable.getForeignKey(),
xmlDocumentContext
) );
}
if ( jaxbJoinTable.getInverseForeignKey() != null ) {
inverseForeignKey( ForeignKeyProcessing.createNestedForeignKeyAnnotation(
jaxbJoinTable.getInverseForeignKey(),
xmlDocumentContext
) );
}
}
}
| JoinTableJpaAnnotation |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/engine/spi/CacheInitiator.java | {
"start": 637,
"end": 1263
} | class ____ implements SessionFactoryServiceInitiator<CacheImplementor> {
public static final CacheInitiator INSTANCE = new CacheInitiator();
@Override
public CacheImplementor initiateService(SessionFactoryServiceInitiatorContext context) {
final var regionFactory = context.getServiceRegistry().getService( RegionFactory.class );
return regionFactory instanceof NoCachingRegionFactory
? new DisabledCaching( context.getSessionFactory() )
: new EnabledCaching( context.getSessionFactory() );
}
@Override
public Class<CacheImplementor> getServiceInitiated() {
return CacheImplementor.class;
}
}
| CacheInitiator |
java | resilience4j__resilience4j | resilience4j-spring-boot2/src/test/java/io/github/resilience4j/SpringBootCommonTest.java | {
"start": 12877,
"end": 12985
} | class ____ extends AbstractTimeLimiterConfigurationOnMissingBean {
}
}
| TimeLimiterConfigurationOnMissingBean |
java | google__dagger | javatests/dagger/internal/codegen/XExecutableTypesTest.java | {
"start": 6330,
"end": 7266
} | class ____ {",
" <T> void m(Collection i) { throw new RuntimeException(); }",
"}");
CompilerTests.invocationCompiler(foo, bar)
.compile(
invocation -> {
XTypeElement fooType = invocation.getProcessingEnv().requireTypeElement("test.Foo");
XMethodElement m1 = fooType.getDeclaredMethods().get(0);
XTypeElement barType = invocation.getProcessingEnv().requireTypeElement("test.Bar");
XMethodElement m2 = barType.getDeclaredMethods().get(0);
assertThat(XExecutableTypes.isSubsignature(m2, m1)).isFalse();
assertThat(XExecutableTypes.isSubsignature(m1, m2)).isFalse();
});
}
@Test
public void subsignatureDifferentTypeArgumentBounds() {
Source foo =
CompilerTests.javaSource(
"test.Foo",
"package test;",
"import java.util.*;",
" | Bar |
java | spring-projects__spring-boot | core/spring-boot-autoconfigure/src/test/java/org/springframework/boot/autoconfigure/AutoConfigurationImportSelectorTests.java | {
"start": 15182,
"end": 15342
} | class ____ {
}
@SpringBootApplication(excludeName = "com.example.three.ThirdAutoConfiguration")
private final | EnableAutoConfigurationWithAbsentClassNameExclude |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java | {
"start": 8694,
"end": 12323
} | class ____
* @param conf configuration
* @return auxiliary service
* @throws IOException
* @throws ClassNotFoundException
*/
private AuxiliaryService createAuxServiceFromLocalClasspath(
AuxServiceRecord service, String appLocalClassPath, Configuration conf)
throws IOException, ClassNotFoundException {
Preconditions.checkArgument(appLocalClassPath != null &&
!appLocalClassPath.isEmpty(),
"local classpath was null in createAuxServiceFromLocalClasspath");
final String sName = service.getName();
final String className = getClassName(service);
if (service.getConfiguration() != null && service.getConfiguration()
.getFiles().size() > 0) {
throw new YarnRuntimeException("The aux service:" + sName
+ " has configured local classpath:" + appLocalClassPath
+ " and config files:" + service.getConfiguration().getFiles()
+ ". Only one of them should be configured.");
}
return AuxiliaryServiceWithCustomClassLoader.getInstance(conf, className,
appLocalClassPath, getSystemClasses(service));
}
/**
* Creates an auxiliary service from a specification.
*
* @param service aux service record
* @param conf configuration
* @param fromConfiguration true if from configuration, false if from manifest
* @return auxiliary service
* @throws IOException
* @throws ClassNotFoundException
*/
private AuxiliaryService createAuxService(AuxServiceRecord service,
Configuration conf, boolean fromConfiguration) throws IOException,
ClassNotFoundException {
final String sName = service.getName();
final String className = getClassName(service);
if (className == null || className.isEmpty()) {
throw new YarnRuntimeException("Class name not provided for auxiliary " +
"service " + sName);
}
if (fromConfiguration) {
// aux services from the configuration have an additional configuration
// option specifying a local classpath that will not be localized
final String appLocalClassPath = conf.get(String.format(
YarnConfiguration.NM_AUX_SERVICES_CLASSPATH, sName));
if (appLocalClassPath != null && !appLocalClassPath.isEmpty()) {
return createAuxServiceFromLocalClasspath(service, appLocalClassPath,
conf);
}
}
AuxServiceConfiguration serviceConf = service.getConfiguration();
List<Path> destFiles = new ArrayList<>();
if (serviceConf != null) {
List<AuxServiceFile> files = serviceConf.getFiles();
if (files != null) {
for (AuxServiceFile file : files) {
// localize file (if needed) and add it to the list of paths that
// will become the classpath
destFiles.add(maybeDownloadJars(sName, className, file.getSrcFile(),
file.getType(), conf));
}
}
}
if (destFiles.size() > 0) {
// create aux service using a custom localized classpath
LOG.info("The aux service:" + sName
+ " is using the custom classloader with classpath " + destFiles);
return AuxiliaryServiceWithCustomClassLoader.getInstance(conf,
className, StringUtils.join(File.pathSeparatorChar, destFiles),
getSystemClasses(service));
} else {
return createAuxServiceFromConfiguration(service);
}
}
/**
* Copies the specified remote file to local NM aux service directory. If the
* same file already exists (as determined by modification time), the file
* will not be copied again.
*
* @param sName service name
* @param className service | path |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/error/ShouldHaveSize_create_Test.java | {
"start": 1511,
"end": 3784
} | class ____ {
private ErrorMessageFactory factory;
@BeforeEach
void setUp() {
factory = shouldHaveSize(list('a', 'b'), 4, 2);
}
@Test
void should_create_error_message() {
// WHEN
String message = factory.create(new TestDescription("TEST"), STANDARD_REPRESENTATION);
// THEN
then(message).isEqualTo("[TEST] %nExpected size: 2 but was: 4 in:%n['a', 'b']".formatted());
}
@Test
void should_create_error_message_with_hexadecimal_representation() {
// WHEN
String message = factory.create(new TestDescription("TEST"), new HexadecimalRepresentation());
// THEN
then(message).isEqualTo("[TEST] %nExpected size: 2 but was: 4 in:%n['0x0061', '0x0062']".formatted());
}
@Test
void should_create_error_message_for_incorrect_file_size() {
// GIVEN
ErrorMessageFactory factory = shouldHaveSize(new FakeFile("ab%sc"), 3L);
// WHEN
String actualErrorMessage = factory.create(new TestDescription("TEST"), STANDARD_REPRESENTATION);
// THEN
then(actualErrorMessage).isEqualTo(format("[TEST] %n"
+ "Expecting file%n"
+ " ab%%sc%n"
+ "to have a size of:%n"
+ " 3L bytes%n"
+ "but had:%n"
+ " 0L bytes"));
}
@Test
void should_create_error_message_for_incorrect_path_size(@TempDir Path tempDir) throws IOException {
// GIVEN
Path actual = Files.write(tempDir.resolve("actual"), "content".getBytes());
// WHEN
String actualErrorMessage = shouldHaveSize(actual, 0L).create(new TestDescription("TEST"), STANDARD_REPRESENTATION);
// THEN
then(actualErrorMessage).isEqualTo("[TEST] %n"
+ "Expecting path%n"
+ " %s%n"
+ "to have a size of:%n"
+ " 0L bytes%n"
+ "but had:%n"
+ " 7L bytes",
actual);
}
}
| ShouldHaveSize_create_Test |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java | {
"start": 4540,
"end": 5801
} | class ____ {
private String category;
private boolean isPrefix = false;
private int boost = 1;
public Builder() {}
/**
* Sets the category of the category.
* This is a required field
*/
public Builder setCategory(String category) {
Objects.requireNonNull(category, "category must not be null");
this.category = category;
return this;
}
/**
* Sets if the context should be treated as a prefix or not.
* Defaults to false
*/
public Builder setPrefix(boolean prefix) {
this.isPrefix = prefix;
return this;
}
/**
* Sets the query-time boost of the context.
* Defaults to 1.
*/
public Builder setBoost(int boost) {
if (boost <= 0) {
throw new IllegalArgumentException("boost must be greater than 0");
}
this.boost = boost;
return this;
}
public CategoryQueryContext build() {
Objects.requireNonNull(category, "category must not be null");
return new CategoryQueryContext(category, boost, isPrefix);
}
}
}
| Builder |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/MissingImplementsComparableTest.java | {
"start": 2057,
"end": 2418
} | class ____ extends Foo<String> {
@Override
public int compareTo(Foo<String> o) {
return 0;
}
}
""")
.doTest();
}
@Test
public void doesNotFlagImproperCompareTo() {
compilationHelper
.addSourceLines(
"Test.java",
"""
| Test |
java | junit-team__junit5 | junit-platform-engine/src/main/java/org/junit/platform/engine/discovery/DiscoverySelectors.java | {
"start": 30733,
"end": 30862
} | class ____, or array types; however,
* the exact syntax depends on the underlying test engine.
*
* @param classLoader the | names |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/onetoone/bidirectional/BidirectionalNoNulls.java | {
"start": 710,
"end": 3599
} | class ____ {
private Integer ed1_id;
private Integer ed2_id;
private Integer ing1_id;
private Integer ing2_id;
@BeforeClassTemplate
public void initData(EntityManagerFactoryScope scope) {
BiRefEdEntity ed1 = new BiRefEdEntity( 1, "data_ed_1" );
BiRefEdEntity ed2 = new BiRefEdEntity( 2, "data_ed_2" );
BiRefIngEntity ing1 = new BiRefIngEntity( 3, "data_ing_1" );
BiRefIngEntity ing2 = new BiRefIngEntity( 4, "data_ing_2" );
// Revision 1
scope.inTransaction( em -> {
ing1.setReference( ed1 );
ing2.setReference( ed2 );
em.persist( ed1 );
em.persist( ed2 );
em.persist( ing1 );
em.persist( ing2 );
} );
// Revision 2
scope.inTransaction( em -> {
BiRefIngEntity ing1Ref = em.find( BiRefIngEntity.class, ing1.getId() );
BiRefIngEntity ing2Ref = em.find( BiRefIngEntity.class, ing2.getId() );
BiRefEdEntity ed1Ref = em.find( BiRefEdEntity.class, ed1.getId() );
BiRefEdEntity ed2Ref = em.find( BiRefEdEntity.class, ed2.getId() );
ing1Ref.setReference( ed2Ref );
ing2Ref.setReference( ed1Ref );
} );
//
ed1_id = ed1.getId();
ed2_id = ed2.getId();
ing1_id = ing1.getId();
ing2_id = ing2.getId();
}
@Test
public void testRevisionsCounts(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
final var auditReader = AuditReaderFactory.get( em );
assertEquals( Arrays.asList( 1, 2 ), auditReader.getRevisions( BiRefEdEntity.class, ed1_id ) );
assertEquals( Arrays.asList( 1, 2 ), auditReader.getRevisions( BiRefEdEntity.class, ed2_id ) );
assertEquals( Arrays.asList( 1, 2 ), auditReader.getRevisions( BiRefIngEntity.class, ing1_id ) );
assertEquals( Arrays.asList( 1, 2 ), auditReader.getRevisions( BiRefIngEntity.class, ing2_id ) );
} );
}
@Test
public void testHistoryOfEdId1(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
final var auditReader = AuditReaderFactory.get( em );
BiRefIngEntity ing1 = em.find( BiRefIngEntity.class, ing1_id );
BiRefIngEntity ing2 = em.find( BiRefIngEntity.class, ing2_id );
BiRefEdEntity rev1 = auditReader.find( BiRefEdEntity.class, ed1_id, 1 );
BiRefEdEntity rev2 = auditReader.find( BiRefEdEntity.class, ed1_id, 2 );
assertEquals( ing1, rev1.getReferencing() );
assertEquals( ing2, rev2.getReferencing() );
} );
}
@Test
public void testHistoryOfEdId2(EntityManagerFactoryScope scope) {
scope.inEntityManager( em -> {
final var auditReader = AuditReaderFactory.get( em );
BiRefIngEntity ing1 = em.find( BiRefIngEntity.class, ing1_id );
BiRefIngEntity ing2 = em.find( BiRefIngEntity.class, ing2_id );
BiRefEdEntity rev1 = auditReader.find( BiRefEdEntity.class, ed2_id, 1 );
BiRefEdEntity rev2 = auditReader.find( BiRefEdEntity.class, ed2_id, 2 );
assertEquals( ing2, rev1.getReferencing() );
assertEquals( ing1, rev2.getReferencing() );
} );
}
}
| BidirectionalNoNulls |
java | apache__dubbo | dubbo-registry/dubbo-registry-api/src/main/java/org/apache/dubbo/registry/retry/FailedUnsubscribedTask.java | {
"start": 1048,
"end": 1690
} | class ____ extends AbstractRetryTask {
private static final String NAME = "retry unsubscribe";
private final NotifyListener listener;
public FailedUnsubscribedTask(URL url, FailbackRegistry registry, NotifyListener listener) {
super(url, registry, NAME);
if (listener == null) {
throw new IllegalArgumentException();
}
this.listener = listener;
}
@Override
protected void doRetry(URL url, FailbackRegistry registry, Timeout timeout) {
registry.doUnsubscribe(url, listener);
registry.removeFailedUnsubscribedTask(url, listener);
}
}
| FailedUnsubscribedTask |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/batch/sql/DynamicFunctionPlanTest.java | {
"start": 1114,
"end": 1389
} | class ____ extends DynamicFunctionPlanTestBase {
@Override
protected TableTestUtil getTableTestUtil() {
return batchTestUtil(TableConfig.getDefault());
}
@Override
protected boolean isBatchMode() {
return true;
}
}
| DynamicFunctionPlanTest |
java | square__retrofit | retrofit-adapters/rxjava3/src/test/java/retrofit2/adapter/rxjava3/SingleWithSchedulerTest.java | {
"start": 1186,
"end": 2705
} | interface ____ {
@GET("/")
Single<String> body();
@GET("/")
Single<Response<String>> response();
@GET("/")
Single<Result<String>> result();
}
private final TestScheduler scheduler = new TestScheduler();
private Service service;
@Before
public void setUp() {
Retrofit retrofit =
new Retrofit.Builder()
.baseUrl(server.url("/"))
.addConverterFactory(new StringConverterFactory())
.addCallAdapterFactory(RxJava3CallAdapterFactory.createWithScheduler(scheduler))
.build();
service = retrofit.create(Service.class);
}
@Test
public void bodyUsesScheduler() {
server.enqueue(new MockResponse());
RecordingSingleObserver<Object> observer = observerRule.create();
service.body().subscribe(observer);
observer.assertNoEvents();
scheduler.triggerActions();
observer.assertAnyValue();
}
@Test
public void responseUsesScheduler() {
server.enqueue(new MockResponse());
RecordingSingleObserver<Object> observer = observerRule.create();
service.response().subscribe(observer);
observer.assertNoEvents();
scheduler.triggerActions();
observer.assertAnyValue();
}
@Test
public void resultUsesScheduler() {
server.enqueue(new MockResponse());
RecordingSingleObserver<Object> observer = observerRule.create();
service.result().subscribe(observer);
observer.assertNoEvents();
scheduler.triggerActions();
observer.assertAnyValue();
}
}
| Service |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.