language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
apache__rocketmq
|
namesrv/src/test/java/org/apache/rocketmq/namesrv/route/ZoneRouteRPCHookMoreTest.java
|
{
"start": 1573,
"end": 6117
}
|
class ____ {
private ZoneRouteRPCHook zoneRouteRPCHook;
@Before
public void setUp() {
zoneRouteRPCHook = new ZoneRouteRPCHook();
}
@Test
public void testFilterByZoneName_ValidInput_ShouldFilterCorrectly() {
// Arrange
TopicRouteData topicRouteData = new TopicRouteData();
topicRouteData.setBrokerDatas(generateBrokerDataList());
topicRouteData.setQueueDatas(generateQueueDataList());
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_ROUTEINFO_BY_TOPIC,null);
request.setExtFields(createExtFields("true","ZoneA"));
RemotingCommand response = RemotingCommand.createResponseCommand(ResponseCode.SUCCESS, "remark");
// Act
zoneRouteRPCHook.doAfterResponse("127.0.0.1", request, response);
TopicRouteData decodedResponse = RemotingSerializable.decode(response.getBody(), TopicRouteData.class);
// Assert
assertNull(decodedResponse);
}
@Test
public void testFilterByZoneName_NoZoneName_ShouldNotFilter() {
// Arrange
TopicRouteData topicRouteData = new TopicRouteData();
topicRouteData.setBrokerDatas(generateBrokerDataList());
topicRouteData.setQueueDatas(generateQueueDataList());
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_ROUTEINFO_BY_TOPIC,null);
HashMap<String, String> extFields = new HashMap<>();
extFields.put(MixAll.ZONE_MODE, "true");
request.setExtFields(extFields);
RemotingCommand response = RemotingCommand.createResponseCommand(ResponseCode.SUCCESS, null);
// Act
zoneRouteRPCHook.doAfterResponse("127.0.0.1", request, response);
TopicRouteData decodedResponse = RemotingSerializable.decode(response.getBody(), TopicRouteData.class);
// Assert
assertEquals(topicRouteData.getBrokerDatas().size(), 2);
assertEquals(topicRouteData.getQueueDatas().size(), 2);
}
@Test
public void testFilterByZoneName_ZoneModeFalse_ShouldNotFilter() {
// Arrange
TopicRouteData topicRouteData = new TopicRouteData();
topicRouteData.setBrokerDatas(generateBrokerDataList());
topicRouteData.setQueueDatas(generateQueueDataList());
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_ROUTEINFO_BY_TOPIC,null);
request.setExtFields(createExtFields("false","ZoneA"));
RemotingCommand response = RemotingCommand.createResponseCommand(ResponseCode.SUCCESS ,null);
// Act
zoneRouteRPCHook.doAfterResponse("127.0.0.1", request, response);
TopicRouteData decodedResponse = RemotingSerializable.decode(response.getBody(), TopicRouteData.class);
// Assert
assertEquals(topicRouteData.getBrokerDatas().size(), 2);
assertEquals(topicRouteData.getQueueDatas().size(), 2);
}
private List<BrokerData> generateBrokerDataList() {
List<BrokerData> brokerDataList = new ArrayList<>();
BrokerData brokerData1 = new BrokerData();
brokerData1.setBrokerName("BrokerA");
brokerData1.setZoneName("ZoneA");
Map<Long, String> brokerAddrs = new HashMap<>();
brokerAddrs.put(MixAll.MASTER_ID, "127.0.0.1:10911");
brokerData1.setBrokerAddrs((HashMap<Long, String>) brokerAddrs);
brokerDataList.add(brokerData1);
BrokerData brokerData2 = new BrokerData();
brokerData2.setBrokerName("BrokerB");
brokerData2.setZoneName("ZoneB");
brokerAddrs = new HashMap<>();
brokerAddrs.put(MixAll.MASTER_ID, "127.0.0.1:10912");
brokerData2.setBrokerAddrs((HashMap<Long, String>) brokerAddrs);
brokerDataList.add(brokerData2);
return brokerDataList;
}
private List<QueueData> generateQueueDataList() {
List<QueueData> queueDataList = new ArrayList<>();
QueueData queueData1 = new QueueData();
queueData1.setBrokerName("BrokerA");
queueDataList.add(queueData1);
QueueData queueData2 = new QueueData();
queueData2.setBrokerName("BrokerB");
queueDataList.add(queueData2);
return queueDataList;
}
private HashMap<String, String> createExtFields(String zoneMode, String zoneName) {
HashMap<String, String> extFields = new HashMap<>();
extFields.put(MixAll.ZONE_MODE, zoneMode);
extFields.put(MixAll.ZONE_NAME, zoneName);
return extFields;
}
}
|
ZoneRouteRPCHookMoreTest
|
java
|
netty__netty
|
common/src/main/java/io/netty/util/NetUtilSubstitutions.java
|
{
"start": 3937,
"end": 4128
}
|
class ____ {
private static final Collection<NetworkInterface> NETWORK_INTERFACES =
NetUtilInitializations.networkInterfaces();
}
}
|
NetUtilNetworkInterfacesLazyHolder
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/index/engine/SegmentsStatsTests.java
|
{
"start": 1257,
"end": 3133
}
|
class ____ extends ESTestCase {
public void testFileExtensionDescriptions() throws Exception {
try (Directory dir = newDirectory()) {
try (
IndexWriter w = new IndexWriter(
dir,
new IndexWriterConfig().setUseCompoundFile(false).setMergePolicy(NoMergePolicy.INSTANCE)
)
) {
// Create a Lucene index that uses all features
Document doc = new Document();
StringField id = new StringField("id", "1", Store.YES);
doc.add(id);
// Points
doc.add(new LongPoint("lp", 42L));
// Doc values
doc.add(new NumericDocValuesField("ndv", 42L));
// Inverted index, term vectors and stored fields
FieldType ft = new FieldType(TextField.TYPE_STORED);
ft.setStoreTermVectors(true);
doc.add(new Field("if", "elasticsearch", ft));
w.addDocument(doc);
id.setStringValue("2");
w.addDocument(doc);
// Create a live docs file
w.deleteDocuments(new Term("id", "2"));
}
for (String file : dir.listAll()) {
final String extension = IndexFileNames.getExtension(file);
if ("lock".equals(extension)) {
// We should ignore lock files for stats file comparisons
continue;
}
if (extension != null) {
assertNotNull(
"extension [" + extension + "] was not contained in the known segment stats files",
LuceneFilesExtensions.fromExtension(extension)
);
}
}
}
}
}
|
SegmentsStatsTests
|
java
|
apache__camel
|
core/camel-api/src/main/java/org/apache/camel/spi/StreamCachingStrategy.java
|
{
"start": 4062,
"end": 4753
}
|
class ____ can be separated by comma.
*/
void setAllowClasses(String names);
/**
* To filter stream caching of a given set of allowed/denied classes. By default, all classes that are
* {@link java.io.InputStream} is allowed.
*/
Collection<Class<?>> getAllowClasses();
/**
* To filter stream caching of a given set of allowed/denied classes. By default, all classes that are
* {@link java.io.InputStream} is allowed.
*/
void setDenyClasses(Class<?>... classes);
/**
* To filter stream caching of a given set of allowed/denied classes. By default, all classes that are
* {@link java.io.InputStream} is allowed. Multiple
|
names
|
java
|
spring-projects__spring-boot
|
module/spring-boot-web-server/src/main/java/org/springframework/boot/web/server/servlet/context/WebListenerHandler.java
|
{
"start": 1863,
"end": 2234
}
|
class ____ implements WebListenerRegistrar {
private final String listenerClassName;
ServletComponentWebListenerRegistrar(String listenerClassName) {
this.listenerClassName = listenerClassName;
}
@Override
public void register(WebListenerRegistry registry) {
registry.addWebListeners(this.listenerClassName);
}
}
}
|
ServletComponentWebListenerRegistrar
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/FallThroughTest.java
|
{
"start": 2788,
"end": 3667
}
|
class ____ {
public int numTweets = 55000000;
public int everyBodyIsDoingIt(int a, int b) {
switch (a) {
case 1:
System.out.println("1");
// BUG: Diagnostic contains:
case 2:
System.out.println("2");
// BUG: Diagnostic contains:
default:
}
return 0;
}
}
}
""")
.doTest();
}
@Test
public void negative() {
testHelper
.addSourceLines(
"FallThroughNegativeCases.java",
"""
package com.google.errorprone.bugpatterns.testdata;
import java.io.FileInputStream;
import java.io.IOException;
public
|
Tweeter
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/interfaceproxy/ItemImpl.java
|
{
"start": 189,
"end": 616
}
|
class ____ implements Item {
private Long id;
private String name;
/**
* @return Returns the id.
*/
public Long getId() {
return id;
}
/**
* @param id The id to set.
*/
public void setId(Long id) {
this.id = id;
}
/**
* @return Returns the name.
*/
public String getName() {
return name;
}
/**
* @param name The name to set.
*/
public void setName(String name) {
this.name = name;
}
}
|
ItemImpl
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/invoker/transformer/ExceptionTransformerTest.java
|
{
"start": 569,
"end": 1358
}
|
class ____ {
@RegisterExtension
public ArcTestContainer container = ArcTestContainer.builder()
.beanClasses(MyService.class)
.beanRegistrars(new InvokerHelperRegistrar(MyService.class, (bean, factory, invokers) -> {
MethodInfo hello = bean.getImplClazz().firstMethod("hello");
MethodInfo doSomething = bean.getImplClazz().firstMethod("doSomething");
for (MethodInfo method : List.of(hello, doSomething)) {
invokers.put(method.name(), factory.createInvoker(bean, method)
.withExceptionTransformer(ExceptionTransformer.class, "change")
.build());
}
}))
.build();
static
|
ExceptionTransformerTest
|
java
|
ReactiveX__RxJava
|
src/main/java/io/reactivex/rxjava3/internal/operators/single/SingleDoOnDispose.java
|
{
"start": 1032,
"end": 1481
}
|
class ____<T> extends Single<T> {
final SingleSource<T> source;
final Action onDispose;
public SingleDoOnDispose(SingleSource<T> source, Action onDispose) {
this.source = source;
this.onDispose = onDispose;
}
@Override
protected void subscribeActual(final SingleObserver<? super T> observer) {
source.subscribe(new DoOnDisposeObserver<>(observer, onDispose));
}
static final
|
SingleDoOnDispose
|
java
|
elastic__elasticsearch
|
test/external-modules/esql-heap-attack/src/main/java/org/elasticsearch/test/esql/heap_attack/HeapAttackPlugin.java
|
{
"start": 1625,
"end": 3007
}
|
class ____ extends Plugin implements ActionPlugin {
@Override
public List<RestHandler> getRestHandlers(
Settings settings,
NamedWriteableRegistry namedWriteableRegistry,
RestController restController,
ClusterSettings clusterSettings,
IndexScopedSettings indexScopedSettings,
SettingsFilter settingsFilter,
IndexNameExpressionResolver indexNameExpressionResolver,
Supplier<DiscoveryNodes> nodesInCluster,
Predicate<NodeFeature> clusterSupportsFeature
) {
return List.of(new RestTriggerOutOfMemoryAction());
}
// Deliberately unregistered, only used in unit tests. Copied to AbstractSimpleTransportTestCase#IGNORE_DESERIALIZATION_ERRORS_SETTING
// so that tests in other packages can see it too.
static final Setting<Boolean> IGNORE_DESERIALIZATION_ERRORS_SETTING = Setting.boolSetting(
"transport.ignore_deserialization_errors",
true,
Setting.Property.NodeScope
);
@Override
public List<Setting<?>> getSettings() {
return CollectionUtils.appendToCopy(super.getSettings(), IGNORE_DESERIALIZATION_ERRORS_SETTING);
}
@Override
public Settings additionalSettings() {
return Settings.builder().put(super.additionalSettings()).put(IGNORE_DESERIALIZATION_ERRORS_SETTING.getKey(), true).build();
}
}
|
HeapAttackPlugin
|
java
|
spring-projects__spring-boot
|
smoke-test/spring-boot-smoke-test-test/src/main/java/smoketest/test/web/UserVehicleService.java
|
{
"start": 1130,
"end": 1882
}
|
class ____ {
private final UserRepository userRepository;
private final VehicleDetailsService vehicleDetailsService;
public UserVehicleService(UserRepository userRepository, VehicleDetailsService vehicleDetailsService) {
this.userRepository = userRepository;
this.vehicleDetailsService = vehicleDetailsService;
}
public VehicleDetails getVehicleDetails(String username)
throws UserNameNotFoundException, VehicleIdentificationNumberNotFoundException {
Assert.notNull(username, "'username' must not be null");
User user = this.userRepository.findByUsername(username);
if (user == null) {
throw new UserNameNotFoundException(username);
}
return this.vehicleDetailsService.getVehicleDetails(user.getVin());
}
}
|
UserVehicleService
|
java
|
playframework__playframework
|
core/play/src/main/java/play/mvc/RangeResults.java
|
{
"start": 586,
"end": 766
}
|
class ____ {
private static Optional<String> rangeHeader(Http.Request request) {
return request.header(Http.HeaderNames.RANGE);
}
@ApiMayChange
public static
|
RangeResults
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java
|
{
"start": 5666,
"end": 6323
}
|
class ____ extends BaseNodesResponse<NodeSnapshotStatus> {
public NodesSnapshotStatus(ClusterName clusterName, List<NodeSnapshotStatus> nodes, List<FailedNodeException> failures) {
super(clusterName, nodes, failures);
}
@Override
protected List<NodeSnapshotStatus> readNodesFrom(StreamInput in) throws IOException {
return in.readCollectionAsList(NodeSnapshotStatus::new);
}
@Override
protected void writeNodesTo(StreamOutput out, List<NodeSnapshotStatus> nodes) throws IOException {
out.writeCollection(nodes);
}
}
public static
|
NodesSnapshotStatus
|
java
|
elastic__elasticsearch
|
modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/TransportCreateDataStreamAction.java
|
{
"start": 1563,
"end": 3914
}
|
class ____ extends AcknowledgedTransportMasterNodeAction<CreateDataStreamAction.Request> {
private final MetadataCreateDataStreamService metadataCreateDataStreamService;
private final SystemIndices systemIndices;
private final ProjectResolver projectResolver;
@Inject
public TransportCreateDataStreamAction(
TransportService transportService,
ClusterService clusterService,
ThreadPool threadPool,
ActionFilters actionFilters,
ProjectResolver projectResolver,
MetadataCreateDataStreamService metadataCreateDataStreamService,
SystemIndices systemIndices
) {
super(
CreateDataStreamAction.NAME,
transportService,
clusterService,
threadPool,
actionFilters,
CreateDataStreamAction.Request::new,
EsExecutors.DIRECT_EXECUTOR_SERVICE
);
this.projectResolver = projectResolver;
this.metadataCreateDataStreamService = metadataCreateDataStreamService;
this.systemIndices = systemIndices;
}
@Override
protected void masterOperation(
Task task,
CreateDataStreamAction.Request request,
ClusterState state,
ActionListener<AcknowledgedResponse> listener
) throws Exception {
final SystemDataStreamDescriptor systemDataStreamDescriptor = systemIndices.validateDataStreamAccess(
request.getName(),
threadPool.getThreadContext()
);
MetadataCreateDataStreamService.CreateDataStreamClusterStateUpdateRequest updateRequest =
new MetadataCreateDataStreamService.CreateDataStreamClusterStateUpdateRequest(
projectResolver.getProjectId(),
request.getName(),
request.getStartTime(),
systemDataStreamDescriptor,
request.masterNodeTimeout(),
request.ackTimeout(),
true
);
metadataCreateDataStreamService.createDataStream(updateRequest, listener);
}
@Override
protected ClusterBlockException checkBlock(CreateDataStreamAction.Request request, ClusterState state) {
return state.blocks().globalBlockedException(projectResolver.getProjectId(), ClusterBlockLevel.METADATA_WRITE);
}
}
|
TransportCreateDataStreamAction
|
java
|
google__auto
|
factory/src/test/resources/good/FactoryImplementingGenericInterfaceExtension.java
|
{
"start": 987,
"end": 1045
}
|
interface ____<T, S> {
T make(S arg);
}
}
|
GenericFactory
|
java
|
spring-cloud__spring-cloud-gateway
|
spring-cloud-gateway-server-webflux/src/test/java/org/springframework/cloud/gateway/filter/factory/RemoveJsonAttributesResponseBodyGatewayFilterFactoryTests.java
|
{
"start": 2025,
"end": 4592
}
|
class ____ extends BaseWebClientTests {
@Test
void removeJsonAttributeRootWorks() {
testClient.post()
.uri("/post")
.header("Host", "www.removejsonattributes.org")
.header(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON_VALUE)
.header("foo", "test")
.header("bar", "test")
.exchange()
.expectStatus()
.isOk()
.expectBody(Map.class)
.consumeWith(result -> {
Map<?, ?> response = result.getResponseBody();
assertThat(response).isNotNull();
String responseBody = (String) response.get("data");
assertThat(responseBody).isNull();
Map<String, Object> headers = getMap(response, "headers");
assertThat(headers).containsKey("user-agent");
});
}
@Test
void removeJsonAttributeRecursivelyWorks() {
testClient.post()
.uri("/post")
.header("Host", "www.removejsonattributesrecursively.org")
.header(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON_VALUE)
.header("foo", "test")
.header("bar", "test")
.exchange()
.expectStatus()
.isOk()
.expectBody(Map.class)
.consumeWith(result -> {
Map<?, ?> response = result.getResponseBody();
assertThat(response).isNotNull();
Map<String, Object> headers = getMap(response, "headers");
assertThat(headers).doesNotContainKey("foo");
assertThat(headers).containsEntry("bar", "test");
});
}
@Test
void removeJsonAttributeNoMatchesWorks() {
testClient.post()
.uri("/post")
.header("Host", "www.removejsonattributesnomatches.org")
.header(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON_VALUE)
.exchange()
.expectStatus()
.isOk()
.expectBody(Map.class)
.consumeWith(result -> {
Map<?, ?> response = result.getResponseBody();
assertThat(response).isNotNull();
Map<String, Object> headers = getMap(response, "headers");
assertThat(headers).isNotNull();
assertThat(headers).containsEntry(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON_VALUE);
});
}
@Test
void raisedWhenRemoveJsonAttributes() {
testClient.post()
.uri("/post")
.header("Host", "www.raisederrorwhenremovejsonattributes.org")
.header(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON_VALUE)
.exchange()
.expectStatus()
.is5xxServerError()
.expectBody(String.class)
.consumeWith(result -> {
assertThat(result.getResponseBody()).isEqualTo("Failed to process JSON of response body.");
});
}
@EnableAutoConfiguration
@SpringBootConfiguration
@Import(DefaultTestConfig.class)
public static
|
RemoveJsonAttributesResponseBodyGatewayFilterFactoryTests
|
java
|
apache__flink
|
flink-test-utils-parent/flink-test-utils-junit/src/main/java/org/apache/flink/testutils/junit/RetryOnFailure.java
|
{
"start": 1283,
"end": 1962
}
|
class ____ {
*
* {@literal @}Rule
* public RetryRule retryRule = new RetryRule();
*
* {@literal @}Test
* public void yourTest() {
* // This will be retried 1 time (total runs 2) before failing the test.
* throw new Exception("Failing test");
* }
*
* {@literal @}Test
* {@literal @}RetryOnFailure(times=2)
* public void yourTest() {
* // This will be retried 2 time (total runs 3) before failing the test.
* throw new Exception("Failing test");
* }
* }
* </pre>
*/
@Retention(RetentionPolicy.RUNTIME)
@Target({java.lang.annotation.ElementType.METHOD, ElementType.TYPE})
@Inherited
public @
|
YourTest
|
java
|
apache__flink
|
flink-tests/src/test/java/org/apache/flink/test/checkpointing/KeyedStateCheckpointingITCase.java
|
{
"start": 16118,
"end": 16787
}
|
class ____ {
public long value;
private NonSerializableLong(long value) {
this.value = value;
}
public static NonSerializableLong of(long value) {
return new NonSerializableLong(value);
}
@Override
public boolean equals(Object obj) {
return this == obj
|| obj != null
&& obj.getClass() == getClass()
&& ((NonSerializableLong) obj).value == this.value;
}
@Override
public int hashCode() {
return (int) (value ^ (value >>> 32));
}
}
}
|
NonSerializableLong
|
java
|
quarkusio__quarkus
|
extensions/netty/runtime/src/main/java/io/quarkus/netty/runtime/graal/NettySubstitutions.java
|
{
"start": 23879,
"end": 24074
}
|
class ____ {
@Substitute
private void setOpensslEngineSocketFd(Channel c) {
// do nothing.
}
}
@TargetClass(className = "io.netty.handler.ssl.PemReader")
final
|
Target_SslHandler
|
java
|
quarkusio__quarkus
|
integration-tests/jpa-postgresql-withxml/src/main/java/io/quarkus/it/jpa/postgresql/otherpu/EntityWithXmlOtherPU.java
|
{
"start": 348,
"end": 853
}
|
class ____ {
@Id
@GeneratedValue
Long id;
@JdbcTypeCode(SqlTypes.SQLXML)
ToBeSerializedWithDateTime xml;
public EntityWithXmlOtherPU() {
}
public EntityWithXmlOtherPU(ToBeSerializedWithDateTime data) {
this.xml = data;
}
@Override
public String toString() {
return "EntityWithXmlOtherPU{" +
"id=" + id +
", xml" + xml +
'}';
}
@RegisterForReflection
public static
|
EntityWithXmlOtherPU
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/MapRequestRepresentationWithGuavaTest.java
|
{
"start": 10381,
"end": 11622
}
|
interface ____ {",
" @Provides @IntoMap @IntKey(0) static Integer provideInt0() { return 0; }",
" @Provides @IntoMap @IntKey(1) static Integer provideInt1() { return 1; }",
" @Provides @IntoMap @IntKey(2) static Integer provideInt2() { return 2; }",
" @Provides @IntoMap @IntKey(3) static Integer provideInt3() { return 3; }",
" @Provides @IntoSet static Integer provideIntSet0() { return 0; }",
" @Provides @IntoSet static Integer provideIntSet1() { return 1; }",
" @Provides @IntoSet static Integer provideIntSet2() { return 2; }",
" @Provides @IntoSet static Integer provideIntSet3() { return 3; }",
"}");
Source subcomponentModuleFile =
CompilerTests.javaSource(
"test.SubcomponentModule",
"package test;",
"",
"import dagger.Module;",
"import dagger.Provides;",
"import dagger.multibindings.IntoMap;",
"import dagger.multibindings.IntoSet;",
"import dagger.multibindings.IntKey;",
"import java.util.Map;",
"import java.util.Set;",
"",
"@Module",
"
|
MapModule
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/security/SerializationRolesResource.java
|
{
"start": 311,
"end": 652
}
|
class ____ {
@POST
@RolesAllowed({ "user", "admin" })
public String defaultSecurity(SerializationEntity entity) {
return entity.getName();
}
@Path("/admin")
@RolesAllowed("admin")
@POST
public String admin(SerializationEntity entity) {
return entity.getName();
}
}
|
SerializationRolesResource
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/bugs/_1269/model/Vehicle.java
|
{
"start": 262,
"end": 707
}
|
class ____ {
private final VehicleTypeInfo vehicleTypeInfo;
private final List<VehicleImage> images;
public Vehicle(VehicleTypeInfo vehicleTypeInfo, List<VehicleImage> images) {
this.vehicleTypeInfo = vehicleTypeInfo;
this.images = images;
}
public VehicleTypeInfo getVehicleTypeInfo() {
return vehicleTypeInfo;
}
public List<VehicleImage> getImages() {
return images;
}
}
|
Vehicle
|
java
|
elastic__elasticsearch
|
build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/dependencies/patches/azurecore/AzureCoreClassPatcher.java
|
{
"start": 1368,
"end": 2519
}
|
class ____ implements TransformAction<TransformParameters.None> {
private static final String JAR_FILE_TO_PATCH = "azure-core-[\\d.]*\\.jar";
private static final List<PatcherInfo> CLASS_PATCHERS = List.of(
classPatcher(
"com/azure/core/implementation/ImplUtils.class",
"7beda5bdff5ea460cfc08721a188cf07d16e0c987dae45401fca7abf4e6e6c0e",
ImplUtilsPatcher::new
)
);
@Classpath
@InputArtifact
public abstract Provider<FileSystemLocation> getInputArtifact();
@Override
public void transform(@NotNull TransformOutputs outputs) {
File inputFile = getInputArtifact().get().getAsFile();
if (Pattern.matches(JAR_FILE_TO_PATCH, inputFile.getName())) {
System.out.println("Patching " + inputFile.getName());
File outputFile = outputs.file(inputFile.getName().replace(".jar", "-patched.jar"));
Utils.patchJar(inputFile, outputFile, CLASS_PATCHERS, true);
} else {
System.out.println("Skipping " + inputFile.getName());
outputs.file(getInputArtifact());
}
}
}
|
AzureCoreClassPatcher
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/translog/Translog.java
|
{
"start": 5133,
"end": 47122
}
|
class ____ extends AbstractIndexShardComponent implements IndexShardComponent, Closeable {
public static final TransportVersion REORDERED_TRANSLOG_OPERATIONS = TransportVersion.fromName("reordered_translog_operations");
/*
* TODO
* - we might need something like a deletion policy to hold on to more than one translog eventually (I think sequence IDs needs this)
* but we can refactor as we go
* - use a simple BufferedOutputStream to write stuff and fold BufferedTranslogWriter into it's super class... the tricky bit is we
* need to be able to do random access reads even from the buffer
* - we need random exception on the FileSystem API tests for all this.
* - we need to page align the last write before we sync, we can take advantage of ensureSynced for this since we might have already
* fsynced far enough
*/
public static final String TRANSLOG_UUID_KEY = "translog_uuid";
public static final String TRANSLOG_FILE_PREFIX = "translog-";
public static final String TRANSLOG_FILE_SUFFIX = ".tlog";
public static final String CHECKPOINT_SUFFIX = ".ckp";
public static final String CHECKPOINT_FILE_NAME = "translog" + CHECKPOINT_SUFFIX;
static final Pattern PARSE_STRICT_ID_PATTERN = Pattern.compile("^" + TRANSLOG_FILE_PREFIX + "(\\d+)(\\.tlog)$");
public static final int DEFAULT_HEADER_SIZE_IN_BYTES = TranslogHeader.headerSizeInBytes(UUIDs.randomBase64UUID());
// the list of translog readers is guaranteed to be in order of translog generation
private final List<TranslogReader> readers = new ArrayList<>();
private final BigArrays bigArrays;
private final DiskIoBufferPool diskIoBufferPool;
protected final Lock readLock;
protected final Lock writeLock;
private final Path location;
private TranslogWriter current;
protected final TragicExceptionHolder tragedy = new TragicExceptionHolder();
private final AtomicBoolean closed = new AtomicBoolean();
private final TranslogConfig config;
private final LongSupplier globalCheckpointSupplier;
private final LongSupplier primaryTermSupplier;
private final String translogUUID;
private final TranslogDeletionPolicy deletionPolicy;
private final LongConsumer persistedSequenceNumberConsumer;
private final OperationListener operationListener;
private final TranslogOperationAsserter operationAsserter;
/**
* Creates a new Translog instance. This method will create a new transaction log unless the given {@link TranslogGeneration} is
* {@code null}. If the generation is {@code null} this method is destructive and will delete all files in the translog path given. If
* the generation is not {@code null}, this method tries to open the given translog generation. The generation is treated as the last
* generation referenced from already committed data. This means all operations that have not yet been committed should be in the
* translog file referenced by this generation. The translog creation will fail if this generation can't be opened.
*
* @param config the configuration of this translog
* @param translogUUID the translog uuid to open, null for a new translog
* @param deletionPolicy an instance of {@link TranslogDeletionPolicy} that controls when a translog file can be safely
* deleted
* @param globalCheckpointSupplier a supplier for the global checkpoint
* @param primaryTermSupplier a supplier for the latest value of primary term of the owning index shard. The latest term value is
* examined and stored in the header whenever a new generation is rolled. It's guaranteed from outside
* that a new generation is rolled when the term is increased. This guarantee allows to us to validate
* and reject operation whose term is higher than the primary term stored in the translog header.
* @param persistedSequenceNumberConsumer a callback that's called whenever an operation with a given sequence number is successfully
* persisted.
*/
@SuppressWarnings("this-escape")
public Translog(
final TranslogConfig config,
final String translogUUID,
TranslogDeletionPolicy deletionPolicy,
final LongSupplier globalCheckpointSupplier,
final LongSupplier primaryTermSupplier,
final LongConsumer persistedSequenceNumberConsumer,
final TranslogOperationAsserter operationAsserter
) throws IOException {
super(config.getShardId(), config.getIndexSettings());
this.config = config;
this.globalCheckpointSupplier = globalCheckpointSupplier;
this.primaryTermSupplier = primaryTermSupplier;
this.persistedSequenceNumberConsumer = persistedSequenceNumberConsumer;
this.operationListener = config.getOperationListener();
this.operationAsserter = operationAsserter;
this.deletionPolicy = deletionPolicy;
this.translogUUID = translogUUID;
this.bigArrays = config.getBigArrays();
this.diskIoBufferPool = config.getDiskIoBufferPool();
var rwl = new ReentrantReadWriteLock();
this.readLock = rwl.readLock();
this.writeLock = rwl.writeLock();
this.location = config.getTranslogPath();
Files.createDirectories(this.location);
try {
final Checkpoint checkpoint = readCheckpoint(location);
final Path nextTranslogFile = location.resolve(getFilename(checkpoint.generation + 1));
final Path currentCheckpointFile = location.resolve(getCommitCheckpointFileName(checkpoint.generation));
// this is special handling for error condition when we create a new writer but we fail to bake
// the newly written file (generation+1) into the checkpoint. This is still a valid state
// we just need to cleanup before we continue
// we hit this before and then blindly deleted the new generation even though we managed to bake it in and then hit this:
// https://discuss.elastic.co/t/cannot-recover-index-because-of-missing-tanslog-files/38336 as an example
//
// For this to happen we must have already copied the translog.ckp file into translog-gen.ckp so we first check if that
// file exists. If not we don't even try to clean it up and wait until we fail creating it
assert Files.exists(nextTranslogFile) == false || Files.size(nextTranslogFile) <= TranslogHeader.headerSizeInBytes(translogUUID)
: "unexpected translog file: [" + nextTranslogFile + "]";
if (Files.exists(currentCheckpointFile) // current checkpoint is already copied
&& Files.deleteIfExists(nextTranslogFile)) { // delete it and log a warning
logger.warn(
"deleted previously created, but not yet committed, next generation [{}]. This can happen due to a"
+ " tragic exception when creating a new generation",
nextTranslogFile.getFileName()
);
}
this.readers.addAll(recoverFromFiles(checkpoint));
if (readers.isEmpty()) {
throw new IllegalStateException("at least one reader must be recovered");
}
boolean success = false;
current = null;
try {
current = createWriter(
checkpoint.generation + 1,
getMinFileGeneration(),
checkpoint.globalCheckpoint,
persistedSequenceNumberConsumer
);
success = true;
} finally {
// we have to close all the recovered ones otherwise we leak file handles here
// for instance if we have a lot of tlog and we can't create the writer we keep on holding
// on to all the uncommitted tlog files if we don't close
if (success == false) {
IOUtils.closeWhileHandlingException(readers);
}
}
} catch (Exception e) {
// close the opened translog files if we fail to create a new translog...
IOUtils.closeWhileHandlingException(current);
IOUtils.closeWhileHandlingException(readers);
throw e;
}
}
public static void deleteAll(Path translogLocation) throws IOException {
IOUtils.rm(translogLocation);
}
    /**
     * Recovers all translog files found on disk, from the generation named in the given checkpoint down to
     * its {@code minTranslogGeneration}, and returns the readers in ascending generation order.
     * On any failure every already-opened reader is closed so no file handles leak.
     */
    private ArrayList<TranslogReader> recoverFromFiles(Checkpoint checkpoint) throws IOException {
        boolean success = false;
        ArrayList<TranslogReader> foundTranslogs = new ArrayList<>();
        writeLock.lock();
        try {
            logger.debug("open uncommitted translog checkpoint {}", checkpoint);
            final long minGenerationToRecoverFrom = checkpoint.minTranslogGeneration;
            assert minGenerationToRecoverFrom >= 0 : "minTranslogGeneration should be non-negative";
            // we open files in reverse order in order to validate the translog uuid before we start traversing the translog based on
            // the generation id we found in the lucene commit. This gives for better error messages if the wrong
            // translog was found.
            for (long i = checkpoint.generation; i >= minGenerationToRecoverFrom; i--) {
                Path committedTranslogFile = location.resolve(getFilename(i));
                // the newest generation is described by the live checkpoint; older ones by their committed checkpoint files
                final Checkpoint readerCheckpoint = i == checkpoint.generation
                    ? checkpoint
                    : Checkpoint.read(location.resolve(getCommitCheckpointFileName(i)));
                final TranslogReader reader;
                try {
                    reader = openReader(committedTranslogFile, readerCheckpoint);
                } catch (NoSuchFileException fnfe) {
                    // a missing generation inside the recovered range means the translog file set is inconsistent
                    throw new TranslogCorruptedException(
                        committedTranslogFile.toString(),
                        "translog file doesn't exist with generation: "
                            + i
                            + " recovering from: "
                            + minGenerationToRecoverFrom
                            + " checkpoint: "
                            + checkpoint.generation
                            + " - translog ids must be consecutive"
                    );
                }
                assert reader.getPrimaryTerm() <= primaryTermSupplier.getAsLong()
                    : "Primary terms go backwards; current term ["
                        + primaryTermSupplier.getAsLong()
                        + "] translog path [ "
                        + committedTranslogFile
                        + ", existing term ["
                        + reader.getPrimaryTerm()
                        + "]";
                foundTranslogs.add(reader);
                logger.debug("recovered local translog from checkpoint {}", checkpoint);
            }
            // readers were collected newest-first above; callers expect ascending generation order
            Collections.reverse(foundTranslogs);
            // when we clean up files, we first update the checkpoint with a new minReferencedTranslog and then delete them;
            // if we crash just at the wrong moment, it may be that we leave one unreferenced file behind so we delete it if there
            IOUtils.deleteFilesIgnoringExceptions(
                location.resolve(getFilename(minGenerationToRecoverFrom - 1)),
                location.resolve(getCommitCheckpointFileName(minGenerationToRecoverFrom - 1))
            );
            Path commitCheckpoint = location.resolve(getCommitCheckpointFileName(checkpoint.generation));
            if (Files.exists(commitCheckpoint)) {
                // a committed checkpoint already exists for the recovered generation; it must match what we read
                Checkpoint checkpointFromDisk = Checkpoint.read(commitCheckpoint);
                if (checkpoint.equals(checkpointFromDisk) == false) {
                    throw new TranslogCorruptedException(
                        commitCheckpoint.toString(),
                        "checkpoint file "
                            + commitCheckpoint.getFileName()
                            + " already exists but has corrupted content: expected "
                            + checkpoint
                            + " but got "
                            + checkpointFromDisk
                    );
                }
            } else {
                copyCheckpointTo(commitCheckpoint);
            }
            success = true;
        } finally {
            if (success == false) {
                IOUtils.closeWhileHandlingException(foundTranslogs);
            }
            writeLock.unlock();
        }
        return foundTranslogs;
    }
    /**
     * Copies the live checkpoint file to the given target path: copy into a temp file on the same
     * filesystem, fsync it, then atomically move it into place. The temp file is removed if the
     * rename never happened (e.g. disk full mid-copy).
     */
    private void copyCheckpointTo(Path targetPath) throws IOException {
        // a temp file to copy checkpoint to - note it must be in on the same FS otherwise atomic move won't work
        final Path tempFile = Files.createTempFile(location, TRANSLOG_FILE_PREFIX, CHECKPOINT_SUFFIX);
        boolean tempFileRenamed = false;
        try {
            // we first copy this into the temp-file and then fsync it followed by an atomic move into the target file
            // that way if we hit a disk-full here we are still in an consistent state.
            Files.copy(location.resolve(CHECKPOINT_FILE_NAME), tempFile, StandardCopyOption.REPLACE_EXISTING);
            IOUtils.fsync(tempFile, false);
            Files.move(tempFile, targetPath, StandardCopyOption.ATOMIC_MOVE);
            tempFileRenamed = true;
            // we only fsync the directory the tempFile was already fsynced
            IOUtils.fsync(targetPath.getParent(), true);
        } finally {
            if (tempFileRenamed == false) {
                try {
                    Files.delete(tempFile);
                } catch (IOException ex) {
                    logger.warn(() -> format("failed to delete temp file %s", tempFile), ex);
                }
            }
        }
    }
TranslogReader openReader(Path path, Checkpoint checkpoint) throws IOException {
FileChannel channel = FileChannel.open(path, StandardOpenOption.READ);
try {
assert Translog.parseIdFromFileName(path) == checkpoint.generation
: "expected generation: " + Translog.parseIdFromFileName(path) + " but got: " + checkpoint.generation;
TranslogReader reader = TranslogReader.open(channel, path, checkpoint, translogUUID);
channel = null;
return reader;
} finally {
IOUtils.close(channel);
}
}
/**
* Extracts the translog generation from a file name.
*
* @throws IllegalArgumentException if the path doesn't match the expected pattern.
*/
public static long parseIdFromFileName(Path translogFile) {
final String fileName = translogFile.getFileName().toString();
final Matcher matcher = PARSE_STRICT_ID_PATTERN.matcher(fileName);
if (matcher.matches()) {
try {
return Long.parseLong(matcher.group(1));
} catch (NumberFormatException e) {
throw new IllegalStateException(
"number formatting issue in a file that passed PARSE_STRICT_ID_PATTERN: " + fileName + "]",
e
);
}
}
throw new IllegalArgumentException("can't parse id from file: " + fileName);
}
    /** Returns {@code true} if this {@code Translog} is still open, i.e. {@link #close()} has not completed its CAS. */
    public boolean isOpen() {
        return closed.get() == false;
    }
    /**
     * Assertion helper for {@link #close()}: walks the current stack trace and returns {@code true} when
     * close() was invoked either from outside the Translog class hierarchy or via {@code closeOnTragicEvent}.
     * Any other internal caller indicates a programming error.
     */
    private static boolean calledFromOutsideOrViaTragedyClose() {
        List<StackTraceElement> frames = Stream.of(Thread.currentThread().getStackTrace()).skip(3). // skip getStackTrace, current method
        // and close method frames
            limit(10). // limit depth of analysis to 10 frames, it should be enough to catch closing with, e.g. IOUtils
            filter(f -> {
                try {
                    return Translog.class.isAssignableFrom(Class.forName(f.getClassName()));
                } catch (Exception ignored) {
                    // class not loadable from here - treat as an external frame
                    return false;
                }
            }). // find all inner callers including Translog subclasses
            toList();
        // the list of inner callers should be either empty or should contain closeOnTragicEvent method
        return frames.isEmpty() || frames.stream().anyMatch(f -> f.getMethodName().equals("closeOnTragicEvent"));
    }
    /**
     * Closes the translog: syncs the current writer, then closes all files unless retention locks are
     * still pending. Idempotent - only the first caller to flip {@code closed} does the work.
     */
    @Override
    public void close() throws IOException {
        assert calledFromOutsideOrViaTragedyClose()
            : "Translog.close method is called from inside Translog, but not via closeOnTragicEvent method";
        if (closed.compareAndSet(false, true)) {
            writeLock.lock();
            try {
                try {
                    current.sync();
                } finally {
                    // close files even if the final sync throws
                    closeFilesIfNoPendingRetentionLocks();
                }
            } finally {
                writeLock.unlock();
                logger.debug("translog closed");
            }
        }
    }
    /**
     * Returns the translog location as an absolute path.
     * This path doesn't contain the translog entries itself; it is the
     * directory holding the transaction log files.
     */
    public Path location() {
        return location;
    }
/**
* Returns the generation of the current transaction log.
*/
public long currentFileGeneration() {
readLock.lock();
try {
return current.getGeneration();
} finally {
readLock.unlock();
}
}
/**
* Returns the minimum file generation referenced by the translog
*/
public long getMinFileGeneration() {
readLock.lock();
try {
if (readers.isEmpty()) {
return current.getGeneration();
} else {
assert readers.stream().map(TranslogReader::getGeneration).min(Long::compareTo).get().equals(readers.get(0).getGeneration())
: "the first translog isn't the one with the minimum generation:" + readers;
return readers.get(0).getGeneration();
}
} finally {
readLock.unlock();
}
}
    /**
     * Returns the number of operations across all translog files (readers and the current writer);
     * delegates with a sentinel generation that includes every generation.
     */
    public int totalOperations() {
        return totalOperationsByMinGen(-1);
    }
    /**
     * Returns the size in bytes of all translog files (readers and the current writer);
     * delegates with a sentinel generation that includes every generation.
     */
    public long sizeInBytes() {
        return sizeInBytesByMinGen(-1);
    }
long earliestLastModifiedAge() {
readLock.lock();
try {
ensureOpen();
return findEarliestLastModifiedAge(System.currentTimeMillis(), readers, current);
} catch (IOException e) {
throw new TranslogException(shardId, "Unable to get the earliest last modified time for the transaction log");
} finally {
readLock.unlock();
}
}
/**
* Returns the age of the oldest entry in the translog files in seconds
*/
static long findEarliestLastModifiedAge(long currentTime, Iterable<TranslogReader> readers, TranslogWriter writer) throws IOException {
long earliestTime = currentTime;
for (TranslogReader r : readers) {
earliestTime = Math.min(r.getLastModifiedTime(), earliestTime);
}
return Math.max(0, currentTime - Math.min(earliestTime, writer.getLastModifiedTime()));
}
/**
* Returns the number of operations in the translog files at least the given generation
*/
public int totalOperationsByMinGen(long minGeneration) {
readLock.lock();
try {
ensureOpen();
return Stream.concat(readers.stream(), Stream.of(current))
.filter(r -> r.getGeneration() >= minGeneration)
.mapToInt(BaseTranslogReader::totalOperations)
.sum();
} finally {
readLock.unlock();
}
}
/**
* Returns the number of operations in the transaction files that contain operations with seq# above the given number.
*/
public int estimateTotalOperationsFromMinSeq(long minSeqNo) {
readLock.lock();
try {
ensureOpen();
return Stream.concat(readers.stream(), Stream.of(current))
.filter(reader -> minSeqNo <= reader.getCheckpoint().maxEffectiveSeqNo())
.mapToInt(BaseTranslogReader::totalOperations)
.sum();
} finally {
readLock.unlock();
}
}
/**
* Returns the size in bytes of the translog files at least the given generation
*/
public long sizeInBytesByMinGen(long minGeneration) {
readLock.lock();
try {
ensureOpen();
return Stream.concat(readers.stream(), Stream.of(current))
.filter(r -> r.getGeneration() >= minGeneration)
.mapToLong(BaseTranslogReader::sizeInBytes)
.sum();
} finally {
readLock.unlock();
}
}
    /**
     * Creates a new translog writer for the specified generation, seeded with the current minimum
     * file generation and the supplied global checkpoint.
     *
     * @param fileGeneration the translog generation
     * @return a writer for the new translog
     * @throws IOException if creating the translog failed
     */
    TranslogWriter createWriter(long fileGeneration) throws IOException {
        final TranslogWriter writer = createWriter(
            fileGeneration,
            getMinFileGeneration(),
            globalCheckpointSupplier.getAsLong(),
            persistedSequenceNumberConsumer
        );
        // a freshly created translog file must contain exactly the header and nothing else
        assert writer.sizeInBytes() == DEFAULT_HEADER_SIZE_IN_BYTES
            : "Mismatch translog header size; "
                + "empty translog size ["
                + writer.sizeInBytes()
                + ", header size ["
                + DEFAULT_HEADER_SIZE_IN_BYTES
                + "]";
        return writer;
    }
    /**
     * creates a new writer
     *
     * @param fileGeneration the generation of the write to be written
     * @param initialMinTranslogGen the minimum translog generation to be written in the first checkpoint. This is
     *                              needed to solve and initialization problem while constructing an empty translog.
     *                              With no readers and no current, a call to {@link #getMinFileGeneration()} would not work.
     * @param initialGlobalCheckpoint the global checkpoint to be written in the first checkpoint.
     * @param persistedSequenceNumberConsumer callback notified with each sequence number that gets persisted
     * @return the newly created writer
     * @throws TranslogException wrapping any {@link IOException} raised while creating the file
     */
    TranslogWriter createWriter(
        long fileGeneration,
        long initialMinTranslogGen,
        long initialGlobalCheckpoint,
        LongConsumer persistedSequenceNumberConsumer
    ) throws IOException {
        final TranslogWriter newWriter;
        try {
            newWriter = TranslogWriter.create(
                shardId,
                translogUUID,
                fileGeneration,
                location.resolve(getFilename(fileGeneration)),
                getChannelFactory(),
                config.getBufferSize(),
                initialMinTranslogGen,
                initialGlobalCheckpoint,
                globalCheckpointSupplier,
                this::getMinFileGeneration,
                primaryTermSupplier.getAsLong(),
                tragedy,
                persistedSequenceNumberConsumer,
                bigArrays,
                diskIoBufferPool,
                operationListener,
                operationAsserter,
                config.fsync()
            );
        } catch (final IOException e) {
            throw new TranslogException(shardId, "failed to create new translog file", e);
        }
        return newWriter;
    }
    /**
     * Adds an operation to the transaction log.
     *
     * @param operation the operation to add
     * @return the location of the operation in the translog
     * @throws IOException if adding the operation to the translog resulted in an I/O exception
     */
    public Location add(final Operation operation) throws IOException {
        try (RecyclerBytesStreamOutput out = new RecyclerBytesStreamOutput(bigArrays.bytesRefRecycler())) {
            // serialize header and compute the checksum outside the lock to keep the critical section small
            writeHeaderWithSize(out, operation);
            final BytesReference header = out.bytes();
            Serialized serialized = Serialized.create(
                header,
                operation instanceof Index index ? ReleasableBytesReference.unwrap(index.source()) : null,
                new CRC32()
            );
            readLock.lock();
            try {
                ensureOpen();
                // an operation from a newer primary term must never be appended behind the current writer's term
                if (operation.primaryTerm() > current.getPrimaryTerm()) {
                    assert false
                        : "Operation term is newer than the current term; "
                            + "current term["
                            + current.getPrimaryTerm()
                            + "], operation term["
                            + operation
                            + "]";
                    throw new IllegalArgumentException(
                        "Operation term is newer than the current term; "
                            + "current term["
                            + current.getPrimaryTerm()
                            + "], operation term["
                            + operation
                            + "]"
                    );
                }
                return current.add(serialized, operation.seqNo());
            } finally {
                readLock.unlock();
            }
        } catch (final AlreadyClosedException | IOException ex) {
            // I/O or closed-writer failures are tragic: close the translog before rethrowing as-is
            closeOnTragicEvent(ex);
            throw ex;
        } catch (final Exception ex) {
            closeOnTragicEvent(ex);
            throw new TranslogException(shardId, "Failed to write operation [" + operation + "]", ex);
        }
    }
    /**
     * A translog operation serialized into its on-disk form: a header, an optional source, and a CRC32
     * checksum over both. {@code length} is the total byte length including the trailing 4-byte checksum.
     */
    public record Serialized(BytesReference header, @Nullable BytesReference source, int length, int checksum) {
        public Serialized(BytesReference header, @Nullable BytesReference source, int checksum) {
            // total length = header + optional source + 4 checksum bytes
            this(header, source, header.length() + (source == null ? 0 : source.length()) + 4, checksum);
        }

        /** Returns header [+ source] + 4 checksum bytes as a single composite reference. */
        public BytesReference toBytesReference() throws IOException {
            byte[] checksumBytes = new byte[4];
            DataOutput out = EndiannessReverserUtil.wrapDataOutput(new ByteArrayDataOutput(checksumBytes));
            out.writeInt(checksum);
            // NOTE: this local intentionally shadows the record component of the same name
            BytesArray checksum = new BytesArray(checksumBytes);
            return source == null ? CompositeBytesReference.of(header, checksum) : CompositeBytesReference.of(header, source, checksum);
        }

        /**
         * Builds a {@code Serialized} from the given header and optional source, feeding both into the
         * supplied checksum. The first 4 header bytes are excluded from the checksum.
         */
        public static Serialized create(BytesReference header, @Nullable BytesReference source, Checksum checksum) throws IOException {
            int length = header.length() + 4;
            updateChecksum(header, checksum, 4);
            if (source != null) {
                updateChecksum(source, checksum, 0);
                length += source.length();
            }
            return new Serialized(header, source, length, (int) checksum.getValue());
        }

        // Feeds bytes into the checksum, skipping the first bytesToSkip bytes; handles both array-backed
        // and paged (iterator) references. For paged input the skip is consumed across leading slices.
        private static void updateChecksum(BytesReference bytes, Checksum checksum, final int bytesToSkip) throws IOException {
            if (bytes.hasArray()) {
                checksum.update(bytes.array(), bytes.arrayOffset() + bytesToSkip, bytes.length() - bytesToSkip);
            } else {
                int offset = bytesToSkip;
                BytesRefIterator iterator = bytes.iterator();
                BytesRef slice;
                while ((slice = iterator.next()) != null) {
                    int toSkip = Math.min(offset, slice.length);
                    checksum.update(slice.bytes, slice.offset + toSkip, slice.length - toSkip);
                    offset -= toSkip;
                }
            }
        }

        /** Writes header [+ source] followed by the 4-byte checksum into the given buffer. */
        public void writeToTranslogBuffer(RecyclerBytesStreamOutput buffer) throws IOException {
            header.writeTo(buffer);
            if (source != null) {
                source.writeTo(buffer);
            }
            buffer.writeInt(checksum);
        }
    }
/**
* Tests whether or not the translog generation should be rolled to a new generation. This test
* is based on the size of the current generation compared to the configured generation
* threshold size.
*
* @return {@code true} if the current generation should be rolled to a new generation
*/
public boolean shouldRollGeneration() {
final long threshold = this.indexSettings.getGenerationThresholdSize().getBytes();
readLock.lock();
try {
return this.current.sizeInBytes() > threshold;
} finally {
readLock.unlock();
}
}
    /**
     * Returns a {@linkplain Location} that will sort after the {@linkplain Location} returned by the last write but before any
     * locations which can be returned by the next write.
     */
    public Location getLastWriteLocation() {
        readLock.lock();
        try {
            /*
             * We use position = current - 1 and size = Integer.MAX_VALUE here instead of position current and size = 0 for two reasons:
             * 1. Translog.Location's compareTo doesn't actually pay attention to size even though it's equals method does.
             * 2. It feels more right to return a *position* that is before the next write's position rather than rely on the size.
             */
            return new Location(current.generation, current.sizeInBytes() - 1, Integer.MAX_VALUE);
        } finally {
            readLock.unlock();
        }
    }
    /**
     * The global checkpoint recorded in the last synced checkpoint of this translog.
     *
     * @return the last synced global checkpoint
     */
    public long getLastSyncedGlobalCheckpoint() {
        return getLastSyncedCheckpoint().globalCheckpoint;
    }
    /** Returns the last checkpoint the current writer synced to disk. */
    final Checkpoint getLastSyncedCheckpoint() {
        readLock.lock();
        try {
            return current.getLastSyncedCheckpoint();
        } finally {
            readLock.unlock();
        }
    }
    // for testing: snapshot over the full sequence-number range
    public Snapshot newSnapshot() throws IOException {
        return newSnapshot(0, Long.MAX_VALUE);
    }
    /**
     * Creates a new translog snapshot containing operations from the given range.
     *
     * @param fromSeqNo the lower bound of the range (inclusive)
     * @param toSeqNo   the upper bound of the range (inclusive)
     * @return the new snapshot
     */
    public Snapshot newSnapshot(long fromSeqNo, long toSeqNo) throws IOException {
        assert fromSeqNo <= toSeqNo : fromSeqNo + " > " + toSeqNo;
        assert fromSeqNo >= 0 : "from_seq_no must be non-negative " + fromSeqNo;
        readLock.lock();
        try {
            ensureOpen();
            // only include generations whose seq# range overlaps [fromSeqNo, toSeqNo]
            TranslogSnapshot[] snapshots = Stream.concat(readers.stream(), Stream.of(current))
                .filter(reader -> reader.getCheckpoint().minSeqNo <= toSeqNo && fromSeqNo <= reader.getCheckpoint().maxEffectiveSeqNo())
                .map(BaseTranslogReader::newSnapshot)
                .toArray(TranslogSnapshot[]::new);
            final Snapshot snapshot = newMultiSnapshot(snapshots);
            // the per-generation filter above is coarse; this wrapper drops individual ops outside the range
            return new SeqNoFilterSnapshot(snapshot, fromSeqNo, toSeqNo);
        } finally {
            readLock.unlock();
        }
    }
    /**
     * Reads and returns the operation from the given location if the generation it references is still available. Otherwise
     * this method will return <code>null</code>.
     */
    public Operation readOperation(Location location) throws IOException {
        try {
            readLock.lock();
            try {
                ensureOpen();
                if (location.generation < getMinFileGeneration()) {
                    // the generation holding this location has already been trimmed away
                    return null;
                }
                if (current.generation == location.generation) {
                    // no need to fsync here the read operation will ensure that buffers are written to disk
                    // if they are still in RAM and we are reading onto that position
                    return current.read(location);
                } else {
                    // read backwards - it's likely we need to read on that is recent
                    for (int i = readers.size() - 1; i >= 0; i--) {
                        TranslogReader translogReader = readers.get(i);
                        if (translogReader.generation == location.generation) {
                            return translogReader.read(location);
                        }
                    }
                }
            } finally {
                readLock.unlock();
            }
        } catch (final Exception ex) {
            // treat read failures as tragic: close the translog before rethrowing
            closeOnTragicEvent(ex);
            throw ex;
        }
        // generation was >= min but no reader matched it exactly
        return null;
    }
private Snapshot newMultiSnapshot(TranslogSnapshot[] snapshots) throws IOException {
final Closeable onClose;
if (snapshots.length == 0) {
onClose = () -> {};
} else {
assert Arrays.stream(snapshots).map(BaseTranslogReader::getGeneration).min(Long::compareTo).get() == snapshots[0].generation
: "first reader generation of " + snapshots + " is not the smallest";
onClose = acquireTranslogGenFromDeletionPolicy(snapshots[0].generation);
}
boolean success = false;
try {
Snapshot result = new MultiSnapshot(snapshots, onClose);
success = true;
return result;
} finally {
if (success == false) {
onClose.close();
}
}
}
/**
* Acquires a lock on the translog files, preventing them from being trimmed
*/
public Closeable acquireRetentionLock() {
readLock.lock();
try {
ensureOpen();
final long viewGen = getMinFileGeneration();
return acquireTranslogGenFromDeletionPolicy(viewGen);
} finally {
readLock.unlock();
}
}
    /**
     * Acquires a retention lock on the given generation from the deletion policy. Closing the returned
     * {@link Closeable} releases the lock and then trims readers / closes files that are no longer referenced.
     */
    private Closeable acquireTranslogGenFromDeletionPolicy(long viewGen) {
        Releasable toClose = deletionPolicy.acquireTranslogGen(viewGen);
        return () -> {
            try {
                toClose.close();
            } finally {
                // release first, then clean up anything the released generation was keeping alive
                trimUnreferencedReaders();
                closeFilesIfNoPendingRetentionLocks();
            }
        };
    }
    /**
     * Sync's the translog: fsyncs the current writer. A no-op once the translog is closed;
     * any failure closes the translog via {@link #closeOnTragicEvent} before rethrowing.
     */
    public void sync() throws IOException {
        try {
            readLock.lock();
            try {
                if (closed.get() == false) {
                    current.sync();
                }
            } finally {
                readLock.unlock();
            }
        } catch (final Exception ex) {
            closeOnTragicEvent(ex);
            throw ex;
        }
    }
    /**
     * Returns <code>true</code> if an fsync is required to ensure durability of the translog's operations or its metadata.
     */
    public boolean syncNeeded() {
        readLock.lock();
        try {
            return current.syncNeeded();
        } finally {
            readLock.unlock();
        }
    }
    /** Returns the translog file name for the given generation (translog file prefix + generation + translog file suffix). */
    public static String getFilename(long generation) {
        return TRANSLOG_FILE_PREFIX + generation + TRANSLOG_FILE_SUFFIX;
    }
    /** Returns the committed checkpoint file name for the given generation (translog file prefix + generation + checkpoint suffix). */
    static String getCommitCheckpointFileName(long generation) {
        return TRANSLOG_FILE_PREFIX + generation + CHECKPOINT_SUFFIX;
    }
    /**
     * Trims translog for terms of files below <code>belowTerm</code> and seq# above <code>aboveSeqNo</code>.
     * Effectively it moves max visible seq# {@link Checkpoint#trimmedAboveSeqNo} therefore {@link TranslogSnapshot} skips those operations.
     *
     * @throws IllegalArgumentException if {@code belowTerm} is above the current writer's primary term
     * @throws IOException if rewriting a reader fails; the translog is closed as tragic in that case
     */
    public void trimOperations(long belowTerm, long aboveSeqNo) throws IOException {
        assert aboveSeqNo >= SequenceNumbers.NO_OPS_PERFORMED : "aboveSeqNo has to a valid sequence number";
        writeLock.lock();
        try {
            ensureOpen();
            if (current.getPrimaryTerm() < belowTerm) {
                throw new IllegalArgumentException(
                    "Trimming the translog can only be done for terms lower than the current one. "
                        + "Trim requested for term [ "
                        + belowTerm
                        + " ] , current is [ "
                        + current.getPrimaryTerm()
                        + " ]"
                );
            }
            // we assume that the current translog generation doesn't have trimmable ops. Verify that.
            assert current.assertNoSeqAbove(belowTerm, aboveSeqNo);
            // update all existed ones (if it is necessary) as checkpoint and reader are immutable
            final List<TranslogReader> newReaders = new ArrayList<>(readers.size());
            try {
                for (TranslogReader reader : readers) {
                    // only readers from older terms are rewritten; others are kept as-is
                    final TranslogReader newReader = reader.getPrimaryTerm() < belowTerm
                        ? reader.closeIntoTrimmedReader(aboveSeqNo, getChannelFactory())
                        : reader;
                    newReaders.add(newReader);
                }
            } catch (IOException e) {
                // a failed rewrite leaves the reader set in an unknown state - treat as tragic
                IOUtils.closeWhileHandlingException(newReaders);
                tragedy.setTragicException(e);
                closeOnTragicEvent(e);
                throw e;
            }
            // swap in the (possibly rewritten) readers only after all rewrites succeeded
            this.readers.clear();
            this.readers.addAll(newReaders);
        } finally {
            writeLock.unlock();
        }
    }
    /**
     * Ensures that the given location and global checkpoint have been synced / written to the underlying storage.
     *
     * @return Returns <code>true</code> iff this call caused an actual sync operation otherwise <code>false</code>
     */
    public boolean ensureSynced(Location location, long globalCheckpoint) throws IOException {
        try {
            readLock.lock();
            try {
                // if we have a new generation and the persisted global checkpoint is greater than or equal to the
                // requested global checkpoint, everything is already synced and nothing needs to be done here
                long persistedGlobalCheckpoint = current.getLastSyncedCheckpoint().globalCheckpoint;
                if (location.generation == current.getGeneration() || persistedGlobalCheckpoint < globalCheckpoint) {
                    ensureOpen();
                    // sync up to the end of the given location (start + size)
                    return current.syncUpTo(location.translogLocation + location.size, globalCheckpoint);
                }
            } finally {
                readLock.unlock();
            }
        } catch (final Exception ex) {
            closeOnTragicEvent(ex);
            throw ex;
        }
        return false;
    }
    /**
     * Closes the translog if the current translog writer experienced a tragic exception.
     *
     * Note that in case this thread closes the translog it must not already be holding a read lock on the translog as it will acquire a
     * write lock in the course of closing the translog
     *
     * @param ex if an exception occurs closing the translog, it will be suppressed into the provided exception
     */
    protected void closeOnTragicEvent(final Exception ex) {
        // we can not hold a read lock here because closing will attempt to obtain a write lock and that would result in self-deadlock
        if (tragedy.get() != null) {
            try {
                close();
            } catch (final AlreadyClosedException inner) {
                /*
                 * Don't do anything in this case. The AlreadyClosedException comes from TranslogWriter and we should not add it as
                 * suppressed because it will contain the provided exception as its cause. See also
                 * https://github.com/elastic/elasticsearch/issues/15941.
                 */
            } catch (final Exception inner) {
                // suppressing our own cause would create a reference cycle
                assert ex != inner.getCause();
                ex.addSuppressed(inner);
            }
        }
    }
    /**
     * Returns translog statistics: total and uncommitted operation counts and sizes, plus the age of the
     * oldest translog file.
     */
    public TranslogStats stats() {
        // acquire lock to make the two numbers roughly consistent (no file change half way)
        readLock.lock();
        try {
            // operations at or above this generation are not yet covered by the safe Lucene commit
            final long uncommittedGen = minGenerationForSeqNo(deletionPolicy.getLocalCheckpointOfSafeCommit() + 1, current, readers);
            return new TranslogStats(
                totalOperations(),
                sizeInBytes(),
                totalOperationsByMinGen(uncommittedGen),
                sizeInBytesByMinGen(uncommittedGen),
                earliestLastModifiedAge()
            );
        } finally {
            readLock.unlock();
        }
    }
    /** Returns the configuration this translog was created with. */
    public TranslogConfig getConfig() {
        return config;
    }
    // public for testing
    /** Returns the deletion policy that decides which translog generations may be removed. */
    public TranslogDeletionPolicy getDeletionPolicy() {
        return deletionPolicy;
    }
public record Location(long generation, long translogLocation, int size) implements Comparable<Location> {
public static final Location EMPTY = new Location(0, 0, 0);
@Override
public String toString() {
return "[generation: " + generation + ", location: " + translogLocation + ", size: " + size + "]";
}
@Override
public int compareTo(Location o) {
int result = Long.compare(generation, o.generation);
if (result == 0) {
result = Long.compare(translogLocation, o.translogLocation);
}
return result;
}
}
/**
* A snapshot of the transaction log, allows to iterate over all the transaction log operations.
*/
public
|
Translog
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/registry/ModelRegistry.java
|
{
"start": 57555,
"end": 58253
}
|
class ____ extends MetadataTask {
private final Map<String, MinimalServiceSettings> fromIndex;
UpgradeModelsMetadataTask(
ProjectId projectId,
Map<String, MinimalServiceSettings> fromIndex,
ActionListener<AcknowledgedResponse> listener
) {
super(projectId, listener);
this.fromIndex = fromIndex;
}
@Override
ModelRegistryMetadata executeTask(ModelRegistryMetadata current) {
return current.withUpgradedModels(fromIndex);
}
}
public record ModelAndSettings(String inferenceEntityId, MinimalServiceSettings settings) {}
private static
|
UpgradeModelsMetadataTask
|
java
|
apache__camel
|
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/NitriteComponentBuilderFactory.java
|
{
"start": 1793,
"end": 5282
}
|
interface ____ extends ComponentBuilder<NitriteComponent> {
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default NitriteComponentBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: producer
*
* @param lazyStartProducer the value to set
* @return the dsl builder
*/
default NitriteComponentBuilder lazyStartProducer(boolean lazyStartProducer) {
doSetProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether autowiring is enabled. This is used for automatic autowiring
* options (the option must be marked as autowired) by looking up in the
* registry to find if there is a single instance of matching type,
* which then gets configured on the component. This can be used for
* automatic configuring JDBC data sources, JMS connection factories,
* AWS Clients, etc.
*
* The option is a: <code>boolean</code> type.
*
* Default: true
* Group: advanced
*
* @param autowiredEnabled the value to set
* @return the dsl builder
*/
default NitriteComponentBuilder autowiredEnabled(boolean autowiredEnabled) {
doSetProperty("autowiredEnabled", autowiredEnabled);
return this;
}
}
|
NitriteComponentBuilder
|
java
|
apache__camel
|
core/camel-core-model/src/main/java/org/apache/camel/builder/TemplatedRouteBuilder.java
|
{
"start": 1311,
"end": 6892
}
|
class ____ {
private final CamelContext camelContext;
private final String routeTemplateId;
private final RouteTemplateContext routeTemplateContext;
private String routeId;
private String prefixId;
private Consumer<RouteTemplateDefinition> handler;
private Consumer<RouteTemplateContext> configurer;
private TemplatedRouteBuilder(CamelContext camelContext, String routeTemplateId) {
this.camelContext = camelContext;
this.routeTemplateId = routeTemplateId;
this.routeTemplateContext = new DefaultRouteTemplateContext(camelContext);
}
/**
* Creates a new {@link TemplatedRouteBuilder} to specify input parameters, and others, for the route template.
*
* @param camelContext the camel context
* @param routeTemplateId the id of the route template
* @return the builder
*/
public static TemplatedRouteBuilder builder(CamelContext camelContext, String routeTemplateId) {
return new TemplatedRouteBuilder(camelContext, routeTemplateId);
}
/**
* Sets the id of the route. If no route id is configured, then Camel will auto assign a route id, which is returned
* from the build method.
*
* @param routeId the route id
*/
public TemplatedRouteBuilder routeId(String routeId) {
this.routeId = routeId;
return this;
}
/**
* Sets a prefix to use for all node ids (not route id).
*
* @param prefixId the prefix id
*/
public TemplatedRouteBuilder prefixId(String prefixId) {
this.prefixId = prefixId;
return this;
}
/**
* Adds a parameter the route template will use when creating the route.
*
* @param name parameter name
* @param value parameter value
*/
public TemplatedRouteBuilder parameter(String name, Object value) {
routeTemplateContext.setParameter(name, value);
return this;
}
/**
* Adds parameters the route template will use when creating the route.
*
* @param parameters the template parameters to add
*/
public TemplatedRouteBuilder parameters(Map<String, Object> parameters) {
parameters.forEach(routeTemplateContext::setParameter);
return this;
}
/**
* Binds the bean to the template local repository (takes precedence over global beans)
*
* @param id the id of the bean
* @param bean the bean
*/
public TemplatedRouteBuilder bean(String id, Object bean) {
routeTemplateContext.bind(id, bean);
return this;
}
/**
* Binds the bean to the template local repository (takes precedence over global beans)
*
* @param id the id of the bean
* @param type the type of the bean to associate the binding
* @param bean the bean
*/
public TemplatedRouteBuilder bean(String id, Class<?> type, Object bean) {
routeTemplateContext.bind(id, type, bean);
return this;
}
/**
* Binds the bean (via a supplier) to the template local repository (takes precedence over global beans)
*
* @param id the id of the bean
* @param type the type of the bean to associate the binding
* @param bean the bean
*/
public TemplatedRouteBuilder bean(String id, Class<?> type, Supplier<Object> bean) {
routeTemplateContext.bind(id, type, bean);
return this;
}
/**
* Sets a handler which gives access to the route template model that will be used for creating the route. This can
* be used to do validation. Any changes to the model happens before the route is created and added, however these
* changes affect future usage of the same template.
*
* @param handler the handler with callback to invoke with the given route template
*/
public TemplatedRouteBuilder handler(Consumer<RouteTemplateDefinition> handler) {
this.handler = handler;
return this;
}
/**
* Sets a configurer which allows to do configuration while the route template is being used to create a route. This
* gives control over the creating process, such as binding local beans and doing other kind of customization.
*
* @param configurer the configurer with callback to invoke with the given route template context
*/
public TemplatedRouteBuilder configure(Consumer<RouteTemplateContext> configurer) {
this.configurer = configurer;
return this;
}
/**
* Adds the route to the {@link CamelContext} which is built from the configured route template.
*
* @return the route id of the route that was added.
*/
public String add() {
try {
if (handler != null) {
RouteTemplateDefinition def
= ((ModelCamelContext) camelContext).getRouteTemplateDefinition(routeTemplateId);
if (def == null) {
throw new IllegalArgumentException("Cannot find RouteTemplate with id " + routeTemplateId);
}
handler.accept(def);
}
// configurer is executed later controlled by the route template context
if (configurer != null) {
routeTemplateContext.setConfigurer(configurer);
}
return camelContext.addRouteFromTemplate(routeId, routeTemplateId, prefixId, routeTemplateContext);
} catch (Exception e) {
throw RuntimeCamelException.wrapRuntimeException(e);
}
}
}
|
TemplatedRouteBuilder
|
java
|
spring-projects__spring-boot
|
core/spring-boot/src/test/java/org/springframework/boot/context/properties/bind/ValueObjectBinderTests.java
|
{
"start": 22529,
"end": 22871
}
|
class ____ {
private final String foo;
private final String bar;
ValidatingConstructorBean(String foo, String bar) {
Assert.notNull(foo, "'foo' must not be null");
this.foo = foo;
this.bar = bar;
}
String getFoo() {
return this.foo;
}
String getBar() {
return this.bar;
}
}
static
|
ValidatingConstructorBean
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/nullness/ReturnMissingNullableTest.java
|
{
"start": 28511,
"end": 29059
}
|
class ____ {
Object get() {
final Object nullObject;
nullObject = null;
return nullObject;
}
}
""")
.doTest();
}
@Test
public void limitation_returnThisXInsideIfNull() {
createCompilationTestHelper()
.addSourceLines(
"com/google/errorprone/bugpatterns/nullness/LiteralNullReturnTest.java",
"""
package com.google.errorprone.bugpatterns.nullness;
abstract
|
LiteralNullReturnTest
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/bug/Issue1005.java
|
{
"start": 1158,
"end": 1215
}
|
class ____ {
public List<Byte> values;
}
}
|
Model2
|
java
|
spring-projects__spring-framework
|
spring-web/src/test/java/org/springframework/http/converter/xml/Jaxb2RootElementHttpMessageConverterTests.java
|
{
"start": 11445,
"end": 11714
}
|
class ____ {
private Type type = new Type();
@XmlElement(required=false)
public String external;
public Type getType() {
return this.type;
}
@XmlElement
public void setType(Type type) {
this.type = type;
}
}
@XmlType
public static
|
RootElement
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java
|
{
"start": 6497,
"end": 8023
}
|
class ____ implements Iterator<T> {
/** The starting modification for fail-fast. */
private final int startModification = modification;
/** The next element to return. */
private DoubleLinkedElement<T> next = head;
@Override
public boolean hasNext() {
return next != null;
}
@Override
public T next() {
if (modification != startModification) {
throw new ConcurrentModificationException("modification="
+ modification + " != startModification = " + startModification);
}
if (next == null) {
throw new NoSuchElementException();
}
final T e = next.element;
// find the next element
next = next.after;
return e;
}
@Override
public void remove() {
throw new UnsupportedOperationException("Remove is not supported.");
}
}
/**
* Clear the set. Resize it to the original capacity.
*/
@Override
public void clear() {
super.clear();
this.head = null;
this.tail = null;
this.resetBookmark();
}
/**
* Returns a new iterator starting at the bookmarked element.
*
* @return the iterator to the bookmarked element.
*/
public Iterator<T> getBookmark() {
LinkedSetIterator toRet = new LinkedSetIterator();
toRet.next = this.bookmark.next;
this.bookmark = toRet;
return toRet;
}
/**
* Resets the bookmark to the beginning of the list.
*/
public void resetBookmark() {
this.bookmark.next = this.head;
}
}
|
LinkedSetIterator
|
java
|
apache__camel
|
core/camel-core-model/src/main/java/org/apache/camel/model/OutputNode.java
|
{
"start": 924,
"end": 1019
}
|
interface ____ mark a {@link ProcessorDefinition} that supports outputs
*/
@XmlTransient
public
|
to
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java
|
{
"start": 794,
"end": 2546
}
|
class ____ extends ActionTransportException {
public ConnectTransportException(DiscoveryNode node, String msg) {
this(node, msg, null, null);
}
public ConnectTransportException(DiscoveryNode node, String msg, String action) {
this(node, msg, action, null);
}
public ConnectTransportException(DiscoveryNode node, String msg, Throwable cause) {
this(node, msg, null, cause);
}
public ConnectTransportException(DiscoveryNode node, String msg, String action, Throwable cause) {
super(node == null ? null : node.getName(), node == null ? null : node.getAddress(), action, msg, cause);
}
public ConnectTransportException(StreamInput in) throws IOException {
super(in);
if (in.getTransportVersion().before(TransportVersions.V_8_1_0)) {
in.readOptionalWriteable(DiscoveryNode::new);
}
}
/**
* The ES REST API is a gateway to a single or multiple clusters. If there is an error connecting to other servers, then we should
* return a 502 BAD_GATEWAY status code instead of the parent class' 500 INTERNAL_SERVER_ERROR. Clients tend to retry on a 502 but not
* on a 500, and retrying may help on a connection error.
*
* @return a {@link RestStatus#BAD_GATEWAY} code
*/
@Override
public final RestStatus status() {
return RestStatus.BAD_GATEWAY;
}
@Override
protected void writeTo(StreamOutput out, Writer<Throwable> nestedExceptionsWriter) throws IOException {
super.writeTo(out, nestedExceptionsWriter);
if (out.getTransportVersion().before(TransportVersions.V_8_1_0)) {
out.writeMissingWriteable(DiscoveryNode.class);
}
}
}
|
ConnectTransportException
|
java
|
google__guava
|
android/guava/src/com/google/common/collect/TableCollectors.java
|
{
"start": 1217,
"end": 5037
}
|
class ____ {
static <T extends @Nullable Object, R, C, V>
Collector<T, ?, ImmutableTable<R, C, V>> toImmutableTable(
Function<? super T, ? extends R> rowFunction,
Function<? super T, ? extends C> columnFunction,
Function<? super T, ? extends V> valueFunction) {
checkNotNull(rowFunction, "rowFunction");
checkNotNull(columnFunction, "columnFunction");
checkNotNull(valueFunction, "valueFunction");
return Collector.of(
(Supplier<ImmutableTable.Builder<R, C, V>>) ImmutableTable.Builder::new,
(builder, t) ->
builder.put(rowFunction.apply(t), columnFunction.apply(t), valueFunction.apply(t)),
ImmutableTable.Builder::combine,
ImmutableTable.Builder::buildOrThrow);
}
static <T extends @Nullable Object, R, C, V>
Collector<T, ?, ImmutableTable<R, C, V>> toImmutableTable(
Function<? super T, ? extends R> rowFunction,
Function<? super T, ? extends C> columnFunction,
Function<? super T, ? extends V> valueFunction,
BinaryOperator<V> mergeFunction) {
checkNotNull(rowFunction, "rowFunction");
checkNotNull(columnFunction, "columnFunction");
checkNotNull(valueFunction, "valueFunction");
checkNotNull(mergeFunction, "mergeFunction");
/*
* No mutable Table exactly matches the insertion order behavior of ImmutableTable.Builder, but
* the Builder can't efficiently support merging of duplicate values. Getting around this
* requires some work.
*/
return Collector.of(
ImmutableTableCollectorState<R, C, V>::new,
(state, input) ->
state.put(
rowFunction.apply(input),
columnFunction.apply(input),
valueFunction.apply(input),
mergeFunction),
(s1, s2) -> s1.combine(s2, mergeFunction),
state -> state.toTable());
}
static <
T extends @Nullable Object,
R extends @Nullable Object,
C extends @Nullable Object,
V,
I extends Table<R, C, V>>
Collector<T, ?, I> toTable(
Function<? super T, ? extends R> rowFunction,
Function<? super T, ? extends C> columnFunction,
Function<? super T, ? extends V> valueFunction,
Supplier<I> tableSupplier) {
return TableCollectors.<T, R, C, V, I>toTable(
rowFunction,
columnFunction,
valueFunction,
(v1, v2) -> {
throw new IllegalStateException("Conflicting values " + v1 + " and " + v2);
},
tableSupplier);
}
static <
T extends @Nullable Object,
R extends @Nullable Object,
C extends @Nullable Object,
V,
I extends Table<R, C, V>>
Collector<T, ?, I> toTable(
Function<? super T, ? extends R> rowFunction,
Function<? super T, ? extends C> columnFunction,
Function<? super T, ? extends V> valueFunction,
BinaryOperator<V> mergeFunction,
Supplier<I> tableSupplier) {
checkNotNull(rowFunction);
checkNotNull(columnFunction);
checkNotNull(valueFunction);
checkNotNull(mergeFunction);
checkNotNull(tableSupplier);
return Collector.of(
tableSupplier,
(table, input) ->
mergeTables(
table,
rowFunction.apply(input),
columnFunction.apply(input),
valueFunction.apply(input),
mergeFunction),
(table1, table2) -> {
for (Table.Cell<R, C, V> cell2 : table2.cellSet()) {
mergeTables(
table1, cell2.getRowKey(), cell2.getColumnKey(), cell2.getValue(), mergeFunction);
}
return table1;
});
}
private static final
|
TableCollectors
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationToken.java
|
{
"start": 604,
"end": 720
}
|
interface ____ {
String principal();
Object credentials();
void clearCredentials();
}
|
AuthenticationToken
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/pool/PasswordCallbackTest.java
|
{
"start": 910,
"end": 1776
}
|
class ____ extends TestCase {
protected void setUp() throws Exception {
DruidDataSourceStatManager.clear();
}
protected void tearDown() throws Exception {
assertEquals(0, DruidDataSourceStatManager.getInstance().getDataSourceList().size());
}
public void test_0() throws Exception {
DruidDataSource dataSource = new DruidDataSource();
dataSource.setUrl("jdbc:mock:");
TestPasswordCallback passwordCallback = new TestPasswordCallback();
dataSource.setPasswordCallback(passwordCallback);
Connection conn = dataSource.getConnection();
conn.close();
assertEquals(dataSource.getUrl(), passwordCallback.getUrl());
assertEquals(dataSource.getConnectProperties(), passwordCallback.getProperties());
dataSource.close();
}
public static
|
PasswordCallbackTest
|
java
|
redisson__redisson
|
redisson-spring-data/redisson-spring-data-22/src/main/java/org/redisson/spring/data/connection/RedissonStreamCommands.java
|
{
"start": 7670,
"end": 8768
}
|
class ____ implements MultiDecoder<List<ByteRecord>> {
@Override
public List<ByteRecord> decode(List<Object> parts, State state) {
List<List<Object>> list = (List<List<Object>>) (Object) parts;
List<ByteRecord> result = new ArrayList<>(parts.size()/2);
for (List<Object> entries : list) {
List<List<Object>> streamEntries = (List<List<Object>>) entries.get(1);
if (streamEntries.isEmpty()) {
continue;
}
String name = (String) entries.get(0);
for (List<Object> se : streamEntries) {
ByteRecord record = StreamRecords.newRecord()
.in(name.getBytes())
.withId(RecordId.of(se.get(0).toString()))
.ofBytes((Map<byte[], byte[]>) se.get(1));
result.add(record);
}
}
return result;
}
}
private static
|
ByteRecordReplayDecoder2
|
java
|
spring-projects__spring-framework
|
spring-webflux/src/main/java/org/springframework/web/reactive/function/server/DefaultServerResponseBuilder.java
|
{
"start": 11279,
"end": 11986
}
|
class ____ extends AbstractServerResponse {
private final BiFunction<ServerWebExchange, Context, Mono<Void>> writeFunction;
public WriterFunctionResponse(HttpStatusCode statusCode, HttpHeaders headers,
MultiValueMap<String, ResponseCookie> cookies,
BiFunction<ServerWebExchange, Context, Mono<Void>> writeFunction) {
super(statusCode, headers, cookies, Collections.emptyMap());
Assert.notNull(writeFunction, "BiFunction must not be null");
this.writeFunction = writeFunction;
}
@Override
protected Mono<Void> writeToInternal(ServerWebExchange exchange, Context context) {
return this.writeFunction.apply(exchange, context);
}
}
private static final
|
WriterFunctionResponse
|
java
|
quarkusio__quarkus
|
extensions/grpc/deployment/src/test/java/io/quarkus/grpc/server/blocking/inheritance/ClassLevelBlockingTest.java
|
{
"start": 2236,
"end": 3091
}
|
class ____ implements BlockingTestService {
@Override
public Uni<Msg> overridden1(Msg request) {
return isBlocking();
}
@Override
public Uni<Msg> overridden2(Msg request) {
return isBlocking();
}
@Override
public Uni<Msg> notOverridden1(Msg request) {
return isBlocking();
}
@Override
@NonBlocking
public Uni<Msg> notOverridden2(Msg request) {
return isBlocking();
}
Uni<Msg> isBlocking() {
boolean isEventLoop = Thread.currentThread().getName().contains("eventloop");
return Uni.createFrom().item(isEventLoop ? NON_BLOCKING : BLOCKING)
.map(text -> Msg.newBuilder().setText(text).build());
}
}
@NonBlocking
public static
|
ServiceA
|
java
|
apache__camel
|
core/camel-management/src/main/java/org/apache/camel/management/DefaultManagementObjectNameStrategy.java
|
{
"start": 3136,
"end": 16824
}
|
class ____ implements ManagementObjectNameStrategy, CamelContextAware {
public static final String VALUE_UNKNOWN = "unknown";
public static final String KEY_NAME = "name";
public static final String KEY_TYPE = "type";
public static final String KEY_CONTEXT = "context";
public static final String TYPE_CONTEXT = "context";
public static final String TYPE_HEALTH = "health";
public static final String TYPE_ENDPOINT = "endpoints";
public static final String TYPE_DATAFORMAT = "dataformats";
public static final String TYPE_PROCESSOR = "processors";
public static final String TYPE_CONSUMER = "consumers";
public static final String TYPE_PRODUCER = "producers";
public static final String TYPE_ROUTE = "routes";
public static final String TYPE_ROUTE_GROUP = "routegroups";
public static final String TYPE_COMPONENT = "components";
public static final String TYPE_STEP = "steps";
public static final String TYPE_TRACER = "tracer";
public static final String TYPE_EVENT_NOTIFIER = "eventnotifiers";
public static final String TYPE_THREAD_POOL = "threadpools";
public static final String TYPE_SERVICE = "services";
public static final String TYPE_HA = "clusterservices";
protected String domainName;
protected String hostName = "localhost";
protected CamelContext camelContext;
public DefaultManagementObjectNameStrategy() {
this(null);
// default constructor needed for <bean> style configuration
}
public DefaultManagementObjectNameStrategy(String domainName) {
this.domainName = domainName != null ? domainName : "org.apache.camel";
try {
hostName = InetAddressUtil.getLocalHostName();
} catch (UnknownHostException ex) {
// ignore, use the default "localhost"
}
}
@Override
public CamelContext getCamelContext() {
return camelContext;
}
@Override
public void setCamelContext(CamelContext camelContext) {
this.camelContext = camelContext;
}
@Override
public ObjectName getObjectName(Object managedObject) throws MalformedObjectNameException {
if (managedObject == null) {
return null;
}
ObjectName objectName = null;
if (managedObject instanceof ManagedCamelContext mcc) {
objectName = getObjectNameForCamelContext(mcc.getContext());
} else if (managedObject instanceof ManagedCamelHealth mch) {
objectName = getObjectNameForCamelHealth(mch.getContext());
} else if (managedObject instanceof ManagedRouteController mrc) {
objectName = getObjectNameForRouteController(mrc.getContext(), mrc.getRouteController());
} else if (managedObject instanceof ManagedSupervisingRouteController mrc) {
objectName = getObjectNameForRouteController(mrc.getContext(), mrc.getRouteController());
} else if (managedObject instanceof ManagedComponent mc) {
objectName = getObjectNameForComponent(mc.getComponent(), mc.getComponentName());
} else if (managedObject instanceof ManagedDataFormat md) {
objectName = getObjectNameForDataFormat(md.getContext(), md.getDataFormat());
} else if (managedObject instanceof ManagedEndpoint me) {
objectName = getObjectNameForEndpoint(me.getEndpoint());
} else if (managedObject instanceof Endpoint endpoint) {
objectName = getObjectNameForEndpoint(endpoint);
} else if (managedObject instanceof ManagedRoute mr) {
objectName = getObjectNameForRoute(mr.getRoute());
} else if (managedObject instanceof ManagedRouteGroup mrg) {
objectName = getObjectNameForRouteGroup(mrg.getContext(), mrg.getRouteGroup());
} else if (managedObject instanceof ManagedStep mp) {
objectName = getObjectNameForStep(mp.getContext(), mp.getProcessor(), mp.getDefinition());
} else if (managedObject instanceof ManagedProcessor mp) {
objectName = getObjectNameForProcessor(mp.getContext(), mp.getProcessor(), mp.getDefinition());
} else if (managedObject instanceof ManagedConsumer ms) {
objectName = getObjectNameForConsumer(ms.getContext(), ms.getConsumer());
} else if (managedObject instanceof ManagedProducer ms) {
objectName = getObjectNameForProducer(ms.getContext(), ms.getProducer());
} else if (managedObject instanceof ManagedBacklogTracer mt) {
objectName = getObjectNameForTracer(mt.getContext(), mt.getBacklogTracer());
} else if (managedObject instanceof ManagedBacklogDebugger md) {
objectName = getObjectNameForTracer(md.getContext(), md.getBacklogDebugger());
} else if (managedObject instanceof ManagedDumpRouteStrategy md) {
objectName = getObjectNameForService(md.getContext(), md.getDumpRoutesStrategy());
} else if (managedObject instanceof ManagedEventNotifier men) {
objectName = getObjectNameForEventNotifier(men.getContext(), men.getEventNotifier());
} else if (managedObject instanceof ManagedTracer mt) {
objectName = getObjectNameForTracer(mt.getContext(), mt.getTracer());
} else if (managedObject instanceof ManagedThreadPool mes) {
objectName = getObjectNameForThreadPool(mes.getContext(), mes.getThreadPool(), mes.getId(), mes.getSourceId());
} else if (managedObject instanceof ManagedClusterService mcs) {
objectName = getObjectNameForClusterService(mcs.getContext(), mcs.getService());
} else if (managedObject instanceof ManagedService ms) {
// check for managed service should be last
// skip endpoints as they are already managed
if (ms.getService() instanceof Endpoint) {
return null;
}
objectName = getObjectNameForService(ms.getContext(), ms.getService());
}
return objectName;
}
@Override
public ObjectName getObjectNameForCamelContext(String managementName, String name) throws MalformedObjectNameException {
StringBuilder buffer = new StringBuilder();
buffer.append(domainName).append(":");
buffer.append(KEY_CONTEXT).append("=").append(getContextId(managementName)).append(",");
buffer.append(KEY_TYPE).append("=").append(TYPE_CONTEXT).append(",");
buffer.append(KEY_NAME).append("=").append(ObjectName.quote(name));
return createObjectName(buffer);
}
@Override
public ObjectName getObjectNameForCamelContext(CamelContext context) throws MalformedObjectNameException {
// prefer to use the given management name if previously assigned
String managementName = context.getManagementName();
if (managementName == null) {
managementName = context.getManagementNameStrategy().getName();
}
String name = context.getName();
return getObjectNameForCamelContext(managementName, name);
}
@Override
public ObjectName getObjectNameForCamelHealth(CamelContext context) throws MalformedObjectNameException {
// prefer to use the given management name if previously assigned
String managementName = context.getManagementName();
if (managementName == null) {
managementName = context.getManagementNameStrategy().getName();
}
StringBuilder buffer = new StringBuilder();
buffer.append(domainName).append(":");
buffer.append(KEY_CONTEXT).append("=").append(getContextId(managementName)).append(",");
buffer.append(KEY_TYPE).append("=").append(TYPE_HEALTH).append(",");
buffer.append(KEY_NAME).append("=").append("DefaultHealthCheck");
return createObjectName(buffer);
}
@Override
public ObjectName getObjectNameForRouteController(CamelContext context, RouteController routeController)
throws MalformedObjectNameException {
// prefer to use the given management name if previously assigned
String managementName = context.getManagementName();
if (managementName == null) {
managementName = context.getManagementNameStrategy().getName();
}
StringBuilder buffer = new StringBuilder();
buffer.append(domainName).append(":");
buffer.append(KEY_CONTEXT).append("=").append(getContextId(managementName)).append(",");
buffer.append(KEY_TYPE).append("=").append(TYPE_SERVICE).append(",");
buffer.append(KEY_NAME).append("=").append(routeController.getClass().getSimpleName());
return createObjectName(buffer);
}
@Override
public ObjectName getObjectNameForEndpoint(Endpoint endpoint) throws MalformedObjectNameException {
StringBuilder buffer = new StringBuilder();
buffer.append(domainName).append(":");
buffer.append(KEY_CONTEXT).append("=").append(getContextId(endpoint.getCamelContext())).append(",");
buffer.append(KEY_TYPE).append("=").append(TYPE_ENDPOINT).append(",");
buffer.append(KEY_NAME).append("=").append(ObjectName.quote(getEndpointId(endpoint)));
return createObjectName(buffer);
}
@Override
public ObjectName getObjectNameForDataFormat(CamelContext context, DataFormat dataFormat)
throws MalformedObjectNameException {
StringBuilder buffer = new StringBuilder();
buffer.append(domainName).append(":");
buffer.append(KEY_CONTEXT).append("=").append(getContextId(context)).append(",");
buffer.append(KEY_TYPE).append("=").append(TYPE_DATAFORMAT).append(",");
buffer.append(KEY_NAME).append("=").append(dataFormat.getClass().getSimpleName());
if (!(dataFormat instanceof StaticService)) {
buffer.append("(").append(ObjectHelper.getIdentityHashCode(dataFormat)).append(")");
}
return createObjectName(buffer);
}
@Override
public ObjectName getObjectNameForComponent(Component component, String name) throws MalformedObjectNameException {
StringBuilder buffer = new StringBuilder();
buffer.append(domainName).append(":");
buffer.append(KEY_CONTEXT).append("=").append(getContextId(component.getCamelContext())).append(",");
buffer.append(KEY_TYPE).append("=").append(TYPE_COMPONENT).append(",");
buffer.append(KEY_NAME).append("=").append(ObjectName.quote(name));
return createObjectName(buffer);
}
@Override
public ObjectName getObjectNameForProcessor(CamelContext context, Processor processor, NamedNode definition)
throws MalformedObjectNameException {
StringBuilder buffer = new StringBuilder();
buffer.append(domainName).append(":");
buffer.append(KEY_CONTEXT).append("=").append(getContextId(context)).append(",");
buffer.append(KEY_TYPE).append("=").append(TYPE_PROCESSOR).append(",");
String id = definition.getId();
String prefix = definition.getNodePrefixId();
if (prefix != null) {
id = prefix + id;
}
buffer.append(KEY_NAME).append("=").append(ObjectName.quote(id));
return createObjectName(buffer);
}
@Override
public ObjectName getObjectNameForStep(CamelContext context, Processor processor, NamedNode definition)
throws MalformedObjectNameException {
StringBuilder buffer = new StringBuilder();
buffer.append(domainName).append(":");
buffer.append(KEY_CONTEXT).append("=").append(getContextId(context)).append(",");
buffer.append(KEY_TYPE).append("=").append(TYPE_STEP).append(",");
String id = definition.getId();
String prefix = definition.getNodePrefixId();
if (prefix != null) {
id = prefix + id;
}
buffer.append(KEY_NAME).append("=").append(ObjectName.quote(id));
return createObjectName(buffer);
}
@Override
public ObjectName getObjectNameForConsumer(CamelContext context, Consumer consumer) throws MalformedObjectNameException {
StringBuilder buffer = new StringBuilder();
buffer.append(domainName).append(":");
buffer.append(KEY_CONTEXT).append("=").append(getContextId(context)).append(",");
buffer.append(KEY_TYPE).append("=").append(TYPE_CONSUMER).append(",");
String name = consumer.getClass().getSimpleName();
if (ObjectHelper.isEmpty(name)) {
name = "Consumer";
}
buffer.append(KEY_NAME).append("=")
.append(name)
.append("(").append(ObjectHelper.getIdentityHashCode(consumer)).append(")");
return createObjectName(buffer);
}
@Override
public ObjectName getObjectNameForProducer(CamelContext context, Producer producer) throws MalformedObjectNameException {
StringBuilder buffer = new StringBuilder();
buffer.append(domainName).append(":");
buffer.append(KEY_CONTEXT).append("=").append(getContextId(context)).append(",");
buffer.append(KEY_TYPE).append("=").append(TYPE_PRODUCER).append(",");
String name = producer.getClass().getSimpleName();
if (ObjectHelper.isEmpty(name)) {
name = "Producer";
}
buffer.append(KEY_NAME + "=")
.append(name)
.append("(").append(ObjectHelper.getIdentityHashCode(producer)).append(")");
return createObjectName(buffer);
}
@Override
public ObjectName getObjectNameForTracer(CamelContext context, Service tracer) throws MalformedObjectNameException {
// use the simple name of the
|
DefaultManagementObjectNameStrategy
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java
|
{
"start": 7810,
"end": 21471
}
|
class ____ extends FieldMapper.DimensionBuilder {
private final Parameter<Boolean> indexed;
private final Parameter<Boolean> hasDocValues = Parameter.docValuesParam(m -> toType(m).hasDocValues, true);
private final Parameter<Boolean> stored = Parameter.storeParam(m -> toType(m).fieldType.stored(), false);
private final Parameter<String> nullValue = Parameter.stringParam("null_value", false, m -> toType(m).fieldType().nullValue, null)
.acceptsNull();
private final Parameter<Boolean> eagerGlobalOrdinals = Parameter.boolParam(
"eager_global_ordinals",
true,
m -> toType(m).fieldType().eagerGlobalOrdinals(),
false
);
private final Parameter<Integer> ignoreAbove;
private final Parameter<String> indexOptions = TextParams.keywordIndexOptions(m -> toType(m).indexOptions);
private final Parameter<Boolean> hasNorms = Parameter.normsParam(m -> toType(m).fieldType.omitNorms() == false, false);
private final Parameter<SimilarityProvider> similarity = TextParams.similarity(
m -> toType(m).fieldType().getTextSearchInfo().similarity()
);
private final Parameter<String> normalizer;
private final Parameter<Boolean> normalizerSkipStoreOriginalValue;
private final Parameter<Boolean> splitQueriesOnWhitespace = Parameter.boolParam(
"split_queries_on_whitespace",
true,
m -> toType(m).splitQueriesOnWhitespace,
false
);
private final Parameter<Map<String, String>> meta = Parameter.metaParam();
private final Parameter<Script> script = Parameter.scriptParam(m -> toType(m).script);
private final Parameter<OnScriptError> onScriptError = Parameter.onScriptErrorParam(
m -> toType(m).builderParams.onScriptError(),
script
);
private final Parameter<Boolean> dimension;
private final IndexAnalyzers indexAnalyzers;
private final ScriptCompiler scriptCompiler;
private final IndexVersion indexCreatedVersion;
private final boolean forceDocValuesSkipper;
private final boolean isWithinMultiField;
private final IndexSettings indexSettings;
public Builder(final String name, final MappingParserContext mappingParserContext) {
this(
name,
mappingParserContext.getIndexAnalyzers(),
mappingParserContext.scriptCompiler(),
mappingParserContext.getIndexSettings(),
false,
mappingParserContext.isWithinMultiField()
);
}
public Builder(
String name,
IndexAnalyzers indexAnalyzers,
ScriptCompiler scriptCompiler,
IndexSettings indexSettings,
boolean forceDocValuesSkipper,
boolean isWithinMultiField
) {
super(name);
this.indexAnalyzers = indexAnalyzers;
this.scriptCompiler = Objects.requireNonNull(scriptCompiler);
this.indexCreatedVersion = indexSettings.getIndexVersionCreated();
this.normalizer = Parameter.stringParam(
"normalizer",
indexCreatedVersion.isLegacyIndexVersion(),
m -> toType(m).normalizerName,
null
).acceptsNull();
this.normalizerSkipStoreOriginalValue = Parameter.boolParam(
"normalizer_skip_store_original_value",
false,
m -> ((KeywordFieldMapper) m).isNormalizerSkipStoreOriginalValue(),
() -> "lowercase".equals(normalizer.getValue())
&& indexAnalyzers.getNormalizer(normalizer.getValue()).analyzer() instanceof LowercaseNormalizer
);
this.script.precludesParameters(nullValue);
this.dimension = TimeSeriesParams.dimensionParam(m -> toType(m).fieldType().isDimension(), hasDocValues::get)
.precludesParameters(normalizer);
this.indexed = Parameter.indexParam(m -> toType(m).indexed, indexSettings, dimension);
addScriptValidation(script, indexed, hasDocValues);
this.ignoreAbove = Parameter.ignoreAboveParam(
m -> toType(m).fieldType().ignoreAbove().get(),
IGNORE_ABOVE_SETTING.get(indexSettings.getSettings())
);
this.forceDocValuesSkipper = forceDocValuesSkipper;
this.isWithinMultiField = isWithinMultiField;
this.indexSettings = indexSettings;
}
public Builder(String name, IndexSettings indexSettings) {
this(name, null, ScriptCompiler.NONE, indexSettings, false, false);
}
public Builder(String name, IndexSettings indexSettings, boolean isWithinMultiField) {
this(name, null, ScriptCompiler.NONE, indexSettings, false, isWithinMultiField);
}
public static Builder buildWithDocValuesSkipper(String name, IndexSettings indexSettings, boolean isWithinMultiField) {
return new Builder(name, null, ScriptCompiler.NONE, indexSettings, true, isWithinMultiField);
}
public Builder ignoreAbove(int ignoreAbove) {
this.ignoreAbove.setValue(ignoreAbove);
return this;
}
Builder normalizer(String normalizerName) {
this.normalizer.setValue(normalizerName);
return this;
}
public boolean hasNormalizer() {
return this.normalizer.get() != null;
}
public boolean isNormalizerSkipStoreOriginalValue() {
return this.normalizerSkipStoreOriginalValue.getValue();
}
Builder nullValue(String nullValue) {
this.nullValue.setValue(nullValue);
return this;
}
public Builder docValues(boolean hasDocValues) {
this.hasDocValues.setValue(hasDocValues);
return this;
}
public boolean hasDocValues() {
return this.hasDocValues.get();
}
public SimilarityProvider similarity() {
return this.similarity.get();
}
public Builder dimension(boolean dimension) {
this.dimension.setValue(dimension);
return this;
}
public Builder indexed(boolean indexed) {
this.indexed.setValue(indexed);
return this;
}
public Builder stored(boolean stored) {
this.stored.setValue(stored);
return this;
}
public boolean isStored() {
return this.stored.get();
}
private FieldValues<String> scriptValues() {
if (script.get() == null) {
return null;
}
StringFieldScript.Factory scriptFactory = scriptCompiler.compile(script.get(), StringFieldScript.CONTEXT);
return scriptFactory == null
? null
: (lookup, ctx, doc, consumer) -> scriptFactory.newFactory(leafName(), script.get().getParams(), lookup, OnScriptError.FAIL)
.newInstance(ctx)
.runForDoc(doc, consumer);
}
@Override
protected Parameter<?>[] getParameters() {
return new Parameter<?>[] {
indexed,
hasDocValues,
stored,
nullValue,
eagerGlobalOrdinals,
ignoreAbove,
indexOptions,
hasNorms,
similarity,
normalizer,
normalizerSkipStoreOriginalValue,
splitQueriesOnWhitespace,
script,
onScriptError,
meta,
dimension };
}
private KeywordFieldType buildFieldType(MapperBuilderContext context, FieldType fieldType) {
NamedAnalyzer normalizer = Lucene.KEYWORD_ANALYZER;
NamedAnalyzer searchAnalyzer = Lucene.KEYWORD_ANALYZER;
NamedAnalyzer quoteAnalyzer = Lucene.KEYWORD_ANALYZER;
String normalizerName = this.normalizer.getValue();
if (normalizerName != null) {
assert indexAnalyzers != null;
normalizer = indexAnalyzers.getNormalizer(normalizerName);
if (normalizer == null) {
if (indexCreatedVersion.isLegacyIndexVersion()) {
logger.warn(
() -> format("Could not find normalizer [%s] of legacy index, falling back to default", normalizerName)
);
normalizer = Lucene.KEYWORD_ANALYZER;
} else {
throw new MapperParsingException("normalizer [" + normalizerName + "] not found for field [" + leafName() + "]");
}
}
searchAnalyzer = quoteAnalyzer = normalizer;
if (splitQueriesOnWhitespace.getValue()) {
searchAnalyzer = indexAnalyzers.getWhitespaceNormalizer(normalizerName);
}
} else if (splitQueriesOnWhitespace.getValue()) {
searchAnalyzer = Lucene.WHITESPACE_ANALYZER;
}
if (inheritDimensionParameterFromParentObject(context)) {
dimension(true);
}
return new KeywordFieldType(
context.buildFullName(leafName()),
IndexType.terms(fieldType),
new TextSearchInfo(fieldType, similarity.get(), searchAnalyzer, quoteAnalyzer),
normalizer,
this,
context.isSourceSynthetic()
);
}
@Override
public KeywordFieldMapper build(MapperBuilderContext context) {
FieldType fieldtype = resolveFieldType(forceDocValuesSkipper, context.buildFullName(leafName()));
super.hasScript = script.get() != null;
super.onScriptError = onScriptError.getValue();
String offsetsFieldName = getOffsetsFieldName(
context,
indexSettings.sourceKeepMode(),
hasDocValues.getValue(),
stored.getValue(),
this,
indexCreatedVersion,
IndexVersions.SYNTHETIC_SOURCE_STORE_ARRAYS_NATIVELY_KEYWORD
);
return new KeywordFieldMapper(
leafName(),
fieldtype,
buildFieldType(context, fieldtype),
builderParams(this, context),
this,
offsetsFieldName
);
}
private FieldType resolveFieldType(final boolean forceDocValuesSkipper, final String fullFieldName) {
FieldType fieldtype = new FieldType(Defaults.FIELD_TYPE);
if (forceDocValuesSkipper
|| shouldUseHostnameSkipper(fullFieldName)
|| shouldUseTimeSeriesSkipper()
|| shouldUseStandardSkipper()) {
fieldtype = new FieldType(Defaults.FIELD_TYPE_WITH_SKIP_DOC_VALUES);
}
fieldtype.setOmitNorms(this.hasNorms.getValue() == false);
fieldtype.setStored(this.stored.getValue());
fieldtype.setDocValuesType(this.hasDocValues.getValue() ? DocValuesType.SORTED_SET : DocValuesType.NONE);
if (fieldtype.equals(Defaults.FIELD_TYPE_WITH_SKIP_DOC_VALUES) == false) {
// NOTE: override index options only if we are not using a sparse doc values index (and we use an inverted index)
fieldtype.setIndexOptions(TextParams.toIndexOptions(this.indexed.getValue(), this.indexOptions.getValue()));
}
if (fieldtype.equals(Defaults.FIELD_TYPE)) {
// deduplicate in the common default case to save some memory
fieldtype = Defaults.FIELD_TYPE;
}
if (fieldtype.equals(Defaults.FIELD_TYPE_WITH_SKIP_DOC_VALUES)) {
fieldtype = Defaults.FIELD_TYPE_WITH_SKIP_DOC_VALUES;
}
return fieldtype;
}
private boolean shouldUseTimeSeriesSkipper() {
return hasDocValues.get() && indexed.get() == false && useTimeSeriesDocValuesSkippers(indexSettings, dimension.get());
}
private boolean shouldUseHostnameSkipper(final String fullFieldName) {
return hasDocValues.get()
&& IndexSettings.USE_DOC_VALUES_SKIPPER.get(indexSettings.getSettings())
&& indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.SKIPPERS_ENABLED_BY_DEFAULT)
&& IndexMode.LOGSDB.equals(indexSettings.getMode())
&& HOST_NAME.equals(fullFieldName)
&& indexSortConfigByHostName(indexSettings.getIndexSortConfig());
}
private boolean shouldUseStandardSkipper() {
return hasDocValues.get()
&& indexed.get() == false
&& indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.STANDARD_INDEXES_USE_SKIPPERS)
&& indexSettings.useDocValuesSkipper();
}
private static boolean indexSortConfigByHostName(final IndexSortConfig indexSortConfig) {
return indexSortConfig != null && indexSortConfig.hasIndexSort() && indexSortConfig.hasSortOnField(HOST_NAME);
}
}
public static final TypeParser PARSER = createTypeParserWithLegacySupport(Builder::new);
public static final
|
Builder
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/processor/onexception/OnExceptionHandledAndContinueTest.java
|
{
"start": 1098,
"end": 2063
}
|
class ____ extends ContextTestSupport {
@Override
public boolean isUseRouteBuilder() {
return false;
}
@Test
public void testHandledAndContinued() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
// should not be allowed
onException(IllegalArgumentException.class).continued(true).handled(true);
from("direct:start").to("mock:start").throwException(new IllegalArgumentException("Forced")).to("mock:result");
}
});
try {
context.start();
fail("Should thrown an exception");
} catch (Exception e) {
assertIsInstanceOf(IllegalArgumentException.class, e.getCause());
assertTrue(e.getCause().getMessage().startsWith("Only one of handled or continued is allowed to be configured"));
}
}
}
|
OnExceptionHandledAndContinueTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/dirtiness/SessionIsDirtyTests.java
|
{
"start": 4958,
"end": 5360
}
|
class ____ {
@Id
Long id;
String name;
@ManyToOne(fetch = FetchType.LAZY)
@JoinColumn(name = "entity_b")
EntityB entityB;
public EntityC() {
}
public EntityC(Long id, String name) {
this.id = id;
this.name = name;
}
public Long getId() {
return id;
}
public String getName() {
return name;
}
public EntityB getEntityB() {
return entityB;
}
}
}
|
EntityC
|
java
|
quarkusio__quarkus
|
extensions/arc/runtime/src/main/java/io/quarkus/arc/runtime/InterceptorBindings.java
|
{
"start": 738,
"end": 1136
}
|
class ____ an explanation of the reasons it might be used {@link Annotation}.
*/
@SuppressWarnings("unchecked")
public static Set<AbstractAnnotationLiteral> getInterceptorBindingLiterals(InvocationContext invocationContext) {
return (Set<AbstractAnnotationLiteral>) invocationContext.getContextData()
.get(ArcInvocationContext.KEY_INTERCEPTOR_BINDINGS);
}
}
|
for
|
java
|
apache__camel
|
components/camel-jms/src/test/java/org/apache/camel/component/jms/integration/spring/JmsRouteUsingSpringIT.java
|
{
"start": 1352,
"end": 2004
}
|
class ____ extends JmsRouteTest {
private ClassPathXmlApplicationContext applicationContext;
@ContextProvider
protected CamelContext createCamelContext() throws Exception {
applicationContext = createApplicationContext();
return SpringCamelContext.springCamelContext(applicationContext, true);
}
protected ClassPathXmlApplicationContext createApplicationContext() {
return new ClassPathXmlApplicationContext("org/apache/camel/component/jms/integration/spring/jmsRouteUsingSpring.xml");
}
@AfterEach
public void tearDown() {
IOHelper.close(applicationContext);
}
}
|
JmsRouteUsingSpringIT
|
java
|
quarkusio__quarkus
|
independent-projects/resteasy-reactive/server/processor/src/main/java/org/jboss/resteasy/reactive/server/processor/scanning/MethodScanner.java
|
{
"start": 554,
"end": 1726
}
|
interface ____ {
/**
* Method that allows for customising an endpoints handler chain
*
* @param method The method
* @param actualEndpointClass
* @param methodContext Any context discovered by {@link #handleCustomParameter(Type, Map, boolean, Map)}
* @return
*/
default List<HandlerChainCustomizer> scan(MethodInfo method, ClassInfo actualEndpointClass,
Map<String, Object> methodContext) {
return Collections.emptyList();
}
/**
* Method that is called when a parameter of an unknown type is discovered.
*
* @param paramType The parameter type
* @param annotations The annotations
* @param field If this is field injection
* @param methodContext Context that can be used to pass information into {@link #scan(MethodInfo, ClassInfo, Map)}
* @return
*/
default ParameterExtractor handleCustomParameter(Type paramType, Map<DotName, AnnotationInstance> annotations,
boolean field, Map<String, Object> methodContext) {
return null;
}
default boolean isMethodSignatureAsync(MethodInfo info) {
return false;
}
}
|
MethodScanner
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/ResultPartitionBuilder.java
|
{
"start": 10382,
"end": 11013
}
|
class ____ implements ShuffleDescriptor {
private final ResultPartitionID resultPartitionId;
private final ResourceID location;
TestingShuffleDescriptor(ResultPartitionID resultPartitionId, ResourceID location) {
this.resultPartitionId = resultPartitionId;
this.location = location;
}
@Override
public ResultPartitionID getResultPartitionID() {
return resultPartitionId;
}
@Override
public Optional<ResourceID> storesLocalResourcesOn() {
return Optional.of(location);
}
}
}
|
TestingShuffleDescriptor
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/TestBean.java
|
{
"start": 355,
"end": 1426
}
|
class ____ {
@ConfigProperty(name = "test-url")
URL url;
@RestClient
HelloClient2 client2;
@RestClient
HelloNonSimpleClient clientNonSimple;
String helloViaInjectedClient(String name) {
return client2.echo(name);
}
String helloViaInjectedClientIgnoreParams(String name) {
return client2.echoIgnoreParams(name, "whatever", "whatever2");
}
String helloViaBuiltClient(String name) {
HelloClient helloClient = RestClientBuilder.newBuilder()
.baseUrl(url)
.build(HelloClient.class);
return helloClient.echo(name);
}
String bug18977() {
return client2.bug18977();
}
byte[] helloNonSimpleSyncBytes() {
return clientNonSimple.echoSyncBytes(new byte[] { 1, 2, 3 });
}
Integer[] helloNonSimpleSyncInts() {
return clientNonSimple.echoSyncInts(new Integer[] { 1, 2, 3 });
}
Map<String, String> helloQueryParamsToMap() {
return clientNonSimple.echoQueryAsMap("1", "2", "3", "4", "5", "6");
}
}
|
TestBean
|
java
|
apache__flink
|
flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/RestoreStreamTaskTest.java
|
{
"start": 12542,
"end": 13834
}
|
class ____ extends RestoreWatchOperator<String, String> {
private static final long serialVersionUID = 2048954179291813243L;
private ListState<Long> counterState;
private long counter = 0;
@Override
public void processElement(StreamRecord<String> element) throws Exception {
counter++;
output.collect(element);
}
@Override
public void initializeState(StateInitializationContext context) throws Exception {
super.initializeState(context);
counterState =
context.getOperatorStateStore()
.getListState(
new ListStateDescriptor<>(
"counter-state", LongSerializer.INSTANCE));
if (context.isRestored()) {
for (Long value : counterState.get()) {
counter += value;
}
counterState.clear();
}
}
@Override
public void snapshotState(StateSnapshotContext context) throws Exception {
counterState.add(counter);
}
}
/** Operator that does nothing except counting state restorations. */
private static
|
CounterOperator
|
java
|
spring-projects__spring-boot
|
module/spring-boot-jetty/src/main/java/org/springframework/boot/jetty/autoconfigure/JettyThreadPool.java
|
{
"start": 1254,
"end": 2175
}
|
class ____ {
private JettyThreadPool() {
}
static QueuedThreadPool create(JettyServerProperties.Threads properties) {
BlockingQueue<Runnable> queue = determineBlockingQueue(properties.getMaxQueueCapacity());
int maxThreadCount = (properties.getMax() > 0) ? properties.getMax() : 200;
int minThreadCount = (properties.getMin() > 0) ? properties.getMin() : 8;
int threadIdleTimeout = (properties.getIdleTimeout() != null) ? (int) properties.getIdleTimeout().toMillis()
: 60000;
return new QueuedThreadPool(maxThreadCount, minThreadCount, threadIdleTimeout, queue);
}
@Contract("!null -> !null")
private static @Nullable BlockingQueue<Runnable> determineBlockingQueue(@Nullable Integer maxQueueCapacity) {
if (maxQueueCapacity == null) {
return null;
}
if (maxQueueCapacity == 0) {
return new SynchronousQueue<>();
}
return new BlockingArrayQueue<>(maxQueueCapacity);
}
}
|
JettyThreadPool
|
java
|
apache__dubbo
|
dubbo-plugin/dubbo-rest-openapi/src/main/java/org/apache/dubbo/rpc/protocol/tri/rest/openapi/model/PathItem.java
|
{
"start": 1163,
"end": 5129
}
|
class ____ extends Node<PathItem> {
private String ref;
private String summary;
private String description;
private Map<HttpMethods, Operation> operations;
private List<Server> servers;
private List<Parameter> parameters;
public String getRef() {
return ref;
}
public PathItem setRef(String ref) {
this.ref = ref;
return this;
}
public String getSummary() {
return summary;
}
public PathItem setSummary(String summary) {
this.summary = summary;
return this;
}
public String getDescription() {
return description;
}
public PathItem setDescription(String description) {
this.description = description;
return this;
}
public Map<HttpMethods, Operation> getOperations() {
return operations;
}
public Operation getOperation(HttpMethods method) {
return operations == null ? null : operations.get(method);
}
public PathItem setOperations(Map<HttpMethods, Operation> operations) {
this.operations = operations;
return this;
}
public PathItem addOperation(HttpMethods method, Operation operation) {
if (operations == null) {
operations = new LinkedHashMap<>();
}
operations.put(method, operation);
return this;
}
public PathItem removeOperation(HttpMethods method) {
if (operations != null) {
operations.remove(method);
}
return this;
}
public List<Server> getServers() {
return servers;
}
public PathItem setServers(List<Server> servers) {
this.servers = servers;
return this;
}
public PathItem addServer(Server server) {
if (servers == null) {
servers = new ArrayList<>();
}
servers.add(server);
return this;
}
public PathItem removeServer(Server server) {
if (servers != null) {
servers.remove(server);
}
return this;
}
public List<Parameter> getParameters() {
return parameters;
}
public PathItem setParameters(List<Parameter> parameters) {
this.parameters = parameters;
return this;
}
public PathItem addParameter(Parameter parameter) {
List<Parameter> thisParameters = parameters;
if (thisParameters == null) {
parameters = thisParameters = new ArrayList<>();
} else {
for (int i = 0, size = thisParameters.size(); i < size; i++) {
Parameter tParameter = thisParameters.get(i);
if (tParameter.getName().equals(parameter.getName())) {
return this;
}
}
}
thisParameters.add(parameter);
return this;
}
public PathItem removeParameter(Parameter parameter) {
if (parameters != null) {
parameters.remove(parameter);
}
return this;
}
@Override
public PathItem clone() {
PathItem clone = super.clone();
clone.operations = clone(operations);
clone.servers = clone(servers);
clone.parameters = clone(parameters);
return clone;
}
@Override
public Map<String, Object> writeTo(Map<String, Object> node, Context context) {
if (ref != null) {
write(node, "$ref", Helper.pathToRef(ref));
} else if (operations != null) {
write(node, "summary", summary);
write(node, "description", description);
for (Map.Entry<HttpMethods, Operation> entry : operations.entrySet()) {
write(node, entry.getKey().name().toLowerCase(), entry.getValue(), context);
}
write(node, "servers", servers, context);
write(node, "parameters", parameters, context);
}
writeExtensions(node);
return node;
}
}
|
PathItem
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/boot/model/internal/GeneratorBinder.java
|
{
"start": 19081,
"end": 19862
}
|
class ____ implements {@code Generator}
*/
private static <T extends Generator> Generator instantiateGeneratorAsBean(
Annotation annotation,
BeanContainer beanContainer,
GeneratorCreationContext creationContext,
Class<T> generatorClass,
MemberDetails memberDetails,
Class<? extends Annotation> annotationType) {
return getBean(
beanContainer,
generatorClass,
false,
true,
() -> instantiateGenerator(
annotation,
memberDetails,
annotationType,
creationContext,
generatorClass
)
);
}
/**
* Instantiate a {@link Generator}, using the given {@link BeanContainer},
* for the case where no generator annotation is available.
*
* @param beanContainer an optional {@code BeanContainer}
* @param generatorClass a
|
which
|
java
|
junit-team__junit5
|
junit-jupiter-api/src/main/java/org/junit/jupiter/api/MethodOrderer.java
|
{
"start": 5536,
"end": 6040
}
|
class ____ not be instantiated");
}
@Override
public void orderMethods(MethodOrdererContext context) {
// never called
}
}
/**
* {@code MethodOrderer} that sorts methods alphanumerically based on their
* names using {@link String#compareTo(String)}.
*
* <p>If two methods have the same name, {@code String} representations of
* their formal parameter lists will be used as a fallback for comparing the
* methods.
*
* @since 5.7
*/
@API(status = STABLE, since = "5.10")
|
must
|
java
|
apache__camel
|
components/camel-disruptor/src/main/java/org/apache/camel/component/disruptor/DisruptorReference.java
|
{
"start": 18335,
"end": 18454
}
|
interface ____ blocks all calls to the #onEvent
* method until the #unblock method is called.
*/
private
|
that
|
java
|
spring-projects__spring-boot
|
module/spring-boot-security/src/test/java/org/springframework/boot/security/autoconfigure/actuate/web/servlet/ManagementWebSecurityAutoConfigurationTests.java
|
{
"start": 10236,
"end": 10715
}
|
class ____ extends TestSecurityFilterChainConfig {
@Bean
@Order(SecurityFilterProperties.BASIC_AUTH_ORDER - 1)
SecurityFilterChain testRemoteDevToolsSecurityFilterChain(HttpSecurity http) {
http.securityMatcher(PathPatternRequestMatcher.withDefaults().matcher("/**"));
http.authorizeHttpRequests((requests) -> requests.anyRequest().anonymous());
http.csrf((csrf) -> csrf.disable());
return http.build();
}
}
static
|
TestRemoteDevToolsSecurityFilterChainConfig
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/value/enum2enum/EnumToEnumMappingTest.java
|
{
"start": 9671,
"end": 10293
}
|
enum ____ org.mapstruct.ap.test.value." +
"ExternalOrderType.")
}
)
public void shouldRaiseErrorIfUnknownEnumConstantsAreSpecifiedInMapping() {
}
@ProcessorTest
@WithClasses(ErroneousOrderMapperNotMappingConstantWithoutMatchInTargetType.class)
@ExpectedCompilationOutcome(
value = CompilationResult.FAILED,
diagnostics = {
@Diagnostic(type = ErroneousOrderMapperNotMappingConstantWithoutMatchInTargetType.class,
kind = Kind.ERROR,
line = 23,
message = "The following constants from the source
|
type
|
java
|
spring-projects__spring-boot
|
core/spring-boot-test/src/main/java/org/springframework/boot/test/json/GsonTester.java
|
{
"start": 2057,
"end": 3485
}
|
class ____ to load resources
* @param type the type under test
* @param gson the Gson instance
* @see #initFields(Object, Gson)
*/
public GsonTester(Class<?> resourceLoadClass, ResolvableType type, Gson gson) {
super(resourceLoadClass, type);
Assert.notNull(gson, "'gson' must not be null");
this.gson = gson;
}
@Override
protected String writeObject(T value, ResolvableType type) throws IOException {
return this.gson.toJson(value, type.getType());
}
@Override
protected T readObject(Reader reader, ResolvableType type) throws IOException {
return this.gson.fromJson(reader, type.getType());
}
/**
* Utility method to initialize {@link GsonTester} fields. See {@link GsonTester
* class-level documentation} for example usage.
* @param testInstance the test instance
* @param gson the Gson instance
*/
public static void initFields(Object testInstance, Gson gson) {
new GsonFieldInitializer().initFields(testInstance, gson);
}
/**
* Utility method to initialize {@link GsonTester} fields. See {@link GsonTester
* class-level documentation} for example usage.
* @param testInstance the test instance
* @param gson an object factory to create the Gson instance
*/
public static void initFields(Object testInstance, ObjectFactory<Gson> gson) {
new GsonFieldInitializer().initFields(testInstance, gson);
}
/**
* {@link FieldInitializer} for Gson.
*/
private static
|
used
|
java
|
spring-projects__spring-framework
|
spring-core/src/test/java/org/springframework/core/annotation/RepeatableContainersTests.java
|
{
"start": 8993,
"end": 9098
}
|
class ____ {
}
@StandardRepeatable("a")
@StandardRepeatable("b")
static
|
SingleStandardRepeatableTestCase
|
java
|
google__auto
|
common/src/test/java/com/google/auto/common/AnnotationValuesTest.java
|
{
"start": 3605,
"end": 14742
}
|
class ____ {}
@Rule public final CompilationRule compilation = new CompilationRule();
private Elements elements;
private Types types;
private AnnotationMirror annotationMirror;
@Before
public void setUp() {
elements = compilation.getElements();
types = compilation.getTypes();
TypeElement annotatedClass = getTypeElement(AnnotatedClass.class);
annotationMirror =
MoreElements.getAnnotationMirror(annotatedClass, MultiValueAnnotation.class).get();
}
@Test
public void getTypeMirror() {
TypeElement insideClassA = getTypeElement(InsideClassA.class);
AnnotationValue value = AnnotationMirrors.getAnnotationValue(annotationMirror, "classValue");
assertThat(AnnotationValues.getTypeMirror(value).asElement()).isEqualTo(insideClassA);
}
@Test
public void getTypeMirrorGenericClass() {
TypeElement genericClass = getTypeElement(GenericClass.class);
AnnotationValue gvalue =
AnnotationMirrors.getAnnotationValue(annotationMirror, "genericClassValue");
assertThat(AnnotationValues.getTypeMirror(gvalue).asElement()).isEqualTo(genericClass);
}
@Test
public void getTypeMirrors() {
TypeMirror insideClassA = getTypeElement(InsideClassA.class).asType();
TypeMirror insideClassB = getTypeElement(InsideClassB.class).asType();
AnnotationValue value = AnnotationMirrors.getAnnotationValue(annotationMirror, "classValues");
ImmutableList<DeclaredType> valueElements = AnnotationValues.getTypeMirrors(value);
assertThat(valueElements)
.comparingElementsUsing(Correspondence.from(types::isSameType, "has Same Type"))
.containsExactly(insideClassA, insideClassB)
.inOrder();
}
@Test
public void getAnnotationMirror() {
TypeElement insideAnnotation = getTypeElement(InsideAnnotation.class);
AnnotationValue value =
AnnotationMirrors.getAnnotationValue(annotationMirror, "insideAnnotationValue");
AnnotationMirror annotationMirror = AnnotationValues.getAnnotationMirror(value);
assertThat(annotationMirror.getAnnotationType().asElement()).isEqualTo(insideAnnotation);
assertThat(AnnotationMirrors.getAnnotationValue(annotationMirror, "value").getValue())
.isEqualTo(19);
}
@Test
public void getAnnotationMirrors() {
TypeElement insideAnnotation = getTypeElement(InsideAnnotation.class);
AnnotationValue value =
AnnotationMirrors.getAnnotationValue(annotationMirror, "insideAnnotationValues");
ImmutableList<AnnotationMirror> annotationMirrors =
AnnotationValues.getAnnotationMirrors(value);
ImmutableList<Element> valueElements =
annotationMirrors.stream()
.map(AnnotationMirror::getAnnotationType)
.map(DeclaredType::asElement)
.collect(toImmutableList());
assertThat(valueElements).containsExactly(insideAnnotation, insideAnnotation);
ImmutableList<Object> valuesStoredInAnnotation =
annotationMirrors.stream()
.map(
annotationMirror ->
AnnotationMirrors.getAnnotationValue(annotationMirror, "value").getValue())
.collect(toImmutableList());
assertThat(valuesStoredInAnnotation).containsExactly(20, 21).inOrder();
}
@Test
public void getString() {
AnnotationValue value = AnnotationMirrors.getAnnotationValue(annotationMirror, "stringValue");
assertThat(AnnotationValues.getString(value)).isEqualTo("hello");
}
@Test
public void getStrings() {
AnnotationValue value = AnnotationMirrors.getAnnotationValue(annotationMirror, "stringValues");
assertThat(AnnotationValues.getStrings(value)).containsExactly("it's", "me").inOrder();
}
@Test
public void getEnum() {
AnnotationValue value = AnnotationMirrors.getAnnotationValue(annotationMirror, "enumValue");
assertThat(AnnotationValues.getEnum(value)).isEqualTo(value.getValue());
}
@Test
public void getEnums() {
AnnotationValue value = AnnotationMirrors.getAnnotationValue(annotationMirror, "enumValues");
assertThat(getEnumNames(AnnotationValues.getEnums(value)))
.containsExactly(Foo.BAZ.name(), Foo.BAH.name())
.inOrder();
}
@Test
public void getAnnotationValues() {
AnnotationValue value = AnnotationMirrors.getAnnotationValue(annotationMirror, "intValues");
ImmutableList<AnnotationValue> values = AnnotationValues.getAnnotationValues(value);
assertThat(values)
.comparingElementsUsing(Correspondence.transforming(AnnotationValue::getValue, "has value"))
.containsExactly(1, 2)
.inOrder();
}
@Test
public void getInt() {
AnnotationValue value = AnnotationMirrors.getAnnotationValue(annotationMirror, "intValue");
assertThat(AnnotationValues.getInt(value)).isEqualTo(5);
}
@Test
public void getInts() {
AnnotationValue value = AnnotationMirrors.getAnnotationValue(annotationMirror, "intValues");
assertThat(AnnotationValues.getInts(value)).containsExactly(1, 2).inOrder();
}
@Test
public void getLong() {
AnnotationValue value = AnnotationMirrors.getAnnotationValue(annotationMirror, "longValue");
assertThat(AnnotationValues.getLong(value)).isEqualTo(6L);
}
@Test
public void getLongs() {
AnnotationValue value = AnnotationMirrors.getAnnotationValue(annotationMirror, "longValues");
assertThat(AnnotationValues.getLongs(value)).containsExactly(3L, 4L).inOrder();
}
@Test
public void getByte() {
AnnotationValue value = AnnotationMirrors.getAnnotationValue(annotationMirror, "byteValue");
assertThat(AnnotationValues.getByte(value)).isEqualTo((byte) 7);
}
@Test
public void getBytes() {
AnnotationValue value = AnnotationMirrors.getAnnotationValue(annotationMirror, "byteValues");
assertThat(AnnotationValues.getBytes(value)).containsExactly((byte) 8, (byte) 9).inOrder();
}
@Test
public void getShort() {
AnnotationValue value = AnnotationMirrors.getAnnotationValue(annotationMirror, "shortValue");
assertThat(AnnotationValues.getShort(value)).isEqualTo((short) 10);
}
@Test
public void getShorts() {
AnnotationValue value = AnnotationMirrors.getAnnotationValue(annotationMirror, "shortValues");
assertThat(AnnotationValues.getShorts(value)).containsExactly((short) 11, (short) 12).inOrder();
}
@Test
public void getFloat() {
AnnotationValue value = AnnotationMirrors.getAnnotationValue(annotationMirror, "floatValue");
assertThat(AnnotationValues.getFloat(value)).isEqualTo(13F);
}
@Test
public void getFloats() {
AnnotationValue value = AnnotationMirrors.getAnnotationValue(annotationMirror, "floatValues");
assertThat(AnnotationValues.getFloats(value)).containsExactly(14F, 15F).inOrder();
}
@Test
public void getDouble() {
AnnotationValue value = AnnotationMirrors.getAnnotationValue(annotationMirror, "doubleValue");
assertThat(AnnotationValues.getDouble(value)).isEqualTo(16D);
}
@Test
public void getDoubles() {
AnnotationValue value = AnnotationMirrors.getAnnotationValue(annotationMirror, "doubleValues");
assertThat(AnnotationValues.getDoubles(value)).containsExactly(17D, 18D).inOrder();
}
@Test
public void getBoolean() {
AnnotationValue value = AnnotationMirrors.getAnnotationValue(annotationMirror, "booleanValue");
assertThat(AnnotationValues.getBoolean(value)).isTrue();
}
@Test
public void getBooleans() {
AnnotationValue value = AnnotationMirrors.getAnnotationValue(annotationMirror, "booleanValues");
assertThat(AnnotationValues.getBooleans(value)).containsExactly(true, false).inOrder();
}
@Test
public void getChar() {
AnnotationValue value = AnnotationMirrors.getAnnotationValue(annotationMirror, "charValue");
assertThat(AnnotationValues.getChar(value)).isEqualTo('a');
}
@Test
public void getChars() {
AnnotationValue value = AnnotationMirrors.getAnnotationValue(annotationMirror, "charValues");
assertThat(AnnotationValues.getChars(value)).containsExactly('b', 'c').inOrder();
}
@Test
public void toSourceString() {
ImmutableMap<String, String> inputs =
ImmutableMap.<String, String>builder()
.put("classValue", "com.google.auto.common.AnnotationValuesTest.InsideClassA.class")
.put(
"classValues",
"{com.google.auto.common.AnnotationValuesTest.InsideClassA.class,"
+ " com.google.auto.common.AnnotationValuesTest.InsideClassB.class}")
.put(
"genericClassValue",
"com.google.auto.common.AnnotationValuesTest.GenericClass.class")
.put(
"insideAnnotationValue",
"@com.google.auto.common.AnnotationValuesTest.InsideAnnotation(19)")
.put(
"insideAnnotationValues",
"{@com.google.auto.common.AnnotationValuesTest.InsideAnnotation(20),"
+ " @com.google.auto.common.AnnotationValuesTest.InsideAnnotation(21)}")
.put("stringValue", "\"hello\"")
.put("stringValues", "{\"it\\'s\", \"me\"}")
.put("enumValue", "com.google.auto.common.AnnotationValuesTest.Foo.BAR")
.put(
"enumValues",
"{com.google.auto.common.AnnotationValuesTest.Foo.BAZ,"
+ " com.google.auto.common.AnnotationValuesTest.Foo.BAH}")
.put("intValue", "5")
.put("intValues", "{1, 2}")
.put("longValue", "6L")
.put("longValues", "{3L, 4L}")
.put("byteValue", "(byte) 7")
.put("byteValues", "{(byte) 8, (byte) 9}")
.put("shortValue", "(short) 10")
.put("shortValues", "{(short) 11, (short) 12}")
.put("floatValue", "13.0F")
.put("floatValues", "{14.0F, 15.0F}")
.put("doubleValue", "16.0")
.put("doubleValues", "{17.0, 18.0}")
.put("booleanValue", "true")
.put("booleanValues", "{true, false}")
.put("charValue", "'a'")
.put("charValues", "{'b', 'c'}")
.build();
inputs.forEach(
(name, expected) ->
assertThat(
AnnotationValues.toString(
AnnotationMirrors.getAnnotationValue(annotationMirror, name)))
.isEqualTo(expected));
assertThat(AnnotationMirrors.toString(annotationMirror))
.isEqualTo(
inputs.entrySet().stream()
.map(e -> e.getKey() + " = " + e.getValue())
.collect(
joining(
", ",
"@com.google.auto.common.AnnotationValuesTest.MultiValueAnnotation(",
")")));
}
private TypeElement getTypeElement(Class<?> clazz) {
return elements.getTypeElement(clazz.getCanonicalName());
}
private static ImmutableList<String> getEnumNames(ImmutableList<VariableElement> values) {
return values.stream()
.map(VariableElement::getSimpleName)
.map(Name::toString)
.collect(toImmutableList());
}
}
|
AnnotatedClass
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/query/SpanOrQueryBuilder.java
|
{
"start": 1332,
"end": 6583
}
|
class ____ extends AbstractQueryBuilder<SpanOrQueryBuilder> implements SpanQueryBuilder {
public static final String NAME = "span_or";
private static final ParseField CLAUSES_FIELD = new ParseField("clauses");
private final List<SpanQueryBuilder> clauses = new ArrayList<>();
public SpanOrQueryBuilder(SpanQueryBuilder initialClause) {
if (initialClause == null) {
throw new IllegalArgumentException("[" + NAME + "] must include at least one clause");
}
clauses.add(initialClause);
}
/**
* Read from a stream.
*/
public SpanOrQueryBuilder(StreamInput in) throws IOException {
super(in);
for (QueryBuilder clause : readQueries(in)) {
clauses.add((SpanQueryBuilder) clause);
}
}
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
writeQueries(out, clauses);
}
/**
* Add a span clause to the current list of clauses
*/
public SpanOrQueryBuilder addClause(SpanQueryBuilder clause) {
if (clause == null) {
throw new IllegalArgumentException("[" + NAME + "] inner clause cannot be null");
}
clauses.add(clause);
return this;
}
/**
* @return the {@link SpanQueryBuilder} clauses that were set for this query
*/
public List<SpanQueryBuilder> clauses() {
return Collections.unmodifiableList(this.clauses);
}
@Override
protected void doXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(NAME);
builder.startArray(CLAUSES_FIELD.getPreferredName());
for (SpanQueryBuilder clause : clauses) {
clause.toXContent(builder, params);
}
builder.endArray();
boostAndQueryNameToXContent(builder);
builder.endObject();
}
public static SpanOrQueryBuilder fromXContent(XContentParser parser) throws IOException {
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
String queryName = null;
List<SpanQueryBuilder> clauses = new ArrayList<>();
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
if (CLAUSES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
QueryBuilder query = parseInnerQueryBuilder(parser);
if (query instanceof SpanQueryBuilder == false) {
throw new ParsingException(parser.getTokenLocation(), "span_or [clauses] must be of type span query");
}
final SpanQueryBuilder clause = (SpanQueryBuilder) query;
checkNoBoost(NAME, currentFieldName, parser, clause);
clauses.add(clause);
}
} else {
throw new ParsingException(parser.getTokenLocation(), "[span_or] query does not support [" + currentFieldName + "]");
}
} else {
if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
boost = parser.floatValue();
} else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
queryName = parser.text();
} else {
throw new ParsingException(parser.getTokenLocation(), "[span_or] query does not support [" + currentFieldName + "]");
}
}
}
if (clauses.isEmpty()) {
throw new ParsingException(parser.getTokenLocation(), "span_or must include [clauses]");
}
SpanOrQueryBuilder queryBuilder = new SpanOrQueryBuilder(clauses.get(0));
for (int i = 1; i < clauses.size(); i++) {
queryBuilder.addClause(clauses.get(i));
}
queryBuilder.boost(boost);
queryBuilder.queryName(queryName);
return queryBuilder;
}
@Override
protected Query doToQuery(SearchExecutionContext context) throws IOException {
SpanQuery[] spanQueries = new SpanQuery[clauses.size()];
for (int i = 0; i < clauses.size(); i++) {
Query query = clauses.get(i).toQuery(context);
assert query instanceof SpanQuery;
spanQueries[i] = (SpanQuery) query;
}
return new SpanOrQuery(spanQueries);
}
@Override
protected int doHashCode() {
return Objects.hash(clauses);
}
@Override
protected boolean doEquals(SpanOrQueryBuilder other) {
return Objects.equals(clauses, other.clauses);
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersion.zero();
}
}
|
SpanOrQueryBuilder
|
java
|
spring-projects__spring-boot
|
module/spring-boot-web-server/src/main/java/org/springframework/boot/web/server/servlet/context/ServletComponentScan.java
|
{
"start": 2727,
"end": 2868
}
|
class ____ will be
* scanned.
* @return classes from the base packages to scan
*/
Class<?>[] basePackageClasses() default {};
}
|
specified
|
java
|
micronaut-projects__micronaut-core
|
inject-java/src/test/groovy/io/micronaut/inject/lifecycle/beancreationeventlistener/JCreatedListener.java
|
{
"start": 867,
"end": 1232
}
|
class ____ implements BeanCreatedEventListener<J> {
boolean executed;
Argument<J> beanType;
@Override
public J onCreated(BeanCreatedEvent<J> event) {
assert !executed : "JCreatedListener is being triggered more than once";
executed = true;
beanType = event.getBeanType();
return event.getBean();
}
}
|
JCreatedListener
|
java
|
spring-projects__spring-framework
|
spring-messaging/src/main/java/org/springframework/messaging/handler/invocation/reactive/AbstractMethodMessageHandler.java
|
{
"start": 10833,
"end": 20514
}
|
class ____ bean with name '" + beanName + "'", ex);
}
}
if (beanType != null && predicate.test(beanType)) {
detectHandlerMethods(beanName);
}
}
}
}
/**
* Detect if the given handler has any methods that can handle messages and if
* so register it with the extracted mapping information.
* <p><strong>Note:</strong> This method is protected and can be invoked by
* subclasses, but this should be done on startup only as documented in
* {@link #registerHandlerMethod}.
* @param handler the handler to check, either an instance of a Spring bean name
*/
protected final void detectHandlerMethods(Object handler) {
Class<?> handlerType;
if (handler instanceof String handlerName) {
ApplicationContext context = getApplicationContext();
Assert.state(context != null, "ApplicationContext is required for resolving handler bean names");
handlerType = context.getType(handlerName);
}
else {
handlerType = handler.getClass();
}
if (handlerType != null) {
final Class<?> userType = ClassUtils.getUserClass(handlerType);
Map<Method, T> methods = MethodIntrospector.selectMethods(userType,
(MethodIntrospector.MetadataLookup<T>) method -> getMappingForMethod(method, userType));
if (logger.isDebugEnabled()) {
logger.debug(formatMappings(userType, methods));
}
methods.forEach((key, value) -> registerHandlerMethod(handler, key, value));
}
}
private String formatMappings(Class<?> userType, Map<Method, T> methods) {
String packageName = ClassUtils.getPackageName(userType);
String formattedType = (StringUtils.hasText(packageName) ?
Arrays.stream(packageName.split("\\."))
.map(packageSegment -> packageSegment.substring(0, 1))
.collect(Collectors.joining(".", "", "." + userType.getSimpleName())) :
userType.getSimpleName());
Function<Method, String> methodFormatter = method -> Arrays.stream(method.getParameterTypes())
.map(Class::getSimpleName)
.collect(Collectors.joining(",", "(", ")"));
return methods.entrySet().stream()
.map(e -> {
Method method = e.getKey();
return e.getValue() + ": " + method.getName() + methodFormatter.apply(method);
})
.collect(Collectors.joining("\n\t", "\n\t" + formattedType + ":" + "\n\t", ""));
}
/**
* Obtain the mapping for the given method, if any.
* @param method the method to check
* @param handlerType the handler type, possibly a subtype of the method's declaring class
* @return the mapping, or {@code null} if the method is not mapped
*/
protected abstract @Nullable T getMappingForMethod(Method method, Class<?> handlerType);
/**
* Register a handler method and its unique mapping.
* <p><strong>Note:</strong> As of 5.3 this method is public (rather than
* protected) and can be used both at startup and at runtime.
* @param handler the bean name of the handler or the handler instance
* @param method the method to register
* @param mapping the mapping conditions associated with the handler method
* @throws IllegalStateException if another method was already registered
* under the same mapping
*/
public final void registerHandlerMethod(Object handler, Method method, T mapping) {
Assert.notNull(mapping, "Mapping must not be null");
HandlerMethod newHandlerMethod = createHandlerMethod(handler, method);
HandlerMethod oldHandlerMethod = this.handlerMethods.get(mapping);
if (oldHandlerMethod != null && !oldHandlerMethod.equals(newHandlerMethod)) {
throw new IllegalStateException("Ambiguous mapping found. Cannot map '" + newHandlerMethod.getBean() +
"' bean method \n" + newHandlerMethod + "\nto " + mapping + ": There is already '" +
oldHandlerMethod.getBean() + "' bean method\n" + oldHandlerMethod + " mapped.");
}
mapping = extendMapping(mapping, newHandlerMethod);
this.handlerMethods.put(mapping, newHandlerMethod);
for (String pattern : getDirectLookupMappings(mapping)) {
List<T> values = this.destinationLookup.computeIfAbsent(pattern, p -> new CopyOnWriteArrayList<>());
values.add(mapping);
}
}
/**
* Create a HandlerMethod instance from an Object handler that is either a handler
* instance or a String-based bean name.
*/
private HandlerMethod createHandlerMethod(Object handler, Method method) {
HandlerMethod handlerMethod;
if (handler instanceof String handlerName) {
ApplicationContext context = getApplicationContext();
Assert.state(context != null, "ApplicationContext is required for resolving handler bean names");
handlerMethod = new HandlerMethod(handlerName, context.getAutowireCapableBeanFactory(), method);
}
else {
handlerMethod = new HandlerMethod(handler, method);
}
return handlerMethod;
}
/**
* This method is invoked just before mappings are added. It allows
* subclasses to update the mapping with the {@link HandlerMethod} in mind.
* This can be useful when the method signature is used to refine the
* mapping, for example, based on the cardinality of input and output.
* <p>By default this method returns the mapping that is passed in.
* @param mapping the mapping to be added
* @param handlerMethod the target handler for the mapping
* @return a new mapping or the same
* @since 5.2.2
*/
protected T extendMapping(T mapping, HandlerMethod handlerMethod) {
return mapping;
}
/**
* Return String-based destinations for the given mapping, if any, that can
* be used to find matches with a direct lookup (i.e. non-patterns).
* <p><strong>Note:</strong> This is completely optional. The mapping
* metadata for a subclass may support neither direct lookups, nor String
* based destinations.
*/
protected abstract Set<String> getDirectLookupMappings(T mapping);
@Override
public Mono<Void> handleMessage(Message<?> message) throws MessagingException {
Match<T> match = null;
try {
match = getHandlerMethod(message);
}
catch (Exception ex) {
return Mono.error(ex);
}
if (match == null) {
// handleNoMatch would have been invoked already
return Mono.empty();
}
return handleMatch(match.mapping, match.handlerMethod, message);
}
protected Mono<Void> handleMatch(T mapping, HandlerMethod handlerMethod, Message<?> message) {
handlerMethod = handlerMethod.createWithResolvedBean();
return this.invocableHelper.handleMessage(handlerMethod, message);
}
private @Nullable Match<T> getHandlerMethod(Message<?> message) {
List<Match<T>> matches = new ArrayList<>();
RouteMatcher.Route destination = getDestination(message);
List<T> mappingsByUrl = (destination != null ? this.destinationLookup.get(destination.value()) : null);
if (mappingsByUrl != null) {
addMatchesToCollection(mappingsByUrl, message, matches);
}
if (matches.isEmpty()) {
// No direct hits, go through all mappings
Set<T> allMappings = this.handlerMethods.keySet();
addMatchesToCollection(allMappings, message, matches);
}
if (matches.isEmpty()) {
handleNoMatch(destination, message);
return null;
}
Comparator<Match<T>> comparator = new MatchComparator(getMappingComparator(message));
matches.sort(comparator);
if (logger.isTraceEnabled()) {
logger.trace("Found " + matches.size() + " handler methods: " + matches);
}
Match<T> bestMatch = matches.get(0);
if (matches.size() > 1) {
Match<T> secondBestMatch = matches.get(1);
if (comparator.compare(bestMatch, secondBestMatch) == 0) {
HandlerMethod m1 = bestMatch.handlerMethod;
HandlerMethod m2 = secondBestMatch.handlerMethod;
throw new IllegalStateException("Ambiguous handler methods mapped for destination '" +
(destination != null ? destination.value() : "") + "': {" +
m1.getShortLogMessage() + ", " + m2.getShortLogMessage() + "}");
}
}
return bestMatch;
}
/**
* Extract the destination from the given message.
* @see #getDirectLookupMappings(Object)
*/
protected abstract RouteMatcher.@Nullable Route getDestination(Message<?> message);
private void addMatchesToCollection(
Collection<T> mappingsToCheck, Message<?> message, List<Match<T>> matches) {
for (T mapping : mappingsToCheck) {
T match = getMatchingMapping(mapping, message);
if (match != null) {
HandlerMethod handlerMethod = this.handlerMethods.get(mapping);
Assert.state(handlerMethod != null, "HandlerMethod must not be null");
matches.add(new Match<>(match, handlerMethod));
}
}
}
/**
* Check if a mapping matches the current message and return a possibly
* new mapping with conditions relevant to the current request.
* @param mapping the mapping to get a match for
* @param message the message being handled
* @return the match or {@code null} if there is no match
*/
protected abstract @Nullable T getMatchingMapping(T mapping, Message<?> message);
/**
* Return a comparator for sorting matching mappings.
* The returned comparator should sort 'better' matches higher.
* @param message the current Message
* @return the comparator, never {@code null}
*/
protected abstract Comparator<T> getMappingComparator(Message<?> message);
/**
* Invoked when no matching handler is found.
* @param destination the destination
* @param message the message
*/
protected void handleNoMatch(RouteMatcher.@Nullable Route destination, Message<?> message) {
logger.debug("No handlers for destination '" +
(destination != null ? destination.value() : "") + "'");
}
/**
* Create a concrete instance of {@link AbstractExceptionHandlerMethodResolver}
* that finds exception handling methods based on some criteria, for example, based
* on the presence of {@code @MessageExceptionHandler}.
* @param beanType the
|
for
|
java
|
quarkusio__quarkus
|
extensions/spring-data-jpa/deployment/src/test/java/io/quarkus/spring/data/deployment/Book.java
|
{
"start": 126,
"end": 456
}
|
class ____ {
@Id
private Integer bid;
private String name;
public Integer getBid() {
return bid;
}
public void setBid(Integer bid) {
this.bid = bid;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
|
Book
|
java
|
apache__kafka
|
streams/src/main/java/org/apache/kafka/streams/state/internals/MergedSortedCacheSessionStoreIterator.java
|
{
"start": 1165,
"end": 2849
}
|
class ____ extends AbstractMergedSortedCacheStoreIterator<Windowed<Bytes>, Windowed<Bytes>, byte[], byte[]> {
private final SegmentedCacheFunction cacheFunction;
MergedSortedCacheSessionStoreIterator(final PeekingKeyValueIterator<Bytes, LRUCacheEntry> cacheIterator,
final KeyValueIterator<Windowed<Bytes>, byte[]> storeIterator,
final SegmentedCacheFunction cacheFunction,
final boolean forward) {
super(cacheIterator, storeIterator, forward);
this.cacheFunction = cacheFunction;
}
@Override
public KeyValue<Windowed<Bytes>, byte[]> deserializeStorePair(final KeyValue<Windowed<Bytes>, byte[]> pair) {
return pair;
}
@Override
Windowed<Bytes> deserializeCacheKey(final Bytes cacheKey) {
final byte[] binaryKey = cacheFunction.key(cacheKey).get();
final byte[] keyBytes = SessionKeySchema.extractKeyBytes(binaryKey);
final Window window = SessionKeySchema.extractWindow(binaryKey);
return new Windowed<>(Bytes.wrap(keyBytes), window);
}
@Override
byte[] deserializeCacheValue(final LRUCacheEntry cacheEntry) {
return cacheEntry.value();
}
@Override
public Windowed<Bytes> deserializeStoreKey(final Windowed<Bytes> key) {
return key;
}
@Override
public int compare(final Bytes cacheKey, final Windowed<Bytes> storeKey) {
final Bytes storeKeyBytes = SessionKeySchema.toBinary(storeKey);
return cacheFunction.compareSegmentedKeys(cacheKey, storeKeyBytes);
}
}
|
MergedSortedCacheSessionStoreIterator
|
java
|
apache__kafka
|
server-common/src/main/java/org/apache/kafka/queue/KafkaEventQueue.java
|
{
"start": 1496,
"end": 1700
}
|
class ____ implements EventQueue {
public static final String EVENT_HANDLER_THREAD_SUFFIX = "event-handler";
/**
* A context object that wraps events.
*/
private static
|
KafkaEventQueue
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/javadoc/InvalidParamTest.java
|
{
"start": 3975,
"end": 4319
}
|
interface ____ {
/** {@code foobar}, {@code barfoo} */
void foo(int foobar);
}
""")
.doTest(TEXT_MATCH);
}
@Test
@SuppressWarnings("MisformattedTestData")
public void verySimilarCodeParam_diagnosticMessage() {
helper
.addSourceLines(
"Test.java",
"""
|
Test
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/generated/GeneratedOperator.java
|
{
"start": 1081,
"end": 1292
}
|
class ____<C extends StreamOperator<?>> extends GeneratedClass<C> {
private static final long serialVersionUID = 2L;
/**
* Creates a GeneratedOperator.
*
* @param className
|
GeneratedOperator
|
java
|
grpc__grpc-java
|
api/src/main/java/io/grpc/ForwardingChannelBuilder.java
|
{
"start": 1750,
"end": 2088
}
|
class ____, so that classes extending
* {@link ForwardingChannelBuilder2} won't need class-level
* {@code @SuppressWarnings("deprecation")} annotation. Such annotation would suppress all
* deprecation warnings in all methods, inadvertently hiding any real deprecation warnings needing
* to be addressed. However, each child
|
itself
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
|
{
"start": 23067,
"end": 78084
}
|
interface ____ get the json output of a *STATUS command
* on the given file.
*
* @param filename The file to query.
* @param command Either GETFILESTATUS, LISTSTATUS, or ACLSTATUS
* @return A string containing the JSON output describing the file.
* @throws Exception
*/
private String getStatus(String filename, String command)
throws Exception {
long oldOpsStat =
metricsGetter.getOrDefault(command, defaultEntryMetricGetter).call();
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
// Remove leading / from filename
if (filename.charAt(0) == '/') {
filename = filename.substring(1);
}
String pathOps = MessageFormat.format(
"/webhdfs/v1/{0}?user.name={1}&op={2}",
filename, user, command);
URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.connect();
assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
BufferedReader reader =
new BufferedReader(new InputStreamReader(conn.getInputStream()));
long opsStat =
metricsGetter.getOrDefault(command, defaultExitMetricGetter).call();
assertEquals(oldOpsStat + 1L, opsStat);
return reader.readLine();
}
/**
* General-purpose http PUT command to the httpfs server.
* @param filename The file to operate upon
* @param command The command to perform (SETACL, etc)
* @param params Parameters, like "aclspec=..."
*/
private void putCmd(String filename, String command,
String params) throws Exception {
assertEquals(HttpURLConnection.HTTP_OK,
putCmdWithReturn(filename, command, params).getResponseCode());
}
/**
* General-purpose http PUT command to the httpfs server,
* which returns relted HttpURLConnection instance.
* @param filename The file to operate upon
* @param command The command to perform (SETACL, etc)
* @param params Parameters, like "aclspec=..."
* @return HttpURLConnection the HttpURLConnection instance for the given PUT
*/
private HttpURLConnection putCmdWithReturn(String filename, String command,
String params) throws Exception {
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
// Remove leading / from filename
if (filename.charAt(0) == '/') {
filename = filename.substring(1);
}
String pathOps = MessageFormat.format(
"/webhdfs/v1/{0}?user.name={1}{2}{3}&op={4}",
filename, user, (params == null) ? "" : "&",
(params == null) ? "" : params, command);
URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
conn.connect();
return conn;
}
/**
* Given the JSON output from the GETFILESTATUS call, return the
* 'permission' value.
*
* @param statusJson JSON from GETFILESTATUS
* @return The value of 'permission' in statusJson
* @throws Exception
*/
private String getPerms(String statusJson) throws Exception {
JSONParser parser = new JSONParser();
JSONObject jsonObject = (JSONObject) parser.parse(statusJson);
JSONObject details = (JSONObject) jsonObject.get("FileStatus");
return (String) details.get("permission");
}
/**
* Given the JSON output from the GETTRASHPATH call, return the
* 'path' value.
*
* @param statusJson JSON from GETTRASHPATH
* @return The value of 'path' in statusJson
* @throws Exception
*/
private String getPath(String statusJson) throws Exception {
JSONParser parser = new JSONParser();
JSONObject details = (JSONObject) parser.parse(statusJson);
return (String) details.get("Path");
}
/**
* Given the JSON output from the GETACLSTATUS call, return the
* 'entries' value as a List<String>.
* @param statusJson JSON from GETACLSTATUS
* @return A List of Strings which are the elements of the ACL entries
* @throws Exception
*/
private List<String> getAclEntries(String statusJson) throws Exception {
List<String> entries = new ArrayList<String>();
JSONParser parser = new JSONParser();
JSONObject jsonObject = (JSONObject) parser.parse(statusJson);
JSONObject details = (JSONObject) jsonObject.get("AclStatus");
JSONArray jsonEntries = (JSONArray) details.get("entries");
if (jsonEntries != null) {
for (Object e : jsonEntries) {
entries.add(e.toString());
}
}
return entries;
}
/**
* Parse xAttrs from JSON result of GETXATTRS call, return xAttrs Map.
* @param statusJson JSON from GETXATTRS
* @return Map<String, byte[]> xAttrs Map
* @throws Exception
*/
private Map<String, byte[]> getXAttrs(String statusJson) throws Exception {
Map<String, byte[]> xAttrs = Maps.newHashMap();
JSONParser parser = new JSONParser();
JSONObject jsonObject = (JSONObject) parser.parse(statusJson);
JSONArray jsonXAttrs = (JSONArray) jsonObject.get("XAttrs");
if (jsonXAttrs != null) {
for (Object a : jsonXAttrs) {
String name = (String) ((JSONObject)a).get("name");
String value = (String) ((JSONObject)a).get("value");
xAttrs.put(name, decodeXAttrValue(value));
}
}
return xAttrs;
}
/** Decode xattr value from string. */
private byte[] decodeXAttrValue(String value) throws IOException {
if (value != null) {
return XAttrCodec.decodeValue(value);
} else {
return new byte[0];
}
}
/**
*
* @param stat AclStatus object from a call to getAclStatus
* @param name The name of the ACL being searched for
* @return The AclEntry if found, or null otherwise
* @throws IOException
*/
private AclEntry findAclWithName(AclStatus stat, String name)
throws IOException{
AclEntry relevantAcl = null;
Iterator<AclEntry> it = stat.getEntries().iterator();
while (it.hasNext()) {
AclEntry e = it.next();
if (e.getName().equals(name)) {
relevantAcl = e;
break;
}
}
return relevantAcl;
}
/**
* Validate that files are created with 755 permissions when no
* 'permissions' attribute is specified, and when 'permissions'
* is specified, that value is honored.
*/
@Test
@TestDir
@TestJetty
@TestHdfs
public void testPerms() throws Exception {
createHttpFSServer(false, false);
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
fs.mkdirs(new Path("/perm"));
createWithHttp("/perm/none", null);
String statusJson = getStatus("/perm/none", "GETFILESTATUS");
assertTrue("755".equals(getPerms(statusJson)));
createWithHttp("/perm/p-777", "777");
statusJson = getStatus("/perm/p-777", "GETFILESTATUS");
assertTrue("777".equals(getPerms(statusJson)));
createWithHttp("/perm/p-654", "654");
statusJson = getStatus("/perm/p-654", "GETFILESTATUS");
assertTrue("654".equals(getPerms(statusJson)));
createWithHttp("/perm/p-321", "321");
statusJson = getStatus("/perm/p-321", "GETFILESTATUS");
assertTrue("321".equals(getPerms(statusJson)));
}
/**
* Validate XAttr get/set/remove calls.
*/
@Test
@TestDir
@TestJetty
@TestHdfs
public void testXAttrs() throws Exception {
final String name1 = "user.a1";
final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
final String name2 = "user.a2";
final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
final String dir = "/xattrTest";
final String path = dir + "/file";
createHttpFSServer(false, false);
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
fs.mkdirs(new Path(dir));
createWithHttp(path, null);
String statusJson = getStatus(path, "GETXATTRS");
Map<String, byte[]> xAttrs = getXAttrs(statusJson);
assertEquals(0, xAttrs.size());
// Set two xattrs
putCmd(path, "SETXATTR", setXAttrParam(name1, value1));
putCmd(path, "SETXATTR", setXAttrParam(name2, value2));
statusJson = getStatus(path, "GETXATTRS");
xAttrs = getXAttrs(statusJson);
assertEquals(2, xAttrs.size());
assertArrayEquals(value1, xAttrs.get(name1));
assertArrayEquals(value2, xAttrs.get(name2));
// Remove one xattr
putCmd(path, "REMOVEXATTR", "xattr.name=" + name1);
statusJson = getStatus(path, "GETXATTRS");
xAttrs = getXAttrs(statusJson);
assertEquals(1, xAttrs.size());
assertArrayEquals(value2, xAttrs.get(name2));
// Remove another xattr, then there is no xattr
putCmd(path, "REMOVEXATTR", "xattr.name=" + name2);
statusJson = getStatus(path, "GETXATTRS");
xAttrs = getXAttrs(statusJson);
assertEquals(0, xAttrs.size());
}
/** Params for setting an xAttr. */
public static String setXAttrParam(String name, byte[] value)
throws IOException {
return "xattr.name=" + name + "&xattr.value=" + XAttrCodec.encodeValue(
value, XAttrCodec.HEX) + "&encoding=hex&flag=create";
}
/**
* Validate the various ACL set/modify/remove calls. General strategy is
* to verify each of the following steps with GETFILESTATUS, LISTSTATUS,
* and GETACLSTATUS:
* <ol>
* <li>Create a file with no ACLs</li>
* <li>Add a user + group ACL</li>
* <li>Add another user ACL</li>
* <li>Remove the first user ACL</li>
* <li>Remove all ACLs</li>
* </ol>
*/
@Test
@TestDir
@TestJetty
@TestHdfs
public void testFileAcls() throws Exception {
final String aclUser1 = "user:foo:rw-";
final String remAclUser1 = "user:foo:";
final String aclUser2 = "user:bar:r--";
final String aclGroup1 = "group::r--";
final String aclSpec = "aclspec=user::rwx," + aclUser1 + ","
+ aclGroup1 + ",other::---";
final String modAclSpec = "aclspec=" + aclUser2;
final String remAclSpec = "aclspec=" + remAclUser1;
final String dir = "/aclFileTest";
final String path = dir + "/test";
String statusJson;
List<String> aclEntries;
createHttpFSServer(false, false);
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
fs.mkdirs(new Path(dir));
createWithHttp(path, null);
/* getfilestatus and liststatus don't have 'aclBit' in their reply */
statusJson = getStatus(path, "GETFILESTATUS");
assertEquals(-1, statusJson.indexOf("aclBit"));
statusJson = getStatus(dir, "LISTSTATUS");
assertEquals(-1, statusJson.indexOf("aclBit"));
/* getaclstatus works and returns no entries */
statusJson = getStatus(path, "GETACLSTATUS");
aclEntries = getAclEntries(statusJson);
assertTrue(aclEntries.size() == 0);
/*
* Now set an ACL on the file. (getfile|list)status have aclBit,
* and aclstatus has entries that looks familiar.
*/
putCmd(path, "SETACL", aclSpec);
statusJson = getStatus(path, "GETFILESTATUS");
assertNotEquals(-1, statusJson.indexOf("aclBit"));
statusJson = getStatus(dir, "LISTSTATUS");
assertNotEquals(-1, statusJson.indexOf("aclBit"));
statusJson = getStatus(path, "GETACLSTATUS");
aclEntries = getAclEntries(statusJson);
assertTrue(aclEntries.size() == 2);
assertTrue(aclEntries.contains(aclUser1));
assertTrue(aclEntries.contains(aclGroup1));
/* Modify acl entries to add another user acl */
putCmd(path, "MODIFYACLENTRIES", modAclSpec);
statusJson = getStatus(path, "GETACLSTATUS");
aclEntries = getAclEntries(statusJson);
assertTrue(aclEntries.size() == 3);
assertTrue(aclEntries.contains(aclUser1));
assertTrue(aclEntries.contains(aclUser2));
assertTrue(aclEntries.contains(aclGroup1));
/* Remove the first user acl entry and verify */
putCmd(path, "REMOVEACLENTRIES", remAclSpec);
statusJson = getStatus(path, "GETACLSTATUS");
aclEntries = getAclEntries(statusJson);
assertTrue(aclEntries.size() == 2);
assertTrue(aclEntries.contains(aclUser2));
assertTrue(aclEntries.contains(aclGroup1));
/* Remove all acls and verify */
putCmd(path, "REMOVEACL", null);
statusJson = getStatus(path, "GETACLSTATUS");
aclEntries = getAclEntries(statusJson);
assertTrue(aclEntries.size() == 0);
statusJson = getStatus(path, "GETFILESTATUS");
assertEquals(-1, statusJson.indexOf("aclBit"));
statusJson = getStatus(dir, "LISTSTATUS");
assertEquals(-1, statusJson.indexOf("aclBit"));
}
/**
* Test ACL operations on a directory, including default ACLs.
* General strategy is to use GETFILESTATUS and GETACLSTATUS to verify:
* <ol>
* <li>Initial status with no ACLs</li>
* <li>The addition of a default ACL</li>
* <li>The removal of default ACLs</li>
* </ol>
*
* @throws Exception
*/
@Test
@TestDir
@TestJetty
@TestHdfs
public void testDirAcls() throws Exception {
final String defUser1 = "default:user:glarch:r-x";
final String defSpec1 = "aclspec=" + defUser1;
final String dir = "/aclDirTest";
String statusJson;
List<String> aclEntries;
createHttpFSServer(false, false);
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
fs.mkdirs(new Path(dir));
/* getfilestatus and liststatus don't have 'aclBit' in their reply */
statusJson = getStatus(dir, "GETFILESTATUS");
assertEquals(-1, statusJson.indexOf("aclBit"));
/* No ACLs, either */
statusJson = getStatus(dir, "GETACLSTATUS");
aclEntries = getAclEntries(statusJson);
assertTrue(aclEntries.size() == 0);
/* Give it a default ACL and verify */
putCmd(dir, "SETACL", defSpec1);
statusJson = getStatus(dir, "GETFILESTATUS");
assertNotEquals(-1, statusJson.indexOf("aclBit"));
statusJson = getStatus(dir, "GETACLSTATUS");
aclEntries = getAclEntries(statusJson);
assertTrue(aclEntries.size() == 5);
/* 4 Entries are default:(user|group|mask|other):perm */
assertTrue(aclEntries.contains(defUser1));
/* Remove the default ACL and re-verify */
putCmd(dir, "REMOVEDEFAULTACL", null);
statusJson = getStatus(dir, "GETFILESTATUS");
assertEquals(-1, statusJson.indexOf("aclBit"));
statusJson = getStatus(dir, "GETACLSTATUS");
aclEntries = getAclEntries(statusJson);
assertTrue(aclEntries.size() == 0);
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testCustomizedUserAndGroupNames() throws Exception {
// Start server with default configuration
Server server = createHttpFSServer(false, false);
final Configuration conf = HttpFSServerWebApp.get()
.get(FileSystemAccess.class).getFileSystemConfiguration();
// Change pattern config
conf.set(HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
"^[A-Za-z0-9_][A-Za-z0-9._-]*[$]?$");
conf.set(HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_KEY,
"^(default:)?(user|group|mask|other):" +
"[[0-9A-Za-z_][@A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?" +
"(user|group|mask|other):[[0-9A-Za-z_][@A-Za-z0-9._-]]*:" +
"([rwx-]{3})?)*$");
// Save configuration to site file
writeConf(conf, "hdfs-site.xml");
// Restart the HttpFS server to apply new config
server.stop();
server.start();
final String aclUser = "user:123:rw-";
final String aclGroup = "group:foo@bar:r--";
final String aclSpec = "aclspec=user::rwx," + aclUser + ",group::rwx," +
aclGroup + ",other::---";
final String dir = "/aclFileTestCustom";
final String path = dir + "/test";
// Create test dir
FileSystem fs = FileSystem.get(conf);
fs.mkdirs(new Path(dir));
createWithHttp(path, null);
// Set ACL
putCmd(path, "SETACL", aclSpec);
// Verify ACL
String statusJson = getStatus(path, "GETACLSTATUS");
List<String> aclEntries = getAclEntries(statusJson);
assertTrue(aclEntries.contains(aclUser));
assertTrue(aclEntries.contains(aclGroup));
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testOpenOffsetLength() throws Exception {
createHttpFSServer(false, false);
byte[] array = new byte[]{0, 1, 2, 3};
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
fs.mkdirs(new Path("/tmp"));
OutputStream os = fs.create(new Path("/tmp/foo"));
os.write(array);
os.close();
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
URL url = new URL(TestJettyHelper.getJettyURL(),
MessageFormat.format(
"/webhdfs/v1/tmp/foo?user.name={0}&op=open&offset=1&length=2",
user));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
InputStream is = conn.getInputStream();
assertEquals(1, is.read());
assertEquals(2, is.read());
assertEquals(-1, is.read());
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testCreateFileWithUnmaskedPermissions() throws Exception {
createHttpFSServer(false, false);
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
// Create a folder with a default acl default:user2:rw-
fs.mkdirs(new Path("/tmp"));
AclEntry acl = new org.apache.hadoop.fs.permission.AclEntry.Builder()
.setType(AclEntryType.USER)
.setScope(AclEntryScope.DEFAULT)
.setName("user2")
.setPermission(FsAction.READ_WRITE)
.build();
fs.setAcl(new Path("/tmp"), new ArrayList<AclEntry>(Arrays.asList(acl)));
String notUnmaskedFile = "/tmp/notUnmasked";
String unmaskedFile = "/tmp/unmasked";
// Create a file inside the folder. It should inherit the default acl
// but the mask should affect the ACL permissions. The mask is controlled
// by the group permissions, which are 0, and hence the mask will make
// the effective permission of the inherited ACL be NONE.
createWithHttp(notUnmaskedFile, "700");
// Pull the relevant ACL from the FS object and check the mask has affected
// its permissions.
AclStatus aclStatus = fs.getAclStatus(new Path(notUnmaskedFile));
AclEntry theAcl = findAclWithName(aclStatus, "user2");
assertNotNull(theAcl);
assertEquals(FsAction.NONE,
aclStatus.getEffectivePermission(theAcl));
// Create another file, this time pass a mask of 777. Now the inherited
// permissions should be as expected
createWithHttp(unmaskedFile, "700", "777");
aclStatus = fs.getAclStatus(new Path(unmaskedFile));
theAcl = findAclWithName(aclStatus, "user2");
assertNotNull(theAcl);
assertEquals(FsAction.READ_WRITE,
aclStatus.getEffectivePermission(theAcl));
}
/**
 * Verifies that MKDIRS over HttpFS honors the "unmasked" permission
 * parameter when a default ACL is inherited. Without it, the group bits of
 * the requested permission (700, i.e. group = 0) become the mask and reduce
 * the inherited default:user2:rw- entry to an effective permission of NONE;
 * passing unmasked=777 preserves READ_WRITE.
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testMkdirWithUnmaskedPermissions() throws Exception {
createHttpFSServer(false, false);
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
// Create a folder with a default acl default:user2:rw-
fs.mkdirs(new Path("/tmp"));
AclEntry acl = new org.apache.hadoop.fs.permission.AclEntry.Builder()
.setType(AclEntryType.USER)
.setScope(AclEntryScope.DEFAULT)
.setName("user2")
.setPermission(FsAction.READ_WRITE)
.build();
fs.setAcl(new Path("/tmp"), new ArrayList<AclEntry>(Arrays.asList(acl)));
String notUnmaskedDir = "/tmp/notUnmaskedDir";
String unmaskedDir = "/tmp/unmaskedDir";
// Create a directory inside the folder. It should inherit the default acl
// but the mask should affect the ACL permissions. The mask is controlled
// by the group permissions, which are 0, and hence the mask will make
// the effective permission of the inherited ACL be NONE.
createDirWithHttp(notUnmaskedDir, "700", null);
// Pull the relevant ACL from the FS object and check the mask has affected
// its permissions.
AclStatus aclStatus = fs.getAclStatus(new Path(notUnmaskedDir));
AclEntry theAcl = findAclWithName(aclStatus, "user2");
assertNotNull(theAcl);
assertEquals(FsAction.NONE,
aclStatus.getEffectivePermission(theAcl));
// Create another directory, this time pass a mask of 777. Now the inherited
// permissions should be as expected
createDirWithHttp(unmaskedDir, "700", "777");
aclStatus = fs.getAclStatus(new Path(unmaskedDir));
theAcl = findAclWithName(aclStatus, "user2");
assertNotNull(theAcl);
assertEquals(FsAction.READ_WRITE,
aclStatus.getEffectivePermission(theAcl));
}
/**
 * A PUT request without an {@code op} query parameter must be rejected by
 * the HttpFS server with HTTP 400 (Bad Request).
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testPutNoOperation() throws Exception {
  createHttpFSServer(false, false);
  final String hadoopUser = HadoopUsersConfTestHelper.getHadoopUsers()[0];
  // Deliberately omit "op=..." from the query string.
  final URL requestUrl = new URL(TestJettyHelper.getJettyURL(),
      MessageFormat.format("/webhdfs/v1/foo?user.name={0}", hadoopUser));
  final HttpURLConnection connection =
      (HttpURLConnection) requestUrl.openConnection();
  connection.setDoInput(true);
  connection.setDoOutput(true);
  connection.setRequestMethod("PUT");
  assertEquals(connection.getResponseCode(),
      HttpURLConnection.HTTP_BAD_REQUEST);
}
/**
 * GETTRASHROOT should resolve to /user/&lt;user&gt;/.Trash for ordinary
 * paths (both "/" and an existing file), and to
 * &lt;encryption-zone&gt;/.Trash/&lt;user&gt; for a file inside an
 * encryption zone (the zone is pre-created by TestHdfsHelper).
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testGetTrashRoot() throws Exception {
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
createHttpFSServer(false, false);
String trashJson = getStatus("/", "GETTRASHROOT");
String trashPath = getPath(trashJson);
Path expectedPath = new Path(FileSystem.USER_HOME_PREFIX,
new Path(user, FileSystem.TRASH_PREFIX));
assertEquals(expectedPath.toUri().getPath(), trashPath);
// Write a small file so the trash root of a concrete file can be queried.
byte[] array = new byte[]{0, 1, 2, 3};
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
fs.mkdirs(new Path("/tmp"));
OutputStream os = fs.create(new Path("/tmp/foo"));
os.write(array);
os.close();
trashJson = getStatus("/tmp/foo", "GETTRASHROOT");
trashPath = getPath(trashJson);
assertEquals(expectedPath.toUri().getPath(), trashPath);
//TestHdfsHelp has already set up EZ environment
final Path ezFile = TestHdfsHelper.ENCRYPTED_FILE;
final Path ezPath = TestHdfsHelper.ENCRYPTION_ZONE;
trashJson = getStatus(ezFile.toUri().getPath(), "GETTRASHROOT");
trashPath = getPath(trashJson);
// Inside an EZ the trash root lives under the zone, not the user home.
expectedPath = new Path(ezPath, new Path(FileSystem.TRASH_PREFIX, user));
assertEquals(expectedPath.toUri().getPath(), trashPath);
}
/**
 * Runs the shared delegation-token test suite against an HttpFS server
 * created with createHttpFSServer(true, false); the {@code false} passed to
 * delegationTokenCommonTests selects the non-SSL variant (compare
 * {@link #testDelegationTokenOperationsSsl()}).
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testDelegationTokenOperations() throws Exception {
createHttpFSServer(true, false);
delegationTokenCommonTests(false);
}
/**
 * Shared setup for the snapshot tests: creates /tmp/tmp-snap-test/subdir
 * through the HttpFS MKDIRS API, marks /tmp/tmp-snap-test snapshottable via
 * the DFS client, then issues {@code snapOperation} with
 * {@code additionalParams} using {@code httpMethod}.
 *
 * @param httpMethod HTTP verb for the snapshot operation (e.g. PUT, DELETE)
 * @param snapOperation WebHDFS op name (CREATESNAPSHOT, RENAMESNAPSHOT, ...)
 * @param additionalParams extra query parameters, already URL-formatted
 * @return the open connection for the snapshot request, for the caller to
 *         inspect response code and body
 */
private HttpURLConnection snapshotTestPreconditions(String httpMethod,
                                                     String snapOperation,
                                                     String additionalParams)
throws Exception {
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
"/webhdfs/v1/tmp/tmp-snap-test/subdir?user.name={0}&op=MKDIRS",
user));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
conn.connect();
assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
//needed to make the given dir snapshottable
Path snapshottablePath = new Path("/tmp/tmp-snap-test");
DistributedFileSystem dfs =
(DistributedFileSystem) FileSystem.get(snapshottablePath.toUri(),
TestHdfsHelper.getHdfsConf());
dfs.allowSnapshot(snapshottablePath);
//Try to create snapshot passing snapshot name
url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
"/webhdfs/v1/tmp/tmp-snap-test?user.name={0}&op={1}&{2}", user,
snapOperation, additionalParams));
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod(httpMethod);
conn.connect();
return conn;
}
/**
 * ALLOWSNAPSHOT over HttpFS must flip the snapshot-enabled bit of a
 * directory from unset (the default) to set, observable through the DFS
 * client's FileStatus.
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testAllowSnapshot() throws Exception {
createHttpFSServer(false, false);
// Create a test directory
String pathString = "/tmp/tmp-snap-allow-test";
createDirWithHttp(pathString, "700", null);
Path path = new Path(pathString);
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
path.toUri(), TestHdfsHelper.getHdfsConf());
// FileStatus should have snapshot enabled bit unset by default
assertFalse(dfs.getFileStatus(path).isSnapshotEnabled());
// Send a request with ALLOWSNAPSHOT API
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
"/webhdfs/v1{0}?user.name={1}&op=ALLOWSNAPSHOT",
pathString, user));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
conn.connect();
// Should return HTTP_OK
assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
// FileStatus should have snapshot enabled bit set
assertTrue(dfs.getFileStatus(path).isSnapshotEnabled());
// Clean up
dfs.delete(path, true);
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testDisallowSnapshot() throws Exception {
createHttpFSServer(false, false);
// Create a test directory
String pathString = "/tmp/tmp-snap-disallow-test";
createDirWithHttp(pathString, "700", null);
Path path = new Path(pathString);
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
path.toUri(), TestHdfsHelper.getHdfsConf());
// Allow snapshot
dfs.allowSnapshot(path);
// FileStatus should have snapshot enabled bit set so far
assertTrue(dfs.getFileStatus(path).isSnapshotEnabled());
// Send a request with DISALLOWSNAPSHOT API
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
"/webhdfs/v1{0}?user.name={1}&op=DISALLOWSNAPSHOT",
pathString, user));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
conn.connect();
// Should return HTTP_OK
assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
// FileStatus should not have snapshot enabled bit set
assertFalse(dfs.getFileStatus(path).isSnapshotEnabled());
// Clean up
dfs.delete(path, true);
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testDisallowSnapshotException() throws Exception {
createHttpFSServer(false, false);
// Create a test directory
String pathString = "/tmp/tmp-snap-disallow-exception-test";
createDirWithHttp(pathString, "700", null);
Path path = new Path(pathString);
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
path.toUri(), TestHdfsHelper.getHdfsConf());
// Allow snapshot
dfs.allowSnapshot(path);
// FileStatus should have snapshot enabled bit set so far
assertTrue(dfs.getFileStatus(path).isSnapshotEnabled());
// Create some snapshots
dfs.createSnapshot(path, "snap-01");
dfs.createSnapshot(path, "snap-02");
// Send a request with DISALLOWSNAPSHOT API
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
"/webhdfs/v1{0}?user.name={1}&op=DISALLOWSNAPSHOT",
pathString, user));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
conn.connect();
// Should not return HTTP_OK
assertNotEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
// FileStatus should still have snapshot enabled bit set
assertTrue(dfs.getFileStatus(path).isSnapshotEnabled());
// Clean up
dfs.deleteSnapshot(path, "snap-02");
dfs.deleteSnapshot(path, "snap-01");
dfs.delete(path, true);
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testCreateSnapshot() throws Exception {
createHttpFSServer(false, false);
final HttpURLConnection conn = snapshotTestPreconditions("PUT",
"CREATESNAPSHOT",
"snapshotname=snap-with-name");
assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
final BufferedReader reader =
new BufferedReader(new InputStreamReader(conn.getInputStream()));
String result = reader.readLine();
//Validates if the content format is correct
assertTrue(result.
equals("{\"Path\":\"/tmp/tmp-snap-test/.snapshot/snap-with-name\"}"));
//Validates if the snapshot is properly created under .snapshot folder
result = getStatus("/tmp/tmp-snap-test/.snapshot",
"LISTSTATUS");
assertTrue(result.contains("snap-with-name"));
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testCreateSnapshotNoSnapshotName() throws Exception {
createHttpFSServer(false, false);
final HttpURLConnection conn = snapshotTestPreconditions("PUT",
"CREATESNAPSHOT",
"");
assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
final BufferedReader reader = new BufferedReader(
new InputStreamReader(conn.getInputStream()));
String result = reader.readLine();
//Validates if the content format is correct
assertTrue(Pattern.matches(
"(\\{\\\"Path\\\"\\:\\\"/tmp/tmp-snap-test/.snapshot/s)" +
"(\\d{8})(-)(\\d{6})(\\.)(\\d{3})(\\\"\\})", result));
//Validates if the snapshot is properly created under .snapshot folder
result = getStatus("/tmp/tmp-snap-test/.snapshot",
"LISTSTATUS");
assertTrue(Pattern.matches("(.+)(\\\"pathSuffix\\\":\\\"s)" +
"(\\d{8})(-)(\\d{6})(\\.)(\\d{3})(\\\")(.+)",
result));
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testRenameSnapshot() throws Exception {
createHttpFSServer(false, false);
HttpURLConnection conn = snapshotTestPreconditions("PUT",
"CREATESNAPSHOT",
"snapshotname=snap-to-rename");
assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
conn = snapshotTestPreconditions("PUT",
"RENAMESNAPSHOT",
"oldsnapshotname=snap-to-rename" +
"&snapshotname=snap-renamed");
assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
//Validates the snapshot is properly renamed under .snapshot folder
String result = getStatus("/tmp/tmp-snap-test/.snapshot",
"LISTSTATUS");
assertTrue(result.contains("snap-renamed"));
//There should be no snapshot named snap-to-rename now
assertFalse(result.contains("snap-to-rename"));
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testDelegationTokenOperationsSsl() throws Exception {
createHttpFSServer(true, true);
delegationTokenCommonTests(true);
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testDeleteSnapshot() throws Exception {
createHttpFSServer(false, false);
HttpURLConnection conn = snapshotTestPreconditions("PUT",
"CREATESNAPSHOT",
"snapshotname=snap-to-delete");
assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
conn = snapshotTestPreconditions("DELETE",
"DELETESNAPSHOT",
"snapshotname=snap-to-delete");
assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
//Validates the snapshot is not under .snapshot folder anymore
String result = getStatus("/tmp/tmp-snap-test/.snapshot",
"LISTSTATUS");
assertFalse(result.contains("snap-to-delete"));
}
private HttpURLConnection sendRequestToHttpFSServer(String path, String op,
String additionalParams) throws Exception {
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
"/webhdfs/v1{0}?user.name={1}&op={2}&{3}",
path, user, op, additionalParams));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("GET");
conn.connect();
return conn;
}
private HttpURLConnection sendRequestGetSnapshotDiff(String path,
String oldsnapshotname, String snapshotname) throws Exception{
return sendRequestToHttpFSServer(path, "GETSNAPSHOTDIFF",
MessageFormat.format("oldsnapshotname={0}&snapshotname={1}",
oldsnapshotname, snapshotname));
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testGetSnapshotDiff() throws Exception {
createHttpFSServer(false, false);
// Create a test directory
String pathStr = "/tmp/tmp-snap-diff-test";
createDirWithHttp(pathStr, "700", null);
Path path = new Path(pathStr);
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
path.toUri(), TestHdfsHelper.getHdfsConf());
// Enable snapshot
dfs.allowSnapshot(path);
assertTrue(dfs.getFileStatus(path).isSnapshotEnabled());
// Create a file and take a snapshot
String file1 = pathStr + "/file1";
createWithHttp(file1, null);
dfs.createSnapshot(path, "snap1");
// Create another file and take a snapshot
String file2 = pathStr + "/file2";
createWithHttp(file2, null);
dfs.createSnapshot(path, "snap2");
// Send a request with GETSNAPSHOTDIFF API
HttpURLConnection conn = sendRequestGetSnapshotDiff(pathStr,
"snap1", "snap2");
// Should return HTTP_OK
assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
// Verify the response
BufferedReader reader =
new BufferedReader(new InputStreamReader(conn.getInputStream()));
// The response should be a one-line JSON string.
String result = reader.readLine();
// Verify the content of diff with DFS API.
SnapshotDiffReport dfsDiffReport = dfs.getSnapshotDiffReport(path,
"snap1", "snap2");
assertEquals(result, JsonUtil.toJsonString(dfsDiffReport));
// Clean up
dfs.deleteSnapshot(path, "snap2");
dfs.deleteSnapshot(path, "snap1");
dfs.delete(path, true);
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testGetSnapshotDiffIllegalParam() throws Exception {
createHttpFSServer(false, false);
// Create a test directory
String pathStr = "/tmp/tmp-snap-diff-exc-test";
createDirWithHttp(pathStr, "700", null);
Path path = new Path(pathStr);
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
path.toUri(), TestHdfsHelper.getHdfsConf());
// Enable snapshot
dfs.allowSnapshot(path);
assertTrue(dfs.getFileStatus(path).isSnapshotEnabled());
// Send requests with GETSNAPSHOTDIFF API
// Snapshots snap1 and snap2 are not created, expect failures but not NPE
HttpURLConnection conn = sendRequestGetSnapshotDiff(pathStr, "", "");
assertNotEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
sendRequestGetSnapshotDiff(pathStr, "snap1", "");
assertNotEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
sendRequestGetSnapshotDiff(pathStr, "", "snap2");
assertNotEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
sendRequestGetSnapshotDiff(pathStr, "snap1", "snap2");
assertNotEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
// Clean up
dfs.delete(path, true);
}
private void verifyGetSnapshottableDirectoryList(DistributedFileSystem dfs)
throws Exception {
// Send a request
HttpURLConnection conn = sendRequestToHttpFSServer("/",
"GETSNAPSHOTTABLEDIRECTORYLIST", "");
// Should return HTTP_OK
assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
// Verify the response
BufferedReader reader =
new BufferedReader(new InputStreamReader(conn.getInputStream()));
// The response should be a one-line JSON string.
String dirLst = reader.readLine();
// Verify the content of diff with DFS API.
SnapshottableDirectoryStatus[] dfsDirLst = dfs.getSnapshottableDirListing();
assertEquals(dirLst, JsonUtil.toJsonString(dfsDirLst));
}
private void verifyGetSnapshotList(DistributedFileSystem dfs, Path path)
throws Exception {
// Send a request
HttpURLConnection conn = sendRequestToHttpFSServer(path.toString(),
"GETSNAPSHOTLIST", "");
// Should return HTTP_OK
assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
// Verify the response
BufferedReader reader =
new BufferedReader(new InputStreamReader(conn.getInputStream()));
// The response should be a one-line JSON string.
String dirLst = reader.readLine();
// Verify the content of status with DFS API.
SnapshotStatus[] dfsDirLst = dfs.getSnapshotListing(path);
assertEquals(dirLst, JsonUtil.toJsonString(dfsDirLst));
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testGetSnapshottableDirectoryList() throws Exception {
createHttpFSServer(false, false);
// Create test directories
String pathStr1 = "/tmp/tmp-snap-dirlist-test-1";
createDirWithHttp(pathStr1, "700", null);
Path path1 = new Path(pathStr1);
String pathStr2 = "/tmp/tmp-snap-dirlist-test-2";
createDirWithHttp(pathStr2, "700", null);
Path path2 = new Path(pathStr2);
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
path1.toUri(), TestHdfsHelper.getHdfsConf());
// Verify response when there is no snapshottable directory
verifyGetSnapshottableDirectoryList(dfs);
// Enable snapshot for path1
dfs.allowSnapshot(path1);
assertTrue(dfs.getFileStatus(path1).isSnapshotEnabled());
// Verify response when there is one snapshottable directory
verifyGetSnapshottableDirectoryList(dfs);
// Enable snapshot for path2
dfs.allowSnapshot(path2);
assertTrue(dfs.getFileStatus(path2).isSnapshotEnabled());
// Verify response when there are two snapshottable directories
verifyGetSnapshottableDirectoryList(dfs);
// Clean up and verify
dfs.delete(path2, true);
verifyGetSnapshottableDirectoryList(dfs);
dfs.delete(path1, true);
verifyGetSnapshottableDirectoryList(dfs);
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testGetSnapshotList() throws Exception {
createHttpFSServer(false, false);
// Create test directories
String pathStr = "/tmp/tmp-snap-list-test-1";
createDirWithHttp(pathStr, "700", null);
Path path = new Path(pathStr);
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
path.toUri(), TestHdfsHelper.getHdfsConf());
// Enable snapshot for path1
dfs.allowSnapshot(path);
assertTrue(dfs.getFileStatus(path).isSnapshotEnabled());
// Verify response when there is one snapshottable directory
verifyGetSnapshotList(dfs, path);
// Create a file and take a snapshot
String file1 = pathStr + "/file1";
createWithHttp(file1, null);
dfs.createSnapshot(path, "snap1");
// Create another file and take a snapshot
String file2 = pathStr + "/file2";
createWithHttp(file2, null);
dfs.createSnapshot(path, "snap2");
verifyGetSnapshotList(dfs, path);
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testNoRedirect() throws Exception {
createHttpFSServer(false, false);
final String testContent = "Test content";
final String path = "/testfile.txt";
final String username = HadoopUsersConfTestHelper.getHadoopUsers()[0];
// Trigger the creation of the file which shouldn't redirect
URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
"/webhdfs/v1{0}?user.name={1}&op=CREATE&noredirect=true",
path, username));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod(HttpMethod.PUT);
conn.connect();
// Verify that it returned the final write location
assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
JSONObject json = (JSONObject)new JSONParser().parse(
new InputStreamReader(conn.getInputStream()));
String location = (String)json.get("Location");
assertTrue(location.contains(DataParam.NAME));
assertFalse(location.contains(NoRedirectParam.NAME));
assertTrue(location.contains("CREATE"));
assertTrue(location.startsWith(TestJettyHelper.getJettyURL().toString()),
"Wrong location: " + location);
// Use the location to actually write the file
url = new URL(location);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod(HttpMethod.PUT);
conn.setRequestProperty(
"Content-Type", MediaType.APPLICATION_OCTET_STREAM);
conn.setDoOutput(true);
conn.connect();
OutputStream os = conn.getOutputStream();
os.write(testContent.getBytes());
os.close();
// Verify that it created the file and returned the location
assertEquals(
HttpURLConnection.HTTP_CREATED, conn.getResponseCode());
json = (JSONObject)new JSONParser().parse(
new InputStreamReader(conn.getInputStream()));
location = (String)json.get("Location");
assertEquals(
TestJettyHelper.getJettyURL() + "/webhdfs/v1" + path, location);
// Read the file which shouldn't redirect
url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
"/webhdfs/v1{0}?user.name={1}&op=OPEN&noredirect=true",
path, username));
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod(HttpMethod.GET);
conn.connect();
// Verify that we got the final location to read from
assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
json = (JSONObject)new JSONParser().parse(
new InputStreamReader(conn.getInputStream()));
location = (String)json.get("Location");
assertTrue(!location.contains(NoRedirectParam.NAME));
assertTrue(location.contains("OPEN"));
assertTrue(location.startsWith(TestJettyHelper.getJettyURL().toString()),
"Wrong location: " + location);
// Use the location to actually read
url = new URL(location);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod(HttpMethod.GET);
conn.connect();
// Verify that we read what we wrote
assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
String content = IOUtils.toString(conn.getInputStream(), StandardCharsets.UTF_8);
assertEquals(testContent, content);
// Get the checksum of the file which shouldn't redirect
url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
"/webhdfs/v1{0}?user.name={1}&op=GETFILECHECKSUM&noredirect=true",
path, username));
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod(HttpMethod.GET);
conn.connect();
// Verify that we got the final location to write to
assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
json = (JSONObject)new JSONParser().parse(
new InputStreamReader(conn.getInputStream()));
location = (String)json.get("Location");
assertTrue(!location.contains(NoRedirectParam.NAME));
assertTrue(location.contains("GETFILECHECKSUM"));
assertTrue(location.startsWith(TestJettyHelper.getJettyURL().toString()),
"Wrong location: " + location);
// Use the location to actually get the checksum
url = new URL(location);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod(HttpMethod.GET);
conn.connect();
// Verify that we read what we wrote
assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
json = (JSONObject)new JSONParser().parse(
new InputStreamReader(conn.getInputStream()));
JSONObject checksum = (JSONObject)json.get("FileChecksum");
assertEquals(
"0000020000000000000000001b9c0a445fed3c0bf1e1aa7438d96b1500000000",
checksum.get("bytes"));
assertEquals(28L, checksum.get("length"));
assertEquals("MD5-of-0MD5-of-512CRC32C", checksum.get("algorithm"));
}
private void verifyGetServerDefaults(DistributedFileSystem dfs)
throws Exception {
// Send a request
HttpURLConnection conn =
sendRequestToHttpFSServer("/", "GETSERVERDEFAULTS", "");
// Should return HTTP_OK
assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
// Verify the response
BufferedReader reader =
new BufferedReader(new InputStreamReader(conn.getInputStream()));
// The response should be a one-line JSON string.
String dirLst = reader.readLine();
FsServerDefaults dfsDirLst = dfs.getServerDefaults();
assertNotNull(dfsDirLst);
assertEquals(dirLst, JsonUtil.toJsonString(dfsDirLst));
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testGetServerDefaults() throws Exception {
createHttpFSServer(false, false);
String pathStr1 = "/";
Path path1 = new Path(pathStr1);
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
.get(path1.toUri(), TestHdfsHelper.getHdfsConf());
verifyGetServerDefaults(dfs);
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testAccess() throws Exception {
createHttpFSServer(false, false);
final String dir = "/xattrTest";
Path path1 = new Path(dir);
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
.get(path1.toUri(), TestHdfsHelper.getHdfsConf());
dfs.mkdirs(new Path(dir));
HttpURLConnection conn =
sendRequestToHttpFSServer(dir, "CHECKACCESS", "fsaction=r--");
assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
HttpURLConnection conn1 =
sendRequestToHttpFSServer(dir, "CHECKACCESS", "fsaction=-w-");
assertEquals(HttpURLConnection.HTTP_OK, conn1.getResponseCode());
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testECPolicy() throws Exception {
createHttpFSServer(false, false);
final ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies
.getByID(SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
final String ecPolicyName = ecPolicy.getName();
// Create an EC dir and write a test file in it
final Path ecDir = new Path("/ec");
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
.get(ecDir.toUri(), TestHdfsHelper.getHdfsConf());
Path ecFile = new Path(ecDir, "ec_file.txt");
dfs.mkdirs(ecDir);
dfs.enableErasureCodingPolicy(ecPolicyName);
dfs.setErasureCodingPolicy(ecDir, ecPolicyName);
// Create a EC file
DFSTestUtil.createFile(dfs, ecFile, 1024, (short) 1, 0);
// Verify that ecPolicy is set in getFileStatus response for ecFile
String getFileStatusResponse =
getStatus(ecFile.toString(), "GETFILESTATUS");
JSONParser parser = new JSONParser();
JSONObject jsonObject = (JSONObject) parser.parse(getFileStatusResponse);
JSONObject details = (JSONObject) jsonObject.get("FileStatus");
String ecpolicyForECfile = (String) details.get("ecPolicy");
assertEquals(ecpolicyForECfile, ecPolicyName,
"EC policy for ecFile should match the set EC policy");
// Verify httpFs getFileStatus with WEBHDFS REST API
WebHdfsFileSystem httpfsWebHdfs = (WebHdfsFileSystem) FileSystem.get(
new URI("webhdfs://"
+ TestJettyHelper.getJettyURL().toURI().getAuthority()),
TestHdfsHelper.getHdfsConf());
HdfsFileStatus httpfsFileStatus =
(HdfsFileStatus) httpfsWebHdfs.getFileStatus(ecFile);
assertNotNull(httpfsFileStatus.getErasureCodingPolicy());
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testErasureCodingPolicy() throws Exception {
createHttpFSServer(false, false);
final String dir = "/ecPolicy";
Path path1 = new Path(dir);
final ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies
.getByID(SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
final String ecPolicyName = ecPolicy.getName();
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
.get(path1.toUri(), TestHdfsHelper.getHdfsConf());
dfs.mkdirs(new Path(dir));
dfs.enableErasureCodingPolicy(ecPolicyName);
HttpURLConnection conn =
putCmdWithReturn(dir, "SETECPOLICY", "ecpolicy=" + ecPolicyName);
assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
HttpURLConnection conn1 = sendRequestToHttpFSServer(dir, "GETECPOLICY", "");
// Should return HTTP_OK
assertEquals(conn1.getResponseCode(), HttpURLConnection.HTTP_OK);
// Verify the response
BufferedReader reader =
new BufferedReader(new InputStreamReader(conn1.getInputStream()));
// The response should be a one-line JSON string.
String dirLst = reader.readLine();
ErasureCodingPolicy dfsDirLst = dfs.getErasureCodingPolicy(path1);
assertNotNull(dfsDirLst);
assertEquals(dirLst, JsonUtil.toJsonString(dfsDirLst));
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
URL url = new URL(TestJettyHelper.getJettyURL(),
MessageFormat.format("/webhdfs/v1{0}?user.name={1}&op={2}&{3}", dir,
user, "UNSETECPOLICY", ""));
HttpURLConnection conn2 = (HttpURLConnection) url.openConnection();
conn2.setRequestMethod("POST");
conn2.connect();
assertEquals(HttpURLConnection.HTTP_OK, conn2.getResponseCode());
// response should be null
dfsDirLst = dfs.getErasureCodingPolicy(path1);
assertNull(dfsDirLst);
// test put opeartion with path as "/"
final String dir1 = "/";
HttpURLConnection conn3 =
putCmdWithReturn(dir1, "SETECPOLICY", "ecpolicy=" + ecPolicyName);
// Should return HTTP_OK
assertEquals(HttpURLConnection.HTTP_OK, conn3.getResponseCode());
// test post operation with path as "/"
final String dir2 = "/";
URL url1 = new URL(TestJettyHelper.getJettyURL(),
MessageFormat.format("/webhdfs/v1{0}?user.name={1}&op={2}&{3}", dir2,
user, "UNSETECPOLICY", ""));
HttpURLConnection conn4 = (HttpURLConnection) url1.openConnection();
conn4.setRequestMethod("POST");
conn4.connect();
assertEquals(HttpURLConnection.HTTP_OK, conn4.getResponseCode());
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testStoragePolicySatisfier() throws Exception {
createHttpFSServer(false, false);
final String dir = "/parent";
Path path1 = new Path(dir);
String file = "/parent/file";
Path filePath = new Path(file);
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
.get(path1.toUri(), TestHdfsHelper.getHdfsConf());
dfs.mkdirs(path1);
dfs.create(filePath).close();
dfs.setStoragePolicy(filePath, HdfsConstants.COLD_STORAGE_POLICY_NAME);
BlockStoragePolicy storagePolicy =
(BlockStoragePolicy) dfs.getStoragePolicy(filePath);
assertEquals(HdfsConstants.COLD_STORAGE_POLICY_NAME,
storagePolicy.getName());
HttpURLConnection conn = putCmdWithReturn(dir, "SATISFYSTORAGEPOLICY", "");
assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
Map<String, byte[]> xAttrs = dfs.getXAttrs(path1);
assertTrue(
xAttrs.containsKey(HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY));
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testNoRedirectWithData() throws Exception {
createHttpFSServer(false, false);
final String path = "/file";
final String username = HadoopUsersConfTestHelper.getHadoopUsers()[0];
// file creation which should not redirect
URL url = new URL(TestJettyHelper.getJettyURL(),
MessageFormat.format(
"/webhdfs/v1{0}?user.name={1}&op=CREATE&data=true&noredirect=true",
path, username));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod(HttpMethod.PUT);
conn.setRequestProperty("Content-Type", MediaType.APPLICATION_OCTET_STREAM);
conn.setDoOutput(true);
conn.connect();
assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
JSONObject json = (JSONObject) new JSONParser()
.parse(new InputStreamReader(conn.getInputStream()));
// get the location to write
String location = (String) json.get("Location");
assertTrue(location.contains(DataParam.NAME));
assertTrue(location.contains("CREATE"));
url = new URL(location);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod(HttpMethod.PUT);
conn.setRequestProperty("Content-Type", MediaType.APPLICATION_OCTET_STREAM);
conn.setDoOutput(true);
conn.connect();
final String writeStr = "write some content";
OutputStream os = conn.getOutputStream();
os.write(writeStr.getBytes());
os.close();
// Verify that file got created
assertEquals(HttpURLConnection.HTTP_CREATED, conn.getResponseCode());
json = (JSONObject) new JSONParser()
.parse(new InputStreamReader(conn.getInputStream()));
location = (String) json.get("Location");
assertEquals(TestJettyHelper.getJettyURL() + "/webhdfs/v1" + path,
location);
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testContentType() throws Exception {
createHttpFSServer(false, false);
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
Path dir = new Path("/tmp");
Path file = new Path(dir, "foo");
fs.mkdirs(dir);
fs.create(file);
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
"/webhdfs/v1/tmp/foo?user.name={0}&op=open&offset=1&length=2", user));
// test jsonParse with non-json type.
final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod(Operation.OPEN.getMethod());
conn.connect();
LambdaTestUtils.intercept(IOException.class,
"java.io.IOException: Server returned HTTP response code: 500 for URL",
() -> HttpFSUtils.jsonParse(conn));
conn.disconnect();
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testGetFileBlockLocations() throws Exception {
createHttpFSServer(false, false);
// Create a test directory
String pathStr = "/tmp/tmp-get-block-location-test";
createDirWithHttp(pathStr, "700", null);
Path path = new Path(pathStr);
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
.get(path.toUri(), TestHdfsHelper.getHdfsConf());
String file1 = pathStr + "/file1";
createWithHttp(file1, null);
HttpURLConnection conn = sendRequestToHttpFSServer(file1,
"GETFILEBLOCKLOCATIONS", "length=10&offset10");
assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
BlockLocation[] locations1 = dfs.getFileBlockLocations(new Path(file1), 0, 1);
assertNotNull(locations1);
Map<?, ?> jsonMap = JsonSerialization.mapReader().readValue(conn.getInputStream());
BlockLocation[] httpfsBlockLocations = JsonUtilClient.toBlockLocationArray(jsonMap);
assertEquals(locations1.length, httpfsBlockLocations.length);
for (int i = 0; i < locations1.length; i++) {
assertEquals(locations1[i].toString(), httpfsBlockLocations[i].toString());
}
conn.getInputStream().close();
}
}
|
to
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
|
{
"start": 29449,
"end": 31943
}
|
class ____ extends SubjectInheritingThread {
@Override
public void work() {
// Construct container resource increase request
List<Token> increaseTokens = new ArrayList<Token>();
// Add increase request.
Resource targetResource = Resource.newInstance(4096, 2);
try{
try {
updateBarrier.await();
increaseTokens.add(getContainerToken(targetResource, 1));
ContainerUpdateRequest updateRequest =
ContainerUpdateRequest.newInstance(increaseTokens);
ContainerUpdateResponse updateResponse =
getContainerManager()
.updateContainer(updateRequest);
assertEquals(1,
updateResponse.getSuccessfullyUpdatedContainers().size());
assertTrue(updateResponse.getFailedRequests().isEmpty());
} catch (Exception e) {
e.printStackTrace();
} finally {
updateBarrier.await();
}
} catch (Exception e) {
e.printStackTrace();
}
}
}
private Token getContainerToken(Resource resource) throws IOException {
ContainerId cId = TestContainerManager.createContainerId(0);
return TestContainerManager.createContainerToken(
cId, DUMMY_RM_IDENTIFIER,
getNMContext().getNodeId(), user, resource,
getNMContext().getContainerTokenSecretManager(), null);
}
private Token getContainerToken(Resource resource, int version)
throws IOException {
ContainerId cId = TestContainerManager.createContainerId(0);
return TestContainerManager.createContainerToken(
cId, version, DUMMY_RM_IDENTIFIER,
getNMContext().getNodeId(), user, resource,
getNMContext().getContainerTokenSecretManager(), null);
}
}
public static NMContainerStatus createNMContainerStatus(int id,
ContainerState containerState) {
ApplicationId applicationId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, 1);
ContainerId containerId = ContainerId.newContainerId(applicationAttemptId, id);
NMContainerStatus containerReport =
NMContainerStatus.newInstance(containerId, 0, containerState,
Resource.newInstance(1024, 1), "recover container", 0,
Priority.newInstance(10), 0);
return containerReport;
}
}
|
ContainerUpdateResourceThread
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/context/groovy/GroovyBeanDefinitionReaderTests.java
|
{
"start": 29844,
"end": 29994
}
|
class ____ {
Bean6(Map<String, Bean1> peopleByName) {
this.peopleByName = peopleByName;
}
Map<String, Bean1> peopleByName;
}
// a factory bean
|
Bean6
|
java
|
apache__camel
|
components/camel-thrift/src/main/java/org/apache/camel/component/thrift/ThriftProducer.java
|
{
"start": 1802,
"end": 9773
}
|
class ____ extends DefaultAsyncProducer implements AsyncProducer {
private static final Logger LOG = LoggerFactory.getLogger(ThriftProducer.class);
protected final ThriftConfiguration configuration;
protected final ThriftEndpoint endpoint;
private TTransport syncTransport;
private TNonblockingTransport asyncTransport;
private Object thriftClient;
public ThriftProducer(ThriftEndpoint endpoint, ThriftConfiguration configuration) {
super(endpoint);
this.endpoint = endpoint;
this.configuration = configuration;
}
@Override
public boolean process(Exchange exchange, AsyncCallback callback) {
Message message = exchange.getIn();
try {
ThriftUtils.invokeAsyncMethod(thriftClient, configuration.getMethod(), message.getBody(),
new AsyncClientMethodCallback(exchange, callback));
} catch (Exception e) {
if (e.getCause() instanceof TException) {
exchange.setException(e.getCause());
} else {
exchange.setException(e);
}
callback.done(true);
return true;
}
return false;
}
@Override
public void process(Exchange exchange) throws Exception {
Message message = exchange.getIn();
try {
Object outBody = ThriftUtils.invokeSyncMethod(thriftClient, configuration.getMethod(), message.getBody());
exchange.getMessage().setBody(outBody);
} catch (Exception e) {
if (e.getCause() instanceof TException) {
exchange.setException(e.getCause());
} else {
throw new Exception(e);
}
}
}
@Override
protected void doStart() throws Exception {
super.doStart();
if (configuration.getNegotiationType() == ThriftNegotiationType.SSL) {
if (!endpoint.isSynchronous()) {
throw new IllegalArgumentException("The SSL negotiation type requires to set syncronous communication mode");
}
if (syncTransport == null) {
initializeSslTransport();
LOG.info("Getting synchronous secured client implementation");
thriftClient = ThriftUtils.constructClientInstance(endpoint.getServicePackage(), endpoint.getServiceName(),
syncTransport, configuration.getExchangeProtocol(),
configuration.getNegotiationType(), configuration.getCompressionType(),
endpoint.getCamelContext());
}
} else if (endpoint.isSynchronous()) {
if (syncTransport == null) {
initializeSyncTransport();
LOG.info("Getting synchronous client implementation");
thriftClient = ThriftUtils.constructClientInstance(endpoint.getServicePackage(), endpoint.getServiceName(),
syncTransport, configuration.getExchangeProtocol(),
configuration.getNegotiationType(), configuration.getCompressionType(),
endpoint.getCamelContext());
}
} else {
if (asyncTransport == null) {
initializeAsyncTransport();
LOG.info("Getting asynchronous client implementation");
thriftClient = ThriftUtils.constructAsyncClientInstance(endpoint.getServicePackage(), endpoint.getServiceName(),
asyncTransport,
configuration.getExchangeProtocol(), endpoint.getCamelContext());
}
}
}
@Override
protected void doStop() throws Exception {
if (syncTransport != null) {
LOG.debug("Terminating synchronous transport the remote Thrift server");
syncTransport.close();
syncTransport = null;
} else if (asyncTransport != null) {
LOG.debug("Terminating asynchronous transport the remote Thrift server");
asyncTransport.close();
asyncTransport = null;
}
super.doStop();
}
protected void initializeSyncTransport() throws TTransportException {
if (!ObjectHelper.isEmpty(configuration.getHost()) && !ObjectHelper.isEmpty(configuration.getPort())) {
LOG.info("Creating transport to the remote Thrift server {}:{}", configuration.getHost(), configuration.getPort());
syncTransport = new TSocket(configuration.getHost(), configuration.getPort());
} else {
throw new IllegalArgumentException("No connection properties (host, port) specified");
}
syncTransport.open();
}
protected void initializeAsyncTransport() throws IOException, TTransportException {
if (!ObjectHelper.isEmpty(configuration.getHost()) && !ObjectHelper.isEmpty(configuration.getPort())) {
LOG.info("Creating transport to the remote Thrift server {}:{}", configuration.getHost(), configuration.getPort());
asyncTransport = new TNonblockingSocket(configuration.getHost(), configuration.getPort());
} else {
throw new IllegalArgumentException("No connection properties (host, port) specified");
}
}
protected void initializeSslTransport() throws TTransportException, IOException {
if (!ObjectHelper.isEmpty(configuration.getHost()) && !ObjectHelper.isEmpty(configuration.getPort())) {
SSLContextParameters sslParameters = configuration.getSslParameters();
if (sslParameters == null) {
throw new IllegalArgumentException(
"SSL parameters must be initialized if negotiation type is set to "
+ configuration.getNegotiationType());
}
ObjectHelper.notNull(sslParameters.getSecureSocketProtocol(), "Security protocol");
ObjectHelper.notNull(sslParameters.getTrustManagers().getKeyStore().getResource(), "Trust store path");
ObjectHelper.notNull(sslParameters.getTrustManagers().getKeyStore().getPassword(), "Trust store password");
LOG.info("Creating secured transport to the remote Thrift server {}:{}", configuration.getHost(),
configuration.getPort());
TSSLTransportFactory.TSSLTransportParameters sslParams;
sslParams = new TSSLTransportFactory.TSSLTransportParameters(
sslParameters.getSecureSocketProtocol(),
sslParameters.getCipherSuites() == null
? null
: sslParameters.getCipherSuites().getCipherSuite().stream().toArray(String[]::new));
if (ObjectHelper.isNotEmpty(sslParameters.getTrustManagers().getProvider())
&& ObjectHelper.isNotEmpty(sslParameters.getTrustManagers().getKeyStore().getType())) {
sslParams.setTrustStore(
ResourceHelper.resolveResourceAsInputStream(endpoint.getCamelContext(),
sslParameters.getTrustManagers().getKeyStore().getResource()),
sslParameters.getTrustManagers().getKeyStore().getPassword(),
sslParameters.getTrustManagers().getProvider(),
sslParameters.getTrustManagers().getKeyStore().getType());
} else {
sslParams.setTrustStore(sslParameters.getTrustManagers().getKeyStore().getResource(),
sslParameters.getTrustManagers().getKeyStore().getPassword());
}
syncTransport = TSSLTransportFactory.getClientSocket(configuration.getHost(), configuration.getPort(),
configuration.getClientTimeout(), sslParams);
} else {
throw new IllegalArgumentException("No connection properties (host, port) specified");
}
}
}
|
ThriftProducer
|
java
|
apache__flink
|
flink-yarn-tests/src/test/java/org/apache/flink/yarn/testjob/YarnTestJob.java
|
{
"start": 2125,
"end": 3324
}
|
class ____ implements Serializable {
private final String stopJobMarkerFile;
public static StopJobSignal usingMarkerFile(final Path stopJobMarkerFile) {
return new StopJobSignal(stopJobMarkerFile.toString());
}
private StopJobSignal(final String stopJobMarkerFile) {
this.stopJobMarkerFile = stopJobMarkerFile;
}
/** Signals that the job should stop. */
public void signal() {
try {
checkState(
Files.exists(Paths.get(stopJobMarkerFile)),
"Marker file is deleted before signal.");
Files.delete(Paths.get(stopJobMarkerFile));
} catch (final IOException e) {
throw new RuntimeException(e);
}
}
/** True if job should stop. */
public boolean isSignaled() {
return !Files.exists(Paths.get(stopJobMarkerFile));
}
}
// *************************************************************************
// USER FUNCTIONS
// *************************************************************************
private static final
|
StopJobSignal
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/mapper/blockloader/docvalues/DenseVectorBlockLoaderProcessor.java
|
{
"start": 830,
"end": 1797
}
|
interface ____<B extends BlockLoader.Builder> {
/**
* Creates a builder for the given expected count.
* @param factory the block factory
* @param expectedCount the expected number of values
* @param dimensions the vector dimensions
* @return the builder
*/
B createBuilder(BlockLoader.BlockFactory factory, int expectedCount, int dimensions);
/**
* Process a float vector and append the result to the builder.
*/
void process(float[] vector, B builder) throws IOException;
/**
* Process a byte vector and append the result to the builder.
*/
void process(byte[] vector, B builder) throws IOException;
/**
* Appends a null value to the builder.
*/
default void appendNull(B builder) {
builder.appendNull();
}
String name();
/**
* Processor that appends raw float vectors to a FloatBuilder as multi values.
*/
|
DenseVectorBlockLoaderProcessor
|
java
|
spring-projects__spring-security
|
web/src/main/java/org/springframework/security/web/firewall/FirewalledRequest.java
|
{
"start": 1105,
"end": 1854
}
|
class ____ extends HttpServletRequestWrapper {
/**
* Constructs a request object wrapping the given request.
* @throws IllegalArgumentException if the request is null
*/
public FirewalledRequest(HttpServletRequest request) {
super(request);
}
/**
* This method will be called once the request has passed through the security filter
* chain, when it is about to proceed to the application proper.
* <p>
* An implementation can thus choose to modify the state of the request for the
* security infrastructure, while still maintaining the original
* {@link HttpServletRequest}.
*/
public abstract void reset();
@Override
public String toString() {
return "FirewalledRequest[ " + getRequest() + "]";
}
}
|
FirewalledRequest
|
java
|
apache__kafka
|
streams/src/main/java/org/apache/kafka/streams/StoreQueryParameters.java
|
{
"start": 1055,
"end": 4911
}
|
class ____<T> {
private final Integer partition;
private final boolean staleStores;
private final String storeName;
private final QueryableStoreType<T> queryableStoreType;
protected StoreQueryParameters(final String storeName, final QueryableStoreType<T> queryableStoreType, final Integer partition, final boolean staleStores) {
this.storeName = storeName;
this.queryableStoreType = queryableStoreType;
this.partition = partition;
this.staleStores = staleStores;
}
public static <T> StoreQueryParameters<T> fromNameAndType(final String storeName,
final QueryableStoreType<T> queryableStoreType) {
return new StoreQueryParameters<>(storeName, queryableStoreType, null, false);
}
/**
* Set a specific partition that should be queried exclusively.
*
* @param partition The specific integer partition to be fetched from the stores list by using {@link StoreQueryParameters}.
*
* @return StoreQueryParameters a new {@code StoreQueryParameters} instance configured with the specified partition
*/
public StoreQueryParameters<T> withPartition(final Integer partition) {
return new StoreQueryParameters<>(storeName, queryableStoreType, partition, staleStores);
}
/**
* Enable querying of stale state stores, i.e., allow to query active tasks during restore as well as standby tasks.
*
* @return StoreQueryParameters a new {@code StoreQueryParameters} instance configured with serving from stale stores enabled
*/
public StoreQueryParameters<T> enableStaleStores() {
return new StoreQueryParameters<>(storeName, queryableStoreType, partition, true);
}
/**
* Get the name of the state store that should be queried.
*
* @return String state store name
*/
public String storeName() {
return storeName;
}
/**
* Get the queryable store type for which key is queried by the user.
*
* @return QueryableStoreType type of queryable store
*/
public QueryableStoreType<T> queryableStoreType() {
return queryableStoreType;
}
/**
* Get the store partition that will be queried.
* If the method returns {@code null}, it would mean that no specific partition has been requested,
* so all the local partitions for the store will be queried.
*
* @return Integer partition
*/
public Integer partition() {
return partition;
}
/**
* Get the flag staleStores. If {@code true}, include standbys and recovering stores along with running stores.
*
* @return boolean staleStores
*/
public boolean staleStoresEnabled() {
return staleStores;
}
@Override
public boolean equals(final Object obj) {
if (!(obj instanceof StoreQueryParameters)) {
return false;
}
final StoreQueryParameters<?> storeQueryParameters = (StoreQueryParameters<?>) obj;
return Objects.equals(storeQueryParameters.partition, partition)
&& Objects.equals(storeQueryParameters.staleStores, staleStores)
&& Objects.equals(storeQueryParameters.storeName, storeName)
&& Objects.equals(storeQueryParameters.queryableStoreType, queryableStoreType);
}
@Override
public String toString() {
return "StoreQueryParameters {" +
"partition=" + partition +
", staleStores=" + staleStores +
", storeName=" + storeName +
", queryableStoreType=" + queryableStoreType +
'}';
}
@Override
public int hashCode() {
return Objects.hash(partition, staleStores, storeName, queryableStoreType);
}
}
|
StoreQueryParameters
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/cglib/proxy/MethodInterceptorGenerator.java
|
{
"start": 1510,
"end": 10026
}
|
class ____
implements CallbackGenerator
{
public static final MethodInterceptorGenerator INSTANCE = new MethodInterceptorGenerator();
static final String EMPTY_ARGS_NAME = "CGLIB$emptyArgs";
static final String FIND_PROXY_NAME = "CGLIB$findMethodProxy";
static final Class[] FIND_PROXY_TYPES = { Signature.class };
private static final Type ABSTRACT_METHOD_ERROR =
TypeUtils.parseType("AbstractMethodError");
private static final Type METHOD =
TypeUtils.parseType("java.lang.reflect.Method");
private static final Type REFLECT_UTILS =
TypeUtils.parseType("org.springframework.cglib.core.ReflectUtils");
private static final Type METHOD_PROXY =
TypeUtils.parseType("org.springframework.cglib.proxy.MethodProxy");
private static final Type METHOD_INTERCEPTOR =
TypeUtils.parseType("org.springframework.cglib.proxy.MethodInterceptor");
private static final Signature GET_DECLARED_METHODS =
TypeUtils.parseSignature("java.lang.reflect.Method[] getDeclaredMethods()");
private static final Signature FIND_METHODS =
TypeUtils.parseSignature("java.lang.reflect.Method[] findMethods(String[], java.lang.reflect.Method[])");
private static final Signature MAKE_PROXY =
new Signature("create", METHOD_PROXY, new Type[]{
Constants.TYPE_CLASS,
Constants.TYPE_CLASS,
Constants.TYPE_STRING,
Constants.TYPE_STRING,
Constants.TYPE_STRING
});
private static final Signature INTERCEPT =
new Signature("intercept", Constants.TYPE_OBJECT, new Type[]{
Constants.TYPE_OBJECT,
METHOD,
Constants.TYPE_OBJECT_ARRAY,
METHOD_PROXY
});
private static final Signature FIND_PROXY =
new Signature(FIND_PROXY_NAME, METHOD_PROXY, new Type[]{ Constants.TYPE_SIGNATURE });
private static final Signature TO_STRING =
TypeUtils.parseSignature("String toString()");
private static final Transformer METHOD_TO_CLASS = value -> ((MethodInfo)value).getClassInfo();
private String getMethodField(Signature impl) {
return impl.getName() + "$Method";
}
private String getMethodProxyField(Signature impl) {
return impl.getName() + "$Proxy";
}
@Override
public void generate(ClassEmitter ce, Context context, List methods) {
Map sigMap = new HashMap();
for (Iterator it = methods.iterator(); it.hasNext();) {
MethodInfo method = (MethodInfo)it.next();
Signature sig = method.getSignature();
Signature impl = context.getImplSignature(method);
String methodField = getMethodField(impl);
String methodProxyField = getMethodProxyField(impl);
sigMap.put(sig.toString(), methodProxyField);
ce.declare_field(Constants.PRIVATE_FINAL_STATIC, methodField, METHOD, null);
ce.declare_field(Constants.PRIVATE_FINAL_STATIC, methodProxyField, METHOD_PROXY, null);
ce.declare_field(Constants.PRIVATE_FINAL_STATIC, EMPTY_ARGS_NAME, Constants.TYPE_OBJECT_ARRAY, null);
CodeEmitter e;
// access method
e = ce.begin_method(Constants.ACC_FINAL,
impl,
method.getExceptionTypes());
superHelper(e, method, context);
e.return_value();
e.end_method();
// around method
e = context.beginMethod(ce, method);
Label nullInterceptor = e.make_label();
context.emitCallback(e, context.getIndex(method));
e.dup();
e.ifnull(nullInterceptor);
e.load_this();
e.getfield(methodField);
if (sig.getArgumentTypes().length == 0) {
e.getfield(EMPTY_ARGS_NAME);
} else {
e.create_arg_array();
}
e.getfield(methodProxyField);
e.invoke_interface(METHOD_INTERCEPTOR, INTERCEPT);
e.unbox_or_zero(sig.getReturnType());
e.return_value();
e.mark(nullInterceptor);
superHelper(e, method, context);
e.return_value();
e.end_method();
}
generateFindProxy(ce, sigMap);
}
private static void superHelper(CodeEmitter e, MethodInfo method, Context context)
{
if (TypeUtils.isAbstract(method.getModifiers())) {
e.throw_exception(ABSTRACT_METHOD_ERROR, method.toString() + " is abstract" );
} else {
e.load_this();
context.emitLoadArgsAndInvoke(e, method);
}
}
@Override
public void generateStatic(CodeEmitter e, Context context, List methods) throws Exception {
/* generates:
static {
Class thisClass = Class.forName("NameOfThisClass");
Class cls = Class.forName("java.lang.Object");
String[] sigs = new String[]{ "toString", "()Ljava/lang/String;", ... };
Method[] methods = cls.getDeclaredMethods();
methods = ReflectUtils.findMethods(sigs, methods);
METHOD_0 = methods[0];
CGLIB$ACCESS_0 = MethodProxy.create(cls, thisClass, "()Ljava/lang/String;", "toString", "CGLIB$ACCESS_0");
...
}
*/
e.push(0);
e.newarray();
e.putfield(EMPTY_ARGS_NAME);
Local thisclass = e.make_local();
Local declaringclass = e.make_local();
EmitUtils.load_class_this(e);
e.store_local(thisclass);
Map methodsByClass = CollectionUtils.bucket(methods, METHOD_TO_CLASS);
for (Iterator i = methodsByClass.keySet().iterator(); i.hasNext();) {
ClassInfo classInfo = (ClassInfo)i.next();
List classMethods = (List)methodsByClass.get(classInfo);
e.push(2 * classMethods.size());
e.newarray(Constants.TYPE_STRING);
for (int index = 0; index < classMethods.size(); index++) {
MethodInfo method = (MethodInfo)classMethods.get(index);
Signature sig = method.getSignature();
e.dup();
e.push(2 * index);
e.push(sig.getName());
e.aastore();
e.dup();
e.push(2 * index + 1);
e.push(sig.getDescriptor());
e.aastore();
}
EmitUtils.load_class(e, classInfo.getType());
e.dup();
e.store_local(declaringclass);
e.invoke_virtual(Constants.TYPE_CLASS, GET_DECLARED_METHODS);
e.invoke_static(REFLECT_UTILS, FIND_METHODS);
for (int index = 0; index < classMethods.size(); index++) {
MethodInfo method = (MethodInfo)classMethods.get(index);
Signature sig = method.getSignature();
Signature impl = context.getImplSignature(method);
e.dup();
e.push(index);
e.array_load(METHOD);
e.putfield(getMethodField(impl));
e.load_local(declaringclass);
e.load_local(thisclass);
e.push(sig.getDescriptor());
e.push(sig.getName());
e.push(impl.getName());
e.invoke_static(METHOD_PROXY, MAKE_PROXY);
e.putfield(getMethodProxyField(impl));
}
e.pop();
}
}
public void generateFindProxy(ClassEmitter ce, final Map sigMap) {
final CodeEmitter e = ce.begin_method(Constants.ACC_PUBLIC | Constants.ACC_STATIC,
FIND_PROXY,
null);
e.load_arg(0);
e.invoke_virtual(Constants.TYPE_OBJECT, TO_STRING);
ObjectSwitchCallback callback = new ObjectSwitchCallback() {
@Override
public void processCase(Object key, Label end) {
e.getfield((String)sigMap.get(key));
e.return_value();
}
@Override
public void processDefault() {
e.aconst_null();
e.return_value();
}
};
EmitUtils.string_switch(e,
(String[])sigMap.keySet().toArray(new String[0]),
Constants.SWITCH_STYLE_HASH,
callback);
e.end_method();
}
}
|
MethodInterceptorGenerator
|
java
|
grpc__grpc-java
|
xds/src/main/java/io/grpc/xds/EnvoyServerProtoData.java
|
{
"start": 9460,
"end": 13680
}
|
class ____ {
@Nullable
abstract Long intervalNanos();
@Nullable
abstract Long baseEjectionTimeNanos();
@Nullable
abstract Long maxEjectionTimeNanos();
@Nullable
abstract Integer maxEjectionPercent();
@Nullable
abstract SuccessRateEjection successRateEjection();
@Nullable
abstract FailurePercentageEjection failurePercentageEjection();
static OutlierDetection create(
@Nullable Long intervalNanos,
@Nullable Long baseEjectionTimeNanos,
@Nullable Long maxEjectionTimeNanos,
@Nullable Integer maxEjectionPercentage,
@Nullable SuccessRateEjection successRateEjection,
@Nullable FailurePercentageEjection failurePercentageEjection) {
return new AutoValue_EnvoyServerProtoData_OutlierDetection(intervalNanos,
baseEjectionTimeNanos, maxEjectionTimeNanos, maxEjectionPercentage, successRateEjection,
failurePercentageEjection);
}
static OutlierDetection fromEnvoyOutlierDetection(
io.envoyproxy.envoy.config.cluster.v3.OutlierDetection envoyOutlierDetection) {
Long intervalNanos = envoyOutlierDetection.hasInterval()
? Durations.toNanos(envoyOutlierDetection.getInterval()) : null;
Long baseEjectionTimeNanos = envoyOutlierDetection.hasBaseEjectionTime()
? Durations.toNanos(envoyOutlierDetection.getBaseEjectionTime()) : null;
Long maxEjectionTimeNanos = envoyOutlierDetection.hasMaxEjectionTime()
? Durations.toNanos(envoyOutlierDetection.getMaxEjectionTime()) : null;
Integer maxEjectionPercentage = envoyOutlierDetection.hasMaxEjectionPercent()
? envoyOutlierDetection.getMaxEjectionPercent().getValue() : null;
SuccessRateEjection successRateEjection;
// If success rate enforcement has been turned completely off, don't configure this ejection.
if (envoyOutlierDetection.hasEnforcingSuccessRate()
&& envoyOutlierDetection.getEnforcingSuccessRate().getValue() == 0) {
successRateEjection = null;
} else {
Integer stdevFactor = envoyOutlierDetection.hasSuccessRateStdevFactor()
? envoyOutlierDetection.getSuccessRateStdevFactor().getValue() : null;
Integer enforcementPercentage = envoyOutlierDetection.hasEnforcingSuccessRate()
? envoyOutlierDetection.getEnforcingSuccessRate().getValue() : null;
Integer minimumHosts = envoyOutlierDetection.hasSuccessRateMinimumHosts()
? envoyOutlierDetection.getSuccessRateMinimumHosts().getValue() : null;
Integer requestVolume = envoyOutlierDetection.hasSuccessRateRequestVolume()
? envoyOutlierDetection.getSuccessRateRequestVolume().getValue() : null;
successRateEjection = SuccessRateEjection.create(stdevFactor, enforcementPercentage,
minimumHosts, requestVolume);
}
FailurePercentageEjection failurePercentageEjection;
if (envoyOutlierDetection.hasEnforcingFailurePercentage()
&& envoyOutlierDetection.getEnforcingFailurePercentage().getValue() == 0) {
failurePercentageEjection = null;
} else {
Integer threshold = envoyOutlierDetection.hasFailurePercentageThreshold()
? envoyOutlierDetection.getFailurePercentageThreshold().getValue() : null;
Integer enforcementPercentage = envoyOutlierDetection.hasEnforcingFailurePercentage()
? envoyOutlierDetection.getEnforcingFailurePercentage().getValue() : null;
Integer minimumHosts = envoyOutlierDetection.hasFailurePercentageMinimumHosts()
? envoyOutlierDetection.getFailurePercentageMinimumHosts().getValue() : null;
Integer requestVolume = envoyOutlierDetection.hasFailurePercentageRequestVolume()
? envoyOutlierDetection.getFailurePercentageRequestVolume().getValue() : null;
failurePercentageEjection = FailurePercentageEjection.create(threshold,
enforcementPercentage, minimumHosts, requestVolume);
}
return create(intervalNanos, baseEjectionTimeNanos, maxEjectionTimeNanos,
maxEjectionPercentage, successRateEjection, failurePercentageEjection);
}
}
@AutoValue
abstract static
|
OutlierDetection
|
java
|
netty__netty
|
codec-mqtt/src/test/java/io/netty/handler/codec/mqtt/MqttTestUtils.java
|
{
"start": 3127,
"end": 10671
}
|
class ____ {
private MqttTestUtils() {
}
public static void validateProperties(MqttProperties expected, MqttProperties actual) {
for (MqttProperties.MqttProperty expectedProperty : expected.listAll()) {
int propertyId = expectedProperty.propertyId;
MqttProperties.MqttProperty actualProperty = actual.getProperty(propertyId);
List<? extends MqttProperties.MqttProperty> actualProperties =
actual.getProperties(propertyId);
switch (propertyId) {
// one byte value integer property
case PAYLOAD_FORMAT_INDICATOR:
case REQUEST_PROBLEM_INFORMATION:
case REQUEST_RESPONSE_INFORMATION:
case MAXIMUM_QOS:
case RETAIN_AVAILABLE:
case WILDCARD_SUBSCRIPTION_AVAILABLE:
case SUBSCRIPTION_IDENTIFIER_AVAILABLE:
case SHARED_SUBSCRIPTION_AVAILABLE: {
final Integer expectedValue = ((MqttProperties.IntegerProperty) expectedProperty).value;
final Integer actualValue = ((MqttProperties.IntegerProperty) actualProperty).value;
assertEquals(expectedValue, actualValue, "one byte property doesn't match");
break;
}
// two byte value integer property
case SERVER_KEEP_ALIVE:
case RECEIVE_MAXIMUM:
case TOPIC_ALIAS_MAXIMUM:
case TOPIC_ALIAS: {
final Integer expectedValue = ((MqttProperties.IntegerProperty) expectedProperty).value;
final Integer actualValue = ((MqttProperties.IntegerProperty) actualProperty).value;
assertEquals(expectedValue, actualValue, "two byte property doesn't match");
break;
}
// four byte value integer property
case PUBLICATION_EXPIRY_INTERVAL:
case SESSION_EXPIRY_INTERVAL:
case WILL_DELAY_INTERVAL:
case MAXIMUM_PACKET_SIZE: {
final Integer expectedValue = ((MqttProperties.IntegerProperty) expectedProperty).value;
final Integer actualValue = ((MqttProperties.IntegerProperty) actualProperty).value;
assertEquals(expectedValue, actualValue, "four byte property doesn't match");
break;
}
// four byte value integer property
case SUBSCRIPTION_IDENTIFIER: {
final Integer expectedValue = ((MqttProperties.IntegerProperty) expectedProperty).value;
assertContainsValue("Subscription ID doesn't match", expectedValue, actualProperties);
break;
}
// UTF-8 string value integer property
case CONTENT_TYPE:
case RESPONSE_TOPIC:
case ASSIGNED_CLIENT_IDENTIFIER:
case AUTHENTICATION_METHOD:
case RESPONSE_INFORMATION:
case SERVER_REFERENCE:
case REASON_STRING: {
final String expectedValue = ((MqttProperties.StringProperty) expectedProperty).value;
final String actualValue = ((MqttProperties.StringProperty) actualProperty).value;
assertEquals(expectedValue, actualValue, "String property doesn't match");
break;
}
// User property
case USER_PROPERTY: {
final List<MqttProperties.StringPair> expectedPairs =
((MqttProperties.UserProperties) expectedProperty).value;
final List<MqttProperties.StringPair> actualPairs =
((MqttProperties.UserProperties) actualProperty).value;
assertEquals(expectedPairs, actualPairs, "User properties count doesn't match");
for (int i = 0; i < expectedPairs.size(); i++) {
assertEquals(expectedPairs.get(i), actualPairs.get(i), "User property mismatch");
}
break;
}
// byte[] property
case CORRELATION_DATA:
case AUTHENTICATION_DATA: {
final byte[] expectedValue = ((MqttProperties.BinaryProperty) expectedProperty).value;
final byte[] actualValue = ((MqttProperties.BinaryProperty) actualProperty).value;
final String expectedHexDump = ByteBufUtil.hexDump(expectedValue);
final String actualHexDump = ByteBufUtil.hexDump(actualValue);
assertEquals(expectedHexDump, actualHexDump, "byte[] property doesn't match");
break;
}
default:
fail("Property Id not recognized " + Integer.toHexString(propertyId));
}
}
for (MqttProperties.MqttProperty actualProperty : actual.listAll()) {
MqttProperties.MqttProperty expectedProperty = expected.getProperty(actualProperty.propertyId);
assertNotNull(expectedProperty, "Property " + actualProperty.propertyId + " not expected");
}
}
private static void assertContainsValue(String message,
Integer expectedValue,
List<? extends MqttProperties.MqttProperty> properties) {
for (MqttProperties.MqttProperty property: properties) {
if (property instanceof MqttProperties.IntegerProperty &&
((MqttProperties.IntegerProperty) property).value == expectedValue) {
return;
}
}
fail(message + " - properties didn't contain expected integer value " + expectedValue + ": " + properties);
}
public static void validateSubscribePayload(MqttSubscribePayload expected, MqttSubscribePayload actual) {
List<MqttTopicSubscription> expectedTopicSubscriptions = expected.topicSubscriptions();
List<MqttTopicSubscription> actualTopicSubscriptions = actual.topicSubscriptions();
assertEquals(
expectedTopicSubscriptions.size(),
actualTopicSubscriptions.size(),
"MqttSubscribePayload TopicSubscriptionList size mismatch");
for (int i = 0; i < expectedTopicSubscriptions.size(); i++) {
validateTopicSubscription(expectedTopicSubscriptions.get(i), actualTopicSubscriptions.get(i));
}
}
public static void validateTopicSubscription(
MqttTopicSubscription expected,
MqttTopicSubscription actual) {
assertEquals(expected.topicName(), actual.topicName(), "MqttTopicSubscription TopicName mismatch");
assertEquals(
expected.qualityOfService(),
actual.qualityOfService(),
"MqttTopicSubscription Qos mismatch");
assertEquals(
expected.option(),
actual.option(),
"MqttTopicSubscription options mismatch");
}
public static void validateUnsubscribePayload(MqttUnsubscribePayload expected, MqttUnsubscribePayload actual) {
assertArrayEquals(
expected.topics().toArray(),
actual.topics().toArray(),
"MqttUnsubscribePayload TopicList mismatch");
}
}
|
MqttTestUtils
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/metamodel/mapping/internal/ToOneAttributeMapping.java
|
{
"start": 52494,
"end": 52587
}
|
class ____ {
@OneToOne(mappedBy = "level2Parent")
Level3 level3Child;
}
|
Level2
|
java
|
apache__spark
|
launcher/src/main/java/org/apache/spark/launcher/LauncherServer.java
|
{
"start": 3951,
"end": 10091
}
|
class ____ implements Closeable {
private static final Logger LOG = Logger.getLogger(LauncherServer.class.getName());
private static final String THREAD_NAME_FMT = "LauncherServer-%d";
private static final long DEFAULT_CONNECT_TIMEOUT = 10000L;
/** For creating secrets used for communication with child processes. */
private static final SecureRandom RND = new SecureRandom();
private static volatile LauncherServer serverInstance;
static synchronized LauncherServer getOrCreateServer() throws IOException {
LauncherServer server;
do {
server = serverInstance != null ? serverInstance : new LauncherServer();
} while (!server.running);
server.ref();
serverInstance = server;
return server;
}
// For testing.
static synchronized LauncherServer getServer() {
return serverInstance;
}
private final AtomicLong refCount;
private final AtomicLong threadIds;
private final ConcurrentMap<String, AbstractAppHandle> secretToPendingApps;
private final List<ServerConnection> clients;
private final ServerSocket server;
private final Thread serverThread;
private final ThreadFactory factory;
private final Timer timeoutTimer;
private volatile boolean running;
private LauncherServer() throws IOException {
this.refCount = new AtomicLong(0);
ServerSocket server = new ServerSocket();
try {
server.setReuseAddress(true);
server.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0));
this.clients = new ArrayList<>();
this.threadIds = new AtomicLong();
this.factory = new NamedThreadFactory(THREAD_NAME_FMT);
this.secretToPendingApps = new ConcurrentHashMap<>();
this.timeoutTimer = new Timer("LauncherServer-TimeoutTimer", true);
this.server = server;
this.running = true;
this.serverThread = factory.newThread(this::acceptConnections);
serverThread.start();
} catch (IOException ioe) {
close();
throw ioe;
} catch (Exception e) {
close();
throw new IOException(e);
}
}
/**
* Registers a handle with the server, and returns the secret the child app needs to connect
* back.
*/
synchronized String registerHandle(AbstractAppHandle handle) {
String secret = createSecret();
secretToPendingApps.put(secret, handle);
return secret;
}
@Override
public void close() throws IOException {
synchronized (this) {
if (!running) {
return;
}
running = false;
}
synchronized(LauncherServer.class) {
serverInstance = null;
}
timeoutTimer.cancel();
server.close();
synchronized (clients) {
List<ServerConnection> copy = new ArrayList<>(clients);
clients.clear();
for (ServerConnection client : copy) {
client.close();
}
}
if (serverThread != null) {
try {
serverThread.join();
} catch (InterruptedException ie) {
// no-op
}
}
}
void ref() {
refCount.incrementAndGet();
}
void unref() {
synchronized(LauncherServer.class) {
if (refCount.decrementAndGet() == 0) {
try {
close();
} catch (IOException ioe) {
// no-op.
}
}
}
}
int getPort() {
return server.getLocalPort();
}
/**
* Removes the client handle from the pending list (in case it's still there), and unrefs
* the server.
*/
void unregister(AbstractAppHandle handle) {
for (Map.Entry<String, AbstractAppHandle> e : secretToPendingApps.entrySet()) {
if (e.getValue().equals(handle)) {
String secret = e.getKey();
secretToPendingApps.remove(secret);
break;
}
}
unref();
}
private void acceptConnections() {
try {
while (running) {
final Socket client = server.accept();
TimerTask timerTask = new TimerTask() {
@Override
public void run() {
LOG.warning("Timed out waiting for hello message from client.");
try {
client.close();
} catch (IOException ioe) {
// no-op.
}
}
};
ServerConnection clientConnection = new ServerConnection(client, timerTask);
Thread clientThread = factory.newThread(clientConnection);
clientConnection.setConnectionThread(clientThread);
synchronized (clients) {
clients.add(clientConnection);
}
long timeoutMs = getConnectionTimeout();
// 0 is used for testing to avoid issues with clock resolution / thread scheduling,
// and force an immediate timeout.
if (timeoutMs > 0) {
timeoutTimer.schedule(timerTask, timeoutMs);
} else {
timerTask.run();
}
clientThread.start();
}
} catch (IOException ioe) {
if (running) {
LOG.log(Level.SEVERE, "Error in accept loop.", ioe);
}
}
}
private long getConnectionTimeout() {
String value = SparkLauncher.launcherConfig.get(SparkLauncher.CHILD_CONNECTION_TIMEOUT);
if (value != null) {
return Long.parseLong(value);
}
value = SparkLauncher.launcherConfig.get(SparkLauncher.DEPRECATED_CHILD_CONNECTION_TIMEOUT);
if (value != null) {
LOG.log(Level.WARNING,
"Property '" + SparkLauncher.DEPRECATED_CHILD_CONNECTION_TIMEOUT +
"' is deprecated, please switch to '" + SparkLauncher.CHILD_CONNECTION_TIMEOUT +
"'.");
return Long.parseLong(value);
}
return DEFAULT_CONNECT_TIMEOUT;
}
private String createSecret() {
while (true) {
byte[] secret = new byte[128];
RND.nextBytes(secret);
StringBuilder sb = new StringBuilder();
for (byte b : secret) {
int ival = b >= 0 ? b : Byte.MAX_VALUE - b;
if (ival < 0x10) {
sb.append("0");
}
sb.append(Integer.toHexString(ival));
}
String secretStr = sb.toString();
if (!secretToPendingApps.containsKey(secretStr)) {
return secretStr;
}
}
}
|
LauncherServer
|
java
|
google__guava
|
android/guava-tests/test/com/google/common/collect/ImmutableSortedMapSubMapMapInterfaceTest.java
|
{
"start": 801,
"end": 1252
}
|
class ____
extends AbstractImmutableSortedMapMapInterfaceTest<String, Integer> {
@Override
protected SortedMap<String, Integer> makePopulatedMap() {
return ImmutableSortedMap.of("a", 1, "b", 2, "c", 3, "d", 4, "e", 5).subMap("b", "d");
}
@Override
protected String getKeyNotInPopulatedMap() {
return "a";
}
@Override
protected Integer getValueNotInPopulatedMap() {
return 4;
}
}
|
ImmutableSortedMapSubMapMapInterfaceTest
|
java
|
quarkusio__quarkus
|
integration-tests/opentelemetry-jdbc-instrumentation/src/main/java/io/quarkus/it/opentelemetry/PingPongResource.java
|
{
"start": 717,
"end": 1817
}
|
class ____ {
@Transactional
@POST
@Produces(MediaType.APPLICATION_JSON)
@Path("/hit/{tenant}")
public Hit createHit(@QueryParam("id") Long id, @PathParam("tenant") String tenant) {
switch (tenant) {
case "postgresql":
persist(PgHit::new, id);
return PgHit.findById(id);
case "oracle":
persist(OracleHit::new, id);
return OracleHit.findById(id);
case "mariadb":
persist(MariaDbHit::new, id);
return MariaDbHit.findById(id);
case "db2":
persist(Db2Hit::new, id);
return Db2Hit.findById(id);
case "h2":
persist(H2Hit::new, id);
return H2Hit.findById(id);
default:
throw new IllegalArgumentException();
}
}
private void persist(Supplier<Hit> hitSupplier, Long id) {
Hit hit = hitSupplier.get();
hit.setId(id);
hit.setMessage("Hit message.");
hit.persist();
}
}
|
PingPongResource
|
java
|
google__guava
|
guava-gwt/src-super/com/google/common/collect/super/com/google/common/collect/ImmutableEnumMap.java
|
{
"start": 924,
"end": 1342
}
|
class ____<K, V> extends ForwardingImmutableMap<K, V> {
static <K, V> ImmutableMap<K, V> asImmutable(Map<K, V> map) {
for (Entry<K, V> entry : checkNotNull(map).entrySet()) {
checkNotNull(entry.getKey());
checkNotNull(entry.getValue());
}
return new ImmutableEnumMap<K, V>(map);
}
private ImmutableEnumMap(Map<? extends K, ? extends V> delegate) {
super(delegate);
}
}
|
ImmutableEnumMap
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/annotations/embeddables/collection/AbstractEmbeddableWithManyToManyTest.java
|
{
"start": 1265,
"end": 1492
}
|
class ____ is contained in an '@ElementCollection' and may not be"
) );
}
}
protected void addAnnotatedClasses(MetadataSources metadataSources){
}
protected void addResources(MetadataSources metadataSources){
}
}
|
that
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/serializer/stream/StreamWriterTest_writeChar1.java
|
{
"start": 203,
"end": 824
}
|
class ____ extends TestCase {
public void test_0() throws Exception {
StringWriter out = new StringWriter();
SerializeWriter writer = new SerializeWriter(out, 10);
Assert.assertEquals(10, writer.getBufferLength());
for (int ch = 'a'; ch <= 'z'; ++ch) {
writer.write(ch);
}
writer.close();
String text = out.toString();
Assert.assertEquals(26, text.length());
for (int i = 0; i < 26; ++i) {
Assert.assertEquals(text.charAt(i), (char)('a' + i));
}
}
}
|
StreamWriterTest_writeChar1
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java
|
{
"start": 2715,
"end": 8508
}
|
class ____ {
public static final Logger LOG = LoggerFactory.getLogger(
TestWebHDFSOAuth2.class);
private ClientAndServer mockWebHDFS;
private ClientAndServer mockOAuthServer;
public final static int WEBHDFS_PORT = 7552;
public final static int OAUTH_PORT = 7553;
public final static Header CONTENT_TYPE_APPLICATION_JSON = new Header("Content-Type", "application/json");
public final static String AUTH_TOKEN = "0123456789abcdef";
public final static Header AUTH_TOKEN_HEADER = new Header("AUTHORIZATION", OAuth2ConnectionConfigurator.HEADER + AUTH_TOKEN);
@BeforeEach
public void startMockOAuthServer() {
mockOAuthServer = startClientAndServer(OAUTH_PORT);
}
@BeforeEach
public void startMockWebHDFSServer() {
System.setProperty("hadoop.home.dir", System.getProperty("user.dir"));
mockWebHDFS = startClientAndServer(WEBHDFS_PORT);
}
@Test
public void listStatusReturnsAsExpected() throws URISyntaxException, IOException {
MockServerClient mockWebHDFSServerClient = new MockServerClient("localhost", WEBHDFS_PORT);
MockServerClient mockOAuthServerClient = new MockServerClient("localhost", OAUTH_PORT);
HttpRequest oauthServerRequest = getOAuthServerMockRequest(mockOAuthServerClient);
HttpRequest fileSystemRequest = request()
.withMethod("GET")
.withPath(WebHdfsFileSystem.PATH_PREFIX + "/test1/test2")
.withHeader(AUTH_TOKEN_HEADER);
try {
mockWebHDFSServerClient.when(fileSystemRequest,
exactly(1)
)
.respond(
response()
.withStatusCode(HttpStatus.SC_OK)
.withHeaders(
CONTENT_TYPE_APPLICATION_JSON
)
.withBody("{\n" +
" \"FileStatuses\":\n" +
" {\n" +
" \"FileStatus\":\n" +
" [\n" +
" {\n" +
" \"accessTime\" : 1320171722771,\n" +
" \"blockSize\" : 33554432,\n" +
" \"group\" : \"supergroup\",\n" +
" \"length\" : 24930,\n" +
" \"modificationTime\": 1320171722771,\n" +
" \"owner\" : \"webuser\",\n" +
" \"pathSuffix\" : \"a.patch\",\n" +
" \"permission\" : \"644\",\n" +
" \"replication\" : 1,\n" +
" \"type\" : \"FILE\"\n" +
" },\n" +
" {\n" +
" \"accessTime\" : 0,\n" +
" \"blockSize\" : 0,\n" +
" \"group\" : \"supergroup\",\n" +
" \"length\" : 0,\n" +
" \"modificationTime\": 1320895981256,\n" +
" \"owner\" : \"szetszwo\",\n" +
" \"pathSuffix\" : \"bar\",\n" +
" \"permission\" : \"711\",\n" +
" \"replication\" : 0,\n" +
" \"type\" : \"DIRECTORY\"\n" +
" }\n" +
" ]\n" +
" }\n" +
"}\n")
);
FileSystem fs = new WebHdfsFileSystem();
Configuration conf = getConfiguration();
conf.set(OAUTH_REFRESH_URL_KEY, "http://localhost:" + OAUTH_PORT + "/refresh");
conf.set(CredentialBasedAccessTokenProvider.OAUTH_CREDENTIAL_KEY, "credential");
URI uri = new URI("webhdfs://localhost:" + WEBHDFS_PORT);
fs.initialize(uri, conf);
FileStatus[] ls = fs.listStatus(new Path("/test1/test2"));
mockOAuthServer.verify(oauthServerRequest);
mockWebHDFSServerClient.verify(fileSystemRequest);
assertEquals(2, ls.length);
assertEquals("a.patch", ls[0].getPath().getName());
assertEquals("bar", ls[1].getPath().getName());
fs.close();
} finally {
mockWebHDFSServerClient.clear(fileSystemRequest);
mockOAuthServerClient.clear(oauthServerRequest);
}
}
private HttpRequest getOAuthServerMockRequest(MockServerClient mockServerClient) throws IOException {
HttpRequest expectedRequest = request()
.withMethod("POST")
.withPath("/refresh")
.withBody("client_secret=credential&grant_type=client_credentials&client_id=MY_CLIENTID");
Map<String, Object> map = new TreeMap<>();
map.put(EXPIRES_IN, "0987654321");
map.put(TOKEN_TYPE, "bearer");
map.put(ACCESS_TOKEN, AUTH_TOKEN);
ObjectMapper mapper = new ObjectMapper();
HttpResponse resp = response()
.withStatusCode(HttpStatus.SC_OK)
.withHeaders(
CONTENT_TYPE_APPLICATION_JSON
)
.withBody(mapper.writeValueAsString(map));
mockServerClient
.when(expectedRequest, exactly(1))
.respond(resp);
return expectedRequest;
}
public Configuration getConfiguration() {
Configuration conf = new Configuration();
// Configs for OAuth2
conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_OAUTH_ENABLED_KEY, true);
conf.set(OAUTH_CLIENT_ID_KEY, "MY_CLIENTID");
conf.set(ACCESS_TOKEN_PROVIDER_KEY,
ConfCredentialBasedAccessTokenProvider.class.getName());
return conf;
}
@AfterEach
public void stopMockWebHDFSServer() {
mockWebHDFS.stop();
}
@AfterEach
public void stopMockOAuthServer() {
mockOAuthServer.stop();
}
}
|
TestWebHDFSOAuth2
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/annotation/web/configurers/CsrfConfigurerTests.java
|
{
"start": 37661,
"end": 38176
}
|
class ____ {
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.authorizeHttpRequests((requests) -> requests
.anyRequest().permitAll())
.formLogin(withDefaults())
.httpBasic(withDefaults());
// @formatter:on
return http.build();
}
@Bean
UserDetailsService userDetailsService() {
return new InMemoryUserDetailsManager(PasswordEncodedUser.user());
}
}
@Configuration
@EnableWebSecurity
static
|
DefaultDoesNotCreateSession
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/jsontype/TestOverlappingTypeIdNames.java
|
{
"start": 344,
"end": 649
}
|
class ____ extends DatabindTestUtil
{
@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
@JsonSubTypes({
@JsonSubTypes.Type(name = "a", value = Impl312.class),
@JsonSubTypes.Type(name = "b", value = Impl312.class)
})
static abstract
|
TestOverlappingTypeIdNames
|
java
|
spring-projects__spring-boot
|
buildpack/spring-boot-buildpack-platform/src/main/java/org/springframework/boot/buildpack/platform/build/Cache.java
|
{
"start": 1028,
"end": 1090
}
|
class ____ {
/**
* The format of the cache.
*/
public
|
Cache
|
java
|
google__auto
|
value/src/test/java/com/google/auto/value/processor/AutoValueCompilationTest.java
|
{
"start": 38065,
"end": 38772
}
|
class ____ {",
" public abstract Map<Set<?>, MissingType<?>> missingType();",
"}");
Compilation compilation =
javac().withProcessors(new AutoValueProcessor()).compile(javaFileObject);
assertThat(compilation)
.hadErrorContaining("MissingType")
.inFile(javaFileObject)
.onLineContaining("MissingType");
}
@Test
public void missingSuperclassGenericParameter() {
JavaFileObject javaFileObject =
JavaFileObjects.forSourceLines(
"foo.bar.Baz",
"package foo.bar;",
"",
"import com.google.auto.value.AutoValue;",
"",
"@AutoValue",
"public abstract
|
Baz
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Params.java
|
{
"start": 1149,
"end": 1349
}
|
interface ____ {
static final String TITLE = "title";
static final String TITLE_LINK = "title.href";
static final String USER = "user";
static final String ERROR_DETAILS = "error.details";
}
|
Params
|
java
|
apache__camel
|
components/camel-aws/camel-aws2-ddb/src/test/java/org/apache/camel/component/aws2/ddb/localstack/AWS2PutItemRuleIT.java
|
{
"start": 1537,
"end": 3303
}
|
class ____ extends Aws2DDBBase {
@EndpointInject("direct:start")
private ProducerTemplate template;
private final String attributeName = "clave";
private final String tableName = "TestTable";
@Test
public void putItem() {
final Map<String, AttributeValue> attributeMap = new HashMap<>();
AttributeValue attributeValue = AttributeValue.builder().s("hello").build();
attributeMap.put(attributeName, attributeValue);
attributeMap.put("secondary_attribute", AttributeValue.builder().s("value").build());
Exchange exchange = template.send("direct:start", new Processor() {
public void process(Exchange exchange) {
exchange.getIn().setHeader(Ddb2Constants.OPERATION, Ddb2Operations.PutItem);
exchange.getIn().setHeader(Ddb2Constants.CONSISTENT_READ, "true");
exchange.getIn().setHeader(Ddb2Constants.RETURN_VALUES, "ALL_OLD");
exchange.getIn().setHeader(Ddb2Constants.ITEM, attributeMap);
exchange.getIn().setHeader(Ddb2Constants.ATTRIBUTE_NAMES, attributeMap.keySet());
}
});
assertNotNull(exchange.getIn().getHeader(Ddb2Constants.ITEM));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").to(
"aws2-ddb://" + tableName + "?keyAttributeName=" + attributeName + "&keyAttributeType=" + KeyType.HASH
+ "&keyScalarType=" + ScalarAttributeType.S
+ "&readCapacity=1&writeCapacity=1");
}
};
}
}
|
AWS2PutItemRuleIT
|
java
|
quarkusio__quarkus
|
extensions/resteasy-classic/resteasy/deployment/src/test/java/io/quarkus/resteasy/test/root/ApplicationTest.java
|
{
"start": 4402,
"end": 4626
}
|
class ____ {
@GET
@Path("ok-1")
public abstract String ok1();
@GET
@Path("ok-2")
public String ok2() {
return "ok-a";
}
}
public static
|
AResourceTest
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/CheckpointStatsSnapshot.java
|
{
"start": 1033,
"end": 3604
}
|
class ____ implements Serializable {
private static final long serialVersionUID = 8914278419087217964L;
/** Snapshot of the checkpoint counts. */
private final CheckpointStatsCounts counts;
/** Snapshot of the completed checkpoints summary stats. */
private final CompletedCheckpointStatsSummarySnapshot summary;
/** Snapshot of the checkpoint history. */
private final CheckpointStatsHistory history;
/** The latest restored checkpoint operation. */
@Nullable private final RestoredCheckpointStats latestRestoredCheckpoint;
public static CheckpointStatsSnapshot empty() {
return new CheckpointStatsSnapshot(
new CheckpointStatsCounts(),
CompletedCheckpointStatsSummarySnapshot.empty(),
new CheckpointStatsHistory(0),
null);
}
/**
* Creates a stats snapshot.
*
* @param counts Snapshot of the checkpoint counts.
* @param summary Snapshot of the completed checkpoints summary stats.
* @param history Snapshot of the checkpoint history.
* @param latestRestoredCheckpoint The latest restored checkpoint operation.
*/
CheckpointStatsSnapshot(
CheckpointStatsCounts counts,
CompletedCheckpointStatsSummarySnapshot summary,
CheckpointStatsHistory history,
@Nullable RestoredCheckpointStats latestRestoredCheckpoint) {
this.counts = checkNotNull(counts);
this.summary = checkNotNull(summary);
this.history = checkNotNull(history);
this.latestRestoredCheckpoint = latestRestoredCheckpoint;
}
/**
* Returns the snapshotted checkpoint counts.
*
* @return Snapshotted checkpoint counts.
*/
public CheckpointStatsCounts getCounts() {
return counts;
}
/**
* Returns the snapshotted completed checkpoint summary stats.
*
* @return Snapshotted completed checkpoint summary stats.
*/
public CompletedCheckpointStatsSummarySnapshot getSummaryStats() {
return summary;
}
/**
* Returns the snapshotted checkpoint history.
*
* @return Snapshotted checkpoint history.
*/
public CheckpointStatsHistory getHistory() {
return history;
}
/**
* Returns the latest restored checkpoint.
*
* @return Latest restored checkpoint or <code>null</code>.
*/
@Nullable
public RestoredCheckpointStats getLatestRestoredCheckpoint() {
return latestRestoredCheckpoint;
}
}
|
CheckpointStatsSnapshot
|
java
|
spring-projects__spring-boot
|
test-support/spring-boot-test-support/src/main/java/org/springframework/boot/testsupport/junit/DisabledOnOs.java
|
{
"start": 1301,
"end": 1484
}
|
class ____ method should be disabled.
* @return the operating systems where the test is disabled
*/
OS[] value() default {};
/**
* The operating systems on which the annotated
|
or
|
java
|
quarkusio__quarkus
|
extensions/smallrye-fault-tolerance/deployment/src/test/java/io/quarkus/smallrye/faulttolerance/test/ObjectBeanTest.java
|
{
"start": 363,
"end": 663
}
|
class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest();
@Produces
@Named("namedObject")
Object object;
@Test
public void verifyFaultToleranceDoesNotBreak() {
// just make sure that the deployment succeeds
}
}
|
ObjectBeanTest
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.