language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializerSnapshotData.java | {
"start": 3394,
"end": 12866
} | class ____<T> {
private static final Logger LOG = LoggerFactory.getLogger(PojoSerializerSnapshotData.class);
// ---------------------------------------------------------------------------------------------
// Factory methods
// ---------------------------------------------------------------------------------------------
/**
* Creates a {@link PojoSerializerSnapshotData} from configuration of a {@link PojoSerializer}.
*
* <p>This factory method is meant to be used in regular write paths, i.e. when taking a
* snapshot of the {@link PojoSerializer}. All registered subclass classes, and non-registered
* subclass classes are all present. Some POJO fields may be absent, if the originating {@link
* PojoSerializer} was a restored one with already missing fields, and was never replaced by a
* new {@link PojoSerializer} (i.e. because the serialized old data was never accessed).
*/
static <T> PojoSerializerSnapshotData<T> createFrom(
Class<T> pojoClass,
Field[] fields,
TypeSerializer<?>[] fieldSerializers,
LinkedHashMap<Class<?>, TypeSerializer<?>> registeredSubclassSerializers,
Map<Class<?>, TypeSerializer<?>> nonRegisteredSubclassSerializers) {
final LinkedOptionalMap<Field, TypeSerializerSnapshot<?>> fieldSerializerSnapshots =
new LinkedOptionalMap<>(fields.length);
for (int i = 0; i < fields.length; i++) {
Field field = fields[i];
String fieldName = (field == null) ? getDummyNameForMissingField(i) : field.getName();
fieldSerializerSnapshots.put(
fieldName, field, fieldSerializers[i].snapshotConfiguration());
}
LinkedHashMap<Class<?>, TypeSerializerSnapshot<?>> registeredSubclassSerializerSnapshots =
CollectionUtil.newLinkedHashMapWithExpectedSize(
registeredSubclassSerializers.size());
registeredSubclassSerializers.forEach(
(k, v) -> registeredSubclassSerializerSnapshots.put(k, v.snapshotConfiguration()));
Map<Class<?>, TypeSerializerSnapshot<?>> nonRegisteredSubclassSerializerSnapshots =
CollectionUtil.newHashMapWithExpectedSize(nonRegisteredSubclassSerializers.size());
nonRegisteredSubclassSerializers.forEach(
(k, v) ->
nonRegisteredSubclassSerializerSnapshots.put(k, v.snapshotConfiguration()));
return new PojoSerializerSnapshotData<>(
pojoClass,
fieldSerializerSnapshots,
optionalMapOf(registeredSubclassSerializerSnapshots, Class::getName),
optionalMapOf(nonRegisteredSubclassSerializerSnapshots, Class::getName));
}
/**
* Creates a {@link PojoSerializerSnapshotData} from serialized data stream.
*
* <p>This factory method is meant to be used in regular read paths, i.e. when reading back a
* snapshot of the {@link PojoSerializer}. POJO fields, registered subclass classes, and
* non-registered subclass classes may no longer be present anymore.
*/
static <T> PojoSerializerSnapshotData<T> createFrom(
DataInputView in, ClassLoader userCodeClassLoader) throws IOException {
return PojoSerializerSnapshotData.readSnapshotData(in, userCodeClassLoader);
}
/**
* Creates a {@link PojoSerializerSnapshotData} from existing snapshotted configuration of a
* {@link PojoSerializer}.
*/
static <T> PojoSerializerSnapshotData<T> createFrom(
Class<T> pojoClass,
Field[] fields,
TypeSerializerSnapshot<?>[] existingFieldSerializerSnapshots,
LinkedHashMap<Class<?>, TypeSerializerSnapshot<?>>
existingRegisteredSubclassSerializerSnapshots,
Map<Class<?>, TypeSerializerSnapshot<?>>
existingNonRegisteredSubclassSerializerSnapshots) {
final LinkedOptionalMap<Field, TypeSerializerSnapshot<?>> fieldSerializerSnapshots =
new LinkedOptionalMap<>(fields.length);
for (int i = 0; i < fields.length; i++) {
Field field = fields[i];
String fieldName = (field == null) ? getDummyNameForMissingField(i) : field.getName();
fieldSerializerSnapshots.put(fieldName, field, existingFieldSerializerSnapshots[i]);
}
return new PojoSerializerSnapshotData<>(
pojoClass,
fieldSerializerSnapshots,
optionalMapOf(existingRegisteredSubclassSerializerSnapshots, Class::getName),
optionalMapOf(existingNonRegisteredSubclassSerializerSnapshots, Class::getName));
}
private Class<T> pojoClass;
private LinkedOptionalMap<Field, TypeSerializerSnapshot<?>> fieldSerializerSnapshots;
private LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>>
registeredSubclassSerializerSnapshots;
private LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>>
nonRegisteredSubclassSerializerSnapshots;
private PojoSerializerSnapshotData(
Class<T> typeClass,
LinkedOptionalMap<Field, TypeSerializerSnapshot<?>> fieldSerializerSnapshots,
LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>>
registeredSubclassSerializerSnapshots,
LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>>
nonRegisteredSubclassSerializerSnapshots) {
this.pojoClass = checkNotNull(typeClass);
this.fieldSerializerSnapshots = checkNotNull(fieldSerializerSnapshots);
this.registeredSubclassSerializerSnapshots =
checkNotNull(registeredSubclassSerializerSnapshots);
this.nonRegisteredSubclassSerializerSnapshots =
checkNotNull(nonRegisteredSubclassSerializerSnapshots);
}
// ---------------------------------------------------------------------------------------------
// Snapshot data read / write methods
// ---------------------------------------------------------------------------------------------
void writeSnapshotData(DataOutputView out) throws IOException {
out.writeUTF(pojoClass.getName());
writeOptionalMap(
out,
fieldSerializerSnapshots,
PojoFieldUtils::writeField,
TypeSerializerSnapshot::writeVersionedSnapshot);
writeOptionalMap(
out,
registeredSubclassSerializerSnapshots,
NoOpWriter.noopWriter(),
TypeSerializerSnapshot::writeVersionedSnapshot);
writeOptionalMap(
out,
nonRegisteredSubclassSerializerSnapshots,
NoOpWriter.noopWriter(),
TypeSerializerSnapshot::writeVersionedSnapshot);
}
private static <T> PojoSerializerSnapshotData<T> readSnapshotData(
DataInputView in, ClassLoader userCodeClassLoader) throws IOException {
Class<T> pojoClass = InstantiationUtil.resolveClassByName(in, userCodeClassLoader);
LinkedOptionalMap<Field, TypeSerializerSnapshot<?>> fieldSerializerSnapshots =
readOptionalMap(
in, fieldReader(userCodeClassLoader), snapshotReader(userCodeClassLoader));
LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>>
registeredSubclassSerializerSnapshots =
readOptionalMap(
in,
classReader(userCodeClassLoader),
snapshotReader(userCodeClassLoader));
LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>>
nonRegisteredSubclassSerializerSnapshots =
readOptionalMap(
in,
classReader(userCodeClassLoader),
snapshotReader(userCodeClassLoader));
return new PojoSerializerSnapshotData<>(
pojoClass,
fieldSerializerSnapshots,
registeredSubclassSerializerSnapshots,
nonRegisteredSubclassSerializerSnapshots);
}
// ---------------------------------------------------------------------------------------------
// Snapshot data accessors
// ---------------------------------------------------------------------------------------------
Class<T> getPojoClass() {
return pojoClass;
}
LinkedOptionalMap<Field, TypeSerializerSnapshot<?>> getFieldSerializerSnapshots() {
return fieldSerializerSnapshots;
}
LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>>
getRegisteredSubclassSerializerSnapshots() {
return registeredSubclassSerializerSnapshots;
}
LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>>
getNonRegisteredSubclassSerializerSnapshots() {
return nonRegisteredSubclassSerializerSnapshots;
}
// ---------------------------------------------------------------------------------------------
// Utilities
// ---------------------------------------------------------------------------------------------
private static String getDummyNameForMissingField(int fieldIndex) {
return String.format("missing-field-at-%d", fieldIndex);
}
private | PojoSerializerSnapshotData |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UnnecessaryQualifierTest.java | {
"start": 1879,
"end": 2149
} | class ____ {
// BUG: Diagnostic contains:
@Qual int x;
}
""")
.doTest();
}
@Test
public void unannotatedLocal() {
helper
.addSourceLines(
"Test.java",
"""
| Test |
java | apache__rocketmq | tieredstore/src/test/java/org/apache/rocketmq/tieredstore/file/FlatFileStoreTest.java | {
"start": 1689,
"end": 4346
} | class ____ {
private final String storePath = MessageStoreUtilTest.getRandomStorePath();
private MessageStoreConfig storeConfig;
private MetadataStore metadataStore;
@Before
public void init() {
storeConfig = new MessageStoreConfig();
storeConfig.setStorePathRootDir(storePath);
storeConfig.setTieredBackendServiceProvider(PosixFileSegment.class.getName());
storeConfig.setBrokerName("brokerName");
metadataStore = new DefaultMetadataStore(storeConfig);
}
@After
public void shutdown() throws IOException {
MessageStoreUtilTest.deleteStoreDirectory(storePath);
}
@Test
public void flatFileStoreTest() {
// Empty recover
MessageStoreExecutor executor = new MessageStoreExecutor();
FlatFileStore fileStore = new FlatFileStore(storeConfig, metadataStore, executor);
Assert.assertTrue(fileStore.load());
Assert.assertEquals(storeConfig, fileStore.getStoreConfig());
Assert.assertEquals(metadataStore, fileStore.getMetadataStore());
Assert.assertNotNull(fileStore.getFlatFileFactory());
for (int i = 0; i < 4; i++) {
MessageQueue mq = new MessageQueue("flatFileStoreTest", storeConfig.getBrokerName(), i);
FlatMessageFile flatFile = fileStore.computeIfAbsent(mq);
FlatMessageFile flatFileGet = fileStore.getFlatFile(mq);
Assert.assertEquals(flatFile, flatFileGet);
}
Assert.assertEquals(4, fileStore.deepCopyFlatFileToList().size());
fileStore.shutdown();
fileStore = new FlatFileStore(storeConfig, metadataStore, executor);
Assert.assertTrue(fileStore.load());
Assert.assertEquals(4, fileStore.deepCopyFlatFileToList().size());
for (int i = 1; i < 3; i++) {
MessageQueue mq = new MessageQueue("flatFileStoreTest", storeConfig.getBrokerName(), i);
fileStore.destroyFile(mq);
}
Assert.assertEquals(2, fileStore.deepCopyFlatFileToList().size());
fileStore.shutdown();
FlatFileStore fileStoreSpy = Mockito.spy(fileStore);
Mockito.when(fileStoreSpy.recoverAsync(any())).thenReturn(CompletableFuture.supplyAsync(() -> {
throw new TieredStoreException(TieredStoreErrorCode.ILLEGAL_PARAM, "Test");
}));
Assert.assertFalse(fileStoreSpy.load());
Mockito.reset(fileStoreSpy);
fileStore.load();
Assert.assertEquals(2, fileStore.deepCopyFlatFileToList().size());
fileStore.destroy();
Assert.assertEquals(0, fileStore.deepCopyFlatFileToList().size());
}
}
| FlatFileStoreTest |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/create/MySqlCreateViewTest4.java | {
"start": 1126,
"end": 3013
} | class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "create or replace definer = 'ivan'@'%' view my_view3 as select count(*) from t3;";
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLCreateViewStatement stmt = (SQLCreateViewStatement) statementList.get(0);
// print(statementList);
assertEquals(1, statementList.size());
MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
stmt.accept(visitor);
assertEquals("CREATE OR REPLACE DEFINER = 'ivan'@'%'\n" +
"\tVIEW my_view3\n" +
"AS\n" +
"SELECT count(*)\n" +
"FROM t3;", //
SQLUtils.toMySqlString(stmt));
assertEquals("create or replace definer = 'ivan'@'%'\n" +
"\tview my_view3\n" +
"as\n" +
"select count(*)\n" +
"from t3;", //
SQLUtils.toMySqlString(stmt, SQLUtils.DEFAULT_LCASE_FORMAT_OPTION));
// System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
// System.out.println("coditions : " + visitor.getConditions());
// System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(1, visitor.getTables().size());
assertEquals(1, visitor.getColumns().size());
assertEquals(0, visitor.getConditions().size());
assertTrue(visitor.getTables().containsKey(new TableStat.Name("t3")));
assertTrue(visitor.getColumns().contains(new Column("t3", "*")));
// assertTrue(visitor.getColumns().contains(new Column("t2", "l_suppkey")));
}
}
| MySqlCreateViewTest4 |
java | elastic__elasticsearch | x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/CheckpointException.java | {
"start": 359,
"end": 625
} | class ____ extends ElasticsearchException {
CheckpointException(String msg, Object... params) {
super(msg, params);
}
CheckpointException(String msg, Throwable cause, Object... params) {
super(msg, cause, params);
}
}
| CheckpointException |
java | elastic__elasticsearch | libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/NamedComponentScannerTests.java | {
"start": 3019,
"end": 4586
} | class ____ implements ExtensibleInterface{}
""")));
List<ClassReader> classReaderStream = Stream.concat(
ClassReaders.ofDirWithJars(dirWithJar).stream(),
ClassReaders.ofClassPath().stream()
)// contains plugin-api
.toList();
Map<String, Map<String, String>> namedComponents = NamedComponentScanner.scanForNamedClasses(classReaderStream);
org.hamcrest.MatcherAssert.assertThat(
namedComponents,
equalTo(
Map.of(
ExtensibleClass.class.getCanonicalName(),
Map.of("a_component", "p.A"),
ExtensibleInterface.class.getCanonicalName(),
Map.of(
"b_component",
"p.B",
// noise from classpath
"test_named_component",
"org.elasticsearch.plugin.scanner.test_model.TestNamedComponent"
)
)
)
);
// aggressively delete the jar dir, so that any leaked filed handles fail this specific test on windows
IOUtils.rm(tmp);
}
public void testNamedComponentsCanExtednCommonSuperClass() throws IOException {
Map<String, CharSequence> sources = Map.of(
"p.CustomExtensibleInterface",
"""
package p;
import org.elasticsearch.plugin.*;
import org.elasticsearch.plugin.scanner.test_model.*;
public | B |
java | apache__camel | core/camel-core/src/test/java/org/apache/camel/processor/DataFormatServiceTest.java | {
"start": 3034,
"end": 3757
} | class ____ extends ServiceSupport implements DataFormat, CamelContextAware {
private CamelContext camelContext;
@Override
public CamelContext getCamelContext() {
return camelContext;
}
@Override
public void setCamelContext(CamelContext camelContext) {
this.camelContext = camelContext;
}
@Override
public void marshal(Exchange exchange, Object graph, OutputStream stream) throws Exception {
stream.write("Hi Camel".getBytes());
}
@Override
public Object unmarshal(Exchange exchange, InputStream stream) {
return "Bye World";
}
}
// END SNIPPET: e2
}
| MyDataFormat |
java | quarkusio__quarkus | extensions/kubernetes/vanilla/deployment/src/main/java/io/quarkus/kubernetes/deployment/KubernetesConfigUtil.java | {
"start": 752,
"end": 5778
} | class ____ {
/**
* It should be the same name as in VertxHttpProcessor.kubernetesForManagement.
*/
public static final String MANAGEMENT_PORT_NAME = "management";
private static final String DEKORATE_PREFIX = "dekorate.";
/**
* Get the explicitly configured deployment target, if any.
* The explicit deployment target is determined using: {@code quarkus.kubernetes.deployment-target=<deployment-target>}
*/
public static Optional<String> getExplicitlyConfiguredDeploymentTarget() {
Config config = ConfigProvider.getConfig();
return config.getOptionalValue(DEPLOYMENT_TARGET, String.class);
}
/**
* @deprecated Use {@link #getExplicitlyConfiguredDeploymentTargets()} instead
*/
@Deprecated(forRemoval = true)
public static List<String> getExplictilyDeploymentTargets() {
return getExplicitlyConfiguredDeploymentTargets();
}
/**
* The explicitly configured deployment target list.
* The configured deployment targets are determined using: {@code quarkus.kubernetes.deployment-target=<deployment-target>}
*/
public static List<String> getExplicitlyConfiguredDeploymentTargets() {
return splitDeploymentTargets(getExplicitlyConfiguredDeploymentTarget());
}
private static List<String> splitDeploymentTargets(Optional<String> commaSeparatedDeploymentTargets) {
return commaSeparatedDeploymentTargets
.map(s -> Arrays.stream(s.split(","))
.map(String::trim)
.map(String::toLowerCase)
.collect(Collectors.toList()))
.orElse(Collections.emptyList());
}
/**
* Get the user configured deployment target, if any.
* The configured deployment target is determined using:
* <ol>
* <li>the value of {@code quarkus.kubernetes.deployment-target=<deployment-target>}</li>
* <li>the presence of {@code quarkus.<deployment-target>.deploy=true}</li>
* </ol>
*/
public static Optional<String> getConfiguredDeploymentTarget() {
return getExplicitlyConfiguredDeploymentTarget().or(DeploymentUtil::getEnabledDeployer);
}
/**
* @deprecated Use {@link #getConfiguredDeploymentTargets()} instead
*/
@Deprecated(forRemoval = true)
public static List<String> getConfiguratedDeploymentTargets() {
return getConfiguredDeploymentTargets();
}
/**
* Get the configured deployment target list as determined by:
* <ol>
* <li>the value of {@code quarkus.kubernetes.deployment-target=<deployment-target>}</li>
* <li>the presence of {@code quarkus.<deployment-target>.deploy=true}</li>
* </ol>
*/
public static List<String> getConfiguredDeploymentTargets() {
return splitDeploymentTargets(getConfiguredDeploymentTarget());
}
public static boolean isDeploymentEnabled() {
return DeploymentUtil.isDeploymentEnabled("kubernetes", "openshift", "knative", "kind", "minikube");
}
/*
* Collects configuration properties for Kubernetes. Reads all properties and
* matches properties that match known Dekorate generators. These properties may
* or may not be prefixed with {@code quarkus.} though the prefixed ones take precedence.
*
* @return A map containing the properties.
*/
public static Map<String, Object> toMap(PlatformConfiguration... platformConfigurations) {
Map<String, Object> result = new HashMap<>();
// Most of quarkus prefixed properties are handled directly by the config items (KubernetesConfig, OpenshiftConfig, KnativeConfig)
// We just need group, name & version parsed here, as we don't have decorators for these (low level properties).
Map<String, Object> quarkusPrefixed = new HashMap<>();
Arrays.stream(platformConfigurations).forEach(p -> {
p.partOf().ifPresent(g -> quarkusPrefixed.put(DEKORATE_PREFIX + p.targetPlatformName() + ".part-of", g));
p.name().ifPresent(n -> quarkusPrefixed.put(DEKORATE_PREFIX + p.targetPlatformName() + ".name", n));
p.version()
.map(v -> v.equals(UNSET_VALUE) ? DEFAULT_TAG : v)
.ifPresent(v -> quarkusPrefixed.put(DEKORATE_PREFIX + p.targetPlatformName() + ".version", v));
});
result.putAll(quarkusPrefixed);
result.putAll(toS2iProperties(quarkusPrefixed));
return result;
}
public static boolean managementPortIsEnabled() {
return ConfigProvider.getConfig().getOptionalValue("quarkus.management.enabled", Boolean.class).orElse(false);
}
private static Map<String, Object> toS2iProperties(Map<String, Object> map) {
Map<String, Object> result = new HashMap<>();
map.forEach((k, v) -> {
if (k.contains(OPENSHIFT)) {
result.put(k.replaceAll(OPENSHIFT, S2I), v);
}
});
return result;
}
}
| KubernetesConfigUtil |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/hql/size/OneToManySizeTest.java | {
"start": 1141,
"end": 10194
} | class ____ {
@Test
@SkipForDialect( dialectClass = DerbyDialect.class, reason = "Derby doesn't see that the subquery is functionally dependent" )
public void testSizeAsSelectExpression(SessionFactoryScope scope) {
scope.inTransaction(
(session) -> {
final List results = session.createQuery(
"select new org.hibernate.orm.test.query.hql.size.OneToManySizeTest$CompanyDto(" +
" c.id, c.name, size( c.customers ) )" +
" from Company c" +
" group by c.id, c.name" +
" order by c.id"
).list();
assertThat( results.size(), is( 3 ) );
final CompanyDto companyDto0 = (CompanyDto) results.get( 0 );
assertThat( companyDto0.getId(), is( 0 ) );
assertThat( companyDto0.getName(), is( "Company 0") );
assertThat( companyDto0.getSizeCustomer(), is( 0 ) );
final CompanyDto companyDto1 = (CompanyDto) results.get( 1 );
assertThat( companyDto1.getId(), is( 1 ) );
assertThat( companyDto1.getName(), is( "Company 1") );
assertThat( companyDto1.getSizeCustomer(), is( 1 ) );
final CompanyDto companyDto2 = (CompanyDto) results.get( 2 );
assertThat( companyDto2.getId(), is( 2 ) );
assertThat( companyDto2.getName(), is( "Company 2") );
assertThat( companyDto2.getSizeCustomer(), is( 2 ) );
}
);
}
@Test
@SkipForDialect( dialectClass = DerbyDialect.class, reason = "Derby doesn't see that the subquery is functionally dependent" )
public void testSizeAsSelectExpressionWithLeftJoin(SessionFactoryScope scope) {
scope.inTransaction(
(session) -> {
final List results = session.createQuery(
"select new org.hibernate.orm.test.query.hql.size.OneToManySizeTest$CompanyDto(" +
" c.id, c.name, size( c.customers ) )" +
" from Company c left join c.customers cu" +
" group by c.id, c.name" +
" order by c.id"
).list();
assertThat( results.size(), is( 3 ) );
final CompanyDto companyDto0 = (CompanyDto) results.get( 0 );
assertThat( companyDto0.getId(), is( 0 ) );
assertThat( companyDto0.getName(), is( "Company 0") );
assertThat( companyDto0.getSizeCustomer(), is( 0 ) );
final CompanyDto companyDto1 = (CompanyDto) results.get( 1 );
assertThat( companyDto1.getId(), is( 1 ) );
assertThat( companyDto1.getName(), is( "Company 1") );
assertThat( companyDto1.getSizeCustomer(), is( 1 ) );
final CompanyDto companyDto2 = (CompanyDto) results.get( 2 );
assertThat( companyDto2.getId(), is( 2 ) );
assertThat( companyDto2.getName(), is( "Company 2") );
assertThat( companyDto2.getSizeCustomer(), is( 2 ) );
}
);
}
@Test
@SkipForDialect( dialectClass = DerbyDialect.class, reason = "Derby doesn't see that the subquery is functionally dependent" )
public void testSizeAsSelectExpressionWithInnerJoin(SessionFactoryScope scope) {
scope.inTransaction(
(session) -> {
final List results = session.createQuery(
"select new org.hibernate.orm.test.query.hql.size.OneToManySizeTest$CompanyDto(" +
" c.id, c.name, size( c.customers ) )" +
" from Company c inner join c.customers cu" +
" group by c.id, c.name" +
" order by c.id"
).list();
assertThat( results.size(), is( 2 ) );
final CompanyDto companyDto1 = (CompanyDto) results.get( 0 );
assertThat( companyDto1.getId(), is( 1 ) );
assertThat( companyDto1.getName(), is( "Company 1") );
assertThat( companyDto1.getSizeCustomer(), is( 1 ) );
final CompanyDto companyDto2 = (CompanyDto) results.get( 1 );
assertThat( companyDto2.getId(), is( 2 ) );
assertThat( companyDto2.getName(), is( "Company 2") );
assertThat( companyDto2.getSizeCustomer(), is( 2 ) );
}
);
}
@Test
@SkipForDialect( dialectClass = DerbyDialect.class, reason = "Derby doesn't see that the subquery is functionally dependent" )
public void testSizeAsSelectExpressionOfAliasWithInnerJoin(SessionFactoryScope scope) {
scope.inTransaction(
(session) -> {
final List results = session.createQuery(
"select new org.hibernate.orm.test.query.hql.size.OneToManySizeTest$CompanyDto(" +
" c.id, c.name, size( cu ) )" +
" from Company c inner join c.customers cu" +
" group by c.id, c.name" +
" order by c.id"
).list();
assertThat( results.size(), is( 2 ) );
final CompanyDto companyDto1 = (CompanyDto) results.get( 0 );
assertThat( companyDto1.getId(), is( 1 ) );
assertThat( companyDto1.getName(), is( "Company 1") );
assertThat( companyDto1.getSizeCustomer(), is( 1 ) );
final CompanyDto companyDto2 = (CompanyDto) results.get( 1 );
assertThat( companyDto2.getId(), is( 2 ) );
assertThat( companyDto2.getName(), is( "Company 2") );
assertThat( companyDto2.getSizeCustomer(), is( 2 ) );
}
);
}
@Test
@SkipForDialect( dialectClass = DerbyDialect.class, reason = "Derby doesn't see that the subquery is functionally dependent" )
public void testSizeAsSelectExpressionExcludeEmptyCollection(SessionFactoryScope scope) {
scope.inTransaction(
(session) -> {
final List results = session.createQuery(
"select new org.hibernate.orm.test.query.hql.size.OneToManySizeTest$CompanyDto(" +
" c.id, c.name, size( c.customers ) )" +
" from Company c" +
" where c.id != 0" +
" group by c.id, c.name order by c.id"
).list();
assertThat( results.size(), is( 2 ) );
final CompanyDto companyDto1 = (CompanyDto) results.get( 0 );
assertThat( companyDto1.getId(), is( 1 ) );
assertThat( companyDto1.getName(), is( "Company 1") );
assertThat( companyDto1.getSizeCustomer(), is( 1 ) );
final CompanyDto companyDto2 = (CompanyDto) results.get( 1 );
assertThat( companyDto2.getId(), is( 2 ) );
assertThat( companyDto2.getName(), is( "Company 2") );
assertThat( companyDto2.getSizeCustomer(), is( 2 ) );
}
);
}
@Test
public void testSizeAsConditionalExpressionExcludeEmptyCollection(SessionFactoryScope scope) {
scope.inTransaction(
(session) -> {
final List<Company> results = session.createQuery(
"from Company c" +
" where size( c.customers ) > 0" +
" group by c.id, c.name order by c.id",
Company.class
).list();
assertThat( results.size(), is( 2 ) );
final Company company1 = results.get( 0 );
assertThat( company1.id, is( 1 ) );
assertThat( company1.name, is( "Company 1") );
assertThat( Hibernate.isInitialized( company1.customers ), is( true ) );
assertThat( company1.customers.size(), is( 1 ) );
final Company company2 = results.get( 1 );
assertThat( company2.id, is( 2 ) );
assertThat( company2.name, is( "Company 2") );
assertThat( Hibernate.isInitialized( company2.customers ), is( true ) );
assertThat( company2.customers.size(), is( 2 ) );
}
);
}
@Test
public void testSizeAsConditionalExpressionIncludeEmptyCollection(SessionFactoryScope scope) {
scope.inTransaction(
(session) -> {
final List<Company> results = session.createQuery(
"from Company c" +
" where size( c.customers ) > -1" +
" group by c.id, c.name order by c.id",
Company.class
).list();
assertThat( results.size(), is( 3 ) );
final Company company0 = results.get( 0 );
assertThat( company0.id, is( 0 ) );
assertThat( company0.name, is( "Company 0") );
assertThat( Hibernate.isInitialized(company0.customers), is( true ) );
assertThat( company0.customers.size(), is( 0 ) );
final Company company1 = results.get( 1 );
assertThat( company1.id, is( 1 ) );
assertThat( company1.name, is( "Company 1") );
assertThat( Hibernate.isInitialized(company1.customers), is( true ) );
assertThat( company1.customers.size(), is( 1 ) );
final Company company2 = results.get( 2 );
assertThat( company2.id, is( 2 ) );
assertThat( company2.name, is( "Company 2") );
assertThat( Hibernate.isInitialized(company2.customers), is( true ) );
assertThat( company2.customers.size(), is( 2 ) );
}
);
}
@BeforeEach
public void createTestData(SessionFactoryScope scope) {
scope.inTransaction(
(session) -> {
// Add a company with no customers
final Company companyWithNoCustomers = new Company( 0 );
companyWithNoCustomers.name = "Company 0";
session.persist( companyWithNoCustomers );
int k = 0;
for ( int i = 1; i <= 2; i++ ) {
final Company company = new Company( i );
company.name = "Company " + i;
for ( int j = 1; j <= i; j++ ) {
final Customer customer = new Customer( k );
customer.name = "Customer " + k;
company.customers.add( customer );
k++;
}
session.persist( company );
}
}
);
}
@AfterEach
public void dropTestData(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Entity(name ="Company")
public static | OneToManySizeTest |
java | quarkusio__quarkus | integration-tests/openapi/src/test/java/io/quarkus/it/openapi/spring/BooleanTest.java | {
"start": 181,
"end": 6940
} | class ____ extends AbstractTest {
// Just Boolean
@Test
public void testJustBooleanInSpringServiceRequest() {
testServiceRequest("/spring/defaultContentType/justBoolean", TEXT_PLAIN, "true");
}
@Test
public void testJustBooleanInSpringServiceResponse() {
testServiceResponse("/spring/defaultContentType/justBoolean", TEXT_PLAIN, "true");
}
@Test
public void testJustBooleanInSpringOpenAPIRequest() {
testOpenAPIRequest("/spring/defaultContentType/justBoolean", TEXT_PLAIN);
}
@Test
public void testJustBooleanInSpringOpenAPIResponse() {
testOpenAPIResponse("/spring/defaultContentType/justBoolean", TEXT_PLAIN);
}
// Just boolean
@Test
public void testJustBoolInSpringServiceRequest() {
testServiceRequest("/spring/defaultContentType/justBool", TEXT_PLAIN, "true");
}
@Test
public void testJustBoolInSpringServiceResponse() {
testServiceResponse("/spring/defaultContentType/justBool", TEXT_PLAIN, "true");
}
@Test
public void testJustBoolInSpringOpenAPIRequest() {
testOpenAPIRequest("/spring/defaultContentType/justBool", TEXT_PLAIN);
}
@Test
public void testJustBoolInSpringOpenAPIResponse() {
testOpenAPIResponse("/spring/defaultContentType/justBool", TEXT_PLAIN);
}
// ResponseEntity<Boolean>
@Test
public void testResponseEntityBooleanInSpringServiceRequest() {
testServiceRequest("/spring/defaultContentType/responseEntityBoolean", TEXT_PLAIN, "true");
}
@Test
public void testResponseEntityBooleanInSpringServiceResponse() {
testServiceResponse("/spring/defaultContentType/responseEntityBoolean", TEXT_PLAIN, "true");
}
@Test
public void testResponseEntityBooleanInSpringOpenAPIRequest() {
testOpenAPIRequest("/spring/defaultContentType/responseEntityBoolean", TEXT_PLAIN);
}
@Test
public void testResponseEntityBooleanInSpringOpenAPIResponse() {
testOpenAPIResponse("/spring/defaultContentType/responseEntityBoolean", TEXT_PLAIN);
}
// Optional<Boolean>
//@Test
public void testOptionalBooleanInSpringServiceRequest() {
testServiceRequest("/spring/defaultContentType/optionalBoolean", TEXT_PLAIN, "true");
}
//@Test
public void testOptionalBooleanInSpringServiceResponse() {
testServiceResponse("/spring/defaultContentType/optionalBoolean", TEXT_PLAIN, "true");
}
@Test
public void testOptionalBooleanInSpringOpenAPIRequest() {
testOpenAPIRequest("/spring/defaultContentType/optionalBoolean", TEXT_PLAIN);
}
@Test
public void testOptionalBooleanInSpringOpenAPIResponse() {
testOpenAPIResponse("/spring/defaultContentType/optionalBoolean", TEXT_PLAIN);
}
// Uni<Boolean>
@Test
public void testUniBooleanInSpringServiceResponse() {
testServiceResponse("/spring/defaultContentType/uniBoolean", TEXT_PLAIN, "true");
}
@Test
public void testUniBooleanInSpringOpenAPIResponse() {
testOpenAPIResponse("/spring/defaultContentType/uniBoolean", TEXT_PLAIN);
}
// CompletionStage<Boolean>
@Test
public void testCompletionStageBooleanInSpringServiceResponse() {
testServiceResponse("/spring/defaultContentType/completionStageBoolean", TEXT_PLAIN, "true");
}
@Test
public void testCompletionStageBooleanInSpringOpenAPIResponse() {
testOpenAPIResponse("/spring/defaultContentType/completionStageBoolean", TEXT_PLAIN);
}
// CompletedFuture<Boolean>
@Test
public void testCompletedFutureBooleanInSpringServiceResponse() {
testServiceResponse("/spring/defaultContentType/completedFutureBoolean", TEXT_PLAIN, "true");
}
@Test
public void testCompletedFutureBooleanInSpringOpenAPIResponse() {
testOpenAPIResponse("/spring/defaultContentType/completionStageBoolean", TEXT_PLAIN);
}
// List<Boolean>
@Test
public void testListBooleanInSpringServiceRequest() {
testServiceRequest("/spring/defaultContentType/listBoolean", APPLICATION_JSON, "[true]");
}
@Test
public void testListBooleanInSpringServiceResponse() {
testServiceResponse("/spring/defaultContentType/listBoolean", APPLICATION_JSON, "[true]");
}
@Test
public void testListBooleanInSpringOpenAPIRequest() {
testOpenAPIRequest("/spring/defaultContentType/listBoolean", APPLICATION_JSON);
}
@Test
public void testListBooleanInSpringOpenAPIResponse() {
testOpenAPIResponse("/spring/defaultContentType/listBoolean", APPLICATION_JSON);
}
// Boolean[]
@Test
public void testArrayBooleanInSpringServiceRequest() {
testServiceRequest("/spring/defaultContentType/arrayBoolean", APPLICATION_JSON, "[true]");
}
@Test
public void testArrayBooleanInSpringServiceResponse() {
testServiceResponse("/spring/defaultContentType/arrayBoolean", APPLICATION_JSON, "[true]");
}
@Test
public void testArrayBooleanInSpringOpenAPIRequest() {
testOpenAPIRequest("/spring/defaultContentType/arrayBoolean", APPLICATION_JSON);
}
@Test
public void testArrayBooleanInSpringOpenAPIResponse() {
testOpenAPIResponse("/spring/defaultContentType/arrayBoolean", APPLICATION_JSON);
}
// boolean[]
@Test
public void testArrayBoolInSpringServiceRequest() {
testServiceRequest("/spring/defaultContentType/arrayBool", APPLICATION_JSON, "[true]");
}
@Test
public void testArrayBoolInSpringServiceResponse() {
testServiceResponse("/spring/defaultContentType/arrayBool", APPLICATION_JSON, "[true]");
}
@Test
public void testArrayBoolInSpringOpenAPIRequest() {
testOpenAPIRequest("/spring/defaultContentType/arrayBool", APPLICATION_JSON);
}
@Test
public void testArrayBoolInSpringOpenAPIResponse() {
testOpenAPIResponse("/spring/defaultContentType/arrayBool", APPLICATION_JSON);
}
// Map<Boolean, Boolean>
@Test
public void testMapBooleanInSpringServiceRequest() {
testServiceRequest("/spring/defaultContentType/mapBoolean", APPLICATION_JSON, "{\"true\":true}");
}
@Test
public void testMapBooleanInSpringServiceResponse() {
testServiceResponse("/spring/defaultContentType/mapBoolean", APPLICATION_JSON, "{\"true\":true}");
}
@Test
public void testMapBooleanInSpringOpenAPIRequest() {
testOpenAPIRequest("/spring/defaultContentType/mapBoolean", APPLICATION_JSON);
}
@Test
public void testMapBooleanInSpringOpenAPIResponse() {
testOpenAPIResponse("/spring/defaultContentType/mapBoolean", APPLICATION_JSON);
}
}
| BooleanTest |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableGroupJoin.java | {
"start": 14029,
"end": 15318
} | class ____
extends AtomicReference<Subscription>
implements FlowableSubscriber<Object>, Disposable {
private static final long serialVersionUID = 1883890389173668373L;
final JoinSupport parent;
final boolean isLeft;
final int index;
LeftRightEndSubscriber(JoinSupport parent,
boolean isLeft, int index) {
this.parent = parent;
this.isLeft = isLeft;
this.index = index;
}
@Override
public void dispose() {
SubscriptionHelper.cancel(this);
}
@Override
public boolean isDisposed() {
return get() == SubscriptionHelper.CANCELLED;
}
@Override
public void onSubscribe(Subscription s) {
SubscriptionHelper.setOnce(this, s, Long.MAX_VALUE);
}
@Override
public void onNext(Object t) {
if (SubscriptionHelper.cancel(this)) {
parent.innerClose(isLeft, this);
}
}
@Override
public void onError(Throwable t) {
parent.innerCloseError(t);
}
@Override
public void onComplete() {
parent.innerClose(isLeft, this);
}
}
}
| LeftRightEndSubscriber |
java | google__guice | extensions/servlet/src/com/google/inject/servlet/ServletModule.java | {
"start": 11437,
"end": 12016
} | interface ____ {
void through(Class<? extends Filter> filterKey);
void through(Key<? extends Filter> filterKey);
/** @since 3.0 */
void through(Filter filter);
void through(Class<? extends Filter> filterKey, Map<String, String> initParams);
void through(Key<? extends Filter> filterKey, Map<String, String> initParams);
/** @since 3.0 */
void through(Filter filter, Map<String, String> initParams);
}
/**
* See the EDSL examples at {@link ServletModule#configureServlets()}
*
* @since 2.0
*/
public static | FilterKeyBindingBuilder |
java | spring-projects__spring-boot | module/spring-boot-webflux-test/src/test/java/org/springframework/boot/webflux/test/autoconfigure/WebFluxTestAutoConfigurationIntegrationTests.java | {
"start": 1489,
"end": 2108
} | class ____ {
@Autowired
private ApplicationContext applicationContext;
@Test
void messageSourceAutoConfigurationIsImported() {
assertThat(this.applicationContext).has(importedAutoConfiguration(MessageSourceAutoConfiguration.class));
}
@Test
void validationAutoConfigurationIsImported() {
assertThat(this.applicationContext).has(importedAutoConfiguration(ValidationAutoConfiguration.class));
}
@Test
void errorWebFluxAutoConfigurationIsImported() {
assertThat(this.applicationContext).has(importedAutoConfiguration(ErrorWebFluxAutoConfiguration.class));
}
}
| WebFluxTestAutoConfigurationIntegrationTests |
java | quarkusio__quarkus | core/runtime/src/main/java/io/quarkus/runtime/configuration/TrimmedStringConverter.java | {
"start": 105,
"end": 479
} | class ____ implements Converter<String> {
public TrimmedStringConverter() {
}
@Override
public String convert(String s) {
if (s == null) {
return null;
}
String trimmedString = s.trim();
if (trimmedString.isEmpty()) {
return null;
}
return trimmedString;
}
}
| TrimmedStringConverter |
java | elastic__elasticsearch | modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorFactoryTests.java | {
"start": 788,
"end": 4210
} | class ____ extends ESTestCase {
private RenameProcessor.Factory factory;
@Before
public void init() {
factory = new RenameProcessor.Factory(TestTemplateService.instance());
}
public void testCreate() throws Exception {
Map<String, Object> config = new HashMap<>();
config.put("field", "old_field");
config.put("target_field", "new_field");
String processorTag = randomAlphaOfLength(10);
RenameProcessor renameProcessor = factory.create(null, processorTag, null, config, null);
assertThat(renameProcessor.getTag(), equalTo(processorTag));
assertThat(renameProcessor.getField().newInstance(Map.of()).execute(), equalTo("old_field"));
assertThat(renameProcessor.getTargetField().newInstance(Map.of()).execute(), equalTo("new_field"));
assertThat(renameProcessor.isIgnoreMissing(), equalTo(false));
assertThat(renameProcessor.isOverrideEnabled(), equalTo(false));
}
public void testCreateWithIgnoreMissing() throws Exception {
Map<String, Object> config = new HashMap<>();
config.put("field", "old_field");
config.put("target_field", "new_field");
config.put("ignore_missing", true);
String processorTag = randomAlphaOfLength(10);
RenameProcessor renameProcessor = factory.create(null, processorTag, null, config, null);
assertThat(renameProcessor.getTag(), equalTo(processorTag));
assertThat(renameProcessor.getField().newInstance(Map.of()).execute(), equalTo("old_field"));
assertThat(renameProcessor.getTargetField().newInstance(Map.of()).execute(), equalTo("new_field"));
assertThat(renameProcessor.isIgnoreMissing(), equalTo(true));
}
public void testCreateWithEnableOverride() throws Exception {
Map<String, Object> config = new HashMap<>();
config.put("field", "old_field");
config.put("target_field", "new_field");
config.put("override", true);
String processorTag = randomAlphaOfLength(10);
RenameProcessor renameProcessor = factory.create(null, processorTag, null, config, null);
assertThat(renameProcessor.getTag(), equalTo(processorTag));
assertThat(renameProcessor.getField().newInstance(Map.of()).execute(), equalTo("old_field"));
assertThat(renameProcessor.getTargetField().newInstance(Map.of()).execute(), equalTo("new_field"));
assertThat(renameProcessor.isOverrideEnabled(), equalTo(true));
}
public void testCreateNoFieldPresent() throws Exception {
Map<String, Object> config = new HashMap<>();
config.put("target_field", "new_field");
try {
factory.create(null, null, null, config, null);
fail("factory create should have failed");
} catch (ElasticsearchParseException e) {
assertThat(e.getMessage(), equalTo("[field] required property is missing"));
}
}
public void testCreateNoToPresent() throws Exception {
Map<String, Object> config = new HashMap<>();
config.put("field", "old_field");
try {
factory.create(null, null, null, config, null);
fail("factory create should have failed");
} catch (ElasticsearchParseException e) {
assertThat(e.getMessage(), equalTo("[target_field] required property is missing"));
}
}
}
| RenameProcessorFactoryTests |
java | apache__camel | components/camel-ignite/src/generated/java/org/apache/camel/component/ignite/messaging/IgniteMessagingEndpointConfigurer.java | {
"start": 743,
"end": 5217
} | class ____ extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {
@Override
public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
IgniteMessagingEndpoint target = (IgniteMessagingEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "bridgeerrorhandler":
case "bridgeErrorHandler": target.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
case "clustergroupexpression":
case "clusterGroupExpression": target.setClusterGroupExpression(property(camelContext, org.apache.camel.component.ignite.ClusterGroupExpression.class, value)); return true;
case "exceptionhandler":
case "exceptionHandler": target.setExceptionHandler(property(camelContext, org.apache.camel.spi.ExceptionHandler.class, value)); return true;
case "exchangepattern":
case "exchangePattern": target.setExchangePattern(property(camelContext, org.apache.camel.ExchangePattern.class, value)); return true;
case "lazystartproducer":
case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
case "propagateincomingbodyifnoreturnvalue":
case "propagateIncomingBodyIfNoReturnValue": target.setPropagateIncomingBodyIfNoReturnValue(property(camelContext, boolean.class, value)); return true;
case "sendmode":
case "sendMode": target.setSendMode(property(camelContext, org.apache.camel.component.ignite.messaging.IgniteMessagingSendMode.class, value)); return true;
case "timeout": target.setTimeout(property(camelContext, java.lang.Long.class, value)); return true;
case "treatcollectionsascacheobjects":
case "treatCollectionsAsCacheObjects": target.setTreatCollectionsAsCacheObjects(property(camelContext, boolean.class, value)); return true;
default: return false;
}
}
@Override
public Class<?> getOptionType(String name, boolean ignoreCase) {
switch (ignoreCase ? name.toLowerCase() : name) {
case "bridgeerrorhandler":
case "bridgeErrorHandler": return boolean.class;
case "clustergroupexpression":
case "clusterGroupExpression": return org.apache.camel.component.ignite.ClusterGroupExpression.class;
case "exceptionhandler":
case "exceptionHandler": return org.apache.camel.spi.ExceptionHandler.class;
case "exchangepattern":
case "exchangePattern": return org.apache.camel.ExchangePattern.class;
case "lazystartproducer":
case "lazyStartProducer": return boolean.class;
case "propagateincomingbodyifnoreturnvalue":
case "propagateIncomingBodyIfNoReturnValue": return boolean.class;
case "sendmode":
case "sendMode": return org.apache.camel.component.ignite.messaging.IgniteMessagingSendMode.class;
case "timeout": return java.lang.Long.class;
case "treatcollectionsascacheobjects":
case "treatCollectionsAsCacheObjects": return boolean.class;
default: return null;
}
}
@Override
public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
IgniteMessagingEndpoint target = (IgniteMessagingEndpoint) obj;
switch (ignoreCase ? name.toLowerCase() : name) {
case "bridgeerrorhandler":
case "bridgeErrorHandler": return target.isBridgeErrorHandler();
case "clustergroupexpression":
case "clusterGroupExpression": return target.getClusterGroupExpression();
case "exceptionhandler":
case "exceptionHandler": return target.getExceptionHandler();
case "exchangepattern":
case "exchangePattern": return target.getExchangePattern();
case "lazystartproducer":
case "lazyStartProducer": return target.isLazyStartProducer();
case "propagateincomingbodyifnoreturnvalue":
case "propagateIncomingBodyIfNoReturnValue": return target.isPropagateIncomingBodyIfNoReturnValue();
case "sendmode":
case "sendMode": return target.getSendMode();
case "timeout": return target.getTimeout();
case "treatcollectionsascacheobjects":
case "treatCollectionsAsCacheObjects": return target.isTreatCollectionsAsCacheObjects();
default: return null;
}
}
}
| IgniteMessagingEndpointConfigurer |
java | square__okhttp | samples/slack/src/main/java/okhttp3/slack/OAuthSessionFactory.java | {
"start": 3660,
"end": 3920
} | interface ____ {
void sessionGranted(OAuthSession session);
}
@Override public void close() {
if (mockWebServer == null) throw new IllegalStateException();
try {
mockWebServer.close();
} catch (IOException ignored) {
}
}
}
| Listener |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/internal/util/reflection/GenericMetadataSupport.java | {
"start": 15165,
"end": 15909
} | class ____ extends GenericMetadataSupport {
private final Class<?> clazz;
public FromClassGenericMetadataSupport(Class<?> clazz) {
this.clazz = clazz;
registerTypeParametersOn(clazz.getTypeParameters());
registerAllTypeVariables(clazz);
}
@Override
public Class<?> rawType() {
return clazz;
}
}
/**
* Generic metadata implementation for "standalone" {@link ParameterizedType}.
* <p>
* Offer support to retrieve generic metadata on a {@link ParameterizedType} by reading type variables of
* the related raw type and declared type variable of this parameterized type.
* <p>
* This | FromClassGenericMetadataSupport |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/promql/TranslatePromqlToTimeSeriesAggregate.java | {
"start": 16787,
"end": 19149
} | enum ____: EXACT -> PREFIX -> SUFFIX -> REGEX
List<AutomatonUtils.PatternFragment> sortedFragments = new ArrayList<>(fragments);
sortedFragments.sort(Comparator.comparingInt(a -> a.type().ordinal()));
// Check if all fragments are of the same type
AutomatonUtils.PatternFragment.Type firstType = sortedFragments.get(0).type();
boolean homogeneous = true;
for (AutomatonUtils.PatternFragment fragment : sortedFragments) {
if (fragment.type() != firstType) {
homogeneous = false;
break;
}
}
if (homogeneous && firstType == AutomatonUtils.PatternFragment.Type.EXACT) {
// Optimize to IN clause
List<Expression> values = new ArrayList<>(sortedFragments.size());
for (AutomatonUtils.PatternFragment fragment : sortedFragments) {
values.add(Literal.keyword(source, fragment.value()));
}
return new In(source, field, values);
}
// For non-exact homogeneous or heterogeneous patterns, create OR of conditions
List<Expression> conditions = new ArrayList<>(sortedFragments.size());
for (AutomatonUtils.PatternFragment fragment : sortedFragments) {
Expression condition = translatePatternFragment(source, field, fragment);
conditions.add(condition);
}
// Combine with OR
return Predicates.combineOr(conditions);
}
/**
* Translates a single pattern fragment into an ESQL expression.
*/
private static Expression translatePatternFragment(Source source, Expression field, AutomatonUtils.PatternFragment fragment) {
Literal value = Literal.keyword(source, fragment.value());
return switch (fragment.type()) {
case EXACT -> new Equals(source, field, value);
case PREFIX -> new StartsWith(source, field, value);
case PROPER_PREFIX -> new And(source, new NotEquals(source, field, value), new StartsWith(source, field, value));
case SUFFIX -> new EndsWith(source, field, value);
case PROPER_SUFFIX -> new And(source, new NotEquals(source, field, value), new EndsWith(source, field, value));
case REGEX -> new RLike(source, field, new RLikePattern(fragment.value()));
};
}
}
| ordinal |
java | apache__camel | core/camel-api/src/main/java/org/apache/camel/spi/OnCamelContextStopped.java | {
"start": 960,
"end": 1073
} | interface ____ extends OnCamelContextEvent {
void onContextStopped(CamelContext context);
}
| OnCamelContextStopped |
java | apache__dubbo | dubbo-config/dubbo-config-spring/src/test/java/org/apache/dubbo/config/spring/schema/GenericServiceWithoutInterfaceTest.java | {
"start": 2458,
"end": 3260
} | interface ____ locally
Object result = genericServiceWithoutInterfaceRef.$invoke(
"sayHello", new String[] {"java.lang.String"}, new Object[] {"generic"});
Assertions.assertEquals("Welcome generic", result);
ReferenceConfigBase<Object> reference = DubboBootstrap.getInstance()
.getApplicationModel()
.getDefaultModule()
.getConfigManager()
.getReference("genericServiceWithoutInterfaceRef");
Assertions.assertNull(reference.getServiceInterfaceClass());
Assertions.assertEquals("org.apache.dubbo.config.spring.api.LocalMissClass", reference.getInterface());
Assertions.assertThrows(ClassNotFoundException.class, () -> ClassUtils.forName(reference.getInterface()));
}
}
| class |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/cdi/TransactionalInterceptor.java | {
"start": 988,
"end": 1565
} | class ____ {
@Inject
@Any
private EntityManager entityManager;
@AroundInvoke
public Object runInTransaction(InvocationContext ctx) throws Exception {
EntityTransaction entityTransaction = this.entityManager.getTransaction();
boolean isNew = !entityTransaction.isActive();
try {
if (isNew) {
entityTransaction.begin();
}
Object result = ctx.proceed();
if (isNew) {
entityTransaction.commit();
}
return result;
} catch (RuntimeException r) {
if (isNew) {
entityTransaction.rollback();
}
throw r;
}
}
}
| TransactionalInterceptor |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamPrintTest.java | {
"start": 1652,
"end": 3304
} | class ____ {
private ByteArrayOutputStream byteOutStream;
private Processor<Integer, String, Void, Void> printProcessor;
@Mock
private ProcessorContext<Void, Void> processorContext;
@BeforeEach
public void setUp() {
byteOutStream = new ByteArrayOutputStream();
final KStreamPrint<Integer, String> kStreamPrint = new KStreamPrint<>(new PrintForeachAction<>(
byteOutStream,
(key, value) -> String.format("%d, %s", key, value),
"test-stream"));
printProcessor = kStreamPrint.get();
printProcessor.init(processorContext);
}
@Test
public void testPrintStreamWithProvidedKeyValueMapper() {
final List<KeyValue<Integer, String>> inputRecords = Arrays.asList(
new KeyValue<>(0, "zero"),
new KeyValue<>(1, "one"),
new KeyValue<>(2, "two"),
new KeyValue<>(3, "three"));
final String[] expectedResult = {
"[test-stream]: 0, zero",
"[test-stream]: 1, one",
"[test-stream]: 2, two",
"[test-stream]: 3, three"};
for (final KeyValue<Integer, String> record: inputRecords) {
final Record<Integer, String> r = new Record<>(record.key, record.value, 0L);
printProcessor.process(r);
}
printProcessor.close();
final String[] flushOutDatas = new String(byteOutStream.toByteArray(), StandardCharsets.UTF_8).split("\\r*\\n");
for (int i = 0; i < flushOutDatas.length; i++) {
assertEquals(expectedResult[i], flushOutDatas[i]);
}
}
}
| KStreamPrintTest |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/assumptions/BDDAssumptionsTest.java | {
"start": 28633,
"end": 29047
} | class ____ {
private final AtomicBoolean actual = new AtomicBoolean(true);
@Test
void should_run_test_when_assumption_passes() {
thenCode(() -> given(actual).isTrue()).doesNotThrowAnyException();
}
@Test
void should_ignore_test_when_assumption_fails() {
expectAssumptionNotMetException(() -> given(actual).isFalse());
}
}
@Nested
| BDDAssumptions_given_AtomicBoolean_Test |
java | grpc__grpc-java | xds/src/main/java/io/grpc/xds/ClusterResolverLoadBalancerProvider.java | {
"start": 5322,
"end": 9326
} | enum ____ {
EDS,
LOGICAL_DNS,
}
private DiscoveryMechanism(String cluster, Type type, @Nullable String edsServiceName,
@Nullable String dnsHostName, @Nullable ServerInfo lrsServerInfo,
@Nullable Long maxConcurrentRequests, @Nullable UpstreamTlsContext tlsContext,
Map<String, Struct> filterMetadata, @Nullable OutlierDetection outlierDetection,
@Nullable BackendMetricPropagation backendMetricPropagation) {
this.cluster = checkNotNull(cluster, "cluster");
this.type = checkNotNull(type, "type");
this.edsServiceName = edsServiceName;
this.dnsHostName = dnsHostName;
this.lrsServerInfo = lrsServerInfo;
this.maxConcurrentRequests = maxConcurrentRequests;
this.tlsContext = tlsContext;
this.filterMetadata = ImmutableMap.copyOf(checkNotNull(filterMetadata, "filterMetadata"));
this.outlierDetection = outlierDetection;
this.backendMetricPropagation = backendMetricPropagation;
}
static DiscoveryMechanism forEds(String cluster, @Nullable String edsServiceName,
@Nullable ServerInfo lrsServerInfo, @Nullable Long maxConcurrentRequests,
@Nullable UpstreamTlsContext tlsContext, Map<String, Struct> filterMetadata,
OutlierDetection outlierDetection,
@Nullable BackendMetricPropagation backendMetricPropagation) {
return new DiscoveryMechanism(cluster, Type.EDS, edsServiceName,
null, lrsServerInfo, maxConcurrentRequests, tlsContext,
filterMetadata, outlierDetection, backendMetricPropagation);
}
static DiscoveryMechanism forLogicalDns(String cluster, String dnsHostName,
@Nullable ServerInfo lrsServerInfo, @Nullable Long maxConcurrentRequests,
@Nullable UpstreamTlsContext tlsContext, Map<String, Struct> filterMetadata,
@Nullable BackendMetricPropagation backendMetricPropagation) {
return new DiscoveryMechanism(cluster, Type.LOGICAL_DNS, null, dnsHostName,
lrsServerInfo, maxConcurrentRequests, tlsContext, filterMetadata, null,
backendMetricPropagation);
}
@Override
public int hashCode() {
return Objects.hash(cluster, type, lrsServerInfo, maxConcurrentRequests, tlsContext,
edsServiceName, dnsHostName, filterMetadata,
outlierDetection, backendMetricPropagation);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
DiscoveryMechanism that = (DiscoveryMechanism) o;
return cluster.equals(that.cluster)
&& type == that.type
&& Objects.equals(edsServiceName, that.edsServiceName)
&& Objects.equals(dnsHostName, that.dnsHostName)
&& Objects.equals(lrsServerInfo, that.lrsServerInfo)
&& Objects.equals(maxConcurrentRequests, that.maxConcurrentRequests)
&& Objects.equals(tlsContext, that.tlsContext)
&& Objects.equals(filterMetadata, that.filterMetadata)
&& Objects.equals(outlierDetection, that.outlierDetection);
}
@Override
public String toString() {
MoreObjects.ToStringHelper toStringHelper =
MoreObjects.toStringHelper(this)
.add("cluster", cluster)
.add("type", type)
.add("edsServiceName", edsServiceName)
.add("dnsHostName", dnsHostName)
.add("lrsServerInfo", lrsServerInfo)
// Exclude tlsContext as its string representation is cumbersome.
.add("maxConcurrentRequests", maxConcurrentRequests)
.add("filterMetadata", filterMetadata)
// Exclude outlierDetection as its string representation is long.
;
return toStringHelper.toString();
}
}
}
}
| Type |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/StreamJsonTest.java | {
"start": 6553,
"end": 9046
} | class ____ {
@Inject
Vertx vertx;
@GET
@Path("/string")
@Produces(RestMediaType.APPLICATION_STREAM_JSON)
@RestStreamElementType(MediaType.APPLICATION_JSON)
public Multi<String> readString() {
return Multi.createFrom().emitter(
em -> {
em.emit("one");
em.emit("two");
em.emit("3");
em.emit("four");
em.complete();
});
}
@GET
@Path("/pojo")
@Produces(RestMediaType.APPLICATION_STREAM_JSON)
@RestStreamElementType(MediaType.APPLICATION_JSON)
public Multi<Message> readPojo() {
return Multi.createFrom().emitter(
em -> {
em.emit(Message.of("one", "1"));
em.emit(Message.of("two", "2"));
em.emit(Message.of("three", "3"));
vertx.setTimer(100, id -> {
em.emit(Message.of("four", "4"));
em.complete();
});
});
}
@GET
@Path("/single-pojo")
@Produces(RestMediaType.APPLICATION_STREAM_JSON)
@RestStreamElementType(MediaType.APPLICATION_JSON)
public String getPojosAsString() throws JsonProcessingException {
ObjectMapper mapper = new ObjectMapper();
StringBuilder result = new StringBuilder();
ObjectWriter objectWriter = mapper.writerFor(Message.class);
for (var msg : List.of(Message.of("zero", "0"),
Message.of("one", "1"),
Message.of("two", "2"),
Message.of("three", "3"))) {
result.append(objectWriter.writeValueAsString(msg));
result.append("\n");
}
return result.toString();
}
@GET
@Path("/ticks")
@Produces(RestMediaType.APPLICATION_STREAM_JSON)
@RestStreamElementType(MediaType.APPLICATION_JSON)
public Multi<String> getTicks() {
return Multi.createFrom()
.ticks()
.every(Duration.ofMillis(TICK_EVERY_MS))
.log()
.onItem()
.transform((Long tick) -> "tick " + tick);
}
}
public static | StreamingResource |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java | {
"start": 9772,
"end": 10389
} | class ____ extends ContainerExecutor {
private static final Logger LOG =
LoggerFactory.getLogger(LinuxContainerExecutor.class);
private String nonsecureLocalUser;
private Pattern nonsecureLocalUserPattern;
private LCEResourcesHandler resourcesHandler;
private boolean containerSchedPriorityIsSet = false;
private int containerSchedPriorityAdjustment = 0;
private boolean containerLimitUsers;
private ResourceHandler resourceHandlerChain;
private LinuxContainerRuntime linuxContainerRuntime;
private Context nmContext;
/**
* The container exit code.
*/
public | LinuxContainerExecutor |
java | apache__camel | components/camel-cxf/camel-cxf-spring-soap/src/test/java/org/apache/camel/component/cxf/mtom/HelloImpl12.java | {
"start": 1000,
"end": 1246
} | class ____ SOAP 1.2
*/
@WebService(serviceName = "HelloService12")
@XmlSeeAlso({ org.apache.camel.cxf.mtom_feature.types.ObjectFactory.class })
@jakarta.xml.ws.BindingType(value = jakarta.xml.ws.soap.SOAPBinding.SOAP12HTTP_MTOM_BINDING)
public | for |
java | google__dagger | dagger-compiler/main/java/dagger/internal/codegen/validation/BindingMethodValidator.java | {
"start": 5170,
"end": 6177
} | class ____ extends ElementValidator {
private final XMethodElement method;
protected MethodValidator(XMethodElement method) {
super(method);
this.method = method;
}
@Override
protected final Optional<XType> bindingElementType() {
return Optional.of(method.getReturnType());
}
@Override
protected final void checkAdditionalProperties() {
checkNotExtensionFunction();
checkEnclosingElement();
checkTypeParameters();
checkNotPrivate();
checkAbstractness();
checkThrows();
checkParameters();
checkAdditionalMethodProperties();
}
/** Checks additional properties of the binding method. */
protected void checkAdditionalMethodProperties() {}
private void checkNotExtensionFunction() {
if (method.isExtensionFunction()) {
report.addError(bindingMethods("can not be an extension function"));
}
}
/**
* Adds an error if the method is not declared in a | MethodValidator |
java | apache__hadoop | hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/LINK3Request.java | {
"start": 1039,
"end": 1992
} | class ____ extends RequestWithHandle {
private final FileHandle fromDirHandle;
private final String fromName;
public LINK3Request(FileHandle handle, FileHandle fromDirHandle,
String fromName) {
super(handle);
this.fromDirHandle = fromDirHandle;
this.fromName = fromName;
}
public static LINK3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
FileHandle fromDirHandle = readHandle(xdr);
String fromName = xdr.readString();
return new LINK3Request(handle, fromDirHandle, fromName);
}
public FileHandle getFromDirHandle() {
return fromDirHandle;
}
public String getFromName() {
return fromName;
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
fromDirHandle.serialize(xdr);
xdr.writeInt(fromName.length());
xdr.writeFixedOpaque(fromName.getBytes(StandardCharsets.UTF_8),
fromName.length());
}
}
| LINK3Request |
java | dropwizard__dropwizard | dropwizard-testing/src/test/java/io/dropwizard/testing/junit5/PersonResourceExceptionMapperTest.java | {
"start": 787,
"end": 2490
} | class ____ {
private static final ObjectMapper OBJECT_MAPPER = Jackson.newObjectMapper()
.registerModule(new GuavaModule());
private PeopleStore peopleStore = mock(PeopleStore.class);
private ResourceExtension resources = ResourceExtension.builder()
.addResource(new PersonResource(peopleStore))
.setRegisterDefaultExceptionMappers(false)
.addProvider(new MyJerseyExceptionMapper())
.addProvider(new GenericExceptionMapper())
.setMapper(OBJECT_MAPPER)
.build();
@Test
void testDefaultConstraintViolation() {
assertThat(resources.target("/person/blah/index")
.queryParam("ind", -1).request()
.get().readEntity(String.class))
.isEqualTo("Invalid data");
}
@Test
void testDefaultJsonProcessingMapper() {
assertThat(resources.target("/person/blah/runtime-exception")
.request()
.post(Entity.json("{ \"he: \"ho\"}"))
.readEntity(String.class))
.startsWith("Something went wrong: Unexpected character");
}
@Test
void testDefaultExceptionMapper() {
assertThat(resources.target("/person/blah/runtime-exception")
.request()
.post(Entity.json("{}"))
.readEntity(String.class))
.isEqualTo("Something went wrong: I'm an exception!");
}
@Test
void testDefaultEofExceptionMapper() {
assertThat(resources.target("/person/blah/eof-exception")
.request()
.get().readEntity(String.class))
.isEqualTo("Something went wrong: I'm an eof exception!");
}
private static | PersonResourceExceptionMapperTest |
java | alibaba__nacos | api/src/test/java/com/alibaba/nacos/api/lock/model/LockInstanceTest.java | {
"start": 1089,
"end": 3881
} | class ____ {
private LockInstance lockInstance;
@BeforeEach
void setUp() {
lockInstance = new LockInstance();
}
@Test
void testConstructorWithParameters() {
String key = "testKey";
Long expiredTime = 1000L;
String lockType = "testType";
LockInstance instance = new LockInstance(key, expiredTime, lockType);
assertEquals(key, instance.getKey());
assertEquals(expiredTime, instance.getExpiredTime());
assertEquals(lockType, instance.getLockType());
}
@Test
void testDefaultConstructor() {
LockInstance instance = new LockInstance();
assertNull(instance.getKey());
assertNull(instance.getExpiredTime());
assertNull(instance.getLockType());
assertNull(instance.getParams());
}
@Test
void testGetAndSetKey() {
String key = "testKey";
lockInstance.setKey(key);
assertEquals(key, lockInstance.getKey());
}
@Test
void testGetAndSetExpiredTime() {
Long expiredTime = 1000L;
lockInstance.setExpiredTime(expiredTime);
assertEquals(expiredTime, lockInstance.getExpiredTime());
}
@Test
void testGetAndSetLockType() {
String lockType = "testType";
lockInstance.setLockType(lockType);
assertEquals(lockType, lockInstance.getLockType());
}
@Test
void testGetAndSetParams() {
Map<String, String> params = new HashMap<>();
params.put("param1", "value1");
params.put("param2", "value2");
lockInstance.setParams(params);
assertEquals(params, lockInstance.getParams());
}
@Test
void testLockMethod() throws NacosException {
LockService lockService = mock(LockService.class);
Boolean expectedResult = true;
when(lockService.remoteTryLock(lockInstance)).thenReturn(expectedResult);
lockInstance.setKey("testKey");
lockInstance.setLockType("testType");
lockInstance.setExpiredTime(1000L);
Boolean result = lockInstance.lock(lockService);
assertEquals(expectedResult, result);
}
@Test
void testUnlockMethod() throws NacosException {
LockService lockService = mock(LockService.class);
Boolean expectedResult = true;
when(lockService.remoteReleaseLock(lockInstance)).thenReturn(expectedResult);
lockInstance.setKey("testKey");
lockInstance.setLockType("testType");
lockInstance.setExpiredTime(1000L);
Boolean result = lockInstance.unLock(lockService);
assertEquals(expectedResult, result);
}
} | LockInstanceTest |
java | apache__flink | flink-filesystems/flink-azure-fs-hadoop/src/test/java/org/apache/flink/fs/azurefs/AzureBlobStorageFSFactoryTest.java | {
"start": 1382,
"end": 1563
} | class ____ {
@ParameterizedTest(name = "Factory = {0}")
@MethodSource("getFactories")
@Retention(value = RetentionPolicy.RUNTIME)
private @ | AzureBlobStorageFSFactoryTest |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/metrics/TimerGauge.java | {
"start": 1631,
"end": 7326
} | class ____ implements Gauge<Long>, View {
private static final int DEFAULT_TIME_SPAN_IN_SECONDS = 60;
private final Clock clock;
private final Collection<StartStopListener> startStopListeners = new ArrayList<>();
/** The time-span over which the average is calculated. */
private final int timeSpanInSeconds;
/** Circular array containing the history of values. */
private final long[] values;
/** The index in the array for the current time. */
private int idx = 0;
private boolean fullWindow = false;
private long currentValue;
private long currentCount;
private long currentMeasurementStartTS;
/**
* This differ from {@link #currentMeasurementStartTS} that {@link #currentUpdateTS} is bumped
* on every {@link #update()} call, while {@link #currentMeasurementStartTS} always marks the
* last {@link #markStart()} call.
*/
private long currentUpdateTS;
private long previousMaxSingleMeasurement;
private long currentMaxSingleMeasurement;
private long accumulatedCount;
public TimerGauge() {
this(DEFAULT_TIME_SPAN_IN_SECONDS);
}
public TimerGauge(int timeSpanInSeconds) {
this(SystemClock.getInstance(), timeSpanInSeconds);
}
public TimerGauge(Clock clock) {
this(clock, DEFAULT_TIME_SPAN_IN_SECONDS);
}
public TimerGauge(Clock clock, int timeSpanInSeconds) {
this.clock = clock;
this.timeSpanInSeconds =
Math.max(
timeSpanInSeconds - (timeSpanInSeconds % UPDATE_INTERVAL_SECONDS),
UPDATE_INTERVAL_SECONDS);
this.values = new long[this.timeSpanInSeconds / UPDATE_INTERVAL_SECONDS];
}
public synchronized void registerListener(StartStopListener listener) {
if (currentMeasurementStartTS != 0) {
listener.markStart();
}
startStopListeners.add(listener);
}
public synchronized void unregisterListener(StartStopListener listener) {
if (currentMeasurementStartTS != 0) {
listener.markEnd();
}
startStopListeners.remove(listener);
}
public synchronized void markStart() {
if (currentMeasurementStartTS != 0) {
return;
}
currentUpdateTS = clock.absoluteTimeMillis();
currentMeasurementStartTS = currentUpdateTS;
for (StartStopListener startStopListener : startStopListeners) {
startStopListener.markStart();
}
}
public synchronized void markEnd() {
if (currentMeasurementStartTS == 0) {
return;
}
long now = clock.absoluteTimeMillis();
long currentMeasurement = now - currentMeasurementStartTS;
long currentIncrement = now - currentUpdateTS;
currentCount += currentIncrement;
accumulatedCount += currentIncrement;
currentMaxSingleMeasurement = Math.max(currentMaxSingleMeasurement, currentMeasurement);
currentUpdateTS = 0;
currentMeasurementStartTS = 0;
for (StartStopListener startStopListener : startStopListeners) {
startStopListener.markEnd();
}
}
@Override
public synchronized void update() {
if (currentMeasurementStartTS != 0) {
long now = clock.absoluteTimeMillis();
// we adding to the current count only the time elapsed since last markStart or update
// call
currentCount += now - currentUpdateTS;
accumulatedCount += now - currentUpdateTS;
currentUpdateTS = now;
// on the other hand, max measurement has to be always checked against last markStart
// call
currentMaxSingleMeasurement =
Math.max(currentMaxSingleMeasurement, now - currentMeasurementStartTS);
}
updateCurrentValue();
previousMaxSingleMeasurement = currentMaxSingleMeasurement;
currentCount = 0;
currentMaxSingleMeasurement = 0;
}
private void updateCurrentValue() {
if (idx == values.length - 1) {
fullWindow = true;
}
values[idx] = currentCount;
idx = (idx + 1) % values.length;
int maxIndex = fullWindow ? values.length : idx;
long totalTime = 0;
for (int i = 0; i < maxIndex; i++) {
totalTime += values[i];
}
currentValue =
Math.max(Math.min(totalTime / (UPDATE_INTERVAL_SECONDS * maxIndex), 1000), 0);
}
@Override
public synchronized Long getValue() {
return currentValue;
}
/**
* @return the longest marked period as measured by the given * TimerGauge. For example the
* longest consecutive back pressured period.
*/
public synchronized long getMaxSingleMeasurement() {
return previousMaxSingleMeasurement;
}
/**
* @return the accumulated period by the given * TimerGauge.
*/
public synchronized long getAccumulatedCount() {
return accumulatedCount;
}
@VisibleForTesting
public synchronized long getCount() {
return currentCount;
}
public synchronized boolean isMeasuring() {
return currentMeasurementStartTS != 0;
}
/**
* Listens for {@link TimerGauge#markStart()} and {@link TimerGauge#markEnd()} events.
*
* <p>Beware! As it is right now, {@link StartStopListener} is notified under the {@link
* TimerGauge}'s lock, so those callbacks should be very short, without long call stacks that
* acquire more locks. Otherwise, a potential for deadlocks can be introduced.
*/
public | TimerGauge |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNetworkTopologyServlet.java | {
"start": 5496,
"end": 10830
} | class ____ {
@ParameterizedTest
@ValueSource(strings = {SYNC_MODE})
public void testPrintTopologyTextFormatSync(String rpcMode) throws Exception {
testPrintTopologyTextFormat();
}
@ParameterizedTest
@ValueSource(strings = {SYNC_MODE})
public void testPrintTopologyJsonFormatSync(String rpcMode) throws Exception {
testPrintTopologyJsonFormat();
}
@ParameterizedTest
@ValueSource(strings = {SYNC_MODE})
public void testPrintTopologyNoDatanodesTextFormatSync(String rpcMode)
throws Exception {
testPrintTopologyNoDatanodesTextFormat();
}
@ParameterizedTest
@ValueSource(strings = {SYNC_MODE})
public void testPrintTopologyNoDatanodesJsonFormatSync(String rpcMode)
throws Exception {
testPrintTopologyNoDatanodesJsonFormat();
}
}
public void testPrintTopologyTextFormat() throws Exception {
// Get http Address.
String httpAddress = clusterWithDatanodes.getRandomRouter().getRouter()
.getHttpServerAddress().toString();
// Send http request.
URL url = new URL("http:/" + httpAddress + "/topology");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setReadTimeout(20000);
conn.setConnectTimeout(20000);
conn.connect();
ByteArrayOutputStream out = new ByteArrayOutputStream();
IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
StringBuilder sb =
new StringBuilder("-- Network Topology -- \n");
sb.append(out);
sb.append("\n-- Network Topology -- ");
String topology = sb.toString();
// Assert rack info.
assertTrue(topology.contains("/ns0/rack1"));
assertTrue(topology.contains("/ns0/rack2"));
assertTrue(topology.contains("/ns0/rack3"));
assertTrue(topology.contains("/ns1/rack4"));
assertTrue(topology.contains("/ns1/rack5"));
assertTrue(topology.contains("/ns1/rack6"));
// Assert node number.
assertEquals(18,
topology.split("127.0.0.1").length - 1);
}
public void testPrintTopologyJsonFormat() throws Exception {
// Get http Address.
String httpAddress = clusterWithDatanodes.getRandomRouter().getRouter()
.getHttpServerAddress().toString();
// Send http request.
URL url = new URL("http:/" + httpAddress + "/topology");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setReadTimeout(20000);
conn.setConnectTimeout(20000);
conn.setRequestProperty("Accept", "application/json");
conn.connect();
ByteArrayOutputStream out = new ByteArrayOutputStream();
IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
String topology = out.toString();
// Parse json.
JsonNode racks = new ObjectMapper().readTree(topology);
// Assert rack number.
assertEquals(6, racks.size());
// Assert rack info.
assertTrue(topology.contains("/ns0/rack1"));
assertTrue(topology.contains("/ns0/rack2"));
assertTrue(topology.contains("/ns0/rack3"));
assertTrue(topology.contains("/ns1/rack4"));
assertTrue(topology.contains("/ns1/rack5"));
assertTrue(topology.contains("/ns1/rack6"));
// Assert node number.
Iterator<JsonNode> elements = racks.elements();
int dataNodesCount = 0;
while(elements.hasNext()){
JsonNode rack = elements.next();
Iterator<Map.Entry<String, JsonNode>> fields = rack.fields();
while (fields.hasNext()) {
dataNodesCount += fields.next().getValue().size();
}
}
assertEquals(18, dataNodesCount);
}
public void testPrintTopologyNoDatanodesTextFormat() throws Exception {
// Get http Address.
String httpAddress = clusterNoDatanodes.getRandomRouter().getRouter()
.getHttpServerAddress().toString();
// Send http request.
URL url = new URL("http:/" + httpAddress + "/topology");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setReadTimeout(20000);
conn.setConnectTimeout(20000);
conn.connect();
ByteArrayOutputStream out = new ByteArrayOutputStream();
IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
StringBuilder sb =
new StringBuilder("-- Network Topology -- \n");
sb.append(out);
sb.append("\n-- Network Topology -- ");
String topology = sb.toString();
// Assert node number.
assertTrue(topology.contains("No DataNodes"));
}
public void testPrintTopologyNoDatanodesJsonFormat() throws Exception {
// Get http Address.
String httpAddress = clusterNoDatanodes.getRandomRouter().getRouter()
.getHttpServerAddress().toString();
// Send http request.
URL url = new URL("http:/" + httpAddress + "/topology");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setReadTimeout(20000);
conn.setConnectTimeout(20000);
conn.setRequestProperty("Accept", "application/json");
conn.connect();
ByteArrayOutputStream out = new ByteArrayOutputStream();
IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
StringBuilder sb =
new StringBuilder("-- Network Topology -- \n");
sb.append(out);
sb.append("\n-- Network Topology -- ");
String topology = sb.toString();
// Assert node number.
assertTrue(topology.contains("No DataNodes"));
}
}
| TestWithSyncRouterRpc |
java | resilience4j__resilience4j | resilience4j-rxjava3/src/main/java/io/github/resilience4j/rxjava3/ratelimiter/operator/CompletableRateLimiter.java | {
"start": 1054,
"end": 2072
} | class ____ extends Completable {
private final Completable upstream;
private final RateLimiter rateLimiter;
CompletableRateLimiter(Completable upstream, RateLimiter rateLimiter) {
this.upstream = upstream;
this.rateLimiter = rateLimiter;
}
@Override
protected void subscribeActual(CompletableObserver downstream) {
long waitDuration = rateLimiter.reservePermission();
if (waitDuration >= 0) {
if (waitDuration > 0) {
Completable.timer(waitDuration, TimeUnit.NANOSECONDS)
.subscribe(() -> upstream.subscribe(
new RateLimiterCompletableObserver(downstream)));
} else {
upstream.subscribe(new RateLimiterCompletableObserver(downstream));
}
} else {
downstream.onSubscribe(EmptyDisposable.INSTANCE);
downstream.onError(RequestNotPermitted.createRequestNotPermitted(rateLimiter));
}
}
| CompletableRateLimiter |
java | playframework__playframework | documentation/manual/working/javaGuide/main/forms/code/javaguide/forms/csrf/Filters.java | {
"start": 311,
"end": 447
} | class ____ extends DefaultHttpFilters {
@Inject
public Filters(CSRFFilter csrfFilter) {
super(csrfFilter);
}
}
// #filters
| Filters |
java | apache__rocketmq | store/src/main/java/org/apache/rocketmq/store/TransientStorePool.java | {
"start": 1270,
"end": 3262
} | class ____ {
private static final Logger log = LoggerFactory.getLogger(LoggerName.STORE_LOGGER_NAME);
private final int poolSize;
private final int fileSize;
private final Deque<ByteBuffer> availableBuffers;
private volatile boolean isRealCommit = true;
public TransientStorePool(final int poolSize, final int fileSize) {
this.poolSize = poolSize;
this.fileSize = fileSize;
this.availableBuffers = new ConcurrentLinkedDeque<>();
}
/**
* It's a heavy init method.
*/
public void init() {
for (int i = 0; i < poolSize; i++) {
ByteBuffer byteBuffer = ByteBuffer.allocateDirect(fileSize);
final long address = PlatformDependent.directBufferAddress(byteBuffer);
Pointer pointer = new Pointer(address);
LibC.INSTANCE.mlock(pointer, new NativeLong(fileSize));
availableBuffers.offer(byteBuffer);
}
}
public void destroy() {
for (ByteBuffer byteBuffer : availableBuffers) {
final long address = PlatformDependent.directBufferAddress(byteBuffer);
Pointer pointer = new Pointer(address);
LibC.INSTANCE.munlock(pointer, new NativeLong(fileSize));
}
}
public void returnBuffer(ByteBuffer byteBuffer) {
byteBuffer.position(0);
byteBuffer.limit(fileSize);
this.availableBuffers.offerFirst(byteBuffer);
}
public ByteBuffer borrowBuffer() {
ByteBuffer buffer = availableBuffers.pollFirst();
if (availableBuffers.size() < poolSize * 0.4) {
log.warn("TransientStorePool only remain {} sheets.", availableBuffers.size());
}
return buffer;
}
public int availableBufferNums() {
return availableBuffers.size();
}
public boolean isRealCommit() {
return isRealCommit;
}
public void setRealCommit(boolean realCommit) {
isRealCommit = realCommit;
}
}
| TransientStorePool |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java | {
"start": 1853,
"end": 2254
} | enum ____ {
AES_CTR,
SM4_CTR;
static int get(String algorithm, String mode)
throws NoSuchAlgorithmException {
try {
return AlgMode.valueOf(algorithm + "_" + mode).ordinal();
} catch (Exception e) {
throw new NoSuchAlgorithmException("Doesn't support algorithm: " +
algorithm + " and mode: " + mode);
}
}
}
private | AlgMode |
java | spring-projects__spring-security | itest/context/src/main/java/org/springframework/security/integration/multiannotation/PreAuthorizeService.java | {
"start": 797,
"end": 898
} | interface ____ {
@PreAuthorize("hasRole('ROLE_A')")
void preAuthorizedMethod();
}
| PreAuthorizeService |
java | apache__camel | components/camel-netty/src/test/java/org/apache/camel/component/netty/NettyUDPMulticastAsyncTest.java | {
"start": 2508,
"end": 3859
} | class ____ extends BaseNettyTest {
private void sendFile(String uri) {
template.send(uri, new Processor() {
public void process(Exchange exchange) throws Exception {
byte[] buffer = exchange.getContext().getTypeConverter().mandatoryConvertTo(byte[].class,
new File("src/test/resources/test.txt"));
exchange.setProperty(Exchange.CHARSET_NAME, "ASCII");
exchange.getIn().setBody(buffer);
}
});
}
@Test
public void testUDPInOnlyMulticastWithNettyConsumer() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(1);
mock.message(0).body().startsWith("Song Of A Dream".getBytes());
// any IP in the range of 224.0.0.0 through 239.255.255.255 does the job
sendFile("netty:udp://224.1.2.3:{{port}}?sync=false");
mock.assertIsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("netty:udp://224.1.2.3:{{port}}?sync=false&networkInterface=en0")
.to("mock:result")
.to("log:Message");
}
};
}
}
| NettyUDPMulticastAsyncTest |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/nestedbeans/exceptions/NestedMappingsWithExceptionTest.java | {
"start": 865,
"end": 985
} | class ____ {
@ProcessorTest
public void shouldGenerateCodeThatCompiles() {
}
}
| NestedMappingsWithExceptionTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/discriminator/CaseStatementWithTypeTest.java | {
"start": 4939,
"end": 5219
} | class ____ extends SingleParent {
public SingleChildB() {
}
public SingleChildB(Long id) {
super( id );
}
}
@SuppressWarnings({"FieldCanBeLocal", "unused"})
@Entity( name = "JoinedParent" )
@Inheritance( strategy = InheritanceType.JOINED )
public static | SingleChildB |
java | apache__flink | flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/expressions/LocalReferenceExpression.java | {
"start": 1392,
"end": 2949
} | class ____ implements ResolvedExpression {
private final String name;
private final DataType dataType;
LocalReferenceExpression(String name, DataType dataType) {
this.name = Preconditions.checkNotNull(name);
this.dataType = Preconditions.checkNotNull(dataType);
}
public String getName() {
return name;
}
@Override
public DataType getOutputDataType() {
return dataType;
}
@Override
public List<ResolvedExpression> getResolvedChildren() {
return Collections.emptyList();
}
@Override
public String asSummaryString() {
return name;
}
@Override
public String asSerializableString(SqlFactory sqlFactory) {
return EncodingUtils.escapeIdentifier(name);
}
@Override
public List<Expression> getChildren() {
return Collections.emptyList();
}
@Override
public <R> R accept(ExpressionVisitor<R> visitor) {
return visitor.visit(this);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
LocalReferenceExpression that = (LocalReferenceExpression) o;
return name.equals(that.name) && dataType.equals(that.dataType);
}
@Override
public int hashCode() {
return Objects.hash(name, dataType);
}
@Override
public String toString() {
return asSummaryString();
}
}
| LocalReferenceExpression |
java | apache__flink | flink-clients/src/main/java/org/apache/flink/client/deployment/application/FromClasspathEntryClassInformationProvider.java | {
"start": 5263,
"end": 5392
} | class ____ if it could be derived from the specified classpath or was
* explicitly specified.
*
* @return The job | name |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/cluster/routing/IndexRoutingTableTests.java | {
"start": 710,
"end": 7648
} | class ____ extends ESTestCase {
public void testReadyForSearch() {
Index index = new Index(randomIdentifier(), UUIDs.randomBase64UUID());
// 2 primaries that are search and index
ShardId p1 = new ShardId(index, 0);
IndexShardRoutingTable shardTable1 = new IndexShardRoutingTable(
p1,
List.of(getShard(p1, true, ShardRoutingState.STARTED, ShardRouting.Role.DEFAULT))
);
ShardId p2 = new ShardId(index, 1);
IndexShardRoutingTable shardTable2 = new IndexShardRoutingTable(
p2,
List.of(getShard(p2, true, ShardRoutingState.STARTED, ShardRouting.Role.DEFAULT))
);
IndexRoutingTable indexRoutingTable = new IndexRoutingTable(index, new IndexShardRoutingTable[] { shardTable1, shardTable2 });
assertTrue(indexRoutingTable.readyForSearch());
// 2 primaries that are index only
shardTable1 = new IndexShardRoutingTable(p1, List.of(getShard(p1, true, ShardRoutingState.STARTED, ShardRouting.Role.INDEX_ONLY)));
shardTable2 = new IndexShardRoutingTable(p2, List.of(getShard(p2, true, ShardRoutingState.STARTED, ShardRouting.Role.INDEX_ONLY)));
indexRoutingTable = new IndexRoutingTable(index, new IndexShardRoutingTable[] { shardTable1, shardTable2 });
assertFalse(indexRoutingTable.readyForSearch());
// 2 unassigned primaries that are index only
shardTable1 = new IndexShardRoutingTable(
p1,
List.of(getShard(p1, true, ShardRoutingState.UNASSIGNED, ShardRouting.Role.INDEX_ONLY))
);
shardTable2 = new IndexShardRoutingTable(
p2,
List.of(getShard(p2, true, ShardRoutingState.UNASSIGNED, ShardRouting.Role.INDEX_ONLY))
);
indexRoutingTable = new IndexRoutingTable(index, new IndexShardRoutingTable[] { shardTable1, shardTable2 });
assertFalse(indexRoutingTable.readyForSearch());
// 2 primaries that are index only with replicas that are not all available
shardTable1 = new IndexShardRoutingTable(
p1,
List.of(
getShard(p1, true, ShardRoutingState.STARTED, ShardRouting.Role.INDEX_ONLY),
getShard(p1, false, ShardRoutingState.STARTED, ShardRouting.Role.SEARCH_ONLY),
getShard(p1, false, ShardRoutingState.STARTED, ShardRouting.Role.SEARCH_ONLY)
)
);
shardTable2 = new IndexShardRoutingTable(
p2,
List.of(
getShard(p2, true, ShardRoutingState.STARTED, ShardRouting.Role.INDEX_ONLY),
getShard(p2, false, ShardRoutingState.UNASSIGNED, ShardRouting.Role.SEARCH_ONLY),
getShard(p2, false, ShardRoutingState.UNASSIGNED, ShardRouting.Role.SEARCH_ONLY)
)
);
indexRoutingTable = new IndexRoutingTable(index, new IndexShardRoutingTable[] { shardTable1, shardTable2 });
assertFalse(indexRoutingTable.readyForSearch());
// 2 primaries that are index only with some replicas that are all available
shardTable1 = new IndexShardRoutingTable(
p1,
List.of(
getShard(p1, true, ShardRoutingState.STARTED, ShardRouting.Role.INDEX_ONLY),
getShard(p1, false, ShardRoutingState.STARTED, ShardRouting.Role.SEARCH_ONLY),
getShard(p1, false, ShardRoutingState.STARTED, ShardRouting.Role.SEARCH_ONLY)
)
);
shardTable2 = new IndexShardRoutingTable(
p2,
List.of(
getShard(p2, true, ShardRoutingState.STARTED, ShardRouting.Role.INDEX_ONLY),
getShard(p2, false, ShardRoutingState.STARTED, ShardRouting.Role.SEARCH_ONLY),
getShard(p2, false, ShardRoutingState.STARTED, ShardRouting.Role.SEARCH_ONLY)
)
);
indexRoutingTable = new IndexRoutingTable(index, new IndexShardRoutingTable[] { shardTable1, shardTable2 });
assertTrue(indexRoutingTable.readyForSearch());
// 2 unassigned primaries that are index only with some replicas that are all available
shardTable1 = new IndexShardRoutingTable(
p1,
List.of(
getShard(p1, true, ShardRoutingState.UNASSIGNED, ShardRouting.Role.INDEX_ONLY),
getShard(p1, false, ShardRoutingState.STARTED, ShardRouting.Role.SEARCH_ONLY),
getShard(p1, false, ShardRoutingState.STARTED, ShardRouting.Role.SEARCH_ONLY)
)
);
shardTable2 = new IndexShardRoutingTable(
p2,
List.of(
getShard(p2, true, ShardRoutingState.UNASSIGNED, ShardRouting.Role.INDEX_ONLY),
getShard(p2, false, ShardRoutingState.STARTED, ShardRouting.Role.SEARCH_ONLY),
getShard(p2, false, ShardRoutingState.STARTED, ShardRouting.Role.SEARCH_ONLY)
)
);
indexRoutingTable = new IndexRoutingTable(index, new IndexShardRoutingTable[] { shardTable1, shardTable2 });
assertTrue(indexRoutingTable.readyForSearch());
// 2 primaries that are index only with at least 1 replica per primary that is available
shardTable1 = new IndexShardRoutingTable(
p1,
List.of(
getShard(p1, true, ShardRoutingState.STARTED, ShardRouting.Role.INDEX_ONLY),
getShard(p1, false, ShardRoutingState.STARTED, ShardRouting.Role.SEARCH_ONLY),
getShard(p1, false, ShardRoutingState.UNASSIGNED, ShardRouting.Role.SEARCH_ONLY)
)
);
shardTable2 = new IndexShardRoutingTable(
p2,
List.of(
getShard(p2, true, ShardRoutingState.STARTED, ShardRouting.Role.INDEX_ONLY),
getShard(p2, false, ShardRoutingState.UNASSIGNED, ShardRouting.Role.SEARCH_ONLY),
getShard(p2, false, ShardRoutingState.STARTED, ShardRouting.Role.SEARCH_ONLY)
)
);
indexRoutingTable = new IndexRoutingTable(index, new IndexShardRoutingTable[] { shardTable1, shardTable2 });
assertTrue(indexRoutingTable.readyForSearch());
}
private ShardRouting getShard(ShardId shardId, boolean isPrimary, ShardRoutingState state, ShardRouting.Role role) {
return new ShardRouting(
shardId,
state == ShardRoutingState.UNASSIGNED ? null : randomIdentifier(),
state == ShardRoutingState.UNASSIGNED || state == ShardRoutingState.STARTED ? null : randomIdentifier(),
isPrimary,
state,
TestShardRouting.buildRecoverySource(isPrimary, state),
TestShardRouting.buildUnassignedInfo(state),
TestShardRouting.buildRelocationFailureInfo(state),
TestShardRouting.buildAllocationId(state),
randomLongBetween(-1, 1024),
role
);
}
}
| IndexRoutingTableTests |
java | quarkusio__quarkus | extensions/opentelemetry/runtime/src/main/java/io/quarkus/opentelemetry/runtime/tracing/intrumentation/vertx/HttpInstrumenterVertxTracer.java | {
"start": 2854,
"end": 9067
} | class ____ implements InstrumenterVertxTracer<HttpRequest, HttpResponse> {
private final Instrumenter<HttpRequest, HttpResponse> serverInstrumenter;
private final Instrumenter<HttpRequest, HttpResponse> clientInstrumenter;
public HttpInstrumenterVertxTracer(final OpenTelemetry openTelemetry,
final OTelRuntimeConfig runtimeConfig,
final OTelBuildConfig buildConfig) {
serverInstrumenter = getServerInstrumenter(openTelemetry, runtimeConfig, buildConfig);
clientInstrumenter = getClientInstrumenter(openTelemetry, runtimeConfig);
}
@Override
public <R> boolean canHandle(final R request, final TagExtractor<R> tagExtractor) {
return request instanceof HttpRequest;
}
@Override
public Instrumenter<HttpRequest, HttpResponse> getReceiveRequestInstrumenter() {
return serverInstrumenter;
}
@Override
public Instrumenter<HttpRequest, HttpResponse> getSendResponseInstrumenter() {
return serverInstrumenter;
}
@Override
public Instrumenter<HttpRequest, HttpResponse> getSendRequestInstrumenter() {
return clientInstrumenter;
}
@Override
public Instrumenter<HttpRequest, HttpResponse> getReceiveResponseInstrumenter() {
return clientInstrumenter;
}
@Override
public OpenTelemetryVertxTracer.SpanOperation spanOperation(
final Context context,
final HttpRequest request,
final MultiMap headers,
final io.opentelemetry.context.Context spanContext,
final Scope scope) {
HttpRequestSpan requestSpan = HttpRequestSpan.request(request, headers, context, spanContext);
return OpenTelemetryVertxTracer.SpanOperation.span(context, requestSpan, headers, spanContext, scope);
}
@Override
public <R> void sendResponse(
final Context context,
final R response,
final OpenTelemetryVertxTracer.SpanOperation spanOperation,
final Throwable failure,
final TagExtractor<R> tagExtractor) {
HttpServerRoute.update(spanOperation.getSpanContext(), SERVER_FILTER, RouteGetter.ROUTE_GETTER,
((HttpRequestSpan) spanOperation.getRequest()), (HttpResponse) response);
InstrumenterVertxTracer.super.sendResponse(context, response, spanOperation, failure, tagExtractor);
}
@Override
public <R> OpenTelemetryVertxTracer.SpanOperation sendRequest(Context context,
SpanKind kind,
TracingPolicy policy,
R request,
String operation,
BiConsumer<String, String> headers,
TagExtractor<R> tagExtractor) {
OpenTelemetryVertxTracer.SpanOperation spanOperation = InstrumenterVertxTracer.super.sendRequest(context, kind, policy,
request,
operation, headers, tagExtractor);
if (spanOperation != null) {
Context runningCtx = spanOperation.getContext();
if (VertxContext.isDuplicatedContext(runningCtx)) {
String pathTemplate = runningCtx.getLocal("ClientUrlPathTemplate");
if (pathTemplate != null && !pathTemplate.isEmpty()) {
Span.fromContext(spanOperation.getSpanContext())
.updateName(((HttpRequest) spanOperation.getRequest()).method().name() + " " + pathTemplate);
}
}
}
return spanOperation;
}
@Override
public HttpRequest writableHeaders(
final HttpRequest request, final BiConsumer<String, String> headers) {
return WriteHeadersHttpRequest.request(request, headers);
}
static Instrumenter<HttpRequest, HttpResponse> getServerInstrumenter(final OpenTelemetry openTelemetry,
final OTelRuntimeConfig runtimeConfig, final OTelBuildConfig buildConfig) {
final ServerAttributesExtractor serverAttributesExtractor = new ServerAttributesExtractor();
final InstrumenterBuilder<HttpRequest, HttpResponse> serverBuilder = Instrumenter.builder(
openTelemetry,
INSTRUMENTATION_NAME,
HttpSpanNameExtractor.create(serverAttributesExtractor));
serverBuilder
.setEnabled(!runtimeConfig.sdkDisabled())
.setSpanStatusExtractor(HttpSpanStatusExtractor.create(serverAttributesExtractor))
.addAttributesExtractor(
HttpServerAttributesExtractor.create(serverAttributesExtractor))
.addAttributesExtractor(new AdditionalServerAttributesExtractor())
.addContextCustomizer(HttpServerRoute.create(serverAttributesExtractor));
if (buildConfig.metrics().enabled().orElse(false) &&
!runtimeConfig.sdkDisabled() &&
runtimeConfig.instrument().httpServerMetrics()) {
serverBuilder.addOperationMetrics(HttpServerMetrics.get());
}
return serverBuilder.buildServerInstrumenter(new HttpRequestTextMapGetter());
}
static Instrumenter<HttpRequest, HttpResponse> getClientInstrumenter(final OpenTelemetry openTelemetry,
final OTelRuntimeConfig runtimeConfig) {
ServerAttributesExtractor serverAttributesExtractor = new ServerAttributesExtractor();
HttpClientAttributesExtractor httpClientAttributesExtractor = new HttpClientAttributesExtractor();
InstrumenterBuilder<HttpRequest, HttpResponse> clientBuilder = Instrumenter.builder(
openTelemetry,
INSTRUMENTATION_NAME,
new ClientSpanNameExtractor(httpClientAttributesExtractor));
clientBuilder.setEnabled(!runtimeConfig.sdkDisabled());
return clientBuilder
.setSpanStatusExtractor(HttpSpanStatusExtractor.create(serverAttributesExtractor))
.addAttributesExtractor(
io.opentelemetry.instrumentation.api.semconv.http.HttpClientAttributesExtractor.create(
httpClientAttributesExtractor))
.buildClientInstrumenter(new HttpRequestTextMapSetter());
}
private static | HttpInstrumenterVertxTracer |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/MustBeClosedCheckerTest.java | {
"start": 9253,
"end": 11435
} | class ____ extends Closeable {
@MustBeClosed
MustBeClosedAnnotatedConstructor() {}
}
@SuppressWarnings("MustBeClosedChecker")
void respectsSuppressWarnings_onMethod() {
new Foo().mustBeClosedAnnotatedMethod();
}
void respectsSuppressWarnings_onLocal() {
@SuppressWarnings("MustBeClosedChecker")
var unused = new Foo().mustBeClosedAnnotatedMethod();
}
void negativeCase3() {
try (Closeable closeable = new Foo().mustBeClosedAnnotatedMethod()) {}
}
void negativeCase4() {
Foo foo = new Foo();
try (Closeable closeable = foo.mustBeClosedAnnotatedMethod()) {}
}
void negativeCase5() {
new Foo().bar();
}
void negativeCase6() {
try (MustBeClosedAnnotatedConstructor foo = new MustBeClosedAnnotatedConstructor()) {}
}
void negativeCase7() {
try (MustBeClosedAnnotatedConstructor foo = new MustBeClosedAnnotatedConstructor();
Closeable closeable = new Foo().mustBeClosedAnnotatedMethod()) {}
}
@MustBeClosed
Closeable positiveCase8() {
// This is fine since the caller method is annotated.
return new MustBeClosedAnnotatedConstructor();
}
@MustBeClosed
Closeable positiveCase7() {
// This is fine since the caller method is annotated.
return new Foo().mustBeClosedAnnotatedMethod();
}
@MustBeClosed
Closeable ternary(boolean condition) {
return condition ? new Foo().mustBeClosedAnnotatedMethod() : null;
}
@MustBeClosed
Closeable cast() {
// TODO(b/241012760): remove the following line after the bug is fixed.
// BUG: Diagnostic contains:
return (Closeable) new Foo().mustBeClosedAnnotatedMethod();
}
void tryWithResources() {
Foo foo = new Foo();
Closeable closeable = foo.mustBeClosedAnnotatedMethod();
try {
} finally {
closeable.close();
}
}
void mockitoWhen(Foo mockFoo) {
when(mockFoo.mustBeClosedAnnotatedMethod()).thenReturn(null);
doReturn(null).when(mockFoo).mustBeClosedAnnotatedMethod();
}
void testException() {
try {
((Foo) null).mustBeClosedAnnotatedMethod();
fail();
} catch (NullPointerException e) {
}
}
abstract | MustBeClosedAnnotatedConstructor |
java | reactor__reactor-core | reactor-core-micrometer/src/main/java/reactor/core/observability/micrometer/TimedSchedulerMeterDocumentation.java | {
"start": 1080,
"end": 3198
} | enum ____ implements MeterDocumentation {
/**
* Counter that increments by one each time a task is submitted (via any of the
* schedule methods on both Scheduler and Scheduler.Worker).
* <p>
* Note that there are actually 4 counters, which can be differentiated by the SubmittedTags#SUBMISSION tag.
* The sum of all these can thus be compared with the TASKS_COMPLETED counter.
*/
TASKS_SUBMITTED {
@Override
public KeyName[] getKeyNames() {
return SubmittedTags.values();
}
@Override
public String getName() {
return "%s.scheduler.tasks.submitted";
}
@Override
public Meter.Type getType() {
return Meter.Type.COUNTER;
}
},
/**
* LongTaskTimer reflecting tasks currently running. Note that this reflects all types of
* active tasks, including tasks scheduled with a delay or periodically (each
* iteration being considered an active task).
*/
TASKS_ACTIVE {
@Override
public String getName() {
return "%s.scheduler.tasks.active";
}
@Override
public Meter.Type getType() {
return Meter.Type.LONG_TASK_TIMER;
}
},
/**
* Timer reflecting tasks that have finished execution. Note that this reflects all types of
* active tasks, including tasks with a delay or periodically (each iteration being considered
* a separate completed task).
*/
TASKS_COMPLETED {
@Override
public String getName() {
return "%s.scheduler.tasks.completed";
}
@Override
public Meter.Type getType() {
return Meter.Type.TIMER;
}
},
/**
* LongTaskTimer reflecting tasks that were submitted for immediate execution but
* couldn't be started immediately because the scheduler is already at max capacity.
* Note that only immediate submissions via Scheduler#schedule(Runnable) and
* Scheduler.Worker#schedule(Runnable) are considered.
*/
TASKS_PENDING {
@Override
public String getName() {
return "%s.scheduler.tasks.pending";
}
@Override
public Meter.Type getType() {
return Meter.Type.LONG_TASK_TIMER;
}
}
;
/**
* Tag for the SchedulerMeters#TASKS_SUBMITTED meter.
*/
public | TimedSchedulerMeterDocumentation |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/DebeziumDb2ComponentBuilderFactory.java | {
"start": 6559,
"end": 6749
} | class ____ the commit policy. It defines when
* offsets commit has to be triggered based on the number of events
* processed and the time elapsed since the last commit. This | of |
java | apache__camel | components/camel-telegram/src/main/java/org/apache/camel/component/telegram/model/OutgoingTextMessage.java | {
"start": 1050,
"end": 2591
} | class ____ extends OutgoingMessage {
private static final long serialVersionUID = -8684079202025229263L;
private String text;
@JsonProperty("parse_mode")
private String parseMode;
@JsonProperty("disable_web_page_preview")
private Boolean disableWebPagePreview;
@JsonProperty("reply_markup")
private ReplyMarkup replyMarkup;
public OutgoingTextMessage() {
}
public OutgoingTextMessage(String text, String parseMode, Boolean disableWebPagePreview,
ReplyMarkup replyMarkup) {
this.text = text;
this.parseMode = parseMode;
this.disableWebPagePreview = disableWebPagePreview;
this.replyMarkup = replyMarkup;
}
public String getText() {
return text;
}
public void setText(String text) {
this.text = text;
}
public String getParseMode() {
return parseMode;
}
public void setParseMode(String parseMode) {
this.parseMode = parseMode;
}
public Boolean getDisableWebPagePreview() {
return disableWebPagePreview;
}
public void setDisableWebPagePreview(Boolean disableWebPagePreview) {
this.disableWebPagePreview = disableWebPagePreview;
}
public ReplyMarkup getReplyMarkup() {
return replyMarkup;
}
public void setReplyMarkup(ReplyMarkup replyMarkup) {
this.replyMarkup = replyMarkup;
}
public static Builder builder() {
return new Builder();
}
public static | OutgoingTextMessage |
java | apache__thrift | lib/java/src/main/java/org/apache/thrift/transport/TByteBuffer.java | {
"start": 259,
"end": 2814
} | class ____ extends TEndpointTransport {
private final ByteBuffer byteBuffer;
/**
* Creates a new TByteBuffer wrapping a given NIO ByteBuffer and custom TConfiguration.
*
* @param configuration the custom TConfiguration.
* @param byteBuffer the NIO ByteBuffer to wrap.
* @throws TTransportException on error.
*/
public TByteBuffer(TConfiguration configuration, ByteBuffer byteBuffer)
throws TTransportException {
super(configuration);
this.byteBuffer = byteBuffer;
updateKnownMessageSize(byteBuffer.capacity());
}
/**
* Creates a new TByteBuffer wrapping a given NIO ByteBuffer.
*
* @param byteBuffer the NIO ByteBuffer to wrap.
* @throws TTransportException on error.
*/
public TByteBuffer(ByteBuffer byteBuffer) throws TTransportException {
this(new TConfiguration(), byteBuffer);
}
@Override
public boolean isOpen() {
return true;
}
@Override
public void open() {}
@Override
public void close() {}
@Override
public int read(byte[] buf, int off, int len) throws TTransportException {
//
checkReadBytesAvailable(len);
final int n = Math.min(byteBuffer.remaining(), len);
if (n > 0) {
try {
byteBuffer.get(buf, off, n);
} catch (BufferUnderflowException e) {
throw new TTransportException("Unexpected end of input buffer", e);
}
}
return n;
}
@Override
public void write(byte[] buf, int off, int len) throws TTransportException {
try {
byteBuffer.put(buf, off, len);
} catch (BufferOverflowException e) {
throw new TTransportException("Not enough room in output buffer", e);
}
}
/**
* Gets the underlying NIO ByteBuffer.
*
* @return the underlying NIO ByteBuffer.
*/
public ByteBuffer getByteBuffer() {
return byteBuffer;
}
/**
* Convenience method to call clear() on the underlying NIO ByteBuffer.
*
* @return this instance.
*/
public TByteBuffer clear() {
byteBuffer.clear();
return this;
}
/**
* Convenience method to call flip() on the underlying NIO ByteBuffer.
*
* @return this instance.
*/
public TByteBuffer flip() {
byteBuffer.flip();
return this;
}
/**
* Convenience method to convert the underlying NIO ByteBuffer to a plain old byte array.
*
* @return the byte array backing the underlying NIO ByteBuffer.
*/
public byte[] toByteArray() {
final byte[] data = new byte[byteBuffer.remaining()];
byteBuffer.slice().get(data);
return data;
}
}
| TByteBuffer |
java | reactor__reactor-core | reactor-tools/src/main/java/reactor/tools/agent/ReactorDebugByteBuddyPlugin.java | {
"start": 1288,
"end": 2266
} | class ____ implements Plugin {
@Override
public boolean matches(TypeDescription target) {
return true;
}
@Override
public DynamicType.Builder<?> apply(
DynamicType.Builder<?> builder,
TypeDescription typeDescription,
ClassFileLocator classFileLocator
) {
return builder.visit(new AsmVisitorWrapper() {
@Override
public int mergeWriter(int flags) {
return flags | ClassWriter.COMPUTE_MAXS;
}
@Override
public int mergeReader(int flags) {
return flags;
}
@Override
public ClassVisitor wrap(
TypeDescription instrumentedType,
ClassVisitor classVisitor,
Implementation.Context implementationContext,
TypePool typePool,
FieldList<FieldDescription.InDefinedShape> fields,
MethodList<?> methods,
int writerFlags,
int readerFlags
) {
return new ReactorDebugClassVisitor(classVisitor, new AtomicBoolean());
}
});
}
@Override
public void close() {
}
}
| ReactorDebugByteBuddyPlugin |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/internals/StreamsConfigUtils.java | {
"start": 1133,
"end": 1262
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(StreamsConfigUtils.class);
public | StreamsConfigUtils |
java | elastic__elasticsearch | x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRCS1PainlessExecuteIT.java | {
"start": 978,
"end": 12892
} | class ____ extends AbstractRemoteClusterSecurityTestCase {
static {
fulfillingCluster = ElasticsearchCluster.local().name("fulfilling-cluster").nodes(3).apply(commonClusterConfig).build();
queryCluster = ElasticsearchCluster.local().name("query-cluster").apply(commonClusterConfig).build();
}
@ClassRule
public static TestRule clusterRule = RuleChain.outerRule(fulfillingCluster).around(queryCluster);
@SuppressWarnings({ "unchecked", "checkstyle:LineLength" })
public void testPainlessExecute() throws Exception {
// Setup RCS 1.0 (basicSecurity=true)
configureRemoteCluster("my_remote_cluster", fulfillingCluster, true, randomBoolean(), randomBoolean());
{
// Query cluster -> add role for test user - do not give any privileges for remote_indices
final var putRoleRequest = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE);
putRoleRequest.setJsonEntity("""
{
"indices": [
{
"names": ["local_index", "my_local*"],
"privileges": ["read"]
}
]
}""");
assertOK(adminClient().performRequest(putRoleRequest));
// Query cluster -> create user and assign role
final var putUserRequest = new Request("PUT", "/_security/user/" + REMOTE_SEARCH_USER);
putUserRequest.setJsonEntity("""
{
"password": "x-pack-test-password",
"roles" : ["remote_search"]
}""");
assertOK(adminClient().performRequest(putUserRequest));
// Query cluster -> create test index
final var indexDocRequest = new Request("POST", "/local_index/_doc?refresh=true");
indexDocRequest.setJsonEntity("{\"local_foo\": \"local_bar\"}");
assertOK(client().performRequest(indexDocRequest));
// Fulfilling cluster -> create test indices
final Request bulkRequest = new Request("POST", "/_bulk?refresh=true");
bulkRequest.setJsonEntity(Strings.format("""
{ "index": { "_index": "index1" } }
{ "foo": "bar" }
{ "index": { "_index": "secretindex" } }
{ "bar": "foo" }
"""));
assertOK(performRequestAgainstFulfillingCluster(bulkRequest));
}
{
// TEST CASE 1: Query local cluster for local_index - should work since role has read perms for it
Request painlessExecuteLocal = createPainlessExecuteRequest("local_index");
Response response = performRequestWithRemoteSearchUser(painlessExecuteLocal);
assertOK(response);
String responseBody = EntityUtils.toString(response.getEntity());
assertThat(responseBody, equalTo("{\"result\":[\"test\"]}"));
}
{
// TEST CASE 2: Query remote cluster for index1 - should fail since no permissions granted for remote clusters yet
Request painlessExecuteRemote = createPainlessExecuteRequest("my_remote_cluster:index1");
ResponseException exc = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(painlessExecuteRemote));
assertThat(exc.getResponse().getStatusLine().getStatusCode(), is(403));
String errorResponseBody = EntityUtils.toString(exc.getResponse().getEntity());
assertThat(errorResponseBody, containsString("unauthorized for user [remote_search_user]"));
assertThat(errorResponseBody, containsString("on indices [index1]"));
assertThat(errorResponseBody, containsString("\"type\":\"security_exception\""));
}
{
// add user role and user on remote cluster
var putRoleOnRemoteClusterRequest = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE);
putRoleOnRemoteClusterRequest.setJsonEntity("""
{
"indices": [
{
"names": ["index*"],
"privileges": ["read", "read_cross_cluster"]
}
]
}""");
assertOK(performRequestAgainstFulfillingCluster(putRoleOnRemoteClusterRequest));
var putUserOnRemoteClusterRequest = new Request("PUT", "/_security/user/" + REMOTE_SEARCH_USER);
putUserOnRemoteClusterRequest.setJsonEntity("""
{
"password": "x-pack-test-password",
"roles" : ["remote_search"]
}""");
assertOK(performRequestAgainstFulfillingCluster(putUserOnRemoteClusterRequest));
}
{
// TEST CASE 3: Query remote cluster for secretindex - should fail since no perms granted for it
Request painlessExecuteRemote = createPainlessExecuteRequest("my_remote_cluster:secretindex");
ResponseException exc = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(painlessExecuteRemote));
String errorResponseBody = EntityUtils.toString(exc.getResponse().getEntity());
assertThat(exc.getResponse().getStatusLine().getStatusCode(), is(403));
assertThat(errorResponseBody, containsString("unauthorized for user [remote_search_user]"));
assertThat(errorResponseBody, containsString("on indices [secretindex]"));
assertThat(errorResponseBody, containsString("\"type\":\"security_exception\""));
}
{
// TEST CASE 4: Query remote cluster for index1 - should succeed since read and cross-cluster-read perms granted
Request painlessExecuteRemote = createPainlessExecuteRequest("my_remote_cluster:index1");
Response response = performRequestWithRemoteSearchUser(painlessExecuteRemote);
String responseBody = EntityUtils.toString(response.getEntity());
assertOK(response);
assertThat(responseBody, equalTo("{\"result\":[\"test\"]}"));
}
{
// TEST CASE 5: Query local cluster for not_present index - should fail with 403 since role does not have perms for this index
Request painlessExecuteLocal = createPainlessExecuteRequest("index_not_present");
ResponseException exc = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(painlessExecuteLocal));
assertThat(exc.getResponse().getStatusLine().getStatusCode(), is(403));
String errorResponseBody = EntityUtils.toString(exc.getResponse().getEntity());
assertThat(errorResponseBody, containsString("unauthorized for user [remote_search_user]"));
assertThat(errorResponseBody, containsString("on indices [index_not_present]"));
assertThat(errorResponseBody, containsString("\"type\":\"security_exception\""));
}
{
// TEST CASE 6: Query local cluster for my_local_123 index - role has perms for this pattern, but index does not exist, so 404
Request painlessExecuteLocal = createPainlessExecuteRequest("my_local_123");
ResponseException exc = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(painlessExecuteLocal));
assertThat(exc.getResponse().getStatusLine().getStatusCode(), is(404));
String errorResponseBody = EntityUtils.toString(exc.getResponse().getEntity());
assertThat(errorResponseBody, containsString("\"type\":\"index_not_found_exception\""));
}
{
// TEST CASE 7: Query local cluster for my_local* index - painless/execute does not allow wildcards, so fails with 400
Request painlessExecuteLocal = createPainlessExecuteRequest("my_local*");
ResponseException exc = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(painlessExecuteLocal));
assertThat(exc.getResponse().getStatusLine().getStatusCode(), is(400));
String errorResponseBody = EntityUtils.toString(exc.getResponse().getEntity());
assertThat(errorResponseBody, containsString("indices:data/read/scripts/painless/execute does not support wildcards"));
assertThat(errorResponseBody, containsString("\"type\":\"illegal_argument_exception\""));
}
{
// TEST CASE 8: Query remote cluster for cluster that does not exist, and user does not have perms for that pattern - 403 ???
Request painlessExecuteRemote = createPainlessExecuteRequest("my_remote_cluster:abc123");
ResponseException exc = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(painlessExecuteRemote));
assertThat(exc.getResponse().getStatusLine().getStatusCode(), is(403));
String errorResponseBody = EntityUtils.toString(exc.getResponse().getEntity());
assertThat(errorResponseBody, containsString("unauthorized for user [remote_search_user]"));
assertThat(errorResponseBody, containsString("on indices [abc123]"));
assertThat(errorResponseBody, containsString("\"type\":\"security_exception\""));
}
{
// TEST CASE 9: Query remote cluster for cluster that does not exist, but has permissions for the index pattern - 404
Request painlessExecuteRemote = createPainlessExecuteRequest("my_remote_cluster:index123");
ResponseException exc = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(painlessExecuteRemote));
assertThat(exc.getResponse().getStatusLine().getStatusCode(), is(404));
String errorResponseBody = EntityUtils.toString(exc.getResponse().getEntity());
assertThat(errorResponseBody, containsString("\"type\":\"index_not_found_exception\""));
}
{
// TEST CASE 10: Query remote cluster with wildcard in index - painless/execute does not allow wildcards, so fails with 400
Request painlessExecuteRemote = createPainlessExecuteRequest("my_remote_cluster:index*");
ResponseException exc = expectThrows(ResponseException.class, () -> performRequestWithRemoteSearchUser(painlessExecuteRemote));
assertThat(exc.getResponse().getStatusLine().getStatusCode(), is(400));
String errorResponseBody = EntityUtils.toString(exc.getResponse().getEntity());
assertThat(errorResponseBody, containsString("indices:data/read/scripts/painless/execute does not support wildcards"));
assertThat(errorResponseBody, containsString("\"type\":\"illegal_argument_exception\""));
}
}
private static Request createPainlessExecuteRequest(String indexExpression) {
Request painlessExecuteLocal = new Request("POST", "_scripts/painless/_execute");
String body = """
{
"script": {
"source": "emit(\\"test\\")"
},
"context": "keyword_field",
"context_setup": {
"index": "INDEX_EXPRESSION_HERE",
"document": {
"@timestamp": "2023-05-06T16:22:22.000Z"
}
}
}""".replace("INDEX_EXPRESSION_HERE", indexExpression);
painlessExecuteLocal.setJsonEntity(body);
return painlessExecuteLocal;
}
private Response performRequestWithRemoteSearchUser(final Request request) throws IOException {
request.setOptions(
RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", headerFromRandomAuthMethod(REMOTE_SEARCH_USER, PASS))
);
return client().performRequest(request);
}
}
| RemoteClusterSecurityRCS1PainlessExecuteIT |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/ser/jdk/JDKMiscSerializers.java | {
"start": 825,
"end": 2164
} | class ____
{
/**
* Method called by {@link BasicSerializerFactory} to find one of serializers provided here.
*/
public static final ValueSerializer<?> find(Class<?> raw)
{
if (raw == UUID.class) {
return new UUIDSerializer();
}
if (raw == AtomicBoolean.class) {
return new AtomicBooleanSerializer();
}
if (raw == AtomicInteger.class) {
return new AtomicIntegerSerializer();
}
if (raw == AtomicLong.class) {
return new AtomicLongSerializer();
}
// Jackson-specific type(s)
// (Q: can this ever be sub-classed?)
if (raw == TokenBuffer.class) {
return new TokenBufferSerializer();
}
// And then some stranger types... not 100% they are needed but:
if ((raw == Void.class) || (raw == Void.TYPE)) {
return NullSerializer.instance;
}
if (ByteArrayOutputStream.class.isAssignableFrom(raw)) {
return new ByteArrayOutputStreamSerializer();
}
return null;
}
/*
/**********************************************************************
/* Serializers for atomic types
/**********************************************************************
*/
public static | JDKMiscSerializers |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/UngroupedOverloadsTest.java | {
"start": 16573,
"end": 17480
} | class ____ {
public void foo() {}
public void foo(int x) {}
public void foo(int x, int y) {}
public void foo(int x, int y, int z) {}
public void baz() {}
public void baz(int x) {}
public void baz(int x, int y) {}
public void bar() {}
public void bar(int x) {}
public void bar(int x, int y) {}
public void quux() {}
public void quux(int x) {}
public void quux(int x, int y) {}
}\
""")
.doTest();
}
@Test
public void ungroupedOverloadsRefactoringBelowCutoffLimit() {
// Here we have 4 methods so refactoring should be applied.
refactoringHelper
.addInputLines(
"in/BelowLimit.java",
"""
| UngroupedOverloadsRefactoringInterleaved |
java | apache__kafka | metadata/src/main/java/org/apache/kafka/image/node/ConfigurationsImageNode.java | {
"start": 1060,
"end": 2836
} | class ____ implements MetadataNode {
/**
* The name of this node.
*/
public static final String NAME = "configs";
/**
* The configurations image.
*/
private final ConfigurationsImage image;
public ConfigurationsImageNode(ConfigurationsImage image) {
this.image = image;
}
@Override
public Collection<String> childNames() {
ArrayList<String> childNames = new ArrayList<>();
for (ConfigResource configResource : image.resourceData().keySet()) {
if (configResource.isDefault()) {
childNames.add(configResource.type().name());
} else {
childNames.add(configResource.type().name() + ":" + configResource.name());
}
}
return childNames;
}
static ConfigResource resourceFromName(String name) {
for (ConfigResource.Type type : ConfigResource.Type.values()) {
if (name.startsWith(type.name())) {
String key = name.substring(type.name().length());
if (key.isEmpty()) {
return new ConfigResource(type, "");
} else if (key.startsWith(":")) {
return new ConfigResource(type, key.substring(1));
} else {
return null;
}
}
}
return null;
}
@Override
public MetadataNode child(String name) {
ConfigResource resource = resourceFromName(name);
if (resource == null) return null;
ConfigurationImage configurationImage = image.resourceData().get(resource);
if (configurationImage == null) return null;
return new ConfigurationImageNode(configurationImage);
}
}
| ConfigurationsImageNode |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/observable/ObservableMergeWithMaybe.java | {
"start": 1339,
"end": 1911
} | class ____<T> extends AbstractObservableWithUpstream<T, T> {
final MaybeSource<? extends T> other;
public ObservableMergeWithMaybe(Observable<T> source, MaybeSource<? extends T> other) {
super(source);
this.other = other;
}
@Override
protected void subscribeActual(Observer<? super T> observer) {
MergeWithObserver<T> parent = new MergeWithObserver<>(observer);
observer.onSubscribe(parent);
source.subscribe(parent);
other.subscribe(parent.otherObserver);
}
static final | ObservableMergeWithMaybe |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/dialect/functional/HANASearchTest.java | {
"start": 1235,
"end": 7937
} | class ____ {
private static final String ENTITY_NAME = "SearchEntity";
@BeforeAll
protected void prepareTest(SessionFactoryScope scope) throws Exception {
scope.inTransaction(
session -> session.doWork(
connection -> {
try (PreparedStatement ps = connection.prepareStatement( "CREATE COLUMN TABLE " + ENTITY_NAME
+ " (key INTEGER, t TEXT, c NVARCHAR(255), PRIMARY KEY (key))" )) {
ps.execute();
}
try (PreparedStatement ps = connection
.prepareStatement( "CREATE FULLTEXT INDEX FTI ON " + ENTITY_NAME + " (c)" )) {
ps.execute();
}
}
)
);
}
@AfterAll
protected void cleanupTest(SessionFactoryScope scope) throws Exception {
scope.inTransaction(
session -> session.doWork(
connection -> {
try (PreparedStatement ps = connection.prepareStatement( "DROP TABLE " + ENTITY_NAME + " CASCADE" )) {
ps.execute();
}
catch (Exception e) {
// Ignore
}
}
)
);
}
@AfterEach
protected void cleanupTestData(SessionFactoryScope scope) throws Exception {
scope.getSessionFactory().getSchemaManager().truncate();
}
@Test
@JiraKey(value = "HHH-13021")
public void testTextType(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
SearchEntity entity = new SearchEntity();
entity.key = Integer.valueOf( 1 );
entity.t = "TEST TEXT";
entity.c = "TEST STRING";
session.persist( entity );
session.flush();
Query<Object[]> legacyQuery = session.createQuery( "select b, snippets(t), highlighted(t), score() from "
+ ENTITY_NAME + " b where contains(b.t, 'text')", Object[].class );
Object[] result = legacyQuery.getSingleResult();
SearchEntity retrievedEntity = (SearchEntity) result[0];
assertEquals( 4, result.length );
assertEquals( Integer.valueOf( 1 ), retrievedEntity.key );
assertEquals( "TEST TEXT", retrievedEntity.t );
assertEquals( "TEST STRING", retrievedEntity.c );
assertEquals( "TEST <b>TEXT</b>", result[1] );
assertEquals( "TEST <b>TEXT</b>", result[2] );
assertEquals( 0.75d, result[3] );
}
);
}
@Test
@JiraKey(value = "HHH-13021")
public void testTextTypeFalse(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
SearchEntity entity = new SearchEntity();
entity.key = Integer.valueOf( 1 );
entity.t = "TEST TEXT";
entity.c = "TEST STRING";
session.persist( entity );
session.flush();
Query<Object[]> legacyQuery = session.createQuery( "select b, snippets(t), highlighted(t), score() from " + ENTITY_NAME
+ " b where not contains(b.t, 'string')", Object[].class );
Object[] result = legacyQuery.getSingleResult();
SearchEntity retrievedEntity = (SearchEntity) result[0];
assertEquals( 4, result.length );
assertEquals( Integer.valueOf( 1 ), retrievedEntity.key );
assertEquals( "TEST TEXT", retrievedEntity.t );
assertEquals( "TEST STRING", retrievedEntity.c );
assertEquals( "TEST TEXT", result[1] );
assertEquals( "TEST TEXT", result[2] );
assertEquals( 1d, result[3] );
}
);
}
@Test
@JiraKey(value = "HHH-13021")
public void testCharType(SessionFactoryScope scope) throws Exception {
scope.inSession(
session -> {
Transaction t = session.beginTransaction();
SearchEntity entity = new SearchEntity();
entity.key = Integer.valueOf( 1 );
entity.t = "TEST TEXT";
entity.c = "TEST STRING";
session.persist( entity );
t.commit();
session.beginTransaction();
Query<Object[]> legacyQuery = session.createQuery(
"select b, snippets(c), highlighted(c), score() from " + ENTITY_NAME
+ " b where contains(b.c, 'string')",
Object[].class
);
Object[] result = legacyQuery.getSingleResult();
SearchEntity retrievedEntity = (SearchEntity) result[0];
assertEquals( 4, result.length );
assertEquals( Integer.valueOf( 1 ), retrievedEntity.key );
assertEquals( "TEST TEXT", retrievedEntity.t );
assertEquals( "TEST STRING", retrievedEntity.c );
assertEquals( "TEST <b>STRING</b>", result[1] );
assertEquals( "TEST <b>STRING</b>", result[2] );
assertEquals( 0.75d, result[3] );
}
);
}
@Test
@JiraKey(value = "HHH-13021")
public void testCharTypeComplexQuery(SessionFactoryScope scope) {
scope.inSession(
session -> {
Transaction t = session.beginTransaction();
SearchEntity entity = new SearchEntity();
entity.key = Integer.valueOf( 1 );
entity.t = "TEST TEXT";
entity.c = "TEST STRING";
session.persist( entity );
session.flush();
t.commit();
session.beginTransaction();
Query<Object[]> legacyQuery = session.createQuery(
"select b, snippets(c), highlighted(c), score() from " + ENTITY_NAME
+ " b where contains(b.c, 'string') and key=1 and score() > 0.5",
Object[].class );
Object[] result = legacyQuery.getSingleResult();
SearchEntity retrievedEntity = (SearchEntity) result[0];
assertEquals( 4, result.length );
assertEquals( Integer.valueOf( 1 ), retrievedEntity.key );
assertEquals( "TEST TEXT", retrievedEntity.t );
assertEquals( "TEST STRING", retrievedEntity.c );
assertEquals( "TEST <b>STRING</b>", result[1] );
assertEquals( "TEST <b>STRING</b>", result[2] );
assertEquals( 0.75d, result[3] );
}
);
}
@Test
@JiraKey(value = "HHH-13021")
public void testFuzzy(SessionFactoryScope scope) {
scope.inSession(
session -> {
Transaction t = session.beginTransaction();
SearchEntity entity = new SearchEntity();
entity.key = Integer.valueOf( 1 );
entity.t = "TEST TEXT";
entity.c = "TEST STRING";
session.persist( entity );
session.flush();
t.commit();
session.beginTransaction();
Query<Object[]> legacyQuery = session.createQuery( "select b, snippets(c), highlighted(c), score() from " + ENTITY_NAME
+ " b where contains(b.c, 'string', FUZZY(0.7))", Object[].class );
Object[] result = legacyQuery.getSingleResult();
SearchEntity retrievedEntity = (SearchEntity) result[0];
assertEquals( 4, result.length );
assertEquals( Integer.valueOf( 1 ), retrievedEntity.key );
assertEquals( "TEST TEXT", retrievedEntity.t );
assertEquals( "TEST STRING", retrievedEntity.c );
assertEquals( "TEST <b>STRING</b>", result[1] );
assertEquals( "TEST <b>STRING</b>", result[2] );
assertEquals( 0.75d, result[3] );
}
);
}
@Entity(name = ENTITY_NAME)
public static | HANASearchTest |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/streaming/api/lineage/DefaultLineageGraphTest.java | {
"start": 3063,
"end": 3567
} | class ____ implements LineageEdge {
private final SourceLineageVertex source;
private final LineageVertex sink;
private TestingLineageEdge(SourceLineageVertex source, LineageVertex sink) {
this.source = source;
this.sink = sink;
}
@Override
public SourceLineageVertex source() {
return source;
}
@Override
public LineageVertex sink() {
return sink;
}
}
}
| TestingLineageEdge |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/query/NamedOrIndexedQueryParameterSetterUnitTests.java | {
"start": 1636,
"end": 7347
} | class ____ {
private static final String EXCEPTION_MESSAGE = "mock exception";
private Function<JpaParametersParameterAccessor, Object> firstValueExtractor = args -> args.getValues()[0];
private JpaParametersParameterAccessor methodArguments;
private List<TemporalType> temporalTypes = asList(null, TIME);
private List<Parameter<?>> parameters = Arrays.<Parameter<?>> asList( //
mock(ParameterExpression.class), //
new ParameterImpl("name", null), //
new ParameterImpl(null, 1) //
);
private SoftAssertions softly = new SoftAssertions();
@BeforeEach
void before() {
JpaParametersParameterAccessor accessor = mock(JpaParametersParameterAccessor.class);
Date testDate = new Date();
when(accessor.getValues()).thenReturn(new Object[] { testDate });
when(accessor.potentiallyUnwrap(testDate)).thenReturn(testDate);
this.methodArguments = accessor;
}
@Test // DATAJPA-1233
void strictErrorHandlingThrowsExceptionForAllVariationsOfParameters() {
Query query = mockExceptionThrowingQueryWithNamedParameters();
for (Parameter parameter : parameters) {
for (TemporalType temporalType : temporalTypes) {
QueryParameterSetter setter = QueryParameterSetter.create( //
firstValueExtractor, //
parameter, //
temporalType //
);
softly
.assertThatThrownBy(
() -> setter.setParameter(QueryParameterSetter.BindableQuery.from(query), methodArguments, STRICT)) //
.describedAs("p-type: %s, p-name: %s, p-position: %s, temporal: %s", //
parameter.getClass(), //
parameter.getName(), //
parameter.getPosition(), //
temporalType) //
.hasMessage(EXCEPTION_MESSAGE);
}
}
softly.assertAll();
}
@Test // DATAJPA-1233
void lenientErrorHandlingThrowsNoExceptionForAllVariationsOfParameters() {
Query query = mockExceptionThrowingQueryWithNamedParameters();
for (Parameter<?> parameter : parameters) {
for (TemporalType temporalType : temporalTypes) {
QueryParameterSetter setter = QueryParameterSetter.create( //
firstValueExtractor, //
parameter, //
temporalType //
);
softly
.assertThatCode(
() -> setter.setParameter(QueryParameterSetter.BindableQuery.from(query), methodArguments, LENIENT)) //
.describedAs("p-type: %s, p-name: %s, p-position: %s, temporal: %s", //
parameter.getClass(), //
parameter.getName(), //
parameter.getPosition(), //
temporalType) //
.doesNotThrowAnyException();
}
}
softly.assertAll();
}
/**
* setParameter should be called in the lenient case even if the number of parameters seems to suggest that it fails,
* since the index might not be continuous due to missing parts of count queries compared to the main query. This
* happens when a parameter gets used in the ORDER BY clause which gets stripped of for the count query.
*/
@Test // DATAJPA-1233
void lenientSetsParameterWhenSuccessIsUnsure() {
Query query = mock(Query.class);
for (TemporalType temporalType : temporalTypes) {
QueryParameterSetter setter = QueryParameterSetter.create( //
firstValueExtractor, //
new ParameterImpl(null, 11), // parameter position is beyond number of parametes in query (0)
temporalType //
);
setter.setParameter(QueryParameterSetter.BindableQuery.from(query), methodArguments, LENIENT);
if (temporalType == null) {
verify(query).setParameter(eq(11), any(Date.class));
} else {
verify(query).setParameter(eq(11), any(Date.class), eq(temporalType));
}
}
softly.assertAll();
}
/**
* This scenario happens when the only (name) parameter is part of an ORDER BY clause and gets stripped of for the
* count query. Then the count query has no named parameter but the parameter provided has a {@literal null} position.
*/
@Test // DATAJPA-1233
void parameterNotSetWhenSuccessImpossible() {
Query query = mock(Query.class);
for (TemporalType temporalType : temporalTypes) {
QueryParameterSetter setter = QueryParameterSetter.create( //
firstValueExtractor, //
new ParameterImpl(null, null), // no position (and no name) makes a success of a setParameter impossible
temporalType //
);
setter.setParameter(QueryParameterSetter.BindableQuery.from(query), methodArguments, LENIENT);
if (temporalType == null) {
verify(query, never()).setParameter(anyInt(), any(Date.class));
} else {
verify(query, never()).setParameter(anyInt(), any(Date.class), eq(temporalType));
}
}
softly.assertAll();
}
@SuppressWarnings("unchecked")
private static Query mockExceptionThrowingQueryWithNamedParameters() {
Query query = mock(Query.class);
// make it a query with named parameters
doReturn(Collections.singleton(new ParameterImpl("aName", 3))) //
.when(query).getParameters();
doThrow(new RuntimeException(EXCEPTION_MESSAGE)) //
.when(query).setParameter(any(Parameter.class), any(Date.class), any(TemporalType.class));
doThrow(new RuntimeException(EXCEPTION_MESSAGE)) //
.when(query).setParameter(any(Parameter.class), any(Date.class));
doThrow(new RuntimeException(EXCEPTION_MESSAGE)) //
.when(query).setParameter(anyString(), any(Date.class), any(TemporalType.class));
doThrow(new RuntimeException(EXCEPTION_MESSAGE)) //
.when(query).setParameter(anyString(), any(Date.class));
doThrow(new RuntimeException(EXCEPTION_MESSAGE)) //
.when(query).setParameter(anyInt(), any(Date.class), any(TemporalType.class));
doThrow(new RuntimeException(EXCEPTION_MESSAGE)) //
.when(query).setParameter(anyInt(), any(Date.class));
return query;
}
private static final | NamedOrIndexedQueryParameterSetterUnitTests |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webmvc/src/main/java/org/springframework/cloud/gateway/server/mvc/filter/AfterFilterFunctions.java | {
"start": 1909,
"end": 8123
} | class ____ {
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
private AfterFilterFunctions() {
}
public static BiFunction<ServerRequest, ServerResponse, ServerResponse> addResponseHeader(String name,
String... values) {
return (request, response) -> {
String[] expandedValues = MvcUtils.expandMultiple(request, values);
response.headers().addAll(name, Arrays.asList(expandedValues));
return response;
};
}
public static BiFunction<ServerRequest, ServerResponse, ServerResponse> dedupeResponseHeader(String name) {
return dedupeResponseHeader(name, DedupeStrategy.RETAIN_FIRST);
}
public static BiFunction<ServerRequest, ServerResponse, ServerResponse> dedupeResponseHeader(String name,
DedupeStrategy strategy) {
Assert.hasText(name, "name must not be null or empty");
Objects.requireNonNull(strategy, "strategy must not be null");
return (request, response) -> {
dedupeHeaders(response.headers(), name, strategy);
return response;
};
}
private static void dedupeHeaders(@Nullable HttpHeaders headers, @Nullable String names,
@Nullable DedupeStrategy strategy) {
if (headers == null || names == null || strategy == null) {
return;
}
String[] tokens = StringUtils.tokenizeToStringArray(names, " ", true, true);
for (String name : tokens) {
dedupeHeader(headers, name.trim(), strategy);
}
}
private static void dedupeHeader(HttpHeaders headers, String name, DedupeStrategy strategy) {
List<String> values = headers.get(name);
if (values == null || values.size() <= 1) {
return;
}
switch (strategy) {
case RETAIN_FIRST:
headers.set(name, values.get(0));
break;
case RETAIN_LAST:
headers.set(name, values.get(values.size() - 1));
break;
case RETAIN_UNIQUE:
headers.put(name, new ArrayList<>(new LinkedHashSet<>(values)));
break;
default:
break;
}
}
public static <T, R> BiFunction<ServerRequest, ServerResponse, ServerResponse> modifyResponseBody(Class<T> inClass,
Class<R> outClass, String newContentType,
BodyFilterFunctions.RewriteResponseFunction<T, R> rewriteFunction) {
return BodyFilterFunctions.modifyResponseBody(inClass, outClass, newContentType, rewriteFunction);
}
public static BiFunction<ServerRequest, ServerResponse, ServerResponse> removeResponseHeader(String name) {
return (request, response) -> {
response.headers().remove(name);
return response;
};
}
public static BiFunction<ServerRequest, ServerResponse, ServerResponse> rewriteLocationResponseHeader() {
return RewriteLocationResponseHeaderFilterFunctions.rewriteLocationResponseHeader(config -> {
});
}
public static BiFunction<ServerRequest, ServerResponse, ServerResponse> rewriteLocationResponseHeader(
Consumer<RewriteLocationResponseHeaderFilterFunctions.RewriteLocationResponseHeaderConfig> configConsumer) {
return RewriteLocationResponseHeaderFilterFunctions.rewriteLocationResponseHeader(configConsumer);
}
public static BiFunction<ServerRequest, ServerResponse, ServerResponse> rewriteResponseHeader(String name,
String regexp, String originalReplacement) {
String replacement = originalReplacement.replace("$\\", "$");
Pattern pattern = Pattern.compile(regexp);
return (request, response) -> {
BiFunction<String, List<String>, List<String>> remappingFunction = (key, values) -> {
List<String> rewrittenValues = values.stream()
.map(value -> pattern.matcher(value).replaceAll(replacement))
.toList();
return new ArrayList<>(rewrittenValues);
};
if (response.headers().get(name) != null) {
List<String> oldValue = response.headers().get(name);
List<String> newValue = remappingFunction.apply(name, oldValue);
if (newValue != null) {
response.headers().put(name, newValue);
}
else {
response.headers().remove(name);
}
}
return response;
};
}
public static BiFunction<ServerRequest, ServerResponse, ServerResponse> setResponseHeader(String name,
String value) {
return (request, response) -> {
String expandedValue = MvcUtils.expand(request, value);
response.headers().set(name, expandedValue);
return response;
};
}
public static BiFunction<ServerRequest, ServerResponse, ServerResponse> setStatus(int statusCode) {
return setStatus(new HttpStatusHolder(null, statusCode));
}
public static BiFunction<ServerRequest, ServerResponse, ServerResponse> setStatus(String statusCode) {
return setStatus(HttpStatusHolder.valueOf(statusCode));
}
public static BiFunction<ServerRequest, ServerResponse, ServerResponse> setStatus(HttpStatusCode statusCode) {
return setStatus(new HttpStatusHolder(statusCode, null));
}
public static BiFunction<ServerRequest, ServerResponse, ServerResponse> setStatus(HttpStatusHolder statusCode) {
return (request, response) -> {
if (response instanceof GatewayServerResponse res) {
res.setStatusCode(statusCode.resolve());
}
return response;
};
}
public static BiFunction<ServerRequest, ServerResponse, ServerResponse> removeJsonAttributesResponseBody(
List<String> fieldList, boolean deleteRecursively) {
List<String> immutableFieldList = List.copyOf(fieldList);
return modifyResponseBody(String.class, String.class, APPLICATION_JSON_VALUE, (request, response, body) -> {
String responseBody = body;
if (APPLICATION_JSON.isCompatibleWith(response.headers().getContentType())) {
try {
JsonNode jsonBodyContent = OBJECT_MAPPER.readValue(responseBody, JsonNode.class);
removeJsonAttributes(jsonBodyContent, immutableFieldList, deleteRecursively);
responseBody = OBJECT_MAPPER.writeValueAsString(jsonBodyContent);
}
catch (JsonProcessingException exception) {
throw new IllegalStateException("Failed to process JSON of response body.", exception);
}
}
return responseBody;
});
}
private static void removeJsonAttributes(JsonNode jsonNode, List<String> fieldNames, boolean deleteRecursively) {
if (jsonNode instanceof ObjectNode objectNode) {
objectNode.remove(fieldNames);
}
if (deleteRecursively) {
jsonNode.forEach(childNode -> removeJsonAttributes(childNode, fieldNames, true));
}
}
public | AfterFilterFunctions |
java | spring-projects__spring-boot | smoke-test/spring-boot-smoke-test-webservices/src/test/java/smoketest/webservices/WebServiceServerTestSampleWsApplicationTests.java | {
"start": 1525,
"end": 2405
} | class ____ {
@MockitoBean
HumanResourceService service;
@Autowired
private MockWebServiceClient client;
@Test
void testSendingHolidayRequest() {
String request = """
<hr:HolidayRequest xmlns:hr="https://company.example.com/hr/schemas">
<hr:Holiday>
<hr:StartDate>2013-10-20</hr:StartDate>
<hr:EndDate>2013-11-22</hr:EndDate>
</hr:Holiday>
<hr:Employee>
<hr:Number>1</hr:Number>
<hr:FirstName>John</hr:FirstName>
<hr:LastName>Doe</hr:LastName>
</hr:Employee>
</hr:HolidayRequest>""";
StreamSource source = new StreamSource(new StringReader(request));
this.client.sendRequest(RequestCreators.withPayload(source)).andExpect(ResponseMatchers.noFault());
then(this.service).should().bookHoliday(LocalDate.of(2013, 10, 20), LocalDate.of(2013, 11, 22), "John Doe");
}
}
| WebServiceServerTestSampleWsApplicationTests |
java | spring-projects__spring-framework | spring-jms/src/main/java/org/springframework/jms/support/converter/MessageConversionException.java | {
"start": 1011,
"end": 1457
} | class ____ extends JmsException {
/**
* Create a new MessageConversionException.
* @param msg the detail message
*/
public MessageConversionException(String msg) {
super(msg);
}
/**
* Create a new MessageConversionException.
* @param msg the detail message
* @param cause the root cause (if any)
*/
public MessageConversionException(String msg, @Nullable Throwable cause) {
super(msg, cause);
}
}
| MessageConversionException |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Bug_for_issue_555.java | {
"start": 185,
"end": 371
} | class ____ extends TestCase {
public void test_for_issue() throws Exception {
JSON.parseObject("{\"list\":[{\"spec\":{}}]}", A.class);
}
public static | Bug_for_issue_555 |
java | apache__avro | lang/java/avro/src/main/java/org/apache/avro/io/DatumWriter.java | {
"start": 1045,
"end": 1868
} | interface ____<D> {
/** Set the schema. */
void setSchema(Schema schema);
/**
* Write a datum. Traverse the schema, depth first, writing each leaf value in
* the schema from the datum to the output.
*/
void write(D datum, Encoder out) throws IOException;
/**
* Convenience method to Write a datum to a byte array. Traverse the schema,
* depth first, writing each leaf value in the schema from the datum to the byte
* array.
*
* @param datum The datum to serialize
* @return The serialized datum stored in an array of bytes
*/
default byte[] toByteArray(D datum) throws IOException {
try (ByteArrayOutputStream out = new ByteArrayOutputStream(128)) {
write(datum, EncoderFactory.get().directBinaryEncoder(out, null));
return out.toByteArray();
}
}
}
| DatumWriter |
java | apache__camel | catalog/camel-route-parser/src/test/java/org/apache/camel/parser/java/MyChoiceRouteBuilder.java | {
"start": 896,
"end": 1422
} | class ____ extends RouteBuilder {
@Override
public void configure() {
from("timer:foo")
.choice()
.when(header("foo"))
.to("log:foo")
.when(header("bar"))
.to("log:bar")
.to("mock:bar")
.otherwise()
.to("log:other")
.to("mock:other")
.end()
.to("log:end");
}
}
| MyChoiceRouteBuilder |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableNameservices.java | {
"start": 2908,
"end": 9379
} | class ____ {
private static StateStoreDFSCluster cluster;
private static RouterContext routerContext;
private static RouterClient routerAdminClient;
private static ClientProtocol routerProtocol;
@BeforeAll
public static void setUp() throws Exception {
// Build and start a federated cluster
cluster = new StateStoreDFSCluster(false, 2);
Configuration routerConf = new RouterConfigBuilder()
.stateStore()
.metrics()
.admin()
.rpc()
.build();
// Reduce the number of RPC threads to saturate the Router easy
routerConf.setInt(RBFConfigKeys.DFS_ROUTER_HANDLER_COUNT_KEY, 8);
routerConf.setInt(RBFConfigKeys.DFS_ROUTER_CLIENT_THREADS_SIZE, 4);
// Set the DNs to belong to only one subcluster
cluster.setIndependentDNs();
cluster.addRouterOverrides(routerConf);
// override some settings for the client
cluster.startCluster();
cluster.startRouters();
cluster.waitClusterUp();
routerContext = cluster.getRandomRouter();
routerProtocol = routerContext.getClient().getNamenode();
routerAdminClient = routerContext.getAdminClient();
setupNamespace();
// Simulate one of the subclusters to be slow
MiniDFSCluster dfsCluster = cluster.getCluster();
NameNode nn0 = dfsCluster.getNameNode(0);
simulateSlowNamenode(nn0, 1);
}
private static void setupNamespace() throws IOException {
// Setup a mount table to map to the two namespaces
MountTableManager mountTable = routerAdminClient.getMountTableManager();
Map<String, String> destinations = new TreeMap<>();
destinations.put("ns0", "/dirns0");
MountTable newEntry = MountTable.newInstance("/dirns0", destinations);
AddMountTableEntryRequest request =
AddMountTableEntryRequest.newInstance(newEntry);
mountTable.addMountTableEntry(request);
destinations = new TreeMap<>();
destinations.put("ns1", "/dirns1");
newEntry = MountTable.newInstance("/dirns1", destinations);
request = AddMountTableEntryRequest.newInstance(newEntry);
mountTable.addMountTableEntry(request);
// Refresh the cache in the Router
Router router = routerContext.getRouter();
MountTableResolver mountTableResolver =
(MountTableResolver) router.getSubclusterResolver();
mountTableResolver.loadCache(true);
// Add a folder to each namespace
NamenodeContext nn0 = cluster.getNamenode("ns0", null);
nn0.getFileSystem().mkdirs(new Path("/dirns0/0"));
nn0.getFileSystem().mkdirs(new Path("/dir-ns"));
NamenodeContext nn1 = cluster.getNamenode("ns1", null);
nn1.getFileSystem().mkdirs(new Path("/dirns1/1"));
}
@AfterAll
public static void tearDown() {
if (cluster != null) {
cluster.stopRouter(routerContext);
cluster.shutdown();
cluster = null;
}
}
@AfterEach
public void cleanup() throws IOException {
Router router = routerContext.getRouter();
StateStoreService stateStore = router.getStateStore();
DisabledNameserviceStore store =
stateStore.getRegisteredRecordStore(DisabledNameserviceStore.class);
store.loadCache(true);
Set<String> disabled = store.getDisabledNameservices();
for (String nsId : disabled) {
store.enableNameservice(nsId);
}
store.loadCache(true);
}
@Test
public void testWithoutDisabling() throws IOException {
// ns0 is slow and renewLease should take a long time
long t0 = monotonicNow();
routerProtocol.renewLease("client0", null);
long t = monotonicNow() - t0;
assertTrue(t > TimeUnit.SECONDS.toMillis(1), "It took too little: " + t + "ms");
// Return the results from all subclusters even if slow
FileSystem routerFs = routerContext.getFileSystem();
FileStatus[] filesStatus = routerFs.listStatus(new Path("/"));
assertEquals(3, filesStatus.length);
assertEquals("dir-ns", filesStatus[0].getPath().getName());
assertEquals("dirns0", filesStatus[1].getPath().getName());
assertEquals("dirns1", filesStatus[2].getPath().getName());
}
@Test
public void testDisabling() throws Exception {
disableNameservice("ns0");
// renewLease should be fast as we are skipping ns0
long t0 = monotonicNow();
routerProtocol.renewLease("client0", null);
long t = monotonicNow() - t0;
assertTrue(t < TimeUnit.SECONDS.toMillis(1), "It took too long: " + t + "ms");
// We should not report anything from ns0
FileSystem routerFs = routerContext.getFileSystem();
FileStatus[] filesStatus = routerFs.listStatus(new Path("/"));
assertEquals(2, filesStatus.length);
assertEquals("dirns0", filesStatus[0].getPath().getName());
assertEquals("dirns1", filesStatus[1].getPath().getName());
filesStatus = routerFs.listStatus(new Path("/dirns1"));
assertEquals(1, filesStatus.length);
assertEquals("1", filesStatus[0].getPath().getName());
}
@Test
public void testMetrics() throws Exception {
disableNameservice("ns0");
int numActive = 0;
int numDisabled = 0;
Router router = routerContext.getRouter();
RBFMetrics metrics = router.getMetrics();
String jsonString = metrics.getNameservices();
JSONObject jsonObject = new JSONObject(jsonString);
Iterator<?> keys = jsonObject.keys();
while (keys.hasNext()) {
String key = (String) keys.next();
JSONObject json = jsonObject.getJSONObject(key);
String nsId = json.getString("nameserviceId");
String state = json.getString("state");
if (nsId.equals("ns0")) {
assertEquals("DISABLED", state);
numDisabled++;
} else {
assertEquals("ACTIVE", state);
numActive++;
}
}
assertEquals(1, numActive);
assertEquals(1, numDisabled);
}
private static void disableNameservice(final String nsId)
throws IOException {
NameserviceManager nsManager = routerAdminClient.getNameserviceManager();
DisableNameserviceRequest req =
DisableNameserviceRequest.newInstance(nsId);
nsManager.disableNameservice(req);
Router router = routerContext.getRouter();
StateStoreService stateStore = router.getStateStore();
DisabledNameserviceStore store =
stateStore.getRegisteredRecordStore(DisabledNameserviceStore.class);
store.loadCache(true);
MembershipNamenodeResolver resolver =
(MembershipNamenodeResolver) router.getNamenodeResolver();
resolver.loadCache(true);
}
}
| TestDisableNameservices |
java | apache__flink | flink-core/src/main/java/org/apache/flink/types/variant/VariantBuilder.java | {
"start": 1068,
"end": 2465
} | interface ____ {
/** Create a variant from a byte. */
Variant of(byte b);
/** Create a variant from a short. */
Variant of(short s);
/** Create a variant from a int. */
Variant of(int i);
/** Create a variant from a long. */
Variant of(long l);
/** Create a variant from a string. */
Variant of(String s);
/** Create a variant from a double. */
Variant of(double d);
/** Create a variant from a float. */
Variant of(float f);
/** Create a variant from a byte array. */
Variant of(byte[] bytes);
/** Create a variant from a boolean. */
Variant of(boolean b);
/** Create a variant from a BigDecimal. */
Variant of(BigDecimal bigDecimal);
/** Create a variant from an Instant. */
Variant of(Instant instant);
/** Create a variant from a LocalDate. */
Variant of(LocalDate localDate);
/** Create a variant from a LocalDateTime. */
Variant of(LocalDateTime localDateTime);
/** Create a variant of null. */
Variant ofNull();
/** Get the builder of a variant object. */
VariantObjectBuilder object();
/** Get the builder of a variant object. */
VariantObjectBuilder object(boolean allowDuplicateKeys);
/** Get the builder for a variant array. */
VariantArrayBuilder array();
/** Builder for a variant object. */
@PublicEvolving
| VariantBuilder |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/ThreadSafeCheckerTest.java | {
"start": 42476,
"end": 42703
} | class ____ implements ProtocolMessageEnum {}
""")
.addSourceLines(
"Test.java",
"""
import com.google.errorprone.annotations.ThreadSafe;
@ThreadSafe
| E |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/streaming/api/operators/ProcessOperatorTest.java | {
"start": 1501,
"end": 6274
} | class ____ {
@Test
void testTimestampAndWatermarkQuerying() throws Exception {
ProcessOperator<Integer, String> operator =
new ProcessOperator<>(new QueryingProcessFunction(TimeDomain.EVENT_TIME));
OneInputStreamOperatorTestHarness<Integer, String> testHarness =
new OneInputStreamOperatorTestHarness<>(operator);
testHarness.setup();
testHarness.open();
testHarness.processWatermark(new Watermark(17));
testHarness.processElement(new StreamRecord<>(5, 12L));
testHarness.processWatermark(new Watermark(42));
testHarness.processElement(new StreamRecord<>(6, 13L));
ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
expectedOutput.add(new Watermark(17L));
expectedOutput.add(new StreamRecord<>("5TIME:17 TS:12", 12L));
expectedOutput.add(new Watermark(42L));
expectedOutput.add(new StreamRecord<>("6TIME:42 TS:13", 13L));
TestHarnessUtil.assertOutputEquals(
"Output was not correct.", expectedOutput, testHarness.getOutput());
testHarness.close();
}
@Test
void testTimestampAndProcessingTimeQuerying() throws Exception {
ProcessOperator<Integer, String> operator =
new ProcessOperator<>(new QueryingProcessFunction(TimeDomain.PROCESSING_TIME));
OneInputStreamOperatorTestHarness<Integer, String> testHarness =
new OneInputStreamOperatorTestHarness<>(operator);
testHarness.setup();
testHarness.open();
testHarness.setProcessingTime(17);
testHarness.processElement(new StreamRecord<>(5));
testHarness.setProcessingTime(42);
testHarness.processElement(new StreamRecord<>(6));
ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
expectedOutput.add(new StreamRecord<>("5TIME:17 TS:null"));
expectedOutput.add(new StreamRecord<>("6TIME:42 TS:null"));
TestHarnessUtil.assertOutputEquals(
"Output was not correct.", expectedOutput, testHarness.getOutput());
testHarness.close();
}
@Test
void testNullOutputTagRefusal() throws Exception {
ProcessOperator<Integer, String> operator =
new ProcessOperator<>(new NullOutputTagEmittingProcessFunction());
OneInputStreamOperatorTestHarness<Integer, String> testHarness =
new OneInputStreamOperatorTestHarness<>(operator);
testHarness.setup();
testHarness.open();
testHarness.setProcessingTime(17);
try {
assertThatThrownBy(() -> testHarness.processElement(new StreamRecord<>(5)))
.isInstanceOf(IllegalArgumentException.class);
} finally {
testHarness.close();
}
}
/** This also verifies that the timestamps ouf side-emitted records is correct. */
@Test
void testSideOutput() throws Exception {
ProcessOperator<Integer, String> operator =
new ProcessOperator<>(new SideOutputProcessFunction());
OneInputStreamOperatorTestHarness<Integer, String> testHarness =
new OneInputStreamOperatorTestHarness<>(operator);
testHarness.setup();
testHarness.open();
testHarness.processElement(new StreamRecord<>(42, 17L /* timestamp */));
ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
expectedOutput.add(new StreamRecord<>("IN:42", 17L /* timestamp */));
TestHarnessUtil.assertOutputEquals(
"Output was not correct.", expectedOutput, testHarness.getOutput());
ConcurrentLinkedQueue<StreamRecord<Integer>> expectedIntSideOutput =
new ConcurrentLinkedQueue<>();
expectedIntSideOutput.add(new StreamRecord<>(42, 17L /* timestamp */));
ConcurrentLinkedQueue<StreamRecord<Integer>> intSideOutput =
testHarness.getSideOutput(SideOutputProcessFunction.INTEGER_OUTPUT_TAG);
TestHarnessUtil.assertOutputEquals(
"Side output was not correct.", expectedIntSideOutput, intSideOutput);
ConcurrentLinkedQueue<StreamRecord<Long>> expectedLongSideOutput =
new ConcurrentLinkedQueue<>();
expectedLongSideOutput.add(new StreamRecord<>(42L, 17L /* timestamp */));
ConcurrentLinkedQueue<StreamRecord<Long>> longSideOutput =
testHarness.getSideOutput(SideOutputProcessFunction.LONG_OUTPUT_TAG);
TestHarnessUtil.assertOutputEquals(
"Side output was not correct.", expectedLongSideOutput, longSideOutput);
testHarness.close();
}
private static | ProcessOperatorTest |
java | elastic__elasticsearch | modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderCliIT.java | {
"start": 569,
"end": 1180
} | class ____ extends GeoIpDownloaderIT {
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
Settings.Builder settings = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings));
if (getEndpoint() != null) {
settings.put(GeoIpDownloader.ENDPOINT_SETTING.getKey(), getEndpoint() + "cli/overview.json");
}
return settings.build();
}
public void testUseGeoIpProcessorWithDownloadedDBs() {
assumeTrue("this test can't work with CLI (some expected files are missing)", false);
}
}
| GeoIpDownloaderCliIT |
java | apache__avro | lang/java/avro/src/main/java/org/apache/avro/io/parsing/ValidatingGrammarGenerator.java | {
"start": 3838,
"end": 4318
} | class ____ {
public final Schema actual;
public LitS(Schema actual) {
this.actual = actual;
}
/**
* Two LitS are equal if and only if their underlying schema is the same (not
* merely equal).
*/
@Override
public boolean equals(Object o) {
if (!(o instanceof LitS))
return false;
return actual.equals(((LitS) o).actual);
}
@Override
public int hashCode() {
return actual.hashCode();
}
}
}
| LitS |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/introspect/DefaultAccessorNamingStrategy.java | {
"start": 673,
"end": 962
} | class ____
extends AccessorNamingStrategy
{
/**
* Definition of a handler API to use for checking whether given base name
* (remainder of accessor method name after removing prefix) is acceptable
* based on various rules.
*/
public | DefaultAccessorNamingStrategy |
java | alibaba__nacos | api/src/main/java/com/alibaba/nacos/api/exception/runtime/NacosLoadException.java | {
"start": 727,
"end": 931
} | class ____ extends RuntimeException {
private static final long serialVersionUID = 3513491993982295562L;
public NacosLoadException(String errMsg) {
super(errMsg);
}
}
| NacosLoadException |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java | {
"start": 82822,
"end": 222793
} | class ____ implements Releasable {
/**
* The result of removing a snapshot from a shard folder in the repository.
*
* @param indexId Repository UUID for index that the snapshot was removed from
* @param shardId Shard id that the snapshot was removed from
* @param blobsToDelete Blob names in the shard directory that have become unreferenced in the new shard generation
*/
private record ShardSnapshotMetaDeleteResult(String indexId, int shardId, Collection<String> blobsToDelete) {
ShardSnapshotMetaDeleteResult(StreamInput in) throws IOException {
this(in.readString(), in.readVInt(), in.readStringCollectionAsImmutableList());
assert in.getTransportVersion().equals(TransportVersion.current()); // only used in memory on the local node
}
void writeTo(StreamOutput out) throws IOException {
assert out.getTransportVersion().equals(TransportVersion.current()); // only used in memory on the local node
out.writeString(indexId);
out.writeVInt(shardId);
out.writeStringCollection(blobsToDelete);
}
}
/**
* <p>
* Shard-level results, i.e. a sequence of {@link ShardSnapshotMetaDeleteResult} objects, except serialized, concatenated, and
* compressed in order to reduce the memory footprint by about 4x when compared with a list of bare objects. This can be GiBs in
* size if we're deleting snapshots from a large repository, especially if earlier failures left behind lots of dangling blobs
* for some reason.
* </p>
* <p>
* Writes to this object are all synchronized (via {@link #addShardDeleteResult}), and happen-before it is read, so the reads
* need no further synchronization.
* </p>
*/
private final BytesStreamOutput shardDeleteResults;
private final TruncatedOutputStream truncatedShardDeleteResultsOutputStream;
private final StreamOutput compressed;
private int resultsCount = 0;
private int leakedBlobsCount = 0;
private final ArrayList<Closeable> resources = new ArrayList<>();
private final ShardGenerations.Builder shardGenerationsBuilder = ShardGenerations.builder();
ShardBlobsToDelete() {
this.shardDeleteResults = new ReleasableBytesStreamOutput(bigArrays);
this.truncatedShardDeleteResultsOutputStream = new TruncatedOutputStream(
new BufferedOutputStream(
new DeflaterOutputStream(Streams.flushOnCloseStream(shardDeleteResults)),
DeflateCompressor.BUFFER_SIZE
),
shardDeleteResults::size,
maxHeapSizeForSnapshotDeletion
);
this.compressed = new OutputStreamStreamOutput(this.truncatedShardDeleteResultsOutputStream);
resources.add(compressed);
resources.add(LeakTracker.wrap((Releasable) shardDeleteResults));
}
synchronized void addShardDeleteResult(
IndexId indexId,
int shardId,
ShardGeneration newGeneration,
Collection<String> blobsToDelete
) {
try {
shardGenerationsBuilder.put(indexId, shardId, newGeneration);
// The write was truncated
if (writeBlobsIfCapacity(indexId, shardId, blobsToDelete) == false) {
logger.debug(
"Unable to clean up the following dangling blobs, {}, for index {} and shard {} "
+ "due to insufficient heap space on the master node.",
blobsToDelete,
indexId,
shardId
);
leakedBlobsCount += blobsToDelete.size();
}
} catch (IOException e) {
assert false : e; // no IO actually happens here
throw new UncheckedIOException(e);
}
}
private boolean writeBlobsIfCapacity(IndexId indexId, int shardId, Collection<String> blobsToDelete) throws IOException {
// There is a minimum of 1 byte available for writing
if (this.truncatedShardDeleteResultsOutputStream.hasCapacity()) {
new ShardSnapshotMetaDeleteResult(Objects.requireNonNull(indexId.getId()), shardId, blobsToDelete).writeTo(compressed);
// We only want to read this shard delete result if we were able to write the entire object.
// Otherwise, for partial writes, an EOFException will be thrown upon reading
if (this.truncatedShardDeleteResultsOutputStream.hasCapacity()) {
resultsCount += 1;
return true;
}
}
return false;
}
public ShardGenerations getUpdatedShardGenerations() {
return shardGenerationsBuilder.build();
}
public Iterator<String> getBlobPaths() {
final StreamInput input;
try {
compressed.close();
input = new InputStreamStreamInput(
new BufferedInputStream(
new InflaterInputStream(shardDeleteResults.bytes().streamInput()),
DeflateCompressor.BUFFER_SIZE
)
);
resources.add(input);
} catch (IOException e) {
assert false : e; // no IO actually happens here
throw new UncheckedIOException(e);
}
if (leakedBlobsCount > 0) {
logger.warn(
"Skipped cleanup of {} dangling snapshot blobs due to memory constraints on the master node. "
+ "These blobs will be cleaned up automatically by future snapshot deletions. "
+ "If you routinely delete large snapshots, consider increasing the master node's heap size "
+ "to allow for more efficient cleanup.",
leakedBlobsCount
);
}
return Iterators.flatMap(Iterators.forRange(0, resultsCount, i -> {
try {
return new ShardSnapshotMetaDeleteResult(input);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}), shardResult -> {
final var shardPath = shardPath(new IndexId("_na_", shardResult.indexId), shardResult.shardId).buildAsString();
return Iterators.map(shardResult.blobsToDelete.iterator(), blob -> shardPath + blob);
});
}
@Override
public void close() {
try {
IOUtils.close(resources);
} catch (IOException e) {
assert false : e; // no IO actually happens here
throw new UncheckedIOException(e);
}
}
// exposed for tests
int sizeInBytes() {
return shardDeleteResults.size();
}
}
@Override
public void finalizeSnapshot(final FinalizeSnapshotContext finalizeSnapshotContext) {
assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SNAPSHOT);
assert finalizeSnapshotContext.snapshotInfo().projectId().equals(getProjectId())
: "project-id mismatch: " + finalizeSnapshotContext.snapshotInfo().projectId() + " != " + getProjectId();
final long repositoryStateId = finalizeSnapshotContext.repositoryStateId();
final SnapshotInfo snapshotInfo = finalizeSnapshotContext.snapshotInfo();
assert repositoryStateId > RepositoryData.UNKNOWN_REPO_GEN
: "Must finalize based on a valid repository generation but received [" + repositoryStateId + "]";
final Collection<IndexId> indices = finalizeSnapshotContext.updatedShardGenerations().liveIndices().indices();
final SnapshotId snapshotId = snapshotInfo.snapshotId();
// Once we are done writing the updated index-N blob we remove the now unreferenced index-${uuid} blobs in each shard
// directory if all nodes are at least at version SnapshotsService#SHARD_GEN_IN_REPO_DATA_VERSION
// If there are older version nodes in the cluster, we don't need to run this cleanup as it will have already happened
// when writing the index-${N} to each shard directory.
final IndexVersion repositoryMetaVersion = finalizeSnapshotContext.repositoryMetaVersion();
final boolean writeShardGens = SnapshotsServiceUtils.useShardGenerations(repositoryMetaVersion);
final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT);
final boolean writeIndexGens = SnapshotsServiceUtils.useIndexGenerations(repositoryMetaVersion);
record MetadataWriteResult(
RepositoryData existingRepositoryData,
Map<IndexId, String> indexMetas,
Map<String, String> indexMetaIdentifiers
) {}
record RootBlobUpdateResult(RepositoryData oldRepositoryData, RepositoryData newRepositoryData) {}
SubscribableListener
// Get the current RepositoryData
.<RepositoryData>newForked(listener -> getRepositoryData(executor, listener))
// Identify and write the missing metadata
.<MetadataWriteResult>andThen((l, existingRepositoryData) -> {
final int existingSnapshotCount = existingRepositoryData.getSnapshotIds().size();
if (existingSnapshotCount >= maxSnapshotCount) {
throw new RepositoryException(
metadata.name(),
"Cannot add another snapshot to this repository as it already contains ["
+ existingSnapshotCount
+ "] snapshots and is configured to hold up to ["
+ maxSnapshotCount
+ "] snapshots only."
);
}
final MetadataWriteResult metadataWriteResult;
if (writeIndexGens) {
metadataWriteResult = new MetadataWriteResult(
existingRepositoryData,
ConcurrentCollections.newConcurrentMap(),
ConcurrentCollections.newConcurrentMap()
);
} else {
metadataWriteResult = new MetadataWriteResult(existingRepositoryData, null, null);
}
try (var allMetaListeners = new RefCountingListener(l.map(ignored -> metadataWriteResult))) {
// We ignore all FileAlreadyExistsException when writing metadata since otherwise a master failover while in this method
// will mean that no snap-${uuid}.dat blob is ever written for this snapshot. This is safe because any updated version
// of the index or global metadata will be compatible with the segments written in this snapshot as well.
// Failing on an already existing index-${repoGeneration} below ensures that the index.latest blob is not updated in a
// way that decrements the generation it points at
// Write global metadata
final Metadata clusterMetadata = finalizeSnapshotContext.clusterMetadata();
final var projectMetadata = clusterMetadata.getProject(getProjectId());
executor.execute(ActionRunnable.run(allMetaListeners.acquire(), () -> {
if (finalizeSnapshotContext.serializeProjectMetadata()) {
PROJECT_METADATA_FORMAT.write(projectMetadata, blobContainer(), snapshotId.getUUID(), compress);
} else {
GLOBAL_METADATA_FORMAT.write(clusterMetadata, blobContainer(), snapshotId.getUUID(), compress);
}
}));
// Write the index metadata for each index in the snapshot
for (IndexId index : indices) {
executor.execute(ActionRunnable.run(allMetaListeners.acquire(), () -> {
final IndexMetadata indexMetaData = projectMetadata.index(index.getName());
if (writeIndexGens) {
final String identifiers = IndexMetaDataGenerations.buildUniqueIdentifier(indexMetaData);
String metaUUID = existingRepositoryData.indexMetaDataGenerations().getIndexMetaBlobId(identifiers);
if (metaUUID == null) {
// We don't yet have this version of the metadata so we write it
metaUUID = UUIDs.base64UUID();
INDEX_METADATA_FORMAT.write(indexMetaData, indexContainer(index), metaUUID, compress);
metadataWriteResult.indexMetaIdentifiers().put(identifiers, metaUUID);
} // else this task was largely a no-op - TODO no need to fork in that case
metadataWriteResult.indexMetas().put(index, identifiers);
} else {
INDEX_METADATA_FORMAT.write(
clusterMetadata.getProject(getProjectId()).index(index.getName()),
indexContainer(index),
snapshotId.getUUID(),
compress
);
}
}));
}
// Write the SnapshotInfo blob to the repo (we're already on a SNAPSHOT thread so no need to fork this)
assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SNAPSHOT);
ActionListener.completeWith(allMetaListeners.acquire(), () -> {
SNAPSHOT_FORMAT.write(snapshotInfo, blobContainer(), snapshotId.getUUID(), compress);
return null;
});
// TODO fail fast if any metadata write fails
// TODO clean up successful metadata writes on failure (needs care, we must not clobber another node concurrently
// finalizing the same snapshot: we can only clean up after removing the failed snapshot from the cluster state)
}
})
// Update the root blob
.<RootBlobUpdateResult>andThen((l, metadataWriteResult) -> {
assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SNAPSHOT);
final var snapshotDetails = SnapshotDetails.fromSnapshotInfo(snapshotInfo);
final var existingRepositoryData = metadataWriteResult.existingRepositoryData();
writeIndexGen(
existingRepositoryData.addSnapshot(
snapshotId,
snapshotDetails,
finalizeSnapshotContext.updatedShardGenerations(),
metadataWriteResult.indexMetas(),
metadataWriteResult.indexMetaIdentifiers()
),
repositoryStateId,
repositoryMetaVersion,
new Function<>() {
@Override
public ClusterState apply(ClusterState state) {
return finalizeSnapshotContext.updatedClusterState(state);
}
@Override
public String toString() {
return "finalizing snapshot [" + metadata.name() + "][" + snapshotId + "]";
}
},
l.map(newRepositoryData -> new RootBlobUpdateResult(existingRepositoryData, newRepositoryData))
);
// NB failure of writeIndexGen doesn't guarantee the update failed, so we cannot safely clean anything up on failure
})
// Report success, then clean up.
.<RepositoryData>andThen((l, rootBlobUpdateResult) -> {
l.onResponse(rootBlobUpdateResult.newRepositoryData());
cleanupOldMetadata(
rootBlobUpdateResult.oldRepositoryData(),
rootBlobUpdateResult.newRepositoryData(),
finalizeSnapshotContext,
writeShardGens
);
})
// Finally subscribe the context as the listener, wrapping exceptions if needed
.addListener(
finalizeSnapshotContext.delegateResponse(
(l, e) -> l.onFailure(new SnapshotException(metadata.name(), snapshotId, "failed to update snapshot in repository", e))
)
);
}
/**
 * Deletes all old shard-generation and root-level index-N blobs that are no longer referenced as a result of moving from
 * {@code existingRepositoryData} to {@code updatedRepositoryData}, then signals completion to the finalization context via
 * {@link FinalizeSnapshotContext#onDone()} (whether or not anything was deleted, and even if deletion failed).
 */
private void cleanupOldMetadata(
    RepositoryData existingRepositoryData,
    RepositoryData updatedRepositoryData,
    FinalizeSnapshotContext finalizeSnapshotContext,
    boolean writeShardGenerations
) {
    final Set<String> toDelete = new HashSet<>();
    // Delete all now outdated index files up to 1000 blobs back from the new generation.
    // If there are more than 1000 dangling index-N cleanup functionality on repo delete will take care of them.
    long newRepoGeneration = updatedRepositoryData.getGenId();
    for (long gen = Math.max(
        Math.max(existingRepositoryData.getGenId() - 1, 0),
        newRepoGeneration - 1000
    ); gen < newRepoGeneration; gen++) {
        toDelete.add(getRepositoryDataBlobName(gen));
    }
    if (writeShardGenerations) {
        // Blob names are registered relative to the repository base path, hence the prefix stripping below.
        final int prefixPathLen = basePath().buildAsString().length();
        // Shard generations that the new repository data supersedes relative to the old repository data.
        updatedRepositoryData.shardGenerations()
            .obsoleteShardGenerations(existingRepositoryData.shardGenerations())
            .forEach(
                (indexId, gens) -> gens.forEach(
                    (shardId, oldGen) -> toDelete.add(
                        shardPath(indexId, shardId).buildAsString().substring(prefixPathLen) + BlobStoreRepository.SNAPSHOT_INDEX_PREFIX
                            + oldGen.getGenerationUUID()
                    )
                )
            );
        // Shard generations that the finalization itself reported as obsolete.
        for (Map.Entry<RepositoryShardId, Set<ShardGeneration>> obsoleteEntry : finalizeSnapshotContext.obsoleteShardGenerations()
            .entrySet()) {
            final String containerPath = shardPath(obsoleteEntry.getKey().index(), obsoleteEntry.getKey().shardId()).buildAsString()
                .substring(prefixPathLen) + SNAPSHOT_INDEX_PREFIX;
            for (ShardGeneration shardGeneration : obsoleteEntry.getValue()) {
                toDelete.add(containerPath + shardGeneration);
            }
        }
    }
    if (toDelete.isEmpty() == false) {
        // Fork the deletion onto the SNAPSHOT pool; onAfter runs regardless of success so onDone() is always signalled.
        threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new AbstractRunnable() {
            @Override
            protected void doRun() throws Exception {
                deleteFromContainer(OperationPurpose.SNAPSHOT_METADATA, blobContainer(), toDelete.iterator());
            }

            @Override
            public void onFailure(Exception e) {
                // Best effort: leftover stale blobs are harmless and can be removed by later cleanup.
                logger.warn("Failed to clean up old metadata blobs", e);
            }

            @Override
            public void onAfter() {
                finalizeSnapshotContext.onDone();
            }
        });
    } else {
        finalizeSnapshotContext.onDone();
    }
}
@Override
public void getSnapshotInfo(
    Collection<SnapshotId> snapshotIds,
    boolean abortOnFailure,
    BooleanSupplier isCancelled,
    CheckedConsumer<SnapshotInfo, Exception> consumer,
    ActionListener<Void> listener
) {
    // Wrap the consumer so that any exception it throws fails the whole context rather than escaping.
    final GetSnapshotInfoContext context = new GetSnapshotInfoContext(snapshotIds, abortOnFailure, isCancelled, (ctx, snapshotInfo) -> {
        try {
            consumer.accept(snapshotInfo);
        } catch (Exception e) {
            ctx.onFailure(e);
        }
    }, listener);
    // Rather than submitting one task per snapshot up front, start a bounded number of workers that pull ids off a shared
    // queue, so a single request cannot monopolize the snapshot meta pool.
    final int workerCount = Math.min(threadPool.info(ThreadPool.Names.SNAPSHOT_META).getMax(), context.snapshotIds().size());
    final BlockingQueue<SnapshotId> pending = new LinkedBlockingQueue<>(context.snapshotIds());
    for (int worker = 0; worker < workerCount; worker++) {
        getOneSnapshotInfo(pending, context);
    }
}
/**
 * Tries to poll a {@link SnapshotId} to load {@link SnapshotInfo} from the given {@code queue}. After handling one snapshot
 * (success or failure) the worker re-invokes itself to pick up the next queued id, so each call keeps one worker busy until
 * the queue is drained or the context completes.
 */
private void getOneSnapshotInfo(BlockingQueue<SnapshotId> queue, GetSnapshotInfoContext context) {
    final SnapshotId snapshotId = queue.poll();
    if (snapshotId == null) {
        // queue drained: this worker is done
        return;
    }
    threadPool.executor(ThreadPool.Names.SNAPSHOT_META).execute(() -> {
        if (context.done()) {
            return;
        }
        if (context.isCancelled()) {
            queue.clear();
            context.onFailure(new TaskCancelledException("task cancelled"));
            return;
        }
        Exception failure = null;
        SnapshotInfo snapshotInfo = null;
        try {
            snapshotInfo = SNAPSHOT_FORMAT.read(getProjectRepo(), blobContainer(), snapshotId.getUUID(), namedXContentRegistry);
        } catch (NoSuchFileException ex) {
            failure = new SnapshotMissingException(metadata.name(), snapshotId, ex);
        } catch (IOException | NotXContentException ex) {
            // Fixed: the message previously concatenated the snapshot id straight onto the text with no separator,
            // producing e.g. "failed to get snapshot infosnap-1". Use the bracketed form used by surrounding messages.
            failure = new SnapshotException(metadata.name(), snapshotId, "failed to get snapshot info [" + snapshotId + "]", ex);
        } catch (Exception e) {
            failure = e instanceof SnapshotException
                ? e
                : new SnapshotException(metadata.name(), snapshotId, "Snapshot could not be read", e);
        }
        if (failure != null) {
            if (context.abortOnFailure()) {
                // the whole request has failed; drop the remaining work
                queue.clear();
            }
            context.onFailure(failure);
        } else {
            assert snapshotInfo != null;
            context.onResponse(snapshotInfo);
        }
        // pick up the next snapshot id, if any
        getOneSnapshotInfo(queue, context);
    });
}
@Override
public Metadata getSnapshotGlobalMetadata(final SnapshotId snapshotId, boolean fromProjectMetadata) {
    try {
        if (fromProjectMetadata == false) {
            return GLOBAL_METADATA_FORMAT.read(getProjectRepo(), blobContainer(), snapshotId.getUUID(), namedXContentRegistry);
        }
        // Snapshot stored per-project metadata: read the project blob and wrap it into a cluster-level Metadata instance.
        final var projectMetadata = PROJECT_METADATA_FORMAT.read(
            getProjectRepo(),
            blobContainer(),
            snapshotId.getUUID(),
            namedXContentRegistry
        );
        return Metadata.builder().put(projectMetadata).build();
    } catch (NoSuchFileException ex) {
        // a missing metadata blob means the snapshot itself is absent from the repository
        throw new SnapshotMissingException(metadata.name(), snapshotId, ex);
    } catch (IOException ex) {
        throw new SnapshotException(metadata.name(), snapshotId, "failed to read global metadata", ex);
    }
}
@Override
public IndexMetadata getSnapshotIndexMetaData(RepositoryData repositoryData, SnapshotId snapshotId, IndexId index) throws IOException {
    try {
        // Resolve the metadata blob id for this (snapshot, index) pair via the repository data's index generations; the
        // blob id is not necessarily derived from the snapshot UUID.
        return INDEX_METADATA_FORMAT.read(
            getProjectRepo(),
            indexContainer(index),
            repositoryData.indexMetaDataGenerations().indexMetaBlobId(snapshotId, index),
            namedXContentRegistry
        );
    } catch (NoSuchFileException e) {
        // a missing metadata blob means the snapshot is absent from the repository
        throw new SnapshotMissingException(metadata.name(), snapshotId, e);
    }
}
// Deletes the given blobs from the container, ignoring blobs that are already gone. With TRACE logging enabled, each blob
// name is logged as it is handed to the delete operation.
private void deleteFromContainer(OperationPurpose purpose, BlobContainer container, Iterator<String> blobs) throws IOException {
    Iterator<String> toDelete = blobs;
    if (logger.isTraceEnabled()) {
        toDelete = new Iterator<>() {
            @Override
            public boolean hasNext() {
                return blobs.hasNext();
            }

            @Override
            public String next() {
                final String blobName = blobs.next();
                logger.trace("[{}] Deleting [{}] from [{}]", metadata.name(), blobName, container.path());
                return blobName;
            }
        };
    }
    container.deleteBlobsIgnoringIfNotExists(purpose, toDelete);
}
// All index data lives under the "indices" path below the repository's base path.
private BlobPath indicesPath() {
    final BlobPath base = basePath();
    return base.add("indices");
}
// Container holding all blobs of the given index.
private BlobContainer indexContainer(IndexId indexId) {
    final BlobPath path = indexPath(indexId);
    return blobStore().blobContainer(path);
}
// Convenience overload that unwraps the numeric shard id from the ShardId.
private BlobContainer shardContainer(IndexId indexId, ShardId shardId) {
    final int numericShardId = shardId.getId();
    return shardContainer(indexId, numericShardId);
}
// Path of the given index below the repository's "indices" path.
private BlobPath indexPath(IndexId indexId) {
    final BlobPath indices = indicesPath();
    return indices.add(indexId.getId());
}
// Path of the given shard below its index path.
private BlobPath shardPath(IndexId indexId, int shardId) {
    final BlobPath index = indexPath(indexId);
    return index.add(Integer.toString(shardId));
}
// Container holding all blobs of the given shard.
public BlobContainer shardContainer(IndexId indexId, int shardId) {
    final BlobPath path = shardPath(indexId, shardId);
    return blobStore().blobContainer(path);
}
/**
 * Configures a {@link RateLimiter} from repository and global settings, reusing the existing limiter instance when present.
 *
 * @param rateLimiter              existing rate limiter to reconfigure, or {@code null} if throttling was previously disabled
 * @param maxConfiguredBytesPerSec configured maximum bytes per second from the settings
 * @param settingKey               setting key used to configure the rate limiter (for the warning message)
 * @param warnIfOverRecovery       whether to warn if the configured limit exceeds the effective recovery rate limit
 * @return the configured rate limiter, or {@code null} if throttling is disabled
 */
private RateLimiter getRateLimiter(
    RateLimiter rateLimiter,
    ByteSizeValue maxConfiguredBytesPerSec,
    String settingKey,
    boolean warnIfOverRecovery
) {
    if (maxConfiguredBytesPerSec.getBytes() <= 0) {
        // non-positive limit means throttling is disabled entirely
        return null;
    }
    final ByteSizeValue effectiveRecoverySpeed = recoverySettings.getMaxBytesPerSec();
    if (warnIfOverRecovery
        && effectiveRecoverySpeed.getBytes() > 0
        && maxConfiguredBytesPerSec.getBytes() > effectiveRecoverySpeed.getBytes()) {
        logger.warn(
            "repository {} has a rate limit [{}={}] per second which is above the effective recovery rate limit "
                + "[{}={}] per second, thus the repository rate limit will be superseded by the recovery rate limit",
            toStringShort(),
            settingKey,
            maxConfiguredBytesPerSec,
            INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(),
            effectiveRecoverySpeed
        );
    }
    if (rateLimiter == null) {
        return new RateLimiter.SimpleRateLimiter(maxConfiguredBytesPerSec.getMbFrac());
    }
    // reuse the existing limiter instance, just update its rate
    rateLimiter.setMBPerSec(maxConfiguredBytesPerSec.getMbFrac());
    return rateLimiter;
}
// package private for testing
RateLimiter getSnapshotRateLimiter() {
    Settings repositorySettings = metadata.settings();
    ByteSizeValue maxConfiguredBytesPerSec = MAX_SNAPSHOT_BYTES_PER_SEC.get(repositorySettings);
    if (MAX_SNAPSHOT_BYTES_PER_SEC.exists(repositorySettings) == false && recoverySettings.nodeBandwidthSettingsExist()) {
        // The setting was not explicitly configured and node-bandwidth-based throttling is in effect, so disable the
        // repository-level snapshot limit. The assertion documents the expectation that the setting's implicit default
        // is 40mb — confirm against the setting definition if this ever fires.
        assert maxConfiguredBytesPerSec.getMb() == 40;
        maxConfiguredBytesPerSec = ByteSizeValue.ZERO;
    }
    return getRateLimiter(
        snapshotRateLimiter,
        maxConfiguredBytesPerSec,
        MAX_SNAPSHOT_BYTES_PER_SEC.getKey(),
        recoverySettings.nodeBandwidthSettingsExist()
    );
}
// package private for testing
RateLimiter getRestoreRateLimiter() {
    // Always warn if the configured restore limit exceeds the effective recovery rate limit, since the recovery limit
    // supersedes it (see #getRateLimiter).
    return getRateLimiter(
        restoreRateLimiter,
        MAX_RESTORE_BYTES_PER_SEC.get(metadata.settings()),
        MAX_RESTORE_BYTES_PER_SEC.getKey(),
        true
    );
}
/**
 * Asserts that the current thread belongs to one of the thread pools that are permitted to perform repository I/O.
 */
private void assertSnapshotOrStatelessPermittedThreadPool() {
    // The Stateless plugin adds custom thread pools for object store operations
    assert ThreadPool.assertCurrentThreadPool(
        ThreadPool.Names.SNAPSHOT,
        ThreadPool.Names.SNAPSHOT_META,
        ThreadPool.Names.GENERIC,
        STATELESS_SHARD_READ_THREAD_NAME,
        STATELESS_TRANSLOG_THREAD_NAME,
        STATELESS_SHARD_WRITE_THREAD_NAME,
        STATELESS_CLUSTER_STATE_READ_WRITE_THREAD_NAME,
        STATELESS_SHARD_PREWARMING_THREAD_NAME,
        STATELESS_SHARD_UPLOAD_PREWARMING_THREAD_NAME,
        SEARCHABLE_SNAPSHOTS_CACHE_FETCH_ASYNC_THREAD_NAME,
        SEARCHABLE_SNAPSHOTS_CACHE_PREWARMING_THREAD_NAME
    );
}
@Override
public String startVerification() {
    try {
        if (isReadOnly()) {
            // Nothing can be written to a read-only repository, so reading the root blob metadata is the best check available.
            latestIndexBlobId();
            return "read-only";
        }
        // Write a small seed blob that data nodes will later read back to verify access.
        final String seed = UUIDs.randomBase64UUID();
        final byte[] testBytes = Strings.toUTF8Bytes(seed);
        final BlobContainer testContainer = blobStore().blobContainer(basePath().add(testBlobPrefix(seed)));
        testContainer.writeBlobAtomic(OperationPurpose.SNAPSHOT_METADATA, "master.dat", new BytesArray(testBytes), true);
        return seed;
    } catch (Exception exp) {
        throw new RepositoryVerificationException(metadata.name(), "path " + basePath() + " is not accessible on master node", exp);
    }
}
@Override
public void endVerification(String seed) {
    if (isReadOnly()) {
        // nothing was written during verification of a read-only repository, so there is nothing to clean up
        return;
    }
    try {
        final String testPrefix = testBlobPrefix(seed);
        blobStore().blobContainer(basePath().add(testPrefix)).delete(OperationPurpose.SNAPSHOT_METADATA);
    } catch (Exception exp) {
        throw new RepositoryVerificationException(metadata.name(), "cannot delete test data at " + basePath(), exp);
    }
}
// Tracks the latest known repository generation in a best-effort way to detect inconsistent listing of root level index-N blobs
// and concurrent modifications.
private final AtomicLong latestKnownRepoGen = new AtomicLong(RepositoryData.UNKNOWN_REPO_GEN);
// Best effort cache of the latest known repository data
private final AtomicReference<RepositoryData> latestKnownRepositoryData = new AtomicReference<>(RepositoryData.EMPTY);
@Override
public void getRepositoryData(Executor responseExecutor, ActionListener<RepositoryData> listener) {
    // RepositoryData is the responsibility of the elected master: we shouldn't be loading it on other nodes as we don't have good
    // consistency guarantees there, but electedness is too ephemeral to assert. We can say for sure that this node should be
    // master-eligible, which is almost as strong since all other snapshot-related activity happens on data nodes whether they be
    // master-eligible or not.
    assert clusterService.localNode().isMasterNode() : "should only load repository data on master nodes";

    while (true) {
        // retry loop, in case the state changes underneath us somehow
        if (lifecycle.started() == false) {
            listener.onFailure(notStartedException());
            return;
        }

        if (latestKnownRepoGen.get() == RepositoryData.CORRUPTED_REPO_GEN) {
            // the repository was previously marked corrupted; refuse all further reads
            listener.onFailure(corruptedStateException(null, null));
            return;
        }
        final RepositoryData cached = latestKnownRepositoryData.get();
        // Fast path loading repository data directly from cache if we're in fully consistent mode and the cache matches up with
        // the latest known repository generation
        if (bestEffortConsistency == false && cached.getGenId() == latestKnownRepoGen.get()) {
            listener.onResponse(cached);
            return;
        }
        if (metadata.generation() == RepositoryData.UNKNOWN_REPO_GEN && isReadOnly() == false) {
            logger.debug("""
                [{}] loading repository metadata for the first time, trying to determine correct generation and to store it in the \
                cluster state""", metadata.name());
            if (initializeRepoGenerationTracking(responseExecutor, listener)) {
                return;
            } // else there was a concurrent modification, retry from the start
        } else {
            logger.trace(
                "[{}] loading un-cached repository data with best known repository generation [{}]",
                metadata.name(),
                latestKnownRepoGen
            );
            // deduplicates concurrent physical loads of the same repository data; responds on the given executor
            repoDataLoadDeduplicator.execute(new ThreadedActionListener<>(responseExecutor, listener));
            return;
        }
    }
}
// Exception used to fail listeners when this repository's lifecycle is not in the started state.
private RepositoryException notStartedException() {
    return new RepositoryException(metadata.name(), "repository is not in started state");
}
// Listener used to ensure that repository data is only initialized once in the cluster state by #initializeRepoGenerationTracking
@Nullable // unless we're in the process of initializing repo-generation tracking
private SubscribableListener<RepositoryData> repoDataInitialized;
/**
 * Method used to set the current repository generation in the cluster state's {@link RepositoryMetadata} to the latest generation that
 * can be physically found in the repository before passing the latest {@link RepositoryData} to the given listener.
 * This ensures that operations using {@code SnapshotsService#executeConsistentStateUpdate} right after mounting a fresh repository will
 * have a consistent view of the {@link RepositoryData} before any data has been written to the repository.
 *
 * @param responseExecutor executor to use to complete the listener if not completing it on the calling thread
 * @param listener listener to resolve with new repository data
 * @return {@code true} if this method at least started the initialization process successfully and will eventually complete the
 * listener, {@code false} if there was some concurrent state change which prevents us from starting repo generation tracking (typically
 * that some other node got there first) and the caller should check again and possibly retry or complete the listener in some other
 * way.
 */
private boolean initializeRepoGenerationTracking(Executor responseExecutor, ActionListener<RepositoryData> listener) {
    final SubscribableListener<RepositoryData> listenerToSubscribe;
    final ActionListener<RepositoryData> listenerToComplete;

    synchronized (this) {
        if (repoDataInitialized == null) {
            // double-check the generation since we checked it outside the mutex in the caller and it could have changed by a
            // concurrent initialization of the repo metadata and just load repository normally in case we already finished the
            // initialization
            if (metadata.generation() != RepositoryData.UNKNOWN_REPO_GEN) {
                return false; // retry
            }
            logger.trace("[{}] initializing repository generation in cluster state", metadata.name());
            // we are the first caller: publish the shared listener and drive the initialization below
            repoDataInitialized = listenerToSubscribe = new SubscribableListener<>();
            listenerToComplete = new ActionListener<>() {
                // clears #repoDataInitialized under the mutex before completing it, so later callers start a fresh attempt
                private ActionListener<RepositoryData> acquireAndClearRepoDataInitialized() {
                    synchronized (BlobStoreRepository.this) {
                        assert repoDataInitialized == listenerToSubscribe;
                        repoDataInitialized = null;
                        return listenerToSubscribe;
                    }
                }

                @Override
                public void onResponse(RepositoryData repositoryData) {
                    acquireAndClearRepoDataInitialized().onResponse(repositoryData);
                }

                @Override
                public void onFailure(Exception e) {
                    logger.warn(
                        () -> format("%s Exception when initializing repository generation in cluster state", toStringShort()),
                        e
                    );
                    acquireAndClearRepoDataInitialized().onFailure(e);
                }
            };
        } else {
            // an initialization attempt is already in flight on this node: just subscribe to its outcome
            logger.trace(
                "[{}] waiting for existing initialization of repository metadata generation in cluster state",
                metadata.name()
            );
            listenerToComplete = null;
            listenerToSubscribe = repoDataInitialized;
        }
    }

    if (listenerToComplete != null) {
        SubscribableListener
            // load the current repository data
            .newForked(repoDataLoadDeduplicator::execute)
            // write its generation to the cluster state
            .<RepositoryData>andThen(
                (l, repoData) -> submitUnbatchedTask(
                    "set initial safe repository generation [" + metadata.name() + "][" + repoData.getGenId() + "]",
                    new ClusterStateUpdateTask() {
                        @Override
                        public ClusterState execute(ClusterState currentState) {
                            return getClusterStateWithUpdatedRepositoryGeneration(currentState, repoData);
                        }

                        @Override
                        public void onFailure(Exception e) {
                            l.onFailure(e);
                        }

                        @Override
                        public void clusterStateProcessed(ClusterState oldState, ClusterState newState) {
                            l.onResponse(repoData);
                        }
                    }
                )
            )
            // fork to generic pool since we're on the applier thread and some callbacks for repository data do additional IO
            .<RepositoryData>andThen((l, repoData) -> {
                logger.trace("[{}] initialized repository generation in cluster state to [{}]", metadata.name(), repoData.getGenId());
                threadPool.generic().execute(ActionRunnable.supply(ActionListener.runAfter(l, () -> {
                    logger.trace(
                        "[{}] called listeners after initializing repository to generation [{}]",
                        metadata.name(),
                        repoData.getGenId()
                    );
                }), () -> repoData));
            })
            // and finally complete the listener
            .addListener(listenerToComplete);
    }

    listenerToSubscribe.addListener(listener, responseExecutor, threadPool.getThreadContext());
    return true;
}
/**
 * Builds a cluster state in which this repository's safe and pending generations are both set to the generation of the given
 * {@link RepositoryData}. Throws if the repository's generation in the current state is no longer
 * {@link RepositoryData#UNKNOWN_REPO_GEN}.
 */
private ClusterState getClusterStateWithUpdatedRepositoryGeneration(ClusterState currentState, RepositoryData repoData) {
    // In theory we might have failed over to a different master which initialized the repo and then failed back to this node, so we
    // must check the repository generation in the cluster state is still unknown here.
    final var project = currentState.metadata().getProject(getProjectId());
    final RepositoryMetadata repoMetadata = getRepoMetadata(project);
    if (repoMetadata.generation() != RepositoryData.UNKNOWN_REPO_GEN) {
        throw new RepositoryException(repoMetadata.name(), "Found unexpected initialized repo metadata [" + repoMetadata + "]");
    }
    return ClusterState.builder(currentState)
        .putProjectMetadata(
            ProjectMetadata.builder(project)
                .putCustom(
                    RepositoriesMetadata.TYPE,
                    RepositoriesMetadata.get(project)
                        .withUpdatedGeneration(repoMetadata.name(), repoData.getGenId(), repoData.getGenId())
                )
        )
        .build();
}
/**
* Deduplicator that deduplicates the physical loading of {@link RepositoryData} from the repositories' underlying storage.
*/
private final SingleResultDeduplicator<RepositoryData> repoDataLoadDeduplicator;
/**
 * Physically loads {@link RepositoryData} from the underlying storage and completes the given listener with it, retrying when a
 * concurrent repository modification moves the generation while we are reading.
 */
private void doGetRepositoryData(ActionListener<RepositoryData> listener) {
    // Retry loading RepositoryData in a loop in case we run into concurrent modifications of the repository.
    // Keep track of the most recent generation we failed to load so we can break out of the loop if we fail to load the same
    // generation repeatedly.
    long lastFailedGeneration = RepositoryData.UNKNOWN_REPO_GEN;
    while (true) {
        final long genToLoad;
        if (bestEffortConsistency) {
            // We're only using #latestKnownRepoGen as a hint in this mode and listing repo contents as a secondary way of trying
            // to find a higher generation
            final long generation;
            try {
                generation = latestIndexBlobId();
            } catch (Exception e) {
                listener.onFailure(
                    new RepositoryException(metadata.name(), "Could not determine repository generation from root blobs", e)
                );
                return;
            }
            genToLoad = latestKnownRepoGen.accumulateAndGet(generation, Math::max);
            if (genToLoad > generation) {
                logger.info(
                    "Determined repository generation [{}] from repository contents but correct generation must be at " + "least [{}]",
                    generation,
                    genToLoad
                );
            }
        } else {
            // We only rely on the generation tracked in #latestKnownRepoGen which is exclusively updated from the cluster state
            genToLoad = latestKnownRepoGen.get();
        }
        try {
            final RepositoryData cached = latestKnownRepositoryData.get();
            // Caching is not used with #bestEffortConsistency see docs on #cacheRepositoryData for details
            if (bestEffortConsistency == false && cached.getGenId() == genToLoad) {
                listener.onResponse(cached);
            } else {
                final RepositoryData loaded = getRepositoryData(genToLoad);
                if (cached == null || cached.getGenId() < genToLoad) {
                    // We can cache in the most recent version here without regard to the actual repository metadata version since
                    // we're only caching the information that we just wrote and thus won't accidentally cache any information that
                    // isn't safe
                    cacheRepositoryData(loaded, IndexVersion.current());
                }
                if (loaded.getUuid().equals(metadata.uuid())) {
                    listener.onResponse(loaded);
                } else {
                    // someone switched the repo contents out from under us
                    RepositoriesService.updateRepositoryUuidInMetadata(
                        clusterService,
                        getProjectId(),
                        metadata.name(),
                        loaded,
                        new ThreadedActionListener<>(threadPool.generic(), listener.map(v -> loaded))
                    );
                }
            }
            return;
        } catch (RepositoryException e) {
            // If the generation to load changed concurrently and we didn't just try loading the same generation before we retry
            if (genToLoad != latestKnownRepoGen.get() && genToLoad != lastFailedGeneration) {
                lastFailedGeneration = genToLoad;
                logger.warn(
                    "Failed to load repository data generation ["
                        + genToLoad
                        + "] because a concurrent operation moved the current generation to ["
                        + latestKnownRepoGen.get()
                        + "]",
                    e
                );
                continue;
            }
            if (bestEffortConsistency == false && ExceptionsHelper.unwrap(e, NoSuchFileException.class) != null) {
                // We did not find the expected index-N even though the cluster state continues to point at the missing value
                // of N so we mark this repository as corrupted.
                Tuple<Long, String> previousWriterInformation = null;
                try {
                    previousWriterInformation = readLastWriterInfo();
                } catch (Exception ex) {
                    // best effort only; surface the original problem with this one attached
                    e.addSuppressed(ex);
                }
                final Tuple<Long, String> finalLastInfo = previousWriterInformation;
                markRepoCorrupted(
                    genToLoad,
                    e,
                    listener.delegateFailureAndWrap((l, v) -> l.onFailure(corruptedStateException(e, finalLastInfo)))
                );
            } else {
                listener.onFailure(e);
            }
            return;
        } catch (Exception e) {
            listener.onFailure(new RepositoryException(metadata.name(), "Unexpected exception when loading repository data", e));
            return;
        }
    }
}
/**
 * Updates the in-memory {@link RepositoryData} cache, if repository data caching is enabled.
 *
 * @param repositoryData repository data to cache
 * @param version        repository metadata version used when writing the data to the repository
 */
private void cacheRepositoryData(RepositoryData repositoryData, IndexVersion version) {
    if (cacheRepositoryData == false) {
        return;
    }
    final RepositoryData toCache;
    if (SnapshotsServiceUtils.useShardGenerations(version)) {
        toCache = repositoryData;
    } else {
        // don't cache shard generations here as they may be unreliable
        toCache = repositoryData.withoutShardGenerations();
        assert repositoryData.indexMetaDataGenerations().equals(IndexMetaDataGenerations.EMPTY)
            : "repository data should not contain index generations at version ["
                + version.toReleaseVersion()
                + "] but saw ["
                + repositoryData.indexMetaDataGenerations()
                + "]";
    }
    assert toCache.getGenId() >= 0 : "No need to cache abstract generations but attempted to cache [" + toCache.getGenId() + "]";
    // never replace the cached value with an older generation
    latestKnownRepositoryData.updateAndGet(known -> known.getGenId() > toCache.getGenId() ? known : toCache);
}
// Builds the user-facing exception raised once the repository has been found corrupted. The message text below is emitted to
// operators verbatim, so it must not be reworded casually.
private RepositoryException corruptedStateException(@Nullable Exception cause, @Nullable Tuple<Long, String> previousWriterInfo) {
    return new RepositoryException(metadata.name(), Strings.format("""
        The repository has been disabled to prevent data corruption because its contents were found not to match its expected state. \
        This is either because something other than this cluster modified the repository contents, or because the repository's \
        underlying storage behaves incorrectly. To re-enable this repository, first ensure that this cluster has exclusive write \
        access to it, and then re-register the repository with this cluster. See %s for further information.\
        %s""", ReferenceDocs.CONCURRENT_REPOSITORY_WRITERS, previousWriterMessage(previousWriterInfo)), cause);
}
// Renders the "last writer" suffix for the corrupted-repository message; empty when no writer info is available.
private static String previousWriterMessage(@Nullable Tuple<Long, String> previousWriterInfo) {
    if (previousWriterInfo == null) {
        return "";
    }
    return " The last cluster to write to this repository was ["
        + previousWriterInfo.v2()
        + "] at generation ["
        + previousWriterInfo.v1()
        + "].";
}
/**
 * Marks the repository as corrupted. This puts the repository in a state where its tracked value for
 * {@link RepositoryMetadata#pendingGeneration()} is unchanged while its value for {@link RepositoryMetadata#generation()} is set to
 * {@link RepositoryData#CORRUPTED_REPO_GEN}. In this state, the repository can not be used any longer and must be removed and
 * recreated after the problem that lead to it being marked as corrupted has been fixed.
 *
 * @param corruptedGeneration generation that failed to load because the index file was not found but that should have loaded
 * @param originalException exception that lead to the failing to load the {@code index-N} blob
 * @param listener listener to invoke once done
 */
private void markRepoCorrupted(long corruptedGeneration, Exception originalException, ActionListener<Void> listener) {
    assert corruptedGeneration != RepositoryData.UNKNOWN_REPO_GEN;
    assert bestEffortConsistency == false;
    logger.warn(() -> "Marking repository " + toStringShort() + " as corrupted", originalException);
    submitUnbatchedTask("mark repository corrupted " + toStringShort() + "[" + corruptedGeneration + "]", new ClusterStateUpdateTask() {
        @Override
        public ClusterState execute(ClusterState currentState) {
            final var project = currentState.metadata().getProject(projectId);
            final RepositoriesMetadata state = RepositoriesMetadata.get(project);
            final RepositoryMetadata repoState = state.repository(metadata.name());
            // only mark as corrupted if the generation that failed to load is still the current one
            if (repoState.generation() != corruptedGeneration) {
                throw new IllegalStateException(
                    "Tried to mark repo generation ["
                        + corruptedGeneration
                        + "] as corrupted but its state concurrently changed to ["
                        + repoState
                        + "]"
                );
            }
            return ClusterState.builder(currentState)
                .putProjectMetadata(
                    ProjectMetadata.builder(project)
                        .putCustom(
                            RepositoriesMetadata.TYPE,
                            state.withUpdatedGeneration(
                                metadata.name(),
                                RepositoryData.CORRUPTED_REPO_GEN,
                                repoState.pendingGeneration()
                            )
                        )
                )
                .build();
        }

        @Override
        public void onFailure(Exception e) {
            listener.onFailure(
                new RepositoryException(
                    metadata.name(),
                    "Failed marking repository state as corrupted",
                    ExceptionsHelper.useOrSuppress(e, originalException)
                )
            );
        }

        @Override
        public void clusterStateProcessed(ClusterState oldState, ClusterState newState) {
            listener.onResponse(null);
        }
    });
}
/**
 * Physically loads the root {@code index-N} blob for the given generation and parses it into {@link RepositoryData}.
 */
private RepositoryData getRepositoryData(long indexGen) {
    if (indexGen == RepositoryData.EMPTY_REPO_GEN) {
        // fresh repository with no root blob written yet
        return RepositoryData.EMPTY;
    }
    try {
        final var repositoryDataBlobName = getRepositoryDataBlobName(indexGen);

        // EMPTY is safe here because RepositoryData#fromXContent calls namedObject
        try (
            InputStream blob = blobContainer().readBlob(OperationPurpose.SNAPSHOT_METADATA, repositoryDataBlobName);
            XContentParser parser = XContentType.JSON.xContent()
                .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, blob)
        ) {
            return RepositoryData.snapshotsFromXContent(parser, indexGen, true);
        }
    } catch (IOException ioe) {
        if (bestEffortConsistency) {
            // If we fail to load the generation we tracked in latestKnownRepoGen we reset it.
            // This is done as a fail-safe in case a user manually deletes the contents of the repository in which case subsequent
            // operations must start from the EMPTY_REPO_GEN again
            if (latestKnownRepoGen.compareAndSet(indexGen, RepositoryData.EMPTY_REPO_GEN)) {
                logger.warn("Resetting repository generation tracker because we failed to read generation [" + indexGen + "]", ioe);
            }
        }
        throw new RepositoryException(metadata.name(), "could not read repository data from index blob", ioe);
    }
}
// Blob name prefix under which the verification blobs written by #startVerification for the given seed are stored.
private static String testBlobPrefix(String seed) {
    return TESTS_FILE + seed;
}
@Override
public boolean isReadOnly() {
    // Plain accessor; callers such as #startVerification and #endVerification skip write operations when this is true.
    return readOnly;
}
/**
* Writing a new index generation (root) blob is a three-step process. Typically, it starts from a stable state where the pending
* generation {@link RepositoryMetadata#pendingGeneration()} is equal to the safe generation {@link RepositoryMetadata#generation()},
* but after a failure it may be that the pending generation starts out greater than the safe generation.
* <ol>
* <li>
* We reserve ourselves a new root blob generation {@code G}, greater than {@link RepositoryMetadata#pendingGeneration()}, via a
* cluster state update which edits the {@link RepositoryMetadata} entry for this repository, increasing its pending generation to
* {@code G} without changing its safe generation.
* <li>
* We write the updated {@link RepositoryData} to a new root blob with generation {@code G}.
* <li>
* We mark the successful end of the update of the repository data with a cluster state update which edits the
* {@link RepositoryMetadata} entry for this repository again, increasing its safe generation to equal to its pending generation
* {@code G}.
* </ol>
* We use this process to protect against problems such as a master failover part-way through. If a new master is elected while we're
* writing the root blob with generation {@code G} then we will fail to update the safe repository generation in the final step, and
* meanwhile the new master will choose a generation greater than {@code G} for all subsequent root blobs so there is no risk that we
* will clobber its writes. See the package level documentation for {@link org.elasticsearch.repositories.blobstore} for more details.
* <p>
* Note that a failure here does not imply that the process was unsuccessful or the repository is unchanged. Once we have written the
* new root blob the repository is updated from the point of view of any other clusters reading from it, and if we performed a full
* cluster restart at that point then we would also pick up the new root blob. Writing the root blob may succeed without us receiving
* a successful response from the repository, leading us to report that the write operation failed. Updating the safe generation may
* likewise succeed on a majority of master-eligible nodes which does not include this one, again leading to an apparent failure.
* <p>
* We therefore cannot safely clean up apparently-dangling blobs after a failure here. Instead, we defer any cleanup until after the
* next successful root-blob write, which may happen on a different master node or possibly even in a different cluster.
*
* @param repositoryData RepositoryData to write
* @param expectedGen expected repository generation at the start of the operation
* @param version version of the repository metadata to write
* @param stateFilter filter for the last cluster state update executed by this method
* @param listener completion listener
*/
    protected void writeIndexGen(
        RepositoryData repositoryData,
        long expectedGen,
        IndexVersion version,
        Function<ClusterState, ClusterState> stateFilter,
        ActionListener<RepositoryData> listener
    ) {
        logger.trace("[{}] writing repository data on top of expected generation [{}]", metadata.name(), expectedGen);
        assert isReadOnly() == false; // can not write to a read only repository
        final long currentGen = repositoryData.getGenId();
        if (currentGen != expectedGen) {
            // the index file was updated by a concurrent operation, so we were operating on stale
            // repository data
            listener.onFailure(
                new RepositoryException(
                    metadata.name(),
                    "concurrent modification of the index-N file, expected current generation ["
                        + expectedGen
                        + "], actual current generation ["
                        + currentGen
                        + "]"
                )
            );
            return;
        }
        // Step 1: Set repository generation state to the next possible pending generation
        final ListenableFuture<Long> setPendingStep = new ListenableFuture<>();
        final String setPendingGenerationSource = "set pending repository generation [" + metadata.name() + "][" + expectedGen + "]";
        submitUnbatchedTask(setPendingGenerationSource, new ClusterStateUpdateTask() {
            // new pending generation chosen in execute(); published to setPendingStep once the state update is committed
            private long newGen;
            @Override
            public ClusterState execute(ClusterState currentState) {
                final var project = currentState.metadata().getProject(projectId);
                final RepositoryMetadata meta = getRepoMetadata(project);
                final String repoName = metadata.name();
                if (RepositoriesService.isReadOnly(meta.settings())) {
                    // Last resort check: we shouldn't have been able to mark the repository as readonly while the operation that led to
                    // this writeIndexGen() call was in progress, and conversely shouldn't have started any such operation if the repo
                    // was already readonly, but these invariants are not obviously true and it is disastrous to proceed here.
                    throw new RepositoryException(meta.name(), "repository is readonly, cannot update root blob");
                }
                final long genInState = meta.generation();
                final boolean uninitializedMeta = meta.generation() == RepositoryData.UNKNOWN_REPO_GEN || bestEffortConsistency;
                if (uninitializedMeta == false && meta.pendingGeneration() != genInState) {
                    // pending != safe means a previous write attempt did not complete; this is expected after failures and is benign
                    logger.info(
                        "Trying to write new repository data over unfinished write, repo {} is at "
                            + "safe generation [{}] and pending generation [{}]",
                        toStringShort(),
                        genInState,
                        meta.pendingGeneration()
                    );
                }
                assert expectedGen == RepositoryData.EMPTY_REPO_GEN || uninitializedMeta || expectedGen == meta.generation()
                    : "Expected non-empty generation [" + expectedGen + "] does not match generation tracked in [" + meta + "]";
                // If we run into the empty repo generation for the expected gen, the repo is assumed to have been cleared of
                // all contents by an external process so we reset the safe generation to the empty generation.
                final long safeGeneration = expectedGen == RepositoryData.EMPTY_REPO_GEN
                    ? RepositoryData.EMPTY_REPO_GEN
                    : (uninitializedMeta ? expectedGen : genInState);
                // Regardless of whether or not the safe generation has been reset, the pending generation always increments so that
                // even if a repository has been manually cleared of all contents we will never reuse the same repository generation.
                // This is motivated by the consistency behavior the S3 based blob repository implementation has to support which does
                // not offer any consistency guarantees when it comes to overwriting the same blob name with different content.
                // NOTE(review): this reads the pending generation from the repository's tracked 'metadata' field rather than from
                // the currentState-derived 'meta' — presumably so generation bumps tracked under best-effort consistency are
                // observed even before they appear in the cluster state; confirm against RepositoriesService state updates.
                final long nextPendingGen = metadata.pendingGeneration() + 1;
                newGen = uninitializedMeta ? Math.max(expectedGen + 1, nextPendingGen) : nextPendingGen;
                assert newGen > latestKnownRepoGen.get()
                    : "Attempted new generation ["
                        + newGen
                        + "] must be larger than latest known generation ["
                        + latestKnownRepoGen.get()
                        + "]";
                return ClusterState.builder(currentState)
                    .putProjectMetadata(
                        ProjectMetadata.builder(project)
                            .putCustom(
                                RepositoriesMetadata.TYPE,
                                RepositoriesMetadata.get(project).withUpdatedGeneration(repoName, safeGeneration, newGen)
                            )
                            .build()
                    )
                    .build();
            }
            @Override
            public void onFailure(Exception e) {
                listener.onFailure(
                    new RepositoryException(
                        metadata.name(),
                        "Failed to execute cluster state update [" + setPendingGenerationSource + "]",
                        e
                    )
                );
            }
            @Override
            public void clusterStateProcessed(ClusterState oldState, ClusterState newState) {
                logger.trace("[{}] successfully set pending repository generation to [{}]", metadata.name(), newGen);
                setPendingStep.onResponse(newGen);
            }
            @Override
            public String toString() {
                return Strings.format("start RepositoryData update from generation [%d], stateFilter=[%s]", expectedGen, stateFilter);
            }
        });
        final ListenableFuture<RepositoryData> filterRepositoryDataStep = new ListenableFuture<>();
        // Step 2: Write new index-N blob to repository and update index.latest
        setPendingStep.addListener(
            listener.delegateFailureAndWrap(
                (delegate, newGen) -> threadPool().executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.wrap(delegate, l -> {
                    // BwC logic: Load snapshot version information if any snapshot is missing details in RepositoryData so that the new
                    // RepositoryData contains full details for every snapshot
                    final List<SnapshotId> snapshotIdsWithMissingDetails = repositoryData.getSnapshotIds()
                        .stream()
                        .filter(repositoryData::hasMissingDetails)
                        .toList();
                    if (snapshotIdsWithMissingDetails.isEmpty() == false) {
                        final Map<SnapshotId, SnapshotDetails> extraDetailsMap = new ConcurrentHashMap<>();
                        getSnapshotInfo(
                            snapshotIdsWithMissingDetails,
                            false,
                            () -> false,
                            snapshotInfo -> extraDetailsMap.put(snapshotInfo.snapshotId(), SnapshotDetails.fromSnapshotInfo(snapshotInfo)),
                            ActionListener.runAfter(new ActionListener<>() {
                                @Override
                                public void onResponse(Void aVoid) {
                                    logger.info(
                                        "Successfully loaded all snapshots' detailed information for {} from snapshot metadata",
                                        AllocationService.firstListElementsToCommaDelimitedString(
                                            snapshotIdsWithMissingDetails,
                                            SnapshotId::toString,
                                            logger.isDebugEnabled()
                                        )
                                    );
                                }
                                @Override
                                public void onFailure(Exception e) {
                                    // best effort: proceed with whatever details were collected before the failure
                                    logger.warn("Failure when trying to load missing details from snapshot metadata", e);
                                }
                            }, () -> filterRepositoryDataStep.onResponse(repositoryData.withExtraDetails(extraDetailsMap)))
                        );
                    } else {
                        filterRepositoryDataStep.onResponse(repositoryData);
                    }
                }))
            )
        );
        // Step 2 (continued): once the (possibly detail-enriched) RepositoryData is available, write the new root blob.
        filterRepositoryDataStep.addListener(listener.delegateFailureAndWrap((delegate, filteredRepositoryData) -> {
            final long newGen = setPendingStep.result();
            final RepositoryData newRepositoryData = updateRepositoryData(filteredRepositoryData, version, newGen);
            if (latestKnownRepoGen.get() >= newGen) {
                throw new IllegalArgumentException(
                    "Tried writing generation ["
                        + newGen
                        + "] but repository is at least at generation ["
                        + latestKnownRepoGen.get()
                        + "] already"
                );
            }
            // write the index file
            if (ensureSafeGenerationExists(expectedGen, delegate::onFailure) == false) {
                return;
            }
            final String indexBlob = getRepositoryDataBlobName(newGen);
            logger.debug("Repository [{}] writing new index generational blob [{}]", metadata.name(), indexBlob);
            writeAtomic(OperationPurpose.SNAPSHOT_METADATA, blobContainer(), indexBlob, out -> {
                try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder(org.elasticsearch.core.Streams.noCloseStream(out))) {
                    newRepositoryData.snapshotsToXContent(xContentBuilder, version);
                }
            }, true);
            // best-effort update of index.latest for consumers reading this repository via a URL repository
            maybeWriteIndexLatest(newGen);
            if (filteredRepositoryData.getUuid().equals(RepositoryData.MISSING_UUID) && SnapshotsServiceUtils.includesUUIDs(version)) {
                assert newRepositoryData.getUuid().equals(RepositoryData.MISSING_UUID) == false;
                logger.info(
                    Strings.format(
                        "Generated new repository UUID [%s] for repository %s in generation [%d]",
                        newRepositoryData.getUuid(),
                        toStringShort(),
                        newGen
                    )
                );
            } else {
                // repo UUID is not new
                assert filteredRepositoryData.getUuid().equals(newRepositoryData.getUuid())
                    : filteredRepositoryData.getUuid() + " vs " + newRepositoryData.getUuid();
            }
            // Step 3: Update CS to reflect new repository generation.
            final String setSafeGenerationSource = "set safe repository generation [" + metadata.name() + "][" + newGen + "]";
            submitUnbatchedTask(setSafeGenerationSource, new ClusterStateUpdateTask() {
                @Override
                public ClusterState execute(ClusterState currentState) {
                    final var project = currentState.metadata().getProject(projectId);
                    final RepositoryMetadata meta = getRepoMetadata(project);
                    if (meta.generation() != expectedGen) {
                        // safe generation moved underneath us, e.g. after a master failover — abort rather than clobber
                        throw new IllegalStateException(
                            "Tried to update repo generation to [" + newGen + "] but saw unexpected generation in state [" + meta + "]"
                        );
                    }
                    if (meta.pendingGeneration() != newGen) {
                        throw new IllegalStateException(
                            "Tried to update from unexpected pending repo generation ["
                                + meta.pendingGeneration()
                                + "] after write to generation ["
                                + newGen
                                + "]"
                        );
                    }
                    final RepositoriesMetadata withGenerations = RepositoriesMetadata.get(project)
                        .withUpdatedGeneration(metadata.name(), newGen, newGen);
                    final RepositoriesMetadata withUuid = meta.uuid().equals(newRepositoryData.getUuid())
                        ? withGenerations
                        : withGenerations.withUuid(metadata.name(), newRepositoryData.getUuid());
                    final ClusterState newClusterState = stateFilter.apply(
                        ClusterState.builder(currentState)
                            .putProjectMetadata(ProjectMetadata.builder(project).putCustom(RepositoriesMetadata.TYPE, withUuid))
                            .build()
                    );
                    return updateRepositoryGenerationsIfNecessary(newClusterState, expectedGen, newGen);
                }
                @Override
                public void onFailure(Exception e) {
                    delegate.onFailure(
                        new RepositoryException(
                            metadata.name(),
                            "Failed to execute cluster state update [" + setSafeGenerationSource + "]",
                            e
                        )
                    );
                }
                @Override
                public void clusterStateProcessed(ClusterState oldState, ClusterState newState) {
                    logger.trace("[{}] successfully set safe repository generation to [{}]", metadata.name(), newGen);
                    // only cache once the new generation is the safe generation, so reads never see an uncommitted root blob
                    cacheRepositoryData(newRepositoryData, version);
                    delegate.onResponse(newRepositoryData);
                }
                @Override
                public String toString() {
                    return Strings.format(
                        "complete RepositoryData update from generation [%d] to generation [%d], stateFilter=[%s]",
                        expectedGen,
                        newGen,
                        stateFilter
                    );
                }
            });
        }));
    }
private RepositoryData updateRepositoryData(RepositoryData repositoryData, IndexVersion repositoryMetaversion, long newGen) {
if (SnapshotsServiceUtils.includesUUIDs(repositoryMetaversion)) {
final String clusterUUID = clusterService.state().metadata().clusterUUID();
if (repositoryData.getClusterUUID().equals(clusterUUID) == false) {
repositoryData = repositoryData.withClusterUuid(clusterUUID);
}
}
return repositoryData.withGenId(newGen);
}
/**
* Write {@code index.latest} blob to support using this repository as the basis of a url repository.
*
* @param newGen new repository generation
*/
private void maybeWriteIndexLatest(long newGen) {
if (supportURLRepo) {
logger.debug("Repository [{}] updating index.latest with generation [{}]", metadata.name(), newGen);
try {
writeAtomic(
OperationPurpose.SNAPSHOT_METADATA,
blobContainer(),
INDEX_LATEST_BLOB,
out -> out.write(Numbers.longToBytes(newGen)),
false
);
} catch (Exception e) {
logger.warn(
() -> format(
"Failed to write index.latest blob. If you do not intend to use this "
+ "repository as the basis for a URL repository you may turn off attempting to write the index.latest blob by "
+ "setting repository setting [%s] to [false]",
SUPPORT_URL_REPO.getKey()
),
e
);
}
}
}
    /**
     * Ensures that {@link RepositoryData} for the given {@code safeGeneration} actually physically exists in the repository.
     * This method is used by {@link #writeIndexGen} to make sure that no writes are executed on top of a concurrently modified repository.
     * This check is necessary because {@link RepositoryData} is mostly read from the cached value in {@link #latestKnownRepositoryData}
     * which could be stale in the broken situation of a concurrent write to the repository.
     *
     * @param safeGeneration generation to verify existence for
     * @param onFailure callback to invoke with failure in case the repository generation is not physically found in the repository
     */
    private boolean ensureSafeGenerationExists(long safeGeneration, Consumer<Exception> onFailure) throws IOException {
        logger.debug("Ensure generation [{}] that is the basis for this write exists in [{}]", safeGeneration, metadata.name());
        // The empty generation has no root blob by definition, so only check generations that should have one.
        if (safeGeneration != RepositoryData.EMPTY_REPO_GEN
            && blobContainer().blobExists(SNAPSHOT_METADATA, getRepositoryDataBlobName(safeGeneration)) == false) {
            // Best effort: try to identify the other cluster that wrote to this repository, to enrich the error message.
            Tuple<Long, String> previousWriterInfo = null;
            Exception readRepoDataEx = null;
            try {
                previousWriterInfo = readLastWriterInfo();
            } catch (Exception ex) {
                // keep the read failure so it can be attached as a suppressed exception below
                readRepoDataEx = ex;
            }
            final Exception exception = new RepositoryException(
                metadata.name(),
                "concurrent modification of the index-N file, expected current generation ["
                    + safeGeneration
                    + "] but it was not found in the repository."
                    + previousWriterMessage(previousWriterInfo)
            );
            if (readRepoDataEx != null) {
                exception.addSuppressed(readRepoDataEx);
            }
            // Mark the repository corrupted in the cluster state before failing the caller, so subsequent operations refuse to run.
            markRepoCorrupted(safeGeneration, exception, new ActionListener<>() {
                @Override
                public void onResponse(Void aVoid) {
                    onFailure.accept(exception);
                }
                @Override
                public void onFailure(Exception e) {
                    onFailure.accept(e);
                }
            });
            return false;
        }
        return true;
    }
/**
* Tries to find the latest cluster UUID that wrote to this repository on a best effort basis by listing out repository root contents
* to find the latest repository generation and then reading the cluster UUID of the last writer from the {@link RepositoryData} found
* at this generation.
*
* @return tuple of repository generation and cluster UUID of the last cluster to write to this repository
*/
private Tuple<Long, String> readLastWriterInfo() throws IOException {
assert bestEffortConsistency == false : "This should only be used for adding information to errors in consistent mode";
final long latestGeneration = latestIndexBlobId();
final RepositoryData actualRepositoryData = getRepositoryData(latestGeneration);
return Tuple.tuple(latestGeneration, actualRepositoryData.getClusterUUID());
}
/**
* Updates the repository generation that running deletes and snapshot finalizations will be based on for this repository if any such
* operations are found in the cluster state while setting the safe repository generation.
*
* @param state cluster state to update
* @param oldGen previous safe repository generation
* @param newGen new safe repository generation
* @return updated cluster state
*/
private ClusterState updateRepositoryGenerationsIfNecessary(ClusterState state, long oldGen, long newGen) {
final String repoName = metadata.name();
final SnapshotsInProgress updatedSnapshotsInProgress;
boolean changedSnapshots = false;
final List<SnapshotsInProgress.Entry> snapshotEntries = new ArrayList<>();
final SnapshotsInProgress snapshotsInProgress = SnapshotsInProgress.get(state);
for (SnapshotsInProgress.Entry entry : snapshotsInProgress.forRepo(getProjectId(), repoName)) {
if (entry.repositoryStateId() == oldGen) {
snapshotEntries.add(entry.withRepoGen(newGen));
changedSnapshots = true;
} else {
snapshotEntries.add(entry);
}
}
updatedSnapshotsInProgress = changedSnapshots
? snapshotsInProgress.createCopyWithUpdatedEntriesForRepo(getProjectId(), repoName, snapshotEntries)
: null;
final SnapshotDeletionsInProgress updatedDeletionsInProgress;
boolean changedDeletions = false;
final List<SnapshotDeletionsInProgress.Entry> deletionEntries = new ArrayList<>();
for (SnapshotDeletionsInProgress.Entry entry : SnapshotDeletionsInProgress.get(state).getEntries()) {
if (entry.projectId().equals(getProjectId()) && entry.repository().equals(repoName) && entry.repositoryStateId() == oldGen) {
deletionEntries.add(entry.withRepoGen(newGen));
changedDeletions = true;
} else {
deletionEntries.add(entry);
}
}
updatedDeletionsInProgress = changedDeletions ? SnapshotDeletionsInProgress.of(deletionEntries) : null;
return SnapshotsServiceUtils.updateWithSnapshots(state, updatedSnapshotsInProgress, updatedDeletionsInProgress);
}
private RepositoryMetadata getRepoMetadata(ProjectMetadata projectMetadata) {
final RepositoryMetadata repositoryMetadata = RepositoriesMetadata.get(projectMetadata).repository(metadata.name());
assert repositoryMetadata != null || lifecycle.stoppedOrClosed()
: "did not find metadata for repo [" + metadata.name() + "] in state [" + lifecycleState() + "]";
return repositoryMetadata;
}
    /**
     * Get the latest snapshot index blob id. Snapshot index blobs are named index-N, where N is
     * the next version number from when the index blob was written. Each individual index-N blob is
     * only written once and never overwritten. The highest numbered index-N blob is the latest one
     * that contains the current snapshots in the repository.
     *
     * Package private for testing
     */
    long latestIndexBlobId() throws IOException {
        try {
            // First, try listing all index-N blobs (there should only be two index-N blobs at any given
            // time in a repository if cleanup is happening properly) and pick the index-N blob with the
            // highest N value - this will be the latest index blob for the repository. Note, we do this
            // instead of directly reading the index.latest blob to get the current index-N blob because
            // index.latest is not written atomically and is not immutable - on every index-N change,
            // we first delete the old index.latest and then write the new one. If the repository is not
            // read-only, it is possible that we try deleting the index.latest blob while it is being read
            // by some other operation (such as the get snapshots operation). In some file systems, it is
            // illegal to delete a file while it is being read elsewhere (e.g. Windows). For read-only
            // repositories, we read for index.latest, both because listing blob prefixes is often unsupported
            // and because the index.latest blob will never be deleted and re-written.
            return listBlobsToGetLatestIndexId();
        } catch (UnsupportedOperationException e) {
            // If its a read-only repository, listing blobs by prefix may not be supported (e.g. a URL repository),
            // in this case, try reading the latest index generation from the index.latest blob
            try {
                return readSnapshotIndexLatestBlob();
            } catch (NoSuchFileException nsfe) {
                // no index.latest blob either: treat the repository as empty
                return RepositoryData.EMPTY_REPO_GEN;
            }
        }
    }
// package private for testing
long readSnapshotIndexLatestBlob() throws IOException {
final BytesReference content = Streams.readFully(
Streams.limitStream(blobContainer().readBlob(OperationPurpose.SNAPSHOT_METADATA, INDEX_LATEST_BLOB), Long.BYTES + 1)
);
if (content.length() != Long.BYTES) {
throw new RepositoryException(
metadata.name(),
"exception reading blob ["
+ INDEX_LATEST_BLOB
+ "]: expected 8 bytes but blob was "
+ (content.length() < Long.BYTES ? content.length() + " bytes" : "longer")
);
}
return Numbers.bytesToLong(content.toBytesRef());
}
    // Lists all root blobs with the index-N prefix and returns the highest generation number found among them.
    private long listBlobsToGetLatestIndexId() throws IOException {
        return latestGeneration(blobContainer().listBlobsByPrefix(OperationPurpose.SNAPSHOT_METADATA, INDEX_FILE_PREFIX).keySet());
    }
private long latestGeneration(Collection<String> rootBlobs) {
long latest = RepositoryData.EMPTY_REPO_GEN;
for (String blobName : rootBlobs) {
if (blobName.startsWith(INDEX_FILE_PREFIX) == false) {
continue;
}
try {
final long curr = Long.parseLong(blobName.substring(INDEX_FILE_PREFIX.length()));
latest = Math.max(latest, curr);
} catch (NumberFormatException nfe) {
// the index- blob wasn't of the format index-N where N is a number,
// no idea what this blob is but it doesn't belong in the repository!
logger.warn("[{}] Unknown blob in the repository: {}", toStringShort(), blobName);
}
}
return latest;
}
    /**
     * Atomically writes a metadata blob named {@code blobName} into the given container using the supplied writer.
     *
     * @param failIfAlreadyExists whether to fail the write if a blob with the same name already exists
     */
    private void writeAtomic(
        OperationPurpose purpose,
        BlobContainer container,
        final String blobName,
        CheckedConsumer<OutputStream, IOException> writer,
        boolean failIfAlreadyExists
    ) throws IOException {
        logger.trace(() -> format("[%s] Writing [%s] to %s atomically", metadata.name(), blobName, container.path()));
        container.writeMetadataBlob(purpose, blobName, failIfAlreadyExists, true, writer);
    }
    /**
     * Enqueues the shard snapshot on the dedicated shard snapshot task runner rather than executing it inline.
     */
    @Override
    public void snapshotShard(SnapshotShardContext context) {
        context.status().updateStatusDescription("queued in snapshot task runner");
        shardSnapshotTaskRunner.enqueueShardSnapshot(context);
    }
private void doSnapshotShard(SnapshotShardContext context) {
blobStoreSnapshotMetrics.shardSnapshotStarted();
context.addListener(ActionListener.running(() -> blobStoreSnapshotMetrics.shardSnapshotCompleted(context.status())));
if (isReadOnly()) {
context.onFailure(new RepositoryException(metadata.name(), "cannot snapshot shard on a readonly repository"));
return;
}
final Store store = context.store();
final ShardId shardId = store.shardId();
final SnapshotId snapshotId = context.snapshotId();
final IndexShardSnapshotStatus snapshotStatus = context.status();
snapshotStatus.updateStatusDescription("snapshot task runner: setting up shard snapshot");
final long startTime = threadPool.absoluteTimeInMillis();
try {
final ShardGeneration generation = snapshotStatus.generation();
final BlobContainer shardContainer = shardContainer(context.indexId(), shardId);
logger.debug("[{}][{}] snapshot to [{}][{}][{}] ...", shardId, snapshotId, metadata.name(), context.indexId(), generation);
final Set<String> blobs;
if (generation == null) {
snapshotStatus.ensureNotAborted();
snapshotStatus.updateStatusDescription("snapshot task runner: listing blob prefixes");
try {
blobs = shardContainer.listBlobsByPrefix(OperationPurpose.SNAPSHOT_METADATA, SNAPSHOT_INDEX_PREFIX).keySet();
} catch (IOException e) {
throw new IndexShardSnapshotFailedException(shardId, "failed to list blobs", e);
}
} else {
blobs = Collections.singleton(SNAPSHOT_INDEX_PREFIX + generation);
}
snapshotStatus.ensureNotAborted();
snapshotStatus.updateStatusDescription("snapshot task runner: loading snapshot blobs");
Tuple<BlobStoreIndexShardSnapshots, ShardGeneration> tuple = buildBlobStoreIndexShardSnapshots(
context.indexId(),
shardId.id(),
blobs,
shardContainer,
generation
);
BlobStoreIndexShardSnapshots snapshots = tuple.v1();
ShardGeneration fileListGeneration = tuple.v2();
if (snapshots.snapshots().stream().anyMatch(sf -> sf.snapshot().equals(snapshotId.getName()))) {
throw new IndexShardSnapshotFailedException(
shardId,
"Duplicate snapshot name [" + snapshotId.getName() + "] detected, aborting"
);
}
// First inspect all known SegmentInfos instances to see if we already have an equivalent commit in the repository
final List<BlobStoreIndexShardSnapshot.FileInfo> filesFromSegmentInfos = Optional.ofNullable(context.stateIdentifier())
.map(id -> {
for (SnapshotFiles snapshotFileSet : snapshots.snapshots()) {
if (id.equals(snapshotFileSet.shardStateIdentifier())) {
return snapshotFileSet.indexFiles();
}
}
return null;
})
.orElse(null);
final List<BlobStoreIndexShardSnapshot.FileInfo> indexCommitPointFiles;
int indexIncrementalFileCount = 0;
int indexTotalNumberOfFiles = 0;
long indexIncrementalSize = 0;
long indexTotalFileSize = 0;
final BlockingQueue<BlobStoreIndexShardSnapshot.FileInfo> filesToSnapshot = new LinkedBlockingQueue<>();
int filesInShardMetadataCount = 0;
long filesInShardMetadataSize = 0;
if (store.indexSettings().getIndexMetadata().isSearchableSnapshot()) {
indexCommitPointFiles = Collections.emptyList();
} else if (filesFromSegmentInfos == null) {
// If we did not find a set of files that is equal to the current commit we determine the files to upload by comparing files
// in the commit with files already in the repository
indexCommitPointFiles = new ArrayList<>();
final Collection<String> fileNames;
final Store.MetadataSnapshot metadataFromStore;
try (Releasable ignored = context.withCommitRef()) {
// TODO apparently we don't use the MetadataSnapshot#.recoveryDiff(...) here but we should
try {
final IndexCommit snapshotIndexCommit = context.indexCommit();
logger.trace("[{}] [{}] Loading store metadata using index commit [{}]", shardId, snapshotId, snapshotIndexCommit);
metadataFromStore = store.getMetadata(snapshotIndexCommit);
fileNames = snapshotIndexCommit.getFileNames();
} catch (IOException e) {
throw new IndexShardSnapshotFailedException(shardId, "Failed to get store file metadata", e);
}
}
for (String fileName : fileNames) {
ensureNotAborted(shardId, snapshotId, snapshotStatus, fileName);
logger.trace("[{}] [{}] Processing [{}]", shardId, snapshotId, fileName);
final StoreFileMetadata md = metadataFromStore.get(fileName);
BlobStoreIndexShardSnapshot.FileInfo existingFileInfo = snapshots.findPhysicalIndexFile(md);
// We can skip writing blobs where the metadata hash is equal to the blob's contents because we store the hash/contents
// directly in the shard level metadata in this case
final boolean needsWrite = md.hashEqualsContents() == false;
indexTotalFileSize += md.length();
indexTotalNumberOfFiles++;
if (existingFileInfo == null) {
indexIncrementalFileCount++;
indexIncrementalSize += md.length();
// create a new FileInfo
BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo = new BlobStoreIndexShardSnapshot.FileInfo(
(needsWrite ? UPLOADED_DATA_BLOB_PREFIX : VIRTUAL_DATA_BLOB_PREFIX) + UUIDs.randomBase64UUID(),
md,
chunkSize()
);
indexCommitPointFiles.add(snapshotFileInfo);
if (needsWrite) {
filesToSnapshot.add(snapshotFileInfo);
} else {
assert assertFileContentsMatchHash(snapshotStatus, snapshotFileInfo, store);
filesInShardMetadataCount += 1;
filesInShardMetadataSize += md.length();
}
} else {
// a commit point file with the same name, size and checksum was already copied to repository
// we will reuse it for this snapshot
indexCommitPointFiles.add(existingFileInfo);
}
}
} else {
for (BlobStoreIndexShardSnapshot.FileInfo fileInfo : filesFromSegmentInfos) {
indexTotalNumberOfFiles++;
indexTotalFileSize += fileInfo.length();
}
indexCommitPointFiles = filesFromSegmentInfos;
}
snapshotStatus.updateStatusDescription("snapshot task runner: starting shard snapshot");
snapshotStatus.moveToStarted(
startTime,
indexIncrementalFileCount,
indexTotalNumberOfFiles,
indexIncrementalSize,
indexTotalFileSize
);
final ShardGeneration indexGeneration;
final boolean writeShardGens = SnapshotsServiceUtils.useShardGenerations(context.getRepositoryMetaVersion());
final boolean writeFileInfoWriterUUID = SnapshotsServiceUtils.includeFileInfoWriterUUID(context.getRepositoryMetaVersion());
// build a new BlobStoreIndexShardSnapshot, that includes this one and all the saved ones
final BlobStoreIndexShardSnapshots updatedBlobStoreIndexShardSnapshots = snapshots.withAddedSnapshot(
new SnapshotFiles(snapshotId.getName(), indexCommitPointFiles, context.stateIdentifier())
);
final Runnable afterWriteSnapBlob;
if (writeShardGens) {
// When using shard generations we can safely write the index-${uuid} blob before writing out any of the actual data
// for this shard since the uuid named blob will simply not be referenced in case of error and thus we will never
// reference a generation that has not had all its files fully upload.
indexGeneration = ShardGeneration.newGeneration();
try {
final Map<String, String> serializationParams = Collections.singletonMap(
BlobStoreIndexShardSnapshot.FileInfo.SERIALIZE_WRITER_UUID,
Boolean.toString(writeFileInfoWriterUUID)
);
snapshotStatus.updateStatusDescription("snapshot task runner: updating blob store with new shard generation");
INDEX_SHARD_SNAPSHOTS_FORMAT.write(
updatedBlobStoreIndexShardSnapshots,
shardContainer,
indexGeneration.getGenerationUUID(),
compress,
serializationParams
);
snapshotStatus.addProcessedFiles(filesInShardMetadataCount, filesInShardMetadataSize);
} catch (IOException e) {
throw new IndexShardSnapshotFailedException(
shardId,
"Failed to write shard level snapshot metadata for ["
+ snapshotId
+ "] to ["
+ INDEX_SHARD_SNAPSHOTS_FORMAT.blobName(indexGeneration.getGenerationUUID())
+ "]",
e
);
}
afterWriteSnapBlob = () -> {};
} else {
// When not using shard generations we can only write the index-${N} blob after all other work for this shard has
// completed.
// Also, in case of numeric shard generations the data node has to take care of deleting old shard generations.
final long newGen = Long.parseLong(fileListGeneration.getGenerationUUID()) + 1;
indexGeneration = new ShardGeneration(newGen);
// Delete all previous index-N blobs
final List<String> blobsToDelete = blobs.stream().filter(blob -> blob.startsWith(SNAPSHOT_INDEX_PREFIX)).toList();
assert blobsToDelete.stream()
.mapToLong(b -> Long.parseLong(b.replaceFirst(SNAPSHOT_INDEX_PREFIX, "")))
.max()
.orElse(-1L) < Long.parseLong(indexGeneration.toString())
: "Tried to delete an index-N blob newer than the current generation ["
+ indexGeneration
+ "] when deleting index-N blobs "
+ blobsToDelete;
final var finalFilesInShardMetadataCount = filesInShardMetadataCount;
final var finalFilesInShardMetadataSize = filesInShardMetadataSize;
afterWriteSnapBlob = () -> {
try {
final Map<String, String> serializationParams = Collections.singletonMap(
BlobStoreIndexShardSnapshot.FileInfo.SERIALIZE_WRITER_UUID,
Boolean.toString(writeFileInfoWriterUUID)
);
snapshotStatus.updateStatusDescription("no shard generations: writing new index-${N} file");
writeShardIndexBlobAtomic(shardContainer, newGen, updatedBlobStoreIndexShardSnapshots, serializationParams);
} catch (IOException e) {
throw new IndexShardSnapshotFailedException(
shardId,
"Failed to finalize snapshot creation ["
+ snapshotId
+ "] with shard index ["
+ INDEX_SHARD_SNAPSHOTS_FORMAT.blobName(indexGeneration.getGenerationUUID())
+ "]",
e
);
}
snapshotStatus.addProcessedFiles(finalFilesInShardMetadataCount, finalFilesInShardMetadataSize);
try {
snapshotStatus.updateStatusDescription("no shard generations: deleting blobs");
deleteFromContainer(OperationPurpose.SNAPSHOT_METADATA, shardContainer, blobsToDelete.iterator());
} catch (IOException e) {
logger.warn(
() -> format("[%s][%s] failed to delete old index-N blobs during finalization", snapshotId, shardId),
e
);
}
};
}
// filesToSnapshot will be emptied while snapshotting the file. We make a copy here for cleanup purpose in case of failure.
final AtomicReference<List<FileInfo>> fileToCleanUp = new AtomicReference<>(List.copyOf(filesToSnapshot));
final ActionListener<Collection<Void>> allFilesUploadedListener = ActionListener.assertOnce(ActionListener.wrap(ignore -> {
snapshotStatus.updateStatusDescription("all files uploaded: finalizing");
final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize();
// now create and write the commit point
logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId);
final BlobStoreIndexShardSnapshot blobStoreIndexShardSnapshot = new BlobStoreIndexShardSnapshot(
snapshotId.getName(),
indexCommitPointFiles,
lastSnapshotStatus.getStartTimeMillis(),
threadPool.absoluteTimeInMillis() - lastSnapshotStatus.getStartTimeMillis(),
lastSnapshotStatus.getIncrementalFileCount(),
lastSnapshotStatus.getIncrementalSize()
);
// Once we start writing the shard level snapshot file, no cleanup will be performed because it is possible that
// written files are referenced by another concurrent process.
fileToCleanUp.set(List.of());
try {
final String snapshotUUID = snapshotId.getUUID();
final Map<String, String> serializationParams = Collections.singletonMap(
BlobStoreIndexShardSnapshot.FileInfo.SERIALIZE_WRITER_UUID,
Boolean.toString(writeFileInfoWriterUUID)
);
snapshotStatus.updateStatusDescription("all files uploaded: writing to index shard file");
INDEX_SHARD_SNAPSHOT_FORMAT.write(
blobStoreIndexShardSnapshot,
shardContainer,
snapshotUUID,
compress,
serializationParams
);
} catch (IOException e) {
throw new IndexShardSnapshotFailedException(shardId, "Failed to write commit point", e);
}
afterWriteSnapBlob.run();
final ShardSnapshotResult shardSnapshotResult = new ShardSnapshotResult(
indexGeneration,
ByteSizeValue.ofBytes(blobStoreIndexShardSnapshot.totalSize()),
getSegmentInfoFileCount(blobStoreIndexShardSnapshot.indexFiles())
);
snapshotStatus.updateStatusDescription("all files uploaded: done");
snapshotStatus.moveToDone(threadPool.absoluteTimeInMillis(), shardSnapshotResult);
context.onResponse(shardSnapshotResult);
}, e -> {
try {
snapshotStatus.updateStatusDescription("all files uploaded: cleaning up data files, exception while finalizing: " + e);
shardContainer.deleteBlobsIgnoringIfNotExists(
OperationPurpose.SNAPSHOT_DATA,
Iterators.flatMap(fileToCleanUp.get().iterator(), f -> Iterators.forRange(0, f.numberOfParts(), f::partName))
);
} catch (Exception innerException) {
e.addSuppressed(innerException);
}
context.onFailure(e);
}));
if (indexIncrementalFileCount == 0 || filesToSnapshot.isEmpty()) {
allFilesUploadedListener.onResponse(Collections.emptyList());
return;
}
snapshotFiles(context, filesToSnapshot, allFilesUploadedListener);
} catch (Exception e) {
context.onFailure(e);
}
}
    /**
     * Verifies that the shard snapshot identified by the given ids has not been aborted or paused.
     * <p>
     * Delegates the abort/pause check to {@link IndexShardSnapshotStatus#ensureNotAborted} and additionally asserts (test-only) that a
     * still-running shard snapshot is in stage INIT or STARTED. Any exception thrown by the check is logged to the dedicated shutdown
     * logger before being rethrown, so that interrupt signals observed during node shutdown are visible in the logs.
     *
     * @param shardId        shard being snapshotted, used for log context
     * @param snapshotId     snapshot being taken, used for log context
     * @param snapshotStatus mutable status object carrying the abort/pause signal
     * @param fileName       file currently being processed, used for log context
     */
    private static void ensureNotAborted(ShardId shardId, SnapshotId snapshotId, IndexShardSnapshotStatus snapshotStatus, String fileName) {
        var shardSnapshotStage = snapshotStatus.getStage();
        try {
            IndexShardSnapshotStatus.ensureNotAborted(shardSnapshotStage);
            if (shardSnapshotStage != IndexShardSnapshotStatus.Stage.INIT && shardSnapshotStage != IndexShardSnapshotStatus.Stage.STARTED) {
                // A normally running shard snapshot should be in stage INIT or STARTED. And we know it's not in PAUSING or ABORTED because
                // the ensureNotAborted() call above did not throw. The remaining options don't make sense, if they ever happen.
                logger.error(
                    "Shard snapshot found an unexpected state. ShardId [{}], SnapshotID [{}], Stage [{}]",
                    shardId,
                    snapshotId,
                    shardSnapshotStage
                );
                assert false;
            }
        } catch (Exception e) {
            // We want to see when a shard snapshot operation checks for and finds an interrupt signal during shutdown. A
            // PausedSnapshotException indicates we're in shutdown because that's the only case when shard snapshots are signaled to pause.
            // An AbortedSnapshotException may also occur during shutdown if an uncommon error occurs.
            ShutdownLogger.shutdownLogger.debug(
                () -> Strings.format(
                    "Shard snapshot operation is aborting. ShardId [%s], SnapshotID [%s], File [%s], Stage [%s]",
                    shardId,
                    snapshotId,
                    fileName,
                    shardSnapshotStage
                ),
                e
            );
            assert e instanceof AbortedSnapshotException || e instanceof PausedSnapshotException : e;
            throw e;
        }
    }
protected void snapshotFiles(
SnapshotShardContext context,
BlockingQueue<FileInfo> filesToSnapshot,
ActionListener<Collection<Void>> allFilesUploadedListener
) {
final int noOfFilesToSnapshot = filesToSnapshot.size();
final ActionListener<Void> filesListener = fileQueueListener(filesToSnapshot, noOfFilesToSnapshot, allFilesUploadedListener);
context.status().updateStatusDescription("enqueued file snapshot tasks: threads running concurrent file uploads");
for (int i = 0; i < noOfFilesToSnapshot; i++) {
shardSnapshotTaskRunner.enqueueFileSnapshot(context, filesToSnapshot::poll, filesListener);
}
}
    /**
     * Assertion helper: reads the file's full contents through a verifying input and checks them against the hash recorded in the
     * snapshot metadata. Always returns {@code true} so it can be invoked inside an {@code assert} statement; the real verification
     * happens via the nested assertions.
     *
     * @param snapshotStatus status object consulted when the store can no longer be referenced
     * @param fileInfo       metadata (including the expected hash bytes) for the file to verify
     * @param store          store to read the file from
     * @return always {@code true}
     */
    private static boolean assertFileContentsMatchHash(
        IndexShardSnapshotStatus snapshotStatus,
        BlobStoreIndexShardSnapshot.FileInfo fileInfo,
        Store store
    ) {
        if (store.tryIncRef()) {
            // Store still referenced: read the whole file and compare against the recorded hash bytes.
            try (IndexInput indexInput = store.openVerifyingInput(fileInfo.physicalName(), IOContext.READONCE, fileInfo.metadata())) {
                final byte[] tmp = new byte[Math.toIntExact(fileInfo.metadata().length())];
                indexInput.readBytes(tmp, 0, tmp.length);
                assert fileInfo.metadata().hash().bytesEquals(new BytesRef(tmp));
            } catch (IOException e) {
                throw new AssertionError(e);
            } finally {
                store.decRef();
            }
        } else {
            // Store already closed: that is only legitimate if the snapshot was aborted.
            try {
                snapshotStatus.ensureNotAborted();
                assert false : "if the store is already closed we must have been aborted";
            } catch (Exception e) {
                assert e instanceof AbortedSnapshotException : e;
            }
        }
        return true;
    }
    /**
     * Restores the given shard from the given snapshot into the provided {@link Store}.
     * <p>
     * Any failure is wrapped in an {@link IndexShardRestoreFailedException} before reaching the listener. The work runs on the
     * SNAPSHOT thread pool; downloads are parallelized across up to the pool's max size worker chains, each pulling files from a
     * shared queue. If the store or the repository is closing, the listener is failed immediately with
     * {@link AlreadyClosedException} without taking an activity reference.
     *
     * @param store           destination Lucene store; ref-counted around each file write
     * @param snapshotId      snapshot to restore from
     * @param indexId         repository-side identity of the index
     * @param snapshotShardId shard id within the snapshot (may differ from {@code store.shardId()})
     * @param recoveryState   recovery bookkeeping updated as bytes are restored
     * @param listener        resolved with {@code null} on success, failed otherwise
     */
    @Override
    public void restoreShard(
        Store store,
        SnapshotId snapshotId,
        IndexId indexId,
        ShardId snapshotShardId,
        RecoveryState recoveryState,
        ActionListener<Void> listener
    ) {
        final ShardId shardId = store.shardId();
        final ActionListener<Void> restoreListener = listener.delegateResponse(
            (l, e) -> l.onFailure(new IndexShardRestoreFailedException(shardId, "failed to restore snapshot [" + snapshotId + "]", e))
        );
        final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT);
        final BlobContainer container = shardContainer(indexId, snapshotShardId);
        // Fail fast if the store or this repository is shutting down before forking any work.
        if (store.isClosing()) {
            restoreListener.onFailure(new AlreadyClosedException("store is closing"));
            return;
        }
        if (lifecycle.started() == false) {
            restoreListener.onFailure(new AlreadyClosedException("repository [" + metadata.name() + "] closed"));
            return;
        }
        if (activityRefs.tryIncRef() == false) {
            restoreListener.onFailure(new AlreadyClosedException("repository [" + metadata.name() + "] closing"));
            return;
        }
        // runBefore releases the activity ref taken above once the listener completes, success or failure.
        executor.execute(ActionRunnable.wrap(ActionListener.runBefore(restoreListener, activityRefs::decRef), l -> {
            final BlobStoreIndexShardSnapshot snapshot = loadShardSnapshot(container, snapshotId);
            final SnapshotFiles snapshotFiles = new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles(), null);
            new FileRestoreContext(metadata.name(), shardId, snapshotId, recoveryState) {
                @Override
                protected void restoreFiles(
                    List<BlobStoreIndexShardSnapshot.FileInfo> filesToRecover,
                    Store store,
                    ActionListener<Void> listener
                ) {
                    if (filesToRecover.isEmpty()) {
                        listener.onResponse(null);
                    } else {
                        // Start as many workers as fit into the snapshot pool at once at the most
                        final int workers = Math.min(
                            threadPool.info(ThreadPool.Names.SNAPSHOT).getMax(),
                            snapshotFiles.indexFiles().size()
                        );
                        final BlockingQueue<BlobStoreIndexShardSnapshot.FileInfo> files = new LinkedBlockingQueue<>(filesToRecover);
                        final ActionListener<Void> allFilesListener = fileQueueListener(files, workers, listener.map(v -> null));
                        // restore the files from the snapshot to the Lucene store
                        for (int i = 0; i < workers; ++i) {
                            try {
                                executeOneFileRestore(files, allFilesListener);
                            } catch (Exception e) {
                                allFilesListener.onFailure(e);
                            }
                        }
                    }
                }
                // Restores one file from the queue, then re-schedules itself until the queue is drained.
                private void executeOneFileRestore(
                    BlockingQueue<BlobStoreIndexShardSnapshot.FileInfo> files,
                    ActionListener<Void> allFilesListener
                ) throws InterruptedException {
                    // Non-blocking poll: an empty queue means this worker chain is done.
                    final BlobStoreIndexShardSnapshot.FileInfo fileToRecover = files.poll(0L, TimeUnit.MILLISECONDS);
                    if (fileToRecover == null) {
                        allFilesListener.onResponse(null);
                    } else {
                        executor.execute(ActionRunnable.wrap(allFilesListener, filesListener -> {
                            store.incRef();
                            try {
                                restoreFile(fileToRecover, store);
                            } finally {
                                store.decRef();
                            }
                            executeOneFileRestore(files, filesListener);
                        }));
                    }
                }
                // Downloads (or materializes) a single file into the store, verifying its checksum and syncing it to disk.
                private void restoreFile(BlobStoreIndexShardSnapshot.FileInfo fileInfo, Store store) throws IOException {
                    ensureNotClosing(store);
                    logger.trace(() -> format("[%s] restoring [%s] to [%s]", metadata.name(), fileInfo, store));
                    boolean success = false;
                    try (
                        IndexOutput indexOutput = store.createVerifyingOutput(
                            fileInfo.physicalName(),
                            fileInfo.metadata(),
                            IOContext.DEFAULT
                        )
                    ) {
                        if (fileInfo.name().startsWith(VIRTUAL_DATA_BLOB_PREFIX)) {
                            // "Virtual" blobs carry the file contents inline in the metadata hash bytes, so no blob read is needed.
                            final BytesRef hash = fileInfo.metadata().hash();
                            indexOutput.writeBytes(hash.bytes, hash.offset, hash.length);
                            recoveryState.getIndex().addRecoveredBytesToFile(fileInfo.physicalName(), hash.length);
                        } else {
                            // Stream the file's parts sequentially, optionally rate limited by the restore limiter.
                            try (InputStream stream = maybeRateLimitRestores(new SlicedInputStream(fileInfo.numberOfParts()) {
                                @Override
                                protected InputStream openSlice(int slice) throws IOException {
                                    ensureNotClosing(store);
                                    return container.readBlob(OperationPurpose.SNAPSHOT_DATA, fileInfo.partName(slice));
                                }
                                @Override
                                public boolean markSupported() {
                                    return false;
                                }
                            })) {
                                final byte[] buffer = new byte[Math.toIntExact(Math.min(bufferSize, fileInfo.length()))];
                                int length;
                                while ((length = stream.read(buffer)) > 0) {
                                    ensureNotClosing(store);
                                    indexOutput.writeBytes(buffer, 0, length);
                                    recoveryState.getIndex().addRecoveredBytesToFile(fileInfo.physicalName(), length);
                                }
                            }
                        }
                        Store.verify(indexOutput);
                        // Close before the directory sync so the file is fully written; try-with-resources close is then a no-op.
                        indexOutput.close();
                        store.directory().sync(Collections.singleton(fileInfo.physicalName()));
                        success = true;
                    } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
                        try {
                            store.markStoreCorrupted(ex);
                        } catch (IOException e) {
                            logger.warn("store cannot be marked as corrupted", e);
                        }
                        throw ex;
                    } finally {
                        if (success == false) {
                            // Best-effort removal of the partially restored file.
                            store.deleteQuiet(fileInfo.physicalName());
                        }
                    }
                }
                // Throws if the store or this repository started closing mid-restore.
                void ensureNotClosing(final Store store) throws AlreadyClosedException {
                    assert store.refCount() > 0;
                    if (store.isClosing()) {
                        throw new AlreadyClosedException("store is closing");
                    }
                    if (lifecycle.started() == false) {
                        throw new AlreadyClosedException("repository [" + metadata.name() + "] closed");
                    }
                }
            }.restore(snapshotFiles, store, l);
        }));
    }
private static ActionListener<Void> fileQueueListener(
BlockingQueue<BlobStoreIndexShardSnapshot.FileInfo> files,
int numberOfFiles,
ActionListener<Collection<Void>> listener
) {
return new GroupedActionListener<>(numberOfFiles, listener).delegateResponse((l, e) -> {
files.clear(); // Stop uploading the remaining files if we run into any exception
l.onFailure(e);
});
}
    /**
     * Wraps the given stream in a {@link RateLimitingInputStream} driven by the supplied rate limiter; throttling events are reported
     * to the given listener.
     * <p>
     * NOTE(review): the "maybe" presumably means {@link RateLimitingInputStream} skips throttling when the supplier yields no
     * limiter — confirm in its implementation.
     */
    private static InputStream maybeRateLimit(
        InputStream stream,
        Supplier<RateLimiter> rateLimiterSupplier,
        RateLimitingInputStream.Listener throttleListener
    ) {
        return new RateLimitingInputStream(stream, rateLimiterSupplier, throttleListener);
    }
    /**
     * Wrap the restore rate limiter (controlled by the repository setting `max_restore_bytes_per_sec` and the cluster setting
     * `indices.recovery.max_bytes_per_sec`) around the given stream. Throttling is always reported to this repository's snapshot
     * metrics listener, presumably surfaced via {@link RepositoriesStats.SnapshotStats#totalReadThrottledNanos()} — confirm.
     * (NOTE(review): the previous javadoc mentioned a "given listener", but this overload takes none; the descriptions of the two
     * overloads appeared to be swapped.)
     *
     * @param stream the stream to wrap
     * @return the wrapped, possibly rate-limited stream
     */
    public InputStream maybeRateLimitRestores(InputStream stream) {
        return maybeRateLimitRestores(stream, blobStoreSnapshotMetrics::incrementRestoreRateLimitingTimeInNanos);
    }
    /**
     * Wrap the restore rate limiter (controlled by the repository setting `max_restore_bytes_per_sec` and the cluster setting
     * `indices.recovery.max_bytes_per_sec`) around the given stream. Any throttling is reported to the given listener and is not
     * otherwise recorded here. (NOTE(review): the previous javadoc claimed throttling is recorded in
     * {@link RepositoriesStats.SnapshotStats#totalReadThrottledNanos()}, but this overload uses whatever listener the caller
     * supplies; the descriptions of the two overloads appeared to be swapped.)
     *
     * @param stream           the stream to wrap
     * @param throttleListener receives throttling notifications
     * @return the wrapped, possibly rate-limited stream
     */
    public InputStream maybeRateLimitRestores(InputStream stream, RateLimitingInputStream.Listener throttleListener) {
        return maybeRateLimit(
            maybeRateLimit(stream, () -> restoreRateLimiter, throttleListener),
            recoverySettings::rateLimiter,
            throttleListener
        );
    }
    /**
     * Wrap the snapshot rate limiter around the given stream. Any throttling is recorded in the value returned by
     * {@link RepositoriesStats.SnapshotStats#totalWriteThrottledNanos()}. Note that speed is throttled by the repository setting
     * `max_snapshot_bytes_per_sec` and, if recovery node bandwidth settings have been set, additionally by the
     * `indices.recovery.max_bytes_per_sec` speed.
     *
     * @param stream the stream to wrap
     * @return the wrapped, possibly rate-limited stream
     */
    public InputStream maybeRateLimitSnapshots(InputStream stream) {
        return maybeRateLimitSnapshots(stream, blobStoreSnapshotMetrics::incrementSnapshotRateLimitingTimeInNanos);
    }
/**
* Wrap the snapshot rate limiter around the given stream. Any throttling is recorded in the value returned by
* {@link RepositoriesStats.SnapshotStats#totalWriteThrottledNanos()}. Note that speed is throttled by the repository setting
* `max_snapshot_bytes_per_sec` and, if recovery node bandwidth settings have been set, additionally by the
* `indices.recovery.max_bytes_per_sec` speed.
*/
public InputStream maybeRateLimitSnapshots(InputStream stream, RateLimitingInputStream.Listener throttleListener) {
InputStream rateLimitStream = maybeRateLimit(stream, () -> snapshotRateLimiter, throttleListener);
if (recoverySettings.nodeBandwidthSettingsExist()) {
rateLimitStream = maybeRateLimit(rateLimitStream, recoverySettings::rateLimiter, throttleListener);
}
return rateLimitStream;
}
@Override
public IndexShardSnapshotStatus.Copy getShardSnapshotStatus(SnapshotId snapshotId, IndexId indexId, ShardId shardId) {
BlobStoreIndexShardSnapshot snapshot = loadShardSnapshot(shardContainer(indexId, shardId), snapshotId);
return IndexShardSnapshotStatus.newDone(
snapshot.startTime(),
snapshot.time(),
snapshot.incrementalFileCount(),
snapshot.totalFileCount(),
snapshot.incrementalSize(),
snapshot.totalSize(),
null // Not adding a real generation here as it doesn't matter to callers
);
}
    /**
     * Verifies that this repository is usable from the current node.
     * <p>
     * Read-only repositories are verified by resolving the latest index blob id. Writable repositories are verified by writing a
     * node-specific test blob into a seed-scoped test container and then reading back the "master.dat" blob previously written by the
     * master, checking that it contains the expected seed. All failures are wrapped in {@link RepositoryVerificationException}.
     *
     * @param seed      random seed identifying this verification round; also the expected content of "master.dat"
     * @param localNode the node performing the verification, used in error messages
     */
    @Override
    public void verify(String seed, DiscoveryNode localNode) {
        assertSnapshotOrStatelessPermittedThreadPool();
        if (isReadOnly()) {
            try {
                // Read-only: just prove the latest index-N blob id can be resolved.
                latestIndexBlobId();
            } catch (Exception e) {
                throw new RepositoryVerificationException(
                    metadata.name(),
                    "path " + basePath() + " is not accessible on node " + localNode,
                    e
                );
            }
        } else {
            BlobContainer testBlobContainer = blobStore().blobContainer(basePath().add(testBlobPrefix(seed)));
            try {
                // Prove this node can write into the test container.
                testBlobContainer.writeBlob(
                    OperationPurpose.SNAPSHOT_METADATA,
                    "data-" + localNode.getId() + METADATA_BLOB_NAME_SUFFIX,
                    new BytesArray(seed),
                    true
                );
            } catch (Exception exp) {
                throw new RepositoryVerificationException(
                    metadata.name(),
                    "store location [" + blobStore() + "] is not accessible on the node [" + localNode + "]",
                    exp
                );
            }
            // Prove this node sees the blob written by the master, i.e. the store is genuinely shared.
            try (InputStream masterDat = testBlobContainer.readBlob(OperationPurpose.SNAPSHOT_METADATA, "master.dat")) {
                final String seedRead = Streams.readFully(masterDat).utf8ToString();
                if (seedRead.equals(seed) == false) {
                    throw new RepositoryVerificationException(
                        metadata.name(),
                        "Seed read from master.dat was [" + seedRead + "] but expected seed [" + seed + "]"
                    );
                }
            } catch (NoSuchFileException e) {
                throw new RepositoryVerificationException(
                    metadata.name(),
                    "a file written by master to the store ["
                        + blobStore()
                        + "] cannot be accessed on the node ["
                        + localNode
                        + "]. "
                        + "This might indicate that the store ["
                        + blobStore()
                        + "] is not shared between this node and the master node or "
                        + "that permissions on the store don't allow reading files written by the master node",
                    e
                );
            } catch (Exception e) {
                throw new RepositoryVerificationException(metadata.name(), "Failed to verify repository", e);
            }
        }
    }
@Override
public String toString() {
return "BlobStoreRepository[" + toStringShort() + ", [" + blobStore.get() + ']' + ']';
}
    /**
     * Short string describing this repository, combining the project id and repository name via {@code projectRepoString}.
     * Package private for testing.
     */
    String toStringShort() {
        return projectRepoString(projectId, metadata.name());
    }
    /**
     * Utility for atomically writing shard level metadata to a numeric shard generation. This is only required for writing
     * numeric shard generations where atomic writes with fail-if-already-exists checks are useful in preventing repository corruption.
     *
     * @param shardContainer      container of the shard to write into
     * @param indexGeneration     numeric generation to write; must be non-negative
     * @param updatedSnapshots    the shard-level metadata to serialize into the index-${N} blob
     * @param serializationParams extra parameters passed to the serializer
     * @throws IOException if serializing or writing the blob fails
     */
    private void writeShardIndexBlobAtomic(
        BlobContainer shardContainer,
        long indexGeneration,
        BlobStoreIndexShardSnapshots updatedSnapshots,
        Map<String, String> serializationParams
    ) throws IOException {
        assert indexGeneration >= 0 : "Shard generation must not be negative but saw [" + indexGeneration + "]";
        logger.trace(() -> format("[%s] Writing shard index [%s] to [%s]", metadata.name(), indexGeneration, shardContainer.path()));
        final String blobName = INDEX_SHARD_SNAPSHOTS_FORMAT.blobName(String.valueOf(indexGeneration));
        // failIfAlreadyExists=true: a generation must never be overwritten once written.
        writeAtomic(
            OperationPurpose.SNAPSHOT_METADATA,
            shardContainer,
            blobName,
            out -> INDEX_SHARD_SNAPSHOTS_FORMAT.serialize(updatedSnapshots, blobName, compress, serializationParams, out),
            true
        );
    }
    /**
     * Loads information about shard snapshot
     *
     * @param shardContainer container holding the shard's snapshot blobs
     * @param snapshotId     snapshot whose shard-level metadata blob (keyed by the snapshot UUID) should be read
     * @return the deserialized shard snapshot metadata
     * @throws SnapshotMissingException if the metadata blob does not exist
     * @throws SnapshotException        if reading or parsing the blob fails for any other reason
     */
    public BlobStoreIndexShardSnapshot loadShardSnapshot(BlobContainer shardContainer, SnapshotId snapshotId) {
        try {
            return INDEX_SHARD_SNAPSHOT_FORMAT.read(getProjectRepo(), shardContainer, snapshotId.getUUID(), namedXContentRegistry);
        } catch (NoSuchFileException ex) {
            // NoSuchFileException extends IOException, so this narrower catch must come first.
            throw new SnapshotMissingException(metadata.name(), snapshotId, ex);
        } catch (IOException ex) {
            throw new SnapshotException(
                metadata.name(),
                snapshotId,
                "failed to read shard snapshot file for [" + shardContainer.path() + ']',
                ex
            );
        }
    }
/**
* Loads all available snapshots in the repository using the given {@code generation} for a shard. When {@code shardGen}
* is null it tries to load it using the BwC mode, listing the available index- blobs in the shard container.
*/
public BlobStoreIndexShardSnapshots getBlobStoreIndexShardSnapshots(IndexId indexId, int shardId, @Nullable ShardGeneration shardGen)
throws IOException {
final BlobContainer shardContainer = shardContainer(indexId, shardId);
Set<String> blobs = Collections.emptySet();
if (shardGen == null) {
blobs = shardContainer.listBlobsByPrefix(OperationPurpose.SNAPSHOT_METADATA, SNAPSHOT_INDEX_PREFIX).keySet();
}
return buildBlobStoreIndexShardSnapshots(indexId, shardId, blobs, shardContainer, shardGen).v1();
}
    /**
     * Loads all available snapshots in the repository using the given {@code generation} or falling back to trying to determine it from
     * the given list of blobs in the shard container.
     *
     * @param indexId {@link IndexId} identifying the corresponding index
     * @param shardId The 0-based shard id, see also {@link ShardId#id()}
     * @param blobs list of blobs in repository
     * @param generation shard generation or {@code null} in case there was no shard generation tracked in the {@link RepositoryData} for
     *                   this shard because its snapshot was created in a version older than
     *                   {@link SnapshotsService#SHARD_GEN_IN_REPO_DATA_VERSION}.
     * @return tuple of BlobStoreIndexShardSnapshots and the last snapshot index generation
     * @throws IOException if reading shard-level metadata from the repository fails
     */
    private Tuple<BlobStoreIndexShardSnapshots, ShardGeneration> buildBlobStoreIndexShardSnapshots(
        IndexId indexId,
        int shardId,
        Set<String> blobs,
        BlobContainer shardContainer,
        @Nullable ShardGeneration generation
    ) throws IOException {
        if (generation != null) {
            // NEW_SHARD_GEN is a sentinel: the shard has no snapshots yet, so there is nothing to read.
            if (generation.equals(ShardGenerations.NEW_SHARD_GEN)) {
                return new Tuple<>(BlobStoreIndexShardSnapshots.EMPTY, ShardGenerations.NEW_SHARD_GEN);
            }
            try {
                return new Tuple<>(
                    INDEX_SHARD_SNAPSHOTS_FORMAT.read(
                        getProjectRepo(),
                        shardContainer,
                        generation.getGenerationUUID(),
                        namedXContentRegistry
                    ),
                    generation
                );
            } catch (NoSuchFileException noSuchFileException) {
                // Master may have concurrently mutated the shard generation. This can happen when master fails over
                // which is "expected". We do not need to apply the following workaround for missing file in this case.
                final RepositoryData currentRepositoryData;
                try {
                    final long latestGeneration = latestIndexBlobId();
                    currentRepositoryData = getRepositoryData(latestGeneration);
                } catch (Exception e) {
                    noSuchFileException.addSuppressed(e);
                    throw noSuchFileException;
                }
                final ShardGeneration latestShardGen = currentRepositoryData.shardGenerations().getShardGen(indexId, shardId);
                // If the tracked generation moved on, the missing blob was legitimately replaced: rethrow as-is.
                if (latestShardGen == null || latestShardGen.equals(generation) == false) {
                    throw noSuchFileException;
                }
                // This shouldn't happen (absent an external force deleting blobs from the repo) but in practice we've found bugs in the way
                // we manipulate shard generation UUIDs under concurrent snapshot load which can lead to incorrectly deleting a referenced
                // shard-level `index-UUID` blob during finalization. We definitely want to treat this as a test failure (see the `assert`
                // below), but in production it is rather poor UX to leave this shard in a state where it permanently cannot take any new
                // snapshots. Especially if we got into this state because of a bug. Thus we fall back to reconstructing the contents of the
                // missing blob from all available shard-level snapshot metadata blobs.
                //
                // Note that if a shard-level snapshot metadata blob is also missing when executing this fallback then we'll lose track of,
                // and eventually delete, the corresponding data blobs. We haven't seen any bugs that can lead to missing shard-level
                // metadata blobs, and any such bug would prevent the shard snapshot from being restored anyway so there's no point in
                // keeping hold of its data blobs.
                try {
                    final var message = Strings.format(
                        "index %s shard generation [%s] in %s[%s] not found - falling back to reading all shard snapshots",
                        indexId,
                        generation,
                        toStringShort(),
                        shardContainer.path()
                    );
                    logger.error(message, noSuchFileException);
                    assert BlobStoreIndexShardSnapshots.areIntegrityAssertionsEnabled() == false
                        : new AssertionError(message, noSuchFileException);
                    final var shardSnapshotBlobs = shardContainer.listBlobsByPrefix(OperationPurpose.SNAPSHOT_METADATA, SNAPSHOT_PREFIX);
                    var blobStoreIndexShardSnapshots = BlobStoreIndexShardSnapshots.EMPTY;
                    final var messageBuilder = new StringBuilder();
                    // Expected blob name shape: snap-<uuid><suffix>; anything else in the container indicates corruption.
                    final var shardSnapshotBlobNameLengthBeforeExt = SNAPSHOT_PREFIX.length() + UUIDs.RANDOM_BASED_UUID_STRING_LENGTH;
                    final var shardSnapshotBlobNameLength = shardSnapshotBlobNameLengthBeforeExt + METADATA_BLOB_NAME_SUFFIX.length();
                    for (final var shardSnapshotBlobName : shardSnapshotBlobs.keySet()) {
                        if (shardSnapshotBlobName.startsWith(SNAPSHOT_PREFIX)
                            && shardSnapshotBlobName.endsWith(METADATA_BLOB_NAME_SUFFIX)
                            && shardSnapshotBlobName.length() == shardSnapshotBlobNameLength) {
                            final var shardSnapshot = INDEX_SHARD_SNAPSHOT_FORMAT.read(
                                getProjectRepo(),
                                shardContainer,
                                shardSnapshotBlobName.substring(SNAPSHOT_PREFIX.length(), shardSnapshotBlobNameLengthBeforeExt),
                                namedXContentRegistry
                            );
                            blobStoreIndexShardSnapshots = blobStoreIndexShardSnapshots.withAddedSnapshot(
                                new SnapshotFiles(shardSnapshot.snapshot(), shardSnapshot.indexFiles(), null)
                            );
                            if (messageBuilder.length() > 0) {
                                messageBuilder.append(", ");
                            }
                            messageBuilder.append(shardSnapshotBlobName);
                        } else {
                            throw new IllegalStateException(
                                Strings.format(
                                    "unexpected shard snapshot blob [%s] found in [%s][%s]",
                                    shardSnapshotBlobName,
                                    metadata.name(),
                                    shardContainer.path()
                                )
                            );
                        }
                    }
                    logger.error(
                        "read shard snapshots [{}] due to missing shard generation [{}] for index {} in {}[{}]",
                        messageBuilder,
                        generation,
                        indexId,
                        toStringShort(),
                        shardContainer.path()
                    );
                    return new Tuple<>(blobStoreIndexShardSnapshots, generation);
                } catch (Exception fallbackException) {
                    logger.error(
                        Strings.format("failed while reading all shard snapshots from %s[%s]", toStringShort(), shardContainer.path()),
                        fallbackException
                    );
                    noSuchFileException.addSuppressed(fallbackException);
                    throw noSuchFileException;
                }
            }
        } else {
            // No tracked generation at all: use the BwC numeric index-N discovery based on the listed blobs.
            final Tuple<BlobStoreIndexShardSnapshots, Long> legacyIndex = buildBlobStoreIndexShardSnapshots(blobs, shardContainer);
            return new Tuple<>(legacyIndex.v1(), new ShardGeneration(legacyIndex.v2()));
        }
    }
/**
* Loads all available snapshots in the repository
*
* @param blobs list of blobs in repository
* @return tuple of BlobStoreIndexShardSnapshots and the last snapshot index generation
*/
private Tuple<BlobStoreIndexShardSnapshots, Long> buildBlobStoreIndexShardSnapshots(Set<String> blobs, BlobContainer shardContainer)
throws IOException {
long latest = latestGeneration(blobs);
if (latest >= 0) {
final BlobStoreIndexShardSnapshots shardSnapshots = INDEX_SHARD_SNAPSHOTS_FORMAT.read(
getProjectRepo(),
shardContainer,
Long.toString(latest),
namedXContentRegistry
);
return new Tuple<>(shardSnapshots, latest);
} else if (blobs.stream()
.anyMatch(
b -> b.startsWith(SNAPSHOT_PREFIX) || b.startsWith(SNAPSHOT_INDEX_PREFIX) || b.startsWith(UPLOADED_DATA_BLOB_PREFIX)
)) {
logger.warn(
"Could not find a readable index-N file in a non-empty shard snapshot directory [" + shardContainer.path() + "]"
);
}
return new Tuple<>(BlobStoreIndexShardSnapshots.EMPTY, latest);
}
    /**
     * Snapshot individual file
     *
     * @param context  shard snapshot context providing the store, status object, snapshot id and commit reference
     * @param fileInfo file to snapshot; uploaded part by part, each part as its own blob
     * @throws IOException if reading the file from the store or writing a part blob fails
     */
    protected void snapshotFile(SnapshotShardContext context, FileInfo fileInfo) throws IOException {
        final IndexId indexId = context.indexId();
        final Store store = context.store();
        final ShardId shardId = store.shardId();
        final IndexShardSnapshotStatus snapshotStatus = context.status();
        final SnapshotId snapshotId = context.snapshotId();
        final BlobContainer shardContainer = shardContainer(indexId, shardId);
        final String file = fileInfo.physicalName();
        // Hold the commit ref for the duration of the upload so the underlying Lucene files cannot be deleted mid-read.
        try (
            Releasable ignored = context.withCommitRef();
            IndexInput indexInput = store.openVerifyingInput(file, IOContext.DEFAULT, fileInfo.metadata())
        ) {
            for (int i = 0; i < fileInfo.numberOfParts(); i++) {
                final long partBytes = fileInfo.partBytes(i);
                // Make reads abortable by mutating the snapshotStatus object
                final InputStream inputStream = new FilterInputStream(
                    maybeRateLimitSnapshots(new InputStreamIndexInput(indexInput, partBytes))
                ) {
                    @Override
                    public int read() throws IOException {
                        checkAborted();
                        final long beforeReadMillis = threadPool.rawRelativeTimeInMillis();
                        int value = super.read();
                        blobStoreSnapshotMetrics.incrementUploadReadTime(threadPool.rawRelativeTimeInMillis() - beforeReadMillis);
                        return value;
                    }
                    @Override
                    public int read(byte[] b, int off, int len) throws IOException {
                        checkAborted();
                        final long beforeReadMillis = threadPool.rawRelativeTimeInMillis();
                        int amountRead = super.read(b, off, len);
                        blobStoreSnapshotMetrics.incrementUploadReadTime(threadPool.rawRelativeTimeInMillis() - beforeReadMillis);
                        return amountRead;
                    }
                    // Throws if the snapshot has been aborted or paused, cutting the upload short.
                    private void checkAborted() {
                        ensureNotAborted(shardId, snapshotId, snapshotStatus, fileInfo.physicalName());
                    }
                };
                final String partName = fileInfo.partName(i);
                logger.trace("[{}] Writing [{}] to [{}]", metadata.name(), partName, shardContainer.path());
                final long startMillis = threadPool.rawRelativeTimeInMillis();
                shardContainer.writeBlob(OperationPurpose.SNAPSHOT_DATA, partName, inputStream, partBytes, false);
                final long uploadTimeInMillis = threadPool.rawRelativeTimeInMillis() - startMillis;
                blobStoreSnapshotMetrics.incrementCountersForPartUpload(partBytes, uploadTimeInMillis);
                logger.trace(
                    "[{}] Writing [{}] of size [{}b] to [{}] took [{}/{}ms]",
                    metadata.name(),
                    partName,
                    partBytes,
                    shardContainer.path(),
                    new TimeValue(uploadTimeInMillis),
                    uploadTimeInMillis
                );
            }
            blobStoreSnapshotMetrics.incrementNumberOfBlobsUploaded();
            // Verify the checksum after the whole file has been streamed through the verifying input.
            Store.verify(indexInput);
            snapshotStatus.addProcessedFile(fileInfo.length());
        } catch (Exception t) {
            failStoreIfCorrupted(store, t);
            snapshotStatus.addProcessedFile(0);
            throw t;
        }
    }
    /**
     * If the given exception indicates Lucene index corruption, marks the store as corrupted. Any {@link IOException} thrown while
     * marking is logged (with the original exception suppressed into it) rather than propagated.
     */
    private static void failStoreIfCorrupted(Store store, Exception e) {
        if (Lucene.isCorruptionException(e)) {
            try {
                // Cast assumes Lucene corruption exceptions are IOExceptions (e.g. CorruptIndexException) — TODO confirm this holds
                // for every exception type accepted by Lucene.isCorruptionException.
                store.markStoreCorrupted((IOException) e);
            } catch (IOException inner) {
                inner.addSuppressed(e);
                logger.warn("store cannot be marked as corrupted", inner);
            }
        }
    }
    /**
     * @return the value of this repository's {@code supportURLRepo} setting (the flag's semantics are defined where the field is
     * initialized — outside this excerpt).
     */
    public boolean supportURLRepo() {
        return supportURLRepo;
    }
    /**
     * @return whether this repository performs overwrites atomically. In practice we only overwrite the `index.latest` blob so this
     * is not very important, but the repository analyzer does test that overwrites happen atomically. It will skip those tests if the
     * repository overrides this method to indicate that it does not support atomic overwrites.
     */
    public boolean hasAtomicOverwrites() {
        // Default: assume atomic overwrites; implementations that cannot guarantee this should override.
        return true;
    }
    /**
     * @return the buffer size, in bytes, used when reading from the blob store (the same {@code bufferSize} that caps the copy
     * buffer during file restore).
     */
    public int getReadBufferSizeInBytes() {
        return bufferSize;
    }
    /**
     * @return extra information to be included in the exception message emitted on failure of a repository analysis. The default
     * message points at the snapshot-repository-analysis reference docs; subclasses may override to add storage-specific detail.
     */
    public String getAnalysisFailureExtraDetail() {
        return Strings.format(
            """
                Elasticsearch observed the storage system underneath this repository behaved incorrectly which indicates it is not \
                suitable for use with Elasticsearch snapshots. See [%s] for further information.""",
            ReferenceDocs.SNAPSHOT_REPOSITORY_ANALYSIS
        );
    }
    /** Usage-stats feature name reported for repositories that are read-only (see {@code getUsageFeatures()}). */
    public static final String READ_ONLY_USAGE_STATS_NAME = "read_only";
    /** Usage-stats feature name reported for writable repositories (see {@code getUsageFeatures()}). */
    public static final String READ_WRITE_USAGE_STATS_NAME = "read_write";
@Override
public final Set<String> getUsageFeatures() {
final var extraUsageFeatures = getExtraUsageFeatures();
assert extraUsageFeatures.contains(READ_ONLY_USAGE_STATS_NAME) == false : extraUsageFeatures;
assert extraUsageFeatures.contains(READ_WRITE_USAGE_STATS_NAME) == false : extraUsageFeatures;
return Set.copyOf(
Stream.concat(Stream.of(isReadOnly() ? READ_ONLY_USAGE_STATS_NAME : READ_WRITE_USAGE_STATS_NAME), extraUsageFeatures.stream())
.toList()
);
}
    /**
     * All blob-store repositories include the counts of read-only and read-write repositories in their telemetry. This method returns other
     * features of the repositories in use.
     *
     * @return a set of the names of the extra features that this repository instance uses, for reporting in the cluster stats for telemetry
     *         collection.
     */
    protected Set<String> getExtraUsageFeatures() {
        return Set.of(); // no extra features by default; protected so subclasses can contribute their own
    }
    /**
     * {@inheritDoc}
     * <p>Delegates to this repository's snapshot metrics object.
     */
    @Override
    public LongWithAttributes getShardSnapshotsInProgress() {
        return blobStoreSnapshotMetrics.getShardSnapshotsInProgress();
    }
    /**
     * {@inheritDoc}
     * <p>Delegates to this repository's snapshot metrics object.
     */
    @Override
    public RepositoriesStats.SnapshotStats getSnapshotStats() {
        return blobStoreSnapshotMetrics.getSnapshotStats();
    }
}
| ShardBlobsToDelete |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/operators/SemanticProperties.java | {
"start": 2883,
"end": 3247
} | class ____ extends InvalidProgramException {
private static final long serialVersionUID = 1L;
public InvalidSemanticAnnotationException(String s) {
super(s);
}
public InvalidSemanticAnnotationException(String s, Throwable e) {
super(s, e);
}
}
public static | InvalidSemanticAnnotationException |
java | apache__dubbo | dubbo-plugin/dubbo-mcp/src/main/java/org/apache/dubbo/mcp/core/McpServiceExportListener.java | {
"start": 1670,
"end": 5607
} | class ____ {
final int toolCount;
final String interfaceName;
final ProviderModel providerModel;
RegisteredServiceInfo(int toolCount, String interfaceName, ProviderModel providerModel) {
this.toolCount = toolCount;
this.interfaceName = interfaceName;
this.providerModel = providerModel;
}
}
@Override
public void exported(ServiceConfig sc) {
try {
if (sc.getRef() == null) {
return;
}
String serviceKey = sc.getUniqueServiceName();
ProviderModel providerModel =
sc.getScopeModel().getServiceRepository().lookupExportedService(serviceKey);
if (providerModel == null) {
logger.warn(
LoggerCodeConstants.COMMON_UNEXPECTED_EXCEPTION,
"",
"",
"ProviderModel not found for service: " + sc.getInterface() + " with key: " + serviceKey);
return;
}
DubboServiceToolRegistry toolRegistry = getToolRegistry(sc);
if (toolRegistry == null) {
return;
}
int registeredCount = toolRegistry.registerService(providerModel);
if (registeredCount > 0) {
registeredServiceTools.put(
serviceKey,
new RegisteredServiceInfo(
registeredCount, providerModel.getServiceModel().getInterfaceName(), providerModel));
logger.info(
"Dynamically registered {} MCP tools for exported service: {}",
registeredCount,
providerModel.getServiceModel().getInterfaceName());
}
} catch (Exception e) {
logger.error(
LoggerCodeConstants.COMMON_UNEXPECTED_EXCEPTION,
"",
"",
"Failed to register MCP tools for exported service: " + sc.getInterface(),
e);
}
}
@Override
public void unexported(ServiceConfig sc) {
try {
if (sc.getRef() == null) {
return;
}
String serviceKey = sc.getUniqueServiceName();
RegisteredServiceInfo serviceInfo = registeredServiceTools.remove(serviceKey);
if (serviceInfo != null && serviceInfo.toolCount > 0) {
DubboServiceToolRegistry toolRegistry = getToolRegistry(sc);
if (toolRegistry == null) {
return;
}
toolRegistry.unregisterService(serviceInfo.providerModel);
logger.info(
"Dynamically unregistered {} MCP tools for unexported service: {}",
serviceInfo.toolCount,
serviceInfo.interfaceName);
}
} catch (Exception e) {
logger.error(
LoggerCodeConstants.COMMON_UNEXPECTED_EXCEPTION,
"",
"",
"Failed to unregister MCP tools for unexported service: " + sc.getInterface(),
e);
}
}
private DubboServiceToolRegistry getToolRegistry(ServiceConfig sc) {
try {
ApplicationModel applicationModel = sc.getScopeModel().getApplicationModel();
return applicationModel.getBeanFactory().getBean(DubboServiceToolRegistry.class);
} catch (Exception e) {
logger.warn(
LoggerCodeConstants.COMMON_UNEXPECTED_EXCEPTION,
"",
"",
"Failed to get DubboServiceToolRegistry from application context",
e);
return null;
}
}
}
| RegisteredServiceInfo |
java | apache__kafka | clients/src/test/java/org/apache/kafka/common/TopicIdPartitionTest.java | {
"start": 1023,
"end": 3881
} | class ____ {
private final Uuid topicId0 = new Uuid(-4883993789924556279L, -5960309683534398572L);
private final String topicName0 = "a_topic_name";
private final int partition1 = 1;
private final TopicPartition topicPartition0 = new TopicPartition(topicName0, partition1);
private final TopicIdPartition topicIdPartition0 = new TopicIdPartition(topicId0, topicPartition0);
private final TopicIdPartition topicIdPartition1 = new TopicIdPartition(topicId0,
partition1, topicName0);
private final TopicIdPartition topicIdPartitionWithNullTopic0 = new TopicIdPartition(topicId0,
partition1, null);
private final TopicIdPartition topicIdPartitionWithNullTopic1 = new TopicIdPartition(topicId0,
new TopicPartition(null, partition1));
private final Uuid topicId1 = new Uuid(7759286116672424028L, -5081215629859775948L);
private final String topicName1 = "another_topic_name";
private final TopicIdPartition topicIdPartition2 = new TopicIdPartition(topicId1,
partition1, topicName1);
private final TopicIdPartition topicIdPartitionWithNullTopic2 = new TopicIdPartition(topicId1,
new TopicPartition(null, partition1));
@Test
public void testEquals() {
assertEquals(topicIdPartition0, topicIdPartition1);
assertEquals(topicIdPartition1, topicIdPartition0);
assertEquals(topicIdPartitionWithNullTopic0, topicIdPartitionWithNullTopic1);
assertNotEquals(topicIdPartition0, topicIdPartition2);
assertNotEquals(topicIdPartition2, topicIdPartition0);
assertNotEquals(topicIdPartition0, topicIdPartitionWithNullTopic0);
assertNotEquals(topicIdPartitionWithNullTopic0, topicIdPartitionWithNullTopic2);
}
@Test
public void testHashCode() {
assertEquals(Objects.hash(topicIdPartition0.topicId(), topicIdPartition0.topicPartition()),
topicIdPartition0.hashCode());
assertEquals(topicIdPartition0.hashCode(), topicIdPartition1.hashCode());
assertEquals(Objects.hash(topicIdPartitionWithNullTopic0.topicId(),
new TopicPartition(null, partition1)), topicIdPartitionWithNullTopic0.hashCode());
assertEquals(topicIdPartitionWithNullTopic0.hashCode(), topicIdPartitionWithNullTopic1.hashCode());
assertNotEquals(topicIdPartition0.hashCode(), topicIdPartition2.hashCode());
assertNotEquals(topicIdPartition0.hashCode(), topicIdPartitionWithNullTopic0.hashCode());
assertNotEquals(topicIdPartitionWithNullTopic0.hashCode(), topicIdPartitionWithNullTopic2.hashCode());
}
@Test
public void testToString() {
assertEquals("vDiRhkpVQgmtSLnsAZx7lA:a_topic_name-1", topicIdPartition0.toString());
assertEquals("vDiRhkpVQgmtSLnsAZx7lA:null-1", topicIdPartitionWithNullTopic0.toString());
}
}
| TopicIdPartitionTest |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/filter/wall/WallSelectWhereTest2.java | {
"start": 794,
"end": 1130
} | class ____ extends TestCase {
private String sql = "SELECT F1, F2 from t WHERE 1 = 1 AND FID = ?";
public void testMySql() throws Exception {
assertTrue(WallUtils.isValidateMySql(sql));
}
public void testORACLE() throws Exception {
assertTrue(WallUtils.isValidateOracle(sql));
}
}
| WallSelectWhereTest2 |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/completeness/TypeInfoTestCoverageTest.java | {
"start": 2428,
"end": 5350
} | class ____ extends TestLogger {
@Test
public void testTypeInfoTestCoverage() {
Reflections reflections = new Reflections("org.apache.flink");
Set<Class<? extends TypeInformation>> typeInfos =
reflections.getSubTypesOf(TypeInformation.class);
Set<String> typeInfoTestNames =
reflections.getSubTypesOf(TypeInformationTestBase.class).stream()
.map(Class::getName)
.collect(Collectors.toSet());
// type info whitelist for TypeInformationTestBase test coverage (see FLINK-27725)
final List<String> typeInfoTestBaseWhitelist =
Arrays.asList(
LegacyTimestampTypeInfo.class.getName(),
InternalTypeInfo.class.getName(),
LegacyLocalDateTimeTypeInfo.class.getName(),
TimeIntervalTypeInfo.class.getName(),
TimeIndicatorTypeInfo.class.getName(),
TimestampDataTypeInfo.class.getName(),
MapViewTypeInfo.class.getName(),
LegacyInstantTypeInfo.class.getName(),
ListViewTypeInfo.class.getName(),
StringDataTypeInfo.class.getName(),
SortedMapTypeInfo.class.getName(),
ExternalTypeInfo.class.getName(),
BigDecimalTypeInfo.class.getName(),
DecimalDataTypeInfo.class.getName(),
GenericRecordAvroTypeInfo.class.getName(),
AvroTypeInfo.class.getName());
// check if a test exists for each type information
for (Class<? extends TypeInformation> typeInfo : typeInfos) {
// we skip abstract classes and inner classes to skip type information defined in test
// classes
if (Modifier.isAbstract(typeInfo.getModifiers())
|| Modifier.isPrivate(typeInfo.getModifiers())
|| typeInfo.getName().contains("Test$")
|| typeInfo.getName().contains("TestBase$")
|| typeInfo.getName().contains("ITCase$")
|| typeInfo.getName().contains("$$anon")
|| typeInfo.getName().contains("queryablestate")) {
continue;
}
final String testToFind = typeInfo.getName() + "Test";
if (!typeInfoTestNames.contains(testToFind)
&& !typeInfoTestBaseWhitelist.contains(typeInfo.getName())) {
fail(
"Could not find test '"
+ testToFind
+ "' that covers '"
+ typeInfo.getName()
+ "'.");
}
}
}
}
| TypeInfoTestCoverageTest |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/core/publisher/LiftFunctionTest.java | {
"start": 5164,
"end": 5884
} | class ____ {
ConnectableFlux<Integer> source = Flux.just(1).publish().hide();
@Test
void liftScannable() {
initLiftOperatorByLiftScannable(source);
lift(ConnectableFlux.class, ConnectableLift.class);
}
@Test
void scanLiftedAsScannable() {
initLiftOperatorByLiftScannable(source);
scanOperator(source, Queues.SMALL_BUFFER_SIZE, Attr.RunStyle.SYNC);
}
@Test
void liftPublisher() {
initLiftOperatorByLiftPublisher(source);
lift(ConnectableFlux.class, ConnectableLift.class);
}
@Test
void scanLiftedAsPublisher() {
initLiftOperatorByLiftPublisher(source);
scanOperator(source, Queues.SMALL_BUFFER_SIZE, Attr.RunStyle.SYNC);
}
}
@Nested
| Normal |
java | apache__flink | flink-core/src/main/java/org/apache/flink/core/fs/LimitedConnectionsFileSystem.java | {
"start": 2657,
"end": 23727
} | class ____ extends FileSystem {
private static final Logger LOG = LoggerFactory.getLogger(LimitedConnectionsFileSystem.class);
/** The original file system to which connections are limited. */
private final FileSystem originalFs;
/** The lock that synchronizes connection bookkeeping. */
private final ReentrantLock lock;
/** Condition for threads that are blocking on the availability of new connections. */
private final Condition available;
/** The maximum number of concurrently open output streams. */
private final int maxNumOpenOutputStreams;
/** The maximum number of concurrently open input streams. */
private final int maxNumOpenInputStreams;
/** The maximum number of concurrently open streams (input + output). */
private final int maxNumOpenStreamsTotal;
/** The nanoseconds that a opening a stream may wait for availability. */
private final long streamOpenTimeoutNanos;
/**
* The nanoseconds that a stream may spend not writing any bytes before it is closed as
* inactive.
*/
private final long streamInactivityTimeoutNanos;
/** The set of currently open output streams. */
@GuardedBy("lock")
private final HashSet<OutStream> openOutputStreams;
/** The set of currently open input streams. */
@GuardedBy("lock")
private final HashSet<InStream> openInputStreams;
/** The number of output streams reserved to be opened. */
@GuardedBy("lock")
private int numReservedOutputStreams;
/** The number of input streams reserved to be opened. */
@GuardedBy("lock")
private int numReservedInputStreams;
// ------------------------------------------------------------------------
/**
* Creates a new output connection limiting file system.
*
* <p>If streams are inactive (meaning not writing bytes) for longer than the given timeout,
* then they are terminated as "inactive", to prevent that the limited number of connections
* gets stuck on only blocked threads.
*
* @param originalFs The original file system to which connections are limited.
* @param maxNumOpenStreamsTotal The maximum number of concurrent open streams (0 means no
* limit).
*/
public LimitedConnectionsFileSystem(FileSystem originalFs, int maxNumOpenStreamsTotal) {
this(originalFs, maxNumOpenStreamsTotal, 0, 0);
}
/**
* Creates a new output connection limiting file system.
*
* <p>If streams are inactive (meaning not writing bytes) for longer than the given timeout,
* then they are terminated as "inactive", to prevent that the limited number of connections
* gets stuck on only blocked threads.
*
* @param originalFs The original file system to which connections are limited.
* @param maxNumOpenStreamsTotal The maximum number of concurrent open streams (0 means no
* limit).
* @param streamOpenTimeout The maximum number of milliseconds that the file system will wait
* when no more connections are currently permitted.
* @param streamInactivityTimeout The milliseconds that a stream may spend not writing any bytes
* before it is closed as inactive.
*/
public LimitedConnectionsFileSystem(
FileSystem originalFs,
int maxNumOpenStreamsTotal,
long streamOpenTimeout,
long streamInactivityTimeout) {
this(originalFs, maxNumOpenStreamsTotal, 0, 0, streamOpenTimeout, streamInactivityTimeout);
}
/**
* Creates a new output connection limiting file system, limiting input and output streams with
* potentially different quotas.
*
* <p>If streams are inactive (meaning not writing bytes) for longer than the given timeout,
* then they are terminated as "inactive", to prevent that the limited number of connections
* gets stuck on only blocked threads.
*
* @param originalFs The original file system to which connections are limited.
* @param maxNumOpenStreamsTotal The maximum number of concurrent open streams (0 means no
* limit).
* @param maxNumOpenOutputStreams The maximum number of concurrent open output streams (0 means
* no limit).
* @param maxNumOpenInputStreams The maximum number of concurrent open input streams (0 means no
* limit).
* @param streamOpenTimeout The maximum number of milliseconds that the file system will wait
* when no more connections are currently permitted.
* @param streamInactivityTimeout The milliseconds that a stream may spend not writing any bytes
* before it is closed as inactive.
*/
public LimitedConnectionsFileSystem(
FileSystem originalFs,
int maxNumOpenStreamsTotal,
int maxNumOpenOutputStreams,
int maxNumOpenInputStreams,
long streamOpenTimeout,
long streamInactivityTimeout) {
checkArgument(maxNumOpenStreamsTotal >= 0, "maxNumOpenStreamsTotal must be >= 0");
checkArgument(maxNumOpenOutputStreams >= 0, "maxNumOpenOutputStreams must be >= 0");
checkArgument(maxNumOpenInputStreams >= 0, "maxNumOpenInputStreams must be >= 0");
checkArgument(
streamOpenTimeout >= 0,
"stream opening timeout must be >= 0 (0 means infinite timeout)");
checkArgument(
streamInactivityTimeout >= 0,
"stream inactivity timeout must be >= 0 (0 means infinite timeout)");
this.originalFs = checkNotNull(originalFs, "originalFs");
this.lock = new ReentrantLock(true);
this.available = lock.newCondition();
this.openOutputStreams = new HashSet<>();
this.openInputStreams = new HashSet<>();
this.maxNumOpenStreamsTotal = maxNumOpenStreamsTotal;
this.maxNumOpenOutputStreams = maxNumOpenOutputStreams;
this.maxNumOpenInputStreams = maxNumOpenInputStreams;
// assign nanos overflow aware
final long openTimeoutNanos = streamOpenTimeout * 1_000_000;
final long inactivityTimeoutNanos = streamInactivityTimeout * 1_000_000;
this.streamOpenTimeoutNanos =
openTimeoutNanos >= streamOpenTimeout ? openTimeoutNanos : Long.MAX_VALUE;
this.streamInactivityTimeoutNanos =
inactivityTimeoutNanos >= streamInactivityTimeout
? inactivityTimeoutNanos
: Long.MAX_VALUE;
}
// ------------------------------------------------------------------------
/** Gets the maximum number of concurrently open output streams. */
public int getMaxNumOpenOutputStreams() {
return maxNumOpenOutputStreams;
}
/** Gets the maximum number of concurrently open input streams. */
public int getMaxNumOpenInputStreams() {
return maxNumOpenInputStreams;
}
/** Gets the maximum number of concurrently open streams (input + output). */
public int getMaxNumOpenStreamsTotal() {
return maxNumOpenStreamsTotal;
}
/**
* Gets the number of milliseconds that a opening a stream may wait for availability in the
* connection pool.
*/
public long getStreamOpenTimeout() {
return streamOpenTimeoutNanos / 1_000_000;
}
/**
* Gets the milliseconds that a stream may spend not writing any bytes before it is closed as
* inactive.
*/
public long getStreamInactivityTimeout() {
return streamInactivityTimeoutNanos / 1_000_000;
}
/** Gets the total number of open streams (input plus output). */
public int getTotalNumberOfOpenStreams() {
lock.lock();
try {
return numReservedOutputStreams + numReservedInputStreams;
} finally {
lock.unlock();
}
}
/** Gets the number of currently open output streams. */
public int getNumberOfOpenOutputStreams() {
lock.lock();
try {
return numReservedOutputStreams;
} finally {
lock.unlock();
}
}
/** Gets the number of currently open input streams. */
public int getNumberOfOpenInputStreams() {
return numReservedInputStreams;
}
// ------------------------------------------------------------------------
// input & output stream opening methods
// ------------------------------------------------------------------------
@Override
public FSDataOutputStream create(Path f, WriteMode overwriteMode) throws IOException {
return createOutputStream(() -> originalFs.create(f, overwriteMode));
}
@Override
@Deprecated
@SuppressWarnings("deprecation")
public FSDataOutputStream create(
Path f, boolean overwrite, int bufferSize, short replication, long blockSize)
throws IOException {
return createOutputStream(
() -> originalFs.create(f, overwrite, bufferSize, replication, blockSize));
}
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
return createInputStream(() -> originalFs.open(f, bufferSize));
}
@Override
public FSDataInputStream open(Path f) throws IOException {
return createInputStream(() -> originalFs.open(f));
}
private FSDataOutputStream createOutputStream(
final SupplierWithException<FSDataOutputStream, IOException> streamOpener)
throws IOException {
final SupplierWithException<OutStream, IOException> wrappedStreamOpener =
() -> new OutStream(streamOpener.get(), this);
return createStream(wrappedStreamOpener, openOutputStreams, true);
}
private FSDataInputStream createInputStream(
final SupplierWithException<FSDataInputStream, IOException> streamOpener)
throws IOException {
final SupplierWithException<InStream, IOException> wrappedStreamOpener =
() -> new InStream(streamOpener.get(), this);
return createStream(wrappedStreamOpener, openInputStreams, false);
}
// ------------------------------------------------------------------------
// other delegating file system methods
// ------------------------------------------------------------------------
@Override
public boolean isDistributedFS() {
return originalFs.isDistributedFS();
}
@Override
public Path getWorkingDirectory() {
return originalFs.getWorkingDirectory();
}
@Override
public Path getHomeDirectory() {
return originalFs.getHomeDirectory();
}
@Override
public URI getUri() {
return originalFs.getUri();
}
@Override
public FileStatus getFileStatus(Path f) throws IOException {
return originalFs.getFileStatus(f);
}
@Override
public BlockLocation[] getFileBlockLocations(FileStatus file, long start, long len)
throws IOException {
return originalFs.getFileBlockLocations(file, start, len);
}
@Override
public FileStatus[] listStatus(Path f) throws IOException {
return originalFs.listStatus(f);
}
@Override
public boolean delete(Path f, boolean recursive) throws IOException {
return originalFs.delete(f, recursive);
}
@Override
public boolean mkdirs(Path f) throws IOException {
return originalFs.mkdirs(f);
}
@Override
public boolean rename(Path src, Path dst) throws IOException {
return originalFs.rename(src, dst);
}
@Override
public boolean exists(Path f) throws IOException {
return originalFs.exists(f);
}
@Override
@Deprecated
@SuppressWarnings("deprecation")
public long getDefaultBlockSize() {
return originalFs.getDefaultBlockSize();
}
// ------------------------------------------------------------------------
private <T extends StreamWithTimeout> T createStream(
final SupplierWithException<T, IOException> streamOpener,
final HashSet<T> openStreams,
final boolean output)
throws IOException {
final int outputLimit =
output && maxNumOpenOutputStreams > 0 ? maxNumOpenOutputStreams : Integer.MAX_VALUE;
final int inputLimit =
!output && maxNumOpenInputStreams > 0 ? maxNumOpenInputStreams : Integer.MAX_VALUE;
final int totalLimit =
maxNumOpenStreamsTotal > 0 ? maxNumOpenStreamsTotal : Integer.MAX_VALUE;
final int outputCredit = output ? 1 : 0;
final int inputCredit = output ? 0 : 1;
// because waiting for availability may take long, we need to be interruptible here
// and handle interrupted exceptions as I/O errors
// even though the code is written to make sure the lock is held for a short time only,
// making the lock acquisition interruptible helps to guard against the cases where
// a supposedly fast operation (like 'getPos()' on a stream) actually takes long.
try {
lock.lockInterruptibly();
try {
// some integrity checks
assert openOutputStreams.size() <= numReservedOutputStreams;
assert openInputStreams.size() <= numReservedInputStreams;
// wait until there are few enough streams so we can open another
waitForAvailability(totalLimit, outputLimit, inputLimit);
// We do not open the stream here in the locked scope because opening a stream
// could take a while. Holding the lock during that operation would block all
// concurrent
// attempts to try and open a stream, effectively serializing all calls to open the
// streams.
numReservedOutputStreams += outputCredit;
numReservedInputStreams += inputCredit;
} finally {
lock.unlock();
}
} catch (InterruptedException e) {
// restore interruption flag
Thread.currentThread().interrupt();
throw new IOException("interrupted before opening stream");
}
// open the stream outside the lock.
boolean success = false;
try {
final T out = streamOpener.get();
// add the stream to the set, need to re-acquire the lock
lock.lock();
try {
openStreams.add(out);
} finally {
lock.unlock();
}
// good, can now return cleanly
success = true;
return out;
} finally {
if (!success) {
// remove the reserved credit
// we must open this non-interruptibly, because this must succeed!
lock.lock();
try {
numReservedOutputStreams -= outputCredit;
numReservedInputStreams -= inputCredit;
available.signalAll();
} finally {
lock.unlock();
}
}
}
}
@GuardedBy("lock")
private void waitForAvailability(int totalLimit, int outputLimit, int inputLimit)
throws InterruptedException, IOException {
checkState(lock.isHeldByCurrentThread());
// compute the deadline of this operations
final long deadline;
if (streamOpenTimeoutNanos == 0) {
deadline = Long.MAX_VALUE;
} else {
long deadlineNanos = System.nanoTime() + streamOpenTimeoutNanos;
// check for overflow
deadline = deadlineNanos > 0 ? deadlineNanos : Long.MAX_VALUE;
}
// wait for available connections
long timeLeft;
if (streamInactivityTimeoutNanos == 0) {
// simple case: just wait
while ((timeLeft = (deadline - System.nanoTime())) > 0
&& !hasAvailability(totalLimit, outputLimit, inputLimit)) {
available.await(timeLeft, TimeUnit.NANOSECONDS);
}
} else {
// complex case: chase down inactive streams
final long checkIntervalNanos = (streamInactivityTimeoutNanos >>> 1) + 1;
long now;
while ((timeLeft = (deadline - (now = System.nanoTime()))) > 0
&& // while still within timeout
!hasAvailability(totalLimit, outputLimit, inputLimit)) {
// check all streams whether there in one that has been inactive for too long
if (!(closeInactiveStream(openOutputStreams, now)
|| closeInactiveStream(openInputStreams, now))) {
// only wait if we did not manage to close any stream.
// otherwise eagerly check again if we have availability now (we should have!)
long timeToWait = Math.min(checkIntervalNanos, timeLeft);
available.await(timeToWait, TimeUnit.NANOSECONDS);
}
}
}
// check for timeout
// we check availability again to catch cases where the timeout expired while waiting
// to re-acquire the lock
if (timeLeft <= 0 && !hasAvailability(totalLimit, outputLimit, inputLimit)) {
throw new IOException(
String.format(
"Timeout while waiting for an available stream/connection. "
+ "limits: total=%d, input=%d, output=%d ; Open: input=%d, output=%d ; timeout: %d ms",
maxNumOpenStreamsTotal,
maxNumOpenInputStreams,
maxNumOpenOutputStreams,
numReservedInputStreams,
numReservedOutputStreams,
getStreamOpenTimeout()));
}
}
@GuardedBy("lock")
private boolean hasAvailability(int totalLimit, int outputLimit, int inputLimit) {
return numReservedOutputStreams < outputLimit
&& numReservedInputStreams < inputLimit
&& numReservedOutputStreams + numReservedInputStreams < totalLimit;
}
@GuardedBy("lock")
private boolean closeInactiveStream(
HashSet<? extends StreamWithTimeout> streams, long nowNanos) {
for (StreamWithTimeout stream : streams) {
try {
final StreamProgressTracker tracker = stream.getProgressTracker();
// If the stream is closed already, it will be removed anyways, so we
// do not classify it as inactive. We also skip the check if another check happened
// too recently.
if (stream.isClosed()
|| nowNanos
< tracker.getLastCheckTimestampNanos()
+ streamInactivityTimeoutNanos) {
// interval since last check not yet over
return false;
} else if (!tracker.checkNewBytesAndMark(nowNanos)) {
stream.closeDueToTimeout();
return true;
}
} catch (StreamTimeoutException ignored) {
// may happen due to races
} catch (IOException e) {
// only log on debug level here, to avoid log spamming
LOG.debug("Could not check for stream progress to determine inactivity", e);
}
}
return false;
}
// ------------------------------------------------------------------------
/**
* Atomically removes the given output stream from the set of currently open output streams, and
* signals that new stream can now be opened.
*/
void unregisterOutputStream(OutStream stream) {
lock.lock();
try {
// only decrement if we actually remove the stream
if (openOutputStreams.remove(stream)) {
numReservedOutputStreams--;
available.signalAll();
}
} finally {
lock.unlock();
}
}
/**
* Atomically removes the given input stream from the set of currently open input streams, and
* signals that new stream can now be opened.
*/
void unregisterInputStream(InStream stream) {
lock.lock();
try {
// only decrement if we actually remove the stream
if (openInputStreams.remove(stream)) {
numReservedInputStreams--;
available.signalAll();
}
} finally {
lock.unlock();
}
}
// ------------------------------------------------------------------------
/** A special IOException, indicating a timeout in the data output stream. */
public static final | LimitedConnectionsFileSystem |
java | micronaut-projects__micronaut-core | http/src/main/java/io/micronaut/http/uri/UriBuilder.java | {
"start": 929,
"end": 4033
} | interface ____ {
/**
* Sets the URI fragment.
*
* @param fragment The fragment
* @return This builder
*/
@NonNull UriBuilder fragment(@Nullable String fragment);
/**
* Sets the URI scheme.
*
* @param scheme The scheme
* @return This builder
*/
@NonNull UriBuilder scheme(@Nullable String scheme);
/**
* Sets the URI user info.
*
* @param userInfo The use info
* @return This builder
*/
@NonNull UriBuilder userInfo(@Nullable String userInfo);
/**
* Sets the URI host.
*
* @param host The host to use
* @return This builder
*/
@NonNull UriBuilder host(@Nullable String host);
/**
* Sets the URI port.
*
* @param port The port to use
* @return This builder
*/
@NonNull UriBuilder port(int port);
/**
* Appends the given path to the existing path correctly handling '/'. If path is null it is simply ignored.
*
* @param path The path
* @return This builder
*/
@NonNull UriBuilder path(@Nullable String path);
/**
* Replaces the existing path if any. If path is null it is simply ignored.
*
* @param path The path
* @return This builder
*/
@NonNull UriBuilder replacePath(@Nullable String path);
/**
* Adds a query parameter for the give name and values. The values will be URI encoded.
* If either name or values are null the value will be ignored.
*
* @param name The name
* @param values The values
* @return This builder
*/
@NonNull UriBuilder queryParam(String name, Object... values);
/**
* Adds a query parameter for the give name and values. The values will be URI encoded.
* If either name or values are null the value will be ignored.
*
* @param name The name
* @param values The values
* @return This builder
*/
@NonNull UriBuilder replaceQueryParam(String name, Object... values);
/**
* The constructed URI.
*
* @return Build the URI
* @throws io.micronaut.http.exceptions.UriSyntaxException if the URI to be constructed is invalid
*/
@NonNull URI build();
/**
* Expands the URI if it is a template, using the given values.
*
* @param values Expands the URI with the given values.
* @return Build the URI
*/
@NonNull URI expand(Map<String, ? super Object> values);
/**
* Create a {@link UriBuilder} with the given base URI as a starting point.
*
* @param uri The URI
* @return The builder
*/
static @NonNull UriBuilder of(@NonNull URI uri) {
ArgumentUtils.requireNonNull("uri", uri);
return new DefaultUriBuilder(uri);
}
/**
* Create a {@link UriBuilder} with the given base URI as a starting point.
*
* @param uri The URI
* @return The builder
*/
static @NonNull UriBuilder of(@NonNull CharSequence uri) {
ArgumentUtils.requireNonNull("uri", uri);
return new DefaultUriBuilder(uri);
}
}
| UriBuilder |
java | apache__dubbo | dubbo-metrics/dubbo-tracing/src/test/java/org/apache/dubbo/tracing/utils/ObservationConventionUtils.java | {
"start": 1063,
"end": 2049
} | class ____ {
public static Invoker<?> getMockInvokerWithUrl() {
URL url = URL.valueOf(
"dubbo://127.0.0.1:12345/com.example.TestService?anyhost=true&application=test&category=providers&dubbo=2.0.2&generic=false&interface=com.example.TestService&methods=testMethod&pid=26716&side=provider×tamp=1633863896653");
Invoker<?> invoker = Mockito.mock(Invoker.class);
Mockito.when(invoker.getUrl()).thenReturn(url);
return invoker;
}
public static String getValueForKey(KeyValues keyValues, Object key)
throws NoSuchFieldException, IllegalAccessException {
Field f = KeyValues.class.getDeclaredField("sortedSet");
f.setAccessible(true);
KeyValue[] kv = (KeyValue[]) f.get(keyValues);
for (KeyValue keyValue : kv) {
if (keyValue.getKey().equals(key)) {
return keyValue.getValue();
}
}
return null;
}
}
| ObservationConventionUtils |
java | spring-projects__spring-security | saml2/saml2-service-provider/src/test/java/org/springframework/security/saml2/provider/service/web/authentication/logout/Saml2LogoutResponseFilterTests.java | {
"start": 2671,
"end": 9576
} | class ____ {
RelyingPartyRegistrationResolver relyingPartyRegistrationResolver = mock(RelyingPartyRegistrationResolver.class);
Saml2LogoutRequestRepository logoutRequestRepository = mock(Saml2LogoutRequestRepository.class);
Saml2LogoutResponseValidator logoutResponseValidator = mock(Saml2LogoutResponseValidator.class);
LogoutSuccessHandler logoutSuccessHandler = mock(LogoutSuccessHandler.class);
Saml2LogoutResponseFilter logoutResponseProcessingFilter = new Saml2LogoutResponseFilter(
this.relyingPartyRegistrationResolver, this.logoutResponseValidator, this.logoutSuccessHandler);
@BeforeEach
public void setUp() {
this.logoutResponseProcessingFilter.setLogoutRequestRepository(this.logoutRequestRepository);
}
@AfterEach
public void tearDown() {
SecurityContextHolder.clearContext();
}
@Test
public void doFilterWhenSamlResponsePostThenLogout() throws Exception {
Authentication authentication = new TestingAuthenticationToken("user", "password");
SecurityContextHolder.getContext().setAuthentication(authentication);
MockHttpServletRequest request = post("/logout/saml2/slo").param(Saml2ParameterNames.SAML_RESPONSE, "response")
.build();
MockHttpServletResponse response = new MockHttpServletResponse();
RelyingPartyRegistration registration = TestRelyingPartyRegistrations.full().build();
given(this.relyingPartyRegistrationResolver.resolve(request, "registration-id")).willReturn(registration);
Saml2LogoutRequest logoutRequest = Saml2LogoutRequest.withRelyingPartyRegistration(registration)
.samlRequest("request")
.build();
given(this.logoutRequestRepository.removeLogoutRequest(request, response)).willReturn(logoutRequest);
given(this.logoutResponseValidator.validate(any())).willReturn(Saml2LogoutValidatorResult.success());
this.logoutResponseProcessingFilter.doFilterInternal(request, response, new MockFilterChain());
verify(this.logoutResponseValidator).validate(any());
verify(this.logoutSuccessHandler).onLogoutSuccess(any(), any(), any());
}
@Test
public void doFilterWhenSamlResponseRedirectThenLogout() throws Exception {
Authentication authentication = new TestingAuthenticationToken("user", "password");
SecurityContextHolder.getContext().setAuthentication(authentication);
MockHttpServletRequest request = get("/logout/saml2/slo").build();
request.setParameter(Saml2ParameterNames.SAML_RESPONSE, "response");
MockHttpServletResponse response = new MockHttpServletResponse();
RelyingPartyRegistration registration = TestRelyingPartyRegistrations.full()
.singleLogoutServiceBinding(Saml2MessageBinding.REDIRECT)
.build();
given(this.relyingPartyRegistrationResolver.resolve(request, "registration-id")).willReturn(registration);
Saml2LogoutRequest logoutRequest = Saml2LogoutRequest.withRelyingPartyRegistration(registration)
.samlRequest("request")
.build();
given(this.logoutRequestRepository.removeLogoutRequest(request, response)).willReturn(logoutRequest);
given(this.logoutResponseValidator.validate(any())).willReturn(Saml2LogoutValidatorResult.success());
this.logoutResponseProcessingFilter.doFilterInternal(request, response, new MockFilterChain());
verify(this.logoutResponseValidator).validate(any());
verify(this.logoutSuccessHandler).onLogoutSuccess(any(), any(), any());
}
@Test
public void doFilterWhenRequestMismatchesThenNoLogout() throws Exception {
Authentication authentication = new TestingAuthenticationToken("user", "password");
SecurityContextHolder.getContext().setAuthentication(authentication);
MockHttpServletRequest request = post("/logout").param(Saml2ParameterNames.SAML_REQUEST, "request").build();
MockHttpServletResponse response = new MockHttpServletResponse();
this.logoutResponseProcessingFilter.doFilterInternal(request, response, new MockFilterChain());
verifyNoInteractions(this.logoutResponseValidator, this.logoutSuccessHandler);
}
@Test
public void doFilterWhenNoSamlRequestOrResponseThenNoLogout() throws Exception {
Authentication authentication = new TestingAuthenticationToken("user", "password");
SecurityContextHolder.getContext().setAuthentication(authentication);
MockHttpServletRequest request = post("/logout/saml2/slo").build();
MockHttpServletResponse response = new MockHttpServletResponse();
this.logoutResponseProcessingFilter.doFilterInternal(request, response, new MockFilterChain());
verifyNoInteractions(this.logoutResponseValidator, this.logoutSuccessHandler);
}
@Test
public void doFilterWhenValidatorFailsThenStops() throws Exception {
Authentication authentication = new TestingAuthenticationToken("user", "password");
SecurityContextHolder.getContext().setAuthentication(authentication);
MockHttpServletRequest request = post("/logout/saml2/slo").param(Saml2ParameterNames.SAML_RESPONSE, "response")
.build();
MockHttpServletResponse response = new MockHttpServletResponse();
RelyingPartyRegistration registration = TestRelyingPartyRegistrations.full().build();
given(this.relyingPartyRegistrationResolver.resolve(request, "registration-id")).willReturn(registration);
Saml2LogoutRequest logoutRequest = Saml2LogoutRequest.withRelyingPartyRegistration(registration)
.samlRequest("request")
.build();
given(this.logoutRequestRepository.removeLogoutRequest(request, response)).willReturn(logoutRequest);
given(this.logoutResponseValidator.validate(any()))
.willReturn(Saml2LogoutValidatorResult.withErrors(new Saml2Error("error", "description")).build());
this.logoutResponseProcessingFilter.doFilterInternal(request, response, new MockFilterChain());
verify(this.logoutResponseValidator).validate(any());
verifyNoInteractions(this.logoutSuccessHandler);
}
@Test
public void doFilterWhenNoRelyingPartyLogoutThen401() throws Exception {
Authentication authentication = new TestingAuthenticationToken("user", "password");
SecurityContextHolder.getContext().setAuthentication(authentication);
MockHttpServletRequest request = post("/logout/saml2/slo").param(Saml2ParameterNames.SAML_RESPONSE, "response")
.build();
MockHttpServletResponse response = new MockHttpServletResponse();
RelyingPartyRegistration registration = TestRelyingPartyRegistrations.full()
.singleLogoutServiceLocation(null)
.singleLogoutServiceResponseLocation(null)
.build();
given(this.relyingPartyRegistrationResolver.resolve(any(), any())).willReturn(registration);
Saml2LogoutRequest logoutRequest = Saml2LogoutRequest.withRelyingPartyRegistration(registration)
.samlRequest("request")
.build();
given(this.logoutRequestRepository.removeLogoutRequest(request, response)).willReturn(logoutRequest);
this.logoutResponseProcessingFilter.doFilterInternal(request, response, new MockFilterChain());
assertThat(response.getStatus()).isEqualTo(401);
verifyNoInteractions(this.logoutSuccessHandler);
}
}
| Saml2LogoutResponseFilterTests |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/sender/RequestExecutorService.java | {
"start": 20896,
"end": 29853
} | class ____ {
private static final TimeValue NO_TASKS_AVAILABLE = TimeValue.MAX_VALUE;
private static final TimeValue EXECUTED_A_TASK = TimeValue.ZERO;
private static final Logger logger = LogManager.getLogger(RateLimitingEndpointHandler.class);
private static final int ACCUMULATED_TOKENS_LIMIT = 1;
private final AdjustableCapacityBlockingQueue<RejectableTask> queue;
private final Supplier<Boolean> isShutdownMethod;
private final RequestSender requestSender;
private final String rateLimitGroupingId;
private final AtomicReference<Instant> timeOfLastEnqueue = new AtomicReference<>();
private final Clock clock;
private final RateLimiter rateLimiter;
private final RequestExecutorServiceSettings requestExecutorServiceSettings;
private final RateLimitSettings rateLimitSettings;
private final Long originalRequestsPerTimeUnit;
RateLimitingEndpointHandler(
String rateLimitGroupingId,
AdjustableCapacityBlockingQueue.QueueCreator<RejectableTask> createQueue,
RequestExecutorServiceSettings settings,
RequestSender requestSender,
Clock clock,
RateLimitSettings rateLimitSettings,
Supplier<Boolean> isShutdownMethod,
RateLimiterCreator rateLimiterCreator,
Integer rateLimitDivisor
) {
this.requestExecutorServiceSettings = Objects.requireNonNull(settings);
this.rateLimitGroupingId = Objects.requireNonNull(rateLimitGroupingId);
this.queue = new AdjustableCapacityBlockingQueue<>(createQueue, settings.getQueueCapacity());
this.requestSender = Objects.requireNonNull(requestSender);
this.clock = Objects.requireNonNull(clock);
this.isShutdownMethod = Objects.requireNonNull(isShutdownMethod);
this.rateLimitSettings = Objects.requireNonNull(rateLimitSettings);
this.originalRequestsPerTimeUnit = rateLimitSettings.requestsPerTimeUnit();
Objects.requireNonNull(rateLimitSettings);
Objects.requireNonNull(rateLimiterCreator);
rateLimiter = rateLimiterCreator.create(
ACCUMULATED_TOKENS_LIMIT,
rateLimitSettings.requestsPerTimeUnit(),
rateLimitSettings.timeUnit()
);
}
public void init() {
requestExecutorServiceSettings.registerQueueCapacityCallback(rateLimitGroupingId, this::onCapacityChange);
}
private void onCapacityChange(int capacity) {
logger.debug(
() -> Strings.format("Executor service grouping [%s] setting queue capacity to [%s]", rateLimitGroupingId, capacity)
);
try {
queue.setCapacity(capacity);
} catch (Exception e) {
logger.warn(
format(
"Executor service grouping [%s] failed to set the capacity of the task queue to [%s]",
rateLimitGroupingId,
capacity
),
e
);
}
}
public int queueSize() {
return queue.size();
}
public boolean isShutdown() {
return isShutdownMethod.get();
}
public Instant timeOfLastEnqueue() {
return timeOfLastEnqueue.get();
}
public synchronized TimeValue executeEnqueuedTask() {
try {
return executeEnqueuedTaskInternal();
} catch (Exception e) {
logger.warn(format("Executor service grouping [%s] failed to execute request", rateLimitGroupingId), e);
// we tried to do some work but failed, so we'll say we did something to try looking for more work
return EXECUTED_A_TASK;
}
}
private TimeValue executeEnqueuedTaskInternal() {
if (rateLimitSettings.isEnabled()) {
var timeBeforeAvailableToken = rateLimiter.timeToReserve(1);
if (shouldExecuteImmediately(timeBeforeAvailableToken) == false) {
return timeBeforeAvailableToken;
}
}
var task = queue.poll();
// TODO Batching - in a situation where no new tasks are queued we'll want to execute any prepared tasks
// So we'll need to check for null and call a helper method executePreparedTasks()
if (shouldExecuteTask(task) == false) {
return NO_TASKS_AVAILABLE;
}
if (rateLimitSettings.isEnabled()) {
// We should never have to wait because we checked above
var reserveRes = rateLimiter.reserve(1);
assert shouldExecuteImmediately(reserveRes) : "Reserving request tokens required a sleep when it should not have";
}
task.getRequestManager()
.execute(task.getInferenceInputs(), requestSender, task.getRequestCompletedFunction(), task.getListener());
return EXECUTED_A_TASK;
}
private static boolean shouldExecuteTask(RejectableTask task) {
return task != null && isNoopRequest(task) == false && task.hasCompleted() == false;
}
private static boolean isNoopRequest(InferenceRequest inferenceRequest) {
return inferenceRequest.getRequestManager() == null
|| inferenceRequest.getInferenceInputs() == null
|| inferenceRequest.getListener() == null;
}
private static boolean shouldExecuteImmediately(TimeValue delay) {
return delay.duration() == 0;
}
public void enqueue(RequestTask task) {
timeOfLastEnqueue.set(Instant.now(clock));
if (isShutdown()) {
EsRejectedExecutionException rejected = new EsRejectedExecutionException(
format(
"Failed to enqueue task for inference id [%s] because the request service [%s] has already shutdown",
task.getRequestManager().inferenceEntityId(),
rateLimitGroupingId
),
true
);
task.onRejection(rejected);
return;
}
var addedToQueue = queue.offer(task);
if (addedToQueue == false) {
EsRejectedExecutionException rejected = new EsRejectedExecutionException(
format(
"Failed to execute task for inference id [%s] because the request service [%s] queue is full",
task.getRequestManager().inferenceEntityId(),
rateLimitGroupingId
),
false
);
task.onRejection(rejected);
} else if (isShutdown()) {
notifyRequestsOfShutdown();
}
}
public synchronized void notifyRequestsOfShutdown() {
assert isShutdown() : "Requests should only be notified if the executor is shutting down";
try {
List<RejectableTask> notExecuted = new ArrayList<>();
queue.drainTo(notExecuted);
rejectTasks(notExecuted);
} catch (Exception e) {
logger.warn(format("Failed to notify tasks of executor service grouping [%s] shutdown", rateLimitGroupingId));
}
}
private void rejectTasks(List<RejectableTask> tasks) {
for (var task : tasks) {
var inferenceEntityId = task.getRequestManager().inferenceEntityId();
rejectRequest(
task,
format(
"Failed to send request, request service [%s] for inference id [%s] has shutdown prior to executing request",
rateLimitGroupingId,
inferenceEntityId
),
format(
"Failed to notify request for inference id [%s] of rejection after executor service grouping [%s] shutdown",
inferenceEntityId,
rateLimitGroupingId
)
);
}
}
public int remainingCapacity() {
return queue.remainingCapacity();
}
public void close() {
requestExecutorServiceSettings.deregisterQueueCapacityCallback(rateLimitGroupingId);
}
}
private static final RejectableTask NOOP_TASK = new RejectableTask() {
@Override
public void onRejection(Exception e) {
throw new UnsupportedOperationException("NoopTask is a pure marker | RateLimitingEndpointHandler |
java | elastic__elasticsearch | modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongAsyncCounterAdapter.java | {
"start": 1409,
"end": 2136
} | class ____ extends AbstractInstrument.Builder<ObservableLongCounter> {
private final Supplier<Collection<LongWithAttributes>> observer;
private Builder(String name, String description, String unit, Supplier<Collection<LongWithAttributes>> observer) {
super(name, description, unit);
this.observer = Objects.requireNonNull(observer);
}
@Override
public ObservableLongCounter build(Meter meter) {
return Objects.requireNonNull(meter)
.counterBuilder(name)
.setDescription(description)
.setUnit(unit)
.buildWithCallback(OtelHelper.longMeasurementCallback(observer));
}
}
}
| Builder |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/test/http/Headers.java | {
"start": 657,
"end": 750
} | class ____ {
final com.sun.net.httpserver.Headers headers;
/**
* Creates a | Headers |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/BufferReaderWriterUtil.java | {
"start": 1802,
"end": 1925
} | class ____ write to a file
* and read from the byte buffer that results from mapping this file to memory.
*/
public final | can |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/metrics/DefaultCommandLatencyCollector.java | {
"start": 9951,
"end": 10689
} | class ____ {
private final LatencyStats firstResponse;
private final LatencyStats completion;
Latencies(PauseDetector pauseDetector) {
firstResponse = LatencyStats.Builder.create().pauseDetector(pauseDetector).build();
completion = LatencyStats.Builder.create().pauseDetector(pauseDetector).build();
}
public Histogram getFirstResponseHistogram() {
return firstResponse.getIntervalHistogram();
}
public Histogram getCompletionHistogram() {
return completion.getIntervalHistogram();
}
public void stop() {
firstResponse.stop();
completion.stop();
}
}
private static | Latencies |
java | spring-projects__spring-security | web/src/main/java/org/springframework/security/web/util/RedirectUrlBuilder.java | {
"start": 774,
"end": 931
} | class ____ building redirect URLs.
*
* Could probably make more use of the classes in java.net for this.
*
* @author Luke Taylor
* @since 2.0
*/
public | for |
java | spring-projects__spring-boot | module/spring-boot-batch-jdbc/src/test/java/org/springframework/boot/batch/jdbc/autoconfigure/BatchJdbcAutoConfigurationTests.java | {
"start": 31445,
"end": 31815
} | class ____ {
@Bean
@Order(1)
BatchConversionServiceCustomizer batchConversionServiceCustomizer() {
return mock(BatchConversionServiceCustomizer.class);
}
@Bean
@Order(2)
BatchConversionServiceCustomizer anotherBatchConversionServiceCustomizer() {
return mock(BatchConversionServiceCustomizer.class);
}
}
}
| ConversionServiceCustomizersConfiguration |
java | google__guava | android/guava-tests/test/com/google/common/net/InternetDomainNameTest.java | {
"start": 1403,
"end": 18011
} | class ____ extends TestCase {
private static final InternetDomainName UNICODE_EXAMPLE =
InternetDomainName.from("j\u00f8rpeland.no");
private static final InternetDomainName PUNYCODE_EXAMPLE =
InternetDomainName.from("xn--jrpeland-54a.no");
/** The Greek letter delta, used in unicode testing. */
private static final String DELTA = "\u0394";
/** A domain part which is valid under lenient validation, but invalid under strict validation. */
@SuppressWarnings("InlineMeInliner") // String.repeat unavailable under Java 8
static final String LOTS_OF_DELTAS = Strings.repeat(DELTA, 62);
@SuppressWarnings("InlineMeInliner") // String.repeat unavailable under Java 8
private static final String ALMOST_TOO_MANY_LEVELS = Strings.repeat("a.", 127);
@SuppressWarnings("InlineMeInliner") // String.repeat unavailable under Java 8
private static final String ALMOST_TOO_LONG = Strings.repeat("aaaaa.", 40) + "1234567890.c";
private static final ImmutableSet<String> VALID_NAME =
ImmutableSet.of(
// keep-sorted start
"123.cn",
"8server.shop",
"a" + DELTA + "b.com",
"abc.a23",
"biz.com.ua",
"f--1.com",
"f--o",
"f-_-o.cOM",
"f11-1.com",
"fOo",
"f_a",
"foo.com",
"foo.net.us\uFF61ocm",
"woo.com.",
"www",
"x",
ALMOST_TOO_LONG,
ALMOST_TOO_MANY_LEVELS
// keep-sorted end
);
private static final ImmutableSet<String> INVALID_NAME =
ImmutableSet.of(
// keep-sorted start
" ",
"",
".",
"..",
"...",
"..bar.com",
"..quiffle.com",
".foo.com",
"127.0.0.1",
"13",
"::1",
"_bar.quux",
"a" + DELTA + " .com",
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.com",
"abc.12c",
"baz..com",
"fleeb.com..",
"foo!bar.com",
"foo+bar.com",
"foo-.com",
ALMOST_TOO_LONG + ".c",
ALMOST_TOO_MANY_LEVELS + "com"
// keep-sorted end
);
private static final ImmutableSet<String> RS =
ImmutableSet.of(
// keep-sorted start
"\u7f51\u7edc.Cn", // "网络.Cn"
"co.uk",
"co.uk.", // Trailing dot
"co\uFF61uk", // Alternate dot character
"com",
"gov.ck",
"org.ck",
"org.mK",
"us",
// keep-sorted end
"j\u00f8rpeland.no", // "jorpeland.no" (first o slashed)
"xn--jrpeland-54a.no" // IDNA (punycode) encoding of above
);
private static final ImmutableSet<String> PS_NOT_RS = ImmutableSet.of("blogspot.com", "uk.com");
private static final ImmutableSet<String> PS =
ImmutableSet.<String>builder().addAll(RS).addAll(PS_NOT_RS).build();
private static final ImmutableSet<String> NO_PS =
ImmutableSet.of("www", "foo.ihopethiswillneverbeapublicsuffix", "x.y.z");
/**
* Having a public suffix is equivalent to having a registry suffix, because all registry suffixes
* are public suffixes, and all public suffixes have registry suffixes.
*/
private static final ImmutableSet<String> NO_RS = NO_PS;
private static final ImmutableSet<String> NON_PS =
ImmutableSet.of(
// keep-sorted start
"dominio.com.co",
"foo.bar.ca",
"foo.bar.co.il",
"foo.bar.com",
"foo.blogspot.co.uk",
"foo.blogspot.com",
"foo.ca",
"foo.eDu.au",
"foo.uk.com",
"home.netscape.com",
"pvt.k12.ca.us",
"state.CA.us",
"utenti.blah.IT",
"web.MIT.edu",
"www.google.com",
"www.state.pa.us",
"www4.yahoo.co.uk"
// keep-sorted end
);
private static final ImmutableSet<String> NON_RS =
ImmutableSet.<String>builder().addAll(NON_PS).addAll(PS_NOT_RS).build();
private static final ImmutableSet<String> TOP_UNDER_REGISTRY_SUFFIX =
ImmutableSet.of("google.com", "foo.Co.uk", "foo.ca.us.");
private static final ImmutableSet<String> TOP_PRIVATE_DOMAIN =
ImmutableSet.of("google.com", "foo.Co.uk", "foo.ca.us.", "foo.blogspot.com");
private static final ImmutableSet<String> UNDER_TOP_UNDER_REGISTRY_SUFFIX =
ImmutableSet.of("foo.bar.google.com", "a.b.co.uk", "x.y.ca.us");
private static final ImmutableSet<String> UNDER_PRIVATE_DOMAIN =
ImmutableSet.of("foo.bar.google.com", "a.b.co.uk", "x.y.ca.us", "a.b.blogspot.com");
private static final ImmutableSet<String> VALID_IP_ADDRS =
ImmutableSet.of("1.2.3.4", "127.0.0.1", "::1", "2001:db8::1");
private static final ImmutableSet<String> INVALID_IP_ADDRS =
ImmutableSet.of(
"", "1", "1.2.3", "...", "1.2.3.4.5", "400.500.600.700", ":", ":::1", "2001:db8:");
private static final ImmutableSet<String> SOMEWHERE_UNDER_PS =
ImmutableSet.of(
// keep-sorted start
"1.fm",
"a.b.c.1.2.3.ca.us",
"a\u7f51\u7edcA.\u7f51\u7edc.Cn", // "a网络A.网络.Cn"
"cnn.ca",
"cool.co.uk",
"cool.de",
"cool.dk",
"cool.es",
"cool.nl",
"cool.se",
"cool\uFF61fr", // Alternate dot character
"foo.bar.google.com",
"google.Co.uK",
"google.com",
"home.netscape.com",
"it-trace.ch",
"jobs.kt.com.",
"jprs.co.jp",
"kt.co",
"ledger-enquirer.com",
"members.blah.nl.",
"pvt.k12.ca.us",
"site.ac.jp",
"site.ad.jp",
"site.cc",
"site.ed.jp",
"site.ee",
"site.fi",
"site.fm",
"site.geo.jp",
"site.go.jp",
"site.gr",
"site.gr.jp",
"site.jp",
"site.lg.jp",
"site.ma",
"site.mk",
"site.ne.jp",
"site.or.jp",
"site.quick.jp",
"site.tenki.jp",
"site.tv",
"site.us",
"some.org.mk",
"stanford.edu",
"state.ca.us",
"uomi-online.kir.jp",
"utenti.blah.it",
"web.stanford.edu",
"www.GOOGLE.com",
"www.com",
"www.leguide.ma",
"www.odev.us",
"www.rave.ca.",
"www.state.ca.us",
"www7.google.co.uk"
// keep-sorted end
);
private static final ImmutableSet<String> SOMEWHERE_UNDER_RS =
ImmutableSet.<String>builder().addAll(SOMEWHERE_UNDER_PS).addAll(PS_NOT_RS).build();
public void testValid() {
for (String name : VALID_NAME) {
InternetDomainName unused = InternetDomainName.from(name);
}
}
public void testInvalid() {
for (String name : INVALID_NAME) {
assertThrows(IllegalArgumentException.class, () -> InternetDomainName.from(name));
}
}
public void testPublicSuffix() {
for (String name : PS) {
InternetDomainName domain = InternetDomainName.from(name);
assertTrue(name, domain.isPublicSuffix());
assertTrue(name, domain.hasPublicSuffix());
assertFalse(name, domain.isUnderPublicSuffix());
assertFalse(name, domain.isTopPrivateDomain());
assertEquals(domain, domain.publicSuffix());
}
for (String name : NO_PS) {
InternetDomainName domain = InternetDomainName.from(name);
assertFalse(name, domain.isPublicSuffix());
assertFalse(name, domain.hasPublicSuffix());
assertFalse(name, domain.isUnderPublicSuffix());
assertFalse(name, domain.isTopPrivateDomain());
assertThat(domain.publicSuffix()).isNull();
}
for (String name : NON_PS) {
InternetDomainName domain = InternetDomainName.from(name);
assertFalse(name, domain.isPublicSuffix());
assertTrue(name, domain.hasPublicSuffix());
assertTrue(name, domain.isUnderPublicSuffix());
}
}
public void testUnderPublicSuffix() {
for (String name : SOMEWHERE_UNDER_PS) {
InternetDomainName domain = InternetDomainName.from(name);
assertFalse(name, domain.isPublicSuffix());
assertTrue(name, domain.hasPublicSuffix());
assertTrue(name, domain.isUnderPublicSuffix());
}
}
public void testTopPrivateDomain() {
for (String name : TOP_PRIVATE_DOMAIN) {
InternetDomainName domain = InternetDomainName.from(name);
assertFalse(name, domain.isPublicSuffix());
assertTrue(name, domain.hasPublicSuffix());
assertTrue(name, domain.isUnderPublicSuffix());
assertTrue(name, domain.isTopPrivateDomain());
assertEquals(domain.parent(), domain.publicSuffix());
}
}
public void testUnderPrivateDomain() {
for (String name : UNDER_PRIVATE_DOMAIN) {
InternetDomainName domain = InternetDomainName.from(name);
assertFalse(name, domain.isPublicSuffix());
assertTrue(name, domain.hasPublicSuffix());
assertTrue(name, domain.isUnderPublicSuffix());
assertFalse(name, domain.isTopPrivateDomain());
}
}
public void testRegistrySuffix() {
for (String name : RS) {
InternetDomainName domain = InternetDomainName.from(name);
assertTrue(name, domain.isRegistrySuffix());
assertTrue(name, domain.hasRegistrySuffix());
assertFalse(name, domain.isUnderRegistrySuffix());
assertFalse(name, domain.isTopDomainUnderRegistrySuffix());
assertEquals(domain, domain.registrySuffix());
}
for (String name : NO_RS) {
InternetDomainName domain = InternetDomainName.from(name);
assertFalse(name, domain.isRegistrySuffix());
assertFalse(name, domain.hasRegistrySuffix());
assertFalse(name, domain.isUnderRegistrySuffix());
assertFalse(name, domain.isTopDomainUnderRegistrySuffix());
assertThat(domain.registrySuffix()).isNull();
}
for (String name : NON_RS) {
InternetDomainName domain = InternetDomainName.from(name);
assertFalse(name, domain.isRegistrySuffix());
assertTrue(name, domain.hasRegistrySuffix());
assertTrue(name, domain.isUnderRegistrySuffix());
}
}
public void testUnderRegistrySuffix() {
for (String name : SOMEWHERE_UNDER_RS) {
InternetDomainName domain = InternetDomainName.from(name);
assertFalse(name, domain.isRegistrySuffix());
assertTrue(name, domain.hasRegistrySuffix());
assertTrue(name, domain.isUnderRegistrySuffix());
}
}
public void testTopDomainUnderRegistrySuffix() {
for (String name : TOP_UNDER_REGISTRY_SUFFIX) {
InternetDomainName domain = InternetDomainName.from(name);
assertFalse(name, domain.isRegistrySuffix());
assertTrue(name, domain.hasRegistrySuffix());
assertTrue(name, domain.isUnderRegistrySuffix());
assertTrue(name, domain.isTopDomainUnderRegistrySuffix());
assertEquals(domain.parent(), domain.registrySuffix());
}
}
public void testUnderTopDomainUnderRegistrySuffix() {
for (String name : UNDER_TOP_UNDER_REGISTRY_SUFFIX) {
InternetDomainName domain = InternetDomainName.from(name);
assertFalse(name, domain.isRegistrySuffix());
assertTrue(name, domain.hasRegistrySuffix());
assertTrue(name, domain.isUnderRegistrySuffix());
assertFalse(name, domain.isTopDomainUnderRegistrySuffix());
}
}
public void testParent() {
assertEquals("com", InternetDomainName.from("google.com").parent().toString());
assertEquals("uk", InternetDomainName.from("co.uk").parent().toString());
assertEquals("google.com", InternetDomainName.from("www.google.com").parent().toString());
assertThrows(IllegalStateException.class, () -> InternetDomainName.from("com").parent());
}
public void testChild() {
InternetDomainName domain = InternetDomainName.from("foo.com");
assertEquals("www.foo.com", domain.child("www").toString());
assertThrows(IllegalArgumentException.class, () -> domain.child("www."));
}
public void testParentChild() {
InternetDomainName origin = InternetDomainName.from("foo.com");
InternetDomainName parent = origin.parent();
assertEquals("com", parent.toString());
// These would throw an exception if leniency were not preserved during parent() and child()
// calls.
InternetDomainName child = parent.child(LOTS_OF_DELTAS);
InternetDomainName unused = child.child(LOTS_OF_DELTAS);
}
public void testValidTopPrivateDomain() {
InternetDomainName googleDomain = InternetDomainName.from("google.com");
assertEquals(googleDomain, googleDomain.topPrivateDomain());
assertEquals(googleDomain, googleDomain.child("mail").topPrivateDomain());
assertEquals(googleDomain, googleDomain.child("foo.bar").topPrivateDomain());
}
public void testInvalidTopPrivateDomain() {
ImmutableSet<String> badCookieDomains = ImmutableSet.of("co.uk", "foo", "com");
for (String domain : badCookieDomains) {
assertThrows(
IllegalStateException.class, () -> InternetDomainName.from(domain).topPrivateDomain());
}
}
public void testIsValid() {
Iterable<String> validCases = Iterables.concat(VALID_NAME, PS, NO_PS, NON_PS);
Iterable<String> invalidCases =
Iterables.concat(INVALID_NAME, VALID_IP_ADDRS, INVALID_IP_ADDRS);
for (String valid : validCases) {
assertTrue(valid, InternetDomainName.isValid(valid));
}
for (String invalid : invalidCases) {
assertFalse(invalid, InternetDomainName.isValid(invalid));
}
}
public void testToString() {
for (String inputName : SOMEWHERE_UNDER_PS) {
InternetDomainName domain = InternetDomainName.from(inputName);
/*
* We would ordinarily use constants for the expected results, but
* doing it by derivation allows us to reuse the test case definitions
* used in other tests.
*/
String expectedName = Ascii.toLowerCase(inputName);
expectedName = expectedName.replaceAll("[\u3002\uFF0E\uFF61]", ".");
if (expectedName.endsWith(".")) {
expectedName = expectedName.substring(0, expectedName.length() - 1);
}
assertEquals(expectedName, domain.toString());
}
}
public void testPublicSuffixExclusion() {
InternetDomainName domain = InternetDomainName.from("foo.city.yokohama.jp");
assertTrue(domain.hasPublicSuffix());
assertEquals("yokohama.jp", domain.publicSuffix().toString());
// Behold the weirdness!
assertFalse(domain.publicSuffix().isPublicSuffix());
}
public void testPublicSuffixMultipleUnders() {
// PSL has both *.uk and *.sch.uk; the latter should win.
// See https://github.com/google/guava/issues/1176
InternetDomainName domain = InternetDomainName.from("www.essex.sch.uk");
assertTrue(domain.hasPublicSuffix());
assertEquals("essex.sch.uk", domain.publicSuffix().toString());
assertEquals("www.essex.sch.uk", domain.topPrivateDomain().toString());
}
public void testRegistrySuffixExclusion() {
InternetDomainName domain = InternetDomainName.from("foo.city.yokohama.jp");
assertTrue(domain.hasRegistrySuffix());
assertEquals("yokohama.jp", domain.registrySuffix().toString());
// Behold the weirdness!
assertFalse(domain.registrySuffix().isRegistrySuffix());
}
public void testRegistrySuffixMultipleUnders() {
// PSL has both *.uk and *.sch.uk; the latter should win.
// See https://github.com/google/guava/issues/1176
InternetDomainName domain = InternetDomainName.from("www.essex.sch.uk");
assertTrue(domain.hasRegistrySuffix());
assertEquals("essex.sch.uk", domain.registrySuffix().toString());
assertEquals("www.essex.sch.uk", domain.topDomainUnderRegistrySuffix().toString());
}
public void testEquality() {
new EqualsTester()
.addEqualityGroup(idn("google.com"), idn("google.com"), idn("GOOGLE.COM"))
.addEqualityGroup(idn("www.google.com"))
.addEqualityGroup(UNICODE_EXAMPLE)
.addEqualityGroup(PUNYCODE_EXAMPLE)
.testEquals();
}
private static InternetDomainName idn(String domain) {
return InternetDomainName.from(domain);
}
@J2ktIncompatible
@GwtIncompatible // NullPointerTester
public void testNulls() {
NullPointerTester tester = new NullPointerTester();
tester.testAllPublicStaticMethods(InternetDomainName.class);
tester.testAllPublicInstanceMethods(InternetDomainName.from("google.com"));
}
}
| InternetDomainNameTest |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/filter/ProblemHandlerUnknownTypeId2221Test.java | {
"start": 1036,
"end": 1329
} | class ____ {
private Collection innerObjects;
public Collection getInnerObjects() {
return innerObjects;
}
public void setInnerObjects(Collection innerObjects) {
this.innerObjects = innerObjects;
}
}
static | GenericContent |
java | apache__kafka | metadata/src/test/java/org/apache/kafka/metadata/authorizer/StandardAuthorizerPropertyTest.java | {
"start": 2455,
"end": 2693
} | class ____ {
@Target({ ElementType.ANNOTATION_TYPE, ElementType.PARAMETER, ElementType.TYPE_USE })
@Retention(RetentionPolicy.RUNTIME)
@AlphaChars @NumericChars @Chars({ '_', '-', '.' })
public @ | StandardAuthorizerPropertyTest |
java | spring-projects__spring-boot | integration-test/spring-boot-actuator-integration-tests/src/test/java/org/springframework/boot/actuate/management/ThreadDumpEndpointWebIntegrationTests.java | {
"start": 1174,
"end": 2039
} | class ____ {
@WebEndpointTest
void getRequestWithJsonAcceptHeaderShouldProduceJsonThreadDumpResponse(WebTestClient client) {
client.get()
.uri("/actuator/threaddump")
.accept(MediaType.APPLICATION_JSON)
.exchange()
.expectStatus()
.isOk()
.expectHeader()
.contentType(MediaType.APPLICATION_JSON);
}
@WebEndpointTest
void getRequestWithTextPlainAcceptHeaderShouldProduceTextPlainResponse(WebTestClient client) {
String response = client.get()
.uri("/actuator/threaddump")
.accept(MediaType.TEXT_PLAIN)
.exchange()
.expectStatus()
.isOk()
.expectHeader()
.contentType("text/plain;charset=UTF-8")
.expectBody(String.class)
.returnResult()
.getResponseBody();
assertThat(response).contains("Full thread dump");
}
@Configuration(proxyBeanMethods = false)
public static | ThreadDumpEndpointWebIntegrationTests |
java | apache__spark | common/variant/src/main/java/org/apache/spark/types/variant/VariantUtil.java | {
"start": 21902,
"end": 23818
} | interface ____<T> {
/**
* @param size Number of object fields.
* @param idSize The integer size of the field id list.
* @param offsetSize The integer size of the offset list.
* @param idStart The starting index of the field id list in the variant value array.
* @param offsetStart The starting index of the offset list in the variant value array.
* @param dataStart The starting index of field data in the variant value array.
*/
T apply(int size, int idSize, int offsetSize, int idStart, int offsetStart, int dataStart);
}
// A helper function to access a variant object. It provides `handler` with its required
// parameters and returns what it returns.
public static <T> T handleObject(byte[] value, int pos, ObjectHandler<T> handler) {
checkIndex(pos, value.length);
int basicType = value[pos] & BASIC_TYPE_MASK;
int typeInfo = (value[pos] >> BASIC_TYPE_BITS) & TYPE_INFO_MASK;
if (basicType != OBJECT) throw unexpectedType(Type.OBJECT);
// Refer to the comment of the `OBJECT` constant for the details of the object header encoding.
// Suppose `typeInfo` has a bit representation of 0_b4_b3b2_b1b0, the following line extracts
// b4 to determine whether the object uses a 1/4-byte size.
boolean largeSize = ((typeInfo >> 4) & 0x1) != 0;
int sizeBytes = (largeSize ? U32_SIZE : 1);
int size = readUnsigned(value, pos + 1, sizeBytes);
// Extracts b3b2 to determine the integer size of the field id list.
int idSize = ((typeInfo >> 2) & 0x3) + 1;
// Extracts b1b0 to determine the integer size of the offset list.
int offsetSize = (typeInfo & 0x3) + 1;
int idStart = pos + 1 + sizeBytes;
int offsetStart = idStart + size * idSize;
int dataStart = offsetStart + (size + 1) * offsetSize;
return handler.apply(size, idSize, offsetSize, idStart, offsetStart, dataStart);
}
public | ObjectHandler |
java | greenrobot__EventBus | EventBus/src/org/greenrobot/eventbus/meta/SimpleSubscriberInfo.java | {
"start": 873,
"end": 1698
} | class ____ extends AbstractSubscriberInfo {
private final SubscriberMethodInfo[] methodInfos;
public SimpleSubscriberInfo(Class subscriberClass, boolean shouldCheckSuperclass, SubscriberMethodInfo[] methodInfos) {
super(subscriberClass, null, shouldCheckSuperclass);
this.methodInfos = methodInfos;
}
@Override
public synchronized SubscriberMethod[] getSubscriberMethods() {
int length = methodInfos.length;
SubscriberMethod[] methods = new SubscriberMethod[length];
for (int i = 0; i < length; i++) {
SubscriberMethodInfo info = methodInfos[i];
methods[i] = createSubscriberMethod(info.methodName, info.eventType, info.threadMode,
info.priority, info.sticky);
}
return methods;
}
} | SimpleSubscriberInfo |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.