language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | quarkusio__quarkus | core/builder/src/main/java/io/quarkus/builder/ConsumeFlags.java | {
"start": 187,
"end": 1457
} | class ____ extends Flags<ConsumeFlag, ConsumeFlags> {
@Override
protected ConsumeFlags value(final int bits) {
return values[bits & enumValues.length - 1];
}
@Override
protected ConsumeFlags this_() {
return this;
}
@Override
protected ConsumeFlag itemOf(final int index) {
return enumValues[index];
}
@Override
protected ConsumeFlag castItemOrNull(final Object obj) {
return obj instanceof ConsumeFlag ? (ConsumeFlag) obj : null;
}
@Override
protected ConsumeFlags castThis(final Object obj) {
return (ConsumeFlags) obj;
}
private ConsumeFlags(int val) {
super(val);
}
private static final ConsumeFlag[] enumValues = ConsumeFlag.values();
private static final ConsumeFlags[] values;
static {
final ConsumeFlags[] flags = new ConsumeFlags[1 << ConsumeFlag.values().length];
for (int i = 0; i < flags.length; i++) {
flags[i] = new ConsumeFlags(i);
}
values = flags;
}
public static ConsumeFlags of(ConsumeFlag flag) {
Assert.checkNotNullParam("flag", flag);
return values[1 << flag.ordinal()];
}
public static final ConsumeFlags NONE = values[0];
}
| ConsumeFlags |
java | apache__logging-log4j2 | log4j-api-test/src/test/java/org/apache/logging/log4j/util/StackLocatorUtilTest.java | {
"start": 3591,
"end": 4923
} | class ____ "PrivateSecurityManagerStackTraceUtil"
classes.removeFirst();
assertSame(PrivateSecurityManagerStackTraceUtil.class, classes.getFirst());
}
@Test
void testGetCallerClassViaName() {
final Class<?> expected = TestMethodTestDescriptor.class;
final Class<?> actual =
StackLocatorUtil.getCallerClass("org.junit.platform.engine.support.hierarchical.ThrowableCollector");
// if this test fails in the future, it's probably because of a JUnit upgrade; check the new stack trace and
// update this test accordingly
assertSame(expected, actual);
}
@Test
void testGetCallerClassViaAnchorClass() {
final Class<?> expected = TestMethodTestDescriptor.class;
final Class<?> actual = StackLocatorUtil.getCallerClass(ThrowableCollector.class);
// if this test fails in the future, it's probably because of a JUnit upgrade; check the new stack trace and
// update this test accordingly
assertSame(expected, actual);
}
@Test
void testLocateClass() {
final ClassLocator locator = new ClassLocator();
final Class<?> clazz = locator.locateClass();
assertNotNull(clazz, "Could note locate class");
assertEquals(this.getClass(), clazz, "Incorrect class");
}
}
| in |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/serializer/IntegerSerializerTest.java | {
"start": 471,
"end": 685
} | class ____ {
private Integer value;
public Integer getValue() {
return value;
}
public void setValue(Integer value) {
this.value = value;
}
}
}
| VO |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/query/NativeQueryResultTypeAutoDiscoveryTest.java | {
"start": 22673,
"end": 22839
} | class ____ extends TestedEntity<Time> {
public Time getTestedProperty() {
return testedProperty;
}
}
@Entity(name = "timestampEntity")
public static | TimeEntity |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java | {
"start": 18673,
"end": 25390
} | class ____
assertNotNull(xmlFilename, "XML file name is null");
assertNotNull(configurationMemberVariables, "Configuration member variables is null");
assertNotNull(configurationMemberVariables, "Configuration default variables is null");
Set<String> xmlPropertiesWithEmptyValue = new TreeSet<>();
Set<String> configPropertiesWithNoDefaultConfig = new TreeSet<>();
Map<String, String> xmlPropertiesMatchingConfigDefault = new HashMap<>();
// Ugly solution. Should have tuple-based solution.
Map<Map<String, String>, Map<String, String>> mismatchingXmlConfig = new HashMap<>();
for (Map.Entry<String, String> xEntry : xmlKeyValueMap.entrySet()) {
String xmlProperty = xEntry.getKey();
String xmlDefaultValue = xEntry.getValue();
String configProperty = configurationMemberVariables.get(xmlProperty);
if (configProperty != null) {
String defaultConfigName = null;
String defaultConfigValue = null;
// Type 1: Prepend DEFAULT_
String defaultNameCheck1 = "DEFAULT_" + configProperty;
String defaultValueCheck1 = configurationDefaultVariables
.get(defaultNameCheck1);
// Type 2: Swap _KEY suffix with _DEFAULT suffix
String defaultNameCheck2 = null;
if (configProperty.endsWith("_KEY")) {
defaultNameCheck2 = configProperty
.substring(0, configProperty.length() - 4) + "_DEFAULT";
}
String defaultValueCheck2 = configurationDefaultVariables
.get(defaultNameCheck2);
// Type Last: Append _DEFAULT suffix
String defaultNameCheck3 = configProperty + "_DEFAULT";
String defaultValueCheck3 = configurationDefaultVariables
.get(defaultNameCheck3);
// Pick the default value that exists
if (defaultValueCheck1 != null) {
defaultConfigName = defaultNameCheck1;
defaultConfigValue = defaultValueCheck1;
} else if (defaultValueCheck2 != null) {
defaultConfigName = defaultNameCheck2;
defaultConfigValue = defaultValueCheck2;
} else if (defaultValueCheck3 != null) {
defaultConfigName = defaultNameCheck3;
defaultConfigValue = defaultValueCheck3;
}
if (defaultConfigValue != null) {
if (xmlDefaultValue == null) {
xmlPropertiesWithEmptyValue.add(xmlProperty);
} else if (!xmlDefaultValue.equals(defaultConfigValue)) {
Map<String, String> xmlEntry = new HashMap<>();
xmlEntry.put(xmlProperty, xmlDefaultValue);
Map<String, String> configEntry = new HashMap<>();
configEntry.put(defaultConfigName, defaultConfigValue);
mismatchingXmlConfig.put(xmlEntry, configEntry);
} else {
xmlPropertiesMatchingConfigDefault
.put(xmlProperty, defaultConfigName);
}
} else {
configPropertiesWithNoDefaultConfig.add(configProperty);
}
}
}
// Print out any unknown mismatching XML value/Config default value
LOG.info("{} has {} properties that do not match the default Config value",
xmlFilename, mismatchingXmlConfig.size());
if (mismatchingXmlConfig.isEmpty()) {
LOG.info(" (None)");
} else {
for (Map.Entry<Map<String, String>, Map<String, String>> xcEntry :
mismatchingXmlConfig.entrySet()) {
xcEntry.getKey().forEach((key, value) -> {
LOG.info("XML Property: {}", key);
LOG.info("XML Value: {}", value);
});
xcEntry.getValue().forEach((key, value) -> {
LOG.info("Config Name: {}", key);
LOG.info("Config Value: {}", value);
});
LOG.info("");
}
}
LOG.info("\n");
// Print out Config properties that have no corresponding DEFAULT_*
// variable and cannot do any XML comparison (i.e. probably needs to
// be checked by hand)
LOG.info("Configuration(s) have {} " +
" properties with no corresponding default member variable. These" +
" will need to be verified manually.",
configPropertiesWithNoDefaultConfig.size());
if (configPropertiesWithNoDefaultConfig.isEmpty()) {
LOG.info(" (None)");
} else {
configPropertiesWithNoDefaultConfig.forEach(c -> LOG.info(" {}", c));
}
LOG.info("\n");
// MAYBE TODO Print out any known mismatching XML value/Config default
// Print out XML properties that have empty values (i.e. should result
// in code-based default)
LOG.info("{} has {} properties with empty values",
xmlFilename, xmlPropertiesWithEmptyValue.size());
if (xmlPropertiesWithEmptyValue.isEmpty()) {
LOG.info(" (None)");
} else {
xmlPropertiesWithEmptyValue.forEach(p -> LOG.info(" {}", p));
}
LOG.info("\n");
// Print out any matching XML value/Config default value
LOG.info("{} has {} properties which match a corresponding Config variable",
xmlFilename, xmlPropertiesMatchingConfigDefault.size());
if (xmlPropertiesMatchingConfigDefault.isEmpty()) {
LOG.info(" (None)");
} else {
xmlPropertiesMatchingConfigDefault.forEach(
(key, value) -> LOG.info(" {} / {}", key, value));
}
LOG.info("\n=====\n");
}
/**
* For each specified string, get the default parameter values whose names
* contain the string. Then check whether any of these default values collide.
* This is, for example, useful to make sure there is no collision of default
* ports across different services.
*/
@Test
public void testDefaultValueCollision() {
for (String filter : filtersForDefaultValueCollisionCheck) {
LOG.info("Checking if any of the default values whose name " +
"contains string \"{}\" collide.", filter);
// Map from filtered default value to name of the corresponding parameter.
Map<String, String> filteredValues = new HashMap<>();
int valuesChecked = 0;
for (Map.Entry<String, String> ent :
configurationDefaultVariables.entrySet()) {
// Apply the name filter to the default parameters.
if (ent.getKey().contains(filter)) {
// Check only for numerical values.
if (StringUtils.isNumeric(ent.getValue())) {
String crtValue =
filteredValues.putIfAbsent(ent.getValue(), ent.getKey());
assertNull(crtValue, "Parameters " + ent.getKey() + " and " + crtValue +
" are using the same default value!");
}
valuesChecked++;
}
}
LOG.info("Checked {} default values for collision.", valuesChecked);
}
}
}
| members |
java | netty__netty | example/src/main/java/io/netty/example/uptime/UptimeServer.java | {
"start": 1246,
"end": 2556
} | class ____ {
private static final int PORT = Integer.parseInt(System.getProperty("port", "8080"));
private static final UptimeServerHandler handler = new UptimeServerHandler();
private UptimeServer() {
}
public static void main(String[] args) throws Exception {
EventLoopGroup group = new MultiThreadIoEventLoopGroup(NioIoHandler.newFactory());
try {
ServerBootstrap b = new ServerBootstrap();
b.group(group)
.channel(NioServerSocketChannel.class)
.handler(new LoggingHandler(LogLevel.INFO))
.childHandler(new ChannelInitializer<SocketChannel>() {
@Override
public void initChannel(SocketChannel ch) {
ch.pipeline().addLast(handler);
}
});
// Bind and start to accept incoming connections.
ChannelFuture f = b.bind(PORT).sync();
// Wait until the server socket is closed.
// In this example, this does not happen, but you can do that to gracefully
// shut down your server.
f.channel().closeFuture().sync();
} finally {
group.shutdownGracefully();
}
}
}
| UptimeServer |
java | quarkusio__quarkus | integration-tests/test-extension/extension/runtime/src/main/java/io/quarkus/extest/runtime/classpath/RecordedClasspathEntries.java | {
"start": 2834,
"end": 4753
} | class ____ {
private static final String SEPARATOR = "\t";
static String serialize(Record record) {
StringBuilder builder = new StringBuilder();
builder.append(record.phase.name());
builder.append(SEPARATOR);
builder.append(record.resourceName);
for (String classpathEntry : record.classpathEntries) {
builder.append(SEPARATOR);
builder.append(classpathEntry);
}
return builder.toString();
}
static Record deserialize(String line) {
String[] elements = line.split(SEPARATOR);
Phase phase = Phase.valueOf(elements[0]);
String resourceName = elements[1];
List<String> classpathEntries = new ArrayList<>();
for (int i = 2; i < elements.length; i++) {
classpathEntries.add(elements[i]);
}
return new Record(phase, resourceName, classpathEntries);
}
private final Phase phase;
private final String resourceName;
private final List<String> classpathEntries;
public Record(Phase phase, String resourceName, List<String> classpathEntries) {
this.phase = phase;
this.resourceName = resourceName;
this.classpathEntries = classpathEntries;
}
@Override
public String toString() {
return "Record{" +
"phase=" + phase +
", resourceName='" + resourceName + '\'' +
", classpathEntries=" + classpathEntries +
'}';
}
public Phase getPhase() {
return phase;
}
public String getResourceName() {
return resourceName;
}
public List<String> getClasspathEntries() {
return classpathEntries;
}
}
}
| Record |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/metamodel/model/domain/internal/AnyMappingSqmPathSource.java | {
"start": 821,
"end": 2025
} | class ____<J> extends AbstractSqmPathSource<J> {
private final SqmPathSource<?> keyPathSource;
private final AnyDiscriminatorSqmPathSource<?> discriminatorPathSource;
public AnyMappingSqmPathSource(
String localPathName,
SqmPathSource<J> pathModel,
AnyMappingDomainType<J> domainType,
BindableType jpaBindableType) {
super( localPathName, pathModel, domainType, jpaBindableType );
keyPathSource = new BasicSqmPathSource<>(
AnyKeyPart.KEY_NAME,
null,
(BasicDomainType<?>) domainType.getKeyType(),
domainType.getKeyType().getExpressibleJavaType(),
SINGULAR_ATTRIBUTE,
false
);
discriminatorPathSource = new AnyDiscriminatorSqmPathSource<>(
localPathName,
null,
domainType.getDiscriminatorType(),
jpaBindableType
);
}
@Override
public @Nullable SqmPathSource<?> findSubPathSource(String name) {
return switch ( name ) {
case AnyKeyPart.KEY_NAME ->
// standard id() function
keyPathSource;
case AnyDiscriminatorPart.ROLE_NAME ->
// standard type() function
discriminatorPathSource;
case "id" ->
// deprecated HQL .id syntax
keyPathSource;
case "class" ->
// deprecated HQL . | AnyMappingSqmPathSource |
java | spring-projects__spring-framework | spring-webflux/src/test/java/org/springframework/web/reactive/result/method/annotation/JacksonStreamingIntegrationTests.java | {
"start": 3471,
"end": 3810
} | class ____ {
@GetMapping(value = "/stream",
produces = { APPLICATION_NDJSON_VALUE, "application/stream+x-jackson-smile" })
Flux<Person> person() {
return testInterval(Duration.ofMillis(1), 50).map(l -> new Person("foo " + l));
}
}
@Configuration
@EnableWebFlux
@SuppressWarnings("unused")
static | JacksonStreamingController |
java | apache__camel | dsl/camel-yaml-dsl/camel-yaml-dsl-deserializers/src/generated/java/org/apache/camel/dsl/yaml/deserializers/ModelDeserializers.java | {
"start": 643017,
"end": 645902
} | class ____ extends YamlDeserializerBase<NoErrorHandlerDefinition> {
public NoErrorHandlerDefinitionDeserializer() {
super(NoErrorHandlerDefinition.class);
}
@Override
protected NoErrorHandlerDefinition newInstance() {
return new NoErrorHandlerDefinition();
}
@Override
protected boolean setProperty(NoErrorHandlerDefinition target, String propertyKey,
String propertyName, Node node) {
propertyKey = org.apache.camel.util.StringHelper.dashToCamelCase(propertyKey);
switch(propertyKey) {
case "id": {
String val = asText(node);
target.setId(val);
break;
}
default: {
return false;
}
}
return true;
}
}
@YamlType(
nodes = "oauth2",
types = org.apache.camel.model.rest.OAuth2Definition.class,
order = org.apache.camel.dsl.yaml.common.YamlDeserializerResolver.ORDER_LOWEST - 1,
displayName = "Oauth2",
description = "Rest security OAuth2 definition",
deprecated = false,
properties = {
@YamlProperty(name = "authorizationUrl", type = "string", description = "The authorization URL to be used for this flow. This SHOULD be in the form of a URL. Required for implicit and access code flows", displayName = "Authorization Url"),
@YamlProperty(name = "description", type = "string", description = "A short description for security scheme.", displayName = "Description"),
@YamlProperty(name = "flow", type = "enum:implicit,password,application,clientCredentials,accessCode,authorizationCode", description = "The flow used by the OAuth2 security scheme. Valid values are implicit, password, application or accessCode.", displayName = "Flow"),
@YamlProperty(name = "key", type = "string", required = true, description = "Key used to refer to this security definition", displayName = "Key"),
@YamlProperty(name = "refreshUrl", type = "string", description = "The URL to be used for obtaining refresh tokens. This MUST be in the form of a URL.", displayName = "Refresh Url"),
@YamlProperty(name = "scopes", type = "array:org.apache.camel.model.rest.RestPropertyDefinition", description = "The available scopes for an OAuth2 security scheme", displayName = "Scopes"),
@YamlProperty(name = "tokenUrl", type = "string", description = "The token URL to be used for this flow. This SHOULD be in the form of a URL. Required for password, application, and access code flows.", displayName = "Token Url")
}
)
public static | NoErrorHandlerDefinitionDeserializer |
java | micronaut-projects__micronaut-core | inject/src/main/java/io/micronaut/inject/qualifiers/FilteringCompositeQualifier.java | {
"start": 1016,
"end": 3435
} | class ____<T> extends FilteringQualifier<T> {
private final FilteringQualifier<T>[] qualifiers;
/**
* @param qualifiers The qualifiers
*/
FilteringCompositeQualifier(FilteringQualifier<T>[] qualifiers) {
this.qualifiers = qualifiers;
}
@Override
public boolean doesQualify(Class<T> beanType, BeanType<T> candidate) {
for (FilteringQualifier<T> qualifier : qualifiers) {
if (!qualifier.doesQualify(beanType, candidate)) {
return false;
}
}
return true;
}
@Override
public boolean doesQualify(Class<T> beanType, QualifiedBeanType<T> candidate) {
for (FilteringQualifier<T> qualifier : qualifiers) {
if (!qualifier.doesQualify(beanType, candidate)) {
return false;
}
}
return true;
}
public FilteringQualifier<T>[] getQualifiers() {
return qualifiers;
}
@Override
public boolean contains(Qualifier<T> qualifier) {
if (qualifier instanceof FilteringCompositeQualifier<T> filteringCompositeQualifier) {
for (Qualifier<T> q : filteringCompositeQualifier.qualifiers) {
if (!contains(q)) {
return false;
}
}
return true;
}
if (qualifier instanceof CompositeQualifier<T> compositeQualifier) {
for (Qualifier<T> q : compositeQualifier.getQualifiers()) {
if (!contains(q)) {
return false;
}
}
return true;
}
for (FilteringQualifier<T> q : qualifiers) {
if (q.contains(qualifier)) {
return true;
}
}
return false;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
FilteringCompositeQualifier<?> that = (FilteringCompositeQualifier<?>) o;
return Arrays.equals(qualifiers, that.qualifiers);
}
@Override
public int hashCode() {
return Arrays.hashCode(qualifiers);
}
@Override
public String toString() {
return Arrays.stream(qualifiers).map(Object::toString).collect(Collectors.joining(" and "));
}
}
| FilteringCompositeQualifier |
java | apache__maven | impl/maven-cli/src/main/java/org/apache/maven/cling/invoker/PlexusContainerCapsuleFactory.java | {
"start": 11345,
"end": 14642
} | class ____ '" + extRealm.getId() + "'");
for (Path file : extClassPath) {
logger.debug(" included '" + file + "'");
extRealm.addURL(file.toUri().toURL());
}
ArrayList<CoreExtensionEntry> reversed = new ArrayList<>(extensions);
Collections.reverse(reversed);
for (CoreExtensionEntry entry : reversed) {
Set<String> exportedPackages = entry.getExportedPackages();
ClassRealm realm = entry.getClassRealm();
for (String exportedPackage : exportedPackages) {
extRealm.importFrom(realm, exportedPackage);
}
if (exportedPackages.isEmpty()) {
// sisu uses realm imports to establish component visibility
extRealm.importFrom(realm, realm.getId());
}
}
return extRealm;
}
return coreRealm;
}
protected List<LoadedCoreExtension> loadCoreExtensions(
LookupInvoker<C> invoker,
C context,
ClassRealm containerRealm,
Set<String> providedArtifacts,
List<CoreExtension> extensions)
throws Exception {
if (extensions.isEmpty()) {
return List.of();
}
ContainerConfiguration cc = new DefaultContainerConfiguration()
.setClassWorld(containerRealm.getWorld())
.setRealm(containerRealm)
.setClassPathScanning(PlexusConstants.SCANNING_INDEX)
.setAutoWiring(true)
.setJSR250Lifecycle(true)
.setStrictClassPathScanning(false)
.setName("maven");
DefaultPlexusContainer container = new DefaultPlexusContainer(cc, new AbstractModule() {
@Override
protected void configure() {
bind(ILoggerFactory.class).toProvider(() -> context.loggerFactory);
}
});
ClassLoader oldCL = Thread.currentThread().getContextClassLoader();
Runnable settingsCleaner = null;
try {
container.setLookupRealm(null);
container.setLoggerManager(createLoggerManager());
container.getLoggerManager().setThresholds(toPlexusLoggingLevel(context.loggerLevel));
Thread.currentThread().setContextClassLoader(container.getContainerRealm());
settingsCleaner = invoker.settings(context, false, container.lookup(SettingsBuilder.class));
MavenExecutionRequest mer = new DefaultMavenExecutionRequest();
invoker.populateRequest(context, new DefaultLookup(container), mer);
mer = container.lookup(MavenExecutionRequestPopulator.class).populateDefaults(mer);
return Collections.unmodifiableList(container
.lookup(BootstrapCoreExtensionManager.class)
.loadCoreExtensions(mer, providedArtifacts, extensions));
} finally {
if (settingsCleaner != null) {
settingsCleaner.run();
}
try {
container.dispose();
} finally {
Thread.currentThread().setContextClassLoader(oldCL);
}
}
}
}
| realm |
java | spring-projects__spring-framework | spring-websocket/src/test/java/org/springframework/web/socket/ContextLoaderTestUtils.java | {
"start": 951,
"end": 2018
} | class ____ {
private static Map<ClassLoader, WebApplicationContext> currentContextPerThread =
getCurrentContextPerThreadFromContextLoader();
public static void setCurrentWebApplicationContext(WebApplicationContext applicationContext) {
setCurrentWebApplicationContext(Thread.currentThread().getContextClassLoader(), applicationContext);
}
public static void setCurrentWebApplicationContext(ClassLoader classLoader,
WebApplicationContext applicationContext) {
if (applicationContext != null) {
currentContextPerThread.put(classLoader, applicationContext);
}
else {
currentContextPerThread.remove(classLoader);
}
}
@SuppressWarnings("unchecked")
private static Map<ClassLoader, WebApplicationContext> getCurrentContextPerThreadFromContextLoader() {
try {
Field field = ContextLoader.class.getDeclaredField("currentContextPerThread");
field.setAccessible(true);
return (Map<ClassLoader, WebApplicationContext>) field.get(null);
}
catch (Exception ex) {
throw new IllegalStateException(ex);
}
}
}
| ContextLoaderTestUtils |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/search/vectors/DiversifyingChildrenIVFKnnFloatVectorQueryTests.java | {
"start": 708,
"end": 1244
} | class ____ extends AbstractDiversifyingChildrenIVFKnnVectorQueryTestCase {
@Override
Query getDiversifyingChildrenKnnQuery(String fieldName, float[] queryVector, Query childFilter, int k, BitSetProducer parentBitSet) {
return new DiversifyingChildrenIVFKnnFloatVectorQuery(fieldName, queryVector, k, k, childFilter, parentBitSet, 0);
}
@Override
Field getKnnVectorField(String name, float[] vector) {
return new KnnFloatVectorField(name, vector);
}
}
| DiversifyingChildrenIVFKnnFloatVectorQueryTests |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/time/ZoneIdOfZTest.java | {
"start": 2242,
"end": 2712
} | class ____ {
private static final String ZONE = "Z";
// BUG: Diagnostic contains: private static final ZoneId UTC = ZoneOffset.UTC;
private static final ZoneId UTC = ZoneId.of(ZONE);
}
""")
.doTest();
}
@Test
public void zoneIdOfNonConstant() {
helper
.addSourceLines(
"TestClass.java",
"""
import java.time.ZoneId;
public | TestClass |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/show/MySqlShowFieldsTest.java | {
"start": 929,
"end": 2092
} | class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "SHOW FIELDS FROM `schema_migrations`";
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement statemen = statementList.get(0);
// print(statementList);
assertEquals(1, statementList.size());
MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
statemen.accept(visitor);
// System.out.println("Tables : " + visitor.getTables());
// System.out.println("fields : " + visitor.getColumns());
// System.out.println("coditions : " + visitor.getConditions());
// System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(0, visitor.getTables().size());
assertEquals(0, visitor.getColumns().size());
assertEquals(0, visitor.getConditions().size());
// assertTrue(visitor.getTables().containsKey(new TableStat.Name("t_price")));
// assertTrue(visitor.getTables().containsKey(new TableStat.Name("t_basic_store")));
}
}
| MySqlShowFieldsTest |
java | quarkusio__quarkus | extensions/arc/deployment/src/test/java/io/quarkus/arc/test/unproxyable/some/Resource.java | {
"start": 63,
"end": 332
} | class ____ {
Resource() {
}
public static Resource from(int ping) {
return new Resource() {
@Override
public int ping() {
return ping;
}
};
}
public abstract int ping();
}
| Resource |
java | spring-projects__spring-framework | spring-beans/src/test/java/org/springframework/beans/propertyeditors/BeanInfoTests.java | {
"start": 1966,
"end": 2416
} | class ____ extends SimpleBeanInfo {
@Override
public PropertyDescriptor[] getPropertyDescriptors() {
try {
PropertyDescriptor pd = new PropertyDescriptor("value", ValueBean.class);
pd.setPropertyEditorClass(MyNumberEditor.class);
return new PropertyDescriptor[] {pd};
}
catch (IntrospectionException ex) {
throw new FatalBeanException("Couldn't create PropertyDescriptor", ex);
}
}
}
public static | ValueBeanBeanInfo |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/FutureTransformAsyncTest.java | {
"start": 11527,
"end": 12247
} | class ____ {
private Executor executor;
ListenableFuture<String> test() {
return Futures.transform(Futures.immediateFuture(5), value -> "value: " + value, executor);
}
}
""")
.doTest();
}
@Test
public void transformAsync_staticImports() {
refactoringHelper
.addInputLines(
"in/Test.java",
"""
import static com.google.common.util.concurrent.Futures.immediateFuture;
import static com.google.common.util.concurrent.Futures.transformAsync;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.concurrent.Executor;
| Test |
java | apache__flink | flink-core/src/main/java/org/apache/flink/util/UserCodeClassLoader.java | {
"start": 1366,
"end": 1519
} | class ____ is being
* released.
*
* @param releaseHookName
* @param releaseHook releaseHook which is executed before the user code | loader |
java | redisson__redisson | redisson/src/main/java/org/redisson/redisnode/RedissonClusterNodes.java | {
"start": 1011,
"end": 1752
} | class ____ extends RedissonBaseNodes implements RedisCluster {
public RedissonClusterNodes(ConnectionManager connectionManager, CommandAsyncExecutor commandExecutor) {
super(connectionManager, commandExecutor);
}
@Override
public Collection<RedisClusterMaster> getMasters() {
return getNodes(NodeType.MASTER);
}
@Override
public RedisClusterMaster getMaster(String address) {
return getNode(address, NodeType.MASTER);
}
@Override
public Collection<RedisClusterSlave> getSlaves() {
return getNodes(NodeType.SLAVE);
}
@Override
public RedisClusterSlave getSlave(String address) {
return getNode(address, NodeType.SLAVE);
}
}
| RedissonClusterNodes |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/support/AutomatonPatternsTests.java | {
"start": 810,
"end": 5162
} | class ____ extends ESTestCase {
/**
* RCS 2.0 allows a single API key to define "replication" and "search" blocks. If both are defined, this results in an API key with 2
* sets of indices permissions. Due to the way API keys (and roles) work across the multiple index permission, the set of index
* patterns allowed are effectively the most generous of the sets of index patterns since the index patterns are OR'ed together. For
* example, `foo` OR `*` results in access to `*`. So, if you have "search" access defined as `foo`, but replication access defined
* as `*`, the API key effectively allows access to index pattern `*`. This means that the access for API keys that define both
* "search" and "replication", the action names used are the primary means by which we can constrain CCS to the set of "search" indices
* as well as how we constrain CCR to the set "replication" indices. For example, if "replication" ever allowed access to
* `indices:data/read/get` for `*` , then the "replication" permissions would effectively enable users of CCS to get documents,
* even if "search" is never defined in the RCS 2.0 API key. This obviously is not desirable and in practice when both "search" and
* "replication" are defined the isolation between CCS and CCR is only achieved because the action names for the workflows do not
* overlap. This test helps to ensure that the actions names used for RCS 2.0 do not bleed over between search and replication.
*/
public void testRemoteClusterPrivsDoNotOverlap() {
// check that the action patterns for remote CCS are not allowed by remote CCR privileges
Arrays.stream(CCS_INDICES_PRIVILEGE_NAMES).forEach(ccsPrivilege -> {
Automaton ccsAutomaton = IndexPrivilege.get(ccsPrivilege).getAutomaton();
Automatons.getPatterns(ccsAutomaton).forEach(ccsPattern -> {
// emulate an action name that could be allowed by a CCS privilege
String actionName = ccsPattern.replaceAll("\\*", randomAlphaOfLengthBetween(1, 8));
Arrays.stream(CCR_INDICES_PRIVILEGE_NAMES).forEach(ccrPrivileges -> {
String errorMessage = String.format(
Locale.ROOT,
"CCR privilege \"%s\" allows CCS action \"%s\". This could result in an "
+ "accidental bleeding of permission between RCS 2.0's search and replication index permissions",
ccrPrivileges,
ccsPattern
);
assertFalse(errorMessage, IndexPrivilege.get(ccrPrivileges).predicate().test(actionName));
});
});
});
// check that the action patterns for remote CCR are not allowed by remote CCS privileges
Arrays.stream(CCR_INDICES_PRIVILEGE_NAMES).forEach(ccrPrivilege -> {
Automaton ccrAutomaton = IndexPrivilege.get(ccrPrivilege).getAutomaton();
Automatons.getPatterns(ccrAutomaton).forEach(ccrPattern -> {
// emulate an action name that could be allowed by a CCR privilege
String actionName = ccrPattern.replaceAll("\\*", randomAlphaOfLengthBetween(1, 8));
Arrays.stream(CCS_INDICES_PRIVILEGE_NAMES).forEach(ccsPrivileges -> {
if ("indices:data/read/xpack/ccr/shard_changes*".equals(ccrPattern)) {
// do nothing, this action is only applicable to CCR workflows and is a moot point if CCS technically has
// access to the index pattern for this action granted by CCR
} else {
String errorMessage = String.format(
Locale.ROOT,
"CCS privilege \"%s\" allows CCR action \"%s\". This could result in an accidental bleeding of "
+ "permission between RCS 2.0's search and replication index permissions",
ccsPrivileges,
ccrPattern
);
assertFalse(errorMessage, IndexPrivilege.get(ccsPrivileges).predicate().test(actionName));
}
});
});
});
}
}
| AutomatonPatternsTests |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/web/servlet/samples/client/standalone/resultmatches/ContentAssertionTests.java | {
"start": 1619,
"end": 4553
} | class ____ {
private final WebTestClient testClient =
MockMvcWebTestClient.bindToController(new SimpleController()).build();
@Test
void contentType() {
testClient.get().uri("/handle").accept(TEXT_PLAIN)
.exchange()
.expectStatus().isOk()
.expectHeader().contentType(MediaType.valueOf("text/plain;charset=ISO-8859-1"))
.expectHeader().contentType("text/plain;charset=ISO-8859-1")
.expectHeader().contentTypeCompatibleWith("text/plain")
.expectHeader().contentTypeCompatibleWith(TEXT_PLAIN);
testClient.get().uri("/handleUtf8")
.exchange()
.expectStatus().isOk()
.expectHeader().contentType(MediaType.valueOf("text/plain;charset=UTF-8"))
.expectHeader().contentType("text/plain;charset=UTF-8")
.expectHeader().contentTypeCompatibleWith("text/plain")
.expectHeader().contentTypeCompatibleWith(TEXT_PLAIN);
}
@Test
void contentAsString() {
testClient.get().uri("/handle").accept(TEXT_PLAIN)
.exchange()
.expectStatus().isOk()
.expectBody(String.class).isEqualTo("Hello world!");
testClient.get().uri("/handleUtf8").accept(TEXT_PLAIN)
.exchange()
.expectStatus().isOk()
.expectBody(String.class).isEqualTo("\u3053\u3093\u306b\u3061\u306f\u4e16\u754c\uff01");
// Hamcrest matchers...
testClient.get().uri("/handle").accept(TEXT_PLAIN)
.exchange()
.expectStatus().isOk()
.expectBody(String.class).value(v -> MatcherAssert.assertThat(v, equalTo("Hello world!")));
testClient.get().uri("/handleUtf8")
.exchange()
.expectStatus().isOk()
.expectBody(String.class).value(v ->
MatcherAssert.assertThat(v, equalTo("\u3053\u3093\u306b\u3061\u306f\u4e16\u754c\uff01")));
}
@Test
void contentAsBytes() {
testClient.get().uri("/handle").accept(TEXT_PLAIN)
.exchange()
.expectStatus().isOk()
.expectBody(byte[].class).isEqualTo(
"Hello world!".getBytes(ISO_8859_1));
testClient.get().uri("/handleUtf8")
.exchange()
.expectStatus().isOk()
.expectBody(byte[].class).isEqualTo(
"\u3053\u3093\u306b\u3061\u306f\u4e16\u754c\uff01".getBytes(UTF_8));
}
@Test
void contentStringMatcher() {
testClient.get().uri("/handle").accept(TEXT_PLAIN)
.exchange()
.expectStatus().isOk()
.expectBody(String.class).value(v -> MatcherAssert.assertThat(v, containsString("world")));
}
@Test
void characterEncoding() {
testClient.get().uri("/handle").accept(TEXT_PLAIN)
.exchange()
.expectStatus().isOk()
.expectHeader().contentType("text/plain;charset=ISO-8859-1")
.expectBody(String.class).value(v -> MatcherAssert.assertThat(v, containsString("world")));
testClient.get().uri("/handleUtf8")
.exchange()
.expectStatus().isOk()
.expectHeader().contentType("text/plain;charset=UTF-8")
.expectBody(byte[].class)
.isEqualTo("\u3053\u3093\u306b\u3061\u306f\u4e16\u754c\uff01".getBytes(UTF_8));
}
@Controller
private static | ContentAssertionTests |
java | apache__rocketmq | namesrv/src/main/java/org/apache/rocketmq/namesrv/routeinfo/RouteInfoManager.java | {
"start": 3455,
"end": 50566
} | class ____ {
private static final Logger log = LoggerFactory.getLogger(LoggerName.NAMESRV_LOGGER_NAME);
private static final long DEFAULT_BROKER_CHANNEL_EXPIRED_TIME = 1000 * 60 * 2;
private final ReadWriteLock lock = new ReentrantReadWriteLock();
private final Map<String/* topic */, Map<String, QueueData>> topicQueueTable;
private final Map<String/* brokerName */, BrokerData> brokerAddrTable;
private final Map<String/* clusterName */, Set<String/* brokerName */>> clusterAddrTable;
private final Map<BrokerAddrInfo/* brokerAddr */, BrokerLiveInfo> brokerLiveTable;
private final Map<BrokerAddrInfo/* brokerAddr */, List<String>/* Filter Server */> filterServerTable;
private final Map<String/* topic */, Map<String/*brokerName*/, TopicQueueMappingInfo>> topicQueueMappingInfoTable;
private final BatchUnregistrationService unRegisterService;
private final NamesrvController namesrvController;
private final NamesrvConfig namesrvConfig;
public RouteInfoManager(final NamesrvConfig namesrvConfig, NamesrvController namesrvController) {
this.topicQueueTable = new ConcurrentHashMap<>(1024);
this.brokerAddrTable = new ConcurrentHashMap<>(128);
this.clusterAddrTable = new ConcurrentHashMap<>(32);
this.brokerLiveTable = new ConcurrentHashMap<>(256);
this.filterServerTable = new ConcurrentHashMap<>(256);
this.topicQueueMappingInfoTable = new ConcurrentHashMap<>(1024);
this.unRegisterService = new BatchUnregistrationService(this, namesrvConfig);
this.namesrvConfig = namesrvConfig;
this.namesrvController = namesrvController;
}
public void start() {
this.unRegisterService.start();
}
public void shutdown() {
this.unRegisterService.shutdown(true);
}
public boolean submitUnRegisterBrokerRequest(UnRegisterBrokerRequestHeader unRegisterRequest) {
return this.unRegisterService.submit(unRegisterRequest);
}
// For test only
int blockedUnRegisterRequests() {
return this.unRegisterService.queueLength();
}
public ClusterInfo getAllClusterInfo() {
ClusterInfo clusterInfoSerializeWrapper = new ClusterInfo();
clusterInfoSerializeWrapper.setBrokerAddrTable(this.brokerAddrTable);
clusterInfoSerializeWrapper.setClusterAddrTable(this.clusterAddrTable);
return clusterInfoSerializeWrapper;
}
public void registerTopic(final String topic, List<QueueData> queueDatas) {
if (queueDatas == null || queueDatas.isEmpty()) {
return;
}
try {
this.lock.writeLock().lockInterruptibly();
if (this.topicQueueTable.containsKey(topic)) {
Map<String, QueueData> queueDataMap = this.topicQueueTable.get(topic);
for (QueueData queueData : queueDatas) {
if (!this.brokerAddrTable.containsKey(queueData.getBrokerName())) {
log.warn("Register topic contains illegal broker, {}, {}", topic, queueData);
return;
}
queueDataMap.put(queueData.getBrokerName(), queueData);
}
log.info("Topic route already exist.{}, {}", topic, this.topicQueueTable.get(topic));
} else {
// check and construct queue data map
Map<String, QueueData> queueDataMap = new HashMap<>();
for (QueueData queueData : queueDatas) {
if (!this.brokerAddrTable.containsKey(queueData.getBrokerName())) {
log.warn("Register topic contains illegal broker, {}, {}", topic, queueData);
return;
}
queueDataMap.put(queueData.getBrokerName(), queueData);
}
this.topicQueueTable.put(topic, queueDataMap);
log.info("Register topic route:{}, {}", topic, queueDatas);
}
} catch (Exception e) {
log.error("registerTopic Exception", e);
} finally {
this.lock.writeLock().unlock();
}
}
public void deleteTopic(final String topic) {
try {
this.lock.writeLock().lockInterruptibly();
this.topicQueueTable.remove(topic);
} catch (Exception e) {
log.error("deleteTopic Exception", e);
} finally {
this.lock.writeLock().unlock();
}
}
public void deleteTopic(final String topic, final String clusterName) {
try {
this.lock.writeLock().lockInterruptibly();
//get all the brokerNames fot the specified cluster
Set<String> brokerNames = this.clusterAddrTable.get(clusterName);
if (brokerNames == null || brokerNames.isEmpty()) {
return;
}
//get the store information for single topic
Map<String, QueueData> queueDataMap = this.topicQueueTable.get(topic);
if (queueDataMap != null) {
for (String brokerName : brokerNames) {
final QueueData removedQD = queueDataMap.remove(brokerName);
if (removedQD != null) {
log.info("deleteTopic, remove one broker's topic {} {} {}", brokerName, topic, removedQD);
}
}
if (queueDataMap.isEmpty()) {
log.info("deleteTopic, remove the topic all queue {} {}", clusterName, topic);
this.topicQueueTable.remove(topic);
}
}
} catch (Exception e) {
log.error("deleteTopic Exception", e);
} finally {
this.lock.writeLock().unlock();
}
}
public TopicList getAllTopicList() {
TopicList topicList = new TopicList();
try {
this.lock.readLock().lockInterruptibly();
topicList.getTopicList().addAll(this.topicQueueTable.keySet());
} catch (Exception e) {
log.error("getAllTopicList Exception", e);
} finally {
this.lock.readLock().unlock();
}
return topicList;
}
public RegisterBrokerResult registerBroker(
final String clusterName,
final String brokerAddr,
final String brokerName,
final long brokerId,
final String haServerAddr,
final String zoneName,
final Long timeoutMillis,
final TopicConfigSerializeWrapper topicConfigWrapper,
final List<String> filterServerList,
final Channel channel) {
return registerBroker(clusterName, brokerAddr, brokerName, brokerId, haServerAddr, zoneName, timeoutMillis, false, topicConfigWrapper, filterServerList, channel);
}
public RegisterBrokerResult registerBroker(
final String clusterName,
final String brokerAddr,
final String brokerName,
final long brokerId,
final String haServerAddr,
final String zoneName,
final Long timeoutMillis,
final Boolean enableActingMaster,
final TopicConfigSerializeWrapper topicConfigWrapper,
final List<String> filterServerList,
final Channel channel) {
RegisterBrokerResult result = new RegisterBrokerResult();
try {
this.lock.writeLock().lockInterruptibly();
//init or update the cluster info
Set<String> brokerNames = ConcurrentHashMapUtils.computeIfAbsent((ConcurrentHashMap<String, Set<String>>) this.clusterAddrTable, clusterName, k -> new HashSet<>());
brokerNames.add(brokerName);
boolean registerFirst = false;
BrokerData brokerData = this.brokerAddrTable.get(brokerName);
if (null == brokerData) {
registerFirst = true;
brokerData = new BrokerData(clusterName, brokerName, new HashMap<>());
this.brokerAddrTable.put(brokerName, brokerData);
}
boolean isOldVersionBroker = enableActingMaster == null;
brokerData.setEnableActingMaster(!isOldVersionBroker && enableActingMaster);
brokerData.setZoneName(zoneName);
Map<Long, String> brokerAddrsMap = brokerData.getBrokerAddrs();
boolean isMinBrokerIdChanged = false;
long prevMinBrokerId = 0;
if (!brokerAddrsMap.isEmpty()) {
prevMinBrokerId = Collections.min(brokerAddrsMap.keySet());
}
if (brokerId < prevMinBrokerId) {
isMinBrokerIdChanged = true;
}
//Switch slave to master: first remove <1, IP:PORT> in namesrv, then add <0, IP:PORT>
//The same IP:PORT must only have one record in brokerAddrTable
brokerAddrsMap.entrySet().removeIf(item -> null != brokerAddr && brokerAddr.equals(item.getValue()) && brokerId != item.getKey());
//If Local brokerId stateVersion bigger than the registering one,
String oldBrokerAddr = brokerAddrsMap.get(brokerId);
if (null != oldBrokerAddr && !oldBrokerAddr.equals(brokerAddr)) {
BrokerLiveInfo oldBrokerInfo = brokerLiveTable.get(new BrokerAddrInfo(clusterName, oldBrokerAddr));
if (null != oldBrokerInfo) {
long oldStateVersion = oldBrokerInfo.getDataVersion().getStateVersion();
long newStateVersion = topicConfigWrapper.getDataVersion().getStateVersion();
if (oldStateVersion > newStateVersion) {
log.warn("Registering Broker conflicts with the existed one, just ignore.: Cluster:{}, BrokerName:{}, BrokerId:{}, " +
"Old BrokerAddr:{}, Old Version:{}, New BrokerAddr:{}, New Version:{}.",
clusterName, brokerName, brokerId, oldBrokerAddr, oldStateVersion, brokerAddr, newStateVersion);
//Remove the rejected brokerAddr from brokerLiveTable.
brokerLiveTable.remove(new BrokerAddrInfo(clusterName, brokerAddr));
return result;
}
}
}
if (!brokerAddrsMap.containsKey(brokerId) && topicConfigWrapper.getTopicConfigTable().size() == 1) {
log.warn("Can't register topicConfigWrapper={} because broker[{}]={} has not registered.",
topicConfigWrapper.getTopicConfigTable(), brokerId, brokerAddr);
return null;
}
String oldAddr = brokerAddrsMap.put(brokerId, brokerAddr);
registerFirst = registerFirst || (StringUtils.isEmpty(oldAddr));
boolean isMaster = MixAll.MASTER_ID == brokerId;
boolean isPrimeSlave = !isOldVersionBroker && !isMaster
&& brokerId == Collections.min(brokerAddrsMap.keySet());
if (null != topicConfigWrapper && (isMaster || isPrimeSlave)) {
ConcurrentMap<String, TopicConfig> tcTable =
topicConfigWrapper.getTopicConfigTable();
if (tcTable != null) {
TopicConfigAndMappingSerializeWrapper mappingSerializeWrapper = TopicConfigAndMappingSerializeWrapper.from(topicConfigWrapper);
Map<String, TopicQueueMappingInfo> topicQueueMappingInfoMap = mappingSerializeWrapper.getTopicQueueMappingInfoMap();
// Delete the topics that don't exist in tcTable from the current broker
// Static topic is not supported currently
if (namesrvConfig.isDeleteTopicWithBrokerRegistration() && topicQueueMappingInfoMap.isEmpty()) {
final Set<String> oldTopicSet = topicSetOfBrokerName(brokerName);
final Set<String> newTopicSet = tcTable.keySet();
final Sets.SetView<String> toDeleteTopics = Sets.difference(oldTopicSet, newTopicSet);
for (final String toDeleteTopic : toDeleteTopics) {
Map<String, QueueData> queueDataMap = topicQueueTable.get(toDeleteTopic);
final QueueData removedQD = queueDataMap.remove(brokerName);
if (removedQD != null) {
log.info("deleteTopic, remove one broker's topic {} {} {}", brokerName, toDeleteTopic, removedQD);
}
if (queueDataMap.isEmpty()) {
log.info("deleteTopic, remove the topic all queue {}", toDeleteTopic);
topicQueueTable.remove(toDeleteTopic);
}
}
}
for (Map.Entry<String, TopicConfig> entry : tcTable.entrySet()) {
if (registerFirst || this.isTopicConfigChanged(clusterName, brokerAddr,
topicConfigWrapper.getDataVersion(), brokerName,
entry.getValue().getTopicName())) {
final TopicConfig topicConfig = entry.getValue();
// In Slave Acting Master mode, Namesrv will regard the surviving Slave with the smallest brokerId as the "agent" Master, and modify the brokerPermission to read-only.
if (isPrimeSlave && brokerData.isEnableActingMaster()) {
// Wipe write perm for prime slave
topicConfig.setPerm(topicConfig.getPerm() & (~PermName.PERM_WRITE));
}
this.createAndUpdateQueueData(brokerName, topicConfig);
}
}
if (this.isBrokerTopicConfigChanged(clusterName, brokerAddr, topicConfigWrapper.getDataVersion()) || registerFirst) {
//the topicQueueMappingInfoMap should never be null, but can be empty
for (Map.Entry<String, TopicQueueMappingInfo> entry : topicQueueMappingInfoMap.entrySet()) {
if (!topicQueueMappingInfoTable.containsKey(entry.getKey())) {
topicQueueMappingInfoTable.put(entry.getKey(), new HashMap<>());
}
//Note asset brokerName equal entry.getValue().getBname()
//here use the mappingDetail.bname
topicQueueMappingInfoTable.get(entry.getKey()).put(entry.getValue().getBname(), entry.getValue());
}
}
}
}
BrokerAddrInfo brokerAddrInfo = new BrokerAddrInfo(clusterName, brokerAddr);
BrokerLiveInfo prevBrokerLiveInfo = this.brokerLiveTable.put(brokerAddrInfo,
new BrokerLiveInfo(
System.currentTimeMillis(),
timeoutMillis == null ? DEFAULT_BROKER_CHANNEL_EXPIRED_TIME : timeoutMillis,
topicConfigWrapper == null ? new DataVersion() : topicConfigWrapper.getDataVersion(),
channel,
haServerAddr));
if (null == prevBrokerLiveInfo) {
log.info("new broker registered, {} HAService: {}", brokerAddrInfo, haServerAddr);
}
if (filterServerList != null) {
if (filterServerList.isEmpty()) {
this.filterServerTable.remove(brokerAddrInfo);
} else {
this.filterServerTable.put(brokerAddrInfo, filterServerList);
}
}
if (MixAll.MASTER_ID != brokerId) {
String masterAddr = brokerData.getBrokerAddrs().get(MixAll.MASTER_ID);
if (masterAddr != null) {
BrokerAddrInfo masterAddrInfo = new BrokerAddrInfo(clusterName, masterAddr);
BrokerLiveInfo masterLiveInfo = this.brokerLiveTable.get(masterAddrInfo);
if (masterLiveInfo != null) {
result.setHaServerAddr(masterLiveInfo.getHaServerAddr());
result.setMasterAddr(masterAddr);
}
}
}
if (isMinBrokerIdChanged && namesrvConfig.isNotifyMinBrokerIdChanged()) {
notifyMinBrokerIdChanged(brokerAddrsMap, null,
this.brokerLiveTable.get(brokerAddrInfo).getHaServerAddr());
}
} catch (Exception e) {
log.error("registerBroker Exception", e);
} finally {
this.lock.writeLock().unlock();
}
return result;
}
private Set<String> topicSetOfBrokerName(final String brokerName) {
Set<String> topicOfBroker = new HashSet<>();
for (final Entry<String, Map<String, QueueData>> entry : this.topicQueueTable.entrySet()) {
if (entry.getValue().containsKey(brokerName)) {
topicOfBroker.add(entry.getKey());
}
}
return topicOfBroker;
}
public BrokerMemberGroup getBrokerMemberGroup(String clusterName, String brokerName) {
BrokerMemberGroup groupMember = new BrokerMemberGroup(clusterName, brokerName);
try {
try {
this.lock.readLock().lockInterruptibly();
final BrokerData brokerData = this.brokerAddrTable.get(brokerName);
if (brokerData != null) {
groupMember.getBrokerAddrs().putAll(brokerData.getBrokerAddrs());
}
} finally {
this.lock.readLock().unlock();
}
} catch (Exception e) {
log.error("Get broker member group exception", e);
}
return groupMember;
}
public boolean isBrokerTopicConfigChanged(final String clusterName, final String brokerAddr,
final DataVersion dataVersion) {
DataVersion prev = queryBrokerTopicConfig(clusterName, brokerAddr);
return null == prev || !prev.equals(dataVersion);
}
public boolean isTopicConfigChanged(final String clusterName, final String brokerAddr,
final DataVersion dataVersion, String brokerName, String topic) {
boolean isChange = isBrokerTopicConfigChanged(clusterName, brokerAddr, dataVersion);
if (isChange) {
return true;
}
final Map<String, QueueData> queueDataMap = this.topicQueueTable.get(topic);
if (queueDataMap == null || queueDataMap.isEmpty()) {
return true;
}
// The topicQueueTable already contains the broker
return !queueDataMap.containsKey(brokerName);
}
public DataVersion queryBrokerTopicConfig(final String clusterName, final String brokerAddr) {
BrokerAddrInfo addrInfo = new BrokerAddrInfo(clusterName, brokerAddr);
BrokerLiveInfo prev = this.brokerLiveTable.get(addrInfo);
if (prev != null) {
return prev.getDataVersion();
}
return null;
}
public void updateBrokerInfoUpdateTimestamp(final String clusterName, final String brokerAddr) {
BrokerAddrInfo addrInfo = new BrokerAddrInfo(clusterName, brokerAddr);
BrokerLiveInfo prev = this.brokerLiveTable.get(addrInfo);
if (prev != null) {
prev.setLastUpdateTimestamp(System.currentTimeMillis());
}
}
private void createAndUpdateQueueData(final String brokerName, final TopicConfig topicConfig) {
QueueData queueData = new QueueData();
queueData.setBrokerName(brokerName);
queueData.setWriteQueueNums(topicConfig.getWriteQueueNums());
queueData.setReadQueueNums(topicConfig.getReadQueueNums());
queueData.setPerm(topicConfig.getPerm());
queueData.setTopicSysFlag(topicConfig.getTopicSysFlag());
Map<String, QueueData> queueDataMap = this.topicQueueTable.get(topicConfig.getTopicName());
if (null == queueDataMap) {
queueDataMap = new HashMap<>();
queueDataMap.put(brokerName, queueData);
this.topicQueueTable.put(topicConfig.getTopicName(), queueDataMap);
log.info("new topic registered, {} {}", topicConfig.getTopicName(), queueData);
} else {
final QueueData existedQD = queueDataMap.get(brokerName);
if (existedQD == null) {
queueDataMap.put(brokerName, queueData);
} else if (!existedQD.equals(queueData)) {
log.info("topic changed, {} OLD: {} NEW: {}", topicConfig.getTopicName(), existedQD,
queueData);
queueDataMap.put(brokerName, queueData);
}
}
}
public int wipeWritePermOfBrokerByLock(final String brokerName) {
try {
try {
this.lock.writeLock().lockInterruptibly();
return operateWritePermOfBroker(brokerName, RequestCode.WIPE_WRITE_PERM_OF_BROKER);
} finally {
this.lock.writeLock().unlock();
}
} catch (Exception e) {
log.error("wipeWritePermOfBrokerByLock Exception", e);
}
return 0;
}
public int addWritePermOfBrokerByLock(final String brokerName) {
try {
try {
this.lock.writeLock().lockInterruptibly();
return operateWritePermOfBroker(brokerName, RequestCode.ADD_WRITE_PERM_OF_BROKER);
} finally {
this.lock.writeLock().unlock();
}
} catch (Exception e) {
log.error("addWritePermOfBrokerByLock Exception", e);
}
return 0;
}
private int operateWritePermOfBroker(final String brokerName, final int requestCode) {
int topicCnt = 0;
for (Entry<String, Map<String, QueueData>> entry : this.topicQueueTable.entrySet()) {
Map<String, QueueData> qdMap = entry.getValue();
final QueueData qd = qdMap.get(brokerName);
if (qd == null) {
continue;
}
int perm = qd.getPerm();
switch (requestCode) {
case RequestCode.WIPE_WRITE_PERM_OF_BROKER:
perm &= ~PermName.PERM_WRITE;
break;
case RequestCode.ADD_WRITE_PERM_OF_BROKER:
perm = PermName.PERM_READ | PermName.PERM_WRITE;
break;
}
qd.setPerm(perm);
topicCnt++;
}
return topicCnt;
}
public void unregisterBroker(
final String clusterName,
final String brokerAddr,
final String brokerName,
final long brokerId) {
UnRegisterBrokerRequestHeader unRegisterBrokerRequest = new UnRegisterBrokerRequestHeader();
unRegisterBrokerRequest.setClusterName(clusterName);
unRegisterBrokerRequest.setBrokerAddr(brokerAddr);
unRegisterBrokerRequest.setBrokerName(brokerName);
unRegisterBrokerRequest.setBrokerId(brokerId);
unRegisterBroker(Sets.newHashSet(unRegisterBrokerRequest));
}
public void unRegisterBroker(Set<UnRegisterBrokerRequestHeader> unRegisterRequests) {
try {
Set<String> removedBroker = new HashSet<>();
Set<String> reducedBroker = new HashSet<>();
Map<String, BrokerStatusChangeInfo> needNotifyBrokerMap = new HashMap<>();
this.lock.writeLock().lockInterruptibly();
for (final UnRegisterBrokerRequestHeader unRegisterRequest : unRegisterRequests) {
final String brokerName = unRegisterRequest.getBrokerName();
final String clusterName = unRegisterRequest.getClusterName();
final String brokerAddr = unRegisterRequest.getBrokerAddr();
BrokerAddrInfo brokerAddrInfo = new BrokerAddrInfo(clusterName, brokerAddr);
BrokerLiveInfo brokerLiveInfo = this.brokerLiveTable.remove(brokerAddrInfo);
log.info("unregisterBroker, remove from brokerLiveTable {}, {}",
brokerLiveInfo != null ? "OK" : "Failed",
brokerAddrInfo
);
this.filterServerTable.remove(brokerAddrInfo);
boolean removeBrokerName = false;
boolean isMinBrokerIdChanged = false;
BrokerData brokerData = this.brokerAddrTable.get(brokerName);
if (null != brokerData) {
if (!brokerData.getBrokerAddrs().isEmpty() &&
unRegisterRequest.getBrokerId().equals(Collections.min(brokerData.getBrokerAddrs().keySet()))) {
isMinBrokerIdChanged = true;
}
boolean removed = brokerData.getBrokerAddrs().entrySet().removeIf(item -> item.getValue().equals(brokerAddr));
log.info("unregisterBroker, remove addr from brokerAddrTable {}, {}",
removed ? "OK" : "Failed",
brokerAddrInfo
);
if (brokerData.getBrokerAddrs().isEmpty()) {
this.brokerAddrTable.remove(brokerName);
log.info("unregisterBroker, remove name from brokerAddrTable OK, {}",
brokerName
);
removeBrokerName = true;
} else if (isMinBrokerIdChanged) {
needNotifyBrokerMap.put(brokerName, new BrokerStatusChangeInfo(
brokerData.getBrokerAddrs(), brokerAddr, null));
}
}
if (removeBrokerName) {
Set<String> nameSet = this.clusterAddrTable.get(clusterName);
if (nameSet != null) {
boolean removed = nameSet.remove(brokerName);
log.info("unregisterBroker, remove name from clusterAddrTable {}, {}",
removed ? "OK" : "Failed",
brokerName);
if (nameSet.isEmpty()) {
this.clusterAddrTable.remove(clusterName);
log.info("unregisterBroker, remove cluster from clusterAddrTable {}",
clusterName
);
}
}
removedBroker.add(brokerName);
} else {
reducedBroker.add(brokerName);
}
}
cleanTopicByUnRegisterRequests(removedBroker, reducedBroker);
if (!needNotifyBrokerMap.isEmpty() && namesrvConfig.isNotifyMinBrokerIdChanged()) {
notifyMinBrokerIdChanged(needNotifyBrokerMap);
}
} catch (Exception e) {
log.error("unregisterBroker Exception", e);
} finally {
this.lock.writeLock().unlock();
}
}
private void cleanTopicByUnRegisterRequests(Set<String> removedBroker, Set<String> reducedBroker) {
Iterator<Entry<String, Map<String, QueueData>>> itMap = this.topicQueueTable.entrySet().iterator();
while (itMap.hasNext()) {
Entry<String, Map<String, QueueData>> entry = itMap.next();
String topic = entry.getKey();
Map<String, QueueData> queueDataMap = entry.getValue();
for (final String brokerName : removedBroker) {
final QueueData removedQD = queueDataMap.remove(brokerName);
if (removedQD != null) {
log.debug("removeTopicByBrokerName, remove one broker's topic {} {}", topic, removedQD);
}
}
if (queueDataMap.isEmpty()) {
log.debug("removeTopicByBrokerName, remove the topic all queue {}", topic);
itMap.remove();
}
for (final String brokerName : reducedBroker) {
final QueueData queueData = queueDataMap.get(brokerName);
if (queueData != null) {
if (this.brokerAddrTable.get(brokerName).isEnableActingMaster()) {
// Master has been unregistered, wipe the write perm
if (isNoMasterExists(brokerName)) {
queueData.setPerm(queueData.getPerm() & (~PermName.PERM_WRITE));
}
}
}
}
}
}
private boolean isNoMasterExists(String brokerName) {
final BrokerData brokerData = this.brokerAddrTable.get(brokerName);
if (brokerData == null) {
return true;
}
if (brokerData.getBrokerAddrs().size() == 0) {
return true;
}
return Collections.min(brokerData.getBrokerAddrs().keySet()) > 0;
}
public TopicRouteData pickupTopicRouteData(final String topic) {
TopicRouteData topicRouteData = new TopicRouteData();
boolean foundQueueData = false;
boolean foundBrokerData = false;
List<BrokerData> brokerDataList = new LinkedList<>();
topicRouteData.setBrokerDatas(brokerDataList);
HashMap<String, List<String>> filterServerMap = new HashMap<>();
topicRouteData.setFilterServerTable(filterServerMap);
try {
this.lock.readLock().lockInterruptibly();
Map<String, QueueData> queueDataMap = this.topicQueueTable.get(topic);
if (queueDataMap != null) {
topicRouteData.setQueueDatas(new ArrayList<>(queueDataMap.values()));
foundQueueData = true;
Set<String> brokerNameSet = new HashSet<>(queueDataMap.keySet());
for (String brokerName : brokerNameSet) {
BrokerData brokerData = this.brokerAddrTable.get(brokerName);
if (null == brokerData) {
continue;
}
BrokerData brokerDataClone = new BrokerData(brokerData);
brokerDataList.add(brokerDataClone);
foundBrokerData = true;
if (filterServerTable.isEmpty()) {
continue;
}
for (final String brokerAddr : brokerDataClone.getBrokerAddrs().values()) {
BrokerAddrInfo brokerAddrInfo = new BrokerAddrInfo(brokerDataClone.getCluster(), brokerAddr);
List<String> filterServerList = this.filterServerTable.get(brokerAddrInfo);
filterServerMap.put(brokerAddr, filterServerList);
}
}
}
} catch (Exception e) {
log.error("pickupTopicRouteData Exception", e);
} finally {
this.lock.readLock().unlock();
}
log.debug("pickupTopicRouteData {} {}", topic, topicRouteData);
if (foundBrokerData && foundQueueData) {
topicRouteData.setTopicQueueMappingByBroker(this.topicQueueMappingInfoTable.get(topic));
if (!namesrvConfig.isSupportActingMaster()) {
return topicRouteData;
}
if (topic.startsWith(TopicValidator.SYNC_BROKER_MEMBER_GROUP_PREFIX)) {
return topicRouteData;
}
if (topicRouteData.getBrokerDatas().size() == 0 || topicRouteData.getQueueDatas().size() == 0) {
return topicRouteData;
}
boolean needActingMaster = false;
for (final BrokerData brokerData : topicRouteData.getBrokerDatas()) {
if (brokerData.getBrokerAddrs().size() != 0
&& !brokerData.getBrokerAddrs().containsKey(MixAll.MASTER_ID)) {
needActingMaster = true;
break;
}
}
if (!needActingMaster) {
return topicRouteData;
}
for (final BrokerData brokerData : topicRouteData.getBrokerDatas()) {
final HashMap<Long, String> brokerAddrs = brokerData.getBrokerAddrs();
if (brokerAddrs.size() == 0 || brokerAddrs.containsKey(MixAll.MASTER_ID) || !brokerData.isEnableActingMaster()) {
continue;
}
// No master
for (final QueueData queueData : topicRouteData.getQueueDatas()) {
if (queueData.getBrokerName().equals(brokerData.getBrokerName())) {
if (!PermName.isWriteable(queueData.getPerm())) {
final Long minBrokerId = Collections.min(brokerAddrs.keySet());
final String actingMasterAddr = brokerAddrs.remove(minBrokerId);
brokerAddrs.put(MixAll.MASTER_ID, actingMasterAddr);
}
break;
}
}
}
return topicRouteData;
}
return null;
}
public void scanNotActiveBroker() {
try {
log.info("start scanNotActiveBroker");
for (Entry<BrokerAddrInfo, BrokerLiveInfo> next : this.brokerLiveTable.entrySet()) {
long last = next.getValue().getLastUpdateTimestamp();
long timeoutMillis = next.getValue().getHeartbeatTimeoutMillis();
if ((last + timeoutMillis) < System.currentTimeMillis()) {
RemotingHelper.closeChannel(next.getValue().getChannel());
log.warn("The broker channel expired, {} {}ms", next.getKey(), timeoutMillis);
this.onChannelDestroy(next.getKey());
}
}
} catch (Exception e) {
log.error("scanNotActiveBroker exception", e);
}
}
public void onChannelDestroy(BrokerAddrInfo brokerAddrInfo) {
UnRegisterBrokerRequestHeader unRegisterRequest = new UnRegisterBrokerRequestHeader();
boolean needUnRegister = false;
if (brokerAddrInfo != null) {
try {
try {
this.lock.readLock().lockInterruptibly();
needUnRegister = setupUnRegisterRequest(unRegisterRequest, brokerAddrInfo);
} finally {
this.lock.readLock().unlock();
}
} catch (Exception e) {
log.error("onChannelDestroy Exception", e);
}
}
if (needUnRegister) {
boolean result = this.submitUnRegisterBrokerRequest(unRegisterRequest);
log.info("the broker's channel destroyed, submit the unregister request at once, " +
"broker info: {}, submit result: {}", unRegisterRequest, result);
}
}
public void onChannelDestroy(Channel channel) {
UnRegisterBrokerRequestHeader unRegisterRequest = new UnRegisterBrokerRequestHeader();
BrokerAddrInfo brokerAddrFound = null;
boolean needUnRegister = false;
if (channel != null) {
try {
try {
this.lock.readLock().lockInterruptibly();
for (Entry<BrokerAddrInfo, BrokerLiveInfo> entry : this.brokerLiveTable.entrySet()) {
if (entry.getValue().getChannel() == channel) {
brokerAddrFound = entry.getKey();
break;
}
}
if (brokerAddrFound != null) {
needUnRegister = setupUnRegisterRequest(unRegisterRequest, brokerAddrFound);
}
} finally {
this.lock.readLock().unlock();
}
} catch (Exception e) {
log.error("onChannelDestroy Exception", e);
}
}
if (needUnRegister) {
boolean result = this.submitUnRegisterBrokerRequest(unRegisterRequest);
log.info("the broker's channel destroyed, submit the unregister request at once, " +
"broker info: {}, submit result: {}", unRegisterRequest, result);
}
}
private boolean setupUnRegisterRequest(UnRegisterBrokerRequestHeader unRegisterRequest,
BrokerAddrInfo brokerAddrInfo) {
unRegisterRequest.setClusterName(brokerAddrInfo.getClusterName());
unRegisterRequest.setBrokerAddr(brokerAddrInfo.getBrokerAddr());
for (Entry<String, BrokerData> stringBrokerDataEntry : this.brokerAddrTable.entrySet()) {
BrokerData brokerData = stringBrokerDataEntry.getValue();
if (!brokerAddrInfo.getClusterName().equals(brokerData.getCluster())) {
continue;
}
for (Entry<Long, String> entry : brokerData.getBrokerAddrs().entrySet()) {
Long brokerId = entry.getKey();
String brokerAddr = entry.getValue();
if (brokerAddr.equals(brokerAddrInfo.getBrokerAddr())) {
unRegisterRequest.setBrokerName(brokerData.getBrokerName());
unRegisterRequest.setBrokerId(brokerId);
return true;
}
}
}
return false;
}
private void notifyMinBrokerIdChanged(Map<String, BrokerStatusChangeInfo> needNotifyBrokerMap)
throws InterruptedException, RemotingConnectException, RemotingTimeoutException, RemotingSendRequestException,
RemotingTooMuchRequestException {
for (String brokerName : needNotifyBrokerMap.keySet()) {
BrokerStatusChangeInfo brokerStatusChangeInfo = needNotifyBrokerMap.get(brokerName);
BrokerData brokerData = brokerAddrTable.get(brokerName);
if (brokerData != null && brokerData.isEnableActingMaster()) {
notifyMinBrokerIdChanged(brokerStatusChangeInfo.getBrokerAddrs(),
brokerStatusChangeInfo.getOfflineBrokerAddr(), brokerStatusChangeInfo.getHaBrokerAddr());
}
}
}
private void notifyMinBrokerIdChanged(Map<Long, String> brokerAddrMap, String offlineBrokerAddr,
String haBrokerAddr)
throws InterruptedException, RemotingSendRequestException, RemotingTimeoutException,
RemotingTooMuchRequestException, RemotingConnectException {
if (brokerAddrMap == null || brokerAddrMap.isEmpty() || this.namesrvController == null) {
return;
}
NotifyMinBrokerIdChangeRequestHeader requestHeader = new NotifyMinBrokerIdChangeRequestHeader();
long minBrokerId = Collections.min(brokerAddrMap.keySet());
requestHeader.setMinBrokerId(minBrokerId);
requestHeader.setMinBrokerAddr(brokerAddrMap.get(minBrokerId));
requestHeader.setOfflineBrokerAddr(offlineBrokerAddr);
requestHeader.setHaBrokerAddr(haBrokerAddr);
List<String> brokerAddrsNotify = chooseBrokerAddrsToNotify(brokerAddrMap, offlineBrokerAddr);
log.info("min broker id changed to {}, notify {}, offline broker addr {}", minBrokerId, brokerAddrsNotify, offlineBrokerAddr);
RemotingCommand request =
RemotingCommand.createRequestCommand(RequestCode.NOTIFY_MIN_BROKER_ID_CHANGE, requestHeader);
for (String brokerAddr : brokerAddrsNotify) {
this.namesrvController.getRemotingClient().invokeOneway(brokerAddr, request, 300);
}
}
private List<String> chooseBrokerAddrsToNotify(Map<Long, String> brokerAddrMap, String offlineBrokerAddr) {
if (offlineBrokerAddr != null || brokerAddrMap.size() == 1) {
// notify the reset brokers.
return new ArrayList<>(brokerAddrMap.values());
}
// new broker registered, notify previous brokers.
long minBrokerId = Collections.min(brokerAddrMap.keySet());
List<String> brokerAddrList = new ArrayList<>();
for (Long brokerId : brokerAddrMap.keySet()) {
if (brokerId != minBrokerId) {
brokerAddrList.add(brokerAddrMap.get(brokerId));
}
}
return brokerAddrList;
}
/**
 * Dumps the sizes and full contents of the four routing tables (topicQueueTable,
 * brokerAddrTable, brokerLiveTable, clusterAddrTable) to the log, under the read lock.
 * For test/debugging only — output volume is proportional to table sizes.
 */
public void printAllPeriodically() {
    try {
        try {
            this.lock.readLock().lockInterruptibly();
            log.info("--------------------------------------------------------");
            {
                log.info("topicQueueTable SIZE: {}", this.topicQueueTable.size());
                for (Entry<String, Map<String, QueueData>> next : this.topicQueueTable.entrySet()) {
                    log.info("topicQueueTable Topic: {} {}", next.getKey(), next.getValue());
                }
            }
            {
                log.info("brokerAddrTable SIZE: {}", this.brokerAddrTable.size());
                for (Entry<String, BrokerData> next : this.brokerAddrTable.entrySet()) {
                    log.info("brokerAddrTable brokerName: {} {}", next.getKey(), next.getValue());
                }
            }
            {
                log.info("brokerLiveTable SIZE: {}", this.brokerLiveTable.size());
                for (Entry<BrokerAddrInfo, BrokerLiveInfo> next : this.brokerLiveTable.entrySet()) {
                    log.info("brokerLiveTable brokerAddr: {} {}", next.getKey(), next.getValue());
                }
            }
            {
                log.info("clusterAddrTable SIZE: {}", this.clusterAddrTable.size());
                for (Entry<String, Set<String>> next : this.clusterAddrTable.entrySet()) {
                    log.info("clusterAddrTable clusterName: {} {}", next.getKey(), next.getValue());
                }
            }
        } finally {
            this.lock.readLock().unlock();
        }
    } catch (Exception e) {
        log.error("printAllPeriodically Exception", e);
    }
}
/**
 * Builds the "system topic" list: every cluster name plus every broker name currently
 * known, with one broker address attached so clients have a bootstrap endpoint.
 *
 * @return topic list containing cluster names and broker names; brokerAddr is set to the
 *         first available broker address found, if any
 */
public TopicList getSystemTopicList() {
    TopicList topicList = new TopicList();
    try {
        this.lock.readLock().lockInterruptibly();
        for (Map.Entry<String, Set<String>> entry : clusterAddrTable.entrySet()) {
            topicList.getTopicList().add(entry.getKey());
            topicList.getTopicList().addAll(entry.getValue());
        }
        // Attach one reachable broker address; any broker with at least one address will do.
        if (!brokerAddrTable.isEmpty()) {
            for (BrokerData brokerData : brokerAddrTable.values()) {
                HashMap<Long, String> brokerAddrs = brokerData.getBrokerAddrs();
                if (brokerAddrs != null && !brokerAddrs.isEmpty()) {
                    topicList.setBrokerAddr(brokerAddrs.values().iterator().next());
                    break;
                }
            }
        }
    } catch (Exception e) {
        log.error("getSystemTopicList Exception", e);
    } finally {
        this.lock.readLock().unlock();
    }
    return topicList;
}
/**
 * Returns all topics that have at least one queue hosted on a broker of the given cluster.
 *
 * @param cluster cluster name to look up
 * @return topics of that cluster; empty list when the cluster is unknown
 */
public TopicList getTopicsByCluster(String cluster) {
    TopicList topicList = new TopicList();
    try {
        try {
            this.lock.readLock().lockInterruptibly();
            final Set<String> brokerNameSet = this.clusterAddrTable.get(cluster);
            if (brokerNameSet == null) {
                // Unknown cluster: previously this fell into the enhanced-for and the
                // resulting NPE was caught and logged as an error. Return empty instead.
                return topicList;
            }
            for (String brokerName : brokerNameSet) {
                for (Entry<String, Map<String, QueueData>> topicEntry : this.topicQueueTable.entrySet()) {
                    String topic = topicEntry.getKey();
                    Map<String, QueueData> queueDataMap = topicEntry.getValue();
                    final QueueData qd = queueDataMap.get(brokerName);
                    if (qd != null) {
                        topicList.getTopicList().add(topic);
                    }
                }
            }
        } finally {
            // Released on every path, including the early return above.
            this.lock.readLock().unlock();
        }
    } catch (Exception e) {
        log.error("getTopicsByCluster Exception", e);
    }
    return topicList;
}
/**
 * Returns all topics whose queue data carries the "unit" system flag.
 * The flag is read from one representative QueueData per topic.
 */
public TopicList getUnitTopics() {
    final TopicList result = new TopicList();
    try {
        this.lock.readLock().lockInterruptibly();
        for (Entry<String, Map<String, QueueData>> entry : this.topicQueueTable.entrySet()) {
            final Map<String, QueueData> queueDataByBroker = entry.getValue();
            if (queueDataByBroker == null || queueDataByBroker.isEmpty()) {
                continue;
            }
            final QueueData sample = queueDataByBroker.values().iterator().next();
            if (TopicSysFlag.hasUnitFlag(sample.getTopicSysFlag())) {
                result.getTopicList().add(entry.getKey());
            }
        }
    } catch (Exception e) {
        log.error("getUnitTopics Exception", e);
    } finally {
        this.lock.readLock().unlock();
    }
    return result;
}
/**
 * Returns all topics whose queue data carries the "unit subscription" system flag.
 * The flag is read from one representative QueueData per topic.
 */
public TopicList getHasUnitSubTopicList() {
    final TopicList result = new TopicList();
    try {
        this.lock.readLock().lockInterruptibly();
        for (Entry<String, Map<String, QueueData>> entry : this.topicQueueTable.entrySet()) {
            final Map<String, QueueData> queueDataByBroker = entry.getValue();
            if (queueDataByBroker == null || queueDataByBroker.isEmpty()) {
                continue;
            }
            final int sysFlag = queueDataByBroker.values().iterator().next().getTopicSysFlag();
            if (TopicSysFlag.hasUnitSubFlag(sysFlag)) {
                result.getTopicList().add(entry.getKey());
            }
        }
    } catch (Exception e) {
        log.error("getHasUnitSubTopicList Exception", e);
    } finally {
        this.lock.readLock().unlock();
    }
    return result;
}
/**
 * Returns all topics that have the "unit subscription" flag set but NOT the "unit" flag,
 * judged from one representative QueueData per topic.
 *
 * @return matching topics; empty list on error (the error is logged)
 */
public TopicList getHasUnitSubUnUnitTopicList() {
    TopicList topicList = new TopicList();
    try {
        this.lock.readLock().lockInterruptibly();
        for (Entry<String, Map<String, QueueData>> topicEntry : this.topicQueueTable.entrySet()) {
            String topic = topicEntry.getKey();
            Map<String, QueueData> queueDatas = topicEntry.getValue();
            if (queueDatas != null && !queueDatas.isEmpty()) {
                // Evaluate the representative flag once instead of building two iterators
                // over queueDatas.values() for the two flag checks.
                final int sysFlag = queueDatas.values().iterator().next().getTopicSysFlag();
                if (!TopicSysFlag.hasUnitFlag(sysFlag) && TopicSysFlag.hasUnitSubFlag(sysFlag)) {
                    topicList.getTopicList().add(topic);
                }
            }
        }
    } catch (Exception e) {
        log.error("getHasUnitSubUnUnitTopicList Exception", e);
    } finally {
        this.lock.readLock().unlock();
    }
    return topicList;
}
}
/**
* broker address information
*/
| RouteInfoManager |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/serializer/features/NotWriteDefaultValueFieldTest.java | {
"start": 593,
"end": 785
} | class ____ {
@JSONField(serialzeFeatures = SerializerFeature.NotWriteDefaultValue)
public int id;
public Model(int id) {
this.id = id;
}
}
}
| Model |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/state/internals/AbstractRocksDBSegmentedBytesStoreTest.java | {
"start": 3920,
"end": 44274
} | class ____<S extends Segment> {
private final long windowSizeForTimeWindow = 500;
private InternalMockProcessorContext<?, ?> context;
private AbstractRocksDBSegmentedBytesStore<S> bytesStore;
private File stateDir;
private final Window[] windows = new Window[4];
private Window nextSegmentWindow;
final long retention = 1000;
final long segmentInterval = 60_000L;
final String storeName = "bytes-store";
public SegmentedBytesStore.KeySchema schema;
/**
 * Supplies the two key schemas the parameterized tests run against:
 * session windows and time windows.
 */
public static Stream<Arguments> getKeySchemas() {
    return Stream.of(new SessionKeySchema(), new WindowKeySchema())
        .map(Arguments::of);
}
/**
 * Per-schema setup: builds four windows that all land in segment 0, plus one
 * {@code nextSegmentWindow} far enough in the future that writing it advances stream time
 * past the first segment's retention and expires it. Then opens the store against a fresh
 * temp directory and mock processor context.
 */
public void before(final SegmentedBytesStore.KeySchema schema) {
    this.schema = schema;
    if (schema instanceof SessionKeySchema) {
        windows[0] = new SessionWindow(10L, 10L);
        windows[1] = new SessionWindow(500L, 1000L);
        windows[2] = new SessionWindow(1_000L, 1_500L);
        windows[3] = new SessionWindow(30_000L, 60_000L);
        // All four of the previous windows will go into segment 1.
        // The nextSegmentWindow is computed be a high enough time that when it gets written
        // to the segment store, it will advance stream time past the first segment's retention time and
        // expire it.
        nextSegmentWindow = new SessionWindow(segmentInterval + retention, segmentInterval + retention);
    }
    if (schema instanceof WindowKeySchema) {
        windows[0] = timeWindowForSize(10L, windowSizeForTimeWindow);
        windows[1] = timeWindowForSize(500L, windowSizeForTimeWindow);
        windows[2] = timeWindowForSize(1_000L, windowSizeForTimeWindow);
        windows[3] = timeWindowForSize(60_000L, windowSizeForTimeWindow);
        // All four of the previous windows will go into segment 1.
        // The nextSegmentWindow is computed be a high enough time that when it gets written
        // to the segment store, it will advance stream time past the first segment's retention time and
        // expire it.
        nextSegmentWindow = timeWindowForSize(segmentInterval + retention, windowSizeForTimeWindow);
    }
    bytesStore = getBytesStore();
    stateDir = TestUtils.tempDirectory();
    context = new InternalMockProcessorContext<>(
        stateDir,
        Serdes.String(),
        Serdes.Long(),
        new MockRecordCollector(),
        new ThreadCache(new LogContext("testCache "), 0, new MockStreamsMetrics(new Metrics()))
    );
    bytesStore.init(context, bytesStore);
}
@AfterEach
public void close() {
    // Release the RocksDB resources held by the store after every test.
    bytesStore.close();
}
/** Creates the concrete segmented bytes store under test. */
abstract AbstractRocksDBSegmentedBytesStore<S> getBytesStore();

/** Creates a segments collection matching the store implementation (used to derive names). */
abstract AbstractSegments<S> newSegments();
/**
 * put() followed by fetch() over single-key, bounded-range, and open-ended key ranges.
 * Writing windows[3] advances observed stream time to 60,000, expiring the earlier windows.
 */
@ParameterizedTest
@MethodSource("getKeySchemas")
public void shouldPutAndFetch(final SegmentedBytesStore.KeySchema schema) {
    before(schema);
    final String keyA = "a";
    final String keyB = "b";
    final String keyC = "c";
    bytesStore.put(serializeKey(new Windowed<>(keyA, windows[0])), serializeValue(10));
    bytesStore.put(serializeKey(new Windowed<>(keyA, windows[1])), serializeValue(50));
    bytesStore.put(serializeKey(new Windowed<>(keyB, windows[2])), serializeValue(100));
    bytesStore.put(serializeKey(new Windowed<>(keyC, windows[3])), serializeValue(200));
    try (final KeyValueIterator<Bytes, byte[]> values = bytesStore.fetch(
        Bytes.wrap(keyA.getBytes()), 0, windows[2].start())) {
        // All Records expired as observed stream time = 60000 implying actual-from = 59001 (60000 - 1000 + 1)
        // for WindowKeySchema, to = 60000 while for SessionKeySchema, to = 30000
        assertEquals(Collections.emptyList(), toListAndCloseIterator(values));
    }
    try (final KeyValueIterator<Bytes, byte[]> values = bytesStore.fetch(
        Bytes.wrap(keyA.getBytes()), Bytes.wrap(keyB.getBytes()), 0, windows[2].start())) {
        // All Records expired as observed stream time = 60000 implying actual-from = 59001 (60000 - 1000 + 1)
        // for WindowKeySchema, to = 60000 while for SessionKeySchema, to = 30000
        assertEquals(Collections.emptyList(), toListAndCloseIterator(values));
    }
    try (final KeyValueIterator<Bytes, byte[]> values = bytesStore.fetch(
        null, Bytes.wrap(keyB.getBytes()), 0, windows[2].start())) {
        // All Records expired as observed stream time = 60000 implying actual-from = 59001 (60000 - 1000 + 1)
        // for WindowKeySchema, to = 60000 while for SessionKeySchema, to = 30000
        assertEquals(Collections.emptyList(), toListAndCloseIterator(values));
    }
    try (final KeyValueIterator<Bytes, byte[]> values = bytesStore.fetch(
        Bytes.wrap(keyB.getBytes()), null, 0, windows[3].start())) {
        // Only 1 record not expired as observed stream time = 60000 implying actual-from = 59001 (60000 - 1000 + 1)
        // for WindowKeySchema, to = 60000 while for SessionKeySchema, to = 30000
        final List<KeyValue<Windowed<String>, Long>> expected = Collections.singletonList(
            KeyValue.pair(new Windowed<>(keyC, windows[3]), 200L)
        );
        assertEquals(expected, toListAndCloseIterator(values));
    }
    try (final KeyValueIterator<Bytes, byte[]> values = bytesStore.fetch(
        null, null, 0, windows[3].start())) {
        // Only 1 record not expired as observed stream time = 60000 implying actual-from = 59001 (60000 - 1000 + 1)
        // for WindowKeySchema, to = 60000 while for SessionKeySchema, to = 30000
        final List<KeyValue<Windowed<String>, Long>> expected = Collections.singletonList(
            KeyValue.pair(new Windowed<>(keyC, windows[3]), 200L)
        );
        assertEquals(expected, toListAndCloseIterator(values));
    }
}
/**
 * Mirror of shouldPutAndFetch using backwardFetch(); expiry expectations are identical,
 * only the iteration direction differs.
 */
@ParameterizedTest
@MethodSource("getKeySchemas")
public void shouldPutAndBackwardFetch(final SegmentedBytesStore.KeySchema schema) {
    before(schema);
    final String keyA = "a";
    final String keyB = "b";
    final String keyC = "c";
    bytesStore.put(serializeKey(new Windowed<>(keyA, windows[0])), serializeValue(10));
    bytesStore.put(serializeKey(new Windowed<>(keyA, windows[1])), serializeValue(50));
    bytesStore.put(serializeKey(new Windowed<>(keyB, windows[2])), serializeValue(100));
    bytesStore.put(serializeKey(new Windowed<>(keyC, windows[3])), serializeValue(200));
    try (final KeyValueIterator<Bytes, byte[]> values = bytesStore.backwardFetch(
        Bytes.wrap(keyA.getBytes()), 0, windows[2].start())) {
        // All Records expired as observed stream time = 60000 implying actual-from = 59001 (60000 - 1000 + 1)
        // for WindowKeySchema, to = 60000 while for SessionKeySchema, to = 30000
        assertEquals(Collections.emptyList(), toListAndCloseIterator(values));
    }
    try (final KeyValueIterator<Bytes, byte[]> values = bytesStore.backwardFetch(
        Bytes.wrap(keyA.getBytes()), Bytes.wrap(keyB.getBytes()), 0, windows[2].start())) {
        // All Records expired as observed stream time = 60000 implying actual-from = 59001 (60000 - 1000 + 1)
        // for WindowKeySchema, to = 60000 while for SessionKeySchema, to = 30000
        assertEquals(Collections.emptyList(), toListAndCloseIterator(values));
    }
    try (final KeyValueIterator<Bytes, byte[]> values = bytesStore.backwardFetch(
        null, Bytes.wrap(keyB.getBytes()), 0, windows[2].start())) {
        // All Records expired as observed stream time = 60000 implying actual-from = 59001 (60000 - 1000 + 1)
        // for WindowKeySchema, to = 60000 while for SessionKeySchema, to = 30000
        assertEquals(Collections.emptyList(), toListAndCloseIterator(values));
    }
    try (final KeyValueIterator<Bytes, byte[]> values = bytesStore.backwardFetch(
        Bytes.wrap(keyB.getBytes()), null, 0, windows[3].start())) {
        // Only 1 record not expired as observed stream time = 60000 implying actual-from = 59001 (60000 - 1000 + 1)
        // for WindowKeySchema, to = 60000 while for SessionKeySchema, to = 30000
        final List<KeyValue<Windowed<String>, Long>> expected = Collections.singletonList(
            KeyValue.pair(new Windowed<>(keyC, windows[3]), 200L)
        );
        assertEquals(expected, toListAndCloseIterator(values));
    }
    try (final KeyValueIterator<Bytes, byte[]> values = bytesStore.backwardFetch(
        null, null, 0, windows[3].start())) {
        // Only 1 record not expired as observed stream time = 60000 implying actual-from = 59001 (60000 - 1000 + 1)
        // for WindowKeySchema, to = 60000 while for SessionKeySchema, to = 30000
        final List<KeyValue<Windowed<String>, Long>> expected = Collections.singletonList(
            KeyValue.pair(new Windowed<>(keyC, windows[3]), 200L)
        );
        assertEquals(expected, toListAndCloseIterator(values));
    }
}
/**
 * fetch() with a bounded time range [1, 999]; the expected result differs per schema
 * because the two schemas observe different stream times (1000 vs 1500) after the puts.
 */
@ParameterizedTest
@MethodSource("getKeySchemas")
public void shouldFindValuesWithinRange(final SegmentedBytesStore.KeySchema schema) {
    before(schema);
    final String key = "a";
    bytesStore.put(serializeKey(new Windowed<>(key, windows[0])), serializeValue(10));
    bytesStore.put(serializeKey(new Windowed<>(key, windows[1])), serializeValue(50));
    bytesStore.put(serializeKey(new Windowed<>(key, windows[2])), serializeValue(100));
    try (final KeyValueIterator<Bytes, byte[]> results = bytesStore.fetch(Bytes.wrap(key.getBytes()), 1, 999)) {
        final List<KeyValue<Windowed<String>, Long>> expected = new ArrayList<>();
        /*
         * For WindowKeySchema, the observedStreamTime is 1000 which means 1 extra record gets returned while for
         * SessionKeySchema, it's 1500. Which changes the actual-from while fetching. In case of SessionKeySchema, the
         * fetch happens from 501-999 while for WindowKeySchema it's from 1-999.
         */
        if (schema instanceof SessionKeySchema) {
            expected.add(KeyValue.pair(new Windowed<>(key, windows[1]), 50L));
        } else {
            expected.add(KeyValue.pair(new Windowed<>(key, windows[0]), 10L));
            expected.add(KeyValue.pair(new Windowed<>(key, windows[1]), 50L));
        }
        assertEquals(expected, toListAndCloseIterator(results));
    }
}
/**
 * Removing a previously written window leaves nothing to fetch for that key/range.
 */
@ParameterizedTest
@MethodSource("getKeySchemas")
public void shouldRemove(final SegmentedBytesStore.KeySchema schema) {
    before(schema);
    final Bytes firstWindowKey = serializeKey(new Windowed<>("a", windows[0]));
    final Bytes secondWindowKey = serializeKey(new Windowed<>("a", windows[1]));
    bytesStore.put(firstWindowKey, serializeValue(30));
    bytesStore.put(secondWindowKey, serializeValue(50));
    bytesStore.remove(firstWindowKey);
    try (final KeyValueIterator<Bytes, byte[]> iterator = bytesStore.fetch(Bytes.wrap("a".getBytes()), 0, 100)) {
        assertFalse(iterator.hasNext());
    }
}
/**
 * Writing windows[3] (start 30,000/60,000) rolls the store from segment 0 into segment 1;
 * the subsequent fetch returns nothing because stream time 60,000 expired the range queried.
 */
@ParameterizedTest
@MethodSource("getKeySchemas")
public void shouldRollSegments(final SegmentedBytesStore.KeySchema schema) {
    before(schema);
    // just to validate directories
    final AbstractSegments<S> segments = newSegments();
    final String key = "a";
    bytesStore.put(serializeKey(new Windowed<>(key, windows[0])), serializeValue(50));
    bytesStore.put(serializeKey(new Windowed<>(key, windows[1])), serializeValue(100));
    bytesStore.put(serializeKey(new Windowed<>(key, windows[2])), serializeValue(500));
    assertEquals(Collections.singleton(segments.segmentName(0)), segmentDirs());
    bytesStore.put(serializeKey(new Windowed<>(key, windows[3])), serializeValue(1000));
    assertEquals(Set.of(segments.segmentName(0), segments.segmentName(1)), segmentDirs());
    final List<KeyValue<Windowed<String>, Long>> results = toListAndCloseIterator(bytesStore.fetch(Bytes.wrap(key.getBytes()), 0, 1500));
    /*
     * All records expired as observed stream time = 60,000 which sets actual-from to 59001(60,000 - 1000 + 1). to = 1500.
     */
    assertEquals(
        Collections.emptyList(),
        results
    );
    segments.close();
}
/**
 * all() spans both segments but only the record in the live retention window
 * (windows[3]) survives; the earlier segment's record is expired.
 */
@ParameterizedTest
@MethodSource("getKeySchemas")
public void shouldGetAllSegments(final SegmentedBytesStore.KeySchema schema) {
    before(schema);
    // just to validate directories
    final AbstractSegments<S> segments = newSegments();
    final String key = "a";
    bytesStore.put(serializeKey(new Windowed<>(key, windows[0])), serializeValue(50L));
    assertEquals(Collections.singleton(segments.segmentName(0)), segmentDirs());
    bytesStore.put(serializeKey(new Windowed<>(key, windows[3])), serializeValue(100L));
    assertEquals(
        Set.of(
            segments.segmentName(0),
            segments.segmentName(1)
        ),
        segmentDirs()
    );
    /*
     * Only 1 record returned. observed stream time = 60000, actual from = 59001 (60000 - 1000 + 1) and to = Long.MAX.
     */
    final List<KeyValue<Windowed<String>, Long>> results = toListAndCloseIterator(bytesStore.all());
    assertEquals(
        Collections.singletonList(
            KeyValue.pair(new Windowed<>(key, windows[3]), 100L)
        ),
        results
    );
    segments.close();
}
/**
 * fetchAll() over [0, 60000] behaves like all(): only the windows[3] record survives
 * retention after stream time advanced to 60,000.
 */
@ParameterizedTest
@MethodSource("getKeySchemas")
public void shouldFetchAllSegments(final SegmentedBytesStore.KeySchema schema) {
    before(schema);
    // just to validate directories
    final AbstractSegments<S> segments = newSegments();
    final String key = "a";
    bytesStore.put(serializeKey(new Windowed<>(key, windows[0])), serializeValue(50L));
    assertEquals(Collections.singleton(segments.segmentName(0)), segmentDirs());
    bytesStore.put(serializeKey(new Windowed<>(key, windows[3])), serializeValue(100L));
    assertEquals(
        Set.of(
            segments.segmentName(0),
            segments.segmentName(1)
        ),
        segmentDirs()
    );
    /*
     * Only 1 record returned. observed stream time = 60000, actual from = 59001 (60000 - 1000 + 1) and to = 60,000.
     */
    final List<KeyValue<Windowed<String>, Long>> results = toListAndCloseIterator(bytesStore.fetchAll(0L, 60_000L));
    assertEquals(
        Collections.singletonList(
            KeyValue.pair(new Windowed<>(key, windows[3]), 100L)
        ),
        results
    );
    segments.close();
}
/**
 * Backward compatibility: a segment directory renamed to the legacy
 * "name-yyyyMMddHHmm" (UTC) format must still be discovered and readable on reopen.
 */
@ParameterizedTest
@MethodSource("getKeySchemas")
public void shouldLoadSegmentsWithOldStyleDateFormattedName(final SegmentedBytesStore.KeySchema schema) {
    before(schema);
    final AbstractSegments<S> segments = newSegments();
    final String key = "a";
    bytesStore.put(serializeKey(new Windowed<>(key, windows[0])), serializeValue(50L));
    bytesStore.put(serializeKey(new Windowed<>(key, windows[3])), serializeValue(100L));
    bytesStore.close();
    final String firstSegmentName = segments.segmentName(0);
    final String[] nameParts = firstSegmentName.split("\\.");
    final long segmentId = Long.parseLong(nameParts[1]);
    // Legacy naming embedded the segment's start time as a UTC timestamp string.
    final SimpleDateFormat formatter = new SimpleDateFormat("yyyyMMddHHmm");
    formatter.setTimeZone(new SimpleTimeZone(0, "UTC"));
    final String formatted = formatter.format(new Date(segmentId * segmentInterval));
    final File parent = new File(stateDir, storeName);
    final File oldStyleName = new File(parent, nameParts[0] + "-" + formatted);
    assertTrue(new File(parent, firstSegmentName).renameTo(oldStyleName));
    bytesStore = getBytesStore();
    bytesStore.init(context, bytesStore);
    final List<KeyValue<Windowed<String>, Long>> results = toListAndCloseIterator(bytesStore.fetch(Bytes.wrap(key.getBytes()), 0L, 60_000L));
    assertThat(
        results,
        equalTo(
            Arrays.asList(
                KeyValue.pair(new Windowed<>(key, windows[0]), 50L),
                KeyValue.pair(new Windowed<>(key, windows[3]), 100L)
            )
        )
    );
    segments.close();
}
/**
 * Backward compatibility: a segment directory renamed to the legacy colon-separated
 * "name:segmentId" format must still be discovered and readable on reopen.
 */
@ParameterizedTest
@MethodSource("getKeySchemas")
public void shouldLoadSegmentsWithOldStyleColonFormattedName(final SegmentedBytesStore.KeySchema schema) {
    before(schema);
    final AbstractSegments<S> segments = newSegments();
    final String key = "a";
    bytesStore.put(serializeKey(new Windowed<>(key, windows[0])), serializeValue(50L));
    bytesStore.put(serializeKey(new Windowed<>(key, windows[3])), serializeValue(100L));
    bytesStore.close();
    final String firstSegmentName = segments.segmentName(0);
    final String[] nameParts = firstSegmentName.split("\\.");
    final File parent = new File(stateDir, storeName);
    final File oldStyleName = new File(parent, nameParts[0] + ":" + Long.parseLong(nameParts[1]));
    assertTrue(new File(parent, firstSegmentName).renameTo(oldStyleName));
    bytesStore = getBytesStore();
    bytesStore.init(context, bytesStore);
    final List<KeyValue<Windowed<String>, Long>> results = toListAndCloseIterator(bytesStore.fetch(Bytes.wrap(key.getBytes()), 0L, 60_000L));
    assertThat(
        results,
        equalTo(
            Arrays.asList(
                KeyValue.pair(new Windowed<>(key, windows[0]), 50L),
                KeyValue.pair(new Windowed<>(key, windows[3]), 100L)
            )
        )
    );
    segments.close();
}
/**
 * A store that has been closed and re-initialized must accept writes into a
 * previously-created segment without error.
 */
@ParameterizedTest
@MethodSource("getKeySchemas")
public void shouldBeAbleToWriteToReInitializedStore(final SegmentedBytesStore.KeySchema schema) {
    before(schema);
    final String key = "a";
    // need to create a segment so we can attempt to write to it again.
    bytesStore.put(serializeKey(new Windowed<>(key, windows[0])), serializeValue(50));
    bytesStore.close();
    bytesStore.init(context, bytesStore);
    bytesStore.put(serializeKey(new Windowed<>(key, windows[1])), serializeValue(100));
}
/**
 * getWriteBatches() must produce one RocksDB WriteBatch per target segment — here two
 * records land in two different segments, so two batches of one record each.
 */
@ParameterizedTest
@MethodSource("getKeySchemas")
public void shouldCreateWriteBatches(final SegmentedBytesStore.KeySchema schema) {
    before(schema);
    final String key = "a";
    final Collection<ConsumerRecord<byte[], byte[]>> records = new ArrayList<>();
    records.add(new ConsumerRecord<>("", 0, 0L, serializeKey(new Windowed<>(key, windows[0])).get(), serializeValue(50L)));
    records.add(new ConsumerRecord<>("", 0, 0L, serializeKey(new Windowed<>(key, windows[3])).get(), serializeValue(100L)));
    final Map<S, WriteBatch> writeBatchMap = bytesStore.getWriteBatches(records);
    assertEquals(2, writeBatchMap.size());
    for (final WriteBatch batch : writeBatchMap.values()) {
        assertEquals(1, batch.count());
        // WriteBatch is a native resource and must be closed explicitly.
        batch.close();
    }
}
/**
 * Restoration for an active task: changelog records are replayed into the store.
 */
@ParameterizedTest
@MethodSource("getKeySchemas")
public void shouldRestoreToByteStoreForActiveTask(final SegmentedBytesStore.KeySchema schema) {
    before(schema);
    shouldRestoreToByteStore();
}

/**
 * Restoration for a standby task: same replay path, but with the context in standby mode.
 */
@ParameterizedTest
@MethodSource("getKeySchemas")
public void shouldRestoreToByteStoreForStandbyTask(final SegmentedBytesStore.KeySchema schema) {
    before(schema);
    context.transitionToStandby(null);
    shouldRestoreToByteStore();
}

// Shared body of the two restoration tests above: replays two changelog records spanning
// two segments and verifies only the record inside the retention window remains readable.
private void shouldRestoreToByteStore() {
    bytesStore.init(context, bytesStore);
    // 0 segments initially.
    assertEquals(0, bytesStore.getSegments().size());
    final String key = "a";
    final Collection<ConsumerRecord<byte[], byte[]>> records = new ArrayList<>();
    records.add(new ConsumerRecord<>("", 0, 0L, serializeKey(new Windowed<>(key, windows[0])).get(), serializeValue(50L)));
    records.add(new ConsumerRecord<>("", 0, 0L, serializeKey(new Windowed<>(key, windows[3])).get(), serializeValue(100L)));
    bytesStore.restoreAllInternal(records);
    // 2 segments are created during restoration.
    assertEquals(2, bytesStore.getSegments().size());
    /*
     * Only 1 record returned. observed stream time = 60000, actual from = 59001 (60000 - 1000 + 1) and to = Long.MAX.
     */
    final List<KeyValue<Windowed<String>, Long>> expected = new ArrayList<>();
    expected.add(new KeyValue<>(new Windowed<>(key, windows[3]), 100L));
    final List<KeyValue<Windowed<String>, Long>> results = toListAndCloseIterator(bytesStore.all());
    assertEquals(expected, results);
}
/**
 * Each put() under a record context must advance the store's Position; after four puts
 * at offsets 1..4 the position for partition 0 must be 4.
 */
@ParameterizedTest
@MethodSource("getKeySchemas")
public void shouldMatchPositionAfterPut(final SegmentedBytesStore.KeySchema schema) {
    before(schema);
    bytesStore.init(context, bytesStore);
    final String keyA = "a";
    final String keyB = "b";
    final String keyC = "c";
    context.setRecordContext(new ProcessorRecordContext(0, 1, 0, "", new RecordHeaders()));
    bytesStore.put(serializeKey(new Windowed<>(keyA, windows[0])), serializeValue(10));
    context.setRecordContext(new ProcessorRecordContext(0, 2, 0, "", new RecordHeaders()));
    bytesStore.put(serializeKey(new Windowed<>(keyA, windows[1])), serializeValue(50));
    context.setRecordContext(new ProcessorRecordContext(0, 3, 0, "", new RecordHeaders()));
    bytesStore.put(serializeKey(new Windowed<>(keyB, windows[2])), serializeValue(100));
    context.setRecordContext(new ProcessorRecordContext(0, 4, 0, "", new RecordHeaders()));
    bytesStore.put(serializeKey(new Windowed<>(keyC, windows[3])), serializeValue(200));
    final Position expected = Position.fromMap(mkMap(mkEntry("", mkMap(mkEntry(0, 4L)))));
    final Position actual = bytesStore.getPosition();
    assertEquals(expected, actual);
}
/**
 * With IQ consistency enabled, restoring changelog records carrying position headers for a
 * single topic must rebuild both the data and the store's consistency vector (offset 3).
 */
@ParameterizedTest
@MethodSource("getKeySchemas")
public void shouldRestoreRecordsAndConsistencyVectorSingleTopic(final SegmentedBytesStore.KeySchema schema) {
    before(schema);
    final Properties props = StreamsTestUtils.getStreamsConfig();
    props.put(InternalConfig.IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED, true);
    final File dir = TestUtils.tempDirectory();
    context = new InternalMockProcessorContext<>(
        dir,
        Serdes.String(),
        Serdes.String(),
        new StreamsMetricsImpl(new Metrics(), "mock", new MockTime()),
        new StreamsConfig(props),
        MockRecordCollector::new,
        new ThreadCache(new LogContext("testCache "), 0, new MockStreamsMetrics(new Metrics())),
        Time.SYSTEM
    );
    bytesStore = getBytesStore();
    bytesStore.init(context, bytesStore);
    // 0 segments initially.
    assertEquals(0, bytesStore.getSegments().size());
    bytesStore.restoreAllInternal(getChangelogRecords());
    // 2 segments are created during restoration.
    assertEquals(2, bytesStore.getSegments().size());
    final String key = "a";
    /*
     * Only 1 record returned. observed stream time = 60000, actual from = 59001 (60000 - 1000 + 1) and to = Long.MAX.
     */
    final List<KeyValue<Windowed<String>, Long>> expected = new ArrayList<>();
    expected.add(new KeyValue<>(new Windowed<>(key, windows[3]), 200L));
    final List<KeyValue<Windowed<String>, Long>> results = toListAndCloseIterator(bytesStore.all());
    assertEquals(expected, results);
    assertThat(bytesStore.getPosition(), Matchers.notNullValue());
    assertThat(bytesStore.getPosition().getPartitionPositions(""), Matchers.notNullValue());
    assertThat(bytesStore.getPosition().getPartitionPositions(""), hasEntry(0, 3L));
}
/**
 * Like the single-topic restore test, but the position headers reference two topics
 * ("A" at offset 3, "B" at offset 2); both components must appear in the vector.
 */
@ParameterizedTest
@MethodSource("getKeySchemas")
public void shouldRestoreRecordsAndConsistencyVectorMultipleTopics(final SegmentedBytesStore.KeySchema schema) {
    before(schema);
    final Properties props = StreamsTestUtils.getStreamsConfig();
    props.put(InternalConfig.IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED, true);
    final File dir = TestUtils.tempDirectory();
    context = new InternalMockProcessorContext<>(
        dir,
        Serdes.String(),
        Serdes.String(),
        new StreamsMetricsImpl(new Metrics(), "mock", new MockTime()),
        new StreamsConfig(props),
        MockRecordCollector::new,
        new ThreadCache(new LogContext("testCache "), 0, new MockStreamsMetrics(new Metrics())),
        Time.SYSTEM
    );
    bytesStore = getBytesStore();
    bytesStore.init(context, bytesStore);
    // 0 segments initially.
    assertEquals(0, bytesStore.getSegments().size());
    bytesStore.restoreAllInternal(getChangelogRecordsMultipleTopics());
    // 2 segments are created during restoration.
    assertEquals(2, bytesStore.getSegments().size());
    final String key = "a";
    /*
     * Only 1 record returned. observed stream time = 60000, actual from = 59001 (60000 - 1000 + 1) and to = Long.MAX.
     */
    final List<KeyValue<Windowed<String>, Long>> expected = new ArrayList<>();
    expected.add(new KeyValue<>(new Windowed<>(key, windows[3]), 200L));
    final List<KeyValue<Windowed<String>, Long>> results = toListAndCloseIterator(bytesStore.all());
    assertEquals(expected, results);
    assertThat(bytesStore.getPosition(), Matchers.notNullValue());
    assertThat(bytesStore.getPosition().getPartitionPositions("A"), Matchers.notNullValue());
    assertThat(bytesStore.getPosition().getPartitionPositions("A"), hasEntry(0, 3L));
    assertThat(bytesStore.getPosition().getPartitionPositions("B"), Matchers.notNullValue());
    assertThat(bytesStore.getPosition().getPartitionPositions("B"), hasEntry(0, 2L));
}
/**
 * Restoring a changelog containing a tombstone must delete the corresponding entry while
 * still advancing the consistency vector; per-schema expectations differ because the two
 * schemas observe different stream times.
 */
@ParameterizedTest
@MethodSource("getKeySchemas")
public void shouldHandleTombstoneRecords(final SegmentedBytesStore.KeySchema schema) {
    before(schema);
    final Properties props = StreamsTestUtils.getStreamsConfig();
    props.put(InternalConfig.IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED, true);
    final File dir = TestUtils.tempDirectory();
    context = new InternalMockProcessorContext<>(
        dir,
        Serdes.String(),
        Serdes.String(),
        new StreamsMetricsImpl(new Metrics(), "mock", new MockTime()),
        new StreamsConfig(props),
        MockRecordCollector::new,
        new ThreadCache(new LogContext("testCache "), 0, new MockStreamsMetrics(new Metrics())),
        Time.SYSTEM
    );
    bytesStore = getBytesStore();
    bytesStore.init(context, bytesStore);
    // 0 segments initially.
    assertEquals(0, bytesStore.getSegments().size());
    bytesStore.restoreAllInternal(getChangelogRecordsWithTombstones());
    // 1 segments are created during restoration.
    assertEquals(1, bytesStore.getSegments().size());
    final String key = "a";
    /*
     * For WindowKeySchema, the observedStreamTime is 1000 which means 1 extra record gets returned while for
     * SessionKeySchema, it's 1500. Which changes the actual-from while fetching. In case of SessionKeySchema, the
     * fetch happens from 501 to end while for WindowKeySchema it's from 1 to end.
     */
    final List<KeyValue<Windowed<String>, Long>> results = toListAndCloseIterator(bytesStore.all());
    if (schema instanceof SessionKeySchema) {
        assertEquals(Collections.emptyList(), results);
    } else {
        final List<KeyValue<Windowed<String>, Long>> expected = new ArrayList<>();
        expected.add(new KeyValue<>(new Windowed<>(key, windows[0]), 50L));
        assertEquals(expected, results);
    }
    assertThat(bytesStore.getPosition(), Matchers.notNullValue());
    assertThat(bytesStore.getPosition().getPartitionPositions("A"), hasEntry(0, 2L));
}
/**
 * Restoring changelog records that carry no position headers must not fail; the store's
 * position simply stays empty.
 */
@ParameterizedTest
@MethodSource("getKeySchemas")
public void shouldNotThrowWhenRestoringOnMissingHeaders(final SegmentedBytesStore.KeySchema schema) {
    before(schema);
    final Properties props = StreamsTestUtils.getStreamsConfig();
    props.put(InternalConfig.IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED, true);
    final File dir = TestUtils.tempDirectory();
    context = new InternalMockProcessorContext<>(
        dir,
        Serdes.String(),
        Serdes.String(),
        new StreamsMetricsImpl(new Metrics(), "mock", new MockTime()),
        new StreamsConfig(props),
        MockRecordCollector::new,
        new ThreadCache(new LogContext("testCache "), 0, new MockStreamsMetrics(new Metrics())),
        Time.SYSTEM
    );
    bytesStore = getBytesStore();
    bytesStore.init(context, bytesStore);
    bytesStore.restoreAllInternal(getChangelogRecordsWithoutHeaders());
    assertThat(bytesStore.getPosition(), is(Position.emptyPosition()));
}
/**
 * Builds three changelog records for topic "" (windows 0, 2 and 3), each carrying a
 * consistency-version header plus a position header with the running offset (1, 2, 3).
 * The position header is swapped out before each record because the header key must be
 * unique within a Headers instance.
 */
private List<ConsumerRecord<byte[], byte[]>> getChangelogRecords() {
    final List<ConsumerRecord<byte[], byte[]>> records = new ArrayList<>();
    final Headers headers = new RecordHeaders();
    Position position1 = Position.emptyPosition();
    position1 = position1.withComponent("", 0, 1);
    headers.add(ChangelogRecordDeserializationHelper.CHANGELOG_VERSION_HEADER_RECORD_CONSISTENCY);
    headers.add(new RecordHeader(
        ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
        PositionSerde.serialize(position1).array())
    );
    records.add(new ConsumerRecord<>("", 0, 0L, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, -1, -1,
        serializeKey(new Windowed<>("a", windows[0])).get(), serializeValue(50L), headers, Optional.empty()));
    headers.remove(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY);
    position1 = position1.withComponent("", 0, 2);
    headers.add(new RecordHeader(
        ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
        PositionSerde.serialize(position1).array())
    );
    records.add(new ConsumerRecord<>("", 0, 0L, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, -1, -1,
        serializeKey(new Windowed<>("a", windows[2])).get(), serializeValue(100L), headers, Optional.empty()));
    headers.remove(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY);
    position1 = position1.withComponent("", 0, 3);
    headers.add(new RecordHeader(
        ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
        PositionSerde.serialize(position1).array())
    );
    records.add(new ConsumerRecord<>("", 0, 0L, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, -1, -1,
        serializeKey(new Windowed<>("a", windows[3])).get(), serializeValue(200L), headers, Optional.empty()));
    return records;
}
private List<ConsumerRecord<byte[], byte[]>> getChangelogRecordsMultipleTopics() {
final List<ConsumerRecord<byte[], byte[]>> records = new ArrayList<>();
final Headers headers = new RecordHeaders();
Position position1 = Position.emptyPosition();
position1 = position1.withComponent("A", 0, 1);
headers.add(ChangelogRecordDeserializationHelper.CHANGELOG_VERSION_HEADER_RECORD_CONSISTENCY);
headers.add(new RecordHeader(
ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
PositionSerde.serialize(position1).array())
);
records.add(new ConsumerRecord<>("", 0, 0L, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, -1, -1,
serializeKey(new Windowed<>("a", windows[0])).get(), serializeValue(50L), headers, Optional.empty()));
headers.remove(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY);
position1 = position1.withComponent("B", 0, 2);
headers.add(new RecordHeader(
ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
PositionSerde.serialize(position1).array())
);
records.add(new ConsumerRecord<>("", 0, 0L, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, -1, -1,
serializeKey(new Windowed<>("a", windows[2])).get(), serializeValue(100L), headers, Optional.empty()));
headers.remove(ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY);
position1 = position1.withComponent("A", 0, 3);
headers.add(new RecordHeader(
ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
PositionSerde.serialize(position1).array())
);
records.add(new ConsumerRecord<>("", 0, 0L, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, -1, -1,
serializeKey(new Windowed<>("a", windows[3])).get(), serializeValue(200L), headers, Optional.empty()));
return records;
}
private List<ConsumerRecord<byte[], byte[]>> getChangelogRecordsWithTombstones() {
final List<ConsumerRecord<byte[], byte[]>> records = new ArrayList<>();
final Headers headers = new RecordHeaders();
Position position = Position.emptyPosition();
position = position.withComponent("A", 0, 1);
headers.add(ChangelogRecordDeserializationHelper.CHANGELOG_VERSION_HEADER_RECORD_CONSISTENCY);
headers.add(new RecordHeader(
ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
PositionSerde.serialize(position).array()));
records.add(new ConsumerRecord<>("", 0, 0L, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, -1, -1,
serializeKey(new Windowed<>("a", windows[0])).get(), serializeValue(50L), headers, Optional.empty()));
position = position.withComponent("A", 0, 2);
headers.add(ChangelogRecordDeserializationHelper.CHANGELOG_VERSION_HEADER_RECORD_CONSISTENCY);
headers.add(new RecordHeader(
ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY,
PositionSerde.serialize(position).array()));
records.add(new ConsumerRecord<>("", 0, 0L, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, -1, -1,
serializeKey(new Windowed<>("a", windows[2])).get(), null, headers, Optional.empty()));
return records;
}
private List<ConsumerRecord<byte[], byte[]>> getChangelogRecordsWithoutHeaders() {
final List<ConsumerRecord<byte[], byte[]>> records = new ArrayList<>();
records.add(new ConsumerRecord<>("", 0, 0L, serializeKey(new Windowed<>("a", windows[2])).get(), serializeValue(50L)));
return records;
}
@ParameterizedTest
@MethodSource("getKeySchemas")
public void shouldMeasureExpiredRecords(final SegmentedBytesStore.KeySchema schema) {
before(schema);
final Properties streamsConfig = StreamsTestUtils.getStreamsConfig();
final AbstractRocksDBSegmentedBytesStore<S> bytesStore = getBytesStore();
final InternalMockProcessorContext<?, ?> context = new InternalMockProcessorContext<>(
TestUtils.tempDirectory(),
new StreamsConfig(streamsConfig)
);
final Time time = Time.SYSTEM;
context.setSystemTimeMs(time.milliseconds());
bytesStore.init(context, bytesStore);
// write a record to advance stream time, with a high enough timestamp
// that the subsequent record in windows[0] will already be expired.
bytesStore.put(serializeKey(new Windowed<>("dummy", nextSegmentWindow)), serializeValue(0));
final Bytes key = serializeKey(new Windowed<>("a", windows[0]));
final byte[] value = serializeValue(5);
bytesStore.put(key, value);
final Map<MetricName, ? extends Metric> metrics = context.metrics().metrics();
final String threadId = Thread.currentThread().getName();
final Metric dropTotal;
final Metric dropRate;
dropTotal = metrics.get(new MetricName(
"dropped-records-total",
"stream-task-metrics",
"",
mkMap(
mkEntry("thread-id", threadId),
mkEntry("task-id", "0_0")
)
));
dropRate = metrics.get(new MetricName(
"dropped-records-rate",
"stream-task-metrics",
"",
mkMap(
mkEntry("thread-id", threadId),
mkEntry("task-id", "0_0")
)
));
assertEquals(1.0, dropTotal.metricValue());
assertNotEquals(0.0, dropRate.metricValue());
bytesStore.close();
}
private Set<String> segmentDirs() {
final File windowDir = new File(stateDir, storeName);
return Set.of(Objects.requireNonNull(windowDir.list()));
}
private Bytes serializeKey(final Windowed<String> key) {
final StateSerdes<String, Long> stateSerdes = StateSerdes.withBuiltinTypes("dummy", String.class, Long.class);
if (schema instanceof SessionKeySchema) {
return Bytes.wrap(SessionKeySchema.toBinary(key, stateSerdes.keySerializer(), "dummy"));
} else if (schema instanceof WindowKeySchema) {
return WindowKeySchema.toStoreKeyBinary(key, 0, stateSerdes);
} else {
throw new IllegalStateException("Unrecognized serde schema");
}
}
@SuppressWarnings("resource")
private byte[] serializeValue(final long value) {
return new LongSerializer().serialize("", value);
}
@SuppressWarnings("resource")
private List<KeyValue<Windowed<String>, Long>> toListAndCloseIterator(final KeyValueIterator<Bytes, byte[]> iterator) {
try (iterator) {
final List<KeyValue<Windowed<String>, Long>> results = new ArrayList<>();
final StateSerdes<String, Long> stateSerdes = StateSerdes.withBuiltinTypes("dummy", String.class, Long.class);
while (iterator.hasNext()) {
final KeyValue<Bytes, byte[]> next = iterator.next();
if (schema instanceof WindowKeySchema) {
final KeyValue<Windowed<String>, Long> deserialized = KeyValue.pair(
WindowKeySchema.fromStoreKey(
next.key.get(),
windowSizeForTimeWindow,
stateSerdes.keyDeserializer(),
stateSerdes.topic()
),
stateSerdes.valueDeserializer().deserialize("dummy", next.value)
);
results.add(deserialized);
} else if (schema instanceof SessionKeySchema) {
final KeyValue<Windowed<String>, Long> deserialized = KeyValue.pair(
SessionKeySchema.from(next.key.get(), stateSerdes.keyDeserializer(), "dummy"),
stateSerdes.valueDeserializer().deserialize("dummy", next.value)
);
results.add(deserialized);
} else {
throw new IllegalStateException("Unrecognized serde schema");
}
}
return results;
}
}
}
| AbstractRocksDBSegmentedBytesStoreTest |
java | apache__flink | flink-table/flink-sql-gateway/src/main/java/org/apache/flink/table/gateway/rest/message/session/GetSessionConfigResponseBody.java | {
"start": 1322,
"end": 1802
} | class ____ implements ResponseBody {
private static final String FIELD_PROPERTIES = "properties";
@JsonProperty(FIELD_PROPERTIES)
private final Map<String, String> properties;
@JsonCreator
public GetSessionConfigResponseBody(
@JsonProperty(FIELD_PROPERTIES) Map<String, String> properties) {
this.properties = properties;
}
public Map<String, String> getProperties() {
return properties;
}
}
| GetSessionConfigResponseBody |
java | bumptech__glide | library/test/src/test/java/com/bumptech/glide/load/resource/bitmap/BitmapDrawableTransformationTest.java | {
"start": 1616,
"end": 5639
} | class ____ {
@Rule public final KeyTester keyTester = new KeyTester();
@Mock private BitmapPool bitmapPool;
@Mock private Transformation<Bitmap> wrapped;
@Mock private Resource<BitmapDrawable> drawableResourceToTransform;
private BitmapDrawableTransformation transformation;
private Bitmap bitmapToTransform;
private Application context;
@Before
public void setUp() {
MockitoAnnotations.initMocks(this);
bitmapToTransform = Bitmap.createBitmap(100, 100, Bitmap.Config.ARGB_8888);
BitmapDrawable drawableToTransform = new BitmapDrawable(bitmapToTransform);
context = ApplicationProvider.getApplicationContext();
Glide.init(context, new GlideBuilder().setBitmapPool(bitmapPool));
when(drawableResourceToTransform.get()).thenReturn(drawableToTransform);
transformation = new BitmapDrawableTransformation(wrapped);
}
@After
public void tearDown() {
Glide.tearDown();
}
@Test
public void testReturnsOriginalResourceIfTransformationDoesNotTransform() {
int outWidth = 123;
int outHeight = 456;
when(wrapped.transform(anyContext(), Util.<Bitmap>anyResource(), eq(outWidth), eq(outHeight)))
.thenAnswer(
new Answer<Resource<Bitmap>>() {
@SuppressWarnings("unchecked")
@Override
public Resource<Bitmap> answer(InvocationOnMock invocation) throws Throwable {
return (Resource<Bitmap>) invocation.getArguments()[1];
}
});
Resource<BitmapDrawable> transformed =
transformation.transform(context, drawableResourceToTransform, outWidth, outHeight);
assertThat(transformed).isEqualTo(drawableResourceToTransform);
}
@Test
public void testReturnsNewResourceIfTransformationDoesTransform() {
int outWidth = 999;
int outHeight = 555;
Bitmap transformedBitmap = Bitmap.createBitmap(outWidth, outHeight, Bitmap.Config.RGB_565);
Resource<Bitmap> transformedBitmapResource = Util.mockResource();
when(transformedBitmapResource.get()).thenReturn(transformedBitmap);
when(wrapped.transform(anyContext(), Util.<Bitmap>anyResource(), eq(outWidth), eq(outHeight)))
.thenReturn(transformedBitmapResource);
Resource<BitmapDrawable> transformed =
transformation.transform(context, drawableResourceToTransform, outWidth, outHeight);
assertThat(transformed.get().getBitmap()).isEqualTo(transformedBitmap);
}
@Test
public void testProvidesBitmapFromGivenResourceToWrappedTransformation() {
int outWidth = 332;
int outHeight = 111;
Resource<Bitmap> transformed = Util.mockResource();
when(transformed.get())
.thenReturn(Bitmap.createBitmap(outWidth, outHeight, Bitmap.Config.ARGB_8888));
when(wrapped.transform(anyContext(), Util.<Bitmap>anyResource(), anyInt(), anyInt()))
.thenReturn(transformed);
transformation.transform(context, drawableResourceToTransform, outWidth, outHeight);
ArgumentCaptor<Resource<Bitmap>> captor = Util.cast(ArgumentCaptor.forClass(Resource.class));
verify(wrapped).transform(anyContext(), captor.capture(), eq(outWidth), eq(outHeight));
assertThat(captor.getValue().get()).isEqualTo(bitmapToTransform);
}
@Test
public void testEquals() throws NoSuchAlgorithmException {
doAnswer(new Util.WriteDigest("wrapped"))
.when(wrapped)
.updateDiskCacheKey(any(MessageDigest.class));
@SuppressWarnings("unchecked")
Transformation<Bitmap> other = mock(Transformation.class);
doAnswer(new Util.WriteDigest("other"))
.when(other)
.updateDiskCacheKey(any(MessageDigest.class));
keyTester
.addEquivalenceGroup(transformation, new BitmapDrawableTransformation(wrapped))
.addEquivalenceGroup(new BitmapDrawableTransformation(other))
.addEquivalenceGroup(wrapped)
.addRegressionTest(
transformation, "adbf45b08ad6468aa147e5b2a23758ef56ab631a2b70ad52501ca358441a34f3")
.test();
}
}
| BitmapDrawableTransformationTest |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/file/impl/AsyncFileLockImpl.java | {
"start": 711,
"end": 2008
} | class ____ implements AsyncFileLock {
private final VertxInternal vertx;
private final FileLock fileLock;
public AsyncFileLockImpl(VertxInternal vertx, FileLock fileLock) {
this.vertx = Objects.requireNonNull(vertx, "vertx is null");
this.fileLock = Objects.requireNonNull(fileLock, "fileLock is null");
}
@Override
public long position() {
return fileLock.position();
}
@Override
public long size() {
return fileLock.size();
}
@Override
public boolean isShared() {
return fileLock.isShared();
}
@Override
public boolean overlaps(long position, long size) {
return fileLock.overlaps(position, size);
}
@Override
public boolean isValidBlocking() {
return fileLock.isValid();
}
@Override
public Future<Boolean> isValid() {
return vertx.executeBlockingInternal(this::isValidBlocking);
}
@Override
public void releaseBlocking() {
try {
fileLock.release();
} catch (IOException e) {
throw new FileSystemException(e);
}
}
@Override
public Future<Void> release() {
return vertx.executeBlockingInternal(() -> {
try {
fileLock.release();
return null;
} catch (IOException e) {
throw new FileSystemException(e);
}
});
}
}
| AsyncFileLockImpl |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/recursive/comparison/fields/RecursiveComparisonAssert_isEqualTo_ignoringCollectionOrder_Test.java | {
"start": 24629,
"end": 25809
} | class ____ {
public int val;
public Inner(int val) {
this.val = val;
}
public String toString() {
return "I" + val;
}
}
record Item(String name, int quantity) {
}
@Test
void should_honor_representation_in_unmatched_elements_when_comparing_iterables_ignoring_order() {
// GIVEN
List<Item> expectedItems = List.of(new Item("Shoes", 2), new Item("Pants", 3));
List<Item> actualItems = List.of(new Item("Pants", 3), new Item("Loafers", 1));
registerFormatterForType(Item.class, item -> "Item(%s, %d)".formatted(item.name(), item.quantity()));
// WHEN
var assertionError = expectAssertionError(() -> assertThat(actualItems).usingRecursiveComparison(recursiveComparisonConfiguration)
.ignoringCollectionOrder()
.isEqualTo(expectedItems));
// THEN
then(assertionError).hasMessageContaining(format("The following expected elements were not matched in the actual List12:%n" +
" [Item(Shoes, 2)]"));
}
}
| Inner |
java | google__guice | extensions/persist/src/com/google/inject/persist/jpa/JpaLocalTxnInterceptor.java | {
"start": 1310,
"end": 5090
} | class ____ {}
// Tracks if the unit of work was begun implicitly by this transaction.
private final ThreadLocal<Boolean> didWeStartWork = new ThreadLocal<>();
@Override
public Object invoke(MethodInvocation methodInvocation) throws Throwable {
// Should we start a unit of work?
if (!emProvider.isWorking()) {
emProvider.begin();
didWeStartWork.set(true);
}
Transactional transactional = readTransactionMetadata(methodInvocation);
EntityManager em = this.emProvider.get();
// Allow 'joining' of transactions if there is an enclosing @Transactional method.
if (em.getTransaction().isActive()) {
return methodInvocation.proceed();
}
final EntityTransaction txn = em.getTransaction();
txn.begin();
Object result;
try {
result = methodInvocation.proceed();
} catch (Exception e) {
// commit transaction only if rollback didnt occur
if (rollbackIfNecessary(transactional, e, txn)) {
txn.commit();
}
// propagate whatever exception is thrown anyway
throw e;
} finally {
// Close the em if necessary (guarded so this code doesn't run unless catch fired).
if (null != didWeStartWork.get() && !txn.isActive()) {
didWeStartWork.remove();
unitOfWork.end();
}
}
// everything was normal so commit the txn (do not move into try block above as it
// interferes with the advised method's throwing semantics)
try {
if (txn.isActive()) {
if (txn.getRollbackOnly()) {
txn.rollback();
} else {
txn.commit();
}
}
} finally {
// close the em if necessary
if (null != didWeStartWork.get()) {
didWeStartWork.remove();
unitOfWork.end();
}
}
// or return result
return result;
}
// TODO(user): Cache this method's results.
private Transactional readTransactionMetadata(MethodInvocation methodInvocation) {
Transactional transactional;
Method method = methodInvocation.getMethod();
Class<?> targetClass = methodInvocation.getThis().getClass();
transactional = method.getAnnotation(Transactional.class);
if (null == transactional) {
// If none on method, try the class.
transactional = targetClass.getAnnotation(Transactional.class);
}
if (null == transactional) {
// If there is no transactional annotation present, use the default
transactional = Internal.class.getAnnotation(Transactional.class);
}
return transactional;
}
/**
* Returns True if rollback DID NOT HAPPEN (i.e. if commit should continue).
*
* @param transactional The metadata annotation of the method
* @param e The exception to test for rollback
* @param txn A JPA Transaction to issue rollbacks on
*/
private boolean rollbackIfNecessary(
Transactional transactional, Exception e, EntityTransaction txn) {
boolean commit = true;
// check rollback clauses
for (Class<? extends Exception> rollBackOn : transactional.rollbackOn()) {
// if one matched, try to perform a rollback
if (rollBackOn.isInstance(e)) {
commit = false;
// check ignore clauses (supercedes rollback clause)
for (Class<? extends Exception> exceptOn : transactional.ignore()) {
// An exception to the rollback clause was found, DON'T rollback
// (i.e. commit and throw anyway)
if (exceptOn.isInstance(e)) {
commit = true;
break;
}
}
// rollback only if nothing matched the ignore check
if (!commit) {
txn.rollback();
}
// otherwise continue to commit
break;
}
}
return commit;
}
}
| Internal |
java | spring-projects__spring-framework | spring-expression/src/test/java/org/springframework/expression/spel/OptionalNullSafetyTests.java | {
"start": 11400,
"end": 11978
} | class ____ {
@Test
void elvisOperatorOnEmptyOptional() {
Expression expr = parser.parseExpression("#service.findJediByName('') ?: 'unknown'");
assertThat(expr.getValue(context)).isEqualTo("unknown");
}
@Test
void elvisOperatorOnNonEmptyOptional() {
Expression expr = parser.parseExpression("#service.findJediByName('Yoda') ?: 'unknown'");
assertThat(expr.getValue(context)).isEqualTo(new Jedi("Yoda"));
}
}
record Jedi(String name) {
public String salutation(String salutation) {
return salutation + " " + this.name;
}
}
static | ElvisTests |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/subjects/SingleSubjectTest.java | {
"start": 1057,
"end": 6694
} | class ____ extends RxJavaTest {
@Test
public void success() {
SingleSubject<Integer> ss = SingleSubject.create();
assertFalse(ss.hasValue());
assertNull(ss.getValue());
assertFalse(ss.hasThrowable());
assertNull(ss.getThrowable());
assertFalse(ss.hasObservers());
assertEquals(0, ss.observerCount());
TestObserver<Integer> to = ss.test();
to.assertEmpty();
assertTrue(ss.hasObservers());
assertEquals(1, ss.observerCount());
ss.onSuccess(1);
assertTrue(ss.hasValue());
assertEquals(1, ss.getValue().intValue());
assertFalse(ss.hasThrowable());
assertNull(ss.getThrowable());
assertFalse(ss.hasObservers());
assertEquals(0, ss.observerCount());
to.assertResult(1);
ss.test().assertResult(1);
assertTrue(ss.hasValue());
assertEquals(1, ss.getValue().intValue());
assertFalse(ss.hasThrowable());
assertNull(ss.getThrowable());
assertFalse(ss.hasObservers());
assertEquals(0, ss.observerCount());
}
@Test
public void once() {
SingleSubject<Integer> ss = SingleSubject.create();
TestObserver<Integer> to = ss.test();
ss.onSuccess(1);
ss.onSuccess(2);
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
ss.onError(new IOException());
TestHelper.assertUndeliverable(errors, 0, IOException.class);
} finally {
RxJavaPlugins.reset();
}
to.assertResult(1);
}
@Test
public void error() {
SingleSubject<Integer> ss = SingleSubject.create();
assertFalse(ss.hasValue());
assertNull(ss.getValue());
assertFalse(ss.hasThrowable());
assertNull(ss.getThrowable());
assertFalse(ss.hasObservers());
assertEquals(0, ss.observerCount());
TestObserver<Integer> to = ss.test();
to.assertEmpty();
assertTrue(ss.hasObservers());
assertEquals(1, ss.observerCount());
ss.onError(new IOException());
assertFalse(ss.hasValue());
assertNull(ss.getValue());
assertTrue(ss.hasThrowable());
assertTrue(ss.getThrowable().toString(), ss.getThrowable() instanceof IOException);
assertFalse(ss.hasObservers());
assertEquals(0, ss.observerCount());
to.assertFailure(IOException.class);
ss.test().assertFailure(IOException.class);
assertFalse(ss.hasValue());
assertNull(ss.getValue());
assertTrue(ss.hasThrowable());
assertTrue(ss.getThrowable().toString(), ss.getThrowable() instanceof IOException);
assertFalse(ss.hasObservers());
assertEquals(0, ss.observerCount());
}
@Test
public void nullValue() {
SingleSubject<Integer> ss = SingleSubject.create();
try {
ss.onSuccess(null);
fail("No NullPointerException thrown");
} catch (NullPointerException ex) {
assertEquals(ExceptionHelper.nullWarning("onSuccess called with a null value."), ex.getMessage());
}
ss.test().assertEmpty().dispose();
}
@Test
public void nullThrowable() {
SingleSubject<Integer> ss = SingleSubject.create();
try {
ss.onError(null);
fail("No NullPointerException thrown");
} catch (NullPointerException ex) {
assertEquals(ExceptionHelper.nullWarning("onError called with a null Throwable."), ex.getMessage());
}
ss.test().assertEmpty().dispose();
}
@Test
public void cancelOnArrival() {
SingleSubject.create()
.test(true)
.assertEmpty();
}
@Test
public void cancelOnArrival2() {
SingleSubject<Integer> ss = SingleSubject.create();
ss.test();
ss
.test(true)
.assertEmpty();
}
@Test
public void dispose() {
TestHelper.checkDisposed(SingleSubject.create());
}
@Test
public void disposeTwice() {
SingleSubject.create()
.subscribe(new SingleObserver<Object>() {
@Override
public void onSubscribe(Disposable d) {
assertFalse(d.isDisposed());
d.dispose();
d.dispose();
assertTrue(d.isDisposed());
}
@Override
public void onSuccess(Object value) {
}
@Override
public void onError(Throwable e) {
}
});
}
@Test
public void onSubscribeDispose() {
SingleSubject<Integer> ss = SingleSubject.create();
Disposable d = Disposable.empty();
ss.onSubscribe(d);
assertFalse(d.isDisposed());
ss.onSuccess(1);
d = Disposable.empty();
ss.onSubscribe(d);
assertTrue(d.isDisposed());
}
@Test
public void addRemoveRace() {
for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) {
final SingleSubject<Integer> ss = SingleSubject.create();
final TestObserver<Integer> to = ss.test();
Runnable r1 = new Runnable() {
@Override
public void run() {
ss.test();
}
};
Runnable r2 = new Runnable() {
@Override
public void run() {
to.dispose();
}
};
TestHelper.race(r1, r2);
}
}
}
| SingleSubjectTest |
java | quarkusio__quarkus | extensions/hibernate-orm/runtime/src/main/java/io/quarkus/hibernate/orm/runtime/proxies/PreGeneratedProxies.java | {
"start": 480,
"end": 812
} | class ____ {
private Map<String, ProxyClassDetailsHolder> proxies = new HashMap<>();
public Map<String, ProxyClassDetailsHolder> getProxies() {
return proxies;
}
public void setProxies(Map<String, ProxyClassDetailsHolder> proxies) {
this.proxies = proxies;
}
public static | PreGeneratedProxies |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/validators/ParamValidationNaming.java | {
"start": 1386,
"end": 16042
} | class ____ {
@Test
public void checkCompletable() throws Exception {
processFile(Completable.class);
}
@Test
public void checkSingle() throws Exception {
processFile(Single.class);
}
@Test
public void checkMaybe() throws Exception {
processFile(Maybe.class);
}
@Test
public void checkObservable() throws Exception {
processFile(Observable.class);
}
@Test
public void checkFlowable() throws Exception {
processFile(Flowable.class);
}
@Test
public void checkParallelFlowable() throws Exception {
processFile(ParallelFlowable.class);
}
@Test
public void checkConnectableObservable() throws Exception {
processFile(ConnectableObservable.class);
}
@Test
public void checkConnectableFlowable() throws Exception {
processFile(ConnectableFlowable.class);
}
@Test
public void checkSubject() throws Exception {
processFile(Subject.class);
}
@Test
public void checkFlowableProcessor() throws Exception {
processFile(FlowableProcessor.class);
}
@Test
public void checkDisposable() throws Exception {
processFile(Disposable.class);
}
@Test
public void checkScheduler() throws Exception {
processFile(Scheduler.class);
}
@Test
public void checkSchedulers() throws Exception {
processFile(Schedulers.class);
}
@Test
public void checkAsyncSubject() throws Exception {
processFile(AsyncSubject.class);
}
@Test
public void checkBehaviorSubject() throws Exception {
processFile(BehaviorSubject.class);
}
@Test
public void checkPublishSubject() throws Exception {
processFile(PublishSubject.class);
}
@Test
public void checkReplaySubject() throws Exception {
processFile(ReplaySubject.class);
}
@Test
public void checkUnicastSubject() throws Exception {
processFile(UnicastSubject.class);
}
@Test
public void checkSingleSubject() throws Exception {
processFile(SingleSubject.class);
}
@Test
public void checkMaybeSubject() throws Exception {
processFile(MaybeSubject.class);
}
@Test
public void checkCompletableSubject() throws Exception {
processFile(CompletableSubject.class);
}
@Test
public void checkAsyncProcessor() throws Exception {
processFile(AsyncProcessor.class);
}
@Test
public void checkBehaviorProcessor() throws Exception {
processFile(BehaviorProcessor.class);
}
@Test
public void checkPublishProcessor() throws Exception {
processFile(PublishProcessor.class);
}
@Test
public void checkReplayProcessor() throws Exception {
processFile(ReplayProcessor.class);
}
@Test
public void checkUnicastProcessor() throws Exception {
processFile(UnicastProcessor.class);
}
@Test
public void checkMulticastProcessor() throws Exception {
processFile(MulticastProcessor.class);
}
@Test
public void checkCompositeDisposable() throws Exception {
processFile(CompositeDisposable.class);
}
static void processFile(Class<?> clazz) throws Exception {
String baseClassName = clazz.getSimpleName();
File f = TestHelper.findSource(baseClassName, clazz.getPackage().getName());
if (f == null) {
return;
}
String fullClassName = clazz.getName();
int errorCount = 0;
StringBuilder errors = new StringBuilder();
List<String> lines = Files.readAllLines(f.toPath());
for (int j = 0; j < lines.size(); j++) {
String line = lines.get(j).trim();
for (ValidatorStrings validatorStr : VALIDATOR_STRINGS) {
int strIdx = line.indexOf(validatorStr.code);
if (strIdx >= 0) {
int comma = line.indexOf(',', strIdx + validatorStr.code.length());
String paramName = line.substring(strIdx + validatorStr.code.length(), comma);
int quote = line.indexOf('"', comma);
String message = line.substring(quote + 1, Math.min(line.length(), quote + 2 + paramName.length()));
if (line.contains("\"A Disposable")) {
continue;
}
if (!line.contains("\"The RxJavaPlugins")
&& !(message.startsWith(paramName)
&& (message.endsWith(" ") || message.endsWith("\"")))) {
errorCount++;
errors.append("L")
.append(j)
.append(" : Wrong validator message parameter name\r\n ")
.append(line)
.append("\r\n")
.append(" ").append(paramName).append(" != ").append(message)
.append("\r\n at ")
.append(fullClassName)
.append(".method(")
.append(f.getName())
.append(":")
.append(j + 1)
.append(")\r\n")
;
}
int midx = j - 1;
// find the method declaration
for (; midx >= 0; midx--) {
String linek = lines.get(midx).trim();
if (linek.startsWith("public") || linek.startsWith("private")
|| linek.startsWith("protected")
|| linek.startsWith("static")
|| linek.startsWith(baseClassName)) {
break;
}
}
if (line.contains("\"The RxJavaPlugins")) {
continue;
}
// find JavaDoc of throws
boolean found = false;
for (int k = midx - 1; k >= 0; k--) {
String linek = lines.get(k).trim();
if (linek.startsWith("/**")) {
break;
}
if (linek.startsWith("}")) {
found = true; // no method JavaDoc present
break;
}
if (linek.startsWith(validatorStr.javadoc)) {
// see if a @code paramName is present
String paramStr = "{@code " + paramName + "}";
for (int m = k; m < lines.size(); m++) {
String linem = lines.get(m).trim();
if (linem.startsWith("* @see")
|| linem.startsWith("* @since")
|| linem.startsWith("*/")) {
break;
}
if (linem.contains(paramStr)) {
found = true;
break;
}
}
break;
}
}
if (!found) {
errorCount++;
errors.append("L")
.append(j)
.append(" : missing '")
.append(validatorStr.javadoc)
.append("' for argument validation: ")
.append(paramName)
.append("\r\n ")
.append(line)
.append("\r\n at ")
.append(fullClassName)
.append(".method(")
.append(f.getName())
.append(":")
.append(j + 1)
.append(")\r\n")
;
}
}
}
for (ValidatorStrings validatorStr : EXCEPTION_STRINGS) {
int strIdx = line.indexOf(validatorStr.code);
if (strIdx >= 0) {
int midx = j - 1;
// find the method declaration
for (; midx >= 0; midx--) {
String linek = lines.get(midx).trim();
if (linek.startsWith("public") || linek.startsWith("private")
|| linek.startsWith("protected")
|| linek.startsWith("static")
|| linek.startsWith(baseClassName)) {
break;
}
}
// find JavaDoc of throws
boolean found = false;
for (int k = midx - 1; k >= 0; k--) {
String linek = lines.get(k).trim();
if (linek.startsWith("/**")) {
break;
}
if (linek.startsWith("}")) {
found = true; // no JavaDoc
break;
}
if (linek.startsWith(validatorStr.javadoc)) {
found = true;
}
}
if (!found) {
errorCount++;
errors.append("L")
.append(j)
.append(" : missing '")
.append(validatorStr.javadoc)
.append("' for exception\r\n ")
.append(line)
.append("\r\n at ")
.append(fullClassName)
.append(".method(")
.append(f.getName())
.append(":")
.append(j + 1)
.append(")\r\n")
;
}
}
}
if (line.startsWith("public") || line.startsWith("protected") || line.startsWith("final") || line.startsWith("private")
|| line.startsWith("static")) {
for (ValidatorStrings validatorStr : TYPICAL_ARGUMENT_STRINGS) {
// find the method declaration ending {
for (int i = j; i < lines.size(); i++) {
String linei = lines.get(i).trim();
// space + code for capturing type declarations
String varPattern = " " + validatorStr.code;
if (linei.contains(varPattern + ")")
|| linei.contains(varPattern + ",")
|| linei.endsWith(varPattern)) {
// ignore nullable-annotated arguments
if (!linei.matches(".*\\@Nullable\\s.*" + validatorStr.code + ".*")) {
boolean found = false;
for (int k = i - 1; k >= 0; k--) {
String linek = lines.get(k).trim();
if (linek.startsWith("/**")) {
break;
}
if (linek.startsWith("}")) {
found = true; // no method JavaDoc present
break;
}
if (linek.startsWith(validatorStr.javadoc)) {
// see if a @code paramName is present
String paramStr = "{@code " + validatorStr.code + "}";
for (int m = k; m < lines.size(); m++) {
String linem = lines.get(m).trim();
if (linem.startsWith("* @see")
|| linem.startsWith("* @since")
|| linem.startsWith("*/")) {
break;
}
if (linem.contains(paramStr)) {
found = true;
break;
}
}
break;
}
}
if (!found) {
errorCount++;
errors.append("L")
.append(j)
.append(" : missing '")
.append(validatorStr.javadoc)
.append("' for typical argument: ")
.append(validatorStr.code)
.append("\r\n ")
.append(line)
.append("\r\n at ")
.append(fullClassName)
.append(".method(")
.append(f.getName())
.append(":")
.append(j + 1)
.append(")\r\n")
;
}
}
}
if (linei.endsWith("{") || linei.endsWith(";")) {
break;
}
}
}
}
}
if (errorCount != 0) {
errors.insert(0, errorCount + " problems\r\n");
errors.setLength(errors.length() - 2);
throw new AssertionError(errors.toString());
}
}
static final | ParamValidationNaming |
java | elastic__elasticsearch | x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutResponseHandlerHttpPostTests.java | {
"start": 960,
"end": 4651
} | class ____ extends SamlResponseHandlerTests {
private SamlLogoutResponseHandler samlLogoutResponseHandler;
@Before
public void setupHandler() {
clock = new ClockMock();
maxSkew = TimeValue.timeValueMinutes(1);
requestId = randomId();
samlLogoutResponseHandler = new SamlLogoutResponseHandler(
clock,
getIdpConfiguration(() -> buildOpenSamlCredential(idpSigningCertificatePair)),
getSpConfiguration(emptyList()),
maxSkew
);
}
public void testHandlerWorksWithHttpPostBinding() throws Exception {
final String payload = buildLogoutResponsePayload(emptyMap(), true);
samlLogoutResponseHandler.handle(false, payload, List.of(requestId));
}
public void testHandlerFailsWithHttpPostBindingAndNoSignature() throws Exception {
final String payload = buildLogoutResponsePayload(emptyMap(), false);
final ElasticsearchSecurityException e = expectSamlException(
() -> samlLogoutResponseHandler.handle(false, payload, List.of(requestId))
);
assertThat(e.getMessage(), containsString("is not signed"));
}
public void testHandlerWillThrowWhenStatusIsNotSuccess() throws Exception {
final Map<String, Object> replacements = new HashMap<>();
replacements.put("status", "urn:oasis:names:tc:SAML:2.0:status:Requester");
final String payload = buildLogoutResponsePayload(replacements, true);
final ElasticsearchSecurityException e = expectSamlException(
() -> samlLogoutResponseHandler.handle(false, payload, List.of(requestId))
);
assertThat(e.getMessage(), containsString("not a 'success' response"));
}
private String buildLogoutResponsePayload(Map<String, Object> data, boolean shouldSign) throws Exception {
final String template = """
<?xml version="1.0"?>
<samlp:LogoutResponse xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol"\s
ID="%(randomId)"
InResponseTo="%(requestId)" Version="2.0"\s
IssueInstant="%(now)"
Destination="%(SP_LOGOUT_URL)">
<saml:Issuer xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion">%(IDP_ENTITY_ID)</saml:Issuer>
<samlp:Status>
<samlp:StatusCode Value="%(status)"/>
</samlp:Status>
</samlp:LogoutResponse>""";
Map<String, Object> replacements = new HashMap<>(data);
replacements.putIfAbsent("IDP_ENTITY_ID", IDP_ENTITY_ID);
replacements.putIfAbsent("now", clock.instant());
replacements.putIfAbsent("randomId", requestId);
replacements.putIfAbsent("requestId", requestId);
replacements.putIfAbsent("SP_LOGOUT_URL", SP_LOGOUT_URL);
replacements.putIfAbsent("status", "urn:oasis:names:tc:SAML:2.0:status:Success");
final String xml = NamedFormatter.format(template, replacements);
final String signed = shouldSign ? signLogoutResponseString(xml) : xml;
return Base64.getEncoder().encodeToString(signed.getBytes(StandardCharsets.UTF_8));
}
private String signLogoutResponseString(String xml) throws Exception {
final LogoutResponse logoutResponse = samlLogoutResponseHandler.buildXmlObject(
parseDocument(xml).getDocumentElement(),
LogoutResponse.class
);
signSignableObject(logoutResponse, EXCLUSIVE, idpSigningCertificatePair);
return SamlUtils.getXmlContent(logoutResponse, false);
}
}
| SamlLogoutResponseHandlerHttpPostTests |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/KubernetesNamespacesEndpointBuilderFactory.java | {
"start": 52179,
"end": 54881
} | class ____ {
/**
* The internal instance of the builder used to access to all the
* methods representing the name of headers.
*/
private static final KubernetesNamespacesHeaderNameBuilder INSTANCE = new KubernetesNamespacesHeaderNameBuilder();
/**
* The Producer operation.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code KubernetesOperation}.
*/
public String kubernetesOperation() {
return "CamelKubernetesOperation";
}
/**
* The namespace name.
*
* The option is a: {@code String} type.
*
* Group: producer
*
* @return the name of the header {@code KubernetesNamespaceName}.
*/
public String kubernetesNamespaceName() {
return "CamelKubernetesNamespaceName";
}
/**
* The namespace labels.
*
* The option is a: {@code Map<String, String>} type.
*
* Group: producer
*
* @return the name of the header {@code KubernetesNamespaceLabels}.
*/
public String kubernetesNamespaceLabels() {
return "CamelKubernetesNamespaceLabels";
}
/**
* The namespace annotations.
*
* The option is a: {@code Map<String, String>} type.
*
* Group: producer
*
* @return the name of the header {@code
* KubernetesNamespaceAnnotations}.
*/
public String kubernetesNamespaceAnnotations() {
return "CamelKubernetesNamespaceAnnotations";
}
/**
* Action watched by the consumer.
*
* The option is a: {@code io.fabric8.kubernetes.client.Watcher.Action}
* type.
*
* Group: consumer
*
* @return the name of the header {@code KubernetesEventAction}.
*/
public String kubernetesEventAction() {
return "CamelKubernetesEventAction";
}
/**
* Timestamp of the action watched by the consumer.
*
* The option is a: {@code long} type.
*
* Group: consumer
*
* @return the name of the header {@code KubernetesEventTimestamp}.
*/
public String kubernetesEventTimestamp() {
return "CamelKubernetesEventTimestamp";
}
}
static KubernetesNamespacesEndpointBuilder endpointBuilder(String componentName, String path) {
| KubernetesNamespacesHeaderNameBuilder |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/iterative/io/SerializedUpdateBuffer.java | {
"start": 1670,
"end": 10504
} | class ____ extends AbstractPagedOutputView {
private static final int HEADER_LENGTH = 4;
private static final float SPILL_THRESHOLD = 0.95f;
private final LinkedBlockingQueue<MemorySegment> emptyBuffers;
private ArrayDeque<MemorySegment> fullBuffers;
private BlockChannelWriter<MemorySegment> currentWriter;
private final IOManager ioManager;
private final FileIOChannel.Enumerator channelEnumerator;
private final int numSegmentsSpillingThreshold;
private int numBuffersSpilled;
private final int minBuffersForWriteEnd;
private final int minBuffersForSpilledReadEnd;
private final List<ReadEnd> readEnds;
private final int totalNumBuffers;
public SerializedUpdateBuffer() {
super(-1, HEADER_LENGTH);
emptyBuffers = null;
fullBuffers = null;
ioManager = null;
channelEnumerator = null;
numSegmentsSpillingThreshold = -1;
minBuffersForWriteEnd = -1;
minBuffersForSpilledReadEnd = -1;
totalNumBuffers = -1;
readEnds = Collections.emptyList();
}
public SerializedUpdateBuffer(
List<MemorySegment> memSegments, int segmentSize, IOManager ioManager) {
super(memSegments.remove(memSegments.size() - 1), segmentSize, HEADER_LENGTH);
totalNumBuffers = memSegments.size() + 1;
if (totalNumBuffers < 3) {
throw new IllegalArgumentException(
"SerializedUpdateBuffer needs at least 3 memory segments.");
}
emptyBuffers = new LinkedBlockingQueue<MemorySegment>(totalNumBuffers);
fullBuffers = new ArrayDeque<MemorySegment>(64);
emptyBuffers.addAll(memSegments);
int threshold = (int) ((1 - SPILL_THRESHOLD) * totalNumBuffers);
numSegmentsSpillingThreshold = threshold > 0 ? threshold : 0;
minBuffersForWriteEnd = Math.max(2, Math.min(16, totalNumBuffers / 2));
minBuffersForSpilledReadEnd = Math.max(1, Math.min(16, totalNumBuffers / 4));
if (minBuffersForSpilledReadEnd + minBuffersForWriteEnd > totalNumBuffers) {
throw new IllegalArgumentException("BUG: Unfulfillable memory assignment.");
}
this.ioManager = ioManager;
channelEnumerator = ioManager.createChannelEnumerator();
readEnds = new ArrayList<ReadEnd>();
}
@Override
protected MemorySegment nextSegment(MemorySegment current, int positionInCurrent)
throws IOException {
current.putInt(0, positionInCurrent);
// check if we keep the segment in memory, or if we spill it
if (emptyBuffers.size() > numSegmentsSpillingThreshold) {
// keep buffer in memory
fullBuffers.addLast(current);
} else {
// spill all buffers up to now
// check, whether we have a channel already
if (currentWriter == null) {
currentWriter =
ioManager.createBlockChannelWriter(channelEnumerator.next(), emptyBuffers);
}
// spill all elements gathered up to now
numBuffersSpilled += fullBuffers.size();
while (fullBuffers.size() > 0) {
currentWriter.writeBlock(fullBuffers.removeFirst());
}
currentWriter.writeBlock(current);
numBuffersSpilled++;
}
try {
return emptyBuffers.take();
} catch (InterruptedException iex) {
throw new RuntimeException(
"Spilling Fifo Queue was interrupted while waiting for next buffer.");
}
}
public void flush() throws IOException {
advance();
}
public ReadEnd switchBuffers() throws IOException {
// remove exhausted read ends
for (int i = readEnds.size() - 1; i >= 0; --i) {
final ReadEnd re = readEnds.get(i);
if (re.disposeIfDone()) {
readEnds.remove(i);
}
}
// add the current memorySegment and reset this writer
final MemorySegment current = getCurrentSegment();
current.putInt(0, getCurrentPositionInSegment());
fullBuffers.addLast(current);
// create the reader
final ReadEnd readEnd;
if (numBuffersSpilled == 0 && emptyBuffers.size() >= minBuffersForWriteEnd) {
// read completely from in-memory segments
readEnd =
new ReadEnd(
fullBuffers.removeFirst(), emptyBuffers, fullBuffers, null, null, 0);
} else {
int toSpill =
Math.min(
minBuffersForSpilledReadEnd
+ minBuffersForWriteEnd
- emptyBuffers.size(),
fullBuffers.size());
// reader reads also segments on disk
// grab some empty buffers to re-read the first segment
if (toSpill > 0) {
// need to spill to make a buffers available
if (currentWriter == null) {
currentWriter =
ioManager.createBlockChannelWriter(
channelEnumerator.next(), emptyBuffers);
}
for (int i = 0; i < toSpill; i++) {
currentWriter.writeBlock(fullBuffers.removeFirst());
}
numBuffersSpilled += toSpill;
}
// now close the writer and create the reader
currentWriter.close();
final BlockChannelReader<MemorySegment> reader =
ioManager.createBlockChannelReader(currentWriter.getChannelID());
// gather some memory segments to circulate while reading back the data
final List<MemorySegment> readSegments = new ArrayList<MemorySegment>();
try {
while (readSegments.size() < minBuffersForSpilledReadEnd) {
readSegments.add(emptyBuffers.take());
}
// read the first segment
MemorySegment firstSeg = readSegments.remove(readSegments.size() - 1);
reader.readBlock(firstSeg);
firstSeg = reader.getReturnQueue().take();
// create the read end reading one less buffer, because the first buffer is already
// read back
readEnd =
new ReadEnd(
firstSeg,
emptyBuffers,
fullBuffers,
reader,
readSegments,
numBuffersSpilled - 1);
} catch (InterruptedException e) {
throw new RuntimeException(
"SerializedUpdateBuffer was interrupted while reclaiming memory by spilling.",
e);
}
}
// reset the writer
fullBuffers = new ArrayDeque<MemorySegment>(64);
currentWriter = null;
numBuffersSpilled = 0;
try {
seekOutput(emptyBuffers.take(), HEADER_LENGTH);
} catch (InterruptedException e) {
throw new RuntimeException(
"SerializedUpdateBuffer was interrupted while reclaiming memory by spilling.",
e);
}
// register this read end
readEnds.add(readEnd);
return readEnd;
}
public List<MemorySegment> close() {
if (currentWriter != null) {
try {
currentWriter.closeAndDelete();
} catch (Throwable t) {
// do nothing
}
}
List<MemorySegment> freeMem = new ArrayList<MemorySegment>(64);
// add all memory allocated to the write end
freeMem.add(getCurrentSegment());
clear();
freeMem.addAll(fullBuffers);
fullBuffers = null;
// add memory from non-exhausted read ends
try {
for (int i = readEnds.size() - 1; i >= 0; --i) {
final ReadEnd re = readEnds.remove(i);
re.forceDispose(freeMem);
}
// release all empty segments
while (freeMem.size() < totalNumBuffers) {
freeMem.add(emptyBuffers.take());
}
} catch (InterruptedException e) {
throw new RuntimeException(
"Retrieving memory back from asynchronous I/O was interrupted.", e);
}
return freeMem;
}
// ============================================================================================
private static final | SerializedUpdateBuffer |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/sql/ast/tree/expression/ColumnReference.java | {
"start": 1086,
"end": 7304
} | class ____ implements Expression, Assignable {
private final @Nullable String qualifier;
private final String columnExpression;
private final @Nullable SelectablePath selectablePath;
private final boolean isFormula;
private final @Nullable String readExpression;
private final JdbcMapping jdbcMapping;
public ColumnReference(TableReference tableReference, SelectableMapping selectableMapping) {
this(
tableReference.getIdentificationVariable(),
selectableMapping.getSelectionExpression(),
selectableMapping.getSelectablePath(),
selectableMapping.isFormula(),
selectableMapping.getCustomReadExpression(),
selectableMapping.getJdbcMapping()
);
}
public ColumnReference(TableReference tableReference, String mapping, JdbcMapping jdbcMapping) {
this(
tableReference.getIdentificationVariable(),
mapping,
null,
false,
null,
jdbcMapping
);
}
public ColumnReference(@Nullable String qualifier, SelectableMapping selectableMapping) {
this(
qualifier,
selectableMapping.getSelectionExpression(),
selectableMapping.getSelectablePath(),
selectableMapping.isFormula(),
selectableMapping.getCustomReadExpression(),
selectableMapping.getJdbcMapping()
);
}
public ColumnReference(@Nullable String qualifier, SelectableMapping selectableMapping, JdbcMapping jdbcMapping) {
this(
qualifier,
selectableMapping.getSelectionExpression(),
selectableMapping.getSelectablePath(),
selectableMapping.isFormula(),
selectableMapping.getCustomReadExpression(),
jdbcMapping
);
}
public ColumnReference(
TableReference tableReference,
String columnExpression,
boolean isFormula,
@Nullable String customReadExpression,
JdbcMapping jdbcMapping) {
this(
tableReference.getIdentificationVariable(),
columnExpression,
null,
isFormula,
customReadExpression,
jdbcMapping
);
}
public ColumnReference(
@Nullable String qualifier,
String columnExpression,
boolean isFormula,
@Nullable String customReadExpression,
JdbcMapping jdbcMapping) {
this( qualifier, columnExpression, null, isFormula, customReadExpression, jdbcMapping );
}
public ColumnReference(
@Nullable String qualifier,
String columnExpression,
@Nullable SelectablePath selectablePath,
boolean isFormula,
@Nullable String customReadExpression,
JdbcMapping jdbcMapping) {
this.qualifier = nullIfEmpty( qualifier );
if ( isFormula ) {
this.columnExpression = qualifier == null
? replace( columnExpression, TEMPLATE + '.', "" )
: replace( columnExpression, TEMPLATE, qualifier );
}
else {
this.columnExpression = columnExpression;
}
this.selectablePath = selectablePath == null
? new SelectablePath( this.columnExpression )
: selectablePath;
this.isFormula = isFormula;
this.readExpression = customReadExpression;
this.jdbcMapping = jdbcMapping;
}
@Override
public ColumnReference getColumnReference() {
return this;
}
public @Nullable String getQualifier() {
return qualifier;
}
public String getColumnExpression() {
return columnExpression;
}
public @Nullable String getReadExpression() {
return readExpression;
}
public @Nullable String getSelectableName() {
return selectablePath == null ? null : selectablePath.getSelectableName();
}
public @Nullable SelectablePath getSelectablePath() {
return selectablePath;
}
public boolean isColumnExpressionFormula() {
return isFormula;
}
public String getExpressionText() {
final StringBuilder sb = new StringBuilder();
appendReadExpression( new StringBuilderSqlAppender( sb ) );
return sb.toString();
}
public void appendReadExpression(SqlAppender appender) {
appendReadExpression( appender, qualifier );
}
public void appendReadExpression(@Nullable String qualifier, Consumer<String> appender) {
if ( isFormula ) {
appender.accept( columnExpression );
}
else if ( readExpression != null ) {
appender.accept( qualifier == null
? replace( readExpression, TEMPLATE + '.', "" )
: replace( readExpression, TEMPLATE, qualifier ) );
}
else {
if ( qualifier != null ) {
appender.accept( qualifier );
appender.accept( "." );
}
appender.accept( columnExpression );
}
}
public void appendReadExpression(SqlAppender appender, @Nullable String qualifier) {
appendReadExpression( qualifier, appender::appendSql );
}
public void appendColumnForWrite(SqlAppender appender) {
appendColumnForWrite( appender, qualifier );
}
public void appendColumnForWrite(SqlAppender appender, @Nullable String qualifier) {
if ( qualifier != null ) {
appender.append( qualifier );
appender.append( '.' );
}
appender.append( columnExpression );
}
public JdbcMapping getJdbcMapping() {
return jdbcMapping;
}
@Override
public JdbcMapping getExpressionType() {
return jdbcMapping;
}
@Override
public void accept(SqlAstWalker interpreter) {
interpreter.visitColumnReference( this );
}
@Override
public String toString() {
return String.format(
Locale.ROOT,
"%s(%s)",
getClass().getSimpleName(),
getExpressionText()
);
}
@Override
public boolean equals(Object o) {
if ( this == o ) {
return true;
}
if ( o == null || getClass() != o.getClass() ) {
return false;
}
final ColumnReference that = (ColumnReference) o;
return isFormula == that.isFormula
&& Objects.equals( qualifier, that.qualifier )
&& Objects.equals( columnExpression, that.columnExpression )
&& Objects.equals( readExpression, that.readExpression );
}
@Override
public int hashCode() {
int result = qualifier != null ? qualifier.hashCode() : 0;
result = 31 * result + ( columnExpression != null ? columnExpression.hashCode() : 0 );
result = 31 * result + ( isFormula ? 1 : 0 );
result = 31 * result + ( readExpression != null ? readExpression.hashCode() : 0 );
return result;
}
@Override
public void visitColumnReferences(Consumer<ColumnReference> columnReferenceConsumer) {
columnReferenceConsumer.accept( this );
}
@Override
public List<ColumnReference> getColumnReferences() {
return Collections.singletonList( this );
}
}
| ColumnReference |
java | grpc__grpc-java | examples/src/main/java/io/grpc/examples/advanced/HelloJsonClient.java | {
"start": 3316,
"end": 4216
} | class ____ extends AbstractStub<HelloJsonStub> {
static final MethodDescriptor<HelloRequest, HelloReply> METHOD_SAY_HELLO =
GreeterGrpc.getSayHelloMethod()
.toBuilder(
JsonMarshaller.jsonMarshaller(HelloRequest.getDefaultInstance()),
JsonMarshaller.jsonMarshaller(HelloReply.getDefaultInstance()))
.build();
protected HelloJsonStub(Channel channel) {
super(channel);
}
protected HelloJsonStub(Channel channel, CallOptions callOptions) {
super(channel, callOptions);
}
@Override
protected HelloJsonStub build(Channel channel, CallOptions callOptions) {
return new HelloJsonStub(channel, callOptions);
}
public HelloReply sayHello(HelloRequest request) {
return blockingUnaryCall(
getChannel(), METHOD_SAY_HELLO, getCallOptions(), request);
}
}
}
| HelloJsonStub |
java | spring-projects__spring-security | oauth2/oauth2-client/src/main/java/org/springframework/security/oauth2/client/jackson2/OidcIdTokenMixin.java | {
"start": 1102,
"end": 1697
} | class ____ used to serialize/deserialize {@link OidcIdToken}.
*
* @author Joe Grandja
* @since 5.3
* @see OidcIdToken
* @see OAuth2ClientJackson2Module
* @deprecated as of 7.0 in favor of
* {@code org.springframework.security.oauth2.client.jackson.OidcIdTokenMixin} based on
* Jackson 3
*/
@Deprecated(forRemoval = true)
@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS)
@JsonAutoDetect(fieldVisibility = JsonAutoDetect.Visibility.ANY, getterVisibility = JsonAutoDetect.Visibility.NONE,
isGetterVisibility = JsonAutoDetect.Visibility.NONE)
@JsonIgnoreProperties(ignoreUnknown = true)
abstract | is |
java | apache__dubbo | dubbo-common/src/main/java/org/apache/dubbo/common/convert/multiple/MultiValueConverter.java | {
"start": 1453,
"end": 3617
} | interface ____<S> extends Prioritized {
/**
* Accept the source type and target type or not
*
* @param sourceType the source type
* @param multiValueType the multi-value type
* @return if accepted, return <code>true</code>, or <code>false</code>
*/
boolean accept(Class<S> sourceType, Class<?> multiValueType);
/**
* Convert the source to be the multiple value
*
* @param source the source-typed value
* @param multiValueType the multi-value type
* @param elementType the element type
* @return
*/
Object convert(S source, Class<?> multiValueType, Class<?> elementType);
/**
* Get the source type
*
* @return non-null
*/
default Class<S> getSourceType() {
return findActualTypeArgument(getClass(), MultiValueConverter.class, 0);
}
/**
* Find the {@link MultiValueConverter} instance from {@link ExtensionLoader} with the specified source and target type
*
* @param sourceType the source type
* @param targetType the target type
* @return <code>null</code> if not found
* @see ExtensionLoader#getSupportedExtensionInstances()
* @since 2.7.8
* @deprecated will be removed in 3.3.0
*/
@Deprecated
static MultiValueConverter<?> find(Class<?> sourceType, Class<?> targetType) {
return FrameworkModel.defaultModel()
.getExtensionLoader(MultiValueConverter.class)
.getSupportedExtensionInstances()
.stream()
.filter(converter -> converter.accept(sourceType, targetType))
.findFirst()
.orElse(null);
}
/**
* @deprecated will be removed in 3.3.0
*/
@Deprecated
static <T> T convertIfPossible(Object source, Class<?> multiValueType, Class<?> elementType) {
Class<?> sourceType = source.getClass();
MultiValueConverter converter = find(sourceType, multiValueType);
if (converter != null) {
return (T) converter.convert(source, multiValueType, elementType);
}
return null;
}
}
| MultiValueConverter |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java | {
"start": 1957,
"end": 2092
} | class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(QueueManager.class.getName());
private final | QueueManager |
java | apache__spark | sql/catalyst/src/main/java/org/apache/spark/sql/connector/metric/CustomMetric.java | {
"start": 1428,
"end": 1507
} | interface ____ have a 0-arg constructor.
*
* @since 3.2.0
*/
@Evolving
public | to |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/benchmark/e2e/SchedulingAndDeployingBenchmarkTest.java | {
"start": 1170,
"end": 1798
} | class ____ {
@Test
void scheduleAndDeployInStreamingJob() throws Exception {
SchedulingAndDeployingBenchmark benchmark = new SchedulingAndDeployingBenchmark();
benchmark.setup(JobConfiguration.STREAMING_TEST);
benchmark.startScheduling();
benchmark.teardown();
}
@Test
void scheduleAndDeployInBatchJob() throws Exception {
SchedulingAndDeployingBenchmark benchmark = new SchedulingAndDeployingBenchmark();
benchmark.setup(JobConfiguration.BATCH_TEST);
benchmark.startScheduling();
benchmark.teardown();
}
}
| SchedulingAndDeployingBenchmarkTest |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/doublearray/DoubleArrayAssert_containsOnly_with_Double_array_Test.java | {
"start": 1326,
"end": 2278
} | class ____ extends DoubleArrayAssertBaseTest {
@Test
void should_fail_if_values_is_null() {
// GIVEN
Double[] values = null;
// WHEN
Throwable thrown = catchThrowable(() -> assertions.containsOnly(values));
// THEN
then(thrown).isInstanceOf(NullPointerException.class)
.hasMessage(shouldNotBeNull("values").create());
}
@Test
void should_pass_if_values_are_in_range_of_precision() {
// GIVEN
Double[] values = new Double[] { 2.0, 3.0, 0.7 };
// WHEN/THEN
assertThat(arrayOf(1.0, 2.0, 3.0)).containsOnly(values, withPrecision(0.5));
}
@Override
protected DoubleArrayAssert invoke_api_method() {
return assertions.containsOnly(new Double[] { 1.0, 2.0 });
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertContainsOnly(getInfo(assertions), getActual(assertions), arrayOf(1.0, 2.0));
}
}
| DoubleArrayAssert_containsOnly_with_Double_array_Test |
java | google__truth | extensions/proto/src/main/java/com/google/common/truth/extensions/proto/FieldScopes.java | {
"start": 1028,
"end": 11557
} | class ____ {
/**
* Returns a {@link FieldScope} which is constrained to precisely those specific field paths that
* are explicitly set in the message. Note that, for version 3 protobufs, such a {@link
* FieldScope} will omit fields in the provided message which are set to default values.
*
* <p>This can be used limit the scope of a comparison to a complex set of fields in a very brief
* statement. Often, {@code message} is the expected half of a comparison about to be performed.
*
* <p>Example usage:
*
* <pre>{@code
* Foo actual = Foo.newBuilder().setBar(3).setBaz(4).build();
* Foo expected = Foo.newBuilder().setBar(3).setBaz(5).build();
* // Fails, because actual.getBaz() != expected.getBaz().
* assertThat(actual).isEqualTo(expected);
*
* Foo scope = Foo.newBuilder().setBar(2).build();
* // Succeeds, because only the field 'bar' is compared.
* assertThat(actual).withPartialScope(FieldScopes.fromSetFields(scope)).isEqualTo(expected);
*
* }</pre>
*
* <p>The returned {@link FieldScope} does not respect repeated field indices nor map keys. For
* example, if the provided message sets different field values for different elements of a
* repeated field, like so:
*
* <pre>{@code
* sub_message: {
* foo: "foo"
* }
* sub_message: {
* bar: "bar"
* }
* }</pre>
*
* <p>The {@link FieldScope} will contain {@code sub_message.foo} and {@code sub_message.bar} for
* *all* repeated {@code sub_messages}, including those beyond index 1.
*/
// TODO(user): Figure out a way to improve this without reinventing MessageDifferencer.
// Alternatively, gather evidence to show that the existing behavior is fine/preferable.
// Alternatively II, add Scope.PARTIAL support to ProtoFluentEquals, but with a different name and
// explicit documentation that it may cause issues with Proto 3.
public static FieldScope fromSetFields(Message message) {
return fromSetFields(
message, AnyUtils.defaultTypeRegistry(), AnyUtils.defaultExtensionRegistry());
}
/**
* Returns a {@link FieldScope} which is constrained to precisely those specific field paths that
* are explicitly set in the message. Note that, for version 3 protobufs, such a {@link
* FieldScope} will omit fields in the provided message which are set to default values.
*
* <p>This can be used limit the scope of a comparison to a complex set of fields in a very brief
* statement. Often, {@code message} is the expected half of a comparison about to be performed.
*
* <p>Example usage:
*
* <pre>{@code
* Foo actual = Foo.newBuilder().setBar(3).setBaz(4).build();
* Foo expected = Foo.newBuilder().setBar(3).setBaz(5).build();
* // Fails, because actual.getBaz() != expected.getBaz().
* assertThat(actual).isEqualTo(expected);
*
* Foo scope = Foo.newBuilder().setBar(2).build();
* // Succeeds, because only the field 'bar' is compared.
* assertThat(actual).withPartialScope(FieldScopes.fromSetFields(scope)).isEqualTo(expected);
*
* }</pre>
*
* <p>The returned {@link FieldScope} does not respect repeated field indices nor map keys. For
* example, if the provided message sets different field values for different elements of a
* repeated field, like so:
*
* <pre>{@code
* sub_message: {
* foo: "foo"
* }
* sub_message: {
* bar: "bar"
* }
* }</pre>
*
* <p>The {@link FieldScope} will contain {@code sub_message.foo} and {@code sub_message.bar} for
* *all* repeated {@code sub_messages}, including those beyond index 1.
*
* <p>If there are {@code google.protobuf.Any} protos anywhere within these messages, they will be
* unpacked using the provided {@link TypeRegistry} and {@link ExtensionRegistry} to determine
* which fields within them should be compared.
*
* @see ProtoFluentAssertion#unpackingAnyUsing
* @since 1.2
*/
public static FieldScope fromSetFields(
Message message, TypeRegistry typeRegistry, ExtensionRegistry extensionRegistry) {
return FieldScopeImpl.createFromSetFields(message, typeRegistry, extensionRegistry);
}
/**
* Creates a {@link FieldScope} covering the fields set in every message in the provided list of
* messages, with the same semantics as in {@link #fromSetFields(Message)}.
*
* <p>This can be thought of as the union of the {@link FieldScope}s for each individual message,
* or the {@link FieldScope} for the merge of all the messages. These are equivalent.
*/
public static FieldScope fromSetFields(
Message firstMessage, Message secondMessage, Message... rest) {
return fromSetFields(asList(firstMessage, secondMessage, rest));
}
/**
* Creates a {@link FieldScope} covering the fields set in every message in the provided list of
* messages, with the same semantics as in {@link #fromSetFields(Message)}.
*
* <p>This can be thought of as the union of the {@link FieldScope}s for each individual message,
* or the {@link FieldScope} for the merge of all the messages. These are equivalent.
*/
public static FieldScope fromSetFields(Iterable<? extends Message> messages) {
return fromSetFields(
messages, AnyUtils.defaultTypeRegistry(), AnyUtils.defaultExtensionRegistry());
}
/**
* Creates a {@link FieldScope} covering the fields set in every message in the provided list of
* messages, with the same semantics as in {@link #fromSetFields(Message)}.
*
* <p>This can be thought of as the union of the {@link FieldScope}s for each individual message,
* or the {@link FieldScope} for the merge of all the messages. These are equivalent.
*
* <p>If there are {@code google.protobuf.Any} protos anywhere within these messages, they will be
* unpacked using the provided {@link TypeRegistry} and {@link ExtensionRegistry} to determine
* which fields within them should be compared.
*
* @see ProtoFluentAssertion#unpackingAnyUsing
* @since 1.2
*/
public static FieldScope fromSetFields(
Iterable<? extends Message> messages,
TypeRegistry typeRegistry,
ExtensionRegistry extensionRegistry) {
return FieldScopeImpl.createFromSetFields(messages, typeRegistry, extensionRegistry);
}
/**
* Returns a {@link FieldScope} which matches everything except the provided field numbers for the
* top level message type.
*
* <p>The field numbers are ignored recursively on this type. That is, if {@code YourMessage}
* contains another {@code YourMessage} somewhere within its subtree, field number {@code X} will
* be ignored for all submessages of type {@code YourMessage}, as well as for the top-level
* message.
*
* @see FieldScope#ignoringFields(int, int...)
*/
public static FieldScope ignoringFields(int firstFieldNumber, int... rest) {
return FieldScopeImpl.createIgnoringFields(asList(firstFieldNumber, rest));
}
/**
* Returns a {@link FieldScope} which matches everything except the provided field numbers for the
* top level message type.
*
* <p>The field numbers are ignored recursively on this type. That is, if {@code YourMessage}
* contains another {@code YourMessage} somewhere within its subtree, field number {@code X} will
* be ignored for all submessages of type {@code YourMessage}, as well as for the top-level
* message.
*
* @see FieldScope#ignoringFields(Iterable)
*/
public static FieldScope ignoringFields(Iterable<Integer> fieldNumbers) {
return FieldScopeImpl.createIgnoringFields(fieldNumbers);
}
/**
* Returns a {@link FieldScope} which matches everything except the provided field descriptors for
* the message.
*
* @see FieldScope#ignoringFieldDescriptors(FieldDescriptor, FieldDescriptor...)
*/
public static FieldScope ignoringFieldDescriptors(
FieldDescriptor firstFieldDescriptor, FieldDescriptor... rest) {
return FieldScopeImpl.createIgnoringFieldDescriptors(asList(firstFieldDescriptor, rest));
}
/**
* Returns a {@link FieldScope} which matches everything except the provided field descriptors for
* the message.
*
* @see FieldScope#ignoringFieldDescriptors(Iterable)
*/
public static FieldScope ignoringFieldDescriptors(Iterable<FieldDescriptor> fieldDescriptors) {
return FieldScopeImpl.createIgnoringFieldDescriptors(fieldDescriptors);
}
/**
* Returns a {@link FieldScope} which matches nothing except the provided field numbers for the
* top level message type.
*
* @see FieldScope#allowingFields(int, int...)
*/
public static FieldScope allowingFields(int firstFieldNumber, int... rest) {
return FieldScopeImpl.createAllowingFields(asList(firstFieldNumber, rest));
}
/**
* Returns a {@link FieldScope} which matches nothing except the provided field numbers for the
* top level message type.
*
* @see FieldScope#allowingFields(Iterable)
*/
public static FieldScope allowingFields(Iterable<Integer> fieldNumbers) {
return FieldScopeImpl.createAllowingFields(fieldNumbers);
}
/**
* Returns a {@link FieldScope} which matches nothing except the provided field descriptors for
* the message.
*
* @see FieldScope#allowingFieldDescriptors(FieldDescriptor, FieldDescriptor...)
*/
public static FieldScope allowingFieldDescriptors(
FieldDescriptor firstFieldDescriptor, FieldDescriptor... rest) {
return FieldScopeImpl.createAllowingFieldDescriptors(asList(firstFieldDescriptor, rest));
}
/**
* Returns a {@link FieldScope} which matches nothing except the provided field descriptors for
* the message.
*
* @see FieldScope#allowingFieldDescriptors(Iterable)
*/
public static FieldScope allowingFieldDescriptors(Iterable<FieldDescriptor> fieldDescriptors) {
return FieldScopeImpl.createAllowingFieldDescriptors(fieldDescriptors);
}
/**
* Returns a {@link FieldScope} which matches all fields without exception. Generally not needed,
* since the other factory functions will build on top of this for you.
*/
public static FieldScope all() {
return FieldScopeImpl.all();
}
/**
* Returns a {@link FieldScope} which matches no fields. A comparison made using this scope alone
* will always trivially pass. Generally not needed, since the other factory functions will build
* on top of this for you.
*/
public static FieldScope none() {
return FieldScopeImpl.none();
}
private FieldScopes() {}
}
| FieldScopes |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/interfaces/hbm/allAudited/AuditedImplementor.java | {
"start": 262,
"end": 833
} | class ____ implements SimpleInterface {
private long id;
private String data;
private String auditedImplementorData;
protected AuditedImplementor() {
}
public long getId() {
return id;
}
public void setId(long id) {
this.id = id;
}
public String getData() {
return data;
}
public void setData(String data) {
this.data = data;
}
public String getAuditedImplementorData() {
return auditedImplementorData;
}
public void setAuditedImplementorData(String implementorData) {
this.auditedImplementorData = implementorData;
}
}
| AuditedImplementor |
java | google__error-prone | core/src/test/java/com/google/errorprone/refaster/testdata/input/LabelTemplateExample.java | {
"start": 795,
"end": 1488
} | class ____ {
public void example(BigInteger[] array) {
StringBuilder builder = new StringBuilder("[");
for (int i = 0; i < array.length; i++) {
builder.append(array[i]);
if (i == array.length - 1) {
break;
}
builder.append(',');
}
join: for (int i = 0; i < array.length; i++) {
builder.append(array[i]);
if (i == array.length - 1) {
break join;
}
builder.append(',');
}
join: for (int i = 0; i < array.length; i++) {
builder.append(array[i]);
if (i == array.length - 1) {
continue join;
}
builder.append(',');
}
System.out.println(builder);
}
}
| LabelTemplateExample |
java | elastic__elasticsearch | x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java | {
"start": 1367,
"end": 7479
} | class ____ extends Command {
private final String index;
private final LikePattern pattern;
private final LikePattern clusterPattern;
private final EnumSet<IndexType> types;
public SysTables(Source source, LikePattern clusterPattern, String index, LikePattern pattern, EnumSet<IndexType> types) {
super(source);
this.clusterPattern = clusterPattern;
this.index = index;
this.pattern = pattern;
this.types = types;
}
@Override
protected NodeInfo<SysTables> info() {
return NodeInfo.create(this, SysTables::new, clusterPattern, index, pattern, types);
}
@Override
public List<Attribute> output() {
return asList(
keyword("TABLE_CAT"),
keyword("TABLE_SCHEM"),
keyword("TABLE_NAME"),
keyword("TABLE_TYPE"),
keyword("REMARKS"),
keyword("TYPE_CAT"),
keyword("TYPE_SCHEM"),
keyword("TYPE_NAME"),
keyword("SELF_REFERENCING_COL_NAME"),
keyword("REF_GENERATION")
);
}
@Override
public final void execute(SqlSession session, ActionListener<Page> listener) {
String cluster = session.indexResolver().clusterName();
// first check if where dealing with ODBC enumeration
// namely one param specified with '%', everything else empty string
// https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqltables-function?view=ssdt-18vs2017#comments
// catalog enumeration
if (clusterPattern == null || clusterPattern.pattern().equals(SQL_WILDCARD)) {
// enumerate only if pattern is "" and no types are specified (types is null)
if (pattern != null && pattern.pattern().isEmpty() && index == null && types == null) {
// include remote and local cluster
Set<String> clusters = session.indexResolver().remoteClusters();
clusters.add(cluster);
List<List<?>> rows = new ArrayList<>(clusters.size());
for (String name : clusters) {
List<String> row = new ArrayList<>(10);
row.addAll(Arrays.asList(name, null, null, null, null, null, null, null, null, null));
rows.add(row);
}
listener.onResponse(of(session, rows));
return;
}
}
boolean includeFrozen = session.configuration().includeFrozen();
// enumerate types
// if no types are specified (the parser takes care of the % case)
if (types == null) {
// empty string for catalog
if (clusterPattern != null && clusterPattern.pattern().isEmpty()
// empty string for table like and no index specified
&& pattern != null
&& pattern.pattern().isEmpty()
&& index == null) {
List<List<?>> values = new ArrayList<>();
// send only the types, everything else is made of empty strings
// NB: since the types are sent in SQL, frozen doesn't have to be taken into account since
// it's just another TABLE
Set<IndexType> typeSet = IndexType.VALID_REGULAR;
for (IndexType type : typeSet) {
Object[] enumeration = new Object[10];
enumeration[3] = type.toSql();
values.add(asList(enumeration));
}
values.sort(Comparator.comparing(l -> l.get(3).toString()));
listener.onResponse(of(session, values));
return;
}
}
// no enumeration pattern found, list actual tables
String cRegex = clusterPattern != null ? clusterPattern.asIndexNameWildcard() : null;
String idx = hasText(index) ? index : (pattern != null ? pattern.asIndexNameWildcard() : "*");
String regex = pattern != null ? pattern.asJavaRegex() : null;
EnumSet<IndexType> tableTypes = types;
// initialize types for name resolution
if (tableTypes == null) {
tableTypes = includeFrozen ? IndexType.VALID_INCLUDE_FROZEN : IndexType.VALID_REGULAR;
} else {
if (includeFrozen && tableTypes.contains(IndexType.FROZEN_INDEX) == false) {
tableTypes.add(IndexType.FROZEN_INDEX);
}
}
session.indexResolver()
.resolveNames(
cRegex,
idx,
regex,
tableTypes,
listener.delegateFailureAndWrap(
(delegate, result) -> delegate.onResponse(
of(
session,
result.stream()
// sort by type, then by cluster and name
.sorted(
Comparator.<IndexInfo, String>comparing(i -> i.type().toSql())
.thenComparing(IndexInfo::cluster)
.thenComparing(IndexInfo::name)
)
.map(t -> asList(t.cluster(), null, t.name(), t.type().toSql(), EMPTY, null, null, null, null, null))
.collect(toList())
)
)
)
);
}
@Override
public int hashCode() {
return Objects.hash(clusterPattern, index, pattern, types);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
SysTables other = (SysTables) obj;
return Objects.equals(clusterPattern, other.clusterPattern)
&& Objects.equals(index, other.index)
&& Objects.equals(pattern, other.pattern)
&& Objects.equals(types, other.types);
}
}
| SysTables |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/sql/ast/tree/from/AbstractColumnReferenceQualifier.java | {
"start": 310,
"end": 1376
} | class ____ implements ColumnReferenceQualifier {
protected abstract TableReference getPrimaryTableReference();
protected abstract List<TableReferenceJoin> getTableReferenceJoins();
protected abstract SessionFactoryImplementor getSessionFactory();
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// TableReference handling
@Override
public TableReference getTableReference(
NavigablePath navigablePath,
String tableExpression,
boolean resolve) {
final TableReference primaryTableReference = getPrimaryTableReference().getTableReference(
navigablePath,
tableExpression,
resolve
);
if ( primaryTableReference != null) {
return primaryTableReference;
}
for ( TableReferenceJoin tableJoin : getTableReferenceJoins() ) {
final TableReference tableReference = tableJoin.getJoinedTableReference().getTableReference(
navigablePath,
tableExpression,
resolve
);
if ( tableReference != null) {
return tableReference;
}
}
return null;
}
}
| AbstractColumnReferenceQualifier |
java | grpc__grpc-java | binder/src/main/java/io/grpc/binder/internal/MetadataHelper.java | {
"start": 1556,
"end": 7599
} | class ____ {
/** The generic metadata marshaller we use for reading parcelables from the transport. */
private static final Metadata.BinaryStreamMarshaller<Parcelable> TRANSPORT_INBOUND_MARSHALLER =
new ParcelableMetadataMarshaller<>(null, true);
/** Indicates the following value is a parcelable. */
private static final int PARCELABLE_SENTINEL = -1;
private MetadataHelper() {}
/**
* Write a Metadata instance to a Parcel.
*
* @param parcel The {@link Parcel} to write to.
* @param metadata The {@link Metadata} to write.
*/
public static void writeMetadata(Parcel parcel, @Nullable Metadata metadata)
throws StatusException, IOException {
int n = metadata != null ? InternalMetadata.headerCount(metadata) : 0;
if (n == 0) {
parcel.writeInt(0);
return;
}
Object[] serialized = InternalMetadata.serializePartial(metadata);
parcel.writeInt(n);
for (int i = 0; i < n; i++) {
byte[] name = (byte[]) serialized[i * 2];
parcel.writeInt(name.length);
parcel.writeByteArray(name);
Object value = serialized[i * 2 + 1];
if (value instanceof byte[]) {
byte[] valueBytes = (byte[]) value;
parcel.writeInt(valueBytes.length);
parcel.writeByteArray(valueBytes);
} else if (value instanceof ParcelableInputStream) {
parcel.writeInt(PARCELABLE_SENTINEL);
((ParcelableInputStream) value).writeToParcel(parcel);
} else {
// An InputStream which wasn't created by ParcelableUtils, which means there's another use
// of Metadata.BinaryStreamMarshaller. Just read the bytes.
//
// We know that BlockPool will give us a buffer at least as large as the max space for all
// names and values so it'll certainly be large enough (and the limit is only 8k so this
// is fine).
byte[] buffer = BlockPool.acquireBlock();
try {
InputStream stream = (InputStream) value;
int total = 0;
while (total < buffer.length) {
int read = stream.read(buffer, total, buffer.length - total);
if (read == -1) {
break;
}
total += read;
}
if (total == buffer.length) {
throw Status.RESOURCE_EXHAUSTED
.withDescription("Metadata value too large")
.asException();
}
parcel.writeInt(total);
if (total > 0) {
parcel.writeByteArray(buffer, 0, total);
}
} finally {
BlockPool.releaseBlock(buffer);
}
}
}
}
/**
* Read a Metadata instance from a Parcel.
*
* @param parcel The {@link Parcel} to read from.
*/
public static Metadata readMetadata(Parcel parcel, Attributes attributes) throws StatusException {
int n = parcel.readInt();
if (n == 0) {
return new Metadata();
}
// For enforcing the header-size limit. Doesn't include parcelable data.
int bytesRead = 0;
// For enforcing the maximum allowed parcelable data (see InboundParcelablePolicy).
int parcelableBytesRead = 0;
Object[] serialized = new Object[n * 2];
for (int i = 0; i < n; i++) {
int numNameBytes = parcel.readInt();
bytesRead += 4;
byte[] name = readBytesChecked(parcel, numNameBytes, bytesRead);
bytesRead += numNameBytes;
serialized[i * 2] = name;
int numValueBytes = parcel.readInt();
bytesRead += 4;
if (numValueBytes == PARCELABLE_SENTINEL) {
InboundParcelablePolicy policy = attributes.get(BinderTransport.INBOUND_PARCELABLE_POLICY);
if (!policy.shouldAcceptParcelableMetadataValues()) {
throw Status.PERMISSION_DENIED
.withDescription("Parcelable metadata values not allowed")
.asException();
}
int parcelableStartPos = parcel.dataPosition();
try {
// readParcelable(Classloader, Class<>) requires SDK 33 and at this layer we can't know
// value's type anyway.
@SuppressWarnings("deprecation")
Parcelable value = parcel.readParcelable(MetadataHelper.class.getClassLoader());
if (value == null) {
throw Status.INTERNAL.withDescription("Read null parcelable in metadata").asException();
}
serialized[i * 2 + 1] = InternalMetadata.parsedValue(TRANSPORT_INBOUND_MARSHALLER, value);
} catch (AndroidRuntimeException are) {
throw Status.INTERNAL
.withCause(are)
.withDescription("Failure reading parcelable in metadata")
.asException();
}
int parcelableSize = parcel.dataPosition() - parcelableStartPos;
parcelableBytesRead += parcelableSize;
if (parcelableBytesRead > policy.getMaxParcelableMetadataSize()) {
throw Status.RESOURCE_EXHAUSTED
.withDescription(
"Inbound Parcelables too large according to policy (see InboundParcelablePolicy)")
.asException();
}
} else if (numValueBytes < 0) {
throw Status.INTERNAL.withDescription("Unrecognized metadata sentinel").asException();
} else {
byte[] value = readBytesChecked(parcel, numValueBytes, bytesRead);
bytesRead += numValueBytes;
serialized[i * 2 + 1] = value;
}
}
return InternalMetadata.newMetadataWithParsedValues(n, serialized);
}
/** Read a byte array checking that we're not reading too much. */
private static byte[] readBytesChecked(Parcel parcel, int numBytes, int bytesRead)
throws StatusException {
if (bytesRead + numBytes > GrpcUtil.DEFAULT_MAX_HEADER_LIST_SIZE) {
throw Status.RESOURCE_EXHAUSTED.withDescription("Metadata too large").asException();
}
byte[] res = new byte[numBytes];
if (numBytes > 0) {
parcel.readByteArray(res);
}
return res;
}
/** A marshaller for passing parcelables in gRPC {@link Metadata} */
public static final | MetadataHelper |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/beans/factory/xml/XmlBeanFactoryTestTypes.java | {
"start": 13028,
"end": 13225
} | class ____ extends MethodReplaceCandidate implements Serializable {
//public abstract Point getPoint();
}
/**
* @author Juergen Hoeller
* @since 23.10.2004
*/
| SerializableMethodReplacerCandidate |
java | elastic__elasticsearch | x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/action/MonitoringBulkResponseTests.java | {
"start": 967,
"end": 3846
} | class ____ extends ESTestCase {
public void testResponseStatus() {
final long took = Math.abs(randomLong());
MonitoringBulkResponse response = new MonitoringBulkResponse(took, false);
assertThat(response.getTookInMillis(), equalTo(took));
assertThat(response.getError(), is(nullValue()));
assertThat(response.isIgnored(), is(false));
assertThat(response.status(), equalTo(RestStatus.OK));
response = new MonitoringBulkResponse(took, true);
assertThat(response.getTookInMillis(), equalTo(took));
assertThat(response.getError(), is(nullValue()));
assertThat(response.isIgnored(), is(true));
assertThat(response.status(), equalTo(RestStatus.OK));
ExportException exception = new ExportException(randomAlphaOfLength(10));
response = new MonitoringBulkResponse(took, new MonitoringBulkResponse.Error(exception));
assertThat(response.getTookInMillis(), equalTo(took));
assertThat(response.getError(), is(notNullValue()));
assertThat(response.isIgnored(), is(false));
assertThat(response.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
}
public void testSerialization() throws IOException {
int iterations = randomIntBetween(5, 50);
for (int i = 0; i < iterations; i++) {
MonitoringBulkResponse response;
if (randomBoolean()) {
response = new MonitoringBulkResponse(Math.abs(randomLong()), randomBoolean());
} else {
Exception exception = randomFrom(
new ExportException(randomAlphaOfLength(5), new IllegalStateException(randomAlphaOfLength(5))),
new IllegalStateException(randomAlphaOfLength(5)),
new IllegalArgumentException(randomAlphaOfLength(5))
);
response = new MonitoringBulkResponse(Math.abs(randomLong()), new MonitoringBulkResponse.Error(exception));
}
TransportVersion version = TransportVersionUtils.randomVersion(random());
BytesStreamOutput output = new BytesStreamOutput();
output.setTransportVersion(version);
response.writeTo(output);
StreamInput streamInput = output.bytes().streamInput();
streamInput.setTransportVersion(version);
MonitoringBulkResponse response2 = new MonitoringBulkResponse(streamInput);
assertThat(response2.getTookInMillis(), equalTo(response.getTookInMillis()));
if (response.getError() == null) {
assertThat(response2.getError(), is(nullValue()));
} else {
assertThat(response2.getError(), is(notNullValue()));
}
assertThat(response2.isIgnored(), is(response.isIgnored()));
}
}
}
| MonitoringBulkResponseTests |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/rules/logical/LogicalUnnestRule.java | {
"start": 9127,
"end": 9630
} | interface ____ extends RelRule.Config {
LogicalUnnestRuleConfig DEFAULT =
ImmutableLogicalUnnestRule.LogicalUnnestRuleConfig.builder()
.build()
.withOperandSupplier(b0 -> b0.operand(LogicalCorrelate.class).anyInputs())
.withDescription("LogicalUnnestRule");
@Override
default LogicalUnnestRule toRule() {
return new LogicalUnnestRule(this);
}
}
}
| LogicalUnnestRuleConfig |
java | apache__flink | flink-table/flink-table-common/src/test/java/org/apache/flink/table/types/extraction/TypeInferenceExtractorTest.java | {
"start": 126326,
"end": 126994
} | class ____ {
static final DataType TYPE =
DataTypes.STRUCTURED(
EnrichedExtractionStateProcessTableFunction.User.class,
DataTypes.FIELD("score", DataTypes.DECIMAL(3, 2)));
public BigDecimal score;
}
public void eval(
@StateHint(
type =
@DataTypeHint(
defaultDecimalPrecision = 3,
defaultDecimalScale = 2))
User u) {}
}
private static | User |
java | elastic__elasticsearch | modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java | {
"start": 1841,
"end": 7456
} | class ____ extends Plugin implements RepositoryPlugin, ReloadablePlugin {
public static final String REPOSITORY_THREAD_POOL_NAME = "repository_azure";
public static final String NETTY_EVENT_LOOP_THREAD_POOL_NAME = "azure_event_loop";
// protected for testing
final SetOnce<AzureStorageService> azureStoreService = new SetOnce<>();
private final Settings settings;
public AzureRepositoryPlugin(Settings settings) {
// eagerly load client settings so that secure settings are read
AzureStorageSettings.load(settings);
this.settings = settings;
}
@Override
public Map<String, Repository.Factory> getRepositories(
Environment env,
NamedXContentRegistry namedXContentRegistry,
ClusterService clusterService,
BigArrays bigArrays,
RecoverySettings recoverySettings,
RepositoriesMetrics repositoriesMetrics,
SnapshotMetrics snapshotMetrics
) {
return Collections.singletonMap(AzureRepository.TYPE, (projectId, metadata) -> {
AzureStorageService storageService = azureStoreService.get();
assert storageService != null;
return new AzureRepository(
projectId,
metadata,
namedXContentRegistry,
storageService,
clusterService,
bigArrays,
recoverySettings,
repositoriesMetrics,
snapshotMetrics
);
});
}
@Override
public Collection<?> createComponents(PluginServices services) {
AzureClientProvider azureClientProvider = AzureClientProvider.create(services.threadPool(), settings);
azureStoreService.set(
createAzureStorageService(settings, azureClientProvider, services.clusterService(), services.projectResolver())
);
assert assertRepositoryAzureMaxThreads(settings, services.threadPool());
return List.of(azureClientProvider);
}
AzureStorageService createAzureStorageService(
Settings settingsToUse,
AzureClientProvider azureClientProvider,
ClusterService clusterService,
ProjectResolver projectResolver
) {
return new AzureStorageService(settingsToUse, azureClientProvider, clusterService, projectResolver);
}
@Override
public List<Setting<?>> getSettings() {
return Arrays.asList(
AzureClientProvider.EVENT_LOOP_THREAD_COUNT,
AzureClientProvider.MAX_OPEN_CONNECTIONS,
AzureClientProvider.OPEN_CONNECTION_TIMEOUT,
AzureClientProvider.MAX_IDLE_TIME,
AzureStorageSettings.ACCOUNT_SETTING,
AzureStorageSettings.KEY_SETTING,
AzureStorageSettings.SAS_TOKEN_SETTING,
AzureStorageSettings.ENDPOINT_SUFFIX_SETTING,
AzureStorageSettings.TIMEOUT_SETTING,
AzureStorageSettings.MAX_RETRIES_SETTING,
AzureStorageSettings.PROXY_TYPE_SETTING,
AzureStorageSettings.PROXY_HOST_SETTING,
AzureStorageSettings.PROXY_PORT_SETTING,
AzureStorageSettings.ENDPOINT_SETTING,
AzureStorageSettings.SECONDARY_ENDPOINT_SETTING
);
}
@Override
public List<ExecutorBuilder<?>> getExecutorBuilders(Settings settings) {
return List.of(executorBuilder(settings), nettyEventLoopExecutorBuilder(settings));
}
public static ExecutorBuilder<?> executorBuilder(Settings settings) {
int repositoryAzureMax = 5;
if (DiscoveryNode.isStateless(settings)) {
// REPOSITORY_THREAD_POOL_NAME is shared between snapshot and translogs/segments upload logic in serverless. In order to avoid
// snapshots to slow down other uploads due to rate limiting, we allow more threads in serverless.
repositoryAzureMax += ThreadPool.getMaxSnapshotThreadPoolSize(EsExecutors.allocatedProcessors(settings));
}
return new ScalingExecutorBuilder(REPOSITORY_THREAD_POOL_NAME, 0, repositoryAzureMax, TimeValue.timeValueSeconds(30L), false);
}
public static ExecutorBuilder<?> nettyEventLoopExecutorBuilder(Settings settings) {
int eventLoopThreads = AzureClientProvider.eventLoopThreadsFromSettings(settings);
return new ScalingExecutorBuilder(NETTY_EVENT_LOOP_THREAD_POOL_NAME, 0, eventLoopThreads, TimeValue.timeValueSeconds(30L), false);
}
@Override
public void reload(Settings settingsToLoad) {
// secure settings should be readable
final Map<String, AzureStorageSettings> clientsSettings = AzureStorageSettings.load(settingsToLoad);
AzureStorageService storageService = azureStoreService.get();
assert storageService != null;
storageService.refreshClusterClientSettings(clientsSettings);
}
private static boolean assertRepositoryAzureMaxThreads(Settings settings, ThreadPool threadPool) {
if (DiscoveryNode.isStateless(settings)) {
var repositoryAzureMax = threadPool.info(REPOSITORY_THREAD_POOL_NAME).getMax();
var snapshotMax = ThreadPool.getMaxSnapshotThreadPoolSize(EsExecutors.allocatedProcessors(settings));
assert snapshotMax < repositoryAzureMax
: "thread pool ["
+ REPOSITORY_THREAD_POOL_NAME
+ "] should be large enough to allow all "
+ snapshotMax
+ " snapshot threads to run at once, but got: "
+ repositoryAzureMax;
}
return true;
}
}
| AzureRepositoryPlugin |
java | apache__camel | components/camel-aws/camel-aws2-transcribe/src/test/java/org/apache/camel/component/aws2/transcribe/Transcribe2ProducerTest.java | {
"start": 1384,
"end": 8025
} | class ____ extends CamelTestSupport {
@BindToRegistry("amazonTranscribeClient")
AmazonAWSTranscribeMock clientMock = new AmazonAWSTranscribeMock();
@EndpointInject("mock:result")
private MockEndpoint mock;
@Test
public void transcribeStartTranscriptionJobTest() throws Exception {
mock.expectedMessageCount(1);
Exchange exchange = template.request("direct:startTranscriptionJob", new Processor() {
@Override
public void process(Exchange exchange) throws Exception {
exchange.getIn().setHeader(Transcribe2Constants.TRANSCRIPTION_JOB_NAME, "test-job");
exchange.getIn().setHeader(Transcribe2Constants.LANGUAGE_CODE, "en-US");
exchange.getIn().setHeader(Transcribe2Constants.MEDIA_FORMAT, "mp3");
exchange.getIn().setHeader(Transcribe2Constants.MEDIA_URI, "s3://mybucket/myfile.mp3");
}
});
StartTranscriptionJobResponse resultGet = (StartTranscriptionJobResponse) exchange.getIn().getBody();
assertNotNull(resultGet);
assertEquals("test-job", resultGet.transcriptionJob().transcriptionJobName());
assertEquals(TranscriptionJobStatus.IN_PROGRESS, resultGet.transcriptionJob().transcriptionJobStatus());
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void transcribeGetTranscriptionJobTest() throws Exception {
mock.expectedMessageCount(1);
Exchange exchange = template.request("direct:getTranscriptionJob", new Processor() {
@Override
public void process(Exchange exchange) throws Exception {
exchange.getIn().setHeader(Transcribe2Constants.TRANSCRIPTION_JOB_NAME, "test-job");
}
});
GetTranscriptionJobResponse resultGet = (GetTranscriptionJobResponse) exchange.getIn().getBody();
assertNotNull(resultGet);
assertEquals("test-job", resultGet.transcriptionJob().transcriptionJobName());
assertEquals(TranscriptionJobStatus.COMPLETED, resultGet.transcriptionJob().transcriptionJobStatus());
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void transcribeListTranscriptionJobsTest() throws Exception {
mock.expectedMessageCount(1);
Exchange exchange = template.request("direct:listTranscriptionJobs", new Processor() {
@Override
public void process(Exchange exchange) throws Exception {
exchange.getIn().setHeader(Transcribe2Constants.STATUS, "COMPLETED");
}
});
ListTranscriptionJobsResponse resultGet = (ListTranscriptionJobsResponse) exchange.getIn().getBody();
assertNotNull(resultGet);
assertEquals(1, resultGet.transcriptionJobSummaries().size());
assertEquals("test-job", resultGet.transcriptionJobSummaries().get(0).transcriptionJobName());
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void transcribeDeleteTranscriptionJobTest() throws Exception {
mock.expectedMessageCount(1);
template.request("direct:deleteTranscriptionJob", new Processor() {
@Override
public void process(Exchange exchange) throws Exception {
exchange.getIn().setHeader(Transcribe2Constants.TRANSCRIPTION_JOB_NAME, "test-job");
}
});
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void transcribeCreateVocabularyTest() throws Exception {
mock.expectedMessageCount(1);
Exchange exchange = template.request("direct:createVocabulary", new Processor() {
@Override
public void process(Exchange exchange) throws Exception {
exchange.getIn().setHeader(Transcribe2Constants.VOCABULARY_NAME, "test-vocabulary");
exchange.getIn().setHeader(Transcribe2Constants.LANGUAGE_CODE, "en-US");
}
});
CreateVocabularyResponse resultGet = (CreateVocabularyResponse) exchange.getIn().getBody();
assertNotNull(resultGet);
assertEquals("test-vocabulary", resultGet.vocabularyName());
assertEquals(VocabularyState.PENDING, resultGet.vocabularyState());
MockEndpoint.assertIsSatisfied(context);
}
@Test
public void transcribeGetVocabularyTest() throws Exception {
mock.expectedMessageCount(1);
Exchange exchange = template.request("direct:getVocabulary", new Processor() {
@Override
public void process(Exchange exchange) throws Exception {
exchange.getIn().setHeader(Transcribe2Constants.VOCABULARY_NAME, "test-vocabulary");
}
});
GetVocabularyResponse resultGet = (GetVocabularyResponse) exchange.getIn().getBody();
assertNotNull(resultGet);
assertEquals("test-vocabulary", resultGet.vocabularyName());
assertEquals(VocabularyState.READY, resultGet.vocabularyState());
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RouteBuilder createRouteBuilder() throws Exception {
return new RouteBuilder() {
@Override
public void configure() throws Exception {
from("direct:startTranscriptionJob")
.to("aws2-transcribe://transcribe?operation=startTranscriptionJob&transcribeClient=#amazonTranscribeClient")
.to("mock:result");
from("direct:getTranscriptionJob")
.to("aws2-transcribe://transcribe?operation=getTranscriptionJob&transcribeClient=#amazonTranscribeClient")
.to("mock:result");
from("direct:listTranscriptionJobs")
.to("aws2-transcribe://transcribe?operation=listTranscriptionJobs&transcribeClient=#amazonTranscribeClient")
.to("mock:result");
from("direct:deleteTranscriptionJob")
.to("aws2-transcribe://transcribe?operation=deleteTranscriptionJob&transcribeClient=#amazonTranscribeClient")
.to("mock:result");
from("direct:createVocabulary")
.to("aws2-transcribe://transcribe?operation=createVocabulary&transcribeClient=#amazonTranscribeClient")
.to("mock:result");
from("direct:getVocabulary")
.to("aws2-transcribe://transcribe?operation=getVocabulary&transcribeClient=#amazonTranscribeClient")
.to("mock:result");
}
};
}
}
| Transcribe2ProducerTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/onetomany/OneToManyDuplicatesTest.java | {
"start": 3181,
"end": 4178
} | class ____ {
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
private Long id;
private String phoneNumber;
private String address;
@ManyToOne
private UserContact userContact;
public ContactInfo() {
}
public ContactInfo(Long id, String phoneNumber, String address, UserContact userContact) {
this.id = id;
this.phoneNumber = phoneNumber;
this.address = address;
this.userContact = userContact;
}
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getPhoneNumber() {
return phoneNumber;
}
public void setPhoneNumber(String phoneNumber) {
this.phoneNumber = phoneNumber;
}
public String getAddress() {
return address;
}
public void setAddress(String address) {
this.address = address;
}
public UserContact getUserContact() {
return userContact;
}
public void setUserContact(UserContact userContact) {
this.userContact = userContact;
}
}
}
| ContactInfo |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/BaseRouterRMAdminTest.java | {
"start": 6325,
"end": 13844
} | class ____ extends RouterRMAdminService {
public MockRouterRMAdminService() {
super();
}
}
protected RefreshQueuesResponse refreshQueues(String user)
throws IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs((PrivilegedExceptionAction<RefreshQueuesResponse>) () -> {
RefreshQueuesRequest req = RefreshQueuesRequest.newInstance();
RefreshQueuesResponse response =
getRouterRMAdminService().refreshQueues(req);
return response;
});
}
protected RefreshNodesResponse refreshNodes(String user)
throws IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs((PrivilegedExceptionAction<RefreshNodesResponse>) () -> {
RefreshNodesRequest req = RefreshNodesRequest.newInstance();
RefreshNodesResponse response =
getRouterRMAdminService().refreshNodes(req);
return response;
});
}
protected RefreshSuperUserGroupsConfigurationResponse refreshSuperUserGroupsConfiguration(
String user) throws IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user).doAs(
(PrivilegedExceptionAction<RefreshSuperUserGroupsConfigurationResponse>) () -> {
RefreshSuperUserGroupsConfigurationRequest req =
RefreshSuperUserGroupsConfigurationRequest.newInstance();
RefreshSuperUserGroupsConfigurationResponse response =
getRouterRMAdminService()
.refreshSuperUserGroupsConfiguration(req);
return response;
});
}
protected RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings(
String user) throws IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user).doAs(
(PrivilegedExceptionAction<RefreshUserToGroupsMappingsResponse>) () -> {
RefreshUserToGroupsMappingsRequest req =
RefreshUserToGroupsMappingsRequest.newInstance();
RefreshUserToGroupsMappingsResponse response =
getRouterRMAdminService().refreshUserToGroupsMappings(req);
return response;
});
}
protected RefreshAdminAclsResponse refreshAdminAcls(String user)
throws IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs((PrivilegedExceptionAction<RefreshAdminAclsResponse>) () -> {
RefreshAdminAclsRequest req = RefreshAdminAclsRequest.newInstance();
RefreshAdminAclsResponse response =
getRouterRMAdminService().refreshAdminAcls(req);
return response;
});
}
protected RefreshServiceAclsResponse refreshServiceAcls(String user)
throws IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs((PrivilegedExceptionAction<RefreshServiceAclsResponse>) () -> {
RefreshServiceAclsRequest req =
RefreshServiceAclsRequest.newInstance();
RefreshServiceAclsResponse response =
getRouterRMAdminService().refreshServiceAcls(req);
return response;
});
}
protected UpdateNodeResourceResponse updateNodeResource(String user)
throws IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs((PrivilegedExceptionAction<UpdateNodeResourceResponse>) () -> {
UpdateNodeResourceRequest req =
UpdateNodeResourceRequest.newInstance(null);
UpdateNodeResourceResponse response =
getRouterRMAdminService().updateNodeResource(req);
return response;
});
}
protected RefreshNodesResourcesResponse refreshNodesResources(String user)
throws IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs((PrivilegedExceptionAction<RefreshNodesResourcesResponse>) () -> {
RefreshNodesResourcesRequest req =
RefreshNodesResourcesRequest.newInstance();
RefreshNodesResourcesResponse response =
getRouterRMAdminService().refreshNodesResources(req);
return response;
});
}
protected AddToClusterNodeLabelsResponse addToClusterNodeLabels(String user)
throws IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs((PrivilegedExceptionAction<AddToClusterNodeLabelsResponse>) () -> {
AddToClusterNodeLabelsRequest req =
AddToClusterNodeLabelsRequest.newInstance(null);
AddToClusterNodeLabelsResponse response =
getRouterRMAdminService().addToClusterNodeLabels(req);
return response;
});
}
protected RemoveFromClusterNodeLabelsResponse removeFromClusterNodeLabels(
String user) throws IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user).doAs(
(PrivilegedExceptionAction<RemoveFromClusterNodeLabelsResponse>) () -> {
RemoveFromClusterNodeLabelsRequest req =
RemoveFromClusterNodeLabelsRequest.newInstance(null);
RemoveFromClusterNodeLabelsResponse response =
getRouterRMAdminService().removeFromClusterNodeLabels(req);
return response;
});
}
protected ReplaceLabelsOnNodeResponse replaceLabelsOnNode(String user)
throws IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs((PrivilegedExceptionAction<ReplaceLabelsOnNodeResponse>) () -> {
ReplaceLabelsOnNodeRequest req = ReplaceLabelsOnNodeRequest
.newInstance(new HashMap<NodeId, Set<String>>());
ReplaceLabelsOnNodeResponse response =
getRouterRMAdminService().replaceLabelsOnNode(req);
return response;
});
}
protected CheckForDecommissioningNodesResponse checkForDecommissioningNodes(
String user) throws IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user).doAs(
(PrivilegedExceptionAction<CheckForDecommissioningNodesResponse>) () -> {
CheckForDecommissioningNodesRequest req =
CheckForDecommissioningNodesRequest.newInstance();
CheckForDecommissioningNodesResponse response =
getRouterRMAdminService().checkForDecommissioningNodes(req);
return response;
});
}
protected RefreshClusterMaxPriorityResponse refreshClusterMaxPriority(
String user) throws IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user).doAs(
(PrivilegedExceptionAction<RefreshClusterMaxPriorityResponse>) () -> {
RefreshClusterMaxPriorityRequest req =
RefreshClusterMaxPriorityRequest.newInstance();
RefreshClusterMaxPriorityResponse response =
getRouterRMAdminService().refreshClusterMaxPriority(req);
return response;
});
}
protected String[] getGroupsForUser(String user)
throws IOException, InterruptedException {
return UserGroupInformation.createRemoteUser(user)
.doAs(new PrivilegedExceptionAction<String[]>() {
@Override
public String[] run() throws Exception {
String[] response =
getRouterRMAdminService().getGroupsForUser(user);
return response;
}
});
}
}
| MockRouterRMAdminService |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/annotations/Imported.java | {
"start": 446,
"end": 593
} | class ____ must be fully-qualified in the query language.
*
* @author Gavin King
*
* @since 6.2
*/
@Target(TYPE)
@Retention(RUNTIME)
public @ | names |
java | apache__camel | components/camel-oaipmh/src/main/java/org/apache/camel/oaipmh/component/OAIPMHProducer.java | {
"start": 1305,
"end": 4048
} | class ____ extends DefaultProducer {
private OAIPMHEndpoint endpoint;
public OAIPMHProducer(OAIPMHEndpoint endpoint) {
super(endpoint);
this.endpoint = endpoint;
}
@Override
public void process(Exchange exchange) throws Exception {
Harvester harvester = new Harvester(
new ProducerResponseHandler(),
endpoint.getUrl(),
endpoint.getVerb(),
endpoint.getMetadataPrefix(),
endpoint.getUntil(),
endpoint.getFrom(),
endpoint.getSet(),
endpoint.getIdentifier());
overrideHarvesterConfigs(exchange.getIn(), harvester);
if (endpoint.isIgnoreSSLWarnings()) {
harvester.getHttpClient().setIgnoreSSLWarnings(true);
}
List<String> synHarvest = harvester.synHarvest(endpoint.isOnlyFirst());
exchange.getMessage().setBody(synHarvest);
if (endpoint.isOnlyFirst() && harvester.getResumptionToken() != null) {
exchange.getMessage().setHeader(OAIPMHConstants.RESUMPTION_TOKEN, harvester.getResumptionToken());
} else {
exchange.getMessage().removeHeader(OAIPMHConstants.RESUMPTION_TOKEN);
}
}
private void overrideHarvesterConfigs(Message msg, Harvester harvester) {
checkAndSetConfigs(msg, OAIPMHConstants.URL, x -> harvester.setBaseURI(URI.create(x)), String.class);
checkAndSetConfigs(msg, OAIPMHConstants.ENDPOINT_URL, x -> harvester.setBaseURI(URI.create(x)), String.class);
checkAndSetConfigs(msg, OAIPMHConstants.VERB, harvester::setVerb, String.class);
checkAndSetConfigs(msg, OAIPMHConstants.METADATA_PREFIX, harvester::setMetadata, String.class);
checkAndSetConfigs(msg, OAIPMHConstants.UNTIL, harvester::setUntil, String.class);
checkAndSetConfigs(msg, OAIPMHConstants.FROM, harvester::setFrom, String.class);
checkAndSetConfigs(msg, OAIPMHConstants.SET, harvester::setSet, String.class);
checkAndSetConfigs(msg, OAIPMHConstants.IDENTIFIER, harvester::setIdentifier, String.class);
checkAndSetConfigs(msg, OAIPMHConstants.RESUMPTION_TOKEN, harvester::setResumptionToken, String.class);
checkAndSetConfigs(msg, OAIPMHConstants.ONLY_FIRST, endpoint::setOnlyFirst, Boolean.class);
checkAndSetConfigs(msg, OAIPMHConstants.IGNORE_SSL_WARNINGS, endpoint::setIgnoreSSLWarnings, Boolean.class);
}
private <T> void checkAndSetConfigs(final Message message, final String key, final Consumer<T> fn, final Class<T> type) {
final T header = message.getHeader(key, type);
if (!ObjectHelper.isEmpty(header)) {
fn.accept(header);
}
}
}
| OAIPMHProducer |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/support/TestPropertySourceUtilsTests.java | {
"start": 15487,
"end": 15722
} | class ____ extends LocationsAndPropertiesPropertySources {
}
@TestPropertySource(locations = "/baz.properties", properties = "key = value", inheritLocations = false, inheritProperties = false)
static | OverriddenPropertiesPropertySources |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configuration/OAuth2AuthorizationServerConfigurationTests.java | {
"start": 1154,
"end": 1605
} | class ____ {
@Test
public void assertOrderHighestPrecedence() {
Method authorizationServerSecurityFilterChainMethod = ClassUtils.getMethod(
OAuth2AuthorizationServerConfiguration.class, "authorizationServerSecurityFilterChain",
HttpSecurity.class);
Integer order = OrderUtils.getOrder(authorizationServerSecurityFilterChainMethod);
assertThat(order).isEqualTo(Ordered.HIGHEST_PRECEDENCE);
}
}
| OAuth2AuthorizationServerConfigurationTests |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AzureBlobIngressHandler.java | {
"start": 2045,
"end": 14245
} | class ____ extends AzureIngressHandler {
private static final Logger LOG = LoggerFactory.getLogger(
AbfsOutputStream.class);
private volatile String eTag;
private final AzureBlobBlockManager blobBlockManager;
private final AbfsBlobClient blobClient;
private final AbfsClientHandler clientHandler;
/**
* Constructs an AzureBlobIngressHandler.
*
* @param abfsOutputStream the AbfsOutputStream.
* @param blockFactory the block factory.
* @param bufferSize the buffer size.
* @param eTag the eTag.
* @param clientHandler the client handler.
* @param blockManager the block manager.
* @throws AzureBlobFileSystemException if an error occurs.
*/
public AzureBlobIngressHandler(AbfsOutputStream abfsOutputStream,
DataBlocks.BlockFactory blockFactory,
int bufferSize, String eTag, AbfsClientHandler clientHandler, AzureBlockManager blockManager)
throws AzureBlobFileSystemException {
super(abfsOutputStream);
this.eTag = eTag;
if (blockManager instanceof AzureBlobBlockManager) {
this.blobBlockManager = (AzureBlobBlockManager) blockManager;
} else {
this.blobBlockManager = new AzureBlobBlockManager(abfsOutputStream,
blockFactory, bufferSize);
}
this.clientHandler = clientHandler;
this.blobClient = clientHandler.getBlobClient();
LOG.trace("Created a new BlobIngress Handler for AbfsOutputStream instance {} for path {}",
abfsOutputStream.getStreamID(), abfsOutputStream.getPath());
}
/**
* Buffers data into the specified block.
*
* @param block the block to buffer data into.
* @param data the data to be buffered.
* @param off the start offset in the data.
* @param length the number of bytes to buffer.
* @return the number of bytes buffered.
* @throws IOException if an I/O error occurs.
*/
@Override
protected int bufferData(AbfsBlock block,
final byte[] data,
final int off,
final int length)
throws IOException {
LOG.trace("Buffering data of length {} to block at offset {}", length, off);
return block.write(data, off, length);
}
/**
* Performs a remote write operation.
*
* @param blockToUpload the block to upload.
* @param uploadData the data to upload.
* @param reqParams the request parameters.
* @param tracingContext the tracing context.
* @return the resulting AbfsRestOperation.
* @throws IOException if an I/O error occurs.
*/
@Override
protected AbfsRestOperation remoteWrite(AbfsBlock blockToUpload,
DataBlocks.BlockUploadData uploadData,
AppendRequestParameters reqParams,
TracingContext tracingContext)
throws IOException {
BlobAppendRequestParameters blobParams = new BlobAppendRequestParameters(blockToUpload.getBlockId(), getETag());
reqParams.setBlobParams(blobParams);
AbfsRestOperation op;
String threadIdStr = String.valueOf(Thread.currentThread().getId());
TracingContext tracingContextAppend = new TracingContext(tracingContext);
tracingContextAppend.setIngressHandler(BLOB_APPEND + " T " + threadIdStr);
tracingContextAppend.setPosition(String.valueOf(blockToUpload.getOffset()));
try {
LOG.trace("Starting remote write for block with ID {} and offset {}",
blockToUpload.getBlockId(), blockToUpload.getOffset());
op = getClient().append(getAbfsOutputStream().getPath(), uploadData.toByteArray(),
reqParams,
getAbfsOutputStream().getCachedSasTokenString(),
getAbfsOutputStream().getContextEncryptionAdapter(),
tracingContextAppend);
blobBlockManager.updateEntry(blockToUpload);
} catch (AbfsRestOperationException ex) {
LOG.error("Error in remote write requiring handler switch for path {}", getAbfsOutputStream().getPath(), ex);
if (shouldIngressHandlerBeSwitched(ex)) {
throw getIngressHandlerSwitchException(ex);
}
LOG.error("Error in remote write for path {} and offset {}", getAbfsOutputStream().getPath(),
blockToUpload.getOffset(), ex);
throw ex;
}
return op;
}
/**
* Flushes data to the remote store.
*
* @param offset the offset to flush.
* @param retainUncommitedData whether to retain uncommitted data.
* @param isClose whether this is a close operation.
* @param leaseId the lease ID.
* @param tracingContext the tracing context.
* @return the resulting AbfsRestOperation.
* @throws IOException if an I/O error occurs.
*/
@Override
protected synchronized AbfsRestOperation remoteFlush(final long offset,
final boolean retainUncommitedData,
final boolean isClose,
final String leaseId,
TracingContext tracingContext)
throws IOException {
AbfsRestOperation op;
AzureBlobBlockManager blobBlockManager = (AzureBlobBlockManager) getBlockManager();
if (getAbfsOutputStream().isAppendBlob()) {
return null;
}
if (!blobBlockManager.hasBlocksToCommit()) {
return null;
}
try {
// Generate the xml with the list of blockId's to generate putBlockList call.
String blockListXml = generateBlockListXml(
blobBlockManager.getBlockIdToCommit());
TracingContext tracingContextFlush = new TracingContext(tracingContext);
tracingContextFlush.setIngressHandler(BLOB_FLUSH);
tracingContextFlush.setPosition(String.valueOf(offset));
LOG.trace("Flushing data at offset {} for path {}", offset, getAbfsOutputStream().getPath());
String fullBlobMd5 = null;
if (getClient().isFullBlobChecksumValidationEnabled()) {
fullBlobMd5 = computeFullBlobMd5();
}
op = getClient().flush(blockListXml.getBytes(StandardCharsets.UTF_8),
getAbfsOutputStream().getPath(),
isClose, getAbfsOutputStream().getCachedSasTokenString(), leaseId,
getETag(), getAbfsOutputStream().getContextEncryptionAdapter(), tracingContextFlush, fullBlobMd5);
setETag(op.getResult().getResponseHeader(HttpHeaderConfigurations.ETAG));
} catch (AbfsRestOperationException ex) {
LOG.error("Error in remote flush requiring handler switch for path {}", getAbfsOutputStream().getPath(), ex);
if (shouldIngressHandlerBeSwitched(ex)) {
throw getIngressHandlerSwitchException(ex);
}
LOG.error("Error in remote flush for path {} and offset {}", getAbfsOutputStream().getPath(), offset, ex);
throw ex;
} finally {
if (getClient().isFullBlobChecksumValidationEnabled()) {
getAbfsOutputStream().getFullBlobContentMd5().reset();
}
}
return op;
}
/**
* Method to perform a remote write operation for appending data to an append blob in Azure Blob Storage.
*
* <p>This method is intended to be implemented by subclasses to handle the specific
* case of appending data to an append blob. It takes in the path of the append blob,
* the data to be uploaded, the block of data, and additional parameters required for
* the append operation.</p>
*
* @param path The path of the append blob to which data is to be appended.
* @param uploadData The data to be uploaded as part of the append operation.
* @param block The block of data to append.
* @param reqParams The additional parameters required for the append operation.
* @param tracingContext The tracing context for the operation.
* @return An {@link AbfsRestOperation} object representing the remote write operation.
* @throws IOException If an I/O error occurs during the append operation.
*/
protected AbfsRestOperation remoteAppendBlobWrite(String path,
DataBlocks.BlockUploadData uploadData,
AbfsBlock block,
AppendRequestParameters reqParams,
TracingContext tracingContext) throws IOException {
// Perform the remote append operation using the blob client.
AbfsRestOperation op;
try {
op = blobClient.appendBlock(path, reqParams, uploadData.toByteArray(), tracingContext);
} catch (AbfsRestOperationException ex) {
LOG.error("Error in remote write requiring handler switch for path {}",
getAbfsOutputStream().getPath(), ex);
if (shouldIngressHandlerBeSwitched(ex)) {
throw getIngressHandlerSwitchException(ex);
}
LOG.error("Error in remote write for path {} and offset {}",
getAbfsOutputStream().getPath(),
block.getOffset(), ex);
throw ex;
}
return op;
}
/**
* Sets the eTag of the blob.
*
* @param eTag the eTag to set.
*/
void setETag(String eTag) {
this.eTag = eTag;
}
/**
* Gets the eTag value of the blob.
*
* @return the eTag.
*/
@VisibleForTesting
@Override
public String getETag() {
return eTag;
}
/**
* Writes the current buffer to the service. .
*
*/
@Override
protected void writeAppendBlobCurrentBufferToService() throws IOException {
AbfsBlock activeBlock = blobBlockManager.getActiveBlock();
// No data, return immediately.
if (!getAbfsOutputStream().hasActiveBlockDataToUpload()) {
return;
}
// Prepare data for upload.
final int bytesLength = activeBlock.dataSize();
DataBlocks.BlockUploadData uploadData = activeBlock.startUpload();
// Clear active block and update statistics.
if (blobBlockManager.hasActiveBlock()) {
blobBlockManager.clearActiveBlock();
}
getAbfsOutputStream().getOutputStreamStatistics().writeCurrentBuffer();
getAbfsOutputStream().getOutputStreamStatistics().bytesToUpload(bytesLength);
// Update the stream position.
final long offset = getAbfsOutputStream().getPosition();
getAbfsOutputStream().setPosition(offset + bytesLength);
// Perform the upload within a performance tracking context.
try (AbfsPerfInfo perfInfo = new AbfsPerfInfo(
blobClient.getAbfsPerfTracker(),
"writeCurrentBufferToService", APPEND_ACTION)) {
LOG.trace("Writing current buffer to service at offset {} and path {}", offset, getAbfsOutputStream().getPath());
AppendRequestParameters reqParams = new AppendRequestParameters(
offset, 0, bytesLength, AppendRequestParameters.Mode.APPEND_MODE,
true, getAbfsOutputStream().getLeaseId(),
getAbfsOutputStream().isExpectHeaderEnabled(),
getAbfsOutputStream().getMd5());
AbfsRestOperation op;
try {
op = remoteAppendBlobWrite(getAbfsOutputStream().getPath(), uploadData,
activeBlock, reqParams,
new TracingContext(getAbfsOutputStream().getTracingContext()));
} catch (InvalidIngressServiceException ex) {
LOG.debug("InvalidIngressServiceException caught for path: {}, switching handler and retrying remoteAppendBlobWrite.",
getAbfsOutputStream().getPath());
getAbfsOutputStream().switchHandler();
op = getAbfsOutputStream().getIngressHandler()
.remoteAppendBlobWrite(getAbfsOutputStream().getPath(), uploadData,
activeBlock, reqParams,
new TracingContext(getAbfsOutputStream().getTracingContext()));
} finally {
// Ensure the upload data stream is closed.
IOUtils.closeStreams(uploadData, activeBlock);
}
if (op != null) {
// Update the SAS token and log the successful upload.
getAbfsOutputStream().getCachedSasToken().update(op.getSasToken());
getAbfsOutputStream().getOutputStreamStatistics()
.uploadSuccessful(bytesLength);
// Register performance information.
perfInfo.registerResult(op.getResult());
perfInfo.registerSuccess(true);
}
}
}
/**
* Gets the block manager.
*
* @return the block manager.
*/
@Override
public AzureBlockManager getBlockManager() {
return blobBlockManager;
}
/**
* Gets the blob client.
*
* @return the blob client.
*/
@Override
public AbfsBlobClient getClient() {
return blobClient;
}
@VisibleForTesting
public AbfsClientHandler getClientHandler() {
return clientHandler;
}
}
| AzureBlobIngressHandler |
java | apache__camel | components/camel-tracing/src/main/java/org/apache/camel/tracing/propagation/CamelMessagingHeadersExtractAdapter.java | {
"start": 1075,
"end": 2653
} | class ____ implements ExtractAdapter {
private final Map<String, Object> map = new CaseInsensitiveMap();
private final boolean jmsEncoding;
public CamelMessagingHeadersExtractAdapter(final Map<String, Object> map, boolean jmsEncoding) {
// Extract string valued map entries
this.jmsEncoding = jmsEncoding;
map.entrySet().stream().filter(e -> e.getValue() instanceof String || e.getValue() instanceof byte[]).forEach(e -> {
if (e.getValue() instanceof byte[] bytes) {
this.map.put(decodeDash(e.getKey()), new String(bytes, StandardCharsets.UTF_8));
} else {
this.map.put(decodeDash(e.getKey()), e.getValue());
}
});
}
@Override
public Iterator<Map.Entry<String, Object>> iterator() {
return map.entrySet().iterator();
}
@Override
public Object get(String key) {
return this.map.get(key);
}
@Override
public Set<String> keys() {
return map.keySet();
}
/**
* Decode dashes (encoded in {@link CamelMessagingHeadersInjectAdapter} Dash encoding and decoding is required by
* JMS. This is implemented here rather than specifically to JMS so that other Camel messaging endpoints can take
* part in traces where the peer is using JMS.
*/
private String decodeDash(String key) {
if (jmsEncoding) {
return key.replace(CamelMessagingHeadersInjectAdapter.JMS_DASH, "-");
} else {
return key;
}
}
}
| CamelMessagingHeadersExtractAdapter |
java | apache__logging-log4j2 | log4j-core-test/src/test/java/org/apache/logging/log4j/core/appender/SocketAppenderReconnectTest.java | {
"start": 2676,
"end": 20949
} | class ____ {
private static final String CLASS_NAME = SocketAppenderReconnectTest.class.getSimpleName();
private static final int EPHEMERAL_PORT = 0;
private static final String APPENDER_NAME = "TestSocket";
/**
* Tests if failures are propagated when reconnection fails.
*
* @see <a href="https://issues.apache.org/jira/browse/LOG4J2-2829">LOG4J2-2829</a>
*/
@Test
@UsingStatusListener // Suppresses `StatusLogger` output, unless there is a failure
void repeating_reconnect_failures_should_be_propagated() throws Exception {
try (final LineReadingTcpServer server = new LineReadingTcpServer()) {
// Start the server.
server.start("Main", EPHEMERAL_PORT);
final String serverHost = server.getServerSocket().getInetAddress().getHostAddress();
final int serverPort = server.getServerSocket().getLocalPort();
// Initialize the logger context
final Configuration config = createConfiguration(serverHost, serverPort, null);
try (final LoggerContext loggerContext = createStartedLoggerContext(config)) {
// Configure the error handler
final BufferingErrorHandler errorHandler = new BufferingErrorHandler();
loggerContext.getConfiguration().getAppender(APPENDER_NAME).setHandler(errorHandler);
// Verify the initial working state.
verifyLoggingSuccess(loggerContext, server, errorHandler);
// Stop the server, and verify the logging failure.
server.close();
verifyLoggingFailure(loggerContext, errorHandler);
// Start the server again, and verify the logging success.
server.start("Main", serverPort);
verifyLoggingSuccess(loggerContext, server, errorHandler);
}
}
}
/**
* Tests if all the {@link InetSocketAddress}es returned by an {@link HostResolver} is used for fallback on reconnect attempts.
*/
@Test
@UsingStatusListener // Suppresses `StatusLogger` output, unless there is a failure
void reconnect_should_fallback_when_there_are_multiple_resolved_hosts() throws Exception {
try (final LineReadingTcpServer primaryServer = new LineReadingTcpServer();
final LineReadingTcpServer secondaryServer = new LineReadingTcpServer()) {
// Start servers.
primaryServer.start("Primary", EPHEMERAL_PORT);
secondaryServer.start("Secondary", EPHEMERAL_PORT);
// Mock the host resolver.
final FixedHostResolver hostResolver = FixedHostResolver.ofServers(primaryServer, secondaryServer);
TcpSocketManager.setHostResolver(hostResolver);
try {
// Initialize the logger context
final Configuration config = createConfiguration(
// Passing dummy host & port, since the resolution is supposed to be performed by the mocked
// host resolver anyway.
"localhost", 0, null);
try (final LoggerContext loggerContext = createStartedLoggerContext(config)) {
// Configure the error handler
final BufferingErrorHandler errorHandler = new BufferingErrorHandler();
loggerContext.getConfiguration().getAppender(APPENDER_NAME).setHandler(errorHandler);
// Verify the initial working state on the primary server.
verifyLoggingSuccess(loggerContext, primaryServer, errorHandler);
// Stop the primary server, and verify the logging success due to fallback on to the secondary
// server.
primaryServer.close();
verifyLoggingSuccess(loggerContext, secondaryServer, errorHandler);
}
}
// Reset the host resolver
finally {
TcpSocketManager.setHostResolver(new HostResolver());
}
}
}
/**
* Triggers a reconfiguration such that the {@code <KeyStore>} and {@code <TrustStore>} configuration will be unchanged, but the content they refer to will be updated.
*
* @see <a href="https://issues.apache.org/jira/browse/LOG4J2-2988">LOG4J2-2988</a>
* @see <a href="https://github.com/apache/logging-log4j2/pull/2767">#2767</a>
*/
@Test
void key_store_changes_should_be_detected_at_reconfigure(
@TempDir(cleanup = CleanupMode.ON_SUCCESS) final Path tempDir) throws Exception {
// Create the 1st `SSLContext`
final String keyStore1Type = SslKeyStoreConstants.KEYSTORE_TYPE;
final String keyStore1Location = SslKeyStoreConstants.KEYSTORE_LOCATION;
final char[] keyStore1Password = SslKeyStoreConstants.KEYSTORE_PWD();
final String trustStore1Type = SslKeyStoreConstants.TRUSTSTORE_TYPE;
final String trustStore1Location = SslKeyStoreConstants.TRUSTSTORE_LOCATION;
final char[] trustStore1Password = SslKeyStoreConstants.TRUSTSTORE_PWD();
final SSLContext sslContext1 = createSslContext(
keyStore1Type,
keyStore1Location,
keyStore1Password,
trustStore1Type,
trustStore1Location,
trustStore1Password);
// Create the 2nd `SSLContext`
final String keyStore2Type = SslKeyStoreConstants.KEYSTORE2_TYPE;
final String keyStore2Location = SslKeyStoreConstants.KEYSTORE2_LOCATION;
final char[] keyStore2Password = SslKeyStoreConstants.KEYSTORE2_PWD();
final String trustStore2Type = SslKeyStoreConstants.TRUSTSTORE2_TYPE;
final String trustStore2Location = SslKeyStoreConstants.TRUSTSTORE2_LOCATION;
final char[] trustStore2Password = SslKeyStoreConstants.TRUSTSTORE2_PWD();
final SSLContext sslContext2 = createSslContext(
keyStore2Type,
keyStore2Location,
keyStore2Password,
trustStore2Type,
trustStore2Location,
trustStore2Password);
// Ensure that store types are identical.
// We will use these in the `*StoreConfiguration`.
// They need to be same, so that the encapsulating `SslConfiguration` will be unchanged during reconfiguration.
assertThat(keyStore1Type).isEqualTo(keyStore2Type);
// Stage the key store files using the 1st `SSLContext`
@SuppressWarnings("UnnecessaryLocalVariable")
final String keyStoreType = keyStore1Type;
final Path keyStoreFilePath = tempDir.resolve("keyStore");
Files.write(keyStoreFilePath, Files.readAllBytes(Paths.get(keyStore1Location)));
final Path keyStorePasswordFilePath = tempDir.resolve("keyStorePassword");
Files.write(keyStorePasswordFilePath, new String(keyStore1Password).getBytes(StandardCharsets.UTF_8));
// Stage the trust store files using the 1st `SSLContext`
@SuppressWarnings("UnnecessaryLocalVariable")
final String trustStoreType = trustStore1Type;
final Path trustStoreFilePath = tempDir.resolve("trustStore");
Files.write(trustStoreFilePath, Files.readAllBytes(Paths.get(trustStore1Location)));
final Path trustStorePasswordFilePath = tempDir.resolve("trustStorePassword");
Files.write(trustStorePasswordFilePath, new String(trustStore1Password).getBytes(StandardCharsets.UTF_8));
// Create servers
try (final LineReadingTcpServer server1 = new LineReadingTcpServer(sslContext1.getServerSocketFactory());
final LineReadingTcpServer server2 = new LineReadingTcpServer(sslContext2.getServerSocketFactory())) {
// Start the 1st server
server1.start("1st", EPHEMERAL_PORT);
final String server1Host =
server1.getServerSocket().getInetAddress().getHostAddress();
final int server1Port = server1.getServerSocket().getLocalPort();
// Create the configuration transformer to add the `<Ssl>`, `<KeyStore>`, and `<TrustStore>` elements
final BiFunction<
ConfigurationBuilder<BuiltConfiguration>,
AppenderComponentBuilder,
AppenderComponentBuilder>
appenderComponentBuilderTransformer = (configBuilder, appenderComponentBuilder) -> {
final ComponentBuilder<?> keyStoreComponentBuilder = configBuilder
.newComponent("KeyStore")
.addAttribute("type", keyStoreType)
.addAttribute("location", keyStoreFilePath.toString())
.addAttribute("passwordFile", keyStorePasswordFilePath);
final ComponentBuilder<?> trustStoreComponentBuilder = configBuilder
.newComponent("TrustStore")
.addAttribute("type", trustStoreType)
.addAttribute("location", trustStoreFilePath.toString())
.addAttribute("passwordFile", trustStorePasswordFilePath);
return appenderComponentBuilder.addComponent(configBuilder
.newComponent("Ssl")
.addAttribute("protocol", "TLS")
.addComponent(keyStoreComponentBuilder)
.addComponent(trustStoreComponentBuilder));
};
// Initialize the logger context
final Configuration config1 =
createConfiguration(server1Host, server1Port, appenderComponentBuilderTransformer);
try (final LoggerContext loggerContext = createStartedLoggerContext(config1)) {
// Configure the error handler
final BufferingErrorHandler errorHandler = new BufferingErrorHandler();
loggerContext.getConfiguration().getAppender(APPENDER_NAME).setHandler(errorHandler);
// Verify the initial working state on the 1st server
verifyLoggingSuccess(loggerContext, server1, errorHandler);
// Stop the 1st server and start the 2nd one (using different SSL configuration!) on the same port
server1.close();
server2.start("2nd", server1Port);
// Stage the key store files using the 2nd `SSLContext`
Files.write(keyStoreFilePath, Files.readAllBytes(Paths.get(keyStore2Location)));
Files.write(keyStorePasswordFilePath, new String(keyStore2Password).getBytes(StandardCharsets.UTF_8));
// Stage the trust store files using the 2nd `SSLContext`
Files.write(trustStoreFilePath, Files.readAllBytes(Paths.get(trustStore2Location)));
Files.write(
trustStorePasswordFilePath, new String(trustStore2Password).getBytes(StandardCharsets.UTF_8));
// Reconfigure the logger context
//
// You might be thinking:
//
// Why don't we simply call `loggerContext.reconfigure()`?
// Why do we need to create the very same configuration twice?
//
// We need to reconfigure the logger context using the very same configuration to test if
// `SslSocketManager` will be able to pick up the key and trust store changes even though the
// `SslConfiguration` is untouched – the whole point of this test and the issue reported in LOG4J2-2988.
//
// `loggerContext.reconfigure()` stops the active configuration (i.e., the programmatically built
// configuration we provided to it), and starts a fresh scan for `log4j2.xml` et al. in the classpath.
// This effectively discards the initially provided configuration.
//
// `loggerContext.reconfigure(loggerContext.getConfiguration())` doesn't work either, since it tries to
// start the configuration it has just stopped and a programmatically built `Configuration` is not
// `Reconfigurable` – yes, this needs to be fixed.
//
// Hence, the only way is to programmatically build the very same configuration, twice, and use the 1st
// one for initialization, and the 2nd one for reconfiguration.
final Configuration config2 =
createConfiguration(server1Host, server1Port, appenderComponentBuilderTransformer);
loggerContext.reconfigure(config2);
// Verify the working state on the 2nd server
verifyLoggingSuccess(loggerContext, server2, errorHandler);
}
}
}
private static Configuration createConfiguration(
final String host,
final int port,
@Nullable
final BiFunction<
ConfigurationBuilder<BuiltConfiguration>,
AppenderComponentBuilder,
AppenderComponentBuilder>
appenderComponentBuilderTransformer) {
// Create the configuration builder
final ConfigurationBuilder<BuiltConfiguration> configBuilder =
ConfigurationBuilderFactory.newConfigurationBuilder()
.setStatusLevel(Level.ERROR)
.setConfigurationName(SocketAppenderReconnectTest.class.getSimpleName());
// Create the appender configuration
final AppenderComponentBuilder appenderComponentBuilder = configBuilder
.newAppender(APPENDER_NAME, "Socket")
.addAttribute("host", host)
.addAttribute("port", port)
.addAttribute("ignoreExceptions", false)
.addAttribute("reconnectionDelayMillis", 10)
.addAttribute("immediateFlush", true)
.add(configBuilder.newLayout("PatternLayout").addAttribute("pattern", "%m%n"));
final AppenderComponentBuilder transformedAppenderComponentBuilder = appenderComponentBuilderTransformer != null
? appenderComponentBuilderTransformer.apply(configBuilder, appenderComponentBuilder)
: appenderComponentBuilder;
// Create the configuration
return configBuilder
.add(transformedAppenderComponentBuilder)
.add(configBuilder.newRootLogger(Level.ALL).add(configBuilder.newAppenderRef(APPENDER_NAME)))
.build(false);
}
private static final AtomicInteger LOGGER_CONTEXT_COUNTER = new AtomicInteger();
private static LoggerContext createStartedLoggerContext(final Configuration configuration) {
final String name = String.format(
"%s-%02d", SocketAppenderReconnectTest.class.getSimpleName(), LOGGER_CONTEXT_COUNTER.getAndIncrement());
final LoggerContext loggerContext = new LoggerContext(name);
loggerContext.start(configuration);
return loggerContext;
}
private static void verifyLoggingSuccess(
final LoggerContext loggerContext,
final LineReadingTcpServer server,
final BufferingErrorHandler errorHandler)
throws Exception {
// Report status
StatusLogger.getLogger().debug("[{}] verifying logging success", CLASS_NAME);
// Create messages to log
final int messageCount = 2;
assertThat(messageCount)
.as("expecting `messageCount > 1` due to LOG4J2-2829")
.isGreaterThan(1);
final List<String> expectedMessages = IntStream.range(0, messageCount)
.mapToObj(messageIndex -> String.format("m%02d", messageIndex))
.collect(Collectors.toList());
// Log the 1st message
// Due to socket initialization, the first `write()` might need some extra effort
final Logger logger = loggerContext.getRootLogger();
await("first socket append")
.pollInterval(100, TimeUnit.MILLISECONDS)
.atMost(120, TimeUnit.SECONDS)
.ignoreExceptions()
.untilAsserted(() -> {
final String message = expectedMessages.get(0);
logger.info(message);
});
// Reset the error handler
errorHandler.clear();
// Log the rest of the messages
for (int messageIndex = 1; messageIndex < expectedMessages.size(); messageIndex++) {
final String message = expectedMessages.get(messageIndex);
logger.info(message);
}
// Verify the messages received by the server
final List<String> actualMessages = server.pollLines(messageCount);
assertThat(actualMessages).containsExactlyElementsOf(expectedMessages);
// Verify the error handler state
assertThat(errorHandler.getBuffer()).isEmpty();
}
private static void verifyLoggingFailure(
final LoggerContext loggerContext, final BufferingErrorHandler errorHandler) {
// Report status
StatusLogger.getLogger().debug("[{}] verifying logging failure", CLASS_NAME);
// Verify the configuration
final Logger logger = loggerContext.getRootLogger();
final int retryCount = 3;
assertThat(retryCount)
.as("expecting `retryCount > 1` due to LOG4J2-2829")
.isGreaterThan(1);
// Verify the failure
for (int i = 0; i < retryCount; i++) {
try {
logger.info("should fail #" + i);
fail("should have failed #" + i);
} catch (final AppenderLoggingException ignored) {
assertThat(errorHandler.getBuffer()).hasSize(2 * (i + 1));
}
}
}
}
| SocketAppenderReconnectTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/codec/vectors/es93/ES93BFloat16FlatVectorsWriter.java | {
"start": 2642,
"end": 14471
} | class ____ extends FlatVectorsWriter {
private static final long SHALLOW_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ES93BFloat16FlatVectorsWriter.class);
private final SegmentWriteState segmentWriteState;
private final IndexOutput meta, vectorData;
private final List<FieldWriter<?>> fields = new ArrayList<>();
private boolean finished;
public ES93BFloat16FlatVectorsWriter(SegmentWriteState state, FlatVectorsScorer scorer) throws IOException {
super(scorer);
segmentWriteState = state;
String metaFileName = IndexFileNames.segmentFileName(
state.segmentInfo.name,
state.segmentSuffix,
ES93BFloat16FlatVectorsFormat.META_EXTENSION
);
String vectorDataFileName = IndexFileNames.segmentFileName(
state.segmentInfo.name,
state.segmentSuffix,
ES93BFloat16FlatVectorsFormat.VECTOR_DATA_EXTENSION
);
boolean success = false;
try {
meta = state.directory.createOutput(metaFileName, state.context);
vectorData = state.directory.createOutput(vectorDataFileName, state.context);
CodecUtil.writeIndexHeader(
meta,
ES93BFloat16FlatVectorsFormat.META_CODEC_NAME,
ES93BFloat16FlatVectorsFormat.VERSION_CURRENT,
state.segmentInfo.getId(),
state.segmentSuffix
);
CodecUtil.writeIndexHeader(
vectorData,
ES93BFloat16FlatVectorsFormat.VECTOR_DATA_CODEC_NAME,
ES93BFloat16FlatVectorsFormat.VERSION_CURRENT,
state.segmentInfo.getId(),
state.segmentSuffix
);
success = true;
} finally {
if (success == false) {
IOUtils.closeWhileHandlingException(this);
}
}
}
@Override
public FlatFieldVectorsWriter<?> addField(FieldInfo fieldInfo) throws IOException {
FieldWriter<?> newField = FieldWriter.create(fieldInfo);
fields.add(newField);
return newField;
}
@Override
public void flush(int maxDoc, Sorter.DocMap sortMap) throws IOException {
for (FieldWriter<?> field : fields) {
if (sortMap == null) {
writeField(field, maxDoc);
} else {
writeSortingField(field, maxDoc, sortMap);
}
field.finish();
}
}
@Override
public void finish() throws IOException {
if (finished) {
throw new IllegalStateException("already finished");
}
finished = true;
if (meta != null) {
// write end of fields marker
meta.writeInt(-1);
CodecUtil.writeFooter(meta);
}
if (vectorData != null) {
CodecUtil.writeFooter(vectorData);
}
}
@Override
public long ramBytesUsed() {
long total = SHALLOW_RAM_BYTES_USED;
for (FieldWriter<?> field : fields) {
total += field.ramBytesUsed();
}
return total;
}
private void writeField(FieldWriter<?> fieldData, int maxDoc) throws IOException {
// write vector values
long vectorDataOffset = vectorData.alignFilePointer(BFloat16.BYTES);
switch (fieldData.fieldInfo.getVectorEncoding()) {
case FLOAT32 -> writeBFloat16Vectors(fieldData);
case BYTE -> throw new IllegalStateException(
"Incorrect encoding for field " + fieldData.fieldInfo.name + ": " + VectorEncoding.BYTE
);
}
long vectorDataLength = vectorData.getFilePointer() - vectorDataOffset;
writeMeta(fieldData.fieldInfo, maxDoc, vectorDataOffset, vectorDataLength, fieldData.docsWithField);
}
private void writeBFloat16Vectors(FieldWriter<?> fieldData) throws IOException {
final ByteBuffer buffer = ByteBuffer.allocate(fieldData.dim * BFloat16.BYTES).order(ByteOrder.LITTLE_ENDIAN);
for (Object v : fieldData.vectors) {
BFloat16.floatToBFloat16((float[]) v, buffer.asShortBuffer());
vectorData.writeBytes(buffer.array(), buffer.array().length);
}
}
private void writeSortingField(FieldWriter<?> fieldData, int maxDoc, Sorter.DocMap sortMap) throws IOException {
final int[] ordMap = new int[fieldData.docsWithField.cardinality()]; // new ord to old ord
DocsWithFieldSet newDocsWithField = new DocsWithFieldSet();
mapOldOrdToNewOrd(fieldData.docsWithField, sortMap, null, ordMap, newDocsWithField);
// write vector values
long vectorDataOffset = switch (fieldData.fieldInfo.getVectorEncoding()) {
case FLOAT32 -> writeSortedBFloat16Vectors(fieldData, ordMap);
case BYTE -> throw new IllegalStateException(
"Incorrect encoding for field " + fieldData.fieldInfo.name + ": " + VectorEncoding.BYTE
);
};
long vectorDataLength = vectorData.getFilePointer() - vectorDataOffset;
writeMeta(fieldData.fieldInfo, maxDoc, vectorDataOffset, vectorDataLength, newDocsWithField);
}
private long writeSortedBFloat16Vectors(FieldWriter<?> fieldData, int[] ordMap) throws IOException {
long vectorDataOffset = vectorData.alignFilePointer(BFloat16.BYTES);
final ByteBuffer buffer = ByteBuffer.allocate(fieldData.dim * BFloat16.BYTES).order(ByteOrder.LITTLE_ENDIAN);
for (int ordinal : ordMap) {
float[] vector = (float[]) fieldData.vectors.get(ordinal);
BFloat16.floatToBFloat16(vector, buffer.asShortBuffer());
vectorData.writeBytes(buffer.array(), buffer.array().length);
}
return vectorDataOffset;
}
@Override
public void mergeOneField(FieldInfo fieldInfo, MergeState mergeState) throws IOException {
// Since we know we will not be searching for additional indexing, we can just write the
// the vectors directly to the new segment.
long vectorDataOffset = vectorData.alignFilePointer(BFloat16.BYTES);
// No need to use temporary file as we don't have to re-open for reading
DocsWithFieldSet docsWithField = switch (fieldInfo.getVectorEncoding()) {
case FLOAT32 -> writeVectorData(vectorData, MergedVectorValues.mergeFloatVectorValues(fieldInfo, mergeState));
case BYTE -> throw new IllegalStateException("Incorrect encoding for field " + fieldInfo.name + ": " + VectorEncoding.BYTE);
};
long vectorDataLength = vectorData.getFilePointer() - vectorDataOffset;
writeMeta(fieldInfo, segmentWriteState.segmentInfo.maxDoc(), vectorDataOffset, vectorDataLength, docsWithField);
}
@Override
public CloseableRandomVectorScorerSupplier mergeOneFieldToIndex(FieldInfo fieldInfo, MergeState mergeState) throws IOException {
long vectorDataOffset = vectorData.alignFilePointer(BFloat16.BYTES);
IndexOutput tempVectorData = segmentWriteState.directory.createTempOutput(vectorData.getName(), "temp", segmentWriteState.context);
IndexInput vectorDataInput = null;
boolean success = false;
try {
// write the vector data to a temporary file
DocsWithFieldSet docsWithField = switch (fieldInfo.getVectorEncoding()) {
case FLOAT32 -> writeVectorData(tempVectorData, MergedVectorValues.mergeFloatVectorValues(fieldInfo, mergeState));
case BYTE -> throw new UnsupportedOperationException("ES92BFloat16FlatVectorsWriter only supports float vectors");
};
CodecUtil.writeFooter(tempVectorData);
IOUtils.close(tempVectorData);
// This temp file will be accessed in a random-access fashion to construct the HNSW graph.
// Note: don't use the context from the state, which is a flush/merge context, not expecting
// to perform random reads.
vectorDataInput = segmentWriteState.directory.openInput(
tempVectorData.getName(),
IOContext.DEFAULT.withHints(FileTypeHint.DATA, FileDataHint.KNN_VECTORS, DataAccessHint.RANDOM)
);
// copy the temporary file vectors to the actual data file
vectorData.copyBytes(vectorDataInput, vectorDataInput.length() - CodecUtil.footerLength());
CodecUtil.retrieveChecksum(vectorDataInput);
long vectorDataLength = vectorData.getFilePointer() - vectorDataOffset;
writeMeta(fieldInfo, segmentWriteState.segmentInfo.maxDoc(), vectorDataOffset, vectorDataLength, docsWithField);
success = true;
final IndexInput finalVectorDataInput = vectorDataInput;
final RandomVectorScorerSupplier randomVectorScorerSupplier = vectorsScorer.getRandomVectorScorerSupplier(
fieldInfo.getVectorSimilarityFunction(),
new OffHeapBFloat16VectorValues.DenseOffHeapVectorValues(
fieldInfo.getVectorDimension(),
docsWithField.cardinality(),
finalVectorDataInput,
fieldInfo.getVectorDimension() * BFloat16.BYTES,
vectorsScorer,
fieldInfo.getVectorSimilarityFunction()
)
);
return new FlatCloseableRandomVectorScorerSupplier(() -> {
IOUtils.close(finalVectorDataInput);
segmentWriteState.directory.deleteFile(tempVectorData.getName());
}, docsWithField.cardinality(), randomVectorScorerSupplier);
} finally {
if (success == false) {
IOUtils.closeWhileHandlingException(vectorDataInput, tempVectorData);
try {
segmentWriteState.directory.deleteFile(tempVectorData.getName());
} catch (Exception e) {
// ignore
}
}
}
}
private void writeMeta(FieldInfo field, int maxDoc, long vectorDataOffset, long vectorDataLength, DocsWithFieldSet docsWithField)
throws IOException {
meta.writeInt(field.number);
meta.writeInt(field.getVectorEncoding().ordinal());
meta.writeInt(field.getVectorSimilarityFunction().ordinal());
meta.writeVLong(vectorDataOffset);
meta.writeVLong(vectorDataLength);
meta.writeVInt(field.getVectorDimension());
// write docIDs
int count = docsWithField.cardinality();
meta.writeInt(count);
OrdToDocDISIReaderConfiguration.writeStoredMeta(DIRECT_MONOTONIC_BLOCK_SHIFT, meta, vectorData, count, maxDoc, docsWithField);
}
/**
* Writes the vector values to the output and returns a set of documents that contains vectors.
*/
private static DocsWithFieldSet writeVectorData(IndexOutput output, FloatVectorValues floatVectorValues) throws IOException {
DocsWithFieldSet docsWithField = new DocsWithFieldSet();
ByteBuffer buffer = ByteBuffer.allocate(floatVectorValues.dimension() * BFloat16.BYTES).order(ByteOrder.LITTLE_ENDIAN);
KnnVectorValues.DocIndexIterator iter = floatVectorValues.iterator();
for (int docV = iter.nextDoc(); docV != NO_MORE_DOCS; docV = iter.nextDoc()) {
// write vector
float[] value = floatVectorValues.vectorValue(iter.index());
BFloat16.floatToBFloat16(value, buffer.asShortBuffer());
output.writeBytes(buffer.array(), buffer.limit());
docsWithField.add(docV);
}
return docsWithField;
}
@Override
public void close() throws IOException {
IOUtils.close(meta, vectorData);
}
private abstract static | ES93BFloat16FlatVectorsWriter |
java | google__guice | core/src/com/google/inject/spi/ProvidesMethodBinding.java | {
"start": 949,
"end": 1588
} | interface ____<T> extends HasDependencies {
/** Returns the method this binding uses. */
Method getMethod();
/** Returns the instance of the object the method is defined in. */
Object getEnclosingInstance();
/** Returns the key of the binding. */
Key<T> getKey();
/**
* Returns the annotation that caused this binding to be created. For {@code @Provides} methods,
* this is an instance of the {@code @Provides} annotation. For bindings from {@link
* ModuleAnnotatedMethodScanner}, this is the annotation that caused the scanner to produce the
* binding.
*/
Annotation getAnnotation();
}
| ProvidesMethodBinding |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/internal/ShardSearchContextId.java | {
"start": 746,
"end": 2602
} | class ____ implements Writeable {
private final String sessionId;
private final long id;
private final String searcherId;
public ShardSearchContextId(String sessionId, long id) {
this(sessionId, id, null);
}
public ShardSearchContextId(String sessionId, long id, String searcherId) {
this.sessionId = Objects.requireNonNull(sessionId);
this.id = id;
this.searcherId = searcherId;
}
public ShardSearchContextId(StreamInput in) throws IOException {
this.id = in.readLong();
this.sessionId = in.readString();
this.searcherId = in.readOptionalString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeLong(id);
out.writeString(sessionId);
out.writeOptionalString(searcherId);
}
public String getSessionId() {
return sessionId;
}
public long getId() {
return id;
}
public boolean isRetryable() {
return this.searcherId != null;
}
public boolean sameSearcherIdsAs(String otherSearcherId) {
return this.isRetryable() && this.searcherId.equals(otherSearcherId);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ShardSearchContextId other = (ShardSearchContextId) o;
return id == other.id && sessionId.equals(other.sessionId) && Objects.equals(searcherId, other.searcherId);
}
@Override
public int hashCode() {
return Objects.hash(sessionId, id, searcherId);
}
@Override
public String toString() {
return "[" + sessionId + "][" + id + "] searcherId [" + searcherId + "]";
}
public String getSearcherId() {
return searcherId;
}
}
| ShardSearchContextId |
java | apache__logging-log4j2 | log4j-api-test/src/test/java/org/apache/logging/log4j/util/PropertiesUtilTest.java | {
"start": 14612,
"end": 15238
} | class ____ implements PropertySource {
private final PropertiesUtil propertiesUtil;
private RecursivePropertySource(PropertiesUtil propertiesUtil) {
this.propertiesUtil = propertiesUtil;
}
@Override
public int getPriority() {
return Integer.MIN_VALUE;
}
@Override
public String getProperty(String key) {
return propertiesUtil.getStringProperty(key);
}
@Override
public boolean containsProperty(String key) {
return propertiesUtil.hasProperty(key);
}
}
}
| RecursivePropertySource |
java | apache__hadoop | hadoop-cloud-storage-project/hadoop-tos/src/test/java/org/apache/hadoop/fs/tosfs/object/tos/TestTOSInputStream.java | {
"start": 4630,
"end": 5241
} | class ____ extends ByteArrayInputStream {
// -1 means call close()
// 0 means neither call close() nor forceClose()
// 1 means call forceClose()
private int cloeState = 0;
private TestInputStream(byte[] buf, int off, int len) {
super(buf, off, len);
}
@Override
public void close() {
cloeState = -1;
}
public void forceClose() {
cloeState = 1;
}
boolean isForceClose() {
assertTrue(cloeState == -1 || cloeState == 1, "Neither call close() nor forceClose()");
return cloeState == 1;
}
}
private final static | TestInputStream |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/transport/NodeDisconnectedException.java | {
"start": 652,
"end": 1227
} | class ____ extends ConnectTransportException {
public NodeDisconnectedException(DiscoveryNode node, String msg, String action, Exception cause) {
super(node, msg, action, cause);
}
public NodeDisconnectedException(DiscoveryNode node, String action) {
super(node, "disconnected", action, null);
}
public NodeDisconnectedException(StreamInput in) throws IOException {
super(in);
}
// stack trace is meaningless...
@Override
public Throwable fillInStackTrace() {
return this;
}
}
| NodeDisconnectedException |
java | apache__kafka | server-common/src/main/java/org/apache/kafka/server/common/UnitTestFeatureVersion.java | {
"start": 10604,
"end": 11959
} | enum ____ implements FeatureVersion {
UT_FV7_0(0, MetadataVersion.MINIMUM_VERSION, Map.of(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_7_IV0.featureLevel())),
UT_FV7_1(1, MetadataVersion.IBP_3_8_IV0, Map.of(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_8_IV0.featureLevel()));
private final short featureLevel;
private final MetadataVersion bootstrapMetadataVersion;
private final Map<String, Short> dependencies;
public static final String FEATURE_NAME = "unit.test.feature.version.7";
public static final FV7 LATEST_PRODUCTION = UT_FV7_1;
FV7(int featureLevel, MetadataVersion bootstrapMetadataVersion, Map<String, Short> dependencies) {
this.featureLevel = (short) featureLevel;
this.bootstrapMetadataVersion = bootstrapMetadataVersion;
this.dependencies = dependencies;
}
@Override
public short featureLevel() {
return featureLevel;
}
@Override
public String featureName() {
return FEATURE_NAME;
}
@Override
public MetadataVersion bootstrapMetadataVersion() {
return bootstrapMetadataVersion;
}
@Override
public Map<String, Short> dependencies() {
return dependencies;
}
}
}
| FV7 |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.java | {
"start": 113060,
"end": 115648
} | class ____ extends ParserRuleContext {
public TerminalNode SORT() { return getToken(EsqlBaseParser.SORT, 0); }
public List<OrderExpressionContext> orderExpression() {
return getRuleContexts(OrderExpressionContext.class);
}
public OrderExpressionContext orderExpression(int i) {
return getRuleContext(OrderExpressionContext.class,i);
}
public List<TerminalNode> COMMA() { return getTokens(EsqlBaseParser.COMMA); }
public TerminalNode COMMA(int i) {
return getToken(EsqlBaseParser.COMMA, i);
}
@SuppressWarnings("this-escape")
public SortCommandContext(ParserRuleContext parent, int invokingState) {
super(parent, invokingState);
}
@Override public int getRuleIndex() { return RULE_sortCommand; }
@Override
public void enterRule(ParseTreeListener listener) {
if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).enterSortCommand(this);
}
@Override
public void exitRule(ParseTreeListener listener) {
if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).exitSortCommand(this);
}
@Override
public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
if ( visitor instanceof EsqlBaseParserVisitor ) return ((EsqlBaseParserVisitor<? extends T>)visitor).visitSortCommand(this);
else return visitor.visitChildren(this);
}
}
public final SortCommandContext sortCommand() throws RecognitionException {
SortCommandContext _localctx = new SortCommandContext(_ctx, getState());
enterRule(_localctx, 78, RULE_sortCommand);
try {
int _alt;
enterOuterAlt(_localctx, 1);
{
setState(455);
match(SORT);
setState(456);
orderExpression();
setState(461);
_errHandler.sync(this);
_alt = getInterpreter().adaptivePredict(_input,30,_ctx);
while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
if ( _alt==1 ) {
{
{
setState(457);
match(COMMA);
setState(458);
orderExpression();
}
}
}
setState(463);
_errHandler.sync(this);
_alt = getInterpreter().adaptivePredict(_input,30,_ctx);
}
}
}
catch (RecognitionException re) {
_localctx.exception = re;
_errHandler.reportError(this, re);
_errHandler.recover(this, re);
}
finally {
exitRule();
}
return _localctx;
}
@SuppressWarnings("CheckReturnValue")
public static | SortCommandContext |
java | quarkusio__quarkus | extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/boot/AmbiguousPersistenceUnitExtensionTest.java | {
"start": 416,
"end": 1529
} | class ____ {
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClass(MyEntity.class)
.addClass(PersistenceUnitInterceptor.class)
.addClass(AnotherPersistenceUnitInterceptor.class))
.withConfigurationResource("application.properties")
.assertException(throwable -> assertThat(throwable)
.hasNoSuppressedExceptions()
.rootCause()
.hasMessageContainingAll("Multiple instances of Interceptor were found for persistence unit <default>.",
"At most one instance can be assigned to each persistence unit. Instances found:",
"io.quarkus.hibernate.orm.boot.AmbiguousPersistenceUnitExtensionTest.PersistenceUnitInterceptor",
"io.quarkus.hibernate.orm.boot.AmbiguousPersistenceUnitExtensionTest.AnotherPersistenceUnitInterceptor"));
@PersistenceUnitExtension
public static | AmbiguousPersistenceUnitExtensionTest |
java | spring-projects__spring-framework | spring-messaging/src/main/java/org/springframework/messaging/tcp/reactor/AbstractNioBufferReactorNettyCodec.java | {
"start": 1081,
"end": 1731
} | class ____<P> implements ReactorNettyCodec<P> {
@Override
public Collection<Message<P>> decode(ByteBuf inputBuffer) {
ByteBuffer nioBuffer = inputBuffer.nioBuffer();
int start = nioBuffer.position();
List<Message<P>> messages = decodeInternal(nioBuffer);
inputBuffer.skipBytes(nioBuffer.position() - start);
return messages;
}
@Override
public void encode(Message<P> message, ByteBuf outputBuffer) {
outputBuffer.writeBytes(encodeInternal(message));
}
protected abstract List<Message<P>> decodeInternal(ByteBuffer nioBuffer);
protected abstract ByteBuffer encodeInternal(Message<P> message);
}
| AbstractNioBufferReactorNettyCodec |
java | redisson__redisson | redisson/src/main/java/org/redisson/client/protocol/decoder/ScoredAttributesReplayDecoder.java | {
"start": 1000,
"end": 1784
} | class ____<T> implements MultiDecoder<List<ScoreAttributesEntry<T>>> {
@Override
public Decoder<Object> getDecoder(Codec codec, int paramNum, State state, long size) {
if (paramNum % 3 == 1) {
return DoubleCodec.INSTANCE.getValueDecoder();
}
return MultiDecoder.super.getDecoder(codec, paramNum, state, size);
}
@Override
public List<ScoreAttributesEntry<T>> decode(List<Object> parts, State state) {
List<ScoreAttributesEntry<T>> result = new ArrayList<>();
for (int i = 0; i < parts.size(); i += 3) {
result.add(new ScoreAttributesEntry<>(((Number) parts.get(i + 1)).doubleValue(), (T) parts.get(i), (String) parts.get(i + 2)));
}
return result;
}
}
| ScoredAttributesReplayDecoder |
java | mockito__mockito | mockito-extensions/mockito-junit-jupiter/src/test/java/org/mockitousage/GenericTypeMockMultipleMatchesTest.java | {
"start": 1254,
"end": 1689
} | class ____ implements BeforeEachCallback {
@Override
public void beforeEach(ExtensionContext context) throws Exception {
currentExtensionContext = context;
}
}
private void startMocking(Object testInstance) {
MockitoExtension mockitoExtension = new MockitoExtension();
mockitoExtension.beforeEach(currentExtensionContext);
}
@Nested
public | ContextProvidingExtension |
java | apache__camel | components/camel-cm-sms/src/main/java/org/apache/camel/component/cm/exceptions/cmresponse/NotPhoneNumberFoundException.java | {
"start": 872,
"end": 990
} | class ____ extends CMResponseException {
public NotPhoneNumberFoundException() {
}
}
| NotPhoneNumberFoundException |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/aop/aspectj/autoproxy/AtAspectJAnnotationBindingTests.java | {
"start": 1203,
"end": 1975
} | class ____ {
private AnnotatedTestBean testBean;
private ClassPathXmlApplicationContext ctx;
@BeforeEach
void setup() {
ctx = new ClassPathXmlApplicationContext(getClass().getSimpleName() + "-context.xml", getClass());
testBean = (AnnotatedTestBean) ctx.getBean("testBean");
}
@Test
void testAnnotationBindingInAroundAdvice() {
assertThat(testBean.doThis()).isEqualTo("this value doThis");
assertThat(testBean.doThat()).isEqualTo("that value doThat");
assertThat(testBean.doArray()).hasSize(2);
}
@Test
void testNoMatchingWithoutAnnotationPresent() {
assertThat(testBean.doTheOther()).isEqualTo("doTheOther");
}
@Test
void testPointcutEvaluatedAgainstArray() {
ctx.getBean("arrayFactoryBean");
}
}
@Aspect
| AtAspectJAnnotationBindingTests |
java | google__auto | value/src/main/java/com/google/auto/value/extension/serializable/processor/SerializableAutoValueExtension.java | {
"start": 8448,
"end": 12999
} | class ____ {
private static final String PROXY_CLASS_NAME = "Proxy$";
private final TypeName outerClassTypeName;
private final ImmutableList<TypeVariableName> typeVariableNames;
private final ImmutableList<PropertyMirror> propertyMirrors;
private final ImmutableMap<Equivalence.Wrapper<TypeMirror>, Serializer> serializersMap;
ProxyGenerator(
TypeName outerClassTypeName,
ImmutableList<TypeVariableName> typeVariableNames,
ImmutableList<PropertyMirror> propertyMirrors,
ImmutableMap<Equivalence.Wrapper<TypeMirror>, Serializer> serializersMap) {
this.outerClassTypeName = outerClassTypeName;
this.typeVariableNames = typeVariableNames;
this.propertyMirrors = propertyMirrors;
this.serializersMap = serializersMap;
}
private TypeSpec generate() {
TypeSpec.Builder proxy =
TypeSpec.classBuilder(PROXY_CLASS_NAME)
.addModifiers(Modifier.STATIC)
.addTypeVariables(typeVariableNames)
.addSuperinterface(Serializable.class)
.addField(serialVersionUid())
.addFields(properties())
.addMethod(constructor())
.addMethod(readResolve());
return proxy.build();
}
private static FieldSpec serialVersionUid() {
return FieldSpec.builder(
long.class, "serialVersionUID", Modifier.PRIVATE, Modifier.STATIC, Modifier.FINAL)
.initializer("0")
.build();
}
/** Maps each AutoValue property to a serializable type. */
private List<FieldSpec> properties() {
return propertyMirrors.stream()
.map(
propertyMirror ->
FieldSpec.builder(
TypeName.get(
serializersMap
.get(MoreTypes.equivalence().wrap(propertyMirror.getType()))
.proxyFieldType()),
propertyMirror.getName(),
Modifier.PRIVATE)
.build())
.collect(toImmutableList());
}
/** Creates a constructor that converts the AutoValue's properties to serializable values. */
private MethodSpec constructor() {
MethodSpec.Builder constructor = MethodSpec.constructorBuilder();
for (PropertyMirror propertyMirror : propertyMirrors) {
Serializer serializer =
serializersMap.get(MoreTypes.equivalence().wrap(propertyMirror.getType()));
String name = propertyMirror.getName();
constructor.addParameter(TypeName.get(propertyMirror.getType()), name);
constructor.addStatement(
CodeBlock.of("this.$L = $L", name, serializer.toProxy(CodeBlock.of(name))));
}
return constructor.build();
}
/**
* Creates an implementation of {@code readResolve} that returns the serializable values in the
* Proxy object back to their original types.
*/
private MethodSpec readResolve() {
return MethodSpec.methodBuilder("readResolve")
.returns(Object.class)
.addException(Exception.class)
.addStatement(
"return new $T($L)",
outerClassTypeName,
CodeBlock.join(
propertyMirrors.stream().map(this::resolve).collect(toImmutableList()), ", "))
.build();
}
/** Maps a serializable type back to its original AutoValue property. */
private CodeBlock resolve(PropertyMirror propertyMirror) {
return serializersMap
.get(MoreTypes.equivalence().wrap(propertyMirror.getType()))
.fromProxy(CodeBlock.of(propertyMirror.getName()));
}
}
private static boolean hasSerializableInterface(Context context) {
final TypeMirror serializableTypeMirror =
context
.processingEnvironment()
.getElementUtils()
.getTypeElement(Serializable.class.getCanonicalName())
.asType();
return context
.processingEnvironment()
.getTypeUtils()
.isAssignable(context.autoValueClass().asType(), serializableTypeMirror);
}
private static boolean hasSerializableAutoValueAnnotation(Context context) {
return context.autoValueClass().getAnnotationMirrors().stream()
.map(AnnotationMirror::getAnnotationType)
.map(MoreTypes::asTypeElement)
.map(TypeElement::getQualifiedName)
.anyMatch(name -> name.contentEquals(SERIALIZABLE_AUTO_VALUE_NAME));
}
}
| ProxyGenerator |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/GoogleCloudStorageEndpointBuilderFactory.java | {
"start": 42949,
"end": 43532
} | class ____ use when creating the new buckets.
*
* The option is a: <code>com.google.cloud.storage.StorageClass</code>
* type.
*
* Default: STANDARD
* Group: common
*
* @param storageClass the value to set
* @return the dsl builder
*/
default GoogleCloudStorageEndpointProducerBuilder storageClass(com.google.cloud.storage.StorageClass storageClass) {
doSetProperty("storageClass", storageClass);
return this;
}
/**
* The Cloud Storage | to |
java | spring-projects__spring-boot | documentation/spring-boot-docs/src/main/java/org/springframework/boot/docs/io/caching/provider/cache2k/MyCache2kDefaultsConfiguration.java | {
"start": 973,
"end": 1223
} | class ____ {
@Bean
public Cache2kBuilderCustomizer myCache2kDefaultsCustomizer() {
// @formatter:off
return (builder) -> builder.entryCapacity(200)
.expireAfterWrite(5, TimeUnit.MINUTES);
// @formatter:on
}
}
| MyCache2kDefaultsConfiguration |
java | quarkusio__quarkus | integration-tests/hibernate-orm-compatibility-5.6/database-generator/src/main/java/io/quarkus/it/hibernate/compatibility/Main.java | {
"start": 858,
"end": 4683
} | class ____ implements QuarkusApplication {
@Inject
EntityManager em;
@Override
public int run(String... args) {
System.out.println("Initializing data...");
MyEntity createdEntity = QuarkusTransaction.requiringNew().call(() -> {
var entity = new MyEntity();
entity.duration = Duration.of(59, ChronoUnit.SECONDS);
entity.uuid = UUID.fromString("f49c6ba8-8d7f-417a-a255-d594dddf729f");
entity.instant = Instant.parse("2018-01-01T10:58:30.00Z");
entity.offsetTime = LocalTime.of(12, 58, 30, 0)
.atOffset(ZoneOffset.ofHours(2));
entity.offsetDateTime = LocalDateTime.of(2018, 1, 1, 12, 58, 30, 0)
.atOffset(ZoneOffset.ofHours(2));
entity.zonedDateTime = LocalDateTime.of(2018, 1, 1, 12, 58, 30, 0)
.atZone(ZoneId.of("Africa/Cairo" /* UTC+2 */));
entity.intArray = new int[] { 0, 1, 42 };
entity.stringList = new ArrayList<>(List.of("one", "two"));
entity.myEnum = MyEnum.VALUE2;
em.persist(entity);
// Create more than one entity of each type,
// so that we avoid the (uninteresting) edge case in sequence optimizers
// where only 1 entity was created and the optimizer is just about to start another pool.
em.persist(new MyEntity());
em.persist(new MyEntityWithGenericGeneratorAndDefaultAllocationSize());
em.persist(new MyEntityWithGenericGeneratorAndDefaultAllocationSize());
em.persist(new MyEntityWithSequenceGeneratorAndDefaultAllocationSize());
em.persist(new MyEntityWithSequenceGeneratorAndDefaultAllocationSize());
return entity;
});
System.out.println("Checking data...");
// Check that Hibernate ORM 5 used to load the values we're going to expect in compatibility tests
QuarkusTransaction.requiringNew().run(() -> {
checkEqual(1L, createdEntity.id);
var loadedEntity = em.find(MyEntity.class, createdEntity.id);
checkEqual(createdEntity.duration, loadedEntity.duration);
checkEqual(createdEntity.uuid, loadedEntity.uuid);
checkEqual(createdEntity.instant, loadedEntity.instant);
checkEqual(createdEntity.offsetTime.toLocalTime().atOffset(ZoneId.systemDefault().getRules().getOffset(Instant.now())),
loadedEntity.offsetTime);
checkEqual(createdEntity.offsetDateTime.atZoneSameInstant(ZoneId.systemDefault()).toOffsetDateTime(),
loadedEntity.offsetDateTime);
checkEqual(createdEntity.zonedDateTime.withZoneSameInstant(ZoneId.systemDefault()), loadedEntity.zonedDateTime);
checkEqual(createdEntity.intArray, loadedEntity.intArray);
checkEqual(createdEntity.stringList, loadedEntity.stringList);
checkEqual(createdEntity.myEnum, loadedEntity.myEnum);
});
System.out.println("Done.");
return 0;
}
private <T> void checkEqual(T expected, T actual) {
if (!Objects.equals(expected, actual)) {
throw new AssertionError("Not equal; expected: " + expected + ", actual: " + actual);
}
}
private void checkEqual(int[] expected, int[] actual) {
if (!Arrays.equals(expected, actual)) {
throw new AssertionError("Not equal; expected: " + Arrays.toString(expected)
+ ", actual: " + Arrays.toString(actual));
}
}
}
}
| QuarkusMain |
java | quarkusio__quarkus | independent-projects/bootstrap/core/src/main/java/io/quarkus/bootstrap/classloading/MemoryClassPathElement.java | {
"start": 3684,
"end": 3989
} | class ____ in the ProtectionDomain
// but it is not a good idea
// see https://github.com/quarkusio/quarkus/issues/41417 for more details about the problem
return NULL_PROTECTION_DOMAIN;
}
@Override
public void close() throws IOException {
}
private static | bytes |
java | mapstruct__mapstruct | processor/src/main/java/org/mapstruct/ap/internal/conversion/SimpleConversion.java | {
"start": 728,
"end": 3705
} | class ____ implements ConversionProvider {
@Override
public Assignment to(ConversionContext conversionContext) {
String toExpression = getToExpression( conversionContext );
return new TypeConversion( getToConversionImportTypes( conversionContext ),
getToConversionExceptionTypes( conversionContext ),
toExpression
);
}
@Override
public Assignment from(ConversionContext conversionContext) {
String fromExpression = getFromExpression( conversionContext );
return new TypeConversion( getFromConversionImportTypes( conversionContext ),
getFromConversionExceptionTypes( conversionContext ),
fromExpression
);
}
@Override
public List<HelperMethod> getRequiredHelperMethods(ConversionContext conversionContext) {
return Collections.emptyList();
}
/**
* Returns the conversion string from source to target. The placeholder {@code <SOURCE>} can be used to represent a
* reference to the source value.
*
* @param conversionContext A context providing optional information required for creating the conversion.
*
* @return The conversion string from source to target
*/
protected abstract String getToExpression(ConversionContext conversionContext);
/**
* Returns the conversion string from target to source. The placeholder {@code <SOURCE>} can be used to represent a
* reference to the target value.
*
* @param conversionContext ConversionContext providing optional information required for creating the conversion.
*
* @return The conversion string from target to source
*/
protected abstract String getFromExpression(ConversionContext conversionContext);
/**
* Returns a set with imported types of the "from" conversion. Defaults to an empty set; can be overridden in
* sub-classes to return the required types.
*
* @param conversionContext the conversion context
*
* @return conversion types required in the "from" conversion
*/
protected Set<Type> getFromConversionImportTypes(ConversionContext conversionContext) {
return Collections.emptySet();
}
/**
* Returns a set with imported types of the "to" conversion. Defaults to an empty set; can be overridden in
* sub-classes to return the required types.
*
* @param conversionContext the conversion context
*
* @return conversion types required in the "to" conversion
*/
protected Set<Type> getToConversionImportTypes(ConversionContext conversionContext) {
return Collections.emptySet();
}
protected List<Type> getToConversionExceptionTypes(ConversionContext conversionContext) {
return Collections.emptyList();
}
protected List<Type> getFromConversionExceptionTypes(ConversionContext conversionContext) {
return Collections.emptyList();
}
}
| SimpleConversion |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/oauth2/server/authorization/OidcClientRegistrationTests.java | {
"start": 37035,
"end": 38213
} | class ____
implements Converter<OidcClientRegistration, RegisteredClient> {
private final OidcClientRegistrationRegisteredClientConverter delegate = new OidcClientRegistrationRegisteredClientConverter();
private final List<String> supportedCustomClientMetadata;
private CustomRegisteredClientConverter(List<String> supportedCustomClientMetadata) {
this.supportedCustomClientMetadata = supportedCustomClientMetadata;
}
@Override
public RegisteredClient convert(OidcClientRegistration clientRegistration) {
RegisteredClient registeredClient = this.delegate.convert(clientRegistration);
ClientSettings.Builder clientSettingsBuilder = ClientSettings
.withSettings(registeredClient.getClientSettings().getSettings());
if (!CollectionUtils.isEmpty(this.supportedCustomClientMetadata)) {
clientRegistration.getClaims().forEach((claim, value) -> {
if (this.supportedCustomClientMetadata.contains(claim)) {
clientSettingsBuilder.setting(claim, value);
}
});
}
return RegisteredClient.from(registeredClient).clientSettings(clientSettingsBuilder.build()).build();
}
}
private static final | CustomRegisteredClientConverter |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java | {
"start": 1849,
"end": 30173
} | class ____ extends Configuration {
public static final Logger LOG = LoggerFactory.getLogger(
FairSchedulerConfiguration.class.getName());
/**
* Resource Increment request grant-able by the FairScheduler.
* This property is looked up in the yarn-site.xml.
* @deprecated The preferred way to configure the increment is by using the
* yarn.resource-types.{RESOURCE_NAME}.increment-allocation property,
* for memory: yarn.resource-types.memory-mb.increment-allocation
*/
@Deprecated
public static final String RM_SCHEDULER_INCREMENT_ALLOCATION_MB =
YarnConfiguration.YARN_PREFIX + "scheduler.increment-allocation-mb";
@Deprecated
public static final int DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_MB = 1024;
/**
* Resource Increment request grant-able by the FairScheduler.
* This property is looked up in the yarn-site.xml.
* @deprecated The preferred way to configure the increment is by using the
* yarn.resource-types.{RESOURCE_NAME}.increment-allocation property,
* for CPU: yarn.resource-types.vcores.increment-allocation
*/
@Deprecated
public static final String RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES =
YarnConfiguration.YARN_PREFIX + "scheduler.increment-allocation-vcores";
@Deprecated
public static final int DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES = 1;
/** Threshold for container size for making a container reservation as a
* multiple of increment allocation. Only container sizes above this are
* allowed to reserve a node */
public static final String
RM_SCHEDULER_RESERVATION_THRESHOLD_INCREMENT_MULTIPLE =
YarnConfiguration.YARN_PREFIX +
"scheduler.reservation-threshold.increment-multiple";
public static final float
DEFAULT_RM_SCHEDULER_RESERVATION_THRESHOLD_INCREMENT_MULTIPLE = 2f;
private static final String CONF_PREFIX = "yarn.scheduler.fair.";
/**
* Used during FS->CS conversion. When enabled, background threads are
* not started. This property should NOT be used by end-users!
*/
public static final String MIGRATION_MODE = CONF_PREFIX + "migration.mode";
/**
* Disables checking whether a placement rule is terminal or not. Only
* used during migration mode. This property should NOT be used by end users!
*/
public static final String NO_TERMINAL_RULE_CHECK = CONF_PREFIX +
"no-terminal-rule.check";
public static final String ALLOCATION_FILE = CONF_PREFIX + "allocation.file";
protected static final String DEFAULT_ALLOCATION_FILE = "fair-scheduler.xml";
/** Whether pools can be created that were not specified in the FS configuration file
*/
public static final String ALLOW_UNDECLARED_POOLS = CONF_PREFIX +
"allow-undeclared-pools";
public static final boolean DEFAULT_ALLOW_UNDECLARED_POOLS = true;
/** Whether to use the user name as the queue name (instead of "default") if
* the request does not specify a queue. */
public static final String USER_AS_DEFAULT_QUEUE = CONF_PREFIX +
"user-as-default-queue";
public static final boolean DEFAULT_USER_AS_DEFAULT_QUEUE = true;
protected static final float DEFAULT_LOCALITY_THRESHOLD = -1.0f;
/** Cluster threshold for node locality. */
public static final String LOCALITY_THRESHOLD_NODE = CONF_PREFIX +
"locality.threshold.node";
public static final float DEFAULT_LOCALITY_THRESHOLD_NODE =
DEFAULT_LOCALITY_THRESHOLD;
/** Cluster threshold for rack locality. */
public static final String LOCALITY_THRESHOLD_RACK = CONF_PREFIX +
"locality.threshold.rack";
public static final float DEFAULT_LOCALITY_THRESHOLD_RACK =
DEFAULT_LOCALITY_THRESHOLD;
/**
* Delay for node locality.
* @deprecated Continuous scheduling is known to cause locking issue inside
* Only used when {@link #CONTINUOUS_SCHEDULING_ENABLED} is enabled
*/
@Deprecated
protected static final String LOCALITY_DELAY_NODE_MS = CONF_PREFIX +
"locality-delay-node-ms";
@Deprecated
protected static final long DEFAULT_LOCALITY_DELAY_NODE_MS = -1L;
/**
* Delay for rack locality.
* @deprecated Continuous scheduling is known to cause locking issue inside
* Only used when {@link #CONTINUOUS_SCHEDULING_ENABLED} is enabled
*/
@Deprecated
protected static final String LOCALITY_DELAY_RACK_MS = CONF_PREFIX +
"locality-delay-rack-ms";
@Deprecated
protected static final long DEFAULT_LOCALITY_DELAY_RACK_MS = -1L;
/**
* Enable continuous scheduling or not.
* @deprecated Continuous scheduling is known to cause locking issue inside
* the scheduler in larger cluster, more than 100 nodes, use
* {@link #ASSIGN_MULTIPLE} to improve container allocation ramp up.
*/
@Deprecated
public static final String CONTINUOUS_SCHEDULING_ENABLED = CONF_PREFIX +
"continuous-scheduling-enabled";
@Deprecated
public static final boolean DEFAULT_CONTINUOUS_SCHEDULING_ENABLED = false;
/**
* Sleep time of each pass in continuous scheduling (5ms in default).
* @deprecated Continuous scheduling is known to cause locking issue inside
* Only used when {@link #CONTINUOUS_SCHEDULING_ENABLED} is enabled
*/
@Deprecated
public static final String CONTINUOUS_SCHEDULING_SLEEP_MS = CONF_PREFIX +
"continuous-scheduling-sleep-ms";
@Deprecated
public static final int DEFAULT_CONTINUOUS_SCHEDULING_SLEEP_MS = 5;
/** Whether preemption is enabled. */
public static final String PREEMPTION = CONF_PREFIX + "preemption";
public static final boolean DEFAULT_PREEMPTION = false;
protected static final String AM_PREEMPTION =
CONF_PREFIX + "am.preemption";
protected static final String AM_PREEMPTION_PREFIX =
CONF_PREFIX + "am.preemption.";
protected static final boolean DEFAULT_AM_PREEMPTION = true;
protected static final String PREEMPTION_THRESHOLD =
CONF_PREFIX + "preemption.cluster-utilization-threshold";
protected static final float DEFAULT_PREEMPTION_THRESHOLD = 0.8f;
public static final String WAIT_TIME_BEFORE_KILL = CONF_PREFIX +
"waitTimeBeforeKill";
public static final int DEFAULT_WAIT_TIME_BEFORE_KILL = 15000;
/**
* Postfix for resource allocation increments in the
* yarn.resource-types.{RESOURCE_NAME}.increment-allocation property.
*/
static final String INCREMENT_ALLOCATION = ".increment-allocation";
/**
* Configurable delay (ms) before an app's starvation is considered after
* it is identified. This is to give the scheduler enough time to
* allocate containers post preemption. This delay is added to the
* {@link #WAIT_TIME_BEFORE_KILL} and enough heartbeats.
*
* This is intended to be a backdoor on production clusters, and hence
* intentionally not documented.
*/
public static final String WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS =
CONF_PREFIX + "waitTimeBeforeNextStarvationCheck";
public static final long
DEFAULT_WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS = 10000;
/** Whether to assign multiple containers in one check-in. */
public static final String ASSIGN_MULTIPLE = CONF_PREFIX + "assignmultiple";
public static final boolean DEFAULT_ASSIGN_MULTIPLE = false;
/** Whether to give more weight to apps requiring many resources. */
public static final String SIZE_BASED_WEIGHT = CONF_PREFIX +
"sizebasedweight";
public static final boolean DEFAULT_SIZE_BASED_WEIGHT = false;
/** Maximum number of containers to assign on each check-in. */
public static final String DYNAMIC_MAX_ASSIGN =
CONF_PREFIX + "dynamic.max.assign";
private static final boolean DEFAULT_DYNAMIC_MAX_ASSIGN = true;
/**
* Specify exact number of containers to assign on each heartbeat, if dynamic
* max assign is turned off.
*/
public static final String MAX_ASSIGN = CONF_PREFIX + "max.assign";
public static final int DEFAULT_MAX_ASSIGN = -1;
/** The update interval for calculating resources in FairScheduler .*/
public static final String UPDATE_INTERVAL_MS =
CONF_PREFIX + "update-interval-ms";
public static final int DEFAULT_UPDATE_INTERVAL_MS = 500;
/** Ratio of nodes available for an app to make an reservation on. */
public static final String RESERVABLE_NODES =
CONF_PREFIX + "reservable-nodes";
public static final float RESERVABLE_NODES_DEFAULT = 0.05f;
private static final String INVALID_RESOURCE_DEFINITION_PREFIX =
"Error reading resource config--invalid resource definition: ";
private static final String RESOURCE_PERCENTAGE_PATTERN =
"^(-?(\\d+)(\\.\\d*)?)\\s*%\\s*";
private static final String RESOURCE_VALUE_PATTERN =
"^(-?\\d+)(\\.\\d*)?\\s*";
/**
* For resources separated by spaces instead of a comma.
*/
private static final String RESOURCES_WITH_SPACES_PATTERN =
"-?\\d+(?:\\.\\d*)?\\s*[a-z]+\\s*";
public FairSchedulerConfiguration() {
super();
}
public FairSchedulerConfiguration(Configuration conf) {
super(conf);
}
public Resource getMinimumAllocation() {
int mem = getInt(
YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
int cpu = getInt(
YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
return Resources.createResource(mem, cpu);
}
public Resource getMaximumAllocation() {
int mem = getInt(
YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
int cpu = getInt(
YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
return Resources.createResource(mem, cpu);
}
public Resource getIncrementAllocation() {
Long memory = null;
Integer vCores = null;
Map<String, Long> others = new HashMap<>();
ResourceInformation[] resourceTypes = ResourceUtils.getResourceTypesArray();
for (int i=0; i < resourceTypes.length; ++i) {
String name = resourceTypes[i].getName();
String propertyKey = getAllocationIncrementPropKey(name);
String propValue = get(propertyKey);
if (propValue != null) {
Matcher matcher = RESOURCE_REQUEST_VALUE_PATTERN.matcher(propValue);
if (matcher.matches()) {
long value = Long.parseLong(matcher.group(1));
String unit = matcher.group(2);
long valueInDefaultUnits = getValueInDefaultUnits(value, unit, name);
others.put(name, valueInDefaultUnits);
} else {
throw new IllegalArgumentException("Property " + propertyKey +
" is not in \"value [unit]\" format: " + propValue);
}
}
}
if (others.containsKey(ResourceInformation.MEMORY_MB.getName())) {
memory = others.get(ResourceInformation.MEMORY_MB.getName());
if (get(RM_SCHEDULER_INCREMENT_ALLOCATION_MB) != null) {
String overridingKey = getAllocationIncrementPropKey(
ResourceInformation.MEMORY_MB.getName());
LOG.warn("Configuration " + overridingKey + "=" + get(overridingKey) +
" is overriding the " + RM_SCHEDULER_INCREMENT_ALLOCATION_MB +
"=" + get(RM_SCHEDULER_INCREMENT_ALLOCATION_MB) + " property");
}
others.remove(ResourceInformation.MEMORY_MB.getName());
} else {
memory = getLong(
RM_SCHEDULER_INCREMENT_ALLOCATION_MB,
DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_MB);
}
if (others.containsKey(ResourceInformation.VCORES.getName())) {
vCores = others.get(ResourceInformation.VCORES.getName()).intValue();
if (get(RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES) != null) {
String overridingKey = getAllocationIncrementPropKey(
ResourceInformation.VCORES.getName());
LOG.warn("Configuration " + overridingKey + "=" + get(overridingKey) +
" is overriding the " + RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES +
"=" + get(RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES) + " property");
}
others.remove(ResourceInformation.VCORES.getName());
} else {
vCores = getInt(
RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES,
DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES);
}
return Resource.newInstance(memory, vCores, others);
}
private long getValueInDefaultUnits(long value, String unit,
String resourceName) {
return unit.isEmpty() ? value : UnitsConversionUtil.convert(unit,
ResourceUtils.getDefaultUnit(resourceName), value);
}
private String getAllocationIncrementPropKey(String resourceName) {
return YarnConfiguration.RESOURCE_TYPES + "." + resourceName +
INCREMENT_ALLOCATION;
}
public float getReservationThresholdIncrementMultiple() {
return getFloat(
RM_SCHEDULER_RESERVATION_THRESHOLD_INCREMENT_MULTIPLE,
DEFAULT_RM_SCHEDULER_RESERVATION_THRESHOLD_INCREMENT_MULTIPLE);
}
public float getLocalityThresholdNode() {
return getFloat(LOCALITY_THRESHOLD_NODE, DEFAULT_LOCALITY_THRESHOLD_NODE);
}
public float getLocalityThresholdRack() {
return getFloat(LOCALITY_THRESHOLD_RACK, DEFAULT_LOCALITY_THRESHOLD_RACK);
}
/**
* Whether continuous scheduling is turned on.
* @deprecated use {@link #ASSIGN_MULTIPLE} to improve container allocation
* ramp up.
* @return whether continuous scheduling is enabled
*/
@Deprecated
public boolean isContinuousSchedulingEnabled() {
return getBoolean(CONTINUOUS_SCHEDULING_ENABLED,
DEFAULT_CONTINUOUS_SCHEDULING_ENABLED);
}
/**
* The sleep time of the continuous scheduler thread.
* @deprecated linked to {@link #CONTINUOUS_SCHEDULING_ENABLED} deprecation
* @return sleep time in ms
*/
@Deprecated
public int getContinuousSchedulingSleepMs() {
return getInt(CONTINUOUS_SCHEDULING_SLEEP_MS,
DEFAULT_CONTINUOUS_SCHEDULING_SLEEP_MS);
}
/**
* Delay in milliseconds for locality fallback node to rack.
* @deprecated linked to {@link #CONTINUOUS_SCHEDULING_ENABLED} deprecation
* @return delay in ms
*/
@Deprecated
public long getLocalityDelayNodeMs() {
return getLong(LOCALITY_DELAY_NODE_MS, DEFAULT_LOCALITY_DELAY_NODE_MS);
}
/**
* Delay in milliseconds for locality fallback rack to other.
* @deprecated linked to {@link #CONTINUOUS_SCHEDULING_ENABLED} deprecation
* @return delay in ms
*/
@Deprecated
public long getLocalityDelayRackMs() {
return getLong(LOCALITY_DELAY_RACK_MS, DEFAULT_LOCALITY_DELAY_RACK_MS);
}
public boolean getPreemptionEnabled() {
return getBoolean(PREEMPTION, DEFAULT_PREEMPTION);
}
public boolean getAMPreemptionEnabled(String queueName) {
String propertyName = AM_PREEMPTION_PREFIX + queueName;
if (get(propertyName) != null) {
boolean amPreemptionEnabled =
getBoolean(propertyName, DEFAULT_AM_PREEMPTION);
LOG.debug("AM preemption enabled for queue {}: {}",
queueName, amPreemptionEnabled);
return amPreemptionEnabled;
}
return getBoolean(AM_PREEMPTION, DEFAULT_AM_PREEMPTION);
}
public float getPreemptionUtilizationThreshold() {
return getFloat(PREEMPTION_THRESHOLD, DEFAULT_PREEMPTION_THRESHOLD);
}
public boolean getAssignMultiple() {
return getBoolean(ASSIGN_MULTIPLE, DEFAULT_ASSIGN_MULTIPLE);
}
public boolean isMaxAssignDynamic() {
return getBoolean(DYNAMIC_MAX_ASSIGN, DEFAULT_DYNAMIC_MAX_ASSIGN);
}
public int getMaxAssign() {
return getInt(MAX_ASSIGN, DEFAULT_MAX_ASSIGN);
}
public boolean getSizeBasedWeight() {
return getBoolean(SIZE_BASED_WEIGHT, DEFAULT_SIZE_BASED_WEIGHT);
}
public long getWaitTimeBeforeNextStarvationCheck() {
return getLong(WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS,
DEFAULT_WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS);
}
public int getWaitTimeBeforeKill() {
return getInt(WAIT_TIME_BEFORE_KILL, DEFAULT_WAIT_TIME_BEFORE_KILL);
}
public boolean getUsePortForNodeName() {
return getBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME,
YarnConfiguration.DEFAULT_RM_SCHEDULER_USE_PORT_FOR_NODE_NAME);
}
public float getReservableNodes() {
return getFloat(RESERVABLE_NODES, RESERVABLE_NODES_DEFAULT);
}
/**
* Parses a resource config value in one of three forms:
* <ol>
* <li>Percentage: "50%" or "40% memory, 60% cpu"</li>
* <li>New style resources: "vcores=10, memory-mb=1024"
* or "vcores=60%, memory-mb=40%"</li>
* <li>Old style resources: "1024 mb, 10 vcores"</li>
* </ol>
* In new style resources, any resource that is not specified will be
* set to {@link Long#MAX_VALUE} or 100%, as appropriate. Also, in the new
* style resources, units are not allowed. Units are assumed from the resource
* manager's settings for the resources when the value isn't a percentage.
*
* @param value the resource definition to parse
* @return a {@link ConfigurableResource} that represents the parsed value
* @throws AllocationConfigurationException if the raw value is not a valid
* resource definition
*/
public static ConfigurableResource parseResourceConfigValue(String value)
throws AllocationConfigurationException {
return parseResourceConfigValue(value, Long.MAX_VALUE);
}
/**
* Parses a resource config value in one of three forms:
* <ol>
* <li>Percentage: "50%" or "40% memory, 60% cpu"</li>
* <li>New style resources: "vcores=10, memory-mb=1024"
* or "vcores=60%, memory-mb=40%"</li>
* <li>Old style resources: "1024 mb, 10 vcores"</li>
* </ol>
* In new style resources, any resource that is not specified will be
* set to {@code missing} or 0%, as appropriate. Also, in the new style
* resources, units are not allowed. Units are assumed from the resource
* manager's settings for the resources when the value isn't a percentage.
*
* The {@code missing} parameter is only used in the case of new style
* resources without percentages. With new style resources with percentages,
* any missing resources will be assumed to be 100% because percentages are
* only used with maximum resource limits.
*
* @param value the resource definition to parse
* @param missing the value to use for any unspecified resources
* @return a {@link ConfigurableResource} that represents the parsed value
* @throws AllocationConfigurationException if the raw value is not a valid
* resource definition
*/
public static ConfigurableResource parseResourceConfigValue(String value,
long missing) throws AllocationConfigurationException {
ConfigurableResource configurableResource;
if (value.trim().isEmpty()) {
throw new AllocationConfigurationException("Error reading resource "
+ "config--the resource string is empty.");
}
try {
if (value.contains("=")) {
configurableResource = parseNewStyleResource(value, missing);
} else if (value.contains("%")) {
configurableResource = parseOldStyleResourceAsPercentage(value);
} else {
configurableResource = parseOldStyleResource(value);
}
} catch (RuntimeException ex) {
throw new AllocationConfigurationException(
"Error reading resource config", ex);
}
return configurableResource;
}
private static ConfigurableResource parseNewStyleResource(String value,
long missing) throws AllocationConfigurationException {
final ConfigurableResource configurableResource;
boolean asPercent = value.contains("%");
if (asPercent) {
configurableResource = new ConfigurableResource();
} else {
configurableResource = new ConfigurableResource(missing);
}
String[] resources = value.split(",");
for (String resource : resources) {
String[] parts = resource.split("=");
if (parts.length != 2) {
throw createConfigException(value,
"Every resource must be of the form: name=value.");
}
String resourceName = parts[0].trim();
String resourceValue = parts[1].trim();
try {
if (asPercent) {
double percentage = parseNewStyleResourceAsPercentage(value,
resourceName, resourceValue);
configurableResource.setPercentage(resourceName, percentage);
} else {
long parsedValue = parseNewStyleResourceAsAbsoluteValue(value,
resourceValue, resourceName);
configurableResource.setValue(resourceName, parsedValue);
}
} catch (ResourceNotFoundException ex) {
throw createConfigException(value, "The "
+ "resource name, \"" + resourceName + "\" was not "
+ "recognized. Please check the value of "
+ YarnConfiguration.RESOURCE_TYPES + " in the Resource "
+ "Manager's configuration files.", ex);
}
}
return configurableResource;
}
private static double parseNewStyleResourceAsPercentage(
String value, String resource, String resourceValue)
throws AllocationConfigurationException {
try {
return findPercentage(resourceValue, resource);
} catch (AllocationConfigurationException ex) {
throw createConfigException(value,
"The resource values must all be percentages. \""
+ resourceValue + "\" is either not a non-negative number " +
"or does not include the '%' symbol.", ex);
}
}
private static long parseNewStyleResourceAsAbsoluteValue(String value,
String resourceValue, String resourceName)
throws AllocationConfigurationException {
final long parsedValue;
try {
parsedValue = Long.parseLong(resourceValue);
} catch (NumberFormatException e) {
throw createConfigException(value, "The "
+ "resource values must all be integers. \"" + resourceValue
+ "\" is not an integer.", e);
}
if (parsedValue < 0) {
throw new AllocationConfigurationException(
"Invalid value of " + resourceName +
": " + parsedValue + ", value should not be negative!");
}
return parsedValue;
}
private static ConfigurableResource parseOldStyleResourceAsPercentage(
String value) throws AllocationConfigurationException {
return new ConfigurableResource(
getResourcePercentage(StringUtils.toLowerCase(value)));
}
private static ConfigurableResource parseOldStyleResource(String input)
throws AllocationConfigurationException {
final String lowerCaseInput = StringUtils.toLowerCase(input);
String[] resources = lowerCaseInput.split(",");
if (resources.length != 2) {
resources = findOldStyleResourcesInSpaceSeparatedInput(lowerCaseInput);
if (resources.length != 2) {
throw new AllocationConfigurationException(
"Cannot parse resource values from input: " + input);
}
}
final int memory = parseOldStyleResourceMemory(resources);
final int vcores = parseOldStyleResourceVcores(resources);
return new ConfigurableResource(
Resources.createResource(memory, vcores));
}
private static String[] findOldStyleResourcesInSpaceSeparatedInput(
String input) {
final Pattern pattern = Pattern.compile(RESOURCES_WITH_SPACES_PATTERN);
final Matcher matcher = pattern.matcher(input);
List<String> resources = Lists.newArrayList();
while (matcher.find()) {
resources.add(matcher.group(0));
}
return resources.toArray(new String[0]);
}
private static int parseOldStyleResourceMemory(String[] resources)
throws AllocationConfigurationException {
final int memory = findResource(resources, "mb");
if (memory < 0) {
throw new AllocationConfigurationException(
"Invalid value of memory: " + memory +
", value should not be negative!");
}
return memory;
}
private static int parseOldStyleResourceVcores(String[] resources)
throws AllocationConfigurationException {
final int vcores = findResource(resources, "vcores");
if (vcores < 0) {
throw new AllocationConfigurationException(
"Invalid value of vcores: " + vcores +
", value should not be negative!");
}
return vcores;
}
private static double[] getResourcePercentage(String val)
throws AllocationConfigurationException {
int numberOfKnownResourceTypes = ResourceUtils
.getNumberOfCountableResourceTypes();
double[] resourcePercentage = new double[numberOfKnownResourceTypes];
String[] values = val.split(",");
if (values.length == 1) {
double percentage = findPercentage(values, "");
for (int i = 0; i < numberOfKnownResourceTypes; i++) {
resourcePercentage[i] = percentage;
}
} else {
resourcePercentage[0] = findPercentage(values, "memory");
resourcePercentage[1] = findPercentage(values, "cpu");
}
return resourcePercentage;
}
private static double findPercentage(String resourceValue, String resource)
throws AllocationConfigurationException {
return findPercentageInternal(resource, resourceValue, false);
}
private static double findPercentage(String[] resourceValues, String resource)
throws AllocationConfigurationException {
String resourceValue = findResourceFromValues(resourceValues, resource);
return findPercentageInternal(resource, resourceValue, true);
}
private static double findPercentageInternal(String resource,
String resourceValue, boolean includeResourceInPattern)
throws AllocationConfigurationException {
final Pattern pattern;
if (includeResourceInPattern) {
pattern = Pattern.compile(RESOURCE_PERCENTAGE_PATTERN + resource);
} else {
pattern = Pattern.compile(RESOURCE_PERCENTAGE_PATTERN);
}
Matcher matcher = pattern.matcher(resourceValue);
if (!matcher.matches()) {
if (resource.equals("")) {
throw new AllocationConfigurationException("Invalid percentage: " +
resourceValue);
} else {
throw new AllocationConfigurationException("Invalid percentage of " +
resource + ": " + resourceValue);
}
}
double percentage = Double.parseDouble(matcher.group(1)) / 100.0;
if (percentage < 0) {
throw new AllocationConfigurationException("Invalid percentage: " +
resourceValue + ", percentage should not be negative!");
}
return percentage;
}
private static AllocationConfigurationException createConfigException(
String value, String message) {
return createConfigException(value, message, null);
}
private static AllocationConfigurationException createConfigException(
String value, String message, Throwable t) {
String msg = INVALID_RESOURCE_DEFINITION_PREFIX + value + ". " + message;
if (t != null) {
return new AllocationConfigurationException(msg, t);
} else {
return new AllocationConfigurationException(msg);
}
}
public long getUpdateInterval() {
return getLong(UPDATE_INTERVAL_MS, DEFAULT_UPDATE_INTERVAL_MS);
}
private static int findResource(String[] resourceValues, String resource)
throws AllocationConfigurationException {
String resourceValue = findResourceFromValues(resourceValues, resource);
final Pattern pattern = Pattern.compile(RESOURCE_VALUE_PATTERN +
resource);
Matcher matcher = pattern.matcher(resourceValue);
if (!matcher.find()) {
throw new AllocationConfigurationException("Invalid value of " +
(resource.equals("mb") ? "memory" : resource) + ": " + resourceValue);
}
return Integer.parseInt(matcher.group(1));
}
private static String findResourceFromValues(String[] resourceValues,
String resource) throws AllocationConfigurationException {
for (String resourceValue : resourceValues) {
if (resourceValue.contains(resource)) {
return resourceValue.trim();
}
}
throw new AllocationConfigurationException("Missing resource: " + resource);
}
}
| FairSchedulerConfiguration |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/server/MethodNotAllowedException.java | {
"start": 1211,
"end": 2749
} | class ____ extends ResponseStatusException {
private final String method;
private final Set<HttpMethod> httpMethods;
public MethodNotAllowedException(HttpMethod method, Collection<HttpMethod> supportedMethods) {
this(method.name(), supportedMethods);
}
public MethodNotAllowedException(String method, @Nullable Collection<HttpMethod> supportedMethods) {
super(HttpStatus.METHOD_NOT_ALLOWED, "Request method '" + method + "' is not supported.",
null, null, new Object[] {method, supportedMethods});
Assert.notNull(method, "'method' is required");
if (supportedMethods == null) {
supportedMethods = Collections.emptySet();
}
this.method = method;
this.httpMethods = Collections.unmodifiableSet(new LinkedHashSet<>(supportedMethods));
if (!this.httpMethods.isEmpty()) {
setDetail("Supported methods: " + this.httpMethods);
}
}
/**
* Return HttpHeaders with an "Allow" header that documents the allowed
* HTTP methods for this URL, if available, or an empty instance otherwise.
*/
@Override
public HttpHeaders getHeaders() {
if (CollectionUtils.isEmpty(this.httpMethods)) {
return HttpHeaders.EMPTY;
}
HttpHeaders headers = new HttpHeaders();
headers.setAllow(this.httpMethods);
return headers;
}
/**
* Return the HTTP method for the failed request.
*/
public String getHttpMethod() {
return this.method;
}
/**
* Return the list of supported HTTP methods.
*/
public Set<HttpMethod> getSupportedMethods() {
return this.httpMethods;
}
}
| MethodNotAllowedException |
java | netty__netty | codec-base/src/main/java/io/netty/handler/codec/base64/Base64.java | {
"start": 1451,
"end": 15151
} | class ____ {
/** Maximum line length (76) of Base64 output. */
private static final int MAX_LINE_LENGTH = 76;
/** The equals sign (=) as a byte. */
private static final byte EQUALS_SIGN = (byte) '=';
/** The new line character (\n) as a byte. */
private static final byte NEW_LINE = (byte) '\n';
private static final byte WHITE_SPACE_ENC = -5; // Indicates white space in encoding
private static final byte EQUALS_SIGN_ENC = -1; // Indicates equals sign in encoding
private static byte[] alphabet(Base64Dialect dialect) {
return ObjectUtil.checkNotNull(dialect, "dialect").alphabet;
}
private static byte[] decodabet(Base64Dialect dialect) {
return ObjectUtil.checkNotNull(dialect, "dialect").decodabet;
}
private static boolean breakLines(Base64Dialect dialect) {
return ObjectUtil.checkNotNull(dialect, "dialect").breakLinesByDefault;
}
public static ByteBuf encode(ByteBuf src) {
return encode(src, Base64Dialect.STANDARD);
}
public static ByteBuf encode(ByteBuf src, Base64Dialect dialect) {
return encode(src, breakLines(dialect), dialect);
}
public static ByteBuf encode(ByteBuf src, Base64Dialect dialect, boolean addPadding) {
return encode(src, breakLines(dialect), dialect, addPadding);
}
public static ByteBuf encode(ByteBuf src, boolean breakLines) {
return encode(src, breakLines, Base64Dialect.STANDARD);
}
public static ByteBuf encode(ByteBuf src, boolean breakLines, boolean addPadding) {
return encode(src, breakLines, Base64Dialect.STANDARD, addPadding);
}
public static ByteBuf encode(ByteBuf src, boolean breakLines, Base64Dialect dialect) {
ObjectUtil.checkNotNull(src, "src");
ByteBuf dest = encode(src, src.readerIndex(), src.readableBytes(), breakLines, dialect);
src.readerIndex(src.writerIndex());
return dest;
}
public static ByteBuf encode(ByteBuf src, boolean breakLines, Base64Dialect dialect, boolean addPadding) {
ObjectUtil.checkNotNull(src, "src");
ByteBuf dest = encode(src, src.readerIndex(), src.readableBytes(), breakLines, dialect, addPadding);
src.readerIndex(src.writerIndex());
return dest;
}
public static ByteBuf encode(ByteBuf src, int off, int len) {
return encode(src, off, len, Base64Dialect.STANDARD);
}
public static ByteBuf encode(ByteBuf src, int off, int len, Base64Dialect dialect) {
return encode(src, off, len, breakLines(dialect), dialect);
}
public static ByteBuf encode(
ByteBuf src, int off, int len, boolean breakLines) {
return encode(src, off, len, breakLines, Base64Dialect.STANDARD);
}
public static ByteBuf encode(
ByteBuf src, int off, int len, boolean breakLines, Base64Dialect dialect) {
return encode(src, off, len, breakLines, dialect, src.alloc(), true);
}
public static ByteBuf encode(
ByteBuf src, int off, int len, boolean breakLines, Base64Dialect dialect, boolean addPadding) {
return encode(src, off, len, breakLines, dialect, src.alloc(), addPadding);
}
public static ByteBuf encode(
ByteBuf src, int off, int len, boolean breakLines, Base64Dialect dialect, ByteBufAllocator allocator) {
return encode(src, off, len, breakLines, dialect, allocator, true);
}
private static ByteBuf encode(
ByteBuf src, int off, int len, boolean breakLines, Base64Dialect dialect, ByteBufAllocator allocator,
boolean addPadding) {
ObjectUtil.checkNotNull(src, "src");
ObjectUtil.checkNotNull(dialect, "dialect");
int capacity = encodedBufferSize(len, breakLines);
ByteBuf destBuf = allocator.buffer(capacity).order(src.order());
// Ensure the destination buffer is flat, if possible, and avoid leak detection checks on every write:
ByteBuf dest = destBuf.unwrap() == null || !destBuf.isContiguous() ? destBuf :
destBuf.hasArray() ?
Unpooled.wrappedBuffer(destBuf.array(), destBuf.arrayOffset(), capacity).order(src.order()) :
Unpooled.wrappedBuffer(destBuf.internalNioBuffer(0, capacity)).order(src.order());
byte[] alphabet = alphabet(dialect);
int d = 0;
int e = 0;
int len2 = len - 2;
int lineLength = 0;
for (; d < len2; d += 3, e += 4) {
encode3to4(src, d + off, 3, dest, e, alphabet, addPadding);
lineLength += 4;
if (breakLines && lineLength == MAX_LINE_LENGTH) {
dest.setByte(e + 4, NEW_LINE);
e ++;
lineLength = 0;
} // end if: end of line
} // end for: each piece of array
if (d < len) {
e += encode3to4(src, d + off, len - d, dest, e, alphabet, addPadding);
} // end if: some padding needed
// Remove last byte if it's a newline
if (e > 1 && dest.getByte(e - 1) == NEW_LINE) {
e--;
}
if (dest != destBuf) {
dest.release();
}
return destBuf.setIndex(0, e);
}
private static int encode3to4(
ByteBuf src, int srcOffset, int numSigBytes, ByteBuf dest, int destOffset, byte[] alphabet,
boolean addPadding) {
// 1 2 3
// 01234567890123456789012345678901 Bit position
// --------000000001111111122222222 Array position from threeBytes
// --------| || || || | Six bit groups to index ALPHABET
// >>18 >>12 >> 6 >> 0 Right shift necessary
// 0x3f 0x3f 0x3f Additional AND
// Create buffer with zero-padding if there are only one or two
// significant bytes passed in the array.
// We have to shift left 24 in order to flush out the 1's that appear
// when Java treats a value as negative that is cast from a byte to an int.
if (src.order() == ByteOrder.BIG_ENDIAN) {
final int inBuff;
switch (numSigBytes) {
case 1:
inBuff = toInt(src.getByte(srcOffset));
break;
case 2:
inBuff = toIntBE(src.getShort(srcOffset));
break;
default:
inBuff = numSigBytes <= 0 ? 0 : toIntBE(src.getMedium(srcOffset));
break;
}
return encode3to4BigEndian(inBuff, numSigBytes, dest, destOffset, alphabet, addPadding);
} else {
final int inBuff;
switch (numSigBytes) {
case 1:
inBuff = toInt(src.getByte(srcOffset));
break;
case 2:
inBuff = toIntLE(src.getShort(srcOffset));
break;
default:
inBuff = numSigBytes <= 0 ? 0 : toIntLE(src.getMedium(srcOffset));
break;
}
return encode3to4LittleEndian(inBuff, numSigBytes, dest, destOffset, alphabet, addPadding);
}
}
// package-private for testing
static int encodedBufferSize(int len, boolean breakLines) {
// Cast len to long to prevent overflow
long len43 = ((long) len << 2) / 3;
// Account for padding
long ret = (len43 + 3) & ~3;
if (breakLines) {
ret += len43 / MAX_LINE_LENGTH;
}
return ret < Integer.MAX_VALUE ? (int) ret : Integer.MAX_VALUE;
}
private static int toInt(byte value) {
return (value & 0xff) << 16;
}
private static int toIntBE(short value) {
return (value & 0xff00) << 8 | (value & 0xff) << 8;
}
private static int toIntLE(short value) {
return (value & 0xff) << 16 | (value & 0xff00);
}
private static int toIntBE(int mediumValue) {
return (mediumValue & 0xff0000) | (mediumValue & 0xff00) | (mediumValue & 0xff);
}
private static int toIntLE(int mediumValue) {
return (mediumValue & 0xff) << 16 | (mediumValue & 0xff00) | (mediumValue & 0xff0000) >>> 16;
}
private static int encode3to4BigEndian(
int inBuff, int numSigBytes, ByteBuf dest, int destOffset, byte[] alphabet, boolean addPadding) {
// Packing bytes into an int to reduce bound and reference count checking.
switch (numSigBytes) {
case 3:
dest.setInt(destOffset, alphabet[inBuff >>> 18 ] << 24 |
alphabet[inBuff >>> 12 & 0x3f] << 16 |
alphabet[inBuff >>> 6 & 0x3f] << 8 |
alphabet[inBuff & 0x3f]);
return 4;
case 2:
if (addPadding) {
dest.setInt(destOffset, alphabet[inBuff >>> 18 ] << 24 |
alphabet[inBuff >>> 12 & 0x3f] << 16 |
alphabet[inBuff >>> 6 & 0x3f] << 8 |
EQUALS_SIGN);
return 4;
} else {
dest.setMedium(destOffset, alphabet[inBuff >>> 18 ] << 16 |
alphabet[inBuff >>> 12 & 0x3f] << 8 |
alphabet[inBuff >>> 6 & 0x3f ]);
return 3;
}
case 1:
if (addPadding) {
dest.setInt(destOffset, alphabet[inBuff >>> 18 ] << 24 |
alphabet[inBuff >>> 12 & 0x3f] << 16 |
EQUALS_SIGN << 8 |
EQUALS_SIGN);
return 4;
} else {
dest.setShort(destOffset, alphabet[inBuff >>> 18 ] << 8 |
alphabet[inBuff >>> 12 & 0x3f]);
return 2;
}
default:
// NOOP
return 0;
}
}
private static int encode3to4LittleEndian(
int inBuff, int numSigBytes, ByteBuf dest, int destOffset, byte[] alphabet, boolean addPadding) {
// Packing bytes into an int to reduce bound and reference count checking.
switch (numSigBytes) {
case 3:
dest.setInt(destOffset, alphabet[inBuff >>> 18 ] |
alphabet[inBuff >>> 12 & 0x3f] << 8 |
alphabet[inBuff >>> 6 & 0x3f] << 16 |
alphabet[inBuff & 0x3f] << 24);
return 4;
case 2:
if (addPadding) {
dest.setInt(destOffset, alphabet[inBuff >>> 18 ] |
alphabet[inBuff >>> 12 & 0x3f ] << 8 |
alphabet[inBuff >>> 6 & 0x3f ] << 16 |
EQUALS_SIGN << 24);
return 4;
} else {
dest.setMedium(destOffset, alphabet[inBuff >>> 18 ] |
alphabet[inBuff >>> 12 & 0x3f] << 8 |
alphabet[inBuff >>> 6 & 0x3f] << 16);
return 3;
}
case 1:
if (addPadding) {
dest.setInt(destOffset, alphabet[inBuff >>> 18 ] |
alphabet[inBuff >>> 12 & 0x3f] << 8 |
EQUALS_SIGN << 16 |
EQUALS_SIGN << 24);
return 4;
} else {
dest.setShort(destOffset, alphabet[inBuff >>> 18 ] |
alphabet[inBuff >>> 12 & 0x3f] << 8);
return 2;
}
default:
// NOOP
return 0;
}
}
public static ByteBuf decode(ByteBuf src) {
return decode(src, Base64Dialect.STANDARD);
}
public static ByteBuf decode(ByteBuf src, Base64Dialect dialect) {
ObjectUtil.checkNotNull(src, "src");
ByteBuf dest = decode(src, src.readerIndex(), src.readableBytes(), dialect);
src.readerIndex(src.writerIndex());
return dest;
}
public static ByteBuf decode(
ByteBuf src, int off, int len) {
return decode(src, off, len, Base64Dialect.STANDARD);
}
public static ByteBuf decode(
ByteBuf src, int off, int len, Base64Dialect dialect) {
return decode(src, off, len, dialect, src.alloc());
}
public static ByteBuf decode(
ByteBuf src, int off, int len, Base64Dialect dialect, ByteBufAllocator allocator) {
ObjectUtil.checkNotNull(src, "src");
ObjectUtil.checkNotNull(dialect, "dialect");
// Using a ByteProcessor to reduce bound and reference count checking.
return new Decoder().decode(src, off, len, allocator, dialect);
}
// package-private for testing
static int decodedBufferSize(int len) {
return len - (len >>> 2);
}
private static final | Base64 |
java | elastic__elasticsearch | modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java | {
"start": 2063,
"end": 2224
} | class ____ extends BucketsAggregator {
public static final ParseField FILTERS_FIELD = new ParseField("filters");
protected static | AdjacencyMatrixAggregator |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/PublicApiNamedStreamShouldReturnStreamTest.java | {
"start": 1641,
"end": 2090
} | class ____ {
// BUG: Diagnostic contains: PublicApiNamedStreamShouldReturnStream
public String stream() {
return "hello";
}
}
""")
.doTest();
}
@Test
public void compliantNegativeCase() {
compilationHelper
.addSourceLines(
"in/Test.java",
"""
import java.util.stream.Stream;
public abstract | Test |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java | {
"start": 15015,
"end": 15488
} | class ____ extends AbstractTransportRequest {
ClusterStatsNodeRequest() {}
public ClusterStatsNodeRequest(StreamInput in) throws IOException {
super(in);
}
@Override
public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) {
return new CancellableTask(id, type, action, "", parentTaskId, headers);
}
}
private static | ClusterStatsNodeRequest |
java | apache__flink | flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/api/internal/TableResultInternal.java | {
"start": 1411,
"end": 1488
} | interface ____ internal methods for {@link TableResult}. */
@Internal
public | with |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/time/StronglyTypeTimeTest.java | {
"start": 5241,
"end": 5721
} | class ____ {
private static final long FOO_MILLIS;
static {
FOO_MILLIS = 100;
}
public Duration get() {
return Duration.ofMillis(FOO_MILLIS);
}
}
""")
.doTest();
}
@Test
public void refactoring() {
refactoringHelper
.addInputLines(
"Test.java",
"""
import java.time.Duration;
| Test |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNode.java | {
"start": 1920,
"end": 6235
} | class ____ extends SaslDataTransferTestCase {
final static private int NUM_OF_DATANODES = 0;
@Test
public void testName() throws Exception {
MiniDFSCluster cluster = null;
HdfsConfiguration conf = createSecureConfig(
"authentication,privacy");
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES)
.build();
final MiniDFSCluster clusterRef = cluster;
cluster.waitActive();
FileSystem fsForSuperUser = UserGroupInformation
.loginUserFromKeytabAndReturnUGI(getHdfsPrincipal(), getHdfsKeytab()).doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return clusterRef.getFileSystem();
}
});
fsForSuperUser.mkdirs(new Path("/tmp"));
fsForSuperUser.setPermission(new Path("/tmp"), new FsPermission(
(short) 511));
UserGroupInformation ugi = UserGroupInformation
.loginUserFromKeytabAndReturnUGI(getUserPrincipal(), getUserKeyTab());
FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws Exception {
return clusterRef.getFileSystem();
}
});
Path p = new Path("/mydir");
assertThrows(IOException.class, () -> {
fs.mkdirs(p);
Path tmp = new Path("/tmp/alpha");
fs.mkdirs(tmp);
assertNotNull(fs.listStatus(tmp));
assertEquals(AuthenticationMethod.KERBEROS,
ugi.getAuthenticationMethod());
});
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Verify the following scenario.
* 1. Kerberos is enabled.
* 2. HDFS block tokens are not enabled.
* 3. Start the NN.
* 4. NN should throw an IOException and abort
* @throws Exception
*/
@Test
public void testKerberosHdfsBlockTokenInconsistencyNNStartup() throws Exception {
IOException exception = assertThrows(IOException.class, () -> {
MiniDFSCluster dfsCluster = null;
HdfsConfiguration conf = createSecureConfig(
"authentication,privacy");
try {
conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, false);
dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
dfsCluster.waitActive();
} finally {
if (dfsCluster != null) {
dfsCluster.shutdown();
}
}
return;
});
assertTrue(exception.getMessage().contains("Security is enabled but block access tokens"));
}
/**
* Test NameNodeStatusMXBean with security enabled and disabled.
*
* @throws Exception
*/
@Test
public void testNameNodeStatusMXBeanSecurityEnabled() throws Exception {
Configuration simpleConf = new Configuration();
Configuration secureConf = createSecureConfig("authentication");
// disabling security
UserGroupInformation.setConfiguration(simpleConf);
// get attribute "SecurityEnabled" with simple configuration
try (MiniDFSCluster cluster =
new MiniDFSCluster.Builder(simpleConf).build()) {
cluster.waitActive();
NameNode namenode = cluster.getNameNode();
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName = new ObjectName(
"Hadoop:service=NameNode,name=NameNodeStatus");
boolean securityEnabled = (boolean) mbs.getAttribute(mxbeanName,
"SecurityEnabled");
assertFalse(securityEnabled);
assertEquals(namenode.isSecurityEnabled(), securityEnabled);
}
// get attribute "SecurityEnabled" with secure configuration
try (MiniDFSCluster cluster =
new MiniDFSCluster.Builder(secureConf).build()) {
cluster.waitActive();
NameNode namenode = cluster.getNameNode();
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName = new ObjectName(
"Hadoop:service=NameNode,name=NameNodeStatus");
boolean securityEnabled = (boolean) mbs.getAttribute(mxbeanName,
"SecurityEnabled");
assertTrue(securityEnabled);
assertEquals(namenode.isSecurityEnabled(), securityEnabled);
}
}
}
| TestSecureNameNode |
java | quarkusio__quarkus | extensions/grpc-common/runtime/src/main/java/io/quarkus/grpc/common/runtime/graal/GrpcSubstitutions.java | {
"start": 2482,
"end": 2793
} | interface ____<T> { // NOSONAR
// Just provide access to "io.grpc.ServiceProviders.PriorityAccessor"
@Alias
boolean isAvailable(T provider);
@Alias
int getPriority(T provider);
}
@TargetClass(className = "com.google.protobuf.UnsafeUtil")
final | Target_io_grpc_ServiceProviders_PriorityAccessor |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/DefaultCharsetTest.java | {
"start": 6180,
"end": 6628
} | class ____ {
void f(String s, File f) throws Exception {
new FileReader(s);
new FileReader(f);
}
}
""")
.addOutputLines(
"out/Test.java",
"""
import static java.nio.charset.StandardCharsets.UTF_8;
import com.google.common.io.Files;
import java.io.*;
import java.io.File;
| Test |
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/SingleInputSenderExecutableAction.java | {
"start": 828,
"end": 1845
} | class ____ extends SenderExecutableAction {
private final String requestTypeForInputValidationError;
public SingleInputSenderExecutableAction(
Sender sender,
RequestManager requestManager,
String failedToSendRequestErrorMessage,
String requestTypeForInputValidationError
) {
super(sender, requestManager, failedToSendRequestErrorMessage);
this.requestTypeForInputValidationError = Objects.requireNonNull(requestTypeForInputValidationError);
}
@Override
public void execute(InferenceInputs inferenceInputs, TimeValue timeout, ActionListener<InferenceServiceResults> listener) {
if (inferenceInputs.isSingleInput() == false) {
listener.onFailure(
new ElasticsearchStatusException(requestTypeForInputValidationError + " only accepts 1 input", RestStatus.BAD_REQUEST)
);
return;
}
super.execute(inferenceInputs, timeout, listener);
}
}
| SingleInputSenderExecutableAction |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.