language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | elastic__elasticsearch | x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/ExecutableAction.java | {
"start": 693,
"end": 844
} | interface ____ {
void execute(InferenceInputs inferenceInputs, TimeValue timeout, ActionListener<InferenceServiceResults> listener);
}
| ExecutableAction |
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elastic/densetextembeddings/ElasticInferenceServiceDenseTextEmbeddingsModelTests.java | {
"start": 725,
"end": 1373
} | class ____ {
public static ElasticInferenceServiceDenseTextEmbeddingsModel createModel(String url, String modelId) {
return new ElasticInferenceServiceDenseTextEmbeddingsModel(
"id",
TaskType.TEXT_EMBEDDING,
"elastic",
new ElasticInferenceServiceDenseTextEmbeddingsServiceSettings(modelId, SimilarityMeasure.COSINE, null, null),
EmptyTaskSettings.INSTANCE,
EmptySecretSettings.INSTANCE,
ElasticInferenceServiceComponents.of(url),
ChunkingSettingsBuilder.DEFAULT_SETTINGS
);
}
}
| ElasticInferenceServiceDenseTextEmbeddingsModelTests |
java | apache__camel | components/camel-jaxb/src/test/java/org/apache/camel/converter/jaxb/person/Person.java | {
"start": 2089,
"end": 4414
} | class ____ {
@XmlElement(required = true)
protected String firstName;
@XmlElement(required = true)
protected String lastName;
@XmlElement(required = true, type = Integer.class, nillable = true)
protected Integer age;
@XmlElement(required = true)
protected Address address;
/**
* Gets the value of the firstName property.
*
* @return possible object is {@link String }
*
*/
public String getFirstName() {
return firstName;
}
/**
* Sets the value of the firstName property.
*
* @param value allowed object is {@link String }
*
*/
public void setFirstName(String value) {
this.firstName = value;
}
/**
* Gets the value of the lastName property.
*
* @return possible object is {@link String }
*
*/
public String getLastName() {
return lastName;
}
/**
* Sets the value of the lastName property.
*
* @param value allowed object is {@link String }
*
*/
public void setLastName(String value) {
this.lastName = value;
}
/**
* Gets the value of the age property.
*
* @return possible object is {@link Integer }
*
*/
public Integer getAge() {
return age;
}
/**
* Sets the value of the age property.
*
* @param value allowed object is {@link Integer }
*
*/
public void setAge(Integer value) {
this.age = value;
}
/**
* Gets the value of the address property.
*
* @return possible object is {@link Address }
*
*/
public Address getAddress() {
return address;
}
/**
* Sets the value of the address property.
*
* @param value allowed object is {@link Address }
*
*/
public void setAddress(Address value) {
this.address = value;
}
public Person withFirstName(String value) {
setFirstName(value);
return this;
}
public Person withLastName(String value) {
setLastName(value);
return this;
}
public Person withAge(Integer value) {
setAge(value);
return this;
}
public Person withAddress(Address value) {
setAddress(value);
return this;
}
}
| Person |
java | netty__netty | codec-stomp/src/main/java/io/netty/handler/codec/stomp/DefaultStompContentSubframe.java | {
"start": 874,
"end": 2490
} | class ____ extends DefaultByteBufHolder implements StompContentSubframe {
private DecoderResult decoderResult = DecoderResult.SUCCESS;
public DefaultStompContentSubframe(ByteBuf content) {
super(content);
}
@Override
public StompContentSubframe copy() {
return (StompContentSubframe) super.copy();
}
@Override
public StompContentSubframe duplicate() {
return (StompContentSubframe) super.duplicate();
}
@Override
public StompContentSubframe retainedDuplicate() {
return (StompContentSubframe) super.retainedDuplicate();
}
@Override
public StompContentSubframe replace(ByteBuf content) {
return new DefaultStompContentSubframe(content);
}
@Override
public StompContentSubframe retain() {
super.retain();
return this;
}
@Override
public StompContentSubframe retain(int increment) {
super.retain(increment);
return this;
}
@Override
public StompContentSubframe touch() {
super.touch();
return this;
}
@Override
public StompContentSubframe touch(Object hint) {
super.touch(hint);
return this;
}
@Override
public DecoderResult decoderResult() {
return decoderResult;
}
@Override
public void setDecoderResult(DecoderResult decoderResult) {
this.decoderResult = decoderResult;
}
@Override
public String toString() {
return "DefaultStompContent{" +
"decoderResult=" + decoderResult +
'}';
}
}
| DefaultStompContentSubframe |
java | quarkusio__quarkus | independent-projects/bootstrap/app-model/src/main/java/io/quarkus/paths/PathTreeUtils.java | {
"start": 208,
"end": 3449
} | interface ____ {
/**
* Returns a path as a string using the specified separator.
*
* @param path path to convert to a string
* @param separator path element separator
* @return string representation of a path
*/
static String asString(final Path path, String separator) {
if (path.getFileSystem().getSeparator().equals(separator)) {
return path.toString();
}
final int nameCount = path.getNameCount();
if (nameCount == 0) {
return "";
}
if (nameCount == 1) {
return path.getName(0).toString();
}
final StringBuilder s = new StringBuilder();
s.append(path.getName(0));
for (int i = 1; i < nameCount; ++i) {
s.append(separator).append(path.getName(i));
}
return s.toString();
}
/**
* Checks whether a path tree contains a given relative path respecting case sensitivity even on Windows.
*
* <p>
* Path API on Windows may resolve {@code templates} to {@code Templates}. This method
* helps verify whether a given relative path actually exists.
*
* @param pathTree path tree
* @param relativePath relative path to check
* @return true if a path tree contains a given relative path
*/
static boolean containsCaseSensitivePath(PathTree pathTree, String relativePath) {
if (!pathTree.contains(relativePath)) {
return false;
}
// if it's not Windows, we don't need to check further
if (File.separatorChar != '\\') {
return true;
}
// this should not be necessary, since relatvePath is meant to be a resource path, not an FS path but just in case
relativePath = relativePath.replace(File.separatorChar, '/');
final String[] pathElements = relativePath.split("/");
try (var openTree = pathTree.open()) {
for (var root : openTree.getRoots()) {
if (containsCaseSensitivePath(root, pathElements)) {
return true;
}
}
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return false;
}
private static boolean containsCaseSensitivePath(Path root, String[] pathElements) {
var parent = root;
for (String pathElement : pathElements) {
if (!Files.isDirectory(parent)) {
return false;
}
try (Stream<Path> stream = Files.list(parent)) {
var i = stream.iterator();
Path match = null;
while (i.hasNext()) {
final Path next = i.next();
if (pathElement.equals(next.getFileName().toString())) {
match = next;
break;
}
}
if (match == null) {
return false;
}
parent = match;
} catch (IOException e) {
throw new UncheckedIOException(e);
} catch (Exception e) {
throw e;
}
}
return true;
}
}
| PathTreeUtils |
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/mvc/ServletForwardingController.java | {
"start": 3688,
"end": 6219
} | class ____ extends AbstractController implements BeanNameAware {
private @Nullable String servletName;
private @Nullable String beanName;
public ServletForwardingController() {
super(false);
}
/**
* Set the name of the servlet to forward to,
* i.e. the "servlet-name" of the target servlet in web.xml.
* <p>Default is the bean name of this controller.
*/
public void setServletName(String servletName) {
this.servletName = servletName;
}
@Override
public void setBeanName(String name) {
this.beanName = name;
if (this.servletName == null) {
this.servletName = name;
}
}
@Override
protected @Nullable ModelAndView handleRequestInternal(HttpServletRequest request, HttpServletResponse response)
throws Exception {
ServletContext servletContext = getServletContext();
Assert.state(servletContext != null, "No ServletContext");
RequestDispatcher rd = servletContext.getNamedDispatcher(this.servletName);
if (rd == null) {
throw new ServletException("No servlet with name '" + this.servletName + "' defined in web.xml");
}
// If already included, include again, else forward.
if (useInclude(request, response)) {
rd.include(request, response);
if (logger.isTraceEnabled()) {
logger.trace("Included servlet [" + this.servletName +
"] in ServletForwardingController '" + this.beanName + "'");
}
}
else {
rd.forward(request, response);
if (logger.isTraceEnabled()) {
logger.trace("Forwarded to servlet [" + this.servletName +
"] in ServletForwardingController '" + this.beanName + "'");
}
}
return null;
}
/**
* Determine whether to use RequestDispatcher's {@code include} or
* {@code forward} method.
* <p>Performs a check whether an include URI attribute is found in the request,
* indicating an include request, and whether the response has already been committed.
* In both cases, an include will be performed, as a forward is not possible anymore.
* @param request current HTTP request
* @param response current HTTP response
* @return {@code true} for include, {@code false} for forward
* @see jakarta.servlet.RequestDispatcher#forward
* @see jakarta.servlet.RequestDispatcher#include
* @see jakarta.servlet.ServletResponse#isCommitted
* @see org.springframework.web.util.WebUtils#isIncludeRequest
*/
protected boolean useInclude(HttpServletRequest request, HttpServletResponse response) {
return (WebUtils.isIncludeRequest(request) || response.isCommitted());
}
}
| ServletForwardingController |
java | apache__flink | flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java | {
"start": 45834,
"end": 49721
} | class ____ extends AbstractSSEHandler
implements ObjectExpirationResult, S3RequesterChargedResult, S3VersionResult {
// Data items for successful copy
private final CopyObjectResult result = new CopyObjectResult();
// Data items for failed copy
private String errorCode = null;
private String errorMessage = null;
private String errorRequestId = null;
private String errorHostId = null;
private boolean receivedErrorResponse = false;
@Override
protected ServerSideEncryptionResult sseResult() {
return result;
}
public Date getLastModified() {
return result.getLastModifiedDate();
}
@Override
public String getVersionId() {
return result.getVersionId();
}
@Override
public void setVersionId(String versionId) {
result.setVersionId(versionId);
}
@Override
public Date getExpirationTime() {
return result.getExpirationTime();
}
@Override
public void setExpirationTime(Date expirationTime) {
result.setExpirationTime(expirationTime);
}
@Override
public String getExpirationTimeRuleId() {
return result.getExpirationTimeRuleId();
}
@Override
public void setExpirationTimeRuleId(String expirationTimeRuleId) {
result.setExpirationTimeRuleId(expirationTimeRuleId);
}
public String getETag() {
return result.getETag();
}
public String getErrorCode() {
return errorCode;
}
public String getErrorHostId() {
return errorHostId;
}
public String getErrorMessage() {
return errorMessage;
}
public String getErrorRequestId() {
return errorRequestId;
}
public boolean isErrorResponse() {
return receivedErrorResponse;
}
public boolean isRequesterCharged() {
return result.isRequesterCharged();
}
public void setRequesterCharged(boolean isRequesterCharged) {
result.setRequesterCharged(isRequesterCharged);
}
@Override
protected void doStartElement(String uri, String name, String qName, Attributes attrs) {
if (atTopLevel()) {
if (name.equals("CopyObjectResult") || name.equals("CopyPartResult")) {
receivedErrorResponse = false;
} else if (name.equals("Error")) {
receivedErrorResponse = true;
}
}
}
@Override
protected void doEndElement(String uri, String name, String qName) {
if (in("CopyObjectResult") || in("CopyPartResult")) {
if (name.equals("LastModified")) {
result.setLastModifiedDate(ServiceUtils.parseIso8601Date(getText()));
} else if (name.equals("ETag")) {
result.setETag(ServiceUtils.removeQuotes(getText()));
}
} else if (in("Error")) {
if (name.equals("Code")) {
errorCode = getText();
} else if (name.equals("Message")) {
errorMessage = getText();
} else if (name.equals("RequestId")) {
errorRequestId = getText();
} else if (name.equals("HostId")) {
errorHostId = getText();
}
}
}
}
/**
* Handler for parsing RequestPaymentConfiguration XML response associated with an Amazon S3
* bucket. The XML response is parsed into a <code>RequestPaymentConfiguration</code> object.
*/
public static | CopyObjectResultHandler |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSDelegationToken.java | {
"start": 1438,
"end": 1670
} | class ____
extends DelegationTokenIdentifier {
public KMSDelegationTokenIdentifier() {
super(TOKEN_KIND);
}
@Override
public Text getKind() {
return TOKEN_KIND;
}
}
} | KMSDelegationTokenIdentifier |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/RollupField.java | {
"start": 1102,
"end": 5612
} | class ____ {
// Fields that are used both in core Rollup actions and Rollup plugin
public static final ParseField ID = new ParseField("id");
public static final String TASK_NAME = "xpack/rollup/job";
public static final String ROLLUP_META = "_rollup";
public static final String INTERVAL = "interval";
public static final String COUNT_FIELD = "_count";
public static final String VERSION_FIELD = "version";
public static final String VALUE = "value";
public static final String TIMESTAMP = "timestamp";
public static final String FILTER = "filter";
public static final String NAME = "rollup";
public static final String TYPE_NAME = "_doc";
public static final String AGG = "agg";
public static final String ROLLUP_MISSING = "ROLLUP_MISSING_40710B25931745D4B0B8B310F6912A69";
public static final List<String> SUPPORTED_NUMERIC_METRICS = Arrays.asList(
MaxAggregationBuilder.NAME,
MinAggregationBuilder.NAME,
SumAggregationBuilder.NAME,
AvgAggregationBuilder.NAME,
ValueCountAggregationBuilder.NAME
);
public static final List<String> SUPPORTED_DATE_METRICS = Arrays.asList(
MaxAggregationBuilder.NAME,
MinAggregationBuilder.NAME,
ValueCountAggregationBuilder.NAME
);
// a set of ALL our supported metrics, to be a union of all other supported metric types (numeric, date, etc.)
public static final Set<String> SUPPORTED_METRICS;
static {
SUPPORTED_METRICS = new HashSet<>();
SUPPORTED_METRICS.addAll(SUPPORTED_NUMERIC_METRICS);
SUPPORTED_METRICS.addAll(SUPPORTED_DATE_METRICS);
}
// these mapper types are used by the configs (metric, histo, etc) to validate field mappings
public static final List<String> NUMERIC_FIELD_MAPPER_TYPES;
static {
List<String> types = Stream.of(NumberFieldMapper.NumberType.values())
.map(NumberFieldMapper.NumberType::typeName)
.collect(Collectors.toList());
types.add("scaled_float"); // have to add manually since scaled_float is in a module
NUMERIC_FIELD_MAPPER_TYPES = types;
}
public static final List<String> DATE_FIELD_MAPPER_TYPES = List.of(
DateFieldMapper.CONTENT_TYPE,
DateFieldMapper.DATE_NANOS_CONTENT_TYPE
);
/**
* Format to the appropriate Rollup field name convention
*
* @param source Source aggregation to get type and name from
* @param extra The type of value this field is (VALUE, INTERVAL, etc)
* @return formatted field name
*/
public static String formatFieldName(ValuesSourceAggregationBuilder<?> source, String extra) {
return source.field() + "." + source.getType() + "." + extra;
}
/**
* Format to the appropriate Rollup field name convention
*
* @param field The field we are formatting
* @param type The aggregation type that was used for rollup
* @param extra The type of value this field is (VALUE, INTERVAL, etc)
* @return formatted field name
*/
public static String formatFieldName(String field, String type, String extra) {
return field + "." + type + "." + extra;
}
/**
* Format to the appropriate Rollup convention for internal Metadata fields (_rollup)
*/
public static String formatMetaField(String extra) {
return RollupField.ROLLUP_META + "." + extra;
}
/**
* Format to the appropriate Rollup convention for extra Count aggs.
* These are added to averages and bucketing aggs that need a count
*/
public static String formatCountAggName(String field) {
return field + "." + RollupField.COUNT_FIELD;
}
/**
* Format to the appropriate Rollup convention for agg names that
* might conflict with empty buckets. `value` is appended to agg name.
* E.g. used for averages
*/
public static String formatValueAggName(String field) {
return field + "." + RollupField.VALUE;
}
/**
* Format into the convention for computed field lookups
*/
public static String formatComputed(String field, String agg) {
return field + "." + agg;
}
/**
* Format into the convention used by the Indexer's composite agg, so that
* the normal field name is translated into a Rollup fieldname via the agg name
*/
public static String formatIndexerAggName(String field, String agg) {
return field + "." + agg;
}
}
| RollupField |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/state/operator/restore/keyed/KeyedJob.java | {
"start": 5509,
"end": 6565
} | class ____
extends RichSourceFunction<Tuple2<Integer, Integer>> {
private static final long serialVersionUID = 1912878510707871659L;
private final ExecutionMode mode;
private boolean running = true;
private IntegerTupleSource(ExecutionMode mode) {
this.mode = mode;
}
@Override
public void run(SourceContext<Tuple2<Integer, Integer>> ctx) throws Exception {
for (int x = 0; x < 10; x++) {
ctx.collect(new Tuple2<>(x, x));
}
switch (mode) {
case GENERATE:
case MIGRATE:
synchronized (this) {
while (running) {
this.wait();
}
}
}
}
@Override
public void cancel() {
synchronized (this) {
running = false;
this.notifyAll();
}
}
}
private static final | IntegerTupleSource |
java | apache__camel | components/camel-pgevent/src/test/java/org/apache/camel/pgevent/integration/PgEventPubSubIT.java | {
"start": 1105,
"end": 2273
} | class ____ extends PgEventITSupport {
@EndpointInject("timer://test?repeatCount=1&period=1")
private Endpoint timerEndpoint;
@EndpointInject("mock:result")
private MockEndpoint mockEndpoint;
@Test
public void testPgEventPublishSubscribe() throws Exception {
mockEndpoint.expectedBodiesReceived(TEST_MESSAGE_BODY);
mockEndpoint.assertIsSatisfied(5000);
}
@Override
protected RoutesBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from(timerEndpoint)
.setBody(constant(TEST_MESSAGE_BODY))
.to(String.format(
"pgevent://%s:%s/%s/testchannel?user=%s&pass=%s", getHost(), getMappedPort(), POSTGRES_DB,
POSTGRES_USER, POSTGRES_PASSWORD));
from(String.format("pgevent://%s:%s/%s/testchannel?user=%s&pass=%s",
getHost(), getMappedPort(), POSTGRES_DB, POSTGRES_USER, POSTGRES_PASSWORD))
.to(mockEndpoint);
}
};
}
}
| PgEventPubSubIT |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/iterables/Iterables_assertAllSatisfy_Test.java | {
"start": 1295,
"end": 3020
} | class ____ extends IterablesBaseTest {
private List<String> actual = newArrayList("Luke", "Leia", "Yoda");
@Test
void should_satisfy_single_requirement() {
iterables.assertAllSatisfy(someInfo(), actual, s -> assertThat(s.length()).isEqualTo(4));
}
@Test
void should_satisfy_multiple_requirements() {
iterables.assertAllSatisfy(someInfo(), actual, s -> {
assertThat(s.length()).isEqualTo(4);
assertThat(s).doesNotContain("V");
});
}
@Test
void should_fail_according_to_requirements() {
// GIVEN
Consumer<String> restrictions = s -> {
assertThat(s.length()).isEqualTo(4);
assertThat(s).startsWith("L");
};
// WHEN
var error = expectAssertionError(() -> iterables.assertAllSatisfy(someInfo(), actual, restrictions));
// THEN
// can't build the exact error message due to internal stack traces
then(error).hasMessageContaining(format("%n" +
"Expecting actual:%n" +
" \"Yoda\"%n" +
"to start with:%n" +
" \"L\"%n"));
}
@Test
void should_fail_if_consumer_is_null() {
assertThatNullPointerException().isThrownBy(() -> assertThat(actual).allSatisfy(null))
.withMessage("The Consumer<T> expressing the assertions requirements must not be null");
}
@Test
void should_fail_if_actual_is_null() {
// WHEN
var error = expectAssertionError(() -> {
actual = null;
assertThat(actual).allSatisfy(null);
});
// THEN
then(error).hasMessage(actualIsNull());
}
}
| Iterables_assertAllSatisfy_Test |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/commons/util/ReflectionUtilsWithGenericTypeHierarchiesTests.java | {
"start": 2243,
"end": 2632
} | class ____ extends AParent implements InterfaceGenericNumber<Number> {
@Override
public void foo(Number parameter) {
}
}
var foo = findMethod(A.class, "foo", Long.class).orElseThrow();
assertEquals(A.class, foo.getDeclaringClass());
}
@Test
@Disabled("Expected behaviour is not clear yet.")
void unclearPrecedenceOfImplementationsInParentClassAndInterfaceDefault() {
| A |
java | apache__maven | impl/maven-impl/src/main/java/org/apache/maven/api/services/model/InheritanceAssembler.java | {
"start": 1072,
"end": 2007
} | interface ____ {
/**
* Merges values from the specified parent model into the given child model. Implementations are expected to keep
* parent and child completely decoupled by injecting deep copies of objects into the child rather than the original
* objects from the parent.
*
* @param child The child model into which to merge the values inherited from the parent, must not be
* <code>null</code>.
* @param parent The (read-only) parent model from which to inherit the values, may be <code>null</code>.
* @param request The model building request that holds further settings, must not be {@code null}.
* @param problems The container used to collect problems that were encountered, must not be {@code null}.
*/
Model assembleModelInheritance(
Model child, Model parent, ModelBuilderRequest request, ModelProblemCollector problems);
}
| InheritanceAssembler |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/WorkdayEndpointBuilderFactory.java | {
"start": 1558,
"end": 4678
} | interface ____
extends
EndpointProducerBuilder {
default AdvancedWorkdayEndpointBuilder advanced() {
return (AdvancedWorkdayEndpointBuilder) this;
}
/**
* Workday Report as a service output format.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: json
* Group: format
*
* @param reportFormat the value to set
* @return the dsl builder
*/
default WorkdayEndpointBuilder reportFormat(String reportFormat) {
doSetProperty("reportFormat", reportFormat);
return this;
}
/**
* Workday Host name.
*
* The option is a: <code>java.lang.String</code> type.
*
* Required: true
* Group: host
*
* @param host the value to set
* @return the dsl builder
*/
default WorkdayEndpointBuilder host(String host) {
doSetProperty("host", host);
return this;
}
/**
* Workday client Id generated by API client for integrations.
*
* The option is a: <code>java.lang.String</code> type.
*
* Required: true
* Group: security
*
* @param clientId the value to set
* @return the dsl builder
*/
default WorkdayEndpointBuilder clientId(String clientId) {
doSetProperty("clientId", clientId);
return this;
}
/**
* Workday client Secret generated by API client for integrations.
*
* The option is a: <code>java.lang.String</code> type.
*
* Required: true
* Group: security
*
* @param clientSecret the value to set
* @return the dsl builder
*/
default WorkdayEndpointBuilder clientSecret(String clientSecret) {
doSetProperty("clientSecret", clientSecret);
return this;
}
/**
* Workday token Refresh generated for integrations system user.
*
* The option is a: <code>java.lang.String</code> type.
*
* Required: true
* Group: security
*
* @param tokenRefresh the value to set
* @return the dsl builder
*/
default WorkdayEndpointBuilder tokenRefresh(String tokenRefresh) {
doSetProperty("tokenRefresh", tokenRefresh);
return this;
}
/**
* Workday Tenant name.
*
* The option is a: <code>java.lang.String</code> type.
*
* Required: true
* Group: tenant
*
* @param tenant the value to set
* @return the dsl builder
*/
default WorkdayEndpointBuilder tenant(String tenant) {
doSetProperty("tenant", tenant);
return this;
}
}
/**
* Advanced builder for endpoint for the Workday component.
*/
public | WorkdayEndpointBuilder |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/BitFieldArgs.java | {
"start": 17405,
"end": 18320
} | class ____ extends SubCommand {
private final BitFieldType bitFieldType;
private final boolean bitOffset;
private final int offset;
private Get(BitFieldType bitFieldType, boolean bitOffset, int offset) {
LettuceAssert.notNull(bitFieldType, "BitFieldType must not be null");
this.bitFieldType = bitFieldType;
this.bitOffset = bitOffset;
this.offset = offset;
}
@Override
<K, V> void build(CommandArgs<K, V> args) {
args.add(CommandType.GET).add(bitFieldType.asString());
if (bitOffset) {
args.add("#" + Integer.toUnsignedString(offset));
} else {
args.add(Integer.toUnsignedString(offset));
}
}
}
/**
* Representation for the {@code INCRBY} subcommand for {@code BITFIELD}.
*/
private static | Get |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/CanonicalDurationTest.java | {
"start": 6644,
"end": 7233
} | class ____ {
// The 120 is left alone here because 121 can't be converted too.
static final List<Duration> negative = asList(ofSeconds(120), ofSeconds(121));
static final List<Duration> positive = asList(ofMinutes(2), ofMinutes(3));
}
""")
.doTest();
}
@Test
public void fixConstantExpressions() {
helper
.addInputLines(
"A.java",
"""
package a;
import static java.time.Duration.ofSeconds;
import java.time.Duration;
public | A |
java | spring-projects__spring-boot | core/spring-boot/src/main/java/org/springframework/boot/logging/logback/SpringPropertyModel.java | {
"start": 974,
"end": 1569
} | class ____ extends NamedModel {
@SuppressWarnings("NullAway.Init")
private String scope;
@SuppressWarnings("NullAway.Init")
private String defaultValue;
@SuppressWarnings("NullAway.Init")
private String source;
String getScope() {
return this.scope;
}
void setScope(String scope) {
this.scope = scope;
}
String getDefaultValue() {
return this.defaultValue;
}
void setDefaultValue(String defaultValue) {
this.defaultValue = defaultValue;
}
String getSource() {
return this.source;
}
void setSource(String source) {
this.source = source;
}
}
| SpringPropertyModel |
java | hibernate__hibernate-orm | hibernate-testing/src/main/java/org/hibernate/testing/boot/ExtraJavaServicesClassLoaderService.java | {
"start": 387,
"end": 1877
} | class ____ extends ClassLoaderServiceImpl {
private final List<JavaServiceDescriptor<?>> extraJavaServices;
public ExtraJavaServicesClassLoaderService(List<JavaServiceDescriptor<?>> extraJavaServices) {
this.extraJavaServices = extraJavaServices;
}
@Override
public <S> Collection<S> loadJavaServices(Class<S> serviceContract) {
final Collection<S> baseServices = super.loadJavaServices( serviceContract );
final List<S> services = new ArrayList<>( baseServices );
applyExtraJavaServices( serviceContract, services );
return services;
}
private <S> void applyExtraJavaServices(Class<S> serviceContract, List<S> services) {
extraJavaServices.forEach(
(javaServiceDescriptor) -> {
if ( serviceContract.isAssignableFrom( javaServiceDescriptor.role ) ) {
try {
final Object serviceInstance = javaServiceDescriptor.impl.getDeclaredConstructor().newInstance();
//noinspection unchecked
services.add( (S) serviceInstance );
}
catch (NoSuchMethodException | IllegalAccessException e) {
throw new RuntimeException( "Unable to access constructor for specified 'extra' Java service : " + javaServiceDescriptor.impl.getName(), e );
}
catch (InstantiationException | InvocationTargetException e) {
throw new RuntimeException( "Unable to instantiate specified 'extra' Java service : " + javaServiceDescriptor.impl.getName(), e );
}
}
}
);
}
public static | ExtraJavaServicesClassLoaderService |
java | google__dagger | javatests/dagger/internal/codegen/MembersInjectionValidationTest.java | {
"start": 3759,
"end": 4526
} | interface ____ {",
" MembersInjector<Object[]> objectArrayInjector();",
"}");
CompilerTests.daggerCompiler(component)
.compile(
subject -> {
subject.hasErrorCount(1);
subject.hasErrorContaining("Cannot inject members into java.lang.Object[]")
.onSource(component)
.onLineContaining("objectArrayInjector();");
});
}
@Test
public void membersInjectRawType() {
Source component =
CompilerTests.javaSource(
"test.TestComponent",
"package test;",
"",
"import dagger.Component;",
"import java.util.Set;",
"",
"@Component",
" | TestComponent |
java | micronaut-projects__micronaut-core | core-processor/src/main/java/io/micronaut/inject/ast/ElementFactory.java | {
"start": 942,
"end": 1127
} | class ____ the AST
* @param <M> The type that represents a method in the AST
* @param <F> The type that represents a field in the AST
* @author graemerocher
* @since 2.3.0
*/
public | in |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/util/ReflectionUtils.java | {
"start": 21956,
"end": 23079
} | class ____ introspect
* @param name the name of the field (may be {@code null} if type is specified)
* @param type the type of the field (may be {@code null} if name is specified)
* @return the corresponding Field object, or {@code null} if not found
*/
@Contract("_, null, null -> fail")
public static @Nullable Field findField(Class<?> clazz, @Nullable String name, @Nullable Class<?> type) {
Assert.notNull(clazz, "Class must not be null");
Assert.isTrue(name != null || type != null, "Either name or type of the field must be specified");
Class<?> searchType = clazz;
while (Object.class != searchType && searchType != null) {
Field[] fields = getDeclaredFields(searchType);
for (Field field : fields) {
if ((name == null || name.equals(field.getName())) &&
(type == null || type.equals(field.getType()))) {
return field;
}
}
searchType = searchType.getSuperclass();
}
return null;
}
/**
* Attempt to find a {@link Field field} on the supplied {@link Class} with the
* supplied {@code name}. Searches all superclasses up to {@link Object}.
* @param clazz the | to |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/YodaConditionTest.java | {
"start": 5389,
"end": 5720
} | class ____ {
boolean yoda(E a) {
return a.equals(E.A);
}
}
""")
.setFixChooser(FixChoosers.SECOND)
.doTest();
}
@Test
public void nullTolerantFix() {
refactoring
.addInputLines(
"E.java",
"""
| Test |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/invocation/StubInfo.java | {
"start": 241,
"end": 369
} | interface ____ {
/**
* @return the location where the invocation was stubbed.
*/
Location stubbedAt();
}
| StubInfo |
java | grpc__grpc-java | android-interop-testing/src/generated/debug/grpc/io/grpc/testing/integration/LoadBalancerStatsServiceGrpc.java | {
"start": 11280,
"end": 12982
} | class ____
extends io.grpc.stub.AbstractBlockingStub<LoadBalancerStatsServiceBlockingV2Stub> {
private LoadBalancerStatsServiceBlockingV2Stub(
io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
super(channel, callOptions);
}
@java.lang.Override
protected LoadBalancerStatsServiceBlockingV2Stub build(
io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
return new LoadBalancerStatsServiceBlockingV2Stub(channel, callOptions);
}
/**
* <pre>
* Gets the backend distribution for RPCs sent by a test client.
* </pre>
*/
public io.grpc.testing.integration.Messages.LoadBalancerStatsResponse getClientStats(io.grpc.testing.integration.Messages.LoadBalancerStatsRequest request) throws io.grpc.StatusException {
return io.grpc.stub.ClientCalls.blockingV2UnaryCall(
getChannel(), getGetClientStatsMethod(), getCallOptions(), request);
}
/**
* <pre>
* Gets the accumulated stats for RPCs sent by a test client.
* </pre>
*/
public io.grpc.testing.integration.Messages.LoadBalancerAccumulatedStatsResponse getClientAccumulatedStats(io.grpc.testing.integration.Messages.LoadBalancerAccumulatedStatsRequest request) throws io.grpc.StatusException {
return io.grpc.stub.ClientCalls.blockingV2UnaryCall(
getChannel(), getGetClientAccumulatedStatsMethod(), getCallOptions(), request);
}
}
/**
* A stub to allow clients to do limited synchronous rpc calls to service LoadBalancerStatsService.
* <pre>
* A service used to obtain stats for verifying LB behavior.
* </pre>
*/
public static final | LoadBalancerStatsServiceBlockingV2Stub |
java | spring-projects__spring-framework | spring-messaging/src/test/java/org/springframework/messaging/simp/config/MessageBrokerConfigurationTests.java | {
"start": 28372,
"end": 28721
} | class ____ extends BaseDotSeparatorConfig {
// Artemis-style broker convention for STOMP destinations
@Override
protected void configureMessageBroker(MessageBrokerRegistry registry) {
super.configureMessageBroker(registry);
registry.enableSimpleBroker("topic.", "queue.");
}
}
private static | DotSeparatorWithDotBrokerConventionConfig |
java | apache__camel | core/camel-base/src/main/java/org/apache/camel/impl/converter/EnumTypeConverter.java | {
"start": 2821,
"end": 3400
} | enum ____
// (and trim in case there are leading/trailing white-space)
String text = value.toString().trim();
Class<Enum<?>> enumClass = (Class<Enum<?>>) type;
// we want to match case insensitive for enums
for (Enum<?> enumValue : enumClass.getEnumConstants()) {
if (enumValue.name().equalsIgnoreCase(text)) {
return type.cast(enumValue);
}
}
// add support for using dash or camel cased to common used upper cased underscore style for | constant |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/fanout/FanoutPublishArgs.java | {
"start": 890,
"end": 1480
} | interface ____<V> extends SyncArgs<FanoutPublishArgs<V>> {
/**
* Sets the codec to be used for encoding and decoding message headers.
*
* @param codec the codec
* @return arguments object
*/
FanoutPublishArgs<V> headersCodec(Codec codec);
/**
* Defines messages to be added.
*
* @param msgs The message arguments to be added to the queue
* @return arguments object
*/
@SafeVarargs
static <V> FanoutPublishArgs<V> messages(MessageArgs<V>... msgs) {
return new FanoutPublishParams<>(msgs);
}
}
| FanoutPublishArgs |
java | spring-projects__spring-security | web/src/test/java/org/springframework/security/web/util/matcher/RegexRequestMatcherTests.java | {
"start": 1460,
"end": 6319
} | class ____ {
@Mock
private HttpServletRequest request;
@Test
public void doesntMatchIfHttpMethodIsDifferent() {
RegexRequestMatcher matcher = new RegexRequestMatcher(".*", "GET");
MockHttpServletRequest request = new MockHttpServletRequest("POST", "/anything");
assertThat(matcher.matches(request)).isFalse();
}
@Test
public void matchesIfHttpMethodAndPathMatch() {
RegexRequestMatcher matcher = new RegexRequestMatcher(".*", "GET");
MockHttpServletRequest request = get("/anything").build();
assertThat(matcher.matches(request)).isTrue();
}
@Test
public void queryStringIsMatcherCorrectly() {
RegexRequestMatcher matcher = new RegexRequestMatcher(".*\\?x=y", "GET");
MockHttpServletRequest request = new MockHttpServletRequest("GET", "/any/path?x=y");
request.setServletPath("/any");
request.setPathInfo("/path");
request.setQueryString("x=y");
assertThat(matcher.matches(request)).isTrue();
}
@Test
public void requestHasNullMethodMatches() {
RegexRequestMatcher matcher = new RegexRequestMatcher("/something/.*", "GET");
HttpServletRequest request = createRequestWithNullMethod("/something/here");
assertThat(matcher.matches(request)).isTrue();
}
// SEC-2084
@Test
public void requestHasNullMethodNoMatch() {
RegexRequestMatcher matcher = new RegexRequestMatcher("/something/.*", "GET");
HttpServletRequest request = createRequestWithNullMethod("/nomatch");
assertThat(matcher.matches(request)).isFalse();
}
@Test
public void requestHasNullMethodAndNullMatcherMatches() {
RegexRequestMatcher matcher = new RegexRequestMatcher("/something/.*", null);
HttpServletRequest request = createRequestWithNullMethod("/something/here");
assertThat(matcher.matches(request)).isTrue();
}
@Test
public void requestHasNullMethodAndNullMatcherNoMatch() {
RegexRequestMatcher matcher = new RegexRequestMatcher("/something/.*", null);
HttpServletRequest request = createRequestWithNullMethod("/nomatch");
assertThat(matcher.matches(request)).isFalse();
}
// SEC-2831
@Test
public void matchesWithInvalidMethod() {
RegexRequestMatcher matcher = new RegexRequestMatcher("/blah", "GET");
MockHttpServletRequest request = new MockHttpServletRequest("INVALID", "/blah");
request.setMethod("INVALID");
assertThat(matcher.matches(request)).isFalse();
}
@Test
public void matchesWithCarriageReturn() {
RegexRequestMatcher matcher = new RegexRequestMatcher(".*", null);
MockHttpServletRequest request = new MockHttpServletRequest("GET", "/blah%0a");
request.setServletPath("/blah\n");
assertThat(matcher.matches(request)).isTrue();
}
@Test
public void matchesWithLineFeed() {
RegexRequestMatcher matcher = new RegexRequestMatcher(".*", null);
MockHttpServletRequest request = new MockHttpServletRequest("GET", "/blah%0d");
request.setServletPath("/blah\r");
assertThat(matcher.matches(request)).isTrue();
}
@Test
public void toStringThenFormatted() {
RegexRequestMatcher matcher = new RegexRequestMatcher("/blah", "GET");
assertThat(matcher.toString()).isEqualTo("Regex [pattern='/blah', GET]");
}
@Test
public void matchesWhenRequestUriMatchesThenMatchesTrue() {
RegexRequestMatcher matcher = regexMatcher(".*");
MockHttpServletRequest request = new MockHttpServletRequest("GET", "/something/anything");
assertThat(matcher.matches(request)).isTrue();
}
@Test
public void matchesWhenRequestUriDontMatchThenMatchesFalse() {
RegexRequestMatcher matcher = regexMatcher(".*\\?param=value");
MockHttpServletRequest request = new MockHttpServletRequest("GET", "/something/anything");
assertThat(matcher.matches(request)).isFalse();
}
@Test
public void matchesWhenRequestMethodMatchesThenMatchesTrue() {
RegexRequestMatcher matcher = regexMatcher(HttpMethod.GET);
MockHttpServletRequest request = new MockHttpServletRequest("GET", "/something/anything");
assertThat(matcher.matches(request)).isTrue();
}
@Test
public void matchesWhenRequestMethodDontMatchThenMatchesFalse() {
RegexRequestMatcher matcher = regexMatcher(HttpMethod.POST);
MockHttpServletRequest request = new MockHttpServletRequest("GET", "/something/anything");
assertThat(matcher.matches(request)).isFalse();
}
@Test
public void staticRegexMatcherWhenNoPatternThenException() {
assertThatIllegalArgumentException().isThrownBy(() -> regexMatcher((String) null))
.withMessage("pattern cannot be empty");
}
@Test
public void staticRegexMatcherNoMethodThenException() {
assertThatIllegalArgumentException().isThrownBy(() -> regexMatcher((HttpMethod) null))
.withMessage("method cannot be null");
}
private HttpServletRequest createRequestWithNullMethod(String path) {
given(this.request.getQueryString()).willReturn("doesntMatter");
given(this.request.getServletPath()).willReturn(path);
return this.request;
}
}
| RegexRequestMatcherTests |
java | alibaba__fastjson | src/main/java/com/alibaba/fastjson/JSONPath.java | {
"start": 96857,
"end": 98372
} | class ____ implements Segment {
private final int[] indexes;
public MultiIndexSegment(int[] indexes){
this.indexes = indexes;
}
public Object eval(JSONPath path, Object rootObject, Object currentObject) {
List<Object> items = new JSONArray(indexes.length);
for (int i = 0; i < indexes.length; ++i) {
Object item = path.getArrayItem(currentObject, indexes[i]);
items.add(item);
}
return items;
}
public void extract(JSONPath path, DefaultJSONParser parser, Context context) {
if (context.eval) {
Object object = parser.parse();
if (object instanceof List) {
int[] indexes = new int[this.indexes.length];
System.arraycopy(this.indexes, 0, indexes, 0, indexes.length);
boolean noneNegative = indexes[0] >= 0;
List list = (List) object;
if (noneNegative) {
for (int i = list.size() - 1; i >= 0; i--) {
if (Arrays.binarySearch(indexes, i) < 0) {
list.remove(i);
}
}
context.object = list;
return;
}
}
}
throw new UnsupportedOperationException();
}
}
static | MultiIndexSegment |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/ForOverrideCheckerTest.java | {
"start": 2315,
"end": 2664
} | class ____ {
@ForOverride
void myMethod() {}
}
""")
.doTest();
}
@Test
public void cannotApplyForOverrideToPublicMethod() {
compilationHelper
.addSourceLines(
"test/Test.java",
"""
package test;
import com.google.errorprone.annotations.ForOverride;
public | Test |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/OpenshiftBuildsComponentBuilderFactory.java | {
"start": 4615,
"end": 5615
} | class ____
extends AbstractComponentBuilder<OpenshiftBuildsComponent>
implements OpenshiftBuildsComponentBuilder {
@Override
protected OpenshiftBuildsComponent buildConcreteComponent() {
return new OpenshiftBuildsComponent();
}
@Override
protected boolean setPropertyOnComponent(
Component component,
String name,
Object value) {
switch (name) {
case "kubernetesClient": ((OpenshiftBuildsComponent) component).setKubernetesClient((io.fabric8.kubernetes.client.KubernetesClient) value); return true;
case "lazyStartProducer": ((OpenshiftBuildsComponent) component).setLazyStartProducer((boolean) value); return true;
case "autowiredEnabled": ((OpenshiftBuildsComponent) component).setAutowiredEnabled((boolean) value); return true;
default: return false;
}
}
}
} | OpenshiftBuildsComponentBuilderImpl |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/ComposableRecordReader.java | {
"start": 1275,
"end": 1518
} | interface ____<K extends WritableComparable,
V extends Writable>
extends RecordReader<K,V>, Comparable<ComposableRecordReader<K,?>> {
/**
* Return the position in the collector this | ComposableRecordReader |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/lazyload/JtaLazyLoadingTest.java | {
"start": 3437,
"end": 3803
} | class ____ {
@Id
@GeneratedValue
private Long id;
private String name;
@ManyToOne(fetch = FetchType.LAZY)
private Parent parent;
public Child(){}
public Child(Parent parent) {
this.parent = parent;
parent.getChildren().add( this );
}
public Long getId() {
return id;
}
public Parent getParent() {
return parent;
}
}
}
| Child |
java | apache__camel | components/camel-olingo2/camel-olingo2-component/src/main/java/org/apache/camel/component/olingo2/Olingo2AppWrapper.java | {
"start": 1282,
"end": 1430
} | class ____ {@link org.apache.camel.component.olingo2.api.Olingo2App} and its lazily read
* {@link org.apache.olingo.odata2.api.edm.Edm}.
*/
public | for |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/id/enhanced/SequenceStyleGenerator.java | {
"start": 2406,
"end": 4337
} | interface ____.
* <p>
* <table>
* <caption>General configuration parameters</caption>
* <tr>
* <td><b>Parameter name</b></td>
* <td><b>Default value</b></td>
* <td><b>Interpretation</b></td>
* </tr>
* <tr>
* <td>{@value #SEQUENCE_PARAM}</td>
* <td></td>
* <td>The name of the sequence/table to use to store/retrieve values</td>
* </tr>
* <tr>
* <td>{@value #INITIAL_PARAM}</td>
* <td>{@value #DEFAULT_INITIAL_VALUE}</td>
* <td>The initial value to be stored for the given segment;
* the effect in terms of storage varies based on {@link Optimizer}
* and {@link DatabaseStructure}</td>
* </tr>
* <tr>
* <td>{@value #INCREMENT_PARAM}</td>
* <td>{@value #DEFAULT_INCREMENT_SIZE}</td>
* <td>The increment size for the underlying segment;
* the effect in terms of storage varies based on {@link Optimizer}
* and {@link DatabaseStructure}</td>
* </tr>
* <tr>
* <td>{@value #OPT_PARAM}</td>
* <td><em>depends on defined increment size</em></td>
* <td>Allows explicit definition of which optimization strategy to use</td>
* </tr>
* <tr>
* <td>{@value #FORCE_TBL_PARAM}</td>
* <td>{@code false}</td>
* <td>Allows explicit definition of which optimization strategy to use</td>
* </tr>
* </table>
* <p>
* Configuration parameters used specifically when the underlying structure is a table:
* <table>
* <caption>Table configuration parameters</caption>
* <tr>
* <td><b>Parameter name</b></td>
* <td><b>Default value</b></td>
* <td><b>Interpretation</b></td>
* </tr>
* <tr>
* <td>{@value #VALUE_COLUMN_PARAM}</td>
* <td>{@value #DEF_VALUE_COLUMN}</td>
* <td>The name of the column which holds the sequence value for the given segment</td>
* </tr>
* </table>
*
* @author Steve Ebersole
* @author Lukasz Antoniak
*/
public | internally |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/TypeExcludeFilterTests.java | {
"start": 1625,
"end": 2646
} | class ____ {
private @Nullable AnnotationConfigApplicationContext context;
@AfterEach
void cleanUp() {
if (this.context != null) {
this.context.close();
}
}
@Test
void loadsTypeExcludeFilters() {
this.context = new AnnotationConfigApplicationContext();
this.context.getBeanFactory().registerSingleton("filter1", new WithoutMatchOverrideFilter());
this.context.getBeanFactory().registerSingleton("filter2", new SampleTypeExcludeFilter());
this.context.register(Config.class);
this.context.refresh();
assertThat(this.context.getBean(ExampleComponent.class)).isNotNull();
assertThatExceptionOfType(NoSuchBeanDefinitionException.class).isThrownBy(() -> {
assertThat(this.context).isNotNull();
this.context.getBean(ExampleFilteredComponent.class);
});
}
@Configuration(proxyBeanMethods = false)
@ComponentScan(basePackageClasses = SampleTypeExcludeFilter.class,
excludeFilters = @Filter(type = FilterType.CUSTOM, classes = SampleTypeExcludeFilter.class))
static | TypeExcludeFilterTests |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelDefinitionDoc.java | {
"start": 8692,
"end": 10567
} | class ____ {
private String modelId;
private BytesReference binaryData;
private int docNum;
private Long totalDefinitionLength;
private long definitionLength;
private int compressionVersion;
private boolean eos;
public Builder setModelId(String modelId) {
this.modelId = modelId;
return this;
}
public Builder setCompressedString(String compressedString) {
this.binaryData = new BytesArray(Base64.getDecoder().decode(compressedString.getBytes(StandardCharsets.UTF_8)));
return this;
}
public Builder setBinaryData(BytesReference binaryData) {
this.binaryData = binaryData;
return this;
}
public Builder setDocNum(int docNum) {
this.docNum = docNum;
return this;
}
public Builder setTotalDefinitionLength(long totalDefinitionLength) {
this.totalDefinitionLength = totalDefinitionLength;
return this;
}
public Builder setDefinitionLength(long definitionLength) {
this.definitionLength = definitionLength;
return this;
}
public Builder setCompressionVersion(int compressionVersion) {
this.compressionVersion = compressionVersion;
return this;
}
public Builder setEos(boolean eos) {
this.eos = eos;
return this;
}
public TrainedModelDefinitionDoc build() {
return new TrainedModelDefinitionDoc(
this.binaryData,
this.modelId,
this.docNum,
this.totalDefinitionLength,
this.definitionLength,
this.compressionVersion,
this.eos
);
}
}
}
| Builder |
java | apache__kafka | connect/runtime/src/main/java/org/apache/kafka/connect/converters/LongConverter.java | {
"start": 1511,
"end": 1701
} | class ____ extends NumberConverter<Long> {
public LongConverter() {
super("long", Schema.OPTIONAL_INT64_SCHEMA, new LongSerializer(), new LongDeserializer());
}
}
| LongConverter |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineEntityGroupPlugin.java | {
"start": 1238,
"end": 2800
} | class ____ {
/**
* Get the {@link TimelineEntityGroupId}s for the data sets that need to be
* scanned to serve the query.
*
* @param entityType Entity Type being queried
* @param primaryFilter Primary filter being applied
* @param secondaryFilters Secondary filters being applied in the query
* @return {@link org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId}
*/
public abstract Set<TimelineEntityGroupId> getTimelineEntityGroupId(
String entityType, NameValuePair primaryFilter,
Collection<NameValuePair> secondaryFilters);
/**
* Get the {@link TimelineEntityGroupId}s for the data sets that need to be
* scanned to serve the query.
*
* @param entityType Entity Type being queried
* @param entityId Entity Id being requested
* @return {@link org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId}
*/
public abstract Set<TimelineEntityGroupId> getTimelineEntityGroupId(
String entityId,
String entityType);
/**
* Get the {@link TimelineEntityGroupId}s for the data sets that need to be
* scanned to serve the query.
*
* @param entityType Entity Type being queried
* @param entityIds Entity Ids being requested
* @param eventTypes Event Types being requested
* @return {@link org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId}
*/
public abstract Set<TimelineEntityGroupId> getTimelineEntityGroupId(
String entityType, SortedSet<String> entityIds,
Set<String> eventTypes);
}
| TimelineEntityGroupPlugin |
java | apache__avro | lang/java/avro/src/main/java/org/apache/avro/ValidateAll.java | {
"start": 1258,
"end": 1766
} | class ____ implements SchemaValidator {
private final SchemaValidationStrategy strategy;
/**
* @param strategy The strategy to use for validation of pairwise schemas.
*/
public ValidateAll(SchemaValidationStrategy strategy) {
this.strategy = strategy;
}
@Override
public void validate(Schema toValidate, Iterable<Schema> schemasInOrder) throws SchemaValidationException {
for (Schema existing : schemasInOrder) {
strategy.validate(toValidate, existing);
}
}
}
| ValidateAll |
java | netty__netty | handler/src/test/java/io/netty/handler/ssl/BouncyCastleEngineAlpnTest.java | {
"start": 1077,
"end": 2277
} | class ____ {
@Test
public void testBouncyCastleSSLEngineSupportsAlpn() throws Exception {
Provider bouncyCastleProvider = new BouncyCastleJsseProvider();
SSLContext context = SslUtils.getSSLContext(bouncyCastleProvider, new SecureRandom());
SSLEngine engine = context.createSSLEngine();
assertTrue(BouncyCastleUtil.isBcJsseInUse(engine));
assertTrue(BouncyCastleAlpnSslUtils.isAlpnSupported());
BouncyCastleAlpnSslEngine alpnSslEngine = new BouncyCastleAlpnSslEngine(
engine, new JdkAlpnApplicationProtocolNegotiator("fake"), true);
// Call methods to ensure these not throw.
alpnSslEngine.setHandshakeApplicationProtocolSelector(new BiFunction<SSLEngine, List<String>, String>() {
@Override
public String apply(SSLEngine sslEngine, List<String> strings) {
return "fake";
}
});
// Check that none of the methods will throw.
alpnSslEngine.getHandshakeApplicationProtocolSelector();
alpnSslEngine.setNegotiatedApplicationProtocol("fake");
alpnSslEngine.getNegotiatedApplicationProtocol();
}
}
| BouncyCastleEngineAlpnTest |
java | apache__spark | sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/ParquetVectorUpdaterFactory.java | {
"start": 14569,
"end": 15559
} | class ____ implements ParquetVectorUpdater {
@Override
public void readValues(
int total,
int offset,
WritableColumnVector values,
VectorizedValuesReader valuesReader) {
for (int i = 0; i < total; ++i) {
values.putLong(offset + i, valuesReader.readInteger());
}
}
@Override
public void skipValues(int total, VectorizedValuesReader valuesReader) {
valuesReader.skipIntegers(total);
}
@Override
public void readValue(
int offset,
WritableColumnVector values,
VectorizedValuesReader valuesReader) {
values.putLong(offset, valuesReader.readInteger());
}
@Override
public void decodeSingleDictionaryId(
int offset,
WritableColumnVector values,
WritableColumnVector dictionaryIds,
Dictionary dictionary) {
values.putLong(offset, dictionary.decodeToInt(dictionaryIds.getDictId(offset)));
}
}
static | IntegerToLongUpdater |
java | spring-projects__spring-security | ldap/src/integration-test/java/org/springframework/security/ldap/search/FilterBasedLdapUserSearchWithSpacesTests.java | {
"start": 2462,
"end": 3093
} | class ____ implements DisposableBean {
private UnboundIdContainer container;
@Bean
UnboundIdContainer ldapContainer() {
this.container = new UnboundIdContainer("dc=spring framework,dc=org",
"classpath:test-server-with-spaces.ldif");
this.container.setPort(0);
return this.container;
}
@Bean
ContextSource contextSource(UnboundIdContainer ldapContainer) {
return new DefaultSpringSecurityContextSource(
"ldap://127.0.0.1:" + ldapContainer.getPort() + "/dc=spring%20framework,dc=org");
}
@Override
public void destroy() {
this.container.stop();
}
}
}
| UnboundIdContainerWithSpacesConfig |
java | google__guice | extensions/servlet/test/com/google/inject/servlet/VarargsFilterDispatchIntegrationTest.java | {
"start": 799,
"end": 5102
} | class ____ extends TestCase {
private static int inits, doFilters, destroys;
@Override
public final void setUp() {
inits = 0;
doFilters = 0;
destroys = 0;
GuiceFilter.reset();
}
public final void testDispatchRequestToManagedPipeline() throws ServletException, IOException {
final Injector injector =
Guice.createInjector(
new ServletModule() {
@Override
protected void configureServlets() {
// This is actually a double match for "/*"
filter("/*", "*.html", "/*").through(Key.get(TestFilter.class));
// These filters should never fire
filter("/index/*").through(Key.get(TestFilter.class));
filter("*.jsp").through(Key.get(TestFilter.class));
}
});
final FilterPipeline pipeline = injector.getInstance(FilterPipeline.class);
pipeline.initPipeline(null);
// create ourselves a mock request with test URI
HttpServletRequest requestMock = mock(HttpServletRequest.class);
when(requestMock.getRequestURI()).thenReturn("/index.html");
when(requestMock.getContextPath()).thenReturn("");
// dispatch request
pipeline.dispatch(requestMock, null, mock(FilterChain.class));
pipeline.destroyPipeline();
assertTrue(
"lifecycle states did not"
+ " fire correct number of times-- inits: "
+ inits
+ "; dos: "
+ doFilters
+ "; destroys: "
+ destroys,
inits == 1 && doFilters == 3 && destroys == 1);
}
public final void testDispatchThatNoFiltersFire() throws ServletException, IOException {
final Injector injector =
Guice.createInjector(
new ServletModule() {
@Override
protected void configureServlets() {
filter("/public/*", "*.html", "*.xml").through(Key.get(TestFilter.class));
// These filters should never fire
filter("/index/*").through(Key.get(TestFilter.class));
filter("*.jsp").through(Key.get(TestFilter.class));
}
});
final FilterPipeline pipeline = injector.getInstance(FilterPipeline.class);
pipeline.initPipeline(null);
// create ourselves a mock request with test URI
HttpServletRequest requestMock = mock(HttpServletRequest.class);
when(requestMock.getRequestURI()).thenReturn("/index.xhtml");
when(requestMock.getContextPath()).thenReturn("");
// dispatch request
pipeline.dispatch(requestMock, null, mock(FilterChain.class));
pipeline.destroyPipeline();
assertTrue(
"lifecycle states did not "
+ "fire correct number of times-- inits: "
+ inits
+ "; dos: "
+ doFilters
+ "; destroys: "
+ destroys,
inits == 1 && doFilters == 0 && destroys == 1);
}
public final void testDispatchFilterPipelineWithRegexMatching()
throws ServletException, IOException {
final Injector injector =
Guice.createInjector(
new ServletModule() {
@Override
protected void configureServlets() {
filterRegex("/[A-Za-z]*", "/index").through(TestFilter.class);
//these filters should never fire
filterRegex("\\w").through(Key.get(TestFilter.class));
}
});
final FilterPipeline pipeline = injector.getInstance(FilterPipeline.class);
pipeline.initPipeline(null);
// create ourselves a mock request with test URI
HttpServletRequest requestMock = mock(HttpServletRequest.class);
when(requestMock.getRequestURI()).thenReturn("/index");
when(requestMock.getContextPath()).thenReturn("");
// dispatch request
pipeline.dispatch(requestMock, null, mock(FilterChain.class));
pipeline.destroyPipeline();
assertTrue(
"lifecycle states did not fire "
+ "correct number of times-- inits: "
+ inits
+ "; dos: "
+ doFilters
+ "; destroys: "
+ destroys,
inits == 1 && doFilters == 2 && destroys == 1);
}
@Singleton
public static | VarargsFilterDispatchIntegrationTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java | {
"start": 912,
"end": 4576
} | class ____ extends ReplicationRequestBuilder<DeleteRequest, DeleteResponse, DeleteRequestBuilder>
implements
WriteRequestBuilder<DeleteRequestBuilder> {
private String id;
private String routing;
private Long version;
private VersionType versionType;
private Long seqNo;
private Long term;
private WriteRequest.RefreshPolicy refreshPolicy;
@SuppressWarnings("this-escape")
public DeleteRequestBuilder(ElasticsearchClient client, @Nullable String index) {
super(client, TransportDeleteAction.TYPE);
setIndex(index);
}
/**
* Sets the id of the document to delete.
*/
public DeleteRequestBuilder setId(String id) {
this.id = id;
return this;
}
/**
* Controls the shard routing of the delete request. Using this value to hash the shard
* and not the id.
*/
public DeleteRequestBuilder setRouting(String routing) {
this.routing = routing;
return this;
}
/**
* Sets the version, which will cause the delete operation to only be performed if a matching
* version exists and no changes happened on the doc since then.
*/
public DeleteRequestBuilder setVersion(long version) {
this.version = version;
return this;
}
/**
* Sets the type of versioning to use. Defaults to {@link VersionType#INTERNAL}.
*/
public DeleteRequestBuilder setVersionType(VersionType versionType) {
this.versionType = versionType;
return this;
}
/**
* only perform this delete request if the document was last modification was assigned the given
* sequence number. Must be used in combination with {@link #setIfPrimaryTerm(long)}
*
* If the document last modification was assigned a different sequence number a
* {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown.
*/
public DeleteRequestBuilder setIfSeqNo(long seqNo) {
this.seqNo = seqNo;
return this;
}
/**
* only perform this delete request if the document was last modification was assigned the given
* primary term. Must be used in combination with {@link #setIfSeqNo(long)}
*
* If the document last modification was assigned a different term a
* {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown.
*/
public DeleteRequestBuilder setIfPrimaryTerm(long term) {
this.term = term;
return this;
}
@Override
public DeleteRequestBuilder setRefreshPolicy(WriteRequest.RefreshPolicy refreshPolicy) {
this.refreshPolicy = refreshPolicy;
return this;
}
@Override
public DeleteRequestBuilder setRefreshPolicy(String refreshPolicy) {
this.refreshPolicy = WriteRequest.RefreshPolicy.parse(refreshPolicy);
return this;
}
@Override
public DeleteRequest request() {
DeleteRequest request = new DeleteRequest();
super.apply(request);
if (id != null) {
request.id(id);
}
if (routing != null) {
request.routing(routing);
}
if (version != null) {
request.version(version);
}
if (versionType != null) {
request.versionType(versionType);
}
if (seqNo != null) {
request.setIfSeqNo(seqNo);
}
if (term != null) {
request.setIfPrimaryTerm(term);
}
if (refreshPolicy != null) {
request.setRefreshPolicy(refreshPolicy);
}
return request;
}
}
| DeleteRequestBuilder |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/filter/StructuredDataFilter.java | {
"start": 2051,
"end": 7724
} | class ____ extends MapFilter {
private static final int MAX_BUFFER_SIZE = 2048;
private static ThreadLocal<StringBuilder> threadLocalStringBuilder = new ThreadLocal<>();
private StructuredDataFilter(
final Map<String, List<String>> map, final boolean oper, final Result onMatch, final Result onMismatch) {
super(map, oper, onMatch, onMismatch);
}
@Override
public Result filter(
final Logger logger, final Level level, final Marker marker, final Message msg, final Throwable t) {
if (msg instanceof StructuredDataMessage) {
return filter((StructuredDataMessage) msg);
}
return Result.NEUTRAL;
}
@Override
public Result filter(final LogEvent event) {
final Message msg = event.getMessage();
if (msg instanceof StructuredDataMessage) {
return filter((StructuredDataMessage) msg);
}
return super.filter(event);
}
protected Result filter(final StructuredDataMessage message) {
boolean match = false;
final IndexedReadOnlyStringMap map = getStringMap();
for (int i = 0; i < map.size(); i++) {
final StringBuilder toMatch = getValue(message, map.getKeyAt(i));
if (toMatch != null) {
match = listContainsValue((List<String>) map.getValueAt(i), toMatch);
} else {
match = false;
}
if ((!isAnd() && match) || (isAnd() && !match)) {
break;
}
}
return match ? onMatch : onMismatch;
}
private StringBuilder getValue(final StructuredDataMessage data, final String key) {
final StringBuilder sb = getStringBuilder();
if (key.equalsIgnoreCase("id")) {
data.getId().formatTo(sb);
return sb;
} else if (key.equalsIgnoreCase("id.name")) {
return appendOrNull(data.getId().getName(), sb);
} else if (key.equalsIgnoreCase("type")) {
return appendOrNull(data.getType(), sb);
} else if (key.equalsIgnoreCase("message")) {
data.formatTo(sb);
return sb;
} else {
return appendOrNull(data.get(key), sb);
}
}
private StringBuilder getStringBuilder() {
StringBuilder result = threadLocalStringBuilder.get();
if (result == null) {
result = new StringBuilder();
threadLocalStringBuilder.set(result);
}
StringBuilders.trimToMaxSize(result, MAX_BUFFER_SIZE);
result.setLength(0);
return result;
}
private StringBuilder appendOrNull(final String value, final StringBuilder sb) {
if (value == null) {
return null;
}
sb.append(value);
return sb;
}
private boolean listContainsValue(final List<String> candidates, final StringBuilder toMatch) {
if (toMatch == null) {
for (int i = 0; i < candidates.size(); i++) {
final String candidate = candidates.get(i);
if (candidate == null) {
return true;
}
}
} else {
for (int i = 0; i < candidates.size(); i++) {
final String candidate = candidates.get(i);
if (candidate == null) {
return false;
}
if (StringBuilders.equals(candidate, 0, candidate.length(), toMatch, 0, toMatch.length())) {
return true;
}
}
}
return false;
}
/**
* Creates the StructuredDataFilter.
* @param pairs Key and value pairs.
* @param oper The operator to perform. If not "or" the operation will be an "and".
* @param match The action to perform on a match.
* @param mismatch The action to perform on a mismatch.
* @return The StructuredDataFilter.
*/
// TODO Consider refactoring to use AbstractFilter.AbstractFilterBuilder
@PluginFactory
public static StructuredDataFilter createFilter(
@PluginElement("Pairs") final KeyValuePair[] pairs,
@PluginAttribute("operator") final String oper,
@PluginAttribute("onMatch") final Result match,
@PluginAttribute("onMismatch") final Result mismatch) {
if (pairs == null || pairs.length == 0) {
LOGGER.error("keys and values must be specified for the StructuredDataFilter");
return null;
}
final Map<String, List<String>> map = new HashMap<>();
for (final KeyValuePair pair : pairs) {
final String key = pair.getKey();
if (key == null) {
LOGGER.error("A null key is not valid in MapFilter");
continue;
}
final String value = pair.getValue();
if (value == null) {
LOGGER.error("A null value for key " + key + " is not allowed in MapFilter");
continue;
}
List<String> list = map.get(pair.getKey());
if (list != null) {
list.add(value);
} else {
list = new ArrayList<>();
list.add(value);
map.put(pair.getKey(), list);
}
}
if (map.isEmpty()) {
LOGGER.error("StructuredDataFilter is not configured with any valid key value pairs");
return null;
}
final boolean isAnd = oper == null || !oper.equalsIgnoreCase("or");
return new StructuredDataFilter(map, isAnd, match, mismatch);
}
}
| StructuredDataFilter |
java | quarkusio__quarkus | independent-projects/arc/processor/src/main/java/io/quarkus/arc/processor/ObserverRegistrar.java | {
"start": 295,
"end": 956
} | interface ____ extends BuildContext {
/**
* Configure a new synthetic observer. The observer is not added to the deployment unless the
* {@link ObserverConfigurator#done()} method is called.
*
* @return a new synthetic observer configurator
*/
ObserverConfigurator configure();
/**
* The returned stream contains all non-synthetic beans (beans derived from classes) and beans
* registered by other {@link ObserverRegistrar}s before the stream is created.
*
* @return a new stream of beans
*/
BeanStream beans();
}
}
| RegistrationContext |
java | apache__spark | common/kvstore/src/test/java/org/apache/spark/util/kvstore/LevelDBIteratorSuite.java | {
"start": 1069,
"end": 1622
} | class ____ extends DBIteratorSuite {
private static File dbpath;
private static LevelDB db;
@AfterAll
public static void cleanup() throws Exception {
if (db != null) {
db.close();
}
if (dbpath != null) {
JavaUtils.deleteQuietly(dbpath);
}
}
@Override
protected KVStore createStore() throws Exception {
assumeFalse(SparkSystemUtils$.MODULE$.isMacOnAppleSilicon());
dbpath = File.createTempFile("test.", ".ldb");
dbpath.delete();
db = new LevelDB(dbpath);
return db;
}
}
| LevelDBIteratorSuite |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/FallThroughTest.java | {
"start": 8330,
"end": 8934
} | class ____ {
void f(int x) {
switch (x) {
case 0:
{
// fall through
}
case 1:
{
System.err.println();
// fall through
}
case 2:
break;
}
}
}
""")
.doTest();
}
@Test
public void emptyBlock() {
testHelper
.addSourceLines(
"Test.java",
"""
| Test |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/internal/util/concurrent/DetachedThreadLocal.java | {
"start": 3975,
"end": 4045
} | enum ____ {
THREAD,
INLINE,
MANUAL
}
}
| Cleaner |
java | elastic__elasticsearch | x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/action/CloudProviders.java | {
"start": 370,
"end": 12336
} | class ____ {
private CloudProviders() {
// no instances intended
}
public record Provider(double pue, Map<String, Double> co2TonsPerKWH) {}
private static Provider getProvider(String providerName) {
return PROVIDERS.get(providerName);
}
/**
* Returns the PUE for the given provider, or the default value if the provider is unknown.
* PUE stands for Power Usage Effectiveness, and is a measure of how much power is used by
* the datacenter infrastructure (cooling, lighting, etc.) vs. the IT equipment (servers, etc.).
* A PUE of 1.0 means that all power is used by the IT equipment, and a PUE of 2.0 means that
* half of the power is used by the IT equipment and half is used by the infrastructure.
* See also https://en.wikipedia.org/wiki/Power_usage_effectiveness .
*
* @param providerName The name of the provider.
* Currently supported providers are "aws", "gcp", and "azure".
* If the provider is unknown, the default value is returned.
* @param defaultPUEValue The default value to return if the provider is unknown.
* @return The PUE for the given provider, or the default value if the provider is unknown.
*/
public static double getPUEOrDefault(String providerName, double defaultPUEValue) {
Provider provider = getProvider(providerName);
if (provider == null) {
return defaultPUEValue;
}
return provider.pue;
}
/**
* Returns the CO2 emission factor for the given provider and region, or the default value if
* the provider or region is unknown. The CO2 emission factor is the amount of CO2 emitted per
* kWh of electricity consumed and measured in metric tons.
*
* @param providerName The name of the provider.
* Currently supported providers are "aws", "gcp", and "azure".
* If the provider is unknown, the default value is returned.
* @param region The name of the region.
* If the region is unknown, the default value is returned.
* @param defaultCO2Value The default value to return if the provider or region is unknown.
* @return The CO2 emission factor for the given provider and region, or the default value if
* the provider or region is unknown.
*/
public static double getCO2TonsPerKWHOrDefault(String providerName, String region, double defaultCO2Value) {
Provider provider = getProvider(providerName);
if (provider == null) {
return defaultCO2Value;
}
return provider.co2TonsPerKWH.getOrDefault(region, defaultCO2Value);
}
// The following data taken from https://www.cloudcarbonfootprint.org/docs/methodology/
// and updated from https://github.com/PaoloFrigo/cloud-carbon-footprint .
// License: Apache 2.0
// Copyright: Cloud Carbon Footprint, (C) 2021 Thoughtworks, Inc.
private static final Map<String, Provider> PROVIDERS;
static {
// noinspection (explicit type arguments speedup compilation and analysis time)
PROVIDERS = Map.of(
"aws",
new Provider(
1.135d,
Map.ofEntries(
entry("us-east-1", 0.000379069d),
entry("us-east-2", 0.000410608d),
entry("us-west-1", 0.000322167d),
entry("us-west-2", 0.000322167d),
entry("us-gov-east-1", 0.000379069d),
entry("us-gov-west-1", 0.000322167d),
entry("af-south-1", 0.0009006d),
entry("ap-east-1", 0.00071d),
entry("ap-south-1", 0.0007082d),
entry("ap-northeast-3", 0.0004658d),
entry("ap-northeast-2", 0.0004156d),
entry("ap-southeast-1", 0.000408d),
entry("ap-southeast-2", 0.00076d),
entry("ap-southeast-3", 0.0007177d),
entry("ap-northeast-1", 0.0004658d),
entry("ca-central-1", 0.00012d),
entry("cn-north-1", 0.0005374d),
entry("cn-northwest-1", 0.0005374d),
entry("eu-central-1", 0.000311d),
entry("eu-west-1", 0.0002786d),
entry("eu-west-2", 0.000225d),
entry("eu-south-1", 0.0002134d),
entry("eu-west-3", 0.0000511d),
entry("eu-north-1", 0.0000088d),
entry("me-south-1", 0.0005059d),
entry("me-central-1", 0.0004041),
entry("sa-east-1", 0.0000617d)
)
),
// noinspection (explicit type arguments speedup compilation and analysis time)
"gcp",
new Provider(
1.1d,
// These emission factors take into account Google Carbon Free Energy percentage in each region.
// Source: https://cloud.google.com/sustainability/region-carbon
Map.ofEntries(
entry("us-central1", 0.0002152373529d),
entry("us-central2", 0.0002152373529d),
entry("us-east1", 0.0003255d),
entry("us-east4", 0.00011124d),
entry("us-east5", 0.00011124d),
entry("us-west1", 0.0000072d),
entry("us-west2", 0.0000893d),
entry("us-west3", 0.00030912d),
entry("us-west4", 0.00028835d),
entry("us-south1", 0.0001776d),
entry("asia-east1", 0.00037848d),
entry("asia-east2", 0.0002592d),
entry("asia-northeast1", 0.00038976d),
entry("asia-northeast2", 0.00026496d),
entry("asia-northeast3", 0.00029325d),
entry("asia-south1", 0.000603d),
entry("asia-south2", 0.00061732d),
entry("asia-southeast1", 0.00035712d),
entry("asia-southeast2", 0.0005046d),
entry("australia-southeast1", 0.00047242d),
entry("australia-southeast2", 0.00035949d),
entry("europe-central2", 0.0004608d),
entry("europe-north1", 0.00001143d),
entry("europe-southwest1", 0.000121d),
entry("europe-west1", 0.0000198d),
entry("europe-west2", 0.00007396d),
entry("europe-west3", 0.0001076),
entry("europe-west4", 0.00013301d),
entry("europe-west6", 0.0000129d),
entry("europe-west8", 0.000298d),
entry("europe-west9", 0.000059d),
entry("northamerica-northeast1", 0d), // Montreal is 100% CFE
entry("northamerica-northeast2", 0.00000232d),
entry("southamerica-east1", 0.00002838d),
entry("southamerica-west1", 0.0000589d)
)
),
"azure",
new Provider(
1.185d,
Map.<String, Double>ofEntries(
entry("centralus", 0.000426254d),
entry("centraluseuap", 0.000426254d),
entry("centralusestage", 0.000426254d),
entry("eastus", 0.000379069d),
entry("useast", 0.000379069d),
entry("eastusstage", 0.000379069d),
entry("eastus2", 0.000379069d),
entry("useast2", 0.000379069d),
entry("eastus2euap", 0.000379069d),
entry("eastus2stage", 0.000379069d),
entry("eastus3", 0.000379069d),
entry("usnorth", 0.000410608d),
entry("northcentralus", 0.000410608d),
entry("northcentralusstage", 0.000410608d),
entry("southcentralus", 0.000373231d),
entry("southcentralusstage", 0.000373231d),
entry("unitedstates", 0.000373231d),
entry("unitedstateseuap", 0.000373231d),
entry("westcentralus", 0.000322167d),
entry("westcentralusstage", 0.000322167d),
entry("westus", 0.000322167d),
entry("westusstage", 0.000322167d),
entry("westus2", 0.000322167d),
entry("westus2stage", 0.000322167d),
entry("westus3", 0.000322167d),
entry("asia", 0.0005647d),
entry("asiapacific", 0.0005647d),
entry("eastasia", 0.00071d),
entry("eastasiastage", 0.00071d),
entry("southeastasia", 0.000408d),
entry("asiasoutheast", 0.000408d),
entry("southafricanorth", 0.0009006d),
entry("southafricawest", 0.0009006d),
entry("southafrica", 0.0009006d),
entry("australia", 0.00079d),
entry("australiacentral", 0.00079d),
entry("australiacentral2", 0.00079d),
entry("australiaeast", 0.00079d),
entry("australiasoutheast", 0.00096d),
entry("apeast", 0.00071d),
entry("apsoutheast", 0.000408d),
entry("japan", 0.0004658d),
entry("japanwest", 0.0004658d),
entry("japaneast", 0.0004658d),
entry("korea", 0.0004156d),
entry("koreaeast", 0.0004156d),
entry("koreasouth", 0.0004156d),
entry("india", 0.0007082d),
entry("indiacentral", 0.0007082d),
entry("centralindia", 0.0007082d),
entry("jioindiacentral", 0.0007082d),
entry("indiawest", 0.0007082d),
entry("westindia", 0.0007082d),
entry("jioindiawest", 0.0007082d),
entry("indiasouth", 0.0007082d),
entry("southindia", 0.0007082d),
entry("northeurope", 0.0002786d),
entry("europenorth", 0.0002786d),
entry("westeurope", 0.0003284d),
entry("europewest", 0.0003284d),
entry("france", 0.00005128d),
entry("francecentral", 0.00005128d),
entry("francesouth", 0.00005128d),
entry("swedencentral", 0.00000567d),
entry("switzerland", 0.00001152d),
entry("switzerlandnorth", 0.00001152d),
entry("switzerlandwest", 0.00001152d),
entry("uk", 0.000225d),
entry("uksouth", 0.000225d),
entry("ukwest", 0.000228d),
entry("germany", 0.00033866d),
entry("germanynorth", 0.00033866d),
entry("germanywestcentral", 0.00033866d),
entry("norway", 0.00000762d),
entry("norwayeast", 0.00000762d),
entry("norwaywest", 0.00000762d),
entry("uae", 0.0004041d),
entry("uaenorth", 0.0004041d),
entry("uaecentral", 0.0004041d),
entry("canada", 0.00012d),
entry("canadacentral", 0.00012d),
entry("canadaeast", 0.00012d),
entry("brazil", 0.0000617d),
entry("brazilsouth", 0.0000617d),
entry("brazilsoutheast", 0.0000617d)
)
)
);
}
}
| CloudProviders |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/annotations/SQLDelete.java | {
"start": 1309,
"end": 1561
} | interface ____ {
/**
* Procedure name or SQL {@code DELETE} statement.
*/
String sql();
/**
* Is the statement callable (aka a {@link java.sql.CallableStatement})?
*/
boolean callable() default false;
/**
* An {@link Expectation} | SQLDelete |
java | google__guava | android/guava/src/com/google/common/collect/Multimaps.java | {
"start": 40314,
"end": 44759
} | interface ____
* instances of {@code SortedSet}, {@code Set}, {@code List} and {@code Collection}, in that order
* of preference.
*
* @param collection the collection for which to return an unmodifiable view
* @return an unmodifiable view of the collection
*/
private static <V extends @Nullable Object> Collection<V> unmodifiableValueCollection(
Collection<V> collection) {
if (collection instanceof SortedSet) {
return Collections.unmodifiableSortedSet((SortedSet<V>) collection);
} else if (collection instanceof Set) {
return Collections.unmodifiableSet((Set<V>) collection);
} else if (collection instanceof List) {
return Collections.unmodifiableList((List<V>) collection);
}
return Collections.unmodifiableCollection(collection);
}
/**
* Returns an unmodifiable view of the specified collection of entries. The {@link Entry#setValue}
* operation throws an {@link UnsupportedOperationException}. If the specified collection is a
* {@code Set}, the returned collection is also a {@code Set}.
*
* @param entries the entries for which to return an unmodifiable view
* @return an unmodifiable view of the entries
*/
private static <K extends @Nullable Object, V extends @Nullable Object>
Collection<Entry<K, V>> unmodifiableEntries(Collection<Entry<K, V>> entries) {
if (entries instanceof Set) {
return Maps.unmodifiableEntrySet((Set<Entry<K, V>>) entries);
}
return new Maps.UnmodifiableEntries<>(Collections.unmodifiableCollection(entries));
}
/**
* Returns {@link ListMultimap#asMap multimap.asMap()}, with its type corrected from {@code Map<K,
* Collection<V>>} to {@code Map<K, List<V>>}.
*
* @since 15.0
*/
@SuppressWarnings("unchecked")
// safe by specification of ListMultimap.asMap()
public static <K extends @Nullable Object, V extends @Nullable Object> Map<K, List<V>> asMap(
ListMultimap<K, V> multimap) {
return (Map<K, List<V>>) (Map<K, ?>) multimap.asMap();
}
/**
* Returns {@link SetMultimap#asMap multimap.asMap()}, with its type corrected from {@code Map<K,
* Collection<V>>} to {@code Map<K, Set<V>>}.
*
* @since 15.0
*/
@SuppressWarnings("unchecked")
// safe by specification of SetMultimap.asMap()
public static <K extends @Nullable Object, V extends @Nullable Object> Map<K, Set<V>> asMap(
SetMultimap<K, V> multimap) {
return (Map<K, Set<V>>) (Map<K, ?>) multimap.asMap();
}
/**
* Returns {@link SortedSetMultimap#asMap multimap.asMap()}, with its type corrected from {@code
* Map<K, Collection<V>>} to {@code Map<K, SortedSet<V>>}.
*
* @since 15.0
*/
@SuppressWarnings("unchecked")
// safe by specification of SortedSetMultimap.asMap()
public static <K extends @Nullable Object, V extends @Nullable Object> Map<K, SortedSet<V>> asMap(
SortedSetMultimap<K, V> multimap) {
return (Map<K, SortedSet<V>>) (Map<K, ?>) multimap.asMap();
}
/**
* Returns {@link Multimap#asMap multimap.asMap()}. This is provided for parity with the other
* more strongly-typed {@code asMap()} implementations.
*
* @since 15.0
*/
public static <K extends @Nullable Object, V extends @Nullable Object>
Map<K, Collection<V>> asMap(Multimap<K, V> multimap) {
return multimap.asMap();
}
/**
* Returns a multimap view of the specified map. The multimap is backed by the map, so changes to
* the map are reflected in the multimap, and vice versa. If the map is modified while an
* iteration over one of the multimap's collection views is in progress (except through the
* iterator's own {@code remove} operation, or through the {@code setValue} operation on a map
* entry returned by the iterator), the results of the iteration are undefined.
*
* <p>The multimap supports mapping removal, which removes the corresponding mapping from the map.
* It does not support any operations which might add mappings, such as {@code put}, {@code
* putAll} or {@code replaceValues}.
*
* <p>The returned multimap will be serializable if the specified map is serializable.
*
* @param map the backing map for the returned multimap view
*/
public static <K extends @Nullable Object, V extends @Nullable Object> SetMultimap<K, V> forMap(
Map<K, V> map) {
return new MapMultimap<>(map);
}
/**
* @see Multimaps#forMap
*/
private static final | for |
java | resilience4j__resilience4j | resilience4j-cache/src/test/java/io/github/resilience4j/cache/internal/ComputeIfAbsentTest.java | {
"start": 2150,
"end": 2462
} | class ____ implements CheckedSupplier<String> {
private final String suppliedValue;
SpyableSupplier(String suppliedValue) {
this.suppliedValue = suppliedValue;
}
@Override
public String get() {
return suppliedValue;
}
}
}
| SpyableSupplier |
java | quarkusio__quarkus | extensions/mongodb-client/runtime/src/test/java/io/quarkus/mongodb/reactive/MongoWithReplicasTestBase.java | {
"start": 1382,
"end": 8196
} | class ____ {
private static final Logger LOGGER = Logger.getLogger(MongoWithReplicasTestBase.class);
private static List<TransitionWalker.ReachedState<RunningMongodProcess>> startedServers = Collections.emptyList();
@BeforeAll
public static void startMongoDatabase() {
String uri = getConfiguredConnectionString();
// This switch allow testing against a running mongo database.
if (uri == null) {
startedServers = startReplicaSet(Version.Main.V7_0, 27018, "test001");
} else {
LOGGER.infof("Using existing Mongo %s", uri);
}
}
private static Net net(String hostName, int port) {
return Net.builder()
.from(Net.defaults())
.bindIp(hostName)
.port(port)
.build();
}
private static List<TransitionWalker.ReachedState<RunningMongodProcess>> startReplicaSet(
IFeatureAwareVersion version, int basePort, String replicaSet) {
TransitionWalker.ReachedState<RunningMongodProcess> firstStarted = mongodWithPort(basePort, replicaSet).start(version);
try {
TransitionWalker.ReachedState<RunningMongodProcess> secondStarted = mongodWithPort(basePort + 1, replicaSet)
.start(version);
try {
ServerAddress firstAddress = firstStarted.current().getServerAddress();
ServerAddress secondAddress = secondStarted.current().getServerAddress();
initializeReplicaSet(Arrays.asList(firstAddress, secondAddress), replicaSet);
LOGGER.infof("ReplicaSet initialized with servers - firstServer: %s , secondServer: %s",
firstAddress, secondAddress);
return Arrays.asList(secondStarted, firstStarted);
} catch (Exception ex) {
LOGGER.error("Shutting down second Mongo Server.");
secondStarted.close();
LOGGER.errorv(ex, "Error while initializing replicaSet. Error Message %s", ex.getMessage());
throw new RuntimeException("Error starting second server and initializing replicaset.", ex);
}
} catch (RuntimeException rx) {
LOGGER.error("Shutting down first Mongo Server.");
firstStarted.close();
throw rx;
}
}
private static Mongod mongodWithPort(int port, String replicaSet) {
return Mongod.instance().withNet(Start.to(Net.class).initializedWith(net("localhost", port)))
.withProcessOutput(Start.to(ProcessOutput.class).initializedWith(ProcessOutput.silent()))
.withMongodArguments(Start.to(MongodArguments.class).initializedWith(
MongodArguments.defaults().withArgs(Map.of("--replSet", replicaSet)).withSyncDelay(10)
.withUseSmallFiles(true).withUseNoJournal(false)))
.withProcessConfig(
Start.to(ProcessConfig.class)
.initializedWith(ProcessConfig.defaults().withStopTimeoutInMillis(15_000)));
}
@AfterAll
public static void stopMongoDatabase() {
for (TransitionWalker.ReachedState<RunningMongodProcess> startedServer : startedServers) {
try {
startedServer.close();
} catch (RuntimeException rx) {
LOGGER.error("startedServer.close", rx);
}
}
}
protected String getConnectionString() {
if (getConfiguredConnectionString() != null) {
return getConfiguredConnectionString();
} else {
return "mongodb://localhost:27018";
}
}
private static void initializeReplicaSet(final List<ServerAddress> mongodConfigList, String replicaSet)
throws UnknownHostException {
String arbitrerAddress = "mongodb://" + mongodConfigList.get(0).getHost() + ":"
+ mongodConfigList.get(0).getPort();
MongoClientSettings mo = MongoClientSettings.builder()
.applyConnectionString(new ConnectionString(arbitrerAddress)).build();
try (MongoClient mongo = MongoClients.create(mo)) {
MongoDatabase mongoAdminDB = mongo.getDatabase("admin");
Document cr = mongoAdminDB.runCommand(new Document("isMaster", 1));
LOGGER.infof("isMaster: %s", cr);
// Build replica set configuration settings
Document rsConfiguration = buildReplicaSetConfiguration(mongodConfigList, replicaSet);
LOGGER.infof("replSetSettings: %s", rsConfiguration);
// Initialize replica set
cr = mongoAdminDB.runCommand(new Document("replSetInitiate", rsConfiguration));
LOGGER.infof("replSetInitiate: %s", cr);
// Check replica set status before to proceed
Awaitility.await().atMost(ONE_MINUTE).with().pollInterval(ONE_SECOND).until(() -> {
Document result = mongoAdminDB.runCommand(new Document("replSetGetStatus", 1));
LOGGER.infof("replSetGetStatus: %s", result);
boolean replicaSetStatus = isReplicaSetStarted(result);
LOGGER.infof("replicaSet Readiness Status: %s", replicaSetStatus);
return replicaSetStatus;
});
LOGGER.info("ReplicaSet is now ready with 2 cluster node.");
}
}
private static Document buildReplicaSetConfiguration(List<ServerAddress> configList, String replicaSet)
throws UnknownHostException {
Document replicaSetSetting = new Document();
replicaSetSetting.append("_id", replicaSet);
List<Document> members = new ArrayList<>();
int i = 0;
for (ServerAddress mongoConfig : configList) {
members.add(new Document().append("_id", i++).append("host", mongoConfig.getHost() + ":" + mongoConfig.getPort()));
}
replicaSetSetting.append("members", members);
LOGGER.infof("ReplicaSet Configuration settings: %s", replicaSetSetting);
return replicaSetSetting;
}
private static boolean isReplicaSetStarted(Document setting) {
if (!setting.containsKey("members")) {
return false;
}
@SuppressWarnings("unchecked")
List<Document> members = setting.get("members", List.class);
for (Document member : members) {
LOGGER.infof("replica set member %s", member);
int state = member.getInteger("state");
LOGGER.infof("state: %s", state);
// 1 - PRIMARY, 2 - SECONDARY, 7 - ARBITER
if (state != 1 && state != 2 && state != 7) {
return false;
}
}
return true;
}
}
| MongoWithReplicasTestBase |
java | google__guava | android/guava/src/com/google/common/cache/LocalCache.java | {
"start": 133763,
"end": 133918
} | class ____ extends HashIterator<V> {
@Override
public V next() {
return nextEntry().getValue();
}
}
/**
* Custom Entry | ValueIterator |
java | apache__kafka | metadata/src/test/java/org/apache/kafka/controller/ClusterControlManagerTest.java | {
"start": 4043,
"end": 60397
} | class ____ {
@Test
public void testReplay() {
MockTime time = new MockTime(0, 0, 0);
SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
FeatureControlManager featureControl = new FeatureControlManager.Builder().
setSnapshotRegistry(snapshotRegistry).
setQuorumFeatures(new QuorumFeatures(0,
QuorumFeatures.defaultSupportedFeatureMap(true),
List.of(0))).
build();
ClusterControlManager clusterControl = new ClusterControlManager.Builder().
setTime(time).
setSnapshotRegistry(snapshotRegistry).
setSessionTimeoutNs(1000).
setFeatureControlManager(featureControl).
setBrokerShutdownHandler((brokerId, isCleanShutdown, records) -> { }).
build();
clusterControl.activate();
assertFalse(clusterControl.isUnfenced(0));
RegisterBrokerRecord brokerRecord = new RegisterBrokerRecord().setBrokerEpoch(100).setBrokerId(1);
brokerRecord.endPoints().add(new BrokerEndpoint().
setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).
setPort((short) 9092).
setName("PLAINTEXT").
setHost("example.com"));
clusterControl.replay(brokerRecord, 100L);
clusterControl.checkBrokerEpoch(1, 100);
assertThrows(StaleBrokerEpochException.class,
() -> clusterControl.checkBrokerEpoch(1, 101));
assertThrows(StaleBrokerEpochException.class,
() -> clusterControl.checkBrokerEpoch(2, 100));
assertFalse(clusterControl.isUnfenced(0));
assertFalse(clusterControl.isUnfenced(1));
BrokerRegistrationChangeRecord changeRecord =
new BrokerRegistrationChangeRecord().setBrokerId(1).setBrokerEpoch(100).setFenced(BrokerRegistrationFencingChange.UNFENCE.value());
clusterControl.replay(changeRecord);
assertFalse(clusterControl.isUnfenced(0));
assertTrue(clusterControl.isUnfenced(1));
changeRecord =
new BrokerRegistrationChangeRecord().setBrokerId(1).setBrokerEpoch(100).setFenced(BrokerRegistrationFencingChange.FENCE.value());
clusterControl.replay(changeRecord);
assertFalse(clusterControl.isUnfenced(0));
assertFalse(clusterControl.isUnfenced(1));
}
@Test
public void testReplayRegisterBrokerRecord() {
MockTime time = new MockTime(0, 0, 0);
SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
FeatureControlManager featureControl = new FeatureControlManager.Builder().
setSnapshotRegistry(snapshotRegistry).
setQuorumFeatures(new QuorumFeatures(0,
QuorumFeatures.defaultSupportedFeatureMap(true),
List.of(0))).
build();
ClusterControlManager clusterControl = new ClusterControlManager.Builder().
setClusterId("fPZv1VBsRFmnlRvmGcOW9w").
setTime(time).
setSnapshotRegistry(snapshotRegistry).
setSessionTimeoutNs(1000).
setFeatureControlManager(featureControl).
setBrokerShutdownHandler((brokerId, isCleanShutdown, records) -> { }).
build();
assertFalse(clusterControl.isUnfenced(0));
assertFalse(clusterControl.inControlledShutdown(0));
RegisterBrokerRecord brokerRecord = new RegisterBrokerRecord().
setBrokerEpoch(100).
setBrokerId(0).
setRack(null).
setFenced(true).
setInControlledShutdown(true);
brokerRecord.endPoints().add(new BrokerEndpoint().
setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).
setPort((short) 9092).
setName("PLAINTEXT").
setHost("example.com"));
clusterControl.replay(brokerRecord, 100L);
assertFalse(clusterControl.isUnfenced(0));
assertTrue(clusterControl.inControlledShutdown(0));
brokerRecord.setInControlledShutdown(false);
clusterControl.replay(brokerRecord, 100L);
assertFalse(clusterControl.isUnfenced(0));
assertFalse(clusterControl.inControlledShutdown(0));
assertEquals(100L, clusterControl.registerBrokerRecordOffset(brokerRecord.brokerId()).getAsLong());
brokerRecord.setFenced(false);
clusterControl.replay(brokerRecord, 100L);
assertTrue(clusterControl.isUnfenced(0));
assertFalse(clusterControl.inControlledShutdown(0));
}
@Test
public void testReplayBrokerRegistrationChangeRecord() {
MockTime time = new MockTime(0, 0, 0);
SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
FeatureControlManager featureControl = new FeatureControlManager.Builder().
setSnapshotRegistry(snapshotRegistry).
setQuorumFeatures(new QuorumFeatures(0,
QuorumFeatures.defaultSupportedFeatureMap(true),
List.of(0))).
build();
ClusterControlManager clusterControl = new ClusterControlManager.Builder().
setClusterId("fPZv1VBsRFmnlRvmGcOW9w").
setTime(time).
setSnapshotRegistry(snapshotRegistry).
setSessionTimeoutNs(1000).
setFeatureControlManager(featureControl).
setBrokerShutdownHandler((brokerId, isCleanShutdown, records) -> { }).
build();
assertFalse(clusterControl.isUnfenced(0));
assertFalse(clusterControl.inControlledShutdown(0));
RegisterBrokerRecord brokerRecord = new RegisterBrokerRecord().
setBrokerEpoch(100).
setBrokerId(0).
setRack(null).
setFenced(false);
brokerRecord.endPoints().add(new BrokerEndpoint().
setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).
setPort((short) 9092).
setName("PLAINTEXT").
setHost("example.com"));
clusterControl.replay(brokerRecord, 100L);
assertTrue(clusterControl.isUnfenced(0));
assertFalse(clusterControl.inControlledShutdown(0));
BrokerRegistrationChangeRecord registrationChangeRecord = new BrokerRegistrationChangeRecord()
.setBrokerId(0)
.setBrokerEpoch(100)
.setInControlledShutdown(BrokerRegistrationInControlledShutdownChange.IN_CONTROLLED_SHUTDOWN.value());
clusterControl.replay(registrationChangeRecord);
assertTrue(clusterControl.isUnfenced(0));
assertTrue(clusterControl.inControlledShutdown(0));
registrationChangeRecord = new BrokerRegistrationChangeRecord()
.setBrokerId(0)
.setBrokerEpoch(100)
.setFenced(BrokerRegistrationFencingChange.UNFENCE.value());
clusterControl.replay(registrationChangeRecord);
assertTrue(clusterControl.isUnfenced(0));
assertTrue(clusterControl.inControlledShutdown(0));
}
@Test
public void testRegistrationWithIncorrectClusterId() {
SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
FeatureControlManager featureControl = new FeatureControlManager.Builder().
setSnapshotRegistry(snapshotRegistry).
setQuorumFeatures(new QuorumFeatures(0,
QuorumFeatures.defaultSupportedFeatureMap(true),
List.of(0))).
build();
ClusterControlManager clusterControl = new ClusterControlManager.Builder().
setClusterId("fPZv1VBsRFmnlRvmGcOW9w").
setTime(new MockTime(0, 0, 0)).
setSnapshotRegistry(snapshotRegistry).
setSessionTimeoutNs(1000).
setFeatureControlManager(featureControl).
setBrokerShutdownHandler((brokerId, isCleanShutdown, records) -> { }).
build();
clusterControl.activate();
assertThrows(InconsistentClusterIdException.class, () ->
clusterControl.registerBroker(new BrokerRegistrationRequestData().
setClusterId("WIjw3grwRZmR2uOpdpVXbg").
setBrokerId(0).
setRack(null).
setIncarnationId(Uuid.fromString("0H4fUu1xQEKXFYwB1aBjhg")),
123L,
new FinalizedControllerFeatures(Map.of(), 456L),
false));
}
private static Stream<Arguments> metadataVersions() {
return Stream.of(
MetadataVersion.MINIMUM_VERSION,
MetadataVersion.IBP_3_7_IV2, // introduces directory assignment
MetadataVersion.latestTesting()
).map(Arguments::of);
}
@ParameterizedTest
@MethodSource("metadataVersions")
public void testRegisterBrokerRecordVersion(MetadataVersion metadataVersion) {
SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
FeatureControlManager featureControl = new FeatureControlManager.Builder().
setSnapshotRegistry(snapshotRegistry).
setQuorumFeatures(new QuorumFeatures(0,
QuorumFeatures.defaultSupportedFeatureMap(true),
List.of(0))).
build();
featureControl.replay(new FeatureLevelRecord().
setName(MetadataVersion.FEATURE_NAME).
setFeatureLevel(metadataVersion.featureLevel()));
ClusterControlManager clusterControl = new ClusterControlManager.Builder().
setClusterId("fPZv1VBsRFmnlRvmGcOW9w").
setTime(new MockTime(0, 0, 0)).
setSnapshotRegistry(snapshotRegistry).
setSessionTimeoutNs(1000).
setFeatureControlManager(featureControl).
setBrokerShutdownHandler((brokerId, isCleanShutdown, records) -> { }).
build();
clusterControl.activate();
List<Uuid> logDirs = metadataVersion.isDirectoryAssignmentSupported() ? List.of(
Uuid.fromString("63k9SN1nQOS0dFHSCIMA0A"),
Uuid.fromString("Vm1MjsOCR1OjDDydOsDbzg")
) : List.of();
ControllerResult<BrokerRegistrationReply> result = clusterControl.registerBroker(
new BrokerRegistrationRequestData().
setClusterId("fPZv1VBsRFmnlRvmGcOW9w").
setBrokerId(0).
setLogDirs(logDirs).
setRack(null).
setFeatures(new BrokerRegistrationRequestData.FeatureCollection(
Set.of(new BrokerRegistrationRequestData.Feature().
setName(MetadataVersion.FEATURE_NAME).
setMinSupportedVersion(metadataVersion.featureLevel()).
setMaxSupportedVersion(metadataVersion.featureLevel())).iterator())).
setIncarnationId(Uuid.fromString("0H4fUu1xQEKXFYwB1aBjhg")),
123L,
new FinalizedControllerFeatures(Map.of(MetadataVersion.FEATURE_NAME, metadataVersion.featureLevel()), 456L),
false);
short expectedVersion = metadataVersion.registerBrokerRecordVersion();
assertEquals(
List.of(new ApiMessageAndVersion(new RegisterBrokerRecord().
setBrokerEpoch(123L).
setBrokerId(0).
setRack(null).
setIncarnationId(Uuid.fromString("0H4fUu1xQEKXFYwB1aBjhg")).
setFenced(true).
setLogDirs(logDirs).
setFeatures(new RegisterBrokerRecord.BrokerFeatureCollection(List.of(
new RegisterBrokerRecord.BrokerFeature().
setName(MetadataVersion.FEATURE_NAME).
setMinSupportedVersion(metadataVersion.featureLevel()).
setMaxSupportedVersion(metadataVersion.featureLevel())).iterator())).
setInControlledShutdown(false), expectedVersion)),
result.records());
}
// Verifies that replaying an UnregisterBrokerRecord removes both the broker's
// in-memory registration and its tracked registration-record offset.
@Test
public void testUnregister() {
// Registration record for broker 1 with a single PLAINTEXT endpoint.
RegisterBrokerRecord brokerRecord = new RegisterBrokerRecord().
setBrokerId(1).
setBrokerEpoch(100).
setIncarnationId(Uuid.fromString("fPZv1VBsRFmnlRvmGcOW9w")).
setRack("arack");
brokerRecord.endPoints().add(new BrokerEndpoint().
setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).
setPort((short) 9092).
setName("PLAINTEXT").
setHost("example.com"));
SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
FeatureControlManager featureControl = new FeatureControlManager.Builder().
setSnapshotRegistry(snapshotRegistry).
setQuorumFeatures(new QuorumFeatures(0,
QuorumFeatures.defaultSupportedFeatureMap(true),
List.of(0))).
build();
ClusterControlManager clusterControl = new ClusterControlManager.Builder().
setTime(new MockTime(0, 0, 0)).
setSnapshotRegistry(snapshotRegistry).
setSessionTimeoutNs(1000).
setFeatureControlManager(featureControl).
setBrokerShutdownHandler((brokerId, isCleanShutdown, records) -> { }).
build();
clusterControl.activate();
// Replay the registration at offset 100; the broker shows up fenced and
// not in controlled shutdown.
clusterControl.replay(brokerRecord, 100L);
assertEquals(new BrokerRegistration.Builder().
setId(1).
setEpoch(100).
setIncarnationId(Uuid.fromString("fPZv1VBsRFmnlRvmGcOW9w")).
setListeners(Map.of("PLAINTEXT",
new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "example.com", 9092))).
setRack(Optional.of("arack")).
setFenced(true).
setInControlledShutdown(false).build(),
clusterControl.brokerRegistrations().get(1));
// The offset at which the registration record was replayed is remembered.
assertEquals(100L, clusterControl.registerBrokerRecordOffset(brokerRecord.brokerId()).getAsLong());
// Unregistering drops both the registration and the remembered offset.
UnregisterBrokerRecord unregisterRecord = new UnregisterBrokerRecord().
setBrokerId(1).
setBrokerEpoch(100);
clusterControl.replay(unregisterRecord);
assertFalse(clusterControl.brokerRegistrations().containsKey(1));
assertFalse(clusterControl.registerBrokerRecordOffset(brokerRecord.brokerId()).isPresent());
}
/**
 * Registers {@code numUsableBrokers} unfenced brokers and then asks the replica
 * placer, 100 times, for a single 3-replica partition assignment. Each returned
 * assignment must consist of distinct, registered broker ids.
 */
@ParameterizedTest
@ValueSource(ints = {3, 10})
public void testPlaceReplicas(int numUsableBrokers) {
    MockTime time = new MockTime(0, 0, 0);
    SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
    FeatureControlManager featureControl = new FeatureControlManager.Builder().
        setSnapshotRegistry(snapshotRegistry).
        setQuorumFeatures(new QuorumFeatures(0,
            QuorumFeatures.defaultSupportedFeatureMap(true),
            List.of(0))).
        build();
    ClusterControlManager clusterControl = new ClusterControlManager.Builder().
        setTime(time).
        setSnapshotRegistry(snapshotRegistry).
        setSessionTimeoutNs(1000).
        setFeatureControlManager(featureControl).
        setBrokerShutdownHandler((brokerId, isCleanShutdown, records) -> { }).
        build();
    clusterControl.activate();
    // Register, unfence, and heartbeat every broker so all are usable for placement.
    for (int brokerId = 0; brokerId < numUsableBrokers; brokerId++) {
        RegisterBrokerRecord registration =
            new RegisterBrokerRecord().setBrokerEpoch(100).setBrokerId(brokerId);
        registration.endPoints().add(new BrokerEndpoint().
            setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).
            setPort((short) 9092).
            setName("PLAINTEXT").
            setHost("example.com"));
        clusterControl.replay(registration, 100L);
        clusterControl.replay(new UnfenceBrokerRecord().setId(brokerId).setEpoch(100));
        clusterControl.heartbeatManager().touch(brokerId, false, 0);
    }
    for (int brokerId = 0; brokerId < numUsableBrokers; brokerId++) {
        assertTrue(clusterControl.isUnfenced(brokerId),
            String.format("broker %d was not unfenced.", brokerId));
    }
    // A stateless view of the cluster for the placer; every broker's default
    // directory is reported as UNASSIGNED.
    ClusterDescriber describer = new ClusterDescriber() {
        @Override
        public Iterator<UsableBroker> usableBrokers() {
            return clusterControl.usableBrokers();
        }

        @Override
        public Uuid defaultDir(int brokerId) {
            return DirectoryId.UNASSIGNED;
        }
    };
    for (int attempt = 0; attempt < 100; attempt++) {
        List<PartitionAssignment> assignments = clusterControl.replicaPlacer().
            place(new PlacementSpec(0, 1, (short) 3), describer).
            assignments();
        // Every chosen replica must be a distinct, known broker id.
        Set<Integer> chosen = new HashSet<>();
        for (Integer replica : assignments.get(0).replicas()) {
            assertTrue(replica >= 0);
            assertTrue(replica < numUsableBrokers);
            assertTrue(chosen.add(replica));
        }
    }
}
// Verifies that BrokerRegistration.toRecord() round-trips the replayed state:
// broker 0 (unfenced, in controlled shutdown), broker 1 (unfenced), and
// broker 2 (still fenced) must each serialize to the expected RegisterBrokerRecord.
@Test
public void testRegistrationsToRecords() {
MetadataVersion metadataVersion = MetadataVersion.MINIMUM_VERSION;
MockTime time = new MockTime(0, 0, 0);
SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
FeatureControlManager featureControl = new FeatureControlManager.Builder().
setSnapshotRegistry(snapshotRegistry).
setQuorumFeatures(new QuorumFeatures(0,
QuorumFeatures.defaultSupportedFeatureMap(true),
List.of(0))).
build();
featureControl.replay(new FeatureLevelRecord().
setName(MetadataVersion.FEATURE_NAME).
setFeatureLevel(metadataVersion.featureLevel()));
ClusterControlManager clusterControl = new ClusterControlManager.Builder().
setTime(time).
setSnapshotRegistry(snapshotRegistry).
setSessionTimeoutNs(1000).
setFeatureControlManager(featureControl).
setBrokerShutdownHandler((brokerId, isCleanShutdown, records) -> { }).
build();
clusterControl.activate();
assertFalse(clusterControl.isUnfenced(0));
// Register brokers 0..2, each with its own port (9092, 9093, 9094).
for (int i = 0; i < 3; i++) {
RegisterBrokerRecord brokerRecord = new RegisterBrokerRecord().
setBrokerEpoch(100).setBrokerId(i).setRack(null);
brokerRecord.endPoints().add(new BrokerEndpoint().
setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).
setPort((short) 9092 + i).
setName("PLAINTEXT").
setHost("example.com"));
clusterControl.replay(brokerRecord, 100L);
}
// Unfence brokers 0 and 1 only; broker 2 remains fenced.
for (int i = 0; i < 2; i++) {
UnfenceBrokerRecord unfenceBrokerRecord =
new UnfenceBrokerRecord().setId(i).setEpoch(100);
clusterControl.replay(unfenceBrokerRecord);
}
// Put broker 0 into controlled shutdown.
BrokerRegistrationChangeRecord registrationChangeRecord =
new BrokerRegistrationChangeRecord().
setBrokerId(0).
setBrokerEpoch(100).
setInControlledShutdown(BrokerRegistrationInControlledShutdownChange.
IN_CONTROLLED_SHUTDOWN.value());
clusterControl.replay(registrationChangeRecord);
short expectedVersion = metadataVersion.registerBrokerRecordVersion();
ImageWriterOptions options = new ImageWriterOptions.Builder(metadataVersion).
setLossHandler(__ -> { }).
build();
// Broker 0: unfenced and in controlled shutdown.
assertEquals(new ApiMessageAndVersion(new RegisterBrokerRecord().
setBrokerEpoch(100).setBrokerId(0).setRack(null).
setEndPoints(new BrokerEndpointCollection(Set.of(
new BrokerEndpoint().setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).
setPort((short) 9092).
setName("PLAINTEXT").
setHost("example.com")).iterator())).
setInControlledShutdown(true).
setFenced(false), expectedVersion),
clusterControl.brokerRegistrations().get(0).toRecord(options));
// Broker 1: unfenced only.
assertEquals(new ApiMessageAndVersion(new RegisterBrokerRecord().
setBrokerEpoch(100).setBrokerId(1).setRack(null).
setEndPoints(new BrokerEndpointCollection(Set.of(
new BrokerEndpoint().setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).
setPort((short) 9093).
setName("PLAINTEXT").
setHost("example.com")).iterator())).
setFenced(false), expectedVersion),
clusterControl.brokerRegistrations().get(1).toRecord(options));
// Broker 2: never unfenced.
assertEquals(new ApiMessageAndVersion(new RegisterBrokerRecord().
setBrokerEpoch(100).setBrokerId(2).setRack(null).
setEndPoints(new BrokerEndpointCollection(Set.of(
new BrokerEndpoint().setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).
setPort((short) 9094).
setName("PLAINTEXT").
setHost("example.com")).iterator())).
setFenced(true), expectedVersion),
clusterControl.brokerRegistrations().get(2).toRecord(options));
}
// Verifies that registration is rejected with UnsupportedVersionException when
// the broker's request omits a feature (test.feature.version) that the
// controller has already finalized at level 1.
@Test
public void testRegistrationWithUnsupportedFeature() {
SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
// The quorum supports both metadata.version and test.feature.version.
Map<String, VersionRange> supportedFeatures = new HashMap<>();
supportedFeatures.put(MetadataVersion.FEATURE_NAME, VersionRange.of(
MetadataVersion.MINIMUM_VERSION.featureLevel(),
MetadataVersion.IBP_3_7_IV0.featureLevel()));
supportedFeatures.put(TestFeatureVersion.FEATURE_NAME, VersionRange.of(
TestFeatureVersion.TEST_0.featureLevel(),
TestFeatureVersion.TEST_1.featureLevel()));
FeatureControlManager featureControl = new FeatureControlManager.Builder().
setSnapshotRegistry(snapshotRegistry).
setQuorumFeatures(new QuorumFeatures(0, supportedFeatures, List.of(0))).
build();
featureControl.replay(new FeatureLevelRecord().
setName(MetadataVersion.FEATURE_NAME).
setFeatureLevel(MetadataVersion.IBP_3_7_IV0.featureLevel()));
ClusterControlManager clusterControl = new ClusterControlManager.Builder().
setClusterId("fPZv1VBsRFmnlRvmGcOW9w").
setTime(new MockTime(0, 0, 0)).
setSnapshotRegistry(snapshotRegistry).
setFeatureControlManager(featureControl).
setBrokerShutdownHandler((brokerId, isCleanShutdown, records) -> { }).
build();
clusterControl.activate();
// Finalize test.feature.version at level 1 before the broker registers.
FeatureLevelRecord testFeatureRecord = new FeatureLevelRecord().
setName(TestFeatureVersion.FEATURE_NAME).setFeatureLevel((short) 1);
featureControl.replay(testFeatureRecord);
List<Uuid> logDirs = List.of(Uuid.fromString("yJGxmjfbQZSVFAlNM3uXZg"), Uuid.fromString("Mj3CW3OSRi29cFeNJlXuAQ"));
BrokerRegistrationRequestData baseRequest = new BrokerRegistrationRequestData().
setClusterId("fPZv1VBsRFmnlRvmGcOW9w").
setBrokerId(0).
setRack(null).
setIncarnationId(Uuid.fromString("0H4fUu1xQEKXFYwB1aBjhg")).
setLogDirs(logDirs);
// The request only advertises metadata.version, so the finalized
// test.feature.version level 1 cannot be satisfied (absent => 0..0).
assertEquals("Unable to register because the broker does not support finalized version 1 of " +
"test.feature.version. The broker wants a version between 0 and 0, inclusive.",
assertThrows(UnsupportedVersionException.class,
() -> clusterControl.registerBroker(
baseRequest.setFeatures(new BrokerRegistrationRequestData.FeatureCollection(
Set.of(new BrokerRegistrationRequestData.Feature().
setName(MetadataVersion.FEATURE_NAME).
setMinSupportedVersion(MetadataVersion.MINIMUM_VERSION.featureLevel()).
setMaxSupportedVersion(MetadataVersion.IBP_3_7_IV0.featureLevel())).iterator())),
123L,
featureControl.finalizedFeatures(Long.MAX_VALUE),
false)).getMessage());
}
// Verifies kraft.version handling during registration: a broker that does not
// advertise kraft.version (or advertises only version 0) is rejected when the
// controller finalizes kraft.version 1, while a broker advertising version 1
// registers successfully.
@Test
public void testRegistrationWithUnsupportedKraftVersion() {
SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
Map<String, VersionRange> supportedFeatures = new HashMap<>();
supportedFeatures.put(MetadataVersion.FEATURE_NAME, VersionRange.of(
MetadataVersion.MINIMUM_VERSION.featureLevel(),
MetadataVersion.IBP_3_9_IV0.featureLevel()));
supportedFeatures.put(KRaftVersion.FEATURE_NAME, VersionRange.of(
KRaftVersion.KRAFT_VERSION_1.featureLevel(),
KRaftVersion.KRAFT_VERSION_1.featureLevel()));
FeatureControlManager featureControl = new FeatureControlManager.Builder().
setSnapshotRegistry(snapshotRegistry).
setQuorumFeatures(new QuorumFeatures(0, supportedFeatures, List.of(0))).
build();
featureControl.replay(new FeatureLevelRecord().
setName(MetadataVersion.FEATURE_NAME).
setFeatureLevel(MetadataVersion.IBP_3_9_IV0.featureLevel()));
ClusterControlManager clusterControl = new ClusterControlManager.Builder().
setClusterId("fPZv1VBsRFmnlRvmGcOW9w").
setTime(new MockTime(0, 0, 0)).
setSnapshotRegistry(snapshotRegistry).
setFeatureControlManager(featureControl).
setBrokerShutdownHandler((brokerId, isCleanShutdown, records) -> { }).
build();
clusterControl.activate();
List<Uuid> logDirs = List.of(Uuid.fromString("yJGxmjfbQZSVFAlNM3uXZg"), Uuid.fromString("Mj3CW3OSRi29cFeNJlXuAQ"));
BrokerRegistrationRequestData baseRequest = new BrokerRegistrationRequestData().
setClusterId("fPZv1VBsRFmnlRvmGcOW9w").
setBrokerId(0).
setRack(null).
setIncarnationId(Uuid.fromString("0H4fUu1xQEKXFYwB1aBjhg")).
setLogDirs(logDirs);
// quorum controller passes in the latest kraft version to populate finalized features
Map<String, Short> updatedFeaturesMap = new HashMap<>(featureControl.finalizedFeatures(Long.MAX_VALUE).featureMap());
updatedFeaturesMap.put(KRaftVersion.FEATURE_NAME, KRaftVersion.KRAFT_VERSION_1.featureLevel());
FinalizedControllerFeatures updatedFinalizedFeatures = new FinalizedControllerFeatures(updatedFeaturesMap, Long.MAX_VALUE);
// Case 1: the request omits kraft.version entirely (absent => 0..0) and is rejected.
assertEquals("Unable to register because the broker does not support finalized version 1 of " +
"kraft.version. The broker wants a version between 0 and 0, inclusive.",
assertThrows(UnsupportedVersionException.class,
() -> clusterControl.registerBroker(
baseRequest.setFeatures(new BrokerRegistrationRequestData.FeatureCollection(
Set.of(new BrokerRegistrationRequestData.Feature().
setName(MetadataVersion.FEATURE_NAME).
setMinSupportedVersion(MetadataVersion.IBP_3_9_IV0.featureLevel()).
setMaxSupportedVersion(MetadataVersion.IBP_3_9_IV0.featureLevel())).iterator())),
123L,
updatedFinalizedFeatures,
false)).getMessage());
// Case 2: the request advertises only kraft.version 0 and is also rejected.
assertEquals("Unable to register because the broker does not support finalized version 1 of " +
"kraft.version. The broker wants a version between 0 and 0, inclusive.",
assertThrows(UnsupportedVersionException.class,
() -> clusterControl.registerBroker(
baseRequest.setFeatures(new BrokerRegistrationRequestData.FeatureCollection(
List.of(
new BrokerRegistrationRequestData.Feature().
setName(MetadataVersion.FEATURE_NAME).
setMinSupportedVersion(MetadataVersion.IBP_3_9_IV0.featureLevel()).
setMaxSupportedVersion(MetadataVersion.IBP_3_9_IV0.featureLevel()),
new BrokerRegistrationRequestData.Feature().
setName(KRaftVersion.FEATURE_NAME).
setMinSupportedVersion(KRaftVersion.KRAFT_VERSION_0.featureLevel()).
setMaxSupportedVersion(KRaftVersion.KRAFT_VERSION_0.featureLevel())).iterator())),
123L,
updatedFinalizedFeatures,
false)).getMessage());
// Case 3: advertising kraft.version 1 matches the finalized level; registration succeeds.
clusterControl.registerBroker(
baseRequest.setFeatures(new BrokerRegistrationRequestData.FeatureCollection(
List.of(
new BrokerRegistrationRequestData.Feature().
setName(MetadataVersion.FEATURE_NAME).
setMinSupportedVersion(MetadataVersion.IBP_3_9_IV0.featureLevel()).
setMaxSupportedVersion(MetadataVersion.IBP_3_9_IV0.featureLevel()),
new BrokerRegistrationRequestData.Feature().
setName(KRaftVersion.FEATURE_NAME).
setMinSupportedVersion(KRaftVersion.KRAFT_VERSION_1.featureLevel()).
setMaxSupportedVersion(KRaftVersion.KRAFT_VERSION_1.featureLevel())).iterator())),
123L,
updatedFinalizedFeatures,
false);
}
// Verifies that registration fails with UnsupportedVersionException when the
// broker's supported metadata.version range does not include the finalized
// level (3.5-IV0, feature level 9), whether the broker's range is too low at
// the minimum version or at 3.4-IV0.
@Test
public void testRegistrationWithUnsupportedMetadataVersion() {
SnapshotRegistry snapshotRegistry = new SnapshotRegistry(new LogContext());
FeatureControlManager featureControl = new FeatureControlManager.Builder().
setSnapshotRegistry(snapshotRegistry).
setQuorumFeatures(new QuorumFeatures(0,
Map.of(MetadataVersion.FEATURE_NAME, VersionRange.of(
MetadataVersion.IBP_3_5_IV0.featureLevel(),
MetadataVersion.IBP_3_6_IV0.featureLevel())),
List.of(0))).
build();
// Finalize metadata.version at 3.5-IV0 (feature level 9).
featureControl.replay(new FeatureLevelRecord().
setName(MetadataVersion.FEATURE_NAME).
setFeatureLevel(MetadataVersion.IBP_3_5_IV0.featureLevel()));
ClusterControlManager clusterControl = new ClusterControlManager.Builder().
setClusterId("fPZv1VBsRFmnlRvmGcOW9w").
setTime(new MockTime(0, 0, 0)).
setSnapshotRegistry(snapshotRegistry).
setFeatureControlManager(featureControl).
setBrokerShutdownHandler((brokerId, isCleanShutdown, records) -> { }).
build();
clusterControl.activate();
// Broker only supports the minimum metadata.version (level 7): rejected.
assertEquals("Unable to register because the broker does not support finalized version 9 of " +
"metadata.version. The broker wants a version between 7 and 7, inclusive.",
assertThrows(UnsupportedVersionException.class,
() -> clusterControl.registerBroker(
new BrokerRegistrationRequestData().
setClusterId("fPZv1VBsRFmnlRvmGcOW9w").
setBrokerId(0).
setRack(null).
setFeatures(new BrokerRegistrationRequestData.FeatureCollection(
Set.of(new BrokerRegistrationRequestData.Feature().
setName(MetadataVersion.FEATURE_NAME).
setMinSupportedVersion(MetadataVersion.MINIMUM_VERSION.featureLevel()).
setMaxSupportedVersion(MetadataVersion.MINIMUM_VERSION.featureLevel())).iterator())).
setIncarnationId(Uuid.fromString("0H4fUu1xQEKXFYwB1aBjhg")),
123L,
featureControl.finalizedFeatures(Long.MAX_VALUE),
false)).getMessage());
// Broker only supports 3.4-IV0 (level 8): still below the finalized level 9.
assertEquals("Unable to register because the broker does not support finalized version 9 of " +
"metadata.version. The broker wants a version between 8 and 8, inclusive.",
assertThrows(UnsupportedVersionException.class,
() -> clusterControl.registerBroker(
new BrokerRegistrationRequestData().
setClusterId("fPZv1VBsRFmnlRvmGcOW9w").
setBrokerId(0).
setRack(null).
setFeatures(new BrokerRegistrationRequestData.FeatureCollection(
Set.of(new BrokerRegistrationRequestData.Feature().
setName(MetadataVersion.FEATURE_NAME).
setMinSupportedVersion(MetadataVersion.IBP_3_4_IV0.featureLevel()).
setMaxSupportedVersion(MetadataVersion.IBP_3_4_IV0.featureLevel())).iterator())).
setIncarnationId(Uuid.fromString("0H4fUu1xQEKXFYwB1aBjhg")),
123L,
featureControl.finalizedFeatures(Long.MAX_VALUE),
false)).getMessage());
}
/**
 * With metadata.version finalized at 3.6-IV2, attempting to register a
 * controller must fail with an UnsupportedVersionException carrying the
 * expected message.
 */
@Test
public void testRegisterControlWithUnsupportedMetadataVersion() {
    FeatureControlManager featureControl = new FeatureControlManager.Builder().build();
    featureControl.replay(new FeatureLevelRecord().
        setName(MetadataVersion.FEATURE_NAME).
        setFeatureLevel(MetadataVersion.IBP_3_6_IV2.featureLevel()));
    ClusterControlManager clusterControl = new ClusterControlManager.Builder().
        setClusterId("fPZv1VBsRFmnlRvmGcOW9w").
        setFeatureControlManager(featureControl).
        setBrokerShutdownHandler((brokerId, isCleanShutdown, records) -> { }).
        build();
    clusterControl.activate();
    UnsupportedVersionException exception = assertThrows(
        UnsupportedVersionException.class,
        () -> clusterControl.registerController(
            new ControllerRegistrationRequestData().setControllerId(1)));
    assertEquals("The current MetadataVersion is too old to support controller registrations.",
        exception.getMessage());
}
// Verifies directory-id validation on registration: re-registering the same
// broker with the same directories is allowed, but an empty directory list,
// a directory already claimed by another broker, a reserved sentinel id, or a
// duplicated id in the request are all rejected with InvalidRegistrationException.
@Test
public void testRegisterWithDuplicateDirectoryId() {
ClusterControlManager clusterControl = new ClusterControlManager.Builder().
setClusterId("QzZZEtC7SxucRM29Xdzijw").
setFeatureControlManager(createFeatureControlManager()).
setBrokerShutdownHandler((brokerId, isCleanShutdown, records) -> { }).
build();
// Broker 0 already owns two directories before the registrations under test.
RegisterBrokerRecord brokerRecord = new RegisterBrokerRecord().setBrokerEpoch(100).setBrokerId(0).setLogDirs(List.of(
Uuid.fromString("yJGxmjfbQZSVFAlNM3uXZg"),
Uuid.fromString("Mj3CW3OSRi29cFeNJlXuAQ")
));
brokerRecord.endPoints().add(new BrokerEndpoint().setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).setPort((short) 9092).setName("PLAINTEXT").setHost("127.0.0.1"));
clusterControl.replay(brokerRecord, 100L);
clusterControl.activate();
assertDoesNotThrow(() ->
registerNewBrokerWithDirs(clusterControl, 0, List.of(Uuid.fromString("yJGxmjfbQZSVFAlNM3uXZg"), Uuid.fromString("Mj3CW3OSRi29cFeNJlXuAQ"))),
"it should be possible to re-register the same broker with the same directories"
);
// An empty directory list is not a valid registration.
assertEquals("No directories specified in request", assertThrows(InvalidRegistrationException.class, () ->
registerNewBrokerWithDirs(clusterControl, 1, List.of())
).getMessage());
// A directory claimed by broker 0 cannot be registered by broker 1.
assertEquals("Broker 0 is already registered with directory Mj3CW3OSRi29cFeNJlXuAQ", assertThrows(InvalidRegistrationException.class, () ->
registerNewBrokerWithDirs(clusterControl, 1, List.of(Uuid.fromString("TyNK6XSSQJaJc2q9uflNHg"), Uuid.fromString("Mj3CW3OSRi29cFeNJlXuAQ")))
).getMessage());
// Reserved sentinel ids such as UNASSIGNED may not appear in the request.
assertEquals("Reserved directory ID in request", assertThrows(InvalidRegistrationException.class, () ->
registerNewBrokerWithDirs(clusterControl, 1, List.of(Uuid.fromString("TyNK6XSSQJaJc2q9uflNHg"), DirectoryId.UNASSIGNED))
).getMessage());
// The same directory id may not appear twice in one request.
assertEquals("Duplicate directory ID in request", assertThrows(InvalidRegistrationException.class, () ->
registerNewBrokerWithDirs(clusterControl, 1, List.of(Uuid.fromString("aR6lssMrSeyXRf65hiUovQ"), Uuid.fromString("aR6lssMrSeyXRf65hiUovQ")))
).getMessage());
}
/**
 * Registers a broker through {@code clusterControl.registerBroker} with the
 * given log directories and immediately replays the resulting records, so the
 * registration is visible to subsequent assertions.
 *
 * The broker advertises only the minimum metadata.version, and the finalized
 * features passed to the controller match it.
 */
void registerNewBrokerWithDirs(ClusterControlManager clusterControl, int brokerId, List<Uuid> dirs) {
    BrokerRegistrationRequestData.FeatureCollection features =
        new BrokerRegistrationRequestData.FeatureCollection(
            Set.of(new BrokerRegistrationRequestData.Feature().
                setName(MetadataVersion.FEATURE_NAME).
                setMinSupportedVersion(MetadataVersion.MINIMUM_VERSION.featureLevel()).
                setMaxSupportedVersion(MetadataVersion.MINIMUM_VERSION.featureLevel())).iterator());
    BrokerRegistrationRequestData request = new BrokerRegistrationRequestData().
        setBrokerId(brokerId).
        setClusterId(clusterControl.clusterId()).
        setIncarnationId(new Uuid(brokerId, brokerId)).
        setFeatures(features).
        setLogDirs(dirs);
    FinalizedControllerFeatures finalizedFeatures = new FinalizedControllerFeatures(
        Map.of(MetadataVersion.FEATURE_NAME, MetadataVersion.MINIMUM_VERSION.featureLevel()), 456L);
    ControllerResult<BrokerRegistrationReply> result =
        clusterControl.registerBroker(request, 123L, finalizedFeatures, false);
    RecordTestUtils.replayAll(clusterControl, result.records());
}
/**
 * Exercises {@code hasOnlineDir}: registered directories of a broker are
 * online, the UNASSIGNED and MIGRATING sentinel ids are treated as online for
 * a registered broker, while unknown directories, unknown brokers, and the
 * LOST sentinel are not.
 */
@Test
public void testHasOnlineDir() {
    ClusterControlManager clusterControl = new ClusterControlManager.Builder().
        setClusterId("pjvUwj3ZTEeSVQmUiH3IJw").
        setFeatureControlManager(createFeatureControlManager()).
        setBrokerShutdownHandler((brokerId, isCleanShutdown, records) -> { }).
        build();
    clusterControl.activate();
    Uuid firstDir = Uuid.fromString("dir1SEbpRuG1dcpTRGOvJw");
    Uuid secondDir = Uuid.fromString("dir2xaEwR2m3JHTiy7PWwA");
    registerNewBrokerWithDirs(clusterControl, 1, List.of(firstDir, secondDir));
    // Both registered directories are online, via the registration and the manager.
    assertTrue(clusterControl.registration(1).hasOnlineDir(firstDir));
    assertTrue(clusterControl.hasOnlineDir(1, firstDir));
    assertTrue(clusterControl.hasOnlineDir(1, secondDir));
    // The UNASSIGNED and MIGRATING sentinels count as online for broker 1.
    assertTrue(clusterControl.hasOnlineDir(1, DirectoryId.UNASSIGNED));
    assertTrue(clusterControl.hasOnlineDir(1, DirectoryId.MIGRATING));
    // Unregistered directories, unknown brokers, and LOST are offline.
    assertFalse(clusterControl.hasOnlineDir(1, Uuid.fromString("otherAA1QFK4U1GWzkjZ5A")));
    assertFalse(clusterControl.hasOnlineDir(77, Uuid.fromString("8xVRVs6UQHGVonA9SRYseQ")));
    assertFalse(clusterControl.hasOnlineDir(1, DirectoryId.LOST));
}
/**
 * Exercises {@code defaultDir}: a broker registered with an empty directory
 * list maps to MIGRATING, a broker with exactly one directory maps to that
 * directory, and brokers with several directories or no registration at all
 * map to UNASSIGNED.
 */
@Test
public void testDefaultDir() {
    ClusterControlManager clusterControl = new ClusterControlManager.Builder().
        setClusterId("pjvUwj3ZTEeSVQmUiH3IJw").
        setFeatureControlManager(createFeatureControlManager()).
        setBrokerShutdownHandler((brokerId, isCleanShutdown, records) -> { }).
        build();
    clusterControl.activate();
    // Broker 1 registers with an empty directory list.
    RegisterBrokerRecord noDirsRecord =
        new RegisterBrokerRecord().setBrokerEpoch(100).setBrokerId(1).setLogDirs(List.of());
    noDirsRecord.endPoints().add(new BrokerEndpoint().
        setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).
        setPort((short) 9092).
        setName("PLAINTEXT").
        setHost("127.0.0.1"));
    clusterControl.replay(noDirsRecord, 100L);
    // Broker 2 has a single directory; broker 3 has two; broker 4 never registers.
    registerNewBrokerWithDirs(clusterControl, 2, List.of(Uuid.fromString("singleOnlineDirectoryA")));
    registerNewBrokerWithDirs(clusterControl, 3, List.of(
        Uuid.fromString("s4fRmyNFSH6J0vI8AVA5ew"),
        Uuid.fromString("UbtxBcqYSnKUEMcnTyZFWw")));
    assertEquals(DirectoryId.MIGRATING, clusterControl.defaultDir(1));
    assertEquals(Uuid.fromString("singleOnlineDirectoryA"), clusterControl.defaultDir(2));
    assertEquals(DirectoryId.UNASSIGNED, clusterControl.defaultDir(3));
    assertEquals(DirectoryId.UNASSIGNED, clusterControl.defaultDir(4));
}
// Verifies epoch handling on re-registration: registering again with a NEW
// incarnation id adopts the new incarnation and the new epoch (111), while
// re-registering with the SAME incarnation id keeps the original epoch (100).
@ParameterizedTest
@ValueSource(booleans = {false, true})
public void testReRegistrationAndBrokerEpoch(boolean newIncarnationId) {
ClusterControlManager clusterControl = new ClusterControlManager.Builder().
setClusterId("pjvUwj3ZTEeSVQmUiH3IJw").
setFeatureControlManager(createFeatureControlManager()).
setBrokerShutdownHandler((brokerId, isCleanShutdown, records) -> { }).
build();
clusterControl.activate();
var finalizedFeatures = new FinalizedControllerFeatures(Map.of(MetadataVersion.FEATURE_NAME, MetadataVersion.MINIMUM_VERSION.featureLevel()),
100L);
// First registration of broker 1 at epoch 100.
RecordTestUtils.replayAll(clusterControl, clusterControl.registerBroker(
new BrokerRegistrationRequestData().
setBrokerId(1).
setClusterId(clusterControl.clusterId()).
setFeatures(new BrokerRegistrationRequestData.FeatureCollection(
Set.of(new BrokerRegistrationRequestData.Feature().
setName(MetadataVersion.FEATURE_NAME).
setMinSupportedVersion(MetadataVersion.MINIMUM_VERSION.featureLevel()).
setMaxSupportedVersion(MetadataVersion.MINIMUM_VERSION.featureLevel())).iterator())).
setIncarnationId(Uuid.fromString("mISEfEFwQIuaD1gKCc5tzQ")).
setLogDirs(List.of(Uuid.fromString("Vv1gzkM2QpuE-PPrIc6XEw"))),
100,
finalizedFeatures,
false).
records());
// Second registration at epoch 111; only the incarnation id differs between
// the two parameterizations.
RecordTestUtils.replayAll(clusterControl, clusterControl.registerBroker(
new BrokerRegistrationRequestData().
setBrokerId(1).
setClusterId(clusterControl.clusterId()).
setFeatures(new BrokerRegistrationRequestData.FeatureCollection(
Set.of(new BrokerRegistrationRequestData.Feature().
setName(MetadataVersion.FEATURE_NAME).
setMinSupportedVersion(MetadataVersion.MINIMUM_VERSION.featureLevel()).
setMaxSupportedVersion(MetadataVersion.MINIMUM_VERSION.featureLevel())).iterator())).
setIncarnationId(newIncarnationId ?
Uuid.fromString("07OOcU7MQFeSmGAFPP2Zww") : Uuid.fromString("mISEfEFwQIuaD1gKCc5tzQ")).
setLogDirs(List.of(Uuid.fromString("Vv1gzkM2QpuE-PPrIc6XEw"))),
111,
finalizedFeatures,
false).
records());
if (newIncarnationId) {
// A new incarnation id means a restarted broker: epoch advances to 111.
assertEquals(Uuid.fromString("07OOcU7MQFeSmGAFPP2Zww"),
clusterControl.brokerRegistrations().get(1).incarnationId());
assertEquals(111,
clusterControl.brokerRegistrations().get(1).epoch());
} else {
// Same incarnation id: the original epoch 100 is retained.
assertEquals(Uuid.fromString("mISEfEFwQIuaD1gKCc5tzQ"),
clusterControl.brokerRegistrations().get(1).incarnationId());
assertEquals(100,
clusterControl.brokerRegistrations().get(1).epoch());
}
}
@ParameterizedTest
@ValueSource(booleans = {false, true})
public void testReRegistrationWithCleanShutdownDetection(boolean isCleanShutdown) {
ClusterControlManager clusterControl = new ClusterControlManager.Builder().
setClusterId("pjvUwj3ZTEeSVQmUiH3IJw").
setFeatureControlManager(createFeatureControlManager()).
setBrokerShutdownHandler((brokerId, cleanShutdown, records) -> {
if (!cleanShutdown) {
records.add(new ApiMessageAndVersion(new PartitionChangeRecord(), PartitionChangeRecord.HIGHEST_SUPPORTED_VERSION));
}
}).
build();
clusterControl.activate();
var finalizedFeatures = new FinalizedControllerFeatures(Map.of(MetadataVersion.FEATURE_NAME,
MetadataVersion.MINIMUM_VERSION.featureLevel()), 100L);
List<ApiMessageAndVersion> records = clusterControl.registerBroker(
new BrokerRegistrationRequestData().
setBrokerId(1).
setClusterId(clusterControl.clusterId()).
setIncarnationId(Uuid.fromString("mISEfEFwQIuaD1gKCc5tzQ")).
setFeatures(new BrokerRegistrationRequestData.FeatureCollection(
Set.of(new BrokerRegistrationRequestData.Feature().
setName(MetadataVersion.FEATURE_NAME).
setMinSupportedVersion(MetadataVersion.MINIMUM_VERSION.featureLevel()).
setMaxSupportedVersion(MetadataVersion.MINIMUM_VERSION.featureLevel())).iterator())).
setLogDirs(List.of(Uuid.fromString("Vv1gzkM2QpuE-PPrIc6XEw"))),
100,
finalizedFeatures,
true).
records();
records.add(new ApiMessageAndVersion(new BrokerRegistrationChangeRecord().
setBrokerId(1).setBrokerEpoch(100).
setInControlledShutdown(BrokerRegistrationInControlledShutdownChange.IN_CONTROLLED_SHUTDOWN.value()),
(short) 1));
RecordTestUtils.replayAll(clusterControl, records);
records = clusterControl.registerBroker(
new BrokerRegistrationRequestData().
setBrokerId(1).
setClusterId(clusterControl.clusterId()).
setIncarnationId(Uuid.fromString("07OOcU7MQFeSmGAFPP2Zww")).
setPreviousBrokerEpoch(isCleanShutdown ? 100 : 10).
setFeatures(new BrokerRegistrationRequestData.FeatureCollection(
Set.of(new BrokerRegistrationRequestData.Feature().
setName(MetadataVersion.FEATURE_NAME).
setMinSupportedVersion(MetadataVersion.MINIMUM_VERSION.featureLevel()).
setMaxSupportedVersion(MetadataVersion.MINIMUM_VERSION.featureLevel())).iterator())).
setLogDirs(List.of(Uuid.fromString("Vv1gzkM2QpuE-PPrIc6XEw"))),
111,
finalizedFeatures,
true).records();
RecordTestUtils.replayAll(clusterControl, records);
assertEquals(Uuid.fromString("07OOcU7MQFeSmGAFPP2Zww"),
clusterControl.brokerRegistrations().get(1).incarnationId());
assertFalse(clusterControl.brokerRegistrations().get(1).inControlledShutdown());
assertEquals(111, clusterControl.brokerRegistrations().get(1).epoch());
if (isCleanShutdown) {
assertEquals(1, records.size());
} else {
assertEquals(2, records.size());
}
}
/**
 * After activate(), the heartbeat tracker has a contact time only for the
 * broker that was registered with setFenced(false), keyed by its exact
 * (id, epoch) pair; the fenced broker, a wrong epoch, and an unknown broker
 * all yield an empty OptionalLong.
 */
@Test
public void testBrokerContactTimesAreUpdatedOnClusterControlActivation() {
    MockTime time = new MockTime(0L, 20L, 1000L);
    ClusterControlManager clusterControl = new ClusterControlManager.Builder().
        setClusterId("pjvUwj3ZTEeSVQmUiH3IJw").
        setFeatureControlManager(new FeatureControlManager.Builder().build()).
        setBrokerShutdownHandler((brokerId, isCleanShutdown, records) -> { }).
        setTime(time).
        build();
    // Broker 0 registers without setFenced(false); broker 1 registers unfenced.
    clusterControl.replay(new RegisterBrokerRecord().
        setBrokerEpoch(100).
        setBrokerId(0).
        setLogDirs(List.of(Uuid.fromString("Mj3CW3OSRi29cFeNJlXuAQ"))), 10002);
    clusterControl.replay(new RegisterBrokerRecord().
        setBrokerEpoch(123).
        setBrokerId(1).
        setFenced(false).
        setLogDirs(List.of(Uuid.fromString("TyNK6XSSQJaJc2q9uflNHg"))), 10005);
    clusterControl.activate();
    // Only broker 1, at its registered epoch 123, has a recorded contact time.
    assertEquals(OptionalLong.empty(),
        clusterControl.heartbeatManager().tracker().contactTime(new BrokerIdAndEpoch(0, 100)));
    assertEquals(OptionalLong.of(1000L),
        clusterControl.heartbeatManager().tracker().contactTime(new BrokerIdAndEpoch(1, 123)));
    assertEquals(OptionalLong.empty(),
        clusterControl.heartbeatManager().tracker().contactTime(new BrokerIdAndEpoch(1, 124)));
    assertEquals(OptionalLong.empty(),
        clusterControl.heartbeatManager().tracker().contactTime(new BrokerIdAndEpoch(2, 100)));
}
// Verifies session-based duplicate detection: while the previously-registered
// (active, unfenced) broker's session is still valid, a second registration
// for the same broker id fails with DuplicateBrokerRegistrationException;
// once the session times out, the new registration succeeds.
@Test
public void testDuplicateBrokerRegistrationWithActiveOldBroker() {
// active here means brokerHeartbeatManager last recorded the broker as unfenced and not in controlled shutdown
long brokerSessionTimeoutMs = 1000;
MockTime time = new MockTime(0L, 20L, 1000L);
FinalizedControllerFeatures finalizedFeatures = new FinalizedControllerFeatures(
Map.of(MetadataVersion.FEATURE_NAME, MetadataVersion.LATEST_PRODUCTION.featureLevel()), 456L);
ClusterControlManager clusterControl = new ClusterControlManager.Builder().
setClusterId("pjvUwj3ZTEeSVQmUiH3IJw").
setFeatureControlManager(createFeatureControlManager()).
setBrokerShutdownHandler((brokerId, isCleanShutdown, records) -> { }).
setSessionTimeoutNs(TimeUnit.MILLISECONDS.toNanos(brokerSessionTimeoutMs)).
setTime(time).
build();
// Broker 0 is registered unfenced, so activation records a contact time for it.
clusterControl.replay(new RegisterBrokerRecord().
setBrokerEpoch(100).
setBrokerId(0).
setLogDirs(List.of(Uuid.fromString("Mj3CW3OSRi29cFeNJlXuAQ"))).
setFenced(false), 10002);
clusterControl.activate();
assertEquals(OptionalLong.of(1000L), clusterControl.heartbeatManager().tracker().
contactTime(new BrokerIdAndEpoch(0, 100)));
// while session is still valid for old broker, duplicate requests should fail
time.sleep(brokerSessionTimeoutMs / 2);
assertThrows(DuplicateBrokerRegistrationException.class, () ->
clusterControl.registerBroker(new BrokerRegistrationRequestData().
setClusterId("pjvUwj3ZTEeSVQmUiH3IJw").
setBrokerId(0).
setLogDirs(List.of(Uuid.fromString("TyNK6XSSQJaJc2q9uflNHg"))).
setFeatures(new BrokerRegistrationRequestData.FeatureCollection(
Set.of(new BrokerRegistrationRequestData.Feature().
setName(MetadataVersion.FEATURE_NAME).
setMinSupportedVersion(MetadataVersion.MINIMUM_VERSION.featureLevel()).
setMaxSupportedVersion(MetadataVersion.LATEST_PRODUCTION.featureLevel())).iterator())).
setIncarnationId(Uuid.fromString("0H4fUu1xQEKXFYwB1aBjhg")),
101L,
finalizedFeatures,
false));
// if session expires for broker, even if the broker was active the new registration will succeed
time.sleep(brokerSessionTimeoutMs);
clusterControl.registerBroker(new BrokerRegistrationRequestData().
setClusterId("pjvUwj3ZTEeSVQmUiH3IJw").
setBrokerId(0).
setLogDirs(List.of(Uuid.fromString("TyNK6XSSQJaJc2q9uflNHg"))).
setFeatures(new BrokerRegistrationRequestData.FeatureCollection(
Set.of(new BrokerRegistrationRequestData.Feature().
setName(MetadataVersion.FEATURE_NAME).
setMinSupportedVersion(MetadataVersion.MINIMUM_VERSION.featureLevel()).
setMaxSupportedVersion(MetadataVersion.LATEST_PRODUCTION.featureLevel())).iterator())).
setIncarnationId(Uuid.fromString("0H4fUu1xQEKXFYwB1aBjhg")),
101L,
finalizedFeatures,
false);
}
@Test
public void testDuplicateBrokerRegistrationWithInactiveBroker() {
// inactive here means brokerHeartbeatManager last recorded the broker as fenced or in controlled shutdown
long brokerSessionTimeoutMs = 1000;
MockTime time = new MockTime(0L, 20L, 1000L);
FinalizedControllerFeatures finalizedFeatures = new FinalizedControllerFeatures(
Map.of(MetadataVersion.FEATURE_NAME, MetadataVersion.LATEST_PRODUCTION.featureLevel()), 456L);
ClusterControlManager clusterControl = new ClusterControlManager.Builder().
setClusterId("pjvUwj3ZTEeSVQmUiH3IJw").
setFeatureControlManager(createFeatureControlManager()).
setBrokerShutdownHandler((brokerId, isCleanShutdown, records) -> { }).
setSessionTimeoutNs(TimeUnit.MILLISECONDS.toNanos(brokerSessionTimeoutMs)).
setTime(time).
build();
// first broker is fenced
clusterControl.replay(new RegisterBrokerRecord().
setBrokerEpoch(100).
setBrokerId(0).
setLogDirs(List.of(Uuid.fromString("Mj3CW3OSRi29cFeNJlXuAQ"))).
setFenced(true).
setInControlledShutdown(false), 10002);
// second broker is in controlled shutdown
clusterControl.replay(new RegisterBrokerRecord().
setBrokerEpoch(200).
setBrokerId(1).
setLogDirs(List.of(Uuid.fromString("TyNK6XSSQJaJc2q9uflNHg"))).
setFenced(false).
setInControlledShutdown(true), 20002);
clusterControl.activate();
clusterControl.heartbeatManager().maybeUpdateControlledShutdownOffset(1, 20002);
assertEquals(OptionalLong.empty(), clusterControl.heartbeatManager().tracker().
contactTime(new BrokerIdAndEpoch(0, 100)));
assertEquals(OptionalLong.of(1000L), clusterControl.heartbeatManager().tracker().
contactTime(new BrokerIdAndEpoch(1, 200)));
time.sleep(brokerSessionTimeoutMs / 2);
clusterControl.registerBroker(new BrokerRegistrationRequestData().
setClusterId("pjvUwj3ZTEeSVQmUiH3IJw").
setBrokerId(0).
setLogDirs(List.of(Uuid.fromString("yJGxmjfbQZSVFAlNM3uXZg"))).
setFeatures(new BrokerRegistrationRequestData.FeatureCollection(
Set.of(new BrokerRegistrationRequestData.Feature().
setName(MetadataVersion.FEATURE_NAME).
setMinSupportedVersion(MetadataVersion.MINIMUM_VERSION.featureLevel()).
setMaxSupportedVersion(MetadataVersion.LATEST_PRODUCTION.featureLevel())).iterator())).
setIncarnationId(Uuid.fromString("0H4fUu1xQEKXFYwB1aBjhg")),
101L,
finalizedFeatures,
false);
assertThrows(DuplicateBrokerRegistrationException.class, () -> {
clusterControl.registerBroker(new BrokerRegistrationRequestData().
setClusterId("pjvUwj3ZTEeSVQmUiH3IJw").
setBrokerId(1).
setLogDirs(List.of(Uuid.fromString("b66ybsWIQoygs01vdjH07A"))).
setFeatures(new BrokerRegistrationRequestData.FeatureCollection(
Set.of(new BrokerRegistrationRequestData.Feature().
setName(MetadataVersion.FEATURE_NAME).
setMinSupportedVersion(MetadataVersion.MINIMUM_VERSION.featureLevel()).
setMaxSupportedVersion(MetadataVersion.LATEST_PRODUCTION.featureLevel())).iterator())).
setIncarnationId(Uuid.fromString("vZKYST0pSA2HO5x_6hoO2Q")),
201L,
finalizedFeatures,
false);
});
}
private FeatureControlManager createFeatureControlManager() {
FeatureControlManager featureControlManager = new FeatureControlManager.Builder().build();
featureControlManager.replay(new FeatureLevelRecord().
setName(MetadataVersion.FEATURE_NAME).
setFeatureLevel(MetadataVersion.LATEST_PRODUCTION.featureLevel()));
return featureControlManager;
}
}
| ClusterControlManagerTest |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/recursive/comparison/legacy/RecursiveComparisonAssert_bddSoftAssertions_Test.java | {
"start": 983,
"end": 2764
} | class ____ extends WithLegacyIntrospectionStrategyBaseTest {
private BDDSoftAssertions softly;
@BeforeEach
public void beforeEachTest() {
super.beforeEachTest();
Assertions.setRemoveAssertJRelatedElementsFromStackTrace(false);
softly = new BDDSoftAssertions();
}
@Test
void should_pass_with_bdd_soft_assertions() {
// GIVEN
Person actual = new Person("John");
actual.home.address.number = 1;
Person expected = new Person("John");
expected.home.address.number = 1;
// WHEN
softly.then(actual).usingRecursiveComparison(recursiveComparisonConfiguration).isEqualTo(expected);
// THEN
softly.assertAll();
}
@Test
void should_report_all_errors_with_bdd_soft_assertions() {
// GIVEN
Person john = new Person("John");
john.home.address.number = 1;
Person jack = new Person("Jack");
jack.home.address.number = 2;
// WHEN
softly.then(john).usingRecursiveComparison(recursiveComparisonConfiguration).isEqualTo(jack);
softly.then(jack).usingRecursiveComparison(recursiveComparisonConfiguration).isEqualTo(john);
// THEN
List<Throwable> errorsCollected = softly.errorsCollected();
then(errorsCollected).hasSize(2);
then(errorsCollected.get(0)).hasMessageContaining("field/property 'home.address.number' differ:")
.hasMessageContaining("- actual value : 1")
.hasMessageContaining("- expected value: 2");
then(errorsCollected.get(1)).hasMessageContaining("field/property 'home.address.number' differ:")
.hasMessageContaining("- actual value : 2")
.hasMessageContaining("- expected value: 1");
}
}
| RecursiveComparisonAssert_bddSoftAssertions_Test |
java | apache__avro | lang/java/ipc/src/test/java/org/apache/avro/io/Perf.java | {
"start": 20655,
"end": 22090
} | class ____ extends BasicTest {
private long[] sourceData = null;
public LongTest() throws IOException {
super("Long", "{ \"type\": \"long\"} ");
}
@Override
void genSourceData() {
Random r = newRandom();
sourceData = new long[count];
for (int i = 0; i < sourceData.length; i += 4) {
sourceData[i] = r.nextLong() % 0x7FL; // half fit in 1, half in 2
sourceData[i + 1] = r.nextLong() % 0x1FFFFFL; // half fit in <=3, half in 4
sourceData[i + 2] = r.nextLong() % 0x3FFFFFFFFL; // half in <=5, half in 6
sourceData[i + 3] = r.nextLong() % 0x1FFFFFFFFFFFFL; // half in <=8, half in 9
}
// last 16, make full size
for (int i = sourceData.length - 16; i < sourceData.length; i++) {
sourceData[i] = r.nextLong();
}
}
@Override
void readInternal(Decoder d) throws IOException {
for (int i = 0; i < count / 4; i++) {
d.readLong();
d.readLong();
d.readLong();
d.readLong();
}
}
@Override
void writeInternal(Encoder e) throws IOException {
for (int i = 0; i < sourceData.length; i += 4) {
e.writeLong(sourceData[i]);
e.writeLong(sourceData[i + 1]);
e.writeLong(sourceData[i + 2]);
e.writeLong(sourceData[i + 3]);
}
}
@Override
void reset() {
sourceData = null;
data = null;
}
}
static | LongTest |
java | apache__dubbo | dubbo-common/src/main/java/org/apache/dubbo/common/utils/CompatibleTypeUtils.java | {
"start": 1247,
"end": 9692
} | class ____ {
private static final String DATE_FORMAT = "yyyy-MM-dd HH:mm:ss";
/**
* the text to parse such as "2007-12-03T10:15:30"
*/
private static final int ISO_LOCAL_DATE_TIME_MIN_LEN = 19;
private CompatibleTypeUtils() {}
/**
* Compatible type convert. Null value is allowed to pass in. If no conversion is needed, then the original value
* will be returned.
* <p>
* Supported compatible type conversions include (primary types and corresponding wrappers are not listed):
* <ul>
* <li> String -> char, enum, Date
* <li> byte, short, int, long -> byte, short, int, long
* <li> float, double -> float, double
* </ul>
*/
@SuppressWarnings({"unchecked", "rawtypes"})
public static Object compatibleTypeConvert(Object value, Class<?> type) {
if (value == null || type == null || type.isAssignableFrom(value.getClass())) {
return value;
}
if (value instanceof String) {
String string = (String) value;
if (char.class.equals(type) || Character.class.equals(type)) {
if (string.length() != 1) {
throw new IllegalArgumentException(String.format(
"CAN NOT convert String(%s) to char!"
+ " when convert String to char, the String MUST only 1 char.",
string));
}
return string.charAt(0);
}
if (type.isEnum()) {
return Enum.valueOf((Class<Enum>) type, string);
}
if (type == BigInteger.class) {
return new BigInteger(string);
}
if (type == BigDecimal.class) {
return new BigDecimal(string);
}
if (type == Short.class || type == short.class) {
return new Short(string);
}
if (type == Integer.class || type == int.class) {
return new Integer(string);
}
if (type == Long.class || type == long.class) {
return new Long(string);
}
if (type == Double.class || type == double.class) {
return new Double(string);
}
if (type == Float.class || type == float.class) {
return new Float(string);
}
if (type == Byte.class || type == byte.class) {
return new Byte(string);
}
if (type == Boolean.class || type == boolean.class) {
return Boolean.valueOf(string);
}
if (type == Date.class
|| type == java.sql.Date.class
|| type == java.sql.Timestamp.class
|| type == java.sql.Time.class) {
try {
Date date = new SimpleDateFormat(DATE_FORMAT).parse(string);
if (type == java.sql.Date.class) {
return new java.sql.Date(date.getTime());
}
if (type == java.sql.Timestamp.class) {
return new java.sql.Timestamp(date.getTime());
}
if (type == java.sql.Time.class) {
return new java.sql.Time(date.getTime());
}
return date;
} catch (ParseException e) {
throw new IllegalStateException(
"Failed to parse date " + value + " by format " + DATE_FORMAT + ", cause: "
+ e.getMessage(),
e);
}
}
if (type == java.time.LocalDateTime.class) {
if (StringUtils.isEmpty(string)) {
return null;
}
return LocalDateTime.parse(string);
}
if (type == java.time.LocalDate.class) {
if (StringUtils.isEmpty(string)) {
return null;
}
return LocalDate.parse(string);
}
if (type == java.time.LocalTime.class) {
if (StringUtils.isEmpty(string)) {
return null;
}
if (string.length() >= ISO_LOCAL_DATE_TIME_MIN_LEN) {
return LocalDateTime.parse(string).toLocalTime();
} else {
return LocalTime.parse(string);
}
}
if (type == Class.class) {
try {
return ReflectUtils.name2class(string);
} catch (ClassNotFoundException e) {
throw new RuntimeException(e.getMessage(), e);
}
}
if (char[].class.equals(type)) {
// Process string to char array for generic invoke
// See
// - https://github.com/apache/dubbo/issues/2003
int len = string.length();
char[] chars = new char[len];
string.getChars(0, len, chars, 0);
return chars;
}
}
if (value instanceof Number) {
Number number = (Number) value;
if (type == byte.class || type == Byte.class) {
return number.byteValue();
}
if (type == short.class || type == Short.class) {
return number.shortValue();
}
if (type == int.class || type == Integer.class) {
return number.intValue();
}
if (type == long.class || type == Long.class) {
return number.longValue();
}
if (type == float.class || type == Float.class) {
return number.floatValue();
}
if (type == double.class || type == Double.class) {
return number.doubleValue();
}
if (type == BigInteger.class) {
return BigInteger.valueOf(number.longValue());
}
if (type == BigDecimal.class) {
return new BigDecimal(number.toString());
}
if (type == Date.class) {
return new Date(number.longValue());
}
if (type == boolean.class || type == Boolean.class) {
return 0 != number.intValue();
}
}
if (value instanceof Collection) {
Collection collection = (Collection) value;
if (type.isArray()) {
int length = collection.size();
Object array = Array.newInstance(type.getComponentType(), length);
int i = 0;
for (Object item : collection) {
Array.set(array, i++, item);
}
return array;
}
if (!type.isInterface()) {
try {
Collection result =
(Collection) type.getDeclaredConstructor().newInstance();
result.addAll(collection);
return result;
} catch (Throwable ignored) {
}
}
if (type == List.class) {
return new ArrayList<Object>(collection);
}
if (type == Set.class) {
return new HashSet<Object>(collection);
}
}
if (value.getClass().isArray() && Collection.class.isAssignableFrom(type)) {
int length = Array.getLength(value);
Collection collection;
if (!type.isInterface()) {
try {
collection = (Collection) type.getDeclaredConstructor().newInstance();
} catch (Exception e) {
collection = new ArrayList<Object>(length);
}
} else if (type == Set.class) {
collection = new HashSet<Object>(Math.max((int) (length / .75f) + 1, 16));
} else {
collection = new ArrayList<Object>(length);
}
for (int i = 0; i < length; i++) {
collection.add(Array.get(value, i));
}
return collection;
}
return value;
}
}
| CompatibleTypeUtils |
java | grpc__grpc-java | xds/src/test/java/io/grpc/xds/CdsLoadBalancer2Test.java | {
"start": 4702,
"end": 33693
} | class ____ {
@Rule public final MockitoRule mocks = MockitoJUnit.rule();
@Rule
public final GrpcCleanupRule cleanupRule = new GrpcCleanupRule();
private static final String SERVER_NAME = "example.com";
private static final String CLUSTER = "cluster-foo.googleapis.com";
private static final String EDS_SERVICE_NAME = "backend-service-1.googleapis.com";
private static final String NODE_ID = "node-id";
private final io.grpc.xds.EnvoyServerProtoData.UpstreamTlsContext upstreamTlsContext =
CommonTlsContextTestsUtil.buildUpstreamTlsContext("cert-instance-name", true);
private static final Cluster EDS_CLUSTER = Cluster.newBuilder()
.setName(CLUSTER)
.setType(Cluster.DiscoveryType.EDS)
.setEdsClusterConfig(Cluster.EdsClusterConfig.newBuilder()
.setServiceName(EDS_SERVICE_NAME)
.setEdsConfig(ConfigSource.newBuilder()
.setAds(AggregatedConfigSource.newBuilder())))
.build();
private final FakeClock fakeClock = new FakeClock();
private final LoadBalancerRegistry lbRegistry = new LoadBalancerRegistry();
private final List<FakeLoadBalancer> childBalancers = new ArrayList<>();
private final XdsTestControlPlaneService controlPlaneService = new XdsTestControlPlaneService();
private final XdsClient xdsClient = XdsTestUtils.createXdsClient(
Arrays.asList("control-plane.example.com"),
serverInfo -> new GrpcXdsTransportFactory.GrpcXdsTransport(
InProcessChannelBuilder
.forName(serverInfo.target())
.directExecutor()
.build()),
fakeClock);
private final ServerInfo lrsServerInfo = xdsClient.getBootstrapInfo().servers().get(0);
private XdsDependencyManager xdsDepManager;
@Mock
private Helper helper;
@Captor
private ArgumentCaptor<SubchannelPicker> pickerCaptor;
private CdsLoadBalancer2 loadBalancer;
private XdsConfig lastXdsConfig;
@Before
public void setUp() throws Exception {
lbRegistry.register(new FakeLoadBalancerProvider(CLUSTER_RESOLVER_POLICY_NAME));
lbRegistry.register(new FakeLoadBalancerProvider("round_robin"));
lbRegistry.register(
new FakeLoadBalancerProvider("ring_hash_experimental", new RingHashLoadBalancerProvider()));
lbRegistry.register(new FakeLoadBalancerProvider("least_request_experimental",
new LeastRequestLoadBalancerProvider()));
lbRegistry.register(new FakeLoadBalancerProvider("wrr_locality_experimental",
new WrrLocalityLoadBalancerProvider()));
CdsLoadBalancerProvider cdsLoadBalancerProvider = new CdsLoadBalancerProvider(lbRegistry);
lbRegistry.register(cdsLoadBalancerProvider);
loadBalancer = (CdsLoadBalancer2) cdsLoadBalancerProvider.newLoadBalancer(helper);
cleanupRule.register(InProcessServerBuilder
.forName("control-plane.example.com")
.addService(controlPlaneService)
.directExecutor()
.build()
.start());
SynchronizationContext syncContext = new SynchronizationContext((t, e) -> {
throw new AssertionError(e);
});
when(helper.getSynchronizationContext()).thenReturn(syncContext);
when(helper.getScheduledExecutorService()).thenReturn(fakeClock.getScheduledExecutorService());
NameResolver.Args nameResolverArgs = NameResolver.Args.newBuilder()
.setDefaultPort(8080)
.setProxyDetector((address) -> null)
.setSynchronizationContext(syncContext)
.setServiceConfigParser(mock(NameResolver.ServiceConfigParser.class))
.setChannelLogger(mock(ChannelLogger.class))
.setScheduledExecutorService(fakeClock.getScheduledExecutorService())
.setNameResolverRegistry(new NameResolverRegistry())
.build();
xdsDepManager = new XdsDependencyManager(
xdsClient,
syncContext,
SERVER_NAME,
SERVER_NAME,
nameResolverArgs);
controlPlaneService.setXdsConfig(ADS_TYPE_URL_LDS, ImmutableMap.of(
SERVER_NAME, ControlPlaneRule.buildClientListener(SERVER_NAME, "my-route")));
controlPlaneService.setXdsConfig(ADS_TYPE_URL_RDS, ImmutableMap.of(
"my-route", XdsTestUtils.buildRouteConfiguration(SERVER_NAME, "my-route", CLUSTER)));
controlPlaneService.setXdsConfig(ADS_TYPE_URL_EDS, ImmutableMap.of(
EDS_SERVICE_NAME, ControlPlaneRule.buildClusterLoadAssignment(
"127.0.0.1", "", 1234, EDS_SERVICE_NAME)));
}
@After
public void tearDown() {
if (loadBalancer != null) {
shutdownLoadBalancer();
}
assertThat(childBalancers).isEmpty();
if (xdsDepManager != null) {
xdsDepManager.shutdown();
}
xdsClient.shutdown();
}
private void shutdownLoadBalancer() {
LoadBalancer lb = this.loadBalancer;
this.loadBalancer = null; // Must avoid calling acceptResolvedAddresses after shutdown
lb.shutdown();
}
@Test
public void discoverTopLevelEdsCluster() {
Cluster cluster = Cluster.newBuilder()
.setName(CLUSTER)
.setType(Cluster.DiscoveryType.EDS)
.setEdsClusterConfig(Cluster.EdsClusterConfig.newBuilder()
.setServiceName(EDS_SERVICE_NAME)
.setEdsConfig(ConfigSource.newBuilder()
.setAds(AggregatedConfigSource.newBuilder())))
.setLbPolicy(Cluster.LbPolicy.ROUND_ROBIN)
.setLrsServer(ConfigSource.newBuilder()
.setSelf(SelfConfigSource.getDefaultInstance()))
.setCircuitBreakers(CircuitBreakers.newBuilder()
.addThresholds(CircuitBreakers.Thresholds.newBuilder()
.setPriority(RoutingPriority.DEFAULT)
.setMaxRequests(UInt32Value.newBuilder().setValue(100))))
.setTransportSocket(TransportSocket.newBuilder()
.setName("envoy.transport_sockets.tls")
.setTypedConfig(Any.pack(UpstreamTlsContext.newBuilder()
.setCommonTlsContext(upstreamTlsContext.getCommonTlsContext())
.build())))
.setOutlierDetection(OutlierDetection.getDefaultInstance())
.build();
controlPlaneService.setXdsConfig(ADS_TYPE_URL_CDS, ImmutableMap.of(CLUSTER, cluster));
startXdsDepManager();
verify(helper, never()).updateBalancingState(eq(ConnectivityState.TRANSIENT_FAILURE), any());
assertThat(childBalancers).hasSize(1);
FakeLoadBalancer childBalancer = Iterables.getOnlyElement(childBalancers);
assertThat(childBalancer.name).isEqualTo(CLUSTER_RESOLVER_POLICY_NAME);
ClusterResolverConfig childLbConfig = (ClusterResolverConfig) childBalancer.config;
assertThat(childLbConfig.discoveryMechanism).isEqualTo(
DiscoveryMechanism.forEds(
CLUSTER, EDS_SERVICE_NAME, lrsServerInfo, 100L, upstreamTlsContext,
Collections.emptyMap(), io.grpc.xds.EnvoyServerProtoData.OutlierDetection.create(
null, null, null, null, SuccessRateEjection.create(null, null, null, null),
FailurePercentageEjection.create(null, null, null, null)), null));
assertThat(
GracefulSwitchLoadBalancerAccessor.getChildProvider(childLbConfig.lbConfig).getPolicyName())
.isEqualTo("wrr_locality_experimental");
}
@Test
public void discoverTopLevelLogicalDnsCluster() {
Cluster cluster = Cluster.newBuilder()
.setName(CLUSTER)
.setType(Cluster.DiscoveryType.LOGICAL_DNS)
.setLoadAssignment(ClusterLoadAssignment.newBuilder()
.addEndpoints(LocalityLbEndpoints.newBuilder()
.addLbEndpoints(LbEndpoint.newBuilder()
.setEndpoint(Endpoint.newBuilder()
.setAddress(Address.newBuilder()
.setSocketAddress(SocketAddress.newBuilder()
.setAddress("dns.example.com")
.setPortValue(1111)))))))
.setEdsClusterConfig(Cluster.EdsClusterConfig.newBuilder()
.setServiceName(EDS_SERVICE_NAME)
.setEdsConfig(ConfigSource.newBuilder()
.setAds(AggregatedConfigSource.newBuilder())))
.setLbPolicy(Cluster.LbPolicy.LEAST_REQUEST)
.setLrsServer(ConfigSource.newBuilder()
.setSelf(SelfConfigSource.getDefaultInstance()))
.setCircuitBreakers(CircuitBreakers.newBuilder()
.addThresholds(CircuitBreakers.Thresholds.newBuilder()
.setPriority(RoutingPriority.DEFAULT)
.setMaxRequests(UInt32Value.newBuilder().setValue(100))))
.setTransportSocket(TransportSocket.newBuilder()
.setName("envoy.transport_sockets.tls")
.setTypedConfig(Any.pack(UpstreamTlsContext.newBuilder()
.setCommonTlsContext(upstreamTlsContext.getCommonTlsContext())
.build())))
.build();
controlPlaneService.setXdsConfig(ADS_TYPE_URL_CDS, ImmutableMap.of(CLUSTER, cluster));
startXdsDepManager();
verify(helper, never()).updateBalancingState(eq(ConnectivityState.TRANSIENT_FAILURE), any());
assertThat(childBalancers).hasSize(1);
FakeLoadBalancer childBalancer = Iterables.getOnlyElement(childBalancers);
assertThat(childBalancer.name).isEqualTo(CLUSTER_RESOLVER_POLICY_NAME);
ClusterResolverConfig childLbConfig = (ClusterResolverConfig) childBalancer.config;
assertThat(childLbConfig.discoveryMechanism).isEqualTo(
DiscoveryMechanism.forLogicalDns(
CLUSTER, "dns.example.com:1111", lrsServerInfo, 100L, upstreamTlsContext,
Collections.emptyMap(), null));
assertThat(
GracefulSwitchLoadBalancerAccessor.getChildProvider(childLbConfig.lbConfig).getPolicyName())
.isEqualTo("wrr_locality_experimental");
}
@Test
public void nonAggregateCluster_resourceNotExist_returnErrorPicker() {
startXdsDepManager();
verify(helper).updateBalancingState(
eq(ConnectivityState.TRANSIENT_FAILURE), pickerCaptor.capture());
Status unavailable = Status.UNAVAILABLE.withDescription(
"CDS resource " + CLUSTER + " does not exist nodeID: " + NODE_ID);
assertPickerStatus(pickerCaptor.getValue(), unavailable);
assertThat(childBalancers).isEmpty();
}
@Test
public void nonAggregateCluster_resourceUpdate() {
Cluster cluster = EDS_CLUSTER.toBuilder()
.setCircuitBreakers(CircuitBreakers.newBuilder()
.addThresholds(CircuitBreakers.Thresholds.newBuilder()
.setPriority(RoutingPriority.DEFAULT)
.setMaxRequests(UInt32Value.newBuilder().setValue(100))))
.build();
controlPlaneService.setXdsConfig(ADS_TYPE_URL_CDS, ImmutableMap.of(CLUSTER, cluster));
startXdsDepManager();
verify(helper, never()).updateBalancingState(eq(ConnectivityState.TRANSIENT_FAILURE), any());
assertThat(childBalancers).hasSize(1);
FakeLoadBalancer childBalancer = Iterables.getOnlyElement(childBalancers);
ClusterResolverConfig childLbConfig = (ClusterResolverConfig) childBalancer.config;
assertThat(childLbConfig.discoveryMechanism).isEqualTo(
DiscoveryMechanism.forEds(
CLUSTER, EDS_SERVICE_NAME, null, 100L, null, Collections.emptyMap(), null, null));
cluster = EDS_CLUSTER.toBuilder()
.setCircuitBreakers(CircuitBreakers.newBuilder()
.addThresholds(CircuitBreakers.Thresholds.newBuilder()
.setPriority(RoutingPriority.DEFAULT)
.setMaxRequests(UInt32Value.newBuilder().setValue(200))))
.build();
controlPlaneService.setXdsConfig(ADS_TYPE_URL_CDS, ImmutableMap.of(CLUSTER, cluster));
verify(helper, never()).updateBalancingState(eq(ConnectivityState.TRANSIENT_FAILURE), any());
assertThat(childBalancers).hasSize(1);
childBalancer = Iterables.getOnlyElement(childBalancers);
childLbConfig = (ClusterResolverConfig) childBalancer.config;
assertThat(childLbConfig.discoveryMechanism).isEqualTo(
DiscoveryMechanism.forEds(
CLUSTER, EDS_SERVICE_NAME, null, 200L, null, Collections.emptyMap(), null, null));
}
@Test
public void nonAggregateCluster_resourceRevoked() {
controlPlaneService.setXdsConfig(ADS_TYPE_URL_CDS, ImmutableMap.of(CLUSTER, EDS_CLUSTER));
startXdsDepManager();
verify(helper, never()).updateBalancingState(eq(ConnectivityState.TRANSIENT_FAILURE), any());
assertThat(childBalancers).hasSize(1);
FakeLoadBalancer childBalancer = Iterables.getOnlyElement(childBalancers);
ClusterResolverConfig childLbConfig = (ClusterResolverConfig) childBalancer.config;
assertThat(childLbConfig.discoveryMechanism).isEqualTo(
DiscoveryMechanism.forEds(
CLUSTER, EDS_SERVICE_NAME, null, null, null, Collections.emptyMap(), null, null));
controlPlaneService.setXdsConfig(ADS_TYPE_URL_CDS, ImmutableMap.of());
assertThat(childBalancer.shutdown).isTrue();
Status unavailable = Status.UNAVAILABLE.withDescription(
"CDS resource " + CLUSTER + " does not exist nodeID: " + NODE_ID);
verify(helper).updateBalancingState(
eq(ConnectivityState.TRANSIENT_FAILURE), pickerCaptor.capture());
assertPickerStatus(pickerCaptor.getValue(), unavailable);
assertThat(childBalancer.shutdown).isTrue();
assertThat(childBalancers).isEmpty();
}
@Test
public void dynamicCluster() {
String clusterName = "cluster2";
Cluster cluster = EDS_CLUSTER.toBuilder()
.setName(clusterName)
.build();
controlPlaneService.setXdsConfig(ADS_TYPE_URL_CDS, ImmutableMap.of(
clusterName, cluster,
CLUSTER, Cluster.newBuilder().setName(CLUSTER).build()));
startXdsDepManager(new CdsConfig(clusterName, /*dynamic=*/ true));
verify(helper, never()).updateBalancingState(eq(ConnectivityState.TRANSIENT_FAILURE), any());
assertThat(childBalancers).hasSize(1);
FakeLoadBalancer childBalancer = Iterables.getOnlyElement(childBalancers);
ClusterResolverConfig childLbConfig = (ClusterResolverConfig) childBalancer.config;
assertThat(childLbConfig.discoveryMechanism).isEqualTo(
DiscoveryMechanism.forEds(
clusterName, EDS_SERVICE_NAME, null, null, null, Collections.emptyMap(), null, null));
assertThat(this.lastXdsConfig.getClusters()).containsKey(clusterName);
shutdownLoadBalancer();
assertThat(this.lastXdsConfig.getClusters()).doesNotContainKey(clusterName);
}
@Test
public void discoverAggregateCluster_createsPriorityLbPolicy() {
lbRegistry.register(new FakeLoadBalancerProvider(PRIORITY_POLICY_NAME));
CdsLoadBalancerProvider cdsLoadBalancerProvider = new CdsLoadBalancerProvider(lbRegistry);
lbRegistry.register(cdsLoadBalancerProvider);
loadBalancer = (CdsLoadBalancer2) cdsLoadBalancerProvider.newLoadBalancer(helper);
String cluster1 = "cluster-01.googleapis.com";
String cluster2 = "cluster-02.googleapis.com";
String cluster3 = "cluster-03.googleapis.com";
String cluster4 = "cluster-04.googleapis.com";
controlPlaneService.setXdsConfig(ADS_TYPE_URL_CDS, ImmutableMap.of(
// CLUSTER (aggr.) -> [cluster1 (aggr.), cluster2 (logical DNS), cluster3 (EDS)]
CLUSTER, Cluster.newBuilder()
.setName(CLUSTER)
.setClusterType(Cluster.CustomClusterType.newBuilder()
.setName("envoy.clusters.aggregate")
.setTypedConfig(Any.pack(ClusterConfig.newBuilder()
.addClusters(cluster1)
.addClusters(cluster2)
.addClusters(cluster3)
.build())))
.setLbPolicy(Cluster.LbPolicy.RING_HASH)
.build(),
// cluster1 (aggr.) -> [cluster3 (EDS), cluster4 (EDS)]
cluster1, Cluster.newBuilder()
.setName(cluster1)
.setClusterType(Cluster.CustomClusterType.newBuilder()
.setName("envoy.clusters.aggregate")
.setTypedConfig(Any.pack(ClusterConfig.newBuilder()
.addClusters(cluster3)
.addClusters(cluster4)
.build())))
.build(),
cluster2, Cluster.newBuilder()
.setName(cluster2)
.setType(Cluster.DiscoveryType.LOGICAL_DNS)
.setLoadAssignment(ClusterLoadAssignment.newBuilder()
.addEndpoints(LocalityLbEndpoints.newBuilder()
.addLbEndpoints(LbEndpoint.newBuilder()
.setEndpoint(Endpoint.newBuilder()
.setAddress(Address.newBuilder()
.setSocketAddress(SocketAddress.newBuilder()
.setAddress("dns.example.com")
.setPortValue(1111)))))))
.build(),
cluster3, EDS_CLUSTER.toBuilder()
.setName(cluster3)
.setCircuitBreakers(CircuitBreakers.newBuilder()
.addThresholds(CircuitBreakers.Thresholds.newBuilder()
.setPriority(RoutingPriority.DEFAULT)
.setMaxRequests(UInt32Value.newBuilder().setValue(100))))
.build(),
cluster4, EDS_CLUSTER.toBuilder().setName(cluster4).build()));
startXdsDepManager();
verify(helper, never()).updateBalancingState(eq(ConnectivityState.TRANSIENT_FAILURE), any());
assertThat(childBalancers).hasSize(1);
FakeLoadBalancer childBalancer = Iterables.getOnlyElement(childBalancers);
assertThat(childBalancer.name).isEqualTo(PRIORITY_POLICY_NAME);
PriorityLoadBalancerProvider.PriorityLbConfig childLbConfig =
(PriorityLoadBalancerProvider.PriorityLbConfig) childBalancer.config;
assertThat(childLbConfig.priorities).hasSize(3);
assertThat(childLbConfig.priorities.get(0)).isEqualTo(cluster3);
assertThat(childLbConfig.priorities.get(1)).isEqualTo(cluster4);
assertThat(childLbConfig.priorities.get(2)).isEqualTo(cluster2);
assertThat(childLbConfig.childConfigs).hasSize(3);
PriorityLoadBalancerProvider.PriorityLbConfig.PriorityChildConfig childConfig3 =
childLbConfig.childConfigs.get(cluster3);
assertThat(
GracefulSwitchLoadBalancerAccessor.getChildProvider(childConfig3.childConfig)
.getPolicyName())
.isEqualTo("cds_experimental");
PriorityLoadBalancerProvider.PriorityLbConfig.PriorityChildConfig childConfig4 =
childLbConfig.childConfigs.get(cluster4);
assertThat(
GracefulSwitchLoadBalancerAccessor.getChildProvider(childConfig4.childConfig)
.getPolicyName())
.isEqualTo("cds_experimental");
PriorityLoadBalancerProvider.PriorityLbConfig.PriorityChildConfig childConfig2 =
childLbConfig.childConfigs.get(cluster2);
assertThat(
GracefulSwitchLoadBalancerAccessor.getChildProvider(childConfig2.childConfig)
.getPolicyName())
.isEqualTo("cds_experimental");
}
@Test
// Both priorities will get tried using real priority LB policy.
public void discoverAggregateCluster_testChildCdsLbPolicyParsing() {
lbRegistry.register(new PriorityLoadBalancerProvider());
CdsLoadBalancerProvider cdsLoadBalancerProvider = new CdsLoadBalancerProvider(lbRegistry);
lbRegistry.register(cdsLoadBalancerProvider);
loadBalancer = (CdsLoadBalancer2) cdsLoadBalancerProvider.newLoadBalancer(helper);
String cluster1 = "cluster-01.googleapis.com";
String cluster2 = "cluster-02.googleapis.com";
controlPlaneService.setXdsConfig(ADS_TYPE_URL_CDS, ImmutableMap.of(
// CLUSTER (aggr.) -> [cluster1 (EDS), cluster2 (EDS)]
CLUSTER, Cluster.newBuilder()
.setName(CLUSTER)
.setClusterType(Cluster.CustomClusterType.newBuilder()
.setName("envoy.clusters.aggregate")
.setTypedConfig(Any.pack(ClusterConfig.newBuilder()
.addClusters(cluster1)
.addClusters(cluster2)
.build())))
.build(),
cluster1, EDS_CLUSTER.toBuilder().setName(cluster1).build(),
cluster2, EDS_CLUSTER.toBuilder().setName(cluster2).build()));
startXdsDepManager();
verify(helper, never()).updateBalancingState(eq(ConnectivityState.TRANSIENT_FAILURE), any());
assertThat(childBalancers).hasSize(2);
ClusterResolverConfig cluster1ResolverConfig =
(ClusterResolverConfig) childBalancers.get(0).config;
assertThat(cluster1ResolverConfig.discoveryMechanism.cluster)
.isEqualTo("cluster-01.googleapis.com");
assertThat(cluster1ResolverConfig.discoveryMechanism.type)
.isEqualTo(DiscoveryMechanism.Type.EDS);
assertThat(cluster1ResolverConfig.discoveryMechanism.edsServiceName)
.isEqualTo("backend-service-1.googleapis.com");
ClusterResolverConfig cluster2ResolverConfig =
(ClusterResolverConfig) childBalancers.get(1).config;
assertThat(cluster2ResolverConfig.discoveryMechanism.cluster)
.isEqualTo("cluster-02.googleapis.com");
assertThat(cluster2ResolverConfig.discoveryMechanism.type)
.isEqualTo(DiscoveryMechanism.Type.EDS);
assertThat(cluster2ResolverConfig.discoveryMechanism.edsServiceName)
.isEqualTo("backend-service-1.googleapis.com");
}
@Test
public void aggregateCluster_noChildren() {
controlPlaneService.setXdsConfig(ADS_TYPE_URL_CDS, ImmutableMap.of(
// CLUSTER (aggr.) -> []
CLUSTER, Cluster.newBuilder()
.setName(CLUSTER)
.setClusterType(Cluster.CustomClusterType.newBuilder()
.setName("envoy.clusters.aggregate")
.setTypedConfig(Any.pack(ClusterConfig.newBuilder()
.build())))
.build()));
startXdsDepManager();
verify(helper)
.updateBalancingState(eq(ConnectivityState.TRANSIENT_FAILURE), pickerCaptor.capture());
PickResult result = pickerCaptor.getValue().pickSubchannel(mock(PickSubchannelArgs.class));
Status actualStatus = result.getStatus();
assertThat(actualStatus.getCode()).isEqualTo(Status.Code.UNAVAILABLE);
assertThat(actualStatus.getDescription())
.contains("aggregate ClusterConfig.clusters must not be empty");
assertThat(childBalancers).isEmpty();
}
@Test
public void aggregateCluster_noNonAggregateClusterExits_returnErrorPicker() {
lbRegistry.register(new PriorityLoadBalancerProvider());
CdsLoadBalancerProvider cdsLoadBalancerProvider = new CdsLoadBalancerProvider(lbRegistry);
lbRegistry.register(cdsLoadBalancerProvider);
loadBalancer = (CdsLoadBalancer2) cdsLoadBalancerProvider.newLoadBalancer(helper);
String cluster1 = "cluster-01.googleapis.com";
controlPlaneService.setXdsConfig(ADS_TYPE_URL_CDS, ImmutableMap.of(
// CLUSTER (aggr.) -> [cluster1 (missing)]
CLUSTER, Cluster.newBuilder()
.setName(CLUSTER)
.setClusterType(Cluster.CustomClusterType.newBuilder()
.setName("envoy.clusters.aggregate")
.setTypedConfig(Any.pack(ClusterConfig.newBuilder()
.addClusters(cluster1)
.build())))
.setLbPolicy(Cluster.LbPolicy.RING_HASH)
.build()));
startXdsDepManager();
verify(helper).updateBalancingState(
eq(ConnectivityState.TRANSIENT_FAILURE), pickerCaptor.capture());
Status status = Status.UNAVAILABLE.withDescription(
"CDS resource " + cluster1 + " does not exist nodeID: " + NODE_ID);
assertPickerStatus(pickerCaptor.getValue(), status);
assertThat(childBalancers).isEmpty();
}
@Test
public void handleNameResolutionErrorFromUpstream_beforeChildLbCreated_failingPicker() {
Status status = Status.UNAVAILABLE.withDescription("unreachable");
loadBalancer.handleNameResolutionError(status);
verify(helper).updateBalancingState(
eq(ConnectivityState.TRANSIENT_FAILURE), pickerCaptor.capture());
assertPickerStatus(pickerCaptor.getValue(), status);
}
@Test
public void handleNameResolutionErrorFromUpstream_afterChildLbCreated_fallThrough() {
Cluster cluster = Cluster.newBuilder()
.setName(CLUSTER)
.setType(Cluster.DiscoveryType.EDS)
.setEdsClusterConfig(Cluster.EdsClusterConfig.newBuilder()
.setServiceName(EDS_SERVICE_NAME)
.setEdsConfig(ConfigSource.newBuilder()
.setAds(AggregatedConfigSource.newBuilder())))
.build();
controlPlaneService.setXdsConfig(ADS_TYPE_URL_CDS, ImmutableMap.of(CLUSTER, cluster));
startXdsDepManager();
verify(helper, never()).updateBalancingState(eq(ConnectivityState.TRANSIENT_FAILURE), any());
FakeLoadBalancer childBalancer = Iterables.getOnlyElement(childBalancers);
assertThat(childBalancer.shutdown).isFalse();
loadBalancer.handleNameResolutionError(Status.UNAVAILABLE.withDescription("unreachable"));
assertThat(childBalancer.upstreamError.getCode()).isEqualTo(Code.UNAVAILABLE);
assertThat(childBalancer.upstreamError.getDescription()).isEqualTo("unreachable");
verify(helper).updateBalancingState(
eq(ConnectivityState.CONNECTING), any(SubchannelPicker.class));
}
@Test
public void unknownLbProvider() {
Cluster cluster = Cluster.newBuilder()
.setName(CLUSTER)
.setType(Cluster.DiscoveryType.EDS)
.setEdsClusterConfig(Cluster.EdsClusterConfig.newBuilder()
.setServiceName(EDS_SERVICE_NAME)
.setEdsConfig(ConfigSource.newBuilder()
.setAds(AggregatedConfigSource.newBuilder())))
.setLoadBalancingPolicy(LoadBalancingPolicy.newBuilder()
.addPolicies(Policy.newBuilder()
.setTypedExtensionConfig(TypedExtensionConfig.newBuilder()
.setTypedConfig(Any.pack(TypedStruct.newBuilder()
.setTypeUrl("type.googleapis.com/unknownLb")
.setValue(Struct.getDefaultInstance())
.build())))))
.build();
controlPlaneService.setXdsConfig(ADS_TYPE_URL_CDS, ImmutableMap.of(CLUSTER, cluster));
startXdsDepManager();
verify(helper).updateBalancingState(
eq(ConnectivityState.TRANSIENT_FAILURE), pickerCaptor.capture());
PickResult result = pickerCaptor.getValue().pickSubchannel(mock(PickSubchannelArgs.class));
Status actualStatus = result.getStatus();
assertThat(actualStatus.getCode()).isEqualTo(Status.Code.UNAVAILABLE);
assertThat(actualStatus.getDescription()).contains("Invalid LoadBalancingPolicy");
}
@Test
public void invalidLbConfig() {
Cluster cluster = Cluster.newBuilder()
.setName(CLUSTER)
.setType(Cluster.DiscoveryType.EDS)
.setEdsClusterConfig(Cluster.EdsClusterConfig.newBuilder()
.setServiceName(EDS_SERVICE_NAME)
.setEdsConfig(ConfigSource.newBuilder()
.setAds(AggregatedConfigSource.newBuilder())))
.setLoadBalancingPolicy(LoadBalancingPolicy.newBuilder()
.addPolicies(Policy.newBuilder()
.setTypedExtensionConfig(TypedExtensionConfig.newBuilder()
.setTypedConfig(Any.pack(TypedStruct.newBuilder()
.setTypeUrl("type.googleapis.com/ring_hash_experimental")
.setValue(Struct.newBuilder()
.putFields("minRingSize", Value.newBuilder().setNumberValue(-1).build()))
.build())))))
.build();
controlPlaneService.setXdsConfig(ADS_TYPE_URL_CDS, ImmutableMap.of(CLUSTER, cluster));
startXdsDepManager();
verify(helper).updateBalancingState(
eq(ConnectivityState.TRANSIENT_FAILURE), pickerCaptor.capture());
PickResult result = pickerCaptor.getValue().pickSubchannel(mock(PickSubchannelArgs.class));
Status actualStatus = result.getStatus();
assertThat(actualStatus.getCode()).isEqualTo(Status.Code.UNAVAILABLE);
assertThat(actualStatus.getDescription()).contains("Invalid 'minRingSize'");
}
private void startXdsDepManager() {
startXdsDepManager(new CdsConfig(CLUSTER));
}
private void startXdsDepManager(final CdsConfig cdsConfig) {
xdsDepManager.start(
xdsConfig -> {
if (!xdsConfig.hasValue()) {
throw new AssertionError("" + xdsConfig.getStatus());
}
this.lastXdsConfig = xdsConfig.getValue();
if (loadBalancer == null) {
return;
}
loadBalancer.acceptResolvedAddresses(ResolvedAddresses.newBuilder()
.setAddresses(Collections.emptyList())
.setAttributes(Attributes.newBuilder()
.set(XdsAttributes.XDS_CONFIG, xdsConfig.getValue())
.set(XdsAttributes.XDS_CLUSTER_SUBSCRIPT_REGISTRY, xdsDepManager)
.build())
.setLoadBalancingPolicyConfig(cdsConfig)
.build());
});
// trigger does not exist timer, so broken config is more obvious
fakeClock.forwardTime(10, TimeUnit.MINUTES);
}
private static void assertPickerStatus(SubchannelPicker picker, Status expectedStatus) {
PickResult result = picker.pickSubchannel(mock(PickSubchannelArgs.class));
Status actualStatus = result.getStatus();
assertThat(actualStatus.getCode()).isEqualTo(expectedStatus.getCode());
assertThat(actualStatus.getDescription()).isEqualTo(expectedStatus.getDescription());
}
private final | CdsLoadBalancer2Test |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestCompositeService.java | {
"start": 33846,
"end": 34075
} | class ____ extends CompositeService {
public void addTestService(CompositeService service) {
addService(service);
}
public ServiceManager(String name) {
super(name);
}
}
public static | ServiceManager |
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/tags/EscapeBodyTag.java | {
"start": 2333,
"end": 4125
} | class ____ extends HtmlEscapingAwareTag implements BodyTag {
private boolean javaScriptEscape = false;
private @Nullable BodyContent bodyContent;
/**
* Set JavaScript escaping for this tag, as boolean value.
* Default is "false".
*/
public void setJavaScriptEscape(boolean javaScriptEscape) throws JspException {
this.javaScriptEscape = javaScriptEscape;
}
@Override
protected int doStartTagInternal() {
// do nothing
return EVAL_BODY_BUFFERED;
}
@Override
public void doInitBody() {
// do nothing
}
@Override
public void setBodyContent(BodyContent bodyContent) {
this.bodyContent = bodyContent;
}
@Override
public int doAfterBody() throws JspException {
try {
String content = readBodyContent();
// HTML and/or JavaScript escape, if demanded
content = htmlEscape(content);
content = (this.javaScriptEscape ? JavaScriptUtils.javaScriptEscape(content) : content);
writeBodyContent(content);
}
catch (IOException ex) {
throw new JspException("Could not write escaped body", ex);
}
return (SKIP_BODY);
}
/**
* Read the unescaped body content from the page.
* @return the original content
* @throws IOException if reading failed
*/
protected String readBodyContent() throws IOException {
Assert.state(this.bodyContent != null, "No BodyContent set");
return this.bodyContent.getString();
}
/**
* Write the escaped body content to the page.
* <p>Can be overridden in subclasses, for example, for testing purposes.
* @param content the content to write
* @throws IOException if writing failed
*/
protected void writeBodyContent(String content) throws IOException {
Assert.state(this.bodyContent != null, "No BodyContent set");
this.bodyContent.getEnclosingWriter().print(content);
}
}
| EscapeBodyTag |
java | quarkusio__quarkus | extensions/hibernate-validator/deployment/src/test/java/io/quarkus/hibernate/validator/test/config/ConfigMappingInjectionInValidatorTest.java | {
"start": 1441,
"end": 1681
} | interface ____ {
@WithDefault("true")
boolean isValid();
}
@Target({ TYPE, ANNOTATION_TYPE })
@Retention(RUNTIME)
@Constraint(validatedBy = { ValidEntityValidator.class })
@Documented
public @ | ValidConfig |
java | apache__camel | core/camel-base-engine/src/main/java/org/apache/camel/impl/engine/AbstractCamelContext.java | {
"start": 181624,
"end": 183690
} | class ____ implements AutoCloseable {
final Map<String, String> originalContextMap;
final ClassLoader tccl;
LifecycleHelper() {
// Using the ApplicationClassLoader as the default for TCCL
tccl = Thread.currentThread().getContextClassLoader();
if (applicationContextClassLoader != null) {
Thread.currentThread().setContextClassLoader(applicationContextClassLoader);
}
if (isUseMDCLogging()) {
originalContextMap = MDC.getCopyOfContextMap();
MDC.put(MDC_CAMEL_CONTEXT_ID, camelContextExtension.getName());
} else {
originalContextMap = null;
}
}
@Override
public void close() {
if (isUseMDCLogging()) {
if (originalContextMap != null) {
MDC.setContextMap(originalContextMap);
} else {
MDC.clear();
}
}
Thread.currentThread().setContextClassLoader(tccl);
}
}
@Override
public Registry getRegistry() {
return camelContextExtension.getRegistry();
}
Set<EndpointStrategy> getEndpointStrategies() {
return endpointStrategies;
}
Set<AutoMockInterceptStrategy> getAutoMockInterceptStrategies() {
return autoMockInterceptStrategies;
}
List<RouteStartupOrder> getRouteStartupOrder() {
return routeStartupOrder;
}
InternalServiceManager getInternalServiceManager() {
return internalServiceManager;
}
/*
* This method exists for testing purposes only: we need to make sure we don't leak bootstraps.
* This allows us to check for leaks without compromising the visibility/access on the DefaultCamelContextExtension.
* Check the test AddRoutesAtRuntimeTest for details.
*/
@SuppressWarnings("unused")
private List<BootstrapCloseable> getBootstraps() {
return camelContextExtension.getBootstraps();
}
}
| LifecycleHelper |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java | {
"start": 51098,
"end": 56208
} | class ____ extends DocumentParserContext {
private final ContentPath path = new ContentPath();
private final XContentParser parser;
private final LuceneDocument document;
private final List<LuceneDocument> documents = new ArrayList<>();
private final long maxAllowedNumNestedDocs;
private long numNestedDocs;
private boolean docsReversed = false;
private final BytesRef tsid;
RootDocumentParserContext(
MappingLookup mappingLookup,
MappingParserContext mappingParserContext,
SourceToParse source,
XContentParser parser
) throws IOException {
super(
mappingLookup,
mappingParserContext,
source,
mappingLookup.getMapping().getRoot(),
ObjectMapper.Dynamic.getRootDynamic(mappingLookup)
);
IndexSettings indexSettings = mappingParserContext.getIndexSettings();
BytesRef tsid = source.tsid();
if (tsid == null
&& indexSettings.getMode() == IndexMode.TIME_SERIES
&& indexSettings.getIndexRouting() instanceof IndexRouting.ExtractFromSource.ForIndexDimensions forIndexDimensions) {
// the tsid is normally set on the coordinating node during shard routing and passed to the data node via the index request
// but when applying a translog operation, shard routing is not happening, and we have to create the tsid from source
tsid = forIndexDimensions.buildTsid(source.getXContentType(), source.source());
}
this.tsid = tsid;
assert this.tsid == null || indexSettings.getMode() == IndexMode.TIME_SERIES
: "tsid should only be set for time series indices";
if (mappingLookup.getMapping().getRoot().subobjects() == ObjectMapper.Subobjects.ENABLED) {
this.parser = DotExpandingXContentParser.expandDots(parser, this.path);
} else {
this.parser = parser;
}
this.document = new LuceneDocument();
this.documents.add(document);
this.maxAllowedNumNestedDocs = indexSettings().getMappingNestedDocsLimit();
this.numNestedDocs = 0L;
}
@Override
public Mapper getMapper(String name) {
Mapper mapper = getMetadataMapper(name);
if (mapper != null) {
return mapper;
}
return super.getMapper(name);
}
@Override
public ContentPath path() {
return this.path;
}
@Override
public XContentParser parser() {
return this.parser;
}
@Override
public LuceneDocument rootDoc() {
return documents.get(0);
}
@Override
public LuceneDocument doc() {
return this.document;
}
@Override
protected void addDoc(LuceneDocument doc) {
numNestedDocs++;
if (numNestedDocs > maxAllowedNumNestedDocs) {
throw new DocumentParsingException(
parser.getTokenLocation(),
"The number of nested documents has exceeded the allowed limit of ["
+ maxAllowedNumNestedDocs
+ "]."
+ " This limit can be set by changing the ["
+ MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING.getKey()
+ "] index level setting."
);
}
this.documents.add(doc);
}
@Override
public BytesRef getTsid() {
return this.tsid;
}
@Override
public Iterable<LuceneDocument> nonRootDocuments() {
if (docsReversed) {
throw new IllegalStateException("documents are already reversed");
}
return documents.subList(1, documents.size());
}
/**
* Returns a copy of the provided {@link List} where parent documents appear
* after their children.
*/
private List<LuceneDocument> reorderParentAndGetDocs() {
if (documents.size() > 1 && docsReversed == false) {
docsReversed = true;
// We preserve the order of the children while ensuring that parents appear after them.
List<LuceneDocument> newDocs = new ArrayList<>(documents.size());
LinkedList<LuceneDocument> parents = new LinkedList<>();
for (LuceneDocument doc : documents) {
while (parents.peek() != doc.getParent()) {
newDocs.add(parents.poll());
}
parents.add(0, doc);
}
newDocs.addAll(parents);
documents.clear();
documents.addAll(newDocs);
}
return documents;
}
}
}
| RootDocumentParserContext |
java | spring-projects__spring-framework | spring-webmvc/src/main/java/org/springframework/web/servlet/config/FreeMarkerConfigurerBeanDefinitionParser.java | {
"start": 1328,
"end": 2384
} | class ____ extends AbstractSingleBeanDefinitionParser {
/**
* The bean name used for the {@code FreeMarkerConfigurer}.
*/
public static final String BEAN_NAME = "mvcFreeMarkerConfigurer";
@Override
protected String getBeanClassName(Element element) {
return "org.springframework.web.servlet.view.freemarker.FreeMarkerConfigurer";
}
@Override
protected String resolveId(Element element, AbstractBeanDefinition definition, ParserContext parserContext) {
return BEAN_NAME;
}
@Override
protected void doParse(Element element, ParserContext parserContext, BeanDefinitionBuilder builder) {
List<Element> childElements = DomUtils.getChildElementsByTagName(element, "template-loader-path");
if (!childElements.isEmpty()) {
List<String> locations = new ArrayList<>(childElements.size());
for (Element childElement : childElements) {
locations.add(childElement.getAttribute("location"));
}
builder.addPropertyValue("templateLoaderPaths", StringUtils.toStringArray(locations));
}
}
}
| FreeMarkerConfigurerBeanDefinitionParser |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsonschema/NewSchemaTest.java | {
"start": 2952,
"end": 13203
} | class ____
extends JsonFormatVisitorWrapper.Base
{
// Implement handlers just to get more exercise...
@Override
public JsonObjectFormatVisitor expectObjectFormat(JavaType type) {
return new JsonObjectFormatVisitor.Base(getContext()) {
@Override
public void property(BeanProperty prop) {
_visit(prop);
}
@Override
public void property(String name, JsonFormatVisitable handler,
JavaType propertyTypeHint) { }
@Override
public void optionalProperty(BeanProperty prop) {
_visit(prop);
}
@Override
public void optionalProperty(String name, JsonFormatVisitable handler,
JavaType propertyTypeHint) { }
private void _visit(BeanProperty prop)
{
if (!(prop instanceof BeanPropertyWriter)) {
return;
}
BeanPropertyWriter bpw = (BeanPropertyWriter) prop;
ValueSerializer<?> ser = bpw.getSerializer();
final SerializationContext prov = getContext();
if (ser == null) {
if (prov == null) {
throw new Error("SerializationContext missing");
}
ser = prov.findPrimaryPropertySerializer(prop.getType(), prop);
}
JsonFormatVisitorWrapper visitor = new JsonFormatVisitorWrapper.Base(getContext());
ser.acceptJsonFormatVisitor(visitor, prop.getType());
}
};
}
@Override
public JsonArrayFormatVisitor expectArrayFormat(JavaType type) {
return new JsonArrayFormatVisitor.Base(getContext());
}
@Override
public JsonStringFormatVisitor expectStringFormat(JavaType type) {
return new JsonStringFormatVisitor.Base();
}
@Override
public JsonNumberFormatVisitor expectNumberFormat(JavaType type) {
return new JsonNumberFormatVisitor.Base();
}
@Override
public JsonIntegerFormatVisitor expectIntegerFormat(JavaType type) {
return new JsonIntegerFormatVisitor.Base();
}
@Override
public JsonBooleanFormatVisitor expectBooleanFormat(JavaType type) {
return new JsonBooleanFormatVisitor.Base();
}
@Override
public JsonNullFormatVisitor expectNullFormat(JavaType type) {
return new JsonNullFormatVisitor.Base();
}
@Override
public JsonAnyFormatVisitor expectAnyFormat(JavaType type) {
return new JsonAnyFormatVisitor.Base();
}
@Override
public JsonMapFormatVisitor expectMapFormat(JavaType type) {
return new JsonMapFormatVisitor.Base();
}
}
/*
/**********************************************************
/* Test methods
/**********************************************************
*/
private final ObjectMapper MAPPER = newJsonMapper();
/* Silly little test for simply triggering traversal, without attempting to
* verify what is being reported. Smoke test that should trigger problems
* if basic POJO type/serializer traversal had issues.
*/
@Test
public void testBasicTraversal() throws Exception
{
MAPPER.acceptJsonFormatVisitor(POJO.class, new BogusJsonFormatVisitorWrapper());
MAPPER.acceptJsonFormatVisitor(POJOWithScalars.class, new BogusJsonFormatVisitorWrapper());
MAPPER.acceptJsonFormatVisitor(LinkedHashMap.class, new BogusJsonFormatVisitorWrapper());
MAPPER.acceptJsonFormatVisitor(ArrayList.class, new BogusJsonFormatVisitorWrapper());
MAPPER.acceptJsonFormatVisitor(EnumSet.class, new BogusJsonFormatVisitorWrapper());
MAPPER.acceptJsonFormatVisitor(POJOWithRefs.class, new BogusJsonFormatVisitorWrapper());
MAPPER.acceptJsonFormatVisitor(POJOWithJsonValue.class, new BogusJsonFormatVisitorWrapper());
}
@Test
public void testSimpleEnum() throws Exception
{
final Set<String> values = new TreeSet<>();
ObjectWriter w = MAPPER.writer(EnumFeature.WRITE_ENUMS_USING_TO_STRING);
w.acceptJsonFormatVisitor(TestEnum.class, new JsonFormatVisitorWrapper.Base() {
@Override
public JsonStringFormatVisitor expectStringFormat(JavaType type) {
return new JsonStringFormatVisitor() {
@Override
public void enumTypes(Set<String> enums) {
values.addAll(enums);
}
@Override
public void format(JsonValueFormat format) { }
};
}
});
assertEquals(3, values.size());
TreeSet<String> exp = new TreeSet<>(Arrays.asList(
"ToString:A",
"ToString:B",
"ToString:C"
));
assertEquals(exp, values);
}
@Test
public void testEnumWithJsonValue() throws Exception
{
final Set<String> values = new TreeSet<String>();
MAPPER.acceptJsonFormatVisitor(TestEnumWithJsonValue.class,
new JsonFormatVisitorWrapper.Base() {
@Override
public JsonStringFormatVisitor expectStringFormat(JavaType type) {
return new JsonStringFormatVisitor() {
@Override
public void enumTypes(Set<String> enums) {
values.addAll(enums);
}
@Override
public void format(JsonValueFormat format) { }
};
}
});
assertEquals(3, values.size());
TreeSet<String> exp = new TreeSet<String>(Arrays.asList(
"value-A",
"value-B",
"value-C"
));
assertEquals(exp, values);
}
// Ensure JsonValueFormat serializes/deserializes as expected
@Test
public void testJsonValueFormatHandling() throws Exception
{
// first: serialize using 'toString()', not name
final String EXP = q("host-name");
assertEquals(EXP, MAPPER.writeValueAsString(JsonValueFormat.HOST_NAME));
// and second, deserialize ok from that as well
assertSame(JsonValueFormat.HOST_NAME, MAPPER.readValue(EXP, JsonValueFormat.class));
}
// [databind#1045], regression wrt BigDecimal
@Test
public void testSimpleNumbers() throws Exception
{
final StringBuilder sb = new StringBuilder();
MAPPER.acceptJsonFormatVisitor(Numbers.class,
new JsonFormatVisitorWrapper.Base() {
@Override
public JsonObjectFormatVisitor expectObjectFormat(final JavaType type) {
return new JsonObjectFormatVisitor.Base(getContext()) {
@Override
public void optionalProperty(BeanProperty prop) {
sb.append("[optProp ").append(prop.getName()).append("(");
ValueSerializer<Object> ser = null;
if (prop instanceof BeanPropertyWriter) {
BeanPropertyWriter bpw = (BeanPropertyWriter) prop;
ser = bpw.getSerializer();
}
final SerializationContext prov = getContext();
if (ser == null) {
ser = prov.findPrimaryPropertySerializer(prop.getType(), prop);
}
ser.acceptJsonFormatVisitor(new JsonFormatVisitorWrapper.Base() {
@Override
public JsonNumberFormatVisitor expectNumberFormat(
JavaType t) {
return new JsonNumberFormatVisitor() {
@Override
public void format(JsonValueFormat format) {
sb.append("[numberFormat=").append(format).append("]");
}
@Override
public void enumTypes(Set<String> enums) { }
@Override
public void numberType(NumberType numberType) {
sb.append("[numberType=").append(numberType).append("]");
}
};
}
@Override
public JsonIntegerFormatVisitor expectIntegerFormat(JavaType t) {
return new JsonIntegerFormatVisitor() {
@Override
public void format(JsonValueFormat format) {
sb.append("[integerFormat=").append(format).append("]");
}
@Override
public void enumTypes(Set<String> enums) { }
@Override
public void numberType(NumberType numberType) {
sb.append("[numberType=").append(numberType).append("]");
}
};
}
}, prop.getType());
sb.append(")]");
}
};
}
});
assertEquals("[optProp dec([numberType=BIG_DECIMAL])][optProp bigInt([numberType=BIG_INTEGER])]",
sb.toString());
}
}
| BogusJsonFormatVisitorWrapper |
java | apache__maven | impl/maven-testing/src/test/java/org/apache/maven/api/plugin/testing/ExpressionEvaluatorTest.java | {
"start": 10118,
"end": 11119
} | class ____ {
private String field1;
private int field2;
public String getField1() {
return field1;
}
public void setField1(String field1) {
this.field1 = field1;
}
public int getField2() {
return field2;
}
public void setField2(int field2) {
this.field2 = field2;
}
}
@Provides
@SuppressWarnings("unused")
Session session() {
Session session = SessionMock.getMockSession(LOCAL_REPO);
doReturn(new Properties()).when(session).getSystemProperties();
doReturn(new Properties()).when(session).getUserProperties();
doAnswer(iom -> Paths.get(MojoExtension.getBasedir())).when(session).getRootDirectory();
return session;
}
@Provides
Project project() {
ProjectStub project = new ProjectStub();
project.setBasedir(Paths.get(MojoExtension.getBasedir()));
return project;
}
}
| TestBean |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/exceptionhandling/TransientObjectExceptionHandlingTest.java | {
"start": 659,
"end": 2220
} | class ____ extends BaseExceptionHandlingTest {
public TransientObjectExceptionHandlingTest(BootstrapMethod bootstrapMethod,
ExceptionExpectations exceptionExpectations) {
super( bootstrapMethod, exceptionExpectations );
}
@Override
protected Class<?>[] getAnnotatedClasses() {
return new Class[] { A.class, AInfo.class };
}
@Test
public void testPersist() {
TransactionUtil.inTransaction( sessionFactory(), (s) -> {
A a = new A();
a.id = 1;
a.aInfo = new AInfo();
try {
s.persist( a );
fail( "should have thrown an exception" );
}
catch (RuntimeException expected) {
exceptionExpectations.onTransientObjectOnPersistAndMergeAndFlush( expected );
}
} );
}
@Test
public void testMerge() {
TransactionUtil.inTransaction( sessionFactory(), (s) -> {
A a = new A();
a.id = 1;
a.aInfo = new AInfo();
try {
s.merge( a );
fail( "should have thrown an exception" );
}
catch (RuntimeException expected) {
exceptionExpectations.onTransientObjectOnPersistAndMergeAndFlush( expected );
}
} );
}
@Test
public void testMergeFlush() {
TransactionUtil.inTransaction( sessionFactory(), (s) -> {
A a = new A();
a.id = 1;
a.aInfo = new AInfo();
try {
s.merge( a );
s.flush();
fail( "should have thrown an exception" );
}
catch (RuntimeException expected) {
exceptionExpectations.onTransientObjectOnPersistAndMergeAndFlush( expected );
}
} );
}
@SuppressWarnings("unused")
@Entity(name = "A")
public static | TransientObjectExceptionHandlingTest |
java | quarkusio__quarkus | extensions/spring-scheduled/deployment/src/test/java/io/quarkus/spring/scheduled/deployment/UnsupportedFixedDelayStringParamTest.java | {
"start": 828,
"end": 964
} | class ____ {
@Scheduled(fixedDelayString = "1000")
void unsupportedParamFixedDelayString() {
}
}
}
| InvalidBean |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/internal/SessionFactoryImpl.java | {
"start": 41479,
"end": 41534
} | enum ____ {
OPEN,
CLOSING,
CLOSED
}
private | Status |
java | spring-projects__spring-boot | module/spring-boot-data-elasticsearch/src/main/java/org/springframework/boot/data/elasticsearch/autoconfigure/DataElasticsearchReactiveRepositoriesRegistrar.java | {
"start": 1369,
"end": 1956
} | class ____ extends AbstractRepositoryConfigurationSourceSupport {
@Override
protected Class<? extends Annotation> getAnnotation() {
return EnableReactiveElasticsearchRepositories.class;
}
@Override
protected Class<?> getConfiguration() {
return EnableElasticsearchRepositoriesConfiguration.class;
}
@Override
protected RepositoryConfigurationExtension getRepositoryConfigurationExtension() {
return new ReactiveElasticsearchRepositoryConfigurationExtension();
}
@EnableReactiveElasticsearchRepositories
private static final | DataElasticsearchReactiveRepositoriesRegistrar |
java | apache__camel | components/camel-servlet/src/test/java/org/apache/camel/component/servlet/rest/RestServletNoContentRestConfigTest.java | {
"start": 1242,
"end": 7693
} | class ____ extends ServletCamelRouterTestSupport {
@Test
public void testEmptyJson204ConfigNoContentEnabled() throws Exception {
WebRequest req = new GetMethodWebRequest(contextUrl + "/services/v1/empty/country");
WebResponse response = query(req, false);
assertEquals(204, response.getResponseCode());
assertTrue(response.getText().isEmpty());
}
@Test
public void testEmptyXml204ConfigNoContentEnabled() throws Exception {
WebRequest req = new GetMethodWebRequest(contextUrl + "/services/v1/empty/address");
WebResponse response = query(req, false);
assertEquals(204, response.getResponseCode());
assertTrue(response.getText().isEmpty());
}
@Test
public void testEmptyJson200RestConfigNoContentDisabled() throws Exception {
WebRequest req = new GetMethodWebRequest(contextUrl + "/services/v2/empty/country");
WebResponse response = query(req, false);
assertEquals(200, response.getResponseCode());
assertTrue(response.getText().equals("[]"));
}
@Test
public void testEmptyXml200RestConfigNoContentDisabled() throws Exception {
WebRequest req = new GetMethodWebRequest(contextUrl + "/services/v2/empty/address");
WebResponse response = query(req, false);
assertEquals(200, response.getResponseCode());
assertEquals("<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n" +
"<address:address xmlns:address=\"http://www.camel.apache.org/jaxb/example/address/1\"/>\n",
response.getText());
}
@Test
public void testEmpty200VerbNoContentDisabled() throws Exception {
WebRequest req = new GetMethodWebRequest(contextUrl + "/services/v3/empty/country");
WebResponse response = query(req, false);
assertEquals(200, response.getResponseCode());
assertTrue(response.getText().equals("[]"));
}
@Test
public void testJson200ConfigNoContentEnabled() throws Exception {
WebRequest req = new GetMethodWebRequest(contextUrl + "/services/country");
WebResponse response = query(req, false);
assertEquals(200, response.getResponseCode());
assertEquals("{\"iso\":\"EN\",\"country\":\"England\"}",
response.getText());
}
@Test
public void testXml200ConfigNoContentEnabled() throws Exception {
WebRequest req = new GetMethodWebRequest(contextUrl + "/services/address");
WebResponse response = query(req, false);
assertEquals(200, response.getResponseCode());
assertEquals("<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n" +
"<address:address xmlns:address=\"http://www.camel.apache.org/jaxb/example/address/1\">\n" +
" <address:street>Main Street</address:street>\n" +
" <address:streetNumber>3a</address:streetNumber>\n" +
" <address:zip>65843</address:zip>\n" +
" <address:city>Sulzbach</address:city>\n" +
"</address:address>\n",
response.getText());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
restConfiguration().component("servlet").host("localhost")
.bindingMode(RestBindingMode.auto).enableNoContentResponse(true);
rest("/v1/empty")
.get("/country").to("direct:v1country")
.get("/address").to("direct:v1address").produces("application/xml").type(Address.class);
rest("/v2/empty/").enableNoContentResponse(false)
.get("/country").to("direct:v2country")
.get("/address").to("direct:v2address").produces("application/xml").type(Address.class);
rest("/v3/empty")
.get("/country").to("direct:v3country").enableNoContentResponse(false);
rest()
.get("/country").to("direct:v4country")
.get("/address").to("direct:v3address").produces("application/xml").type(Address.class);
from("direct:v1country").transform().constant(new java.util.ArrayList<CountryPojo>());
from("direct:v2country").transform().constant(new java.util.ArrayList<CountryPojo>());
from("direct:v3country").transform().constant(new java.util.ArrayList<CountryPojo>());
CountryPojo country = new CountryPojo();
country.setIso("EN");
country.setCountry("England");
from("direct:v4country").transform().constant(country);
Address emptyAddress = new Address();
from("direct:v1address")
.setHeader(JaxbConstants.JAXB_PART_CLASS, simple("org.apache.camel.component.servlet.rest.Address"))
.setHeader(JaxbConstants.JAXB_PART_NAMESPACE,
simple("{http://www.camel.apache.org/jaxb/example/address/1}address"))
.transform()
.constant(emptyAddress);
from("direct:v2address")
.setHeader(JaxbConstants.JAXB_PART_CLASS, simple("org.apache.camel.component.servlet.rest.Address"))
.setHeader(JaxbConstants.JAXB_PART_NAMESPACE,
simple("{http://www.camel.apache.org/jaxb/example/address/1}address"))
.transform()
.constant(emptyAddress);
Address address = new Address();
address.setStreet("Main Street");
address.setStreetNumber("3a");
address.setZip("65843");
address.setCity("Sulzbach");
from("direct:v3address")
.setHeader(JaxbConstants.JAXB_PART_CLASS, simple("org.apache.camel.component.servlet.rest.Address"))
.setHeader(JaxbConstants.JAXB_PART_NAMESPACE,
simple("{http://www.camel.apache.org/jaxb/example/address/1}address"))
.transform()
.constant(address);
}
};
}
}
| RestServletNoContentRestConfigTest |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/path/PathAssert_isEmptyDirectory_Test.java | {
"start": 885,
"end": 1207
} | class ____ extends PathAssertBaseTest {
@Override
protected PathAssert invoke_api_method() {
return assertions.isEmptyDirectory();
}
@Override
protected void verify_internal_effects() {
verify(paths).assertIsEmptyDirectory(getInfo(assertions), getActual(assertions));
}
}
| PathAssert_isEmptyDirectory_Test |
java | spring-projects__spring-security | oauth2/oauth2-resource-server/src/test/java/org/springframework/security/oauth2/server/resource/introspection/SpringOpaqueTokenIntrospectorTests.java | {
"start": 2388,
"end": 19168
} | class ____ {
private static final ParameterizedTypeReference<Map<String, Object>> STRING_OBJECT_MAP = new ParameterizedTypeReference<Map<String, Object>>() {
};
private static final String INTROSPECTION_URL = "https://server.example.com";
private static final String CLIENT_ID = "client";
private static final String CLIENT_SECRET = "secret";
// @formatter:off
private static final String ACTIVE_RESPONSE = "{\n"
+ " \"active\": true,\n"
+ " \"client_id\": \"l238j323ds-23ij4\",\n"
+ " \"username\": \"jdoe\",\n"
+ " \"scope\": \"read write dolphin\",\n"
+ " \"sub\": \"Z5O3upPC88QrAjx00dis\",\n"
+ " \"aud\": \"https://protected.example.net/resource\",\n"
+ " \"iss\": \"https://server.example.com/\",\n"
+ " \"exp\": 1419356238,\n"
+ " \"iat\": 1419350238,\n"
+ " \"extension_field\": \"twenty-seven\"\n"
+ " }";
// @formatter:on
// @formatter:off
private static final String INACTIVE_RESPONSE = "{\n"
+ " \"active\": false\n"
+ " }";
// @formatter:on
// @formatter:off
private static final String INVALID_RESPONSE = "{\n"
+ " \"client_id\": \"l238j323ds-23ij4\",\n"
+ " \"username\": \"jdoe\",\n"
+ " \"scope\": \"read write dolphin\",\n"
+ " \"sub\": \"Z5O3upPC88QrAjx00dis\",\n"
+ " \"aud\": \"https://protected.example.net/resource\",\n"
+ " \"iss\": \"https://server.example.com/\",\n"
+ " \"exp\": 1419356238,\n"
+ " \"iat\": 1419350238,\n"
+ " \"extension_field\": \"twenty-seven\"\n"
+ " }";
// @formatter:on
// @formatter:off
private static final String MALFORMED_SCOPE_RESPONSE = "{\n"
+ " \"active\": true,\n"
+ " \"client_id\": \"l238j323ds-23ij4\",\n"
+ " \"username\": \"jdoe\",\n"
+ " \"scope\": [ \"read\", \"write\", \"dolphin\" ],\n"
+ " \"sub\": \"Z5O3upPC88QrAjx00dis\",\n"
+ " \"aud\": \"https://protected.example.net/resource\",\n"
+ " \"iss\": \"https://server.example.com/\",\n"
+ " \"exp\": 1419356238,\n"
+ " \"iat\": 1419350238,\n"
+ " \"extension_field\": \"twenty-seven\"\n"
+ " }";
// @formatter:on
private static final ResponseEntity<Map<String, Object>> ACTIVE = response(ACTIVE_RESPONSE);
private static final ResponseEntity<Map<String, Object>> INACTIVE = response(INACTIVE_RESPONSE);
private static final ResponseEntity<Map<String, Object>> INVALID = response(INVALID_RESPONSE);
private static final ResponseEntity<Map<String, Object>> MALFORMED_SCOPE = response(MALFORMED_SCOPE_RESPONSE);
@Test
public void introspectWhenActiveTokenThenOk() throws Exception {
try (MockWebServer server = new MockWebServer()) {
server.setDispatcher(requiresAuth(CLIENT_ID, CLIENT_SECRET, ACTIVE_RESPONSE));
String introspectUri = server.url("/introspect").toString();
OpaqueTokenIntrospector introspectionClient = new SpringOpaqueTokenIntrospector(introspectUri, CLIENT_ID,
CLIENT_SECRET);
OAuth2AuthenticatedPrincipal authority = introspectionClient.introspect("token");
// @formatter:off
assertThat(authority.getAttributes())
.isNotNull()
.containsEntry(OAuth2TokenIntrospectionClaimNames.ACTIVE, true)
.containsEntry(OAuth2TokenIntrospectionClaimNames.AUD,
Arrays.asList("https://protected.example.net/resource"))
.containsEntry(OAuth2TokenIntrospectionClaimNames.CLIENT_ID, "l238j323ds-23ij4")
.containsEntry(OAuth2TokenIntrospectionClaimNames.EXP, Instant.ofEpochSecond(1419356238))
.containsEntry(OAuth2TokenIntrospectionClaimNames.ISS, "https://server.example.com/")
.containsEntry(OAuth2TokenIntrospectionClaimNames.SCOPE, Arrays.asList("read", "write", "dolphin"))
.containsEntry(OAuth2TokenIntrospectionClaimNames.SUB, "Z5O3upPC88QrAjx00dis")
.containsEntry(OAuth2TokenIntrospectionClaimNames.USERNAME, "jdoe")
.containsEntry("extension_field", "twenty-seven");
// @formatter:on
}
}
@Test
public void introspectWhenBadClientCredentialsThenError() throws IOException {
try (MockWebServer server = new MockWebServer()) {
server.setDispatcher(requiresAuth(CLIENT_ID, CLIENT_SECRET, ACTIVE_RESPONSE));
String introspectUri = server.url("/introspect").toString();
OpaqueTokenIntrospector introspectionClient = new SpringOpaqueTokenIntrospector(introspectUri, CLIENT_ID,
"wrong");
assertThatExceptionOfType(OAuth2IntrospectionException.class)
.isThrownBy(() -> introspectionClient.introspect("token"));
}
}
@Test
public void introspectWhenInactiveTokenThenInvalidToken() {
RestOperations restOperations = mock(RestOperations.class);
OpaqueTokenIntrospector introspectionClient = new SpringOpaqueTokenIntrospector(INTROSPECTION_URL,
restOperations);
given(restOperations.exchange(any(RequestEntity.class), eq(STRING_OBJECT_MAP))).willReturn(INACTIVE);
// @formatter:off
assertThatExceptionOfType(OAuth2IntrospectionException.class)
.isThrownBy(() -> introspectionClient.introspect("token"))
.withMessage("Provided token isn't active");
// @formatter:on
}
@Test
public void introspectWhenActiveTokenThenParsesValuesInResponse() {
Map<String, Object> introspectedValues = new HashMap<>();
introspectedValues.put(OAuth2TokenIntrospectionClaimNames.ACTIVE, true);
introspectedValues.put(OAuth2TokenIntrospectionClaimNames.AUD, Arrays.asList("aud"));
introspectedValues.put(OAuth2TokenIntrospectionClaimNames.NBF, 29348723984L);
RestOperations restOperations = mock(RestOperations.class);
OpaqueTokenIntrospector introspectionClient = new SpringOpaqueTokenIntrospector(INTROSPECTION_URL,
restOperations);
given(restOperations.exchange(any(RequestEntity.class), eq(STRING_OBJECT_MAP)))
.willReturn(response(introspectedValues));
OAuth2AuthenticatedPrincipal authority = introspectionClient.introspect("token");
// @formatter:off
assertThat(authority.getAttributes())
.isNotNull()
.containsEntry(OAuth2TokenIntrospectionClaimNames.ACTIVE, true)
.containsEntry(OAuth2TokenIntrospectionClaimNames.AUD, Arrays.asList("aud"))
.containsEntry(OAuth2TokenIntrospectionClaimNames.NBF, Instant.ofEpochSecond(29348723984L))
.doesNotContainKey(OAuth2TokenIntrospectionClaimNames.CLIENT_ID)
.doesNotContainKey(OAuth2TokenIntrospectionClaimNames.SCOPE);
// @formatter:on
}
@Test
public void introspectWhenIntrospectionEndpointThrowsExceptionThenInvalidToken() {
RestOperations restOperations = mock(RestOperations.class);
OpaqueTokenIntrospector introspectionClient = new SpringOpaqueTokenIntrospector(INTROSPECTION_URL,
restOperations);
given(restOperations.exchange(any(RequestEntity.class), eq(STRING_OBJECT_MAP)))
.willThrow(new IllegalStateException("server was unresponsive"));
// @formatter:off
assertThatExceptionOfType(OAuth2IntrospectionException.class)
.isThrownBy(() -> introspectionClient.introspect("token"))
.withMessage("server was unresponsive");
// @formatter:on
}
@Test
public void introspectWhenIntrospectionEndpointReturnsMalformedResponseThenInvalidToken() {
RestOperations restOperations = mock(RestOperations.class);
OpaqueTokenIntrospector introspectionClient = new SpringOpaqueTokenIntrospector(INTROSPECTION_URL,
restOperations);
given(restOperations.exchange(any(RequestEntity.class), eq(STRING_OBJECT_MAP))).willReturn(response("{}"));
assertThatExceptionOfType(OAuth2IntrospectionException.class)
.isThrownBy(() -> introspectionClient.introspect("token"));
}
@Test
public void introspectWhenIntrospectionTokenReturnsInvalidResponseThenInvalidToken() {
RestOperations restOperations = mock(RestOperations.class);
OpaqueTokenIntrospector introspectionClient = new SpringOpaqueTokenIntrospector(INTROSPECTION_URL,
restOperations);
given(restOperations.exchange(any(RequestEntity.class), eq(STRING_OBJECT_MAP))).willReturn(INVALID);
assertThatExceptionOfType(OAuth2IntrospectionException.class)
.isThrownBy(() -> introspectionClient.introspect("token"));
}
// gh-7563
@Test
public void introspectWhenIntrospectionTokenReturnsMalformedScopeThenEmptyAuthorities() {
RestOperations restOperations = mock(RestOperations.class);
OpaqueTokenIntrospector introspectionClient = new SpringOpaqueTokenIntrospector(INTROSPECTION_URL,
restOperations);
given(restOperations.exchange(any(RequestEntity.class), eq(STRING_OBJECT_MAP))).willReturn(MALFORMED_SCOPE);
OAuth2AuthenticatedPrincipal principal = introspectionClient.introspect("token");
assertThat(principal.getAuthorities()).isEmpty();
Collection<String> scope = principal.getAttribute("scope");
assertThat(scope).containsExactly("read", "write", "dolphin");
}
// gh-15165
@Test
public void introspectWhenActiveThenMapsAuthorities() {
RestOperations restOperations = mock(RestOperations.class);
OpaqueTokenIntrospector introspectionClient = new SpringOpaqueTokenIntrospector(INTROSPECTION_URL,
restOperations);
given(restOperations.exchange(any(RequestEntity.class), eq(STRING_OBJECT_MAP))).willReturn(ACTIVE);
OAuth2AuthenticatedPrincipal principal = introspectionClient.introspect("token");
assertThat(principal.getAuthorities()).isNotEmpty();
Collection<String> scope = principal.getAttribute("scope");
assertThat(scope).containsExactly("read", "write", "dolphin");
Collection<String> authorities = AuthorityUtils.authorityListToSet(principal.getAuthorities());
assertThat(authorities).containsExactly("SCOPE_read", "SCOPE_write", "SCOPE_dolphin");
}
@Test
public void constructorWhenIntrospectionUriIsNullThenIllegalArgumentException() {
assertThatIllegalArgumentException()
.isThrownBy(() -> new SpringOpaqueTokenIntrospector(null, CLIENT_ID, CLIENT_SECRET));
}
@Test
public void constructorWhenClientIdIsNullThenIllegalArgumentException() {
assertThatIllegalArgumentException()
.isThrownBy(() -> new SpringOpaqueTokenIntrospector(INTROSPECTION_URL, null, CLIENT_SECRET));
}
@Test
public void constructorWhenClientSecretIsNullThenIllegalArgumentException() {
assertThatIllegalArgumentException()
.isThrownBy(() -> new SpringOpaqueTokenIntrospector(INTROSPECTION_URL, CLIENT_ID, null));
}
@Test
public void constructorWhenRestOperationsIsNullThenIllegalArgumentException() {
assertThatIllegalArgumentException()
.isThrownBy(() -> new SpringOpaqueTokenIntrospector(INTROSPECTION_URL, null));
}
@Test
public void setRequestEntityConverterWhenConverterIsNullThenExceptionIsThrown() {
RestOperations restOperations = mock(RestOperations.class);
SpringOpaqueTokenIntrospector introspectionClient = new SpringOpaqueTokenIntrospector(INTROSPECTION_URL,
restOperations);
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> introspectionClient.setRequestEntityConverter(null));
}
@SuppressWarnings("unchecked")
@Test
public void setRequestEntityConverterWhenNonNullConverterGivenThenConverterUsed() {
RestOperations restOperations = mock(RestOperations.class);
Converter<String, RequestEntity<?>> requestEntityConverter = mock(Converter.class);
RequestEntity requestEntity = mock(RequestEntity.class);
String tokenToIntrospect = "some token";
given(requestEntityConverter.convert(tokenToIntrospect)).willReturn(requestEntity);
given(restOperations.exchange(requestEntity, STRING_OBJECT_MAP)).willReturn(ACTIVE);
SpringOpaqueTokenIntrospector introspectionClient = new SpringOpaqueTokenIntrospector(INTROSPECTION_URL,
restOperations);
introspectionClient.setRequestEntityConverter(requestEntityConverter);
introspectionClient.introspect(tokenToIntrospect);
verify(requestEntityConverter).convert(tokenToIntrospect);
}
@Test
public void setAuthenticationConverterWhenConverterIsNullThenExceptionIsThrown() {
RestOperations restOperations = mock(RestOperations.class);
SpringOpaqueTokenIntrospector introspectionClient = new SpringOpaqueTokenIntrospector(INTROSPECTION_URL,
restOperations);
assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> introspectionClient.setAuthenticationConverter(null));
}
@Test
public void setAuthenticationConverterWhenNonNullConverterGivenThenConverterUsed() {
RestOperations restOperations = mock(RestOperations.class);
Converter<String, RequestEntity<?>> requestEntityConverter = mock(Converter.class);
RequestEntity requestEntity = mock(RequestEntity.class);
Converter<OAuth2TokenIntrospectionClaimAccessor, OAuth2AuthenticatedPrincipal> authenticationConverter = mock(
Converter.class);
OAuth2AuthenticatedPrincipal oAuth2AuthenticatedPrincipal = mock(OAuth2AuthenticatedPrincipal.class);
String tokenToIntrospect = "some token";
given(requestEntityConverter.convert(tokenToIntrospect)).willReturn(requestEntity);
given(restOperations.exchange(requestEntity, STRING_OBJECT_MAP)).willReturn(response(ACTIVE_RESPONSE));
given(authenticationConverter.convert(any())).willReturn(oAuth2AuthenticatedPrincipal);
SpringOpaqueTokenIntrospector introspectionClient = new SpringOpaqueTokenIntrospector(INTROSPECTION_URL,
restOperations);
introspectionClient.setRequestEntityConverter(requestEntityConverter);
introspectionClient.setAuthenticationConverter(authenticationConverter);
introspectionClient.introspect(tokenToIntrospect);
verify(authenticationConverter).convert(any());
}
@Test
public void introspectWithoutEncodeClientCredentialsThenExceptionIsThrown() throws Exception {
try (MockWebServer server = new MockWebServer()) {
String response = """
{
"active": true,
"username": "client%&1"
}
""";
server.setDispatcher(requiresAuth("client%25%261", "secret%40%242", response));
String introspectUri = server.url("/introspect").toString();
OpaqueTokenIntrospector introspectionClient = new SpringOpaqueTokenIntrospector(introspectUri, "client%&1",
"secret@$2");
assertThatExceptionOfType(OAuth2IntrospectionException.class)
.isThrownBy(() -> introspectionClient.introspect("token"));
}
}
@Test
public void introspectWithEncodeClientCredentialsThenOk() throws Exception {
try (MockWebServer server = new MockWebServer()) {
String response = """
{
"active": true,
"username": "client&1"
}
""";
server.setDispatcher(requiresAuth("client%261", "secret%40%242", response));
String introspectUri = server.url("/introspect").toString();
OpaqueTokenIntrospector introspectionClient = SpringOpaqueTokenIntrospector
.withIntrospectionUri(introspectUri)
.clientId("client&1")
.clientSecret("secret@$2")
.build();
OAuth2AuthenticatedPrincipal authority = introspectionClient.introspect("token");
// @formatter:off
assertThat(authority.getAttributes())
.isNotNull()
.containsEntry(OAuth2TokenIntrospectionClaimNames.ACTIVE, true)
.containsEntry(OAuth2TokenIntrospectionClaimNames.USERNAME, "client&1");
// @formatter:on
}
}
private static ResponseEntity<Map<String, Object>> response(String content) {
HttpHeaders headers = new HttpHeaders();
headers.setContentType(MediaType.APPLICATION_JSON);
try {
return new ResponseEntity<>(JSONObjectUtils.parse(content), headers, HttpStatus.OK);
}
catch (Exception ex) {
throw new IllegalArgumentException(ex);
}
}
private static ResponseEntity<Map<String, Object>> response(Map<String, Object> content) {
HttpHeaders headers = new HttpHeaders();
headers.setContentType(MediaType.APPLICATION_JSON);
try {
return new ResponseEntity<>(content, headers, HttpStatus.OK);
}
catch (Exception ex) {
throw new IllegalArgumentException(ex);
}
}
private static Dispatcher requiresAuth(String username, String password, String response) {
return new Dispatcher() {
@Override
public MockResponse dispatch(RecordedRequest request) {
String authorization = request.getHeader(HttpHeaders.AUTHORIZATION);
// @formatter:off
return Optional.ofNullable(authorization)
.filter((a) -> isAuthorized(authorization, username, password))
.map((a) -> ok(response))
.orElse(unauthorized());
// @formatter:on
}
};
}
private static boolean isAuthorized(String authorization, String username, String password) {
String[] values = new String(Base64.getDecoder().decode(authorization.substring(6))).split(":");
return username.equals(values[0]) && password.equals(values[1]);
}
private static MockResponse ok(String response) {
// @formatter:off
return new MockResponse().setBody(response)
.setHeader(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON_VALUE);
// @formatter:on
}
private static MockResponse unauthorized() {
return new MockResponse().setResponseCode(401);
}
}
| SpringOpaqueTokenIntrospectorTests |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/impl/LoadedManifestData.java | {
"start": 1340,
"end": 2833
} | class ____ {
/**
* Directories.
*/
private final Collection<DirEntry> directories;
/**
* Path of the intermediate cache of
* files to rename.
* This will be a sequence file of long -> FileEntry
*/
private final Path entrySequenceData;
/**
* How many files will be renamed.
*/
private final int fileCount;
/**
* Data about the loaded manifests.
* @param directories directories
* @param entrySequenceData Path in local fs to the entry sequence data.
* @param fileCount number of files.
*/
public LoadedManifestData(
final Collection<DirEntry> directories,
final Path entrySequenceData,
final int fileCount) {
this.directories = requireNonNull(directories);
this.fileCount = fileCount;
this.entrySequenceData = requireNonNull(entrySequenceData);
}
public Collection<DirEntry> getDirectories() {
return directories;
}
public int getFileCount() {
return fileCount;
}
/**
* Get the path to the entry sequence data file.
* @return the path
*/
public Path getEntrySequenceData() {
return entrySequenceData;
}
/**
* Get the entry sequence data as a file.
*/
public File getEntrySequenceFile() {
return new File(entrySequenceData.toUri());
}
/**
* Delete the entry sequence file.
* @return whether or not the delete was successful.
*/
public boolean deleteEntrySequenceFile() {
return getEntrySequenceFile().delete();
}
}
| LoadedManifestData |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/state/heap/HeapPriorityQueueSet.java | {
"start": 2085,
"end": 6918
} | class ____<T extends HeapPriorityQueueElement> extends HeapPriorityQueue<T>
implements KeyGroupedInternalPriorityQueue<T> {
/** Function to extract the key from contained elements. */
private final KeyExtractorFunction<T> keyExtractor;
/**
* This array contains one hash set per key-group. The sets are used for fast de-duplication and
* deletes of elements.
*/
private final HashMap<T, T>[] deduplicationMapsByKeyGroup;
/** The key-group range of elements that are managed by this queue. */
private final KeyGroupRange keyGroupRange;
/** The total number of key-groups of the job. */
private final int totalNumberOfKeyGroups;
/**
* Creates an empty {@link HeapPriorityQueueSet} with the requested initial capacity.
*
* @param elementPriorityComparator comparator for the priority of contained elements.
* @param keyExtractor function to extract a key from the contained elements.
* @param minimumCapacity the minimum and initial capacity of this priority queue.
* @param keyGroupRange the key-group range of the elements in this set.
* @param totalNumberOfKeyGroups the total number of key-groups of the job.
*/
@SuppressWarnings("unchecked")
public HeapPriorityQueueSet(
@Nonnull PriorityComparator<T> elementPriorityComparator,
@Nonnull KeyExtractorFunction<T> keyExtractor,
@Nonnegative int minimumCapacity,
@Nonnull KeyGroupRange keyGroupRange,
@Nonnegative int totalNumberOfKeyGroups) {
super(elementPriorityComparator, minimumCapacity);
this.keyExtractor = keyExtractor;
this.totalNumberOfKeyGroups = totalNumberOfKeyGroups;
this.keyGroupRange = keyGroupRange;
final int keyGroupsInLocalRange = keyGroupRange.getNumberOfKeyGroups();
final int deduplicationSetSize = 1 + minimumCapacity / keyGroupsInLocalRange;
this.deduplicationMapsByKeyGroup = new HashMap[keyGroupsInLocalRange];
for (int i = 0; i < keyGroupsInLocalRange; ++i) {
deduplicationMapsByKeyGroup[i] =
CollectionUtil.newHashMapWithExpectedSize(deduplicationSetSize);
}
}
@Override
@Nullable
public T poll() {
final T toRemove = super.poll();
return toRemove != null ? getDedupMapForElement(toRemove).remove(toRemove) : null;
}
/**
* Adds the element to the queue. In contrast to the superclass and to maintain set semantics,
* this happens only if no such element is already contained (determined by {@link
* #equals(Object)}).
*
* @return <code>true</code> if the operation changed the head element or if is it unclear if
* the head element changed. Only returns <code>false</code> iff the head element was not
* changed by this operation.
*/
@Override
public boolean add(@Nonnull T element) {
return getDedupMapForElement(element).putIfAbsent(element, element) == null
&& super.add(element);
}
/**
* In contrast to the superclass and to maintain set semantics, removal here is based on
* comparing the given element via {@link #equals(Object)}.
*
* @return <code>true</code> if the operation changed the head element or if is it unclear if
* the head element changed. Only returns <code>false</code> iff the head element was not
* changed by this operation.
*/
@Override
public boolean remove(@Nonnull T toRemove) {
T storedElement = getDedupMapForElement(toRemove).remove(toRemove);
return storedElement != null && super.remove(storedElement);
}
@Override
public void clear() {
super.clear();
for (HashMap<?, ?> elementHashMap : deduplicationMapsByKeyGroup) {
elementHashMap.clear();
}
}
private HashMap<T, T> getDedupMapForKeyGroup(@Nonnegative int keyGroupId) {
return deduplicationMapsByKeyGroup[globalKeyGroupToLocalIndex(keyGroupId)];
}
private HashMap<T, T> getDedupMapForElement(T element) {
int keyGroup =
KeyGroupRangeAssignment.assignToKeyGroup(
keyExtractor.extractKeyFromElement(element), totalNumberOfKeyGroups);
return getDedupMapForKeyGroup(keyGroup);
}
private int globalKeyGroupToLocalIndex(int keyGroup) {
checkArgument(
keyGroupRange.contains(keyGroup),
"%s does not contain key group %s",
keyGroupRange,
keyGroup);
return keyGroup - keyGroupRange.getStartKeyGroup();
}
@Nonnull
@Override
public Set<T> getSubsetForKeyGroup(int keyGroupId) {
return getDedupMapForKeyGroup(keyGroupId).keySet();
}
}
| HeapPriorityQueueSet |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/network/Selectable.java | {
"start": 1106,
"end": 4034
} | interface ____ {
/**
* See {@link #connect(String, InetSocketAddress, int, int) connect()}
*/
int USE_DEFAULT_BUFFER_SIZE = -1;
/**
* Begin establishing a socket connection to the given address identified by the given address
* @param id The id for this connection
* @param address The address to connect to
* @param sendBufferSize The send buffer for the socket
* @param receiveBufferSize The receive buffer for the socket
* @throws IOException If we cannot begin connecting
*/
void connect(String id, InetSocketAddress address, int sendBufferSize, int receiveBufferSize) throws IOException;
/**
* Wakeup this selector if it is blocked on I/O
*/
void wakeup();
/**
* Close this selector
*/
void close();
/**
* Close the connection identified by the given id
*/
void close(String id);
/**
* Queue the given request for sending in the subsequent {@link #poll(long) poll()} calls
* @param send The request to send
*/
void send(NetworkSend send);
/**
* Do I/O. Reads, writes, connection establishment, etc.
* @param timeout The amount of time to block if there is nothing to do
* @throws IOException
*/
void poll(long timeout) throws IOException;
/**
* The list of sends that completed on the last {@link #poll(long) poll()} call.
*/
List<NetworkSend> completedSends();
/**
* The collection of receives that completed on the last {@link #poll(long) poll()} call.
*
* Note that the caller of this method assumes responsibility to close the NetworkReceive resources which may be
* backed by a {@link MemoryPool}. In such scenarios (when NetworkReceive uses a {@link MemoryPool}), it is necessary
* to close the {@link NetworkReceive} to prevent any memory leaks.
*/
Collection<NetworkReceive> completedReceives();
/**
* The connections that finished disconnecting on the last {@link #poll(long) poll()}
* call. Channel state indicates the local channel state at the time of disconnection.
*/
Map<String, ChannelState> disconnected();
/**
* The list of connections that completed their connection on the last {@link #poll(long) poll()}
* call.
*/
List<String> connected();
/**
* Disable reads from the given connection
* @param id The id for the connection
*/
void mute(String id);
/**
* Re-enable reads from the given connection
* @param id The id for the connection
*/
void unmute(String id);
/**
* Disable reads from all connections
*/
void muteAll();
/**
* Re-enable reads from all connections
*/
void unmuteAll();
/**
* returns true if a channel is ready
* @param id The id for the connection
*/
boolean isChannelReady(String id);
}
| Selectable |
java | apache__rocketmq | example/src/main/java/org/apache/rocketmq/example/tracemessage/OpenTracingTransactionProducer.java | {
"start": 1709,
"end": 4302
} | class ____ {
public static final String PRODUCER_GROUP = "please_rename_unique_group_name";
public static final String DEFAULT_NAMESRVADDR = "127.0.0.1:9876";
public static final String TOPIC = "TopicTest";
public static final String TAG = "Tag";
public static final String KEY = "KEY";
public static final int MESSAGE_COUNT = 100000;
public static void main(String[] args) throws MQClientException, InterruptedException {
Tracer tracer = initTracer();
TransactionMQProducer producer = new TransactionMQProducer(PRODUCER_GROUP);
// Uncomment the following line while debugging, namesrvAddr should be set to your local address
// producer.setNamesrvAddr(DEFAULT_NAMESRVADDR);
producer.getDefaultMQProducerImpl().registerSendMessageHook(new SendMessageOpenTracingHookImpl(tracer));
producer.getDefaultMQProducerImpl().registerEndTransactionHook(new EndTransactionOpenTracingHookImpl(tracer));
producer.setTransactionListener(new TransactionListener() {
@Override
public LocalTransactionState executeLocalTransaction(Message msg, Object arg) {
return LocalTransactionState.COMMIT_MESSAGE;
}
@Override
public LocalTransactionState checkLocalTransaction(MessageExt msg) {
return LocalTransactionState.COMMIT_MESSAGE;
}
});
producer.start();
try {
Message msg = new Message(TOPIC, TAG, KEY,
"Hello RocketMQ".getBytes(RemotingHelper.DEFAULT_CHARSET));
SendResult sendResult = producer.sendMessageInTransaction(msg, null);
System.out.printf("%s%n", sendResult);
} catch (MQClientException | UnsupportedEncodingException e) {
e.printStackTrace();
}
for (int i = 0; i < MESSAGE_COUNT; i++) {
Thread.sleep(1000);
}
producer.shutdown();
}
private static Tracer initTracer() {
Configuration.SamplerConfiguration samplerConfig = Configuration.SamplerConfiguration.fromEnv()
.withType(ConstSampler.TYPE)
.withParam(1);
Configuration.ReporterConfiguration reporterConfig = Configuration.ReporterConfiguration.fromEnv()
.withLogSpans(true);
Configuration config = new Configuration("rocketmq")
.withSampler(samplerConfig)
.withReporter(reporterConfig);
GlobalTracer.registerIfAbsent(config.getTracer());
return config.getTracer();
}
}
| OpenTracingTransactionProducer |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/schedulers/Schedulers.java | {
"start": 35884,
"end": 36052
} | class ____ implements Supplier<Scheduler> {
@Override
public Scheduler get() {
return IoHolder.DEFAULT;
}
}
static final | IOTask |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/persister/entity/AbstractEntityPersister.java | {
"start": 19651,
"end": 25034
} | class ____
private final String[] subclassPropertyNameClosure;
private final Type[] subclassPropertyTypeClosure;
private final String[][] subclassPropertyFormulaTemplateClosure;
private final String[][] subclassPropertyColumnNameClosure;
private final String[][] subclassPropertyColumnReaderClosure;
private final String[][] subclassPropertyColumnReaderTemplateClosure;
private final FetchMode[] subclassPropertyFetchModeClosure;
private Map<String, SingleIdArrayLoadPlan> lazyLoadPlanByFetchGroup;
private final LockModeEnumMap<LockingStrategy> lockers = new LockModeEnumMap<>();
private String sqlVersionSelectString;
private EntityTableMapping[] tableMappings;
private InsertCoordinator insertCoordinator;
private UpdateCoordinator updateCoordinator;
private DeleteCoordinator deleteCoordinator;
private UpdateCoordinator mergeCoordinator;
private SqmMultiTableMutationStrategy sqmMultiTableMutationStrategy;
private SqmMultiTableInsertStrategy sqmMultiTableInsertStrategy;
private final EntityDataAccess cacheAccessStrategy;
private final NaturalIdDataAccess naturalIdRegionAccessStrategy;
private final CacheEntryHelper cacheEntryHelper;
private final boolean canReadFromCache;
private final boolean canWriteToCache;
private final boolean invalidateCache;
private final boolean isLazyPropertiesCacheable;
private final boolean useReferenceCacheEntries;
private final boolean useShallowQueryCacheLayout;
private final boolean storeDiscriminatorInShallowQueryCacheLayout;
// dynamic filters attached to the class-level
private final FilterHelper filterHelper;
private volatile Set<String> affectingFetchProfileNames;
protected List<? extends ModelPart> insertGeneratedProperties;
protected List<? extends ModelPart> updateGeneratedProperties;
private GeneratedValuesProcessor insertGeneratedValuesProcessor;
private GeneratedValuesProcessor updateGeneratedValuesProcessor;
private GeneratedValuesMutationDelegate insertDelegate;
private GeneratedValuesMutationDelegate updateDelegate;
private String identitySelectString;
private final JavaType<?> javaType;
private final EntityRepresentationStrategy representationStrategy;
private EntityMappingType superMappingType;
private SortedMap<String, EntityMappingType> subclassMappingTypes;
private final boolean concreteProxy;
private EntityConcreteTypeLoader concreteTypeLoader;
private EntityIdentifierMapping identifierMapping;
private NaturalIdMapping naturalIdMapping;
private EntityVersionMapping versionMapping;
private EntityRowIdMapping rowIdMapping;
private EntityDiscriminatorMapping discriminatorMapping;
private SoftDeleteMapping softDeleteMapping;
private AttributeMappingsList attributeMappings;
protected AttributeMappingsMap declaredAttributeMappings = AttributeMappingsMap.builder().build();
protected AttributeMappingsList staticFetchableList;
// We build a cache for getters and setters to avoid megamorphic calls
private Getter[] getterCache;
private Setter[] setterCache;
private final String queryLoaderName;
private BeforeExecutionGenerator versionGenerator;
protected ReflectionOptimizer.AccessOptimizer accessOptimizer;
protected final String[] fullDiscriminatorSQLValues;
private final Object[] fullDiscriminatorValues;
/**
* Warning:
* When there are duplicated property names in the subclasses
* then propertyMapping will only contain one of those properties.
* To ensure correct results, propertyMapping should only be used
* for the concrete EntityPersister (since the concrete EntityPersister
* cannot have duplicated property names).
*/
private final EntityPropertyMapping propertyMapping;
private List<UniqueKeyEntry> uniqueKeyEntries = null; //lazily initialized
private ConcurrentHashMap<String,SingleIdArrayLoadPlan> nonLazyPropertyLoadPlansByName;
public AbstractEntityPersister(
final PersistentClass persistentClass,
final EntityDataAccess cacheAccessStrategy,
final NaturalIdDataAccess naturalIdRegionAccessStrategy,
final RuntimeModelCreationContext creationContext)
throws HibernateException {
super( persistentClass, creationContext );
jpaEntityName = persistentClass.getJpaEntityName();
//set it here, but don't call it, since it's still uninitialized!
factory = creationContext.getSessionFactory();
sqlAliasStem = SqlAliasStemHelper.INSTANCE.generateStemFromEntityName( persistentClass.getEntityName() );
navigableRole = new NavigableRole( persistentClass.getEntityName() );
final var factoryOptions = creationContext.getSessionFactoryOptions();
if ( factoryOptions.isSecondLevelCacheEnabled() ) {
this.cacheAccessStrategy = cacheAccessStrategy;
this.naturalIdRegionAccessStrategy = naturalIdRegionAccessStrategy;
canWriteToCache = determineCanWriteToCache( persistentClass, cacheAccessStrategy );
canReadFromCache = determineCanReadFromCache( persistentClass, cacheAccessStrategy );
isLazyPropertiesCacheable = persistentClass.getRootClass().isLazyPropertiesCacheable();
}
else {
this.cacheAccessStrategy = null;
this.naturalIdRegionAccessStrategy = null;
canWriteToCache = false;
canReadFromCache = false;
isLazyPropertiesCacheable = true;
}
entityEntryFactory =
isMutable()
? MutableEntityEntryFactory.INSTANCE
: ImmutableEntityEntryFactory.INSTANCE;
// Handle any filters applied to the | hierarchy |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/load/engine/executor/GlideExecutor.java | {
"start": 15162,
"end": 19693
} | class ____ {
/**
* Prevents core and non-core threads from timing out ever if provided to {@link
* #setThreadTimeoutMillis(long)}.
*/
public static final long NO_THREAD_TIMEOUT = 0L;
private final boolean preventNetworkOperations;
private int corePoolSize;
private int maximumPoolSize;
@NonNull private ThreadFactory threadFactory = new DefaultPriorityThreadFactory();
@NonNull
private UncaughtThrowableStrategy uncaughtThrowableStrategy = UncaughtThrowableStrategy.DEFAULT;
private String name;
private long threadTimeoutMillis;
private Function<? super Runnable, ? extends Runnable> onExecuteDecorator;
@Synthetic
Builder(boolean preventNetworkOperations) {
this.preventNetworkOperations = preventNetworkOperations;
}
/**
* Allows both core and non-core threads in the executor to be terminated if no tasks arrive for
* at least the given timeout milliseconds.
*
* <p>Use {@link #NO_THREAD_TIMEOUT} to remove a previously set timeout.
*/
public Builder setThreadTimeoutMillis(long threadTimeoutMillis) {
this.threadTimeoutMillis = threadTimeoutMillis;
return this;
}
/** Sets the maximum number of threads to use. */
public Builder setThreadCount(@IntRange(from = 1) int threadCount) {
corePoolSize = threadCount;
maximumPoolSize = threadCount;
return this;
}
/**
* Sets the {@link ThreadFactory} responsible for creating threads and setting their priority.
*
* <p>Usage of this method may override other options on this builder. No guarantees are
* provided with regards to the behavior of this method or how it interacts with other methods
* on the builder. Use at your own risk.
*
* @deprecated This is an experimental method that may be removed without warning in a future
* version.
*/
@Deprecated
public Builder setThreadFactory(@NonNull ThreadFactory threadFactory) {
this.threadFactory = threadFactory;
return this;
}
/**
* Sets the {@link UncaughtThrowableStrategy} to use for unexpected exceptions thrown by tasks
* on {@link GlideExecutor}s built by this {@code Builder}.
*/
public Builder setUncaughtThrowableStrategy(@NonNull UncaughtThrowableStrategy strategy) {
this.uncaughtThrowableStrategy = strategy;
return this;
}
/**
* Sets the prefix to use for each thread name created by any {@link GlideExecutor}s built by
* this {@code Builder}.
*/
public Builder setName(String name) {
this.name = name;
return this;
}
/**
* Sets the decorator to be applied to each runnable executed by the executor.
*
* <p>This is an experimental method that may be removed without warning in a future version.
*/
public Builder experimentalSetOnExecuteDecorator(
Function<? super Runnable, ? extends Runnable> onExecuteDecorator) {
this.onExecuteDecorator = onExecuteDecorator;
return this;
}
/** Builds a new {@link GlideExecutor} with any previously specified options. */
public GlideExecutor build() {
if (TextUtils.isEmpty(name)) {
throw new IllegalArgumentException(
"Name must be non-null and non-empty, but given: " + name);
}
ThreadFactory factory =
new DefaultThreadFactory(
threadFactory, name, uncaughtThrowableStrategy, preventNetworkOperations);
ThreadPoolExecutor executor;
if (onExecuteDecorator != null) {
executor =
new ThreadPoolExecutor(
corePoolSize,
maximumPoolSize,
/* keepAliveTime= */ threadTimeoutMillis,
TimeUnit.MILLISECONDS,
new PriorityBlockingQueue<>(),
factory) {
@Override
public void execute(@NonNull Runnable command) {
super.execute(onExecuteDecorator.apply(command));
}
};
} else {
executor =
new ThreadPoolExecutor(
corePoolSize,
maximumPoolSize,
/* keepAliveTime= */ threadTimeoutMillis,
TimeUnit.MILLISECONDS,
new PriorityBlockingQueue<>(),
factory);
}
if (threadTimeoutMillis != NO_THREAD_TIMEOUT) {
executor.allowCoreThreadTimeOut(true);
}
return new GlideExecutor(executor);
}
}
}
| Builder |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/utils/TracingHeaderValidator.java | {
"start": 1364,
"end": 7675
} | class ____ implements Listener {
private String clientCorrelationId;
private String fileSystemId;
private String primaryRequestId = EMPTY_STRING;
private boolean needsPrimaryRequestId;
private String streamID = "";
private FSOperationType operation;
private int retryNum;
private TracingHeaderFormat format;
private static final String GUID_PATTERN = "^[0-9a-fA-F]{8}-([0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}$";
private String ingressHandler = null;
private String position = String.valueOf(0);
private ReadType readType = ReadType.UNKNOWN_READ;
private Integer operatedBlobCount = null;
@Override
public void callTracingHeaderValidator(String tracingContextHeader,
TracingHeaderFormat format) {
this.format = format;
validateTracingHeader(tracingContextHeader);
}
@Override
public TracingHeaderValidator getClone() {
TracingHeaderValidator tracingHeaderValidator = new TracingHeaderValidator(
clientCorrelationId, fileSystemId, operation, needsPrimaryRequestId,
retryNum, streamID);
tracingHeaderValidator.primaryRequestId = primaryRequestId;
tracingHeaderValidator.ingressHandler = ingressHandler;
tracingHeaderValidator.position = position;
tracingHeaderValidator.readType = readType;
tracingHeaderValidator.operatedBlobCount = operatedBlobCount;
return tracingHeaderValidator;
}
public TracingHeaderValidator(String clientCorrelationId, String fileSystemId,
FSOperationType operation, boolean needsPrimaryRequestId, int retryNum) {
this.clientCorrelationId = clientCorrelationId;
this.fileSystemId = fileSystemId;
this.operation = operation;
this.retryNum = retryNum;
this.needsPrimaryRequestId = needsPrimaryRequestId;
}
public TracingHeaderValidator(String clientCorrelationId, String fileSystemId,
FSOperationType operation, boolean needsPrimaryRequestId, int retryNum,
String streamID) {
this(clientCorrelationId, fileSystemId, operation, needsPrimaryRequestId,
retryNum);
this.streamID = streamID;
}
private void validateTracingHeader(String tracingContextHeader) {
String[] idList = tracingContextHeader.split(":", SPLIT_NO_LIMIT);
validateBasicFormat(idList);
if (format != TracingHeaderFormat.ALL_ID_FORMAT) {
return;
}
// Validate Operated Blob Count
if (operatedBlobCount != null) {
Assertions.assertThat(Integer.parseInt(idList[10]))
.describedAs("OperatedBlobCount is incorrect")
.isEqualTo(operatedBlobCount);
}
// Validate Primary Request ID
if (!primaryRequestId.isEmpty() && !idList[4].isEmpty()) {
Assertions.assertThat(idList[4])
.describedAs("PrimaryReqID should be common for these requests")
.isEqualTo(primaryRequestId);
}
// Validate Stream ID
if (!streamID.isEmpty()) {
Assertions.assertThat(idList[5])
.describedAs("Stream id should be common for these requests")
.isEqualTo(streamID);
}
}
private void validateBasicFormat(String[] idList) {
// Validate Version and Number of fields in the header
Assertions.assertThat(idList[0]).describedAs("Version should be present")
.isEqualTo(TracingHeaderVersion.getCurrentVersion().toString());
int expectedSize = 0;
if (format == TracingHeaderFormat.ALL_ID_FORMAT) {
expectedSize = TracingHeaderVersion.getCurrentVersion().getFieldCount();
} else if (format == TracingHeaderFormat.TWO_ID_FORMAT) {
expectedSize = 3;
} else {
Assertions.assertThat(idList).describedAs("header should have 1 element")
.hasSize(1);
Assertions.assertThat(idList[0])
.describedAs("Client request ID is a guid").matches(GUID_PATTERN);
return;
}
Assertions.assertThat(idList)
.describedAs("header should have " + expectedSize + " elements")
.hasSize(expectedSize);
// Validate Client Correlation ID
if (clientCorrelationId.matches("[a-zA-Z0-9-]*")) {
Assertions.assertThat(idList[1])
.describedAs("Correlation ID should match config")
.isEqualTo(clientCorrelationId);
} else {
Assertions.assertThat(idList[1])
.describedAs("Invalid config should be replaced with empty string")
.isEmpty();
}
// Validate Client Request ID
Assertions.assertThat(idList[2]).describedAs("Client request ID is a guid")
.matches(GUID_PATTERN);
if (format != TracingHeaderFormat.ALL_ID_FORMAT) {
return;
}
// Validate FileSystem ID
Assertions.assertThat(idList[3]).describedAs("Filesystem ID incorrect")
.isEqualTo(fileSystemId);
// Validate Primary Request ID
if (needsPrimaryRequestId && !operation
.equals(FSOperationType.READ)) {
Assertions.assertThat(idList[4]).describedAs("should have primaryReqId")
.isNotEmpty();
}
// Validate Operation Type
Assertions.assertThat(idList[6]).describedAs("Operation name incorrect")
.isEqualTo(operation.toString());
// Validate Retry Header
if (idList[7].contains("_")) {
idList[7] = idList[7].split("_")[0];
}
int retryCount = Integer.parseInt(idList[7]);
Assertions.assertThat(retryCount)
.describedAs("Retry was required due to issue on server side")
.isEqualTo(retryNum);
}
/**
* Sets the value of expected Hadoop operation
* @param operation Hadoop operation code (String of two characters)
*/
@Override
public void setOperation(FSOperationType operation) {
this.operation = operation;
}
@Override
public void updatePrimaryRequestID(String primaryRequestId) {
this.primaryRequestId = primaryRequestId;
}
@Override
public void updateIngressHandler(String ingressHandler) {
this.ingressHandler = ingressHandler;
}
@Override
public void updatePosition(String position) {
this.position = position;
}
@Override
public void updateReadType(ReadType readType) {
this.readType = readType;
}
/**
* Sets the value of the number of blobs operated on.
* @param operatedBlobCount number of blobs operated on
*/
public void setOperatedBlobCount(Integer operatedBlobCount) {
this.operatedBlobCount = operatedBlobCount;
}
}
| TracingHeaderValidator |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncher.java | {
"start": 905,
"end": 988
} | interface ____
extends EventHandler<ContainerLauncherEvent> {
| ContainerLauncher |
java | quarkusio__quarkus | extensions/hibernate-reactive/deployment/src/test/java/io/quarkus/hibernate/reactive/context/Fruit.java | {
"start": 126,
"end": 185
} | class ____ {
@Id
Integer id;
String name;
}
| Fruit |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/IdGeneratorOverridingTest.java | {
"start": 623,
"end": 905
} | class ____ {
@FailureExpected(reason = "We don't (yet) allow overriding of ids declared by mapped superclasses")
@Test void test(EntityManagerFactoryScope scope) {
scope.inTransaction( em -> em.persist( new B() ) );
}
@MappedSuperclass
static abstract | IdGeneratorOverridingTest |
java | alibaba__nacos | api/src/test/java/com/alibaba/nacos/api/ai/listener/NacosMcpServerEventTest.java | {
"start": 900,
"end": 1640
} | class ____ {
@Test
void testConstructor() {
McpServerDetailInfo mcpServerDetailInfo = new McpServerDetailInfo();
mcpServerDetailInfo.setName("testName");
mcpServerDetailInfo.setId(UUID.randomUUID().toString());
mcpServerDetailInfo.setNamespaceId(AiConstants.Mcp.MCP_DEFAULT_NAMESPACE);
NacosMcpServerEvent event = new NacosMcpServerEvent(mcpServerDetailInfo);
assertEquals(mcpServerDetailInfo, event.getMcpServerDetailInfo());
assertEquals(mcpServerDetailInfo.getId(), event.getMcpId());
assertEquals(mcpServerDetailInfo.getNamespaceId(), event.getNamespaceId());
assertEquals(mcpServerDetailInfo.getName(), event.getMcpName());
}
} | NacosMcpServerEventTest |
java | spring-projects__spring-boot | integration-test/spring-boot-actuator-integration-tests/src/test/java/org/springframework/boot/actuate/web/mappings/MappingsEndpointIntegrationTests.java | {
"start": 10203,
"end": 10930
} | class ____ {
@Bean
DispatcherHandlersMappingDescriptionProvider dispatcherHandlersMappingDescriptionProvider() {
return new DispatcherHandlersMappingDescriptionProvider();
}
@Bean
RouterFunction<ServerResponse> routerFunction() {
return route(GET("/one"), (request) -> ServerResponse.ok().build()).andRoute(POST("/two"),
(request) -> ServerResponse.ok().build());
}
@RequestMapping("/three")
void three() {
}
@Bean
RouterFunction<ServerResponse> routerFunctionWithAttributes() {
return route(GET("/four"), (request) -> ServerResponse.ok().build()).withAttribute("test", "test");
}
}
@Configuration(proxyBeanMethods = false)
@EnableWebMvc
@Controller
static | ReactiveWebConfiguration |
java | elastic__elasticsearch | x-pack/plugin/gpu/src/internalClusterTest/java/org/elasticsearch/xpack/gpu/TestVectorsFormatUtils.java | {
"start": 424,
"end": 1014
} | class ____ {
static DenseVectorFieldMapper.VectorSimilarity randomGPUSupportedSimilarity(DenseVectorFieldMapper.VectorIndexType vectorIndexType) {
if (vectorIndexType == DenseVectorFieldMapper.VectorIndexType.INT8_HNSW) {
return randomFrom(
DenseVectorFieldMapper.VectorSimilarity.L2_NORM,
DenseVectorFieldMapper.VectorSimilarity.COSINE,
DenseVectorFieldMapper.VectorSimilarity.DOT_PRODUCT
);
}
return randomFrom(DenseVectorFieldMapper.VectorSimilarity.values());
}
}
| TestVectorsFormatUtils |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/observable/ObservableOnErrorComplete.java | {
"start": 1600,
"end": 3133
} | class ____<T>
implements Observer<T>, Disposable {
final Observer<? super T> downstream;
final Predicate<? super Throwable> predicate;
Disposable upstream;
public OnErrorCompleteObserver(Observer<? super T> actual, Predicate<? super Throwable> predicate) {
this.downstream = actual;
this.predicate = predicate;
}
@Override
public void onSubscribe(Disposable d) {
if (DisposableHelper.validate(this.upstream, d)) {
this.upstream = d;
downstream.onSubscribe(this);
}
}
@Override
public void onNext(T value) {
downstream.onNext(value);
}
@Override
public void onError(Throwable e) {
boolean b;
try {
b = predicate.test(e);
} catch (Throwable ex) {
Exceptions.throwIfFatal(ex);
downstream.onError(new CompositeException(e, ex));
return;
}
if (b) {
downstream.onComplete();
} else {
downstream.onError(e);
}
}
@Override
public void onComplete() {
downstream.onComplete();
}
@Override
public void dispose() {
upstream.dispose();
}
@Override
public boolean isDisposed() {
return upstream.isDisposed();
}
}
}
| OnErrorCompleteObserver |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/state/StateSnapshotKeyGroupReader.java | {
"start": 1196,
"end": 1577
} | interface ____ {
/**
* Read the data for the specified key-group from the input.
*
* @param div the input
* @param keyGroupId the key-group to write
* @throws IOException on write related problems
*/
void readMappingsInKeyGroup(@Nonnull DataInputView div, @Nonnegative int keyGroupId)
throws IOException;
}
| StateSnapshotKeyGroupReader |
java | apache__kafka | clients/src/test/java/org/apache/kafka/common/config/AbstractConfigTest.java | {
"start": 17568,
"end": 17810
} | class ____ with names or classes where all classes are visible to thread context classloader
Thread.currentThread().setContextClassLoader(defaultClassLoader);
ClassTestConfig.testOverrides();
// Test | overrides |
java | quarkusio__quarkus | extensions/panache/hibernate-orm-rest-data-panache/deployment/src/test/java/io/quarkus/hibernate/orm/rest/data/panache/deployment/repository/PanacheRepositoryResourceDevModeTest.java | {
"start": 267,
"end": 769
} | class ____ extends AbstractDevModeTest {
@RegisterExtension
static final QuarkusDevModeTest TEST = new QuarkusDevModeTest()
.withApplicationRoot((jar) -> jar
.addClasses(Collection.class, AbstractEntity.class, AbstractItem.class, Item.class,
ItemsResource.class, ItemsRepository.class)
.addAsResource("application.properties")
.addAsResource("import.sql"));
}
| PanacheRepositoryResourceDevModeTest |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/search/arguments/NumericFieldArgs.java | {
"start": 1576,
"end": 1750
} | class ____<K> extends FieldArgs.Builder<K, NumericFieldArgs<K>, Builder<K>> {
public Builder() {
super(new NumericFieldArgs<>());
}
}
}
| Builder |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyInitializationContextValidator.java | {
"start": 1064,
"end": 2905
} | class ____ {
private FederationPolicyInitializationContextValidator() {
// disable constructor per checkstyle
}
public static void validate(
FederationPolicyInitializationContext policyContext, String myType)
throws FederationPolicyInitializationException {
if (myType == null) {
throw new FederationPolicyInitializationException(
"The myType parameter" + " should not be null.");
}
if (policyContext == null) {
throw new FederationPolicyInitializationException(
"The FederationPolicyInitializationContext provided is null. Cannot"
+ " reinitialize " + "successfully.");
}
if (policyContext.getFederationStateStoreFacade() == null) {
throw new FederationPolicyInitializationException(
"The FederationStateStoreFacade provided is null. Cannot"
+ " reinitialize successfully.");
}
if (policyContext.getFederationSubclusterResolver() == null) {
throw new FederationPolicyInitializationException(
"The FederationSubclusterResolver provided is null. Cannot"
+ " reinitialize successfully.");
}
if (policyContext.getSubClusterPolicyConfiguration() == null) {
throw new FederationPolicyInitializationException(
"The SubClusterPolicyConfiguration provided is null. Cannot "
+ "reinitialize successfully.");
}
String intendedType =
policyContext.getSubClusterPolicyConfiguration().getType();
if (!myType.equals(intendedType)) {
throw new FederationPolicyInitializationException(
"The FederationPolicyConfiguration carries a type (" + intendedType
+ ") different then mine (" + myType
+ "). Cannot reinitialize successfully.");
}
}
}
| FederationPolicyInitializationContextValidator |
java | google__guava | android/guava/src/com/google/common/base/CharMatcher.java | {
"start": 45740,
"end": 46133
} | class ____ extends NamedFastMatcher {
static final CharMatcher INSTANCE = new JavaIsoControl();
private JavaIsoControl() {
super("CharMatcher.javaIsoControl()");
}
@Override
public boolean matches(char c) {
return c <= '\u001f' || (c >= '\u007f' && c <= '\u009f');
}
}
/** Implementation of {@link #invisible()}. */
private static final | JavaIsoControl |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestCheckpointPreemptionPolicy.java | {
"start": 3006,
"end": 13063
} | class ____ {
TaskAttemptListenerImpl pel= null;
RMContainerAllocator r;
JobId jid;
RunningAppContext mActxt;
Set<ContainerId> preemptedContainers = new HashSet<ContainerId>();
Map<ContainerId,TaskAttemptId> assignedContainers =
new HashMap<ContainerId, TaskAttemptId>();
private final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
HashMap<ContainerId,Resource> contToResourceMap =
new HashMap<ContainerId, Resource>();
private int minAlloc = 1024;
@BeforeEach
@SuppressWarnings("rawtypes") // mocked generics
public void setup() {
ApplicationId appId = ApplicationId.newInstance(200, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
jid = MRBuilderUtils.newJobId(appId, 1);
mActxt = mock(RunningAppContext.class);
@SuppressWarnings("unchecked")
EventHandler<Event> ea = mock(EventHandler.class);
when(mActxt.getEventHandler()).thenReturn(ea);
for (int i = 0; i < 40; ++i) {
ContainerId cId = ContainerId.newContainerId(appAttemptId, i);
if (0 == i % 7) {
preemptedContainers.add(cId);
}
TaskId tId = 0 == i % 2
? MRBuilderUtils.newTaskId(jid, i / 2, TaskType.MAP)
: MRBuilderUtils.newTaskId(jid, i / 2 + 1, TaskType.REDUCE);
assignedContainers.put(cId, MRBuilderUtils.newTaskAttemptId(tId, 0));
contToResourceMap.put(cId, Resource.newInstance(2 * minAlloc, 2));
}
for (Map.Entry<ContainerId,TaskAttemptId> ent :
assignedContainers.entrySet()) {
System.out.println("cont:" + ent.getKey().getContainerId() +
" type:" + ent.getValue().getTaskId().getTaskType() +
" res:" + contToResourceMap.get(ent.getKey()).getMemorySize() + "MB" );
}
}
@Test
public void testStrictPreemptionContract() {
final Map<ContainerId,TaskAttemptId> containers = assignedContainers;
AMPreemptionPolicy.Context mPctxt = new AMPreemptionPolicy.Context() {
@Override
public TaskAttemptId getTaskAttempt(ContainerId cId) {
return containers.get(cId);
}
@Override
public List<Container> getContainers(TaskType t) {
List<Container> p = new ArrayList<Container>();
for (Map.Entry<ContainerId,TaskAttemptId> ent :
assignedContainers.entrySet()) {
if (ent.getValue().getTaskId().getTaskType().equals(t)) {
p.add(Container.newInstance(ent.getKey(), null, null,
contToResourceMap.get(ent.getKey()),
Priority.newInstance(0), null));
}
}
return p;
}
};
PreemptionMessage pM = generatePreemptionMessage(preemptedContainers,
contToResourceMap, Resource.newInstance(1024, 1), true);
CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
policy.init(mActxt);
policy.preempt(mPctxt, pM);
for (ContainerId c : preemptedContainers) {
TaskAttemptId t = assignedContainers.get(c);
if (TaskType.MAP.equals(t.getTaskId().getTaskType())) {
assert policy.isPreempted(t) == false;
} else {
assert policy.isPreempted(t);
}
}
}
@Test
public void testPreemptionContract() {
final Map<ContainerId,TaskAttemptId> containers = assignedContainers;
AMPreemptionPolicy.Context mPctxt = new AMPreemptionPolicy.Context() {
@Override
public TaskAttemptId getTaskAttempt(ContainerId cId) {
return containers.get(cId);
}
@Override
public List<Container> getContainers(TaskType t) {
List<Container> p = new ArrayList<Container>();
for (Map.Entry<ContainerId,TaskAttemptId> ent :
assignedContainers.entrySet()){
if(ent.getValue().getTaskId().getTaskType().equals(t)){
p.add(Container.newInstance(ent.getKey(), null, null,
contToResourceMap.get(ent.getKey()),
Priority.newInstance(0), null));
}
}
return p;
}
};
PreemptionMessage pM = generatePreemptionMessage(preemptedContainers,
contToResourceMap, Resource.newInstance(minAlloc, 1), false);
CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
policy.init(mActxt);
int supposedMemPreemption = (int) pM.getContract().getResourceRequest()
.get(0).getResourceRequest().getCapability().getMemorySize()
* pM.getContract().getResourceRequest().get(0).getResourceRequest()
.getNumContainers();
// first round of preemption
policy.preempt(mPctxt, pM);
List<TaskAttemptId> preempting =
validatePreemption(pM, policy, supposedMemPreemption);
// redundant message
policy.preempt(mPctxt, pM);
List<TaskAttemptId> preempting2 =
validatePreemption(pM, policy, supposedMemPreemption);
// check that nothing got added
assert preempting2.equals(preempting);
// simulate 2 task completions/successful preemption
policy.handleCompletedContainer(preempting.get(0));
policy.handleCompletedContainer(preempting.get(1));
// remove from assignedContainers
Iterator<Map.Entry<ContainerId,TaskAttemptId>> it =
assignedContainers.entrySet().iterator();
while (it.hasNext()) {
Map.Entry<ContainerId,TaskAttemptId> ent = it.next();
if (ent.getValue().equals(preempting.get(0)) ||
ent.getValue().equals(preempting.get(1)))
it.remove();
}
// one more message asking for preemption
policy.preempt(mPctxt, pM);
// triggers preemption of 2 more containers (i.e., the preemption set changes)
List<TaskAttemptId> preempting3 =
validatePreemption(pM, policy, supposedMemPreemption);
assert preempting3.equals(preempting2) == false;
}
private List<TaskAttemptId> validatePreemption(PreemptionMessage pM,
CheckpointAMPreemptionPolicy policy, int supposedMemPreemption) {
Resource effectivelyPreempted = Resource.newInstance(0, 0);
List<TaskAttemptId> preempting = new ArrayList<TaskAttemptId>();
for (Map.Entry<ContainerId, TaskAttemptId> ent :
assignedContainers.entrySet()) {
if (policy.isPreempted(ent.getValue())) {
Resources.addTo(effectivelyPreempted,contToResourceMap.get(ent.getKey()));
// preempt only reducers
if (policy.isPreempted(ent.getValue())){
assertEquals(TaskType.REDUCE, ent.getValue().getTaskId().getTaskType());
preempting.add(ent.getValue());
}
}
}
// preempt enough
assert (effectivelyPreempted.getMemorySize() >= supposedMemPreemption)
: " preempted: " + effectivelyPreempted.getMemorySize();
// preempt not too much enough
assert effectivelyPreempted.getMemorySize() <= supposedMemPreemption + minAlloc;
return preempting;
}
private PreemptionMessage generatePreemptionMessage(
Set<ContainerId> containerToPreempt,
HashMap<ContainerId, Resource> resPerCont,
Resource minimumAllocation, boolean strict) {
Set<ContainerId> currentContPreemption = Collections.unmodifiableSet(
new HashSet<ContainerId>(containerToPreempt));
containerToPreempt.clear();
Resource tot = Resource.newInstance(0, 0);
for(ContainerId c : currentContPreemption){
Resources.addTo(tot,
resPerCont.get(c));
}
int numCont = (int) Math.ceil(tot.getMemorySize() /
(double) minimumAllocation.getMemorySize());
ResourceRequest rr = ResourceRequest.newInstance(
Priority.newInstance(0), ResourceRequest.ANY,
minimumAllocation, numCont);
if (strict) {
return generatePreemptionMessage(new Allocation(null, null,
currentContPreemption, null, null));
}
return generatePreemptionMessage(new Allocation(null, null,
null, currentContPreemption,
Collections.singletonList(rr)));
}
private PreemptionMessage generatePreemptionMessage(Allocation allocation) {
PreemptionMessage pMsg = null;
// assemble strict preemption request
if (allocation.getStrictContainerPreemptions() != null) {
pMsg = recordFactory.newRecordInstance(PreemptionMessage.class);
StrictPreemptionContract pStrict =
recordFactory.newRecordInstance(StrictPreemptionContract.class);
Set<PreemptionContainer> pCont = new HashSet<PreemptionContainer>();
for (ContainerId cId : allocation.getStrictContainerPreemptions()) {
PreemptionContainer pc =
recordFactory.newRecordInstance(PreemptionContainer.class);
pc.setId(cId);
pCont.add(pc);
}
pStrict.setContainers(pCont);
pMsg.setStrictContract(pStrict);
}
// assemble negotiable preemption request
if (allocation.getResourcePreemptions() != null &&
allocation.getResourcePreemptions().size() > 0 &&
allocation.getContainerPreemptions() != null &&
allocation.getContainerPreemptions().size() > 0) {
if (pMsg == null) {
pMsg = recordFactory.newRecordInstance(PreemptionMessage.class);
}
PreemptionContract contract =
recordFactory.newRecordInstance(PreemptionContract.class);
Set<PreemptionContainer> pCont = new HashSet<PreemptionContainer>();
for (ContainerId cId : allocation.getContainerPreemptions()) {
PreemptionContainer pc =
recordFactory.newRecordInstance(PreemptionContainer.class);
pc.setId(cId);
pCont.add(pc);
}
List<PreemptionResourceRequest> pRes =
new ArrayList<PreemptionResourceRequest>();
for (ResourceRequest crr : allocation.getResourcePreemptions()) {
PreemptionResourceRequest prr =
recordFactory.newRecordInstance(PreemptionResourceRequest.class);
prr.setResourceRequest(crr);
pRes.add(prr);
}
contract.setContainers(pCont);
contract.setResourceRequest(pRes);
pMsg.setContract(contract);
}
return pMsg;
}
}
| TestCheckpointPreemptionPolicy |
java | apache__camel | components/camel-plc4x/src/main/java/org/apache/camel/component/plc4x/Plc4XPollingConsumer.java | {
"start": 1452,
"end": 4430
} | class ____ extends EventDrivenPollingConsumer {
private static final Logger LOGGER = LoggerFactory.getLogger(Plc4XPollingConsumer.class);
private final Plc4XEndpoint plc4XEndpoint;
public Plc4XPollingConsumer(Plc4XEndpoint endpoint) {
super(endpoint);
this.plc4XEndpoint = endpoint;
}
@Override
public String toString() {
return "Plc4XPollingConsumer[" + plc4XEndpoint + "]";
}
@Override
public Endpoint getEndpoint() {
return plc4XEndpoint;
}
@Override
protected void doStart() throws Exception {
super.doStart();
try {
plc4XEndpoint.setupConnection();
} catch (PlcConnectionException e) {
if (LOGGER.isTraceEnabled()) {
LOGGER.error("Connection setup failed, stopping PollingConsumer", e);
} else {
LOGGER.error("Connection setup failed, stopping PollingConsumer");
}
doStop();
}
}
@Override
public Exchange receive() {
return doReceive(-1);
}
@Override
public Exchange receiveNoWait() {
return doReceive(0);
}
@Override
public Exchange receive(long timeout) {
return doReceive(timeout);
}
protected Exchange doReceive(long timeout) {
Exchange exchange = plc4XEndpoint.createExchange();
try {
plc4XEndpoint.reconnectIfNeeded();
if (plc4XEndpoint.connection == null) {
throw new PlcConnectionException("Cannot establish connection");
}
PlcReadRequest request = plc4XEndpoint.buildPlcReadRequest();
CompletableFuture<? extends PlcReadResponse> future
= request.execute().whenComplete((plcReadResponse, throwable) -> {
});
PlcReadResponse response;
if (timeout >= 0) {
response = future.get(timeout, TimeUnit.MILLISECONDS);
} else {
response = future.get();
}
Map<String, Object> rsp = new HashMap<>();
for (String field : response.getTagNames()) {
rsp.put(field, response.getObject(field));
}
exchange.getIn().setBody(rsp);
} catch (ExecutionException | TimeoutException e) {
getExceptionHandler().handleException(e);
exchange.getIn().setBody(new HashMap<>());
} catch (InterruptedException e) {
getExceptionHandler().handleException(e);
Thread.currentThread().interrupt();
} catch (PlcConnectionException e) {
if (LOGGER.isTraceEnabled()) {
LOGGER.warn("Unable to reconnect, skipping request", e);
} else {
LOGGER.warn("Unable to reconnect, skipping request");
}
exchange.getIn().setBody(new HashMap<>());
}
return exchange;
}
}
| Plc4XPollingConsumer |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.