language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/HttpSecurityRequestMatchersTests.java | {
"start": 10360,
"end": 11082
} | class ____ {
@Bean
PathPatternRequestMatcherBuilderFactoryBean requestMatcherBuilder() {
return new PathPatternRequestMatcherBuilderFactoryBean();
}
@Bean
SecurityFilterChain filterChain(HttpSecurity http, PathPatternRequestMatcher.Builder builder) throws Exception {
// @formatter:off
http
.securityMatchers((security) -> security
.requestMatchers(builder.matcher("/path")))
.httpBasic(withDefaults())
.authorizeHttpRequests((requests) -> requests
.anyRequest().denyAll());
// @formatter:on
return http.build();
}
@Bean
UserDetailsService userDetailsService() {
return new InMemoryUserDetailsManager();
}
@RestController
static | RequestMatchersMvcMatcherConfig |
java | elastic__elasticsearch | x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/event/parser/field/PaginationAnalyticsEventField.java | {
"start": 818,
"end": 2291
} | class ____ {
public static final ParseField PAGINATION_FIELD = new ParseField("page");
public static final ParseField CURRENT_PAGE_FIELD = new ParseField("current");
public static final ParseField PAGE_SIZE_FIELD = new ParseField("size");
private static final ObjectParser<Map<String, Integer>, AnalyticsEvent.Context> PARSER = new ObjectParser<>(
PAGINATION_FIELD.getPreferredName(),
HashMap::new
);
private static int requirePositiveInt(int i, String field) {
if (i < 0) throw new IllegalArgumentException(Strings.format("field [%s] must be positive", field));
return i;
}
static {
PARSER.declareInt(
(b, v) -> b.put(CURRENT_PAGE_FIELD.getPreferredName(), requirePositiveInt(v, CURRENT_PAGE_FIELD.getPreferredName())),
CURRENT_PAGE_FIELD
);
PARSER.declareInt(
(b, v) -> b.put(PAGE_SIZE_FIELD.getPreferredName(), requirePositiveInt(v, PAGE_SIZE_FIELD.getPreferredName())),
PAGE_SIZE_FIELD
);
PARSER.declareRequiredFieldSet(CURRENT_PAGE_FIELD.getPreferredName());
PARSER.declareRequiredFieldSet(PAGE_SIZE_FIELD.getPreferredName());
}
private PaginationAnalyticsEventField() {}
public static Map<String, Integer> fromXContent(XContentParser parser, AnalyticsEvent.Context context) throws IOException {
return Map.copyOf(PARSER.parse(parser, context));
}
}
| PaginationAnalyticsEventField |
java | apache__spark | sql/catalyst/src/main/java/org/apache/spark/sql/connector/write/RowLevelOperationBuilder.java | {
"start": 909,
"end": 1005
} | interface ____ building a {@link RowLevelOperation}.
*
* @since 3.3.0
*/
@Experimental
public | for |
java | spring-projects__spring-boot | core/spring-boot/src/test/java/org/springframework/boot/context/config/FileExtensionHintTests.java | {
"start": 846,
"end": 1980
} | class ____ {
@Test
void isPresentWhenHasHint() {
assertThat(FileExtensionHint.from("foo[.bar]").isPresent()).isTrue();
}
@Test
void isPresentWhenHasNoHint() {
assertThat(FileExtensionHint.from("foo").isPresent()).isFalse();
assertThat(FileExtensionHint.from("foo[bar]").isPresent()).isFalse();
assertThat(FileExtensionHint.from("foo[.b[ar]").isPresent()).isFalse();
}
@Test
void orElseWhenHasHint() {
assertThat(FileExtensionHint.from("foo[.bar]").orElse(".txt")).isEqualTo(".bar");
}
@Test
void orElseWhenHasNoHint() {
assertThat(FileExtensionHint.from("foo").orElse(".txt")).isEqualTo(".txt");
}
@Test
void toStringWhenHasHintReturnsDotExtension() {
assertThat(FileExtensionHint.from("foo[.bar]")).hasToString(".bar");
}
@Test
void toStringWhenHasNoHintReturnsEmpty() {
assertThat(FileExtensionHint.from("foo")).hasToString("");
}
@Test
void removeFromWhenHasHint() {
assertThat(FileExtensionHint.removeFrom("foo[.bar]")).isEqualTo("foo");
}
@Test
void removeFromWhenHasNoHint() {
assertThat(FileExtensionHint.removeFrom("foo[bar]")).isEqualTo("foo[bar]");
}
}
| FileExtensionHintTests |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/cdiprovider/CDIProviderTest.java | {
"start": 1431,
"end": 1773
} | class ____ {
private int val;
static final AtomicBoolean DESTROYED = new AtomicBoolean();
@PostConstruct
void init() {
val = 10;
}
@PreDestroy
void destroy() {
DESTROYED.set(true);
}
int getVal() {
return val;
}
}
}
| Moo |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/io/network/partition/JobMasterPartitionTrackerImplTest.java | {
"start": 21069,
"end": 21877
} | class ____ {
private final ResourceID taskExecutorId;
private final JobID jobId;
private final Collection<ResultPartitionID> releasedPartitions;
private ReleaseCall(
ResourceID taskExecutorId,
JobID jobId,
Collection<ResultPartitionID> releasedPartitions) {
this.taskExecutorId = taskExecutorId;
this.jobId = jobId;
this.releasedPartitions = releasedPartitions;
}
public ResourceID getTaskExecutorId() {
return taskExecutorId;
}
public JobID getJobId() {
return jobId;
}
public Collection<ResultPartitionID> getReleasedPartitions() {
return releasedPartitions;
}
}
private static | ReleaseCall |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/nestedbeans/unmappable/erroneous/UnmappableSourceValuePropertyMapper.java | {
"start": 608,
"end": 686
} | class ____ extends BaseValuePropertyMapper {
}
| UnmappableSourceValuePropertyMapper |
java | quarkusio__quarkus | extensions/grpc/deployment/src/test/java/io/quarkus/grpc/client/tls/TlsWithJKSTrustStoreAndTlsRegistryTest.java | {
"start": 733,
"end": 1973
} | class ____ {
private static final String configuration = """
quarkus.tls.trust-store.jks.path=target/certs/grpc-client-truststore.jks
quarkus.tls.trust-store.jks.password=password
quarkus.grpc.clients.hello.host=localhost
quarkus.grpc.clients.hello.port=9001
quarkus.grpc.clients.hello.plain-text=false
quarkus.grpc.clients.hello.use-quarkus-grpc-client=true
# Legacy server
quarkus.grpc.server.ssl.certificate=target/certs/grpc.crt
quarkus.grpc.server.ssl.key=target/certs/grpc.key
""";
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest().setArchiveProducer(
() -> ShrinkWrap.create(JavaArchive.class)
.addPackage(HelloWorldTlsEndpoint.class.getPackage())
.addPackage(io.grpc.examples.helloworld.GreeterGrpc.class.getPackage())
.add(new StringAsset(configuration), "application.properties"));
@Test
void testClientTlsConfiguration() {
String response = get("/hello/blocking/neo").asString();
assertThat(response).isEqualTo("Hello neo");
}
}
| TlsWithJKSTrustStoreAndTlsRegistryTest |
java | spring-cloud__spring-cloud-gateway | spring-cloud-gateway-server-webflux/src/main/java/org/springframework/cloud/gateway/filter/factory/SetRequestHostHeaderGatewayFilterFactory.java | {
"start": 1404,
"end": 2609
} | class ____
extends AbstractGatewayFilterFactory<SetRequestHostHeaderGatewayFilterFactory.Config> {
public SetRequestHostHeaderGatewayFilterFactory() {
super(Config.class);
}
@Override
public List<String> shortcutFieldOrder() {
return Collections.singletonList("host");
}
@Override
public GatewayFilter apply(Config config) {
return new GatewayFilter() {
@Override
public Mono<Void> filter(ServerWebExchange exchange, GatewayFilterChain chain) {
String value = ServerWebExchangeUtils.expand(exchange, config.getHost());
ServerHttpRequest request = exchange.getRequest().mutate().headers(httpHeaders -> {
httpHeaders.remove("Host");
httpHeaders.add("Host", value);
}).build();
// Make sure the header we just set is preserved
exchange.getAttributes().put(PRESERVE_HOST_HEADER_ATTRIBUTE, true);
return chain.filter(exchange.mutate().request(request).build());
}
@Override
public String toString() {
String host = config.getHost();
return filterToStringCreator(SetRequestHostHeaderGatewayFilterFactory.this)
.append(host != null ? host : "")
.toString();
}
};
}
public static | SetRequestHostHeaderGatewayFilterFactory |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java | {
"start": 14200,
"end": 16309
} | class ____ extends IOMapperBase<Long> {
protected CompressionCodec compressionCodec;
protected String blockStoragePolicy;
IOStatMapper() {
}
@Override // Mapper
public void configure(JobConf conf) {
super.configure(conf);
// grab compression
String compression = getConf().get("test.io.compression.class", null);
Class<? extends CompressionCodec> codec;
// try to initialize codec
try {
codec = (compression == null) ? null :
Class.forName(compression).asSubclass(CompressionCodec.class);
} catch(Exception e) {
throw new RuntimeException("Compression codec not found: ", e);
}
if(codec != null) {
compressionCodec = (CompressionCodec)
ReflectionUtils.newInstance(codec, getConf());
}
blockStoragePolicy = getConf().get(STORAGE_POLICY_NAME_KEY, null);
}
@Override // IOMapperBase
void collectStats(OutputCollector<Text, Text> output,
String name,
long execTime,
Long objSize) throws IOException {
long totalSize = objSize.longValue();
float ioRateMbSec = (float)totalSize * 1000 / (execTime * MEGA);
LOG.info("Number of bytes processed = " + totalSize);
LOG.info("Exec time = " + execTime);
LOG.info("IO rate = " + ioRateMbSec);
output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "tasks"),
new Text(String.valueOf(1)));
output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "size"),
new Text(String.valueOf(totalSize)));
output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "time"),
new Text(String.valueOf(execTime)));
output.collect(new Text(AccumulatingReducer.VALUE_TYPE_FLOAT + "rate"),
new Text(String.valueOf(ioRateMbSec*1000)));
output.collect(new Text(AccumulatingReducer.VALUE_TYPE_FLOAT + "sqrate"),
new Text(String.valueOf(ioRateMbSec*ioRateMbSec*1000)));
}
}
/**
* Write mapper class.
*/
public static | IOStatMapper |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/byte2darrays/Byte2DArrays_assertContains_at_Index_Test.java | {
"start": 1116,
"end": 1476
} | class ____ extends Byte2DArraysBaseTest {
@Test
void should_delegate_to_Arrays2D() {
// GIVEN
byte[] bytes = new byte[] { 6, 8, 10 };
// WHEN
byte2dArrays.assertContains(info, actual, bytes, atIndex(1));
// THEN
verify(arrays2d).assertContains(info, failures, actual, bytes, atIndex(1));
}
}
| Byte2DArrays_assertContains_at_Index_Test |
java | apache__spark | examples/src/main/java/org/apache/spark/examples/mllib/JavaSVDExample.java | {
"start": 1445,
"end": 2802
} | class ____ {
public static void main(String[] args) {
SparkConf conf = new SparkConf().setAppName("SVD Example");
SparkContext sc = new SparkContext(conf);
JavaSparkContext jsc = JavaSparkContext.fromSparkContext(sc);
// $example on$
List<Vector> data = Arrays.asList(
Vectors.sparse(5, new int[] {1, 3}, new double[] {1.0, 7.0}),
Vectors.dense(2.0, 0.0, 3.0, 4.0, 5.0),
Vectors.dense(4.0, 0.0, 0.0, 6.0, 7.0)
);
JavaRDD<Vector> rows = jsc.parallelize(data);
// Create a RowMatrix from JavaRDD<Vector>.
RowMatrix mat = new RowMatrix(rows.rdd());
// Compute the top 5 singular values and corresponding singular vectors.
SingularValueDecomposition<RowMatrix, Matrix> svd = mat.computeSVD(5, true, 1.0E-9d);
RowMatrix U = svd.U(); // The U factor is a RowMatrix.
Vector s = svd.s(); // The singular values are stored in a local dense vector.
Matrix V = svd.V(); // The V factor is a local dense matrix.
// $example off$
Vector[] collectPartitions = (Vector[]) U.rows().collect();
System.out.println("U factor is:");
for (Vector vector : collectPartitions) {
System.out.println("\t" + vector);
}
System.out.println("Singular values are: " + s);
System.out.println("V factor is:\n" + V);
jsc.stop();
}
}
| JavaSVDExample |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/core/publisher/FluxCacheTest.java | {
"start": 980,
"end": 11682
} | class ____ {
@Test
public void cacheFlux() {
VirtualTimeScheduler vts = VirtualTimeScheduler.create();
Flux<Tuple2<Long, Integer>> source = Flux.just(1, 2, 3)
.delayElements(Duration.ofMillis(1000)
, vts)
.cache()
.elapsed(vts);
StepVerifier.withVirtualTime(() -> source, () -> vts, Long.MAX_VALUE)
.thenAwait(Duration.ofSeconds(3))
.expectNextMatches(t -> t.getT1() == 1000 && t.getT2() == 1)
.expectNextMatches(t -> t.getT1() == 1000 && t.getT2() == 2)
.expectNextMatches(t -> t.getT1() == 1000 && t.getT2() == 3)
.verifyComplete();
StepVerifier.withVirtualTime(() -> source, () -> vts, Long.MAX_VALUE)
.thenAwait(Duration.ofSeconds(3))
.expectNextMatches(t -> t.getT1() == 0 && t.getT2() == 1)
.expectNextMatches(t -> t.getT1() == 0 && t.getT2() == 2)
.expectNextMatches(t -> t.getT1() == 0 && t.getT2() == 3)
.verifyComplete();
}
@Test
public void cacheFluxTTL() {
VirtualTimeScheduler vts = VirtualTimeScheduler.create();
Flux<Tuple2<Long, Integer>> source = Flux.just(1, 2, 3)
.delayElements(Duration.ofMillis(1000)
, vts)
.cache(Duration.ofMillis(2000), vts)
.elapsed(vts);
StepVerifier.withVirtualTime(() -> source, () -> vts, Long.MAX_VALUE)
.thenAwait(Duration.ofSeconds(3))
.expectNextMatches(t -> t.getT1() == 1000 && t.getT2() == 1)
.expectNextMatches(t -> t.getT1() == 1000 && t.getT2() == 2)
.expectNextMatches(t -> t.getT1() == 1000 && t.getT2() == 3)
.verifyComplete();
StepVerifier.withVirtualTime(() -> source, () -> vts, Long.MAX_VALUE)
.thenAwait(Duration.ofSeconds(3))
.expectNextMatches(t -> t.getT1() == 0 && t.getT2() == 2)
.expectNextMatches(t -> t.getT1() == 0 && t.getT2() == 3)
.verifyComplete();
}
@Test
public void cacheFluxHistoryTTL() {
VirtualTimeScheduler vts = VirtualTimeScheduler.create();
Flux<Tuple2<Long, Integer>> source = Flux.just(1, 2, 3)
.delayElements(Duration.ofMillis
(1000), vts)
.cache(2, Duration.ofMillis(2000), vts)
.elapsed(vts);
StepVerifier.withVirtualTime(() -> source, () -> vts, Long.MAX_VALUE)
.thenAwait(Duration.ofSeconds(3))
.expectNextMatches(t -> t.getT1() == 1000 && t.getT2() == 1)
.expectNextMatches(t -> t.getT1() == 1000 && t.getT2() == 2)
.expectNextMatches(t -> t.getT1() == 1000 && t.getT2() == 3)
.verifyComplete();
StepVerifier.withVirtualTime(() -> source, () -> vts, Long.MAX_VALUE)
.thenAwait(Duration.ofSeconds(3))
.expectNextMatches(t -> t.getT1() == 0 && t.getT2() == 2)
.expectNextMatches(t -> t.getT1() == 0 && t.getT2() == 3)
.verifyComplete();
}
@Test
public void cacheFluxTTLReconnectsAfterTTL() {
VirtualTimeScheduler vts = VirtualTimeScheduler.create();
AtomicInteger i = new AtomicInteger(0);
Flux<Integer> source = Flux.defer(() -> Flux.just(i.incrementAndGet()))
.cache(Duration.ofMillis(2000), vts);
StepVerifier.create(source)
.expectNext(1)
.verifyComplete();
StepVerifier.create(source)
.expectNext(1)
.verifyComplete();
vts.advanceTimeBy(Duration.ofSeconds(3));
StepVerifier.create(source)
.expectNext(2)
.verifyComplete();
}
@Test
void cacheZeroFluxCachesCompletion() {
VirtualTimeScheduler vts = VirtualTimeScheduler.create();
Flux<Tuple2<Long, Integer>> source = Flux.just(1, 2, 3)
.delayElements(Duration.ofMillis(1000)
, vts)
.cache(0)
.elapsed(vts);
StepVerifier.withVirtualTime(() -> source, () -> vts, Long.MAX_VALUE)
.thenAwait(Duration.ofSeconds(3))
.expectNextMatches(t -> t.getT1() == 1000 && t.getT2() == 1)
.expectNextMatches(t -> t.getT1() == 1000 && t.getT2() == 2)
.expectNextMatches(t -> t.getT1() == 1000 && t.getT2() == 3)
.verifyComplete();
StepVerifier.create(source).verifyComplete();
}
@Test
public void cacheZeroFluxTTLReconnectsAfterSourceCompletion() {
VirtualTimeScheduler vts = VirtualTimeScheduler.create();
Flux<Tuple2<Long, Integer>> source = Flux.just(1, 2, 3)
.delayElements(
Duration.ofMillis(1000), vts
)
.cache(0, Duration.ofMillis(2000), vts)
.elapsed(vts);
StepVerifier.withVirtualTime(() -> source, () -> vts, Long.MAX_VALUE)
.thenAwait(Duration.ofSeconds(3))
.expectNextMatches(t -> t.getT1() == 1000 && t.getT2() == 1)
.expectNextMatches(t -> t.getT1() == 1000 && t.getT2() == 2)
.expectNextMatches(t -> t.getT1() == 1000 && t.getT2() == 3)
.verifyComplete();
StepVerifier.create(source).expectTimeout(Duration.ofMillis(500)).verify();
}
@Test
public void cacheContextHistory() {
AtomicInteger contextFillCount = new AtomicInteger();
Flux<String> cached = Flux.just(1, 2)
.flatMap(i -> Mono.deferContextual(Mono::just)
.map(ctx -> ctx.getOrDefault("a", "BAD"))
)
.cache(1)
.contextWrite(ctx -> ctx.put("a", "GOOD" + contextFillCount.incrementAndGet()));
//at first pass, the context is captured
String cacheMiss = cached.blockLast();
assertThat(cacheMiss).as("cacheMiss").isEqualTo("GOOD1");
assertThat(contextFillCount).as("cacheMiss").hasValue(1);
//at second subscribe, the Context fill attempt is still done, but ultimately ignored since first context is cached
String cacheHit = cached.blockLast();
assertThat(cacheHit).as("cacheHit").isEqualTo("GOOD1"); //value from the cache
assertThat(contextFillCount).as("cacheHit").hasValue(2); //function was still invoked
//at third subscribe, function is called for the 3rd time, but the context is still cached
String cacheHit2 = cached.blockLast();
assertThat(cacheHit2).as("cacheHit2").isEqualTo("GOOD1");
assertThat(contextFillCount).as("cacheHit2").hasValue(3);
//at fourth subscribe, function is called for the 4th time, but the context is still cached
String cacheHit3 = cached.blockLast();
assertThat(cacheHit3).as("cacheHit3").isEqualTo("GOOD1");
assertThat(contextFillCount).as("cacheHit3").hasValue(4);
}
@Test
public void cacheZeroContext() {
AtomicInteger contextFillCount = new AtomicInteger();
Flux<String> cached = Flux.just(1, 2)
.flatMap(i -> Mono.deferContextual(Mono::just)
.map(ctx -> ctx.getOrDefault("a", "BAD"))
)
.cache(0)
.contextWrite(ctx -> ctx.put("a", "GOOD" + contextFillCount.incrementAndGet()));
// at first pass, the Context is propagated to subscriber, but not cached
String cacheMiss = cached.blockLast();
assertThat(cacheMiss).as("cacheMiss").isEqualTo("GOOD1");
assertThat(contextFillCount).as("cacheMiss").hasValue(1);
// at second subscribe, the Context fill attempt is still done, but ultimately
// ignored since source terminated
String zeroCache = cached.blockLast();
assertThat(zeroCache).as("zeroCache").isNull(); //value from the cache
assertThat(contextFillCount).as("zeroCache").hasValue(2); //function was still invoked
//at third subscribe, function is called for the 3rd time, but the context is still cached
String zeroCache2 = cached.blockLast();
assertThat(zeroCache2).as("zeroCache2").isNull();
assertThat(contextFillCount).as("zeroCache2").hasValue(3);
//at fourth subscribe, function is called for the 4th time, but the context is still cached
String zeroCache3 = cached.blockLast();
assertThat(zeroCache3).as("zeroCache3").isNull();
assertThat(contextFillCount).as("zeroCache3").hasValue(4);
}
@Test
public void cacheContextTime() {
AtomicInteger contextFillCount = new AtomicInteger();
VirtualTimeScheduler vts = VirtualTimeScheduler.create();
Flux<String> cached = Flux.just(1)
.flatMap(i -> Mono.deferContextual(Mono::just)
.map(ctx -> ctx.getOrDefault("a", "BAD"))
)
.replay(Duration.ofMillis(500), vts)
.autoConnect()
.contextWrite(ctx -> ctx.put("a", "GOOD" + contextFillCount.incrementAndGet()));
//at first pass, the context is captured
String cacheMiss = cached.blockLast();
assertThat(cacheMiss).as("cacheMiss").isEqualTo("GOOD1");
assertThat(contextFillCount).as("cacheMiss").hasValue(1);
//at second subscribe, the Context fill attempt is still done, but ultimately ignored since Mono.deferContextual(Mono::just) result is cached
String cacheHit = cached.blockLast();
assertThat(cacheHit).as("cacheHit").isEqualTo("GOOD1"); //value from the cache
assertThat(contextFillCount).as("cacheHit").hasValue(2); //function was still invoked
vts.advanceTimeBy(Duration.ofMillis(501));
//at third subscribe, after the expiration delay, function is called for the 3rd time, but this time the resulting context is cached
String cacheExpired = cached.blockLast();
assertThat(cacheExpired).as("cacheExpired").isEqualTo("GOOD3");
assertThat(contextFillCount).as("cacheExpired").hasValue(3);
//at fourth subscribe, function is called but ignored, the cached context is visible
String cachePostExpired = cached.blockLast();
assertThat(cachePostExpired).as("cachePostExpired").isEqualTo("GOOD3");
assertThat(contextFillCount).as("cachePostExpired").hasValue(4);
vts.dispose();
}
}
| FluxCacheTest |
java | apache__flink | flink-streaming-java/src/test/java/org/apache/flink/streaming/api/functions/sink/filesystem/TestUtils.java | {
"start": 12221,
"end": 12838
} | class ____ implements BucketAssigner<Tuple2<String, Integer>, String> {
private static final long serialVersionUID = 1L;
@Override
public String getBucketId(Tuple2<String, Integer> element, Context context) {
return element.f0;
}
@Override
public SimpleVersionedSerializer<String> getSerializer() {
return SimpleVersionedStringSerializer.INSTANCE;
}
}
/**
* A simple {@link BucketAssigner} that returns the second (Integer) element of a {@code Tuple2}
* object as the bucket id.
*/
static | TupleToStringBucketer |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/context/annotation/AnnotatedBeanDefinitionReader.java | {
"start": 7036,
"end": 7524
} | class ____
*/
@SuppressWarnings("unchecked")
public void registerBean(Class<?> beanClass, @Nullable String name,
Class<? extends Annotation>... qualifiers) {
doRegisterBean(beanClass, name, qualifiers, null, null);
}
/**
* Register a bean from the given bean class, deriving its metadata from
* class-declared annotations, using the given supplier for obtaining a new
* instance (possibly declared as a lambda expression or method reference).
* @param beanClass the | level |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/sql/dialect/h2/parser/H2SelectParser.java | {
"start": 834,
"end": 1291
} | class ____ extends SQLSelectParser {
public H2SelectParser(SQLExprParser exprParser) {
super(exprParser);
}
public H2SelectParser(SQLExprParser exprParser, SQLSelectListCache selectListCache) {
super(exprParser, selectListCache);
}
public H2SelectParser(String sql) {
this(new H2ExprParser(sql));
}
protected SQLExprParser createExprParser() {
return new H2ExprParser(lexer);
}
}
| H2SelectParser |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/asyncprocessing/SimpleAsyncExecutionController.java | {
"start": 3307,
"end": 3831
} | class ____<K, RET> extends AsyncRequest<K> {
final CheckedSupplier<RET> runnable;
public RunnableTask(
RecordContext<K> context,
boolean sync,
InternalAsyncFuture<RET> asyncFuture,
CheckedSupplier<RET> runnable) {
super(context, sync, asyncFuture);
this.runnable = runnable;
}
private void run() throws Exception {
asyncFuture.complete(runnable.get());
}
}
static | RunnableTask |
java | elastic__elasticsearch | x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/NlpInferenceInput.java | {
"start": 596,
"end": 2127
} | class ____ {
public static NlpInferenceInput fromText(String inputText) {
return new NlpInferenceInput(inputText);
}
public static NlpInferenceInput fromDoc(Map<String, Object> doc) {
return new NlpInferenceInput(doc);
}
private final String inputText;
private final Map<String, Object> doc;
private NlpInferenceInput(String inputText) {
this.inputText = ExceptionsHelper.requireNonNull(inputText, "input_text");
doc = null;
}
private NlpInferenceInput(Map<String, Object> doc) {
this.doc = ExceptionsHelper.requireNonNull(doc, "doc");
this.inputText = null;
}
public boolean isTextInput() {
return inputText != null;
}
public String getInputText() {
return inputText;
}
public String extractInput(TrainedModelInput input) {
if (isTextInput()) {
return getInputText();
}
assert input.getFieldNames().size() == 1;
String inputField = input.getFieldNames().get(0);
Object inputValue = XContentMapValues.extractValue(inputField, doc);
if (inputValue == null) {
throw ExceptionsHelper.badRequestException("Input field [{}] does not exist in the source document", inputField);
}
if (inputValue instanceof String) {
return (String) inputValue;
}
throw ExceptionsHelper.badRequestException("Input value [{}] for field [{}] must be a string", inputValue, inputField);
}
}
| NlpInferenceInput |
java | apache__spark | sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTablesOperation.java | {
"start": 1893,
"end": 5943
} | class ____ extends MetadataOperation {
private final String catalogName;
private final String schemaName;
private final String tableName;
private final List<String> tableTypeList;
protected final RowSet rowSet;
private final TableTypeMapping tableTypeMapping;
private static final TableSchema RESULT_SET_SCHEMA = new TableSchema()
.addStringColumn("TABLE_CAT", "Catalog name. NULL if not applicable.")
.addStringColumn("TABLE_SCHEM", "Schema name.")
.addStringColumn("TABLE_NAME", "Table name.")
.addStringColumn("TABLE_TYPE", "The table type, e.g. \"TABLE\", \"VIEW\", etc.")
.addStringColumn("REMARKS", "Comments about the table.")
.addStringColumn("TYPE_CAT", "The types catalog.")
.addStringColumn("TYPE_SCHEM", "The types schema.")
.addStringColumn("TYPE_NAME", "Type name.")
.addStringColumn("SELF_REFERENCING_COL_NAME",
"Name of the designated \"identifier\" column of a typed table.")
.addStringColumn("REF_GENERATION",
"Specifies how values in SELF_REFERENCING_COL_NAME are created.");
protected GetTablesOperation(HiveSession parentSession,
String catalogName, String schemaName, String tableName,
List<String> tableTypes) {
super(parentSession, OperationType.GET_TABLES);
this.catalogName = catalogName;
this.schemaName = schemaName;
this.tableName = tableName;
String tableMappingStr = getParentSession().getHiveConf()
.getVar(HiveConf.ConfVars.HIVE_SERVER2_TABLE_TYPE_MAPPING);
tableTypeMapping =
TableTypeMappingFactory.getTableTypeMapping(tableMappingStr);
if (tableTypes != null) {
tableTypeList = new ArrayList<String>();
for (String tableType : tableTypes) {
tableTypeList.addAll(Arrays.asList(tableTypeMapping.mapToHiveType(tableType.trim())));
}
} else {
tableTypeList = null;
}
this.rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion(), false);
}
@Override
public void runInternal() throws HiveSQLException {
setState(OperationState.RUNNING);
try {
IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient();
String schemaPattern = convertSchemaPattern(schemaName);
List<String> matchingDbs = metastoreClient.getDatabases(schemaPattern);
if(isAuthV2Enabled()){
List<HivePrivilegeObject> privObjs = HivePrivilegeObjectUtils.getHivePrivDbObjects(matchingDbs);
String cmdStr = "catalog : " + catalogName + ", schemaPattern : " + schemaName;
authorizeMetaGets(HiveOperationType.GET_TABLES, privObjs, cmdStr);
}
String tablePattern = convertIdentifierPattern(tableName, true);
for (TableMeta tableMeta :
metastoreClient.getTableMeta(schemaPattern, tablePattern, tableTypeList)) {
rowSet.addRow(new Object[] {
DEFAULT_HIVE_CATALOG,
tableMeta.getDbName(),
tableMeta.getTableName(),
tableTypeMapping.mapToClientType(tableMeta.getTableType()),
tableMeta.getComments(),
null, null, null, null, null
});
}
setState(OperationState.FINISHED);
} catch (Exception e) {
setState(OperationState.ERROR);
throw new HiveSQLException(e);
}
}
/* (non-Javadoc)
* @see org.apache.hive.service.cli.Operation#getResultSetSchema()
*/
@Override
public TTableSchema getResultSetSchema() throws HiveSQLException {
assertState(OperationState.FINISHED);
return RESULT_SET_SCHEMA.toTTableSchema();
}
/* (non-Javadoc)
* @see org.apache.hive.service.cli.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long)
*/
@Override
public TRowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException {
assertState(OperationState.FINISHED);
validateDefaultFetchOrientation(orientation);
if (orientation.equals(FetchOrientation.FETCH_FIRST)) {
rowSet.setStartOffset(0);
}
return rowSet.extractSubset((int)maxRows).toTRowSet();
}
}
| GetTablesOperation |
java | apache__flink | flink-table/flink-sql-parser/src/main/java/org/apache/flink/sql/parser/ddl/SqlAlterModel.java | {
"start": 1092,
"end": 1218
} | class ____ describe statements like ALTER MODEL [IF EXISTS] [[catalogName.]
* dataBasesName.]modelName ...
*/
public abstract | to |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/ComparableTypeTest.java | {
"start": 4459,
"end": 4528
} | class ____ implements Comparable<OnlyComparable> {}
/** | OnlyComparable |
java | netty__netty | transport/src/main/java/io/netty/channel/DefaultChannelPipeline.java | {
"start": 1693,
"end": 6183
} | class ____ implements ChannelPipeline {
static final InternalLogger logger = InternalLoggerFactory.getInstance(DefaultChannelPipeline.class);
private static final String HEAD_NAME = generateName0(HeadContext.class);
private static final String TAIL_NAME = generateName0(TailContext.class);
private static final FastThreadLocal<Map<Class<?>, String>> nameCaches =
new FastThreadLocal<Map<Class<?>, String>>() {
@Override
protected Map<Class<?>, String> initialValue() {
return new WeakHashMap<Class<?>, String>();
}
};
private static final AtomicReferenceFieldUpdater<DefaultChannelPipeline, MessageSizeEstimator.Handle> ESTIMATOR =
AtomicReferenceFieldUpdater.newUpdater(
DefaultChannelPipeline.class, MessageSizeEstimator.Handle.class, "estimatorHandle");
final HeadContext head;
final TailContext tail;
private final Channel channel;
private final ChannelFuture succeededFuture;
private final VoidChannelPromise voidPromise;
private final boolean touch = ResourceLeakDetector.isEnabled();
private Map<EventExecutorGroup, EventExecutor> childExecutors;
private volatile MessageSizeEstimator.Handle estimatorHandle;
private boolean firstRegistration = true;
/**
* This is the head of a linked list that is processed by {@link #callHandlerAddedForAllHandlers()} and so process
* all the pending {@link #callHandlerAdded0(AbstractChannelHandlerContext)}.
* <p>
* We only keep the head because it is expected that the list is used infrequently and its size is small.
* Thus full iterations to do insertions is assumed to be a good compromised to saving memory and tail management
* complexity.
*/
private PendingHandlerCallback pendingHandlerCallbackHead;
/**
* Set to {@code true} once the {@link AbstractChannel} is registered.Once set to {@code true} the value will never
* change.
*/
private boolean registered;
protected DefaultChannelPipeline(Channel channel) {
this.channel = ObjectUtil.checkNotNull(channel, "channel");
succeededFuture = new SucceededChannelFuture(channel, null);
voidPromise = new VoidChannelPromise(channel, true);
tail = new TailContext(this);
head = new HeadContext(this);
head.next = tail;
tail.prev = head;
}
final MessageSizeEstimator.Handle estimatorHandle() {
MessageSizeEstimator.Handle handle = estimatorHandle;
if (handle == null) {
handle = channel.config().getMessageSizeEstimator().newHandle();
if (!ESTIMATOR.compareAndSet(this, null, handle)) {
handle = estimatorHandle;
}
}
return handle;
}
final Object touch(Object msg, AbstractChannelHandlerContext next) {
return touch ? ReferenceCountUtil.touch(msg, next) : msg;
}
private AbstractChannelHandlerContext newContext(EventExecutorGroup group, String name, ChannelHandler handler) {
return new DefaultChannelHandlerContext(this, childExecutor(group), name, handler);
}
private EventExecutor childExecutor(EventExecutorGroup group) {
if (group == null) {
return null;
}
Boolean pinEventExecutor = channel.config().getOption(ChannelOption.SINGLE_EVENTEXECUTOR_PER_GROUP);
if (pinEventExecutor != null && !pinEventExecutor) {
return group.next();
}
Map<EventExecutorGroup, EventExecutor> childExecutors = this.childExecutors;
if (childExecutors == null) {
// Use size of 4 as most people only use one extra EventExecutor.
childExecutors = this.childExecutors = new IdentityHashMap<EventExecutorGroup, EventExecutor>(4);
}
// Pin one of the child executors once and remember it so that the same child executor
// is used to fire events for the same channel.
EventExecutor childExecutor = childExecutors.get(group);
if (childExecutor == null) {
childExecutor = group.next();
childExecutors.put(group, childExecutor);
}
return childExecutor;
}
@Override
public final Channel channel() {
return channel;
}
@Override
public final ChannelPipeline addFirst(String name, ChannelHandler handler) {
return addFirst(null, name, handler);
}
private | DefaultChannelPipeline |
java | spring-projects__spring-security | config/src/main/java/org/springframework/security/config/annotation/web/builders/HttpSecurity.java | {
"start": 11836,
"end": 12477
} | class ____ {
*
* @Bean
* public SecurityFilterChain securityFilterChain(HttpSecurity http) {
* http
* .headers((headers) ->
* headers
* .defaultsDisabled()
* .cacheControl(withDefaults())
* .frameOptions(withDefaults())
* );
* return http.build();
* }
* }
* </pre>
*
* You can also choose to keep the defaults but explicitly disable a subset of
* headers. For example, the following will enable all the default headers except
* {@link HeadersConfigurer#frameOptions(Customizer)}.
*
* <pre>
* @Configuration
* @EnableWebSecurity
* public | CsrfSecurityConfig |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Syncable.java | {
"start": 1215,
"end": 1677
} | interface ____ {
/** Flush out the data in client's user buffer. After the return of
* this call, new readers will see the data.
* @throws IOException if any error occurs
*/
void hflush() throws IOException;
/** Similar to posix fsync, flush out the data in client's user buffer
* all the way to the disk device (but the disk may have it in its cache).
* @throws IOException if error occurs
*/
void hsync() throws IOException;
}
| Syncable |
java | spring-projects__spring-security | test/src/test/java/org/springframework/security/test/web/reactive/server/SecurityMockServerConfigurersAnnotatedTests.java | {
"start": 1637,
"end": 5205
} | class ____ extends AbstractMockServerConfigurersTests {
WebTestClient client = WebTestClient.bindToController(this.controller)
.webFilter(new SecurityContextServerWebExchangeWebFilter())
.apply(SecurityMockServerConfigurers.springSecurity())
.configureClient()
.defaultHeader(HttpHeaders.ACCEPT, MediaType.APPLICATION_JSON_VALUE)
.build();
@Test
@WithMockUser
public void withMockUserWhenOnMethodThenSuccess() {
this.client.get().exchange().expectStatus().isOk();
Authentication authentication = TestSecurityContextHolder.getContext().getAuthentication();
this.controller.assertPrincipalIsEqualTo(authentication);
}
@Test
@WithMockUser
public void withMockUserWhenGlobalMockPrincipalThenOverridesAnnotation() {
TestingAuthenticationToken authentication = new TestingAuthenticationToken("authentication", "secret",
"ROLE_USER");
this.client = WebTestClient.bindToController(this.controller)
.webFilter(new SecurityContextServerWebExchangeWebFilter())
.apply(SecurityMockServerConfigurers.springSecurity())
.apply(SecurityMockServerConfigurers.mockAuthentication(authentication))
.configureClient()
.defaultHeader(HttpHeaders.ACCEPT, MediaType.APPLICATION_JSON_VALUE)
.build();
this.client.get().exchange().expectStatus().isOk();
this.controller.assertPrincipalIsEqualTo(authentication);
}
@Test
@WithMockUser
public void withMockUserWhenMutateWithMockPrincipalThenOverridesAnnotation() {
TestingAuthenticationToken authentication = new TestingAuthenticationToken("authentication", "secret",
"ROLE_USER");
this.client.mutateWith(SecurityMockServerConfigurers.mockAuthentication(authentication))
.get()
.exchange()
.expectStatus()
.isOk();
this.controller.assertPrincipalIsEqualTo(authentication);
}
@Test
@WithMockUser
public void withMockUserWhenMutateWithMockPrincipalAndNoMutateThenOverridesAnnotationAndUsesAnnotation() {
TestingAuthenticationToken authentication = new TestingAuthenticationToken("authentication", "secret",
"ROLE_USER");
this.client.mutateWith(SecurityMockServerConfigurers.mockAuthentication(authentication))
.get()
.exchange()
.expectStatus()
.isOk();
this.controller.assertPrincipalIsEqualTo(authentication);
this.client.get().exchange().expectStatus().isOk();
assertPrincipalCreatedFromUserDetails(this.controller.removePrincipal(), this.userBuilder.build());
}
@Test
@WithMockUser
public void withMockUserWhenOnMethodAndRequestIsExecutedOnDifferentThreadThenSuccess() {
Authentication authentication = TestSecurityContextHolder.getContext().getAuthentication();
ForkJoinPool.commonPool().submit(() -> this.client.get().exchange().expectStatus().isOk()).join();
this.controller.assertPrincipalIsEqualTo(authentication);
}
@Test
@WithMockUser
public void withMockUserAndWithCallOnSeparateThreadWhenMutateWithMockPrincipalAndNoMutateThenOverridesAnnotationAndUsesAnnotation() {
TestingAuthenticationToken authentication = new TestingAuthenticationToken("authentication", "secret",
"ROLE_USER");
ForkJoinPool.commonPool()
.submit(() -> this.client.mutateWith(SecurityMockServerConfigurers.mockAuthentication(authentication))
.get()
.exchange()
.expectStatus()
.isOk())
.join();
this.controller.assertPrincipalIsEqualTo(authentication);
ForkJoinPool.commonPool().submit(() -> this.client.get().exchange().expectStatus().isOk()).join();
assertPrincipalCreatedFromUserDetails(this.controller.removePrincipal(), this.userBuilder.build());
}
}
| SecurityMockServerConfigurersAnnotatedTests |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/pattern/LevelPatternConverter.java | {
"start": 1488,
"end": 5028
} | class ____ extends LogEventPatternConverter {
private static final String OPTION_LENGTH = "length";
private static final String OPTION_LOWER = "lowerCase";
/**
* Singleton.
*/
private static final LevelPatternConverter INSTANCE = new SimpleLevelPatternConverter();
/**
* Private constructor.
*/
private LevelPatternConverter() {
super("Level", "level");
}
/**
* Obtains an instance of pattern converter.
*
* @param options
* options, may be null. May contain a list of level names and The value that should be displayed for the
* Level.
* @return instance of pattern converter.
*/
public static LevelPatternConverter newInstance(final String[] options) {
if (options == null || options.length == 0) {
return INSTANCE;
}
final Map<Level, String> levelMap = new HashMap<>();
int length = Integer.MAX_VALUE; // More than the longest level name.
boolean lowerCase = false;
final String[] definitions = options[0].split(Patterns.COMMA_SEPARATOR);
for (final String def : definitions) {
final String[] pair = def.split("=");
if (pair == null || pair.length != 2) {
LOGGER.error("Invalid option {}", def);
continue;
}
final String key = pair[0].trim();
final String value = pair[1].trim();
if (OPTION_LENGTH.equalsIgnoreCase(key)) {
length = Integers.parseInt(value);
} else if (OPTION_LOWER.equalsIgnoreCase(key)) {
lowerCase = Boolean.parseBoolean(value);
} else {
final Level level = Level.toLevel(key, null);
if (level == null) {
LOGGER.error("Invalid Level {}", key);
} else {
levelMap.put(level, value);
}
}
}
if (levelMap.isEmpty() && length == Integer.MAX_VALUE && !lowerCase) {
return INSTANCE;
}
for (final Level level : Level.values()) {
if (!levelMap.containsKey(level)) {
final String left = left(level, length);
levelMap.put(level, lowerCase ? toRootLowerCase(left) : left);
}
}
return new LevelMapLevelPatternConverter(levelMap);
}
/**
* Returns the leftmost chars of the level name for the given level.
*
* @param level
* The level
* @param length
* How many chars to return
* @return The abbreviated level name, or the whole level name if the {@code length} is greater than the level name
* length,
*/
private static String left(final Level level, final int length) {
final String string = level.toString();
if (length >= string.length()) {
return string;
}
return string.substring(0, length);
}
/**
* {@inheritDoc}
*/
@Override
public void format(final LogEvent event, final StringBuilder output) {
throw new UnsupportedOperationException("Overridden by subclasses");
}
/**
* {@inheritDoc}
*/
@Override
public String getStyleClass(final Object e) {
if (e instanceof LogEvent) {
return "level " + toRootLowerCase(((LogEvent) e).getLevel().name());
}
return "level";
}
private static final | LevelPatternConverter |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/processor/internals/GlobalStreamThread.java | {
"start": 9109,
"end": 20898
} | class ____ {
private final Consumer<byte[], byte[]> globalConsumer;
private final GlobalStateMaintainer stateMaintainer;
private final Duration pollTime;
private final Logger log;
StateConsumer(final LogContext logContext,
final Consumer<byte[], byte[]> globalConsumer,
final GlobalStateMaintainer stateMaintainer,
final Duration pollTime) {
this.log = logContext.logger(getClass());
this.globalConsumer = globalConsumer;
this.stateMaintainer = stateMaintainer;
this.pollTime = pollTime;
}
/**
* @throws IllegalStateException If a store gets registered after initialized is already finished
* @throws StreamsException if the store's change log does not contain the partition
*/
void initialize() {
final Map<TopicPartition, Long> partitionOffsets = stateMaintainer.initialize();
globalConsumer.assign(partitionOffsets.keySet());
for (final Map.Entry<TopicPartition, Long> entry : partitionOffsets.entrySet()) {
globalConsumer.seek(entry.getKey(), entry.getValue());
}
}
void pollAndUpdate() {
final ConsumerRecords<byte[], byte[]> received = globalConsumer.poll(pollTime);
for (final ConsumerRecord<byte[], byte[]> record : received) {
stateMaintainer.update(record);
}
stateMaintainer.maybeCheckpoint();
}
public void close(final boolean wipeStateStore) throws IOException {
try {
globalConsumer.close();
} catch (final RuntimeException e) {
// just log an error if the consumer throws an exception during close
// so we can always attempt to close the state stores.
log.error("Failed to close global consumer due to the following error:", e);
}
stateMaintainer.close(wipeStateStore);
}
}
@Override
public void run() {
final StateConsumer stateConsumer = initialize();
if (stateConsumer == null) {
// during initialization, the caller thread would wait for the state consumer
// to restore the global state store before transiting to RUNNING state and return;
// if an error happens during the restoration process, the stateConsumer will be null
// and in this case we will transit the state to PENDING_SHUTDOWN and DEAD immediately.
// the exception will be thrown in the caller thread during start() function.
setState(State.PENDING_SHUTDOWN);
setState(State.DEAD);
log.error("Error happened during initialization of the global state store; this thread has shutdown.");
streamsMetrics.removeAllThreadLevelSensors(getName());
streamsMetrics.removeAllThreadLevelMetrics(getName());
return;
}
boolean wipeStateStore = false;
try {
while (stillRunning()) {
final long size = cacheSize.getAndSet(-1L);
if (size != -1L) {
cache.resize(size);
}
stateConsumer.pollAndUpdate();
if (fetchDeadlineClientInstanceId != -1) {
if (fetchDeadlineClientInstanceId >= time.milliseconds()) {
try {
// we pass in a timeout of zero, to just trigger the "get instance id" background RPC,
// we don't want to block the global thread that can do useful work in the meantime
clientInstanceIdFuture.complete(globalConsumer.clientInstanceId(Duration.ZERO));
fetchDeadlineClientInstanceId = -1;
} catch (final IllegalStateException disabledError) {
// if telemetry is disabled on a client, we swallow the error,
// to allow returning a partial result for all other clients
clientInstanceIdFuture.complete(null);
fetchDeadlineClientInstanceId = -1;
} catch (final TimeoutException swallow) {
// swallow
} catch (final Exception error) {
clientInstanceIdFuture.completeExceptionally(error);
fetchDeadlineClientInstanceId = -1;
}
} else {
clientInstanceIdFuture.completeExceptionally(
new TimeoutException("Could not retrieve global consumer client instance id.")
);
fetchDeadlineClientInstanceId = -1;
}
}
}
} catch (final InvalidOffsetException recoverableException) {
wipeStateStore = true;
log.error(
"Updating global state failed due to inconsistent local state. Will attempt to clean up the local state. You can restart KafkaStreams to recover from this error.",
recoverableException
);
final StreamsException e = new StreamsException(
"Updating global state failed. You can restart KafkaStreams to launch a new GlobalStreamThread to recover from this error.",
recoverableException
);
this.streamsUncaughtExceptionHandler.accept(e);
} catch (final Exception e) {
log.error("Error happened while maintaining global state store. The streams application or client will now close to ERROR.", e);
this.streamsUncaughtExceptionHandler.accept(e);
} finally {
// set the state to pending shutdown first as it may be called due to error;
// its state may already be PENDING_SHUTDOWN so it will return false but we
// intentionally do not check the returned flag
setState(State.PENDING_SHUTDOWN);
log.info("Shutting down");
try {
stateConsumer.close(wipeStateStore);
} catch (final IOException e) {
log.error("Failed to close state maintainer due to the following error:", e);
}
streamsMetrics.removeAllThreadLevelSensors(getName());
streamsMetrics.removeAllThreadLevelMetrics(getName());
setState(DEAD);
log.info("Shutdown complete");
}
}
public void setUncaughtExceptionHandler(final java.util.function.Consumer<Throwable> streamsUncaughtExceptionHandler) {
this.streamsUncaughtExceptionHandler = streamsUncaughtExceptionHandler;
}
public void resize(final long cacheSize) {
this.cacheSize.set(cacheSize);
}
private StateConsumer initialize() {
StateConsumer stateConsumer = null;
try {
final GlobalStateManager stateMgr = new GlobalStateManagerImpl(
logContext,
time,
topology,
globalConsumer,
stateDirectory,
stateRestoreListener,
config
);
final GlobalProcessorContextImpl globalProcessorContext = new GlobalProcessorContextImpl(
config,
stateMgr,
streamsMetrics,
cache,
time
);
stateMgr.setGlobalProcessorContext(globalProcessorContext);
final StreamsThreadMetricsDelegatingReporter globalMetricsReporter = new StreamsThreadMetricsDelegatingReporter(globalConsumer, getName(), Optional.empty());
streamsMetrics.metricsRegistry().addReporter(globalMetricsReporter);
stateConsumer = new StateConsumer(
logContext,
globalConsumer,
new GlobalStateUpdateTask(
logContext,
topology,
globalProcessorContext,
stateMgr,
config.deserializationExceptionHandler(),
time,
config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG)
),
Duration.ofMillis(config.getLong(StreamsConfig.POLL_MS_CONFIG))
);
try {
stateConsumer.initialize();
} catch (final InvalidOffsetException recoverableException) {
log.error(
"Bootstrapping global state failed due to inconsistent local state. Will attempt to clean up the local state. You can restart KafkaStreams to recover from this error.",
recoverableException
);
closeStateConsumer(stateConsumer, true);
throw new StreamsException(
"Bootstrapping global state failed. You can restart KafkaStreams to recover from this error.",
recoverableException
);
}
setState(RUNNING);
return stateConsumer;
} catch (final StreamsException fatalException) {
closeStateConsumer(stateConsumer, false);
startupException = fatalException;
} catch (final Throwable fatalException) {
closeStateConsumer(stateConsumer, false);
startupException = new StreamsException("Exception caught during initialization of GlobalStreamThread", fatalException);
} finally {
initializationLatch.countDown();
}
return null;
}
private void closeStateConsumer(final StateConsumer stateConsumer, final boolean wipeStateStore) {
if (stateConsumer != null) {
try {
stateConsumer.close(wipeStateStore);
} catch (final IOException e) {
log.error("Failed to close state consumer due to the following error:", e);
}
}
}
@Override
public synchronized void start() {
super.start();
try {
initializationLatch.await();
} catch (final InterruptedException e) {
currentThread().interrupt();
throw new IllegalStateException("GlobalStreamThread was interrupted during initialization", e);
}
if (startupException != null) {
throw startupException;
}
if (inErrorState()) {
throw new IllegalStateException("Initialization for the global stream thread failed");
}
}
public void shutdown() {
// one could call shutdown() multiple times, so ignore subsequent calls
// if already shutting down or dead
setState(PENDING_SHUTDOWN);
initializationLatch.countDown();
}
public Map<MetricName, Metric> consumerMetrics() {
return Collections.unmodifiableMap(globalConsumer.metrics());
}
// this method is NOT thread-safe (we rely on the callee to be `synchronized`)
public KafkaFuture<Uuid> globalConsumerInstanceId(final Duration timeout) {
boolean setDeadline = false;
if (clientInstanceIdFuture.isDone()) {
if (clientInstanceIdFuture.isCompletedExceptionally()) {
clientInstanceIdFuture = new KafkaFutureImpl<>();
setDeadline = true;
}
} else {
setDeadline = true;
}
if (setDeadline) {
fetchDeadlineClientInstanceId = time.milliseconds() + timeout.toMillis();
}
return clientInstanceIdFuture;
}
}
| StateConsumer |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/support/BootstrapTestUtilsMergedConfigTests.java | {
"start": 20113,
"end": 20396
} | class ____ {
}
// org.springframework.test.context.support --> 5 levels up to the root of the classpath
@ContextConfiguration(locations = "../../../../../example/foo.xml")
@TestPropertySource("../../../../../example/foo.properties")
static | AbsoluteFooXmlLocationWithClasspathPrefix |
java | spring-projects__spring-security | oauth2/oauth2-client/src/main/java/org/springframework/security/oauth2/client/oidc/authentication/logout/LogoutTokenClaimNames.java | {
"start": 1084,
"end": 1907
} | class ____ {
/**
* {@code jti} - the JTI identifier
*/
public static final String JTI = "jti";
/**
* {@code iss} - the Issuer identifier
*/
public static final String ISS = "iss";
/**
* {@code sub} - the Subject identifier
*/
public static final String SUB = "sub";
/**
* {@code aud} - the Audience(s) that the ID Token is intended for
*/
public static final String AUD = "aud";
/**
* {@code iat} - the time at which the ID Token was issued
*/
public static final String IAT = "iat";
/**
* {@code events} - a JSON object that identifies this token as a logout token
*/
public static final String EVENTS = "events";
/**
* {@code sid} - the session id for the OIDC provider
*/
public static final String SID = "sid";
private LogoutTokenClaimNames() {
}
}
| LogoutTokenClaimNames |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/types/inference/DefaultStateTypeStrategy.java | {
"start": 1208,
"end": 2419
} | class ____ implements StateTypeStrategy {
private final TypeStrategy typeStrategy;
private final @Nullable Duration timeToLive;
DefaultStateTypeStrategy(TypeStrategy typeStrategy, @Nullable Duration timeToLive) {
this.typeStrategy =
Preconditions.checkNotNull(typeStrategy, "Type strategy must not be null.");
this.timeToLive = timeToLive;
}
@Override
public Optional<DataType> inferType(CallContext callContext) {
return typeStrategy.inferType(callContext);
}
@Override
public Optional<Duration> getTimeToLive(CallContext callContext) {
return Optional.ofNullable(timeToLive);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final DefaultStateTypeStrategy that = (DefaultStateTypeStrategy) o;
return Objects.equals(typeStrategy, that.typeStrategy)
&& Objects.equals(timeToLive, that.timeToLive);
}
@Override
public int hashCode() {
return Objects.hash(typeStrategy, timeToLive);
}
}
| DefaultStateTypeStrategy |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/console/ConsoleDetailsTests.java | {
"start": 5030,
"end": 7698
} | class ____ {
@Test
void reportSingleMessage(TestReporter reporter) {
reporter.publishEntry("foo");
}
@Test
void reportMultipleMessages(TestReporter reporter) {
reporter.publishEntry("foo");
reporter.publishEntry("bar");
}
@Test
void reportSingleEntryWithSingleMapping(TestReporter reporter) {
reporter.publishEntry("foo", "bar");
}
@Test
void reportMultiEntriesWithSingleMapping(TestReporter reporter) {
reporter.publishEntry("foo", "bar");
reporter.publishEntry("far", "boo");
}
@Test
void reportMultiEntriesWithMultiMappings(TestReporter reporter) {
Map<String, String> values = new LinkedHashMap<>();
values.put("user name", "dk38");
values.put("award year", "1974");
reporter.publishEntry(values);
reporter.publishEntry("single", "mapping");
Map<String, String> more = new LinkedHashMap<>();
more.put("user name", "st77");
more.put("award year", "1977");
more.put("last seen", "2001");
reporter.publishEntry(more);
}
}
private record Runner(String dirName, String outName, String... args) implements Executable {
@Override
public void execute() throws Throwable {
var wrapper = new ConsoleLauncherWrapper();
var result = wrapper.execute(Optional.empty(), args);
var optionalUri = toUri(dirName, outName);
if (optionalUri.isEmpty()) {
if (Boolean.getBoolean("org.junit.platform.console.ConsoleDetailsTests.writeResultOut")) {
// do not use Files.createTempDirectory(prefix) as we want one folder for one container
var temp = Path.of(System.getProperty("java.io.tmpdir"), dirName.replace('/', '-'));
Files.createDirectories(temp);
var path = Files.writeString(temp.resolve(outName), result.out);
throw new TestAbortedException(
"resource `%s` not found\nwrote console stdout to: %s/%s".formatted(dirName, outName, path));
}
fail("could not load resource named `" + dirName + "/" + outName + "`");
}
var path = Path.of(optionalUri.get());
assumeTrue(Files.exists(path), "path does not exist: " + path);
assumeTrue(Files.isReadable(path), "can not read: " + path);
var expectedLines = Files.readAllLines(path, UTF_8);
var actualLines = List.of(result.out.split("\\R"));
assertLinesMatch(expectedLines, actualLines);
}
}
static Optional<URI> toUri(String dirName, String outName) {
var resourceName = dirName + "/" + outName;
var url = ConsoleDetailsTests.class.getClassLoader().getResource(resourceName);
if (url == null) {
return Optional.empty();
}
try {
return Optional.of(url.toURI());
}
catch (URISyntaxException e) {
return Optional.empty();
}
}
}
| ReportTestCase |
java | quarkusio__quarkus | independent-projects/bootstrap/maven-resolver/src/main/java/io/quarkus/bootstrap/resolver/maven/DeploymentInjectionException.java | {
"start": 93,
"end": 524
} | class ____ extends RuntimeException {
/**
*
*/
private static final long serialVersionUID = 1L;
public DeploymentInjectionException(String message, Throwable cause) {
super(message, cause);
}
public DeploymentInjectionException(Throwable cause) {
super(cause);
}
public DeploymentInjectionException(String message) {
super(message);
}
}
| DeploymentInjectionException |
java | mockito__mockito | mockito-core/src/main/java/org/mockito/Mockito.java | {
"start": 62506,
"end": 62655
} | interface ____ be mocked has a function like:
* void execute(String operand, Callback callback);
*
* // the example callback has a function and the | to |
java | apache__camel | components/camel-aws/camel-aws2-athena/src/test/java/org/apache/camel/component/aws2/athena/AmazonAthenaClientMock.java | {
"start": 2429,
"end": 6829
} | class ____ implements AthenaClient {
private Queue<String> startQueryExecutionResults = new LinkedList<>();
private Queue<QueryExecution> getQueryExecutionResults = new LinkedList<>();
/**
* Optionally provide a FIFO queue of results in the order they should be returned for each call to
* {@link #startQueryExecution(StartQueryExecutionRequest)}.
*
* @param startQueryExecutionResults FIFO ordered queue of results in the order they will be returned
*/
public void setStartQueryExecutionResults(LinkedList<String> startQueryExecutionResults) {
this.startQueryExecutionResults = startQueryExecutionResults;
}
/**
* Optionally provide a FIFO queue of results in the order they should be returned for each call to
* {@link #getQueryExecution(GetQueryExecutionRequest)}.
*
* @param getQueryExecutionResults FIFO ordered queue of results in the order they will be returned
*/
public void setGetQueryExecutionResults(LinkedList<QueryExecution> getQueryExecutionResults) {
this.getQueryExecutionResults = getQueryExecutionResults;
}
@Override
public GetQueryExecutionResponse getQueryExecution(GetQueryExecutionRequest getQueryExecutionRequest)
throws SdkException {
QueryExecution defaultResult = QueryExecution.builder()
.queryExecutionId("11111111-1111-1111-1111-111111111111")
.status(QueryExecutionStatus.builder().state(QueryExecutionState.SUCCEEDED).build())
.resultConfiguration(ResultConfiguration.builder().outputLocation("s3://bucket/file.csv").build())
.build();
QueryExecution result = getQueryExecutionResults.isEmpty() ? defaultResult : getQueryExecutionResults.poll();
// if query execution id is 3333..., sleep for 500 ms to imitate a long running query
if ("33333333-3333-3333-3333-333333333333".equals(result.queryExecutionId())) {
try {
Thread.sleep(500);
} catch (InterruptedException e) {
// noop
}
}
return GetQueryExecutionResponse.builder()
.queryExecution(result)
.build();
}
@Override
public ListQueryExecutionsResponse listQueryExecutions(ListQueryExecutionsRequest listQueryExecutionsRequest)
throws SdkException {
return ListQueryExecutionsResponse.builder()
.queryExecutionIds(
"11111111-1111-1111-1111-111111111111",
"22222222-2222-2222-2222-222222222222")
.nextToken(listQueryExecutionsRequest.nextToken())
.build();
}
@Override
public StartQueryExecutionResponse startQueryExecution(StartQueryExecutionRequest startQueryExecutionRequest)
throws SdkException {
String defaultResult = "11111111-1111-1111-1111-111111111111";
String result = startQueryExecutionResults.isEmpty() ? defaultResult : startQueryExecutionResults.poll();
return StartQueryExecutionResponse.builder()
.queryExecutionId(result)
.build();
}
@Override
public AthenaServiceClientConfiguration serviceClientConfiguration() {
return null;
}
@Override
public GetQueryResultsResponse getQueryResults(GetQueryResultsRequest getQueryResultsRequest) throws SdkException {
return GetQueryResultsResponse.builder()
.nextToken(null)
.resultSet(ResultSet.builder()
.resultSetMetadata(ResultSetMetadata.builder()
.columnInfo(ColumnInfo.builder().name("id").build())
.build())
.rows(Row.builder()
.data(Datum.builder().varCharValue("42").build())
.build())
.build())
.build();
}
@Override
public GetQueryResultsIterable getQueryResultsPaginator(GetQueryResultsRequest getQueryResultsRequest)
throws SdkException {
return new GetQueryResultsIterable(this, getQueryResultsRequest);
}
@Override
public String serviceName() {
return null;
}
@Override
public void close() {
// noop
}
}
| AmazonAthenaClientMock |
java | micronaut-projects__micronaut-core | http/src/main/java/io/micronaut/http/body/stream/ByteQueue.java | {
"start": 899,
"end": 2063
} | class ____ {
// originally from micronaut-servlet
// not the most efficient implementation, but the most readable.
private final Queue<ByteBuffer> queue = new ArrayDeque<>();
/**
* Add a copy of the given array to this queue.
*
* @param arr The input array
* @param off The offset of the section to add
* @param len The length of the section to add
*/
public void addCopy(byte[] arr, int off, int len) {
add(Arrays.copyOfRange(arr, off, off + len));
}
private void add(byte[] arr) {
if (arr.length == 0) {
return;
}
queue.add(ByteBuffer.wrap(arr));
}
public boolean isEmpty() {
return queue.isEmpty();
}
public int take(byte[] arr, int off, int len) {
ByteBuffer peek = queue.peek();
if (peek == null) {
throw new IllegalStateException("Queue is empty");
}
int n = Math.min(len, peek.remaining());
peek.get(arr, off, n);
if (peek.remaining() == 0) {
queue.poll();
}
return n;
}
public void clear() {
queue.clear();
}
}
| ByteQueue |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/bidi/SpecialAuction.java | {
"start": 248,
"end": 1060
} | class ____ {
private Long id;
private String description;
private List<AbstractBid> bids = new ArrayList<>();
private AbstractBid successfulBid;
private Date end;
public Date getEnd() {
return end;
}
public void setEnd(Date end) {
this.end = end;
}
public List<AbstractBid> getBids() {
return bids;
}
public void setBids(List<AbstractBid> bids) {
this.bids = bids;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public AbstractBid getSuccessfulBid() {
return successfulBid;
}
public void setSuccessfulBid(AbstractBid successfulBid) {
this.successfulBid = successfulBid;
}
}
| SpecialAuction |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/jmx/access/MBeanProxyFactoryBean.java | {
"start": 1358,
"end": 1609
} | interface ____ the resource you wish to proxy.
*
* <p>There is no need for the managed resource to implement the proxy interface,
* although you may find it convenient to do. It is not required that every
* operation and attribute in the management | of |
java | apache__camel | archetypes/camel-archetype-component/src/main/resources/archetype-resources/src/main/java/EventBusHelper.java | {
"start": 1239,
"end": 2091
} | class ____ {
// TODO: Delete me when you implemented your custom component
private static EventBusHelper INSTANCE;
private final Set<Consumer> subscribers = ConcurrentHashMap.newKeySet();
private EventBusHelper() {
}
public static EventBusHelper getInstance(){
if (INSTANCE == null) {
INSTANCE = new EventBusHelper();
}
return INSTANCE;
}
public <T> void subscribe(final Consumer<T> subscriber) {
subscribers.add(subscriber);
}
@SuppressWarnings("unchecked")
public <T> void publish(final T event){
// Notify all subscribers
subscribers.forEach(consumer -> publishSingleEvent(event, consumer));
}
private <T> void publishSingleEvent(final T event, final Consumer<T> subscriber){
subscriber.accept(event);
}
} | EventBusHelper |
java | junit-team__junit5 | platform-tests/src/test/java/org/junit/platform/commons/support/ModifierSupportTests.java | {
"start": 6982,
"end": 7082
} | class ____ {
@SuppressWarnings("unused")
protected void protectedMethod() {
}
}
| ProtectedClass |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/requests/DeleteTopicsResponse.java | {
"start": 1112,
"end": 2477
} | class ____ extends AbstractResponse {
/**
* Possible error codes:
*
* REQUEST_TIMED_OUT(7)
* INVALID_TOPIC_EXCEPTION(17)
* TOPIC_AUTHORIZATION_FAILED(29)
* NOT_CONTROLLER(41)
* INVALID_REQUEST(42)
* TOPIC_DELETION_DISABLED(73)
*/
private final DeleteTopicsResponseData data;
public DeleteTopicsResponse(DeleteTopicsResponseData data) {
super(ApiKeys.DELETE_TOPICS);
this.data = data;
}
@Override
public int throttleTimeMs() {
return data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
@Override
public DeleteTopicsResponseData data() {
return data;
}
@Override
public Map<Errors, Integer> errorCounts() {
Map<Errors, Integer> counts = new EnumMap<>(Errors.class);
data.responses().forEach(result ->
updateErrorCounts(counts, Errors.forCode(result.errorCode()))
);
return counts;
}
public static DeleteTopicsResponse parse(Readable readable, short version) {
return new DeleteTopicsResponse(new DeleteTopicsResponseData(readable, version));
}
@Override
public boolean shouldClientThrottle(short version) {
return version >= 2;
}
}
| DeleteTopicsResponse |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/manytomany/inverseToSuperclass/DetailSubclass.java | {
"start": 231,
"end": 443
// Envers test entity: a subclass that adds a single string property
// on top of DetailSuperclass.
class ____ extends DetailSuperclass {
// Property declared only on the subclass.
private String str2;
// No-arg constructor (presumably needed by the persistence provider --
// NOTE(review): confirm against the mapping).
public DetailSubclass() {
}
public String getStr2() {
return str2;
}
public void setStr2(String str2) {
this.str2 = str2;
}
}
| DetailSubclass |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/FileChannelMemoryMappedBoundedData.java | {
"start": 2357,
"end": 8388
} | class ____ implements BoundedData {
/** The file channel backing the memory mapped file. */
private final FileChannel fileChannel;
/**
* The reusable array with header buffer and data buffer, to use gathering writes on the file
* channel ({@link java.nio.channels.GatheringByteChannel#write(ByteBuffer[])}).
*/
private final ByteBuffer[] headerAndBufferArray;
/** All memory mapped regions. */
private final ArrayList<ByteBuffer> memoryMappedRegions;
/** The path of the memory mapped file. */
private final Path filePath;
/**
* The position in the file channel. Cached for efficiency, because an actual position lookup in
* the channel involves various locks and checks.
*/
private long pos;
/** The position where the current memory mapped region must end. */
private long endOfCurrentRegion;
/** The position where the current memory mapped started. */
private long startOfCurrentRegion;
/** The maximum size of each mapped region. */
private final long maxRegionSize;
FileChannelMemoryMappedBoundedData(
Path filePath, FileChannel fileChannel, int maxSizePerMappedRegion) {
this.filePath = filePath;
this.fileChannel = fileChannel;
this.headerAndBufferArray = BufferReaderWriterUtil.allocatedWriteBufferArray();
this.memoryMappedRegions = new ArrayList<>(4);
this.maxRegionSize = maxSizePerMappedRegion;
this.endOfCurrentRegion = maxSizePerMappedRegion;
}
@Override
public void writeBuffer(Buffer buffer) throws IOException {
if (tryWriteBuffer(buffer)) {
return;
}
mapRegionAndStartNext();
if (!tryWriteBuffer(buffer)) {
throwTooLargeBuffer(buffer);
}
}
private boolean tryWriteBuffer(Buffer buffer) throws IOException {
final long spaceLeft = endOfCurrentRegion - pos;
final long bytesWritten =
BufferReaderWriterUtil.writeToByteChannelIfBelowSize(
fileChannel, buffer, headerAndBufferArray, spaceLeft);
if (bytesWritten >= 0) {
pos += bytesWritten;
return true;
} else {
return false;
}
}
@Override
public BoundedData.Reader createReader(ResultSubpartitionView ignored) {
checkState(!fileChannel.isOpen());
final List<ByteBuffer> buffers =
memoryMappedRegions.stream()
.map((bb) -> bb.duplicate().order(ByteOrder.nativeOrder()))
.collect(Collectors.toList());
return new MemoryMappedBoundedData.BufferSlicer(buffers);
}
/**
* Finishes the current region and prevents further writes. After calling this method, further
* calls to {@link #writeBuffer(Buffer)} will fail.
*/
@Override
public void finishWrite() throws IOException {
mapRegionAndStartNext();
fileChannel.close();
}
/**
* Closes the file and unmaps all memory mapped regions. After calling this method, access to
* any ByteBuffer obtained from this instance will cause a segmentation fault.
*/
public void close() throws IOException {
IOUtils.closeQuietly(fileChannel);
for (ByteBuffer bb : memoryMappedRegions) {
PlatformDependent.freeDirectBuffer(bb);
}
memoryMappedRegions.clear();
// To make this compatible with all versions of Windows, we must wait with
// deleting the file until it is unmapped.
// See also
// https://stackoverflow.com/questions/11099295/file-flag-delete-on-close-and-memory-mapped-files/51649618#51649618
Files.delete(filePath);
}
@Override
public long getSize() {
return pos;
}
@Override
public Path getFilePath() {
return filePath;
}
private void mapRegionAndStartNext() throws IOException {
final ByteBuffer region =
fileChannel.map(
MapMode.READ_ONLY, startOfCurrentRegion, pos - startOfCurrentRegion);
region.order(ByteOrder.nativeOrder());
memoryMappedRegions.add(region);
startOfCurrentRegion = pos;
endOfCurrentRegion = startOfCurrentRegion + maxRegionSize;
}
private void throwTooLargeBuffer(Buffer buffer) throws IOException {
throw new IOException(
String.format(
"The buffer (%d bytes) is larger than the maximum size of a memory buffer (%d bytes)",
buffer.getSize(), maxRegionSize));
}
// ------------------------------------------------------------------------
// Factories
// ------------------------------------------------------------------------
/**
* Creates new FileChannelMemoryMappedBoundedData, creating a memory mapped file at the given
* path.
*/
public static FileChannelMemoryMappedBoundedData create(Path memMappedFilePath)
throws IOException {
return createWithRegionSize(memMappedFilePath, Integer.MAX_VALUE);
}
/**
* Creates new FileChannelMemoryMappedBoundedData, creating a memory mapped file at the given
* path. Each mapped region (= ByteBuffer) will be of the given size.
*/
public static FileChannelMemoryMappedBoundedData createWithRegionSize(
Path memMappedFilePath, int regionSize) throws IOException {
checkNotNull(memMappedFilePath, "memMappedFilePath");
checkArgument(regionSize > 0, "regions size most be > 0");
final FileChannel fileChannel =
FileChannel.open(
memMappedFilePath,
StandardOpenOption.READ,
StandardOpenOption.WRITE,
StandardOpenOption.CREATE_NEW);
return new FileChannelMemoryMappedBoundedData(memMappedFilePath, fileChannel, regionSize);
}
}
| FileChannelMemoryMappedBoundedData |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/fetching/BatchFetchingTest.java | {
"start": 945,
"end": 2362
} | class ____ {
private final Logger log = Logger.getLogger( BatchFetchingTest.class );
@AfterEach
void tearDown(SessionFactoryScope factoryScope) {
factoryScope.dropData();
}
@Test
public void test(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( (entityManager) -> {
for (long i = 0; i < 10; i++) {
Department department = new Department();
department.id = i;
entityManager.persist(department);
for (int j = 0; j < Math.random() * 5; j++) {
Employee employee = new Employee();
employee.id = (i * 5) + j;
employee.name = String.format("John %d", employee.getId());
employee.department = department;
entityManager.persist(employee);
department.employees.add(employee);
}
}
} );
factoryScope.inTransaction( (entityManager) -> {
//tag::fetching-batch-fetching-example[]
List<Department> departments = entityManager.createQuery(
"select d " +
"from Department d " +
"inner join d.employees e " +
"where e.name like 'John%'", Department.class)
.getResultList();
for (Department department : departments) {
log.infof(
"Department %d has {} employees",
department.getId(),
department.getEmployees().size()
);
}
//end::fetching-batch-fetching-example[]
} );
}
//tag::fetching-batch-mapping-example[]
@Entity(name = "Department")
public static | BatchFetchingTest |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/rest/handler/cluster/JobManagerLogListHandlerTest.java | {
"start": 2155,
"end": 5094
// Tests for JobManagerLogListHandler: verifies the returned log listing for a
// populated log directory and the empty listing when no log directory is set.
class ____ {
private static HandlerRequest<EmptyRequestBody> testRequest;
@TempDir private java.nio.file.Path temporaryFolder;
private DispatcherGateway dispatcherGateway;
@BeforeAll
static void setupClass() throws HandlerRequestException {
testRequest =
HandlerRequest.create(
EmptyRequestBody.getInstance(),
EmptyMessageParameters.getInstance(),
Collections.emptyList());
}
@BeforeEach
void setUp() {
dispatcherGateway = TestingDispatcherGateway.newBuilder().build();
}
// Creates real files matching the expected LogInfo entries and checks the
// handler reports them (order-insensitive).
@Test
void testGetJobManagerLogsList() throws Exception {
File logRoot = temporaryFolder.toFile();
List<LogInfo> expectedLogInfo =
Arrays.asList(
new LogInfo("jobmanager.log", 5, 1632844800000L),
new LogInfo("jobmanager.out", 7, 1632844800000L),
new LogInfo("test.log", 13, 1632844800000L));
createLogFiles(logRoot, expectedLogInfo);
JobManagerLogListHandler jobManagerLogListHandler = createHandler(logRoot);
LogListInfo logListInfo =
jobManagerLogListHandler.handleRequest(testRequest, dispatcherGateway).get();
assertThat(logListInfo.getLogInfos()).containsExactlyInAnyOrderElementsOf(expectedLogInfo);
}
// A null log root must yield an empty listing rather than an error.
@Test
void testGetJobManagerLogsListWhenLogDirIsNull() throws Exception {
JobManagerLogListHandler jobManagerLogListHandler = createHandler(null);
LogListInfo logListInfo =
jobManagerLogListHandler.handleRequest(testRequest, dispatcherGateway).get();
assertThat(logListInfo.getLogInfos()).isEmpty();
}
// Builds a handler wired to the testing dispatcher gateway and the given root.
private JobManagerLogListHandler createHandler(@Nullable final File jobManagerLogRoot) {
return new JobManagerLogListHandler(
() -> CompletableFuture.completedFuture(dispatcherGateway),
TestingUtils.TIMEOUT,
Collections.emptyMap(),
JobManagerLogListHeaders.getInstance(),
jobManagerLogRoot);
}
private void createLogFiles(final File logRoot, final List<LogInfo> expectedLogFiles) {
for (LogInfo logInfo : expectedLogFiles) {
createFile(new File(logRoot, logInfo.getName()), logInfo.getSize(), logInfo.getMtime());
}
}
// Writes a file of exactly 'size' random alphanumeric bytes and stamps it
// with 'mtime' so size/mtime assertions in the tests can match exactly.
private void createFile(final File file, final long size, final long mtime) {
try {
final String randomFileContent =
StringUtils.generateRandomAlphanumericString(
ThreadLocalRandom.current(), Math.toIntExact(size));
FileUtils.writeStringToFile(file, randomFileContent, StandardCharsets.UTF_8);
file.setLastModified(mtime);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
| JobManagerLogListHandlerTest |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreRouterState.java | {
"start": 2330,
"end": 8780
// Tests for the RouterStore record store: behavior when the state store is
// down, heartbeat registration, expiration/deletion timing, and listing all
// registered routers.
class ____ extends TestStateStoreBase {
private static RouterStore routerStore;
@BeforeAll
public static void create() {
// Reduce expirations to 2 seconds
getConf().setTimeDuration(
RBFConfigKeys.FEDERATION_STORE_ROUTER_EXPIRATION_MS,
2, TimeUnit.SECONDS);
// Set deletion time to 2 seconds
getConf().setTimeDuration(
RBFConfigKeys.FEDERATION_STORE_ROUTER_EXPIRATION_DELETION_MS,
2, TimeUnit.SECONDS);
}
@BeforeEach
public void setup() throws IOException, InterruptedException {
// Lazily resolve the RouterStore once; it is shared across tests.
if (routerStore == null) {
routerStore =
getStateStore().getRegisteredRecordStore(RouterStore.class);
}
// Clear router status registrations
assertTrue(clearRecords(getStateStore(), RouterState.class));
}
// With the driver closed, every store API must fail with
// StateStoreUnavailableException rather than hang or return stale data.
@Test
public void testStateStoreDisconnected() throws Exception {
// Close the data store driver
getStateStore().closeDriver();
assertEquals(false, getStateStore().isDriverReady());
// Test all APIs that access the data store to ensure they throw the correct
// exception.
GetRouterRegistrationRequest getSingleRequest =
GetRouterRegistrationRequest.newInstance();
verifyException(routerStore, "getRouterRegistration",
StateStoreUnavailableException.class,
new Class[] {GetRouterRegistrationRequest.class},
new Object[] {getSingleRequest});
GetRouterRegistrationsRequest getRequest =
GetRouterRegistrationsRequest.newInstance();
routerStore.loadCache(true);
verifyException(routerStore, "getRouterRegistrations",
StateStoreUnavailableException.class,
new Class[] {GetRouterRegistrationsRequest.class},
new Object[] {getRequest});
RouterHeartbeatRequest hbRequest = RouterHeartbeatRequest.newInstance(
RouterState.newInstance("test", 0, RouterServiceState.UNINITIALIZED));
verifyException(routerStore, "routerHeartbeat",
StateStoreUnavailableException.class,
new Class[] {RouterHeartbeatRequest.class},
new Object[] {hbRequest});
}
//
// Router
//
// A heartbeat should register the router and the stored record should echo
// back the address, state, and compile info.
@Test
public void testUpdateRouterStatus()
throws IllegalStateException, IOException {
long dateStarted = Time.now();
String address = "testaddress";
// Set
RouterHeartbeatRequest request = RouterHeartbeatRequest.newInstance(
RouterState.newInstance(
address, dateStarted, RouterServiceState.RUNNING));
assertTrue(routerStore.routerHeartbeat(request).getStatus());
// Verify
GetRouterRegistrationRequest getRequest =
GetRouterRegistrationRequest.newInstance(address);
RouterState record =
routerStore.getRouterRegistration(getRequest).getRouter();
assertNotNull(record);
assertEquals(RouterServiceState.RUNNING, record.getStatus());
assertEquals(address, record.getAddress());
assertEquals(FederationUtil.getCompileInfo(), record.getCompileInfo());
// Build version may vary a bit
assertFalse(record.getVersion().isEmpty());
}
// Walks a registration through the full lifecycle: RUNNING -> EXPIRED after
// the 2s expiration -> RUNNING again after a fresh heartbeat -> EXPIRED ->
// deleted after the 2s deletion window.
@Test
public void testRouterStateExpiredAndDeletion()
throws IOException, InterruptedException, TimeoutException {
long dateStarted = Time.now();
String address = "testaddress";
RouterHeartbeatRequest request = RouterHeartbeatRequest.newInstance(
RouterState.newInstance(
address, dateStarted, RouterServiceState.RUNNING));
// Set
assertTrue(routerStore.routerHeartbeat(request).getStatus());
// Verify
GetRouterRegistrationRequest getRequest =
GetRouterRegistrationRequest.newInstance(address);
RouterState record =
routerStore.getRouterRegistration(getRequest).getRouter();
assertNotNull(record);
// Wait past expiration (set in conf to 2 seconds)
GenericTestUtils.waitFor(() -> {
try {
RouterState routerState = routerStore
.getRouterRegistration(getRequest).getRouter();
// Verify entry is expired
return routerState.getStatus() == RouterServiceState.EXPIRED;
} catch (IOException e) {
return false;
}
}, 100, 3000);
// Heartbeat again and this shouldn't be EXPIRED at this point
assertTrue(routerStore.routerHeartbeat(request).getStatus());
RouterState r = routerStore.getRouterRegistration(getRequest).getRouter();
assertEquals(RouterServiceState.RUNNING, r.getStatus());
// Wait past expiration (set in conf to 2 seconds)
GenericTestUtils.waitFor(() -> {
try {
RouterState routerState = routerStore
.getRouterRegistration(getRequest).getRouter();
// Verify entry is expired
return routerState.getStatus() == RouterServiceState.EXPIRED;
} catch (IOException e) {
return false;
}
}, 100, 3000);
// Wait deletion (set in conf to 2 seconds)
GenericTestUtils.waitFor(() -> {
try {
RouterState routerState = routerStore
.getRouterRegistration(getRequest).getRouter();
// Verify entry is deleted
return routerState.getStatus() == null;
} catch (IOException e) {
return false;
}
}, 100, 3000);
}
// Registers two routers and verifies the listing returns both, sorted here
// only to make the assertions order-independent.
@Test
public void testGetAllRouterStates()
throws StateStoreUnavailableException, IOException {
// Set 2 entries
RouterHeartbeatRequest heartbeatRequest1 =
RouterHeartbeatRequest.newInstance(
RouterState.newInstance(
"testaddress1", Time.now(), RouterServiceState.RUNNING));
assertTrue(routerStore.routerHeartbeat(heartbeatRequest1).getStatus());
RouterHeartbeatRequest heartbeatRequest2 =
RouterHeartbeatRequest.newInstance(
RouterState.newInstance(
"testaddress2", Time.now(), RouterServiceState.RUNNING));
assertTrue(routerStore.routerHeartbeat(heartbeatRequest2).getStatus());
// Verify
routerStore.loadCache(true);
GetRouterRegistrationsRequest request =
GetRouterRegistrationsRequest.newInstance();
List<RouterState> entries =
routerStore.getRouterRegistrations(request).getRouters();
assertEquals(2, entries.size());
Collections.sort(entries);
assertEquals("testaddress1", entries.get(0).getAddress());
assertEquals("testaddress2", entries.get(1).getAddress());
assertEquals(RouterServiceState.RUNNING, entries.get(0).getStatus());
assertEquals(RouterServiceState.RUNNING, entries.get(1).getStatus());
}
}
| TestStateStoreRouterState |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/threadsafety/ThreadSafety.java | {
"start": 2731,
"end": 2896
} | class ____ gives information about the annotation of types; if a type isn't annotated, {@link
* Violation} gives information as to why it is not.
*/
public final | which |
java | netty__netty | codec-socks/src/main/java/io/netty/handler/codec/socks/SocksMessage.java | {
"start": 954,
"end": 1898
// Base type for SOCKS messages: carries the message type and pins the
// protocol version to SOCKS5.
class ____ {
private final SocksMessageType type;
private final SocksProtocolVersion protocolVersion = SocksProtocolVersion.SOCKS5;
// Rejects a null type up front so subclasses can rely on it.
protected SocksMessage(SocksMessageType type) {
this.type = ObjectUtil.checkNotNull(type, "type");
}
/**
 * Returns the {@link SocksMessageType} of this {@link SocksMessage}
 *
 * @return The {@link SocksMessageType} of this {@link SocksMessage}
 */
public SocksMessageType type() {
return type;
}
/**
 * Returns the {@link SocksProtocolVersion} of this {@link SocksMessage}
 *
 * @return The {@link SocksProtocolVersion} of this {@link SocksMessage}
 */
public SocksProtocolVersion protocolVersion() {
return protocolVersion;
}
/**
 * @deprecated Do not use; this method was intended for an internal use only.
 */
@Deprecated
public abstract void encodeAsByteBuf(ByteBuf byteBuf);
}
| SocksMessage |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestFSDownload.java | {
"start": 3809,
"end": 29966
// Archive formats exercised by the download tests below.
enum ____ {
TAR, JAR, ZIP, TGZ
};
private Configuration conf = new Configuration();
@AfterAll
// Removes the shared on-disk test directory once the whole class has run.
public static void deleteTestDir() throws IOException {
FileContext fs = FileContext.getLocalFSFileContext();
fs.delete(new Path("target", TestFSDownload.class.getSimpleName()), true);
}
static final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
// Writes 'len' random bytes to 'p' and returns a FILE-type LocalResource
// describing it (size, visibility, and the file's modification time).
static LocalResource createFile(FileContext files, Path p, int len,
Random r, LocalResourceVisibility vis) throws IOException {
createFile(files, p, len, r);
LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
ret.setResource(URL.fromPath(p));
ret.setSize(len);
ret.setType(LocalResourceType.FILE);
ret.setVisibility(vis);
ret.setTimestamp(files.getFileStatus(p).getModificationTime());
return ret;
}
// Creates (or overwrites) 'p' with exactly 'len' random bytes from 'r'.
static void createFile(FileContext files, Path p, int len, Random r)
throws IOException {
FSDataOutputStream out = null;
try {
byte[] bytes = new byte[len];
out = files.create(p, EnumSet.of(CREATE, OVERWRITE));
r.nextBytes(bytes);
out.write(bytes);
} finally {
// Close even on failure; out may be null if create() itself threw.
if (out != null) out.close();
}
}
/**
 * Creates a jar at {@code p} containing two small class entries and returns a
 * PATTERN-type {@link LocalResource} describing it (pattern "classes/.*").
 */
static LocalResource createJar(FileContext files, Path p,
LocalResourceVisibility vis) throws IOException {
LOG.info("Create jar file " + p);
File jarFile = new File((files.makeQualified(p)).toUri());
// try-with-resources closes the streams even if a write fails; the
// previous version leaked both streams on any exception.
try (FileOutputStream stream = new FileOutputStream(jarFile);
    JarOutputStream out = new JarOutputStream(stream, new Manifest())) {
  LOG.info("Create jar out stream ");
  ZipEntry entry = new ZipEntry("classes/1.class");
  out.putNextEntry(entry);
  out.write(1);
  out.write(2);
  out.write(3);
  out.closeEntry();
  ZipEntry entry2 = new ZipEntry("classes/2.class");
  out.putNextEntry(entry2);
  out.write(1);
  out.write(2);
  out.write(3);
  out.closeEntry();
  LOG.info("Done writing jar stream ");
}
LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
ret.setResource(URL.fromPath(p));
FileStatus status = files.getFileStatus(p);
ret.setSize(status.getLen());
ret.setTimestamp(status.getModificationTime());
ret.setType(LocalResourceType.PATTERN);
ret.setVisibility(vis);
ret.setPattern("classes/.*");
return ret;
}
/**
 * Creates {@code p + ".tar"} holding one entry of {@code len} random bytes and
 * returns an ARCHIVE-type {@link LocalResource} describing it.
 */
static LocalResource createTarFile(FileContext files, Path p, int len,
Random r, LocalResourceVisibility vis) throws IOException,
URISyntaxException {
byte[] bytes = new byte[len];
r.nextBytes(bytes);
File archiveFile = new File(p.toUri().getPath() + ".tar");
archiveFile.createNewFile();
// try-with-resources closes the archive stream even if a write fails;
// the previous version leaked it on any exception.
try (TarArchiveOutputStream out = new TarArchiveOutputStream(
    new FileOutputStream(archiveFile))) {
  TarArchiveEntry entry = new TarArchiveEntry(p.getName());
  entry.setSize(bytes.length);
  out.putArchiveEntry(entry);
  out.write(bytes);
  out.closeArchiveEntry();
}
LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
ret.setResource(URL.fromPath(new Path(p.toString()
+ ".tar")));
ret.setSize(len);
ret.setType(LocalResourceType.ARCHIVE);
ret.setVisibility(vis);
ret.setTimestamp(files.getFileStatus(new Path(p.toString() + ".tar"))
.getModificationTime());
return ret;
}
/**
 * Creates {@code p + ".tar.gz"} holding one entry of {@code len} random bytes
 * and returns an ARCHIVE-type {@link LocalResource} describing it.
 */
static LocalResource createTgzFile(FileContext files, Path p, int len,
Random r, LocalResourceVisibility vis) throws IOException,
URISyntaxException {
byte[] bytes = new byte[len];
r.nextBytes(bytes);
File gzipFile = new File(p.toUri().getPath() + ".tar.gz");
gzipFile.createNewFile();
// try-with-resources closes (and finishes) the gzip/tar stream chain even
// if a write fails; the previous version leaked it on any exception.
try (TarArchiveOutputStream out = new TarArchiveOutputStream(
    new GZIPOutputStream(new FileOutputStream(gzipFile)))) {
  TarArchiveEntry entry = new TarArchiveEntry(p.getName());
  entry.setSize(bytes.length);
  out.putArchiveEntry(entry);
  out.write(bytes);
  out.closeArchiveEntry();
}
LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
ret.setResource(URL.fromPath(new Path(p.toString()
+ ".tar.gz")));
ret.setSize(len);
ret.setType(LocalResourceType.ARCHIVE);
ret.setVisibility(vis);
ret.setTimestamp(files.getFileStatus(new Path(p.toString() + ".tar.gz"))
.getModificationTime());
return ret;
}
/**
 * Creates {@code p + ".jar"} holding one entry of {@code len} random bytes and
 * returns an ARCHIVE-type {@link LocalResource} describing it.
 */
static LocalResource createJarFile(FileContext files, Path p, int len,
Random r, LocalResourceVisibility vis) throws IOException,
URISyntaxException {
byte[] bytes = new byte[len];
r.nextBytes(bytes);
File archiveFile = new File(p.toUri().getPath() + ".jar");
archiveFile.createNewFile();
// try-with-resources closes the jar stream even if a write fails; the
// previous version leaked it on any exception.
try (JarOutputStream out = new JarOutputStream(
    new FileOutputStream(archiveFile))) {
  out.putNextEntry(new JarEntry(p.getName()));
  out.write(bytes);
  out.closeEntry();
}
LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
ret.setResource(URL.fromPath(new Path(p.toString()
+ ".jar")));
ret.setSize(len);
ret.setType(LocalResourceType.ARCHIVE);
ret.setVisibility(vis);
ret.setTimestamp(files.getFileStatus(new Path(p.toString() + ".jar"))
.getModificationTime());
return ret;
}
/**
 * Creates {@code p + ".ZIP"} holding one entry of {@code len} random bytes and
 * returns an ARCHIVE-type {@link LocalResource} describing it. The upper-case
 * extension is deliberate (see the Turkish-locale test).
 */
static LocalResource createZipFile(FileContext files, Path p, int len,
Random r, LocalResourceVisibility vis) throws IOException,
URISyntaxException {
byte[] bytes = new byte[len];
r.nextBytes(bytes);
File archiveFile = new File(p.toUri().getPath() + ".ZIP");
archiveFile.createNewFile();
// try-with-resources closes the zip stream even if a write fails; the
// previous version leaked it on any exception.
try (ZipOutputStream out = new ZipOutputStream(
    new FileOutputStream(archiveFile))) {
  out.putNextEntry(new ZipEntry(p.getName()));
  out.write(bytes);
  out.closeEntry();
}
LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
ret.setResource(URL.fromPath(new Path(p.toString()
+ ".ZIP")));
ret.setSize(len);
ret.setType(LocalResourceType.ARCHIVE);
ret.setVisibility(vis);
ret.setTimestamp(files.getFileStatus(new Path(p.toString() + ".ZIP"))
.getModificationTime());
return ret;
}
@Test
@Timeout(10000)
// With a restrictive umask, a resource marked PUBLIC is not actually
// world-readable; localization must fail with an IOException.
void testDownloadBadPublic() throws IOException, URISyntaxException,
InterruptedException {
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
FileContext files = FileContext.getLocalFSFileContext(conf);
final Path basedir = files.makeQualified(new Path("target",
TestFSDownload.class.getSimpleName()));
files.mkdir(basedir, null, true);
conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
Map<LocalResource, LocalResourceVisibility> rsrcVis =
new HashMap<LocalResource, LocalResourceVisibility>();
Random rand = new Random();
// Log the seed so a failing run can be reproduced.
long sharedSeed = rand.nextLong();
rand.setSeed(sharedSeed);
System.out.println("SEED: " + sharedSeed);
Map<LocalResource, Future<Path>> pending =
new HashMap<LocalResource, Future<Path>>();
ExecutorService exec = HadoopExecutors.newSingleThreadExecutor();
LocalDirAllocator dirs =
new LocalDirAllocator(TestFSDownload.class.getName());
int size = 512;
LocalResourceVisibility vis = LocalResourceVisibility.PUBLIC;
Path path = new Path(basedir, "test-file");
LocalResource rsrc = createFile(files, path, size, rand, vis);
rsrcVis.put(rsrc, vis);
Path destPath = dirs.getLocalPathForWrite(
basedir.toString(), size, conf);
destPath = new Path(destPath,
Long.toString(uniqueNumberGenerator.incrementAndGet()));
FSDownload fsd =
new FSDownload(files, UserGroupInformation.getCurrentUser(), conf,
destPath, rsrc);
pending.put(rsrc, exec.submit(fsd));
exec.shutdown();
while (!exec.awaitTermination(1000, TimeUnit.MILLISECONDS)) ;
assertTrue(pending.get(rsrc).isDone());
try {
for (Map.Entry<LocalResource, Future<Path>> p : pending.entrySet()) {
p.getValue().get();
fail("We localized a file that is not public.");
}
} catch (ExecutionException e) {
// Expected path: the download must fail because the file is not public.
assertTrue(e.getCause() instanceof IOException);
}
}
@Test
@Timeout(60000)
// Runs FSDownload.isPublic() concurrently over several files through a
// counting stat cache; all files must be public and each path must be
// stat'ed exactly once (the cache deduplicates lookups).
void testDownloadPublicWithStatCache() throws IOException,
URISyntaxException, InterruptedException, ExecutionException {
FileContext files = FileContext.getLocalFSFileContext(conf);
Path basedir = files.makeQualified(new Path("target",
TestFSDownload.class.getSimpleName()));
// if test directory doesn't have ancestor permission, skip this test
FileSystem f = basedir.getFileSystem(conf);
assumeTrue(FSDownload.ancestorsHaveExecutePermissions(f, basedir, null));
files.mkdir(basedir, null, true);
conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
int size = 512;
final ConcurrentMap<Path, AtomicInteger> counts =
new ConcurrentHashMap<Path, AtomicInteger>();
final CacheLoader<Path, Future<FileStatus>> loader =
FSDownload.createStatusCacheLoader(conf);
// Wrap the default loader so we can count how many times each path is
// actually loaded (i.e. cache misses).
final LoadingCache<Path, Future<FileStatus>> statCache =
CacheBuilder.newBuilder().build(new CacheLoader<Path, Future<FileStatus>>() {
public Future<FileStatus> load(Path path) throws Exception {
// increment the count
AtomicInteger count = counts.get(path);
if (count == null) {
count = new AtomicInteger(0);
AtomicInteger existing = counts.putIfAbsent(path, count);
if (existing != null) {
count = existing;
}
}
count.incrementAndGet();
// use the default loader
return loader.load(path);
}
});
// test FSDownload.isPublic() concurrently
final int fileCount = 3;
List<Callable<Boolean>> tasks = new ArrayList<Callable<Boolean>>();
for (int i = 0; i < fileCount; i++) {
Random rand = new Random();
long sharedSeed = rand.nextLong();
rand.setSeed(sharedSeed);
System.out.println("SEED: " + sharedSeed);
final Path path = new Path(basedir, "test-file-" + i);
createFile(files, path, size, rand);
final FileSystem fs = path.getFileSystem(conf);
final FileStatus sStat = fs.getFileStatus(path);
tasks.add(new Callable<Boolean>() {
public Boolean call() throws IOException {
return FSDownload.isPublic(fs, path, sStat, statCache);
}
});
}
ExecutorService exec = HadoopExecutors.newFixedThreadPool(fileCount);
try {
List<Future<Boolean>> futures = exec.invokeAll(tasks);
// files should be public
for (Future<Boolean> future : futures) {
assertTrue(future.get());
}
// for each path exactly one file status call should be made
for (AtomicInteger count : counts.values()) {
assertSame(count.get(), 1);
}
} finally {
exec.shutdown();
}
}
@Test
@Timeout(10000)
// Localizes ten randomly sized PRIVATE/APPLICATION files and verifies each
// downloaded file's size and the resulting directory/file permissions.
void testDownload() throws IOException, URISyntaxException,
InterruptedException {
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
FileContext files = FileContext.getLocalFSFileContext(conf);
final Path basedir = files.makeQualified(new Path("target",
TestFSDownload.class.getSimpleName()));
files.mkdir(basedir, null, true);
conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
Map<LocalResource, LocalResourceVisibility> rsrcVis =
new HashMap<LocalResource, LocalResourceVisibility>();
Random rand = new Random();
// Log the seed so a failing run can be reproduced.
long sharedSeed = rand.nextLong();
rand.setSeed(sharedSeed);
System.out.println("SEED: " + sharedSeed);
Map<LocalResource, Future<Path>> pending =
new HashMap<LocalResource, Future<Path>>();
ExecutorService exec = HadoopExecutors.newSingleThreadExecutor();
LocalDirAllocator dirs =
new LocalDirAllocator(TestFSDownload.class.getName());
int[] sizes = new int[10];
for (int i = 0; i < 10; ++i) {
sizes[i] = rand.nextInt(512) + 512;
LocalResourceVisibility vis = LocalResourceVisibility.PRIVATE;
if (i % 2 == 1) {
vis = LocalResourceVisibility.APPLICATION;
}
Path p = new Path(basedir, "" + i);
LocalResource rsrc = createFile(files, p, sizes[i], rand, vis);
rsrcVis.put(rsrc, vis);
Path destPath = dirs.getLocalPathForWrite(
basedir.toString(), sizes[i], conf);
destPath = new Path(destPath,
Long.toString(uniqueNumberGenerator.incrementAndGet()));
FSDownload fsd =
new FSDownload(files, UserGroupInformation.getCurrentUser(), conf,
destPath, rsrc);
pending.put(rsrc, exec.submit(fsd));
}
exec.shutdown();
while (!exec.awaitTermination(1000, TimeUnit.MILLISECONDS)) ;
for (Future<Path> path : pending.values()) {
assertTrue(path.isDone());
}
try {
for (Map.Entry<LocalResource, Future<Path>> p : pending.entrySet()) {
Path localized = p.getValue().get();
// File name encodes its index; use it to look up the expected size.
assertEquals(sizes[Integer.parseInt(localized.getName())], p.getKey()
.getSize());
FileStatus status = files.getFileStatus(localized.getParent());
FsPermission perm = status.getPermission();
assertEquals(new FsPermission((short) 0755), perm,
"Cache directory permissions are incorrect");
status = files.getFileStatus(localized);
perm = status.getPermission();
System.out.println("File permission " + perm +
" for rsrc vis " + p.getKey().getVisibility().name());
assert(rsrcVis.containsKey(p.getKey()));
assertTrue(perm.toShort() == FSDownload.PRIVATE_FILE_PERMS.toShort(),
"Private file should be 500");
}
} catch (ExecutionException e) {
throw new IOException("Failed exec", e);
}
}
// Shared driver for the archive tests: builds one archive of the given type,
// localizes it with FSDownload, then checks that the archive was unpacked
// into a directory (not left as a file) and that no tmp files remain.
private void downloadWithFileType(TEST_FILE_TYPE fileType) throws IOException,
URISyntaxException, InterruptedException{
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
FileContext files = FileContext.getLocalFSFileContext(conf);
final Path basedir = files.makeQualified(new Path("target",
TestFSDownload.class.getSimpleName()));
files.mkdir(basedir, null, true);
conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
Random rand = new Random();
// Log the seed so a failing run can be reproduced.
long sharedSeed = rand.nextLong();
rand.setSeed(sharedSeed);
System.out.println("SEED: " + sharedSeed);
Map<LocalResource, Future<Path>> pending = new HashMap<LocalResource, Future<Path>>();
ExecutorService exec = HadoopExecutors.newSingleThreadExecutor();
LocalDirAllocator dirs = new LocalDirAllocator(
TestFSDownload.class.getName());
int size = rand.nextInt(512) + 512;
LocalResourceVisibility vis = LocalResourceVisibility.PRIVATE;
Path p = new Path(basedir, "" + 1);
String strFileName = "";
LocalResource rsrc = null;
switch (fileType) {
case TAR:
rsrc = createTarFile(files, p, size, rand, vis);
break;
case JAR:
rsrc = createJarFile(files, p, size, rand, vis);
rsrc.setType(LocalResourceType.PATTERN);
break;
case ZIP:
rsrc = createZipFile(files, p, size, rand, vis);
strFileName = p.getName() + ".ZIP";
break;
case TGZ:
rsrc = createTgzFile(files, p, size, rand, vis);
break;
}
Path destPath = dirs.getLocalPathForWrite(basedir.toString(), size, conf);
destPath = new Path (destPath,
Long.toString(uniqueNumberGenerator.incrementAndGet()));
FSDownload fsd = new FSDownload(files,
UserGroupInformation.getCurrentUser(), conf, destPath, rsrc);
pending.put(rsrc, exec.submit(fsd));
exec.shutdown();
while (!exec.awaitTermination(1000, TimeUnit.MILLISECONDS));
try {
pending.get(rsrc).get(); // see if there was an Exception during download
FileStatus[] filesstatus = files.getDefaultFileSystem().listStatus(
basedir);
for (FileStatus filestatus : filesstatus) {
if (filestatus.isDirectory()) {
FileStatus[] childFiles = files.getDefaultFileSystem().listStatus(
filestatus.getPath());
for (FileStatus childfile : childFiles) {
// A ZIP must be expanded into a directory named after the archive.
if (strFileName.endsWith(".ZIP") && childfile.getPath().getName().equals(strFileName)
&& !childfile.isDirectory()) {
fail("Failure...After unzip, there should have been a"
+ " directory formed with zip file name but found a file. "
+ childfile.getPath());
}
// Temporary download artifacts must have been cleaned up.
if (childfile.getPath().getName().startsWith("tmp")) {
fail("Tmp File should not have been there "
+ childfile.getPath());
}
}
}
}
}catch (Exception e) {
throw new IOException("Failed exec", e);
}
}
@Test
@Timeout(10000)
// Localization of a .tar archive.
void testDownloadArchive() throws IOException, URISyntaxException,
InterruptedException {
downloadWithFileType(TEST_FILE_TYPE.TAR);
}
@Test
@Timeout(10000)
// Localization of a jar localized as a PATTERN resource.
void testDownloadPatternJar() throws IOException, URISyntaxException,
InterruptedException {
downloadWithFileType(TEST_FILE_TYPE.JAR);
}
@Test
@Timeout(10000)
// Localization of a .ZIP archive.
void testDownloadArchiveZip() throws IOException, URISyntaxException,
InterruptedException {
downloadWithFileType(TEST_FILE_TYPE.ZIP);
}
/*
* To test fix for YARN-3029
*/
@Test
@Timeout(10000)
/**
 * Localizes a .ZIP archive while the default locale is Turkish, where
 * lower-casing "ZIP" does not yield "zip" (dotless i) — regression test for
 * YARN-3029's locale-sensitive extension matching.
 */
void testDownloadArchiveZipWithTurkishLocale() throws IOException,
URISyntaxException, InterruptedException {
Locale defaultLocale = Locale.getDefault();
// Set to Turkish
Locale turkishLocale = new Locale("tr", "TR");
Locale.setDefault(turkishLocale);
try {
  downloadWithFileType(TEST_FILE_TYPE.ZIP);
} finally {
  // Restore in a finally block so a test failure cannot leak the Turkish
  // default locale into subsequently running tests (the original code
  // only restored it on the success path).
  Locale.setDefault(defaultLocale);
}
}
@Test
@Timeout(10000)
// Localization of a .tar.gz archive.
void testDownloadArchiveTgz() throws IOException, URISyntaxException,
InterruptedException {
downloadWithFileType(TEST_FILE_TYPE.TGZ);
}
private void verifyPermsRecursively(FileSystem fs,
FileContext files, Path p,
LocalResourceVisibility vis) throws IOException {
FileStatus status = files.getFileStatus(p);
if (status.isDirectory()) {
if (vis == LocalResourceVisibility.PUBLIC) {
assertTrue(status.getPermission().toShort() ==
FSDownload.PUBLIC_DIR_PERMS.toShort());
}
else {
assertTrue(status.getPermission().toShort() ==
FSDownload.PRIVATE_DIR_PERMS.toShort());
}
if (!status.isSymlink()) {
FileStatus[] statuses = fs.listStatus(p);
for (FileStatus stat : statuses) {
verifyPermsRecursively(fs, files, stat.getPath(), vis);
}
}
}
else {
if (vis == LocalResourceVisibility.PUBLIC) {
assertTrue(status.getPermission().toShort() ==
FSDownload.PUBLIC_FILE_PERMS.toShort());
}
else {
assertTrue(status.getPermission().toShort() ==
FSDownload.PRIVATE_FILE_PERMS.toShort());
}
}
}
@Test
@Timeout(10000)
void testDirDownload() throws IOException, InterruptedException {
FileContext files = FileContext.getLocalFSFileContext(conf);
final Path basedir = files.makeQualified(new Path("target",
TestFSDownload.class.getSimpleName()));
files.mkdir(basedir, null, true);
conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
Map<LocalResource, LocalResourceVisibility> rsrcVis =
new HashMap<LocalResource, LocalResourceVisibility>();
Random rand = new Random();
long sharedSeed = rand.nextLong();
rand.setSeed(sharedSeed);
System.out.println("SEED: " + sharedSeed);
Map<LocalResource, Future<Path>> pending =
new HashMap<LocalResource, Future<Path>>();
ExecutorService exec = HadoopExecutors.newSingleThreadExecutor();
LocalDirAllocator dirs =
new LocalDirAllocator(TestFSDownload.class.getName());
for (int i = 0; i < 5; ++i) {
LocalResourceVisibility vis = LocalResourceVisibility.PRIVATE;
if (i % 2 == 1) {
vis = LocalResourceVisibility.APPLICATION;
}
Path p = new Path(basedir, "dir" + i + ".jar");
LocalResource rsrc = createJar(files, p, vis);
rsrcVis.put(rsrc, vis);
Path destPath = dirs.getLocalPathForWrite(
basedir.toString(), conf);
destPath = new Path(destPath,
Long.toString(uniqueNumberGenerator.incrementAndGet()));
FSDownload fsd =
new FSDownload(files, UserGroupInformation.getCurrentUser(), conf,
destPath, rsrc);
pending.put(rsrc, exec.submit(fsd));
}
exec.shutdown();
while (!exec.awaitTermination(1000, TimeUnit.MILLISECONDS)) ;
for (Future<Path> path : pending.values()) {
assertTrue(path.isDone());
}
try {
for (Map.Entry<LocalResource, Future<Path>> p : pending.entrySet()) {
Path localized = p.getValue().get();
FileStatus status = files.getFileStatus(localized);
System.out.println("Testing path " + localized);
assert(status.isDirectory());
assert(rsrcVis.containsKey(p.getKey()));
verifyPermsRecursively(localized.getFileSystem(conf),
files, localized, rsrcVis.get(p.getKey()));
}
} catch (ExecutionException e) {
throw new IOException("Failed exec", e);
}
}
@Test
@Timeout(10000)
void testUniqueDestinationPath() throws Exception {
FileContext files = FileContext.getLocalFSFileContext(conf);
final Path basedir = files.makeQualified(new Path("target",
TestFSDownload.class.getSimpleName()));
files.mkdir(basedir, null, true);
conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
ExecutorService singleThreadedExec = HadoopExecutors
.newSingleThreadExecutor();
LocalDirAllocator dirs =
new LocalDirAllocator(TestFSDownload.class.getName());
Path destPath = dirs.getLocalPathForWrite(basedir.toString(), conf);
destPath =
new Path(destPath, Long.toString(uniqueNumberGenerator
.incrementAndGet()));
Path p = new Path(basedir, "dir" + 0 + ".jar");
LocalResourceVisibility vis = LocalResourceVisibility.PRIVATE;
LocalResource rsrc = createJar(files, p, vis);
FSDownload fsd =
new FSDownload(files, UserGroupInformation.getCurrentUser(), conf,
destPath, rsrc);
Future<Path> rPath = singleThreadedExec.submit(fsd);
singleThreadedExec.shutdown();
while (!singleThreadedExec.awaitTermination(1000, TimeUnit.MILLISECONDS)) ;
assertTrue(rPath.isDone());
// Now FSDownload will not create a random directory to localize the
// resource. Therefore the final localizedPath for the resource should be
// destination directory (passed as an argument) + file name.
assertEquals(destPath, rPath.get().getParent());
}
/**
* This test method is responsible for creating an IOException resulting
* from modification to the local resource's timestamp on the source FS just
* before the download of this local resource has started.
*/
@Test
@Timeout(10000)
void testResourceTimestampChangeDuringDownload()
throws IOException, InterruptedException {
conf = new Configuration();
FileContext files = FileContext.getLocalFSFileContext(conf);
final Path basedir = files.makeQualified(
new Path("target", TestFSDownload.class.getSimpleName()));
files.mkdir(basedir, null, true);
conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
LocalDirAllocator dirs =
new LocalDirAllocator(TestFSDownload.class.getName());
Path path = new Path(basedir, "test-file");
Random rand = new Random();
long sharedSeed = rand.nextLong();
rand.setSeed(sharedSeed);
int size = 512;
LocalResourceVisibility vis = LocalResourceVisibility.PUBLIC;
LocalResource localResource = createFile(files, path, size, rand, vis);
Path destPath = dirs.getLocalPathForWrite(basedir.toString(), size, conf);
destPath = new Path(destPath,
Long.toString(uniqueNumberGenerator.incrementAndGet()));
FSDownload fsDownload = new FSDownload(files,
UserGroupInformation.getCurrentUser(), conf, destPath, localResource);
// Store the original local resource timestamp used to set up the
// FSDownload object just before (but before the download starts)
// for comparison purposes later on.
long origLRTimestamp = localResource.getTimestamp();
// Modify the local resource's timestamp to yesterday on the Filesystem
// just before FSDownload starts.
final long msInADay = 86400 * 1000;
long modifiedFSTimestamp = origLRTimestamp - msInADay;
try {
Path sourceFsPath = localResource.getResource().toPath();
FileSystem sourceFs = sourceFsPath.getFileSystem(conf);
sourceFs.setTimes(sourceFsPath, modifiedFSTimestamp, modifiedFSTimestamp);
} catch (URISyntaxException use) {
fail("No exception expected.");
}
// Execute the FSDownload operation.
Map<LocalResource, Future<Path>> pending = new HashMap<>();
ExecutorService exec = HadoopExecutors.newSingleThreadExecutor();
pending.put(localResource, exec.submit(fsDownload));
exec.shutdown();
exec.awaitTermination(1000, TimeUnit.MILLISECONDS);
assertTrue(pending.get(localResource).isDone());
try {
for (Map.Entry<LocalResource, Future<Path>> p : pending.entrySet()) {
p.getValue().get();
}
fail("Exception expected from timestamp update during download");
} catch (ExecutionException ee) {
assertTrue(ee.getCause() instanceof IOException);
assertTrue(ee.getMessage().contains(Times.formatISO8601(origLRTimestamp)),
"Exception contains original timestamp");
assertTrue(ee.getMessage().contains(Times.formatISO8601(modifiedFSTimestamp)),
"Exception contains modified timestamp");
}
}
}
| TEST_FILE_TYPE |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java | {
"start": 33621,
"end": 33815
} | class ____ extends Options.IntegerOption
implements Option {
ReplicationOption(int value) {
super(value);
}
}
static | ReplicationOption |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/internal/FailedSecondPassException.java | {
"start": 468,
"end": 846
} | class ____ extends AnnotationException {
/**
* Constructs a FailedSecondPassException using the given message and underlying cause.
*
* @param msg The message explaining the condition that caused the exception
* @param cause The underlying exception
*/
public FailedSecondPassException(String msg, Throwable cause) {
super( msg, cause );
}
}
| FailedSecondPassException |
java | elastic__elasticsearch | x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestResumeFollowAction.java | {
"start": 912,
"end": 2034
} | class ____ extends BaseRestHandler {
@Override
public List<Route> routes() {
return List.of(new Route(POST, "/{index}/_ccr/resume_follow"));
}
@Override
public String getName() {
return "ccr_resume_follow_action";
}
@Override
protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
Request request = createRequest(restRequest);
return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel));
}
static Request createRequest(RestRequest restRequest) throws IOException {
if (restRequest.hasContentOrSourceParam()) {
try (XContentParser parser = restRequest.contentOrSourceParamParser()) {
return Request.fromXContent(getMasterNodeTimeout(restRequest), parser, restRequest.param("index"));
}
} else {
final var request = new Request(getMasterNodeTimeout(restRequest));
request.setFollowerIndex(restRequest.param("index"));
return request;
}
}
}
| RestResumeFollowAction |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/functions/aggregate/ListAggWsWithRetractAggFunction.java | {
"start": 3264,
"end": 7530
} | class ____ {
public ListView<StringData> list;
public ListView<StringData> retractList;
public StringData delimiter;
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
ListAggWsWithRetractAccumulator that = (ListAggWsWithRetractAccumulator) o;
return Objects.equals(list, that.list) && Objects.equals(retractList, that.retractList);
}
@Override
public int hashCode() {
return Objects.hash(list, retractList);
}
}
@Override
public ListAggWsWithRetractAccumulator createAccumulator() {
final ListAggWsWithRetractAccumulator acc = new ListAggWsWithRetractAccumulator();
acc.list = new ListView<>();
acc.retractList = new ListView<>();
acc.delimiter = StringData.fromString(",");
return acc;
}
public void accumulate(
ListAggWsWithRetractAccumulator acc, StringData value, StringData lineDelimiter)
throws Exception {
if (value != null) {
acc.delimiter = lineDelimiter;
acc.list.add(value);
}
}
public void retract(
ListAggWsWithRetractAccumulator acc, StringData value, StringData lineDelimiter)
throws Exception {
if (value != null) {
acc.delimiter = lineDelimiter;
if (!acc.list.remove(value)) {
acc.retractList.add(value);
}
}
}
public void merge(
ListAggWsWithRetractAccumulator acc, Iterable<ListAggWsWithRetractAccumulator> its)
throws Exception {
for (ListAggWsWithRetractAccumulator otherAcc : its) {
if (!otherAcc.list.get().iterator().hasNext()
&& !otherAcc.retractList.get().iterator().hasNext()) {
// otherAcc is empty, skip it
continue;
}
acc.delimiter = otherAcc.delimiter;
// merge list of acc and other
List<StringData> buffer = new ArrayList<>();
for (StringData binaryString : acc.list.get()) {
buffer.add(binaryString);
}
for (StringData binaryString : otherAcc.list.get()) {
buffer.add(binaryString);
}
// merge retract list of acc and other
List<StringData> retractBuffer = new ArrayList<>();
for (StringData binaryString : acc.retractList.get()) {
retractBuffer.add(binaryString);
}
for (StringData binaryString : otherAcc.retractList.get()) {
retractBuffer.add(binaryString);
}
// merge list & retract list
List<StringData> newRetractBuffer = new ArrayList<>();
for (StringData binaryString : retractBuffer) {
if (!buffer.remove(binaryString)) {
newRetractBuffer.add(binaryString);
}
}
// update to acc
acc.list.clear();
acc.list.addAll(buffer);
acc.retractList.clear();
acc.retractList.addAll(newRetractBuffer);
}
}
@SuppressWarnings({"rawtypes", "unchecked"})
@Override
public StringData getValue(ListAggWsWithRetractAccumulator acc) {
try {
// the element must be BinaryStringData because it's the only implementation.
Iterable<BinaryStringData> accList = (Iterable) acc.list.get();
if (accList == null || !accList.iterator().hasNext()) {
// return null when the list is empty
return null;
} else {
return BinaryStringDataUtil.concatWs((BinaryStringData) acc.delimiter, accList);
}
} catch (Exception e) {
throw new FlinkRuntimeException(e);
}
}
public void resetAccumulator(ListAggWsWithRetractAccumulator acc) {
acc.delimiter = StringData.fromString(",");
acc.list.clear();
acc.retractList.clear();
}
}
| ListAggWsWithRetractAccumulator |
java | apache__flink | flink-core/src/test/java/org/apache/flink/configuration/description/DescriptionHtmlTest.java | {
"start": 1185,
"end": 4897
} | class ____ {
@Test
void testDescriptionWithLink() {
Description description =
Description.builder()
.text("This is a text with a link %s", link("https://somepage", "to here"))
.build();
String formattedDescription = new HtmlFormatter().format(description);
assertThat(formattedDescription)
.isEqualTo(
"This is a text with a link <a href=\"https://somepage\">" + "to here</a>");
}
@Test
void testDescriptionWithPercents() {
Description description =
Description.builder()
.text("This is a text that has some percentage value of 20%.")
.build();
String formattedDescription = new HtmlFormatter().format(description);
assertThat(formattedDescription)
.isEqualTo("This is a text that has some percentage value of 20%.");
}
@Test
void testDescriptionWithMultipleLinks() {
Description description =
Description.builder()
.text(
"This is a text with a link %s and another %s",
link("https://somepage", "to here"), link("https://link"))
.build();
String formattedDescription = new HtmlFormatter().format(description);
assertThat(formattedDescription)
.isEqualTo(
"This is a text with a link <a href=\"https://somepage\">to here</a> and another "
+ "<a href=\"https://link\">https://link</a>");
}
@Test
void testDescriptionWithList() {
Description description =
Description.builder()
.text("This is some list: ")
.list(
link("http://first_link"),
text(
"this is second element of list with a %s",
link("https://link")))
.build();
String formattedDescription = new HtmlFormatter().format(description);
assertThat(formattedDescription)
.isEqualTo(
"This is some list: <ul><li><a href=\"http://first_link\">http://first_link"
+ "</a></li><li>this is second element of list "
+ "with a <a href=\"https://link\">https://link</a></li></ul>");
}
@Test
void testDescriptionWithLineBreak() {
Description description =
Description.builder()
.text("This is first line.")
.linebreak()
.text("This is second line.")
.build();
String formattedDescription = new HtmlFormatter().format(description);
assertThat(formattedDescription).isEqualTo("This is first line.<br />This is second line.");
}
@Test
void testDescriptionWithListAndEscaping() {
Description description =
Description.builder()
.text("This is some list: ")
.list(text("this is first element with illegal character '>' and '<'"))
.build();
String formattedDescription = new HtmlFormatter().format(description);
assertThat(formattedDescription)
.isEqualTo(
"This is some list: <ul><li>this is first element with illegal character '>' and '<'</li></ul>");
}
}
| DescriptionHtmlTest |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/context/support/StaticWebApplicationContext.java | {
"start": 1611,
"end": 2120
} | interface ____ allow for direct replacement of an {@link XmlWebApplicationContext},
* despite not actually supporting external configuration files.
*
* <p>Interprets resource paths as servlet context resources, i.e. as paths beneath
* the web application root. Absolute paths, for example, for files outside the web app root,
* can be accessed via "file:" URLs, as implemented by
* {@link org.springframework.core.io.DefaultResourceLoader}.
*
* @author Rod Johnson
* @author Juergen Hoeller
*/
public | to |
java | spring-projects__spring-boot | documentation/spring-boot-actuator-docs/src/test/java/org/springframework/boot/actuate/docs/flyway/FlywayEndpointDocumentationTests.java | {
"start": 4024,
"end": 4369
} | class ____ {
@Bean
DataSource dataSource() {
return new EmbeddedDatabaseBuilder().generateUniqueName(true)
.setType(EmbeddedDatabaseConnection.get(getClass().getClassLoader()).getType())
.build();
}
@Bean
FlywayEndpoint endpoint(ApplicationContext context) {
return new FlywayEndpoint(context);
}
}
}
| TestConfiguration |
java | apache__flink | flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/runtime/operators/over/RowTimeRowsBoundedPrecedingFunctionTest.java | {
"start": 1390,
"end": 2515
} | class ____ extends RowTimeOverWindowTestBase {
@Test
void testLateRecordMetrics() throws Exception {
RowTimeRowsBoundedPrecedingFunction<RowData> function =
new RowTimeRowsBoundedPrecedingFunction<>(
1000, 2000, aggsHandleFunction, accTypes, inputFieldTypes, 2000, 2);
KeyedProcessOperator<RowData, RowData, RowData> operator =
new KeyedProcessOperator<>(function);
OneInputStreamOperatorTestHarness<RowData, RowData> testHarness =
createTestHarness(operator);
testHarness.open();
Counter counter = function.getCounter();
// put some records
testHarness.processElement(insertRecord("key", 1L, 100L));
testHarness.processElement(insertRecord("key", 1L, 100L));
testHarness.processElement(insertRecord("key", 1L, 500L));
testHarness.processWatermark(new Watermark(500L));
// late record
testHarness.processElement(insertRecord("key", 1L, 400L));
assertThat(counter.getCount()).isEqualTo(1L);
}
}
| RowTimeRowsBoundedPrecedingFunctionTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/suppress/CustomSuppressionTest.java | {
"start": 1944,
"end": 2285
} | interface ____ {}
@BugPattern(
summary = "Test checker that accepts both custom suppression annotations",
explanation = "Test checker that accepts both custom suppression annotations",
suppressionAnnotations = {SuppressBothCheckers.class, SuppressMyChecker2.class},
severity = ERROR)
public static | SuppressMyChecker2 |
java | apache__spark | core/src/main/java/org/apache/spark/shuffle/api/ShufflePartitionWriter.java | {
"start": 1225,
"end": 4733
} | interface ____ {
/**
* Open and return an {@link OutputStream} that can write bytes to the underlying
* data store.
* <p>
* This method will only be called once on this partition writer in the map task, to write the
* bytes to the partition. The output stream will only be used to write the bytes for this
* partition. The map task closes this output stream upon writing all the bytes for this
* block, or if the write fails for any reason.
* <p>
* Implementations that intend on combining the bytes for all the partitions written by this
* map task should reuse the same OutputStream instance across all the partition writers provided
* by the parent {@link ShuffleMapOutputWriter}. If one does so, ensure that
* {@link OutputStream#close()} does not close the resource, since it will be reused across
* partition writes. The underlying resources should be cleaned up in
* {@link ShuffleMapOutputWriter#commitAllPartitions(long[])} and
* {@link ShuffleMapOutputWriter#abort(Throwable)}.
*/
OutputStream openStream() throws IOException;
/**
* Opens and returns a {@link WritableByteChannelWrapper} for transferring bytes from
* input byte channels to the underlying shuffle data store.
* <p>
* This method will only be called once on this partition writer in the map task, to write the
* bytes to the partition. The channel will only be used to write the bytes for this
* partition. The map task closes this channel upon writing all the bytes for this
* block, or if the write fails for any reason.
* <p>
* Implementations that intend on combining the bytes for all the partitions written by this
* map task should reuse the same channel instance across all the partition writers provided
* by the parent {@link ShuffleMapOutputWriter}. If one does so, ensure that
* {@link WritableByteChannelWrapper#close()} does not close the resource, since the channel
* will be reused across partition writes. The underlying resources should be cleaned up in
* {@link ShuffleMapOutputWriter#commitAllPartitions(long[])} and
* {@link ShuffleMapOutputWriter#abort(Throwable)}.
* <p>
* This method is primarily for advanced optimizations where bytes can be copied from the input
* spill files to the output channel without copying data into memory. If such optimizations are
* not supported, the implementation should return {@link Optional#empty()}. By default, the
* implementation returns {@link Optional#empty()}.
* <p>
* Note that the returned {@link WritableByteChannelWrapper} itself is closed, but not the
* underlying channel that is returned by {@link WritableByteChannelWrapper#channel()}. Ensure
* that the underlying channel is cleaned up in {@link WritableByteChannelWrapper#close()},
* {@link ShuffleMapOutputWriter#commitAllPartitions(long[])}, or
* {@link ShuffleMapOutputWriter#abort(Throwable)}.
*/
default Optional<WritableByteChannelWrapper> openChannelWrapper() throws IOException {
return Optional.empty();
}
/**
* Returns the number of bytes written either by this writer's output stream opened by
* {@link #openStream()} or the byte channel opened by {@link #openChannelWrapper()}.
* <p>
* This can be different from the number of bytes given by the caller. For example, the
* stream might compress or encrypt the bytes before persisting the data to the backing
* data store.
*/
long getNumBytesWritten();
}
| ShufflePartitionWriter |
java | apache__flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/abilities/sink/SinkAbilitySpec.java | {
"start": 1257,
"end": 1987
} | interface ____ can not only serialize/deserialize the sink abilities to/from JSON, but also
* can apply the abilities to a {@link DynamicTableSink}.
*/
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "type")
@JsonSubTypes({
@JsonSubTypes.Type(value = BucketingSpec.class),
@JsonSubTypes.Type(value = OverwriteSpec.class),
@JsonSubTypes.Type(value = PartitioningSpec.class),
@JsonSubTypes.Type(value = WritingMetadataSpec.class),
@JsonSubTypes.Type(value = RowLevelDeleteSpec.class),
@JsonSubTypes.Type(value = RowLevelUpdateSpec.class),
@JsonSubTypes.Type(value = TargetColumnWritingSpec.class)
})
@Internal
public | that |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/serializer/filters/PropertyFilter_double.java | {
"start": 332,
"end": 3825
} | class ____ extends TestCase {
public void test_0() throws Exception {
PropertyFilter filter = new PropertyFilter() {
public boolean apply(Object source, String name, Object value) {
return false;
}
};
SerializeWriter out = new SerializeWriter();
JSONSerializer serializer = new JSONSerializer(out);
serializer.getPropertyFilters().add(filter);
A a = new A();
serializer.write(a);
String text = out.toString();
Assert.assertEquals("{}", text);
}
public void test_1() throws Exception {
PropertyFilter filter = new PropertyFilter() {
public boolean apply(Object source, String name, Object value) {
if ("id".equals(name)) {
Assert.assertTrue(value instanceof Double);
return true;
}
return false;
}
};
SerializeWriter out = new SerializeWriter();
JSONSerializer serializer = new JSONSerializer(out);
serializer.getPropertyFilters().add(filter);
A a = new A();
serializer.write(a);
String text = out.toString();
Assert.assertEquals("{\"id\":0.0}", text);
}
public void test_2() throws Exception {
PropertyFilter filter = new PropertyFilter() {
public boolean apply(Object source, String name, Object value) {
if ("name".equals(name)) {
return true;
}
return false;
}
};
SerializeWriter out = new SerializeWriter();
JSONSerializer serializer = new JSONSerializer(out);
serializer.getPropertyFilters().add(filter);
A a = new A();
a.setName("chennp2008");
serializer.write(a);
String text = out.toString();
Assert.assertEquals("{\"name\":\"chennp2008\"}", text);
}
public void test_3() throws Exception {
PropertyFilter filter = new PropertyFilter() {
public boolean apply(Object source, String name, Object value) {
if ("name".equals(name)) {
return true;
}
return false;
}
};
SerializeWriter out = new SerializeWriter();
JSONSerializer serializer = new JSONSerializer(out);
serializer.getPropertyFilters().add(filter);
Map<String, Object> map = new HashMap<String, Object>();
map.put("name", "chennp2008");
serializer.write(map);
String text = out.toString();
Assert.assertEquals("{\"name\":\"chennp2008\"}", text);
}
public void test_4() throws Exception {
PropertyFilter filter = new PropertyFilter() {
public boolean apply(Object source, String name, Object value) {
if ("name".equals(name)) {
return false;
}
return true;
}
};
SerializeWriter out = new SerializeWriter();
JSONSerializer serializer = new JSONSerializer(out);
serializer.getPropertyFilters().add(filter);
Map<String, Object> map = new HashMap<String, Object>();
map.put("id", 3);
map.put("name", "chennp2008");
serializer.write(map);
String text = out.toString();
Assert.assertEquals("{\"id\":3}", text);
}
public static | PropertyFilter_double |
java | spring-projects__spring-framework | spring-core/src/main/java/org/springframework/core/type/filter/AspectJTypeFilter.java | {
"start": 1486,
"end": 1619
} | class ____ examined to match with a type pattern.
*
* @author Ramnivas Laddad
* @author Juergen Hoeller
* @since 2.5
*/
public | being |
java | micronaut-projects__micronaut-core | core-processor/src/main/java/io/micronaut/expressions/context/DefaultExpressionCompilationContextFactory.java | {
"start": 1406,
"end": 5523
} | class ____ implements ExpressionCompilationContextFactory {
private static final Collection<ClassElement> CONTEXT_TYPES = ConcurrentHashMap.newKeySet();
private ExtensibleExpressionEvaluationContext sharedContext;
private final VisitorContext visitorContext;
public DefaultExpressionCompilationContextFactory(VisitorContext visitorContext) {
this.sharedContext = recreateContext();
this.visitorContext = visitorContext;
}
@NonNull
private DefaultExpressionEvaluationContext recreateContext() {
return new DefaultExpressionEvaluationContext(CONTEXT_TYPES.toArray(ClassElement[]::new));
}
@Override
@NonNull
public ExpressionEvaluationContext buildContextForMethod(@NonNull EvaluatedExpressionReference expression,
@NonNull MethodElement methodElement) {
return buildForExpression(expression, null)
.extendWith(methodElement);
}
@Override
@NonNull
public ExpressionEvaluationContext buildContext(EvaluatedExpressionReference expression, ClassElement thisElement) {
return buildForExpression(expression, thisElement);
}
@NonNull
@Override
public ExpressionCompilationContextFactory registerContextClass(@NonNull ClassElement contextClass) {
CONTEXT_TYPES.add(contextClass);
this.sharedContext = recreateContext();
return this;
}
private ExtensibleExpressionEvaluationContext buildForExpression(EvaluatedExpressionReference expression, ClassElement thisElement) {
String annotationName = expression.annotationName();
String memberName = expression.annotationMember();
ClassElement annotation = visitorContext.getClassElement(annotationName).orElse(null);
ExtensibleExpressionEvaluationContext evaluationContext = sharedContext;
if (annotation != null) {
evaluationContext = addAnnotationEvaluationContext(evaluationContext, annotation);
evaluationContext = addAnnotationMemberEvaluationContext(evaluationContext, annotation, memberName);
}
if (thisElement != null) {
return evaluationContext.withThis(thisElement);
}
return evaluationContext;
}
private ExtensibleExpressionEvaluationContext addAnnotationEvaluationContext(
ExtensibleExpressionEvaluationContext currentEvaluationContext,
ClassElement annotation) {
return annotation.findAnnotation(AnnotationExpressionContext.class)
.flatMap(av -> av.annotationClassValue(AnnotationMetadata.VALUE_MEMBER))
.map(AnnotationClassValue::getName)
.flatMap(visitorContext::getClassElement)
.map(currentEvaluationContext::extendWith)
.orElse(currentEvaluationContext);
}
private ExtensibleExpressionEvaluationContext addAnnotationMemberEvaluationContext(
ExtensibleExpressionEvaluationContext currentEvaluationContext,
ClassElement annotation,
String annotationMember) {
ElementQuery<MethodElement> memberQuery =
ElementQuery.ALL_METHODS
.onlyDeclared()
.annotated(am -> am.hasAnnotation(AnnotationExpressionContext.class))
.named(annotationMember);
return annotation.getEnclosedElements(memberQuery).stream()
.flatMap(element -> Optional.ofNullable(element.getDeclaredAnnotation(AnnotationExpressionContext.class)).stream())
.flatMap(av -> av.annotationClassValue(AnnotationMetadata.VALUE_MEMBER).stream())
.map(AnnotationClassValue::getName)
.flatMap(className -> visitorContext.getClassElement(className).stream())
.reduce(currentEvaluationContext, ExtensibleExpressionEvaluationContext::extendWith, (a, b) -> a);
}
/**
* cleanup any stored contexts.
*/
@Internal
public static void reset() {
CONTEXT_TYPES.clear();
}
}
| DefaultExpressionCompilationContextFactory |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/observable/ObservableMapTest.java | {
"start": 1268,
"end": 13929
} | class ____ extends RxJavaTest {
Observer<String> stringObserver;
Observer<String> stringObserver2;
static final BiFunction<String, Integer, String> APPEND_INDEX = new BiFunction<String, Integer, String>() {
@Override
public String apply(String value, Integer index) {
return value + index;
}
};
@Before
public void before() {
stringObserver = TestHelper.mockObserver();
stringObserver2 = TestHelper.mockObserver();
}
@Test
public void map() {
Map<String, String> m1 = getMap("One");
Map<String, String> m2 = getMap("Two");
Observable<Map<String, String>> o = Observable.just(m1, m2);
Observable<String> m = o.map(new Function<Map<String, String>, String>() {
@Override
public String apply(Map<String, String> map) {
return map.get("firstName");
}
});
m.subscribe(stringObserver);
verify(stringObserver, never()).onError(any(Throwable.class));
verify(stringObserver, times(1)).onNext("OneFirst");
verify(stringObserver, times(1)).onNext("TwoFirst");
verify(stringObserver, times(1)).onComplete();
}
@Test
public void mapMany() {
/* simulate a top-level async call which returns IDs */
Observable<Integer> ids = Observable.just(1, 2);
/* now simulate the behavior to take those IDs and perform nested async calls based on them */
Observable<String> m = ids.flatMap(new Function<Integer, Observable<String>>() {
@Override
public Observable<String> apply(Integer id) {
/* simulate making a nested async call which creates another Observable */
Observable<Map<String, String>> subObservable = null;
if (id == 1) {
Map<String, String> m1 = getMap("One");
Map<String, String> m2 = getMap("Two");
subObservable = Observable.just(m1, m2);
} else {
Map<String, String> m3 = getMap("Three");
Map<String, String> m4 = getMap("Four");
subObservable = Observable.just(m3, m4);
}
/* simulate kicking off the async call and performing a select on it to transform the data */
return subObservable.map(new Function<Map<String, String>, String>() {
@Override
public String apply(Map<String, String> map) {
return map.get("firstName");
}
});
}
});
m.subscribe(stringObserver);
verify(stringObserver, never()).onError(any(Throwable.class));
verify(stringObserver, times(1)).onNext("OneFirst");
verify(stringObserver, times(1)).onNext("TwoFirst");
verify(stringObserver, times(1)).onNext("ThreeFirst");
verify(stringObserver, times(1)).onNext("FourFirst");
verify(stringObserver, times(1)).onComplete();
}
@Test
public void mapMany2() {
    // Two inner sources of name maps, wrapped in an outer Observable of
    // Observables; flatMap flattens them while projecting "firstName".
    Observable<Map<String, String>> first = Observable.just(getMap("One"), getMap("Two"));
    Observable<Map<String, String>> second = Observable.just(getMap("Three"), getMap("Four"));
    Observable<String> names = Observable.just(first, second)
            .flatMap(inner -> inner.map(map -> map.get("firstName")));
    names.subscribe(stringObserver);
    verify(stringObserver, never()).onError(any(Throwable.class));
    verify(stringObserver, times(1)).onNext("OneFirst");
    verify(stringObserver, times(1)).onNext("TwoFirst");
    verify(stringObserver, times(1)).onNext("ThreeFirst");
    verify(stringObserver, times(1)).onNext("FourFirst");
    verify(stringObserver, times(1)).onComplete();
}
@Test
public void mapWithError() {
    // The mapper throws on the first "fail" element, so only "one" is
    // delivered before onError terminates the sequence.
    Observable<String> source = Observable.just("one", "fail", "two", "three", "fail");
    Observable<String> mapped = source
            .map(s -> {
                if ("fail".equals(s)) {
                    throw new RuntimeException("Forced Failure");
                }
                return s;
            })
            .doOnError(Throwable::printStackTrace);
    mapped.subscribe(stringObserver);
    verify(stringObserver, times(1)).onNext("one");
    verify(stringObserver, never()).onNext("two");
    verify(stringObserver, never()).onNext("three");
    verify(stringObserver, never()).onComplete();
    verify(stringObserver, times(1)).onError(any(Throwable.class));
}
@Test(expected = IllegalArgumentException.class)
public void mapWithIssue417() {
    // An exception thrown by the mapper on another scheduler must still
    // surface to the blocking caller (regression test for issue #417).
    Observable.just(1).observeOn(Schedulers.computation())
            .<Integer>map(arg0 -> {
                throw new IllegalArgumentException("any error");
            })
            .blockingSingle();
}
@Test(expected = IllegalArgumentException.class)
public void mapWithErrorInFuncAndThreadPoolScheduler() throws InterruptedException {
    // The mapper throws inside a computation-pool thread. If map did not
    // handle that itself, the error would vanish in the pool, so map has
    // to capture and propagate it downstream.
    Observable<String> mapped = Observable.just("one")
            .observeOn(Schedulers.computation())
            .<String>map(arg0 -> {
                throw new IllegalArgumentException("any error");
            });
    // Block for the response; the mapper's exception is expected here.
    mapped.blockingLast();
}
/**
 * While mapping over range(1,0).last() we expect NoSuchElementException since the sequence is empty.
 */
@Test
public void errorPassesThruMap() {
    // lastElement() on an empty range produces an empty Maybe, so the
    // identity-mapped blockingGet() yields null rather than a value.
    assertNull(Observable.range(1, 0).lastElement().map(i -> i).blockingGet());
}
/**
 * We expect IllegalStateException to pass thru map.
 */
@Test(expected = IllegalStateException.class)
public void errorPassesThruMap2() {
    // An upstream error must flow through an identity map unchanged.
    Observable.error(new IllegalStateException()).map(i -> i).blockingSingle();
}
/**
 * We expect an ArithmeticException exception here because last() emits a single value
 * but then we divide by 0.
 */
@Test(expected = ArithmeticException.class)
public void mapWithErrorInFunc() {
    // The mapper's division by zero must surface from blockingGet().
    Observable.range(1, 1).lastElement().map(i -> i / 0).blockingGet();
}
// FIXME RS subscribers can't throw
// @Test(expected = OnErrorNotImplementedException.class)
// public void verifyExceptionIsThrownIfThereIsNoExceptionHandler() {
//
// ObservableSource<Object> creator = new ObservableSource<Object>() {
//
// @Override
// public void subscribeActual(Observer<? super Object> observer) {
// observer.onSubscribe(EmptyDisposable.INSTANCE);
// observer.onNext("a");
// observer.onNext("b");
// observer.onNext("c");
// observer.onComplete();
// }
// };
//
// Function<Object, Observable<Object>> manyMapper = new Function<Object, Observable<Object>>() {
//
// @Override
// public Observable<Object> apply(Object object) {
// return Observable.just(object);
// }
// };
//
// Function<Object, Object> mapper = new Function<Object, Object>() {
// private int count = 0;
//
// @Override
// public Object apply(Object object) {
// ++count;
// if (count > 2) {
// throw new RuntimeException();
// }
// return object;
// }
// };
//
// Consumer<Object> onNext = new Consumer<Object>() {
//
// @Override
// public void accept(Object object) {
// System.out.println(object.toString());
// }
// };
//
// try {
// Observable.unsafeCreate(creator).flatMap(manyMapper).map(mapper).subscribe(onNext);
// } catch (RuntimeException e) {
// e.printStackTrace();
// throw e;
// }
// }
/** Builds a two-entry name map ("firstName"/"lastName") derived from the given prefix. */
private static Map<String, String> getMap(String prefix) {
    Map<String, String> names = new HashMap<>();
    names.put("firstName", prefix + "First");
    names.put("lastName", prefix + "Last");
    return names;
}
// FIXME RS subscribers can't throw
// @Test(expected = OnErrorNotImplementedException.class)
// public void testShouldNotSwallowOnErrorNotImplementedException() {
// Observable.just("a", "b").flatMap(new Function<String, Observable<String>>() {
// @Override
// public Observable<String> apply(String s) {
// return Observable.just(s + "1", s + "2");
// }
// }).flatMap(new Function<String, Observable<String>>() {
// @Override
// public Observable<String> apply(String s) {
// return Observable.error(new Exception("test"));
// }
// }).forEach(new Consumer<String>() {
// @Override
// public void accept(String s) {
// System.out.println(s);
// }
// });
// }
// Disposing the mapped Observable must also dispose its upstream.
@Test
public void dispose() {
    TestHelper.checkDisposed(Observable.range(1, 5).map(Functions.identity()));
}
@Test
public void doubleOnSubscribe() {
    // map must obey the onSubscribe protocol even when the source
    // erroneously signals onSubscribe twice.
    TestHelper.checkDoubleOnSubscribeObservable(o -> o.map(Functions.identity()));
}
// Identity-mapping a synchronously fuseable source (range) should keep
// SYNC fusion mode and still deliver all values.
@Test
public void fusedSync() {
    TestObserverEx<Integer> to = new TestObserverEx<>(QueueFuseable.ANY);
    Observable.range(1, 5)
    .map(Functions.<Integer>identity())
    .subscribe(to);
    to.assertFusionMode(QueueFuseable.SYNC)
    .assertResult(1, 2, 3, 4, 5);
}
// An async-fuseable source (UnicastSubject) mapped through identity
// should report ASYNC fusion mode and deliver all emitted values.
@Test
public void fusedAsync() {
    TestObserverEx<Integer> to = new TestObserverEx<>(QueueFuseable.ANY);
    UnicastSubject<Integer> us = UnicastSubject.create();
    us
    .map(Functions.<Integer>identity())
    .subscribe(to);
    TestHelper.emit(us, 1, 2, 3, 4, 5);
    to.assertFusionMode(QueueFuseable.ASYNC)
    .assertResult(1, 2, 3, 4, 5);
}
// Requesting fusion across an async BOUNDARY must be rejected (NONE),
// while the values are still delivered correctly.
@Test
public void fusedReject() {
    TestObserverEx<Integer> to = new TestObserverEx<>(QueueFuseable.ANY | QueueFuseable.BOUNDARY);
    Observable.range(1, 5)
    .map(Functions.<Integer>identity())
    .subscribe(to);
    to.assertFusionMode(QueueFuseable.NONE)
    .assertResult(1, 2, 3, 4, 5);
}
@Test
public void badSource() {
    // A protocol-violating source (signals after terminal events) must
    // not break map's pass-through behavior.
    TestHelper.checkBadSourceObservable(o -> o.map(Functions.identity()), false, 1, 1, 1);
}
}
| ObservableMapTest |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/InstanceOfAssertFactoriesTest.java | {
"start": 17679,
"end": 18357
} | class ____ {
private final Object actual = completedFuture("done");
@Test
void createAssert() {
// WHEN
CompletableFutureAssert<Object> result = COMPLETION_STAGE.createAssert(actual);
// THEN
result.isDone();
}
@Test
void createAssert_with_ValueProvider() {
// GIVEN
ValueProvider<?> valueProvider = mockThatDelegatesTo(type -> actual);
// WHEN
CompletableFutureAssert<Object> result = COMPLETION_STAGE.createAssert(valueProvider);
// THEN
result.isDone();
verify(valueProvider).apply(parameterizedType(CompletionStage.class, Object.class));
}
}
@Nested
| CompletionStage_Factory |
java | apache__camel | components/camel-thrift/src/test/java/org/apache/camel/component/thrift/generated/Calculator.java | {
"start": 107960,
"end": 110099
} | class ____ extends org.apache.thrift.scheme.StandardScheme<add_result> {
@Override
public void read(org.apache.thrift.protocol.TProtocol iprot, add_result struct)
throws org.apache.thrift.TException {
org.apache.thrift.protocol.TField schemeField;
iprot.readStructBegin();
while (true) {
schemeField = iprot.readFieldBegin();
if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
break;
}
switch (schemeField.id) {
case 0: // SUCCESS
if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
struct.success = iprot.readI32();
struct.setSuccessIsSet(true);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
}
iprot.readFieldEnd();
}
iprot.readStructEnd();
// check for required fields of primitive type, which can't be checked in the validate method
struct.validate();
}
@Override
public void write(org.apache.thrift.protocol.TProtocol oprot, add_result struct)
throws org.apache.thrift.TException {
struct.validate();
oprot.writeStructBegin(STRUCT_DESC);
if (struct.isSetSuccess()) {
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
oprot.writeI32(struct.success);
oprot.writeFieldEnd();
}
oprot.writeFieldStop();
oprot.writeStructEnd();
}
}
private static | add_resultStandardScheme |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/source/presencecheck/spi/CollectionPresenceMapper.java | {
"start": 352,
"end": 520
} | interface ____ {
CollectionPresenceMapper INSTANCE = Mappers.getMapper( CollectionPresenceMapper.class );
Target map(Source source);
| CollectionPresenceMapper |
java | spring-projects__spring-security | saml2/saml2-service-provider/src/main/java/org/springframework/security/saml2/jackson2/Saml2AuthenticationExceptionMixin.java | {
"start": 1142,
"end": 1863
} | class ____ used to serialize/deserialize {@link Saml2AuthenticationException}.
*
* @author Ulrich Grave
* @since 5.7
* @see Saml2AuthenticationException
* @see Saml2Jackson2Module
* @deprecated as of 7.0 in favor of
* {@code org.springframework.security.saml2.jackson.Saml2AuthenticationExceptionMixin}
* based on Jackson 3
*/
@SuppressWarnings("removal")
@Deprecated(forRemoval = true)
@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS)
@JsonAutoDetect(fieldVisibility = JsonAutoDetect.Visibility.NONE, getterVisibility = JsonAutoDetect.Visibility.NONE,
isGetterVisibility = JsonAutoDetect.Visibility.NONE)
@JsonIgnoreProperties(ignoreUnknown = true, value = { "cause", "stackTrace", "suppressedExceptions" })
abstract | is |
java | google__truth | core/src/test/java/com/google/common/truth/GuavaOptionalSubjectTest.java | {
"start": 1112,
"end": 3195
} | class ____ {
@Test
public void isPresent() {
assertThat(Optional.of("foo")).isPresent();
}
@Test
public void isPresentFailing() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that(Optional.absent()).isPresent());
assertFailureKeys(e, "expected to be present");
}
@Test
public void isPresentFailingNull() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that((Optional<?>) null).isPresent());
assertFailureKeys(e, "expected present optional", "but was");
}
@Test
public void isAbsent() {
assertThat(Optional.absent()).isAbsent();
}
@Test
public void isAbsentFailing() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that(Optional.of("foo")).isAbsent());
assertFailureKeys(e, "expected to be absent", "but was present with value");
assertFailureValue(e, "but was present with value", "foo");
}
@Test
public void isAbsentFailingNull() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that((Optional<?>) null).isAbsent());
assertFailureKeys(e, "expected absent optional", "but was");
}
@Test
public void hasValue() {
assertThat(Optional.of("foo")).hasValue("foo");
}
@Test
public void hasValue_failingWithAbsent() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that(Optional.absent()).hasValue("foo"));
assertFailureKeys(e, "expected to have value", "but was absent");
assertFailureValue(e, "expected to have value", "foo");
}
@Test
public void hasValue_failingWithNullParameter() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that(Optional.of("foo")).hasValue(null));
assertFailureKeys(e, "expected an optional with a null value, but that is impossible", "was");
}
@Test
public void hasValue_failingWithWrongValue() {
AssertionError e =
expectFailure(whenTesting -> whenTesting.that(Optional.of("foo")).hasValue("boo"));
assertFailureValue(e, "value of", "optional.get()");
}
}
| GuavaOptionalSubjectTest |
java | google__dagger | javatests/dagger/internal/codegen/ModuleFactoryGeneratorTest.java | {
"start": 53549,
"end": 53786
} | interface ____ {}");
private static final Source SCOPE_B =
CompilerTests.javaSource(
"test.ScopeB",
"package test;",
"",
"import javax.inject.Scope;",
"",
"@Scope @ | ScopeA |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/bug/Bug_for_kongmu.java | {
"start": 117,
"end": 649
} | class ____ extends TestCase {
public void test_for_bug() throws Exception {
String JSON_STRING = "{\n"
+ "\t\"body\":\"parentBody\",\n"
+ "\t\"name\":\"child-1\",\n"
+ "\t\"result\":{\n"
+ "\t\t\"code\":11\n"
+ "\t},\n"
+ "\t\"toy\":{\n"
+ "\t\t\"type\":\"toytype\"\n"
+ "\t}\n"
+ "}";
JSON.parseObject(JSON_STRING, Child.class);
}
public static | Bug_for_kongmu |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/util/DateUtil_toCalendar_Test.java | {
"start": 1067,
"end": 1665
} | class ____ {
@Test
void should_convert_date_to_calendar() throws ParseException {
String dateAsString = "26/08/1994";
Date date = new SimpleDateFormat("dd/MM/yyyy").parse(dateAsString);
Calendar calendar = new GregorianCalendar();
// clear all fields to have a Date without time (no hours, minutes...).
calendar.clear();
calendar.set(1994, 7, 26); // month is 0 value based.
assertThat(toCalendar(date)).isEqualTo(calendar);
}
@Test
void should_return_null_if_date_to_convert_is_null() {
assertThat(toCalendar(null)).isNull();
}
}
| DateUtil_toCalendar_Test |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/SearchFeatures.java | {
"start": 644,
"end": 2707
} | class ____ implements FeatureSpecification {
public static final NodeFeature LUCENE_10_0_0_UPGRADE = new NodeFeature("lucene_10_upgrade");
public static final NodeFeature LUCENE_10_1_0_UPGRADE = new NodeFeature("lucene_10_1_upgrade");
@Override
public Set<NodeFeature> getFeatures() {
return Set.of(LUCENE_10_0_0_UPGRADE, LUCENE_10_1_0_UPGRADE);
}
public static final NodeFeature RETRIEVER_RESCORER_ENABLED = new NodeFeature("search.retriever.rescorer.enabled");
public static final NodeFeature COMPLETION_FIELD_SUPPORTS_DUPLICATE_SUGGESTIONS = new NodeFeature(
"search.completion_field.duplicate.support"
);
public static final NodeFeature RESCORER_MISSING_FIELD_BAD_REQUEST = new NodeFeature("search.rescorer.missing.field.bad.request");
public static final NodeFeature INT_SORT_FOR_INT_SHORT_BYTE_FIELDS = new NodeFeature("search.sort.int_sort_for_int_short_byte_fields");
static final NodeFeature MULTI_MATCH_CHECKS_POSITIONS = new NodeFeature("search.multi.match.checks.positions");
public static final NodeFeature BBQ_HNSW_DEFAULT_INDEXING = new NodeFeature("search.vectors.mappers.default_bbq_hnsw");
public static final NodeFeature SEARCH_WITH_NO_DIMENSIONS_BUGFIX = new NodeFeature("search.vectors.no_dimensions_bugfix");
public static final NodeFeature SEARCH_RESCORE_SCRIPT = new NodeFeature("search.rescore.script");
public static final NodeFeature NEGATIVE_FUNCTION_SCORE_BAD_REQUEST = new NodeFeature("search.negative.function.score.bad.request");
@Override
public Set<NodeFeature> getTestFeatures() {
return Set.of(
RETRIEVER_RESCORER_ENABLED,
COMPLETION_FIELD_SUPPORTS_DUPLICATE_SUGGESTIONS,
RESCORER_MISSING_FIELD_BAD_REQUEST,
INT_SORT_FOR_INT_SHORT_BYTE_FIELDS,
MULTI_MATCH_CHECKS_POSITIONS,
BBQ_HNSW_DEFAULT_INDEXING,
SEARCH_WITH_NO_DIMENSIONS_BUGFIX,
SEARCH_RESCORE_SCRIPT,
NEGATIVE_FUNCTION_SCORE_BAD_REQUEST
);
}
}
| SearchFeatures |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/persister/collection/mutation/RemoveCoordinatorTablePerSubclass.java | {
"start": 922,
"end": 4690
} | class ____ implements RemoveCoordinator {
private final OneToManyPersister mutationTarget;
private final OperationProducer operationProducer;
private final MutationExecutorService mutationExecutorService;
private MutationOperationGroup[] operationGroups;
/**
* Creates the coordinator.
*
* @implNote We pass a Supplier here and lazily create the operation-group because
* of timing (chicken-egg) back on the persister.
*/
public RemoveCoordinatorTablePerSubclass(
OneToManyPersister mutationTarget,
OperationProducer operationProducer,
ServiceRegistry serviceRegistry) {
this.mutationTarget = mutationTarget;
this.operationProducer = operationProducer;
mutationExecutorService = serviceRegistry.getService( MutationExecutorService.class );
}
@Override
public String toString() {
return "RemoveCoordinator(" + mutationTarget.getRolePath() + ")";
}
@Override
public CollectionMutationTarget getMutationTarget() {
return mutationTarget;
}
@Override
public String getSqlString() {
throw new UnsupportedOperationException();
}
@Override
public void deleteAllRows(Object key, SharedSessionContractImplementor session) {
if ( MODEL_MUTATION_LOGGER.isTraceEnabled() ) {
MODEL_MUTATION_LOGGER.removingCollection( mutationTarget.getRolePath(), key );
}
var operationGroups = this.operationGroups;
if ( operationGroups == null ) {
// delayed creation of the operation-group
operationGroups = this.operationGroups = buildOperationGroups();
}
final var foreignKeyDescriptor = mutationTarget.getTargetPart().getKeyDescriptor();
for ( var operationGroup : operationGroups ) {
final var mutationExecutor = mutationExecutorService.createExecutor(
() -> null,
operationGroup,
session
);
try {
foreignKeyDescriptor.getKeyPart().decompose(
key,
0,
mutationExecutor.getJdbcValueBindings(),
null,
RowMutationOperations.DEFAULT_RESTRICTOR,
session
);
mutationExecutor.execute(
key,
null,
null,
null,
session
);
}
finally {
mutationExecutor.release();
}
}
}
private MutationOperationGroup[] buildOperationGroups() {
final var subMappingTypes =
mutationTarget.getElementPersister()
.getRootEntityDescriptor()
.getSubMappingTypes();
final var operationGroups = new MutationOperationGroup[subMappingTypes.size()];
int i = 0;
for ( var subMappingType : subMappingTypes ) {
operationGroups[i++] = buildOperationGroup( subMappingType.getEntityPersister() );
}
return operationGroups;
}
private MutationOperationGroup buildOperationGroup(EntityPersister elementPersister) {
assert mutationTarget.getTargetPart() != null
&& mutationTarget.getTargetPart().getKeyDescriptor() != null;
// if ( MODEL_MUTATION_LOGGER.isTraceEnabled() ) {
// MODEL_MUTATION_LOGGER.tracef( "Starting RemoveCoordinator#buildOperationGroup - %s",
// mutationTarget.getRolePath() );
// }
final var collectionTableMapping = mutationTarget.getCollectionTableMapping();
final var tableReference = new MutatingTableReference(
new CollectionTableMapping(
elementPersister.getMappedTableDetails().getTableName(),
collectionTableMapping.getSpaces(),
collectionTableMapping.isJoinTable(),
collectionTableMapping.isInverse(),
collectionTableMapping.getInsertDetails(),
collectionTableMapping.getUpdateDetails(),
collectionTableMapping.isCascadeDeleteEnabled(),
collectionTableMapping.getDeleteDetails(),
collectionTableMapping.getDeleteRowDetails()
)
);
return singleOperation( MutationType.DELETE, mutationTarget,
operationProducer.createOperation( tableReference ) );
}
}
| RemoveCoordinatorTablePerSubclass |
java | junit-team__junit5 | junit-platform-commons/src/main/java/org/junit/platform/commons/support/AnnotationSupport.java | {
"start": 16350,
"end": 16597
} | interface ____ are annotated or <em>meta-annotated</em> with the specified
* {@code annotationType} and match the specified {@code predicate}, using
* top-down search semantics within the type hierarchy.
*
* <p>Fields declared in the same | that |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/KeyVaultEndpointBuilderFactory.java | {
"start": 8902,
"end": 11003
} | interface ____ {
/**
* Azure Key Vault (camel-azure-key-vault)
* Manage secrets and keys in Azure Key Vault Service
*
* Category: cloud,cloud
* Since: 3.17
* Maven coordinates: org.apache.camel:camel-azure-key-vault
*
* @return the dsl builder for the headers' name.
*/
default KeyVaultHeaderNameBuilder azureKeyVault() {
return KeyVaultHeaderNameBuilder.INSTANCE;
}
/**
* Azure Key Vault (camel-azure-key-vault)
* Manage secrets and keys in Azure Key Vault Service
*
* Category: cloud,cloud
* Since: 3.17
* Maven coordinates: org.apache.camel:camel-azure-key-vault
*
* Syntax: <code>azure-key-vault:vaultName</code>
*
* Path parameter: vaultName
* Vault Name to be used
*
* @param path vaultName
* @return the dsl builder
*/
default KeyVaultEndpointBuilder azureKeyVault(String path) {
return KeyVaultEndpointBuilderFactory.endpointBuilder("azure-key-vault", path);
}
/**
* Azure Key Vault (camel-azure-key-vault)
* Manage secrets and keys in Azure Key Vault Service
*
* Category: cloud,cloud
* Since: 3.17
* Maven coordinates: org.apache.camel:camel-azure-key-vault
*
* Syntax: <code>azure-key-vault:vaultName</code>
*
* Path parameter: vaultName
* Vault Name to be used
*
* @param componentName to use a custom component name for the endpoint
* instead of the default name
* @param path vaultName
* @return the dsl builder
*/
default KeyVaultEndpointBuilder azureKeyVault(String componentName, String path) {
return KeyVaultEndpointBuilderFactory.endpointBuilder(componentName, path);
}
}
/**
* The builder of headers' name for the Azure Key Vault component.
*/
public static | KeyVaultBuilders |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/MonoReduce.java | {
"start": 1786,
"end": 5666
} | class ____<T> implements InnerOperator<T, T>,
Fuseable,
QueueSubscription<T> {
static final Object CANCELLED = new Object();
final BiFunction<T, T, T> aggregator;
final CoreSubscriber<? super T> actual;
@Nullable T aggregate;
@SuppressWarnings("NotNullFieldNotInitialized") // s initialized in onSubscribe
Subscription s;
boolean done;
ReduceSubscriber(CoreSubscriber<? super T> actual,
BiFunction<T, T, T> aggregator) {
this.actual = actual;
this.aggregator = aggregator;
}
@Override
public CoreSubscriber<? super T> actual() {
return this.actual;
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.TERMINATED) return done;
if (key == Attr.CANCELLED) return !done && aggregate == CANCELLED;
if (key == Attr.PREFETCH) return 0;
if (key == Attr.PARENT) return s;
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return InnerOperator.super.scanUnsafe(key);
}
@Override
public void onSubscribe(Subscription s) {
if (Operators.validate(this.s, s)) {
this.s = s;
actual.onSubscribe(this);
}
}
@Override
public void onNext(T t) {
if (done) {
Operators.onNextDropped(t, actual.currentContext());
return;
}
final T r = this.aggregate;
if (r == CANCELLED) {
Operators.onDiscard(t, actual.currentContext());
return;
}
// initial scenario when aggregate has nothing in it
if (r == null) {
synchronized (this) {
if (this.aggregate == null) {
this.aggregate = t;
return;
}
}
Operators.onDiscard(t, actual.currentContext());
}
else {
try {
synchronized (this) {
if (this.aggregate != CANCELLED) {
this.aggregate = Objects.requireNonNull(aggregator.apply(r, t), "The aggregator returned a null value");
return;
}
}
Operators.onDiscard(t, actual.currentContext());
}
catch (Throwable ex) {
done = true;
Context ctx = actual.currentContext();
synchronized (this) {
this.aggregate = null;
}
Operators.onDiscard(t, ctx);
Operators.onDiscard(r, ctx);
actual.onError(Operators.onOperatorError(s, ex, t,
actual.currentContext()));
}
}
}
@Override
public void onError(Throwable t) {
if (done) {
Operators.onErrorDropped(t, actual.currentContext());
return;
}
done = true;
final T r;
synchronized (this) {
r = this.aggregate;
this.aggregate = null;
}
if (r == CANCELLED) {
Operators.onErrorDropped(t, actual.currentContext());
return;
}
if (r != null) {
Operators.onDiscard(r, actual.currentContext());
}
actual.onError(t);
}
@Override
public void onComplete() {
if (done) {
return;
}
done = true;
final T r;
synchronized (this) {
r = this.aggregate;
this.aggregate = null;
}
if (r == CANCELLED) {
return;
}
if (r == null) {
actual.onComplete();
}
else {
actual.onNext(r);
actual.onComplete();
}
}
@Override
public void cancel() {
s.cancel();
final T r;
synchronized (this) {
r = this.aggregate;
@SuppressWarnings("unchecked")
T cancelled = (T) CANCELLED;
this.aggregate = cancelled;
}
if (r == null || r == CANCELLED) {
return;
}
Operators.onDiscard(r, actual.currentContext());
}
@Override
public void request(long n) {
s.request(Long.MAX_VALUE);
}
@Override
public @Nullable T poll() {
return null;
}
@Override
public int requestFusion(int requestedMode) {
return Fuseable.NONE;
}
@Override
public int size() {
return 0;
}
@Override
public boolean isEmpty() {
return true;
}
@Override
public void clear() {
}
}
}
| ReduceSubscriber |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/archive/spi/ArchiveEntry.java | {
"start": 217,
"end": 706
} | interface ____ {
/**
* Get the entry's name
*
* @return The name
*/
String getName();
/**
* Get the relative name of the entry within the archive. Typically what we are looking for here is
* the ClassLoader resource lookup name.
*
* @return The name relative to the archive root
*/
String getNameWithinArchive();
/**
* Get access to the stream for the entry
*
* @return Obtain stream access to the entry
*/
InputStreamAccess getStreamAccess();
}
| ArchiveEntry |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/ext/ExternalTypeIdTest.java | {
"start": 20997,
"end": 21628
} | class ____ implements Fruit3008 {
public String name;
public String color;
public Orange(@JsonProperty("name") String name, @JsonProperty("name") String color) {
this.name = name;
this.color = color;
}
}
// for [databind#3008]
@Test
public void testIssue3008() throws Exception
{
ObjectReader r = MAPPER.readerFor(Box3008.class);
Box3008 deserOrangeBox = r.readValue("{\"type\":null,\"fruit\":null}");
assertNull(deserOrangeBox.fruit);
assertNull(deserOrangeBox.type); // error: "expected null, but was:<null>"
}
}
| Orange |
java | quarkusio__quarkus | extensions/resteasy-classic/rest-client-config/runtime/src/main/java/io/quarkus/restclient/config/RestClientsConfig.java | {
"start": 22840,
"end": 28053
} | class ____ have a public no-argument constructor.
*/
Optional<String> hostnameVerifier();
/**
* The name of the TLS configuration to use.
* <p>
* If a name is configured, it uses the configuration from {@code quarkus.tls.<name>.*}
* If a name is configured, but no TLS configuration is found with that name then an error will be thrown.
* The default TLS configuration will be ignored.
* <p>
* If no named TLS configuration is set, then the key-store, trust-store, etc. properties will be used.
* <p>
* This property is not applicable to the RESTEasy Client.
*/
Optional<String> tlsConfigurationName();
/**
* The time in ms for which a connection remains unused in the connection pool before being evicted and closed.
* A timeout of {@code 0} means there is no timeout.
*/
OptionalInt connectionTTL();
/**
* The size of the connection pool for this client.
*/
@ConfigDocDefault("50")
OptionalInt connectionPoolSize();
/**
* If set to false disables the keep alive completely.
*/
Optional<Boolean> keepAliveEnabled();
/**
* The maximum number of redirection a request can follow.
* <p>
* This property is not applicable to the RESTEasy Client.
*/
OptionalInt maxRedirects();
/**
* The HTTP headers that should be applied to all requests of the rest client.
* <p>
* This property is not applicable to the RESTEasy Client.
*/
@ConfigDocMapKey("header-name")
Map<String, String> headers();
/**
* Set to true to share the HTTP client between REST clients.
* There can be multiple shared clients distinguished by <em>name</em>, when no specific name is set,
* the name <code>__vertx.DEFAULT</code> is used.
* <p>
* This property is not applicable to the RESTEasy Client.
*/
Optional<Boolean> shared();
/**
* Set the HTTP client name, used when the client is shared, otherwise ignored.
* <p>
* This property is not applicable to the RESTEasy Client.
*/
Optional<String> name();
/**
* Configure the HTTP user-agent header to use.
* <p>
* This property is not applicable to the RESTEasy Client.
*/
Optional<String> userAgent();
/**
* If this is true then HTTP/2 will be enabled.
*/
Optional<Boolean> http2();
/**
* Configures the HTTP/2 upgrade maximum length of the aggregated content in bytes.
* <p>
* This property is not applicable to the RESTEasy Client.
*/
@ConfigDocDefault("64K")
Optional<MemorySize> http2UpgradeMaxContentLength();
/**
* Configures two different things:
* <ul>
* <li>The max HTTP chunk size, up to {@code Integer.MAX_VALUE} bytes.</li>
* <li>The size of the chunk to be read when an {@link InputStream} is being read and sent to the server</li>
* </ul>
* <p>
* This property is not applicable to the RESTEasy Client.
*/
@ConfigDocDefault("8K")
Optional<MemorySize> maxChunkSize();
/**
* Supports receiving compressed messages using GZIP.
* When this feature is enabled and a server returns a response that includes the header {@code Content-Encoding: gzip},
* REST Client will automatically decode the content and proceed with the message handling.
* <p>
* This property is not applicable to the RESTEasy Client.
*/
Optional<Boolean> enableResponseDecompression();
/**
* If the Application-Layer Protocol Negotiation is enabled, the client will negotiate which protocol to use over the
* protocols exposed by the server. By default, it will try to use HTTP/2 first and if it's not enabled, it will
* use HTTP/1.1.
* When the property `http2` is enabled, this flag will be automatically enabled.
*/
Optional<Boolean> alpn();
/**
* If {@code true}, the stacktrace of the invocation of the REST Client method is captured.
* This stacktrace will be used if the invocation throws an exception
*/
Optional<Boolean> captureStacktrace();
/**
* If set to {@code true}, then this REST Client will not the default exception mapper which
* always throws an exception if HTTP response code >= 400.
* <p>
* This property is only taken into account if the REST Client returns {@code jakarta.ws.rs.core.Response} or
* {@code org.jboss.resteasy.reactive.RestResponse}
* <p>
* This property is not applicable to the RESTEasy Client.
*/
@WithDefault("false")
Boolean disableDefaultMapper();
/**
* Logging configuration.
*/
Optional<RestClientLoggingConfig> logging();
}
}
| must |
java | apache__kafka | clients/src/main/java/org/apache/kafka/clients/admin/internals/AlterShareGroupOffsetsHandler.java | {
"start": 1845,
"end": 8209
} | class ____ extends AdminApiHandler.Batched<CoordinatorKey, Map<TopicPartition, ApiException>> {
private final CoordinatorKey groupId;
private final Logger log;
private final Map<TopicPartition, Long> offsets;
private final CoordinatorStrategy lookupStrategy;
public AlterShareGroupOffsetsHandler(String groupId, Map<TopicPartition, Long> offsets, LogContext logContext) {
this.groupId = CoordinatorKey.byGroupId(groupId);
this.offsets = offsets;
this.log = logContext.logger(AlterShareGroupOffsetsHandler.class);
this.lookupStrategy = new CoordinatorStrategy(FindCoordinatorRequest.CoordinatorType.GROUP, logContext);
}
public static AdminApiFuture.SimpleAdminApiFuture<CoordinatorKey, Map<TopicPartition, ApiException>> newFuture(String groupId) {
return AdminApiFuture.forKeys(Set.of(CoordinatorKey.byGroupId(groupId)));
}
private void validateKeys(Set<CoordinatorKey> groupIds) {
if (!groupIds.equals(Set.of(groupId))) {
throw new IllegalArgumentException("Received unexpected group ids " + groupIds +
" (expected only " + Set.of(groupId) + ")");
}
}
@Override
AlterShareGroupOffsetsRequest.Builder buildBatchedRequest(int brokerId, Set<CoordinatorKey> groupIds) {
var data = new AlterShareGroupOffsetsRequestData().setGroupId(groupId.idValue);
offsets.forEach((tp, offset) -> {
var topic = data.topics().find(tp.topic());
if (topic == null) {
topic = new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestTopic()
.setTopicName(tp.topic());
data.topics().add(topic);
}
topic.partitions().add(new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestPartition()
.setPartitionIndex(tp.partition())
.setStartOffset(offset));
});
return new AlterShareGroupOffsetsRequest.Builder(data);
}
@Override
public String apiName() {
return "alterShareGroupOffsets";
}
@Override
public ApiResult<CoordinatorKey, Map<TopicPartition, ApiException>> handleResponse(Node broker, Set<CoordinatorKey> keys, AbstractResponse abstractResponse) {
validateKeys(keys);
AlterShareGroupOffsetsResponse response = (AlterShareGroupOffsetsResponse) abstractResponse;
final Set<CoordinatorKey> groupsToUnmap = new HashSet<>();
final Set<CoordinatorKey> groupsToRetry = new HashSet<>();
final Map<TopicPartition, ApiException> partitionResults = new HashMap<>();
if (response.data().errorCode() != Errors.NONE.code()) {
final Errors topLevelError = Errors.forCode(response.data().errorCode());
final String topLevelErrorMessage = response.data().errorMessage();
offsets.forEach((topicPartition, offset) ->
handleError(
groupId,
topicPartition,
topLevelError,
topLevelErrorMessage,
partitionResults,
groupsToUnmap,
groupsToRetry
));
} else {
response.data().responses().forEach(topic -> topic.partitions().forEach(partition -> {
final Errors partitionError = Errors.forCode(partition.errorCode());
if (partitionError != Errors.NONE) {
String errorMessageToLog = partition.errorMessage() == null ? "" : partition.errorMessage();
log.debug("AlterShareGroupOffsets request for group id {} and topic-partition {}-{} failed and returned error {}. {}",
groupId.idValue, topic.topicName(), partition.partitionIndex(), partitionError.name(), errorMessageToLog);
}
partitionResults.put(new TopicPartition(topic.topicName(), partition.partitionIndex()), partitionError.exception(partition.errorMessage()));
}));
}
if (groupsToUnmap.isEmpty() && groupsToRetry.isEmpty()) {
return ApiResult.completed(groupId, partitionResults);
} else {
return ApiResult.unmapped(new ArrayList<>(groupsToUnmap));
}
}
private void handleError(
CoordinatorKey groupId,
TopicPartition topicPartition,
Errors error,
String errorMessage,
Map<TopicPartition, ApiException> partitionResults,
Set<CoordinatorKey> groupsToUnmap,
Set<CoordinatorKey> groupsToRetry
) {
String errorMessageToLog = errorMessage == null ? "" : errorMessage;
switch (error) {
case COORDINATOR_LOAD_IN_PROGRESS:
case REBALANCE_IN_PROGRESS:
log.debug("AlterShareGroupOffsets request for group id {} returned error {}. Will retry. {}",
groupId.idValue, error, errorMessageToLog);
groupsToRetry.add(groupId);
break;
case COORDINATOR_NOT_AVAILABLE:
case NOT_COORDINATOR:
log.debug("AlterShareGroupOffsets request for group id {} returned error {}. Will rediscover the coordinator and retry. {}",
groupId.idValue, error, errorMessageToLog);
groupsToUnmap.add(groupId);
break;
case GROUP_ID_NOT_FOUND:
case NON_EMPTY_GROUP:
case INVALID_REQUEST:
case UNKNOWN_SERVER_ERROR:
case KAFKA_STORAGE_ERROR:
case GROUP_AUTHORIZATION_FAILED:
log.debug("AlterShareGroupOffsets request for group id {} failed due to error {}. {}",
groupId.idValue, error, errorMessageToLog);
partitionResults.put(topicPartition, error.exception(errorMessage));
break;
default:
log.error("AlterShareGroupOffsets request for group id {} failed due to unexpected error {}. {}",
groupId.idValue, error, errorMessageToLog);
partitionResults.put(topicPartition, error.exception(errorMessage));
}
}
@Override
public AdminApiLookupStrategy<CoordinatorKey> lookupStrategy() {
return lookupStrategy;
}
}
| AlterShareGroupOffsetsHandler |
java | grpc__grpc-java | benchmarks/src/main/java/io/grpc/benchmarks/qps/ClientConfiguration.java | {
"start": 1288,
"end": 2656
} | class ____ implements Configuration {
private static final ClientConfiguration DEFAULT = new ClientConfiguration();
Transport transport = Transport.NETTY_NIO;
boolean tls;
boolean testca;
String authorityOverride = TestUtils.TEST_SERVER_HOST;
boolean useDefaultCiphers;
boolean directExecutor;
String target;
int channels = 4;
int outstandingRpcsPerChannel = 10;
int serverPayload;
int clientPayload;
int flowControlWindow = Utils.DEFAULT_FLOW_CONTROL_WINDOW;
// seconds
int duration = 60;
// seconds
int warmupDuration = 10;
int targetQps;
String histogramFile;
RpcType rpcType = RpcType.UNARY;
PayloadType payloadType = PayloadType.COMPRESSABLE;
private ClientConfiguration() {
}
public ManagedChannel newChannel() throws IOException {
return Utils.newClientChannel(transport, target, tls, testca, authorityOverride,
flowControlWindow, directExecutor);
}
public Messages.SimpleRequest newRequest() {
return Utils.makeRequest(payloadType, clientPayload, serverPayload);
}
/**
* Constructs a builder for configuring a client application with supported parameters. If no
* parameters are provided, all parameters are assumed to be supported.
*/
static Builder newBuilder(ClientParam... supportedParams) {
return new Builder(supportedParams);
}
static final | ClientConfiguration |
java | quarkusio__quarkus | extensions/tls-registry/runtime/src/main/java/io/quarkus/tls/runtime/config/JKSKeyStoreConfig.java | {
"start": 167,
"end": 827
} | interface ____ {
/**
* Path to the keystore file (JKS format).
*/
Path path();
/**
* Password of the key store.
* When not set, the password must be retrieved from the credential provider.
*/
Optional<String> password();
/**
* Alias of the private key and certificate in the key store.
*/
Optional<String> alias();
/**
* Password of the alias in the key store.
* When not set, the password may be retrieved from the credential provider.
*/
Optional<String> aliasPassword();
/**
* Provider of the key store.
*/
Optional<String> provider();
}
| JKSKeyStoreConfig |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/values/ValuesInputFormat.java | {
"start": 1859,
"end": 3892
} | class ____ extends GenericInputFormat<RowData>
implements NonParallelInput, ResultTypeQueryable<RowData>, LineageVertexProvider {
private static final String LINEAGE_NAMESPACE = "values://ValuesInputFormat";
private static final Logger LOG = LoggerFactory.getLogger(ValuesInputFormat.class);
private static final long serialVersionUID = 1L;
private GeneratedInput<GenericInputFormat<RowData>> generatedInput;
private final InternalTypeInfo<RowData> returnType;
private GenericInputFormat<RowData> format;
public ValuesInputFormat(
GeneratedInput<GenericInputFormat<RowData>> generatedInput,
InternalTypeInfo<RowData> returnType) {
this.generatedInput = generatedInput;
this.returnType = returnType;
}
@Override
public void open(GenericInputSplit split) {
LOG.debug(
"Compiling GenericInputFormat: {} \n\n Code:\n{}",
generatedInput.getClassName(),
generatedInput.getCode());
LOG.debug("Instantiating GenericInputFormat.");
format = generatedInput.newInstance(getRuntimeContext().getUserCodeClassLoader());
generatedInput = null;
}
@Override
public boolean reachedEnd() throws IOException {
return format.reachedEnd();
}
@Override
public RowData nextRecord(RowData reuse) throws IOException {
return format.nextRecord(reuse);
}
@Override
public InternalTypeInfo<RowData> getProducedType() {
return returnType;
}
@Override
public LineageVertex getLineageVertex() {
return new SourceLineageVertex() {
@Override
public Boundedness boundedness() {
return Boundedness.BOUNDED;
}
@Override
public List<LineageDataset> datasets() {
return Arrays.asList(
new DefaultLineageDataset("", LINEAGE_NAMESPACE, new HashMap<>()));
}
};
}
}
| ValuesInputFormat |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/oauth2/server/authorization/OidcProviderConfigurationTests.java | {
"start": 19612,
"end": 19942
} | class ____ extends AuthorizationServerConfiguration {
@Bean
AuthorizationServerSettings authorizationServerSettings() {
return AuthorizationServerSettings.builder().issuer(ISSUER + "?").build();
}
}
@EnableWebSecurity
@Configuration(proxyBeanMethods = false)
static | AuthorizationServerConfigurationWithIssuerEmptyQuery |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/resource/beans/container/internal/ContainerManagedLifecycleStrategy.java | {
"start": 4451,
"end": 4653
} | class ____ known to CDI : " + beanType.getName(), e );
}
}
@Override
protected B produceFallbackInstance() {
return fallbackProducer.produceBeanInstance( beanType );
}
}
private static | not |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/plugins/PluginIntrospectorTests.java | {
"start": 11651,
"end": 11896
} | class ____ extends Plugin implements IngestPlugin {
@Override
public Map<String, Processor.Factory> getProcessors(Processor.Parameters parameters) {
return null;
}
}
| BazIngestPlugin |
java | apache__kafka | clients/src/test/java/org/apache/kafka/common/record/ControlRecordUtilsTest.java | {
"start": 1508,
"end": 3986
} | class ____ {
@Test
public void testCurrentVersions() {
// If any of these asserts fail, please make sure that Kafka supports reading and
// writing the latest version for these records.
assertEquals(
(short) 0,
ControlRecordUtils.LEADER_CHANGE_CURRENT_VERSION
);
assertEquals(
SnapshotHeaderRecord.HIGHEST_SUPPORTED_VERSION,
ControlRecordUtils.SNAPSHOT_HEADER_CURRENT_VERSION
);
assertEquals(
SnapshotFooterRecord.HIGHEST_SUPPORTED_VERSION,
ControlRecordUtils.SNAPSHOT_FOOTER_CURRENT_VERSION
);
assertEquals(
KRaftVersionRecord.HIGHEST_SUPPORTED_VERSION,
ControlRecordUtils.KRAFT_VERSION_CURRENT_VERSION
);
assertEquals(
VotersRecord.HIGHEST_SUPPORTED_VERSION,
ControlRecordUtils.KRAFT_VOTERS_CURRENT_VERSION
);
}
@Test
public void testInvalidControlRecordType() {
IllegalArgumentException thrown = assertThrows(
IllegalArgumentException.class,
() -> testDeserializeRecord(ControlRecordType.COMMIT)
);
assertEquals(
"Expected KRAFT_VOTERS control record type(6), but found COMMIT",
thrown.getMessage()
);
}
@Test
public void testDeserializeByteData() {
testDeserializeRecord(ControlRecordType.KRAFT_VOTERS);
}
private void testDeserializeRecord(ControlRecordType controlRecordType) {
final int voterId = 0;
final List<Voter> voters = Collections.singletonList(
new Voter().setVoterId(voterId)
);
VotersRecord data = new VotersRecord().setVoters(voters);
ByteBuffer valueBuffer = ByteBuffer.allocate(256);
data.write(new ByteBufferAccessor(valueBuffer), new ObjectSerializationCache(), data.highestSupportedVersion());
valueBuffer.flip();
byte[] keyData = new byte[]{0, 0, 0, (byte) controlRecordType.type()};
DefaultRecord record = new DefaultRecord(
256, (byte) 0, 0, 0L, 0, ByteBuffer.wrap(keyData), valueBuffer, null
);
VotersRecord deserializedData = ControlRecordUtils.deserializeVotersRecord(record);
assertEquals(voters, deserializedData.voters());
assertEquals(Collections.singletonList(
new Voter().setVoterId(voterId)), deserializedData.voters());
}
}
| ControlRecordUtilsTest |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/core/publisher/MonoSourceTest.java | {
"start": 1041,
"end": 7021
} | class ____ {
@Test
public void empty() {
Mono<Integer> m = Mono.from(Flux.empty());
assertThat(m == Mono.<Integer>empty()).isTrue();
StepVerifier.create(m)
.verifyComplete();
}
@Test
public void just() {
Mono<Integer> m = Mono.from(Flux.just(1));
assertThat(m).isInstanceOf(MonoJust.class);
StepVerifier.create(m)
.expectNext(1)
.verifyComplete();
}
@Test
public void error() {
Mono<Integer> m = Mono.from(Flux.error(new Exception("test")));
assertThat(m).isInstanceOf(MonoError.class);
StepVerifier.create(m)
.verifyErrorMessage("test");
}
@Test
public void errorPropagate() {
Mono<Integer> m = Mono.from(Flux.error(new Error("test")));
assertThat(m).isInstanceOf(MonoError.class);
StepVerifier.create(m)
.verifyErrorMessage("test");
}
@Test
public void justNext() {
StepVerifier.create(Mono.from(Flux.just(1, 2, 3)))
.expectNext(1)
.verifyComplete();
}
@Test
public void asJustNext() {
StepVerifier.create(Flux.just(1, 2, 3).as(Mono::from))
.expectNext(1)
.verifyComplete();
}
@Test
public void monoNext() {
StepVerifier.create(Flux.just(1, 2, 3).next())
.expectNext(1)
.verifyComplete();
}
@Test
public void monoDirect() {
StepVerifier.create(Flux.just(1).as(Mono::fromDirect))
.expectNext(1)
.verifyComplete();
}
@Test
public void monoDirectHidden() {
StepVerifier.create(Flux.just(1).hide().as(Mono::fromDirect))
.expectNext(1)
.verifyComplete();
}
@Test
public void monoDirectIdentity() {
StepVerifier.create(Mono.just(1).as(Mono::fromDirect))
.expectNext(1)
.verifyComplete();
}
@Test
public void monoDirectPlainFuseable() {
StepVerifier.create(Mono.just(1).as(TestPubFuseable::new))
.expectNext(1)
.verifyComplete();
}
@Test
public void monoDirectPlain() {
StepVerifier.create(Mono.just(1).as(TestPub::new))
.expectNext(1)
.verifyComplete();
}
@Test
public void monoFromFluxThatIsItselfFromMono() {
AtomicBoolean emitted = new AtomicBoolean();
AtomicBoolean terminated = new AtomicBoolean();
AtomicBoolean cancelled = new AtomicBoolean();
AtomicBoolean succeeded = new AtomicBoolean();
Mono<String> withCallback = Mono.just("foo")
.doOnNext(v -> emitted.set(true));
Mono<String> original = withCallback
.doOnCancel(() -> cancelled.set(true))
.doOnSuccess(v -> succeeded.set(true))
.doOnTerminate(() -> terminated.set(true))
.hide();
assertThat(withCallback).as("withCallback is not Callable")
.isNotInstanceOf(Fuseable.ScalarCallable.class)
.isNotInstanceOf(Callable.class);
assertThat(original).as("original is not callable Mono")
.isNotInstanceOf(Fuseable.class)
.isNotInstanceOf(Fuseable.ScalarCallable.class)
.isNotInstanceOf(Callable.class);
Flux<String> firstConversion = Flux.from(original);
Mono<String> secondConversion = Mono.from(firstConversion);
assertThat(secondConversion.block()).isEqualTo("foo");
assertThat(emitted).as("emitted").isTrue();
assertThat(succeeded).as("succeeded").isTrue();
assertThat(cancelled).as("cancelled").isFalse();
assertThat(terminated).as("terminated").isTrue();
assertThat(secondConversion).as("conversions negated").isSameAs(original);
}
@Test
public void monoFromFluxThatIsItselfFromMonoFuseable() {
Mono<String> original = Mono.just("foo").map(v -> v + "bar");
Flux<String> firstConversion = Flux.from(original);
Mono<String> secondConversion = Mono.from(firstConversion);
assertThat(original).isInstanceOf(Fuseable.class);
assertThat(secondConversion).isInstanceOf(Fuseable.class);
assertThat(secondConversion.block()).isEqualTo("foobar");
assertThat(secondConversion).as("conversions negated").isSameAs(original);
}
@Test
public void monoFromFluxThatIsItselfFromMono_scalarCallableNotOptimized() {
Mono<String> original = Mono.just("foo");
Flux<String> firstConversion = Flux.from(original);
Mono<String> secondConversion = Mono.from(firstConversion);
assertThat(secondConversion.block()).isEqualTo("foo");
assertThat(secondConversion).as("conversions not negated but equivalent")
.isNotSameAs(original)
.hasSameClassAs(original);
}
@Test
public void monoFromFluxItselfMonoToFlux() {
Mono<String> original = Mono.just("foo").hide();
Flux<String> firstConversion = original.flux();
Mono<String> secondConversion = Mono.from(firstConversion);
assertThat(secondConversion.block()).isEqualTo("foo");
assertThat(secondConversion).as("conversions negated").isSameAs(original);
}
@Test
public void monoFromFluxItselfMonoToFlux_fuseable() {
Mono<String> original = Mono.just("foo").map(v -> v + "bar");
Flux<String> firstConversion = original.flux();
Mono<String> secondConversion = Mono.from(firstConversion);
assertThat(original).isInstanceOf(Fuseable.class);
assertThat(secondConversion).isInstanceOf(Fuseable.class);
assertThat(secondConversion.block()).isEqualTo("foobar");
assertThat(secondConversion).as("conversions negated").isSameAs(original);
}
@Test
public void monoFromFluxItselfMonoToFlux_scalarCallableNotOptimized() {
Mono<String> original = Mono.just("foo");
Flux<String> firstConversion = original.flux();
Mono<String> secondConversion = Mono.from(firstConversion);
assertThat(secondConversion.block()).isEqualTo("foo");
assertThat(secondConversion).as("conversions not negated but equivalent")
.isNotSameAs(original)
.hasSameClassAs(original);
}
final static | MonoSourceTest |
java | quarkusio__quarkus | integration-tests/smallrye-config/src/test/java/io/quarkus/it/smallrye/config/QuarkusTestProfileTest.java | {
"start": 971,
"end": 1460
} | class ____ implements QuarkusTestProfile {
@Override
public Map<String, String> getConfigOverrides() {
Map<String, String> configs = new HashMap<>();
configs.put("quarkus.config.locations", "test-profile.properties");
configs.put("smallrye.config.locations", "relocate.properties");
return configs;
}
@Override
public String getConfigProfile() {
return "custom";
}
}
}
| TestProfile |
java | apache__camel | components/camel-zeebe/src/main/java/org/apache/camel/component/zeebe/ZeebeConsumer.java | {
"start": 1609,
"end": 2926
} | class ____ extends DefaultConsumer {
private static final Logger LOG = LoggerFactory.getLogger(ZeebeConsumer.class);
private final ZeebeEndpoint endpoint;
private JobWorker jobWorker;
private ObjectMapper objectMapper = new ObjectMapper();
public ZeebeConsumer(ZeebeEndpoint endpoint, Processor processor) throws CamelException {
super(endpoint, processor);
this.endpoint = endpoint;
}
@Override
protected void doStart() throws Exception {
super.doStart();
final OperationName operationName = getEndpoint().getOperationName();
switch (operationName) {
case REGISTER_JOB_WORKER:
ObjectHelper.notNull(getEndpoint().getJobKey(), "jobKey");
jobWorker = getEndpoint().getZeebeService().registerJobHandler(new ConsumerJobHandler(),
getEndpoint().getJobKey(), getEndpoint().getTimeout());
break;
default:
throw new CamelException(String.format("Invalid Operation for Consumer %s", operationName.value()));
}
}
@Override
protected void doStop() throws Exception {
super.doStop();
if (jobWorker != null && jobWorker.isOpen()) {
jobWorker.close();
}
}
private | ZeebeConsumer |
java | apache__spark | sql/hive-thriftserver/src/main/java/org/apache/hive/service/server/HiveServer2.java | {
"start": 9548,
"end": 9818
} | class ____ implements ServerOptionsExecutor {
@Override
public void execute() {
try {
startHiveServer2();
} catch (Throwable t) {
LOG.error("Error starting HiveServer2", t);
System.exit(-1);
}
}
}
}
| StartOptionExecutor |
java | alibaba__nacos | config/src/main/java/com/alibaba/nacos/config/server/service/query/handler/SpecialTagNotFoundHandler.java | {
"start": 1104,
"end": 1840
} | class ____ extends AbstractConfigQueryHandler {
private static final String SPECIAL_TAG_NOT_FOUND_HANDLER = "specialTagNotFoundHandler";
@Override
public String getName() {
return SPECIAL_TAG_NOT_FOUND_HANDLER;
}
@Override
public ConfigQueryChainResponse handle(ConfigQueryChainRequest request) throws IOException {
if (StringUtils.isNotBlank(request.getTag())) {
ConfigQueryChainResponse response = new ConfigQueryChainResponse();
response.setStatus(ConfigQueryChainResponse.ConfigQueryStatus.SPECIAL_TAG_CONFIG_NOT_FOUND);
return response;
} else {
return nextHandler.handle(request);
}
}
} | SpecialTagNotFoundHandler |
java | apache__maven | impl/maven-core/src/test/java/org/apache/maven/project/harness/Xpp3DomAttributeIterator.java | {
"start": 1209,
"end": 2366
} | class ____ implements NodeIterator {
private NodePointer parent;
private XmlNode node;
private List<Map.Entry<String, String>> attributes;
private Map.Entry<String, String> attribute;
private int position;
Xpp3DomAttributeIterator(NodePointer parent, QName qname) {
this.parent = parent;
this.node = (XmlNode) parent.getNode();
this.attributes = this.node.attributes().entrySet().stream()
.filter(a -> a.getKey().equals(qname.getName()) || "*".equals(qname.getName()))
.collect(Collectors.toList());
}
@Override
public NodePointer getNodePointer() {
if (position == 0) {
setPosition(1);
}
return (attribute == null) ? null : new Xpp3DomAttributePointer(parent, attribute);
}
@Override
public int getPosition() {
return position;
}
@Override
public boolean setPosition(int position) {
this.position = position;
attribute = (position > 0 && position <= attributes.size()) ? attributes.get(position - 1) : null;
return attribute != null;
}
}
| Xpp3DomAttributeIterator |
java | quarkusio__quarkus | extensions/vertx-http/runtime/src/main/java/io/quarkus/vertx/http/runtime/attribute/ExchangeAttributeWrapper.java | {
"start": 129,
"end": 227
} | interface ____ {
ExchangeAttribute wrap(ExchangeAttribute attribute);
}
| ExchangeAttributeWrapper |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/vertx/src/test/java/org/jboss/resteasy/reactive/server/vertx/test/response/ExceptionInWriterTest.java | {
"start": 3929,
"end": 4192
} | class ____ implements ExceptionMapper<GreetingException> {
@Override
public Response toResponse(GreetingException exception) {
return Response.status(200).entity(new Greeting("fallback")).build();
}
}
}
| GreetingExceptionMapper |
java | elastic__elasticsearch | build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/dependencies/rules/ExcludeByGroup.java | {
"start": 1187,
"end": 1633
} | class ____ implements ComponentMetadataRule {
private final List<String> groupIds;
@Inject
public ExcludeByGroup(List<String> groupIds) {
this.groupIds = groupIds;
}
@Override
public void execute(ComponentMetadataContext context) {
context.getDetails()
.allVariants(v -> v.withDependencies(dependencies -> dependencies.removeIf(dep -> groupIds.contains(dep.getGroup()))));
}
}
| ExcludeByGroup |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/mock/web/MockMultipartHttpServletRequest.java | {
"start": 1955,
"end": 5593
} | class ____ extends MockHttpServletRequest implements MultipartHttpServletRequest {
private final MultiValueMap<String, MultipartFile> multipartFiles = new LinkedMultiValueMap<>();
/**
* Create a new {@code MockMultipartHttpServletRequest} with a default
* {@link MockServletContext}.
* @see #MockMultipartHttpServletRequest(ServletContext)
*/
public MockMultipartHttpServletRequest() {
this(null);
}
/**
* Create a new {@code MockMultipartHttpServletRequest} with the supplied {@link ServletContext}.
* @param servletContext the ServletContext that the request runs in
* (may be {@code null} to use a default {@link MockServletContext})
*/
public MockMultipartHttpServletRequest(@Nullable ServletContext servletContext) {
super(servletContext);
setMethod("POST");
setContentType("multipart/form-data");
}
/**
* Add a file to this request. The parameter name from the multipart
* form is taken from the {@link MultipartFile#getName()}.
* @param file multipart file to be added
*/
public void addFile(MultipartFile file) {
Assert.notNull(file, "MultipartFile must not be null");
this.multipartFiles.add(file.getName(), file);
}
@Override
public Iterator<String> getFileNames() {
return this.multipartFiles.keySet().iterator();
}
@Override
public @Nullable MultipartFile getFile(String name) {
return this.multipartFiles.getFirst(name);
}
@Override
public List<MultipartFile> getFiles(String name) {
List<MultipartFile> multipartFiles = this.multipartFiles.get(name);
return Objects.requireNonNullElse(multipartFiles, Collections.emptyList());
}
@Override
public Map<String, MultipartFile> getFileMap() {
return this.multipartFiles.asSingleValueMap();
}
@Override
public MultiValueMap<String, MultipartFile> getMultiFileMap() {
return new LinkedMultiValueMap<>(this.multipartFiles);
}
@Override
public @Nullable String getMultipartContentType(String paramOrFileName) {
MultipartFile file = getFile(paramOrFileName);
if (file != null) {
return file.getContentType();
}
try {
Part part = getPart(paramOrFileName);
if (part != null) {
return part.getContentType();
}
}
catch (ServletException | IOException ex) {
// Should never happen (we're not actually parsing)
throw new IllegalStateException(ex);
}
return null;
}
@Override
public HttpMethod getRequestMethod() {
String method = getMethod();
Assert.state(method != null, "Method must not be null");
return HttpMethod.valueOf(method);
}
@Override
public HttpHeaders getRequestHeaders() {
HttpHeaders headers = new HttpHeaders();
Enumeration<String> headerNames = getHeaderNames();
while (headerNames.hasMoreElements()) {
String headerName = headerNames.nextElement();
headers.put(headerName, Collections.list(getHeaders(headerName)));
}
return headers;
}
@Override
public @Nullable HttpHeaders getMultipartHeaders(String paramOrFileName) {
MultipartFile file = getFile(paramOrFileName);
if (file != null) {
HttpHeaders headers = new HttpHeaders();
if (file.getContentType() != null) {
headers.add(HttpHeaders.CONTENT_TYPE, file.getContentType());
}
return headers;
}
try {
Part part = getPart(paramOrFileName);
if (part != null) {
HttpHeaders headers = new HttpHeaders();
for (String headerName : part.getHeaderNames()) {
headers.put(headerName, new ArrayList<>(part.getHeaders(headerName)));
}
return headers;
}
}
catch (Throwable ex) {
throw new MultipartException("Could not access multipart servlet request", ex);
}
return null;
}
}
| MockMultipartHttpServletRequest |
java | apache__flink | flink-python/src/main/java/org/apache/flink/table/runtime/arrow/writers/TimestampWriter.java | {
"start": 4645,
"end": 5130
} | class ____ extends TimestampWriter<ArrayData> {
private TimestampWriterForArray(ValueVector valueVector, int precision) {
super(valueVector, precision);
}
@Override
boolean isNullAt(ArrayData in, int ordinal) {
return in.isNullAt(ordinal);
}
@Override
TimestampData readTimestamp(ArrayData in, int ordinal) {
return in.getTimestamp(ordinal, precision);
}
}
}
| TimestampWriterForArray |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/index/mapper/flattened/KeyedFlattenedFieldTypeTests.java | {
"start": 1820,
"end": 9567
} | class ____ extends FieldTypeTestCase {
private static KeyedFlattenedFieldType createFieldType() {
return new KeyedFlattenedFieldType("field", IndexType.terms(true, true), "key", false, Collections.emptyMap(), false);
}
public void testIndexedValueForSearch() {
KeyedFlattenedFieldType ft = createFieldType();
BytesRef keywordValue = ft.indexedValueForSearch("value");
assertEquals(new BytesRef("key\0value"), keywordValue);
BytesRef doubleValue = ft.indexedValueForSearch(2.718);
assertEquals(new BytesRef("key\0" + "2.718"), doubleValue);
BytesRef booleanValue = ft.indexedValueForSearch(true);
assertEquals(new BytesRef("key\0true"), booleanValue);
}
public void testTermQuery() {
KeyedFlattenedFieldType ft = createFieldType();
Query expected = new TermQuery(new Term(ft.name(), "key\0value"));
assertEquals(expected, ft.termQuery("value", null));
expected = AutomatonQueries.caseInsensitiveTermQuery(new Term(ft.name(), "key\0value"));
assertEquals(expected, ft.termQueryCaseInsensitive("value", null));
KeyedFlattenedFieldType unsearchable = new KeyedFlattenedFieldType(
"field",
IndexType.terms(false, true),
"key",
false,
Collections.emptyMap(),
false
);
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQuery("field", null));
assertEquals("Cannot search on field [" + ft.name() + "] since it is not indexed.", e.getMessage());
}
public void testTermsQuery() {
KeyedFlattenedFieldType ft = createFieldType();
Query expected = new TermInSetQuery(ft.name(), List.of(new BytesRef("key\0value1"), new BytesRef("key\0value2")));
List<String> terms = new ArrayList<>();
terms.add("value1");
terms.add("value2");
Query actual = ft.termsQuery(terms, null);
assertEquals(expected, actual);
}
public void testExistsQuery() {
KeyedFlattenedFieldType ft = createFieldType();
Query expected = new PrefixQuery(new Term(ft.name(), "key\0"));
assertEquals(expected, ft.existsQuery(null));
}
public void testPrefixQuery() {
KeyedFlattenedFieldType ft = createFieldType();
Query expected = new PrefixQuery(new Term(ft.name(), "key\0val"));
assertEquals(expected, ft.prefixQuery("val", randomBoolean() ? null : CONSTANT_SCORE_BLENDED_REWRITE, false, MOCK_CONTEXT));
expected = AutomatonQueries.caseInsensitivePrefixQuery(new Term(ft.name(), "key\0vAl"));
assertEquals(expected, ft.prefixQuery("vAl", randomBoolean() ? null : CONSTANT_SCORE_BLENDED_REWRITE, true, MOCK_CONTEXT));
ElasticsearchException ee = expectThrows(
ElasticsearchException.class,
() -> ft.prefixQuery("val", randomBoolean() ? null : CONSTANT_SCORE_BLENDED_REWRITE, false, MOCK_CONTEXT_DISALLOW_EXPENSIVE)
);
assertEquals(
"[prefix] queries cannot be executed when 'search.allow_expensive_queries' is set to false. "
+ "For optimised prefix queries on text fields please enable [index_prefixes].",
ee.getMessage()
);
}
public void testFuzzyQuery() {
KeyedFlattenedFieldType ft = createFieldType();
UnsupportedOperationException e = expectThrows(
UnsupportedOperationException.class,
() -> ft.fuzzyQuery("value", Fuzziness.fromEdits(2), 1, 50, true, randomMockContext())
);
assertEquals("[fuzzy] queries are not currently supported on keyed [flattened] fields.", e.getMessage());
}
public void testRangeQuery() {
KeyedFlattenedFieldType ft = createFieldType();
TermRangeQuery expected = new TermRangeQuery(ft.name(), new BytesRef("key\0lower"), new BytesRef("key\0upper"), false, false);
assertEquals(expected, ft.rangeQuery("lower", "upper", false, false, MOCK_CONTEXT));
expected = new TermRangeQuery(ft.name(), new BytesRef("key\0lower"), new BytesRef("key\0upper"), true, true);
assertEquals(expected, ft.rangeQuery("lower", "upper", true, true, MOCK_CONTEXT));
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> ft.rangeQuery("lower", null, false, false, null));
assertEquals("[range] queries on keyed [flattened] fields must include both an upper and a lower bound.", e.getMessage());
e = expectThrows(IllegalArgumentException.class, () -> ft.rangeQuery(null, "upper", false, false, MOCK_CONTEXT));
assertEquals("[range] queries on keyed [flattened] fields must include both an upper and a lower bound.", e.getMessage());
ElasticsearchException ee = expectThrows(
ElasticsearchException.class,
() -> ft.rangeQuery("lower", "upper", false, false, MOCK_CONTEXT_DISALLOW_EXPENSIVE)
);
assertEquals(
"[range] queries on [text] or [keyword] fields cannot be executed when " + "'search.allow_expensive_queries' is set to false.",
ee.getMessage()
);
}
public void testRegexpQuery() {
KeyedFlattenedFieldType ft = createFieldType();
UnsupportedOperationException e = expectThrows(
UnsupportedOperationException.class,
() -> ft.regexpQuery("valu*", 0, 0, 10, null, randomMockContext())
);
assertEquals("[regexp] queries are not currently supported on keyed [flattened] fields.", e.getMessage());
}
public void testWildcardQuery() {
KeyedFlattenedFieldType ft = createFieldType();
UnsupportedOperationException e = expectThrows(
UnsupportedOperationException.class,
() -> ft.wildcardQuery("valu*", null, false, randomMockContext())
);
assertEquals("[wildcard] queries are not currently supported on keyed [flattened] fields.", e.getMessage());
}
public void testFetchIsEmpty() throws IOException {
Map<String, Object> sourceValue = Map.of("key", "value");
KeyedFlattenedFieldType ft = createFieldType();
assertEquals(List.of(), fetchSourceValue(ft, sourceValue));
assertEquals(List.of(), fetchSourceValue(ft, null));
}
/**
 * Fetching through a {@code ValueFetcher} extracts the sub-key's value from the flattened
 * source object, returns an empty list when the field is null, and rejects format strings.
 */
public void testFetchSourceValue() throws IOException {
    KeyedFlattenedFieldType ft = createFieldType();
    Map<String, Object> sourceValue = Map.of("key", "value");
    // Mock just enough of the search context for valueFetcher(): index settings,
    // source enabled, and the resolved source path of the sub-field.
    SearchExecutionContext searchExecutionContext = mock(SearchExecutionContext.class);
    when(searchExecutionContext.getIndexSettings()).thenReturn(IndexSettingsModule.newIndexSettings("test", Settings.EMPTY));
    when(searchExecutionContext.isSourceEnabled()).thenReturn(true);
    when(searchExecutionContext.sourcePath("field.key")).thenReturn(Set.of("field.key"));
    ValueFetcher fetcher = ft.valueFetcher(searchExecutionContext, null);
    {
        // Normal case: the "key" entry inside the flattened object is surfaced.
        Source source = Source.fromMap(Collections.singletonMap("field", sourceValue), randomFrom(XContentType.values()));
        assertEquals(List.of("value"), fetcher.fetchValues(source, -1, new ArrayList<>()));
    }
    {
        // Null field value: nothing to fetch.
        Source source = Source.fromMap(Collections.singletonMap("field", null), randomFrom(XContentType.values()));
        assertEquals(List.of(), fetcher.fetchValues(source, -1, new ArrayList<>()));
    }
    // Any non-null format is rejected for flattened fields.
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> ft.valueFetcher(searchExecutionContext, "format"));
    assertEquals("Field [field.key] of type [flattened] doesn't support formats.", e.getMessage());
}
}
| KeyedFlattenedFieldTypeTests |
java | alibaba__nacos | config/src/main/java/com/alibaba/nacos/config/server/service/repository/extrnal/ExternalConfigMigratePersistServiceImpl.java | {
"start": 3262,
"end": 24512
} | class ____ implements ConfigMigratePersistService {
/**
 * JDBC template used for all direct SQL access against the external storage.
 */
protected JdbcTemplate jt;
/**
 * Transaction template wrapping the multi-statement sync operations.
 */
protected TransactionTemplate tjt;
// Resolved once from the dynamic data source registry; supplies jt/tjt and the dialect type.
private DataSourceService dataSourceService;
// Resolves dialect-specific SQL mappers by table name.
private MapperManager mapperManager;
// Delegate used by syncConfig for plain config reads/writes.
private ConfigInfoPersistService configInfoPersistService;
// Delegate used by syncConfigGray for gray config reads/writes.
private ConfigInfoGrayPersistService configInfoGrayPersistService;
/**
 * Instantiates a new external config migrate persist service, wiring the JDBC and
 * transaction templates from the dynamically-selected data source and creating a
 * {@link MapperManager} whose SQL logging follows the plugin datasource-log property.
 *
 * @param configInfoPersistService the config info persist service
 * @param configInfoGrayPersistService the config info gray persist service
 */
public ExternalConfigMigratePersistServiceImpl(
        @Qualifier("externalConfigInfoPersistServiceImpl") ConfigInfoPersistService configInfoPersistService,
        @Qualifier("externalConfigInfoGrayPersistServiceImpl") ConfigInfoGrayPersistService configInfoGrayPersistService) {
    this.dataSourceService = DynamicDataSource.getInstance().getDataSource();
    this.jt = dataSourceService.getJdbcTemplate();
    this.tjt = dataSourceService.getTransactionTemplate();
    // SQL logging for mappers is opt-in via the plugin datasource log property (defaults to false).
    Boolean isDataSourceLogEnable = EnvUtil.getProperty(CommonConstant.NACOS_PLUGIN_DATASOURCE_LOG, Boolean.class,
            false);
    this.mapperManager = MapperManager.instance(isDataSourceLogEnable);
    this.configInfoPersistService = configInfoPersistService;
    this.configInfoGrayPersistService = configInfoGrayPersistService;
}
@Override
public <E> PaginationHelper<E> createPaginationHelper() {
    // Pagination over external storage is implemented on top of the shared JdbcTemplate.
    return new ExternalStoragePaginationHelperImpl<>(jt);
}
@Override
public Integer configInfoConflictCount(String srcUser) {
    // Build the dialect-specific conflict-count statement for the migrate-config table,
    // filtered to rows created by the given source user.
    ConfigMigrateMapper migrateMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
            TableConstant.MIGRATE_CONFIG);
    MapperContext ctx = new MapperContext();
    ctx.putWhereParameter(FieldConstant.SRC_USER, srcUser);
    MapperResult countSql = migrateMapper.getConfigConflictCount(ctx);
    Integer count = jt.queryForObject(countSql.getSql(), countSql.getParamList().toArray(), Integer.class);
    if (count == null) {
        // A COUNT query should never come back null; treat it as a hard error.
        throw new IllegalArgumentException("configInfoConflictCount error");
    }
    return count;
}
@Override
public Integer configInfoGrayConflictCount(String srcUser) {
    // Same shape as configInfoConflictCount, but counting conflicts among gray configs.
    ConfigMigrateMapper migrateMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
            TableConstant.MIGRATE_CONFIG);
    MapperContext ctx = new MapperContext();
    ctx.putWhereParameter(FieldConstant.SRC_USER, srcUser);
    MapperResult countSql = migrateMapper.getConfigGrayConflictCount(ctx);
    Integer count = jt.queryForObject(countSql.getSql(), countSql.getParamList().toArray(), Integer.class);
    if (count == null) {
        // A COUNT query should never come back null; treat it as a hard error.
        throw new IllegalArgumentException("configInfoGrayConflictCount error");
    }
    return count;
}
/**
 * Pages through config ids that still need an INSERT-style migration, starting
 * strictly after {@code startId}, returning at most {@code pageSize} ids.
 */
@Override
public List<Long> getMigrateConfigInsertIdList(long startId, int pageSize) {
    ConfigMigrateMapper configMigrateMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
            TableConstant.MIGRATE_CONFIG);
    MapperContext context = new MapperContext();
    // Cursor-style paging: ID is the lower bound, pageSize caps the batch.
    context.putWhereParameter(FieldConstant.ID, startId);
    context.setPageSize(pageSize);
    MapperResult mapperResult = configMigrateMapper.findConfigIdNeedInsertMigrate(context);
    return jt.queryForList(mapperResult.getSql(), mapperResult.getParamList().toArray(), Long.class);
}
/**
 * Gray-config counterpart of {@code getMigrateConfigInsertIdList}: pages through gray
 * config ids that still need an INSERT-style migration, starting strictly after
 * {@code startId}, returning at most {@code pageSize} ids.
 */
@Override
public List<Long> getMigrateConfigGrayInsertIdList(long startId, int pageSize) {
    ConfigMigrateMapper configMigrateMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
            TableConstant.MIGRATE_CONFIG);
    MapperContext context = new MapperContext();
    // Cursor-style paging: ID is the lower bound, pageSize caps the batch.
    context.putWhereParameter(FieldConstant.ID, startId);
    context.setPageSize(pageSize);
    MapperResult mapperResult = configMigrateMapper.findConfigGrayIdNeedInsertMigrate(context);
    return jt.queryForList(mapperResult.getSql(), mapperResult.getParamList().toArray(), Long.class);
}
/**
 * Pages through configs that need an UPDATE-style migration from the source tenant to
 * the target tenant, for rows owned by {@code srcUser}, starting after {@code startId}.
 */
@Override
public List<ConfigInfo> getMigrateConfigUpdateList(long startId, int pageSize, String srcTenant,
        String targetTenant, String srcUser) {
    ConfigMigrateMapper configMigrateMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
            TableConstant.MIGRATE_CONFIG);
    MapperContext context = new MapperContext();
    context.putWhereParameter(FieldConstant.SRC_USER, srcUser);
    // Cursor-style paging by id, plus the tenant pair that defines the migration direction.
    context.putWhereParameter(FieldConstant.ID, startId);
    context.putWhereParameter(FieldConstant.SRC_TENANT, srcTenant);
    context.putWhereParameter(FieldConstant.TARGET_TENANT, targetTenant);
    context.setPageSize(pageSize);
    MapperResult mapperResult = configMigrateMapper.findConfigNeedUpdateMigrate(context);
    return jt.query(mapperResult.getSql(), mapperResult.getParamList().toArray(),
            CONFIG_INFO_ROW_MAPPER);
}
/**
 * Gray-config counterpart of {@code getMigrateConfigUpdateList}: pages through gray
 * configs needing an UPDATE-style migration between the given tenant pair.
 */
@Override
public List<ConfigInfoGrayWrapper> getMigrateConfigGrayUpdateList(long startId, int pageSize, String srcTenant,
        String targetTenant, String srcUser) {
    ConfigMigrateMapper configMigrateMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
            TableConstant.MIGRATE_CONFIG);
    MapperContext context = new MapperContext();
    context.putWhereParameter(FieldConstant.SRC_USER, srcUser);
    // Cursor-style paging by id, plus the tenant pair that defines the migration direction.
    context.putWhereParameter(FieldConstant.ID, startId);
    context.putWhereParameter(FieldConstant.SRC_TENANT, srcTenant);
    context.putWhereParameter(FieldConstant.TARGET_TENANT, targetTenant);
    context.setPageSize(pageSize);
    MapperResult mapperResult = configMigrateMapper.findConfigGrayNeedUpdateMigrate(context);
    return jt.query(mapperResult.getSql(), mapperResult.getParamList().toArray(),
            CONFIG_INFO_GRAY_WRAPPER_ROW_MAPPER);
}
/**
 * Executes the batch INSERT migration for the given config ids on behalf of
 * {@code srcUser}. Connection failures are logged at FATAL and rethrown.
 */
@Override
public void migrateConfigInsertByIds(List<Long> ids, String srcUser) {
    ConfigMigrateMapper configMigrateMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
            TableConstant.MIGRATE_CONFIG);
    MapperContext context = new MapperContext();
    context.putWhereParameter(FieldConstant.IDS, ids);
    context.putWhereParameter(FieldConstant.SRC_USER, srcUser);
    MapperResult mapperResult = configMigrateMapper.migrateConfigInsertByIds(context);
    try {
        jt.update(mapperResult.getSql(), mapperResult.getParamList().toArray());
    } catch (CannotGetJdbcConnectionException e) {
        // Connection loss is unrecoverable here; surface it to the caller after logging.
        LogUtil.FATAL_LOG.error("[db-error] migrateConfigInsertByIds" + e, e);
        throw e;
    }
}
/**
 * Gray-config counterpart of {@code migrateConfigInsertByIds}: executes the batch
 * INSERT migration for the given gray config ids on behalf of {@code srcUser}.
 */
@Override
public void migrateConfigGrayInsertByIds(List<Long> ids, String srcUser) {
    ConfigMigrateMapper configMigrateMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
            TableConstant.MIGRATE_CONFIG);
    MapperContext context = new MapperContext();
    context.putWhereParameter(FieldConstant.IDS, ids);
    context.putWhereParameter(FieldConstant.SRC_USER, srcUser);
    MapperResult mapperResult = configMigrateMapper.migrateConfigGrayInsertByIds(context);
    try {
        jt.update(mapperResult.getSql(), mapperResult.getParamList().toArray());
    } catch (CannotGetJdbcConnectionException e) {
        // Connection loss is unrecoverable here; surface it to the caller after logging.
        LogUtil.FATAL_LOG.error("[db-error] migrateConfigGrayInsertByIds" + e, e);
        throw e;
    }
}
/**
 * Synchronizes one gray config from the source tenant to the target tenant inside a
 * single transaction. Three cases: source deleted -> delete target; target missing ->
 * insert a copy; source at least as new as target -> conditional update. After each
 * write the SOURCE row is re-read and compared; if it changed mid-sync, an exception
 * is thrown so the transaction rolls back and the sync can be retried.
 */
@Override
public void syncConfigGray(String dataId, String group, String tenant, String grayName, String targetTenant,
        String srcUser) {
    tjt.execute(status -> {
        try {
            ConfigInfoGrayWrapper sourceConfigInfoGrayWrapper = configInfoGrayPersistService.findConfigInfo4Gray(
                    dataId, group, tenant, grayName);
            ConfigInfoGrayWrapper targetConfigInfoGrayWrapper = configInfoGrayPersistService.findConfigInfo4Gray(
                    dataId, group, targetTenant, grayName);
            if (sourceConfigInfoGrayWrapper == null) {
                // Source is gone: propagate the deletion to the target tenant,
                // then verify the source is still absent (it may have reappeared concurrently).
                removeConfigInfoGrayWithoutHistory(dataId, group, targetTenant, grayName, null, srcUser);
                ConfigInfoGrayWrapper configInfoGrayWrapper = configInfoGrayPersistService.findConfigInfo4Gray(
                        dataId, group, tenant, grayName);
                if (configInfoGrayWrapper != null) {
                    throw new Exception("sourceConfigInfoGray has been updated,dataId=" + dataId + ",group=" + group
                            + ",tenant=" + tenant + ",grayName=" + grayName);
                }
            } else {
                if (targetConfigInfoGrayWrapper == null) {
                    // Target missing: insert a copy under the target tenant.
                    // NOTE(review): setTenant mutates the wrapper before insert; subsequent
                    // comparisons use the same (mutated) object's md5/grayRule.
                    sourceConfigInfoGrayWrapper.setTenant(targetTenant);
                    configInfoGrayPersistService.addConfigInfoGrayAtomic(-1, sourceConfigInfoGrayWrapper,
                            sourceConfigInfoGrayWrapper.getGrayName(), sourceConfigInfoGrayWrapper.getGrayRule(),
                            null, srcUser);
                    // Re-read the SOURCE row to detect a concurrent modification during the sync.
                    ConfigInfoGrayWrapper configInfoGrayWrapper = configInfoGrayPersistService.findConfigInfo4Gray(
                            dataId, group, tenant, grayName);
                    if (!StringUtils.equals(configInfoGrayWrapper.getMd5(), sourceConfigInfoGrayWrapper.getMd5())
                            || !StringUtils.equals(configInfoGrayWrapper.getGrayRule(),
                            sourceConfigInfoGrayWrapper.getGrayRule())) {
                        throw new Exception(
                                "sourceConfigInfoGray has been updated,dataId=" + dataId + ",group=" + group
                                        + ",tenant=" + tenant + ",grayName=" + grayName);
                    }
                } else if (sourceConfigInfoGrayWrapper.getLastModified()
                        >= targetConfigInfoGrayWrapper.getLastModified()) {
                    // Source is at least as new: conditionally update the target, guarded by the
                    // target's previous gmt_modified and md5 (optimistic concurrency on the target row).
                    sourceConfigInfoGrayWrapper.setTenant(targetTenant);
                    updateConfigInfo4GrayWithoutHistory(sourceConfigInfoGrayWrapper,
                            sourceConfigInfoGrayWrapper.getGrayName(), sourceConfigInfoGrayWrapper.getGrayRule(),
                            null, srcUser, targetConfigInfoGrayWrapper.getLastModified(), targetConfigInfoGrayWrapper.getMd5());
                    // Re-read the SOURCE row to detect a concurrent modification during the sync.
                    ConfigInfoGrayWrapper configInfoGrayWrapper = configInfoGrayPersistService.findConfigInfo4Gray(
                            dataId, group, tenant, grayName);
                    if (!StringUtils.equals(configInfoGrayWrapper.getMd5(), sourceConfigInfoGrayWrapper.getMd5())
                            || !StringUtils.equals(configInfoGrayWrapper.getGrayRule(),
                            sourceConfigInfoGrayWrapper.getGrayRule())) {
                        throw new Exception(
                                "sourceConfigInfoGray has been updated,dataId=" + dataId + ",group=" + group
                                        + ",tenant=" + tenant + ",grayName=" + grayName);
                    }
                }
                // If source is strictly older than target, nothing is done (target wins).
            }
        } catch (Exception e) {
            // Wrap in RuntimeException so TransactionTemplate rolls the transaction back.
            LogUtil.FATAL_LOG.error("[db-error] syncConfigGray" + e, e);
            throw new RuntimeException(e);
        }
        return null;
    });
}
/**
 * Deletes a gray config row directly, without writing a history record.
 * Blank tenant/grayName are normalized to the empty string to match storage conventions.
 *
 * @param dataId the data id
 * @param group the group
 * @param tenant the tenant
 * @param grayName the gray name
 * @param srcIp the src ip (currently unused by the delete statement)
 * @param srcUser the src user (currently unused by the delete statement)
 */
public void removeConfigInfoGrayWithoutHistory(final String dataId, final String group, final String tenant,
        final String grayName, final String srcIp, final String srcUser) {
    String tenantTmp = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant;
    String grayNameTmp = StringUtils.isBlank(grayName) ? StringUtils.EMPTY : grayName;
    try {
        ConfigInfoGrayMapper configInfoGrayMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
                TableConstant.CONFIG_INFO_GRAY);
        // Delete keyed by the gray config's full identity (data_id, group, tenant, gray_name).
        jt.update(configInfoGrayMapper.delete(Arrays.asList("data_id", "group_id", "tenant_id", "gray_name")),
                dataId, group, tenantTmp, grayNameTmp);
    } catch (CannotGetJdbcConnectionException e) {
        LogUtil.FATAL_LOG.error("[db-error] " + e, e);
        throw e;
    }
}
/**
 * Updates a gray config row without writing a history record. The UPDATE is guarded by
 * the target row's previous {@code gmt_modified} and {@code md5} (optimistic concurrency):
 * it only applies if the target row still matches {@code lastModified}/{@code targetMd5}.
 *
 * @param configInfo the config info carrying the new content (its tenant is the TARGET tenant)
 * @param grayName the gray name
 * @param grayRule the gray rule
 * @param srcIp the src ip
 * @param srcUser the src user
 * @param lastModified the target row's previous gmt_modified used in the WHERE clause
 * @param targetMd5 the target row's previous md5 used in the WHERE clause
 */
public void updateConfigInfo4GrayWithoutHistory(ConfigInfo configInfo, String grayName, String grayRule,
        String srcIp, String srcUser, long lastModified, final String targetMd5) {
    String appNameTmp = StringUtils.defaultEmptyIfBlank(configInfo.getAppName());
    String tenantTmp = StringUtils.defaultEmptyIfBlank(configInfo.getTenant());
    String grayNameTmp = StringUtils.isBlank(grayName) ? StringUtils.EMPTY : grayName.trim();
    String grayRuleTmp = StringUtils.isBlank(grayRule) ? StringUtils.EMPTY : grayRule.trim();
    Timestamp modifiedTime = new Timestamp(lastModified);
    try {
        // md5 is recomputed from the new content rather than trusted from the wrapper.
        String md5 = MD5Utils.md5Hex(configInfo.getContent(), Constants.ENCODE);
        ConfigInfoGrayMapper configInfoGrayMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
                TableConstant.CONFIG_INFO_GRAY);
        // SET clause uses gmt_modified@NOW(); WHERE clause pins the old gmt_modified and md5.
        jt.update(configInfoGrayMapper.update(
                Arrays.asList("content", "encrypted_data_key", "md5", "src_ip", "src_user", "gmt_modified@NOW()",
                        "app_name", "gray_rule"), Arrays.asList("data_id", "group_id", "tenant_id", "gray_name", "gmt_modified", "md5")),
                configInfo.getContent(), configInfo.getEncryptedDataKey(), md5, srcIp, srcUser, appNameTmp,
                grayRuleTmp, configInfo.getDataId(), configInfo.getGroup(), tenantTmp, grayNameTmp, modifiedTime, targetMd5);
    } catch (CannotGetJdbcConnectionException e) {
        LogUtil.FATAL_LOG.error("[db-error] " + e, e);
        throw e;
    }
}
/**
 * Synchronizes one plain config from the source tenant to the target tenant inside a
 * single transaction, mirroring {@code syncConfigGray}: source deleted -> delete target;
 * target missing -> insert a copy; source at least as new -> conditional update. After
 * each write the SOURCE row is re-read; if it changed mid-sync an exception is thrown
 * so the transaction rolls back.
 */
@Override
public void syncConfig(String dataId, String group, String tenant, String targetTenant, String srcUser) {
    tjt.execute(status -> {
        try {
            ConfigInfoWrapper sourceConfigInfoWrapper = configInfoPersistService.findConfigInfo(dataId, group,
                    tenant);
            ConfigInfoWrapper targetConfigInfoWrapper = configInfoPersistService.findConfigInfo(dataId, group,
                    targetTenant);
            if (sourceConfigInfoWrapper == null) {
                // Source is gone: propagate the deletion, then verify the source is still absent.
                configInfoPersistService.removeConfigInfoAtomic(dataId, group, targetTenant, null, srcUser);
                ConfigInfoWrapper configInfoWrapper = configInfoPersistService.findConfigInfo(dataId, group,
                        tenant);
                if (configInfoWrapper != null) {
                    LogUtil.FATAL_LOG.error(
                            "syncConfig failed, sourceConfigInfo has been updated,dataId=" + dataId + ",group="
                                    + group + ",tenant=" + tenant);
                    throw new Exception(
                            "syncConfig failed,sourceConfigInfo has been updated,dataId=" + dataId + ",group="
                                    + group + ",tenant=" + tenant);
                }
            } else {
                if (targetConfigInfoWrapper == null) {
                    // Target missing: insert a copy under the target tenant.
                    // NOTE(review): setTenant mutates the wrapper before insert; the later md5
                    // comparison uses this same (mutated) object.
                    sourceConfigInfoWrapper.setTenant(targetTenant);
                    configInfoPersistService.addConfigInfoAtomic(-1, null, srcUser, sourceConfigInfoWrapper, null);
                    // Re-read the SOURCE row to detect a concurrent modification during the sync.
                    ConfigInfoWrapper configInfoWrapper = configInfoPersistService.findConfigInfo(dataId, group,
                            tenant);
                    if (!StringUtils.equals(configInfoWrapper.getMd5(), sourceConfigInfoWrapper.getMd5())) {
                        LogUtil.FATAL_LOG.error(
                                "syncConfig failed, sourceConfigInfo has been updated,dataId=" + dataId + ",group="
                                        + group + ",tenant=" + tenant);
                        throw new Exception(
                                "syncConfig failed, sourceConfigInfo has been updated,dataId=" + dataId + ",group="
                                        + group + ",tenant=" + tenant);
                    }
                } else if (sourceConfigInfoWrapper.getLastModified() >= targetConfigInfoWrapper.getLastModified()) {
                    // Source is at least as new: conditionally update the target, guarded by the
                    // target's previous gmt_modified and md5.
                    sourceConfigInfoWrapper.setTenant(targetTenant);
                    updateConfigInfoAtomic(sourceConfigInfoWrapper, null, srcUser, null, targetConfigInfoWrapper.getLastModified(),
                            targetConfigInfoWrapper.getMd5());
                    // Re-read the SOURCE row to detect a concurrent modification during the sync.
                    ConfigInfoWrapper configInfoWrapper = configInfoPersistService.findConfigInfo(dataId, group,
                            tenant);
                    if (!StringUtils.equals(configInfoWrapper.getMd5(), sourceConfigInfoWrapper.getMd5())) {
                        LogUtil.FATAL_LOG.error(
                                "syncConfig failed, sourceConfigInfo has been updated,dataId=" + dataId + ",group="
                                        + group + ",tenant=" + tenant);
                        throw new Exception(
                                "syncConfig failed, sourceConfigInfo has been updated,dataId=" + dataId + ",group="
                                        + group + ",tenant=" + tenant);
                    }
                }
                // If source is strictly older than target, nothing is done (target wins).
            }
        } catch (Exception e) {
            // Wrap in RuntimeException so TransactionTemplate rolls the transaction back.
            LogUtil.FATAL_LOG.error("[db-error] syncConfig" + e, e);
            throw new RuntimeException(e);
        }
        return null;
    });
}
/**
 * Updates a plain config row, guarded by the target row's previous {@code gmt_modified}
 * and {@code md5} (optimistic concurrency): the UPDATE only applies if the row still
 * matches {@code lastModified}/{@code targetMd5}. No history record is written here.
 *
 * @param configInfo the config info carrying the new content (its tenant is the TARGET tenant)
 * @param srcIp the src ip
 * @param srcUser the src user
 * @param configAdvanceInfo the config advance info (desc/use/effect/type/schema), may be null
 * @param lastModified the target row's previous gmt_modified used in the WHERE clause
 * @param targetMd5 the target row's previous md5 used in the WHERE clause
 */
public void updateConfigInfoAtomic(final ConfigInfo configInfo, final String srcIp, final String srcUser,
        Map<String, Object> configAdvanceInfo, long lastModified, final String targetMd5) {
    String appNameTmp = StringUtils.defaultEmptyIfBlank(configInfo.getAppName());
    String tenantTmp = StringUtils.defaultEmptyIfBlank(configInfo.getTenant());
    // md5 is recomputed from the new content rather than trusted from the wrapper.
    final String md5Tmp = MD5Utils.md5Hex(configInfo.getContent(), Constants.ENCODE);
    // Advance-info fields default to null when no advance map is supplied.
    String desc = configAdvanceInfo == null ? null : (String) configAdvanceInfo.get("desc");
    String use = configAdvanceInfo == null ? null : (String) configAdvanceInfo.get("use");
    String effect = configAdvanceInfo == null ? null : (String) configAdvanceInfo.get("effect");
    String type = configAdvanceInfo == null ? null : (String) configAdvanceInfo.get("type");
    String schema = configAdvanceInfo == null ? null : (String) configAdvanceInfo.get("schema");
    Timestamp modifiedTime = new Timestamp(lastModified);
    final String encryptedDataKey =
            configInfo.getEncryptedDataKey() == null ? StringUtils.EMPTY : configInfo.getEncryptedDataKey();
    try {
        ConfigInfoMapper configInfoMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
                TableConstant.CONFIG_INFO);
        // SET clause uses gmt_modified@NOW(); WHERE clause pins the old gmt_modified and md5.
        jt.update(configInfoMapper.update(
                Arrays.asList("content", "md5", "src_ip", "src_user", "gmt_modified@NOW()", "app_name", "c_desc",
                        "c_use", "effect", "type", "c_schema", "encrypted_data_key"),
                Arrays.asList("data_id", "group_id", "tenant_id", "gmt_modified", "md5")), configInfo.getContent(), md5Tmp, srcIp,
                srcUser, appNameTmp, desc, use, effect, type, schema, encryptedDataKey, configInfo.getDataId(),
                configInfo.getGroup(), tenantTmp, modifiedTime, targetMd5);
    } catch (CannotGetJdbcConnectionException e) {
        LogUtil.FATAL_LOG.error("[db-error] " + e, e);
        throw e;
    }
}
}
| ExternalConfigMigratePersistServiceImpl |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.