language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__dubbo | dubbo-plugin/dubbo-mcp/src/main/java/org/apache/dubbo/mcp/transport/DubboMcpStreamableTransportProvider.java | {
"start": 2707,
"end": 26240
} | class ____ implements McpStreamableServerTransportProvider {
private static final ErrorTypeAwareLogger logger =
LoggerFactory.getErrorTypeAwareLogger(DubboMcpStreamableTransportProvider.class);
private Factory sessionFactory;
private final ObjectMapper objectMapper;
public static final String SESSION_ID_HEADER = "mcp-session-id";
private static final String LAST_EVENT_ID_HEADER = "Last-Event-ID";
/**
* TODO: This design is suboptimal. A mechanism should be implemented to remove the session object upon connection closure or timeout.
*/
private final ExpiringMap<String, McpStreamableServerSession> sessions;
public DubboMcpStreamableTransportProvider(ObjectMapper objectMapper) {
this(objectMapper, McpConstant.DEFAULT_SESSION_TIMEOUT);
}
public DubboMcpStreamableTransportProvider(ObjectMapper objectMapper, Integer expireSeconds) {
// Minimum expiration time is 60 seconds
if (expireSeconds != null) {
if (expireSeconds < 60) {
expireSeconds = 60;
}
} else {
expireSeconds = 60;
}
sessions = new ExpiringMap<>(expireSeconds, 30);
this.objectMapper = objectMapper;
sessions.getExpireThread().startExpiryIfNotStarted();
}
@Override
public void setSessionFactory(Factory sessionFactory) {
this.sessionFactory = sessionFactory;
}
@Override
public Mono<Void> notifyClients(String method, Object params) {
if (sessions.isEmpty()) {
return Mono.empty();
}
return Flux.fromIterable(sessions.values())
.flatMap(session -> session.sendNotification(method, params)
.doOnError(e -> logger.error(
COMMON_UNEXPECTED_EXCEPTION,
"",
"",
String.format(
"Failed to send message to session %s: %s", session.getId(), e.getMessage()),
e))
.onErrorComplete())
.then();
}
@Override
public Mono<Void> closeGracefully() {
return Flux.fromIterable(sessions.values())
.flatMap(McpStreamableServerSession::closeGracefully)
.then();
}
public void handleRequest(StreamObserver<ServerSentEvent<byte[]>> responseObserver) {
HttpRequest request = RpcContext.getServiceContext().getRequest(HttpRequest.class);
HttpResponse response = RpcContext.getServiceContext().getResponse(HttpResponse.class);
if (HttpMethods.isGet(request.method())) {
handleGet(responseObserver);
} else if (HttpMethods.isPost(request.method())) {
handlePost(responseObserver);
} else if (HttpMethods.DELETE.name().equals(request.method())) {
handleDelete(responseObserver);
} else {
// unSupport method
response.setStatus(HttpStatus.METHOD_NOT_ALLOWED.getCode());
response.setBody(new McpError("Method not allowed: " + request.method()).getJsonRpcError());
if (responseObserver != null) {
responseObserver.onError(HttpResult.builder()
.status(HttpStatus.METHOD_NOT_ALLOWED.getCode())
.body(new McpError("Method not allowed: " + request.method()).getJsonRpcError())
.build()
.toPayload());
responseObserver.onCompleted();
}
}
}
private void handleGet(StreamObserver<ServerSentEvent<byte[]>> responseObserver) {
HttpRequest request = RpcContext.getServiceContext().getRequest(HttpRequest.class);
HttpResponse response = RpcContext.getServiceContext().getResponse(HttpResponse.class);
List<String> badRequestErrors = new ArrayList<>();
// check Accept header
List<String> accepts = HttpUtils.parseAccept(request.accept());
if (CollectionUtils.isEmpty(accepts)
|| (!accepts.contains(MediaType.TEXT_EVENT_STREAM.getName())
&& !accepts.contains(MediaType.APPLICATION_JSON.getName()))) {
badRequestErrors.add("text/event-stream or application/json required in Accept header");
}
// check sessionId
String sessionId = request.header(SESSION_ID_HEADER);
if (StringUtils.isBlank(sessionId)) {
badRequestErrors.add("Session ID required in mcp-session-id header");
}
if (!badRequestErrors.isEmpty()) {
String combinedMessage = String.join("; ", badRequestErrors);
response.setStatus(HttpStatus.BAD_REQUEST.getCode());
response.setBody(new McpError(combinedMessage).getJsonRpcError());
if (responseObserver != null) {
responseObserver.onError(HttpResult.builder()
.status(HttpStatus.BAD_REQUEST.getCode())
.body(new McpError(combinedMessage).getJsonRpcError())
.build()
.toPayload());
responseObserver.onCompleted();
}
return;
}
// Find existing session
McpStreamableServerSession session = sessions.get(sessionId);
if (session == null) {
response.setStatus(HttpStatus.NOT_FOUND.getCode());
response.setBody(new McpError("Session not found").getJsonRpcError());
if (responseObserver != null) {
responseObserver.onError(HttpResult.builder()
.status(HttpStatus.NOT_FOUND.getCode())
.body(new McpError("Session not found").getJsonRpcError())
.build()
.toPayload());
responseObserver.onCompleted();
}
return;
}
// Check if this is a replay request
String lastEventId = request.header(LAST_EVENT_ID_HEADER);
if (StringUtils.isNotBlank(lastEventId)) {
// Handle replay request by calling session.replay()
try {
session.replay(lastEventId)
.subscribe(
message -> {
if (responseObserver != null) {
try {
String jsonData = objectMapper.writeValueAsString(message);
responseObserver.onNext(ServerSentEvent.<byte[]>builder()
.event("message")
.data(jsonData.getBytes(StandardCharsets.UTF_8))
.build());
} catch (Exception e) {
logger.error(
COMMON_UNEXPECTED_EXCEPTION,
"",
"",
String.format(
"Failed to serialize replay message for session %s: %s",
sessionId, e.getMessage()),
e);
}
}
},
error -> {
logger.error(
COMMON_UNEXPECTED_EXCEPTION,
"",
"",
String.format(
"Failed to replay messages for session %s with lastEventId %s: %s",
sessionId, lastEventId, error.getMessage()),
error);
response.setStatus(HttpStatus.INTERNAL_SERVER_ERROR.getCode());
response.setBody(new McpError("Failed to replay messages: " + error.getMessage())
.getJsonRpcError());
if (responseObserver != null) {
responseObserver.onError(HttpResult.builder()
.status(HttpStatus.INTERNAL_SERVER_ERROR.getCode())
.body(new McpError("Failed to replay messages: " + error.getMessage())
.getJsonRpcError())
.build()
.toPayload());
responseObserver.onCompleted();
}
},
() -> {
if (responseObserver != null) {
responseObserver.onCompleted();
}
});
} catch (Exception e) {
logger.error(
COMMON_UNEXPECTED_EXCEPTION,
"",
"",
String.format(
"Failed to handle replay for session %s with lastEventId %s: %s",
sessionId, lastEventId, e.getMessage()),
e);
response.setStatus(HttpStatus.INTERNAL_SERVER_ERROR.getCode());
response.setBody(new McpError("Failed to handle replay: " + e.getMessage()).getJsonRpcError());
if (responseObserver != null) {
responseObserver.onError(HttpResult.builder()
.status(HttpStatus.INTERNAL_SERVER_ERROR.getCode())
.body(new McpError("Failed to handle replay: " + e.getMessage()).getJsonRpcError())
.build()
.toPayload());
responseObserver.onCompleted();
}
}
} else {
// Send initial notification for new connection
session.sendNotification("tools").subscribe();
}
}
private void handlePost(StreamObserver<ServerSentEvent<byte[]>> responseObserver) {
HttpRequest request = RpcContext.getServiceContext().getRequest(HttpRequest.class);
HttpResponse response = RpcContext.getServiceContext().getResponse(HttpResponse.class);
List<String> badRequestErrors = new ArrayList<>();
McpStreamableServerSession session = null;
try {
// Check Accept header
List<String> accepts = HttpUtils.parseAccept(request.accept());
if (CollectionUtils.isEmpty(accepts)
|| (!accepts.contains(MediaType.TEXT_EVENT_STREAM.getName())
&& !accepts.contains(MediaType.APPLICATION_JSON.getName()))) {
badRequestErrors.add("text/event-stream or application/json required in Accept header");
}
// Read and deserialize JSON-RPC message from request body
String requestBody = IOUtils.read(request.inputStream(), StandardCharsets.UTF_8.name());
McpSchema.JSONRPCMessage message = McpSchema.deserializeJsonRpcMessage(objectMapper, requestBody);
// Check if it's an initialization request
if (message instanceof McpSchema.JSONRPCRequest
&& McpSchema.METHOD_INITIALIZE.equals(((McpSchema.JSONRPCRequest) message).method())) {
// New initialization request
if (!badRequestErrors.isEmpty()) {
String combinedMessage = String.join("; ", badRequestErrors);
response.setStatus(HttpStatus.BAD_REQUEST.getCode());
response.setBody(new McpError(combinedMessage).getJsonRpcError());
if (responseObserver != null) {
responseObserver.onError(HttpResult.builder()
.status(HttpStatus.BAD_REQUEST.getCode())
.body(new McpError(combinedMessage).getJsonRpcError())
.build()
.toPayload());
responseObserver.onCompleted();
}
return;
}
// Create new session
McpSchema.InitializeRequest initializeRequest = objectMapper.convertValue(
((McpSchema.JSONRPCRequest) message).params(), new TypeReference<>() {});
McpStreamableServerSession.McpStreamableServerSessionInit init =
sessionFactory.startSession(initializeRequest);
session = init.session();
sessions.put(session.getId(), session);
try {
McpSchema.InitializeResult initResult = init.initResult().block();
response.setHeader("Content-Type", MediaType.APPLICATION_JSON.getName());
response.setHeader(SESSION_ID_HEADER, session.getId());
response.setStatus(HttpStatus.OK.getCode());
String jsonResponse = objectMapper.writeValueAsString(new McpSchema.JSONRPCResponse(
McpSchema.JSONRPC_VERSION, ((McpSchema.JSONRPCRequest) message).id(), initResult, null));
if (responseObserver != null) {
responseObserver.onNext(ServerSentEvent.<byte[]>builder()
.event("message")
.data(jsonResponse.getBytes(StandardCharsets.UTF_8))
.build());
responseObserver.onCompleted();
}
return;
} catch (Exception e) {
response.setStatus(HttpStatus.INTERNAL_SERVER_ERROR.getCode());
response.setBody(new McpError("Failed to initialize session: " + e.getMessage()).getJsonRpcError());
if (responseObserver != null) {
responseObserver.onError(HttpResult.builder()
.status(HttpStatus.INTERNAL_SERVER_ERROR.getCode())
.body(new McpError("Failed to initialize session: " + e.getMessage()).getJsonRpcError())
.build()
.toPayload());
responseObserver.onCompleted();
}
return;
}
}
// Non-initialization request requires sessionId
String sessionId = request.header(SESSION_ID_HEADER);
if (StringUtils.isBlank(sessionId)) {
badRequestErrors.add("Session ID required in mcp-session-id header");
}
if (!badRequestErrors.isEmpty()) {
String combinedMessage = String.join("; ", badRequestErrors);
response.setStatus(HttpStatus.BAD_REQUEST.getCode());
response.setBody(new McpError(combinedMessage).getJsonRpcError());
if (responseObserver != null) {
responseObserver.onError(HttpResult.builder()
.status(HttpStatus.BAD_REQUEST.getCode())
.body(new McpError(combinedMessage).getJsonRpcError())
.build()
.toPayload());
responseObserver.onCompleted();
}
return;
}
// Find existing session
session = sessions.get(sessionId);
if (session == null) {
response.setStatus(HttpStatus.NOT_FOUND.getCode());
response.setBody(new McpError("Unknown sessionId: " + sessionId).getJsonRpcError());
if (responseObserver != null) {
responseObserver.onError(HttpResult.builder()
.status(HttpStatus.NOT_FOUND.getCode())
.body(new McpError("Unknown sessionId: " + sessionId).getJsonRpcError())
.build()
.toPayload());
responseObserver.onCompleted();
}
return;
}
// Refresh session expiration time
refreshSessionExpire(session);
if (message instanceof McpSchema.JSONRPCResponse) {
session.accept((McpSchema.JSONRPCResponse) message).block();
response.setStatus(HttpStatus.ACCEPTED.getCode());
if (responseObserver != null) {
responseObserver.onNext(ServerSentEvent.<byte[]>builder()
.event("response")
.data("{\"status\":\"accepted\"}".getBytes(StandardCharsets.UTF_8))
.build());
responseObserver.onCompleted();
}
} else if (message instanceof McpSchema.JSONRPCNotification) {
session.accept((McpSchema.JSONRPCNotification) message).block();
response.setStatus(HttpStatus.ACCEPTED.getCode());
if (responseObserver != null) {
responseObserver.onNext(ServerSentEvent.<byte[]>builder()
.event("response")
.data("{\"status\":\"accepted\"}".getBytes(StandardCharsets.UTF_8))
.build());
responseObserver.onCompleted();
}
} else if (message instanceof McpSchema.JSONRPCRequest) {
// For streaming responses, we need to return SSE
response.setHeader("Content-Type", MediaType.TEXT_EVENT_STREAM.getName());
response.setHeader("Cache-Control", "no-cache");
response.setHeader("Connection", "keep-alive");
response.setHeader("Access-Control-Allow-Origin", "*");
// Handle request stream
DubboMcpSessionTransport sessionTransport =
new DubboMcpSessionTransport(responseObserver, objectMapper);
session.responseStream((McpSchema.JSONRPCRequest) message, sessionTransport)
.block();
} else {
response.setStatus(HttpStatus.INTERNAL_SERVER_ERROR.getCode());
response.setBody(new McpError("Unknown message type").getJsonRpcError());
if (responseObserver != null) {
responseObserver.onError(HttpResult.builder()
.status(HttpStatus.INTERNAL_SERVER_ERROR.getCode())
.body(new McpError("Unknown message type").getJsonRpcError())
.build()
.toPayload());
responseObserver.onCompleted();
}
}
} catch (IOException e) {
response.setStatus(HttpStatus.BAD_REQUEST.getCode());
response.setBody(new McpError("Invalid message format: " + e.getMessage()).getJsonRpcError());
if (responseObserver != null) {
responseObserver.onError(HttpResult.builder()
.status(HttpStatus.BAD_REQUEST.getCode())
.body(new McpError("Invalid message format: " + e.getMessage()).getJsonRpcError())
.build()
.toPayload());
responseObserver.onCompleted();
}
} catch (Exception e) {
response.setStatus(HttpStatus.INTERNAL_SERVER_ERROR.getCode());
response.setBody(new McpError("Internal server error: " + e.getMessage()).getJsonRpcError());
if (responseObserver != null) {
responseObserver.onError(HttpResult.builder()
.status(HttpStatus.INTERNAL_SERVER_ERROR.getCode())
.body(new McpError("Internal server error: " + e.getMessage()).getJsonRpcError())
.build()
.toPayload());
responseObserver.onCompleted();
}
}
}
private void handleDelete(StreamObserver<ServerSentEvent<byte[]>> responseObserver) {
HttpRequest request = RpcContext.getServiceContext().getRequest(HttpRequest.class);
HttpResponse response = RpcContext.getServiceContext().getResponse(HttpResponse.class);
String sessionId = request.header(SESSION_ID_HEADER);
if (StringUtils.isBlank(sessionId)) {
response.setStatus(HttpStatus.BAD_REQUEST.getCode());
response.setBody(new McpError("Session ID required in mcp-session-id header").getJsonRpcError());
if (responseObserver != null) {
responseObserver.onError(HttpResult.builder()
.status(HttpStatus.BAD_REQUEST.getCode())
.body(new McpError("Session ID required in mcp-session-id header").getJsonRpcError())
.build()
.toPayload());
responseObserver.onCompleted();
}
return;
}
McpStreamableServerSession session = sessions.get(sessionId);
if (session == null) {
response.setStatus(HttpStatus.NOT_FOUND.getCode());
if (responseObserver != null) {
responseObserver.onCompleted();
}
return;
}
try {
session.delete().block();
sessions.remove(sessionId);
response.setStatus(HttpStatus.OK.getCode());
if (responseObserver != null) {
responseObserver.onNext(ServerSentEvent.<byte[]>builder()
.event("response")
.data("{\"status\":\"deleted\"}".getBytes(StandardCharsets.UTF_8))
.build());
responseObserver.onCompleted();
}
} catch (Exception e) {
response.setStatus(HttpStatus.INTERNAL_SERVER_ERROR.getCode());
response.setBody(new McpError(e.getMessage()).getJsonRpcError());
if (responseObserver != null) {
responseObserver.onError(HttpResult.builder()
.status(HttpStatus.INTERNAL_SERVER_ERROR.getCode())
.body(new McpError(e.getMessage()).getJsonRpcError())
.build()
.toPayload());
responseObserver.onCompleted();
}
}
}
private void refreshSessionExpire(McpStreamableServerSession session) {
sessions.put(session.getId(), session);
}
private static | DubboMcpStreamableTransportProvider |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NullStateStoreMetrics.java | {
"start": 1015,
"end": 1747
} | class ____ extends StateStoreMetrics {
public void addRead(long latency) {}
public long getReadOps() {
return -1;
}
public double getReadAvg() {
return -1;
}
public void addWrite(long latency) {}
public long getWriteOps() {
return -1;
}
public double getWriteAvg() {
return -1;
}
public void addFailure(long latency) { }
public long getFailureOps() {
return -1;
}
public double getFailureAvg() {
return -1;
}
public void addRemove(long latency) {}
public long getRemoveOps() {
return -1;
}
public double getRemoveAvg() {
return -1;
}
public void setCacheSize(String name, int size) {}
public void reset() {}
public void shutdown() {}
}
| NullStateStoreMetrics |
java | apache__dubbo | dubbo-common/src/main/java/org/apache/dubbo/common/utils/ClassUtils.java | {
"start": 7448,
"end": 8113
} | class ____ resource loading (but not necessarily for
* <code>Class.forName</code>, which accepts a <code>null</code> ClassLoader
* reference as well).
*
* @return the default ClassLoader (never <code>null</code>)
* @see java.lang.Thread#getContextClassLoader()
*/
public static ClassLoader getClassLoader() {
return getClassLoader(ClassUtils.class);
}
/**
* Same as <code>Class.forName()</code>, except that it works for primitive
* types.
*/
public static Class<?> forName(String name) throws ClassNotFoundException {
return forName(name, getClassLoader());
}
/**
* find | path |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherMetadata.java | {
"start": 944,
"end": 3710
} | class ____ extends AbstractNamedDiffable<Metadata.ProjectCustom> implements Metadata.ProjectCustom {
public static final String TYPE = "watcher";
private final boolean manuallyStopped;
public WatcherMetadata(boolean manuallyStopped) {
this.manuallyStopped = manuallyStopped;
}
public boolean manuallyStopped() {
return manuallyStopped;
}
@Override
public String getWriteableName() {
return TYPE;
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersion.minimumCompatible();
}
@Override
public EnumSet<Metadata.XContentContext> context() {
return EnumSet.of(Metadata.XContentContext.GATEWAY);
}
public WatcherMetadata(StreamInput streamInput) throws IOException {
this(streamInput.readBoolean());
}
public static NamedDiff<Metadata.ProjectCustom> readDiffFrom(StreamInput streamInput) throws IOException {
return readDiffFrom(Metadata.ProjectCustom.class, TYPE, streamInput);
}
@Override
public void writeTo(StreamOutput streamOutput) throws IOException {
streamOutput.writeBoolean(manuallyStopped);
}
@Override
public String toString() {
return "manuallyStopped[" + manuallyStopped + "]";
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
WatcherMetadata action = (WatcherMetadata) o;
return manuallyStopped == action.manuallyStopped;
}
@Override
public int hashCode() {
return Objects.hash(manuallyStopped);
}
public static Metadata.ProjectCustom fromXContent(XContentParser parser) throws IOException {
XContentParser.Token token;
Boolean manuallyStopped = null;
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
switch (token) {
case FIELD_NAME:
currentFieldName = parser.currentName();
break;
case VALUE_BOOLEAN:
if (Field.MANUALLY_STOPPED.match(currentFieldName, parser.getDeprecationHandler())) {
manuallyStopped = parser.booleanValue();
}
break;
}
}
if (manuallyStopped != null) {
return new WatcherMetadata(manuallyStopped);
}
return null;
}
@Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) {
return chunk((b, p) -> b.field(Field.MANUALLY_STOPPED.getPreferredName(), manuallyStopped));
}
| WatcherMetadata |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/interceptors/aroundconstruct/AroundConstructAppliedViaConstructorTest.java | {
"start": 2369,
"end": 2562
} | class ____ {
@Inject
@MyTransactional
public SimpleBean_ConstructorWithInject(DummyObject h) {
}
}
@Singleton
static | SimpleBean_ConstructorWithInject |
java | apache__kafka | streams/src/main/java/org/apache/kafka/streams/state/internals/CompositeKeyValueIterator.java | {
"start": 1018,
"end": 2293
} | class ____<K, V, StoreType> implements KeyValueIterator<K, V> {
private final Iterator<StoreType> storeIterator;
private final NextIteratorFunction<K, V, StoreType> nextIteratorFunction;
private KeyValueIterator<K, V> current;
CompositeKeyValueIterator(final Iterator<StoreType> underlying,
final NextIteratorFunction<K, V, StoreType> nextIteratorFunction) {
this.storeIterator = underlying;
this.nextIteratorFunction = nextIteratorFunction;
}
@Override
public void close() {
if (current != null) {
current.close();
current = null;
}
}
@Override
public K peekNextKey() {
throw new UnsupportedOperationException("peekNextKey not supported");
}
@Override
public boolean hasNext() {
while ((current == null || !current.hasNext()) && storeIterator.hasNext()) {
close();
current = nextIteratorFunction.apply(storeIterator.next());
}
return current != null && current.hasNext();
}
@Override
public KeyValue<K, V> next() {
if (!hasNext()) {
throw new NoSuchElementException();
}
return current.next();
}
}
| CompositeKeyValueIterator |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/inheritance/single/relation/PolymorphicRemovalTest.java | {
"start": 3886,
"end": 4669
} | class ____ {
@Id
@GeneratedValue
private Integer id;
@OneToMany(mappedBy = "type")
private Set<Employee> employees;
// used to expose the discriminator value for assertion checking
@Column(name = "TYPE", insertable = false, updatable = false, nullable = false, length = 31)
private String type;
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public Set<Employee> getEmployees() {
return employees;
}
public void setEmployees(Set<Employee> employees) {
this.employees = employees;
}
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
}
@Entity(name = "SalaryEmployee")
@DiscriminatorValue("SALARY")
@Audited
public static | EmployeeType |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/StreamToIterableTest.java | {
"start": 2408,
"end": 2860
} | class ____ {
void test(List<Integer> i) {
addAll(Stream.of(1, 2, 3)::iterator);
}
void addAll(Iterable<Integer> ints) {}
}
""")
.addOutputLines(
"Test.java",
"""
import static com.google.common.collect.ImmutableList.toImmutableList;
import java.util.List;
import java.util.stream.Stream;
| Test |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/cuk/CompositePropertyRefTest.java | {
"start": 1186,
"end": 4429
} | class ____ {
@Test
@SuppressWarnings({ "unchecked", "unused" })
public void testOneToOnePropertyRef(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
Person p = new Person();
p.setName( "Steve" );
p.setUserId( "steve" );
Address a = new Address();
a.setAddress( "Texas" );
a.setCountry( "USA" );
p.setAddress( a );
a.setPerson( p );
session.persist( p );
Person p2 = new Person();
p2.setName( "Max" );
p2.setUserId( "max" );
session.persist( p2 );
Account act = new Account();
act.setType( 'c' );
act.setUser( p2 );
p2.getAccounts().add( act );
session.persist( act );
session.flush();
session.clear();
p = session.get( Person.class, p.getId() ); //get address reference by outer join
p2 = session.get( Person.class, p2.getId() ); //get null address reference by outer join
assertNull( p2.getAddress() );
assertNotNull( p.getAddress() );
List l = session.createQuery( "from Person" ).list(); //pull address references for cache
assertEquals( 2, l.size() );
assertTrue( l.contains( p ) && l.contains( p2 ) );
session.clear();
l = session.createQuery( "from Person p order by p.name" )
.list(); //get address references by sequential selects
assertEquals( 2, l.size() );
assertNull( ( (Person) l.get( 0 ) ).getAddress() );
assertNotNull( ( (Person) l.get( 1 ) ).getAddress() );
session.clear();
l = session.createQuery( "from Person p left join fetch p.address a order by a.country" )
.list(); //get em by outer join
assertEquals( 2, l.size() );
if ( ( (Person) l.get( 0 ) ).getName().equals( "Max" ) ) {
assertNull( ( (Person) l.get( 0 ) ).getAddress() );
assertNotNull( ( (Person) l.get( 1 ) ).getAddress() );
}
else {
assertNull( ( (Person) l.get( 1 ) ).getAddress() );
assertNotNull( ( (Person) l.get( 0 ) ).getAddress() );
}
session.clear();
l = session.createQuery( "from Person p left join p.accounts", Person.class ).list();
for ( int i = 0; i < 2; i++ ) {
Person px = (Person) l.get( i );
Set accounts = px.getAccounts();
assertFalse( Hibernate.isInitialized( accounts ) );
// assertTrue( px.getAccounts().size()>0 || row[1]==null );
}
session.clear();
l = session.createQuery( "from Person p left join fetch p.accounts a order by p.name" ).list();
Person p0 = (Person) l.get( 0 );
assertTrue( Hibernate.isInitialized( p0.getAccounts() ) );
assertEquals( 1, p0.getAccounts().size() );
assertSame( ( (Account) p0.getAccounts().iterator().next() ).getUser(), p0 );
Person p1 = (Person) l.get( 1 );
assertTrue( Hibernate.isInitialized( p1.getAccounts() ) );
assertEquals( 0, p1.getAccounts().size() );
session.clear();
l = session.createQuery( "from Account a join fetch a.user" ).list();
session.clear();
l = session.createQuery( "from Person p left join fetch p.address" ).list();
session.clear();
}
);
}
@AfterEach
public void tearDown(SessionFactoryScope scope) {
scope.getSessionFactory().getSchemaManager().truncate();
}
}
| CompositePropertyRefTest |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/pool/DruidDataSourceTest8.java | {
"start": 330,
"end": 1326
} | class ____ extends TestCase {
private DruidDataSource dataSource;
protected void setUp() throws Exception {
dataSource = new DruidDataSource();
dataSource.setUrl("jdbc:mock:xxx");
dataSource.setInitialSize(1);
dataSource.getProxyFilters().add(new FilterAdapter() {
@Override
public ConnectionProxy connection_connect(FilterChain chain, Properties info) throws SQLException {
throw new Error();
}
});
}
protected void tearDown() throws Exception {
dataSource.close();
}
public void testInitError() throws Exception {
assertEquals(0, dataSource.getCreateErrorCount());
Throwable error = null;
try {
dataSource.init();
} catch (Throwable e) {
error = e;
}
assertNotNull(error);
assertTrue(dataSource.getCreateErrorCount() > 0);
dataSource.getCompositeData();
}
}
| DruidDataSourceTest8 |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_1772/Target.java | {
"start": 231,
"end": 495
} | class ____ {
private NestedTarget nestedTarget;
public NestedTarget getNestedTarget() {
return nestedTarget;
}
public void setNestedTarget(NestedTarget nestedTarget) {
this.nestedTarget = nestedTarget;
}
public static | Target |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/connector/source/lookup/LookupOptions.java | {
"start": 7840,
"end": 8005
} | enum ____ {
NONE,
PARTIAL,
FULL
}
/** Defines which {@link CacheReloadTrigger} to use. */
@PublicEvolving
public | LookupCacheType |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/onetoone/TrousersZip.java | {
"start": 303,
"end": 406
} | class ____ {
@Id
public Integer id;
@OneToOne(mappedBy = "zip")
public Trousers trousers;
}
| TrousersZip |
java | google__error-prone | check_api/src/main/java/com/google/errorprone/dataflow/nullnesspropagation/TrustingNullnessPropagation.java | {
"start": 2062,
"end": 2629
} | class ____ extends NullnessPropagationTransfer {
private static final long serialVersionUID = -3128676755493202966L;
TrustingNullnessPropagation() {
super(Nullness.NONNULL, TrustReturnAnnotation.INSTANCE);
}
@Override
Nullness fieldNullness(
@Nullable ClassAndField accessed,
@Nullable AccessPath path,
AccessPathValues<Nullness> store) {
if (accessed == null) {
return defaultAssumption; // optimistically assume non-null if we can't resolve
}
// TODO(kmb): Reverse subtyping between this | TrustingNullnessPropagation |
java | google__truth | extensions/proto/src/main/java/com/google/common/truth/extensions/proto/MapWithProtoValuesSubject.java | {
"start": 28166,
"end": 38239
} | class ____<M extends Message>
implements MapWithProtoValuesFluentAssertion<M> {
private final MapWithProtoValuesSubject<M> subject;
MapWithProtoValuesFluentAssertionImpl(MapWithProtoValuesSubject<M> subject) {
this.subject = subject;
}
@Override
public MapWithProtoValuesFluentAssertion<M> ignoringFieldAbsenceForValues() {
return subject.ignoringFieldAbsenceForValues();
}
@Override
public MapWithProtoValuesFluentAssertion<M> ignoringFieldAbsenceOfFieldsForValues(
int firstFieldNumber, int... rest) {
return subject.ignoringFieldAbsenceOfFieldsForValues(firstFieldNumber, rest);
}
@Override
public MapWithProtoValuesFluentAssertion<M> ignoringFieldAbsenceOfFieldsForValues(
Iterable<Integer> fieldNumbers) {
return subject.ignoringFieldAbsenceOfFieldsForValues(fieldNumbers);
}
@Override
public MapWithProtoValuesFluentAssertion<M> ignoringFieldAbsenceOfFieldDescriptorsForValues(
FieldDescriptor firstFieldDescriptor, FieldDescriptor... rest) {
return subject.ignoringFieldAbsenceOfFieldDescriptorsForValues(firstFieldDescriptor, rest);
}
@Override
public MapWithProtoValuesFluentAssertion<M> ignoringFieldAbsenceOfFieldDescriptorsForValues(
Iterable<FieldDescriptor> fieldDescriptors) {
return subject.ignoringFieldAbsenceOfFieldDescriptorsForValues(fieldDescriptors);
}
@Override
public MapWithProtoValuesFluentAssertion<M> ignoringRepeatedFieldOrderForValues() {
return subject.ignoringRepeatedFieldOrderForValues();
}
@Override
public MapWithProtoValuesFluentAssertion<M> ignoringRepeatedFieldOrderOfFieldsForValues(
int firstFieldNumber, int... rest) {
return subject.ignoringRepeatedFieldOrderOfFieldsForValues(firstFieldNumber, rest);
}
@Override
public MapWithProtoValuesFluentAssertion<M> ignoringRepeatedFieldOrderOfFieldsForValues(
Iterable<Integer> fieldNumbers) {
return subject.ignoringRepeatedFieldOrderOfFieldsForValues(fieldNumbers);
}
@Override
public MapWithProtoValuesFluentAssertion<M>
ignoringRepeatedFieldOrderOfFieldDescriptorsForValues(
FieldDescriptor firstFieldDescriptor, FieldDescriptor... rest) {
return subject.ignoringRepeatedFieldOrderOfFieldDescriptorsForValues(
firstFieldDescriptor, rest);
}
@Override
public MapWithProtoValuesFluentAssertion<M>
ignoringRepeatedFieldOrderOfFieldDescriptorsForValues(
Iterable<FieldDescriptor> fieldDescriptors) {
return subject.ignoringRepeatedFieldOrderOfFieldDescriptorsForValues(fieldDescriptors);
}
@Override
public MapWithProtoValuesFluentAssertion<M> ignoringExtraRepeatedFieldElementsForValues() {
return subject.ignoringExtraRepeatedFieldElementsForValues();
}
@Override
public MapWithProtoValuesFluentAssertion<M> ignoringExtraRepeatedFieldElementsOfFieldsForValues(
int firstFieldNumber, int... rest) {
return subject.ignoringExtraRepeatedFieldElementsOfFieldsForValues(firstFieldNumber, rest);
}
@Override
public MapWithProtoValuesFluentAssertion<M> ignoringExtraRepeatedFieldElementsOfFieldsForValues(
Iterable<Integer> fieldNumbers) {
return subject.ignoringExtraRepeatedFieldElementsOfFieldsForValues(fieldNumbers);
}
@Override
public MapWithProtoValuesFluentAssertion<M>
ignoringExtraRepeatedFieldElementsOfFieldDescriptorsForValues(
FieldDescriptor firstFieldDescriptor, FieldDescriptor... rest) {
return subject.ignoringExtraRepeatedFieldElementsOfFieldDescriptorsForValues(
firstFieldDescriptor, rest);
}
@Override
public MapWithProtoValuesFluentAssertion<M>
ignoringExtraRepeatedFieldElementsOfFieldDescriptorsForValues(
Iterable<FieldDescriptor> fieldDescriptors) {
return subject.ignoringExtraRepeatedFieldElementsOfFieldDescriptorsForValues(
fieldDescriptors);
}
@Override
public MapWithProtoValuesFluentAssertion<M> usingDoubleToleranceForValues(double tolerance) {
return subject.usingDoubleToleranceForValues(tolerance);
}
@Override
public MapWithProtoValuesFluentAssertion<M> usingDoubleToleranceForFieldsForValues(
double tolerance, int firstFieldNumber, int... rest) {
return subject.usingDoubleToleranceForFieldsForValues(tolerance, firstFieldNumber, rest);
}
@Override
public MapWithProtoValuesFluentAssertion<M> usingDoubleToleranceForFieldsForValues(
double tolerance, Iterable<Integer> fieldNumbers) {
return subject.usingDoubleToleranceForFieldsForValues(tolerance, fieldNumbers);
}
@Override
public MapWithProtoValuesFluentAssertion<M> usingDoubleToleranceForFieldDescriptorsForValues(
double tolerance, FieldDescriptor firstFieldDescriptor, FieldDescriptor... rest) {
return subject.usingDoubleToleranceForFieldDescriptorsForValues(
tolerance, firstFieldDescriptor, rest);
}
@Override
public MapWithProtoValuesFluentAssertion<M> usingDoubleToleranceForFieldDescriptorsForValues(
double tolerance, Iterable<FieldDescriptor> fieldDescriptors) {
return subject.usingDoubleToleranceForFieldDescriptorsForValues(tolerance, fieldDescriptors);
}
@Override
public MapWithProtoValuesFluentAssertion<M> usingFloatToleranceForValues(float tolerance) {
return subject.usingFloatToleranceForValues(tolerance);
}
@Override
public MapWithProtoValuesFluentAssertion<M> usingFloatToleranceForFieldsForValues(
float tolerance, int firstFieldNumber, int... rest) {
return subject.usingFloatToleranceForFieldsForValues(tolerance, firstFieldNumber, rest);
}
@Override
public MapWithProtoValuesFluentAssertion<M> usingFloatToleranceForFieldsForValues(
float tolerance, Iterable<Integer> fieldNumbers) {
return subject.usingFloatToleranceForFieldsForValues(tolerance, fieldNumbers);
}
@Override
public MapWithProtoValuesFluentAssertion<M> usingFloatToleranceForFieldDescriptorsForValues(
float tolerance, FieldDescriptor firstFieldDescriptor, FieldDescriptor... rest) {
return subject.usingFloatToleranceForFieldDescriptorsForValues(
tolerance, firstFieldDescriptor, rest);
}
@Override
public MapWithProtoValuesFluentAssertion<M> usingFloatToleranceForFieldDescriptorsForValues(
float tolerance, Iterable<FieldDescriptor> fieldDescriptors) {
return subject.usingFloatToleranceForFieldDescriptorsForValues(tolerance, fieldDescriptors);
}
@Override
public MapWithProtoValuesFluentAssertion<M> comparingExpectedFieldsOnlyForValues() {
return subject.comparingExpectedFieldsOnlyForValues();
}
@Override
public MapWithProtoValuesFluentAssertion<M> withPartialScopeForValues(FieldScope fieldScope) {
return subject.withPartialScopeForValues(fieldScope);
}
@Override
public MapWithProtoValuesFluentAssertion<M> ignoringFieldsForValues(
int firstFieldNumber, int... rest) {
return subject.ignoringFieldsForValues(firstFieldNumber, rest);
}
@Override
public MapWithProtoValuesFluentAssertion<M> ignoringFieldsForValues(
Iterable<Integer> fieldNumbers) {
return subject.ignoringFieldsForValues(fieldNumbers);
}
@Override
public MapWithProtoValuesFluentAssertion<M> ignoringFieldDescriptorsForValues(
FieldDescriptor firstFieldDescriptor, FieldDescriptor... rest) {
return subject.ignoringFieldDescriptorsForValues(firstFieldDescriptor, rest);
}
@Override
public MapWithProtoValuesFluentAssertion<M> ignoringFieldDescriptorsForValues(
Iterable<FieldDescriptor> fieldDescriptors) {
return subject.ignoringFieldDescriptorsForValues(fieldDescriptors);
}
@Override
public MapWithProtoValuesFluentAssertion<M> ignoringFieldScopeForValues(FieldScope fieldScope) {
return subject.ignoringFieldScopeForValues(fieldScope);
}
@Override
public MapWithProtoValuesFluentAssertion<M> reportingMismatchesOnlyForValues() {
return subject.reportingMismatchesOnlyForValues();
}
@Override
public MapWithProtoValuesFluentAssertion<M> unpackingAnyUsingForValues(
TypeRegistry typeRegistry, ExtensionRegistry extensionRegistry) {
return subject.unpackingAnyUsingForValues(typeRegistry, extensionRegistry);
}
@Override
public void containsEntry(@Nullable Object expectedKey, @Nullable M expectedValue) {
subject
.usingCorrespondence(Arrays.asList(expectedValue))
.containsEntry(expectedKey, expectedValue);
}
@Override
public void doesNotContainEntry(@Nullable Object excludedKey, @Nullable M excludedValue) {
subject
.usingCorrespondence(Arrays.asList(excludedValue))
.doesNotContainEntry(excludedKey, excludedValue);
}
@Override
@CanIgnoreReturnValue
@SuppressWarnings("unchecked") // ClassCastException is fine
public Ordered containsExactly(@Nullable Object k0, @Nullable M v0, @Nullable Object... rest) {
List<M> expectedValues = new ArrayList<>();
expectedValues.add(v0);
for (int i = 1; i < rest.length; i += 2) {
expectedValues.add((M) rest[i]);
}
return subject.usingCorrespondence(expectedValues).containsExactly(k0, v0, rest);
}
@Override
@CanIgnoreReturnValue
public Ordered containsExactlyEntriesIn(Map<?, ? extends M> expectedMap) {
return subject
.usingCorrespondence(expectedMap.values())
.containsExactlyEntriesIn(expectedMap);
}
@SuppressWarnings("DoNotCall")
@Override
@Deprecated
public boolean equals(@Nullable Object o) {
return subject.equals(o);
}
@SuppressWarnings("DoNotCall")
@Override
@Deprecated
public int hashCode() {
return subject.hashCode();
}
}
}
| MapWithProtoValuesFluentAssertionImpl |
java | apache__camel | components/camel-ldap/src/test/java/org/apache/directory/server/core/integ5/DirectoryExtension.java | {
"start": 3998,
"end": 4549
} | class ____ then
DirectoryServiceFactory dsf = new DefaultDirectoryServiceFactory();
classDirectoryService = dsf.getDirectoryService();
// enable CL explicitly cause we are not using DSAnnotationProcessor
classDirectoryService.getChangeLog().setEnabled(true);
dsf.init("default" + UUID.randomUUID().toString());
// Load the schemas
DSAnnotationProcessor.loadSchemas(description, classDirectoryService);
}
// Apply the | DS |
java | elastic__elasticsearch | modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java | {
"start": 1696,
"end": 19780
} | class ____ extends ESTestCase {
// a temporary directory that mmdb files can be copied to and read from
private Path tmpDir;
@Before
public void setup() {
tmpDir = createTempDir();
}
@After
public void cleanup() throws IOException {
IOUtils.rm(tmpDir);
}
public void testMaxmindCity() throws Exception {
String ip = "2602:306:33d3:8000::3257:9652";
GeoIpProcessor processor = new GeoIpProcessor(
GEOIP_TYPE, // n.b. this is a "geoip" processor
randomAlphaOfLength(10),
null,
"source_field",
loader("GeoLite2-City.mmdb"),
() -> true,
"target_field",
getMaxmindCityLookup(),
false,
false,
"filename"
);
Map<String, Object> document = new HashMap<>();
document.put("source_field", ip);
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
processor.execute(ingestDocument);
assertThat(ingestDocument.getSourceAndMetadata().get("source_field"), equalTo(ip));
@SuppressWarnings("unchecked")
Map<String, Object> data = (Map<String, Object>) ingestDocument.getSourceAndMetadata().get("target_field");
assertThat(data, notNullValue());
assertThat(data.get("ip"), equalTo(ip));
assertThat(data.get("city_name"), equalTo("Homestead"));
// see MaxmindIpDataLookupsTests for more tests of the data lookup behavior
}
public void testIpinfoGeolocation() throws Exception {
String ip = "72.20.12.220";
GeoIpProcessor processor = new GeoIpProcessor(
IP_LOCATION_TYPE, // n.b. this is an "ip_location" processor
randomAlphaOfLength(10),
null,
"source_field",
loader("ipinfo/ip_geolocation_standard_sample.mmdb"),
() -> true,
"target_field",
getIpinfoGeolocationLookup(),
false,
false,
"filename"
);
Map<String, Object> document = new HashMap<>();
document.put("source_field", ip);
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
processor.execute(ingestDocument);
assertThat(ingestDocument.getSourceAndMetadata().get("source_field"), equalTo(ip));
@SuppressWarnings("unchecked")
Map<String, Object> data = (Map<String, Object>) ingestDocument.getSourceAndMetadata().get("target_field");
assertThat(data, notNullValue());
assertThat(data.get("ip"), equalTo(ip));
assertThat(data.get("city_name"), equalTo("Chicago"));
// see IpinfoIpDataLookupsTests for more tests of the data lookup behavior
}
public void testNullValueWithIgnoreMissing() throws Exception {
GeoIpProcessor processor = new GeoIpProcessor(
GEOIP_TYPE,
randomAlphaOfLength(10),
null,
"source_field",
loader("GeoLite2-City.mmdb"),
() -> true,
"target_field",
getMaxmindCityLookup(),
true,
false,
"filename"
);
IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(
random(),
Collections.singletonMap("source_field", null)
);
IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);
processor.execute(ingestDocument);
assertIngestDocument(originalIngestDocument, ingestDocument);
}
public void testNonExistentWithIgnoreMissing() throws Exception {
GeoIpProcessor processor = new GeoIpProcessor(
GEOIP_TYPE,
randomAlphaOfLength(10),
null,
"source_field",
loader("GeoLite2-City.mmdb"),
() -> true,
"target_field",
getMaxmindCityLookup(),
true,
false,
"filename"
);
IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Map.of());
IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);
processor.execute(ingestDocument);
assertIngestDocument(originalIngestDocument, ingestDocument);
}
public void testNullWithoutIgnoreMissing() {
GeoIpProcessor processor = new GeoIpProcessor(
GEOIP_TYPE,
randomAlphaOfLength(10),
null,
"source_field",
loader("GeoLite2-City.mmdb"),
() -> true,
"target_field",
getMaxmindCityLookup(),
false,
false,
"filename"
);
IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(
random(),
Collections.singletonMap("source_field", null)
);
IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);
Exception exception = expectThrows(Exception.class, () -> processor.execute(ingestDocument));
assertThat(exception.getMessage(), equalTo("field [source_field] is null, cannot extract geoip information."));
}
public void testNonExistentWithoutIgnoreMissing() {
GeoIpProcessor processor = new GeoIpProcessor(
GEOIP_TYPE,
randomAlphaOfLength(10),
null,
"source_field",
loader("GeoLite2-City.mmdb"),
() -> true,
"target_field",
getMaxmindCityLookup(),
false,
false,
"filename"
);
IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Map.of());
IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);
Exception exception = expectThrows(Exception.class, () -> processor.execute(ingestDocument));
assertThat(exception.getMessage(), equalTo("field [source_field] not present as part of path [source_field]"));
}
public void testAddressIsNotInTheDatabase() throws Exception {
GeoIpProcessor processor = new GeoIpProcessor(
GEOIP_TYPE,
randomAlphaOfLength(10),
null,
"source_field",
loader("GeoLite2-City.mmdb"),
() -> true,
"target_field",
getMaxmindCityLookup(),
false,
false,
"filename"
);
Map<String, Object> document = new HashMap<>();
document.put("source_field", "127.0.0.1");
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
processor.execute(ingestDocument);
assertThat(ingestDocument.getSourceAndMetadata().containsKey("target_field"), is(false));
}
/**
* Tests that an exception in the IpDataLookup is propagated out of the GeoIpProcessor's execute method
*/
public void testExceptionPropagates() {
GeoIpProcessor processor = new GeoIpProcessor(
GEOIP_TYPE,
randomAlphaOfLength(10),
null,
"source_field",
loader("GeoLite2-City.mmdb"),
() -> true,
"target_field",
getMaxmindCityLookup(),
false,
false,
"filename"
);
Map<String, Object> document = new HashMap<>();
document.put("source_field", "www.google.com");
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
Exception e = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument));
assertThat(e.getMessage(), containsString("not an IP string literal"));
}
public void testListAllValid() throws Exception {
GeoIpProcessor processor = new GeoIpProcessor(
GEOIP_TYPE,
randomAlphaOfLength(10),
null,
"source_field",
loader("GeoLite2-City.mmdb"),
() -> true,
"target_field",
getMaxmindCityLookup(),
false,
false,
"filename"
);
Map<String, Object> document = new HashMap<>();
document.put("source_field", List.of("8.8.8.8", "82.171.64.0"));
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
processor.execute(ingestDocument);
@SuppressWarnings("unchecked")
List<Map<String, Object>> data = (List<Map<String, Object>>) ingestDocument.getSourceAndMetadata().get("target_field");
assertThat(data, notNullValue());
assertThat(data.size(), equalTo(2));
assertThat(data.get(0).get("location"), equalTo(Map.of("lat", 37.751d, "lon", -97.822d)));
assertThat(data.get(1).get("city_name"), equalTo("Hoensbroek"));
}
public void testListPartiallyValid() throws Exception {
GeoIpProcessor processor = new GeoIpProcessor(
GEOIP_TYPE,
randomAlphaOfLength(10),
null,
"source_field",
loader("GeoLite2-City.mmdb"),
() -> true,
"target_field",
getMaxmindCityLookup(),
false,
false,
"filename"
);
Map<String, Object> document = new HashMap<>();
document.put("source_field", List.of("8.8.8.8", "127.0.0.1"));
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
processor.execute(ingestDocument);
@SuppressWarnings("unchecked")
List<Map<String, Object>> data = (List<Map<String, Object>>) ingestDocument.getSourceAndMetadata().get("target_field");
assertThat(data, notNullValue());
assertThat(data.size(), equalTo(2));
assertThat(data.get(0).get("location"), equalTo(Map.of("lat", 37.751d, "lon", -97.822d)));
assertThat(data.get(1), nullValue());
}
public void testListNoMatches() throws Exception {
GeoIpProcessor processor = new GeoIpProcessor(
GEOIP_TYPE,
randomAlphaOfLength(10),
null,
"source_field",
loader("GeoLite2-City.mmdb"),
() -> true,
"target_field",
getMaxmindCityLookup(),
false,
false,
"filename"
);
Map<String, Object> document = new HashMap<>();
document.put("source_field", List.of("127.0.0.1", "127.0.0.1"));
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
processor.execute(ingestDocument);
assertFalse(ingestDocument.hasField("target_field"));
}
public void testListDatabaseReferenceCounting() throws Exception {
AtomicBoolean closeCheck = new AtomicBoolean(false);
var loader = loader("GeoLite2-City.mmdb", closeCheck);
GeoIpProcessor processor = new GeoIpProcessor(GEOIP_TYPE, randomAlphaOfLength(10), null, "source_field", () -> {
loader.preLookup();
return loader;
}, () -> true, "target_field", getMaxmindCityLookup(), false, false, "filename");
Map<String, Object> document = new HashMap<>();
document.put("source_field", List.of("8.8.8.8", "82.171.64.0"));
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
processor.execute(ingestDocument);
@SuppressWarnings("unchecked")
List<Map<String, Object>> data = (List<Map<String, Object>>) ingestDocument.getSourceAndMetadata().get("target_field");
assertThat(data, notNullValue());
assertThat(data.size(), equalTo(2));
assertThat(data.get(0).get("location"), equalTo(Map.of("lat", 37.751d, "lon", -97.822d)));
assertThat(data.get(1).get("city_name"), equalTo("Hoensbroek"));
// Check the loader's reference count and attempt to close
assertThat(loader.current(), equalTo(0));
loader.shutdown();
assertTrue(closeCheck.get());
}
public void testListFirstOnly() throws Exception {
GeoIpProcessor processor = new GeoIpProcessor(
GEOIP_TYPE,
randomAlphaOfLength(10),
null,
"source_field",
loader("GeoLite2-City.mmdb"),
() -> true,
"target_field",
getMaxmindCityLookup(),
false,
true,
"filename"
);
Map<String, Object> document = new HashMap<>();
document.put("source_field", List.of("8.8.8.8", "127.0.0.1"));
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
processor.execute(ingestDocument);
@SuppressWarnings("unchecked")
Map<String, Object> data = (Map<String, Object>) ingestDocument.getSourceAndMetadata().get("target_field");
assertThat(data, notNullValue());
assertThat(data.get("location"), equalTo(Map.of("lat", 37.751d, "lon", -97.822d)));
}
public void testListFirstOnlyNoMatches() throws Exception {
GeoIpProcessor processor = new GeoIpProcessor(
GEOIP_TYPE,
randomAlphaOfLength(10),
null,
"source_field",
loader("GeoLite2-City.mmdb"),
() -> true,
"target_field",
getMaxmindCityLookup(),
false,
true,
"filename"
);
Map<String, Object> document = new HashMap<>();
document.put("source_field", List.of("127.0.0.1", "127.0.0.2"));
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
processor.execute(ingestDocument);
assertThat(ingestDocument.getSourceAndMetadata().containsKey("target_field"), is(false));
}
public void testInvalidDatabase() throws Exception {
GeoIpProcessor processor = new GeoIpProcessor(
GEOIP_TYPE,
randomAlphaOfLength(10),
null,
"source_field",
loader("GeoLite2-City.mmdb"),
() -> false,
"target_field",
getMaxmindCityLookup(),
false,
true,
"filename"
);
Map<String, Object> document = new HashMap<>();
document.put("source_field", List.of("127.0.0.1", "127.0.0.2"));
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
processor.execute(ingestDocument);
assertThat(ingestDocument.getSourceAndMetadata().containsKey("target_field"), is(false));
assertThat(ingestDocument.getSourceAndMetadata(), hasEntry("tags", List.of("_geoip_expired_database")));
}
public void testNoDatabase() throws Exception {
GeoIpProcessor processor = new GeoIpProcessor(
GEOIP_TYPE,
randomAlphaOfLength(10),
null,
"source_field",
() -> null,
() -> true,
"target_field",
getMaxmindCityLookup(),
false,
false,
"GeoLite2-City"
);
Map<String, Object> document = new HashMap<>();
document.put("source_field", "8.8.8.8");
IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);
processor.execute(ingestDocument);
assertThat(ingestDocument.getSourceAndMetadata().containsKey("target_field"), is(false));
assertThat(ingestDocument.getSourceAndMetadata(), hasEntry("tags", List.of("_geoip_database_unavailable_GeoLite2-City")));
}
public void testNoDatabase_ignoreMissing() throws Exception {
GeoIpProcessor processor = new GeoIpProcessor(
GEOIP_TYPE,
randomAlphaOfLength(10),
null,
"source_field",
() -> null,
() -> true,
"target_field",
getMaxmindCityLookup(),
true,
false,
"GeoLite2-City"
);
Map<String, Object> document = new HashMap<>();
document.put("source_field", "8.8.8.8");
IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);
processor.execute(ingestDocument);
assertIngestDocument(originalIngestDocument, ingestDocument);
}
private static IpDataLookup getMaxmindCityLookup() {
final var database = Database.City;
return MaxmindIpDataLookups.getMaxmindLookup(database).apply(database.properties());
}
private static IpDataLookup getIpinfoGeolocationLookup() {
final var database = Database.CityV2;
return IpinfoIpDataLookups.getIpinfoLookup(database).apply(database.properties());
}
private CheckedSupplier<IpDatabase, IOException> loader(final String path) {
var loader = loader(path, null);
return () -> loader;
}
@FixForMultiProject(description = "Replace DEFAULT project")
private DatabaseReaderLazyLoader loader(final String databaseName, final AtomicBoolean closed) {
int last = databaseName.lastIndexOf("/");
final Path path = tmpDir.resolve(last == -1 ? databaseName : databaseName.substring(last + 1));
copyDatabase(databaseName, path);
final GeoIpCache cache = new GeoIpCache(1000);
return new DatabaseReaderLazyLoader(ProjectId.DEFAULT, cache, path, null) {
@Override
protected void doShutdown() throws IOException {
if (closed != null) {
closed.set(true);
}
super.doShutdown();
}
};
}
}
| GeoIpProcessorTests |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/deser/generic/GenericTest5.java | {
"start": 253,
"end": 955
} | class ____ extends TestCase {
public void test_generic() {
Pair<Long> p1 = new Pair<Long>();
p1.label = "p1";
p1.value = 3L;
String p1Json = JSON.toJSONString(p1);
JSON.parseObject(p1Json, LongPair.class);
JSON.parseObject(p1Json, StringPair.class);
JSONObject p1Jobj = JSON.parseObject(p1Json);
TypeReference<Pair<Long>> tr = new TypeReference<Pair<Long>>() {};
Pair<Long> p2 = null;
p2 = JSON.parseObject(p1Json, tr);
assertNotNull(p2); // 基于JSON字符串的转化正常
p2 = p1Jobj.toJavaObject(tr);
assertNotNull(p2);
assertEquals(Long.valueOf(3), p2.value);
}
public static | GenericTest5 |
java | apache__flink | flink-libraries/flink-cep/src/test/java/org/apache/flink/cep/operator/CEPMigrationTest.java | {
"start": 2840,
"end": 25666
} | class ____ implements MigrationTest {
private final FlinkVersion migrateVersion;
@Parameterized.Parameters(name = "Migration Savepoint: {0}")
public static Collection<FlinkVersion> parameters() {
return FlinkVersion.rangeOf(
FlinkVersion.v1_20, MigrationTest.getMostRecentlyPublishedVersion());
}
public CEPMigrationTest(FlinkVersion migrateVersion) {
this.migrateVersion = migrateVersion;
}
@SnapshotsGenerator
public void writeAfterBranchingPatternSnapshot(FlinkVersion flinkGenerateSavepointVersion)
throws Exception {
KeySelector<Event, Integer> keySelector =
new KeySelector<Event, Integer>() {
private static final long serialVersionUID = -4873366487571254798L;
@Override
public Integer getKey(Event value) throws Exception {
return value.getId();
}
};
final Event startEvent = new Event(42, "start", 1.0);
final SubEvent middleEvent1 = new SubEvent(42, "foo1", 1.0, 10.0);
final SubEvent middleEvent2 = new SubEvent(42, "foo2", 2.0, 10.0);
OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
new KeyedOneInputStreamOperatorTestHarness<>(
CepOperatorTestUtilities.getKeyedCepOperator(false, new NFAFactory()),
keySelector,
BasicTypeInfo.INT_TYPE_INFO);
try {
harness.setup();
harness.open();
harness.processElement(new StreamRecord<Event>(startEvent, 1));
harness.processElement(new StreamRecord<Event>(new Event(42, "foobar", 1.0), 2));
harness.processElement(
new StreamRecord<Event>(new SubEvent(42, "barfoo", 1.0, 5.0), 3));
harness.processElement(new StreamRecord<Event>(middleEvent1, 2));
harness.processElement(new StreamRecord<Event>(middleEvent2, 3));
harness.processWatermark(new Watermark(5));
// do snapshot and save to file
OperatorSubtaskState snapshot = harness.snapshot(0L, 0L);
OperatorSnapshotUtil.writeStateHandle(
snapshot,
"src/test/resources/cep-migration-after-branching-flink"
+ flinkGenerateSavepointVersion
+ "-snapshot");
} finally {
harness.close();
}
}
@Test
public void testRestoreAfterBranchingPattern() throws Exception {
KeySelector<Event, Integer> keySelector =
new KeySelector<Event, Integer>() {
private static final long serialVersionUID = -4873366487571254798L;
@Override
public Integer getKey(Event value) throws Exception {
return value.getId();
}
};
final Event startEvent = new Event(42, "start", 1.0);
final SubEvent middleEvent1 = new SubEvent(42, "foo1", 1.0, 10.0);
final SubEvent middleEvent2 = new SubEvent(42, "foo2", 2.0, 10.0);
final Event endEvent = new Event(42, "end", 1.0);
OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
new KeyedOneInputStreamOperatorTestHarness<>(
CepOperatorTestUtilities.getKeyedCepOperator(false, new NFAFactory()),
keySelector,
BasicTypeInfo.INT_TYPE_INFO);
try {
harness.setup();
harness.initializeState(
OperatorSnapshotUtil.getResourceFilename(
"cep-migration-after-branching-flink" + migrateVersion + "-snapshot"));
harness.open();
harness.processElement(new StreamRecord<>(new Event(42, "start", 1.0), 4));
harness.processElement(new StreamRecord<>(endEvent, 5));
harness.processWatermark(new Watermark(20));
ConcurrentLinkedQueue<Object> result = harness.getOutput();
// watermark and 2 results
assertEquals(3, result.size());
Object resultObject1 = result.poll();
assertTrue(resultObject1 instanceof StreamRecord);
StreamRecord<?> resultRecord1 = (StreamRecord<?>) resultObject1;
assertTrue(resultRecord1.getValue() instanceof Map);
Object resultObject2 = result.poll();
assertTrue(resultObject2 instanceof StreamRecord);
StreamRecord<?> resultRecord2 = (StreamRecord<?>) resultObject2;
assertTrue(resultRecord2.getValue() instanceof Map);
@SuppressWarnings("unchecked")
Map<String, List<Event>> patternMap1 =
(Map<String, List<Event>>) resultRecord1.getValue();
assertEquals(startEvent, patternMap1.get("start").get(0));
assertEquals(middleEvent1, patternMap1.get("middle").get(0));
assertEquals(endEvent, patternMap1.get("end").get(0));
@SuppressWarnings("unchecked")
Map<String, List<Event>> patternMap2 =
(Map<String, List<Event>>) resultRecord2.getValue();
assertEquals(startEvent, patternMap2.get("start").get(0));
assertEquals(middleEvent2, patternMap2.get("middle").get(0));
assertEquals(endEvent, patternMap2.get("end").get(0));
// and now go for a checkpoint with the new serializers
final Event startEvent1 = new Event(42, "start", 2.0);
final SubEvent middleEvent3 = new SubEvent(42, "foo", 1.0, 11.0);
final Event endEvent1 = new Event(42, "end", 2.0);
harness.processElement(new StreamRecord<Event>(startEvent1, 21));
harness.processElement(new StreamRecord<Event>(middleEvent3, 23));
// simulate snapshot/restore with some elements in internal sorting queue
OperatorSubtaskState snapshot = harness.snapshot(1L, 1L);
harness.close();
harness =
new KeyedOneInputStreamOperatorTestHarness<>(
CepOperatorTestUtilities.getKeyedCepOperator(false, new NFAFactory()),
keySelector,
BasicTypeInfo.INT_TYPE_INFO);
harness.setup();
harness.initializeState(snapshot);
harness.open();
harness.processElement(new StreamRecord<>(endEvent1, 25));
harness.processWatermark(new Watermark(50));
result = harness.getOutput();
// watermark and the result
assertEquals(2, result.size());
Object resultObject3 = result.poll();
assertTrue(resultObject3 instanceof StreamRecord);
StreamRecord<?> resultRecord3 = (StreamRecord<?>) resultObject3;
assertTrue(resultRecord3.getValue() instanceof Map);
@SuppressWarnings("unchecked")
Map<String, List<Event>> patternMap3 =
(Map<String, List<Event>>) resultRecord3.getValue();
assertEquals(startEvent1, patternMap3.get("start").get(0));
assertEquals(middleEvent3, patternMap3.get("middle").get(0));
assertEquals(endEvent1, patternMap3.get("end").get(0));
} finally {
harness.close();
}
}
@SnapshotsGenerator
public void writeStartingNewPatternAfterMigrationSnapshot(
FlinkVersion flinkGenerateSavepointVersion) throws Exception {
KeySelector<Event, Integer> keySelector =
new KeySelector<Event, Integer>() {
private static final long serialVersionUID = -4873366487571254798L;
@Override
public Integer getKey(Event value) throws Exception {
return value.getId();
}
};
final Event startEvent1 = new Event(42, "start", 1.0);
final SubEvent middleEvent1 = new SubEvent(42, "foo1", 1.0, 10.0);
OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
new KeyedOneInputStreamOperatorTestHarness<>(
CepOperatorTestUtilities.getKeyedCepOperator(false, new NFAFactory()),
keySelector,
BasicTypeInfo.INT_TYPE_INFO);
try {
harness.setup();
harness.open();
harness.processElement(new StreamRecord<Event>(startEvent1, 1));
harness.processElement(new StreamRecord<Event>(new Event(42, "foobar", 1.0), 2));
harness.processElement(
new StreamRecord<Event>(new SubEvent(42, "barfoo", 1.0, 5.0), 3));
harness.processElement(new StreamRecord<Event>(middleEvent1, 2));
harness.processWatermark(new Watermark(5));
// do snapshot and save to file
OperatorSubtaskState snapshot = harness.snapshot(0L, 0L);
OperatorSnapshotUtil.writeStateHandle(
snapshot,
"src/test/resources/cep-migration-starting-new-pattern-flink"
+ flinkGenerateSavepointVersion
+ "-snapshot");
} finally {
harness.close();
}
}
@Test
public void testRestoreStartingNewPatternAfterMigration() throws Exception {
KeySelector<Event, Integer> keySelector =
new KeySelector<Event, Integer>() {
private static final long serialVersionUID = -4873366487571254798L;
@Override
public Integer getKey(Event value) throws Exception {
return value.getId();
}
};
final Event startEvent1 = new Event(42, "start", 1.0);
final SubEvent middleEvent1 = new SubEvent(42, "foo1", 1.0, 10.0);
final Event startEvent2 = new Event(42, "start", 5.0);
final SubEvent middleEvent2 = new SubEvent(42, "foo2", 2.0, 10.0);
final Event endEvent = new Event(42, "end", 1.0);
OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
new KeyedOneInputStreamOperatorTestHarness<>(
CepOperatorTestUtilities.getKeyedCepOperator(false, new NFAFactory()),
keySelector,
BasicTypeInfo.INT_TYPE_INFO);
try {
harness.setup();
harness.initializeState(
OperatorSnapshotUtil.getResourceFilename(
"cep-migration-starting-new-pattern-flink"
+ migrateVersion
+ "-snapshot"));
harness.open();
harness.processElement(new StreamRecord<>(startEvent2, 5));
harness.processElement(new StreamRecord<Event>(middleEvent2, 6));
harness.processElement(new StreamRecord<>(endEvent, 7));
harness.processWatermark(new Watermark(20));
ConcurrentLinkedQueue<Object> result = harness.getOutput();
// watermark and 3 results
assertEquals(4, result.size());
Object resultObject1 = result.poll();
assertTrue(resultObject1 instanceof StreamRecord);
StreamRecord<?> resultRecord1 = (StreamRecord<?>) resultObject1;
assertTrue(resultRecord1.getValue() instanceof Map);
Object resultObject2 = result.poll();
assertTrue(resultObject2 instanceof StreamRecord);
StreamRecord<?> resultRecord2 = (StreamRecord<?>) resultObject2;
assertTrue(resultRecord2.getValue() instanceof Map);
Object resultObject3 = result.poll();
assertTrue(resultObject3 instanceof StreamRecord);
StreamRecord<?> resultRecord3 = (StreamRecord<?>) resultObject3;
assertTrue(resultRecord3.getValue() instanceof Map);
@SuppressWarnings("unchecked")
Map<String, List<Event>> patternMap1 =
(Map<String, List<Event>>) resultRecord1.getValue();
assertEquals(startEvent1, patternMap1.get("start").get(0));
assertEquals(middleEvent1, patternMap1.get("middle").get(0));
assertEquals(endEvent, patternMap1.get("end").get(0));
@SuppressWarnings("unchecked")
Map<String, List<Event>> patternMap2 =
(Map<String, List<Event>>) resultRecord2.getValue();
assertEquals(startEvent1, patternMap2.get("start").get(0));
assertEquals(middleEvent2, patternMap2.get("middle").get(0));
assertEquals(endEvent, patternMap2.get("end").get(0));
@SuppressWarnings("unchecked")
Map<String, List<Event>> patternMap3 =
(Map<String, List<Event>>) resultRecord3.getValue();
assertEquals(startEvent2, patternMap3.get("start").get(0));
assertEquals(middleEvent2, patternMap3.get("middle").get(0));
assertEquals(endEvent, patternMap3.get("end").get(0));
// and now go for a checkpoint with the new serializers
final Event startEvent3 = new Event(42, "start", 2.0);
final SubEvent middleEvent3 = new SubEvent(42, "foo", 1.0, 11.0);
final Event endEvent1 = new Event(42, "end", 2.0);
harness.processElement(new StreamRecord<Event>(startEvent3, 21));
harness.processElement(new StreamRecord<Event>(middleEvent3, 23));
// simulate snapshot/restore with some elements in internal sorting queue
OperatorSubtaskState snapshot = harness.snapshot(1L, 1L);
harness.close();
harness =
new KeyedOneInputStreamOperatorTestHarness<>(
CepOperatorTestUtilities.getKeyedCepOperator(false, new NFAFactory()),
keySelector,
BasicTypeInfo.INT_TYPE_INFO);
harness.setup();
harness.initializeState(snapshot);
harness.open();
harness.processElement(new StreamRecord<>(endEvent1, 25));
harness.processWatermark(new Watermark(50));
result = harness.getOutput();
// watermark and the result
assertEquals(2, result.size());
Object resultObject4 = result.poll();
assertTrue(resultObject4 instanceof StreamRecord);
StreamRecord<?> resultRecord4 = (StreamRecord<?>) resultObject4;
assertTrue(resultRecord4.getValue() instanceof Map);
@SuppressWarnings("unchecked")
Map<String, List<Event>> patternMap4 =
(Map<String, List<Event>>) resultRecord4.getValue();
assertEquals(startEvent3, patternMap4.get("start").get(0));
assertEquals(middleEvent3, patternMap4.get("middle").get(0));
assertEquals(endEvent1, patternMap4.get("end").get(0));
} finally {
harness.close();
}
}
@SnapshotsGenerator
public void writeSinglePatternAfterMigrationSnapshot(FlinkVersion flinkGenerateSavepointVersion)
throws Exception {
KeySelector<Event, Integer> keySelector =
new KeySelector<Event, Integer>() {
private static final long serialVersionUID = -4873366487571254798L;
@Override
public Integer getKey(Event value) throws Exception {
return value.getId();
}
};
final Event startEvent1 = new Event(42, "start", 1.0);
OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
new KeyedOneInputStreamOperatorTestHarness<>(
CepOperatorTestUtilities.getKeyedCepOperator(
false, new SinglePatternNFAFactory()),
keySelector,
BasicTypeInfo.INT_TYPE_INFO);
try {
harness.setup();
harness.open();
harness.processWatermark(new Watermark(5));
// do snapshot and save to file
OperatorSubtaskState snapshot = harness.snapshot(0L, 0L);
OperatorSnapshotUtil.writeStateHandle(
snapshot,
"src/test/resources/cep-migration-single-pattern-afterwards-flink"
+ flinkGenerateSavepointVersion
+ "-snapshot");
} finally {
harness.close();
}
}
@Test
public void testSinglePatternAfterMigration() throws Exception {
KeySelector<Event, Integer> keySelector =
new KeySelector<Event, Integer>() {
private static final long serialVersionUID = -4873366487571254798L;
@Override
public Integer getKey(Event value) throws Exception {
return value.getId();
}
};
final Event startEvent1 = new Event(42, "start", 1.0);
OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
new KeyedOneInputStreamOperatorTestHarness<>(
CepOperatorTestUtilities.getKeyedCepOperator(
false, new SinglePatternNFAFactory()),
keySelector,
BasicTypeInfo.INT_TYPE_INFO);
try {
harness.setup();
harness.initializeState(
OperatorSnapshotUtil.getResourceFilename(
"cep-migration-single-pattern-afterwards-flink"
+ migrateVersion
+ "-snapshot"));
harness.open();
harness.processElement(new StreamRecord<>(startEvent1, 5));
harness.processWatermark(new Watermark(20));
ConcurrentLinkedQueue<Object> result = harness.getOutput();
// watermark and the result
assertEquals(2, result.size());
Object resultObject = result.poll();
assertTrue(resultObject instanceof StreamRecord);
StreamRecord<?> resultRecord = (StreamRecord<?>) resultObject;
assertTrue(resultRecord.getValue() instanceof Map);
@SuppressWarnings("unchecked")
Map<String, List<Event>> patternMap =
(Map<String, List<Event>>) resultRecord.getValue();
assertEquals(startEvent1, patternMap.get("start").get(0));
} finally {
harness.close();
}
}
@SnapshotsGenerator
public void writeAndOrSubtypConditionsPatternAfterMigrationSnapshot(
FlinkVersion flinkGenerateSavepointVersion) throws Exception {
KeySelector<Event, Integer> keySelector =
new KeySelector<Event, Integer>() {
private static final long serialVersionUID = -4873366487571254798L;
@Override
public Integer getKey(Event value) throws Exception {
return value.getId();
}
};
final Event startEvent1 = new SubEvent(42, "start", 1.0, 6.0);
OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
new KeyedOneInputStreamOperatorTestHarness<>(
CepOperatorTestUtilities.getKeyedCepOperator(
false, new NFAComplexConditionsFactory()),
keySelector,
BasicTypeInfo.INT_TYPE_INFO);
try {
harness.setup();
harness.open();
harness.processElement(new StreamRecord<>(startEvent1, 5));
harness.processWatermark(new Watermark(6));
// do snapshot and save to file
OperatorSubtaskState snapshot = harness.snapshot(0L, 0L);
OperatorSnapshotUtil.writeStateHandle(
snapshot,
"src/test/resources/cep-migration-conditions-flink"
+ flinkGenerateSavepointVersion
+ "-snapshot");
} finally {
harness.close();
}
}
@Test
public void testAndOrSubtypeConditionsAfterMigration() throws Exception {
KeySelector<Event, Integer> keySelector =
new KeySelector<Event, Integer>() {
private static final long serialVersionUID = -4873366487571254798L;
@Override
public Integer getKey(Event value) throws Exception {
return value.getId();
}
};
final Event startEvent1 = new SubEvent(42, "start", 1.0, 6.0);
OneInputStreamOperatorTestHarness<Event, Map<String, List<Event>>> harness =
new KeyedOneInputStreamOperatorTestHarness<>(
CepOperatorTestUtilities.getKeyedCepOperator(
false, new NFAComplexConditionsFactory()),
keySelector,
BasicTypeInfo.INT_TYPE_INFO);
try {
harness.setup();
harness.initializeState(
OperatorSnapshotUtil.getResourceFilename(
"cep-migration-conditions-flink" + migrateVersion + "-snapshot"));
harness.open();
final Event endEvent = new SubEvent(42, "end", 1.0, 2.0);
harness.processElement(new StreamRecord<>(endEvent, 9));
harness.processWatermark(new Watermark(20));
ConcurrentLinkedQueue<Object> result = harness.getOutput();
// watermark and the result
assertEquals(2, result.size());
Object resultObject = result.poll();
assertTrue(resultObject instanceof StreamRecord);
StreamRecord<?> resultRecord = (StreamRecord<?>) resultObject;
assertTrue(resultRecord.getValue() instanceof Map);
@SuppressWarnings("unchecked")
Map<String, List<Event>> patternMap =
(Map<String, List<Event>>) resultRecord.getValue();
assertEquals(startEvent1, patternMap.get("start").get(0));
assertEquals(endEvent, patternMap.get("start").get(1));
} finally {
harness.close();
}
}
private static | CEPMigrationTest |
java | ReactiveX__RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/observable/ObservableBufferExactBoundary.java | {
"start": 5324,
"end": 5908
} | class ____<T, U extends Collection<? super T>, B>
extends DisposableObserver<B> {
final BufferExactBoundaryObserver<T, U, B> parent;
BufferBoundaryObserver(BufferExactBoundaryObserver<T, U, B> parent) {
this.parent = parent;
}
@Override
public void onNext(B t) {
parent.next();
}
@Override
public void onError(Throwable t) {
parent.onError(t);
}
@Override
public void onComplete() {
parent.onComplete();
}
}
}
| BufferBoundaryObserver |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/list/ListAssert_usingComparator_Test.java | {
"start": 1143,
"end": 1546
} | class ____ extends ListAssertBaseTest {
private Comparator<List<? extends String>> comparator = alwaysEqual();
@Override
protected ListAssert<String> invoke_api_method() {
return assertions.usingComparator(comparator);
}
@Override
protected void verify_internal_effects() {
assertThat(getObjects(assertions).getComparator()).isSameAs(comparator);
}
}
| ListAssert_usingComparator_Test |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/EqualTest_number.java | {
"start": 248,
"end": 1232
} | class ____ extends TestCase {
public void test_exits() throws Exception {
String sql = "3.5";
String sql_c = "3.51";
SQLNumberExpr exprA, exprB, exprC;
{
OracleExprParser parser = new OracleExprParser(sql);
exprA = (SQLNumberExpr) parser.expr();
}
{
OracleExprParser parser = new OracleExprParser(sql);
exprB = (SQLNumberExpr) parser.expr();
}
{
OracleExprParser parser = new OracleExprParser(sql_c);
exprC = (SQLNumberExpr) parser.expr();
}
assertEquals(exprA, exprB);
assertNotEquals(exprA, exprC);
assertTrue(exprA.equals(exprA));
assertFalse(exprA.equals(new Object()));
assertEquals(exprA.hashCode(), exprB.hashCode());
assertEquals(new SQLNumberExpr(), new SQLNumberExpr());
assertEquals(new SQLNumberExpr().hashCode(), new SQLNumberExpr().hashCode());
}
}
| EqualTest_number |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/ExtendedLogMetaRequest.java | {
"start": 1175,
"end": 2397
} | class ____ {
private final String user;
private final String appId;
private final String containerId;
private final MatchExpression nodeId;
private final MatchExpression fileName;
private final ComparisonCollection fileSize;
private final ComparisonCollection modificationTime;
public ExtendedLogMetaRequest(
String user, String appId, String containerId, MatchExpression nodeId,
MatchExpression fileName, ComparisonCollection fileSize,
ComparisonCollection modificationTime) {
this.user = user;
this.appId = appId;
this.containerId = containerId;
this.nodeId = nodeId;
this.fileName = fileName;
this.fileSize = fileSize;
this.modificationTime = modificationTime;
}
public String getUser() {
return user;
}
public String getAppId() {
return appId;
}
public String getContainerId() {
return containerId;
}
public MatchExpression getNodeId() {
return nodeId;
}
public MatchExpression getFileName() {
return fileName;
}
public ComparisonCollection getFileSize() {
return fileSize;
}
public ComparisonCollection getModificationTime() {
return modificationTime;
}
public static | ExtendedLogMetaRequest |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/streaming/util/OperatorSnapshotUtil.java | {
"start": 1630,
"end": 9247
} | class ____ {
public static String getResourceFilename(String filename) {
ClassLoader cl = OperatorSnapshotUtil.class.getClassLoader();
URL resource = cl.getResource(filename);
return resource.getFile();
}
public static void writeStateHandle(OperatorSubtaskState state, String path)
throws IOException {
FileOutputStream out = new FileOutputStream(path);
try (DataOutputStream dos = new DataOutputStream(out)) {
// required for backwards compatibility.
dos.writeInt(MetadataV3Serializer.VERSION);
// still required for compatibility
MetadataV3Serializer.serializeStreamStateHandle(null, dos);
Collection<OperatorStateHandle> rawOperatorState = state.getRawOperatorState();
if (rawOperatorState != null) {
dos.writeInt(rawOperatorState.size());
for (OperatorStateHandle operatorStateHandle : rawOperatorState) {
MetadataV3Serializer.INSTANCE.serializeOperatorStateHandleUtil(
operatorStateHandle, dos);
}
} else {
// this means no states, not even an empty list
dos.writeInt(-1);
}
Collection<OperatorStateHandle> managedOperatorState = state.getManagedOperatorState();
if (managedOperatorState != null) {
dos.writeInt(managedOperatorState.size());
for (OperatorStateHandle operatorStateHandle : managedOperatorState) {
MetadataV3Serializer.INSTANCE.serializeOperatorStateHandleUtil(
operatorStateHandle, dos);
}
} else {
// this means no states, not even an empty list
dos.writeInt(-1);
}
Collection<KeyedStateHandle> rawKeyedState = state.getRawKeyedState();
if (rawKeyedState != null) {
dos.writeInt(rawKeyedState.size());
for (KeyedStateHandle keyedStateHandle : rawKeyedState) {
MetadataV3Serializer.INSTANCE.serializeKeyedStateHandleUtil(
keyedStateHandle, dos);
}
} else {
// this means no operator states, not even an empty list
dos.writeInt(-1);
}
Collection<KeyedStateHandle> managedKeyedState = state.getManagedKeyedState();
if (managedKeyedState != null) {
dos.writeInt(managedKeyedState.size());
for (KeyedStateHandle keyedStateHandle : managedKeyedState) {
MetadataV3Serializer.INSTANCE.serializeKeyedStateHandleUtil(
keyedStateHandle, dos);
}
} else {
// this means no operator states, not even an empty list
dos.writeInt(-1);
}
Collection<InputStateHandle> inputChannelStateHandles = state.getInputChannelState();
dos.writeInt(inputChannelStateHandles.size());
for (InputStateHandle inputChannelStateHandle : inputChannelStateHandles) {
MetadataV3Serializer.INSTANCE.serializeInputStateHandle(
inputChannelStateHandle, dos);
}
Collection<OutputStateHandle> resultSubpartitionStateHandles =
state.getResultSubpartitionState();
dos.writeInt(inputChannelStateHandles.size());
for (OutputStateHandle resultSubpartitionStateHandle : resultSubpartitionStateHandles) {
MetadataV3Serializer.INSTANCE.serializeOutputStateHandle(
resultSubpartitionStateHandle, dos);
}
dos.flush();
}
}
public static OperatorSubtaskState readStateHandle(String path)
throws IOException, ClassNotFoundException {
FileInputStream in = new FileInputStream(path);
try (DataInputStream dis = new DataInputStream(in)) {
// required for backwards compatibility.
final int v = dis.readInt();
// still required for compatibility to consume the bytes.
MetadataV3Serializer.deserializeStreamStateHandle(dis);
List<OperatorStateHandle> rawOperatorState = null;
int numRawOperatorStates = dis.readInt();
if (numRawOperatorStates >= 0) {
rawOperatorState = new ArrayList<>();
for (int i = 0; i < numRawOperatorStates; i++) {
OperatorStateHandle operatorState =
MetadataV3Serializer.INSTANCE.deserializeOperatorStateHandleUtil(dis);
rawOperatorState.add(operatorState);
}
}
List<OperatorStateHandle> managedOperatorState = null;
int numManagedOperatorStates = dis.readInt();
if (numManagedOperatorStates >= 0) {
managedOperatorState = new ArrayList<>();
for (int i = 0; i < numManagedOperatorStates; i++) {
OperatorStateHandle operatorState =
MetadataV3Serializer.INSTANCE.deserializeOperatorStateHandleUtil(dis);
managedOperatorState.add(operatorState);
}
}
List<KeyedStateHandle> rawKeyedState = null;
int numRawKeyedStates = dis.readInt();
if (numRawKeyedStates >= 0) {
rawKeyedState = new ArrayList<>();
for (int i = 0; i < numRawKeyedStates; i++) {
KeyedStateHandle keyedState =
MetadataV3Serializer.INSTANCE.deserializeKeyedStateHandleUtil(dis);
rawKeyedState.add(keyedState);
}
}
List<KeyedStateHandle> managedKeyedState = null;
int numManagedKeyedStates = dis.readInt();
if (numManagedKeyedStates >= 0) {
managedKeyedState = new ArrayList<>();
for (int i = 0; i < numManagedKeyedStates; i++) {
KeyedStateHandle keyedState =
MetadataV3Serializer.INSTANCE.deserializeKeyedStateHandleUtil(dis);
managedKeyedState.add(keyedState);
}
}
final StateObjectCollection<InputStateHandle> inputChannelStateHandles =
v == MetadataV3Serializer.VERSION
? MetadataV3Serializer.INSTANCE.deserializeInputStateHandle(dis)
: StateObjectCollection.empty();
final StateObjectCollection<OutputStateHandle> resultSubpartitionStateHandles =
v == MetadataV3Serializer.VERSION
? MetadataV3Serializer.INSTANCE.deserializeOutputStateHandle(dis)
: StateObjectCollection.empty();
return OperatorSubtaskState.builder()
.setManagedOperatorState(new StateObjectCollection<>(managedOperatorState))
.setRawOperatorState(new StateObjectCollection<>(rawOperatorState))
.setManagedKeyedState(new StateObjectCollection<>(managedKeyedState))
.setRawKeyedState(new StateObjectCollection<>(rawKeyedState))
.setInputChannelState(inputChannelStateHandles)
.setResultSubpartitionState(resultSubpartitionStateHandles)
.build();
}
}
}
| OperatorSnapshotUtil |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/bug/Bug_for_issue_807.java | {
"start": 221,
"end": 1344
} | class ____ extends TestCase {
public void test_for_issue() throws Exception {
String text = "{\"ckid\":\"81a5953835310708e414057adb45e826\",\"rcToken\":\"E+jkQCWSwop+JICPBHc+fxMYeExTx2NTDGZCJ8gIPg7NbMLNvfmZBPU2dR5uxpRRe+zPnOIaCATpHcSa6q+k39HGjNFFDRt9PNlEJokpxhTw9gYJ/WKoSlVR/4ibjIgjvVHxS2lNLS4=\",\"userInfo\":{\"openid\":\"oEH-vt-7mGHOQets-XbE1c3DKpVc\",\"nickname\":\"Pietro\",\"sex\":1,\"language\":\"zh_CN\",\"city\":\"\",\"province\":\"Beijing\",\"country\":\"CN\",\"headimgurl\":\"http://wx.qlogo.cn/mmopen/kox8ma2sryApONj7kInbic4iaCZD8tXL4sqe7k3wROLpb2uCZhOiceAbL69ANeXSMu9zf7hibmt3Y0Ed4A6zIt9ibnPaiciauLZn57c/0\",\"privilege\":[],\"unionid\":\"oq9QRtyW-kb6R_7289hIycrOfnyc\"},\"isNewUser\":false}";
Root root = JSON.parseObject(text, Root.class);
assertEquals("oq9QRtyW-kb6R_7289hIycrOfnyc", root.userInfo.unionId);
JSONObject jsonObject = JSON.parseObject(text);
WechatUserInfo wechatUserInfo = jsonObject.getObject("userInfo", WechatUserInfo.class);
assertEquals("oq9QRtyW-kb6R_7289hIycrOfnyc", wechatUserInfo.unionId);
}
public static | Bug_for_issue_807 |
java | quarkusio__quarkus | extensions/micrometer/deployment/src/main/java/io/quarkus/micrometer/deployment/export/PrometheusRegistryProcessor.java | {
"start": 2083,
"end": 2452
} | class ____ implements BooleanSupplier {
MicrometerConfig mConfig;
public boolean getAsBoolean() {
return (REGISTRY_CLASS != null) && QuarkusClassLoader.isClassPresentAtRuntime(REGISTRY_CLASS_NAME)
&& mConfig.checkRegistryEnabledWithDefault(mConfig.export().prometheus());
}
}
public static | PrometheusEnabled |
java | quarkusio__quarkus | independent-projects/resteasy-reactive/server/processor/src/main/java/org/jboss/resteasy/reactive/server/processor/generation/multipart/GeneratedHandlerMultipartReturnTypeIndexerExtension.java | {
"start": 504,
"end": 1279
} | class ____
implements EndpointIndexer.MultipartReturnTypeIndexerExtension {
private final Map<String, Boolean> multipartOutputGeneratedPopulators = new HashMap<>();
final ClassOutput classOutput;
public GeneratedHandlerMultipartReturnTypeIndexerExtension(ClassOutput classOutput) {
this.classOutput = classOutput;
}
@Override
public boolean handleMultipartForReturnType(AdditionalWriters additionalWriters, ClassInfo multipartClassInfo,
IndexView index) {
String className = multipartClassInfo.name().toString();
Boolean canHandle = multipartOutputGeneratedPopulators.get(className);
if (canHandle != null) {
// we've already seen this | GeneratedHandlerMultipartReturnTypeIndexerExtension |
java | elastic__elasticsearch | x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/ValuesBooleanAggregatorFunctionSupplier.java | {
"start": 652,
"end": 1635
} | class ____ implements AggregatorFunctionSupplier {
public ValuesBooleanAggregatorFunctionSupplier() {
}
@Override
public List<IntermediateStateDesc> nonGroupingIntermediateStateDesc() {
return ValuesBooleanAggregatorFunction.intermediateStateDesc();
}
@Override
public List<IntermediateStateDesc> groupingIntermediateStateDesc() {
return ValuesBooleanGroupingAggregatorFunction.intermediateStateDesc();
}
@Override
public ValuesBooleanAggregatorFunction aggregator(DriverContext driverContext,
List<Integer> channels) {
return ValuesBooleanAggregatorFunction.create(driverContext, channels);
}
@Override
public ValuesBooleanGroupingAggregatorFunction groupingAggregator(DriverContext driverContext,
List<Integer> channels) {
return ValuesBooleanGroupingAggregatorFunction.create(channels, driverContext);
}
@Override
public String describe() {
return "values of booleans";
}
}
| ValuesBooleanAggregatorFunctionSupplier |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/parser/number/NumberValueTest_error_13.java | {
"start": 12653,
"end": 12778
} | class ____ {
public BigDecimal val;
public M1(BigDecimal val) {
this.val = val;
}
}
}
| M1 |
java | apache__dubbo | dubbo-cluster/src/main/java/org/apache/dubbo/rpc/cluster/router/mesh/rule/destination/TcpKeepalive.java | {
"start": 877,
"end": 972
} | class ____ {
private int probes;
private int time;
private int interval;
}
| TcpKeepalive |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java | {
"start": 2699,
"end": 2908
} | class ____ the record.
*/
public static <T extends BaseRecord>
Class<? extends BaseRecord> getRecordClass(final T record) {
return getRecordClass(record.getClass());
}
/**
* Get the base | for |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponseTests.java | {
"start": 1048,
"end": 4811
} | class ____ extends AbstractWireSerializingTestCase<IndicesAliasesResponse> {
public void testMixedModeSerialization() throws IOException {
// AcknowledgedResponse to IndicesAliasesResponse
// in version before TransportVersions.ALIAS_ACTION_RESULTS
{
var ack = AcknowledgedResponse.of(randomBoolean());
try (BytesStreamOutput output = new BytesStreamOutput()) {
ack.writeTo(output);
try (StreamInput in = output.bytes().streamInput()) {
in.setTransportVersion(TransportVersions.V_8_12_0);
var indicesAliasesResponse = new IndicesAliasesResponse(in);
assertEquals(ack.isAcknowledged(), indicesAliasesResponse.isAcknowledged());
assertTrue(indicesAliasesResponse.getActionResults().isEmpty());
assertFalse(indicesAliasesResponse.hasErrors());
}
}
}
// IndicesAliasesResponse to AcknowledgedResponse
// out version before TransportVersions.ALIAS_ACTION_RESULTS
{
var indicesAliasesResponse = randomIndicesAliasesResponse();
try (BytesStreamOutput output = new BytesStreamOutput()) {
output.setTransportVersion(TransportVersions.V_8_12_0);
indicesAliasesResponse.writeTo(output);
try (StreamInput in = output.bytes().streamInput()) {
var ack = AcknowledgedResponse.readFrom(in);
assertEquals(ack.isAcknowledged(), indicesAliasesResponse.isAcknowledged());
}
}
}
}
@Override
protected Writeable.Reader<IndicesAliasesResponse> instanceReader() {
return IndicesAliasesResponse::new;
}
@Override
protected IndicesAliasesResponse createTestInstance() {
return randomIndicesAliasesResponse();
}
private static IndicesAliasesResponse randomIndicesAliasesResponse() {
int numActions = between(0, 5);
List<IndicesAliasesResponse.AliasActionResult> results = new ArrayList<>();
for (int i = 0; i < numActions; ++i) {
results.add(randomIndicesAliasesResult());
}
return new IndicesAliasesResponse(randomBoolean(), randomBoolean(), results);
}
@Override
protected IndicesAliasesResponse mutateInstance(IndicesAliasesResponse instance) throws IOException {
switch (between(0, 2)) {
case 0: {
boolean acknowledged = instance.isAcknowledged() == false;
return new IndicesAliasesResponse(acknowledged, instance.hasErrors(), instance.getActionResults());
}
case 1: {
boolean errors = instance.hasErrors() == false;
return new IndicesAliasesResponse(instance.isAcknowledged(), errors, instance.getActionResults());
}
default: {
var results = new ArrayList<>(instance.getActionResults());
if (results.isEmpty()) {
results.add(randomIndicesAliasesResult());
} else {
results.remove(between(0, results.size() - 1));
}
return new IndicesAliasesResponse(instance.isAcknowledged(), instance.hasErrors(), results);
}
}
}
private static IndicesAliasesResponse.AliasActionResult randomIndicesAliasesResult() {
var action = RandomAliasActionsGenerator.randomAliasAction();
var indices = Arrays.asList(generateRandomStringArray(10, 5, false, false));
return IndicesAliasesResponse.AliasActionResult.build(indices, action, randomIntBetween(0, 3));
}
}
| IndicesAliasesResponseTests |
java | apache__flink | flink-core/src/test/java/org/apache/flink/api/common/typeutils/base/MapSerializerUpgradeTest.java | {
"start": 1415,
"end": 2377
} | class ____
extends TypeSerializerUpgradeTestBase<Map<Integer, String>, Map<Integer, String>> {
private static final String SPEC_NAME = "map-serializer";
public Collection<TestSpecification<?, ?>> createTestSpecifications(FlinkVersion flinkVersion)
throws Exception {
ArrayList<TestSpecification<?, ?>> testSpecifications = new ArrayList<>();
testSpecifications.add(
new TestSpecification<>(
SPEC_NAME,
flinkVersion,
MapSerializerSetup.class,
MapSerializerVerifier.class));
return testSpecifications;
}
// ----------------------------------------------------------------------------------------------
// Specification for "map-serializer"
// ----------------------------------------------------------------------------------------------
/**
* This | MapSerializerUpgradeTest |
java | apache__flink | flink-test-utils-parent/flink-test-utils-junit/src/main/java/org/apache/flink/testutils/executor/TestExecutorExtension.java | {
"start": 1195,
"end": 1977
} | class ____<T extends ExecutorService>
implements BeforeAllCallback, AfterAllCallback {
private final Supplier<T> serviceFactory;
private T executorService;
public TestExecutorExtension(Supplier<T> serviceFactory) {
this.serviceFactory = serviceFactory;
}
@Override
public void beforeAll(ExtensionContext context) throws Exception {
executorService = serviceFactory.get();
}
public T getExecutor() {
// only return an Executor since this resource is in charge of the life cycle
return executorService;
}
@Override
public void afterAll(ExtensionContext context) throws Exception {
if (executorService != null) {
executorService.shutdown();
}
}
}
| TestExecutorExtension |
java | elastic__elasticsearch | modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/stats/GeoIpStatsActionNodeResponseTests.java | {
"start": 813,
"end": 2253
} | class ____ extends ESTestCase {
public void testInputsAreDefensivelyCopied() {
DiscoveryNode node = DiscoveryNodeUtils.create("id");
Set<String> databases = new HashSet<>(randomList(10, () -> randomAlphaOfLengthBetween(5, 10)));
Set<String> files = new HashSet<>(randomList(10, () -> randomAlphaOfLengthBetween(5, 10)));
Set<String> configDatabases = new HashSet<>(randomList(10, () -> randomAlphaOfLengthBetween(5, 10)));
GeoIpStatsAction.NodeResponse nodeResponse = new GeoIpStatsAction.NodeResponse(
node,
GeoIpDownloaderStatsSerializingTests.createRandomInstance(),
randomBoolean() ? null : CacheStatsSerializingTests.createRandomInstance(),
databases,
files,
configDatabases
);
assertThat(nodeResponse.getDatabases(), equalTo(databases));
assertThat(nodeResponse.getFilesInTemp(), equalTo(files));
assertThat(nodeResponse.getConfigDatabases(), equalTo(configDatabases));
databases.add(randomAlphaOfLength(20));
files.add(randomAlphaOfLength(20));
configDatabases.add(randomAlphaOfLength(20));
assertThat(nodeResponse.getDatabases(), not(equalTo(databases)));
assertThat(nodeResponse.getFilesInTemp(), not(equalTo(files)));
assertThat(nodeResponse.getConfigDatabases(), not(equalTo(configDatabases)));
}
}
| GeoIpStatsActionNodeResponseTests |
java | elastic__elasticsearch | x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistoBackedSumAggregatorTests.java | {
"start": 1859,
"end": 6008
} | class ____ extends AggregatorTestCase {
private static final String FIELD_NAME = "field";
public void testNoDocs() throws IOException {
testCase(new MatchAllDocsQuery(), iw -> {
// Intentionally not writing any docs
}, sum -> {
assertEquals(0L, sum.value(), 0d);
assertFalse(AggregationInspectionHelper.hasValue(sum));
});
}
public void testNoMatchingField() throws IOException {
testCase(new MatchAllDocsQuery(), iw -> {
iw.addDocument(singleton(histogramFieldDocValues("wrong_field", new double[] { 3, 1.2, 10 })));
iw.addDocument(singleton(histogramFieldDocValues("wrong_field", new double[] { 5.3, 6, 20 })));
}, sum -> {
assertEquals(0L, sum.value(), 0d);
assertFalse(AggregationInspectionHelper.hasValue(sum));
});
}
public void testSimpleHistogram() throws IOException {
testCase(new MatchAllDocsQuery(), iw -> {
iw.addDocument(singleton(histogramFieldDocValues(FIELD_NAME, new double[] { 3, 1.2, 10 })));
iw.addDocument(singleton(histogramFieldDocValues(FIELD_NAME, new double[] { 5.3, 6, 6, 20 })));
iw.addDocument(singleton(histogramFieldDocValues(FIELD_NAME, new double[] { -10, 0.01, 1, 90 })));
}, sum -> {
assertEquals(132.51d, sum.value(), 0.01d);
assertTrue(AggregationInspectionHelper.hasValue(sum));
});
}
public void testQueryFiltering() throws IOException {
testCase(new TermQuery(new Term("match", "yes")), iw -> {
iw.addDocument(
Arrays.asList(
new StringField("match", "yes", Field.Store.NO),
histogramFieldDocValues(FIELD_NAME, new double[] { 3, 1.2, 10 })
)
);
iw.addDocument(
Arrays.asList(
new StringField("match", "yes", Field.Store.NO),
histogramFieldDocValues(FIELD_NAME, new double[] { 5.3, 6, 20 })
)
);
iw.addDocument(
Arrays.asList(
new StringField("match", "no", Field.Store.NO),
histogramFieldDocValues(FIELD_NAME, new double[] { 3, 1.2, 10 })
)
);
iw.addDocument(
Arrays.asList(
new StringField("match", "no", Field.Store.NO),
histogramFieldDocValues(FIELD_NAME, new double[] { 3, 1.2, 10 })
)
);
iw.addDocument(
Arrays.asList(
new StringField("match", "yes", Field.Store.NO),
histogramFieldDocValues(FIELD_NAME, new double[] { -10, 0.01, 1, 90 })
)
);
}, sum -> {
assertEquals(126.51d, sum.value(), 0.01d);
assertTrue(AggregationInspectionHelper.hasValue(sum));
});
}
private void testCase(Query query, CheckedConsumer<RandomIndexWriter, IOException> indexer, Consumer<Sum> verify) throws IOException {
testCase(indexer, verify, new AggTestConfig(sum("_name").field(FIELD_NAME), defaultFieldType()).withQuery(query));
}
@Override
protected List<SearchPlugin> getSearchPlugins() {
return List.of(new AnalyticsPlugin());
}
@Override
protected List<ValuesSourceType> getSupportedValuesSourceTypes() {
// Note: this is the same list as Core, plus Analytics
return List.of(
CoreValuesSourceType.NUMERIC,
CoreValuesSourceType.BOOLEAN,
CoreValuesSourceType.DATE,
AnalyticsValuesSourceType.HISTOGRAM
);
}
@Override
protected AggregationBuilder createAggBuilderForTypeTest(MappedFieldType fieldType, String fieldName) {
return new SumAggregationBuilder("_name").field(fieldName);
}
private MappedFieldType defaultFieldType() {
return new HistogramFieldMapper.HistogramFieldType(HistoBackedSumAggregatorTests.FIELD_NAME, Collections.emptyMap());
}
}
| HistoBackedSumAggregatorTests |
java | apache__maven | compat/maven-resolver-provider/src/main/java/org/apache/maven/repository/internal/MavenMetadata.java | {
"start": 1580,
"end": 4311
} | class ____ extends AbstractMetadata implements MergeableMetadata {
static final String MAVEN_METADATA_XML = "maven-metadata.xml";
protected Metadata metadata;
private final Path path;
protected final Date timestamp;
private boolean merged;
@Deprecated
protected MavenMetadata(Metadata metadata, File file, Date timestamp) {
this(metadata, file != null ? file.toPath() : null, timestamp);
}
protected MavenMetadata(Metadata metadata, Path path, Date timestamp) {
this.metadata = metadata;
this.path = path;
this.timestamp = timestamp;
}
@Override
public String getType() {
return MAVEN_METADATA_XML;
}
@Deprecated
@Override
public File getFile() {
return path != null ? path.toFile() : null;
}
@Override
public Path getPath() {
return path;
}
@Override
public void merge(File existing, File result) throws RepositoryException {
merge(existing != null ? existing.toPath() : null, result != null ? result.toPath() : null);
}
@Override
public void merge(Path existing, Path result) throws RepositoryException {
Metadata recessive = read(existing);
merge(recessive);
write(result, metadata);
merged = true;
}
@Override
public boolean isMerged() {
return merged;
}
protected abstract void merge(Metadata recessive);
static Metadata read(Path metadataPath) throws RepositoryException {
if (!Files.exists(metadataPath)) {
return new Metadata();
}
try (InputStream input = Files.newInputStream(metadataPath)) {
return new Metadata(new MetadataStaxReader().read(input, false));
} catch (IOException | XMLStreamException e) {
throw new RepositoryException("Could not parse metadata " + metadataPath + ": " + e.getMessage(), e);
}
}
private void write(Path metadataPath, Metadata metadata) throws RepositoryException {
try {
Files.createDirectories(metadataPath.getParent());
try (OutputStream output = Files.newOutputStream(metadataPath)) {
new MetadataStaxWriter().write(output, metadata.getDelegate());
}
} catch (IOException | XMLStreamException e) {
throw new RepositoryException("Could not write metadata " + metadataPath + ": " + e.getMessage(), e);
}
}
@Override
public Map<String, String> getProperties() {
return Collections.emptyMap();
}
@Override
public org.eclipse.aether.metadata.Metadata setProperties(Map<String, String> properties) {
return this;
}
}
| MavenMetadata |
java | processing__processing4 | java/test/processing/mode/java/preproc/BadParamMessageSimplifierStrategyTest.java | {
"start": 272,
"end": 1321
} | class ____ {
private PreprocessIssueMessageSimplifier.PreprocIssueMessageSimplifierStrategy strategy;
@Before
public void setup() {
strategy = PreprocessIssueMessageSimplifier.get().createErrorOnParameterStrategy();
}
@Test
public void testPresent() {
Optional<PdeIssueEmitter.IssueMessageSimplification> msg = strategy.simplify("void test (int x,\ny) \n{");
Assert.assertTrue(msg.isPresent());
}
@Test
public void testPresentUnderscore() {
Optional<PdeIssueEmitter.IssueMessageSimplification> msg = strategy.simplify("void test (int x,\ny_y) \n{");
Assert.assertTrue(msg.isPresent());
}
@Test
public void testPresentVarType() {
Optional<PdeIssueEmitter.IssueMessageSimplification> msg = strategy.simplify("void test (int x,\nint) \n{");
Assert.assertTrue(msg.isPresent());
}
@Test
public void testNotPresent() {
Optional<PdeIssueEmitter.IssueMessageSimplification> msg = strategy.simplify("int x = y");
Assert.assertTrue(msg.isEmpty());
}
} | BadParamMessageSimplifierStrategyTest |
java | apache__logging-log4j2 | log4j-api/src/main/java/org/apache/logging/log4j/internal/map/UnmodifiableArrayBackedMap.java | {
"start": 2596,
"end": 4011
} | class ____ implements Map.Entry<String, String> {
/**
* This field is functionally final, but marking it as such can cause
* performance problems. Consider marking it final after
* https://bugs.openjdk.org/browse/JDK-8324186 is solved.
*/
private int index;
public UnmodifiableEntry(int index) {
this.index = index;
}
@Override
public String getKey() {
return (String) backingArray[getArrayIndexForKey(index)];
}
@Override
public String getValue() {
return (String) backingArray[getArrayIndexForValue(index)];
}
/**
* Per spec, the hashcode is a function of the key and value. Calculation
* exactly matches HashMap.
*/
public int hashCode() {
String key = (String) backingArray[getArrayIndexForKey(index)];
String value = (String) backingArray[getArrayIndexForValue(index)];
return Objects.hashCode(key) ^ Objects.hashCode(value);
}
@Override
public String setValue(String value) {
throw new UnsupportedOperationException("Cannot update Entry instances in UnmodifiableArrayBackedMap");
}
}
/**
* Simple Entry iterator, tracking solely the index in the array. Blocks
* modifications.
*/
private | UnmodifiableEntry |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java | {
"start": 8706,
"end": 11471
} | interface
____ (addrs.hasMoreElements()) {
InetAddress addr = addrs.nextElement();
assertTrue(NetUtils.isLocalAddress(addr));
}
}
}
assertFalse(NetUtils.isLocalAddress(InetAddress.getByName("8.8.8.8")));
}
@Test
public void testWrapConnectException() throws Throwable {
IOException e = new ConnectException("failed");
IOException wrapped = verifyExceptionClass(e, ConnectException.class);
assertInException(wrapped, "failed");
assertWikified(wrapped);
assertInException(wrapped, "localhost");
assertRemoteDetailsIncluded(wrapped);
assertInException(wrapped, "/ConnectionRefused");
}
@Test
public void testWrapBindException() throws Throwable {
IOException e = new BindException("failed");
IOException wrapped = verifyExceptionClass(e, BindException.class);
assertInException(wrapped, "failed");
assertLocalDetailsIncluded(wrapped);
assertNotInException(wrapped, DEST_PORT_NAME);
assertInException(wrapped, "/BindException");
}
@Test
public void testWrapUnknownHostException() throws Throwable {
IOException e = new UnknownHostException("failed");
IOException wrapped = verifyExceptionClass(e, UnknownHostException.class);
assertInException(wrapped, "failed");
assertWikified(wrapped);
assertInException(wrapped, "localhost");
assertRemoteDetailsIncluded(wrapped);
assertInException(wrapped, "/UnknownHost");
}
@Test
public void testWrapEOFException() throws Throwable {
IOException e = new EOFException("eof");
IOException wrapped = verifyExceptionClass(e, EOFException.class);
assertInException(wrapped, "eof");
assertWikified(wrapped);
assertInException(wrapped, "localhost");
assertRemoteDetailsIncluded(wrapped);
assertInException(wrapped, "/EOFException");
}
@Test
public void testWrapKerbAuthException() throws Throwable {
IOException e = new KerberosAuthException("socket timeout on connection");
IOException wrapped = verifyExceptionClass(e, KerberosAuthException.class);
assertInException(wrapped, "socket timeout on connection");
assertInException(wrapped, "localhost");
assertInException(wrapped, "DestHost:destPort ");
assertInException(wrapped, "LocalHost:localPort");
assertRemoteDetailsIncluded(wrapped);
assertInException(wrapped, "KerberosAuthException");
}
@Test
public void testWrapIOEWithNoStringConstructor() throws Throwable {
IOException e = new CharacterCodingException();
IOException wrapped =
verifyExceptionClass(e, CharacterCodingException.class);
assertEquals(null, wrapped.getMessage());
}
@Test
public void testWrapIOEWithPrivateStringConstructor() throws Throwable {
| while |
java | apache__camel | core/camel-management/src/main/java/org/apache/camel/management/mbean/ManagedRoute.java | {
"start": 40552,
"end": 40817
} | class ____ implements Comparator<ManagedProcessorMBean>, Serializable {
@Override
public int compare(ManagedProcessorMBean o1, ManagedProcessorMBean o2) {
return o1.getIndex().compareTo(o2.getIndex());
}
}
}
| OrderProcessorMBeans |
java | apache__logging-log4j2 | log4j-api/src/main/java/org/apache/logging/log4j/util/FilteredObjectInputStream.java | {
"start": 1487,
"end": 3320
} | class ____ extends ObjectInputStream {
private final Collection<String> allowedExtraClasses;
public FilteredObjectInputStream() throws IOException, SecurityException {
this.allowedExtraClasses = Collections.emptySet();
}
public FilteredObjectInputStream(final InputStream inputStream) throws IOException {
super(inputStream);
this.allowedExtraClasses = Collections.emptySet();
}
public FilteredObjectInputStream(final Collection<String> allowedExtraClasses)
throws IOException, SecurityException {
this.allowedExtraClasses = allowedExtraClasses;
}
public FilteredObjectInputStream(final InputStream inputStream, final Collection<String> allowedExtraClasses)
throws IOException {
super(inputStream);
this.allowedExtraClasses = allowedExtraClasses;
}
public Collection<String> getAllowedClasses() {
return allowedExtraClasses;
}
@Override
protected Class<?> resolveClass(final ObjectStreamClass desc) throws IOException, ClassNotFoundException {
final String name = SerializationUtil.stripArray(desc.getName());
if (!(isAllowedByDefault(name) || allowedExtraClasses.contains(name))) {
throw new InvalidObjectException("Class is not allowed for deserialization: " + name);
}
return super.resolveClass(desc);
}
private static boolean isAllowedByDefault(final String name) {
return isRequiredPackage(name) || REQUIRED_JAVA_CLASSES.contains(name);
}
private static boolean isRequiredPackage(final String name) {
for (final String packageName : REQUIRED_JAVA_PACKAGES) {
if (name.startsWith(packageName)) {
return true;
}
}
return false;
}
}
| FilteredObjectInputStream |
java | quarkusio__quarkus | extensions/resteasy-classic/resteasy/deployment/src/test/java/io/quarkus/resteasy/test/security/HttpPolicyAuthFailureExceptionMapperTest.java | {
"start": 2699,
"end": 2820
} | class ____ {
@GET
public String hello() {
return "hello world";
}
}
}
| HelloResource |
java | spring-projects__spring-framework | spring-test/src/test/java/org/springframework/test/context/junit/jupiter/nested/WebAppConfigurationNestedTests.java | {
"start": 2571,
"end": 2783
} | class ____ {
@Test
void test(ApplicationContext context) {
assertThat(context).isInstanceOf(WebApplicationContext.class);
}
}
@Nested
@NestedTestConfiguration(INHERIT)
| ConfigOverriddenByDefaultWebTests |
java | elastic__elasticsearch | x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/FileTokensToolProvider.java | {
"start": 403,
"end": 631
} | class ____ implements CliToolProvider {
@Override
public String name() {
return "service-tokens";
}
@Override
public Command create() {
return new FileTokensTool();
}
}
| FileTokensToolProvider |
java | junit-team__junit5 | junit-jupiter-engine/src/main/java/org/junit/jupiter/engine/descriptor/DynamicDescendantFilter.java | {
"start": 875,
"end": 2392
} | class ____ implements BiPredicate<UniqueId, Integer> {
private final Set<UniqueId> allowedUniqueIds = new HashSet<>();
private final Set<Integer> allowedIndices = new HashSet<>();
private Mode mode = Mode.EXPLICIT;
public void allowUniqueIdPrefix(UniqueId uniqueId) {
if (this.mode == Mode.EXPLICIT) {
this.allowedUniqueIds.add(uniqueId);
}
}
public void allowIndex(int index) {
if (this.mode == Mode.EXPLICIT) {
this.allowedIndices.add(index);
}
}
public void allowIndex(Set<Integer> indices) {
if (this.mode == Mode.EXPLICIT) {
this.allowedIndices.addAll(indices);
}
}
public void allowAll() {
this.mode = Mode.ALLOW_ALL;
this.allowedUniqueIds.clear();
this.allowedIndices.clear();
}
@Override
public boolean test(UniqueId uniqueId, Integer index) {
return isEverythingAllowed() //
|| isUniqueIdAllowed(uniqueId) //
|| allowedIndices.contains(index);
}
private boolean isEverythingAllowed() {
return allowedUniqueIds.isEmpty() && allowedIndices.isEmpty();
}
private boolean isUniqueIdAllowed(UniqueId uniqueId) {
return allowedUniqueIds.stream().anyMatch(allowedUniqueId -> isPrefixOrViceVersa(uniqueId, allowedUniqueId));
}
private boolean isPrefixOrViceVersa(UniqueId currentUniqueId, UniqueId allowedUniqueId) {
return allowedUniqueId.hasPrefix(currentUniqueId) || currentUniqueId.hasPrefix(allowedUniqueId);
}
public DynamicDescendantFilter withoutIndexFiltering() {
return new WithoutIndexFiltering();
}
private | DynamicDescendantFilter |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/EqualsIncompatibleTypeTest.java | {
"start": 1790,
"end": 1873
} | class ____ extends C {
public abstract boolean equals(Object o);
}
abstract | C1 |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/metamodel/internal/EntityRepresentationStrategyPojoStandard.java | {
"start": 8850,
"end": 9236
} | interface ____ the top of a class
// hierarchy will be used first when a HibernateProxy decides what it
// should implement.
final Set<Class<?>> proxyInterfaces = new LinkedHashSet<>();
if ( proxyInterface != null && ! mappedClass.equals( proxyInterface ) ) {
if ( ! proxyInterface.isInterface() ) {
throw new MappingException( "proxy must be either an interface, or the | at |
java | apache__maven | impl/maven-core/src/main/java/org/apache/maven/lifecycle/internal/ProjectBuildList.java | {
"start": 1516,
"end": 4160
} | class ____ implements Iterable<ProjectSegment> {
private final List<ProjectSegment> items;
public ProjectBuildList(List<ProjectSegment> items) {
this.items = Collections.unmodifiableList(items);
}
// TODO Optimize; or maybe just rewrite the whole way aggregating mojos are being run.
/**
* Returns aProjectBuildList that contains only items for the specified taskSegment
* @param taskSegment the requested task segment
* @return a project build list for the supplied task segment
*/
public ProjectBuildList getByTaskSegment(TaskSegment taskSegment) {
return new ProjectBuildList(
items.stream().filter(pb -> taskSegment == pb.getTaskSegment()).collect(Collectors.toList()));
}
public Map<MavenProject, ProjectSegment> selectSegment(TaskSegment taskSegment) {
return items.stream()
.filter(pb -> taskSegment == pb.getTaskSegment())
.collect(Collectors.toMap(ProjectSegment::getProject, Function.identity()));
}
/**
* Finds the first ProjectSegment matching the supplied project
* @param mavenProject the requested project
* @return The projectSegment or null.
*/
public ProjectSegment findByMavenProject(MavenProject mavenProject) {
return items.stream()
.filter(pb -> mavenProject.equals(pb.getProject()))
.findFirst()
.orElse(null);
}
@Override
public Iterator<ProjectSegment> iterator() {
return items.iterator();
}
public void closeAll() {
for (ProjectSegment item : items) {
MavenSession sessionForThisModule = item.getSession();
sessionForThisModule.setCurrentProject(null);
}
}
public int size() {
return items.size();
}
public ProjectSegment get(int index) {
return items.get(index);
}
public Set<String> getReactorProjectKeys() {
Set<String> projectKeys = new HashSet<>(items.size() * 2);
for (ProjectSegment projectBuild : items) {
MavenProject project = projectBuild.getProject();
String key = ArtifactUtils.key(project.getGroupId(), project.getArtifactId(), project.getVersion());
projectKeys.add(key);
}
return projectKeys;
}
public boolean isEmpty() {
return items.isEmpty();
}
/**
* @return a set of all the projects managed by the build
*/
public Set<MavenProject> getProjects() {
return items.stream().map(ProjectSegment::getProject).collect(Collectors.toSet());
}
}
| ProjectBuildList |
java | google__guava | android/guava/src/com/google/common/util/concurrent/UncaughtExceptionHandlers.java | {
"start": 2013,
"end": 2904
} | class ____ implements UncaughtExceptionHandler {
private static final LazyLogger logger = new LazyLogger(Exiter.class);
private final RuntimeWrapper runtime;
Exiter(RuntimeWrapper runtime) {
this.runtime = runtime;
}
@Override
public void uncaughtException(Thread t, Throwable e) {
try {
logger
.get()
.log(
SEVERE,
String.format(Locale.ROOT, "Caught an exception in %s. Shutting down.", t),
e);
} catch (Throwable errorInLogging) { // sneaky checked exception
// If logging fails, e.g. due to missing memory, at least try to log the
// message and the cause for the failed logging.
System.err.println(e.getMessage());
System.err.println(errorInLogging.getMessage());
} finally {
runtime.exit(1);
}
}
}
}
| Exiter |
java | apache__flink | flink-state-backends/flink-statebackend-rocksdb/src/test/java/org/apache/flink/state/rocksdb/RocksDBKeyedStateBackendTestFactory.java | {
"start": 1699,
"end": 3928
} | class ____ implements AutoCloseable {
private MockEnvironment env;
private RocksDBKeyedStateBackend<?> keyedStateBackend;
public <K> RocksDBKeyedStateBackend<K> create(
TemporaryFolder tmp, TypeSerializer<K> keySerializer, int maxKeyGroupNumber)
throws Exception {
EmbeddedRocksDBStateBackend backend = getRocksDBStateBackend(tmp);
env = MockEnvironment.builder().build();
JobID jobID = new JobID();
KeyGroupRange keyGroupRange = new KeyGroupRange(0, maxKeyGroupNumber - 1);
TaskKvStateRegistry kvStateRegistry = mock(TaskKvStateRegistry.class);
CloseableRegistry cancelStreamRegistry = new CloseableRegistry();
keyedStateBackend =
(RocksDBKeyedStateBackend<K>)
backend.createKeyedStateBackend(
new KeyedStateBackendParametersImpl<>(
env,
jobID,
"Test",
keySerializer,
maxKeyGroupNumber,
keyGroupRange,
kvStateRegistry,
TtlTimeProvider.DEFAULT,
new UnregisteredMetricsGroup(),
Collections.emptyList(),
cancelStreamRegistry));
return (RocksDBKeyedStateBackend<K>) keyedStateBackend;
}
@Override
public void close() {
if (keyedStateBackend != null) {
keyedStateBackend.dispose();
}
IOUtils.closeQuietly(env);
}
private EmbeddedRocksDBStateBackend getRocksDBStateBackend(TemporaryFolder tmp)
throws IOException {
String dbPath = tmp.newFolder().getAbsolutePath();
String checkpointPath = tmp.newFolder().toURI().toString();
EmbeddedRocksDBStateBackend backend = new EmbeddedRocksDBStateBackend(TernaryBoolean.TRUE);
backend.setDbStoragePath(dbPath);
return backend;
}
}
| RocksDBKeyedStateBackendTestFactory |
java | apache__dubbo | dubbo-serialization/dubbo-serialization-api/src/main/java/org/apache/dubbo/common/serialize/support/SerializableClassRegistry.java | {
"start": 957,
"end": 1160
} | class ____ for {@code dubbo-serialization-fst}
* and {@code dubbo-serialization-kryo}, it will register some classes at startup time (for example {@link AbstractKryoFactory#create})
*/
public abstract | used |
java | google__error-prone | core/src/test/java/com/google/errorprone/refaster/testdata/template/InferredThisTemplate.java | {
"start": 948,
"end": 1163
} | class ____ {
@BeforeTemplate
public void before(Thread thread) {
thread.setName(thread.getName());
}
@AfterTemplate
public void after(Thread thread) {
thread.setName("foo");
}
}
| InferredThisTemplate |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/apidiff/ApiDiffCheckerTest.java | {
"start": 12513,
"end": 12991
} | class ____ extends B {}
""")
.build())
.compileOutputToJarOrDie();
Path newJar =
new CompilationBuilder(JavacTool.create(), tempFolder.newFolder(), fileManager)
.setSources(
new SourceBuilder(tempFolder.newFolder())
.addSourceLines(
"A.java",
"""
package lib;
public | C |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.java | {
"start": 203772,
"end": 210456
} | class ____ extends BooleanExpressionContext {
public BooleanExpressionContext left;
public Token operator;
public BooleanExpressionContext right;
public List<BooleanExpressionContext> booleanExpression() {
return getRuleContexts(BooleanExpressionContext.class);
}
public BooleanExpressionContext booleanExpression(int i) {
return getRuleContext(BooleanExpressionContext.class,i);
}
public TerminalNode AND() { return getToken(EsqlBaseParser.AND, 0); }
public TerminalNode OR() { return getToken(EsqlBaseParser.OR, 0); }
@SuppressWarnings("this-escape")
public LogicalBinaryContext(BooleanExpressionContext ctx) { copyFrom(ctx); }
@Override
public void enterRule(ParseTreeListener listener) {
if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).enterLogicalBinary(this);
}
@Override
public void exitRule(ParseTreeListener listener) {
if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).exitLogicalBinary(this);
}
@Override
public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
if ( visitor instanceof EsqlBaseParserVisitor ) return ((EsqlBaseParserVisitor<? extends T>)visitor).visitLogicalBinary(this);
else return visitor.visitChildren(this);
}
}
public final BooleanExpressionContext booleanExpression() throws RecognitionException {
return booleanExpression(0);
}
private BooleanExpressionContext booleanExpression(int _p) throws RecognitionException {
ParserRuleContext _parentctx = _ctx;
int _parentState = getState();
BooleanExpressionContext _localctx = new BooleanExpressionContext(_ctx, _parentState);
BooleanExpressionContext _prevctx = _localctx;
int _startState = 146;
enterRecursionRule(_localctx, 146, RULE_booleanExpression, _p);
int _la;
try {
int _alt;
enterOuterAlt(_localctx, 1);
{
setState(712);
_errHandler.sync(this);
switch ( getInterpreter().adaptivePredict(_input,58,_ctx) ) {
case 1:
{
_localctx = new LogicalNotContext(_localctx);
_ctx = _localctx;
_prevctx = _localctx;
setState(684);
match(NOT);
setState(685);
booleanExpression(8);
}
break;
case 2:
{
_localctx = new BooleanDefaultContext(_localctx);
_ctx = _localctx;
_prevctx = _localctx;
setState(686);
valueExpression();
}
break;
case 3:
{
_localctx = new RegexExpressionContext(_localctx);
_ctx = _localctx;
_prevctx = _localctx;
setState(687);
regexBooleanExpression();
}
break;
case 4:
{
_localctx = new LogicalInContext(_localctx);
_ctx = _localctx;
_prevctx = _localctx;
setState(688);
valueExpression();
setState(690);
_errHandler.sync(this);
_la = _input.LA(1);
if (_la==NOT) {
{
setState(689);
match(NOT);
}
}
setState(692);
match(IN);
setState(693);
match(LP);
setState(694);
valueExpression();
setState(699);
_errHandler.sync(this);
_la = _input.LA(1);
while (_la==COMMA) {
{
{
setState(695);
match(COMMA);
setState(696);
valueExpression();
}
}
setState(701);
_errHandler.sync(this);
_la = _input.LA(1);
}
setState(702);
match(RP);
}
break;
case 5:
{
_localctx = new IsNullContext(_localctx);
_ctx = _localctx;
_prevctx = _localctx;
setState(704);
valueExpression();
setState(705);
match(IS);
setState(707);
_errHandler.sync(this);
_la = _input.LA(1);
if (_la==NOT) {
{
setState(706);
match(NOT);
}
}
setState(709);
match(NULL);
}
break;
case 6:
{
_localctx = new MatchExpressionContext(_localctx);
_ctx = _localctx;
_prevctx = _localctx;
setState(711);
matchBooleanExpression();
}
break;
}
_ctx.stop = _input.LT(-1);
setState(722);
_errHandler.sync(this);
_alt = getInterpreter().adaptivePredict(_input,60,_ctx);
while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
if ( _alt==1 ) {
if ( _parseListeners!=null ) triggerExitRuleEvent();
_prevctx = _localctx;
{
setState(720);
_errHandler.sync(this);
switch ( getInterpreter().adaptivePredict(_input,59,_ctx) ) {
case 1:
{
_localctx = new LogicalBinaryContext(new BooleanExpressionContext(_parentctx, _parentState));
((LogicalBinaryContext)_localctx).left = _prevctx;
pushNewRecursionContext(_localctx, _startState, RULE_booleanExpression);
setState(714);
if (!(precpred(_ctx, 5))) throw new FailedPredicateException(this, "precpred(_ctx, 5)");
setState(715);
((LogicalBinaryContext)_localctx).operator = match(AND);
setState(716);
((LogicalBinaryContext)_localctx).right = booleanExpression(6);
}
break;
case 2:
{
_localctx = new LogicalBinaryContext(new BooleanExpressionContext(_parentctx, _parentState));
((LogicalBinaryContext)_localctx).left = _prevctx;
pushNewRecursionContext(_localctx, _startState, RULE_booleanExpression);
setState(717);
if (!(precpred(_ctx, 4))) throw new FailedPredicateException(this, "precpred(_ctx, 4)");
setState(718);
((LogicalBinaryContext)_localctx).operator = match(OR);
setState(719);
((LogicalBinaryContext)_localctx).right = booleanExpression(5);
}
break;
}
}
}
setState(724);
_errHandler.sync(this);
_alt = getInterpreter().adaptivePredict(_input,60,_ctx);
}
}
}
catch (RecognitionException re) {
_localctx.exception = re;
_errHandler.reportError(this, re);
_errHandler.recover(this, re);
}
finally {
unrollRecursionContexts(_parentctx);
}
return _localctx;
}
@SuppressWarnings("CheckReturnValue")
public static | LogicalBinaryContext |
java | google__dagger | dagger-compiler/main/java/dagger/internal/codegen/base/ComponentAnnotation.java | {
"start": 1769,
"end": 8161
} | class ____ {
/** The root component annotation types. */
private static final ImmutableSet<XClassName> ROOT_COMPONENT_ANNOTATIONS =
ImmutableSet.of(XTypeNames.COMPONENT, XTypeNames.PRODUCTION_COMPONENT);
/** The subcomponent annotation types. */
private static final ImmutableSet<XClassName> SUBCOMPONENT_ANNOTATIONS =
ImmutableSet.of(XTypeNames.SUBCOMPONENT, XTypeNames.PRODUCTION_SUBCOMPONENT);
/** All component annotation types. */
private static final ImmutableSet<XClassName> ALL_COMPONENT_ANNOTATIONS =
ImmutableSet.<XClassName>builder()
.addAll(ROOT_COMPONENT_ANNOTATIONS)
.addAll(SUBCOMPONENT_ANNOTATIONS)
.build();
/** All component and creator annotation types. */
private static final ImmutableSet<XClassName> ALL_COMPONENT_AND_CREATOR_ANNOTATIONS =
ImmutableSet.<XClassName>builder()
.addAll(ALL_COMPONENT_ANNOTATIONS)
.addAll(ComponentCreatorAnnotation.allCreatorAnnotations())
.build();
/** All production annotation types. */
private static final ImmutableSet<XClassName> PRODUCTION_ANNOTATIONS =
ImmutableSet.of(
XTypeNames.PRODUCTION_COMPONENT,
XTypeNames.PRODUCTION_SUBCOMPONENT,
XTypeNames.PRODUCER_MODULE);
private XAnnotation annotation;
/** The annotation itself. */
public final XAnnotation annotation() {
return annotation;
}
/** Returns the {@link XClassName} name of the annotation. */
public abstract XClassName className();
/** The simple name of the annotation type. */
public final String simpleName() {
return className().getSimpleName();
}
/**
* Returns {@code true} if the annotation is a {@code @Subcomponent} or
* {@code @ProductionSubcomponent}.
*/
public final boolean isSubcomponent() {
return SUBCOMPONENT_ANNOTATIONS.contains(className());
}
/**
* Returns {@code true} if the annotation is a {@code @ProductionComponent},
* {@code @ProductionSubcomponent}, or {@code @ProducerModule}.
*/
public final boolean isProduction() {
return PRODUCTION_ANNOTATIONS.contains(className());
}
/**
* Returns {@code true} if the annotation is a real component annotation and not a module
* annotation.
*/
public final boolean isRealComponent() {
return ALL_COMPONENT_ANNOTATIONS.contains(className());
}
/** The types listed as {@code dependencies}. */
@Memoized
public ImmutableList<XType> dependencyTypes() {
return isRootComponent()
? ImmutableList.copyOf(annotation.getAsTypeList("dependencies"))
: ImmutableList.of();
}
/**
* The types listed as {@code dependencies}.
*
* @throws IllegalArgumentException if any of {@link #dependencyTypes()} are error types
*/
@Memoized
public ImmutableSet<XTypeElement> dependencies() {
return dependencyTypes().stream().map(XType::getTypeElement).collect(toImmutableSet());
}
/**
* The types listed as {@code modules}.
*
* @throws IllegalArgumentException if any module is an error type.
*/
@Memoized
public ImmutableSet<XTypeElement> modules() {
return annotation.getAsTypeList(isRealComponent() ? "modules" : "includes").stream()
.map(XType::getTypeElement)
.collect(toImmutableSet());
}
private final boolean isRootComponent() {
return ROOT_COMPONENT_ANNOTATIONS.contains(className());
}
/**
* Returns an object representing a root component annotation, not a subcomponent annotation, if
* one is present on {@code typeElement}.
*/
public static Optional<ComponentAnnotation> rootComponentAnnotation(
XTypeElement typeElement, DaggerSuperficialValidation superficialValidation) {
return anyComponentAnnotation(typeElement, ROOT_COMPONENT_ANNOTATIONS, superficialValidation);
}
/**
* Returns an object representing a subcomponent annotation, if one is present on {@code
* typeElement}.
*/
public static Optional<ComponentAnnotation> subcomponentAnnotation(
XTypeElement typeElement, DaggerSuperficialValidation superficialValidation) {
return anyComponentAnnotation(typeElement, SUBCOMPONENT_ANNOTATIONS, superficialValidation);
}
/**
* Returns an object representing a root component or subcomponent annotation, if one is present
* on {@code typeElement}.
*/
public static Optional<ComponentAnnotation> anyComponentAnnotation(
XElement element, DaggerSuperficialValidation superficialValidation) {
return anyComponentAnnotation(element, ALL_COMPONENT_ANNOTATIONS, superficialValidation);
}
private static Optional<ComponentAnnotation> anyComponentAnnotation(
XElement element,
Collection<XClassName> annotations,
DaggerSuperficialValidation superficialValidation) {
return getAnyAnnotation(element, annotations)
.map(
annotation -> {
superficialValidation.validateAnnotationOf(element, annotation);
return create(annotation);
});
}
/** Returns {@code true} if the argument is a component annotation. */
public static boolean isComponentAnnotation(XAnnotation annotation) {
return ALL_COMPONENT_ANNOTATIONS.contains(asClassName(annotation));
}
/** Creates a fictional component annotation representing a module. */
public static ComponentAnnotation fromModuleAnnotation(ModuleAnnotation moduleAnnotation) {
return create(moduleAnnotation.annotation());
}
private static ComponentAnnotation create(XAnnotation annotation) {
ComponentAnnotation componentAnnotation =
new AutoValue_ComponentAnnotation(asClassName(annotation));
componentAnnotation.annotation = annotation;
return componentAnnotation;
}
/** The root component annotation types. */
public static ImmutableSet<XClassName> rootComponentAnnotations() {
return ROOT_COMPONENT_ANNOTATIONS;
}
/** The subcomponent annotation types. */
public static ImmutableSet<XClassName> subcomponentAnnotations() {
return SUBCOMPONENT_ANNOTATIONS;
}
/** All component annotation types. */
public static ImmutableSet<XClassName> allComponentAnnotations() {
return ALL_COMPONENT_ANNOTATIONS;
}
/** All component and creator annotation types. */
public static ImmutableSet<XClassName> allComponentAndCreatorAnnotations() {
return ALL_COMPONENT_AND_CREATOR_ANNOTATIONS;
}
}
| ComponentAnnotation |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskExecutor.java | {
"start": 11177,
"end": 111500
} | class ____ extends RpcEndpoint implements TaskExecutorGateway {
public static final String TASK_MANAGER_NAME = "taskmanager";
/** The access to the leader election and retrieval services. */
private final HighAvailabilityServices haServices;
private final TaskManagerServices taskExecutorServices;
/** The task manager configuration. */
private final TaskManagerConfiguration taskManagerConfiguration;
/** The fatal error handler to use in case of a fatal error. */
private final FatalErrorHandler fatalErrorHandler;
private final TaskExecutorBlobService taskExecutorBlobService;
private final LibraryCacheManager libraryCacheManager;
/** The address to metric query service on this Task Manager. */
@Nullable private final String metricQueryServiceAddress;
// --------- TaskManager services --------
/** The connection information of this task manager. */
private final UnresolvedTaskManagerLocation unresolvedTaskManagerLocation;
private final TaskManagerMetricGroup taskManagerMetricGroup;
/** The state manager for this task, providing state managers per slot. */
private final TaskExecutorLocalStateStoresManager localStateStoresManager;
/**
* The file merging manager for this task, providing file merging snapshot manager per job, see
* {@link FileMergingSnapshotManager} for details.
*/
private final TaskExecutorFileMergingManager fileMergingManager;
/** The changelog manager for this task, providing changelog storage per job. */
private final TaskExecutorStateChangelogStoragesManager changelogStoragesManager;
/**
* The channel state executor factory manager for this task, providing channel state executor
* factory per job.
*/
private final TaskExecutorChannelStateExecutorFactoryManager channelStateExecutorFactoryManager;
/** Information provider for external resources. */
private final ExternalResourceInfoProvider externalResourceInfoProvider;
/** The network component in the task manager. */
private final ShuffleEnvironment<?, ?> shuffleEnvironment;
/** The kvState registration service in the task manager. */
private final KvStateService kvStateService;
private final Executor ioExecutor;
/** {@link MemoryManager} shared across all tasks. */
private final SharedResources sharedResources;
// --------- task slot allocation table -----------
private final TaskSlotTable<Task> taskSlotTable;
private final Map<JobID, UUID> currentSlotOfferPerJob = new HashMap<>();
private final JobTable jobTable;
private final JobLeaderService jobLeaderService;
private final LeaderRetrievalService resourceManagerLeaderRetriever;
private final SlotAllocationSnapshotPersistenceService slotAllocationSnapshotPersistenceService;
// ------------------------------------------------------------------------
private final HardwareDescription hardwareDescription;
private final TaskExecutorMemoryConfiguration memoryConfiguration;
private FileCache fileCache;
/** The heartbeat manager for job manager in the task manager. */
private final HeartbeatManager<AllocatedSlotReport, TaskExecutorToJobManagerHeartbeatPayload>
jobManagerHeartbeatManager;
/** The heartbeat manager for resource manager in the task manager. */
private final HeartbeatManager<Void, TaskExecutorHeartbeatPayload>
resourceManagerHeartbeatManager;
private final TaskExecutorPartitionTracker partitionTracker;
private final DelegationTokenReceiverRepository delegationTokenReceiverRepository;
// --------- resource manager --------
@Nullable private ResourceManagerAddress resourceManagerAddress;
@Nullable private EstablishedResourceManagerConnection establishedResourceManagerConnection;
@Nullable private TaskExecutorToResourceManagerConnection resourceManagerConnection;
@Nullable private UUID currentRegistrationTimeoutId;
private final Map<JobID, Collection<CompletableFuture<ExecutionState>>>
taskResultPartitionCleanupFuturesPerJob = CollectionUtil.newHashMapWithExpectedSize(8);
private final ThreadInfoSampleService threadInfoSampleService;
private final GroupCache<JobID, PermanentBlobKey, JobInformation> jobInformationCache;
private final GroupCache<JobID, PermanentBlobKey, TaskInformation> taskInformationCache;
private final GroupCache<JobID, PermanentBlobKey, ShuffleDescriptorGroup>
shuffleDescriptorsCache;
private final ProfilingService profilingService;
private final Set<JobID> jobPartitionToCleanupSet = new HashSet<>();
    /**
     * Creates the TaskExecutor RPC endpoint.
     *
     * <p>All collaborating services are supplied from the outside ({@code taskExecutorServices}
     * bundles most of them); this constructor only wires them together and derives a few
     * values (hardware description, memory configuration, heartbeat managers). No service is
     * started here — that happens in {@link #onStart()}.
     */
    public TaskExecutor(
            RpcService rpcService,
            TaskManagerConfiguration taskManagerConfiguration,
            HighAvailabilityServices haServices,
            TaskManagerServices taskExecutorServices,
            ExternalResourceInfoProvider externalResourceInfoProvider,
            HeartbeatServices heartbeatServices,
            TaskManagerMetricGroup taskManagerMetricGroup,
            @Nullable String metricQueryServiceAddress,
            TaskExecutorBlobService taskExecutorBlobService,
            FatalErrorHandler fatalErrorHandler,
            TaskExecutorPartitionTracker partitionTracker,
            DelegationTokenReceiverRepository delegationTokenReceiverRepository) {
        super(rpcService, RpcServiceUtils.createRandomName(TASK_MANAGER_NAME));
        checkArgument(
                taskManagerConfiguration.getNumberSlots() > 0,
                "The number of slots has to be larger than 0.");
        this.taskManagerConfiguration = checkNotNull(taskManagerConfiguration);
        this.taskExecutorServices = checkNotNull(taskExecutorServices);
        this.haServices = checkNotNull(haServices);
        this.fatalErrorHandler = checkNotNull(fatalErrorHandler);
        this.partitionTracker = partitionTracker;
        this.delegationTokenReceiverRepository = checkNotNull(delegationTokenReceiverRepository);
        this.taskManagerMetricGroup = checkNotNull(taskManagerMetricGroup);
        this.taskExecutorBlobService = checkNotNull(taskExecutorBlobService);
        this.metricQueryServiceAddress = metricQueryServiceAddress;
        this.externalResourceInfoProvider = checkNotNull(externalResourceInfoProvider);
        // Unpack the individual services from the bundle for direct access.
        this.libraryCacheManager = taskExecutorServices.getLibraryCacheManager();
        this.taskSlotTable = taskExecutorServices.getTaskSlotTable();
        this.jobTable = taskExecutorServices.getJobTable();
        this.jobLeaderService = taskExecutorServices.getJobLeaderService();
        this.unresolvedTaskManagerLocation =
                taskExecutorServices.getUnresolvedTaskManagerLocation();
        this.localStateStoresManager = taskExecutorServices.getTaskManagerStateStore();
        this.fileMergingManager = taskExecutorServices.getTaskManagerFileMergingManager();
        this.changelogStoragesManager = taskExecutorServices.getTaskManagerChangelogManager();
        this.channelStateExecutorFactoryManager =
                taskExecutorServices.getTaskManagerChannelStateManager();
        this.shuffleEnvironment = taskExecutorServices.getShuffleEnvironment();
        this.kvStateService = taskExecutorServices.getKvStateService();
        this.ioExecutor = taskExecutorServices.getIOExecutor();
        this.resourceManagerLeaderRetriever = haServices.getResourceManagerLeaderRetriever();
        this.hardwareDescription =
                HardwareDescription.extractFromSystem(taskExecutorServices.getManagedMemorySize());
        this.memoryConfiguration =
                TaskExecutorMemoryConfiguration.create(taskManagerConfiguration.getConfiguration());
        // Resource-manager connection state starts out empty until leader retrieval fires.
        this.resourceManagerAddress = null;
        this.resourceManagerConnection = null;
        this.currentRegistrationTimeoutId = null;
        final ResourceID resourceId =
                taskExecutorServices.getUnresolvedTaskManagerLocation().getResourceID();
        this.jobManagerHeartbeatManager =
                createJobManagerHeartbeatManager(heartbeatServices, resourceId);
        this.resourceManagerHeartbeatManager =
                createResourceManagerHeartbeatManager(heartbeatServices, resourceId);
        // Dedicated single-thread executor for thread-info (back pressure) sampling.
        ExecutorThreadFactory sampleThreadFactory =
                new ExecutorThreadFactory.Builder()
                        .setPoolName("flink-thread-info-sampler")
                        .build();
        ScheduledExecutorService sampleExecutor =
                Executors.newSingleThreadScheduledExecutor(sampleThreadFactory);
        this.threadInfoSampleService = new ThreadInfoSampleService(sampleExecutor);
        this.profilingService =
                ProfilingService.getInstance(taskManagerConfiguration.getConfiguration());
        this.slotAllocationSnapshotPersistenceService =
                taskExecutorServices.getSlotAllocationSnapshotPersistenceService();
        this.sharedResources = taskExecutorServices.getSharedResources();
        this.jobInformationCache = taskExecutorServices.getJobInformationCache();
        this.taskInformationCache = taskExecutorServices.getTaskInformationCache();
        this.shuffleDescriptorsCache = taskExecutorServices.getShuffleDescriptorCache();
    }
private HeartbeatManager<Void, TaskExecutorHeartbeatPayload>
createResourceManagerHeartbeatManager(
HeartbeatServices heartbeatServices, ResourceID resourceId) {
return heartbeatServices.createHeartbeatManager(
resourceId, new ResourceManagerHeartbeatListener(), getMainThreadExecutor(), log);
}
private HeartbeatManager<AllocatedSlotReport, TaskExecutorToJobManagerHeartbeatPayload>
createJobManagerHeartbeatManager(
HeartbeatServices heartbeatServices, ResourceID resourceId) {
return heartbeatServices.createHeartbeatManager(
resourceId, new JobManagerHeartbeatListener(), getMainThreadExecutor(), log);
}
private boolean shouldRetainPartitionsOnJobManagerConnectionLost() {
return taskManagerConfiguration
.getConfiguration()
.get(BatchExecutionOptions.JOB_RECOVERY_ENABLED)
&& taskManagerConfiguration
.getConfiguration()
.get(SHUFFLE_SERVICE_FACTORY_CLASS)
.equals(NETTY_SHUFFLE_SERVICE_FACTORY_CLASS);
}
@Override
public CompletableFuture<Boolean> canBeReleased() {
return CompletableFuture.completedFuture(
shuffleEnvironment.getPartitionsOccupyingLocalResources().isEmpty());
}
@Override
public CompletableFuture<Collection<LogInfo>> requestLogList(Duration timeout) {
return CompletableFuture.supplyAsync(
() -> {
final String logDir = taskManagerConfiguration.getTaskManagerLogDir();
if (logDir != null) {
final File[] logFiles = new File(logDir).listFiles();
if (logFiles == null) {
throw new CompletionException(
new FlinkException(
String.format(
"There isn't a log file in TaskExecutor’s log dir %s.",
logDir)));
}
return Arrays.stream(logFiles)
.filter(File::isFile)
.map(
logFile ->
new LogInfo(
logFile.getName(),
logFile.length(),
logFile.lastModified()))
.collect(Collectors.toList());
}
return Collections.emptyList();
},
ioExecutor);
}
// ------------------------------------------------------------------------
// Life cycle
// ------------------------------------------------------------------------
    /**
     * Endpoint start-up hook: starts all internal services and arms the registration
     * timeout. Any start-up failure is escalated as a fatal error and rethrown.
     */
    @Override
    public void onStart() throws Exception {
        try {
            startTaskExecutorServices();
        } catch (Throwable t) {
            final TaskManagerException exception =
                    new TaskManagerException(
                            String.format("Could not start the TaskExecutor %s", getAddress()), t);
            // Escalate first so the process can act on the fatal error, then rethrow to
            // abort the endpoint start-up.
            onFatalError(exception);
            throw exception;
        }
        // If registration at the resource manager does not complete within the configured
        // timeout, the TaskExecutor will fail itself.
        startRegistrationTimeout();
    }
    /**
     * Starts the internal services in dependency order: resource-manager leader retrieval,
     * the task slot table, the job leader service, and the file cache; finally tries to
     * restore previously persisted slot allocations. On failure, already-started services
     * are stopped again before the exception is rethrown.
     */
    private void startTaskExecutorServices() throws Exception {
        try {
            // start by connecting to the ResourceManager
            resourceManagerLeaderRetriever.start(new ResourceManagerLeaderListener());
            // tell the task slot table who's responsible for the task slot actions
            taskSlotTable.start(new SlotActionsImpl(), getMainThreadExecutor());
            // start the job leader service
            jobLeaderService.start(
                    getAddress(), getRpcService(), haServices, new JobLeaderListenerImpl());
            fileCache =
                    new FileCache(
                            taskManagerConfiguration.getTmpDirectories(),
                            taskExecutorBlobService.getPermanentBlobService());
            // Restore slot allocations persisted by a previous incarnation of this
            // TaskExecutor (local recovery across restarts).
            tryLoadLocalAllocationSnapshots();
        } catch (Exception e) {
            handleStartTaskExecutorServicesException(e);
        }
    }
private void handleStartTaskExecutorServicesException(Exception e) throws Exception {
try {
stopTaskExecutorServices();
} catch (Exception inner) {
e.addSuppressed(inner);
}
throw e;
}
    /** Called to shut down the TaskManager. The method closes all TaskManager services. */
    @Override
    public CompletableFuture<Void> onStop() {
        log.info("Stopping TaskExecutor {}.", getAddress());
        Throwable jobManagerDisconnectThrowable = null;
        FlinkExpectedException cause =
                new FlinkExpectedException("The TaskExecutor is shutting down.");
        // Disconnect from the resource manager and from every job manager first, collecting
        // (not throwing) any failures so that shutdown always proceeds.
        closeResourceManagerConnection(cause);
        for (JobTable.Job job : jobTable.getJobs()) {
            try {
                closeJob(job, cause);
            } catch (Throwable t) {
                jobManagerDisconnectThrowable =
                        ExceptionUtils.firstOrSuppressed(t, jobManagerDisconnectThrowable);
            }
        }
        changelogStoragesManager.shutdown();
        channelStateExecutorFactoryManager.shutdown();
        jobInformationCache.clear();
        taskInformationCache.clear();
        shuffleDescriptorsCache.clear();
        // Closing all jobs above must have emptied the job table.
        Preconditions.checkState(jobTable.isEmpty());
        final Throwable throwableBeforeTasksCompletion = jobManagerDisconnectThrowable;
        // Wait for all tasks/slots to be released, then stop the remaining services; merge
        // any failure from either phase into a single terminal exception.
        return FutureUtils.runAfterwards(taskSlotTable.closeAsync(), this::stopTaskExecutorServices)
                .handle(
                        (ignored, throwable) -> {
                            handleOnStopException(throwableBeforeTasksCompletion, throwable);
                            return null;
                        });
    }
private void handleOnStopException(
Throwable throwableBeforeTasksCompletion, Throwable throwableAfterTasksCompletion) {
final Throwable throwable;
if (throwableBeforeTasksCompletion != null) {
throwable =
ExceptionUtils.firstOrSuppressed(
throwableBeforeTasksCompletion, throwableAfterTasksCompletion);
} else {
throwable = throwableAfterTasksCompletion;
}
if (throwable != null) {
throw new CompletionException(
new FlinkException("Error while shutting the TaskExecutor down.", throwable));
} else {
log.info("Stopped TaskExecutor {}.", getAddress());
}
}
private void stopTaskExecutorServices() throws Exception {
Exception exception = null;
try {
threadInfoSampleService.close();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
try {
jobLeaderService.stop();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
try {
resourceManagerLeaderRetriever.stop();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
try {
taskExecutorServices.shutDown();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
try {
fileCache.shutdown();
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
// it will call close() recursively from the parent to children
taskManagerMetricGroup.close();
ExceptionUtils.tryRethrowException(exception);
}
// ======================================================================
// RPC methods
// ======================================================================
@Override
public CompletableFuture<TaskThreadInfoResponse> requestThreadInfoSamples(
final Collection<ExecutionAttemptID> taskExecutionAttemptIds,
final ThreadInfoSamplesRequest requestParams,
final Duration timeout) {
final Collection<Task> tasks = new ArrayList<>();
for (ExecutionAttemptID executionAttemptId : taskExecutionAttemptIds) {
final Task task = taskSlotTable.getTask(executionAttemptId);
if (task == null) {
log.warn(
String.format(
"Cannot sample task %s. "
+ "Task is not known to the task manager.",
executionAttemptId));
} else {
tasks.add(task);
}
}
Map<Long, ExecutionAttemptID> sampleableTasks =
tasks.stream()
.collect(
Collectors.toMap(
task -> task.getExecutingThread().getId(),
Task::getExecutionId));
final CompletableFuture<Map<ExecutionAttemptID, Collection<ThreadInfoSample>>>
stackTracesFuture =
threadInfoSampleService.requestThreadInfoSamples(
sampleableTasks, requestParams);
return stackTracesFuture.thenApply(TaskThreadInfoResponse::new);
}
// ----------------------------------------------------------------------
// Task lifecycle RPCs
// ----------------------------------------------------------------------
    /**
     * Deploys a task onto a previously allocated and activated slot.
     *
     * <p>The method validates the request (known job manager connection, matching leader id,
     * active slot), re-integrates offloaded deployment data, builds all task-scoped services
     * (metrics, state, changelog storage, gateways), constructs the {@link Task}, registers
     * it in the slot table and starts its thread. All failures are returned as an
     * exceptionally completed future carrying a {@link TaskSubmissionException}.
     */
    @Override
    public CompletableFuture<Acknowledge> submitTask(
            TaskDeploymentDescriptor tdd, JobMasterId jobMasterId, Duration timeout) {
        final JobID jobId = tdd.getJobId();
        // todo: consider adding task info
        try (MdcCloseable ignored = MdcUtils.withContext(MdcUtils.asContextData(jobId))) {
            final ExecutionAttemptID executionAttemptID = tdd.getExecutionAttemptId();
            // A task may only be submitted through an established job manager connection.
            final JobTable.Connection jobManagerConnection =
                    jobTable.getConnection(jobId)
                            .orElseThrow(
                                    () -> {
                                        final String message =
                                                "Could not submit task because there is no JobManager "
                                                        + "associated for the job "
                                                        + jobId
                                                        + '.';
                                        log.debug(message);
                                        return new TaskSubmissionException(message);
                                    });
            // Reject submissions from a stale or foreign job master leader.
            if (!Objects.equals(jobManagerConnection.getJobMasterId(), jobMasterId)) {
                final String message =
                        "Rejecting the task submission because the job manager leader id "
                                + jobMasterId
                                + " does not match the expected job manager leader id "
                                + jobManagerConnection.getJobMasterId()
                                + '.';
                log.debug(message);
                throw new TaskSubmissionException(message);
            }
            // The slot must exist for this job/allocation and be (or become) active.
            if (!taskSlotTable.tryMarkSlotActive(jobId, tdd.getAllocationId())) {
                final String message =
                        "No task slot allocated for job ID "
                                + jobId
                                + " and allocation ID "
                                + tdd.getAllocationId()
                                + '.';
                log.debug(message);
                throw new TaskSubmissionException(message);
            }
            // re-integrate offloaded data and deserialize shuffle descriptors
            try {
                tdd.loadBigData(
                        taskExecutorBlobService.getPermanentBlobService(),
                        jobInformationCache,
                        taskInformationCache,
                        shuffleDescriptorsCache);
            } catch (IOException | ClassNotFoundException e) {
                throw new TaskSubmissionException(
                        "Could not re-integrate offloaded TaskDeploymentDescriptor data.", e);
            }
            // deserialize the pre-serialized information
            final JobInformation jobInformation;
            final TaskInformation taskInformation;
            final JobManagerTaskRestore taskRestore;
            try {
                jobInformation = tdd.getJobInformation();
                taskInformation = tdd.getTaskInformation();
                taskRestore = tdd.getTaskRestore();
            } catch (IOException | ClassNotFoundException e) {
                throw new TaskSubmissionException(
                        "Could not deserialize the job or task information.", e);
            }
            // Sanity check: the descriptor's job id must match the deserialized payload.
            if (!jobId.equals(jobInformation.getJobId())) {
                throw new TaskSubmissionException(
                        "Inconsistent job ID information inside TaskDeploymentDescriptor ("
                                + tdd.getJobId()
                                + " vs. "
                                + jobInformation.getJobId()
                                + ")");
            }
            TaskManagerJobMetricGroup jobGroup =
                    taskManagerMetricGroup.addJob(
                            jobInformation.getJobId(), jobInformation.getJobName());
            // note that a pre-existing job group can NOT be closed concurrently - this is done by
            // the same TM thread in removeJobMetricsGroup
            TaskMetricGroup taskMetricGroup =
                    jobGroup.addTask(tdd.getExecutionAttemptId(), taskInformation.getTaskName());
            // Gateway through which the task fetches input splits from the job master.
            InputSplitProvider inputSplitProvider =
                    new RpcInputSplitProvider(
                            jobManagerConnection.getJobManagerGateway(),
                            taskInformation.getJobVertexId(),
                            tdd.getExecutionAttemptId(),
                            taskManagerConfiguration.getRpcTimeout());
            // Gateway for operator events; send failures fail the task asynchronously.
            final TaskOperatorEventGateway taskOperatorEventGateway =
                    new RpcTaskOperatorEventGateway(
                            jobManagerConnection.getJobManagerGateway(),
                            executionAttemptID,
                            (t) ->
                                    runAsync(
                                            () ->
                                                    failTask(
                                                            jobInformation.getJobId(),
                                                            executionAttemptID,
                                                            t)));
            TaskManagerActions taskManagerActions = jobManagerConnection.getTaskManagerActions();
            CheckpointResponder checkpointResponder = jobManagerConnection.getCheckpointResponder();
            GlobalAggregateManager aggregateManager =
                    jobManagerConnection.getGlobalAggregateManager();
            LibraryCacheManager.ClassLoaderHandle classLoaderHandle =
                    jobManagerConnection.getClassLoaderHandle();
            PartitionProducerStateChecker partitionStateChecker =
                    jobManagerConnection.getPartitionStateChecker();
            // Local (task-manager-side) state store for local recovery of this subtask.
            final TaskLocalStateStore localStateStore =
                    localStateStoresManager.localStateStoreForSubtask(
                            jobId,
                            tdd.getAllocationId(),
                            taskInformation.getJobVertexId(),
                            tdd.getSubtaskIndex(),
                            taskManagerConfiguration.getConfiguration(),
                            jobInformation.getJobConfiguration());
            // May be null when file-merging snapshots are not enabled for this job.
            final FileMergingSnapshotManager fileMergingSnapshotManager =
                    fileMergingManager.fileMergingSnapshotManagerForTask(
                            jobId,
                            getResourceID(),
                            tdd.getExecutionAttemptId(),
                            taskManagerConfiguration.getConfiguration(),
                            jobInformation.getJobConfiguration(),
                            jobGroup);
            final FileMergingSnapshotManagerClosableWrapper fileMergingSnapshotManagerClosable =
                    fileMergingSnapshotManager == null
                            ? null
                            : FileMergingSnapshotManagerClosableWrapper.of(
                                    fileMergingSnapshotManager,
                                    () ->
                                            fileMergingManager.releaseMergingSnapshotManagerForTask(
                                                    jobId, tdd.getExecutionAttemptId()));
            // TODO: Pass config value from user program and do overriding here.
            final StateChangelogStorage<?> changelogStorage;
            try {
                changelogStorage =
                        changelogStoragesManager.stateChangelogStorageForJob(
                                jobId,
                                taskManagerConfiguration.getConfiguration(),
                                jobGroup,
                                localStateStore.getLocalRecoveryConfig());
            } catch (IOException e) {
                throw new TaskSubmissionException(e);
            }
            final TaskStateManager taskStateManager =
                    new TaskStateManagerImpl(
                            jobId,
                            tdd.getExecutionAttemptId(),
                            localStateStore,
                            fileMergingSnapshotManagerClosable,
                            changelogStorage,
                            changelogStoragesManager,
                            taskRestore,
                            checkpointResponder);
            MemoryManager memoryManager;
            try {
                memoryManager = taskSlotTable.getTaskMemoryManager(tdd.getAllocationId());
            } catch (SlotNotFoundException e) {
                throw new TaskSubmissionException("Could not submit task.", e);
            }
            Task task =
                    new Task(
                            jobInformation,
                            taskInformation,
                            tdd.getExecutionAttemptId(),
                            tdd.getAllocationId(),
                            tdd.getProducedPartitions(),
                            tdd.getInputGates(),
                            memoryManager,
                            sharedResources,
                            taskExecutorServices.getIOManager(),
                            taskExecutorServices.getShuffleEnvironment(),
                            taskExecutorServices.getKvStateService(),
                            taskExecutorServices.getBroadcastVariableManager(),
                            taskExecutorServices.getTaskEventDispatcher(),
                            externalResourceInfoProvider,
                            taskStateManager,
                            taskManagerActions,
                            inputSplitProvider,
                            checkpointResponder,
                            taskOperatorEventGateway,
                            aggregateManager,
                            classLoaderHandle,
                            fileCache,
                            taskManagerConfiguration,
                            taskMetricGroup,
                            partitionStateChecker,
                            MdcUtils.scopeToJob(jobId, getRpcService().getScheduledExecutor()),
                            channelStateExecutorFactoryManager.getOrCreateExecutorFactory(jobId));
            taskMetricGroup.gauge(MetricNames.IS_BACK_PRESSURED, task::isBackPressured);
            log.info(
                    "Received task {} ({}), deploy into slot with allocation id {}.",
                    task.getTaskInfo().getTaskNameWithSubtasks(),
                    tdd.getExecutionAttemptId(),
                    tdd.getAllocationId());
            boolean taskAdded;
            try {
                taskAdded = taskSlotTable.addTask(task);
            } catch (SlotNotFoundException | SlotNotActiveException e) {
                throw new TaskSubmissionException("Could not submit task.", e);
            }
            if (taskAdded) {
                // Start execution and register partition cleanup tied to task termination.
                task.startTaskThread();
                setupResultPartitionBookkeeping(
                        tdd.getJobId(), tdd.getProducedPartitions(), task.getTerminationFuture());
                return CompletableFuture.completedFuture(Acknowledge.get());
            } else {
                final String message =
                        "TaskManager already contains a task for id " + task.getExecutionId() + '.';
                log.debug(message);
                throw new TaskSubmissionException(message);
            }
        } catch (TaskSubmissionException e) {
            return FutureUtils.completedExceptionally(e);
        }
    }
    /**
     * Starts tracking the produced result partitions of a task and wires their cleanup to
     * the task's termination: partitions of a non-FINISHED task are untracked (and thereby
     * released) when the task terminates. The combined termination+cleanup future is
     * recorded per job so job-level resource release can wait for it.
     */
    private void setupResultPartitionBookkeeping(
            JobID jobId,
            Collection<ResultPartitionDeploymentDescriptor> producedResultPartitions,
            CompletableFuture<ExecutionState> terminationFuture) {
        // Track only the partitions that require an explicit release call, and remember
        // their ids for cleanup on termination.
        final Set<ResultPartitionID> partitionsRequiringRelease =
                filterPartitionsRequiringRelease(producedResultPartitions)
                        .peek(
                                rpdd ->
                                        partitionTracker.startTrackingPartition(
                                                jobId, TaskExecutorPartitionInfo.from(rpdd)))
                        .map(ResultPartitionDeploymentDescriptor::getShuffleDescriptor)
                        .map(ShuffleDescriptor::getResultPartitionID)
                        .collect(Collectors.toSet());
        // Runs on the main thread executor because the partition tracker is not thread-safe.
        final CompletableFuture<ExecutionState> taskTerminationWithResourceCleanupFuture =
                terminationFuture.thenApplyAsync(
                        executionState -> {
                            if (executionState != ExecutionState.FINISHED) {
                                partitionTracker.stopTrackingPartitions(partitionsRequiringRelease);
                            }
                            return executionState;
                        },
                        getMainThreadExecutor());
        taskResultPartitionCleanupFuturesPerJob.compute(
                jobId,
                (ignored, completableFutures) -> {
                    if (completableFutures == null) {
                        completableFutures = new ArrayList<>(4);
                    }
                    completableFutures.add(taskTerminationWithResourceCleanupFuture);
                    return completableFutures;
                });
    }
private Stream<ResultPartitionDeploymentDescriptor> filterPartitionsRequiringRelease(
Collection<ResultPartitionDeploymentDescriptor> producedResultPartitions) {
return producedResultPartitions.stream()
// only releaseByScheduler partitions require explicit release call
.filter(d -> d.getPartitionType().isReleaseByScheduler())
// partitions without local resources don't store anything on the TaskExecutor
.filter(d -> d.getShuffleDescriptor().storesLocalResourcesOn().isPresent());
}
@Override
public CompletableFuture<Acknowledge> cancelTask(
ExecutionAttemptID executionAttemptID, Duration timeout) {
final Task task = taskSlotTable.getTask(executionAttemptID);
if (task != null) {
try (MdcCloseable ignored =
MdcUtils.withContext(MdcUtils.asContextData(task.getJobID()))) {
try {
task.cancelExecution();
return CompletableFuture.completedFuture(Acknowledge.get());
} catch (Throwable t) {
return FutureUtils.completedExceptionally(
new TaskException(
"Cannot cancel task for execution " + executionAttemptID + '.',
t));
}
}
} else {
final String message =
"Cannot find task to stop for execution " + executionAttemptID + '.';
log.debug(message);
return FutureUtils.completedExceptionally(new TaskException(message));
}
}
// ----------------------------------------------------------------------
// Partition lifecycle RPCs
// ----------------------------------------------------------------------
    /**
     * Updates the input-gate partition locations of a running task. Each update is applied
     * asynchronously (the shuffle environment call may block); failures to apply an update
     * fail the affected task externally. Updates for tasks that are no longer running are
     * silently acknowledged.
     */
    @Override
    public CompletableFuture<Acknowledge> updatePartitions(
            final ExecutionAttemptID executionAttemptID,
            Iterable<PartitionInfo> partitionInfos,
            Duration timeout) {
        final Task task = taskSlotTable.getTask(executionAttemptID);
        if (task != null) {
            for (final PartitionInfo partitionInfo : partitionInfos) {
                // Run asynchronously because it might be blocking
                FutureUtils.assertNoException(
                        CompletableFuture.runAsync(
                                () -> {
                                    try {
                                        if (!shuffleEnvironment.updatePartitionInfo(
                                                executionAttemptID, partitionInfo)) {
                                            log.debug(
                                                    "Discard update for input gate partition {} of result {} in task {}. "
                                                            + "The partition is no longer available.",
                                                    partitionInfo
                                                            .getShuffleDescriptor()
                                                            .getResultPartitionID(),
                                                    partitionInfo.getIntermediateDataSetID(),
                                                    executionAttemptID);
                                        }
                                    } catch (IOException | InterruptedException e) {
                                        log.error(
                                                "Could not update input data location for task {}. Trying to fail task.",
                                                task.getTaskInfo().getTaskName(),
                                                e);
                                        task.failExternally(e);
                                    }
                                },
                                getRpcService().getScheduledExecutor()));
            }
            return CompletableFuture.completedFuture(Acknowledge.get());
        } else {
            log.debug(
                    "Discard update for input partitions of task {}. Task is no longer running.",
                    executionAttemptID);
            return CompletableFuture.completedFuture(Acknowledge.get());
        }
    }
    /**
     * Releases the given job partitions and, once the job holds no resources on this
     * TaskExecutor anymore, also closes the corresponding JobManager connection. Any
     * failure here leaves partition bookkeeping in an undefined state and is treated as
     * fatal.
     */
    @Override
    public void releasePartitions(JobID jobId, Set<ResultPartitionID> partitionIds) {
        try {
            partitionTracker.stopTrackingAndReleaseJobPartitions(partitionIds);
            closeJobManagerConnectionIfNoAllocatedResources(jobId);
        } catch (Throwable t) {
            onFatalError(t);
        }
    }
@Override
public CompletableFuture<Acknowledge> promotePartitions(
JobID jobId, Set<ResultPartitionID> partitionIds) {
CompletableFuture<Acknowledge> future = new CompletableFuture<>();
try {
partitionTracker.promoteJobPartitions(partitionIds);
if (establishedResourceManagerConnection != null) {
establishedResourceManagerConnection
.getResourceManagerGateway()
.reportClusterPartitions(
getResourceID(), partitionTracker.createClusterPartitionReport())
.thenAccept(ignore -> future.complete(Acknowledge.get()));
} else {
future.completeExceptionally(
new RuntimeException(
"Task executor is not connecting to ResourceManager. "
+ "Fail to report cluster partition to ResourceManager"));
}
closeJobManagerConnectionIfNoAllocatedResources(jobId);
} catch (Throwable t) {
future.completeExceptionally(t);
onFatalError(t);
}
return future;
}
    /**
     * Releases all cluster partitions belonging to the given data sets and acknowledges
     * immediately; the release itself is handled synchronously by the partition tracker.
     */
    @Override
    public CompletableFuture<Acknowledge> releaseClusterPartitions(
            Collection<IntermediateDataSetID> dataSetsToRelease, Duration timeout) {
        partitionTracker.stopTrackingAndReleaseClusterPartitions(dataSetsToRelease);
        return CompletableFuture.completedFuture(Acknowledge.get());
    }
// ----------------------------------------------------------------------
// Heartbeat RPC
// ----------------------------------------------------------------------
    /**
     * Handles a heartbeat request from a job manager, carrying that job manager's view of
     * the slots allocated on this TaskExecutor (used to reconcile slot state).
     */
    @Override
    public CompletableFuture<Void> heartbeatFromJobManager(
            ResourceID resourceID, AllocatedSlotReport allocatedSlotReport) {
        return jobManagerHeartbeatManager.requestHeartbeat(resourceID, allocatedSlotReport);
    }
    /** Handles a heartbeat request from the resource manager (no payload is exchanged in). */
    @Override
    public CompletableFuture<Void> heartbeatFromResourceManager(ResourceID resourceID) {
        return resourceManagerHeartbeatManager.requestHeartbeat(resourceID, null);
    }
// ----------------------------------------------------------------------
// Checkpointing RPCs
// ----------------------------------------------------------------------
    /**
     * Triggers a checkpoint barrier for the given execution attempt. Acknowledges once the
     * barrier has been handed to the task (not once the checkpoint completes); fails with a
     * {@link CheckpointException} when the task is unknown.
     */
    @Override
    public CompletableFuture<Acknowledge> triggerCheckpoint(
            ExecutionAttemptID executionAttemptID,
            long checkpointId,
            long checkpointTimestamp,
            CheckpointOptions checkpointOptions) {
        final Task task = taskSlotTable.getTask(executionAttemptID);
        if (task != null) {
            try (MdcCloseable ignored =
                    MdcUtils.withContext(MdcUtils.asContextData(task.getJobID()))) {
                log.debug(
                        "Trigger checkpoint {}@{} for {}.",
                        checkpointId,
                        checkpointTimestamp,
                        executionAttemptID);
                task.triggerCheckpointBarrier(checkpointId, checkpointTimestamp, checkpointOptions);
                return CompletableFuture.completedFuture(Acknowledge.get());
            }
        } else {
            final String message =
                    "TaskManager received a checkpoint request for unknown task "
                            + executionAttemptID
                            + '.';
            log.debug(message);
            return FutureUtils.completedExceptionally(
                    new CheckpointException(
                            message, CheckpointFailureReason.TASK_CHECKPOINT_FAILURE));
        }
    }
    /**
     * Notifies the given execution attempt that a checkpoint completed and that an earlier
     * checkpoint has been subsumed (so its resources may be discarded). Fails with a
     * {@link CheckpointException} when the task is unknown.
     */
    @Override
    public CompletableFuture<Acknowledge> confirmCheckpoint(
            ExecutionAttemptID executionAttemptID,
            long completedCheckpointId,
            long completedCheckpointTimestamp,
            long lastSubsumedCheckpointId) {
        final Task task = taskSlotTable.getTask(executionAttemptID);
        if (task != null) {
            try (MdcCloseable ignored =
                    MdcUtils.withContext(MdcUtils.asContextData(task.getJobID()))) {
                log.debug(
                        "Confirm completed checkpoint {}@{} and last subsumed checkpoint {} for {}.",
                        completedCheckpointId,
                        completedCheckpointTimestamp,
                        lastSubsumedCheckpointId,
                        executionAttemptID);
                task.notifyCheckpointComplete(completedCheckpointId);
                task.notifyCheckpointSubsumed(lastSubsumedCheckpointId);
                return CompletableFuture.completedFuture(Acknowledge.get());
            }
        } else {
            final String message =
                    "TaskManager received a checkpoint confirmation for unknown task "
                            + executionAttemptID
                            + '.';
            log.debug(message);
            return FutureUtils.completedExceptionally(
                    new CheckpointException(
                            message,
                            CheckpointFailureReason.UNKNOWN_TASK_CHECKPOINT_NOTIFICATION_FAILURE));
        }
    }
    /**
     * Notifies the given execution attempt that a checkpoint was aborted, also passing the
     * id of the latest completed checkpoint for reference. Fails with a
     * {@link CheckpointException} when the task is unknown.
     */
    @Override
    public CompletableFuture<Acknowledge> abortCheckpoint(
            ExecutionAttemptID executionAttemptID,
            long checkpointId,
            long latestCompletedCheckpointId,
            long checkpointTimestamp) {
        final Task task = taskSlotTable.getTask(executionAttemptID);
        if (task != null) {
            try (MdcCloseable ignored =
                    MdcUtils.withContext(MdcUtils.asContextData(task.getJobID()))) {
                log.debug(
                        "Abort checkpoint {}@{} for {}.",
                        checkpointId,
                        checkpointTimestamp,
                        executionAttemptID);
                task.notifyCheckpointAborted(checkpointId, latestCompletedCheckpointId);
                return CompletableFuture.completedFuture(Acknowledge.get());
            }
        } else {
            final String message =
                    "TaskManager received an aborted checkpoint for unknown task "
                            + executionAttemptID
                            + '.';
            log.debug(message);
            return FutureUtils.completedExceptionally(
                    new CheckpointException(
                            message,
                            CheckpointFailureReason.UNKNOWN_TASK_CHECKPOINT_NOTIFICATION_FAILURE));
        }
    }
// ----------------------------------------------------------------------
// Slot allocation RPCs
// ----------------------------------------------------------------------
    /**
     * Handles a slot request from the resource manager: persists the allocation snapshot
     * (for local recovery), allocates the slot for the job, and — if the job master is
     * already connected — offers the slot to it right away. Requests from a resource
     * manager we are not connected to are rejected.
     */
    @Override
    public CompletableFuture<Acknowledge> requestSlot(
            final SlotID slotId,
            final JobID jobId,
            final AllocationID allocationId,
            final ResourceProfile resourceProfile,
            final String targetAddress,
            final ResourceManagerId resourceManagerId,
            final Duration timeout) {
        // TODO: Filter invalid requests from the resource manager by using the
        // instance/registration Id
        try (MdcCloseable ignored = MdcUtils.withContext(MdcUtils.asContextData(jobId))) {
            log.info(
                    "Receive slot request {} for job {} from resource manager with leader id {}.",
                    allocationId,
                    jobId,
                    resourceManagerId);
            if (!isConnectedToResourceManager(resourceManagerId)) {
                final String message =
                        String.format(
                                "TaskManager is not connected to the resource manager %s.",
                                resourceManagerId);
                log.debug(message);
                return FutureUtils.completedExceptionally(new TaskManagerException(message));
            }
            // Persist before allocating so the allocation can be restored after a restart.
            tryPersistAllocationSnapshot(
                    new SlotAllocationSnapshot(
                            slotId, jobId, targetAddress, allocationId, resourceProfile));
            try {
                final boolean isConnected =
                        allocateSlotForJob(
                                jobId, slotId, allocationId, resourceProfile, targetAddress);
                if (isConnected) {
                    // Job master connection already established: offer the slot immediately.
                    offerSlotsToJobManager(jobId);
                }
                return CompletableFuture.completedFuture(Acknowledge.get());
            } catch (SlotAllocationException e) {
                log.debug("Could not allocate slot for allocation id {}.", allocationId, e);
                return FutureUtils.completedExceptionally(e);
            }
        }
    }
    /**
     * Allocates a slot for the given job and makes sure the job is registered in the job
     * table (registering its services on first use). If job registration fails, the freshly
     * allocated slot and its local state are rolled back before the failure is rethrown.
     *
     * @return whether the job's master is already connected (i.e. slots can be offered now)
     */
    private boolean allocateSlotForJob(
            JobID jobId,
            SlotID slotId,
            AllocationID allocationId,
            ResourceProfile resourceProfile,
            String targetAddress)
            throws SlotAllocationException {
        allocateSlot(slotId, jobId, allocationId, resourceProfile);
        final JobTable.Job job;
        try {
            job =
                    jobTable.getOrCreateJob(
                            jobId, () -> registerNewJobAndCreateServices(jobId, targetAddress));
        } catch (Exception e) {
            // free the allocated slot
            try {
                taskSlotTable.freeSlot(allocationId);
            } catch (SlotNotFoundException slotNotFoundException) {
                // slot no longer existent, this should actually never happen, because we've
                // just allocated the slot. So let's fail hard in this case!
                onFatalError(slotNotFoundException);
            }
            // release local state under the allocation id.
            localStateStoresManager.releaseLocalStateForAllocationId(allocationId);
            // sanity check
            if (!taskSlotTable.isSlotFree(slotId.getSlotNumber())) {
                onFatalError(new Exception("Could not free slot " + slotId));
            }
            throw new SlotAllocationException("Could not create new job.", e);
        }
        return job.isConnected();
    }
private TaskExecutorJobServices registerNewJobAndCreateServices(
JobID jobId, String targetAddress) throws Exception {
jobLeaderService.addJob(jobId, targetAddress);
final JobPermanentBlobService permanentBlobService =
taskExecutorBlobService.getPermanentBlobService();
permanentBlobService.registerJob(jobId);
return TaskExecutorJobServices.create(
libraryCacheManager.registerClassLoaderLease(jobId),
() -> permanentBlobService.releaseJob(jobId));
}
    /**
     * Allocates the given slot for the job/allocation id. A request for a slot that is
     * already allocated with the same job and allocation id is treated as a no-op
     * (idempotent retry); a slot held by a different allocation yields a
     * {@link SlotOccupiedException}.
     */
    private void allocateSlot(
            SlotID slotId, JobID jobId, AllocationID allocationId, ResourceProfile resourceProfile)
            throws SlotAllocationException {
        if (taskSlotTable.isSlotFree(slotId.getSlotNumber())) {
            taskSlotTable.allocateSlot(
                    slotId.getSlotNumber(),
                    jobId,
                    allocationId,
                    resourceProfile,
                    taskManagerConfiguration.getSlotTimeout());
        } else if (!taskSlotTable.isAllocated(slotId.getSlotNumber(), jobId, allocationId)) {
            final String message =
                    "The slot " + slotId + " has already been allocated for a different job.";
            log.info(message);
            final AllocationID allocationID =
                    taskSlotTable.getCurrentAllocation(slotId.getSlotNumber());
            throw new SlotOccupiedException(
                    message, allocationID, taskSlotTable.getOwningJob(allocationID));
        }
    }
    /**
     * Frees the slot held by the given allocation; always acknowledges since the actual
     * freeing (and any error handling) happens in {@code freeSlotInternal}.
     */
    @Override
    public CompletableFuture<Acknowledge> freeSlot(
            AllocationID allocationId, Throwable cause, Duration timeout) {
        freeSlotInternal(allocationId, cause);
        return CompletableFuture.completedFuture(Acknowledge.get());
    }
@Override
public void freeInactiveSlots(JobID jobId, Duration timeout) {
try (MdcCloseable ignored = MdcUtils.withContext(MdcUtils.asContextData(jobId))) {
log.debug("Freeing inactive slots for job {}.", jobId);
// need a copy to prevent ConcurrentModificationExceptions
final ImmutableList<TaskSlot<Task>> inactiveSlots =
ImmutableList.copyOf(taskSlotTable.getAllocatedSlots(jobId));
for (TaskSlot<Task> slot : inactiveSlots) {
freeSlotInternal(
slot.getAllocationId(),
new FlinkException("Slot was re-claimed by resource manager."));
}
}
}
@Override
public CompletableFuture<TransientBlobKey> requestFileUploadByType(
FileType fileType, Duration timeout) {
final String filePath;
switch (fileType) {
case LOG:
filePath = taskManagerConfiguration.getTaskManagerLogPath();
break;
case STDOUT:
filePath = taskManagerConfiguration.getTaskManagerStdoutPath();
break;
default:
filePath = null;
}
return requestFileUploadByFilePath(filePath, fileType.toString());
}
    /** Convenience overload: uploads a file from the log directory by name. */
    @Override
    public CompletableFuture<TransientBlobKey> requestFileUploadByName(
            String fileName, Duration timeout) {
        return requestFileUploadByNameAndType(fileName, FileType.LOG, timeout);
    }
@Override
public CompletableFuture<TransientBlobKey> requestFileUploadByNameAndType(
String fileName, FileType fileType, Duration timeout) {
final String filePath;
final String baseDir;
switch (fileType) {
case LOG:
baseDir = taskManagerConfiguration.getTaskManagerLogDir();
break;
case PROFILER:
baseDir = profilingService.getProfilingResultDir();
break;
default:
baseDir = null;
}
if (StringUtils.isNullOrWhitespaceOnly(baseDir)
|| StringUtils.isNullOrWhitespaceOnly(fileName)) {
filePath = null;
} else {
filePath = new File(baseDir, new File(fileName).getName()).getPath();
}
return requestFileUploadByFilePath(filePath, fileName);
}
@Override
public CompletableFuture<SerializableOptional<String>> requestMetricQueryServiceAddress(
Duration timeout) {
return CompletableFuture.completedFuture(
SerializableOptional.ofNullable(metricQueryServiceAddress));
}
// ----------------------------------------------------------------------
// Disconnection RPCs
// ----------------------------------------------------------------------
@Override
public void disconnectJobManager(JobID jobId, Exception cause) {
try (MdcCloseable ignored = MdcUtils.withContext(MdcUtils.asContextData(jobId))) {
jobTable.getConnection(jobId)
.ifPresent(
jobManagerConnection ->
disconnectAndTryReconnectToJobManager(
jobManagerConnection, cause, true));
}
}
    /**
     * Tears down the given JobManager connection and then re-registers with the job leader
     * service so a new connection is established once a leader is (re)elected.
     *
     * @param jobManagerConnection connection to disconnect
     * @param cause reason for the disconnect
     * @param releasePartitions whether the job's result partitions should be released
     */
    private void disconnectAndTryReconnectToJobManager(
            JobTable.Connection jobManagerConnection, Exception cause, boolean releasePartitions) {
        // Scope the job id into the MDC for all disconnect/reconnect logging.
        try (MdcCloseable ignored =
                MdcUtils.withContext(MdcUtils.asContextData(jobManagerConnection.getJobId()))) {
            disconnectJobManagerConnection(jobManagerConnection, cause, releasePartitions);
            jobLeaderService.reconnect(jobManagerConnection.getJobId());
        }
    }
@Override
public void disconnectResourceManager(Exception cause) {
if (isRunning()) {
reconnectToResourceManager(cause);
}
}
// ----------------------------------------------------------------------
// Other RPCs
// ----------------------------------------------------------------------
@Override
public CompletableFuture<Acknowledge> sendOperatorEventToTask(
ExecutionAttemptID executionAttemptID,
OperatorID operatorId,
SerializedValue<OperatorEvent> evt) {
log.debug("Operator event for {} - {}", executionAttemptID, operatorId);
final Task task = taskSlotTable.getTask(executionAttemptID);
if (task == null) {
return FutureUtils.completedExceptionally(
new TaskNotRunningException(
"Task " + executionAttemptID + " not running on TaskManager"));
}
try {
task.deliverOperatorEvent(operatorId, evt);
return CompletableFuture.completedFuture(Acknowledge.get());
} catch (Throwable t) {
ExceptionUtils.rethrowIfFatalError(t);
return FutureUtils.completedExceptionally(t);
}
}
@Override
public CompletableFuture<ThreadDumpInfo> requestThreadDump(Duration timeout) {
int stacktraceMaxDepth =
taskManagerConfiguration
.getConfiguration()
.get(ClusterOptions.THREAD_DUMP_STACKTRACE_MAX_DEPTH);
return CompletableFuture.completedFuture(ThreadDumpInfo.dumpAndCreate(stacktraceMaxDepth));
}
@Override
public CompletableFuture<Acknowledge> updateDelegationTokens(
ResourceManagerId resourceManagerId, byte[] tokens) {
log.info(
"Receive update delegation tokens from resource manager with leader id {}.",
resourceManagerId);
if (!isConnectedToResourceManager(resourceManagerId)) {
final String message =
String.format(
"TaskManager is not connected to the resource manager %s.",
resourceManagerId);
log.debug(message);
return FutureUtils.completedExceptionally(new TaskManagerException(message));
}
try {
delegationTokenReceiverRepository.onNewTokensObtained(tokens);
return CompletableFuture.completedFuture(Acknowledge.get());
} catch (Throwable t) {
log.error("Could not update delegation tokens.", t);
ExceptionUtils.rethrowIfFatalError(t);
return FutureUtils.completedExceptionally(t);
}
}
@Override
public CompletableFuture<Collection<PartitionWithMetrics>> getAndRetainPartitionWithMetrics(
JobID jobId) {
jobPartitionToCleanupSet.remove(jobId);
Collection<TaskExecutorPartitionInfo> partitionInfoList =
partitionTracker.getTrackedPartitionsFor(jobId);
List<PartitionWithMetrics> partitionWithMetrics = new ArrayList<>();
partitionInfoList.forEach(
info -> {
ResultPartitionID partitionId = info.getResultPartitionId();
shuffleEnvironment
.getMetricsIfPartitionOccupyingLocalResource(partitionId)
.ifPresent(
metrics ->
partitionWithMetrics.add(
new DefaultPartitionWithMetrics(
info.getShuffleDescriptor(), metrics)));
});
return CompletableFuture.completedFuture(partitionWithMetrics);
}
    /** Starts a profiling run of the given duration and mode on this TaskExecutor. */
    @Override
    public CompletableFuture<ProfilingInfo> requestProfiling(
            int duration, ProfilingInfo.ProfilingMode mode, Duration timeout) {
        return profilingService.requestProfiling(
                getResourceID().getResourceIdString(), duration, mode);
    }
    /** Lists the profiling results available on this TaskExecutor. */
    @Override
    public CompletableFuture<Collection<ProfilingInfo>> requestProfilingList(Duration timeout) {
        return profilingService.getProfilingList(getResourceID().getResourceIdString());
    }
// ------------------------------------------------------------------------
// Internal resource manager connection methods
// ------------------------------------------------------------------------
    /**
     * Remembers the new ResourceManager leader and triggers a reconnect towards it.
     *
     * @param newLeaderAddress address of the new leader, or null if there is currently none
     * @param newResourceManagerId fencing id of the new leader
     */
    private void notifyOfNewResourceManagerLeader(
            String newLeaderAddress, ResourceManagerId newResourceManagerId) {
        // The field must be updated first: reconnectToResourceManager reads it when starting
        // the new connection attempt.
        resourceManagerAddress =
                createResourceManagerAddress(newLeaderAddress, newResourceManagerId);
        reconnectToResourceManager(
                new FlinkException(
                        String.format(
                                "ResourceManager leader changed to new address %s",
                                resourceManagerAddress)));
    }
@Nullable
private ResourceManagerAddress createResourceManagerAddress(
@Nullable String newLeaderAddress, @Nullable ResourceManagerId newResourceManagerId) {
if (newLeaderAddress == null) {
return null;
} else {
assert (newResourceManagerId != null);
return new ResourceManagerAddress(newLeaderAddress, newResourceManagerId);
}
}
    /**
     * Drops the current ResourceManager connection (if any) and starts a new registration
     * attempt towards the currently known leader.
     *
     * <p>Order matters: the old connection is closed first, then the registration timeout is
     * re-armed, and only then is the new connection attempt started.
     */
    private void reconnectToResourceManager(Exception cause) {
        closeResourceManagerConnection(cause);
        startRegistrationTimeout();
        tryConnectToResourceManager();
    }
    /** Starts a connection attempt iff a ResourceManager leader address is currently known. */
    private void tryConnectToResourceManager() {
        if (resourceManagerAddress != null) {
            connectToResourceManager();
        }
    }
    /**
     * Starts a new registration towards the known ResourceManager leader.
     *
     * <p>Preconditions (asserted): a leader address is known and no connection — established
     * or pending — exists yet; callers must have torn down any previous connection first.
     */
    private void connectToResourceManager() {
        assert (resourceManagerAddress != null);
        assert (establishedResourceManagerConnection == null);
        assert (resourceManagerConnection == null);
        log.info("Connecting to ResourceManager {}.", resourceManagerAddress);
        // Registration payload describing this TaskExecutor (address, ports, resources).
        final TaskExecutorRegistration taskExecutorRegistration =
                new TaskExecutorRegistration(
                        getAddress(),
                        getResourceID(),
                        unresolvedTaskManagerLocation.getDataPort(),
                        JMXService.getPort().orElse(-1),
                        hardwareDescription,
                        memoryConfiguration,
                        taskManagerConfiguration.getDefaultSlotResourceProfile(),
                        taskManagerConfiguration.getTotalResourceProfile(),
                        unresolvedTaskManagerLocation.getNodeId());
        // The connection retries registration per the configured retry configuration and
        // calls back into ResourceManagerRegistrationListener on success/rejection.
        resourceManagerConnection =
                new TaskExecutorToResourceManagerConnection(
                        log,
                        getRpcService(),
                        taskManagerConfiguration.getRetryingRegistrationConfiguration(),
                        resourceManagerAddress.getAddress(),
                        resourceManagerAddress.getResourceManagerId(),
                        getMainThreadExecutor(),
                        new ResourceManagerRegistrationListener(),
                        taskExecutorRegistration);
        resourceManagerConnection.start();
    }
    /**
     * Finalizes a successful ResourceManager registration: sends the initial slot report,
     * starts heartbeating, propagates the blob server address, records the established
     * connection, and disarms the registration timeout.
     */
    private void establishResourceManagerConnection(
            ResourceManagerGateway resourceManagerGateway,
            ResourceID resourceManagerResourceId,
            InstanceID taskExecutorRegistrationId,
            ClusterInformation clusterInformation) {
        final CompletableFuture<Acknowledge> slotReportResponseFuture =
                resourceManagerGateway.sendSlotReport(
                        getResourceID(),
                        taskExecutorRegistrationId,
                        taskSlotTable.createSlotReport(getResourceID()),
                        taskManagerConfiguration.getRpcTimeout());
        // If the initial slot report cannot be delivered, the registration is considered
        // failed and a fresh reconnect cycle is started.
        slotReportResponseFuture.whenCompleteAsync(
                (acknowledge, throwable) -> {
                    if (throwable != null) {
                        reconnectToResourceManager(
                                new TaskManagerException(
                                        "Failed to send initial slot report to ResourceManager.",
                                        throwable));
                    }
                },
                getMainThreadExecutor());
        // monitor the resource manager as heartbeat target
        resourceManagerHeartbeatManager.monitorTarget(
                resourceManagerResourceId,
                new ResourceManagerHeartbeatReceiver(resourceManagerGateway));
        // set the propagated blob server address
        final InetSocketAddress blobServerAddress =
                new InetSocketAddress(
                        clusterInformation.getBlobServerHostname(),
                        clusterInformation.getBlobServerPort());
        taskExecutorBlobService.setBlobServerAddress(blobServerAddress);
        establishedResourceManagerConnection =
                new EstablishedResourceManagerConnection(
                        resourceManagerGateway,
                        resourceManagerResourceId,
                        taskExecutorRegistrationId);
        // Registration succeeded, so the fatal registration timeout must be disarmed.
        stopRegistrationTimeout();
    }
    /**
     * Tears down both the established ResourceManager connection and any still-pending
     * registration attempt.
     *
     * @param cause reason for closing, forwarded to the ResourceManager and the logs
     */
    private void closeResourceManagerConnection(Exception cause) {
        if (establishedResourceManagerConnection != null) {
            final ResourceID resourceManagerResourceId =
                    establishedResourceManagerConnection.getResourceManagerResourceId();
            log.info(
                    "Close ResourceManager connection {}.",
                    resourceManagerResourceId,
                    ExceptionUtils.returnExceptionIfUnexpected(cause.getCause()));
            ExceptionUtils.logExceptionIfExcepted(cause.getCause(), log);
            // Stop heartbeating before notifying the RM, then drop our local state.
            resourceManagerHeartbeatManager.unmonitorTarget(resourceManagerResourceId);
            ResourceManagerGateway resourceManagerGateway =
                    establishedResourceManagerConnection.getResourceManagerGateway();
            resourceManagerGateway.disconnectTaskManager(getResourceID(), cause);
            establishedResourceManagerConnection = null;
            // Cluster partitions are owned by the RM; without a connection they are released.
            partitionTracker.stopTrackingAndReleaseAllClusterPartitions();
        }
        if (resourceManagerConnection != null) {
            if (!resourceManagerConnection.isConnected()) {
                if (log.isDebugEnabled()) {
                    log.debug(
                            "Terminating registration attempts towards ResourceManager {}.",
                            resourceManagerConnection.getTargetAddress(),
                            cause);
                } else {
                    log.info(
                            "Terminating registration attempts towards ResourceManager {}.",
                            resourceManagerConnection.getTargetAddress());
                }
            }
            resourceManagerConnection.close();
            resourceManagerConnection = null;
        }
    }
private void startRegistrationTimeout() {
final Duration maxRegistrationDuration =
taskManagerConfiguration.getMaxRegistrationDuration();
if (maxRegistrationDuration != null) {
final UUID newRegistrationTimeoutId = UUID.randomUUID();
currentRegistrationTimeoutId = newRegistrationTimeoutId;
scheduleRunAsync(
() -> registrationTimeout(newRegistrationTimeoutId), maxRegistrationDuration);
}
}
    /** Disarms the registration timeout by invalidating the currently scheduled timeout id. */
    private void stopRegistrationTimeout() {
        currentRegistrationTimeoutId = null;
    }
private void registrationTimeout(@Nonnull UUID registrationTimeoutId) {
if (registrationTimeoutId.equals(currentRegistrationTimeoutId)) {
final Duration maxRegistrationDuration =
taskManagerConfiguration.getMaxRegistrationDuration();
onFatalError(
new RegistrationTimeoutException(
String.format(
"Could not register at the ResourceManager within the specified maximum "
+ "registration duration %s. This indicates a problem with this instance. Terminating now.",
maxRegistrationDuration)));
}
}
// ------------------------------------------------------------------------
// Internal job manager connection methods
// ------------------------------------------------------------------------
    /** Offers all reserved slots for the given job to its JobManager, if we are connected. */
    private void offerSlotsToJobManager(final JobID jobId) {
        jobTable.getConnection(jobId).ifPresent(this::internalOfferSlotsToJobManager);
    }
    /**
     * Offers every slot currently allocated (reserved) for the connection's job to its
     * JobMaster. Each offer round gets a fresh offer id so late responses from superseded
     * rounds can be detected and discarded in {@link #handleAcceptedSlotOffers}.
     */
    private void internalOfferSlotsToJobManager(JobTable.Connection jobManagerConnection) {
        final JobID jobId = jobManagerConnection.getJobId();
        if (taskSlotTable.hasAllocatedSlots(jobId)) {
            log.info("Offer reserved slots to the leader of job {}.", jobId);
            final JobMasterGateway jobMasterGateway = jobManagerConnection.getJobManagerGateway();
            final Iterator<TaskSlot<Task>> reservedSlotsIterator =
                    taskSlotTable.getAllocatedSlots(jobId);
            final JobMasterId jobMasterId = jobManagerConnection.getJobMasterId();
            final Collection<SlotOffer> reservedSlots =
                    CollectionUtil.newHashSetWithExpectedSize(2);
            while (reservedSlotsIterator.hasNext()) {
                SlotOffer offer = reservedSlotsIterator.next().generateSlotOffer();
                reservedSlots.add(offer);
            }
            // Mark this offer round as the current one; older rounds become stale.
            final UUID slotOfferId = UUID.randomUUID();
            currentSlotOfferPerJob.put(jobId, slotOfferId);
            CompletableFuture<Collection<SlotOffer>> acceptedSlotsFuture =
                    jobMasterGateway.offerSlots(
                            getResourceID(),
                            reservedSlots,
                            taskManagerConfiguration.getRpcTimeout());
            // Response is processed on the job's main-thread executor to avoid races.
            acceptedSlotsFuture.whenCompleteAsync(
                    handleAcceptedSlotOffers(
                            jobId, jobMasterGateway, jobMasterId, reservedSlots, slotOfferId),
                    getMainThreadExecutor(jobId));
        } else {
            log.debug("There are no unassigned slots for the job {}.", jobId);
        }
    }
    /**
     * Builds the callback that processes the JobMaster's response to a slot offer round.
     *
     * <p>The callback: discards stale responses (superseded offer id), retries on timeout,
     * frees all offered slots on other errors, and otherwise activates accepted slots and
     * frees rejected ones — but only while the JobManager connection is still valid.
     *
     * @param offerId id of the offer round this callback belongs to; used to detect staleness
     */
    @Nonnull
    private BiConsumer<Iterable<SlotOffer>, Throwable> handleAcceptedSlotOffers(
            JobID jobId,
            JobMasterGateway jobMasterGateway,
            JobMasterId jobMasterId,
            Collection<SlotOffer> offeredSlots,
            UUID offerId) {
        return (Iterable<SlotOffer> acceptedSlots, Throwable throwable) -> {
            // check if this is the latest offer
            if (!offerId.equals(currentSlotOfferPerJob.get(jobId))) {
                // If this offer is outdated then it can be safely ignored.
                // If the response for a given slot is identical in both offers (accepted/rejected),
                // then this is naturally the case since the end-result is the same.
                // If the responses differ, then there are 2 cases to consider:
                // 1) initially rejected, later accepted
                //  This can happen when the resource requirements of a job increases between
                // offers.
                //  In this case the first response MUST be ignored, so that
                //  the slot can be properly activated when the second response arrives.
                // 2) initially accepted, later rejected
                //  This can happen when the resource requirements of a job decrease between
                // offers.
                //  In this case the first response MAY be ignored, because the job no longer
                //  requires the slot (and already has initiated steps to free it) and we can thus
                //  assume that any in-flight task submissions are no longer relevant for the job
                //  execution.
                log.debug(
                        "Discard slot offer response since there is a newer offer for the job {}.",
                        jobId);
                return;
            }
            if (throwable != null) {
                if (throwable instanceof TimeoutException) {
                    log.info(
                            "Slot offering to JobManager did not finish in time. Retrying the slot offering.");
                    // We ran into a timeout. Try again.
                    offerSlotsToJobManager(jobId);
                } else {
                    log.warn(
                            "Slot offering to JobManager failed. Freeing the slots "
                                    + "and returning them to the ResourceManager.",
                            throwable);
                    // We encountered an exception. Free the slots and return them to the RM.
                    for (SlotOffer reservedSlot : offeredSlots) {
                        freeSlotInternal(reservedSlot.getAllocationId(), throwable);
                    }
                }
            } else {
                // check if the response is still valid
                if (isJobManagerConnectionValid(jobId, jobMasterId)) {
                    // mark accepted slots active
                    for (SlotOffer acceptedSlot : acceptedSlots) {
                        final AllocationID allocationId = acceptedSlot.getAllocationId();
                        try {
                            if (!taskSlotTable.markSlotActive(allocationId)) {
                                // the slot is either free or releasing at the moment
                                final String message =
                                        "Could not mark slot " + allocationId + " active.";
                                log.debug(message);
                                jobMasterGateway.failSlot(
                                        getResourceID(), allocationId, new FlinkException(message));
                            }
                        } catch (SlotNotFoundException e) {
                            final String message =
                                    "Could not mark slot " + allocationId + " active.";
                            jobMasterGateway.failSlot(
                                    getResourceID(), allocationId, new FlinkException(message));
                        }
                        // After this loop, offeredSlots only contains the rejected offers.
                        offeredSlots.remove(acceptedSlot);
                    }
                    final Exception e = new Exception("The slot was rejected by the JobManager.");
                    for (SlotOffer rejectedSlot : offeredSlots) {
                        freeSlotInternal(rejectedSlot.getAllocationId(), e);
                    }
                } else {
                    // discard the response since there is a new leader for the job
                    log.debug(
                            "Discard slot offer response since there is a new leader "
                                    + "for the job {}.",
                            jobId);
                }
            }
        };
    }
    /**
     * Establishes (or refreshes) the connection to the JobManager that gained leadership for
     * the given job. An existing connection to the same JobMaster (same fencing token) is
     * kept; a connection to a different leader is torn down first. Afterwards, heartbeating
     * is started and all reserved slots are offered to the new leader.
     */
    private void establishJobManagerConnection(
            JobTable.Job job,
            final JobMasterGateway jobMasterGateway,
            JMTMRegistrationSuccess registrationSuccess) {
        final JobID jobId = job.getJobId();
        final Optional<JobTable.Connection> connection = job.asConnection();
        if (connection.isPresent()) {
            JobTable.Connection oldJobManagerConnection = connection.get();
            if (Objects.equals(
                    oldJobManagerConnection.getJobMasterId(), jobMasterGateway.getFencingToken())) {
                // we already are connected to the given job manager
                log.debug(
                        "Ignore JobManager gained leadership message for {} because we are already connected to it.",
                        jobMasterGateway.getFencingToken());
                return;
            } else {
                // A different leader took over: disconnect from the old one, releasing
                // partitions, before associating with the new leader below.
                disconnectJobManagerConnection(
                        oldJobManagerConnection,
                        new Exception("Found new job leader for job id " + jobId + '.'),
                        true);
            }
        }
        log.info("Establish JobManager connection for job {}.", jobId);
        ResourceID jobManagerResourceID = registrationSuccess.getResourceID();
        final JobTable.Connection establishedConnection =
                associateWithJobManager(job, jobManagerResourceID, jobMasterGateway);
        // monitor the job manager as heartbeat target
        jobManagerHeartbeatManager.monitorTarget(
                jobManagerResourceID, new JobManagerHeartbeatReceiver(jobMasterGateway));
        internalOfferSlotsToJobManager(establishedConnection);
    }
private void closeJob(JobTable.Job job, Exception cause) {
job.asConnection()
.ifPresent(
jobManagerConnection ->
disconnectJobManagerConnection(jobManagerConnection, cause, true));
job.close();
}
    /**
     * Tears down a JobManager connection: fails the job's running tasks, deactivates its
     * slots, schedules (or defers) result-partition cleanup, and disassociates from the
     * JobMaster. The phases below are ordered deliberately.
     *
     * @param releasePartitions true to clean up partitions now; false (master-failover
     *     recovery) defers cleanup until the registration grace period elapses
     */
    private void disconnectJobManagerConnection(
            JobTable.Connection jobManagerConnection, Exception cause, boolean releasePartitions) {
        final JobID jobId = jobManagerConnection.getJobId();
        // Mark the job's partitions as cleanup candidates; retained again via
        // getAndRetainPartitionWithMetrics if the job recovers.
        jobPartitionToCleanupSet.add(jobId);
        log.info(
                "Close JobManager connection for job {}.",
                jobId,
                ExceptionUtils.returnExceptionIfUnexpected(cause.getCause()));
        ExceptionUtils.logExceptionIfExcepted(cause.getCause(), log);
        // 1. fail tasks running under this JobID
        Iterator<Task> tasks = taskSlotTable.getTasks(jobId);
        final FlinkException failureCause =
                new FlinkException(
                        String.format("Disconnect from JobManager responsible for %s.", jobId),
                        cause);
        while (tasks.hasNext()) {
            tasks.next().failExternally(failureCause);
        }
        // 2. Move the active slots to state allocated (possible to time out again)
        Set<AllocationID> activeSlotAllocationIDs =
                taskSlotTable.getActiveTaskSlotAllocationIdsPerJob(jobId);
        final FlinkException freeingCause =
                new FlinkException("Slot could not be marked inactive.");
        for (AllocationID activeSlotAllocationID : activeSlotAllocationIDs) {
            try {
                if (!taskSlotTable.markSlotInactive(
                        activeSlotAllocationID, taskManagerConfiguration.getSlotTimeout())) {
                    freeSlotInternal(activeSlotAllocationID, freeingCause);
                }
            } catch (SlotNotFoundException e) {
                log.debug("Could not mark the slot {} inactive.", activeSlotAllocationID, e);
            }
        }
        if (!releasePartitions) {
            // this branch is for job recovery from master failures
            final Duration maxRegistrationDuration =
                    taskManagerConfiguration.getMaxRegistrationDuration();
            if (maxRegistrationDuration != null) {
                log.info(
                        "Waiting for {} mills for job {} to recover. If the job manager is not reconnected, "
                                + "the job's partitions will be cleaned up.",
                        maxRegistrationDuration.toMillis(),
                        jobId);
                scheduleRunAsync(
                        () -> {
                            // If the job is not recovery after wait for a period of time, we will
                            // clean up the partitions
                            Optional<JobTable.Job> job = jobTable.getJob(jobId);
                            if (!job.isPresent()
                                    || !job.get().isConnected()
                                    || jobPartitionToCleanupSet.contains(jobId)) {
                                scheduleResultPartitionCleanup(jobId);
                            }
                        },
                        maxRegistrationDuration);
            }
        } else {
            // cleanup remaining partitions once all tasks for this job have completed
            scheduleResultPartitionCleanup(jobId);
        }
        // 3. Disassociate from the JobManager
        try {
            jobManagerHeartbeatManager.unmonitorTarget(jobManagerConnection.getResourceId());
            disassociateFromJobManager(jobManagerConnection, cause);
        } catch (IOException e) {
            log.warn(
                    "Could not properly disassociate from JobManager {}.",
                    jobManagerConnection.getJobManagerGateway().getAddress(),
                    e);
        }
        jobManagerConnection.disconnect();
    }
private JobTable.Connection associateWithJobManager(
JobTable.Job job, ResourceID resourceID, JobMasterGateway jobMasterGateway) {
checkNotNull(resourceID);
checkNotNull(jobMasterGateway);
TaskManagerActions taskManagerActions =
new TaskManagerActionsImpl(job.getJobId(), jobMasterGateway);
CheckpointResponder checkpointResponder = new RpcCheckpointResponder(jobMasterGateway);
GlobalAggregateManager aggregateManager = new RpcGlobalAggregateManager(jobMasterGateway);
PartitionProducerStateChecker partitionStateChecker =
new RpcPartitionStateChecker(jobMasterGateway);
registerQueryableState(job.getJobId(), jobMasterGateway);
return job.connect(
resourceID,
jobMasterGateway,
taskManagerActions,
checkpointResponder,
aggregateManager,
partitionStateChecker);
}
private void disassociateFromJobManager(
JobTable.Connection jobManagerConnection, Exception cause) throws IOException {
checkNotNull(jobManagerConnection);
final JobID jobId = jobManagerConnection.getJobId();
final KvStateRegistry kvStateRegistry = kvStateService.getKvStateRegistry();
if (kvStateRegistry != null) {
kvStateRegistry.unregisterListener(jobId);
}
final KvStateClientProxy kvStateClientProxy = kvStateService.getKvStateClientProxy();
if (kvStateClientProxy != null) {
kvStateClientProxy.updateKvStateLocationOracle(jobManagerConnection.getJobId(), null);
}
JobMasterGateway jobManagerGateway = jobManagerConnection.getJobManagerGateway();
jobManagerGateway.disconnectTaskManager(getResourceID(), cause);
}
private void handleRejectedJobManagerConnection(
JobID jobId, String targetAddress, JMTMRegistrationRejection rejection) {
log.info(
"The JobManager under {} rejected the registration for job {}: {}. Releasing all job related resources.",
targetAddress,
jobId,
rejection.getReason());
releaseJobResources(
jobId,
new FlinkException(
String.format("JobManager %s has rejected the registration.", jobId)));
}
    /**
     * Releases every resource this TaskExecutor holds for the given job: tracked partitions,
     * slots, the job-leader listener, the job-table entry, metrics, and per-job caches.
     * The cleanup steps are applied in this fixed order.
     */
    private void releaseJobResources(JobID jobId, Exception cause) {
        log.debug("Releasing job resources for job {}.", jobId, cause);
        if (partitionTracker.isTrackingPartitionsFor(jobId)) {
            // stop tracking job partitions
            partitionTracker.stopTrackingAndReleaseJobPartitionsFor(jobId);
        }
        // free slots
        final Set<AllocationID> allocationIds = taskSlotTable.getAllocationIdsPerJob(jobId);
        if (!allocationIds.isEmpty()) {
            for (AllocationID allocationId : allocationIds) {
                freeSlotInternal(allocationId, cause);
            }
        }
        jobLeaderService.removeJob(jobId);
        jobTable.getJob(jobId)
                .ifPresent(
                        job -> {
                            closeJob(job, cause);
                        });
        taskManagerMetricGroup.removeJobMetricsGroup(jobId);
        changelogStoragesManager.releaseResourcesForJob(jobId);
        currentSlotOfferPerJob.remove(jobId);
        channelStateExecutorFactoryManager.releaseResourcesForJob(jobId);
        // Per-job caches for deserialized job/task information and shuffle descriptors.
        jobInformationCache.clearCacheForGroup(jobId);
        taskInformationCache.clearCacheForGroup(jobId);
        shuffleDescriptorsCache.clearCacheForGroup(jobId);
        fileMergingManager.releaseMergingSnapshotManagerForJob(jobId);
    }
    /**
     * Releases the job's result partitions once all of its tasks have terminated.
     *
     * <p>No-op when no termination futures were registered for the job.
     */
    private void scheduleResultPartitionCleanup(JobID jobId) {
        final Collection<CompletableFuture<ExecutionState>> taskTerminationFutures =
                taskResultPartitionCleanupFuturesPerJob.remove(jobId);
        if (taskTerminationFutures != null) {
            FutureUtils.waitForAll(taskTerminationFutures)
                    .thenRunAsync(
                            () -> {
                                partitionTracker.stopTrackingAndReleaseJobPartitionsFor(jobId);
                                jobPartitionToCleanupSet.remove(jobId);
                            },
                            getMainThreadExecutor());
        }
    }
private void registerQueryableState(JobID jobId, JobMasterGateway jobMasterGateway) {
final KvStateServer kvStateServer = kvStateService.getKvStateServer();
final KvStateRegistry kvStateRegistry = kvStateService.getKvStateRegistry();
if (kvStateServer != null && kvStateRegistry != null) {
kvStateRegistry.registerListener(
jobId,
new RpcKvStateRegistryListener(
jobMasterGateway, kvStateServer.getServerAddress()));
}
final KvStateClientProxy kvStateProxy = kvStateService.getKvStateClientProxy();
if (kvStateProxy != null) {
kvStateProxy.updateKvStateLocationOracle(jobId, jobMasterGateway);
}
}
// ------------------------------------------------------------------------
// Internal task methods
// ------------------------------------------------------------------------
private void failTask(
final JobID jobID, final ExecutionAttemptID executionAttemptID, final Throwable cause) {
try (MdcUtils.MdcCloseable mdcCloseable =
MdcUtils.withContext(MdcUtils.asContextData(jobID))) {
final Task task = taskSlotTable.getTask(executionAttemptID);
if (task != null) {
try {
task.failExternally(cause);
} catch (Throwable t) {
log.error("Could not fail task {}.", executionAttemptID, t);
}
} else {
log.info(
"Cannot find task to fail for execution {} with exception:",
executionAttemptID,
cause);
}
}
}
private void updateTaskExecutionState(
final JobID jobID,
final JobMasterGateway jobMasterGateway,
final TaskExecutionState taskExecutionState) {
final ExecutionAttemptID executionAttemptID = taskExecutionState.getID();
CompletableFuture<Acknowledge> futureAcknowledge =
jobMasterGateway.updateTaskExecutionState(taskExecutionState);
futureAcknowledge.whenCompleteAsync(
(ack, throwable) -> {
if (throwable != null) {
failTask(jobID, executionAttemptID, throwable);
}
},
getMainThreadExecutor());
}
    /**
     * Removes the task from the slot table and reports its final execution state (including
     * accumulators and I/O metrics) to the JobMaster. A task that is still in a non-terminal
     * state when removed is failed first.
     */
    private void unregisterTaskAndNotifyFinalState(
            final JobMasterGateway jobMasterGateway, final ExecutionAttemptID executionAttemptID) {
        Task task = taskSlotTable.removeTask(executionAttemptID);
        if (task != null) {
            if (!task.getExecutionState().isTerminal()) {
                try {
                    task.failExternally(
                            new IllegalStateException("Task is being remove from TaskManager."));
                } catch (Exception e) {
                    log.error("Could not properly fail task.", e);
                }
            }
            log.info(
                    "Un-registering task and sending final execution state {} to JobManager for task {} {}.",
                    task.getExecutionState(),
                    task.getTaskInfo().getTaskNameWithSubtasks(),
                    task.getExecutionId());
            // Final accumulator values and I/O metrics accompany the terminal state update.
            AccumulatorSnapshot accumulatorSnapshot = task.getAccumulatorRegistry().getSnapshot();
            updateTaskExecutionState(
                    task.getJobID(),
                    jobMasterGateway,
                    new TaskExecutionState(
                            task.getExecutionId(),
                            task.getExecutionState(),
                            task.getFailureCause(),
                            accumulatorSnapshot,
                            task.getMetricGroup().getIOMetricGroup().createSnapshot()));
        } else {
            log.error("Cannot find task with ID {} to unregister.", executionAttemptID);
        }
    }
    /**
     * Frees the slot for the given allocation id: removes it from the slot table, deletes its
     * persisted allocation snapshot, notifies the ResourceManager that the slot is available
     * again, and closes the owning job's JobManager connection if no resources remain.
     * Ignored while the TaskExecutor is shutting down.
     */
    private void freeSlotInternal(AllocationID allocationId, Throwable cause) {
        checkNotNull(allocationId);
        // only respond to freeing slots when not shutting down to avoid freeing slot allocation
        // information
        if (isRunning()) {
            final JobID jobId = taskSlotTable.getOwningJob(allocationId);
            try (MdcCloseable ignored =
                    MdcUtils.withContext(
                            jobId == null
                                    ? Collections.emptyMap()
                                    : MdcUtils.asContextData(jobId))) {
                log.debug(
                        "Free slot with allocation id {} because: {}",
                        allocationId,
                        cause.getMessage());
                // freeSlot returns -1 when the slot could not be freed immediately.
                final int slotIndex = taskSlotTable.freeSlot(allocationId, cause);
                // NOTE(review): the snapshot is deleted even when slotIndex is -1 —
                // presumably a no-op for that index, but confirm against the persistence
                // service's contract.
                slotAllocationSnapshotPersistenceService.deleteAllocationSnapshot(slotIndex);
                if (slotIndex != -1) {
                    if (isConnectedToResourceManager()) {
                        // the slot was freed. Tell the RM about it
                        ResourceManagerGateway resourceManagerGateway =
                                establishedResourceManagerConnection.getResourceManagerGateway();
                        resourceManagerGateway.notifySlotAvailable(
                                establishedResourceManagerConnection
                                        .getTaskExecutorRegistrationId(),
                                new SlotID(getResourceID(), slotIndex),
                                allocationId);
                    }
                    if (jobId != null) {
                        closeJobManagerConnectionIfNoAllocatedResources(jobId);
                    }
                }
            } catch (SlotNotFoundException e) {
                log.debug("Could not free slot for allocation id {}.", allocationId, e);
            }
            localStateStoresManager.releaseLocalStateForAllocationId(allocationId);
        } else {
            log.debug(
                    "Ignoring the freeing of slot {} because the TaskExecutor is shutting down.",
                    allocationId);
        }
    }
private void closeJobManagerConnectionIfNoAllocatedResources(JobID jobId) {
// check whether we still have allocated slots for the same job
if (taskSlotTable.getAllocationIdsPerJob(jobId).isEmpty()
&& !partitionTracker.isTrackingPartitionsFor(jobId)) {
// we can remove the job from the job leader service
final FlinkExpectedException cause =
new FlinkExpectedException(
"TaskExecutor "
+ getAddress()
+ " has no more allocated slots for job "
+ jobId
+ '.');
releaseJobResources(jobId, cause);
}
}
private void timeoutSlot(AllocationID allocationId, UUID ticket) {
checkNotNull(allocationId);
checkNotNull(ticket);
if (taskSlotTable.isValidTimeout(allocationId, ticket)) {
freeSlotInternal(
allocationId, new Exception("The slot " + allocationId + " has timed out."));
} else {
log.debug(
"Received an invalid timeout for allocation id {} with ticket {}.",
allocationId,
ticket);
}
}
    /**
     * Syncs the TaskExecutor's view on its allocated slots with the JobMaster's view. Slots which
     * are no longer reported by the JobMaster are being freed. Slots which the JobMaster thinks it
     * still owns but which are no longer allocated to it will be failed via {@link
     * JobMasterGateway#failSlot}.
     *
     * @param jobMasterGateway jobMasterGateway to talk to the connected job master
     * @param allocatedSlotReport represents the JobMaster's view on the current slot allocation
     *     state
     */
    private void syncSlotsWithSnapshotFromJobMaster(
            JobMasterGateway jobMasterGateway, AllocatedSlotReport allocatedSlotReport) {
        // Both directions of the reconciliation: fail what the master wrongly claims,
        // then free what the master no longer claims.
        failNoLongerAllocatedSlots(allocatedSlotReport, jobMasterGateway);
        freeNoLongerUsedSlots(allocatedSlotReport);
    }
private void failNoLongerAllocatedSlots(
AllocatedSlotReport allocatedSlotReport, JobMasterGateway jobMasterGateway) {
for (AllocatedSlotInfo allocatedSlotInfo : allocatedSlotReport.getAllocatedSlotInfos()) {
final AllocationID allocationId = allocatedSlotInfo.getAllocationId();
if (!taskSlotTable.isAllocated(
allocatedSlotInfo.getSlotIndex(),
allocatedSlotReport.getJobId(),
allocationId)) {
jobMasterGateway.failSlot(
getResourceID(),
allocationId,
new FlinkException(
String.format(
"Slot %s on TaskExecutor %s is not allocated by job %s.",
allocatedSlotInfo.getSlotIndex(),
getResourceID().getStringWithMetadata(),
allocatedSlotReport.getJobId())));
}
}
}
private void freeNoLongerUsedSlots(AllocatedSlotReport allocatedSlotReport) {
final Set<AllocationID> activeSlots =
taskSlotTable.getActiveTaskSlotAllocationIdsPerJob(allocatedSlotReport.getJobId());
final Set<AllocationID> reportedSlots =
allocatedSlotReport.getAllocatedSlotInfos().stream()
.map(AllocatedSlotInfo::getAllocationId)
.collect(Collectors.toSet());
final Sets.SetView<AllocationID> difference = Sets.difference(activeSlots, reportedSlots);
for (AllocationID allocationID : difference) {
freeSlotInternal(
allocationID,
new FlinkException(
String.format(
"%s is no longer allocated by job %s.",
allocationID, allocatedSlotReport.getJobId())));
}
}
    /**
     * Persists the given slot allocation snapshot in a best-effort manner; failures are only
     * logged because the snapshot is merely an optimization for local recovery.
     */
    private void tryPersistAllocationSnapshot(SlotAllocationSnapshot slotAllocationSnapshot) {
        try {
            slotAllocationSnapshotPersistenceService.persistAllocationSnapshot(
                    slotAllocationSnapshot);
        } catch (IOException e) {
            log.debug("Cannot persist the slot allocation snapshot {}.", slotAllocationSnapshot, e);
        }
    }
    /**
     * This method tries to repopulate the {@link JobTable} and {@link TaskSlotTable} from the local
     * filesystem in a best-effort manner.
     */
    private void tryLoadLocalAllocationSnapshots() {
        Collection<SlotAllocationSnapshot> slotAllocationSnapshots =
                slotAllocationSnapshotPersistenceService.loadAllocationSnapshots();
        log.debug("Recovered slot allocation snapshots {}.", slotAllocationSnapshots);
        final Set<AllocationID> allocatedSlots = new HashSet<>();
        for (SlotAllocationSnapshot slotAllocationSnapshot : slotAllocationSnapshots) {
            try {
                allocateSlotForJob(
                        slotAllocationSnapshot.getJobId(),
                        slotAllocationSnapshot.getSlotID(),
                        slotAllocationSnapshot.getAllocationId(),
                        slotAllocationSnapshot.getResourceProfile(),
                        slotAllocationSnapshot.getJobTargetAddress());
            } catch (SlotAllocationException e) {
                log.debug("Cannot reallocate restored slot {}.", slotAllocationSnapshot, e);
            }
            // The allocation id is retained even if re-allocation failed above, so the
            // corresponding local state is kept for a later attempt.
            allocatedSlots.add(slotAllocationSnapshot.getAllocationId());
        }
        // Drop local state for allocations that were not recovered.
        localStateStoresManager.retainLocalStateForAllocations(allocatedSlots);
    }
// ------------------------------------------------------------------------
// Internal utility methods
// ------------------------------------------------------------------------
    /** Returns true iff a ResourceManager connection is currently established. */
    private boolean isConnectedToResourceManager() {
        return establishedResourceManagerConnection != null;
    }
    /** Returns true iff we are connected to the ResourceManager with exactly this fencing id. */
    private boolean isConnectedToResourceManager(ResourceManagerId resourceManagerId) {
        return establishedResourceManagerConnection != null
                && resourceManagerAddress != null
                && resourceManagerAddress.getResourceManagerId().equals(resourceManagerId);
    }
    /** Returns true iff the job's current connection targets the master with this fencing id. */
    private boolean isJobManagerConnectionValid(JobID jobId, JobMasterId jobMasterId) {
        return jobTable.getConnection(jobId)
                .map(jmConnection -> Objects.equals(jmConnection.getJobMasterId(), jobMasterId))
                .orElse(false);
    }
private CompletableFuture<TransientBlobKey> requestFileUploadByFilePath(
String filePath, String fileTag) {
log.debug("Received file upload request for file {}", fileTag);
if (!StringUtils.isNullOrWhitespaceOnly(filePath)) {
return CompletableFuture.supplyAsync(
() -> {
final File file = new File(filePath);
if (file.exists()) {
try {
return putTransientBlobStream(new FileInputStream(file), fileTag)
.get();
} catch (Exception e) {
log.debug("Could not upload file {}.", fileTag, e);
throw new CompletionException(
new FlinkException(
"Could not upload file " + fileTag + '.', e));
}
} else {
log.debug(
"The file {} does not exist on the TaskExecutor {}.",
fileTag,
getResourceID().getStringWithMetadata());
throw new CompletionException(
new FileNotFoundException(
"The file "
+ fileTag
+ " does not exist on the TaskExecutor."));
}
},
ioExecutor);
} else {
log.debug(
"The file {} is unavailable on the TaskExecutor {}.",
fileTag,
getResourceID().getStringWithMetadata());
return FutureUtils.completedExceptionally(
new FlinkException(
"The file " + fileTag + " is not available on the TaskExecutor."));
}
}
private CompletableFuture<TransientBlobKey> putTransientBlobStream(
InputStream inputStream, String fileTag) {
final TransientBlobService transientBlobService =
taskExecutorBlobService.getTransientBlobService();
final TransientBlobKey transientBlobKey;
try {
transientBlobKey = transientBlobService.putTransient(inputStream);
} catch (IOException e) {
log.debug("Could not upload file {}.", fileTag, e);
return FutureUtils.completedExceptionally(
new FlinkException("Could not upload file " + fileTag + '.', e));
}
return CompletableFuture.completedFuture(transientBlobKey);
}
// ------------------------------------------------------------------------
// Properties
// ------------------------------------------------------------------------
public ResourceID getResourceID() {
return unresolvedTaskManagerLocation.getResourceID();
}
// ------------------------------------------------------------------------
// Error Handling
// ------------------------------------------------------------------------
/**
* Notifies the TaskExecutor that a fatal error has occurred and it cannot proceed.
*
* @param t The exception describing the fatal error
*/
void onFatalError(final Throwable t) {
try {
log.error("Fatal error occurred in TaskExecutor {}.", getAddress(), t);
} catch (Throwable ignored) {
}
// The fatal error handler implementation should make sure that this call is non-blocking
fatalErrorHandler.onFatalError(t);
}
// ------------------------------------------------------------------------
// Access to fields for testing
// ------------------------------------------------------------------------
@VisibleForTesting
TaskExecutorToResourceManagerConnection getResourceManagerConnection() {
return resourceManagerConnection;
}
@VisibleForTesting
HeartbeatManager<Void, TaskExecutorHeartbeatPayload> getResourceManagerHeartbeatManager() {
return resourceManagerHeartbeatManager;
}
// ------------------------------------------------------------------------
// Utility classes
// ------------------------------------------------------------------------
private static final | TaskExecutor |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/WrapperObjectWithObjectIdTest.java | {
"start": 500,
"end": 611
} | class ____ extends DatabindTestUtil
{
@JsonRootName(value = "company")
static | WrapperObjectWithObjectIdTest |
java | google__gson | gson/src/test/java/com/google/gson/functional/DefaultTypeAdaptersTest.java | {
"start": 2312,
"end": 5228
} | class ____ {
private Gson gson;
private TimeZone oldTimeZone;
private Locale oldLocale;
@Before
public void setUp() throws Exception {
this.oldTimeZone = TimeZone.getDefault();
TimeZone.setDefault(TimeZone.getTimeZone("America/Los_Angeles"));
this.oldLocale = Locale.getDefault();
Locale.setDefault(Locale.US);
gson = new Gson();
}
@After
public void tearDown() {
TimeZone.setDefault(oldTimeZone);
Locale.setDefault(oldLocale);
}
@Test
public void testClassSerialization() {
var exception =
assertThrows(UnsupportedOperationException.class, () -> gson.toJson(String.class));
assertThat(exception)
.hasMessageThat()
.isEqualTo(
"Attempted to serialize java.lang.Class: java.lang.String. Forgot to register a type"
+ " adapter?\n"
+ "See https://github.com/google/gson/blob/main/Troubleshooting.md#java-lang-class-unsupported");
// Override with a custom type adapter for class.
gson = new GsonBuilder().registerTypeAdapter(Class.class, new MyClassTypeAdapter()).create();
assertThat(gson.toJson(String.class)).isEqualTo("\"java.lang.String\"");
}
@Test
public void testClassDeserialization() {
var exception =
assertThrows(
UnsupportedOperationException.class, () -> gson.fromJson("String.class", Class.class));
assertThat(exception)
.hasMessageThat()
.isEqualTo(
"Attempted to deserialize a java.lang.Class. Forgot to register a type adapter?\n"
+ "See https://github.com/google/gson/blob/main/Troubleshooting.md#java-lang-class-unsupported");
// Override with a custom type adapter for class.
gson = new GsonBuilder().registerTypeAdapter(Class.class, new MyClassTypeAdapter()).create();
assertThat(gson.fromJson("java.lang.String", Class.class)).isAssignableTo(String.class);
}
@Test
public void testUrlSerialization() throws Exception {
String urlValue = "http://google.com/";
URL url = new URL(urlValue);
assertThat(gson.toJson(url)).isEqualTo("\"http://google.com/\"");
}
@Test
public void testUrlDeserialization() {
String urlValue = "http://google.com/";
String json = "'http:\\/\\/google.com\\/'";
URL target1 = gson.fromJson(json, URL.class);
assertThat(target1.toExternalForm()).isEqualTo(urlValue);
URL target2 = gson.fromJson('"' + urlValue + '"', URL.class);
assertThat(target2.toExternalForm()).isEqualTo(urlValue);
}
@Test
public void testUrlNullSerialization() {
ClassWithUrlField target = new ClassWithUrlField();
assertThat(gson.toJson(target)).isEqualTo("{}");
}
@Test
public void testUrlNullDeserialization() {
String json = "{}";
ClassWithUrlField target = gson.fromJson(json, ClassWithUrlField.class);
assertThat(target.url).isNull();
}
private static | DefaultTypeAdaptersTest |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/script/field/BooleanDocValuesField.java | {
"start": 1252,
"end": 3498
} | class
____ ScriptDocValues.Booleans booleans = null;
public BooleanDocValuesField(SortedNumericLongValues input, String name) {
this.input = input;
this.name = name;
}
@Override
public void setNextDocId(int docId) throws IOException {
if (input.advanceExact(docId)) {
resize(input.docValueCount());
for (int i = 0; i < count; i++) {
values[i] = input.nextValue() == 1L;
}
} else {
resize(0);
}
}
private void resize(int newSize) {
count = newSize;
assert count >= 0 : "size must be positive (got " + count + "): likely integer overflow?";
if (values.length < count) {
values = Arrays.copyOf(values, ArrayUtil.oversize(count, 1));
}
}
@Override
public ScriptDocValues<Boolean> toScriptDocValues() {
if (booleans == null) {
booleans = new ScriptDocValues.Booleans(this);
}
return booleans;
}
// this method is required to support the Boolean return values
// for the old-style "doc" access in ScriptDocValues
@Override
public Boolean getInternal(int index) {
return values[index];
}
@Override
public String getName() {
return name;
}
@Override
public boolean isEmpty() {
return count == 0;
}
@Override
public int size() {
return count;
}
public boolean get(boolean defaultValue) {
return get(0, defaultValue);
}
public boolean get(int index, boolean defaultValue) {
if (isEmpty() || index < 0 || index >= count) {
return defaultValue;
}
return values[index];
}
@Override
public Iterator<Boolean> iterator() {
return new Iterator<Boolean>() {
private int index = 0;
@Override
public boolean hasNext() {
return index < count;
}
@Override
public Boolean next() {
if (hasNext() == false) {
throw new NoSuchElementException();
}
return values[index++];
}
};
}
}
| private |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/KubernetesNodesComponentBuilderFactory.java | {
"start": 1419,
"end": 1968
} | interface ____ {
/**
* Kubernetes Nodes (camel-kubernetes)
* Perform operations on Kubernetes Nodes and get notified on Node changes.
*
* Category: container,cloud
* Since: 2.17
* Maven coordinates: org.apache.camel:camel-kubernetes
*
* @return the dsl builder
*/
static KubernetesNodesComponentBuilder kubernetesNodes() {
return new KubernetesNodesComponentBuilderImpl();
}
/**
* Builder for the Kubernetes Nodes component.
*/
| KubernetesNodesComponentBuilderFactory |
java | spring-projects__spring-boot | test-support/spring-boot-test-support/src/main/java/org/springframework/boot/testsupport/classpath/resources/WithResource.java | {
"start": 1054,
"end": 1630
} | class ____.
* <p>
* For cases where one resource needs to refer to another, the resource's content may
* contain the placeholder <code>${resourceRoot}</code>. It will be replaced with the path
* to the root of the resources. For example, a resource with the {@link #name}
* {@code example.txt} can be referenced using <code>${resourceRoot}/example.txt</code>.
*
* @author Andy Wilkinson
*/
@Inherited
@Repeatable(WithResources.class)
@Retention(RetentionPolicy.RUNTIME)
@Target({ ElementType.METHOD, ElementType.TYPE })
@ExtendWith(ResourcesExtension.class)
public @ | loader |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/GeneratedClassGizmoAdaptor.java | {
"start": 497,
"end": 2882
} | class ____ implements ClassOutput {
private final BuildProducer<GeneratedClassBuildItem> generatedClasses;
private final Predicate<String> applicationClassPredicate;
private final Map<String, StringWriter> sources;
public GeneratedClassGizmoAdaptor(BuildProducer<GeneratedClassBuildItem> generatedClasses, boolean applicationClass) {
this(generatedClasses, new Predicate<String>() {
@Override
public boolean test(String t) {
return applicationClass;
}
});
}
public GeneratedClassGizmoAdaptor(BuildProducer<GeneratedClassBuildItem> generatedClasses,
Predicate<String> applicationClassPredicate) {
this.generatedClasses = generatedClasses;
this.applicationClassPredicate = applicationClassPredicate;
this.sources = BootstrapDebug.debugSourcesDir() != null ? new ConcurrentHashMap<>() : null;
}
public GeneratedClassGizmoAdaptor(BuildProducer<GeneratedClassBuildItem> generatedClasses,
Function<String, String> generatedToBaseNameFunction) {
this.generatedClasses = generatedClasses;
this.applicationClassPredicate = new Predicate<String>() {
@Override
public boolean test(String s) {
return isApplicationClass(generatedToBaseNameFunction.apply(s));
}
};
this.sources = BootstrapDebug.debugSourcesDir() != null ? new ConcurrentHashMap<>() : null;
}
@Override
public void write(String className, byte[] bytes) {
String source = null;
if (sources != null) {
StringWriter sw = sources.get(className);
if (sw != null) {
source = sw.toString();
}
}
generatedClasses.produce(
new GeneratedClassBuildItem(applicationClassPredicate.test(className), className, bytes, source));
}
@Override
public Writer getSourceWriter(String className) {
if (sources != null) {
StringWriter writer = new StringWriter();
sources.put(className, writer);
return writer;
}
return ClassOutput.super.getSourceWriter(className);
}
public static boolean isApplicationClass(String className) {
return QuarkusClassLoader.isApplicationClass(className);
}
}
| GeneratedClassGizmoAdaptor |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/booleanarray/BooleanArrayAssert_containsSubsequence_with_Boolean_array_Test.java | {
"start": 1216,
"end": 1986
} | class ____ extends BooleanArrayAssertBaseTest {
@Test
void should_fail_if_values_is_null() {
// GIVEN
Boolean[] subsequence = null;
// WHEN
Throwable thrown = catchThrowable(() -> assertions.containsSubsequence(subsequence));
// THEN
then(thrown).isInstanceOf(NullPointerException.class)
.hasMessage(shouldNotBeNull("subsequence").create());
}
@Override
protected BooleanArrayAssert invoke_api_method() {
return assertions.containsSubsequence(new Boolean[] { true, false });
}
@Override
protected void verify_internal_effects() {
verify(arrays).assertContainsSubsequence(getInfo(assertions), getActual(assertions), arrayOf(true, false));
}
}
| BooleanArrayAssert_containsSubsequence_with_Boolean_array_Test |
java | mockito__mockito | mockito-core/src/test/java/org/mockito/internal/invocation/InvocationMatcherTest.java | {
"start": 1019,
"end": 7630
} | class ____ extends TestBase {
private InvocationMatcher simpleMethod;
@Mock private IMethods mock;
@Before
public void setup() {
simpleMethod = new InvocationBuilder().mock(mock).simpleMethod().toInvocationMatcher();
}
@Test
public void should_be_a_citizen_of_hashes() throws Exception {
Invocation invocation = new InvocationBuilder().toInvocation();
Invocation invocationTwo = new InvocationBuilder().args("blah").toInvocation();
Map<InvocationMatcher, String> map = new HashMap<InvocationMatcher, String>();
map.put(new InvocationMatcher(invocation), "one");
map.put(new InvocationMatcher(invocationTwo), "two");
assertEquals(2, map.size());
}
@Test
public void should_not_equal_if_number_of_arguments_differ() throws Exception {
InvocationMatcher withOneArg =
new InvocationMatcher(new InvocationBuilder().args("test").toInvocation());
InvocationMatcher withTwoArgs =
new InvocationMatcher(new InvocationBuilder().args("test", 100).toInvocation());
assertFalse(withOneArg.equals(null));
assertFalse(withOneArg.equals(withTwoArgs));
}
@Test
public void should_to_string_with_matchers() throws Exception {
ArgumentMatcher m = NotNull.NOT_NULL;
InvocationMatcher notNull =
new InvocationMatcher(new InvocationBuilder().toInvocation(), asList(m));
ArgumentMatcher mTwo = new Equals('x');
InvocationMatcher equals =
new InvocationMatcher(new InvocationBuilder().toInvocation(), asList(mTwo));
assertThat(notNull.toString()).contains("simpleMethod(notNull())");
assertThat(equals.toString()).contains("simpleMethod('x')");
}
@Test
public void should_know_if_is_similar_to() throws Exception {
Invocation same = new InvocationBuilder().mock(mock).simpleMethod().toInvocation();
assertTrue(simpleMethod.hasSimilarMethod(same));
Invocation different = new InvocationBuilder().mock(mock).differentMethod().toInvocation();
assertFalse(simpleMethod.hasSimilarMethod(different));
}
@Test
public void should_not_be_similar_to_verified_invocation() throws Exception {
Invocation verified = new InvocationBuilder().simpleMethod().verified().toInvocation();
assertFalse(simpleMethod.hasSimilarMethod(verified));
}
@Test
public void should_not_be_similar_if_mocks_are_different() throws Exception {
Invocation onDifferentMock =
new InvocationBuilder().simpleMethod().mock("different mock").toInvocation();
assertFalse(simpleMethod.hasSimilarMethod(onDifferentMock));
}
@Test
public void should_not_be_similar_if_is_overloaded_but_used_with_the_same_arg()
throws Exception {
Method method = IMethods.class.getMethod("simpleMethod", String.class);
Method overloadedMethod = IMethods.class.getMethod("simpleMethod", Object.class);
String sameArg = "test";
InvocationMatcher invocation =
new InvocationBuilder().method(method).arg(sameArg).toInvocationMatcher();
Invocation overloadedInvocation =
new InvocationBuilder().method(overloadedMethod).arg(sameArg).toInvocation();
assertFalse(invocation.hasSimilarMethod(overloadedInvocation));
}
@Test
public void should_be_similar_if_is_overloaded_but_used_with_different_arg() throws Exception {
Method method = IMethods.class.getMethod("simpleMethod", String.class);
Method overloadedMethod = IMethods.class.getMethod("simpleMethod", Object.class);
InvocationMatcher invocation =
new InvocationBuilder().mock(mock).method(method).arg("foo").toInvocationMatcher();
Invocation overloadedInvocation =
new InvocationBuilder()
.mock(mock)
.method(overloadedMethod)
.arg("bar")
.toInvocation();
assertTrue(invocation.hasSimilarMethod(overloadedInvocation));
}
@Test
public void should_capture_arguments_from_invocation() throws Exception {
// given
Invocation invocation = new InvocationBuilder().args("1", 100).toInvocation();
CapturingMatcher capturingMatcher = new CapturingMatcher(List.class);
InvocationMatcher invocationMatcher =
new InvocationMatcher(invocation, (List) asList(new Equals("1"), capturingMatcher));
// when
invocationMatcher.captureArgumentsFrom(invocation);
// then
assertEquals(1, capturingMatcher.getAllValues().size());
assertEquals(100, capturingMatcher.getLastValue());
}
@Test
public void should_match_varargs_using_any_varargs() {
// given
mock.varargs("1", "2");
Invocation invocation = getLastInvocation();
InvocationMatcher invocationMatcher = new InvocationMatcher(invocation, asList(ANY, ANY));
// when
boolean match = invocationMatcher.matches(invocation);
// then
assertTrue(match);
}
@Test
public void should_capture_varargs_as_vararg() {
// given
mock.mixedVarargs(1, "a", "b");
Invocation invocation = getLastInvocation();
CapturingMatcher<String[]> m = new CapturingMatcher(String[].class);
InvocationMatcher invocationMatcher =
new InvocationMatcher(invocation, Arrays.<ArgumentMatcher>asList(new Equals(1), m));
// when
invocationMatcher.captureArgumentsFrom(invocation);
// then
Assertions.assertThat(m.getAllValues()).containsExactly(new String[] {"a", "b"});
}
@Test // like using several time the captor in the vararg
public void should_capture_arguments_when_args_count_does_NOT_match() {
// given
mock.varargs();
Invocation invocation = getLastInvocation();
// when
InvocationMatcher invocationMatcher = new InvocationMatcher(invocation, (List) asList(ANY));
// then
invocationMatcher.captureArgumentsFrom(invocation);
}
@Test
public void should_create_from_invocations() throws Exception {
// given
Invocation i = new InvocationBuilder().toInvocation();
// when
List<InvocationMatcher> out = InvocationMatcher.createFrom(asList(i));
// then
assertEquals(1, out.size());
assertEquals(i, out.get(0).getInvocation());
}
}
| InvocationMatcherTest |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractQueueCapacityCalculator.java | {
"start": 1046,
"end": 1143
} | class ____ encapsulate queue capacity setup and resource calculation
* logic.
*/
public abstract | to |
java | micronaut-projects__micronaut-core | aop/src/main/java/io/micronaut/aop/InterceptPhase.java | {
"start": 835,
"end": 1044
} | class ____ a set of common constants for typical phases used by interceptors thus making it easier to position an interceptor in the correct phase.</p>
*
* @author Graeme Rocher
* @since 1.0
*/
public | provides |
java | apache__maven | impl/maven-core/src/main/java/org/apache/maven/execution/DefaultMavenExecutionRequestPopulator.java | {
"start": 1751,
"end": 8562
} | class ____ implements MavenExecutionRequestPopulator {
private final MavenRepositorySystem repositorySystem;
@Inject
public DefaultMavenExecutionRequestPopulator(MavenRepositorySystem repositorySystem) {
this.repositorySystem = repositorySystem;
}
@Override
public MavenExecutionRequest populateFromToolchains(MavenExecutionRequest request, PersistedToolchains toolchains)
throws MavenExecutionRequestPopulationException {
if (toolchains != null) {
Map<String, List<ToolchainModel>> groupedToolchains = new HashMap<>(2);
for (ToolchainModel model : toolchains.getToolchains()) {
if (!groupedToolchains.containsKey(model.getType())) {
groupedToolchains.put(model.getType(), new ArrayList<>());
}
groupedToolchains.get(model.getType()).add(model);
}
request.setToolchains(groupedToolchains);
}
return request;
}
@Override
public MavenExecutionRequest populateDefaults(MavenExecutionRequest request)
throws MavenExecutionRequestPopulationException {
baseDirectory(request);
localRepository(request);
populateDefaultPluginGroups(request);
return request;
}
//
//
//
private void populateDefaultPluginGroups(MavenExecutionRequest request) {
request.addPluginGroup("org.apache.maven.plugins");
request.addPluginGroup("org.codehaus.mojo");
}
private void localRepository(MavenExecutionRequest request) throws MavenExecutionRequestPopulationException {
// ------------------------------------------------------------------------
// Local Repository
//
// 1. Use a value has been passed in via the configuration
// 2. Use value in the resultant settings
// 3. Use default value
// ------------------------------------------------------------------------
if (request.getLocalRepository() == null) {
request.setLocalRepository(createLocalRepository(request));
}
if (request.getLocalRepositoryPath() == null) {
request.setLocalRepositoryPath(new File(request.getLocalRepository().getBasedir()).getAbsoluteFile());
}
}
// ------------------------------------------------------------------------
// Artifact Transfer Mechanism
// ------------------------------------------------------------------------
private ArtifactRepository createLocalRepository(MavenExecutionRequest request)
throws MavenExecutionRequestPopulationException {
String localRepositoryPath = null;
if (request.getLocalRepositoryPath() != null) {
localRepositoryPath = request.getLocalRepositoryPath().getAbsolutePath();
}
if (localRepositoryPath == null || localRepositoryPath.isEmpty()) {
String path = request.getUserProperties().getProperty(Constants.MAVEN_USER_CONF);
if (path == null) {
path = request.getSystemProperties().getProperty("user.home") + File.separator + ".m2";
}
localRepositoryPath = new File(path, "repository").getAbsolutePath();
}
try {
return repositorySystem.createLocalRepository(new File(localRepositoryPath));
} catch (Exception e) {
throw new MavenExecutionRequestPopulationException("Cannot create local repository.", e);
}
}
private void baseDirectory(MavenExecutionRequest request) {
if (request.getBaseDirectory() == null && request.getPom() != null) {
request.setBaseDirectory(request.getPom().getAbsoluteFile().getParentFile());
}
}
/*if_not[MAVEN4]*/
@Override
@Deprecated
public MavenExecutionRequest populateFromSettings(MavenExecutionRequest request, Settings settings)
throws MavenExecutionRequestPopulationException {
if (settings == null) {
return request;
}
request.setOffline(settings.isOffline());
request.setInteractiveMode(settings.isInteractiveMode());
request.setPluginGroups(settings.getPluginGroups());
request.setLocalRepositoryPath(settings.getLocalRepository());
for (Server server : settings.getServers()) {
server = server.clone();
request.addServer(server);
}
// <proxies>
// <proxy>
// <active>true</active>
// <protocol>http</protocol>
// <host>proxy.somewhere.com</host>
// <port>8080</port>
// <username>proxyuser</username>
// <password>somepassword</password>
// <nonProxyHosts>www.google.com|*.somewhere.com</nonProxyHosts>
// </proxy>
// </proxies>
for (Proxy proxy : settings.getProxies()) {
if (!proxy.isActive()) {
continue;
}
proxy = proxy.clone();
request.addProxy(proxy);
}
// <mirrors>
// <mirror>
// <id>nexus</id>
// <mirrorOf>*</mirrorOf>
// <url>http://repository.sonatype.org/content/groups/public</url>
// </mirror>
// </mirrors>
for (Mirror mirror : settings.getMirrors()) {
mirror = mirror.clone();
request.addMirror(mirror);
}
request.setActiveProfiles(settings.getActiveProfiles());
for (org.apache.maven.settings.Profile rawProfile : settings.getProfiles()) {
request.addProfile(SettingsUtils.convertFromSettingsProfile(rawProfile));
if (settings.getActiveProfiles().contains(rawProfile.getId())) {
List<Repository> remoteRepositories = rawProfile.getRepositories();
for (Repository remoteRepository : remoteRepositories) {
try {
request.addRemoteRepository(MavenRepositorySystem.buildArtifactRepository(remoteRepository));
} catch (InvalidRepositoryException e) {
// do nothing for now
}
}
List<Repository> pluginRepositories = rawProfile.getPluginRepositories();
for (Repository pluginRepo : pluginRepositories) {
try {
request.addPluginArtifactRepository(MavenRepositorySystem.buildArtifactRepository(pluginRepo));
} catch (InvalidRepositoryException e) {
// do nothing for now
}
}
}
}
return request;
}
/*end[MAVEN4]*/
}
| DefaultMavenExecutionRequestPopulator |
java | spring-projects__spring-framework | spring-tx/src/test/java/org/springframework/transaction/interceptor/TransactionAttributeSourceTests.java | {
"start": 1185,
"end": 4081
} | class ____ {
@Test
void matchAlwaysTransactionAttributeSource() throws Exception {
MatchAlwaysTransactionAttributeSource tas = new MatchAlwaysTransactionAttributeSource();
TransactionAttribute ta = tas.getTransactionAttribute(Object.class.getMethod("hashCode"), null);
assertThat(ta).isNotNull();
assertThat(ta.getPropagationBehavior()).isEqualTo(TransactionDefinition.PROPAGATION_REQUIRED);
tas.setTransactionAttribute(new DefaultTransactionAttribute(TransactionDefinition.PROPAGATION_SUPPORTS));
ta = tas.getTransactionAttribute(IOException.class.getMethod("getMessage"), IOException.class);
assertThat(ta).isNotNull();
assertThat(ta.getPropagationBehavior()).isEqualTo(TransactionDefinition.PROPAGATION_SUPPORTS);
}
@Test
void nameMatchTransactionAttributeSourceWithStarAtStartOfMethodName() throws Exception {
NameMatchTransactionAttributeSource tas = new NameMatchTransactionAttributeSource();
Properties attributes = new Properties();
attributes.put("*ashCode", "PROPAGATION_REQUIRED");
tas.setProperties(attributes);
TransactionAttribute ta = tas.getTransactionAttribute(Object.class.getMethod("hashCode"), null);
assertThat(ta).isNotNull();
assertThat(ta.getPropagationBehavior()).isEqualTo(TransactionDefinition.PROPAGATION_REQUIRED);
}
@Test
void nameMatchTransactionAttributeSourceWithStarAtEndOfMethodName() throws Exception {
NameMatchTransactionAttributeSource tas = new NameMatchTransactionAttributeSource();
Properties attributes = new Properties();
attributes.put("hashCod*", "PROPAGATION_REQUIRED");
tas.setProperties(attributes);
TransactionAttribute ta = tas.getTransactionAttribute(Object.class.getMethod("hashCode"), null);
assertThat(ta).isNotNull();
assertThat(ta.getPropagationBehavior()).isEqualTo(TransactionDefinition.PROPAGATION_REQUIRED);
}
@Test
void nameMatchTransactionAttributeSourceMostSpecificMethodNameIsDefinitelyMatched() throws Exception {
NameMatchTransactionAttributeSource tas = new NameMatchTransactionAttributeSource();
Properties attributes = new Properties();
attributes.put("*", "PROPAGATION_REQUIRED");
attributes.put("hashCode", "PROPAGATION_MANDATORY");
tas.setProperties(attributes);
TransactionAttribute ta = tas.getTransactionAttribute(Object.class.getMethod("hashCode"), null);
assertThat(ta).isNotNull();
assertThat(ta.getPropagationBehavior()).isEqualTo(TransactionDefinition.PROPAGATION_MANDATORY);
}
@Test
void nameMatchTransactionAttributeSourceWithEmptyMethodName() throws Exception {
NameMatchTransactionAttributeSource tas = new NameMatchTransactionAttributeSource();
Properties attributes = new Properties();
attributes.put("", "PROPAGATION_MANDATORY");
tas.setProperties(attributes);
TransactionAttribute ta = tas.getTransactionAttribute(Object.class.getMethod("hashCode"), null);
assertThat(ta).isNull();
}
}
| TransactionAttributeSourceTests |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/PahoMqtt5EndpointBuilderFactory.java | {
"start": 42167,
"end": 51685
} | interface ____
extends
EndpointConsumerBuilder {
default PahoMqtt5EndpointConsumerBuilder basic() {
return (PahoMqtt5EndpointConsumerBuilder) this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedPahoMqtt5EndpointConsumerBuilder bridgeErrorHandler(boolean bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions (if possible) occurred while the Camel
* consumer is trying to pickup incoming messages, or the likes, will
* now be processed as a message and handled by the routing Error
* Handler. Important: This is only possible if the 3rd party component
* allows Camel to be alerted if an exception was thrown. Some
* components handle this internally only, and therefore
* bridgeErrorHandler is not possible. In other situations we may
* improve the Camel component to hook into the 3rd party component and
* make this possible for future releases. By default the consumer will
* use the org.apache.camel.spi.ExceptionHandler to deal with
* exceptions, that will be logged at WARN or ERROR level and ignored.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: consumer (advanced)
*
* @param bridgeErrorHandler the value to set
* @return the dsl builder
*/
default AdvancedPahoMqtt5EndpointConsumerBuilder bridgeErrorHandler(String bridgeErrorHandler) {
doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedPahoMqtt5EndpointConsumerBuilder exceptionHandler(org.apache.camel.spi.ExceptionHandler exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
*
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
*
* Group: consumer (advanced)
*
* @param exceptionHandler the value to set
* @return the dsl builder
*/
default AdvancedPahoMqtt5EndpointConsumerBuilder exceptionHandler(String exceptionHandler) {
doSetProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option is a: <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedPahoMqtt5EndpointConsumerBuilder exchangePattern(org.apache.camel.ExchangePattern exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
*
* The option will be converted to a
* <code>org.apache.camel.ExchangePattern</code> type.
*
* Group: consumer (advanced)
*
* @param exchangePattern the value to set
* @return the dsl builder
*/
default AdvancedPahoMqtt5EndpointConsumerBuilder exchangePattern(String exchangePattern) {
doSetProperty("exchangePattern", exchangePattern);
return this;
}
/**
* To use an existing mqtt client.
*
* The option is a:
* <code>org.eclipse.paho.mqttv5.client.MqttClient</code> type.
*
* Group: advanced
*
* @param client the value to set
* @return the dsl builder
*/
default AdvancedPahoMqtt5EndpointConsumerBuilder client(org.eclipse.paho.mqttv5.client.MqttClient client) {
doSetProperty("client", client);
return this;
}
/**
* To use an existing mqtt client.
*
* The option will be converted to a
* <code>org.eclipse.paho.mqttv5.client.MqttClient</code> type.
*
* Group: advanced
*
* @param client the value to set
* @return the dsl builder
*/
default AdvancedPahoMqtt5EndpointConsumerBuilder client(String client) {
doSetProperty("client", client);
return this;
}
/**
* Sets the Custom WebSocket Headers for the WebSocket Connection.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.String></code> type.
*
* Group: advanced
*
* @param customWebSocketHeaders the value to set
* @return the dsl builder
*/
default AdvancedPahoMqtt5EndpointConsumerBuilder customWebSocketHeaders(Map<java.lang.String, java.lang.String> customWebSocketHeaders) {
doSetProperty("customWebSocketHeaders", customWebSocketHeaders);
return this;
}
/**
* Sets the Custom WebSocket Headers for the WebSocket Connection.
*
* The option will be converted to a
* <code>java.util.Map<java.lang.String, java.lang.String></code>
* type.
*
* Group: advanced
*
* @param customWebSocketHeaders the value to set
* @return the dsl builder
*/
default AdvancedPahoMqtt5EndpointConsumerBuilder customWebSocketHeaders(String customWebSocketHeaders) {
doSetProperty("customWebSocketHeaders", customWebSocketHeaders);
return this;
}
/**
* Set the time in seconds that the executor service should wait when
* terminating before forcefully terminating. It is not recommended to
* change this value unless you are absolutely sure that you need to.
*
* The option is a: <code>int</code> type.
*
* Default: 1
* Group: advanced
*
* @param executorServiceTimeout the value to set
* @return the dsl builder
*/
default AdvancedPahoMqtt5EndpointConsumerBuilder executorServiceTimeout(int executorServiceTimeout) {
doSetProperty("executorServiceTimeout", executorServiceTimeout);
return this;
}
/**
* Set the time in seconds that the executor service should wait when
* terminating before forcefully terminating. It is not recommended to
* change this value unless you are absolutely sure that you need to.
*
* The option will be converted to a <code>int</code> type.
*
* Default: 1
* Group: advanced
*
* @param executorServiceTimeout the value to set
* @return the dsl builder
*/
default AdvancedPahoMqtt5EndpointConsumerBuilder executorServiceTimeout(String executorServiceTimeout) {
doSetProperty("executorServiceTimeout", executorServiceTimeout);
return this;
}
}
/**
* Builder for endpoint producers for the Paho MQTT 5 component.
*/
public | AdvancedPahoMqtt5EndpointConsumerBuilder |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/ClusterEntity.java | {
"start": 1099,
"end": 1494
} | class ____ extends HierarchicalTimelineEntity {
public ClusterEntity() {
super(TimelineEntityType.YARN_CLUSTER.toString());
}
public ClusterEntity(TimelineEntity entity) {
super(entity);
if (!entity.getType().equals(TimelineEntityType.YARN_CLUSTER.toString())) {
throw new IllegalArgumentException("Incompatible entity type: "
+ getId());
}
}
}
| ClusterEntity |
java | quarkusio__quarkus | extensions/vertx-http/deployment/src/test/java/io/quarkus/vertx/http/RandomPortTest.java | {
"start": 686,
"end": 1406
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest CONFIG = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addAsResource(new StringAsset("quarkus.http.test-port=0"),
"application.properties"));
@TestHTTPResource("test")
URL url;
@Test
public void portShouldNotBeZero() {
assertThat(url.getPort()).isNotZero();
}
@Test
public void testActualPortAccessibleToApp() {
RestAssured.get("/test").then().body(Matchers.equalTo(Integer.toString(url.getPort())));
RestAssured.get("/app").then().body(Matchers.equalTo(Integer.toString(url.getPort())));
}
public static | RandomPortTest |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/MonoCallableOnAssembly.java | {
"start": 1611,
"end": 3278
} | class ____<T> extends InternalMonoOperator<T, T>
implements Callable<T>, AssemblyOp {
final AssemblySnapshot stacktrace;
MonoCallableOnAssembly(Mono<? extends T> source, AssemblySnapshot stacktrace) {
super(source);
this.stacktrace = stacktrace;
}
@Override
public @Nullable T block() {
//duration is ignored below
return block(Duration.ZERO);
}
@Override
@SuppressWarnings("unchecked")
public @Nullable T block(Duration timeout) {
try {
return ((Callable<@Nullable T>) source).call();
}
catch (Throwable e) {
throw Exceptions.propagate(e);
}
}
@Override
@SuppressWarnings("unchecked")
public CoreSubscriber<? super T> subscribeOrReturn(CoreSubscriber<? super T> actual) {
if (actual instanceof Fuseable.ConditionalSubscriber) {
Fuseable.ConditionalSubscriber<? super T>
cs = (Fuseable.ConditionalSubscriber<? super T>) actual;
return new FluxOnAssembly.OnAssemblyConditionalSubscriber<>(cs,
stacktrace,
source,
this);
}
else {
return new FluxOnAssembly.OnAssemblySubscriber<>(actual, stacktrace, source, this);
}
}
@SuppressWarnings("unchecked")
@Override
public @Nullable T call() throws Exception {
return ((Callable<@Nullable T>) source).call();
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.ACTUAL_METADATA) return !stacktrace.isCheckpoint;
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return super.scanUnsafe(key);
}
@Override
public String stepName() {
return stacktrace.operatorAssemblyInformation();
}
@Override
public String toString() {
return stacktrace.operatorAssemblyInformation();
}
}
| MonoCallableOnAssembly |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/CMgrCompletedAppsEvent.java | {
"start": 1081,
"end": 1580
} | class ____ extends ContainerManagerEvent {
private final List<ApplicationId> appsToCleanup;
private final Reason reason;
public CMgrCompletedAppsEvent(List<ApplicationId> appsToCleanup, Reason reason) {
super(ContainerManagerEventType.FINISH_APPS);
this.appsToCleanup = appsToCleanup;
this.reason = reason;
}
public List<ApplicationId> getAppsToCleanup() {
return this.appsToCleanup;
}
public Reason getReason() {
return reason;
}
public | CMgrCompletedAppsEvent |
java | netty__netty | transport-native-epoll/src/test/java/io/netty/channel/epoll/EpollEventLoopTest.java | {
"start": 1644,
"end": 7311
} | class ____ extends AbstractSingleThreadEventLoopTest {
@Override
protected boolean supportsChannelIteration() {
return true;
}
@Override
protected EventLoopGroup newEventLoopGroup() {
return new EpollEventLoopGroup();
}
@Override
protected EventLoopGroup newAutoScalingEventLoopGroup() {
return new EpollEventLoopGroup(SCALING_MAX_THREADS, (Executor) null, AUTO_SCALING_CHOOSER_FACTORY,
DefaultSelectStrategyFactory.INSTANCE);
}
@Override
protected ServerSocketChannel newChannel() {
return new EpollServerSocketChannel();
}
@Override
protected Class<? extends ServerChannel> serverChannelClass() {
return EpollServerSocketChannel.class;
}
@Test
public void testScheduleBigDelayNotOverflow() {
final AtomicReference<Throwable> capture = new AtomicReference<Throwable>();
final EventLoopGroup group = new EpollEventLoop(null,
new ThreadPerTaskExecutor(new DefaultThreadFactory(getClass())), eventLoop -> new EpollIoHandler(
eventLoop, 0, DefaultSelectStrategyFactory.INSTANCE.newSelectStrategy()) {
@Override
void handleLoopException(Throwable t) {
capture.set(t);
super.handleLoopException(t);
}
});
try {
final EventLoop eventLoop = group.next();
Future<?> future = eventLoop.schedule(new Runnable() {
@Override
public void run() {
// NOOP
}
}, Long.MAX_VALUE, TimeUnit.MILLISECONDS);
assertFalse(future.awaitUninterruptibly(1000));
assertTrue(future.cancel(true));
assertNull(capture.get());
} finally {
group.shutdownGracefully();
}
}
@Test
public void testEventFDETSemantics() throws Throwable {
final FileDescriptor epoll = Native.newEpollCreate();
final FileDescriptor eventFd = Native.newEventFd();
final FileDescriptor timerFd = Native.newTimerFd();
final EpollEventArray array = new EpollEventArray(1024);
try {
Native.epollCtlAdd(epoll.intValue(), eventFd.intValue(), Native.EPOLLIN | Native.EPOLLET);
final AtomicReference<Throwable> causeRef = new AtomicReference<Throwable>();
final AtomicInteger integer = new AtomicInteger();
final Thread t = new Thread(new Runnable() {
@Override
public void run() {
try {
for (int i = 0; i < 2; i++) {
int ready = Native.epollWait(epoll, array, timerFd, -1, -1);
assertEquals(1, ready);
assertEquals(eventFd.intValue(), array.fd(0));
integer.incrementAndGet();
}
} catch (IOException e) {
causeRef.set(e);
}
}
});
t.start();
Native.eventFdWrite(eventFd.intValue(), 1);
// Spin until we was the wakeup.
while (integer.get() != 1) {
Thread.sleep(10);
}
// Sleep for a short moment to ensure there is not other wakeup.
Thread.sleep(1000);
assertEquals(1, integer.get());
Native.eventFdWrite(eventFd.intValue(), 1);
t.join();
Throwable cause = causeRef.get();
if (cause != null) {
throw cause;
}
assertEquals(2, integer.get());
} finally {
array.free();
epoll.close();
eventFd.close();
timerFd.close();
}
}
@Test
public void testResultNoTimeoutCorrectlyEncoded() throws Throwable {
final FileDescriptor epoll = Native.newEpollCreate();
final FileDescriptor eventFd = Native.newEventFd();
final FileDescriptor timerFd = Native.newTimerFd();
final EpollEventArray array = new EpollEventArray(1024);
try {
Native.epollCtlAdd(epoll.intValue(), eventFd.intValue(), Native.EPOLLIN | Native.EPOLLET);
final AtomicReference<Throwable> causeRef = new AtomicReference<Throwable>();
final Thread t = new Thread(new Runnable() {
@Override
public void run() {
try {
for (;;) {
long ready = Native.epollWait(epoll, array, timerFd, 0, 0, 10);
if (ready > 0) {
assertEquals(1, Native.epollReady(ready));
assertEquals(eventFd.intValue(), array.fd(0));
return;
}
Thread.sleep(100);
}
} catch (IOException e) {
causeRef.set(e);
} catch (InterruptedException ignore) {
// ignore
}
}
});
t.start();
Native.eventFdWrite(eventFd.intValue(), 1);
t.join();
Throwable cause = causeRef.get();
if (cause != null) {
throw cause;
}
} finally {
array.free();
epoll.close();
eventFd.close();
timerFd.close();
}
}
}
| EpollEventLoopTest |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java | {
"start": 4972,
"end": 5351
} | enum ____ {
OK(0),
INVALID_USER_NAME(2),
UNABLE_TO_EXECUTE_CONTAINER_SCRIPT(7),
INVALID_CONTAINER_PID(9),
INVALID_CONTAINER_EXEC_PERMISSIONS(22),
INVALID_CONFIG_FILE(24),
WRITE_CGROUP_FAILED(27);
private final int value;
ResultCode(int value) {
this.value = value;
}
public int getValue() {
return value;
}
}
} | ResultCode |
java | google__dagger | javatests/dagger/internal/codegen/LazyClassKeyMapBindingComponentProcessorTest.java | {
"start": 11472,
"end": 12085
} | interface ____ {",
" @Provides",
" @IntoMap",
" @LazyClassKey(FooKey.class)",
" static String provideString() { return \"\"; }",
"}");
CompilerTests.daggerCompiler(fooKey, fooKeyModule)
.withProcessingOptions(compilerMode.processorOptions())
.compile(
subject -> {
subject.hasErrorCount(0);
subject.generatedTextResourceFileWithPath(
"META-INF/proguard/test_FooKeyModule_LazyClassKeys.pro")
.isEqualTo("-keep,allowobfuscation,allowshrinking | FooKeyModule |
java | spring-projects__spring-framework | spring-aop/src/main/java/org/springframework/aop/support/AopUtils.java | {
"start": 2442,
"end": 4237
} | class ____ {
private static final boolean COROUTINES_REACTOR_PRESENT = ClassUtils.isPresent(
"kotlinx.coroutines.reactor.MonoKt", AopUtils.class.getClassLoader());
/**
* Check whether the given object is a JDK dynamic proxy or a CGLIB proxy.
* <p>This method additionally checks if the given object is an instance
* of {@link SpringProxy}.
* @param object the object to check
* @see #isJdkDynamicProxy
* @see #isCglibProxy
*/
@Contract("null -> false")
public static boolean isAopProxy(@Nullable Object object) {
return (object instanceof SpringProxy && (Proxy.isProxyClass(object.getClass()) ||
object.getClass().getName().contains(ClassUtils.CGLIB_CLASS_SEPARATOR)));
}
/**
* Check whether the given object is a JDK dynamic proxy.
* <p>This method goes beyond the implementation of
* {@link Proxy#isProxyClass(Class)} by additionally checking if the
* given object is an instance of {@link SpringProxy}.
* @param object the object to check
* @see java.lang.reflect.Proxy#isProxyClass
*/
@Contract("null -> false")
public static boolean isJdkDynamicProxy(@Nullable Object object) {
return (object instanceof SpringProxy && Proxy.isProxyClass(object.getClass()));
}
/**
* Check whether the given object is a CGLIB proxy.
* <p>This method goes beyond the implementation of
* {@link ClassUtils#isCglibProxy(Object)} by additionally checking if
* the given object is an instance of {@link SpringProxy}.
* @param object the object to check
* @see ClassUtils#isCglibProxy(Object)
*/
@Contract("null -> false")
public static boolean isCglibProxy(@Nullable Object object) {
return (object instanceof SpringProxy &&
object.getClass().getName().contains(ClassUtils.CGLIB_CLASS_SEPARATOR));
}
/**
* Determine the target | AopUtils |
java | spring-projects__spring-boot | module/spring-boot-kafka/src/main/java/org/springframework/boot/kafka/autoconfigure/KafkaAnnotationDrivenConfiguration.java | {
"start": 2871,
"end": 8096
} | class ____ {
private final KafkaProperties properties;
private final @Nullable RecordMessageConverter recordMessageConverter;
private final @Nullable RecordFilterStrategy<Object, Object> recordFilterStrategy;
private final BatchMessageConverter batchMessageConverter;
private final @Nullable KafkaTemplate<Object, Object> kafkaTemplate;
private final @Nullable KafkaAwareTransactionManager<Object, Object> transactionManager;
private final @Nullable ConsumerAwareRebalanceListener rebalanceListener;
private final @Nullable CommonErrorHandler commonErrorHandler;
private final @Nullable AfterRollbackProcessor<Object, Object> afterRollbackProcessor;
private final @Nullable RecordInterceptor<Object, Object> recordInterceptor;
private final @Nullable BatchInterceptor<Object, Object> batchInterceptor;
private final @Nullable Function<MessageListenerContainer, String> threadNameSupplier;
KafkaAnnotationDrivenConfiguration(KafkaProperties properties,
ObjectProvider<RecordMessageConverter> recordMessageConverter,
ObjectProvider<RecordFilterStrategy<Object, Object>> recordFilterStrategy,
ObjectProvider<BatchMessageConverter> batchMessageConverter,
ObjectProvider<KafkaTemplate<Object, Object>> kafkaTemplate,
ObjectProvider<KafkaAwareTransactionManager<Object, Object>> kafkaTransactionManager,
ObjectProvider<ConsumerAwareRebalanceListener> rebalanceListener,
ObjectProvider<CommonErrorHandler> commonErrorHandler,
ObjectProvider<AfterRollbackProcessor<Object, Object>> afterRollbackProcessor,
ObjectProvider<RecordInterceptor<Object, Object>> recordInterceptor,
ObjectProvider<BatchInterceptor<Object, Object>> batchInterceptor,
ObjectProvider<Function<MessageListenerContainer, String>> threadNameSupplier) {
this.properties = properties;
this.recordMessageConverter = recordMessageConverter.getIfUnique();
this.recordFilterStrategy = recordFilterStrategy.getIfUnique();
this.batchMessageConverter = batchMessageConverter
.getIfUnique(() -> new BatchMessagingMessageConverter(this.recordMessageConverter));
this.kafkaTemplate = kafkaTemplate.getIfUnique();
this.transactionManager = kafkaTransactionManager.getIfUnique();
this.rebalanceListener = rebalanceListener.getIfUnique();
this.commonErrorHandler = commonErrorHandler.getIfUnique();
this.afterRollbackProcessor = afterRollbackProcessor.getIfUnique();
this.recordInterceptor = recordInterceptor.getIfUnique();
this.batchInterceptor = batchInterceptor.getIfUnique();
this.threadNameSupplier = threadNameSupplier.getIfUnique();
}
@Bean
@ConditionalOnMissingBean
@ConditionalOnThreading(Threading.PLATFORM)
ConcurrentKafkaListenerContainerFactoryConfigurer kafkaListenerContainerFactoryConfigurer() {
return configurer();
}
@Bean(name = "kafkaListenerContainerFactoryConfigurer")
@ConditionalOnMissingBean
@ConditionalOnThreading(Threading.VIRTUAL)
ConcurrentKafkaListenerContainerFactoryConfigurer kafkaListenerContainerFactoryConfigurerVirtualThreads() {
ConcurrentKafkaListenerContainerFactoryConfigurer configurer = configurer();
SimpleAsyncTaskExecutor executor = new SimpleAsyncTaskExecutor("kafka-");
executor.setVirtualThreads(true);
configurer.setListenerTaskExecutor(executor);
return configurer;
}
private ConcurrentKafkaListenerContainerFactoryConfigurer configurer() {
ConcurrentKafkaListenerContainerFactoryConfigurer configurer = new ConcurrentKafkaListenerContainerFactoryConfigurer();
configurer.setKafkaProperties(this.properties);
configurer.setBatchMessageConverter(this.batchMessageConverter);
configurer.setRecordMessageConverter(this.recordMessageConverter);
configurer.setRecordFilterStrategy(this.recordFilterStrategy);
configurer.setReplyTemplate(this.kafkaTemplate);
configurer.setTransactionManager(this.transactionManager);
configurer.setRebalanceListener(this.rebalanceListener);
configurer.setCommonErrorHandler(this.commonErrorHandler);
configurer.setAfterRollbackProcessor(this.afterRollbackProcessor);
configurer.setRecordInterceptor(this.recordInterceptor);
configurer.setBatchInterceptor(this.batchInterceptor);
configurer.setThreadNameSupplier(this.threadNameSupplier);
return configurer;
}
@Bean
@ConditionalOnMissingBean(name = "kafkaListenerContainerFactory")
ConcurrentKafkaListenerContainerFactory<?, ?> kafkaListenerContainerFactory(
ConcurrentKafkaListenerContainerFactoryConfigurer configurer,
ObjectProvider<ConsumerFactory<Object, Object>> kafkaConsumerFactory,
ObjectProvider<ContainerCustomizer<Object, Object, ConcurrentMessageListenerContainer<Object, Object>>> kafkaContainerCustomizer) {
ConcurrentKafkaListenerContainerFactory<Object, Object> factory = new ConcurrentKafkaListenerContainerFactory<>();
configurer.configure(factory, kafkaConsumerFactory
.getIfAvailable(() -> new DefaultKafkaConsumerFactory<>(this.properties.buildConsumerProperties())));
kafkaContainerCustomizer.ifAvailable(factory::setContainerCustomizer);
return factory;
}
@Configuration(proxyBeanMethods = false)
@EnableKafka
@ConditionalOnMissingBean(name = KafkaListenerConfigUtils.KAFKA_LISTENER_ANNOTATION_PROCESSOR_BEAN_NAME)
static | KafkaAnnotationDrivenConfiguration |
java | elastic__elasticsearch | x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HistoBackedHDRPercentileRanksAggregator.java | {
"start": 817,
"end": 2374
} | class ____ extends AbstractHistoBackedHDRPercentilesAggregator {
public HistoBackedHDRPercentileRanksAggregator(
String name,
ValuesSourceConfig config,
AggregationContext context,
Aggregator parent,
double[] percents,
int numberOfSignificantValueDigits,
boolean keyed,
DocValueFormat format,
Map<String, Object> metadata
) throws IOException {
super(name, config, context, parent, percents, numberOfSignificantValueDigits, keyed, format, metadata);
}
@Override
public InternalAggregation buildAggregation(long owningBucketOrdinal) {
DoubleHistogram state = getState(owningBucketOrdinal);
if (state == null) {
return buildEmptyAggregation();
} else {
return new InternalHDRPercentileRanks(name, keys, state, keyed, format, metadata());
}
}
@Override
public InternalAggregation buildEmptyAggregation() {
DoubleHistogram state;
state = new DoubleHistogram(numberOfSignificantValueDigits);
state.setAutoResize(true);
return new InternalHDRPercentileRanks(name, keys, state, keyed, format, metadata());
}
@Override
public double metric(String name, long bucketOrd) {
DoubleHistogram state = getState(bucketOrd);
if (state == null) {
return Double.NaN;
} else {
return InternalHDRPercentileRanks.percentileRank(state, Double.parseDouble(name));
}
}
}
| HistoBackedHDRPercentileRanksAggregator |
java | quarkusio__quarkus | independent-projects/arc/processor/src/main/java/io/quarkus/arc/processor/BeanConfiguratorBase.java | {
"start": 5731,
"end": 5815
} | class ____ a managed bean.
*
* @param type {@link Type} representation of a | of |
java | hibernate__hibernate-orm | tooling/metamodel-generator/src/test/java/org/hibernate/processor/test/mixedmode/ZeroCoordinates.java | {
"start": 240,
"end": 456
} | class ____ {
public float getLatitude() {
return 0f;
}
public void setLatitude(float latitude) {
}
public float getLongitude() {
return 0f;
}
public void setLongitude(float longitude) {
}
}
| ZeroCoordinates |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/snapshots/RegisteredPolicySnapshotsSerializationTests.java | {
"start": 854,
"end": 5118
} | class ____ extends AbstractChunkedSerializingTestCase<RegisteredPolicySnapshots> {
public void testMaybeAdd() {
{
RegisteredPolicySnapshots.Builder builder = new RegisteredPolicySnapshots.Builder(RegisteredPolicySnapshots.EMPTY);
var snap = new SnapshotId(randomAlphaOfLength(10), randomUUID());
builder.addIfSnapshotIsSLMInitiated(null, snap);
builder.addIfSnapshotIsSLMInitiated(Map.of(), snap);
builder.addIfSnapshotIsSLMInitiated(Map.of("not_policy", "policy-10"), snap);
builder.addIfSnapshotIsSLMInitiated(Map.of(SnapshotsService.POLICY_ID_METADATA_FIELD, 5), snap);
// immutable map in Map.of doesn't allows nulls
var meta = new HashMap<String, Object>();
meta.put(SnapshotsService.POLICY_ID_METADATA_FIELD, null);
builder.addIfSnapshotIsSLMInitiated(meta, snap);
RegisteredPolicySnapshots registered = builder.build();
assertTrue(registered.getSnapshots().isEmpty());
}
{
RegisteredPolicySnapshots.Builder builder = new RegisteredPolicySnapshots.Builder(RegisteredPolicySnapshots.EMPTY);
var snap = new SnapshotId(randomAlphaOfLength(10), randomUUID());
builder.addIfSnapshotIsSLMInitiated(Map.of(SnapshotsService.POLICY_ID_METADATA_FIELD, "cheddar"), snap);
RegisteredPolicySnapshots registered = builder.build();
assertEquals(List.of(new RegisteredPolicySnapshots.PolicySnapshot("cheddar", snap)), registered.getSnapshots());
}
}
@Override
protected RegisteredPolicySnapshots doParseInstance(XContentParser parser) throws IOException {
return RegisteredPolicySnapshots.parse(parser);
}
@Override
protected Writeable.Reader<RegisteredPolicySnapshots> instanceReader() {
return RegisteredPolicySnapshots::new;
}
@Override
protected RegisteredPolicySnapshots createTestInstance() {
return randomRegisteredPolicySnapshots();
}
@Override
protected RegisteredPolicySnapshots mutateInstance(RegisteredPolicySnapshots instance) throws IOException {
if (instance.getSnapshots().isEmpty()) {
return new RegisteredPolicySnapshots(List.of(randomPolicySnapshot()));
}
final int randIndex = between(0, instance.getSnapshots().size() - 1);
final RegisteredPolicySnapshots.PolicySnapshot policySnapshot = instance.getSnapshots().get(randIndex);
String policy = policySnapshot.getPolicy();
String snapshotName = policySnapshot.getSnapshotId().getName();
String snapshotUUID = policySnapshot.getSnapshotId().getUUID();
switch (between(0, 2)) {
case 0 -> {
policy = randomValueOtherThan(policy, this::randomPolicy);
}
case 1 -> {
snapshotName = randomValueOtherThan(snapshotName, ESTestCase::randomIdentifier);
}
case 2 -> {
snapshotUUID = randomValueOtherThan(snapshotName, ESTestCase::randomUUID);
}
default -> throw new AssertionError("failure, got illegal switch case");
}
List<RegisteredPolicySnapshots.PolicySnapshot> newSnapshots = new ArrayList<>(instance.getSnapshots());
newSnapshots.set(randIndex, new RegisteredPolicySnapshots.PolicySnapshot(policy, new SnapshotId(snapshotName, snapshotUUID)));
return new RegisteredPolicySnapshots(newSnapshots);
}
private RegisteredPolicySnapshots randomRegisteredPolicySnapshots() {
final List<RegisteredPolicySnapshots.PolicySnapshot> snapshots = new ArrayList<>();
for (int i = 0; i < randomIntBetween(0, 20); i++) {
snapshots.add(randomPolicySnapshot());
}
return new RegisteredPolicySnapshots(snapshots);
}
private String randomPolicy() {
return "policy-" + randomIntBetween(0, 20);
}
private RegisteredPolicySnapshots.PolicySnapshot randomPolicySnapshot() {
SnapshotId snapshotId = new SnapshotId(randomIdentifier(), randomUUID());
return new RegisteredPolicySnapshots.PolicySnapshot(randomPolicy(), snapshotId);
}
}
| RegisteredPolicySnapshotsSerializationTests |
java | elastic__elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextExpansionConfigUpdateTests.java | {
"start": 631,
"end": 2977
} | class ____ extends AbstractNlpConfigUpdateTestCase<TextExpansionConfigUpdate> {
public static TextExpansionConfigUpdate randomUpdate() {
TextExpansionConfigUpdate.Builder builder = new TextExpansionConfigUpdate.Builder();
if (randomBoolean()) {
builder.setResultsField(randomAlphaOfLength(8));
}
if (randomBoolean()) {
builder.setTokenizationUpdate(new BertTokenizationUpdate(randomFrom(Tokenization.Truncate.values()), null));
}
return builder.build();
}
public static TextExpansionConfigUpdate mutateForVersion(TextExpansionConfigUpdate instance, TransportVersion version) {
if (version.before(TransportVersions.V_8_1_0)) {
return new TextExpansionConfigUpdate(instance.getResultsField(), null);
}
return instance;
}
@Override
protected Writeable.Reader<TextExpansionConfigUpdate> instanceReader() {
return TextExpansionConfigUpdate::new;
}
@Override
protected TextExpansionConfigUpdate createTestInstance() {
return randomUpdate();
}
@Override
protected TextExpansionConfigUpdate mutateInstance(TextExpansionConfigUpdate instance) {
return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
}
@Override
protected TextExpansionConfigUpdate doParseInstance(XContentParser parser) throws IOException {
return TextExpansionConfigUpdate.fromXContentStrict(parser);
}
@Override
protected TextExpansionConfigUpdate mutateInstanceForVersion(TextExpansionConfigUpdate instance, TransportVersion version) {
return mutateForVersion(instance, version);
}
@Override
Tuple<Map<String, Object>, TextExpansionConfigUpdate> fromMapTestInstances(TokenizationUpdate expectedTokenization) {
TextExpansionConfigUpdate expected = new TextExpansionConfigUpdate("ml-results", expectedTokenization);
Map<String, Object> config = new HashMap<>() {
{
put(NlpConfig.RESULTS_FIELD.getPreferredName(), "ml-results");
}
};
return Tuple.tuple(config, expected);
}
@Override
TextExpansionConfigUpdate fromMap(Map<String, Object> map) {
return TextExpansionConfigUpdate.fromMap(map);
}
}
| TextExpansionConfigUpdateTests |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/GenerationStamp.java | {
"start": 1198,
"end": 1509
} | class ____ extends SequentialNumber {
/**
* The last reserved generation stamp.
*/
public static final long LAST_RESERVED_STAMP = 1000L;
/**
* Create a new instance, initialized to {@link #LAST_RESERVED_STAMP}.
*/
public GenerationStamp() {
super(LAST_RESERVED_STAMP);
}
}
| GenerationStamp |
java | elastic__elasticsearch | x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java | {
"start": 5056,
"end": 52079
} | class ____ extends ESTestCase {
private static final NamedXContentRegistry REGISTRY;
private ThreadPool threadPool;
private Client noopClient;
private NoOpHistoryStore historyStore;
static {
try (IndexLifecycle indexLifecycle = new IndexLifecycle(Settings.EMPTY)) {
List<NamedXContentRegistry.Entry> entries = new ArrayList<>(indexLifecycle.getNamedXContent());
REGISTRY = new NamedXContentRegistry(entries);
}
}
@Before
public void prepare() {
threadPool = new TestThreadPool("test");
noopClient = new NoOpClient(threadPool);
ClusterSettings settings = new ClusterSettings(
Settings.EMPTY,
Sets.union(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS, Set.of(LIFECYCLE_HISTORY_INDEX_ENABLED_SETTING))
);
historyStore = new NoOpHistoryStore(noopClient, ClusterServiceUtils.createClusterService(threadPool, settings));
}
@After
public void shutdown() {
historyStore.close();
threadPool.shutdownNow();
}
public void testRunPolicyTerminalPolicyStep() {
String policyName = "async_action_policy";
TerminalPolicyStep step = TerminalPolicyStep.INSTANCE;
PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step);
ClusterService clusterService = mock(ClusterService.class);
IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, historyStore, clusterService, threadPool, () -> 0L);
IndexMetadata indexMetadata = createIndex("my_index");
runner.runPolicyAfterStateChange(randomProjectIdOrDefault(), policyName, indexMetadata);
Mockito.verify(clusterService, times(1)).createTaskQueue(anyString(), any(), any());
Mockito.verifyNoMoreInteractions(clusterService);
}
public void testRunPolicyPhaseCompletePolicyStep() {
String policyName = "async_action_policy";
PhaseCompleteStep step = PhaseCompleteStep.finalStep(randomAlphaOfLength(4));
PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step);
ClusterService clusterService = mock(ClusterService.class);
IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, historyStore, clusterService, threadPool, () -> 0L);
IndexMetadata indexMetadata = createIndex("my_index");
final var state = projectStateFromProject(ProjectMetadata.builder(randomProjectIdOrDefault()).put(indexMetadata, true));
runner.runPolicyAfterStateChange(state.projectId(), policyName, indexMetadata);
runner.runPeriodicStep(state, policyName, indexMetadata);
Mockito.verify(clusterService, times(1)).createTaskQueue(anyString(), any(), any());
Mockito.verifyNoMoreInteractions(clusterService);
}
@SuppressWarnings("unchecked")
private static MasterServiceTaskQueue<IndexLifecycleClusterStateUpdateTask> newMockTaskQueue(ClusterService clusterService) {
final var masterServiceTaskQueue = mock(MasterServiceTaskQueue.class);
when(clusterService.<IndexLifecycleClusterStateUpdateTask>createTaskQueue(eq("ilm-runner"), eq(Priority.NORMAL), any())).thenReturn(
masterServiceTaskQueue
);
return masterServiceTaskQueue;
}
public void testRunPolicyPhaseCompleteWithMoreStepsPolicyStep() {
String policyName = "async_action_policy";
TerminalPolicyStep stop = TerminalPolicyStep.INSTANCE;
PhaseCompleteStep step = new PhaseCompleteStep(new StepKey("cold", "complete", "complete"), stop.getKey());
PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step);
ClusterService clusterService = mock(ClusterService.class);
MasterServiceTaskQueue<IndexLifecycleClusterStateUpdateTask> taskQueue = newMockTaskQueue(clusterService);
IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, historyStore, clusterService, threadPool, () -> 0L);
IndexMetadata indexMetadata = createIndex("my_index");
final var state = projectStateFromProject(ProjectMetadata.builder(randomProjectIdOrDefault()).put(indexMetadata, true));
runner.runPolicyAfterStateChange(state.projectId(), policyName, indexMetadata);
runner.runPeriodicStep(state, policyName, indexMetadata);
Mockito.verify(taskQueue, times(1)).submitTask(anyString(), any(), any());
}
public void testRunPolicyErrorStep() {
String policyName = "async_action_policy";
LifecyclePolicy policy = LifecyclePolicyTests.randomTimeseriesLifecyclePolicyWithAllPhases(policyName);
String phaseName = randomFrom(policy.getPhases().keySet());
Phase phase = policy.getPhases().get(phaseName);
PhaseExecutionInfo phaseExecutionInfo = new PhaseExecutionInfo(policy.getName(), phase, 1, randomNonNegativeLong());
String phaseJson = Strings.toString(phaseExecutionInfo);
LifecycleAction action = randomValueOtherThan(MigrateAction.DISABLED, () -> randomFrom(phase.getActions().values()));
Step step = randomFrom(action.toSteps(new NoOpClient(threadPool), phaseName, null, null));
StepKey stepKey = step.getKey();
PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step);
ClusterService clusterService = mock(ClusterService.class);
IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, historyStore, clusterService, threadPool, () -> 0L);
LifecycleExecutionState.Builder newState = LifecycleExecutionState.builder();
newState.setFailedStep(stepKey.name());
newState.setIsAutoRetryableError(false);
newState.setPhase(stepKey.phase());
newState.setAction(stepKey.action());
newState.setStep(ErrorStep.NAME);
newState.setPhaseDefinition(phaseJson);
IndexMetadata indexMetadata = IndexMetadata.builder("test")
.settings(randomIndexSettings())
.putCustom(ILM_CUSTOM_METADATA_KEY, newState.build().asMap())
.build();
runner.runPolicyAfterStateChange(randomProjectIdOrDefault(), policyName, indexMetadata);
Mockito.verify(clusterService).createTaskQueue(anyString(), any(Priority.class), any());
Mockito.verifyNoMoreInteractions(clusterService);
}
public void testSkip_afterStateChange() {
final var policyName = randomAlphaOfLength(10);
ClusterService clusterService = mock(ClusterService.class);
final var runner = new IndexLifecycleRunner(null, null, clusterService, null, () -> 0L);
final var index = IndexMetadata.builder(randomAlphaOfLength(5))
.settings(randomIndexSettings().put(LifecycleSettings.LIFECYCLE_SKIP, true))
.build();
runner.runPolicyAfterStateChange(randomProjectIdOrDefault(), policyName, index);
Mockito.verify(clusterService).createTaskQueue(anyString(), any(Priority.class), any());
Mockito.verifyNoMoreInteractions(clusterService);
}
public void testSkip_periodicRun() {
final var policyName = randomAlphaOfLength(10);
ClusterService clusterService = mock(ClusterService.class);
final var runner = new IndexLifecycleRunner(null, null, clusterService, null, () -> 0L);
final var index = IndexMetadata.builder(randomAlphaOfLength(5))
.settings(randomIndexSettings().put(LifecycleSettings.LIFECYCLE_SKIP, true))
.build();
runner.runPeriodicStep(null, policyName, index);
Mockito.verify(clusterService).createTaskQueue(anyString(), any(Priority.class), any());
Mockito.verifyNoMoreInteractions(clusterService);
}
public void testSkip_asyncAction() {
final var policyName = randomAlphaOfLength(10);
ClusterService clusterService = mock(ClusterService.class);
final var runner = new IndexLifecycleRunner(null, null, clusterService, null, () -> 0L);
final var index = IndexMetadata.builder(randomAlphaOfLength(5))
.settings(randomIndexSettings().put(LifecycleSettings.LIFECYCLE_SKIP, true))
.build();
final var state = projectStateFromProject(ProjectMetadata.builder(randomProjectIdOrDefault()).put(index, true));
runner.maybeRunAsyncAction(state, index, policyName, null);
Mockito.verify(clusterService).createTaskQueue(anyString(), any(Priority.class), any());
Mockito.verifyNoMoreInteractions(clusterService);
}
/**
 * Test that an async action step is not executed when ILM is stopped.
 */
public void testNotRunningAsyncActionWhenILMIsStopped() {
    String policyName = "stopped_policy";
    Step.StepKey stepKey = new Step.StepKey("phase", "action", "async_action_step");
    // Counting step: it must never be executed in this test.
    MockAsyncActionStep step = new MockAsyncActionStep(stepKey, null);
    PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step);
    ClusterService clusterService = mock(ClusterService.class);
    newMockTaskQueue(clusterService); // ensure constructor call to createTaskQueue is satisfied
    IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, historyStore, clusterService, threadPool, () -> 0L);
    IndexMetadata indexMetadata = IndexMetadata.builder("test")
        .settings(randomIndexSettings().put(LifecycleSettings.LIFECYCLE_NAME, policyName))
        .build();
    // Operation mode STOPPED is the condition under test.
    IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Map.of(), OperationMode.STOPPED);
    final var project = ProjectMetadata.builder(randomProjectIdOrDefault())
        .put(indexMetadata, true)
        .putCustom(IndexLifecycleMetadata.TYPE, ilm)
        .build();
    runner.maybeRunAsyncAction(projectStateFromProject(project), indexMetadata, policyName, stepKey);
    // The async step must not have run while ILM is stopped.
    assertThat(step.getExecuteCount(), equalTo(0L));
}
/**
 * When an index sits in the ERROR step with a retryable failure recorded against the
 * wait-for-rollover-ready step, a periodic run should re-submit work as a single
 * cluster state update task.
 */
public void testRunPolicyErrorStepOnRetryableFailedStep() {
    String policyName = "rollover_policy";
    String phaseName = "hot";
    TimeValue after = randomTimeValue(0, 1_000_000_000, TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS, TimeUnit.DAYS);
    Map<String, LifecycleAction> actions = new HashMap<>();
    RolloverAction action = RolloverActionTests.randomInstance();
    actions.put(RolloverAction.NAME, action);
    Phase phase = new Phase(phaseName, after, actions);
    PhaseExecutionInfo phaseExecutionInfo = new PhaseExecutionInfo(policyName, phase, 1, randomNonNegativeLong());
    String phaseJson = Strings.toString(phaseExecutionInfo);
    NoOpClient client = new NoOpClient(threadPool);
    // Pick the wait-for-rollover-ready step out of the rollover action's step list.
    List<Step> waitForRolloverStepList = action.toSteps(client, phaseName, null)
        .stream()
        .filter(s -> s.getKey().name().equals(WaitForRolloverReadyStep.NAME))
        .toList();
    assertThat(waitForRolloverStepList.size(), is(1));
    Step waitForRolloverStep = waitForRolloverStepList.get(0);
    StepKey stepKey = waitForRolloverStep.getKey();
    PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, waitForRolloverStep);
    ClusterService clusterService = mock(ClusterService.class);
    MasterServiceTaskQueue<IndexLifecycleClusterStateUpdateTask> taskQueue = newMockTaskQueue(clusterService);
    when(clusterService.state()).thenReturn(ClusterState.EMPTY_STATE);
    IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, historyStore, clusterService, threadPool, () -> 0L);
    // Execution state: index is in the ERROR step, failure marked auto-retryable.
    LifecycleExecutionState.Builder newState = LifecycleExecutionState.builder();
    newState.setFailedStep(stepKey.name());
    newState.setIsAutoRetryableError(true);
    newState.setPhase(stepKey.phase());
    newState.setAction(stepKey.action());
    newState.setStep(ErrorStep.NAME);
    newState.setPhaseDefinition(phaseJson);
    IndexMetadata indexMetadata = IndexMetadata.builder("test")
        .settings(randomIndexSettings().put(LifecycleSettings.LIFECYCLE_NAME, policyName))
        .putCustom(ILM_CUSTOM_METADATA_KEY, newState.build().asMap())
        .build();
    final var state = projectStateFromProject(ProjectMetadata.builder(randomProjectIdOrDefault()).put(indexMetadata, true));
    runner.runPeriodicStep(state, policyName, indexMetadata);
    // The retry surfaces as exactly one submitted cluster state update task.
    Mockito.verify(taskQueue, times(1)).submitTask(anyString(), any(), any());
}
/**
 * A cluster-state action step with no next step runs exactly once on a state change
 * and leaves the cluster state unmodified.
 */
public void testRunStateChangePolicyWithNoNextStep() throws Exception {
    String policyName = "foo";
    StepKey stepKey = new StepKey("phase", "action", "cluster_state_action_step");
    // Next step is null: the policy terminates after this step.
    MockClusterStateActionStep step = new MockClusterStateActionStep(stepKey, null);
    PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step);
    // Local thread pool (shadows the class-level one) so it can be torn down at the end.
    ThreadPool threadPool = new TestThreadPool("name");
    IndexMetadata indexMetadata = IndexMetadata.builder("test")
        .settings(randomIndexSettings().put(LifecycleSettings.LIFECYCLE_NAME, policyName))
        .build();
    ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
    DiscoveryNode node = clusterService.localNode();
    IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING);
    final var project = ProjectMetadata.builder(randomProjectIdOrDefault())
        .put(indexMetadata, true)
        .putCustom(IndexLifecycleMetadata.TYPE, ilm)
        .build();
    ClusterState state = ClusterState.builder(ClusterName.DEFAULT)
        .putProjectMetadata(project)
        .nodes(DiscoveryNodes.builder().add(node).masterNodeId(node.getId()).localNodeId(node.getId()))
        .build();
    ClusterServiceUtils.setState(clusterService, state);
    IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, historyStore, clusterService, threadPool, () -> 0L);
    ClusterState before = clusterService.state();
    // Latch released by the mock step when it executes.
    CountDownLatch latch = new CountDownLatch(1);
    step.setLatch(latch);
    runner.runPolicyAfterStateChange(project.id(), policyName, indexMetadata);
    awaitLatch(latch, 5, TimeUnit.SECONDS);
    ClusterState after = clusterService.state();
    // No next step, so nothing in the cluster state should have changed.
    assertEquals(before, after);
    assertThat(step.getExecuteCount(), equalTo(1L));
    ClusterServiceUtils.awaitNoPendingTasks(clusterService);
    clusterService.close();
    threadPool.shutdownNow();
}
/**
 * Two chained cluster-state action steps: after a state change both execute, the index's
 * lifecycle execution state is advanced to the second step, and a success entry is
 * recorded in the ILM history store.
 */
public void testRunStateChangePolicyWithNextStep() throws Exception {
    String policyName = "foo";
    StepKey stepKey = new StepKey("phase", "action", "cluster_state_action_step");
    StepKey nextStepKey = new StepKey("phase", "action", "next_cluster_state_action_step");
    MockClusterStateActionStep step = new MockClusterStateActionStep(stepKey, nextStepKey);
    MockClusterStateActionStep nextStep = new MockClusterStateActionStep(nextStepKey, null);
    MockPolicyStepsRegistry stepRegistry = createMultiStepPolicyStepRegistry(policyName, List.of(step, nextStep));
    // Only the two known step keys may ever be resolved.
    stepRegistry.setResolver((i, k) -> {
        if (stepKey.equals(k)) {
            return step;
        } else if (nextStepKey.equals(k)) {
            return nextStep;
        } else {
            fail("should not try to retrieve different step");
            return null;
        }
    });
    ThreadPool threadPool = new TestThreadPool("name");
    // Start the index exactly on the first step.
    LifecycleExecutionState les = LifecycleExecutionState.builder()
        .setPhase("phase")
        .setAction("action")
        .setStep("cluster_state_action_step")
        .build();
    IndexMetadata indexMetadata = IndexMetadata.builder("test")
        .settings(randomIndexSettings().put(LifecycleSettings.LIFECYCLE_NAME, policyName))
        .putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, les.asMap())
        .build();
    ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
    DiscoveryNode node = clusterService.localNode();
    IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING);
    final var project = ProjectMetadata.builder(randomProjectIdOrDefault())
        .put(indexMetadata, true)
        .putCustom(IndexLifecycleMetadata.TYPE, ilm)
        .build();
    ClusterState state = ClusterState.builder(ClusterName.DEFAULT)
        .putProjectMetadata(project)
        .nodes(DiscoveryNodes.builder().add(node).masterNodeId(node.getId()).localNodeId(node.getId()))
        .build();
    ClusterServiceUtils.setState(clusterService, state);
    long stepTime = randomLong();
    IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, historyStore, clusterService, threadPool, () -> stepTime);
    ClusterState before = clusterService.state();
    // Wait on the SECOND step's latch so both steps have run before asserting.
    CountDownLatch latch = new CountDownLatch(1);
    nextStep.setLatch(latch);
    runner.runPolicyAfterStateChange(project.id(), policyName, indexMetadata);
    awaitLatch(latch, 5, TimeUnit.SECONDS);
    // The cluster state can take a few extra milliseconds to update after the steps are executed
    assertBusy(() -> assertNotEquals(before, clusterService.state()));
    LifecycleExecutionState newExecutionState = clusterService.state()
        .metadata()
        .getProject(project.id())
        .index(indexMetadata.getIndex())
        .getLifecycleExecutionState();
    // Execution state should now point at the second step, stamped with the mocked clock.
    assertThat(newExecutionState.phase(), equalTo("phase"));
    assertThat(newExecutionState.action(), equalTo("action"));
    assertThat(newExecutionState.step(), equalTo("next_cluster_state_action_step"));
    assertThat(newExecutionState.stepTime(), equalTo(stepTime));
    assertThat(step.getExecuteCount(), equalTo(1L));
    assertThat(nextStep.getExecuteCount(), equalTo(1L));
    ClusterServiceUtils.awaitNoPendingTasks(clusterService);
    clusterService.close();
    threadPool.shutdownNow();
    // A history item for the successful transition must have been recorded.
    ILMHistoryItem historyItem = historyStore.getItems()
        .stream()
        .findFirst()
        .orElseThrow(() -> new AssertionError("failed to register ILM history"));
    assertThat(historyItem.toString(), containsString(Strings.format("""
        {
            "index": "test",
            "policy": "foo",
            "@timestamp": %s,
            "success": true,
            "state": {
                "phase": "phase",
                "action": "action",
                "step": "next_cluster_state_action_step",
                "step_time": "%s"
            }
        }""", stepTime, stepTime).replaceAll("\\s", "")));
}
/** Periodic-run variant of the failure-to-read-policy scenario. */
public void testRunPeriodicPolicyWithFailureToReadPolicy() throws Exception {
    doTestRunPolicyWithFailureToReadPolicy(false, true);
}
/** State-change variant of the failure-to-read-policy scenario. */
public void testRunStateChangePolicyWithFailureToReadPolicy() throws Exception {
    doTestRunPolicyWithFailureToReadPolicy(false, false);
}
/** Async-action variant of the failure-to-read-policy scenario. */
public void testRunAsyncActionPolicyWithFailureToReadPolicy() throws Exception {
    doTestRunPolicyWithFailureToReadPolicy(true, false);
}
/**
 * Shared scenario: the step registry throws while resolving the current step. However the policy
 * is triggered (async action, periodic run, or state change), the runner must record the failure
 * in the index's step info, leave the execution state on the current step, and execute nothing.
 *
 * @param asyncAction trigger via {@code maybeRunAsyncAction} when true
 * @param periodicAction trigger via {@code runPeriodicStep} when true; when both flags are
 *     false, trigger via {@code runPolicyAfterStateChange}
 */
public void doTestRunPolicyWithFailureToReadPolicy(boolean asyncAction, boolean periodicAction) throws Exception {
    String policyName = "foo";
    StepKey stepKey = new StepKey("phase", "action", "cluster_state_action_step");
    StepKey nextStepKey = new StepKey("phase", "action", "next_cluster_state_action_step");
    MockClusterStateActionStep step = new MockClusterStateActionStep(stepKey, nextStepKey);
    MockClusterStateActionStep nextStep = new MockClusterStateActionStep(nextStepKey, null);
    MockPolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step);
    AtomicBoolean resolved = new AtomicBoolean(false);
    // Every step resolution fails — simulating a policy that cannot be read.
    stepRegistry.setResolver((i, k) -> {
        resolved.set(true);
        throw new IllegalArgumentException("fake failure retrieving step");
    });
    ThreadPool threadPool = new TestThreadPool("name");
    LifecycleExecutionState les = LifecycleExecutionState.builder()
        .setPhase("phase")
        .setAction("action")
        .setStep("cluster_state_action_step")
        .build();
    IndexMetadata indexMetadata = IndexMetadata.builder("test")
        .settings(randomIndexSettings().put(LifecycleSettings.LIFECYCLE_NAME, policyName))
        .putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, les.asMap())
        .build();
    ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
    DiscoveryNode node = clusterService.localNode();
    IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING);
    final var project = ProjectMetadata.builder(randomProjectIdOrDefault())
        .put(indexMetadata, true)
        .putCustom(IndexLifecycleMetadata.TYPE, ilm)
        .build();
    ProjectState state = ClusterState.builder(ClusterName.DEFAULT)
        .putProjectMetadata(project)
        .nodes(DiscoveryNodes.builder().add(node).masterNodeId(node.getId()).localNodeId(node.getId()))
        .build()
        .projectState(project.id());
    ClusterServiceUtils.setState(clusterService, state.cluster());
    long stepTime = randomLong();
    IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, historyStore, clusterService, threadPool, () -> stepTime);
    // Trigger the policy via the requested entry point.
    if (asyncAction) {
        runner.maybeRunAsyncAction(state, indexMetadata, policyName, stepKey);
    } else if (periodicAction) {
        runner.runPeriodicStep(state, policyName, indexMetadata);
    } else {
        runner.runPolicyAfterStateChange(project.id(), policyName, indexMetadata);
    }
    // The cluster state can take a few extra milliseconds to update after the steps are executed
    ClusterServiceUtils.awaitClusterState(
        s -> s.metadata().getProject(state.projectId()).index(indexMetadata.getIndex()).getLifecycleExecutionState().stepInfo() != null,
        clusterService
    );
    LifecycleExecutionState newExecutionState = clusterService.state()
        .metadata()
        .getProject(state.projectId())
        .index(indexMetadata.getIndex())
        .getLifecycleExecutionState();
    // Execution state stays on the current step; neither step executed.
    assertThat(newExecutionState.phase(), equalTo("phase"));
    assertThat(newExecutionState.action(), equalTo("action"));
    assertThat(newExecutionState.step(), equalTo("cluster_state_action_step"));
    assertThat(step.getExecuteCount(), equalTo(0L));
    assertThat(nextStep.getExecuteCount(), equalTo(0L));
    // The resolver's exception is surfaced in the recorded step info.
    assertThat(
        newExecutionState.stepInfo(),
        containsString("{\"type\":\"illegal_argument_exception\",\"reason\":\"fake failure retrieving step\"}")
    );
    ClusterServiceUtils.awaitNoPendingTasks(clusterService);
    clusterService.close();
    threadPool.shutdownNow();
}
/**
 * Cluster-state changes must not trigger async action steps: after
 * {@code runPolicyAfterStateChange} the async step has not executed and the
 * cluster state is unchanged.
 */
public void testRunAsyncActionDoesNotRun() {
    String policyName = "foo";
    StepKey stepKey = new StepKey("phase", "action", "async_action_step");
    MockAsyncActionStep step = new MockAsyncActionStep(stepKey, null);
    PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step);
    ThreadPool threadPool = new TestThreadPool("name");
    IndexMetadata indexMetadata = IndexMetadata.builder("test")
        .settings(randomIndexSettings().put(LifecycleSettings.LIFECYCLE_NAME, policyName))
        .build();
    ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
    DiscoveryNode node = clusterService.localNode();
    IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING);
    final var project = ProjectMetadata.builder(randomProjectIdOrDefault())
        .put(indexMetadata, true)
        .putCustom(IndexLifecycleMetadata.TYPE, ilm)
        .build();
    ClusterState state = ClusterState.builder(ClusterName.DEFAULT)
        .putProjectMetadata(project)
        .nodes(DiscoveryNodes.builder().add(node).masterNodeId(node.getId()).localNodeId(node.getId()))
        .build();
    ClusterServiceUtils.setState(clusterService, state);
    IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, historyStore, clusterService, threadPool, () -> 0L);
    ClusterState before = clusterService.state();
    // State changes should not run AsyncAction steps
    runner.runPolicyAfterStateChange(project.id(), policyName, indexMetadata);
    ClusterState after = clusterService.state();
    assertEquals(before, after);
    assertThat(step.getExecuteCount(), equalTo(0L));
    ClusterServiceUtils.awaitNoPendingTasks(clusterService);
    clusterService.close();
    threadPool.shutdownNow();
}
/**
 * A cluster-state action step whose successor is an async action step: after a state change
 * both steps execute (the async one follows automatically), the cluster state changes, and
 * a success entry is recorded in ILM history.
 */
public void testRunStateChangePolicyWithAsyncActionNextStep() throws Exception {
    String policyName = "foo";
    StepKey stepKey = new StepKey("phase", "action", "cluster_state_action_step");
    StepKey nextStepKey = new StepKey("phase", "action", "async_action_step");
    MockClusterStateActionStep step = new MockClusterStateActionStep(stepKey, nextStepKey);
    MockAsyncActionStep nextStep = new MockAsyncActionStep(nextStepKey, null);
    MockPolicyStepsRegistry stepRegistry = createMultiStepPolicyStepRegistry(policyName, List.of(step, nextStep));
    // Only the two known step keys may ever be resolved.
    stepRegistry.setResolver((i, k) -> {
        if (stepKey.equals(k)) {
            return step;
        } else if (nextStepKey.equals(k)) {
            return nextStep;
        } else {
            fail("should not try to retrieve different step");
            return null;
        }
    });
    ThreadPool threadPool = new TestThreadPool("name");
    LifecycleExecutionState les = LifecycleExecutionState.builder()
        .setPhase("phase")
        .setAction("action")
        .setStep("cluster_state_action_step")
        .build();
    IndexMetadata indexMetadata = IndexMetadata.builder("test")
        .settings(randomIndexSettings().put(LifecycleSettings.LIFECYCLE_NAME, policyName))
        .putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, les.asMap())
        .build();
    ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
    DiscoveryNode node = clusterService.localNode();
    IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING);
    final var project = ProjectMetadata.builder(randomProjectIdOrDefault())
        .put(indexMetadata, true)
        .putCustom(IndexLifecycleMetadata.TYPE, ilm)
        .build();
    ClusterState state = ClusterState.builder(ClusterName.DEFAULT)
        .putProjectMetadata(project)
        .nodes(DiscoveryNodes.builder().add(node).masterNodeId(node.getId()).localNodeId(node.getId()))
        .build();
    ClusterServiceUtils.setState(clusterService, state);
    IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, historyStore, clusterService, threadPool, () -> 0L);
    ClusterState before = clusterService.state();
    // One latch per step so the test can await both executions.
    CountDownLatch latch = new CountDownLatch(1);
    step.setLatch(latch);
    CountDownLatch asyncLatch = new CountDownLatch(1);
    nextStep.setLatch(asyncLatch);
    runner.runPolicyAfterStateChange(project.id(), policyName, indexMetadata);
    // Wait for the cluster state action step
    awaitLatch(latch, 5, TimeUnit.SECONDS);
    // Wait for the async action step
    awaitLatch(asyncLatch, 5, TimeUnit.SECONDS);
    ClusterState after = clusterService.state();
    assertNotEquals(before, after);
    assertThat(step.getExecuteCount(), equalTo(1L));
    assertThat(nextStep.getExecuteCount(), equalTo(1L));
    ClusterServiceUtils.awaitNoPendingTasks(clusterService);
    clusterService.close();
    threadPool.shutdownNow();
    // A history item for the successful transition must have been recorded.
    ILMHistoryItem historyItem = historyStore.getItems()
        .stream()
        .findFirst()
        .orElseThrow(() -> new AssertionError("failed to register ILM history"));
    assertThat(historyItem.toString(), containsString("""
        {
            "index": "test",
            "policy": "foo",
            "@timestamp": 0,
            "success": true,
            "state": {
                "phase": "phase",
                "action": "action",
                "step": "async_action_step",
                "step_time": "0"
            }
        }""".replaceAll("\\s", "")));
}
/**
 * A periodic run executes the current async wait step once; because the wait condition is
 * not met, the next step is not executed and the cluster state is unchanged.
 */
public void testRunPeriodicStep() throws Exception {
    String policyName = "foo";
    StepKey stepKey = new StepKey("phase", "action", "cluster_state_action_step");
    StepKey nextStepKey = new StepKey("phase", "action", "async_action_step");
    MockAsyncWaitStep step = new MockAsyncWaitStep(stepKey, nextStepKey);
    MockAsyncWaitStep nextStep = new MockAsyncWaitStep(nextStepKey, null);
    MockPolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step);
    // Only the two known step keys may ever be resolved.
    stepRegistry.setResolver((i, k) -> {
        if (stepKey.equals(k)) {
            return step;
        } else if (nextStepKey.equals(k)) {
            return nextStep;
        } else {
            fail("should not try to retrieve different step");
            return null;
        }
    });
    ThreadPool threadPool = new TestThreadPool("name");
    LifecycleExecutionState les = LifecycleExecutionState.builder()
        .setPhase("phase")
        .setAction("action")
        .setStep("cluster_state_action_step")
        .build();
    IndexMetadata indexMetadata = IndexMetadata.builder("test")
        .settings(randomIndexSettings().put(LifecycleSettings.LIFECYCLE_NAME, policyName))
        .putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, les.asMap())
        .build();
    ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
    DiscoveryNode node = clusterService.localNode();
    IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING);
    final var project = ProjectMetadata.builder(randomProjectIdOrDefault())
        .put(indexMetadata, true)
        .putCustom(IndexLifecycleMetadata.TYPE, ilm)
        .build();
    ProjectState state = ClusterState.builder(ClusterName.DEFAULT)
        .putProjectMetadata(project)
        .nodes(DiscoveryNodes.builder().add(node).masterNodeId(node.getId()).localNodeId(node.getId()))
        .build()
        .projectState(project.id());
    logger.info("--> state: {}", state);
    ClusterServiceUtils.setState(clusterService, state.cluster());
    IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, historyStore, clusterService, threadPool, () -> 0L);
    ClusterState before = clusterService.state();
    CountDownLatch latch = new CountDownLatch(1);
    step.setLatch(latch);
    runner.runPeriodicStep(state, policyName, indexMetadata);
    awaitLatch(latch, 5, TimeUnit.SECONDS);
    // The wait step ran once but did not complete, so the state is untouched and the
    // follow-up step never executed.
    ClusterState after = clusterService.state();
    assertEquals(before, after);
    assertThat(step.getExecuteCount(), equalTo(1L));
    assertThat(nextStep.getExecuteCount(), equalTo(0L));
    ClusterServiceUtils.awaitNoPendingTasks(clusterService);
    clusterService.close();
    threadPool.shutdownNow();
}
/**
 * A cluster-state ACTION step triggered by a state change is submitted to the master
 * task queue as an execute-steps task with the expected description, and nothing else
 * touches the cluster service.
 */
public void testRunPolicyClusterStateActionStep() {
    String policyName = "cluster_state_action_policy";
    StepKey stepKey = new StepKey("phase", "action", "cluster_state_action_step");
    MockClusterStateActionStep step = new MockClusterStateActionStep(stepKey, null);
    PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step);
    ClusterService clusterService = mock(ClusterService.class);
    MasterServiceTaskQueue<IndexLifecycleClusterStateUpdateTask> taskQueue = newMockTaskQueue(clusterService);
    IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, historyStore, clusterService, threadPool, () -> 0L);
    IndexMetadata indexMetadata = createIndex("my_index");
    runner.runPolicyAfterStateChange(randomProjectIdOrDefault(), policyName, indexMetadata);
    // Matcher checks that the submitted task targets this index, policy, and step.
    final ExecuteStepsUpdateTaskMatcher taskMatcher = new ExecuteStepsUpdateTaskMatcher(indexMetadata.getIndex(), policyName, step);
    Mockito.verify(taskQueue, Mockito.times(1))
        .submitTask(
            Mockito.eq("""
                ilm-execute-cluster-state-steps [{"phase":"phase","action":"action","name":"cluster_state_action_step"} => null]"""),
            Mockito.argThat(taskMatcher),
            Mockito.eq(null)
        );
    Mockito.verifyNoMoreInteractions(taskQueue);
    Mockito.verify(clusterService, Mockito.times(1)).createTaskQueue(any(), any(), any());
    Mockito.verifyNoMoreInteractions(clusterService);
}
/**
 * A cluster-state WAIT step triggered by a state change is likewise submitted to the
 * master task queue as an execute-steps task. NOTE(review): the step key is still named
 * "cluster_state_action_step" although the step is a wait step — presumably intentional
 * reuse of the task description; confirm before renaming.
 */
public void testRunPolicyClusterStateWaitStep() {
    String policyName = "cluster_state_action_policy";
    StepKey stepKey = new StepKey("phase", "action", "cluster_state_action_step");
    MockClusterStateWaitStep step = new MockClusterStateWaitStep(stepKey, null);
    PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step);
    ClusterService clusterService = mock(ClusterService.class);
    MasterServiceTaskQueue<IndexLifecycleClusterStateUpdateTask> taskQueue = newMockTaskQueue(clusterService);
    IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, historyStore, clusterService, threadPool, () -> 0L);
    IndexMetadata indexMetadata = createIndex("my_index");
    runner.runPolicyAfterStateChange(randomProjectIdOrDefault(), policyName, indexMetadata);
    final ExecuteStepsUpdateTaskMatcher taskMatcher = new ExecuteStepsUpdateTaskMatcher(indexMetadata.getIndex(), policyName, step);
    Mockito.verify(taskQueue, Mockito.times(1))
        .submitTask(
            Mockito.eq("""
                ilm-execute-cluster-state-steps [{"phase":"phase","action":"action","name":"cluster_state_action_step"} => null]"""),
            Mockito.argThat(taskMatcher),
            Mockito.eq(null)
        );
    Mockito.verifyNoMoreInteractions(taskQueue);
    Mockito.verify(clusterService, Mockito.times(1)).createTaskQueue(any(), any(), any());
    Mockito.verifyNoMoreInteractions(clusterService);
}
/**
 * An async ACTION step is ignored on cluster-state changes: the step never executes
 * (so its configured exception is never thrown) and the cluster service is untouched
 * beyond task-queue creation.
 */
public void testRunPolicyAsyncActionStepClusterStateChangeIgnored() {
    String policyName = "async_action_policy";
    StepKey stepKey = new StepKey("phase", "action", "async_action_step");
    MockAsyncActionStep step = new MockAsyncActionStep(stepKey, null);
    // The exception would surface only if the step were (incorrectly) executed.
    Exception expectedException = new RuntimeException();
    step.setException(expectedException);
    PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step);
    ClusterService clusterService = mock(ClusterService.class);
    IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, historyStore, clusterService, threadPool, () -> 0L);
    IndexMetadata indexMetadata = createIndex("my_index");
    runner.runPolicyAfterStateChange(randomProjectIdOrDefault(), policyName, indexMetadata);
    assertEquals(0, step.getExecuteCount());
    Mockito.verify(clusterService, Mockito.times(1)).createTaskQueue(any(), any(), any());
    Mockito.verifyNoMoreInteractions(clusterService);
}
/**
 * An async WAIT step is ignored on cluster-state changes: the step never executes
 * (so its configured exception is never thrown) and the cluster service is untouched
 * beyond task-queue creation.
 */
public void testRunPolicyAsyncWaitStepClusterStateChangeIgnored() {
    String policyName = "async_wait_policy";
    StepKey stepKey = new StepKey("phase", "action", "async_wait_step");
    MockAsyncWaitStep step = new MockAsyncWaitStep(stepKey, null);
    // The exception would surface only if the step were (incorrectly) executed.
    Exception expectedException = new RuntimeException();
    step.setException(expectedException);
    PolicyStepsRegistry stepRegistry = createOneStepPolicyStepRegistry(policyName, step);
    ClusterService clusterService = mock(ClusterService.class);
    IndexLifecycleRunner runner = new IndexLifecycleRunner(stepRegistry, historyStore, clusterService, threadPool, () -> 0L);
    IndexMetadata indexMetadata = createIndex("my_index");
    runner.runPolicyAfterStateChange(randomProjectIdOrDefault(), policyName, indexMetadata);
    assertEquals(0, step.getExecuteCount());
    Mockito.verify(clusterService, Mockito.times(1)).createTaskQueue(any(), any(), any());
    Mockito.verifyNoMoreInteractions(clusterService);
}
/**
 * Running against a policy missing from an empty registry must not throw; instead the
 * runner submits a set-step-info task recording the "does not exist" failure.
 */
public void testRunPolicyThatDoesntExist() {
    String policyName = "cluster_state_action_policy";
    ClusterService clusterService = mock(ClusterService.class);
    MasterServiceTaskQueue<IndexLifecycleClusterStateUpdateTask> taskQueue = newMockTaskQueue(clusterService);
    // Empty registry: no policy can be resolved.
    IndexLifecycleRunner runner = new IndexLifecycleRunner(
        new PolicyStepsRegistry(NamedXContentRegistry.EMPTY, null, null),
        historyStore,
        clusterService,
        threadPool,
        () -> 0L
    );
    IndexMetadata indexMetadata = createIndex("my_index");
    // verify that no exception is thrown
    runner.runPolicyAfterStateChange(randomProjectIdOrDefault(), policyName, indexMetadata);
    // NOTE(review): the reason mentions "does_not_exist" while the policy under test is
    // "cluster_state_action_policy" — presumably the matcher doesn't compare this xcontent
    // strictly; confirm against SetStepInfoUpdateTaskMatcher before relying on it.
    final SetStepInfoUpdateTaskMatcher taskMatcher = new SetStepInfoUpdateTaskMatcher(
        indexMetadata.getIndex(),
        policyName,
        null,
        (builder, params) -> {
            builder.startObject();
            builder.field("reason", "policy [does_not_exist] does not exist");
            builder.field("type", "illegal_argument_exception");
            builder.endObject();
            return builder;
        }
    );
    Mockito.verify(taskQueue, Mockito.times(1))
        .submitTask(
            Mockito.eq("ilm-set-step-info {policy [cluster_state_action_policy], index [my_index], currentStep [null]}"),
            Mockito.argThat(taskMatcher),
            Mockito.eq(null)
        );
    Mockito.verifyNoMoreInteractions(taskQueue);
    Mockito.verify(clusterService, Mockito.times(1)).createTaskQueue(any(), any(), any());
    Mockito.verifyNoMoreInteractions(clusterService);
}
/**
 * {@code getCurrentStep}: with no execution state the policy's first step is returned;
 * with a step recorded in the index's custom metadata, that step is returned.
 */
public void testGetCurrentStep() {
    String policyName = "policy";
    StepKey firstStepKey = new StepKey("phase_1", "action_1", "step_1");
    StepKey secondStepKey = new StepKey("phase_1", "action_1", "step_2");
    Step firstStep = new MockStep(firstStepKey, secondStepKey);
    Map<String, Step> firstStepMap = new HashMap<>();
    firstStepMap.put(policyName, firstStep);
    Map<String, Map<StepKey, Step>> stepMap = new HashMap<>();
    Index index = new Index("test", "uuid");
    Step.StepKey MOCK_STEP_KEY = new Step.StepKey("mock", "mock", "mock");
    Client client = mock(Client.class);
    when(client.settings()).thenReturn(Settings.EMPTY);
    // Pick a random phase/action/step from a full timeseries policy as the "current" step.
    LifecyclePolicy policy = LifecyclePolicyTests.randomTimeseriesLifecyclePolicyWithAllPhases(policyName);
    LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Map.of(), 1, randomNonNegativeLong());
    String phaseName = randomFrom(policy.getPhases().keySet());
    Phase phase = policy.getPhases().get(phaseName);
    PhaseExecutionInfo pei = new PhaseExecutionInfo(policy.getName(), phase, 1, randomNonNegativeLong());
    String phaseJson = Strings.toString(pei);
    LifecycleAction action = randomValueOtherThan(MigrateAction.DISABLED, () -> randomFrom(phase.getActions().values()));
    Step step = randomFrom(action.toSteps(client, phaseName, MOCK_STEP_KEY, null));
    Settings indexSettings = randomIndexSettings().put(LifecycleSettings.LIFECYCLE_NAME, policyName).build();
    LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
    lifecycleState.setPhaseDefinition(phaseJson);
    lifecycleState.setPhase(step.getKey().phase());
    lifecycleState.setAction(step.getKey().action());
    lifecycleState.setStep(step.getKey().name());
    IndexMetadata indexMetadata = IndexMetadata.builder(index.getName())
        .settings(indexSettings)
        .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap())
        .build();
    SortedMap<String, LifecyclePolicyMetadata> metas = new TreeMap<>();
    metas.put(policyName, policyMetadata);
    PolicyStepsRegistry registry = new PolicyStepsRegistry(metas, firstStepMap, stepMap, REGISTRY, client, null);
    // First step is retrieved because there are no settings for the index
    IndexMetadata indexMetadataWithNoKey = IndexMetadata.builder(index.getName())
        .settings(indexSettings)
        .putCustom(ILM_CUSTOM_METADATA_KEY, LifecycleExecutionState.builder().build().asMap())
        .build();
    Step stepFromNoSettings = IndexLifecycleRunner.getCurrentStep(registry, policy.getName(), indexMetadataWithNoKey);
    assertEquals(firstStep, stepFromNoSettings);
    // The step that was written into the metadata is retrieved
    Step currentStep = IndexLifecycleRunner.getCurrentStep(registry, policy.getName(), indexMetadata);
    assertEquals(step.getKey(), currentStep.getKey());
}
/**
 * {@code isReadyToTransitionToThisPhase}: transitions are allowed with no creation date,
 * blocked while the index is younger than the phase's age (driven by a mocked clock),
 * allowed once the clock passes, and allowed when an origination date makes the index
 * old enough regardless of creation date.
 */
public void testIsReadyToTransition() {
    String policyName = "async_action_policy";
    StepKey stepKey = new StepKey("phase", MockAction.NAME, MockAction.NAME);
    MockAsyncActionStep step = new MockAsyncActionStep(stepKey, null);
    SortedMap<String, LifecyclePolicyMetadata> lifecyclePolicyMap = new TreeMap<>(
        Map.of(
            policyName,
            new LifecyclePolicyMetadata(
                createPolicy(policyName, null, step.getKey()),
                new HashMap<>(),
                randomNonNegativeLong(),
                randomNonNegativeLong()
            )
        )
    );
    Map<String, Step> firstStepMap = Map.of(policyName, step);
    Map<StepKey, Step> policySteps = Map.of(step.getKey(), step);
    Map<String, Map<StepKey, Step>> stepMap = Map.of(policyName, policySteps);
    PolicyStepsRegistry policyStepsRegistry = new PolicyStepsRegistry(
        lifecyclePolicyMap,
        firstStepMap,
        stepMap,
        NamedXContentRegistry.EMPTY,
        null,
        null
    );
    ClusterService clusterService = mock(ClusterService.class);
    // Mutable "now" lets the test move the clock around the creation/origination dates.
    final AtomicLong now = new AtomicLong(5);
    IndexLifecycleRunner runner = new IndexLifecycleRunner(policyStepsRegistry, historyStore, clusterService, threadPool, now::get);
    IndexMetadata indexMetadata = createIndex("my_index");
    // With no time, always transition
    assertTrue(
        "index should be able to transition with no creation date",
        runner.isReadyToTransitionToThisPhase(policyName, indexMetadata, "phase")
    );
    LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
    lifecycleState.setIndexCreationDate(10L);
    indexMetadata = IndexMetadata.builder(indexMetadata)
        .settings(Settings.builder().put(indexMetadata.getSettings()).build())
        .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap())
        .build();
    // Index is not old enough to transition
    assertFalse(
        "index is not able to transition if it isn't old enough",
        runner.isReadyToTransitionToThisPhase(policyName, indexMetadata, "phase")
    );
    // Set to the fuuuuuttuuuuuuurre
    now.set(Long.MAX_VALUE);
    assertTrue(
        "index should be able to transition past phase's age",
        runner.isReadyToTransitionToThisPhase(policyName, indexMetadata, "phase")
    );
    // Come back to the "present"
    now.set(5L);
    indexMetadata = IndexMetadata.builder(indexMetadata)
        .settings(Settings.builder().put(indexMetadata.getSettings()).put(IndexSettings.LIFECYCLE_ORIGINATION_DATE, 3L).build())
        .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap())
        .build();
    assertTrue(
        "index should be able to transition due to the origination date indicating it's old enough",
        runner.isReadyToTransitionToThisPhase(policyName, indexMetadata, "phase")
    );
}
/**
 * Builds a test lifecycle policy with up to two single-step {@link MockAction} phases:
 * a "safe" one and an "unsafe" one. Either step key may be {@code null} to omit that phase;
 * when both are present they must live in different phases.
 */
private static LifecyclePolicy createPolicy(String policyName, StepKey safeStep, StepKey unsafeStep) {
    final Map<String, Phase> phases = new HashMap<>();
    if (safeStep != null) {
        assert MockAction.NAME.equals(safeStep.action()) : "The safe action needs to be MockAction.NAME";
        assert unsafeStep == null || safeStep.phase().equals(unsafeStep.phase()) == false
            : "safe and unsafe actions must be in different phases";
        final Phase safePhase = singleMockActionPhase(safeStep, true);
        phases.put(safePhase.getName(), safePhase);
    }
    if (unsafeStep != null) {
        assert MockAction.NAME.equals(unsafeStep.action()) : "The unsafe action needs to be MockAction.NAME";
        final Phase unsafePhase = singleMockActionPhase(unsafeStep, false);
        phases.put(unsafePhase.getName(), unsafePhase);
    }
    return newTestLifecyclePolicy(policyName, phases);
}

/** Builds a zero-delay phase containing one {@link MockAction} with a single {@link MockStep}. */
private static Phase singleMockActionPhase(StepKey stepKey, boolean safe) {
    final MockAction mockAction = new MockAction(List.of(new MockStep(stepKey, null)), safe);
    final Map<String, LifecycleAction> actions = new HashMap<>();
    actions.put(mockAction.getWriteableName(), mockAction);
    return new Phase(stepKey.phase(), TimeValue.timeValueMillis(0), actions);
}
/** Creates index metadata for the given name using randomized shard/replica settings. */
private static IndexMetadata createIndex(String name) {
    final IndexMetadata.Builder builder = IndexMetadata.builder(name);
    builder.settings(randomIndexSettings());
    return builder.build();
}
/** Returns a settings builder with the current index version, 1-5 shards, and 0-5 replicas. */
private static Settings.Builder randomIndexSettings() {
    return indexSettings(IndexVersion.current(), randomIntBetween(1, 5), randomIntBetween(0, 5));
}
static | IndexLifecycleRunnerTests |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/web/servlet/client/MockMvcWebTestClientSpecs.java | {
"start": 3386,
"end": 5379
} | class ____<B extends MockMvcServerSpec<B>>
implements MockMvcServerSpec<B> {
@Override
public <T extends B> T filters(Filter... filters) {
getMockMvcBuilder().addFilters(filters);
return self();
}
@Override
public final <T extends B> T filter(Filter filter, String... urlPatterns) {
getMockMvcBuilder().addFilter(filter, urlPatterns);
return self();
}
@Override
public <T extends B> T defaultRequest(RequestBuilder requestBuilder) {
getMockMvcBuilder().defaultRequest(requestBuilder);
return self();
}
@Override
public <T extends B> T alwaysExpect(ResultMatcher resultMatcher) {
getMockMvcBuilder().alwaysExpect(resultMatcher);
return self();
}
@Override
public <T extends B> T dispatchOptions(boolean dispatchOptions) {
getMockMvcBuilder().dispatchOptions(dispatchOptions);
return self();
}
@Override
public <T extends B> T dispatcherServletCustomizer(DispatcherServletCustomizer customizer) {
getMockMvcBuilder().addDispatcherServletCustomizer(customizer);
return self();
}
@Override
public <T extends B> T apply(MockMvcConfigurer configurer) {
getMockMvcBuilder().apply(configurer);
return self();
}
@SuppressWarnings("unchecked")
private <T extends B> T self() {
return (T) this;
}
/**
* Return the concrete {@link ConfigurableMockMvcBuilder} to delegate
* configuration methods and to use to create the {@link MockMvc}.
*/
protected abstract ConfigurableMockMvcBuilder<?> getMockMvcBuilder();
@Override
public WebTestClient.Builder configureClient() {
MockMvc mockMvc = getMockMvcBuilder().build();
ClientHttpConnector connector = new MockMvcHttpConnector(mockMvc);
return WebTestClient.bindToServer(connector);
}
@Override
public WebTestClient build() {
return configureClient().build();
}
}
/**
* Simple wrapper around a {@link DefaultMockMvcBuilder}.
*
* @author Rossen Stoyanchev
* @since 5.3
*/
static | AbstractMockMvcServerSpec |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/cdi/bcextensions/CustomPseudoScopeTest.java | {
"start": 4339,
"end": 5544
} | class ____ implements BuildCompatibleExtension {
@Discovery
public void discovery(MetaAnnotations meta) {
meta.addContext(Prototype.class, PrototypeContext.class);
}
}
// ---
/**
* Specifies that a bean belongs to the <em>prototype</em> pseudo-scope.
* <p>
* When a bean is declared to have the {@code @Prototype} scope:
* <ul>
* <li>Each injection point or dynamic lookup receives a new instance; instances are never shared.</li>
* <li>Lifecycle of instances is not managed by the CDI container.</li>
* </ul>
* <p>
* Every invocation of the {@link Context#get(Contextual, CreationalContext)} operation on the
* context object for the {@code @Prototype} scope returns a new instance of given bean.
* <p>
* Every invocation of the {@link Context#get(Contextual)} operation on the context object for the
* {@code @Prototype} scope returns a {@code null} value.
* <p>
* The {@code @Prototype} scope is always active.
*/
@Scope
@Target({ ElementType.TYPE, ElementType.METHOD, ElementType.FIELD })
@Retention(RetentionPolicy.RUNTIME)
@Inherited
public @ | MyExtension |
java | quarkusio__quarkus | integration-tests/jackson/src/test/java/io/quarkus/it/jackson/ModelWithSerializerAndDeserializerOnFieldResourceTest.java | {
"start": 433,
"end": 1434
} | class ____ {
@Test
public void testSerializer() throws IOException {
given()
.contentType("application/json")
.when().get("/fieldserder/tester/whatever")
.then()
.statusCode(200)
.body("name", equalTo("tester"))
.body("inner.someValue", equalTo("unchangeable"));
}
@Test
public void testDeserializer() throws IOException {
ObjectMapper objectMapper = new ObjectMapper();
given()
.contentType("application/json")
.body(objectMapper.writeValueAsString(
new ModelWithSerializerAndDeserializerOnField("tester",
new ModelWithSerializerAndDeserializerOnField.Inner())))
.when().post("/fieldserder")
.then()
.statusCode(200)
.body(is("tester/immutable"));
}
}
| ModelWithSerializerAndDeserializerOnFieldResourceTest |
java | quarkusio__quarkus | integration-tests/gradle/src/main/resources/jandex-included-build-kordamp/src/main/java/org/example/ExampleResource.java | {
"start": 235,
"end": 436
} | class ____ {
@Inject
SimpleService simpleService;
@GET
@Produces(MediaType.TEXT_PLAIN)
public String hello() {
return "hello " + simpleService.hello();
}
} | ExampleResource |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/rest/messages/JobVertexTaskManagersHeaders.java | {
"start": 1155,
"end": 2680
} | class ____
implements RuntimeMessageHeaders<
EmptyRequestBody, JobVertexTaskManagersInfo, JobVertexMessageParameters> {
private static final JobVertexTaskManagersHeaders INSTANCE = new JobVertexTaskManagersHeaders();
public static final String URL =
"/jobs"
+ "/:"
+ JobIDPathParameter.KEY
+ "/vertices"
+ "/:"
+ JobVertexIdPathParameter.KEY
+ "/taskmanagers";
private JobVertexTaskManagersHeaders() {}
@Override
public Class<EmptyRequestBody> getRequestClass() {
return EmptyRequestBody.class;
}
@Override
public Class<JobVertexTaskManagersInfo> getResponseClass() {
return JobVertexTaskManagersInfo.class;
}
@Override
public HttpResponseStatus getResponseStatusCode() {
return HttpResponseStatus.OK;
}
@Override
public JobVertexMessageParameters getUnresolvedMessageParameters() {
return new JobVertexMessageParameters();
}
@Override
public HttpMethodWrapper getHttpMethod() {
return HttpMethodWrapper.GET;
}
@Override
public String getTargetRestEndpointURL() {
return URL;
}
public static JobVertexTaskManagersHeaders getInstance() {
return INSTANCE;
}
@Override
public String getDescription() {
return "Returns task information aggregated by task manager.";
}
}
| JobVertexTaskManagersHeaders |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/createTable/MySqlCreateTableTest15.java | {
"start": 977,
"end": 3035
} | class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "CREATE TABLE `xxx` (" +
" `id` int(10) unsigned NOT NULL AUTO_INCREMENT COMMENT 'ID'," +
" `create_date` datetime DEFAULT NULL," +
" `update_date` datetime DEFAULT NULL," +
" `product_id` int(11) NOT NULL COMMENT '产品ID'," +
" `memeber_id` int(11) NOT NULL COMMENT '用户ID'," +
" `name` varchar(50) DEFAULT NULL COMMENT '姓名'," +
" `address` varchar(500) DEFAULT NULL COMMENT '地址'," +
" `mobile` varchar(50) DEFAULT NULL COMMENT '手机'," +
" `amount` int(11) DEFAULT NULL COMMENT '兑换数量'," +
" PRIMARY KEY (`id`)" +
" ) ENGINE=InnoDB DEFAULT CHARSET=gbk;" +
"";
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement statemen = statementList.get(0);
// print(statementList);
assertEquals(1, statementList.size());
MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
statemen.accept(visitor);
// System.out.println("Tables : " + visitor.getTables());
// System.out.println("fields : " + visitor.getColumns());
// System.out.println("coditions : " + visitor.getConditions());
// System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(1, visitor.getTables().size());
assertEquals(9, visitor.getColumns().size());
assertEquals(0, visitor.getConditions().size());
assertTrue(visitor.getTables().containsKey(new TableStat.Name("xxx")));
assertTrue(visitor.containsColumn("xxx", "id"));
assertTrue(visitor.containsColumn("xxx", "amount"));
}
}
| MySqlCreateTableTest15 |
java | apache__kafka | clients/src/main/java/org/apache/kafka/server/telemetry/ClientTelemetryExporter.java | {
"start": 1075,
"end": 1796
} | interface ____ {
/**
* Called by the broker when a client reports telemetry metrics. The telemetry context
* includes the push interval and authorization details which can be used by the metrics
* exporter to manage metric lifecycle and retrieval of additional client information.
* <p>
* This method may be called from the request handling thread, and as such should avoid blocking.
*
* @param context the client telemetry context including push interval and request authorization context
* @param payload the encoded telemetry payload as sent by the client
*/
void exportMetrics(ClientTelemetryContext context, ClientTelemetryPayload payload);
}
| ClientTelemetryExporter |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/mysql/show/MySqlShowTest_41.java | {
"start": 929,
"end": 1861
} | class ____ extends MysqlTest {
public void test_0() throws Exception {
String sql = "show create table dla_table1 like mapping('mysql_table1');";
MySqlStatementParser parser = new MySqlStatementParser(sql);
List<SQLStatement> statementList = parser.parseStatementList();
SQLStatement stmt = statementList.get(0);
assertEquals(1, statementList.size());
MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
stmt.accept(visitor);
assertEquals(0, visitor.getTables().size());
assertEquals(0, visitor.getColumns().size());
assertEquals(0, visitor.getConditions().size());
assertEquals(0, visitor.getOrderByColumns().size());
// assertTrue(visitor.getTables().containsKey(new TableStat.Name("abc")));
assertEquals("SHOW CREATE TABLE dla_table1 LIKE MAPPING ('mysql_table1');", stmt.toString());
}
}
| MySqlShowTest_41 |
java | apache__kafka | server/src/main/java/org/apache/kafka/server/quota/ClientQuotaManager.java | {
"start": 10956,
"end": 31081
} | class ____ extends ShutdownableThread {
private final DelayQueue<ThrottledChannel> delayQueue;
public ThrottledChannelReaper(DelayQueue<ThrottledChannel> delayQueue, String prefix) {
super(prefix + "ThrottledChannelReaper-" + quotaType, false);
this.delayQueue = delayQueue;
}
@Override
public void doWork() {
ThrottledChannel throttledChannel;
try {
throttledChannel = delayQueue.poll(1, TimeUnit.SECONDS);
if (throttledChannel != null) {
// Decrement the size of the delay queue
delayQueueSensor.record(-1);
// Notify the socket server that throttling is done for this channel
throttledChannel.notifyThrottlingDone();
}
} catch (InterruptedException e) {
// Ignore and continue
Thread.currentThread().interrupt();
}
}
}
/**
* Returns true if any quotas are enabled for this quota manager. This is used
* to determine if quota-related metrics should be created.
*/
public boolean quotasEnabled() {
return quotaTypesEnabled != NO_QUOTAS;
}
/**
* See recordAndGetThrottleTimeMs.
*/
public int maybeRecordAndGetThrottleTimeMs(Session session, String clientId, double value, long timeMs) {
// Record metrics only if quotas are enabled.
if (quotasEnabled()) {
return recordAndGetThrottleTimeMs(session, clientId, value, timeMs);
} else {
return 0;
}
}
/**
* Records that a user/clientId accumulated or would like to accumulate the provided amount at the
* specified time, returns throttle time in milliseconds.
*
* @param session The session from which the user is extracted
* @param clientId The client id
* @param value The value to accumulate
* @param timeMs The time at which to accumulate the value
* @return The throttle time in milliseconds defines as the time to wait until the average
* rate gets back to the defined quota
*/
public int recordAndGetThrottleTimeMs(Session session, String clientId, double value, long timeMs) {
var clientSensors = getOrCreateQuotaSensors(session, clientId);
try {
clientSensors.quotaSensor().record(value, timeMs, true);
return 0;
} catch (QuotaViolationException e) {
var throttleTimeMs = (int) throttleTime(e, timeMs);
if (LOG.isDebugEnabled()) {
LOG.debug("Quota violated for sensor ({}). Delay time: ({})",
clientSensors.quotaSensor().name(), throttleTimeMs);
}
return throttleTimeMs;
}
}
/**
* Records that a user/clientId changed some metric being throttled without checking for
* quota violation. The aggregate value will subsequently be used for throttling when the
* next request is processed.
*/
public void recordNoThrottle(Session session, String clientId, double value) {
var clientSensors = getOrCreateQuotaSensors(session, clientId);
clientSensors.quotaSensor().record(value, time.milliseconds(), false);
}
/**
* "Unrecord" the given value that has already been recorded for the given user/client by recording a negative value
* of the same quantity.
* For a throttled fetch, the broker should return an empty response and thus should not record the value. Ideally,
* we would like to compute the throttle time before actually recording the value, but the current Sensor code
* couples value recording and quota checking very tightly. As a workaround, we will unrecord the value for the fetch
* in case of throttling. Rate keeps the sum of values that fall in each time window, so this should bring the
* overall sum back to the previous value.
*/
public void unrecordQuotaSensor(Session session, String clientId, double value, long timeMs) {
var clientSensors = getOrCreateQuotaSensors(session, clientId);
clientSensors.quotaSensor().record(value * -1, timeMs, false);
}
/**
* Returns the maximum value that could be recorded without guaranteed throttling.
* Recording any larger value will always be throttled, even if no other values were recorded in the quota window.
* This is used for deciding the maximum bytes that can be fetched at once
*/
public double maxValueInQuotaWindow(Session session, String clientId) {
if (!quotasEnabled()) return Double.MAX_VALUE;
var clientSensors = getOrCreateQuotaSensors(session, clientId);
var limit = quotaCallback.quotaLimit(clientQuotaType, clientSensors.metricTags());
if (limit != null) return limit * (config.numQuotaSamples() - 1) * config.quotaWindowSizeSeconds();
return Double.MAX_VALUE;
}
/**
* Throttle a client by muting the associated channel for the given throttle time.
* @param clientId request client id
* @param session request session
* @param throttleTimeMs Duration in milliseconds for which the channel is to be muted.
* @param throttleCallback Callback for channel throttling
*/
public void throttle(
String clientId,
Session session,
ThrottleCallback throttleCallback,
int throttleTimeMs
) {
if (throttleTimeMs > 0) {
var clientSensors = getOrCreateQuotaSensors(session, clientId);
clientSensors.throttleTimeSensor().record(throttleTimeMs);
var throttledChannel = new ThrottledChannel(time, throttleTimeMs, throttleCallback);
delayQueue.add(throttledChannel);
delayQueueSensor.record();
if (LOG.isDebugEnabled()) {
LOG.debug("Channel throttled for sensor ({}). Delay time: ({})",
clientSensors.quotaSensor().name(), throttleTimeMs);
}
}
}
/**
* Returns the quota for the client with the specified (non-encoded) user principal and client-id.
* Note: this method is expensive, it is meant to be used by tests only
*/
public Quota quota(String user, String clientId) {
var userPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, user);
return quota(userPrincipal, clientId);
}
/**
* Returns the quota for the client with the specified user principal and client-id.
* Note: this method is expensive, it is meant to be used by tests only
*/
public Quota quota(KafkaPrincipal userPrincipal, String clientId) {
var metricTags = quotaCallback.quotaMetricTags(clientQuotaType, userPrincipal, clientId);
return Quota.upperBound(quotaLimit(metricTags));
}
private double quotaLimit(Map<String, String> metricTags) {
var limit = quotaCallback.quotaLimit(clientQuotaType, metricTags);
return limit != null ? limit : Long.MAX_VALUE;
}
/**
* This calculates the amount of time needed to bring the metric within quota
* assuming that no new metrics are recorded.
*/
protected long throttleTime(QuotaViolationException e, long timeMs) {
return QuotaUtils.throttleTime(e, timeMs);
}
/**
* This function either returns the sensors for a given client id or creates them if they don't exist
*/
public ClientSensors getOrCreateQuotaSensors(Session session, String clientId) {
var metricTags = quotaCallback instanceof DefaultQuotaCallback defaultCallback
? defaultCallback.quotaMetricTags(session.sanitizedUser, clientId)
: quotaCallback.quotaMetricTags(clientQuotaType, session.principal, clientId);
var sensors = new ClientSensors(
metricTags,
sensorAccessor.getOrCreate(
getQuotaSensorName(metricTags),
INACTIVE_SENSOR_EXPIRATION_TIME_SECONDS,
sensor -> registerQuotaMetrics(metricTags, sensor) // quotaLimit() called here only for new sensors
),
sensorAccessor.getOrCreate(
getThrottleTimeSensorName(metricTags),
INACTIVE_SENSOR_EXPIRATION_TIME_SECONDS,
sensor -> sensor.add(throttleMetricName(metricTags), new Avg())
)
);
if (quotaCallback.quotaResetRequired(clientQuotaType)) {
updateQuotaMetricConfigs();
}
return sensors;
}
protected void registerQuotaMetrics(Map<String, String> metricTags, Sensor sensor) {
sensor.add(
clientQuotaMetricName(metricTags),
new Rate(),
getQuotaMetricConfig(metricTags)
);
}
private String metricTagsToSensorSuffix(Map<String, String> metricTags) {
return String.join(":", metricTags.values());
}
private String getThrottleTimeSensorName(Map<String, String> metricTags) {
return quotaType.toString() + "ThrottleTime-" + metricTagsToSensorSuffix(metricTags);
}
private String getQuotaSensorName(Map<String, String> metricTags) {
return quotaType.toString() + "-" + metricTagsToSensorSuffix(metricTags);
}
protected MetricConfig getQuotaMetricConfig(Map<String, String> metricTags) {
return getQuotaMetricConfig(quotaLimit(metricTags));
}
private MetricConfig getQuotaMetricConfig(double quotaLimit) {
return new MetricConfig()
.timeWindow(config.quotaWindowSizeSeconds(), TimeUnit.SECONDS)
.samples(config.numQuotaSamples())
.quota(new Quota(quotaLimit, true));
}
protected Sensor getOrCreateSensor(String sensorName, long expirationTimeSeconds, Consumer<Sensor> registerMetrics) {
return sensorAccessor.getOrCreate(
sensorName,
expirationTimeSeconds,
registerMetrics);
}
/**
* Overrides quotas for <user>, <client-id> or <user, client-id> or the dynamic defaults
* for any of these levels.
*
* @param userEntity user to override if quota applies to <user> or <user, client-id>
* @param clientEntity sanitized client entity to override if quota applies to <client-id> or <user, client-id>
* @param quota custom quota to apply or None if quota override is being removed
*/
public void updateQuota(
Optional<ClientQuotaEntity.ConfigEntity> userEntity,
Optional<ClientQuotaEntity.ConfigEntity> clientEntity,
Optional<Quota> quota
) {
/*
* Acquire the write lock to apply changes in the quota objects.
* This method changes the quota in the overriddenQuota map and applies the update on the actual KafkaMetric object (if it exists).
* If the KafkaMetric hasn't been created, the most recent value will be used from the overriddenQuota map.
* The write lock prevents quota update and creation at the same time. It also guards against concurrent quota change
* notifications
*/
lock.writeLock().lock();
try {
var quotaEntity = new KafkaQuotaEntity(userEntity.orElse(null), clientEntity.orElse(null));
// Apply quota changes with proper quota type tracking
if (quota.isPresent()) {
updateQuotaTypes(quotaEntity, true);
quotaCallback.updateQuota(clientQuotaType, quotaEntity, quota.get().bound());
} else {
updateQuotaTypes(quotaEntity, false);
quotaCallback.removeQuota(clientQuotaType, quotaEntity);
}
// Determine which entities need metric config updates
Optional<KafkaQuotaEntity> updatedEntity;
if (userEntity.filter(entity -> entity == DEFAULT_USER_ENTITY).isPresent() ||
clientEntity.filter(entity -> entity == DEFAULT_USER_CLIENT_ID).isPresent()) {
// More than one entity may need updating, so updateQuotaMetricConfigs will go through all metrics
updatedEntity = Optional.empty();
} else {
updatedEntity = Optional.of(quotaEntity);
}
updateQuotaMetricConfigs(updatedEntity);
} finally {
lock.writeLock().unlock();
}
}
/**
* Updates `quotaTypesEnabled` by performing a bitwise OR operation to combine the enabled quota types.
* This method ensures that the `quotaTypesEnabled` field reflects the active quota types based on the
* current state of `activeQuotaEntities`.
* For example:
* - If UserQuotaEnabled = 2 and ClientIdQuotaEnabled = 1, then quotaTypesEnabled = 3 (2 | 1 = 3)
* - If UserClientIdQuotaEnabled = 4 and UserQuotaEnabled = 1, then quotaTypesEnabled = 5 (4 | 1 = 5)
* - If UserClientIdQuotaEnabled = 4 and ClientIdQuotaEnabled = 2, then quotaTypesEnabled = 6 (4 | 2 = 6)
* - If all three are enabled (1 | 2 | 4), then quotaTypesEnabled = 7
*
* @param quotaEntity The entity for which the quota is being updated, which can be a combination of user and client-id.
* @param shouldAdd A boolean indicating whether to add or remove the quota entity.
*/
private void updateQuotaTypes(KafkaQuotaEntity quotaEntity, boolean shouldAdd) {
if (quotaTypesEnabled == CUSTOM_QUOTAS) {
// If custom quotas are enabled, we do not need to update quota types
return;
}
boolean isActive = !(quotaCallback instanceof DefaultQuotaCallback defaultCallback) ||
defaultCallback.getActiveQuotasEntities().contains(quotaEntity);
int activeQuotaType;
if (quotaEntity.userEntity() != null && quotaEntity.clientIdEntity() != null) {
activeQuotaType = USER_CLIENT_ID_QUOTA_ENABLED;
} else if (quotaEntity.userEntity() != null) {
activeQuotaType = USER_QUOTA_ENABLED;
} else if (quotaEntity.clientIdEntity() != null) {
activeQuotaType = CLIENT_ID_QUOTA_ENABLED;
} else {
activeQuotaType = NO_QUOTAS;
}
if (shouldAdd && !isActive) {
activeQuotaEntities.compute(activeQuotaType, (key, currentValue) ->
(currentValue == null || currentValue == 0) ? 1 : currentValue + 1);
quotaTypesEnabled |= activeQuotaType;
} else if (!shouldAdd && isActive) {
activeQuotaEntities.compute(activeQuotaType, (key, currentValue) ->
(currentValue == null || currentValue <= 1) ? 0 : currentValue - 1);
if (activeQuotaEntities.getOrDefault(activeQuotaType, 0) == 0) {
quotaTypesEnabled &= ~activeQuotaType;
}
}
// Log the changes
var quotaTypeNames = Map.of(
USER_CLIENT_ID_QUOTA_ENABLED, "UserClientIdQuota",
CLIENT_ID_QUOTA_ENABLED, "ClientIdQuota",
USER_QUOTA_ENABLED, "UserQuota"
);
var activeEntities = quotaTypeNames.entrySet().stream()
.filter(entry -> activeQuotaEntities.getOrDefault(entry.getKey(), 0) > 0)
.map(Map.Entry::getValue)
.collect(java.util.stream.Collectors.joining(", "));
LOG.info("Quota types enabled has been changed to {} with active quota entities: [{}]",
quotaTypesEnabled, activeEntities);
}
/**
* Updates metrics configs. This is invoked when quota configs are updated when partition leaders change,
* and custom callbacks that implement partition-based quotas have updated quotas.
* Param updatedQuotaEntity If set to one entity and quotas have only been enabled at one
* level, then an optimized update is performed with a single metric update. If None is provided,
* or if custom callbacks are used or if multi-level quotas have been enabled, all metric configs
* are checked and updated if required.
*/
public void updateQuotaMetricConfigs() {
updateQuotaMetricConfigs(Optional.empty());
}
public void updateQuotaMetricConfigs(Optional<KafkaQuotaEntity> updatedQuotaEntity) {
var allMetrics = metrics.metrics();
// If using custom quota callbacks or if multiple-levels of quotas are defined or
// if this is a default quota update, traverse metrics to find all affected values.
// Otherwise, update just the single matching one.
var singleUpdate = switch (quotaTypesEnabled) {
case NO_QUOTAS,
CLIENT_ID_QUOTA_ENABLED,
USER_QUOTA_ENABLED,
USER_CLIENT_ID_QUOTA_ENABLED -> updatedQuotaEntity.isPresent();
default -> false;
};
if (singleUpdate) {
var quotaEntity = updatedQuotaEntity.orElseThrow(
() -> new IllegalStateException("Quota entity not specified"));
var user = quotaEntity.sanitizedUser();
var clientId = quotaEntity.clientId();
var metricTags = Map.of(USER_TAG, user, CLIENT_ID_TAG, clientId);
var quotaMetricName = clientQuotaMetricName(metricTags);
// Change the underlying metric config if the sensor has been created
var metric = allMetrics.get(quotaMetricName);
if (metric != null) {
var newQuota = quotaLimit(metricTags);
LOG.info("Sensor for {} already exists. Changing quota to {} in MetricConfig",
quotaEntity, newQuota);
metric.config(getQuotaMetricConfig(newQuota));
}
} else {
var quotaMetricName = clientQuotaMetricName(Map.of());
allMetrics.forEach((metricName, metric) -> {
if (metricName.name().equals(quotaMetricName.name()) &&
metricName.group().equals(quotaMetricName.group())) {
var metricTags = metricName.tags();
var newQuota = quotaLimit(metricTags);
if (Double.compare(newQuota, metric.config().quota().bound()) != 0) {
LOG.info("Sensor for quota-id {} already exists. Setting quota to {} in MetricConfig",
metricTags, newQuota);
metric.config(getQuotaMetricConfig(newQuota));
}
}
});
}
}
/**
* Returns the MetricName of the metric used for the quota. The name is used to create the
* metric but also to find the metric when the quota is changed.
*/
protected MetricName clientQuotaMetricName(Map<String, String> quotaMetricTags) {
return metrics.metricName("byte-rate", quotaType.toString(),
"Tracking byte-rate per user/client-id",
quotaMetricTags);
}
private MetricName throttleMetricName(Map<String, String> quotaMetricTags) {
return metrics.metricName("throttle-time",
quotaType.toString(),
"Tracking average throttle-time per user/client-id",
quotaMetricTags);
}
public void initiateShutdown() {
throttledChannelReaper.initiateShutdown();
// improve shutdown time by waking up any ShutdownThread(s) blocked on poll by sending a no-op
delayQueue.add(new ThrottledChannel(time, 0, new ThrottleCallback() {
@Override
public void startThrottling() {}
@Override
public void endThrottling() {}
}));
}
public void shutdown() {
initiateShutdown();
try {
throttledChannelReaper.awaitShutdown();
} catch (InterruptedException e) {
LOG.warn("Shutdown was interrupted", e);
Thread.currentThread().interrupt(); // Restore interrupt status
}
}
private | ThrottledChannelReaper |
java | apache__camel | core/camel-management-api/src/main/java/org/apache/camel/api/management/mbean/ManagedValidateMBean.java | {
"start": 916,
"end": 1209
} | interface ____ extends ManagedProcessorMBean {
@ManagedAttribute(description = "The language for the predicate")
String getPredicateLanguage();
@ManagedAttribute(description = "Predicate to determine if the message is valid or not")
String getPredicate();
}
| ManagedValidateMBean |
java | resilience4j__resilience4j | resilience4j-micrometer/src/main/java/io/github/resilience4j/micrometer/tagged/RetryMetricNames.java | {
"start": 1144,
"end": 1928
} | class ____ {
private final RetryMetricNames retryMetricNames = new RetryMetricNames();
/**
* Overrides the default metric name {@value RetryMetricNames#DEFAULT_RETRY_CALLS} with a
* given one.
*
* @param callsMetricName The metric name for retry calls.
* @return The builder.
*/
public Builder callsMetricName(String callsMetricName) {
retryMetricNames.callsMetricName = requireNonNull(callsMetricName);
return this;
}
/**
* Builds {@link RetryMetricNames} instance.
*
* @return The built {@link RetryMetricNames} instance.
*/
public RetryMetricNames build() {
return retryMetricNames;
}
}
}
| Builder |
java | spring-projects__spring-framework | spring-beans/src/testFixtures/java/org/springframework/beans/testfixture/beans/factory/generator/deprecation/DeprecatedMemberConfiguration.java | {
"start": 817,
"end": 1292
} | class ____ {
@Deprecated
public String deprecatedString() {
return "deprecated";
}
@SuppressWarnings("deprecation")
public String deprecatedParameter(DeprecatedBean bean) {
return bean.toString();
}
@SuppressWarnings("deprecation")
public DeprecatedBean deprecatedReturnType() {
return new DeprecatedBean();
}
@SuppressWarnings("deprecation")
DeprecatedBean deprecatedReturnTypeProtected() {
return new DeprecatedBean();
}
}
| DeprecatedMemberConfiguration |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/inheritance/joined/Person.java | {
"start": 473,
"end": 936
} | class ____ {
@Id
@GeneratedValue
private int id;
private String name;
private String firtsname;
public Person() {
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getFirstname() {
return firtsname;
}
public void setFirstname(String firstname) {
this.firtsname = firstname;
}
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
}
| Person |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/serialization/Serdes.java | {
"start": 2172,
"end": 2352
} | class ____ extends WrapperSerde<Long> {
public LongSerde() {
super(new LongSerializer(), new LongDeserializer());
}
}
public static final | LongSerde |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.