proj_name
stringclasses 131
values | relative_path
stringlengths 30
228
| class_name
stringlengths 1
68
| func_name
stringlengths 1
48
| masked_class
stringlengths 78
9.82k
| func_body
stringlengths 46
9.61k
| len_input
int64 29
2.01k
| len_output
int64 14
1.94k
| total
int64 55
2.05k
| relevant_context
stringlengths 0
38.4k
|
|---|---|---|---|---|---|---|---|---|---|
crate_crate
|
crate/server/src/main/java/io/crate/role/RoleManagerService.java
|
RoleManagerService
|
ensureDropRoleTargetIsNotSuperUser
|
class RoleManagerService implements RoleManager {
private static final void ensureDropRoleTargetIsNotSuperUser(Role user) {<FILL_FUNCTION_BODY>}
private static final void ensureAlterPrivilegeTargetIsNotSuperuser(Role user) {
if (user != null && user.isSuperUser()) {
throw new UnsupportedOperationException(String.format(
Locale.ENGLISH, "Cannot alter privileges for superuser '%s'", user.name()));
}
}
private static final RoleManagerDDLModifier DDL_MODIFIER = new RoleManagerDDLModifier();
private final TransportCreateRoleAction transportCreateRoleAction;
private final TransportDropRoleAction transportDropRoleAction;
private final TransportAlterRoleAction transportAlterRoleAction;
private final TransportPrivilegesAction transportPrivilegesAction;
private final Roles roles;
@Inject
public RoleManagerService(TransportCreateRoleAction transportCreateRoleAction,
TransportDropRoleAction transportDropRoleAction,
TransportAlterRoleAction transportAlterRoleAction,
TransportPrivilegesAction transportPrivilegesAction,
SysTableRegistry sysTableRegistry,
Roles roles,
DDLClusterStateService ddlClusterStateService,
ClusterService clusterService) {
this.transportCreateRoleAction = transportCreateRoleAction;
this.transportDropRoleAction = transportDropRoleAction;
this.transportAlterRoleAction = transportAlterRoleAction;
this.transportPrivilegesAction = transportPrivilegesAction;
this.roles = roles;
var userTable = SysUsersTableInfo.create(() -> clusterService.state().metadata().clusterUUID());
sysTableRegistry.registerSysTable(
userTable,
() -> CompletableFuture.completedFuture(
roles.roles().stream().filter(Role::isUser).toList()),
userTable.expressions(),
false
);
var rolesTable = SysRolesTableInfo.create();
sysTableRegistry.registerSysTable(
rolesTable,
() -> CompletableFuture.completedFuture(
roles.roles().stream().filter(r -> r.isUser() == false).toList()),
rolesTable.expressions(),
false
);
var privilegesTable = SysPrivilegesTableInfo.create();
sysTableRegistry.registerSysTable(
privilegesTable,
() -> CompletableFuture.completedFuture(SysPrivilegesTableInfo.buildPrivilegesRows(roles.roles())),
privilegesTable.expressions(),
false
);
ddlClusterStateService.addModifier(DDL_MODIFIER);
}
@Override
public CompletableFuture<Long> createRole(String roleName,
boolean isUser,
@Nullable SecureHash hashedPw,
@Nullable JwtProperties jwtProperties) {
return transportCreateRoleAction.execute(new CreateRoleRequest(roleName, isUser, hashedPw, jwtProperties), r -> {
if (r.doesUserExist()) {
throw new RoleAlreadyExistsException(String.format(Locale.ENGLISH, "Role '%s' already exists", roleName));
}
return 1L;
});
}
@Override
public CompletableFuture<Long> dropRole(String roleName, boolean suppressNotFoundError) {
ensureDropRoleTargetIsNotSuperUser(roles.findUser(roleName));
return transportDropRoleAction.execute(new DropRoleRequest(roleName, suppressNotFoundError), r -> {
if (r.doesUserExist() == false) {
if (suppressNotFoundError) {
return 0L;
}
throw new RoleUnknownException(roleName);
}
return 1L;
});
}
@Override
public CompletableFuture<Long> alterRole(String roleName,
@Nullable SecureHash newHashedPw,
@Nullable JwtProperties newJwtProperties,
boolean resetPassword,
boolean resetJwtProperties) {
return transportAlterRoleAction.execute(
new AlterRoleRequest(roleName, newHashedPw, newJwtProperties, resetPassword, resetJwtProperties),
r -> {
if (r.doesUserExist() == false) {
throw new RoleUnknownException(roleName);
}
return 1L;
}
);
}
public CompletableFuture<Long> applyPrivileges(Collection<String> roleNames,
Collection<Privilege> privileges,
GrantedRolesChange grantedRolesChange) {
roleNames.forEach(s -> ensureAlterPrivilegeTargetIsNotSuperuser(roles.findUser(s)));
return transportPrivilegesAction.execute(new PrivilegesRequest(roleNames, privileges, grantedRolesChange), r -> {
if (!r.unknownUserNames().isEmpty()) {
throw new RoleUnknownException(r.unknownUserNames());
}
return r.affectedRows();
});
}
@Override
public AccessControl getAccessControl(CoordinatorSessionSettings sessionSettings) {
return new AccessControlImpl(roles, sessionSettings);
}
public Collection<Role> roles() {
return roles.roles();
}
}
|
if (user != null && user.isSuperUser()) {
throw new UnsupportedOperationException(String.format(
Locale.ENGLISH, "Cannot drop a superuser '%s'", user.name()));
}
| 1,337
| 58
| 1,395
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/role/UserActions.java
|
UserActions
|
getUserPasswordProperty
|
class UserActions {
private UserActions() {
}
@Nullable
public static SecureHash generateSecureHash(Map<String, Object> properties) throws GeneralSecurityException, IllegalArgumentException {
try (SecureString pw = getUserPasswordProperty(properties)) {
if (pw != null) {
if (pw.isEmpty()) {
throw new IllegalArgumentException("Password must not be empty");
}
return SecureHash.of(pw);
}
return null;
}
}
@VisibleForTesting
@Nullable
static SecureString getUserPasswordProperty(Map<String, Object> properties) {<FILL_FUNCTION_BODY>}
}
|
String value = DataTypes.STRING.sanitizeValue(properties.get(CreateRolePlan.PASSWORD_PROPERTY_KEY));
if (value != null) {
return new SecureString(value.toCharArray());
}
return null;
| 176
| 68
| 244
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/role/metadata/UsersMetadata.java
|
UsersMetadata
|
fromXContent
|
class UsersMetadata extends AbstractNamedDiffable<Metadata.Custom> implements Metadata.Custom {
public static final String TYPE = "users";
private final Map<String, SecureHash> users;
public UsersMetadata() {
this.users = new HashMap<>();
}
public UsersMetadata(Map<String, SecureHash> users) {
this.users = users;
}
public static UsersMetadata newInstance(@Nullable UsersMetadata instance) {
if (instance == null) {
return new UsersMetadata();
}
return new UsersMetadata(new HashMap<>(instance.users));
}
public boolean contains(String name) {
return users.containsKey(name);
}
public void put(String name, @Nullable SecureHash secureHash) {
users.put(name, secureHash);
}
public void remove(String name) {
users.remove(name);
}
public List<String> userNames() {
return new ArrayList<>(users.keySet());
}
public Map<String, SecureHash> users() {
return users;
}
public UsersMetadata(StreamInput in) throws IOException {
int numUsers = in.readVInt();
users = new HashMap<>(numUsers);
for (int i = 0; i < numUsers; i++) {
String userName = in.readString();
SecureHash secureHash = in.readOptionalWriteable(SecureHash::readFrom);
users.put(userName, secureHash);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(users.size());
for (Map.Entry<String, SecureHash> user : users.entrySet()) {
out.writeString(user.getKey());
out.writeOptionalWriteable(user.getValue());
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject("users");
for (Map.Entry<String, SecureHash> entry : users.entrySet()) {
builder.startObject(entry.getKey());
if (entry.getValue() != null) {
entry.getValue().toXContent(builder, params);
}
builder.endObject();
}
builder.endObject();
return builder;
}
/**
* UsersMetadata has the form of:
*
* users: {
* "user1": {
* "secure_hash": {
* "iterations": INT,
* "hash": BYTE[],
* "salt": BYTE[]
* }
* },
* "user2": {
* "secure_hash": null
* },
* ...
* }
*/
public static UsersMetadata fromXContent(XContentParser parser) throws IOException {<FILL_FUNCTION_BODY>}
@Override
public EnumSet<Metadata.XContentContext> context() {
return EnumSet.of(Metadata.XContentContext.GATEWAY, Metadata.XContentContext.SNAPSHOT);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
UsersMetadata that = (UsersMetadata) o;
return users.equals(that.users);
}
@Override
public int hashCode() {
return Objects.hash(users);
}
@Override
public String getWriteableName() {
return TYPE;
}
@Override
public Version getMinimalSupportedVersion() {
return Version.V_3_0_1;
}
}
|
Map<String, SecureHash> users = new HashMap<>();
XContentParser.Token token = parser.nextToken();
if (token == XContentParser.Token.FIELD_NAME && parser.currentName().equals(TYPE)) {
token = parser.nextToken();
if (token == XContentParser.Token.START_OBJECT) {
while (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
String userName = parser.currentName();
SecureHash secureHash = null;
if (parser.nextToken() == XContentParser.Token.START_OBJECT) {
while (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
if (parser.currentName().equals("secure_hash")) {
secureHash = SecureHash.fromXContent(parser);
} else {
throw new ElasticsearchParseException(
"failed to parse users, unexpected field name: " + parser.currentName()
);
}
}
if (parser.currentToken() != XContentParser.Token.END_OBJECT) {
throw new ElasticsearchParseException(
"failed to parse users, expected an object token at the end, got: " + parser.currentToken()
);
}
}
users.put(userName, secureHash);
}
} else {
// each custom metadata is packed inside an object.
throw new ElasticsearchParseException("failed to parse users, expected an object token at start");
}
if (parser.nextToken() != XContentParser.Token.END_OBJECT) {
// each custom metadata is packed inside an object.
// each custom must move the parser to the end otherwise possible following customs won't be read
throw new ElasticsearchParseException("failed to parse users, expected an object token at the end");
}
}
return new UsersMetadata(users);
| 961
| 465
| 1,426
|
<methods>public non-sealed void <init>() ,public Diff<org.elasticsearch.cluster.metadata.Metadata.Custom> diff(org.elasticsearch.cluster.metadata.Metadata.Custom) ,public org.elasticsearch.cluster.metadata.Metadata.Custom get() ,public static NamedDiff<T> readDiffFrom(Class<? extends T>, java.lang.String, org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException<variables>
|
crate_crate
|
crate/server/src/main/java/io/crate/role/scalar/UserFunction.java
|
UserFunction
|
evaluate
|
class UserFunction extends Scalar<String, Object> {
public static final String CURRENT_USER_FUNCTION_NAME = "current_user";
public static final String SESSION_USER_FUNCTION_NAME = "session_user";
public static void register(Functions.Builder builder) {
builder.add(
Signature.scalar(
CURRENT_USER_FUNCTION_NAME,
DataTypes.STRING.getTypeSignature()
).withFeatures(Scalar.NO_FEATURES),
UserFunction::new
);
builder.add(
Signature.scalar(
SESSION_USER_FUNCTION_NAME,
DataTypes.STRING.getTypeSignature()
).withFeatures(Scalar.NO_FEATURES),
UserFunction::new
);
}
public UserFunction(Signature signature, BoundSignature boundSignature) {
super(signature, boundSignature);
}
@Override
public String evaluate(TransactionContext txnCtx, NodeContext nodeCtx, Input<Object>... args) {<FILL_FUNCTION_BODY>}
@Override
public Symbol normalizeSymbol(Function symbol, @Nullable TransactionContext txnCtx, NodeContext nodeCtx) {
if (txnCtx == null) {
return Literal.NULL;
}
return Literal.of(txnCtx.sessionSettings().userName());
}
}
|
assert args.length == 0 : "number of args must be 0";
return txnCtx.sessionSettings().userName();
| 351
| 35
| 386
|
<methods>public BoundSignature boundSignature() ,public Scalar<java.lang.String,java.lang.Object> compile(List<io.crate.expression.symbol.Symbol>, java.lang.String, io.crate.role.Roles) ,public transient abstract java.lang.String evaluate(io.crate.metadata.TransactionContext, io.crate.metadata.NodeContext, Input<java.lang.Object>[]) ,public io.crate.expression.symbol.Symbol normalizeSymbol(io.crate.expression.symbol.Function, io.crate.metadata.TransactionContext, io.crate.metadata.NodeContext) ,public io.crate.metadata.functions.Signature signature() <variables>public static final Set<io.crate.metadata.Scalar.Feature> DETERMINISTIC_AND_COMPARISON_REPLACEMENT,public static final Set<io.crate.metadata.Scalar.Feature> DETERMINISTIC_ONLY,public static final Set<io.crate.metadata.Scalar.Feature> NO_FEATURES,protected final non-sealed BoundSignature boundSignature,protected final non-sealed io.crate.metadata.functions.Signature signature
|
crate_crate
|
crate/server/src/main/java/io/crate/server/cli/EnvironmentAwareCommand.java
|
EnvironmentAwareCommand
|
createEnv
|
class EnvironmentAwareCommand extends Command {
private final OptionSpec<KeyValuePair> settingOption;
/**
* Construct the command with the specified command description. This command will have logging configured without reading Elasticsearch
* configuration files.
*
* @param description the command description
*/
public EnvironmentAwareCommand(final String description) {
this(description, "C", CommandLoggingConfigurator::configureLoggingWithoutConfig);
}
public EnvironmentAwareCommand(String description, String settingOptionName, Runnable beforeMain) {
super(description, beforeMain);
this.settingOption = parser.accepts(settingOptionName, "Configure a setting")
.withRequiredArg()
.ofType(KeyValuePair.class);
}
@Override
protected void execute(Terminal terminal, OptionSet options) throws Exception {
final Map<String, String> settings = new HashMap<>();
for (final KeyValuePair kvp : settingOption.values(options)) {
if (kvp.value.isEmpty()) {
throw new UserException(ExitCodes.USAGE, "setting [" + kvp.key + "] must not be empty");
}
if (settings.containsKey(kvp.key)) {
final String message = String.format(
Locale.ROOT,
"setting [%s] already set, saw [%s] and [%s]",
kvp.key,
settings.get(kvp.key),
kvp.value);
throw new UserException(ExitCodes.USAGE, message);
}
settings.put(kvp.key, kvp.value);
}
putSystemPropertyIfSettingIsMissing(settings, "path.data", "es.path.data");
putSystemPropertyIfSettingIsMissing(settings, "path.home", "es.path.home");
putSystemPropertyIfSettingIsMissing(settings, "path.conf", "es.path.conf");
putSystemPropertyIfSettingIsMissing(settings, "path.logs", "es.path.logs");
execute(terminal, options, createEnv(settings));
}
/** Create an {@link Environment} for the command to use. Overrideable for tests. */
protected Environment createEnv(final Map<String, String> settings) throws UserException {
return createEnv(Settings.EMPTY, settings);
}
/** Create an {@link Environment} for the command to use. Overrideable for tests. */
protected final Environment createEnv(final Settings baseSettings, final Map<String, String> settings) throws UserException {<FILL_FUNCTION_BODY>}
@SuppressForbidden(reason = "need path to construct environment")
private static Path getConfigPath(final String pathConf) {
return Paths.get(pathConf);
}
/** Ensure the given setting exists, reading it from system properties if not already set. */
private static void putSystemPropertyIfSettingIsMissing(final Map<String, String> settings, final String setting, final String key) {
final String value = System.getProperty(key);
if (value != null) {
if (settings.containsKey(setting)) {
final String message =
String.format(
Locale.ROOT,
"duplicate setting [%s] found via command-line [%s] and system property [%s]",
setting,
settings.get(setting),
value);
throw new IllegalArgumentException(message);
} else {
settings.put(setting, value);
}
}
}
/** Execute the command with the initialized {@link Environment}. */
protected abstract void execute(Terminal terminal, OptionSet options, Environment env) throws Exception;
}
|
String pathConf = settings.get("path.conf");
if (pathConf == null) {
throw new UserException(ExitCodes.CONFIG, "the system property [path.conf] must be set. Specify with -Cpath.conf=<path>");
}
return InternalSettingsPreparer.prepareEnvironment(baseSettings, settings,
getConfigPath(pathConf),
// HOSTNAME is set by elasticsearch-env and elasticsearch-env.bat so it is always available
() -> System.getenv("HOSTNAME"));
| 925
| 134
| 1,059
|
<methods>public void <init>(java.lang.String, java.lang.Runnable) ,public void close() throws java.io.IOException,public final int main(java.lang.String[], org.elasticsearch.cli.Terminal) throws java.lang.Exception,public void mainWithoutErrorHandling(java.lang.String[], org.elasticsearch.cli.Terminal) throws java.lang.Exception<variables>private final non-sealed java.lang.Runnable beforeMain,protected final non-sealed java.lang.String description,private final OptionSpec<java.lang.Void> helpOption,protected final OptionParser parser,private final OptionSpec<java.lang.Void> silentOption,private final OptionSpec<java.lang.Void> verboseOption
|
crate_crate
|
crate/server/src/main/java/io/crate/server/xcontent/LoggingDeprecationHandler.java
|
LoggingDeprecationHandler
|
usedDeprecatedName
|
class LoggingDeprecationHandler implements DeprecationHandler {
public static final LoggingDeprecationHandler INSTANCE = new LoggingDeprecationHandler();
/**
* The logger to which to send deprecation messages.
*
* This uses ParseField's logger because that is the logger that
* we have been using for many releases for deprecated fields.
* Changing that will require some research to make super duper
* sure it is safe.
*/
private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(ParseField.class));
private LoggingDeprecationHandler() {
// Singleton
}
@Override
public void usedDeprecatedName(String usedName, String modernName) {<FILL_FUNCTION_BODY>}
@Override
public void usedDeprecatedField(String usedName, String replacedWith) {
DEPRECATION_LOGGER.deprecatedAndMaybeLog(
"deprecated_field", "Deprecated field [{}] used, replaced by [{}]", usedName, replacedWith);
}
}
|
DEPRECATION_LOGGER.deprecatedAndMaybeLog(
"deprecated_field", "Deprecated field [{}] used, expected [{}] instead", usedName, modernName);
| 277
| 46
| 323
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/server/xcontent/XContentParserUtils.java
|
XContentParserUtils
|
throwUnknownField
|
class XContentParserUtils {
private XContentParserUtils() {
}
/**
* Makes sure that current token is of type {@link Token#FIELD_NAME} and the field name is equal to the provided one
* @throws ParsingException if the token is not of type {@link Token#FIELD_NAME} or is not equal to the given field name
*/
public static void ensureFieldName(XContentParser parser, Token token, String fieldName) throws IOException {
ensureExpectedToken(Token.FIELD_NAME, token, parser);
String currentName = parser.currentName();
if (currentName.equals(fieldName) == false) {
String message = "Failed to parse object: expecting field with name [%s] but found [%s]";
throw new ParsingException(parser.getTokenLocation(), String.format(Locale.ROOT, message, fieldName, currentName));
}
}
/**
* @throws ParsingException with a "unknown field found" reason
*/
public static void throwUnknownField(String field, XContentLocation location) {<FILL_FUNCTION_BODY>}
/**
* @throws ParsingException with a "unknown token found" reason
*/
public static void throwUnknownToken(Token token, XContentLocation location) {
String message = "Failed to parse object: unexpected token [%s] found";
throw new ParsingException(location, String.format(Locale.ROOT, message, token));
}
/**
* Makes sure that provided token is of the expected type
*
* @throws ParsingException if the token is not equal to the expected type
*/
public static void ensureExpectedToken(Token expected, Token actual, XContentParser parser) {
if (actual != expected) {
throw parsingException(parser, expected, actual);
}
}
private static ParsingException parsingException(XContentParser parser, Token expected, Token actual) {
return new ParsingException(parser.getTokenLocation(),
String.format(Locale.ROOT, "Failed to parse object: expecting token of type [%s] but found [%s]", expected, actual));
}
}
|
String message = "Failed to parse object: unknown field [%s] found";
throw new ParsingException(location, String.format(Locale.ROOT, message, field));
| 548
| 47
| 595
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/statistics/SketchRamAccounting.java
|
SketchRamAccounting
|
addBytes
|
class SketchRamAccounting implements AutoCloseable {
private static final int BLOCK_SIZE = 1024;
private static final int SHIFTED_BLOCK_SIZE = 32;
private final RamAccounting ramAccounting;
private final RateLimiter rateLimiter;
private long blockCache;
private long bytesSinceLastPause;
public SketchRamAccounting(RamAccounting ramAccounting, RateLimiter rateLimiter) {
this.ramAccounting = ramAccounting;
this.rateLimiter = rateLimiter;
}
public void addBytes(long bytes) {<FILL_FUNCTION_BODY>}
private void checkRateLimit(long bytes) {
if (rateLimiter.getMBPerSec() > 0) {
// Throttling is enabled
bytesSinceLastPause += bytes;
if (bytesSinceLastPause >= rateLimiter.getMinPauseCheckBytes()) {
try {
rateLimiter.pause(bytesSinceLastPause); // SimpleRateLimiter does one volatile read of mbPerSec.
} catch (IOException e) {
throw new UncheckedIOException(e);
} finally {
bytesSinceLastPause = 0;
}
}
}
}
@Override
public void close() throws IOException {
ramAccounting.close();
}
}
|
this.blockCache += bytes;
boolean checklimit = false;
while (this.blockCache > BLOCK_SIZE) {
this.blockCache -= BLOCK_SIZE;
ramAccounting.addBytes(SHIFTED_BLOCK_SIZE);
checklimit = true;
}
if (checklimit) {
checkRateLimit(bytes);
}
| 357
| 96
| 453
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/types/CharacterType.java
|
CharacterType
|
explicitCast
|
class CharacterType extends StringType {
public static final String NAME = "character";
public static final int ID = 27;
public static final CharacterType INSTANCE = new CharacterType();
public static CharacterType of(List<Integer> parameters) {
if (parameters.size() != 1) {
throw new IllegalArgumentException(
"The character type can only have a single parameter value, received: " +
parameters.size()
);
}
return CharacterType.of(parameters.get(0));
}
public static CharacterType of(int lengthLimit) {
if (lengthLimit <= 0) {
throw new IllegalArgumentException(
"The character type length must be at least 1, received: " + lengthLimit);
}
return new CharacterType(lengthLimit);
}
private final int lengthLimit;
public CharacterType(StreamInput in) throws IOException {
lengthLimit = in.readInt();
}
public CharacterType(int lengthLimit) {
this.lengthLimit = lengthLimit;
}
private CharacterType() {
this(1);
}
@Override
public String getName() {
return NAME;
}
@Override
public int id() {
return ID;
}
@Override
public int lengthLimit() {
return lengthLimit;
}
@Override
public boolean unbound() {
return false;
}
@Override
public String valueForInsert(String value) {
if (value == null) {
return null;
}
if (value.length() == lengthLimit) {
return value;
} else if (value.length() < lengthLimit) {
return padEnd(value, lengthLimit, ' ');
} else {
if (isBlank(value, lengthLimit, value.length())) {
return value.substring(0, lengthLimit);
} else {
if (value.length() > 20) {
value = value.substring(0, 20) + "...";
}
throw new IllegalArgumentException(
"'" + value + "' is too long for the character type of length: " + lengthLimit);
}
}
}
@Override
public String implicitCast(Object value) throws IllegalArgumentException, ClassCastException {
var s = cast(value);
if (s != null) {
return padEnd(s, lengthLimit, ' ');
}
return s;
}
@Override
public String explicitCast(Object value, SessionSettings sessionSettings) throws IllegalArgumentException, ClassCastException {<FILL_FUNCTION_BODY>}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeInt(lengthLimit);
}
@Override
public ColumnType<Expression> toColumnType(ColumnPolicy columnPolicy,
@Nullable Supplier<List<ColumnDefinition<Expression>>> convertChildColumn) {
return new ColumnType<>(NAME, List.of(lengthLimit));
}
@Override
public Integer characterMaximumLength() {
return lengthLimit;
}
@Override
public TypeSignature getTypeSignature() {
return new TypeSignature(NAME, List.of(TypeSignature.of(lengthLimit)));
}
@Override
public List<DataType<?>> getTypeParameters() {
return List.of(DataTypes.INTEGER);
}
@Override
public Precedence precedence() {
return Precedence.CHARACTER;
}
@Override
public void addMappingOptions(Map<String, Object> mapping) {
mapping.put("length_limit", lengthLimit);
mapping.put("blank_padding", true);
}
}
|
if (value == null) {
return null;
}
var string = cast(value);
if (string.length() <= lengthLimit()) {
return string;
} else {
return string.substring(0, lengthLimit());
}
| 937
| 68
| 1,005
|
<methods>public void <init>(org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public void addMappingOptions(Map<java.lang.String,java.lang.Object>) ,public java.lang.Integer characterMaximumLength() ,public ColumnStatsSupport<java.lang.String> columnStatsSupport() ,public int compare(java.lang.String, java.lang.String) ,public boolean equals(java.lang.Object) ,public java.lang.String explicitCast(java.lang.Object, io.crate.metadata.settings.SessionSettings) throws java.lang.IllegalArgumentException, java.lang.ClassCastException,public java.lang.String getName() ,public List<DataType<?>> getTypeParameters() ,public io.crate.types.TypeSignature getTypeSignature() ,public int hashCode() ,public int id() ,public java.lang.String implicitCast(java.lang.Object) throws java.lang.IllegalArgumentException, java.lang.ClassCastException,public boolean isConvertableTo(DataType<?>, boolean) ,public int lengthLimit() ,public static io.crate.types.StringType of(List<java.lang.Integer>) ,public static io.crate.types.StringType of(int) ,public io.crate.types.DataType.Precedence precedence() ,public java.lang.String readValueFrom(org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public java.lang.String sanitizeValue(java.lang.Object) ,public StorageSupport<java.lang.Object> storageSupport() ,public Streamer<java.lang.String> streamer() ,public ColumnType<io.crate.sql.tree.Expression> toColumnType(io.crate.sql.tree.ColumnPolicy, Supplier<List<ColumnDefinition<io.crate.sql.tree.Expression>>>) ,public java.lang.String toString() ,public boolean unbound() ,public long valueBytes(java.lang.String) ,public java.lang.String valueForInsert(java.lang.String) ,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException,public void writeValueTo(org.elasticsearch.common.io.stream.StreamOutput, java.lang.String) throws java.io.IOException<variables>public static final java.lang.String F,public static final int 
ID,public static final io.crate.types.StringType INSTANCE,private static final StorageSupport<java.lang.Object> STORAGE,public static final java.lang.String T,private final non-sealed int lengthLimit
|
crate_crate
|
crate/server/src/main/java/io/crate/types/GeoShapeType.java
|
GeoShapeType
|
compare
|
class GeoShapeType extends DataType<Map<String, Object>> implements Streamer<Map<String, Object>> {
public static final int ID = 14;
public static final GeoShapeType INSTANCE = new GeoShapeType();
private static final StorageSupport<Map<String, Object>> STORAGE = new StorageSupport<>(false, false, null) {
@Override
public ValueIndexer<Map<String, Object>> valueIndexer(RelationName table,
Reference ref,
Function<String, FieldType> getFieldType,
Function<ColumnIdent, Reference> getRef) {
return new GeoShapeIndexer(ref, getFieldType.apply(ref.storageIdent()));
}
};
private GeoShapeType() {
}
@Override
public int id() {
return ID;
}
@Override
public Precedence precedence() {
return Precedence.GEO_SHAPE;
}
@Override
public String getName() {
return "geo_shape";
}
@Override
public Streamer<Map<String, Object>> streamer() {
return this;
}
@Override
@SuppressWarnings("unchecked")
public Map<String, Object> implicitCast(Object value) throws IllegalArgumentException, ClassCastException {
if (value == null) {
return null;
} else if (value instanceof String) {
return GeoJSONUtils.wkt2Map(BytesRefs.toString(value));
} else if (value instanceof Point point) {
return GeoJSONUtils.shape2Map(SpatialContext.GEO.getShapeFactory().pointXY(point.getX(), point.getY()));
} else if (value instanceof Shape shape) {
return GeoJSONUtils.shape2Map(shape);
} else if (value instanceof Map<?, ?> map) {
GeoJSONUtils.validateGeoJson(map);
return (Map<String, Object>) value;
} else {
throw new ClassCastException("Can't cast '" + value + "' to " + getName());
}
}
@Override
@SuppressWarnings("unchecked")
public Map<String, Object> sanitizeValue(Object value) {
if (value == null) {
return null;
} else if (value instanceof Map<?, ?> map) {
GeoJSONUtils.validateGeoJson(map);
return (Map<String, Object>) value;
} else {
return GeoJSONUtils.shape2Map((Shape) value);
}
}
@Override
public int compare(Map<String, Object> val1, Map<String, Object> val2) {<FILL_FUNCTION_BODY>}
@Override
public Map<String, Object> readValueFrom(StreamInput in) throws IOException {
return in.readMap();
}
@Override
public void writeValueTo(StreamOutput out, Map<String, Object> v) throws IOException {
out.writeMap(v);
}
@Override
public StorageSupport<Map<String, Object>> storageSupport() {
return STORAGE;
}
@Override
public long valueBytes(Map<String, Object> value) {
if (value == null) {
return RamUsageEstimator.NUM_BYTES_OBJECT_HEADER;
}
return RamUsageEstimator.sizeOfMap(value);
}
}
|
// TODO: compare without converting to shape
Shape shape1 = GeoJSONUtils.map2Shape(val1);
Shape shape2 = GeoJSONUtils.map2Shape(val2);
return switch (shape1.relate(shape2)) {
case WITHIN -> -1;
case CONTAINS -> 1;
default -> Double.compare(shape1.getArea(JtsSpatialContext.GEO), shape2.getArea(JtsSpatialContext.GEO));
};
| 866
| 128
| 994
|
<methods>public non-sealed void <init>() ,public void addMappingOptions(Map<java.lang.String,java.lang.Object>) ,public java.lang.Integer characterMaximumLength() ,public ColumnStatsSupport<Map<java.lang.String,java.lang.Object>> columnStatsSupport() ,public int compareTo(DataType<?>) ,public boolean equals(java.lang.Object) ,public Map<java.lang.String,java.lang.Object> explicitCast(java.lang.Object, io.crate.metadata.settings.SessionSettings) throws java.lang.IllegalArgumentException, java.lang.ClassCastException,public abstract java.lang.String getName() ,public List<DataType<?>> getTypeParameters() ,public io.crate.types.TypeSignature getTypeSignature() ,public int hashCode() ,public abstract int id() ,public Map<java.lang.String,java.lang.Object> implicitCast(java.lang.Object) throws java.lang.IllegalArgumentException, java.lang.ClassCastException,public boolean isConvertableTo(DataType<?>, boolean) ,public java.lang.Integer numericPrecision() ,public abstract io.crate.types.DataType.Precedence precedence() ,public boolean precedes(DataType<?>) ,public long ramBytesUsed() ,public abstract Map<java.lang.String,java.lang.Object> sanitizeValue(java.lang.Object) ,public StorageSupport<? super Map<java.lang.String,java.lang.Object>> storageSupport() ,public final StorageSupport<? super Map<java.lang.String,java.lang.Object>> storageSupportSafe() ,public abstract Streamer<Map<java.lang.String,java.lang.Object>> streamer() ,public ColumnType<io.crate.sql.tree.Expression> toColumnType(io.crate.sql.tree.ColumnPolicy, Supplier<List<ColumnDefinition<io.crate.sql.tree.Expression>>>) ,public java.lang.String toString() ,public abstract long valueBytes(Map<java.lang.String,java.lang.Object>) ,public Map<java.lang.String,java.lang.Object> valueForInsert(Map<java.lang.String,java.lang.Object>) ,public final ValueIndexer<? 
super Map<java.lang.String,java.lang.Object>> valueIndexer(io.crate.metadata.RelationName, io.crate.metadata.Reference, Function<java.lang.String,FieldType>, Function<io.crate.metadata.ColumnIdent,io.crate.metadata.Reference>) ,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>
|
crate_crate
|
crate/server/src/main/java/io/crate/types/IntEqQuery.java
|
IntEqQuery
|
rangeQuery
|
class IntEqQuery implements EqQuery<Number> {
@Override
public Query termQuery(String field, Number value, boolean hasDocValues, boolean isIndexed) {
if (isIndexed) {
return IntPoint.newExactQuery(field, value.intValue());
}
if (hasDocValues) {
return SortedNumericDocValuesField.newSlowExactQuery(field, value.intValue());
}
return null;
}
@Override
public Query rangeQuery(String field,
Number lowerTerm,
Number upperTerm,
boolean includeLower,
boolean includeUpper,
boolean hasDocValues,
boolean isIndexed) {<FILL_FUNCTION_BODY>}
@Override
public Query termsQuery(String field, List<Number> nonNullValues, boolean hasDocValues, boolean isIndexed) {
if (isIndexed) {
return IntPoint.newSetQuery(field, nonNullValues.stream().mapToInt(Number::intValue).toArray());
}
if (hasDocValues) {
return SortedNumericDocValuesField.newSlowSetQuery(field, nonNullValues.stream().mapToLong(Number::longValue).toArray());
}
return null;
}
}
|
int lower = Integer.MIN_VALUE;
if (lowerTerm != null) {
lower = includeLower ? lowerTerm.intValue() : lowerTerm.intValue() + 1;
}
int upper = Integer.MAX_VALUE;
if (upperTerm != null) {
upper = includeUpper ? upperTerm.intValue() : upperTerm.intValue() - 1;
}
if (isIndexed) {
return IntPoint.newRangeQuery(field, lower, upper);
}
if (hasDocValues) {
return SortedNumericDocValuesField.newSlowRangeQuery(field, lower, upper);
}
return null;
| 318
| 169
| 487
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/types/JsonType.java
|
JsonType
|
explicitCast
|
class JsonType extends DataType<String> implements Streamer<String> {
public static final int ID = 26;
public static final JsonType INSTANCE = new JsonType();
@Override
public int compare(String o1, String o2) {
return o1.compareTo(o2);
}
@Override
public int id() {
return ID;
}
@Override
public Precedence precedence() {
return Precedence.UNCHECKED_OBJECT;
}
@Override
public String getName() {
return "json";
}
@Override
public Streamer<String> streamer() {
return this;
}
@Override
public String sanitizeValue(Object value) {
return (String) value;
}
@Override
public String readValueFrom(StreamInput in) throws IOException {
return in.readOptionalString();
}
@Override
public void writeValueTo(StreamOutput out, String v) throws IOException {
out.writeOptionalString(v);
}
@Override
public String implicitCast(Object value) throws IllegalArgumentException, ClassCastException {
return (String) value;
}
@Override
@SuppressWarnings("unchecked")
public String explicitCast(Object value, SessionSettings sessionSettings) throws IllegalArgumentException, ClassCastException {<FILL_FUNCTION_BODY>}
@Override
public long valueBytes(String value) {
return RamUsageEstimator.sizeOf(value);
}
}
|
if (value instanceof Map<?, ?> map) {
try {
return Strings.toString(JsonXContent.builder().map((Map<String, ?>) map));
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
return (String) value;
| 396
| 80
| 476
|
<methods>public non-sealed void <init>() ,public void addMappingOptions(Map<java.lang.String,java.lang.Object>) ,public java.lang.Integer characterMaximumLength() ,public ColumnStatsSupport<java.lang.String> columnStatsSupport() ,public int compareTo(DataType<?>) ,public boolean equals(java.lang.Object) ,public java.lang.String explicitCast(java.lang.Object, io.crate.metadata.settings.SessionSettings) throws java.lang.IllegalArgumentException, java.lang.ClassCastException,public abstract java.lang.String getName() ,public List<DataType<?>> getTypeParameters() ,public io.crate.types.TypeSignature getTypeSignature() ,public int hashCode() ,public abstract int id() ,public java.lang.String implicitCast(java.lang.Object) throws java.lang.IllegalArgumentException, java.lang.ClassCastException,public boolean isConvertableTo(DataType<?>, boolean) ,public java.lang.Integer numericPrecision() ,public abstract io.crate.types.DataType.Precedence precedence() ,public boolean precedes(DataType<?>) ,public long ramBytesUsed() ,public abstract java.lang.String sanitizeValue(java.lang.Object) ,public StorageSupport<? super java.lang.String> storageSupport() ,public final StorageSupport<? super java.lang.String> storageSupportSafe() ,public abstract Streamer<java.lang.String> streamer() ,public ColumnType<io.crate.sql.tree.Expression> toColumnType(io.crate.sql.tree.ColumnPolicy, Supplier<List<ColumnDefinition<io.crate.sql.tree.Expression>>>) ,public java.lang.String toString() ,public abstract long valueBytes(java.lang.String) ,public java.lang.String valueForInsert(java.lang.String) ,public final ValueIndexer<? super java.lang.String> valueIndexer(io.crate.metadata.RelationName, io.crate.metadata.Reference, Function<java.lang.String,FieldType>, Function<io.crate.metadata.ColumnIdent,io.crate.metadata.Reference>) ,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>
|
crate_crate
|
crate/server/src/main/java/io/crate/types/LongType.java
|
LongType
|
sanitizeValue
|
class LongType extends DataType<Long> implements FixedWidthType, Streamer<Long> {
public static final LongType INSTANCE = new LongType();
public static final int ID = 10;
public static final int PRECISION = 64;
public static final int LONG_SIZE = (int) RamUsageEstimator.shallowSizeOfInstance(Long.class);
private static final StorageSupport<Long> STORAGE = new StorageSupport<>(
true,
true,
new LongEqQuery()
) {
@Override
public ValueIndexer<Long> valueIndexer(RelationName table,
Reference ref,
Function<String, FieldType> getFieldType,
Function<ColumnIdent, Reference> getRef) {
return new LongIndexer(ref, getFieldType.apply(ref.storageIdent()));
}
};
@Override
public int id() {
return ID;
}
@Override
public Precedence precedence() {
return Precedence.LONG;
}
@Override
public String getName() {
return "bigint";
}
@Override
public Integer numericPrecision() {
return PRECISION;
}
@Override
public Streamer<Long> streamer() {
return this;
}
@Override
public Long implicitCast(Object value) throws IllegalArgumentException, ClassCastException {
if (value == null) {
return null;
} else if (value instanceof Long l) {
return l;
} else if (value instanceof String str) {
return Long.valueOf(str);
} else if (value instanceof BigDecimal bigDecimalValue) {
var max = BigDecimal.valueOf(Long.MAX_VALUE).toBigInteger();
var min = BigDecimal.valueOf(Long.MIN_VALUE).toBigInteger();
if (max.compareTo(bigDecimalValue.toBigInteger()) <= 0
|| min.compareTo(bigDecimalValue.toBigInteger()) >= 0) {
throw new IllegalArgumentException(getName() + " value out of range: " + value);
}
return ((BigDecimal) value).longValue();
} else if (value instanceof Number number) {
return number.longValue();
} else {
throw new ClassCastException("Can't cast '" + value + "' to " + getName());
}
}
@Override
public Long sanitizeValue(Object value) {<FILL_FUNCTION_BODY>}
@Override
public int compare(Long val1, Long val2) {
return Long.compare(val1, val2);
}
@Override
public Long readValueFrom(StreamInput in) throws IOException {
return in.readBoolean() ? null : in.readLong();
}
@Override
public void writeValueTo(StreamOutput out, Long v) throws IOException {
out.writeBoolean(v == null);
if (v != null) {
out.writeLong(v);
}
}
@Override
public int fixedSize() {
return LONG_SIZE;
}
@Override
public StorageSupport<Long> storageSupport() {
return STORAGE;
}
@Override
public ColumnStatsSupport<Long> columnStatsSupport() {
return ColumnStatsSupport.singleValued(Long.class, LongType.this);
}
@Override
public long valueBytes(Long value) {
return LONG_SIZE;
}
}
|
if (value == null) {
return null;
} else if (value instanceof Long l) {
return l;
} else {
return ((Number) value).longValue();
}
| 892
| 53
| 945
|
<methods>public non-sealed void <init>() ,public void addMappingOptions(Map<java.lang.String,java.lang.Object>) ,public java.lang.Integer characterMaximumLength() ,public ColumnStatsSupport<java.lang.Long> columnStatsSupport() ,public int compareTo(DataType<?>) ,public boolean equals(java.lang.Object) ,public java.lang.Long explicitCast(java.lang.Object, io.crate.metadata.settings.SessionSettings) throws java.lang.IllegalArgumentException, java.lang.ClassCastException,public abstract java.lang.String getName() ,public List<DataType<?>> getTypeParameters() ,public io.crate.types.TypeSignature getTypeSignature() ,public int hashCode() ,public abstract int id() ,public java.lang.Long implicitCast(java.lang.Object) throws java.lang.IllegalArgumentException, java.lang.ClassCastException,public boolean isConvertableTo(DataType<?>, boolean) ,public java.lang.Integer numericPrecision() ,public abstract io.crate.types.DataType.Precedence precedence() ,public boolean precedes(DataType<?>) ,public long ramBytesUsed() ,public abstract java.lang.Long sanitizeValue(java.lang.Object) ,public StorageSupport<? super java.lang.Long> storageSupport() ,public final StorageSupport<? super java.lang.Long> storageSupportSafe() ,public abstract Streamer<java.lang.Long> streamer() ,public ColumnType<io.crate.sql.tree.Expression> toColumnType(io.crate.sql.tree.ColumnPolicy, Supplier<List<ColumnDefinition<io.crate.sql.tree.Expression>>>) ,public java.lang.String toString() ,public abstract long valueBytes(java.lang.Long) ,public java.lang.Long valueForInsert(java.lang.Long) ,public final ValueIndexer<? super java.lang.Long> valueIndexer(io.crate.metadata.RelationName, io.crate.metadata.Reference, Function<java.lang.String,FieldType>, Function<io.crate.metadata.ColumnIdent,io.crate.metadata.Reference>) ,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>
|
crate_crate
|
crate/server/src/main/java/io/crate/types/Regclass.java
|
Regclass
|
relationOid
|
class Regclass implements Comparable<Regclass>, Writeable {
private final int oid;
private final String name;
public static Regclass relationOid(RelationInfo relation) {<FILL_FUNCTION_BODY>}
public static Regclass primaryOid(RelationInfo relation) {
return new Regclass(
OidHash.primaryKeyOid(relation.ident(), relation.primaryKey()),
relation.ident().fqn()
);
}
public static Regclass fromRelationName(RelationName relationName) {
return new Regclass(
OidHash.relationOid(OidHash.Type.TABLE, relationName),
relationName.fqn()
);
}
public Regclass(int oid, String name) {
this.oid = oid;
this.name = name;
}
public Regclass(StreamInput in) throws IOException {
this.oid = in.readInt();
this.name = in.readString();
}
public int oid() {
return oid;
}
public String name() {
return name;
}
public void writeTo(StreamOutput out) throws IOException {
out.writeInt(oid);
out.writeString(name);
}
@Override
public int compareTo(Regclass o) {
return Integer.compare(oid, o.oid);
}
@Override
public String toString() {
return Integer.toString(oid);
}
@Override
public int hashCode() {
return oid;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Regclass other = (Regclass) obj;
return oid == other.oid;
}
}
|
return new Regclass(
OidHash.relationOid(
OidHash.Type.fromRelationType(relation.relationType()),
relation.ident()
),
relation.ident().fqn()
);
| 508
| 60
| 568
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/types/TimestampType.java
|
TimestampType
|
implicitCast
|
class TimestampType extends DataType<Long>
implements FixedWidthType, Streamer<Long> {
public static final int ID_WITH_TZ = 11;
public static final int ID_WITHOUT_TZ = 15;
public static final TimestampType INSTANCE_WITH_TZ = new TimestampType(
ID_WITH_TZ,
"timestamp with time zone",
TimestampType::parseTimestamp,
Precedence.TIMESTAMP_WITH_TIME_ZONE);
public static final TimestampType INSTANCE_WITHOUT_TZ = new TimestampType(
ID_WITHOUT_TZ,
"timestamp without time zone",
TimestampType::parseTimestampIgnoreTimeZone,
Precedence.TIMESTAMP);
private static final StorageSupport<Long> STORAGE = new StorageSupport<>(true, true, new LongEqQuery()) {
@Override
public ValueIndexer<Long> valueIndexer(RelationName table,
Reference ref,
Function<String, FieldType> getFieldType,
Function<ColumnIdent, Reference> getRef) {
return new LongIndexer(ref, getFieldType.apply(ref.storageIdent()));
}
};
private final int id;
private final String name;
private final Function<String, Long> parse;
private final Precedence precedence;
private TimestampType(int id, String name, Function<String, Long> parse, Precedence precedence) {
this.id = id;
this.name = name;
this.parse = parse;
this.precedence = precedence;
}
@Override
public int id() {
return id;
}
@Override
public String getName() {
return name;
}
@Override
public Precedence precedence() {
return precedence;
}
@Override
public Streamer<Long> streamer() {
return this;
}
@Override
public Long implicitCast(Object value) throws IllegalArgumentException, ClassCastException {<FILL_FUNCTION_BODY>}
@Override
public Long sanitizeValue(Object value) {
if (value == null) {
return null;
} else if (value instanceof Long l) {
return l;
} else {
return ((Number) value).longValue();
}
}
@Override
public int compare(Long val1, Long val2) {
return Long.compare(val1, val2);
}
@Override
public Long readValueFrom(StreamInput in) throws IOException {
return in.readBoolean() ? null : in.readLong();
}
@Override
public void writeValueTo(StreamOutput out, Long v) throws IOException {
out.writeBoolean(v == null);
if (v != null) {
out.writeLong(v);
}
}
@Override
public int fixedSize() {
return LongType.LONG_SIZE;
}
static long parseTimestamp(String timestamp) {
long[] out = StringUtils.PARSE_LONG_BUFFER.get();
if (StringUtils.tryParseLong(timestamp, out)) {
return out[0];
}
TemporalAccessor dt;
try {
dt = TIMESTAMP_PARSER.parseBest(
timestamp, OffsetDateTime::from, LocalDateTime::from, LocalDate::from);
} catch (DateTimeParseException e1) {
throw new IllegalArgumentException(e1.getMessage());
}
if (dt instanceof LocalDateTime) {
LocalDateTime localDateTime = LocalDateTime.from(dt);
return localDateTime.toInstant(UTC).toEpochMilli();
} else if (dt instanceof LocalDate) {
LocalDate localDate = LocalDate.from(dt);
return localDate.atStartOfDay(UTC).toInstant().toEpochMilli();
}
OffsetDateTime offsetDateTime = OffsetDateTime.from(dt);
return offsetDateTime.toInstant().toEpochMilli();
}
public static long parseTimestampIgnoreTimeZone(String timestamp) {
long[] out = StringUtils.PARSE_LONG_BUFFER.get();
if (StringUtils.tryParseLong(timestamp, out)) {
return out[0];
}
TemporalAccessor dt;
try {
dt = TIMESTAMP_PARSER.parseBest(
timestamp, LocalDateTime::from, LocalDate::from);
} catch (DateTimeParseException e1) {
throw new IllegalArgumentException(e1.getMessage());
}
if (dt instanceof LocalDate) {
LocalDate localDate = LocalDate.from(dt);
return localDate.atStartOfDay(UTC).toInstant().toEpochMilli();
}
LocalDateTime localDateTime = LocalDateTime.from(dt);
return localDateTime.toInstant(UTC).toEpochMilli();
}
public static final DateTimeFormatter TIMESTAMP_PARSER = new DateTimeFormatterBuilder()
.parseCaseInsensitive()
.append(ISO_LOCAL_DATE)
.optionalStart()
.padNext(1)
.optionalStart()
.appendLiteral('T')
.optionalEnd()
.append(ISO_LOCAL_TIME)
.optionalStart()
.appendPattern("[Z][VV][x][xx][xxx]")
.toFormatter(Locale.ENGLISH).withResolverStyle(ResolverStyle.STRICT);
@Override
public StorageSupport<Long> storageSupport() {
return STORAGE;
}
@Override
public long valueBytes(Long value) {
return LongType.LONG_SIZE;
}
@Override
public void addMappingOptions(Map<String, Object> mapping) {
mapping.put("format", "epoch_millis||strict_date_optional_time");
if (id == ID_WITHOUT_TZ) {
mapping.put("ignore_timezone", true);
}
}
@Override
public ColumnStatsSupport<Long> columnStatsSupport() {
return ColumnStatsSupport.singleValued(Long.class, TimestampType.this);
}
}
|
if (value == null) {
return null;
} else if (value instanceof Long l) {
return l;
} else if (value instanceof String str) {
return parse.apply(str);
} else if (value instanceof Double) {
// we treat float and double values as seconds with milliseconds as fractions
// see timestamp documentation
return ((Number) (((Double) value) * 1000)).longValue();
} else if (value instanceof Float) {
return ((Number) (((Float) value) * 1000)).longValue();
} else if (value instanceof Number number) {
return number.longValue();
} else {
throw new ClassCastException("Can't cast '" + value + "' to " + getName());
}
| 1,568
| 197
| 1,765
|
<methods>public non-sealed void <init>() ,public void addMappingOptions(Map<java.lang.String,java.lang.Object>) ,public java.lang.Integer characterMaximumLength() ,public ColumnStatsSupport<java.lang.Long> columnStatsSupport() ,public int compareTo(DataType<?>) ,public boolean equals(java.lang.Object) ,public java.lang.Long explicitCast(java.lang.Object, io.crate.metadata.settings.SessionSettings) throws java.lang.IllegalArgumentException, java.lang.ClassCastException,public abstract java.lang.String getName() ,public List<DataType<?>> getTypeParameters() ,public io.crate.types.TypeSignature getTypeSignature() ,public int hashCode() ,public abstract int id() ,public java.lang.Long implicitCast(java.lang.Object) throws java.lang.IllegalArgumentException, java.lang.ClassCastException,public boolean isConvertableTo(DataType<?>, boolean) ,public java.lang.Integer numericPrecision() ,public abstract io.crate.types.DataType.Precedence precedence() ,public boolean precedes(DataType<?>) ,public long ramBytesUsed() ,public abstract java.lang.Long sanitizeValue(java.lang.Object) ,public StorageSupport<? super java.lang.Long> storageSupport() ,public final StorageSupport<? super java.lang.Long> storageSupportSafe() ,public abstract Streamer<java.lang.Long> streamer() ,public ColumnType<io.crate.sql.tree.Expression> toColumnType(io.crate.sql.tree.ColumnPolicy, Supplier<List<ColumnDefinition<io.crate.sql.tree.Expression>>>) ,public java.lang.String toString() ,public abstract long valueBytes(java.lang.Long) ,public java.lang.Long valueForInsert(java.lang.Long) ,public final ValueIndexer<? super java.lang.Long> valueIndexer(io.crate.metadata.RelationName, io.crate.metadata.Reference, Function<java.lang.String,FieldType>, Function<io.crate.metadata.ColumnIdent,io.crate.metadata.Reference>) ,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>
|
crate_crate
|
crate/server/src/main/java/io/crate/types/TypeSignaturesASTVisitor.java
|
TypeSignaturesASTVisitor
|
getIdentifier
|
class TypeSignaturesASTVisitor extends TypeSignaturesBaseVisitor<TypeSignature> {
@Override
public TypeSignature visitDoublePrecision(TypeSignaturesParser.DoublePrecisionContext context) {
return new TypeSignature(DataTypes.DOUBLE.getName(), List.of());
}
@Override
public TypeSignature visitTimeStampWithoutTimeZone(TypeSignaturesParser.TimeStampWithoutTimeZoneContext context) {
return new TypeSignature(TimestampType.INSTANCE_WITHOUT_TZ.getName(), List.of());
}
@Override
public TypeSignature visitTimeStampWithTimeZone(TypeSignaturesParser.TimeStampWithTimeZoneContext context) {
return new TypeSignature(TimestampType.INSTANCE_WITH_TZ.getName(), List.of());
}
@Override
public TypeSignature visitTimeWithTimeZone(TypeSignaturesParser.TimeWithTimeZoneContext context) {
return new TypeSignature(TimeTZType.NAME, List.of());
}
@Override
public TypeSignature visitArray(TypeSignaturesParser.ArrayContext context) {
var parameter = visitOptionalContext(context.type());
List<TypeSignature> parameters = parameter == null ? List.of() : List.of(parameter);
return new TypeSignature(ArrayType.NAME, parameters);
}
@Override
public TypeSignature visitObject(TypeSignaturesParser.ObjectContext context) {
return new TypeSignature(ObjectType.NAME, getParameters(context.parameters()));
}
@Override
public TypeSignature visitGeneric(TypeSignaturesParser.GenericContext context) {
return new TypeSignature(getIdentifier(context.identifier()), getParameters(context.parameters()));
}
@Override
public TypeSignature visitRow(TypeSignaturesParser.RowContext context) {
return new TypeSignature(RowType.NAME, getParameters(context.parameters()));
}
@Override
public TypeSignature visitParameter(TypeSignaturesParser.ParameterContext context) {
if (context.INTEGER_VALUE() != null) {
return new IntegerLiteralTypeSignature(Integer.parseInt(context.INTEGER_VALUE().getText()));
} else if (context.identifier() != null) {
return new ParameterTypeSignature(getIdentifier(context.identifier()), visitOptionalContext(context.type()));
} else {
return visit(context.type());
}
}
@Nullable
private String getIdentifier(@Nullable TypeSignaturesParser.IdentifierContext context) {<FILL_FUNCTION_BODY>}
@Nullable
private TypeSignature visitOptionalContext(@Nullable TypeSignaturesParser.TypeContext context) {
if (context != null) {
return visit(context);
}
return null;
}
private List<TypeSignature> getParameters(@Nullable TypeSignaturesParser.ParametersContext context) {
if (context == null || context.parameter().isEmpty()) {
return List.of();
}
var result = new ArrayList<TypeSignature>(context.parameter().size());
for (var p : context.parameter()) {
result.add(p.accept(this));
}
return result;
}
}
|
if (context != null) {
if (context.QUOTED_INDENTIFIER() != null) {
var token = context.QUOTED_INDENTIFIER().getText();
return token.substring(1, token.length() - 1);
}
if (context.UNQUOTED_INDENTIFIER() != null) {
return context.UNQUOTED_INDENTIFIER().getText();
}
}
return null;
| 770
| 122
| 892
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/udc/ping/PingTask.java
|
PingTask
|
run
|
class PingTask extends TimerTask {
private static final TimeValue HTTP_TIMEOUT = new TimeValue(5, TimeUnit.SECONDS);
private static final Logger LOGGER = LogManager.getLogger(PingTask.class);
private final ClusterService clusterService;
private final ExtendedNodeInfo extendedNodeInfo;
private final String pingUrl;
private final AtomicLong successCounter = new AtomicLong(0);
private final AtomicLong failCounter = new AtomicLong(0);
public PingTask(ClusterService clusterService,
ExtendedNodeInfo extendedNodeInfo,
String pingUrl) {
this.clusterService = clusterService;
this.pingUrl = pingUrl;
this.extendedNodeInfo = extendedNodeInfo;
}
private Map<String, String> getKernelData() {
return extendedNodeInfo.kernelData();
}
private String getClusterId() {
return clusterService.state().metadata().clusterUUID();
}
private Boolean isMasterNode() {
return clusterService.state().nodes().isLocalNodeElectedMaster();
}
private Map<String, Object> getCounters() {
return Map.of(
"success", successCounter.get(),
"failure", failCounter.get()
);
}
@Nullable
String getHardwareAddress() {
String macAddress = extendedNodeInfo.networkInfo().primaryInterface().macAddress();
return macAddress.isEmpty() ? null : macAddress;
}
private URL buildPingUrl() throws URISyntaxException, IOException {
final URI uri = new URI(this.pingUrl);
// specifying the initialCapacity based on “expected number of elements / load_factor”
// in this case, the "expected number of elements" = 10 while default load factor = .75
Map<String, String> queryMap = new HashMap<>(14);
queryMap.put("cluster_id", getClusterId());
queryMap.put("kernel", Strings.toString(JsonXContent.builder().map(getKernelData())));
queryMap.put("master", isMasterNode().toString());
queryMap.put("ping_count", Strings.toString(JsonXContent.builder().map(getCounters())));
queryMap.put("hardware_address", getHardwareAddress());
queryMap.put("num_processors", Integer.toString(Runtime.getRuntime().availableProcessors()));
queryMap.put("crate_version", Version.CURRENT.externalNumber());
queryMap.put("java_version", System.getProperty("java.version"));
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Sending data: {}", queryMap);
}
List<String> params = new ArrayList<>(queryMap.size());
for (Map.Entry<String, String> entry : queryMap.entrySet()) {
String value = entry.getValue();
if (value != null) {
params.add(entry.getKey() + '=' + value);
}
}
String query = String.join("&", params);
return new URI(
uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(),
uri.getPath(), query, uri.getFragment()
).toURL();
}
@Override
public void run() {<FILL_FUNCTION_BODY>}
}
|
try {
URL url = buildPingUrl();
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Sending UDC information to {}...", url);
}
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setConnectTimeout((int) HTTP_TIMEOUT.millis());
conn.setReadTimeout((int) HTTP_TIMEOUT.millis());
if (conn.getResponseCode() >= 300) {
throw new Exception(String.format(Locale.ENGLISH, "%s Responded with Code %d", url.getHost(), conn.getResponseCode()));
}
if (LOGGER.isDebugEnabled()) {
BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8));
String line = reader.readLine();
while (line != null) {
LOGGER.debug(line);
line = reader.readLine();
}
reader.close();
} else {
conn.getInputStream().close();
}
successCounter.incrementAndGet();
} catch (Exception e) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Error sending UDC information", e);
}
failCounter.incrementAndGet();
}
| 847
| 328
| 1,175
|
<methods>public boolean cancel() ,public abstract void run() ,public long scheduledExecutionTime() <variables>static final int CANCELLED,static final int EXECUTED,static final int SCHEDULED,static final int VIRGIN,final java.lang.Object lock,long nextExecutionTime,long period,int state
|
crate_crate
|
crate/server/src/main/java/io/crate/udc/service/UDCService.java
|
UDCService
|
doStart
|
class UDCService extends AbstractLifecycleComponent {
private static final Logger LOGGER = LogManager.getLogger(UDCService.class);
public static final Setting<Boolean> UDC_ENABLED_SETTING = Setting.boolSetting(
"udc.enabled", true, Property.NodeScope, Property.Exposed);
// Explicit generic is required for eclipse JDT, otherwise it won't compile
public static final Setting<String> UDC_URL_SETTING = new Setting<>(
"udc.url", "https://udc.crate.io/",
Function.identity(), DataTypes.STRING, Property.NodeScope, Property.Exposed);
public static final Setting<TimeValue> UDC_INITIAL_DELAY_SETTING = Setting.positiveTimeSetting(
"udc.initial_delay", new TimeValue(10, TimeUnit.MINUTES),
Property.NodeScope,
Property.Exposed);
public static final Setting<TimeValue> UDC_INTERVAL_SETTING = Setting.positiveTimeSetting(
"udc.interval", new TimeValue(24, TimeUnit.HOURS),
Property.NodeScope,
Property.Exposed
);
private final Timer timer;
private final ClusterService clusterService;
private final ExtendedNodeInfo extendedNodeInfo;
private final Settings settings;
@Inject
public UDCService(Settings settings,
ExtendedNodeInfo extendedNodeInfo,
ClusterService clusterService) {
this.settings = settings;
this.extendedNodeInfo = extendedNodeInfo;
this.clusterService = clusterService;
this.timer = new Timer("crate-udc");
}
@Override
protected void doStart() throws ElasticsearchException {<FILL_FUNCTION_BODY>}
@Override
protected void doStop() throws ElasticsearchException {
timer.cancel();
}
@Override
protected void doClose() throws ElasticsearchException {
timer.cancel(); // safety net, in case of unlikely weirdness
}
}
|
String url = UDC_URL_SETTING.get(settings);
TimeValue initialDelay = UDC_INITIAL_DELAY_SETTING.get(settings);
TimeValue interval = UDC_INTERVAL_SETTING.get(settings);
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Starting with delay {} and period {}.", initialDelay.seconds(), interval.seconds());
}
PingTask pingTask = new PingTask(clusterService, extendedNodeInfo, url);
timer.scheduleAtFixedRate(pingTask, initialDelay.millis(), interval.millis());
| 519
| 153
| 672
|
<methods>public void addLifecycleListener(org.elasticsearch.common.component.LifecycleListener) ,public void close() ,public org.elasticsearch.common.component.Lifecycle.State lifecycleState() ,public void removeLifecycleListener(org.elasticsearch.common.component.LifecycleListener) ,public void start() ,public void stop() <variables>protected final org.elasticsearch.common.component.Lifecycle lifecycle,private final List<org.elasticsearch.common.component.LifecycleListener> listeners
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/action/UnavailableShardsException.java
|
UnavailableShardsException
|
buildMessage
|
class UnavailableShardsException extends ElasticsearchException {
public UnavailableShardsException(@Nullable ShardId shardId, String message, Object... args) {
super(buildMessage(shardId, message), args);
}
public UnavailableShardsException(String index, int shardId, String message, Object... args) {
super(buildMessage(index, shardId, message), args);
}
private static String buildMessage(ShardId shardId, String message) {
if (shardId == null) {
return message;
}
return buildMessage(shardId.getIndexName(), shardId.id(), message);
}
private static String buildMessage(String index, int shardId, String message) {<FILL_FUNCTION_BODY>}
public UnavailableShardsException(StreamInput in) throws IOException {
super(in);
}
@Override
public RestStatus status() {
return RestStatus.SERVICE_UNAVAILABLE;
}
}
|
return "[" + index + "][" + shardId + "] " + message;
| 259
| 25
| 284
|
<methods>public void <init>(java.lang.Throwable) ,public transient void <init>(java.lang.String, java.lang.Object[]) ,public transient void <init>(java.lang.String, java.lang.Throwable, java.lang.Object[]) ,public void <init>(org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public void addHeader(java.lang.String, List<java.lang.String>) ,public transient void addMetadata(java.lang.String, java.lang.String[]) ,public void addMetadata(java.lang.String, List<java.lang.String>) ,public static org.elasticsearch.ElasticsearchException fromXContent(org.elasticsearch.common.xcontent.XContentParser) throws java.io.IOException,public static void generateThrowableXContent(org.elasticsearch.common.xcontent.XContentBuilder, org.elasticsearch.common.xcontent.ToXContent.Params, java.lang.Throwable) throws java.io.IOException,public java.lang.String getDetailedMessage() ,public static java.lang.String getExceptionName(java.lang.Throwable) ,public List<java.lang.String> getHeader(java.lang.String) ,public Set<java.lang.String> getHeaderKeys() ,public static int getId(Class<? extends org.elasticsearch.ElasticsearchException>) ,public org.elasticsearch.index.Index getIndex() ,public List<java.lang.String> getMetadata(java.lang.String) ,public Set<java.lang.String> getMetadataKeys() ,public java.lang.Throwable getRootCause() ,public org.elasticsearch.index.shard.ShardId getShardId() ,public io.crate.rest.action.HttpErrorStatus httpErrorStatus() ,public static org.elasticsearch.ElasticsearchException innerFromXContent(org.elasticsearch.common.xcontent.XContentParser, boolean) throws java.io.IOException,public static boolean isRegistered(Class<? 
extends java.lang.Throwable>, org.elasticsearch.Version) ,public io.crate.protocols.postgres.PGErrorStatus pgErrorStatus() ,public static org.elasticsearch.ElasticsearchException readException(org.elasticsearch.common.io.stream.StreamInput, int) throws java.io.IOException,public static T readStackTrace(T, org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public void setIndex(org.elasticsearch.index.Index) ,public void setIndex(java.lang.String) ,public transient void setResources(java.lang.String, java.lang.String[]) ,public void setShard(org.elasticsearch.index.shard.ShardId) ,public org.elasticsearch.rest.RestStatus status() ,public java.lang.String toString() ,public org.elasticsearch.common.xcontent.XContentBuilder toXContent(org.elasticsearch.common.xcontent.XContentBuilder, org.elasticsearch.common.xcontent.ToXContent.Params) throws java.io.IOException,public java.lang.Throwable unwrapCause() ,public static T writeStackTraces(T, org.elasticsearch.common.io.stream.StreamOutput, Writer<java.lang.Throwable>) throws java.io.IOException,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>private static final java.lang.String CAUSED_BY,private static final non-sealed Map<Class<? extends org.elasticsearch.ElasticsearchException>,org.elasticsearch.ElasticsearchException.ElasticsearchExceptionHandle> CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE,private static final java.lang.String HEADER,private static final non-sealed Map<java.lang.Integer,CheckedFunction<org.elasticsearch.common.io.stream.StreamInput,? 
extends org.elasticsearch.ElasticsearchException,java.io.IOException>> ID_TO_SUPPLIER,private static final java.lang.String INDEX_METADATA_KEY,private static final java.lang.String INDEX_METADATA_KEY_UUID,private static final java.lang.String REASON,private static final java.lang.String RESOURCE_METADATA_ID_KEY,private static final java.lang.String RESOURCE_METADATA_TYPE_KEY,private static final java.lang.String REST_EXCEPTION_SKIP_CAUSE,private static final boolean REST_EXCEPTION_SKIP_CAUSE_DEFAULT,public static final java.lang.String REST_EXCEPTION_SKIP_STACK_TRACE,public static final boolean REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT,private static final java.lang.String ROOT_CAUSE,private static final java.lang.String SHARD_METADATA_KEY,private static final java.lang.String STACK_TRACE,private static final org.elasticsearch.common.xcontent.ParseField SUPPRESSED,private static final java.lang.String TYPE,private static final org.elasticsearch.Version UNKNOWN_VERSION_ADDED,private final Map<java.lang.String,List<java.lang.String>> headers,private final Map<java.lang.String,List<java.lang.String>> metadata
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java
|
TransportClearVotingConfigExclusionsAction
|
submitClearVotingConfigExclusionsTask
|
class TransportClearVotingConfigExclusionsAction
extends TransportMasterNodeAction<ClearVotingConfigExclusionsRequest, ClearVotingConfigExclusionsResponse> {
@Inject
public TransportClearVotingConfigExclusionsAction(TransportService transportService,
ClusterService clusterService,
ThreadPool threadPool) {
super(ClearVotingConfigExclusionsAction.NAME, transportService, clusterService, threadPool,
ClearVotingConfigExclusionsRequest::new);
}
@Override
protected String executor() {
return Names.SAME;
}
@Override
protected ClearVotingConfigExclusionsResponse read(StreamInput in) throws IOException {
return new ClearVotingConfigExclusionsResponse(in);
}
@Override
protected void masterOperation(ClearVotingConfigExclusionsRequest request,
ClusterState initialState,
ActionListener<ClearVotingConfigExclusionsResponse> listener) throws Exception {
final long startTimeMillis = threadPool.relativeTimeInMillis();
final Predicate<ClusterState> allExclusionsRemoved = newState -> {
for (VotingConfigExclusion tombstone : initialState.getVotingConfigExclusions()) {
// NB checking for the existence of any node with this persistent ID, because persistent IDs are how votes are counted.
if (newState.nodes().nodeExists(tombstone.getNodeId())) {
return false;
}
}
return true;
};
if (request.getWaitForRemoval() && allExclusionsRemoved.test(initialState) == false) {
final ClusterStateObserver clusterStateObserver = new ClusterStateObserver(
initialState,
clusterService,
request.getTimeout(),
logger);
clusterStateObserver.waitForNextChange(new Listener() {
@Override
public void onNewClusterState(ClusterState state) {
submitClearVotingConfigExclusionsTask(request, startTimeMillis, listener);
}
@Override
public void onClusterServiceClose() {
listener.onFailure(new ElasticsearchException("cluster service closed while waiting for removal of nodes "
+ initialState.getVotingConfigExclusions()));
}
@Override
public void onTimeout(TimeValue timeout) {
listener.onFailure(new ElasticsearchTimeoutException(
"timed out waiting for removal of nodes; if nodes should not be removed, set waitForRemoval to false. "
+ initialState.getVotingConfigExclusions()));
}
}, allExclusionsRemoved);
} else {
submitClearVotingConfigExclusionsTask(request, startTimeMillis, listener);
}
}
private void submitClearVotingConfigExclusionsTask(ClearVotingConfigExclusionsRequest request, long startTimeMillis,
ActionListener<ClearVotingConfigExclusionsResponse> listener) {<FILL_FUNCTION_BODY>}
@Override
protected ClusterBlockException checkBlock(ClearVotingConfigExclusionsRequest request, ClusterState state) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
}
}
|
clusterService.submitStateUpdateTask("clear-voting-config-exclusions", new ClusterStateUpdateTask(Priority.URGENT) {
@Override
public ClusterState execute(ClusterState currentState) {
CoordinationMetadata newCoordinationMetadata = CoordinationMetadata
.builder(currentState.coordinationMetadata())
.clearVotingConfigExclusions()
.build();
Metadata newMetadata = Metadata
.builder(currentState.metadata())
.coordinationMetadata(newCoordinationMetadata)
.build();
return ClusterState.builder(currentState).metadata(newMetadata).build();
}
@Override
public void onFailure(String source, Exception e) {
listener.onFailure(e);
}
@Override
public TimeValue timeout() {
return TimeValue.timeValueMillis(request.getTimeout().millis() + startTimeMillis - threadPool.relativeTimeInMillis());
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
listener.onResponse(new ClearVotingConfigExclusionsResponse());
}
});
| 767
| 292
| 1,059
|
<methods><variables>protected final non-sealed org.elasticsearch.cluster.service.ClusterService clusterService,private final non-sealed java.lang.String executor,protected final non-sealed org.elasticsearch.threadpool.ThreadPool threadPool,protected final non-sealed org.elasticsearch.transport.TransportService transportService
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java
|
PutRepositoryRequest
|
settings
|
class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryRequest> implements ToXContentObject {
private String name;
private String type;
private boolean verify = true;
private Settings settings = EMPTY_SETTINGS;
public PutRepositoryRequest() {
}
/**
* Constructs a new put repository request with the provided name.
*/
public PutRepositoryRequest(String name) {
this.name = name;
}
/**
* Sets the name of the repository.
*
* @param name repository name
*/
public PutRepositoryRequest name(String name) {
this.name = name;
return this;
}
/**
* The name of the repository.
*
* @return repository name
*/
public String name() {
return this.name;
}
/**
* The type of the repository
* <ul>
* <li>"fs" - shared filesystem repository</li>
* </ul>
*
* @param type repository type
* @return this request
*/
public PutRepositoryRequest type(String type) {
this.type = type;
return this;
}
/**
* Returns repository type
*
* @return repository type
*/
public String type() {
return this.type;
}
/**
* Sets the repository settings
*
* @param settings repository settings
* @return this request
*/
public PutRepositoryRequest settings(Settings settings) {
this.settings = settings;
return this;
}
/**
* Sets the repository settings
*
* @param settings repository settings
* @return this request
*/
public PutRepositoryRequest settings(Settings.Builder settings) {
this.settings = settings.build();
return this;
}
/**
* Sets the repository settings.
*
* @param source repository settings in json or yaml format
* @param xContentType the content type of the source
* @return this request
*/
public PutRepositoryRequest settings(String source, XContentType xContentType) {
this.settings = Settings.builder().loadFromSource(source, xContentType).build();
return this;
}
/**
* Sets the repository settings.
*
* @param source repository settings
* @return this request
*/
public PutRepositoryRequest settings(Map<String, Object> source) {<FILL_FUNCTION_BODY>}
/**
* Returns repository settings
*
* @return repository settings
*/
public Settings settings() {
return this.settings;
}
/**
* Sets whether or not the repository should be verified after creation
*/
public PutRepositoryRequest verify(boolean verify) {
this.verify = verify;
return this;
}
/**
* Returns true if repository should be verified after creation
*/
public boolean verify() {
return this.verify;
}
/**
* Parses repository definition.
*
* @param repositoryDefinition repository definition
*/
public PutRepositoryRequest source(Map<String, Object> repositoryDefinition) {
for (Map.Entry<String, Object> entry : repositoryDefinition.entrySet()) {
String name = entry.getKey();
if (name.equals("type")) {
type(entry.getValue().toString());
} else if (name.equals("settings")) {
if (!(entry.getValue() instanceof Map)) {
throw new IllegalArgumentException("Malformed settings section, should include an inner object");
}
@SuppressWarnings("unchecked")
Map<String, Object> sub = (Map<String, Object>) entry.getValue();
settings(sub);
}
}
return this;
}
public PutRepositoryRequest(StreamInput in) throws IOException {
super(in);
name = in.readString();
type = in.readString();
settings = readSettingsFromStream(in);
verify = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(name);
out.writeString(type);
writeSettingsToStream(out, settings);
out.writeBoolean(verify);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("name", name);
builder.field("type", type);
builder.startObject("settings");
settings.toXContent(builder, params);
builder.endObject();
builder.field("verify", verify);
builder.endObject();
return builder;
}
}
|
try {
XContentBuilder builder = JsonXContent.builder();
builder.map(source);
settings(Strings.toString(builder), builder.contentType());
} catch (IOException e) {
throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
}
return this;
| 1,204
| 84
| 1,288
|
<methods>public void <init>(org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public io.crate.common.unit.TimeValue ackTimeout() ,public final org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest timeout(java.lang.String) ,public final org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest timeout(io.crate.common.unit.TimeValue) ,public final io.crate.common.unit.TimeValue timeout() ,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>public static final io.crate.common.unit.TimeValue DEFAULT_ACK_TIMEOUT,protected io.crate.common.unit.TimeValue timeout
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java
|
ClusterRerouteResponseAckedClusterStateUpdateTask
|
onFailure
|
class ClusterRerouteResponseAckedClusterStateUpdateTask extends AckedClusterStateUpdateTask<ClusterRerouteResponse> {
private final ClusterRerouteRequest request;
private final ActionListener<ClusterRerouteResponse> listener;
private final Logger logger;
private final AllocationService allocationService;
private volatile ClusterState clusterStateToSend;
private volatile RoutingExplanations explanations;
ClusterRerouteResponseAckedClusterStateUpdateTask(Logger logger, AllocationService allocationService, ClusterRerouteRequest request,
ActionListener<ClusterRerouteResponse> listener) {
super(Priority.IMMEDIATE, request, listener);
this.request = request;
this.listener = listener;
this.logger = logger;
this.allocationService = allocationService;
}
@Override
protected ClusterRerouteResponse newResponse(boolean acknowledged) {
return new ClusterRerouteResponse(acknowledged, clusterStateToSend, explanations);
}
@Override
public void onAckTimeout() {
listener.onResponse(new ClusterRerouteResponse(false, clusterStateToSend, new RoutingExplanations()));
}
@Override
public void onFailure(String source, Exception e) {<FILL_FUNCTION_BODY>}
@Override
public ClusterState execute(ClusterState currentState) {
AllocationService.CommandsResult commandsResult =
allocationService.reroute(currentState, request.getCommands(), request.explain(), request.isRetryFailed());
clusterStateToSend = commandsResult.getClusterState();
explanations = commandsResult.explanations();
if (request.dryRun()) {
return currentState;
}
return commandsResult.getClusterState();
}
}
|
logger.debug(() -> new ParameterizedMessage("failed to perform [{}]", source), e);
super.onFailure(source, e);
| 447
| 39
| 486
|
<methods><variables>protected final non-sealed org.elasticsearch.cluster.service.ClusterService clusterService,private final non-sealed java.lang.String executor,protected final non-sealed org.elasticsearch.threadpool.ThreadPool threadPool,protected final non-sealed org.elasticsearch.transport.TransportService transportService
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java
|
ClusterUpdateSettingsResponse
|
equals
|
class ClusterUpdateSettingsResponse extends AcknowledgedResponse {
private final Settings transientSettings;
private final Settings persistentSettings;
ClusterUpdateSettingsResponse(boolean acknowledged, Settings transientSettings, Settings persistentSettings) {
super(acknowledged);
this.persistentSettings = persistentSettings;
this.transientSettings = transientSettings;
}
public ClusterUpdateSettingsResponse(StreamInput in) throws IOException {
super(in);
transientSettings = Settings.readSettingsFromStream(in);
persistentSettings = Settings.readSettingsFromStream(in);
}
public Settings getTransientSettings() {
return transientSettings;
}
public Settings getPersistentSettings() {
return persistentSettings;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
Settings.writeSettingsToStream(out, transientSettings);
Settings.writeSettingsToStream(out, persistentSettings);
}
@Override
public boolean equals(Object o) {<FILL_FUNCTION_BODY>}
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), transientSettings, persistentSettings);
}
}
|
if (super.equals(o)) {
ClusterUpdateSettingsResponse that = (ClusterUpdateSettingsResponse) o;
return Objects.equals(transientSettings, that.transientSettings) &&
Objects.equals(persistentSettings, that.persistentSettings);
}
return false;
| 303
| 75
| 378
|
<methods>public void <init>(boolean) ,public void <init>(org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public boolean equals(java.lang.Object) ,public int hashCode() ,public final boolean isAcknowledged() ,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>protected final non-sealed boolean acknowledged
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java
|
CreateSnapshotResponse
|
toXContent
|
class CreateSnapshotResponse extends TransportResponse implements ToXContentObject {
@Nullable
private SnapshotInfo snapshotInfo;
CreateSnapshotResponse(@Nullable SnapshotInfo snapshotInfo) {
this.snapshotInfo = snapshotInfo;
}
CreateSnapshotResponse() {
}
/**
* Returns snapshot information if snapshot was completed by the time this method returned or null otherwise.
*
* @return snapshot information or null
*/
public SnapshotInfo getSnapshotInfo() {
return snapshotInfo;
}
public CreateSnapshotResponse(StreamInput in) throws IOException {
snapshotInfo = in.readOptionalWriteable(SnapshotInfo::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalWriteable(snapshotInfo);
}
/**
* Returns HTTP status
* <ul>
* <li>{@link RestStatus#ACCEPTED} if snapshot is still in progress</li>
* <li>{@link RestStatus#OK} if snapshot was successful or partially successful</li>
* <li>{@link RestStatus#INTERNAL_SERVER_ERROR} if snapshot failed completely</li>
* </ul>
*/
public RestStatus status() {
if (snapshotInfo == null) {
return RestStatus.ACCEPTED;
}
return snapshotInfo.status();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {<FILL_FUNCTION_BODY>}
@Override
public String toString() {
return "CreateSnapshotResponse{" +
"snapshotInfo=" + snapshotInfo +
'}';
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
CreateSnapshotResponse that = (CreateSnapshotResponse) o;
return Objects.equals(snapshotInfo, that.snapshotInfo);
}
@Override
public int hashCode() {
return Objects.hash(snapshotInfo);
}
}
|
builder.startObject();
if (snapshotInfo != null) {
builder.field("snapshot");
snapshotInfo.toXContent(builder, params);
} else {
builder.field("accepted", true);
}
builder.endObject();
return builder;
| 538
| 75
| 613
|
<methods>public non-sealed void <init>() <variables>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java
|
DeleteSnapshotRequest
|
writeTo
|
class DeleteSnapshotRequest extends MasterNodeRequest<DeleteSnapshotRequest> {
private String repository;
private String[] snapshots;
/**
* Constructs a new delete snapshots request
*/
public DeleteSnapshotRequest() {
}
/**
* Constructs a new delete snapshots request with repository and snapshot names
*
* @param repository repository name
* @param snapshots snapshot names
*/
public DeleteSnapshotRequest(String repository, String... snapshots) {
this.repository = repository;
this.snapshots = snapshots;
}
public DeleteSnapshotRequest(StreamInput in) throws IOException {
super(in);
repository = in.readString();
if (in.getVersion().onOrAfter(SnapshotsService.MULTI_DELETE_VERSION)) {
snapshots = in.readStringArray();
} else {
snapshots = new String[] {in.readString()};
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {<FILL_FUNCTION_BODY>}
public DeleteSnapshotRequest repository(String repository) {
this.repository = repository;
return this;
}
/**
* Returns repository name
*
* @return repository name
*/
public String repository() {
return this.repository;
}
/**
* Returns snapshot names
*
* @return snapshot names
*/
public String[] snapshots() {
return this.snapshots;
}
/**
* Sets snapshot names
*
* @return this request
*/
public DeleteSnapshotRequest snapshots(String... snapshots) {
this.snapshots = snapshots;
return this;
}
}
|
super.writeTo(out);
out.writeString(repository);
if (out.getVersion().onOrAfter(SnapshotsService.MULTI_DELETE_VERSION)) {
out.writeStringArray(snapshots);
} else {
if (snapshots.length != 1) {
throw new IllegalArgumentException(
"Can't write snapshot delete with more than one snapshot to version [" + out.getVersion() + "]");
}
out.writeString(snapshots[0]);
}
| 436
| 129
| 565
|
<methods>public final org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest masterNodeTimeout(io.crate.common.unit.TimeValue) ,public final org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest masterNodeTimeout(java.lang.String) ,public final io.crate.common.unit.TimeValue masterNodeTimeout() ,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>public static final io.crate.common.unit.TimeValue DEFAULT_MASTER_NODE_TIMEOUT,protected io.crate.common.unit.TimeValue masterNodeTimeout
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java
|
ClusterStateResponse
|
writeTo
|
class ClusterStateResponse extends TransportResponse {
private final ClusterName clusterName;
private final ClusterState clusterState;
private boolean waitForTimedOut = false;
public ClusterStateResponse(ClusterName clusterName, ClusterState clusterState, boolean waitForTimedOut) {
this.clusterName = clusterName;
this.clusterState = clusterState;
this.waitForTimedOut = waitForTimedOut;
}
/**
* The requested cluster state. Only the parts of the cluster state that were
* requested are included in the returned {@link ClusterState} instance.
*/
public ClusterState getState() {
return this.clusterState;
}
/**
* The name of the cluster.
*/
public ClusterName getClusterName() {
return this.clusterName;
}
/**
* Returns whether the request timed out waiting for a cluster state with a metadata version equal or
* higher than the specified metadata.
*/
public boolean isWaitForTimedOut() {
return waitForTimedOut;
}
public ClusterStateResponse(StreamInput in) throws IOException {
clusterName = new ClusterName(in);
if (in.getVersion().onOrAfter(Version.V_4_4_0)) {
clusterState = in.readOptionalWriteable(innerIn -> ClusterState.readFrom(innerIn, null));
waitForTimedOut = in.readBoolean();
} else {
clusterState = ClusterState.readFrom(in, null);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {<FILL_FUNCTION_BODY>}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ClusterStateResponse response = (ClusterStateResponse) o;
return waitForTimedOut == response.waitForTimedOut &&
Objects.equals(clusterName, response.clusterName) &&
// Best effort. Only compare cluster state version and master node id,
// because cluster state doesn't implement equals()
Objects.equals(getVersion(clusterState), getVersion(response.clusterState)) &&
Objects.equals(getMasterNodeId(clusterState), getMasterNodeId(response.clusterState));
}
@Override
public int hashCode() {
// Best effort. Only use cluster state version and master node id,
// because cluster state doesn't implement hashcode()
return Objects.hash(
clusterName,
getVersion(clusterState),
getMasterNodeId(clusterState),
waitForTimedOut
);
}
private static String getMasterNodeId(ClusterState clusterState) {
if (clusterState == null) {
return null;
}
DiscoveryNodes nodes = clusterState.nodes();
if (nodes != null) {
return nodes.getMasterNodeId();
} else {
return null;
}
}
private static Long getVersion(ClusterState clusterState) {
if (clusterState != null) {
return clusterState.version();
} else {
return null;
}
}
}
|
clusterName.writeTo(out);
if (out.getVersion().onOrAfter(Version.V_4_4_0)) {
out.writeOptionalWriteable(clusterState);
out.writeBoolean(waitForTimedOut);
} else {
clusterState.writeTo(out);
}
| 819
| 80
| 899
|
<methods>public non-sealed void <init>() <variables>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreatePartitionsRequest.java
|
CreatePartitionsRequest
|
writeTo
|
class CreatePartitionsRequest extends AcknowledgedRequest<CreatePartitionsRequest> {
private final Collection<String> indices;
/**
* Constructs a new request to create indices with the specified names.
*/
public CreatePartitionsRequest(Collection<String> indices) {
this.indices = indices;
}
public Collection<String> indices() {
return indices;
}
public CreatePartitionsRequest(StreamInput in) throws IOException {
super(in);
if (in.getVersion().before(Version.V_5_3_0)) {
// The only usage of jobId was removed in https://github.com/crate/crate/commit/31e0f7f447eaa006e756c20bd32346b2680ebee6
// Nodes < 5.3.0 still send UUID which is written as 2 longs, we consume them but don't create an UUID out of them.
in.readLong();
in.readLong();
}
int numIndices = in.readVInt();
List<String> indicesList = new ArrayList<>(numIndices);
for (int i = 0; i < numIndices; i++) {
indicesList.add(in.readString());
}
this.indices = indicesList;
}
@Override
public void writeTo(StreamOutput out) throws IOException {<FILL_FUNCTION_BODY>}
}
|
super.writeTo(out);
if (out.getVersion().before(Version.V_5_3_0)) {
// Nodes < 5.3.0 still expect 2 longs.
// They are used to construct an UUID but last time they were actually used in CrateDB 0.55.0.
// Hence, sending dummy values.
out.writeLong(0L);
out.writeLong(0L);
}
out.writeVInt(indices.size());
for (String index : indices) {
out.writeString(index);
}
| 370
| 150
| 520
|
<methods>public void <init>(org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public io.crate.common.unit.TimeValue ackTimeout() ,public final org.elasticsearch.action.admin.indices.create.CreatePartitionsRequest timeout(java.lang.String) ,public final org.elasticsearch.action.admin.indices.create.CreatePartitionsRequest timeout(io.crate.common.unit.TimeValue) ,public final io.crate.common.unit.TimeValue timeout() ,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>public static final io.crate.common.unit.TimeValue DEFAULT_ACK_TIMEOUT,protected io.crate.common.unit.TimeValue timeout
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java
|
FlushRequest
|
toString
|
class FlushRequest extends BroadcastRequest<FlushRequest> {
private boolean force = false;
private boolean waitIfOngoing = true;
/**
* Constructs a new flush request against one or more indices. If nothing is provided, all indices will
* be flushed.
*/
public FlushRequest(String... indices) {
super(indices);
}
/**
* Returns {@code true} iff a flush should block
* if a another flush operation is already running. Otherwise {@code false}
*/
public boolean waitIfOngoing() {
return this.waitIfOngoing;
}
/**
* if set to {@code true} the flush will block
* if a another flush operation is already running until the flush can be performed.
* The default is <code>true</code>
*/
public FlushRequest waitIfOngoing(boolean waitIfOngoing) {
this.waitIfOngoing = waitIfOngoing;
return this;
}
/**
* Force flushing, even if one is possibly not needed.
*/
public boolean force() {
return force;
}
/**
* Force flushing, even if one is possibly not needed.
*/
public FlushRequest force(boolean force) {
this.force = force;
return this;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(force);
out.writeBoolean(waitIfOngoing);
}
public FlushRequest(StreamInput in) throws IOException {
super(in);
force = in.readBoolean();
waitIfOngoing = in.readBoolean();
}
@Override
public String toString() {<FILL_FUNCTION_BODY>}
}
|
return "FlushRequest{" +
"waitIfOngoing=" + waitIfOngoing +
", force=" + force + "}";
| 477
| 43
| 520
|
<methods>public void <init>() ,public void <init>(org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public java.lang.String[] indices() ,public final transient org.elasticsearch.action.admin.indices.flush.FlushRequest indices(java.lang.String[]) ,public org.elasticsearch.action.support.IndicesOptions indicesOptions() ,public final org.elasticsearch.action.admin.indices.flush.FlushRequest indicesOptions(org.elasticsearch.action.support.IndicesOptions) ,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>protected java.lang.String[] indices,private org.elasticsearch.action.support.IndicesOptions indicesOptions
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java
|
UpdateSettingsRequest
|
fromXContent
|
class UpdateSettingsRequest extends AcknowledgedRequest<UpdateSettingsRequest>
implements IndicesRequest.Replaceable, ToXContentObject {
private String[] indices;
private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, true);
private Settings settings = EMPTY_SETTINGS;
private boolean preserveExisting = false;
/**
* Constructs a new request to update settings for one or more indices
*/
public UpdateSettingsRequest(String... indices) {
this.indices = indices;
}
/**
* Constructs a new request to update settings for one or more indices
*/
public UpdateSettingsRequest(Settings settings, String... indices) {
this.indices = indices;
this.settings = settings;
}
@Override
public String[] indices() {
return indices;
}
public Settings settings() {
return settings;
}
/**
* Sets the indices to apply to settings update to
*/
@Override
public UpdateSettingsRequest indices(String... indices) {
this.indices = indices;
return this;
}
@Override
public IndicesOptions indicesOptions() {
return indicesOptions;
}
public UpdateSettingsRequest indicesOptions(IndicesOptions indicesOptions) {
this.indicesOptions = indicesOptions;
return this;
}
/**
* Sets the settings to be updated
*/
public UpdateSettingsRequest settings(Settings settings) {
this.settings = settings;
return this;
}
/**
* Sets the settings to be updated
*/
public UpdateSettingsRequest settings(Settings.Builder settings) {
this.settings = settings.build();
return this;
}
/**
* Sets the settings to be updated (either json or yaml format)
*/
public UpdateSettingsRequest settings(String source, XContentType xContentType) {
this.settings = Settings.builder().loadFromSource(source, xContentType).build();
return this;
}
/**
* Returns <code>true</code> iff the settings update should only add but not update settings. If the setting already exists
* it should not be overwritten by this update. The default is <code>false</code>
*/
public boolean isPreserveExisting() {
return preserveExisting;
}
/**
* Iff set to <code>true</code> this settings update will only add settings not already set on an index. Existing settings remain
* unchanged.
*/
public UpdateSettingsRequest setPreserveExisting(boolean preserveExisting) {
this.preserveExisting = preserveExisting;
return this;
}
/**
* Sets the settings to be updated (either json or yaml format)
*/
@SuppressWarnings("unchecked")
public UpdateSettingsRequest settings(Map source) {
try {
XContentBuilder builder = JsonXContent.builder();
builder.map(source);
settings(Strings.toString(builder), builder.contentType());
} catch (IOException e) {
throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
}
return this;
}
public UpdateSettingsRequest(StreamInput in) throws IOException {
super(in);
indices = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
settings = readSettingsFromStream(in);
preserveExisting = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArrayNullable(indices);
indicesOptions.writeIndicesOptions(out);
writeSettingsToStream(out, settings);
out.writeBoolean(preserveExisting);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
settings.toXContent(builder, params);
builder.endObject();
return builder;
}
public UpdateSettingsRequest fromXContent(XContentParser parser) throws IOException {<FILL_FUNCTION_BODY>}
@Override
public String toString() {
return "indices : " + Arrays.toString(indices) + "," + Strings.toString(this);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
UpdateSettingsRequest that = (UpdateSettingsRequest) o;
return masterNodeTimeout.equals(that.masterNodeTimeout)
&& timeout.equals(that.timeout)
&& Objects.equals(settings, that.settings)
&& Objects.equals(indicesOptions, that.indicesOptions)
&& Objects.equals(preserveExisting, that.preserveExisting)
&& Arrays.equals(indices, that.indices);
}
@Override
public int hashCode() {
return Objects.hash(masterNodeTimeout, timeout, settings, indicesOptions, preserveExisting, Arrays.hashCode(indices));
}
}
|
Map<String, Object> settings = new HashMap<>();
Map<String, Object> bodySettings = parser.map();
Object innerBodySettings = bodySettings.get("settings");
// clean up in case the body is wrapped with "settings" : { ... }
if (innerBodySettings instanceof Map) {
@SuppressWarnings("unchecked")
Map<String, Object> innerBodySettingsMap = (Map<String, Object>) innerBodySettings;
settings.putAll(innerBodySettingsMap);
} else {
settings.putAll(bodySettings);
}
return this.settings(settings);
| 1,312
| 153
| 1,465
|
<methods>public void <init>(org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public io.crate.common.unit.TimeValue ackTimeout() ,public final org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest timeout(java.lang.String) ,public final org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest timeout(io.crate.common.unit.TimeValue) ,public final io.crate.common.unit.TimeValue timeout() ,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>public static final io.crate.common.unit.TimeValue DEFAULT_ACK_TIMEOUT,protected io.crate.common.unit.TimeValue timeout
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java
|
TransportResizeAction
|
prepareCreateIndexRequest
|
class TransportResizeAction extends TransportMasterNodeAction<ResizeRequest, ResizeResponse> {
private final MetadataCreateIndexService createIndexService;
private final Client client;
private final NodeContext nodeContext;
@Inject
public TransportResizeAction(TransportService transportService,
ClusterService clusterService,
ThreadPool threadPool,
MetadataCreateIndexService createIndexService,
NodeContext nodeContext,
Client client) {
this(
ResizeAction.NAME,
transportService,
clusterService,
threadPool,
createIndexService,
nodeContext,
client
);
}
protected TransportResizeAction(String actionName,
TransportService transportService,
ClusterService clusterService,
ThreadPool threadPool,
MetadataCreateIndexService createIndexService,
NodeContext nodeContext,
Client client) {
super(actionName, transportService, clusterService, threadPool, ResizeRequest::new);
this.createIndexService = createIndexService;
this.client = client;
this.nodeContext = nodeContext;
}
@Override
protected String executor() {
// we go async right away
return ThreadPool.Names.SAME;
}
@Override
protected ResizeResponse read(StreamInput in) throws IOException {
return new ResizeResponse(in);
}
@Override
protected ClusterBlockException checkBlock(ResizeRequest request, ClusterState state) {
return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, request.getTargetIndexRequest().index());
}
@Override
protected void masterOperation(final ResizeRequest resizeRequest,
final ClusterState state,
final ActionListener<ResizeResponse> listener) {
// there is no need to fetch docs stats for split but we keep it simple and do it anyway for simplicity of the code
final String sourceIndex = resizeRequest.getSourceIndex();
final String targetIndex = resizeRequest.getTargetIndexRequest().index();
client.admin().indices().stats(new IndicesStatsRequest()
.clear()
.docs(true))
.whenComplete(ActionListener.delegateFailure(
listener,
(delegate, indicesStatsResponse) -> {
CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(
resizeRequest,
state,
i -> {
IndexShardStats shard = indicesStatsResponse.getIndex(sourceIndex).getIndexShards().get(i);
return shard == null ? null : shard.getPrimary().getDocs();
},
sourceIndex,
targetIndex
);
createIndexService.createIndex(
nodeContext,
updateRequest,
null,
delegate.map(
response -> new ResizeResponse(
response.isAcknowledged(),
response.isShardsAcknowledged(),
updateRequest.index()
)
)
);
}
));
}
// static for unittesting this method
static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final ResizeRequest resizeRequest,
final ClusterState state,
final IntFunction<DocsStats> perShardDocStats,
String sourceIndexName,
String targetIndexName) {<FILL_FUNCTION_BODY>}
}
|
final CreateIndexRequest targetIndex = resizeRequest.getTargetIndexRequest();
final IndexMetadata metadata = state.metadata().index(sourceIndexName);
if (metadata == null) {
throw new IndexNotFoundException(sourceIndexName);
}
final Settings.Builder targetIndexSettingsBuilder = Settings.builder().put(targetIndex.settings())
.normalizePrefix(IndexMetadata.INDEX_SETTING_PREFIX);
targetIndexSettingsBuilder.remove(IndexMetadata.SETTING_HISTORY_UUID);
final Settings targetIndexSettings = targetIndexSettingsBuilder.build();
final int numShards;
if (IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) {
numShards = IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings);
} else {
assert resizeRequest.getResizeType() == ResizeType.SHRINK : "split must specify the number of shards explicitly";
numShards = 1;
}
for (int i = 0; i < numShards; i++) {
if (resizeRequest.getResizeType() == ResizeType.SHRINK) {
Set<ShardId> shardIds = IndexMetadata.selectShrinkShards(i, metadata, numShards);
long count = 0;
for (ShardId id : shardIds) {
DocsStats docsStats = perShardDocStats.apply(id.id());
if (docsStats != null) {
count += docsStats.getCount();
}
if (count > IndexWriter.MAX_DOCS) {
throw new IllegalStateException("Can't merge index with more than [" + IndexWriter.MAX_DOCS
+ "] docs - too many documents in shards " + shardIds);
}
}
} else {
Objects.requireNonNull(IndexMetadata.selectSplitShard(i, metadata, numShards));
// we just execute this to ensure we get the right exceptions if the number of shards is wrong or less then etc.
}
}
if (IndexMetadata.INDEX_ROUTING_PARTITION_SIZE_SETTING.exists(targetIndexSettings)) {
throw new IllegalArgumentException("cannot provide a routing partition size value when resizing an index");
}
if (IndexMetadata.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.exists(targetIndexSettings)) {
throw new IllegalArgumentException("cannot provide index.number_of_routing_shards on resize");
}
if (IndexSettings.INDEX_SOFT_DELETES_SETTING.exists(metadata.getSettings()) &&
IndexSettings.INDEX_SOFT_DELETES_SETTING.get(metadata.getSettings()) &&
IndexSettings.INDEX_SOFT_DELETES_SETTING.exists(targetIndexSettings) &&
IndexSettings.INDEX_SOFT_DELETES_SETTING.get(targetIndexSettings) == false) {
throw new IllegalArgumentException("Can't disable [index.soft_deletes.enabled] setting on resize");
}
String cause = resizeRequest.getResizeType().name().toLowerCase(Locale.ROOT) + "_index";
targetIndex.cause(cause);
Settings.Builder settingsBuilder = Settings.builder().put(targetIndexSettings);
settingsBuilder.put("index.number_of_shards", numShards);
targetIndex.settings(settingsBuilder);
return new CreateIndexClusterStateUpdateRequest(cause, targetIndex.index(), targetIndexName)
// mappings are updated on the node when creating in the shards, this prevents race-conditions since all mapping must be
// applied once we took the snapshot and if somebody messes things up and switches the index read/write and adds docs we
// miss the mappings for everything is corrupted and hard to debug
.ackTimeout(targetIndex.timeout())
.masterNodeTimeout(targetIndex.masterNodeTimeout())
.settings(targetIndex.settings())
.aliases(targetIndex.aliases())
.waitForActiveShards(targetIndex.waitForActiveShards())
.recoverFrom(metadata.getIndex())
.resizeType(resizeRequest.getResizeType())
.copySettings(resizeRequest.getCopySettings() == null ? false : resizeRequest.getCopySettings());
| 832
| 1,066
| 1,898
|
<methods><variables>protected final non-sealed org.elasticsearch.cluster.service.ClusterService clusterService,private final non-sealed java.lang.String executor,protected final non-sealed org.elasticsearch.threadpool.ThreadPool threadPool,protected final non-sealed org.elasticsearch.transport.TransportService transportService
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java
|
IndexStats
|
getIndexShards
|
class IndexStats implements Iterable<IndexShardStats> {
private final String index;
private final String uuid;
private final ShardStats[] shards;
public IndexStats(String index, String uuid, ShardStats[] shards) {
this.index = index;
this.uuid = uuid;
this.shards = shards;
}
public String getIndex() {
return this.index;
}
public String getUuid() {
return uuid;
}
public ShardStats[] getShards() {
return this.shards;
}
private Map<Integer, IndexShardStats> indexShards;
public Map<Integer, IndexShardStats> getIndexShards() {<FILL_FUNCTION_BODY>}
@Override
public Iterator<IndexShardStats> iterator() {
return getIndexShards().values().iterator();
}
private CommonStats total = null;
public CommonStats getTotal() {
if (total != null) {
return total;
}
CommonStats stats = new CommonStats();
for (ShardStats shard : shards) {
stats.add(shard.getStats());
}
total = stats;
return stats;
}
private CommonStats primary = null;
public CommonStats getPrimaries() {
if (primary != null) {
return primary;
}
CommonStats stats = new CommonStats();
for (ShardStats shard : shards) {
if (shard.getShardRouting().primary()) {
stats.add(shard.getStats());
}
}
primary = stats;
return stats;
}
}
|
if (indexShards != null) {
return indexShards;
}
Map<Integer, List<ShardStats>> tmpIndexShards = new HashMap<>();
for (ShardStats shard : shards) {
List<ShardStats> lst = tmpIndexShards.get(shard.getShardRouting().id());
if (lst == null) {
lst = new ArrayList<>();
tmpIndexShards.put(shard.getShardRouting().id(), lst);
}
lst.add(shard);
}
indexShards = new HashMap<>();
for (Map.Entry<Integer, List<ShardStats>> entry : tmpIndexShards.entrySet()) {
indexShards.put(entry.getKey(), new IndexShardStats(entry.getValue().get(0).getShardRouting().shardId(), entry.getValue().toArray(new ShardStats[entry.getValue().size()])));
}
return indexShards;
| 444
| 253
| 697
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java
|
IndicesStatsResponse
|
getIndices
|
class IndicesStatsResponse extends BroadcastResponse {
private final ShardStats[] shards;
IndicesStatsResponse(ShardStats[] shards, int totalShards, int successfulShards, int failedShards,
List<DefaultShardOperationFailedException> shardFailures) {
super(totalShards, successfulShards, failedShards, shardFailures);
this.shards = shards;
}
public ShardStats[] getShards() {
return this.shards;
}
public IndexStats getIndex(String index) {
return getIndices().get(index);
}
private Map<String, IndexStats> indicesStats;
public Map<String, IndexStats> getIndices() {<FILL_FUNCTION_BODY>}
public IndicesStatsResponse(StreamInput in) throws IOException {
super(in);
shards = in.readArray(ShardStats::new, ShardStats[]::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeArray(shards);
}
}
|
if (indicesStats != null) {
return indicesStats;
}
Map<String, IndexStats> indicesStats = new HashMap<>();
Set<Index> indices = new HashSet<>();
for (ShardStats shard : shards) {
indices.add(shard.getShardRouting().index());
}
for (Index index : indices) {
List<ShardStats> shards = new ArrayList<>();
String indexName = index.getName();
for (ShardStats shard : this.shards) {
if (shard.getShardRouting().getIndexName().equals(indexName)) {
shards.add(shard);
}
}
indicesStats.put(
indexName, new IndexStats(indexName, index.getUUID(), shards.toArray(new ShardStats[0]))
);
}
this.indicesStats = indicesStats;
return indicesStats;
| 288
| 240
| 528
|
<methods>public void <init>(int, int, int, List<org.elasticsearch.action.support.DefaultShardOperationFailedException>) ,public void <init>(org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public int getFailedShards() ,public org.elasticsearch.action.support.DefaultShardOperationFailedException[] getShardFailures() ,public org.elasticsearch.rest.RestStatus getStatus() ,public int getSuccessfulShards() ,public int getTotalShards() ,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>public static final org.elasticsearch.action.support.DefaultShardOperationFailedException[] EMPTY,private final non-sealed int failedShards,private final non-sealed org.elasticsearch.action.support.DefaultShardOperationFailedException[] shardFailures,private final non-sealed int successfulShards,private final non-sealed int totalShards
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java
|
ExponentialBackoffIterator
|
next
|
class ExponentialBackoffIterator implements Iterator<TimeValue> {
private final int numberOfElements;
private final int start;
private int currentlyConsumed;
private ExponentialBackoffIterator(int start, int numberOfElements) {
this.start = start;
this.numberOfElements = numberOfElements;
}
@Override
public boolean hasNext() {
return currentlyConsumed < numberOfElements;
}
@Override
public TimeValue next() {<FILL_FUNCTION_BODY>}
}
|
if (!hasNext()) {
throw new NoSuchElementException("Only up to " + numberOfElements + " elements");
}
int result = start + 10 * ((int) Math.exp(0.8d * (currentlyConsumed)) - 1);
currentlyConsumed++;
return TimeValue.timeValueMillis(result);
| 140
| 89
| 229
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/action/support/ActiveShardsObserver.java
|
ActiveShardsObserver
|
waitForActiveShards
|
class ActiveShardsObserver {
private static final Logger LOGGER = LogManager.getLogger(ActiveShardsObserver.class);
private final ClusterService clusterService;
public ActiveShardsObserver(final ClusterService clusterService) {
this.clusterService = clusterService;
}
/**
* Waits on the specified number of active shards to be started before executing the
*
* @param indexNames the indices to wait for active shards on
* @param activeShardCount the number of active shards to wait on before returning
* @param timeout the timeout value
* @param onResult a function that is executed in response to the requisite shards becoming active or a timeout (whichever comes first)
* @param onFailure a function that is executed in response to an error occurring during waiting for the active shards
*/
public void waitForActiveShards(final String[] indexNames,
final ActiveShardCount activeShardCount,
final TimeValue timeout,
final Consumer<Boolean> onResult,
final Consumer<Exception> onFailure) {<FILL_FUNCTION_BODY>}
}
|
// wait for the configured number of active shards to be allocated before executing the result consumer
if (activeShardCount == ActiveShardCount.NONE) {
// not waiting, so just run whatever we were to run when the waiting is
onResult.accept(true);
return;
}
final ClusterState state = clusterService.state();
final ClusterStateObserver observer = new ClusterStateObserver(state, clusterService, null, LOGGER);
if (activeShardCount.enoughShardsActive(state, indexNames)) {
onResult.accept(true);
} else {
final Predicate<ClusterState> shardsAllocatedPredicate = newState -> activeShardCount.enoughShardsActive(newState, indexNames);
final ClusterStateObserver.Listener observerListener = new ClusterStateObserver.Listener() {
@Override
public void onNewClusterState(ClusterState state) {
onResult.accept(true);
}
@Override
public void onClusterServiceClose() {
LOGGER.debug("[{}] cluster service closed while waiting for enough shards to be started.", Arrays.toString(indexNames));
onFailure.accept(new NodeClosedException(clusterService.localNode()));
}
@Override
public void onTimeout(TimeValue timeout) {
onResult.accept(false);
}
};
observer.waitForNextChange(observerListener, shardsAllocatedPredicate, timeout);
}
| 277
| 363
| 640
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java
|
ChannelActionListener
|
onFailure
|
class ChannelActionListener<
Response extends TransportResponse, Request extends TransportRequest> implements ActionListener<Response> {
private static final Logger LOGGER = LogManager.getLogger(ChannelActionListener.class);
private final TransportChannel channel;
private final Request request;
private final String actionName;
public ChannelActionListener(TransportChannel channel, String actionName, Request request) {
this.channel = channel;
this.request = request;
this.actionName = actionName;
}
@Override
public void onResponse(Response response) {
try {
channel.sendResponse(response);
} catch (Exception e) {
onFailure(e);
}
}
@Override
public void onFailure(Exception e) {<FILL_FUNCTION_BODY>}
}
|
try {
channel.sendResponse(e);
} catch (Exception e1) {
e1.addSuppressed(e);
LOGGER.warn(() -> new ParameterizedMessage(
"Failed to send error response for action [{}] and request [{}]", actionName, request), e1);
}
| 201
| 82
| 283
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java
|
DefaultShardOperationFailedException
|
toString
|
class DefaultShardOperationFailedException extends ShardOperationFailedException {
private static final String INDEX = "index";
private static final String SHARD_ID = "shard";
private static final String REASON = "reason";
private static final ConstructingObjectParser<DefaultShardOperationFailedException, Void> PARSER = new ConstructingObjectParser<>(
"failures", true, arg -> new DefaultShardOperationFailedException((String) arg[0], (int) arg[1] ,(Throwable) arg[2]));
static {
PARSER.declareString(constructorArg(), new ParseField(INDEX));
PARSER.declareInt(constructorArg(), new ParseField(SHARD_ID));
PARSER.declareObject(constructorArg(), (p, c) -> ElasticsearchException.fromXContent(p), new ParseField(REASON));
}
public DefaultShardOperationFailedException(ElasticsearchException e) {
super(
e.getIndex() == null ? null : e.getIndex().getName(),
e.getShardId() == null ? -1 : e.getShardId().id(),
Exceptions.stackTrace(e),
e.status(),
e
);
}
public DefaultShardOperationFailedException(String index, int shardId, Throwable cause) {
super(index, shardId, Exceptions.stackTrace(cause), SQLExceptions.status(cause), cause);
}
public DefaultShardOperationFailedException(StreamInput in) throws IOException {
index = in.readOptionalString();
shardId = in.readVInt();
cause = in.readException();
status = RestStatus.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalString(index);
out.writeVInt(shardId);
out.writeException(cause);
RestStatus.writeTo(out, status);
}
@Override
public String toString() {<FILL_FUNCTION_BODY>}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
innerToXContent(builder, params);
builder.endObject();
return builder;
}
protected XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
builder.field("shard", shardId());
builder.field("index", index());
builder.field("status", status.name());
if (reason != null) {
builder.startObject("reason");
ElasticsearchException.generateThrowableXContent(builder, params, cause);
builder.endObject();
}
return builder;
}
public static DefaultShardOperationFailedException fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
}
}
|
return "[" + index + "][" + shardId + "] failed, reason [" + reason() + "]";
| 726
| 32
| 758
|
<methods>public final java.lang.Throwable getCause() ,public final java.lang.String index() ,public final java.lang.String reason() ,public final int shardId() ,public final org.elasticsearch.rest.RestStatus status() <variables>protected java.lang.Throwable cause,protected java.lang.String index,protected java.lang.String reason,protected int shardId,protected org.elasticsearch.rest.RestStatus status
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java
|
DestructiveOperations
|
failDestructive
|
class DestructiveOperations {
/**
* Setting which controls whether wildcard usage (*, prefix*, _all) is allowed.
*/
public static final Setting<Boolean> REQUIRES_NAME_SETTING =
Setting.boolSetting("action.destructive_requires_name", false, Property.Dynamic, Property.NodeScope);
private volatile boolean destructiveRequiresName;
public DestructiveOperations(Settings settings, ClusterSettings clusterSettings) {
destructiveRequiresName = REQUIRES_NAME_SETTING.get(settings);
clusterSettings.addSettingsUpdateConsumer(REQUIRES_NAME_SETTING, this::setDestructiveRequiresName);
}
private void setDestructiveRequiresName(boolean destructiveRequiresName) {
this.destructiveRequiresName = destructiveRequiresName;
}
/**
* Fail if there is wildcard usage in indices and the named is required for destructive operations.
*/
public void failDestructive(String[] aliasesOrIndices) {<FILL_FUNCTION_BODY>}
private static boolean hasWildcardUsage(String aliasOrIndex) {
return "_all".equals(aliasOrIndex) || aliasOrIndex.indexOf('*') != -1;
}
}
|
if (!destructiveRequiresName) {
return;
}
if (aliasesOrIndices == null || aliasesOrIndices.length == 0) {
throw new IllegalArgumentException("Wildcard expressions or all indices are not allowed");
} else if (aliasesOrIndices.length == 1) {
if (hasWildcardUsage(aliasesOrIndices[0])) {
throw new IllegalArgumentException("Wildcard expressions or all indices are not allowed");
}
} else {
for (String aliasesOrIndex : aliasesOrIndices) {
if (hasWildcardUsage(aliasesOrIndex)) {
throw new IllegalArgumentException("Wildcard expressions or all indices are not allowed");
}
}
}
| 320
| 186
| 506
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/action/support/TransportAction.java
|
TransportAction
|
execute
|
class TransportAction<Request extends TransportRequest, Response extends TransportResponse> {
protected final String actionName;
protected TransportAction(String actionName) {
this.actionName = actionName;
}
public final CompletableFuture<Response> execute(Request request) {
return execute(request, x -> x);
}
public final <T> CompletableFuture<T> execute(Request request, Function<? super Response, ? extends T> mapper) {<FILL_FUNCTION_BODY>}
protected abstract void doExecute(Request request, ActionListener<Response> listener);
}
|
FutureActionListener<Response> listener = new FutureActionListener<>();
try {
doExecute(request, listener);
} catch (Exception e) {
listener.onFailure(e);
}
return listener.thenApply(mapper);
| 148
| 64
| 212
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java
|
AcknowledgedRequest
|
timeout
|
class AcknowledgedRequest<Request extends MasterNodeRequest<Request>> extends MasterNodeRequest<Request>
implements AckedRequest {
public static final TimeValue DEFAULT_ACK_TIMEOUT = timeValueSeconds(30);
protected TimeValue timeout = DEFAULT_ACK_TIMEOUT;
protected AcknowledgedRequest() {
}
/**
* Allows to set the timeout
* @param timeout timeout as a string (e.g. 1s)
* @return the request itself
*/
@SuppressWarnings("unchecked")
public final Request timeout(String timeout) {<FILL_FUNCTION_BODY>}
/**
* Allows to set the timeout
* @param timeout timeout as a {@link TimeValue}
* @return the request itself
*/
@SuppressWarnings("unchecked")
public final Request timeout(TimeValue timeout) {
this.timeout = timeout;
return (Request) this;
}
/**
* Returns the current timeout
* @return the current timeout as a {@link TimeValue}
*/
public final TimeValue timeout() {
return timeout;
}
@Override
public TimeValue ackTimeout() {
return timeout;
}
public AcknowledgedRequest(StreamInput in) throws IOException {
super(in);
timeout = in.readTimeValue();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeTimeValue(timeout);
}
}
|
this.timeout = TimeValue.parseTimeValue(timeout, this.timeout, getClass().getSimpleName() + ".timeout");
return (Request)this;
| 389
| 41
| 430
|
<methods>public final Request masterNodeTimeout(io.crate.common.unit.TimeValue) ,public final Request masterNodeTimeout(java.lang.String) ,public final io.crate.common.unit.TimeValue masterNodeTimeout() ,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>public static final io.crate.common.unit.TimeValue DEFAULT_MASTER_NODE_TIMEOUT,protected io.crate.common.unit.TimeValue masterNodeTimeout
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/action/support/master/ShardsAcknowledgedResponse.java
|
ShardsAcknowledgedResponse
|
equals
|
class ShardsAcknowledgedResponse extends AcknowledgedResponse {
private final boolean shardsAcknowledged;
protected ShardsAcknowledgedResponse(boolean acknowledged, boolean shardsAcknowledged) {
super(acknowledged);
assert acknowledged || shardsAcknowledged == false; // if it's not acknowledged, then shards acked should be false too
this.shardsAcknowledged = shardsAcknowledged;
}
/**
* Returns true if the requisite number of shards were started before
* returning from the index creation operation. If {@link #isAcknowledged()}
* is false, then this also returns false.
*/
public boolean isShardsAcknowledged() {
return shardsAcknowledged;
}
protected ShardsAcknowledgedResponse(StreamInput in) throws IOException {
super(in);
shardsAcknowledged = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(shardsAcknowledged);
}
@Override
public boolean equals(Object o) {<FILL_FUNCTION_BODY>}
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), shardsAcknowledged);
}
}
|
if (super.equals(o)) {
ShardsAcknowledgedResponse that = (ShardsAcknowledgedResponse) o;
return shardsAcknowledged == that.shardsAcknowledged;
}
return false;
| 349
| 63
| 412
|
<methods>public void <init>(boolean) ,public void <init>(org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public boolean equals(java.lang.Object) ,public int hashCode() ,public final boolean isAcknowledged() ,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>protected final non-sealed boolean acknowledged
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardRequest.java
|
SingleShardRequest
|
validateNonNullIndex
|
class SingleShardRequest<Request extends SingleShardRequest<Request>> extends TransportRequest implements IndicesRequest {
public static final IndicesOptions INDICES_OPTIONS = IndicesOptions.strictSingleIndexNoExpandForbidClosed();
/**
* The concrete index name
*
* Whether index property is optional depends on the concrete implementation. If index property is required the
* concrete implementation should use {@link #validateNonNullIndex()} to check if the index property has been set
*/
@Nullable
protected String index;
ShardId internalShardId;
public SingleShardRequest() {
}
public SingleShardRequest(StreamInput in) throws IOException {
super(in);
if (in.readBoolean()) {
internalShardId = new ShardId(in);
}
index = in.readOptionalString();
// no need to pass threading over the network, they are always false when coming throw a thread pool
}
protected SingleShardRequest(String index) {
this.index = index;
}
protected void validateNonNullIndex() {<FILL_FUNCTION_BODY>}
/**
* @return The concrete index this request is targeted for or <code>null</code> if index is optional.
* Whether index property is optional depends on the concrete implementation. If index property
* is required the concrete implementation should use {@link #validateNonNullIndex()} to check
* if the index property has been set
*/
@Nullable
public String index() {
return index;
}
/**
* Sets the index.
*/
@SuppressWarnings("unchecked")
public final Request index(String index) {
this.index = index;
return (Request) this;
}
@Override
public String[] indices() {
return new String[]{index};
}
@Override
public IndicesOptions indicesOptions() {
return INDICES_OPTIONS;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeOptionalWriteable(internalShardId);
out.writeOptionalString(index);
}
}
|
if (index == null) {
throw new IllegalArgumentException("index is missing");
}
| 553
| 26
| 579
|
<methods>public void <init>() ,public void <init>(org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public org.elasticsearch.tasks.TaskId getParentTask() ,public void setParentTask(org.elasticsearch.tasks.TaskId) ,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>private org.elasticsearch.tasks.TaskId parentTaskId
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java
|
AsyncSingleAction
|
perform
|
class AsyncSingleAction {
private final ActionListener<Response> listener;
private final ShardsIterator shardIt;
private final InternalRequest internalRequest;
private final DiscoveryNodes nodes;
private volatile Exception lastFailure;
private AsyncSingleAction(Request request, ActionListener<Response> listener) {
this.listener = listener;
ClusterState clusterState = clusterService.state();
if (logger.isTraceEnabled()) {
logger.trace("executing [{}] based on cluster state version [{}]", request, clusterState.version());
}
nodes = clusterState.nodes();
ClusterBlockException blockException = checkGlobalBlock(clusterState);
if (blockException != null) {
throw blockException;
}
String concreteSingleIndex;
if (resolveIndex(request)) {
concreteSingleIndex = IndexNameExpressionResolver.concreteSingleIndex(clusterState, request).getName();
} else {
concreteSingleIndex = request.index();
}
this.internalRequest = new InternalRequest(request, concreteSingleIndex);
resolveRequest(clusterState, internalRequest);
blockException = checkRequestBlock(clusterState, internalRequest);
if (blockException != null) {
throw blockException;
}
this.shardIt = shards(clusterState, internalRequest);
}
public void start() {
if (shardIt == null) {
// just execute it on the local node
final Writeable.Reader<Response> reader = getResponseReader();
transportService.sendRequest(
clusterService.localNode(),
transportShardAction,
internalRequest.request(),
new TransportResponseHandler<Response>() {
@Override
public Response read(StreamInput in) throws IOException {
return reader.read(in);
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
@Override
public void handleResponse(final Response response) {
listener.onResponse(response);
}
@Override
public void handleException(TransportException exp) {
listener.onFailure(exp);
}
}
);
} else {
perform(null);
}
}
private void onFailure(ShardRouting shardRouting, Exception e) {
if (e != null) {
logger.trace(() -> new ParameterizedMessage("{}: failed to execute [{}]", shardRouting,
internalRequest.request()), e);
}
perform(e);
}
private void perform(@Nullable final Exception currentFailure) {<FILL_FUNCTION_BODY>}
}
|
Exception lastFailure = this.lastFailure;
if (lastFailure == null || TransportActions.isReadOverrideException(currentFailure)) {
lastFailure = currentFailure;
this.lastFailure = currentFailure;
}
final ShardRouting shardRouting = shardIt.nextOrNull();
if (shardRouting == null) {
Exception failure = lastFailure;
if (failure == null || isShardNotAvailableException(failure)) {
failure = new NoShardAvailableActionException(null,
LoggerMessageFormat.format(
"No shard available for [{}]",
internalRequest.request()), failure);
} else {
logger.debug(() -> new ParameterizedMessage("{}: failed to execute [{}]", null,
internalRequest.request()), failure);
}
listener.onFailure(failure);
return;
}
DiscoveryNode node = nodes.get(shardRouting.currentNodeId());
if (node == null) {
onFailure(shardRouting, new NoShardAvailableActionException(shardRouting.shardId()));
} else {
internalRequest.request().internalShardId = shardRouting.shardId();
if (logger.isTraceEnabled()) {
logger.trace(
"sending request [{}] to shard [{}] on node [{}]",
internalRequest.request(),
internalRequest.request().internalShardId,
node
);
}
final Writeable.Reader<Response> reader = getResponseReader();
transportService.sendRequest(
node,
transportShardAction,
internalRequest.request(),
new TransportResponseHandler<Response>() {
@Override
public Response read(StreamInput in) throws IOException {
return reader.read(in);
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
@Override
public void handleResponse(final Response response) {
listener.onResponse(response);
}
@Override
public void handleException(TransportException exp) {
onFailure(shardRouting, exp);
}
}
);
}
| 662
| 551
| 1,213
|
<methods>public final CompletableFuture<Response> execute(Request) ,public final CompletableFuture<T> execute(Request, Function<? super Response,? extends T>) <variables>protected final non-sealed java.lang.String actionName
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java
|
ElasticsearchUncaughtExceptionHandler
|
halt
|
class ElasticsearchUncaughtExceptionHandler implements Thread.UncaughtExceptionHandler {
private static final Logger LOGGER = LogManager.getLogger(ElasticsearchUncaughtExceptionHandler.class);
@Override
public void uncaughtException(Thread t, Throwable e) {
if (isFatalUncaught(e)) {
try {
onFatalUncaught(t.getName(), e);
} finally {
// we use specific error codes in case the above notification failed, at least we
// will have some indication of the error bringing us down
if (e instanceof InternalError) {
halt(128);
} else if (e instanceof OutOfMemoryError) {
halt(127);
} else if (e instanceof StackOverflowError) {
halt(126);
} else if (e instanceof UnknownError) {
halt(125);
} else if (e instanceof IOError) {
halt(124);
} else {
halt(1);
}
}
} else {
onNonFatalUncaught(t.getName(), e);
}
}
static boolean isFatalUncaught(Throwable e) {
return e instanceof Error;
}
void onFatalUncaught(final String threadName, final Throwable t) {
LOGGER.error(() -> new ParameterizedMessage("fatal error in thread [{}], exiting", threadName), t);
}
void onNonFatalUncaught(final String threadName, final Throwable t) {
LOGGER.warn(() -> new ParameterizedMessage("uncaught exception in thread [{}]", threadName), t);
}
@SuppressForbidden(reason = "halt")
void halt(int status) {<FILL_FUNCTION_BODY>}
}
|
// we halt to prevent shutdown hooks from running
Runtime.getRuntime().halt(status);
| 465
| 27
| 492
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/bootstrap/Natives.java
|
Natives
|
tryMlockall
|
class Natives {
/** no instantiation */
private Natives() {
}
private static final Logger LOGGER = LogManager.getLogger(Natives.class);
// marker to determine if the JNA class files are available to the JVM
static final boolean JNA_AVAILABLE;
static {
boolean v = false;
try {
// load one of the main JNA classes to see if the classes are available. this does not ensure that all native
// libraries are available, only the ones necessary by JNA to function
Class.forName("com.sun.jna.Native");
v = true;
} catch (ClassNotFoundException e) {
LOGGER.warn("JNA not found. native methods will be disabled.", e);
} catch (UnsatisfiedLinkError e) {
LOGGER.warn("unable to load JNA native support library, native methods will be disabled.", e);
}
JNA_AVAILABLE = v;
}
static void tryMlockall() {<FILL_FUNCTION_BODY>}
static boolean definitelyRunningAsRoot() {
if (!JNA_AVAILABLE) {
LOGGER.warn("cannot check if running as root because JNA is not available");
return false;
}
return JNANatives.definitelyRunningAsRoot();
}
static void tryVirtualLock() {
if (!JNA_AVAILABLE) {
LOGGER.warn("cannot virtual lock because JNA is not available");
return;
}
JNANatives.tryVirtualLock();
}
static void addConsoleCtrlHandler(ConsoleCtrlHandler handler) {
if (!JNA_AVAILABLE) {
LOGGER.warn("cannot register console handler because JNA is not available");
return;
}
JNANatives.addConsoleCtrlHandler(handler);
}
static boolean isMemoryLocked() {
if (!JNA_AVAILABLE) {
return false;
}
return JNANatives.LOCAL_MLOCKALL;
}
static void trySetMaxNumberOfThreads() {
if (!JNA_AVAILABLE) {
LOGGER.warn("cannot getrlimit RLIMIT_NPROC because JNA is not available");
return;
}
JNANatives.trySetMaxNumberOfThreads();
}
static void trySetMaxSizeVirtualMemory() {
if (!JNA_AVAILABLE) {
LOGGER.warn("cannot getrlimit RLIMIT_AS because JNA is not available");
return;
}
JNANatives.trySetMaxSizeVirtualMemory();
}
static void trySetMaxFileSize() {
if (!JNA_AVAILABLE) {
LOGGER.warn("cannot getrlimit RLIMIT_FSIZE because JNA is not available");
return;
}
JNANatives.trySetMaxFileSize();
}
}
|
if (!JNA_AVAILABLE) {
LOGGER.warn("cannot mlockall because JNA is not available");
return;
}
JNANatives.tryMlockall();
| 742
| 53
| 795
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/client/node/NodeClient.java
|
NodeClient
|
transportAction
|
class NodeClient extends AbstractClient {
@SuppressWarnings("rawtypes")
private Map<ActionType, TransportAction> actions;
public NodeClient(Settings settings, ThreadPool threadPool) {
super(settings, threadPool);
}
@SuppressWarnings("rawtypes")
public void initialize(Map<ActionType, TransportAction> actions) {
this.actions = actions;
}
@Override
public void close() {
// nothing really to do
}
@Override
public <Req extends TransportRequest, Resp extends TransportResponse> CompletableFuture<Resp> execute(ActionType<Resp> action, Req request) {
return transportAction(action).execute(request);
}
/**
* Get the {@link TransportAction} for an {@link ActionType}, throwing exceptions if the action isn't available.
*/
@SuppressWarnings("unchecked")
private <Request extends TransportRequest,
Response extends TransportResponse> TransportAction<Request, Response> transportAction(ActionType<Response> action) {<FILL_FUNCTION_BODY>}
}
|
if (actions == null) {
throw new IllegalStateException("NodeClient has not been initialized");
}
TransportAction<Request, Response> transportAction = actions.get(action);
if (transportAction == null) {
throw new IllegalStateException("failed to find action [" + action + "] to execute");
}
return transportAction;
| 277
| 88
| 365
|
<methods>public void <init>(org.elasticsearch.common.settings.Settings, org.elasticsearch.threadpool.ThreadPool) ,public final org.elasticsearch.client.AdminClient admin() ,public final org.elasticsearch.common.settings.Settings settings() ,public final org.elasticsearch.threadpool.ThreadPool threadPool() <variables>private final non-sealed org.elasticsearch.client.support.AbstractClient.Admin admin,protected final non-sealed org.elasticsearch.common.settings.Settings settings,private final non-sealed org.elasticsearch.threadpool.ThreadPool threadPool
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java
|
ReservedSpace
|
writeTo
|
class ReservedSpace implements Writeable {
public static final ReservedSpace EMPTY = new ReservedSpace(0, new ObjectHashSet<>());
private final long total;
private final ObjectHashSet<ShardId> shardIds;
private ReservedSpace(long total, ObjectHashSet<ShardId> shardIds) {
this.total = total;
this.shardIds = shardIds;
}
ReservedSpace(StreamInput in) throws IOException {
total = in.readVLong();
final int shardIdCount = in.readVInt();
shardIds = new ObjectHashSet<>(shardIdCount);
for (int i = 0; i < shardIdCount; i++) {
shardIds.add(new ShardId(in));
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {<FILL_FUNCTION_BODY>}
public long getTotal() {
return total;
}
public boolean containsShardId(ShardId shardId) {
return shardIds.contains(shardId);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ReservedSpace that = (ReservedSpace) o;
return total == that.total &&
shardIds.equals(that.shardIds);
}
@Override
public int hashCode() {
return Objects.hash(total, shardIds);
}
public static class Builder {
private long total;
private ObjectHashSet<ShardId> shardIds = new ObjectHashSet<>();
public ReservedSpace build() {
assert shardIds != null : "already built";
final ReservedSpace reservedSpace = new ReservedSpace(total, shardIds);
shardIds = null;
return reservedSpace;
}
public Builder add(ShardId shardId, long reservedBytes) {
assert shardIds != null : "already built";
assert reservedBytes >= 0 : reservedBytes;
shardIds.add(shardId);
total += reservedBytes;
return this;
}
}
}
|
out.writeVLong(total);
out.writeVInt(shardIds.size());
for (ObjectCursor<ShardId> shardIdCursor : shardIds) {
shardIdCursor.value.writeTo(out);
}
| 576
| 65
| 641
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/ClusterName.java
|
ClusterName
|
equals
|
class ClusterName implements Writeable {
public static final Setting<ClusterName> CLUSTER_NAME_SETTING = new Setting<>(
"cluster.name",
"crate",
(s) -> {
if (s.isEmpty()) {
throw new IllegalArgumentException("[cluster.name] must not be empty");
}
return new ClusterName(s);
},
DataTypes.STRING,
Setting.Property.NodeScope
);
public static final ClusterName DEFAULT = CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY);
private final String value;
public ClusterName(StreamInput input) throws IOException {
this(input.readString());
}
public ClusterName(String value) {
this.value = value.intern();
}
public String value() {
return this.value;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(value);
}
@Override
public boolean equals(Object o) {<FILL_FUNCTION_BODY>}
@Override
public int hashCode() {
return Objects.hash(value);
}
@Override
public String toString() {
return "Cluster [" + value + "]";
}
public Predicate<ClusterName> getEqualityPredicate() {
return new Predicate<ClusterName>() {
@Override
public boolean test(ClusterName o) {
return ClusterName.this.equals(o);
}
@Override
public String toString() {
return "local cluster name [" + ClusterName.this.value() + "]";
}
};
}
}
|
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ClusterName that = (ClusterName) o;
if (value != null ? !value.equals(that.value) : that.value != null) return false;
return true;
| 440
| 85
| 525
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java
|
ClusterStateUpdateTask
|
clusterStatePublished
|
class ClusterStateUpdateTask implements ClusterStateTaskConfig, ClusterStateTaskExecutor<ClusterStateUpdateTask>, ClusterStateTaskListener {
private final Priority priority;
public ClusterStateUpdateTask() {
this(Priority.NORMAL);
}
public ClusterStateUpdateTask(Priority priority) {
this.priority = priority;
}
@Override
public final ClusterTasksResult<ClusterStateUpdateTask> execute(ClusterState currentState, List<ClusterStateUpdateTask> tasks) throws Exception {
ClusterState result = execute(currentState);
return ClusterTasksResult.<ClusterStateUpdateTask>builder().successes(tasks).build(result);
}
@Override
public String describeTasks(List<ClusterStateUpdateTask> tasks) {
return ""; // one of task, source is enough
}
/**
* Update the cluster state based on the current state. Return the *same instance* if no state
* should be changed.
*/
public abstract ClusterState execute(ClusterState currentState) throws Exception;
/**
* A callback called when execute fails.
*/
public abstract void onFailure(String source, Exception e);
@Override
public final void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) {<FILL_FUNCTION_BODY>}
/**
* If the cluster state update task wasn't processed by the provided timeout, call
* {@link ClusterStateTaskListener#onFailure(String, Exception)}. May return null to indicate no timeout is needed (default).
*/
@Nullable
public TimeValue timeout() {
return null;
}
@Override
public Priority priority() {
return priority;
}
/**
* Marked as final as cluster state update tasks should only run on master.
* For local requests, use {@link LocalClusterUpdateTask} instead.
*/
@Override
public final boolean runOnlyOnMaster() {
return true;
}
}
|
// final, empty implementation here as this method should only be defined in combination
// with a batching executor as it will always be executed within the system context.
| 489
| 40
| 529
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java
|
ImmutableOpenMapDiff
|
apply
|
class ImmutableOpenMapDiff<K, T> extends MapDiff<K, T, ImmutableOpenMap<K, T>> {
protected ImmutableOpenMapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
super(in, keySerializer, valueSerializer);
}
public ImmutableOpenMapDiff(ImmutableOpenMap<K, T> before, ImmutableOpenMap<K, T> after,
KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) {
super(keySerializer, valueSerializer);
assert after != null && before != null;
for (ObjectCursor<K> key : before.keys()) {
if (!after.containsKey(key.value)) {
deletes.add(key.value);
}
}
for (ObjectObjectCursor<K, T> partIter : after) {
T beforePart = before.get(partIter.key);
if (beforePart == null) {
upserts.put(partIter.key, partIter.value);
} else if (partIter.value.equals(beforePart) == false) {
if (valueSerializer.supportsDiffableValues()) {
diffs.put(partIter.key, valueSerializer.diff(partIter.value, beforePart));
} else {
upserts.put(partIter.key, partIter.value);
}
}
}
}
@Override
public ImmutableOpenMap<K, T> apply(ImmutableOpenMap<K, T> map) {<FILL_FUNCTION_BODY>}
}
|
ImmutableOpenMap.Builder<K, T> builder = ImmutableOpenMap.builder();
builder.putAll(map);
for (K part : deletes) {
builder.remove(part);
}
for (Map.Entry<K, Diff<T>> diff : diffs.entrySet()) {
builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey())));
}
for (Map.Entry<K, T> upsert : upserts.entrySet()) {
builder.put(upsert.getKey(), upsert.getValue());
}
return builder.build();
| 406
| 164
| 570
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/MasterNodeChangePredicate.java
|
MasterNodeChangePredicate
|
build
|
class MasterNodeChangePredicate {
private MasterNodeChangePredicate() {
}
/**
* builds a predicate that will accept a cluster state only if it was generated after the current has
* (re-)joined the master
*/
public static Predicate<ClusterState> build(ClusterState currentState) {<FILL_FUNCTION_BODY>}
}
|
final long currentVersion = currentState.version();
final DiscoveryNode masterNode = currentState.nodes().getMasterNode();
final String currentMasterId = masterNode == null ? null : masterNode.getEphemeralId();
return newState -> {
final DiscoveryNode newMaster = newState.nodes().getMasterNode();
final boolean accept;
if (newMaster == null) {
accept = false;
} else if (newMaster.getEphemeralId().equals(currentMasterId) == false) {
accept = true;
} else {
accept = newState.version() > currentVersion;
}
return accept;
};
| 93
| 167
| 260
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java
|
ConnectionTarget
|
addListenerAndStartActivity
|
class ConnectionTarget {
private final DiscoveryNode discoveryNode;
private CompletableFuture<Void> future = new CompletableFuture<>();
private ActivityType activityType = ActivityType.IDLE; // indicates what any listeners are awaiting
private final AtomicInteger consecutiveFailureCount = new AtomicInteger();
private final Runnable connectActivity = new AbstractRunnable() {
final AbstractRunnable abstractRunnable = this;
@Override
protected void doRun() {
assert Thread.holdsLock(mutex) == false : "mutex unexpectedly held";
if (transportService.nodeConnected(discoveryNode)) {
// transportService.connectToNode is a no-op if already connected, but we don't want any DEBUG logging in this case
// since we run this for every node on every cluster state update.
LOGGER.trace("still connected to {}", discoveryNode);
onConnected();
} else {
LOGGER.debug("connecting to {}", discoveryNode);
transportService.connectToNode(discoveryNode, new ActionListener<Void>() {
@Override
public void onResponse(Void aVoid) {
assert Thread.holdsLock(mutex) == false : "mutex unexpectedly held";
LOGGER.debug("connected to {}", discoveryNode);
onConnected();
}
@Override
public void onFailure(Exception e) {
abstractRunnable.onFailure(e);
}
});
}
}
private void onConnected() {
consecutiveFailureCount.set(0);
onCompletion(ActivityType.CONNECTING, null, disconnectActivity);
}
@Override
public void onFailure(Exception e) {
assert Thread.holdsLock(mutex) == false : "mutex unexpectedly held";
final int currentFailureCount = consecutiveFailureCount.incrementAndGet();
if (!lifecycle.stoppedOrClosed()) {
// only warn every 6th failure
final Level level = currentFailureCount % 6 == 0 ? Level.WARN : Level.DEBUG;
LOGGER.log(level, new ParameterizedMessage("failed to connect to {} (tried [{}] times lifecycle={})",
discoveryNode, currentFailureCount, lifecycle), e);
}
onCompletion(ActivityType.CONNECTING, e, disconnectActivity);
}
@Override
public String toString() {
return "connect to " + discoveryNode;
}
};
private final Runnable disconnectActivity = new AbstractRunnable() {
@Override
protected void doRun() {
assert Thread.holdsLock(mutex) == false : "mutex unexpectedly held";
transportService.disconnectFromNode(discoveryNode);
consecutiveFailureCount.set(0);
LOGGER.debug("disconnected from {}", discoveryNode);
onCompletion(ActivityType.DISCONNECTING, null, connectActivity);
}
@Override
public void onFailure(Exception e) {
assert Thread.holdsLock(mutex) == false : "mutex unexpectedly held";
consecutiveFailureCount.incrementAndGet();
// we may not have disconnected, but will not retry, so this connection might have leaked
LOGGER.warn(new ParameterizedMessage("failed to disconnect from {}, possible connection leak", discoveryNode), e);
assert false : "failed to disconnect from " + discoveryNode + ", possible connection leak\n" + e;
onCompletion(ActivityType.DISCONNECTING, e, connectActivity);
}
};
ConnectionTarget(DiscoveryNode discoveryNode) {
this.discoveryNode = discoveryNode;
}
Runnable connect(@Nullable ActionListener<Void> listener) {
return addListenerAndStartActivity(listener, ActivityType.CONNECTING, connectActivity,
"disconnection cancelled by reconnection");
}
Runnable disconnect() {
return addListenerAndStartActivity(null, ActivityType.DISCONNECTING, disconnectActivity,
"connection cancelled by disconnection");
}
Runnable ensureConnected(@Nullable ActionListener<Void> listener) {
assert Thread.holdsLock(mutex) : "mutex not held";
if (activityType == ActivityType.IDLE) {
if (transportService.nodeConnected(discoveryNode)) {
return () -> listener.onResponse(null);
} else {
// target is disconnected, and we are currently idle, so start a connection process.
activityType = ActivityType.CONNECTING;
addListener(listener);
return connectActivity;
}
} else {
addListener(listener);
return () -> {
};
}
}
Runnable awaitCurrentActivity(ActionListener<Void> listener) {
assert Thread.holdsLock(mutex) : "mutex not held";
if (activityType == ActivityType.IDLE) {
return () -> listener.onResponse(null);
} else {
addListener(listener);
return () -> {
};
}
}
private void addListener(@Nullable ActionListener<Void> listener) {
assert Thread.holdsLock(mutex) : "mutex not held";
assert activityType != ActivityType.IDLE;
if (listener != null) {
future.whenComplete(listener);
}
}
private CompletableFuture<Void> getAndClearFuture() {
assert Thread.holdsLock(mutex) : "mutex not held";
final CompletableFuture<Void> drainedFuture = future;
future = new CompletableFuture<>();
return drainedFuture;
}
private Runnable addListenerAndStartActivity(@Nullable ActionListener<Void> listener, ActivityType newActivityType,
Runnable activity, String cancellationMessage) {<FILL_FUNCTION_BODY>}
private void onCompletion(ActivityType completedActivityType, @Nullable Exception e, Runnable oppositeActivity) {
assert Thread.holdsLock(mutex) == false : "mutex unexpectedly held";
final Runnable cleanup;
synchronized (mutex) {
assert activityType != ActivityType.IDLE;
if (activityType == completedActivityType) {
final CompletableFuture<Void> oldFuture = getAndClearFuture();
activityType = ActivityType.IDLE;
cleanup = e == null ? () -> oldFuture.complete(null) : () -> oldFuture.completeExceptionally(e);
if (completedActivityType.equals(ActivityType.DISCONNECTING)) {
final ConnectionTarget removedTarget = targetsByNode.remove(discoveryNode);
assert removedTarget == this : removedTarget + " vs " + this;
}
} else {
cleanup = oppositeActivity;
}
}
cleanup.run();
}
boolean isPendingDisconnection() {
assert Thread.holdsLock(mutex) : "mutex not held";
return activityType == ActivityType.DISCONNECTING;
}
@Override
public String toString() {
synchronized (mutex) {
return "ConnectionTarget{" +
"discoveryNode=" + discoveryNode +
", activityType=" + activityType +
'}';
}
}
}
|
assert Thread.holdsLock(mutex) : "mutex not held";
assert newActivityType.equals(ActivityType.IDLE) == false;
if (activityType == ActivityType.IDLE) {
activityType = newActivityType;
addListener(listener);
return activity;
}
if (activityType == newActivityType) {
addListener(listener);
return () -> {
};
}
activityType = newActivityType;
final CompletableFuture<Void> oldFuture = getAndClearFuture();
addListener(listener);
return () -> oldFuture.completeExceptionally(new ElasticsearchException(cancellationMessage));
| 1,821
| 173
| 1,994
|
<methods>public void addLifecycleListener(org.elasticsearch.common.component.LifecycleListener) ,public void close() ,public org.elasticsearch.common.component.Lifecycle.State lifecycleState() ,public void removeLifecycleListener(org.elasticsearch.common.component.LifecycleListener) ,public void start() ,public void stop() <variables>protected final org.elasticsearch.common.component.Lifecycle lifecycle,private final List<org.elasticsearch.common.component.LifecycleListener> listeners
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java
|
NodeMappingRefreshAction
|
nodeMappingRefresh
|
class NodeMappingRefreshAction {
private static final Logger LOGGER = LogManager.getLogger(NodeMappingRefreshAction.class);
public static final String ACTION_NAME = "internal:cluster/node/mapping/refresh";
private final TransportService transportService;
private final MetadataMappingService metadataMappingService;
@Inject
public NodeMappingRefreshAction(TransportService transportService, MetadataMappingService metadataMappingService) {
this.transportService = transportService;
this.metadataMappingService = metadataMappingService;
transportService.registerRequestHandler(ACTION_NAME, ThreadPool.Names.SAME, NodeMappingRefreshRequest::new, new NodeMappingRefreshTransportHandler());
}
public void nodeMappingRefresh(final DiscoveryNode masterNode, final NodeMappingRefreshRequest request) {<FILL_FUNCTION_BODY>}
private class NodeMappingRefreshTransportHandler implements TransportRequestHandler<NodeMappingRefreshRequest> {
@Override
public void messageReceived(NodeMappingRefreshRequest request, TransportChannel channel) throws Exception {
metadataMappingService.refreshMapping(request.index(), request.indexUUID());
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
}
public static class NodeMappingRefreshRequest extends TransportRequest implements IndicesRequest {
private final String index;
private final String indexUUID;
private final String nodeId;
public NodeMappingRefreshRequest(String index, String indexUUID, String nodeId) {
this.index = index;
this.indexUUID = indexUUID;
this.nodeId = nodeId;
}
@Override
public String[] indices() {
return new String[]{index};
}
@Override
public IndicesOptions indicesOptions() {
return IndicesOptions.strictSingleIndexNoExpandForbidClosed();
}
public String index() {
return index;
}
public String indexUUID() {
return indexUUID;
}
public String nodeId() {
return nodeId;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(index);
out.writeString(nodeId);
out.writeString(indexUUID);
}
public NodeMappingRefreshRequest(StreamInput in) throws IOException {
super(in);
index = in.readString();
nodeId = in.readString();
indexUUID = in.readString();
}
}
}
|
if (masterNode == null) {
LOGGER.warn("can't send mapping refresh for [{}], no master known.", request.index());
return;
}
transportService.sendRequest(masterNode, ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME);
| 627
| 75
| 702
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
|
FailedShardEntry
|
shardStarted
|
class FailedShardEntry extends TransportRequest {
final ShardId shardId;
final String allocationId;
final long primaryTerm;
final String message;
final Exception failure;
final boolean markAsStale;
FailedShardEntry(StreamInput in) throws IOException {
super(in);
shardId = new ShardId(in);
allocationId = in.readString();
primaryTerm = in.readVLong();
message = in.readString();
failure = in.readException();
markAsStale = in.readBoolean();
}
public FailedShardEntry(ShardId shardId, String allocationId, long primaryTerm, String message, Exception failure, boolean markAsStale) {
this.shardId = shardId;
this.allocationId = allocationId;
this.primaryTerm = primaryTerm;
this.message = message;
this.failure = failure;
this.markAsStale = markAsStale;
}
public ShardId getShardId() {
return shardId;
}
public String getAllocationId() {
return allocationId;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
shardId.writeTo(out);
out.writeString(allocationId);
out.writeVLong(primaryTerm);
out.writeString(message);
out.writeException(failure);
out.writeBoolean(markAsStale);
}
@Override
public String toString() {
List<String> components = new ArrayList<>(6);
components.add("shard id [" + shardId + "]");
components.add("allocation id [" + allocationId + "]");
components.add("primary term [" + primaryTerm + "]");
components.add("message [" + message + "]");
components.add("markAsStale [" + markAsStale + "]");
if (failure != null) {
components.add("failure [" + Exceptions.stackTrace(failure) + "]");
}
return String.join(", ", components);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
FailedShardEntry that = (FailedShardEntry) o;
// Exclude message and exception from equals and hashCode
return Objects.equals(this.shardId, that.shardId) &&
Objects.equals(this.allocationId, that.allocationId) &&
primaryTerm == that.primaryTerm &&
markAsStale == that.markAsStale;
}
@Override
public int hashCode() {
return Objects.hash(shardId, allocationId, primaryTerm, markAsStale);
}
}
public void shardStarted(final ShardRouting shardRouting,
final long primaryTerm,
final String message,
final ActionListener<Void> listener) {
shardStarted(shardRouting, primaryTerm, message, listener, clusterService.state());
}
public void shardStarted(final ShardRouting shardRouting,
final long primaryTerm,
final String message,
final ActionListener<Void> listener,
final ClusterState currentState) {<FILL_FUNCTION_BODY>
|
StartedShardEntry entry = new StartedShardEntry(shardRouting.shardId(), shardRouting.allocationId().getId(), primaryTerm, message);
sendShardAction(SHARD_STARTED_ACTION_NAME, currentState, entry, listener);
| 871
| 69
| 940
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java
|
ClusterBlockException
|
writeTo
|
class ClusterBlockException extends ElasticsearchException {
private final Set<ClusterBlock> blocks;
public ClusterBlockException(Set<ClusterBlock> blocks) {
super(buildMessage(blocks));
this.blocks = blocks;
}
public ClusterBlockException(StreamInput in) throws IOException {
super(in);
this.blocks = unmodifiableSet(in.readSet(ClusterBlock::new));
}
@Override
public void writeTo(StreamOutput out) throws IOException {<FILL_FUNCTION_BODY>}
public boolean retryable() {
for (ClusterBlock block : blocks) {
if (!block.retryable()) {
return false;
}
}
return true;
}
public Set<ClusterBlock> blocks() {
return blocks;
}
private static String buildMessage(Set<ClusterBlock> blocks) {
StringBuilder sb = new StringBuilder("blocked by: ");
for (ClusterBlock block : blocks) {
sb.append("[").append(block.status()).append("/").append(block.id()).append("/").append(block.description()).append("];");
}
return sb.toString();
}
@Override
public RestStatus status() {
RestStatus status = null;
for (ClusterBlock block : blocks) {
if (status == null) {
status = block.status();
} else if (status.getStatus() < block.status().getStatus()) {
status = block.status();
}
}
return status;
}
}
|
super.writeTo(out);
if (blocks != null) {
out.writeCollection(blocks);
} else {
out.writeVInt(0);
}
| 393
| 49
| 442
|
<methods>public void <init>(java.lang.Throwable) ,public transient void <init>(java.lang.String, java.lang.Object[]) ,public transient void <init>(java.lang.String, java.lang.Throwable, java.lang.Object[]) ,public void <init>(org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public void addHeader(java.lang.String, List<java.lang.String>) ,public transient void addMetadata(java.lang.String, java.lang.String[]) ,public void addMetadata(java.lang.String, List<java.lang.String>) ,public static org.elasticsearch.ElasticsearchException fromXContent(org.elasticsearch.common.xcontent.XContentParser) throws java.io.IOException,public static void generateThrowableXContent(org.elasticsearch.common.xcontent.XContentBuilder, org.elasticsearch.common.xcontent.ToXContent.Params, java.lang.Throwable) throws java.io.IOException,public java.lang.String getDetailedMessage() ,public static java.lang.String getExceptionName(java.lang.Throwable) ,public List<java.lang.String> getHeader(java.lang.String) ,public Set<java.lang.String> getHeaderKeys() ,public static int getId(Class<? extends org.elasticsearch.ElasticsearchException>) ,public org.elasticsearch.index.Index getIndex() ,public List<java.lang.String> getMetadata(java.lang.String) ,public Set<java.lang.String> getMetadataKeys() ,public java.lang.Throwable getRootCause() ,public org.elasticsearch.index.shard.ShardId getShardId() ,public io.crate.rest.action.HttpErrorStatus httpErrorStatus() ,public static org.elasticsearch.ElasticsearchException innerFromXContent(org.elasticsearch.common.xcontent.XContentParser, boolean) throws java.io.IOException,public static boolean isRegistered(Class<? 
extends java.lang.Throwable>, org.elasticsearch.Version) ,public io.crate.protocols.postgres.PGErrorStatus pgErrorStatus() ,public static org.elasticsearch.ElasticsearchException readException(org.elasticsearch.common.io.stream.StreamInput, int) throws java.io.IOException,public static T readStackTrace(T, org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public void setIndex(org.elasticsearch.index.Index) ,public void setIndex(java.lang.String) ,public transient void setResources(java.lang.String, java.lang.String[]) ,public void setShard(org.elasticsearch.index.shard.ShardId) ,public org.elasticsearch.rest.RestStatus status() ,public java.lang.String toString() ,public org.elasticsearch.common.xcontent.XContentBuilder toXContent(org.elasticsearch.common.xcontent.XContentBuilder, org.elasticsearch.common.xcontent.ToXContent.Params) throws java.io.IOException,public java.lang.Throwable unwrapCause() ,public static T writeStackTraces(T, org.elasticsearch.common.io.stream.StreamOutput, Writer<java.lang.Throwable>) throws java.io.IOException,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>private static final java.lang.String CAUSED_BY,private static final non-sealed Map<Class<? extends org.elasticsearch.ElasticsearchException>,org.elasticsearch.ElasticsearchException.ElasticsearchExceptionHandle> CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE,private static final java.lang.String HEADER,private static final non-sealed Map<java.lang.Integer,CheckedFunction<org.elasticsearch.common.io.stream.StreamInput,? 
extends org.elasticsearch.ElasticsearchException,java.io.IOException>> ID_TO_SUPPLIER,private static final java.lang.String INDEX_METADATA_KEY,private static final java.lang.String INDEX_METADATA_KEY_UUID,private static final java.lang.String REASON,private static final java.lang.String RESOURCE_METADATA_ID_KEY,private static final java.lang.String RESOURCE_METADATA_TYPE_KEY,private static final java.lang.String REST_EXCEPTION_SKIP_CAUSE,private static final boolean REST_EXCEPTION_SKIP_CAUSE_DEFAULT,public static final java.lang.String REST_EXCEPTION_SKIP_STACK_TRACE,public static final boolean REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT,private static final java.lang.String ROOT_CAUSE,private static final java.lang.String SHARD_METADATA_KEY,private static final java.lang.String STACK_TRACE,private static final org.elasticsearch.common.xcontent.ParseField SUPPRESSED,private static final java.lang.String TYPE,private static final org.elasticsearch.Version UNKNOWN_VERSION_ADDED,private final Map<java.lang.String,List<java.lang.String>> headers,private final Map<java.lang.String,List<java.lang.String>> metadata
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/coordination/DetachClusterCommand.java
|
DetachClusterCommand
|
processNodePaths
|
// Offline CLI command that detaches a stopped node from its former cluster so it
// can (unsafely) join a different one. It rewrites the on-disk cluster state;
// path discovery and node locking are handled by ElasticsearchNodeCommand.
class DetachClusterCommand extends ElasticsearchNodeCommand {
    // Printed after the rewritten (detached) state has been committed to disk.
    public static final String NODE_DETACHED_MSG = "Node was successfully detached from the cluster";
    // Warning shown before anything is written; the user must confirm because
    // this tool can cause arbitrary data loss.
    public static final String CONFIRMATION_MSG =
        DELIMITER +
        "\n" +
        "You should only run this tool if you have permanently lost all of the\n" +
        "master-eligible nodes in this cluster and you cannot restore the cluster\n" +
        "from a snapshot, or you have already unsafely bootstrapped a new cluster\n" +
        "by running `crate-node unsafe-bootstrap` on a master-eligible\n" +
        "node that belonged to the same cluster as this node. This tool can cause\n" +
        "arbitrary data loss and its use should be your last resort.\n" +
        "\n" +
        "Do you want to proceed?\n";
    public DetachClusterCommand() {
        super("Detaches this node from its cluster, allowing it to unsafely join a new cluster");
    }
    @Override
    protected void processNodePaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment env) throws IOException {<FILL_FUNCTION_BODY>}
    // package-private for tests
    // Resets the coordination metadata: term back to 0, both voting
    // configurations set to MUST_JOIN_ELECTED_MASTER, and the cluster UUID
    // marked as not committed so the node may adopt a new cluster's UUID.
    static Metadata updateMetadata(Metadata oldMetadata) {
        final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder()
            .lastAcceptedConfiguration(CoordinationMetadata.VotingConfiguration.MUST_JOIN_ELECTED_MASTER)
            .lastCommittedConfiguration(CoordinationMetadata.VotingConfiguration.MUST_JOIN_ELECTED_MASTER)
            .term(0)
            .build();
        return Metadata.builder(oldMetadata)
            .coordinationMetadata(coordinationMetadata)
            .clusterUUIDCommitted(false)
            .build();
    }
    //package-private for tests
    // The current term is reset to 0 alongside the metadata above.
    static long updateCurrentTerm() {
        return 0;
    }
}
|
// Body of DetachClusterCommand.processNodePaths: load the persisted state,
// rewrite its coordination metadata, then commit the detached state to disk.
final PersistedClusterStateService persistedClusterStateService = createPersistedClusterStateService(env.settings(), dataPaths);
terminal.println(Terminal.Verbosity.VERBOSE, "Loading cluster state");
final ClusterState oldClusterState = loadTermAndClusterState(persistedClusterStateService, env).v2();
// Apply updateMetadata(): reset term/voting configs and un-commit the UUID.
final ClusterState newClusterState = ClusterState.builder(oldClusterState)
    .metadata(updateMetadata(oldClusterState.metadata())).build();
terminal.println(Terminal.Verbosity.VERBOSE,
    "[old cluster state = " + oldClusterState + ", new cluster state = " + newClusterState + "]");
// Interactive safety gate — aborts unless the user confirms.
confirm(terminal, CONFIRMATION_MSG);
// Persist the full new state with the reset current term (0).
try (PersistedClusterStateService.Writer writer = persistedClusterStateService.createWriter()) {
    writer.writeFullStateAndCommit(updateCurrentTerm(), newClusterState);
}
terminal.println(NODE_DETACHED_MSG);
| 486
| 240
| 726
|
<methods>public void <init>(java.lang.String) ,public static org.elasticsearch.cluster.ClusterState clusterState(org.elasticsearch.env.Environment, org.elasticsearch.gateway.PersistedClusterStateService.OnDiskState) ,public static org.elasticsearch.gateway.PersistedClusterStateService createPersistedClusterStateService(org.elasticsearch.common.settings.Settings, java.nio.file.Path[]) throws java.io.IOException,public final void execute(org.elasticsearch.cli.Terminal, OptionSet, org.elasticsearch.env.Environment) throws java.lang.Exception,public static Tuple<java.lang.Long,org.elasticsearch.cluster.ClusterState> loadTermAndClusterState(org.elasticsearch.gateway.PersistedClusterStateService, org.elasticsearch.env.Environment) throws java.io.IOException<variables>public static final java.lang.String ABORTED_BY_USER_MSG,public static final java.lang.String CS_MISSING_MSG,protected static final java.lang.String DELIMITER,public static final java.lang.String FAILED_TO_OBTAIN_NODE_LOCK_MSG,private static final Logger LOGGER,protected static final org.elasticsearch.common.xcontent.NamedXContentRegistry NAMED_X_CONTENT_REGISTRY,public static final java.lang.String NO_NODE_FOLDER_FOUND_MSG,public static final java.lang.String NO_NODE_METADATA_FOUND_MSG,public static final java.lang.String STOP_WARNING_MSG
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/coordination/Join.java
|
Join
|
hashCode
|
// A vote cast by {@code sourceNode} for {@code targetNode} to become master in
// the given {@code term}. Also carries the voter's last accepted term/version
// so the recipient can judge how up to date the voter's state is.
class Join implements Writeable {
    private final DiscoveryNode sourceNode;  // the voting node
    private final DiscoveryNode targetNode;  // the node being voted for
    private final long term;                 // term in which the vote is cast
    private final long lastAcceptedTerm;     // term of the voter's last accepted state
    private final long lastAcceptedVersion;  // version of the voter's last accepted state
    public Join(DiscoveryNode sourceNode, DiscoveryNode targetNode, long term, long lastAcceptedTerm, long lastAcceptedVersion) {
        // All counters are non-negative by construction.
        assert term >= 0;
        assert lastAcceptedTerm >= 0;
        assert lastAcceptedVersion >= 0;
        this.sourceNode = sourceNode;
        this.targetNode = targetNode;
        this.term = term;
        this.lastAcceptedTerm = lastAcceptedTerm;
        this.lastAcceptedVersion = lastAcceptedVersion;
    }
    // Wire deserialization; field order must mirror writeTo().
    public Join(StreamInput in) throws IOException {
        sourceNode = new DiscoveryNode(in);
        targetNode = new DiscoveryNode(in);
        term = in.readLong();
        lastAcceptedTerm = in.readLong();
        lastAcceptedVersion = in.readLong();
    }
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        sourceNode.writeTo(out);
        targetNode.writeTo(out);
        out.writeLong(term);
        out.writeLong(lastAcceptedTerm);
        out.writeLong(lastAcceptedVersion);
    }
    public DiscoveryNode getSourceNode() {
        return sourceNode;
    }
    public DiscoveryNode getTargetNode() {
        return targetNode;
    }
    // Matches on node id only, not on full node identity/attributes.
    public boolean targetMatches(DiscoveryNode matchingNode) {
        return targetNode.getId().equals(matchingNode.getId());
    }
    public long getLastAcceptedVersion() {
        return lastAcceptedVersion;
    }
    public long getTerm() {
        return term;
    }
    public long getLastAcceptedTerm() {
        return lastAcceptedTerm;
    }
    @Override
    public String toString() {
        return "Join{" +
            "term=" + term +
            ", lastAcceptedTerm=" + lastAcceptedTerm +
            ", lastAcceptedVersion=" + lastAcceptedVersion +
            ", sourceNode=" + sourceNode +
            ", targetNode=" + targetNode +
            '}';
    }
    // Equality covers all five fields; hashCode (below) must stay consistent.
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        Join join = (Join) o;
        if (sourceNode.equals(join.sourceNode) == false) return false;
        if (targetNode.equals(join.targetNode) == false) return false;
        if (lastAcceptedVersion != join.lastAcceptedVersion) return false;
        if (term != join.term) return false;
        return lastAcceptedTerm == join.lastAcceptedTerm;
    }
    @Override
    public int hashCode() {<FILL_FUNCTION_BODY>}
}
|
// Standard 31-based accumulation over all five fields, consistent with equals().
// Long.hashCode(v) is exactly (int) (v ^ (v >>> 32)), so values are unchanged.
int hash = Long.hashCode(lastAcceptedVersion);
hash = 31 * hash + sourceNode.hashCode();
hash = 31 * hash + targetNode.hashCode();
hash = 31 * hash + Long.hashCode(term);
hash = 31 * hash + Long.hashCode(lastAcceptedTerm);
return hash;
| 730
| 115
| 845
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/coordination/JoinRequest.java
|
JoinRequest
|
equals
|
// Transport request sent by a node that wants to join (or re-join) a master,
// carrying an optional vote ({@link Join}) and the minimum term it will accept.
class JoinRequest extends TransportRequest {
    /**
     * The sending (i.e. joining) node.
     */
    private final DiscoveryNode sourceNode;
    /**
     * The minimum term for which the joining node will accept any cluster state publications. If the joining node is in a strictly greater
     * term than the master it wants to join then the master must enter a new term and hold another election. Doesn't necessarily match
     * {@link JoinRequest#optionalJoin} and may be zero in join requests sent prior to {@link Version#V_5_1_0}.
     */
    private final long minimumTerm;
    /**
     * A vote for the receiving node. This vote is optional since the sending node may have voted for a different master in this term.
     * That's ok, the sender likely discovered that the master we voted for lost the election and now we're trying to join the winner. Once
     * the sender has successfully joined the master, the lack of a vote in its term causes another election (see
     * {@link Publication#onMissingJoin(DiscoveryNode)}).
     */
    private final Optional<Join> optionalJoin;
    public JoinRequest(DiscoveryNode sourceNode, long minimumTerm, Optional<Join> optionalJoin) {
        // A present join must originate from the same node that sends the request.
        assert optionalJoin.isPresent() == false || optionalJoin.get().getSourceNode().equals(sourceNode);
        this.sourceNode = sourceNode;
        this.minimumTerm = minimumTerm;
        this.optionalJoin = optionalJoin;
    }
    // Wire deserialization; minimumTerm is only on the wire from V_5_1_0 onwards
    // (older senders implicitly use 0).
    public JoinRequest(StreamInput in) throws IOException {
        super(in);
        sourceNode = new DiscoveryNode(in);
        if (in.getVersion().onOrAfter(Version.V_5_1_0)) {
            minimumTerm = in.readLong();
        } else {
            minimumTerm = 0L;
        }
        optionalJoin = Optional.ofNullable(in.readOptionalWriteable(Join::new));
    }
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        sourceNode.writeTo(out);
        // Mirror the version gate used on the read side.
        if (out.getVersion().onOrAfter(Version.V_5_1_0)) {
            out.writeLong(minimumTerm);
        }
        out.writeOptionalWriteable(optionalJoin.orElse(null));
    }
    public DiscoveryNode getSourceNode() {
        return sourceNode;
    }
    public long getMinimumTerm() {
        return minimumTerm;
    }
    public long getTerm() {
        // If the join is also present then its term will normally equal the corresponding term, but we do not require callers to
        // obtain the term and the join in a synchronized fashion so it's possible that they disagree. Also older nodes do not share the
        // minimum term, so for BWC we can take it from the join if present.
        return Math.max(minimumTerm, optionalJoin.map(Join::getTerm).orElse(0L));
    }
    public Optional<Join> getOptionalJoin() {
        return optionalJoin;
    }
    @Override
    public boolean equals(Object o) {<FILL_FUNCTION_BODY>}
    @Override
    public int hashCode() {
        return Objects.hash(sourceNode, minimumTerm, optionalJoin);
    }
    @Override
    public String toString() {
        return "JoinRequest{" +
            "sourceNode=" + sourceNode +
            ", minimumTerm=" + minimumTerm +
            ", optionalJoin=" + optionalJoin +
            '}';
    }
}
|
// Equality over the same three fields hashed by hashCode(), with the cheap
// primitive comparison first; instanceof check preserved from the original.
if (o == this) {
    return true;
}
if ((o instanceof JoinRequest) == false) {
    return false;
}
final JoinRequest other = (JoinRequest) o;
return minimumTerm == other.minimumTerm
    && sourceNode.equals(other.sourceNode)
    && optionalJoin.equals(other.optionalJoin);
| 855
| 87
| 942
|
<methods>public void <init>() ,public void <init>(org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public org.elasticsearch.tasks.TaskId getParentTask() ,public void setParentTask(org.elasticsearch.tasks.TaskId) ,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>private org.elasticsearch.tasks.TaskId parentTaskId
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/coordination/LagDetector.java
|
LagDetector
|
startLagDetector
|
// Detects follower nodes that fail to apply a published cluster state within
// cluster.follower_lag.timeout, and reports them via the onLagDetected callback
// (typically leading to their removal from the cluster).
class LagDetector {
    private static final Logger LOGGER = LogManager.getLogger(LagDetector.class);
    // the timeout for each node to apply a cluster state update after the leader has applied it, before being removed from the cluster
    public static final Setting<TimeValue> CLUSTER_FOLLOWER_LAG_TIMEOUT_SETTING =
        Setting.timeSetting("cluster.follower_lag.timeout",
            TimeValue.timeValueMillis(90000), TimeValue.timeValueMillis(1), Setting.Property.NodeScope);
    private final TimeValue clusterStateApplicationTimeout;
    private final Consumer<DiscoveryNode> onLagDetected;
    private final Supplier<DiscoveryNode> localNodeSupplier;
    private final ThreadPool threadPool;
    // One tracker per followed node; concurrent because trackers are updated
    // from transport threads while the set is rewritten on publication.
    private final Map<DiscoveryNode, NodeAppliedStateTracker> appliedStateTrackersByNode = new ConcurrentHashMap<>();
    public LagDetector(final Settings settings, final ThreadPool threadPool, final Consumer<DiscoveryNode> onLagDetected,
                       final Supplier<DiscoveryNode> localNodeSupplier) {
        this.threadPool = threadPool;
        this.clusterStateApplicationTimeout = CLUSTER_FOLLOWER_LAG_TIMEOUT_SETTING.get(settings);
        this.onLagDetected = onLagDetected;
        this.localNodeSupplier = localNodeSupplier;
    }
    // Replaces the tracked node set: drops trackers for removed nodes, keeps
    // existing trackers (and their applied versions), never tracks the local node.
    public void setTrackedNodes(final Iterable<DiscoveryNode> discoveryNodes) {
        final Set<DiscoveryNode> discoveryNodeSet = new HashSet<>();
        discoveryNodes.forEach(discoveryNodeSet::add);
        discoveryNodeSet.remove(localNodeSupplier.get());
        appliedStateTrackersByNode.keySet().retainAll(discoveryNodeSet);
        discoveryNodeSet.forEach(node -> appliedStateTrackersByNode.putIfAbsent(node, new NodeAppliedStateTracker(node)));
    }
    public void clearTrackedNodes() {
        appliedStateTrackersByNode.clear();
    }
    // Records that a node acked application of the given cluster state version.
    public void setAppliedVersion(final DiscoveryNode discoveryNode, final long appliedVersion) {
        final NodeAppliedStateTracker nodeAppliedStateTracker = appliedStateTrackersByNode.get(discoveryNode);
        if (nodeAppliedStateTracker == null) {
            // Received an ack from a node that a later publication has removed (or we are no longer master). No big deal.
            LOGGER.trace("node {} applied version {} but this node's version is not being tracked", discoveryNode, appliedVersion);
        } else {
            nodeAppliedStateTracker.increaseAppliedVersion(appliedVersion);
        }
    }
    public void startLagDetector(final long version) {<FILL_FUNCTION_BODY>}
    @Override
    public String toString() {
        return "LagDetector{" +
            "clusterStateApplicationTimeout=" + clusterStateApplicationTimeout +
            ", appliedStateTrackersByNode=" + appliedStateTrackersByNode.values() +
            '}';
    }
    // for assertions
    Set<DiscoveryNode> getTrackedNodes() {
        return Collections.unmodifiableSet(appliedStateTrackersByNode.keySet());
    }
    // Tracks the highest cluster state version a single node has applied.
    private class NodeAppliedStateTracker {
        private final DiscoveryNode discoveryNode;
        private final AtomicLong appliedVersion = new AtomicLong();
        NodeAppliedStateTracker(final DiscoveryNode discoveryNode) {
            this.discoveryNode = discoveryNode;
        }
        // Monotonic: only ever raises the recorded applied version.
        void increaseAppliedVersion(long appliedVersion) {
            long maxAppliedVersion = this.appliedVersion.updateAndGet(v -> Math.max(v, appliedVersion));
            LOGGER.trace("{} applied version {}, max now {}", this, appliedVersion, maxAppliedVersion);
        }
        boolean appliedVersionLessThan(final long version) {
            return appliedVersion.get() < version;
        }
        @Override
        public String toString() {
            return "NodeAppliedStateTracker{" +
                "discoveryNode=" + discoveryNode +
                ", appliedVersion=" + appliedVersion +
                '}';
        }
        // Invoked when the timeout fires: reports the node as lagging unless it
        // caught up in the meantime or is no longer tracked.
        void checkForLag(final long version) {
            if (appliedStateTrackersByNode.get(discoveryNode) != this) {
                LOGGER.trace("{} no longer active when checking version {}", this, version);
                return;
            }
            long appliedVersion = this.appliedVersion.get();
            if (version <= appliedVersion) {
                LOGGER.trace("{} satisfied when checking version {}, node applied version {}", this, version, appliedVersion);
                return;
            }
            LOGGER.warn(
                "node [{}] is lagging at cluster state version [{}], although publication of cluster state version [{}] completed [{}] ago",
                discoveryNode, appliedVersion, version, clusterStateApplicationTimeout);
            onLagDetected.accept(discoveryNode);
        }
    }
}
|
// Body of LagDetector.startLagDetector: snapshot the trackers that have not yet
// applied `version`, and (if any) schedule a single delayed check for them.
final List<NodeAppliedStateTracker> laggingTrackers
    = appliedStateTrackersByNode.values().stream().filter(t -> t.appliedVersionLessThan(version)).collect(Collectors.toList());
if (laggingTrackers.isEmpty()) {
    LOGGER.trace("lag detection for version {} is unnecessary: {}", version, appliedStateTrackersByNode.values());
} else {
    LOGGER.debug("starting lag detector for version {}: {}", version, laggingTrackers);
    // The check runs once after the configured timeout; checkForLag itself
    // ignores trackers that caught up or were replaced in the meantime.
    threadPool.scheduleUnlessShuttingDown(clusterStateApplicationTimeout, Names.GENERIC, new Runnable() {
        @Override
        public void run() {
            laggingTrackers.forEach(t -> t.checkForLag(version));
        }
        // toString override so scheduled-task logging identifies this job.
        @Override
        public String toString() {
            return "lag detector for version " + version + " on " + laggingTrackers;
        }
    });
}
| 1,219
| 238
| 1,457
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/coordination/PreVoteRequest.java
|
PreVoteRequest
|
equals
|
// Transport request used in the pre-voting phase of an election: the source
// node asks its peers whether it could win an election in its current term.
class PreVoteRequest extends TransportRequest {
    private final DiscoveryNode sourceNode;  // node considering an election
    private final long currentTerm;          // that node's current term
    public PreVoteRequest(DiscoveryNode sourceNode, long currentTerm) {
        this.sourceNode = sourceNode;
        this.currentTerm = currentTerm;
    }
    // Wire deserialization; field order must mirror writeTo().
    public PreVoteRequest(StreamInput in) throws IOException {
        super(in);
        sourceNode = new DiscoveryNode(in);
        currentTerm = in.readLong();
    }
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        sourceNode.writeTo(out);
        out.writeLong(currentTerm);
    }
    public DiscoveryNode getSourceNode() {
        return sourceNode;
    }
    public long getCurrentTerm() {
        return currentTerm;
    }
    @Override
    public String toString() {
        return "PreVoteRequest{" +
            "sourceNode=" + sourceNode +
            ", currentTerm=" + currentTerm +
            '}';
    }
    @Override
    public boolean equals(Object o) {<FILL_FUNCTION_BODY>}
    @Override
    public int hashCode() {
        return Objects.hash(sourceNode, currentTerm);
    }
}
|
// Equality over both fields hashed by hashCode(); exact-class check preserved
// from the original (a subclass instance never equals a PreVoteRequest).
if (o == this) {
    return true;
}
if (o == null || o.getClass() != getClass()) {
    return false;
}
final PreVoteRequest other = (PreVoteRequest) o;
return currentTerm == other.currentTerm && Objects.equals(sourceNode, other.sourceNode);
| 322
| 77
| 399
|
<methods>public void <init>() ,public void <init>(org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public org.elasticsearch.tasks.TaskId getParentTask() ,public void setParentTask(org.elasticsearch.tasks.TaskId) ,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>private org.elasticsearch.tasks.TaskId parentTaskId
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java
|
PublicationContext
|
sendApplyCommit
|
// Per-publication helper: serializes the new cluster state once per node wire
// version (full state or diff against the previous state) and sends publish /
// apply-commit requests to individual nodes, retrying a diff failure with the
// full state when the receiver's base version is incompatible.
class PublicationContext {
    private final DiscoveryNodes discoveryNodes;
    private final ClusterState newState;
    private final ClusterState previousState;
    // When true (previous state had state persistence disabled) every node
    // receives the full state instead of a diff.
    private final boolean sendFullVersion;
    // Serialized payloads cached by node wire version so each version is
    // serialized at most once per publication.
    private final Map<Version, BytesReference> serializedStates = new HashMap<>();
    private final Map<Version, BytesReference> serializedDiffs = new HashMap<>();
    PublicationContext(ClusterChangedEvent clusterChangedEvent) {
        discoveryNodes = clusterChangedEvent.state().nodes();
        newState = clusterChangedEvent.state();
        previousState = clusterChangedEvent.previousState();
        sendFullVersion = previousState.blocks().disableStatePersistence();
    }
    // Pre-serializes, per node version, either the full state (for new nodes or
    // when sendFullVersion is set) or a diff against the previous state.
    void buildDiffAndSerializeStates() {
        Diff<ClusterState> diff = null;
        for (DiscoveryNode node : discoveryNodes) {
            try {
                if (sendFullVersion || previousState.nodes().nodeExists(node) == false) {
                    if (serializedStates.containsKey(node.getVersion()) == false) {
                        serializedStates.put(node.getVersion(), serializeFullClusterState(newState, node.getVersion()));
                    }
                } else {
                    // will send a diff
                    if (diff == null) {
                        diff = newState.diff(previousState);
                    }
                    if (serializedDiffs.containsKey(node.getVersion()) == false) {
                        final BytesReference serializedDiff = serializeDiffClusterState(diff, node.getVersion());
                        serializedDiffs.put(node.getVersion(), serializedDiff);
                        LOGGER.trace("serialized cluster state diff for version [{}] in for node version [{}] with size [{}]",
                            newState.version(), node.getVersion(), serializedDiff.length());
                    }
                }
            } catch (IOException e) {
                throw new ElasticsearchException("failed to serialize cluster state for publishing to node {}", e, node);
            }
        }
    }
    // Sends the publish request to one node; publication to self short-circuits
    // through currentPublishRequestToSelf instead of re-deserializing.
    public void sendPublishRequest(DiscoveryNode destination, PublishRequest publishRequest,
                                   ActionListener<PublishWithJoinResponse> listener) {
        assert publishRequest.getAcceptedState() == newState : "state got switched on us";
        final ActionListener<PublishWithJoinResponse> responseActionListener;
        if (destination.equals(discoveryNodes.getLocalNode())) {
            // if publishing to self, use original request instead (see currentPublishRequestToSelf for explanation)
            final PublishRequest previousRequest = currentPublishRequestToSelf.getAndSet(publishRequest);
            // we might override an in-flight publication to self in case where we failed as master and became master again,
            // and the new publication started before the previous one completed (which fails anyhow because of higher current term)
            assert previousRequest == null || previousRequest.getAcceptedState().term() < publishRequest.getAcceptedState().term();
            responseActionListener = new ActionListener<PublishWithJoinResponse>() {
                @Override
                public void onResponse(PublishWithJoinResponse publishWithJoinResponse) {
                    currentPublishRequestToSelf.compareAndSet(publishRequest, null); // only clean-up our mess
                    listener.onResponse(publishWithJoinResponse);
                }
                @Override
                public void onFailure(Exception e) {
                    currentPublishRequestToSelf.compareAndSet(publishRequest, null); // only clean-up our mess
                    listener.onFailure(e);
                }
            };
        } else {
            responseActionListener = listener;
        }
        // Same full-vs-diff decision as in buildDiffAndSerializeStates().
        if (sendFullVersion || previousState.nodes().nodeExists(destination) == false) {
            LOGGER.trace("sending full cluster state version [{}] to [{}]", newState.version(), destination);
            sendFullClusterState(destination, responseActionListener);
        } else {
            LOGGER.trace("sending cluster state diff for version [{}] to [{}]", newState.version(), destination);
            sendClusterStateDiff(destination, responseActionListener);
        }
    }
    public void sendApplyCommit(DiscoveryNode destination, ApplyCommitRequest applyCommitRequest,
                                ActionListener<TransportResponse.Empty> listener) {<FILL_FUNCTION_BODY>}
    // Sends the full state, lazily serializing it for this node version if the
    // pre-serialization step did not already cache it.
    private void sendFullClusterState(DiscoveryNode destination, ActionListener<PublishWithJoinResponse> listener) {
        BytesReference bytes = serializedStates.get(destination.getVersion());
        if (bytes == null) {
            try {
                bytes = serializeFullClusterState(newState, destination.getVersion());
                serializedStates.put(destination.getVersion(), bytes);
            } catch (Exception e) {
                LOGGER.warn(() -> new ParameterizedMessage(
                    "failed to serialize cluster state before publishing it to node {}", destination), e);
                listener.onFailure(e);
                return;
            }
        }
        // retryWithFullClusterStateOnFailure = false: this already is the full state.
        sendClusterState(destination, bytes, false, listener);
    }
    private void sendClusterStateDiff(DiscoveryNode destination, ActionListener<PublishWithJoinResponse> listener) {
        final BytesReference bytes = serializedDiffs.get(destination.getVersion());
        assert bytes != null
            : "failed to find serialized diff for node " + destination + " of version [" + destination.getVersion() + "]";
        // retryWithFullClusterStateOnFailure = true: fall back to the full state
        // if the receiver reports an incompatible base version.
        sendClusterState(destination, bytes, true, listener);
    }
    private void sendClusterState(DiscoveryNode destination, BytesReference bytes, boolean retryWithFullClusterStateOnFailure,
                                  ActionListener<PublishWithJoinResponse> listener) {
        try {
            final BytesTransportRequest request = new BytesTransportRequest(bytes, destination.getVersion());
            final Consumer<TransportException> transportExceptionHandler = exp -> {
                if (retryWithFullClusterStateOnFailure && exp.unwrapCause() instanceof IncompatibleClusterStateVersionException) {
                    LOGGER.debug("resending full cluster state to node {} reason {}", destination, exp.getDetailedMessage());
                    sendFullClusterState(destination, listener);
                } else {
                    LOGGER.debug(() -> new ParameterizedMessage("failed to send cluster state to {}", destination), exp);
                    listener.onFailure(exp);
                }
            };
            final TransportResponseHandler<PublishWithJoinResponse> responseHandler =
                new TransportResponseHandler<PublishWithJoinResponse>() {
                    @Override
                    public PublishWithJoinResponse read(StreamInput in) throws IOException {
                        return new PublishWithJoinResponse(in);
                    }
                    @Override
                    public void handleResponse(PublishWithJoinResponse response) {
                        listener.onResponse(response);
                    }
                    @Override
                    public void handleException(TransportException exp) {
                        transportExceptionHandler.accept(exp);
                    }
                    @Override
                    public String executor() {
                        return ThreadPool.Names.GENERIC;
                    }
                };
            transportService.sendRequest(destination, PUBLISH_STATE_ACTION_NAME, request, stateRequestOptions, responseHandler);
        } catch (Exception e) {
            LOGGER.warn(() -> new ParameterizedMessage("error sending cluster state to {}", destination), e);
            listener.onFailure(e);
        }
    }
}
|
// Body of PublicationContext.sendApplyCommit: forwards the commit request and
// adapts the empty transport response onto the supplied listener.
transportService.sendRequest(destination, COMMIT_STATE_ACTION_NAME, applyCommitRequest, stateRequestOptions,
    new TransportResponseHandler<TransportResponse.Empty>() {
        @Override
        public TransportResponse.Empty read(StreamInput in) {
            // Commit acks carry no payload.
            return TransportResponse.Empty.INSTANCE;
        }
        @Override
        public void handleResponse(TransportResponse.Empty response) {
            listener.onResponse(response);
        }
        @Override
        public void handleException(TransportException exp) {
            listener.onFailure(exp);
        }
        @Override
        public String executor() {
            // Handle responses off the transport thread.
            return ThreadPool.Names.GENERIC;
        }
    });
| 1,735
| 172
| 1,907
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/coordination/PublishClusterStateStats.java
|
PublishClusterStateStats
|
toXContent
|
// Immutable counters describing how this node has received cluster states from
// the master: full copies, incompatible diffs (which force a full resend), and
// successfully applied (compatible) diffs.
class PublishClusterStateStats implements Writeable, ToXContentObject {
    private final long fullClusterStateReceivedCount;
    private final long incompatibleClusterStateDiffReceivedCount;
    private final long compatibleClusterStateDiffReceivedCount;
    /**
     * @param fullClusterStateReceivedCount the number of times this node has received a full copy of the cluster state from the master.
     * @param incompatibleClusterStateDiffReceivedCount the number of times this node has received a cluster-state diff from the master.
     * @param compatibleClusterStateDiffReceivedCount the number of times that received cluster-state diffs were compatible with
     */
    public PublishClusterStateStats(long fullClusterStateReceivedCount,
                                    long incompatibleClusterStateDiffReceivedCount,
                                    long compatibleClusterStateDiffReceivedCount) {
        this.fullClusterStateReceivedCount = fullClusterStateReceivedCount;
        this.incompatibleClusterStateDiffReceivedCount = incompatibleClusterStateDiffReceivedCount;
        this.compatibleClusterStateDiffReceivedCount = compatibleClusterStateDiffReceivedCount;
    }
    // Wire deserialization; field order must mirror writeTo().
    public PublishClusterStateStats(StreamInput in) throws IOException {
        fullClusterStateReceivedCount = in.readVLong();
        incompatibleClusterStateDiffReceivedCount = in.readVLong();
        compatibleClusterStateDiffReceivedCount = in.readVLong();
    }
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVLong(fullClusterStateReceivedCount);
        out.writeVLong(incompatibleClusterStateDiffReceivedCount);
        out.writeVLong(compatibleClusterStateDiffReceivedCount);
    }
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {<FILL_FUNCTION_BODY>}
    public long getFullClusterStateReceivedCount() {
        return fullClusterStateReceivedCount;
    }
    public long getIncompatibleClusterStateDiffReceivedCount() {
        return incompatibleClusterStateDiffReceivedCount;
    }
    public long getCompatibleClusterStateDiffReceivedCount() {
        return compatibleClusterStateDiffReceivedCount;
    }
    @Override
    public String toString() {
        return "PublishClusterStateStats(full=" + fullClusterStateReceivedCount
            + ", incompatible=" + incompatibleClusterStateDiffReceivedCount
            + ", compatible=" + compatibleClusterStateDiffReceivedCount
            + ")";
    }
}
|
builder.startObject("published_cluster_states");
{
builder.field("full_states", fullClusterStateReceivedCount);
builder.field("incompatible_diffs", incompatibleClusterStateDiffReceivedCount);
builder.field("compatible_diffs", compatibleClusterStateDiffReceivedCount);
}
builder.endObject();
return builder;
| 593
| 91
| 684
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/coordination/PublishResponse.java
|
PublishResponse
|
toString
|
// Response to a cluster state publish request, acknowledging acceptance of the
// state identified by (term, version).
class PublishResponse implements Writeable {
    private final long term;
    private final long version;
    public PublishResponse(long term, long version) {
        // Terms and versions are non-negative by construction.
        assert term >= 0;
        assert version >= 0;
        this.term = term;
        this.version = version;
    }
    // Wire deserialization; read order (term, version) mirrors writeTo().
    public PublishResponse(StreamInput in) throws IOException {
        this(in.readLong(), in.readLong());
    }
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeLong(term);
        out.writeLong(version);
    }
    public long getTerm() {
        return term;
    }
    public long getVersion() {
        return version;
    }
    @Override
    public String toString() {<FILL_FUNCTION_BODY>}
    // Value equality over both fields; hashCode below is kept consistent.
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        PublishResponse response = (PublishResponse) o;
        if (term != response.term) return false;
        return version == response.version;
    }
    @Override
    public int hashCode() {
        int result = (int) (term ^ (term >>> 32));
        result = 31 * result + (int) (version ^ (version >>> 32));
        return result;
    }
}
|
// Same rendered string as before, e.g. PublishResponse{term=1, version=2}.
return "PublishResponse{term=" + term + ", version=" + version + '}';
| 358
| 35
| 393
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommand.java
|
RemoveCustomsCommand
|
processNodePaths
|
// Offline CLI command that removes named (glob-matched) custom metadata entries
// from the on-disk cluster state of a stopped node — a last-resort repair when
// broken custom metadata prevents the cluster state from loading.
class RemoveCustomsCommand extends ElasticsearchNodeCommand {
    // Printed once the rewritten state has been committed to disk.
    static final String CUSTOMS_REMOVED_MSG = "Customs were successfully removed from the cluster state";
    // Warning shown before anything is written; requires user confirmation.
    static final String CONFIRMATION_MSG =
        DELIMITER +
        "\n" +
        "You should only run this tool if you have broken custom metadata in the\n" +
        "cluster state that prevents the cluster state from being loaded.\n" +
        "This tool can cause data loss and its use should be your last resort.\n" +
        "\n" +
        "Do you want to proceed?\n";
    // Positional arguments: custom metadata names/patterns to remove.
    private final OptionSpec<String> arguments;
    public RemoveCustomsCommand() {
        super("Removes custom metadata from the cluster state");
        arguments = parser.nonOptions("custom metadata names");
    }
    @Override
    protected void processNodePaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment env)
        throws IOException, UserException {<FILL_FUNCTION_BODY>}
}
|
// Body of RemoveCustomsCommand.processNodePaths: load the persisted state,
// glob-match and strip the requested custom metadata keys, confirm, and commit.
final List<String> customsToRemove = arguments.values(options);
if (customsToRemove.isEmpty()) {
    throw new UserException(ExitCodes.USAGE, "Must supply at least one custom metadata name to remove");
}
final PersistedClusterStateService persistedClusterStateService = createPersistedClusterStateService(env.settings(), dataPaths);
terminal.println(Terminal.Verbosity.VERBOSE, "Loading cluster state");
final Tuple<Long, ClusterState> termAndClusterState = loadTermAndClusterState(persistedClusterStateService, env);
final ClusterState oldClusterState = termAndClusterState.v2();
terminal.println(Terminal.Verbosity.VERBOSE, "custom metadata names: " + oldClusterState.metadata().customs().keys());
final Metadata.Builder metaDataBuilder = Metadata.builder(oldClusterState.metadata());
for (String customToRemove : customsToRemove) {
    boolean matched = false;
    // Each argument is a simple glob pattern matched against every custom key.
    for (ObjectCursor<String> customKeyCur : oldClusterState.metadata().customs().keys()) {
        final String customKey = customKeyCur.value;
        if (Regex.simpleMatch(customToRemove, customKey)) {
            metaDataBuilder.removeCustom(customKey);
            if (matched == false) {
                terminal.println("The following customs will be removed:");
            }
            matched = true;
            terminal.println(customKey);
        }
    }
    // An argument matching nothing is treated as a usage error — fail fast
    // before rewriting any state.
    if (matched == false) {
        throw new UserException(ExitCodes.USAGE,
            "No custom metadata matching [" + customToRemove + "] were found on this node");
    }
}
final ClusterState newClusterState = ClusterState.builder(oldClusterState).metadata(metaDataBuilder.build()).build();
terminal.println(Terminal.Verbosity.VERBOSE,
    "[old cluster state = " + oldClusterState + ", new cluster state = " + newClusterState + "]");
// Interactive safety gate — aborts unless the user confirms.
confirm(terminal, CONFIRMATION_MSG);
// Commit the stripped state under the unchanged current term.
try (PersistedClusterStateService.Writer writer = persistedClusterStateService.createWriter()) {
    writer.writeFullStateAndCommit(termAndClusterState.v1(), newClusterState);
}
terminal.println(CUSTOMS_REMOVED_MSG);
| 251
| 565
| 816
|
<methods>public void <init>(java.lang.String) ,public static org.elasticsearch.cluster.ClusterState clusterState(org.elasticsearch.env.Environment, org.elasticsearch.gateway.PersistedClusterStateService.OnDiskState) ,public static org.elasticsearch.gateway.PersistedClusterStateService createPersistedClusterStateService(org.elasticsearch.common.settings.Settings, java.nio.file.Path[]) throws java.io.IOException,public final void execute(org.elasticsearch.cli.Terminal, OptionSet, org.elasticsearch.env.Environment) throws java.lang.Exception,public static Tuple<java.lang.Long,org.elasticsearch.cluster.ClusterState> loadTermAndClusterState(org.elasticsearch.gateway.PersistedClusterStateService, org.elasticsearch.env.Environment) throws java.io.IOException<variables>public static final java.lang.String ABORTED_BY_USER_MSG,public static final java.lang.String CS_MISSING_MSG,protected static final java.lang.String DELIMITER,public static final java.lang.String FAILED_TO_OBTAIN_NODE_LOCK_MSG,private static final Logger LOGGER,protected static final org.elasticsearch.common.xcontent.NamedXContentRegistry NAMED_X_CONTENT_REGISTRY,public static final java.lang.String NO_NODE_FOLDER_FOUND_MSG,public static final java.lang.String NO_NODE_METADATA_FOUND_MSG,public static final java.lang.String STOP_WARNING_MSG
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/coordination/RemoveSettingsCommand.java
|
RemoveSettingsCommand
|
processNodePaths
|
// Offline CLI command that removes (glob-matched) persistent settings from the
// on-disk cluster state of a stopped node — a last-resort repair when
// incompatible settings prevent the cluster from forming.
class RemoveSettingsCommand extends ElasticsearchNodeCommand {
    // Printed once the rewritten state has been committed to disk.
    static final String SETTINGS_REMOVED_MSG = "Settings were successfully removed from the cluster state";
    // Warning shown before anything is written; requires user confirmation.
    static final String CONFIRMATION_MSG =
        DELIMITER +
        "\n" +
        "You should only run this tool if you have incompatible settings in the\n" +
        "cluster state that prevent the cluster from forming.\n" +
        "This tool can cause data loss and its use should be your last resort.\n" +
        "\n" +
        "Do you want to proceed?\n";
    // Positional arguments: persistent setting names/patterns to remove.
    private final OptionSpec<String> arguments;
    public RemoveSettingsCommand() {
        super("Removes persistent settings from the cluster state");
        arguments = parser.nonOptions("setting names");
    }
    @Override
    protected void processNodePaths(Terminal terminal, Path[] dataPaths, OptionSet options, Environment env)
        throws IOException, UserException {<FILL_FUNCTION_BODY>}
}
|
// Resolve the setting names / simple-wildcard patterns supplied on the command line.
final List<String> settingsToRemove = arguments.values(options);
if (settingsToRemove.isEmpty()) {
throw new UserException(ExitCodes.USAGE, "Must supply at least one setting to remove");
}
// Load the on-disk cluster state (current term + last committed state) for this node.
final PersistedClusterStateService persistedClusterStateService = createPersistedClusterStateService(env.settings(), dataPaths);
terminal.println(Terminal.Verbosity.VERBOSE, "Loading cluster state");
final Tuple<Long, ClusterState> termAndClusterState = loadTermAndClusterState(persistedClusterStateService, env);
final ClusterState oldClusterState = termAndClusterState.v2();
final Settings oldPersistentSettings = oldClusterState.metadata().persistentSettings();
terminal.println(Terminal.Verbosity.VERBOSE, "persistent settings: " + oldPersistentSettings);
// Start from a copy of the existing persistent settings and remove every key
// matching one of the requested patterns.
final Settings.Builder newPersistentSettingsBuilder = Settings.builder().put(oldPersistentSettings);
for (String settingToRemove : settingsToRemove) {
boolean matched = false;
for (String settingKey : oldPersistentSettings.keySet()) {
if (Regex.simpleMatch(settingToRemove, settingKey)) {
newPersistentSettingsBuilder.remove(settingKey);
// Print the header only once, before the first key removed for this pattern.
if (matched == false) {
terminal.println("The following settings will be removed:");
}
matched = true;
terminal.println(settingKey + ": " + oldPersistentSettings.get(settingKey));
}
}
// A pattern that matches nothing is a usage error rather than a silent no-op.
if (matched == false) {
throw new UserException(ExitCodes.USAGE,
"No persistent cluster settings matching [" + settingToRemove + "] were found on this node");
}
}
// Rebuild the cluster state with the pruned persistent settings.
final ClusterState newClusterState = ClusterState.builder(oldClusterState)
.metadata(Metadata.builder(oldClusterState.metadata()).persistentSettings(newPersistentSettingsBuilder.build()).build())
.build();
terminal.println(Terminal.Verbosity.VERBOSE,
"[old cluster state = " + oldClusterState + ", new cluster state = " + newClusterState + "]");
// Destructive operation: require explicit user confirmation before writing anything.
confirm(terminal, CONFIRMATION_MSG);
// Persist the modified state under the same term that was loaded.
try (PersistedClusterStateService.Writer writer = persistedClusterStateService.createWriter()) {
writer.writeFullStateAndCommit(termAndClusterState.v1(), newClusterState);
}
terminal.println(SETTINGS_REMOVED_MSG);
| 244
| 594
| 838
|
<methods>public void <init>(java.lang.String) ,public static org.elasticsearch.cluster.ClusterState clusterState(org.elasticsearch.env.Environment, org.elasticsearch.gateway.PersistedClusterStateService.OnDiskState) ,public static org.elasticsearch.gateway.PersistedClusterStateService createPersistedClusterStateService(org.elasticsearch.common.settings.Settings, java.nio.file.Path[]) throws java.io.IOException,public final void execute(org.elasticsearch.cli.Terminal, OptionSet, org.elasticsearch.env.Environment) throws java.lang.Exception,public static Tuple<java.lang.Long,org.elasticsearch.cluster.ClusterState> loadTermAndClusterState(org.elasticsearch.gateway.PersistedClusterStateService, org.elasticsearch.env.Environment) throws java.io.IOException<variables>public static final java.lang.String ABORTED_BY_USER_MSG,public static final java.lang.String CS_MISSING_MSG,protected static final java.lang.String DELIMITER,public static final java.lang.String FAILED_TO_OBTAIN_NODE_LOCK_MSG,private static final Logger LOGGER,protected static final org.elasticsearch.common.xcontent.NamedXContentRegistry NAMED_X_CONTENT_REGISTRY,public static final java.lang.String NO_NODE_FOLDER_FOUND_MSG,public static final java.lang.String NO_NODE_METADATA_FOUND_MSG,public static final java.lang.String STOP_WARNING_MSG
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/coordination/StartJoinRequest.java
|
StartJoinRequest
|
hashCode
|
class StartJoinRequest extends TransportRequest {
private final DiscoveryNode sourceNode;
private final long term;
public StartJoinRequest(DiscoveryNode sourceNode, long term) {
this.sourceNode = sourceNode;
this.term = term;
}
public StartJoinRequest(StreamInput input) throws IOException {
super(input);
this.sourceNode = new DiscoveryNode(input);
this.term = input.readLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
sourceNode.writeTo(out);
out.writeLong(term);
}
public DiscoveryNode getSourceNode() {
return sourceNode;
}
public long getTerm() {
return term;
}
@Override
public String toString() {
return "StartJoinRequest{" +
"term=" + term +
",node=" + sourceNode + "}";
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof StartJoinRequest)) return false;
StartJoinRequest that = (StartJoinRequest) o;
if (term != that.term) return false;
return sourceNode.equals(that.sourceNode);
}
@Override
public int hashCode() {<FILL_FUNCTION_BODY>}
}
|
// Combine both fields used by equals() (sourceNode and term) so hashCode stays
// consistent with equals; the term is folded from 64 to 32 bits the same way
// Long.hashCode does (high word XOR low word).
int result = sourceNode.hashCode();
result = 31 * result + (int) (term ^ (term >>> 32));
return result;
| 364
| 42
| 406
|
<methods>public void <init>() ,public void <init>(org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public org.elasticsearch.tasks.TaskId getParentTask() ,public void setParentTask(org.elasticsearch.tasks.TaskId) ,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>private org.elasticsearch.tasks.TaskId parentTaskId
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeRole.java
|
UnknownRole
|
roleSetting
|
class UnknownRole extends DiscoveryNodeRole {
/**
* Construct an unknown role with the specified role name and role name abbreviation.
*
* @param roleName the role name
* @param roleNameAbbreviation the role name abbreviation
*/
UnknownRole(final String roleName, final String roleNameAbbreviation) {
super(roleName, roleNameAbbreviation);
}
@Override
protected Setting<Boolean> roleSetting() {<FILL_FUNCTION_BODY>}
}
|
// since this setting is not registered, it will always return false when testing if the local node has the role
// Reaching this method for an unknown role indicates a programming error, hence the assert.
assert false;
// NOTE(review): the literal "node. " contains a space after the dot — looks unintentional;
// confirm against the key format of registered role settings before relying on it.
return Setting.boolSetting("node. " + roleName(), false, Setting.Property.NodeScope);
| 131
| 56
| 187
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java
|
DelayedRerouteTask
|
onFailure
|
class DelayedRerouteTask extends ClusterStateUpdateTask {
final TimeValue nextDelay; // delay until submitting the reroute command
final long baseTimestampNanos; // timestamp (in nanos) upon which delay was calculated
volatile Scheduler.Cancellable cancellable;
final AtomicBoolean cancelScheduling = new AtomicBoolean();
DelayedRerouteTask(TimeValue nextDelay, long baseTimestampNanos) {
this.nextDelay = nextDelay;
this.baseTimestampNanos = baseTimestampNanos;
}
public long scheduledTimeToRunInNanos() {
return baseTimestampNanos + nextDelay.nanos();
}
public void cancelScheduling() {
cancelScheduling.set(true);
if (cancellable != null) {
cancellable.cancel();
}
removeIfSameTask(this);
}
public void schedule() {
cancellable = threadPool.schedule(new AbstractRunnable() {
@Override
protected void doRun() throws Exception {
if (cancelScheduling.get()) {
return;
}
clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE, DelayedRerouteTask.this);
}
@Override
public void onFailure(Exception e) {
LOGGER.warn("failed to submit schedule/execute reroute post unassigned shard", e);
removeIfSameTask(DelayedRerouteTask.this);
}
}, nextDelay, ThreadPool.Names.SAME);
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
removeIfSameTask(this);
return allocationService.reroute(currentState, "assign delayed unassigned shards");
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
if (oldState == newState) {
// no state changed, check when we should remove the delay flag from the shards the next time.
// if cluster state changed, we can leave the scheduling of the next delay up to the clusterChangedEvent
// this should not be needed, but we want to be extra safe here
scheduleIfNeeded(currentNanoTime(), newState);
}
}
@Override
public void onFailure(String source, Exception e) {<FILL_FUNCTION_BODY>}
}
|
// Deregister this task (mirrors the onFailure handler in schedule()) so a fresh
// delayed reroute can be scheduled, then log the failure.
removeIfSameTask(this);
LOGGER.warn("failed to schedule/execute reroute post unassigned shard", e);
| 605
| 35
| 640
|
<methods>public void addLifecycleListener(org.elasticsearch.common.component.LifecycleListener) ,public void close() ,public org.elasticsearch.common.component.Lifecycle.State lifecycleState() ,public void removeLifecycleListener(org.elasticsearch.common.component.LifecycleListener) ,public void start() ,public void stop() <variables>protected final org.elasticsearch.common.component.Lifecycle lifecycle,private final List<org.elasticsearch.common.component.LifecycleListener> listeners
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/routing/GroupShardsIterator.java
|
GroupShardsIterator
|
totalSize
|
class GroupShardsIterator<ShardIt extends ShardIterator> implements Iterable<ShardIt> {
private final List<ShardIt> iterators;
/**
* Constructs a new sorted GroupShardsIterator from the given list. Items are sorted based on their natural ordering.
* @see PlainShardIterator#compareTo(ShardIterator)
*/
public static <ShardIt extends ShardIterator> GroupShardsIterator<ShardIt> sortAndCreate(List<ShardIt> iterators) {
CollectionUtil.timSort(iterators);
return new GroupShardsIterator<>(iterators);
}
/**
* Constructs a new GroupShardsIterator from the given list.
*/
private GroupShardsIterator(List<ShardIt> iterators) {
this.iterators = iterators;
}
/**
* Returns the total number of shards within all groups
* @return total number of shards
*/
public int totalSize() {<FILL_FUNCTION_BODY>}
/**
* Returns the total number of shards plus the number of empty groups
* @return number of shards and empty groups
*/
public int totalSizeWith1ForEmpty() {
int size = 0;
for (ShardIt shard : iterators) {
size += Math.max(1, shard.size());
}
return size;
}
/**
* Return the number of groups
* @return number of groups
*/
public int size() {
return iterators.size();
}
@Override
public Iterator<ShardIt> iterator() {
return iterators.iterator();
}
public ShardIt get(int index) {
return iterators.get(index);
}
}
|
// Sum the shard counts of all group iterators; empty groups contribute 0
// (contrast with totalSizeWith1ForEmpty, which counts them as 1).
int size = 0;
for (ShardIterator shard : iterators) {
size += shard.size();
}
return size;
| 450
| 41
| 491
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AbstractAllocationDecision.java
|
AbstractAllocationDecision
|
writeTo
|
class AbstractAllocationDecision implements ToXContentFragment, Writeable {
@Nullable
protected final DiscoveryNode targetNode;
@Nullable
protected final List<NodeAllocationResult> nodeDecisions;
protected AbstractAllocationDecision(@Nullable DiscoveryNode targetNode, @Nullable List<NodeAllocationResult> nodeDecisions) {
this.targetNode = targetNode;
this.nodeDecisions = nodeDecisions != null ? sortNodeDecisions(nodeDecisions) : null;
}
protected AbstractAllocationDecision(StreamInput in) throws IOException {
targetNode = in.readOptionalWriteable(DiscoveryNode::new);
nodeDecisions = in.readBoolean() ? Collections.unmodifiableList(in.readList(NodeAllocationResult::new)) : null;
}
/**
* Returns {@code true} if a decision was taken by the allocator, {@code false} otherwise.
* If no decision was taken, then the rest of the fields in this object cannot be accessed and will
* throw an {@code IllegalStateException}.
*/
public abstract boolean isDecisionTaken();
/**
* Get the node that the allocator will assign the shard to, returning {@code null} if there is no node to
* which the shard will be assigned or moved. If {@link #isDecisionTaken()} returns {@code false}, then
* invoking this method will throw an {@code IllegalStateException}.
*/
@Nullable
public DiscoveryNode getTargetNode() {
checkDecisionState();
return targetNode;
}
/**
* Gets the sorted list of individual node-level decisions that went into making the ultimate decision whether
* to allocate or move the shard. If {@link #isDecisionTaken()} returns {@code false}, then
* invoking this method will throw an {@code IllegalStateException}.
*/
@Nullable
public List<NodeAllocationResult> getNodeDecisions() {
checkDecisionState();
return nodeDecisions;
}
/**
* Gets the explanation for the decision. If {@link #isDecisionTaken()} returns {@code false}, then invoking
* this method will throw an {@code IllegalStateException}.
*/
public abstract String getExplanation();
@Override
public void writeTo(StreamOutput out) throws IOException {<FILL_FUNCTION_BODY>}
protected void checkDecisionState() {
if (isDecisionTaken() == false) {
throw new IllegalStateException("decision was not taken, individual object fields cannot be accessed");
}
}
/**
* Generates X-Content for a {@link DiscoveryNode} that leaves off some of the non-critical fields.
*/
public static XContentBuilder discoveryNodeToXContent(DiscoveryNode node, boolean outerObjectWritten, XContentBuilder builder)
throws IOException {
builder.field(outerObjectWritten ? "id" : "node_id", node.getId());
builder.field(outerObjectWritten ? "name" : "node_name", node.getName());
builder.field("transport_address", node.getAddress().toString());
if (node.getAttributes().isEmpty() == false) {
builder.startObject(outerObjectWritten ? "attributes" : "node_attributes");
for (Map.Entry<String, String> entry : node.getAttributes().entrySet()) {
builder.field(entry.getKey(), entry.getValue());
}
builder.endObject();
}
return builder;
}
/**
* Sorts a list of node level decisions by the decision type, then by weight ranking, and finally by node id.
*/
public List<NodeAllocationResult> sortNodeDecisions(List<NodeAllocationResult> nodeDecisions) {
return Collections.unmodifiableList(nodeDecisions.stream().sorted().collect(Collectors.toList()));
}
/**
* Generates X-Content for the node-level decisions, creating the outer "node_decisions" object
* in which they are serialized.
*/
public XContentBuilder nodeDecisionsToXContent(List<NodeAllocationResult> nodeDecisions, XContentBuilder builder, Params params)
throws IOException {
if (nodeDecisions != null && nodeDecisions.isEmpty() == false) {
builder.startArray("node_allocation_decisions");
{
for (NodeAllocationResult explanation : nodeDecisions) {
explanation.toXContent(builder, params);
}
}
builder.endArray();
}
return builder;
}
/**
* Returns {@code true} if there is at least one node that returned a {@link Type#YES} decision for allocating this shard.
*/
protected boolean atLeastOneNodeWithYesDecision() {
if (nodeDecisions == null) {
return false;
}
for (NodeAllocationResult result : nodeDecisions) {
if (result.getNodeDecision() == AllocationDecision.YES) {
return true;
}
}
return false;
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other == null || other instanceof AbstractAllocationDecision == false) {
return false;
}
@SuppressWarnings("unchecked") AbstractAllocationDecision that = (AbstractAllocationDecision) other;
return Objects.equals(targetNode, that.targetNode) && Objects.equals(nodeDecisions, that.nodeDecisions);
}
@Override
public int hashCode() {
return Objects.hash(targetNode, nodeDecisions);
}
}
|
// Serialization mirrors the StreamInput constructor: optional targetNode first,
// then a presence flag followed by the node-decision list when non-null.
out.writeOptionalWriteable(targetNode);
if (nodeDecisions != null) {
out.writeBoolean(true);
out.writeList(nodeDecisions);
} else {
out.writeBoolean(false);
}
| 1,416
| 64
| 1,480
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java
|
FloodStageValidator
|
setFloodStageRaw
|
class FloodStageValidator implements Setting.Validator<String> {
@Override
public void validate(final String value) {
}
@Override
public void validate(final String value, final Map<Setting<?>, Object> settings) {
final String lowWatermarkRaw = (String) settings.get(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING);
final String highWatermarkRaw = (String) settings.get(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING);
doValidate(lowWatermarkRaw, highWatermarkRaw, value);
}
@Override
public Iterator<Setting<?>> settings() {
final List<Setting<?>> settings = Arrays.asList(
CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING,
CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING);
return settings.iterator();
}
}
private static void doValidate(String low, String high, String flood) {
try {
doValidateAsPercentage(low, high, flood);
return; // early return so that we do not try to parse as bytes
} catch (final ElasticsearchParseException e) {
// swallow as we are now going to try to parse as bytes
}
try {
doValidateAsBytes(low, high, flood);
} catch (final ElasticsearchParseException e) {
final String message = String.format(
Locale.ROOT,
"unable to consistently parse [%s=%s], [%s=%s], and [%s=%s] as percentage or bytes",
CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(),
low,
CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(),
high,
CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(),
flood);
throw new IllegalArgumentException(message, e);
}
}
private static void doValidateAsPercentage(final String low, final String high, final String flood) {
final double lowWatermarkThreshold = thresholdPercentageFromWatermark(low, false);
final double highWatermarkThreshold = thresholdPercentageFromWatermark(high, false);
final double floodThreshold = thresholdPercentageFromWatermark(flood, false);
if (lowWatermarkThreshold > highWatermarkThreshold) {
throw new IllegalArgumentException(
"low disk watermark [" + low + "] more than high disk watermark [" + high + "]");
}
if (highWatermarkThreshold > floodThreshold) {
throw new IllegalArgumentException(
"high disk watermark [" + high + "] more than flood stage disk watermark [" + flood + "]");
}
}
private static void doValidateAsBytes(final String low, final String high, final String flood) {
final ByteSizeValue lowWatermarkBytes =
thresholdBytesFromWatermark(low, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), false);
final ByteSizeValue highWatermarkBytes =
thresholdBytesFromWatermark(high, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), false);
final ByteSizeValue floodStageBytes =
thresholdBytesFromWatermark(flood, CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), false);
if (lowWatermarkBytes.getBytes() < highWatermarkBytes.getBytes()) {
throw new IllegalArgumentException(
"low disk watermark [" + low + "] less than high disk watermark [" + high + "]");
}
if (highWatermarkBytes.getBytes() < floodStageBytes.getBytes()) {
throw new IllegalArgumentException(
"high disk watermark [" + high + "] less than flood stage disk watermark [" + flood + "]");
}
}
private void setIncludeRelocations(boolean includeRelocations) {
this.includeRelocations = includeRelocations;
}
private void setRerouteInterval(TimeValue rerouteInterval) {
this.rerouteInterval = rerouteInterval;
}
private void setEnabled(boolean enabled) {
this.enabled = enabled;
}
private void setLowWatermark(String lowWatermark) {
// Watermark is expressed in terms of used data, but we need "free" data watermark
this.lowWatermarkRaw = lowWatermark;
this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(lowWatermark);
this.freeBytesThresholdLow = thresholdBytesFromWatermark(lowWatermark,
CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey());
}
private void setHighWatermark(String highWatermark) {
// Watermark is expressed in terms of used data, but we need "free" data watermark
this.highWatermarkRaw = highWatermark;
this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(highWatermark);
this.freeBytesThresholdHigh = thresholdBytesFromWatermark(highWatermark,
CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey());
}
private void setFloodStageRaw(String floodStageRaw) {<FILL_FUNCTION_BODY>
|
// Watermark is expressed in terms of used data, but we need "free" data watermark
// Both the percentage-based and byte-based thresholds are derived from the same
// raw value, matching the structure of setLowWatermark/setHighWatermark above.
this.floodStageRaw = floodStageRaw;
this.freeDiskThresholdFloodStage = 100.0 - thresholdPercentageFromWatermark(floodStageRaw);
this.freeBytesThresholdFloodStage = thresholdBytesFromWatermark(floodStageRaw,
CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey());
| 1,523
| 137
| 1,660
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingExplanations.java
|
RoutingExplanations
|
toXContent
|
class RoutingExplanations implements ToXContentFragment {
private final List<RerouteExplanation> explanations;
public RoutingExplanations() {
this.explanations = new ArrayList<>();
}
public RoutingExplanations add(RerouteExplanation explanation) {
this.explanations.add(explanation);
return this;
}
public List<RerouteExplanation> explanations() {
return this.explanations;
}
/**
* Provides feedback from commands with a YES decision that should be displayed to the user after the command has been applied
*/
public List<String> getYesDecisionMessages() {
return explanations().stream()
.filter(explanation -> explanation.decisions().type().equals(Decision.Type.YES))
.map(explanation -> explanation.command().getMessage())
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.toList());
}
/**
* Read in a RoutingExplanations object
*/
public static RoutingExplanations readFrom(StreamInput in) throws IOException {
int exCount = in.readVInt();
RoutingExplanations exp = new RoutingExplanations();
for (int i = 0; i < exCount; i++) {
RerouteExplanation explanation = RerouteExplanation.readFrom(in);
exp.add(explanation);
}
return exp;
}
/**
* Write the RoutingExplanations object
*/
public static void writeTo(RoutingExplanations explanations, StreamOutput out) throws IOException {
out.writeVInt(explanations.explanations.size());
for (RerouteExplanation explanation : explanations.explanations) {
RerouteExplanation.writeTo(explanation, out);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {<FILL_FUNCTION_BODY>}
}
|
// Render every reroute explanation as an element of the "explanations" array.
builder.startArray("explanations");
for (RerouteExplanation explanation : explanations) {
explanation.toXContent(builder, params);
}
builder.endArray();
return builder;
| 523
| 56
| 579
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardAllocationDecision.java
|
ShardAllocationDecision
|
toXContent
|
class ShardAllocationDecision implements ToXContentFragment, Writeable {
public static final ShardAllocationDecision NOT_TAKEN =
new ShardAllocationDecision(AllocateUnassignedDecision.NOT_TAKEN, MoveDecision.NOT_TAKEN);
private final AllocateUnassignedDecision allocateDecision;
private final MoveDecision moveDecision;
public ShardAllocationDecision(AllocateUnassignedDecision allocateDecision,
MoveDecision moveDecision) {
this.allocateDecision = allocateDecision;
this.moveDecision = moveDecision;
}
public ShardAllocationDecision(StreamInput in) throws IOException {
allocateDecision = new AllocateUnassignedDecision(in);
moveDecision = new MoveDecision(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
allocateDecision.writeTo(out);
moveDecision.writeTo(out);
}
/**
* Returns {@code true} if either an allocation decision or a move decision was taken
* for the shard. If no decision was taken, as in the case of initializing or relocating
* shards, then this method returns {@code false}.
*/
public boolean isDecisionTaken() {
return allocateDecision.isDecisionTaken() || moveDecision.isDecisionTaken();
}
/**
* Gets the unassigned allocation decision for the shard. If the shard was not in the unassigned state,
* the instance of {@link AllocateUnassignedDecision} that is returned will have {@link AllocateUnassignedDecision#isDecisionTaken()}
* return {@code false}.
*/
public AllocateUnassignedDecision getAllocateDecision() {
return allocateDecision;
}
/**
* Gets the move decision for the shard. If the shard was not in the started state,
* the instance of {@link MoveDecision} that is returned will have {@link MoveDecision#isDecisionTaken()}
* return {@code false}.
*/
public MoveDecision getMoveDecision() {
return moveDecision;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {<FILL_FUNCTION_BODY>}
}
|
// Serialize only the decisions that were actually taken; a shard is either
// unassigned (allocate decision) or started (move decision), so usually at
// most one branch emits output.
if (allocateDecision.isDecisionTaken()) {
allocateDecision.toXContent(builder, params);
}
if (moveDecision.isDecisionTaken()) {
moveDecision.toXContent(builder, params);
}
return builder;
| 608
| 74
| 682
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java
|
ClusterRebalanceAllocationDecider
|
canRebalance
|
class ClusterRebalanceAllocationDecider extends AllocationDecider {
private static final Logger LOGGER = LogManager.getLogger(ClusterRebalanceAllocationDecider.class);
public static final String NAME = "cluster_rebalance";
private static final String CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE = "cluster.routing.allocation.allow_rebalance";
public static final Setting<ClusterRebalanceType> CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING =
new Setting<>(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceType.INDICES_ALL_ACTIVE.toString(),
ClusterRebalanceType::parseString, DataTypes.STRING, Property.Dynamic, Property.NodeScope, Property.Exposed);
/**
* An enum representation for the configured re-balance type.
*/
public enum ClusterRebalanceType {
/**
* Re-balancing is allowed once a shard replication group is active
*/
ALWAYS,
/**
* Re-balancing is allowed only once all primary shards on all indices are active.
*/
INDICES_PRIMARIES_ACTIVE,
/**
* Re-balancing is allowed only once all shards on all indices are active.
*/
INDICES_ALL_ACTIVE;
public static ClusterRebalanceType parseString(String typeString) {
if ("always".equalsIgnoreCase(typeString)) {
return ClusterRebalanceType.ALWAYS;
} else if ("indices_primaries_active".equalsIgnoreCase(typeString) || "indicesPrimariesActive".equalsIgnoreCase(typeString)) {
return ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE;
} else if ("indices_all_active".equalsIgnoreCase(typeString) || "indicesAllActive".equalsIgnoreCase(typeString)) {
return ClusterRebalanceType.INDICES_ALL_ACTIVE;
}
throw new IllegalArgumentException("Illegal value for " +
CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING + ": " + typeString);
}
@Override
public String toString() {
return name().toLowerCase(Locale.ROOT);
}
}
private volatile ClusterRebalanceType type;
public ClusterRebalanceAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
try {
type = CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.get(settings);
} catch (IllegalStateException e) {
LOGGER.warn("[{}] has a wrong value {}, defaulting to 'indices_all_active'",
CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING,
CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getRaw(settings));
type = ClusterRebalanceType.INDICES_ALL_ACTIVE;
}
LOGGER.debug("using [{}] with [{}]", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, type);
clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, this::setType);
}
private void setType(ClusterRebalanceType type) {
this.type = type;
}
@Override
public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
return canRebalance(allocation);
}
@Override
public Decision canRebalance(RoutingAllocation allocation) {<FILL_FUNCTION_BODY>}
}
|
// Decide per configured rebalance type; each NO decision names the setting and
// value so the allocation explain API can surface the reason.
if (type == ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE) {
// check if there are unassigned primaries.
if (allocation.routingNodes().hasUnassignedPrimaries()) {
return allocation.decision(
Decision.NO,
NAME,
"the cluster has unassigned primary shards and cluster setting [%s] is set to [%s]",
CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
type
);
}
// check if there are initializing primaries that don't have a relocatingNodeId entry.
if (allocation.routingNodes().hasInactivePrimaries()) {
return allocation.decision(
Decision.NO,
NAME,
"the cluster has inactive primary shards and cluster setting [%s] is set to [%s]",
CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
type
);
}
return allocation.decision(Decision.YES, NAME, "all primary shards are active");
}
if (type == ClusterRebalanceType.INDICES_ALL_ACTIVE) {
// check if there are unassigned shards.
if (allocation.routingNodes().hasUnassignedShards()) {
return allocation.decision(
Decision.NO,
NAME,
"the cluster has unassigned shards and cluster setting [%s] is set to [%s]",
CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
type
);
}
// in case all indices are assigned, are there initializing shards which
// are not relocating?
if (allocation.routingNodes().hasInactiveShards()) {
return allocation.decision(
Decision.NO,
NAME,
"the cluster has inactive shards and cluster setting [%s] is set to [%s]",
CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
type
);
}
}
// Reached when type == ALWAYS, or when INDICES_ALL_ACTIVE found no unassigned/inactive shards.
return allocation.decision(Decision.YES, NAME, "all shards are active");
| 965
| 569
| 1,534
|
<methods>public org.elasticsearch.cluster.routing.allocation.decider.Decision canAllocate(org.elasticsearch.cluster.routing.ShardRouting, org.elasticsearch.cluster.routing.RoutingNode, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision canAllocate(org.elasticsearch.cluster.routing.ShardRouting, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision canAllocate(org.elasticsearch.cluster.metadata.IndexMetadata, org.elasticsearch.cluster.routing.RoutingNode, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision canForceAllocatePrimary(org.elasticsearch.cluster.routing.ShardRouting, org.elasticsearch.cluster.routing.RoutingNode, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision canRebalance(org.elasticsearch.cluster.routing.ShardRouting, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision canRebalance(org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision canRemain(org.elasticsearch.cluster.routing.ShardRouting, org.elasticsearch.cluster.routing.RoutingNode, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision shouldAutoExpandToNode(org.elasticsearch.cluster.metadata.IndexMetadata, org.elasticsearch.cluster.node.DiscoveryNode, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) <variables>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java
|
NodeVersionAllocationDecider
|
canAllocate
|
class NodeVersionAllocationDecider extends AllocationDecider {
public static final String NAME = "node_version";
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {<FILL_FUNCTION_BODY>}
private Decision isVersionCompatibleRelocatePrimary(final RoutingNodes routingNodes, final String sourceNodeId,
final RoutingNode target, final RoutingAllocation allocation) {
final RoutingNode source = routingNodes.node(sourceNodeId);
if (target.node().getVersion().onOrAfterMajorMinor(source.node().getVersion())) {
return allocation.decision(Decision.YES, NAME,
"can relocate primary shard from a node with version [%s] to a node with equal-or-newer version [%s]",
source.node().getVersion(), target.node().getVersion());
} else {
return allocation.decision(Decision.NO, NAME,
"cannot relocate primary shard from a node with version [%s] to a node with older version [%s]",
source.node().getVersion(), target.node().getVersion());
}
}
private Decision isVersionCompatibleAllocatingReplica(final RoutingNodes routingNodes, final String sourceNodeId,
final RoutingNode target, final RoutingAllocation allocation) {
final RoutingNode source = routingNodes.node(sourceNodeId);
if (target.node().getVersion().onOrAfterMajorMinor(source.node().getVersion())) {
/* we can allocate if we can recover from a node that is younger or on the same version
* if the primary is already running on a newer version that won't work due to possible
* differences in the lucene index format etc.*/
return allocation.decision(Decision.YES, NAME,
"can allocate replica shard to a node with version [%s] since this is equal-or-newer than the primary version [%s]",
target.node().getVersion(), source.node().getVersion());
} else {
return allocation.decision(Decision.NO, NAME,
"cannot allocate replica shard to a node with version [%s] since this is older than the primary version [%s]",
target.node().getVersion(), source.node().getVersion());
}
}
private Decision isVersionCompatible(SnapshotRecoverySource recoverySource, final RoutingNode target,
final RoutingAllocation allocation) {
if (target.node().getVersion().onOrAfter(recoverySource.version())) {
/* we can allocate if we can restore from a snapshot that is older or on the same version */
return allocation.decision(Decision.YES, NAME, "node version [%s] is the same or newer than snapshot version [%s]",
target.node().getVersion(), recoverySource.version());
} else {
return allocation.decision(Decision.NO, NAME, "node version [%s] is older than the snapshot version [%s]",
target.node().getVersion(), recoverySource.version());
}
}
}
|
if (shardRouting.primary()) {
if (shardRouting.currentNodeId() == null) {
if (shardRouting.recoverySource() != null && shardRouting.recoverySource().getType() == RecoverySource.Type.SNAPSHOT) {
// restoring from a snapshot - check that the node can handle the version
return isVersionCompatible((SnapshotRecoverySource)shardRouting.recoverySource(), node, allocation);
} else {
// existing or fresh primary on the node
return allocation.decision(Decision.YES, NAME, "the primary shard is new or already existed on the node");
}
} else {
// relocating primary, only migrate to newer host
return isVersionCompatibleRelocatePrimary(allocation.routingNodes(), shardRouting.currentNodeId(), node, allocation);
}
} else {
final ShardRouting primary = allocation.routingNodes().activePrimary(shardRouting.shardId());
// check that active primary has a newer version so that peer recovery works
if (primary != null) {
return isVersionCompatibleAllocatingReplica(allocation.routingNodes(), primary.currentNodeId(), node, allocation);
} else {
// ReplicaAfterPrimaryActiveAllocationDecider should prevent this case from occurring
return allocation.decision(Decision.YES, NAME, "no active primary shard yet");
}
}
| 778
| 358
| 1,136
|
<methods>public org.elasticsearch.cluster.routing.allocation.decider.Decision canAllocate(org.elasticsearch.cluster.routing.ShardRouting, org.elasticsearch.cluster.routing.RoutingNode, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision canAllocate(org.elasticsearch.cluster.routing.ShardRouting, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision canAllocate(org.elasticsearch.cluster.metadata.IndexMetadata, org.elasticsearch.cluster.routing.RoutingNode, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision canForceAllocatePrimary(org.elasticsearch.cluster.routing.ShardRouting, org.elasticsearch.cluster.routing.RoutingNode, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision canRebalance(org.elasticsearch.cluster.routing.ShardRouting, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision canRebalance(org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision canRemain(org.elasticsearch.cluster.routing.ShardRouting, org.elasticsearch.cluster.routing.RoutingNode, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision shouldAutoExpandToNode(org.elasticsearch.cluster.metadata.IndexMetadata, org.elasticsearch.cluster.node.DiscoveryNode, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) <variables>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java
|
ReplicaAfterPrimaryActiveAllocationDecider
|
canAllocate
|
class ReplicaAfterPrimaryActiveAllocationDecider extends AllocationDecider {
private static final String NAME = "replica_after_primary_active";
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
return canAllocate(shardRouting, allocation);
}
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) {<FILL_FUNCTION_BODY>}
}
|
if (shardRouting.primary()) {
return allocation.decision(Decision.YES, NAME, "shard is primary and can be allocated");
}
ShardRouting primary = allocation.routingNodes().activePrimary(shardRouting.shardId());
if (primary == null) {
return allocation.decision(Decision.NO, NAME, "primary shard for this replica is not yet active");
}
return allocation.decision(Decision.YES, NAME, "primary shard for this replica is already active");
| 132
| 136
| 268
|
<methods>public org.elasticsearch.cluster.routing.allocation.decider.Decision canAllocate(org.elasticsearch.cluster.routing.ShardRouting, org.elasticsearch.cluster.routing.RoutingNode, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision canAllocate(org.elasticsearch.cluster.routing.ShardRouting, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision canAllocate(org.elasticsearch.cluster.metadata.IndexMetadata, org.elasticsearch.cluster.routing.RoutingNode, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision canForceAllocatePrimary(org.elasticsearch.cluster.routing.ShardRouting, org.elasticsearch.cluster.routing.RoutingNode, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision canRebalance(org.elasticsearch.cluster.routing.ShardRouting, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision canRebalance(org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision canRemain(org.elasticsearch.cluster.routing.ShardRouting, org.elasticsearch.cluster.routing.RoutingNode, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision shouldAutoExpandToNode(org.elasticsearch.cluster.metadata.IndexMetadata, org.elasticsearch.cluster.node.DiscoveryNode, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) <variables>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java
|
UpdateTask
|
state
|
class UpdateTask extends SourcePrioritizedRunnable implements UnaryOperator<ClusterState> {
final ClusterApplyListener listener;
final UnaryOperator<ClusterState> updateFunction;
UpdateTask(Priority priority, String source, ClusterApplyListener listener,
UnaryOperator<ClusterState> updateFunction) {
super(priority, source);
this.listener = listener;
this.updateFunction = updateFunction;
}
@Override
public ClusterState apply(ClusterState clusterState) {
return updateFunction.apply(clusterState);
}
@Override
public void run() {
runTask(this);
}
}
@Override
protected synchronized void doStop() {
for (Map.Entry<TimeoutClusterStateListener, NotifyTimeout> onGoingTimeout : timeoutClusterStateListeners.entrySet()) {
try {
onGoingTimeout.getValue().cancel();
onGoingTimeout.getKey().onClose();
} catch (Exception ex) {
LOGGER.debug("failed to notify listeners on shutdown", ex);
}
}
ThreadPool.terminate(threadPoolExecutor, 10, TimeUnit.SECONDS);
}
@Override
protected synchronized void doClose() {
}
public ThreadPool threadPool() {
return threadPool;
}
/**
* The current cluster state.
* Should be renamed to appliedClusterState
*/
public ClusterState state() {<FILL_FUNCTION_BODY>
|
assert assertNotCalledFromClusterStateApplier("the applied cluster state is not yet available");
ClusterState clusterState = this.state.get();
assert clusterState != null : "initial cluster state not set yet";
return clusterState;
| 379
| 62
| 441
|
<methods>public void addLifecycleListener(org.elasticsearch.common.component.LifecycleListener) ,public void close() ,public org.elasticsearch.common.component.Lifecycle.State lifecycleState() ,public void removeLifecycleListener(org.elasticsearch.common.component.LifecycleListener) ,public void start() ,public void stop() <variables>protected final org.elasticsearch.common.component.Lifecycle lifecycle,private final List<org.elasticsearch.common.component.LifecycleListener> listeners
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/cluster/service/ClusterService.java
|
ClusterService
|
assertClusterOrMasterStateThread
|
class ClusterService extends AbstractLifecycleComponent {
private final MasterService masterService;
private final ClusterApplierService clusterApplierService;
public static final org.elasticsearch.common.settings.Setting.AffixSetting<String> USER_DEFINED_METADATA =
Setting.prefixKeySetting("cluster.metadata.", (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope));
/**
* The node's settings.
*/
private final Settings settings;
private final ClusterName clusterName;
private final OperationRouting operationRouting;
private final ClusterSettings clusterSettings;
public ClusterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) {
this(settings, clusterSettings, new MasterService(settings, clusterSettings, threadPool),
new ClusterApplierService(Node.NODE_NAME_SETTING.get(settings), settings, clusterSettings, threadPool));
}
public ClusterService(Settings settings,
ClusterSettings clusterSettings,
MasterService masterService,
ClusterApplierService clusterApplierService) {
this.settings = settings;
this.masterService = masterService;
this.operationRouting = new OperationRouting(settings, clusterSettings);
this.clusterSettings = clusterSettings;
this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
// Add a no-op update consumer so changes are logged
this.clusterSettings.addAffixUpdateConsumer(USER_DEFINED_METADATA, (first, second) -> {}, (first, second) -> {});
this.clusterApplierService = clusterApplierService;
}
public synchronized void setNodeConnectionsService(NodeConnectionsService nodeConnectionsService) {
clusterApplierService.setNodeConnectionsService(nodeConnectionsService);
}
@Override
protected synchronized void doStart() {
clusterApplierService.start();
masterService.start();
}
@Override
protected synchronized void doStop() {
masterService.stop();
clusterApplierService.stop();
}
@Override
protected synchronized void doClose() {
masterService.close();
clusterApplierService.close();
}
/**
* The local node.
*/
public DiscoveryNode localNode() {
DiscoveryNode localNode = state().nodes().getLocalNode();
if (localNode == null) {
throw new IllegalStateException("No local node found. Is the node started?");
}
return localNode;
}
public OperationRouting operationRouting() {
return operationRouting;
}
/**
* The currently applied cluster state.
* TODO: Should be renamed to appliedState / appliedClusterState
*/
public ClusterState state() {
return clusterApplierService.state();
}
/**
* Adds a high priority applier of updated cluster states.
*/
public void addHighPriorityApplier(ClusterStateApplier applier) {
clusterApplierService.addHighPriorityApplier(applier);
}
/**
* Adds an applier which will be called after all high priority and normal appliers have been called.
*/
public void addLowPriorityApplier(ClusterStateApplier applier) {
clusterApplierService.addLowPriorityApplier(applier);
}
/**
* Adds a applier of updated cluster states.
*/
public void addStateApplier(ClusterStateApplier applier) {
clusterApplierService.addStateApplier(applier);
}
/**
* Removes an applier of updated cluster states.
*/
public void removeApplier(ClusterStateApplier applier) {
clusterApplierService.removeApplier(applier);
}
/**
* Add a listener for updated cluster states
*/
public void addListener(ClusterStateListener listener) {
clusterApplierService.addListener(listener);
}
/**
* Removes a listener for updated cluster states.
*/
public void removeListener(ClusterStateListener listener) {
clusterApplierService.removeListener(listener);
}
/**
* Add a listener for on/off local node master events
*/
public void addLocalNodeMasterListener(LocalNodeMasterListener listener) {
clusterApplierService.addLocalNodeMasterListener(listener);
}
public MasterService getMasterService() {
return masterService;
}
public ClusterApplierService getClusterApplierService() {
return clusterApplierService;
}
public static boolean assertClusterOrMasterStateThread() {<FILL_FUNCTION_BODY>}
public ClusterName getClusterName() {
return clusterName;
}
public ClusterSettings getClusterSettings() {
return clusterSettings;
}
/**
* The node's settings.
*/
public Settings getSettings() {
return settings;
}
/**
* Submits a cluster state update task; unlike {@link #submitStateUpdateTask(String, Object, ClusterStateTaskConfig,
* ClusterStateTaskExecutor, ClusterStateTaskListener)}, submitted updates will not be batched.
*
* @param source the source of the cluster state update task
* @param updateTask the full context for the cluster state update
* task
*
*/
public <T extends ClusterStateTaskConfig & ClusterStateTaskExecutor<T> & ClusterStateTaskListener>
void submitStateUpdateTask(String source, T updateTask) {
submitStateUpdateTask(source, updateTask, updateTask, updateTask, updateTask);
}
/**
* Submits a cluster state update task; submitted updates will be
* batched across the same instance of executor. The exact batching
* semantics depend on the underlying implementation but a rough
* guideline is that if the update task is submitted while there
* are pending update tasks for the same executor, these update
* tasks will all be executed on the executor in a single batch
*
* @param source the source of the cluster state update task
* @param task the state needed for the cluster state update task
* @param config the cluster state update task configuration
* @param executor the cluster state update task executor; tasks
* that share the same executor will be executed
* batches on this executor
* @param listener callback after the cluster state update task
* completes
* @param <T> the type of the cluster state update task state
*
*/
public <T> void submitStateUpdateTask(String source,
T task,
ClusterStateTaskConfig config,
ClusterStateTaskExecutor<T> executor,
ClusterStateTaskListener listener) {
submitStateUpdateTasks(source, Collections.singletonMap(task, listener), config, executor);
}
/**
* Submits a batch of cluster state update tasks; submitted updates are guaranteed to be processed together,
* potentially with more tasks of the same executor.
*
* @param source the source of the cluster state update task
* @param tasks a map of update tasks and their corresponding listeners
* @param config the cluster state update task configuration
* @param executor the cluster state update task executor; tasks
* that share the same executor will be executed
* batches on this executor
* @param <T> the type of the cluster state update task state
*
*/
public <T> void submitStateUpdateTasks(final String source,
final Map<T, ClusterStateTaskListener> tasks,
final ClusterStateTaskConfig config,
final ClusterStateTaskExecutor<T> executor) {
masterService.submitStateUpdateTasks(source, tasks, config, executor);
}
}
|
assert Thread.currentThread().getName().contains(ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME) ||
Thread.currentThread().getName().contains(MasterService.MASTER_UPDATE_THREAD_NAME) :
"not called from the master/cluster state update thread";
return true;
| 1,963
| 76
| 2,039
|
<methods>public void addLifecycleListener(org.elasticsearch.common.component.LifecycleListener) ,public void close() ,public org.elasticsearch.common.component.Lifecycle.State lifecycleState() ,public void removeLifecycleListener(org.elasticsearch.common.component.LifecycleListener) ,public void start() ,public void stop() <variables>protected final org.elasticsearch.common.component.Lifecycle lifecycle,private final List<org.elasticsearch.common.component.LifecycleListener> listeners
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/common/Numbers.java
|
Numbers
|
toLongExact
|
class Numbers {
private static final BigInteger MAX_LONG_VALUE = BigInteger.valueOf(Long.MAX_VALUE);
private static final BigInteger MIN_LONG_VALUE = BigInteger.valueOf(Long.MIN_VALUE);
private Numbers() {
}
public static long bytesToLong(BytesRef bytes) {
int high = (bytes.bytes[bytes.offset + 0] << 24) | ((bytes.bytes[bytes.offset + 1] & 0xff) << 16) | ((bytes.bytes[bytes.offset + 2] & 0xff) << 8) | (bytes.bytes[bytes.offset + 3] & 0xff);
int low = (bytes.bytes[bytes.offset + 4] << 24) | ((bytes.bytes[bytes.offset + 5] & 0xff) << 16) | ((bytes.bytes[bytes.offset + 6] & 0xff) << 8) | (bytes.bytes[bytes.offset + 7] & 0xff);
return (((long) high) << 32) | (low & 0x0ffffffffL);
}
/**
* Converts a long to a byte array.
*
* @param val The long to convert to a byte array
* @return The byte array converted
*/
public static byte[] longToBytes(long val) {
byte[] arr = new byte[8];
arr[0] = (byte) (val >>> 56);
arr[1] = (byte) (val >>> 48);
arr[2] = (byte) (val >>> 40);
arr[3] = (byte) (val >>> 32);
arr[4] = (byte) (val >>> 24);
arr[5] = (byte) (val >>> 16);
arr[6] = (byte) (val >>> 8);
arr[7] = (byte) (val);
return arr;
}
/** Return the long that {@code n} stores, or throws an exception if the
* stored value cannot be converted to a long that stores the exact same
* value. */
public static long toLongExact(Number n) {<FILL_FUNCTION_BODY>}
/** Return the long that {@code stringValue} stores or throws an exception if the
* stored value cannot be converted to a long that stores the exact same
* value and {@code coerce} is false. */
public static long toLong(String stringValue, boolean coerce) {
try {
return Long.parseLong(stringValue);
} catch (NumberFormatException e) {
// we will try again with BigDecimal
}
final BigInteger bigIntegerValue;
try {
BigDecimal bigDecimalValue = new BigDecimal(stringValue);
bigIntegerValue = coerce ? bigDecimalValue.toBigInteger() : bigDecimalValue.toBigIntegerExact();
} catch (ArithmeticException e) {
throw new IllegalArgumentException("Value [" + stringValue + "] has a decimal part");
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Cannot convert input string \"" + stringValue + "\" to biginteger");
}
if (bigIntegerValue.compareTo(MAX_LONG_VALUE) > 0 || bigIntegerValue.compareTo(MIN_LONG_VALUE) < 0) {
throw new IllegalArgumentException("Value [" + stringValue + "] is out of range for a long");
}
return bigIntegerValue.longValue();
}
/** Return the int that {@code n} stores, or throws an exception if the
* stored value cannot be converted to an int that stores the exact same
* value. */
public static int toIntExact(Number n) {
return Math.toIntExact(toLongExact(n));
}
}
|
if (n instanceof Byte || n instanceof Short || n instanceof Integer
|| n instanceof Long) {
return n.longValue();
} else if (n instanceof Float || n instanceof Double) {
double d = n.doubleValue();
if (d != Math.round(d)) {
throw new IllegalArgumentException(n + " is not an integer value");
}
return n.longValue();
} else if (n instanceof BigDecimal) {
return ((BigDecimal) n).toBigIntegerExact().longValueExact();
} else if (n instanceof BigInteger) {
return ((BigInteger) n).longValueExact();
} else {
throw new IllegalArgumentException("Cannot check whether [" + n + "] of class [" + n.getClass().getName()
+ "] is actually a long");
}
| 961
| 206
| 1,167
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/common/Randomness.java
|
Randomness
|
get
|
class Randomness {
private static final Method CURRENT_METHOD;
private static final Method GET_RANDOM_METHOD;
static {
Method maybeCurrentMethod;
Method maybeGetRandomMethod;
try {
Class<?> clazz = Class.forName("com.carrotsearch.randomizedtesting.RandomizedContext");
maybeCurrentMethod = clazz.getMethod("current");
maybeGetRandomMethod = clazz.getMethod("getRandom");
} catch (Exception e) {
maybeCurrentMethod = null;
maybeGetRandomMethod = null;
}
CURRENT_METHOD = maybeCurrentMethod;
GET_RANDOM_METHOD = maybeGetRandomMethod;
}
private Randomness() {
}
/**
* Provides a reproducible source of randomness seeded by a long
* seed in the settings with the key setting.
*
* @param settings the settings containing the seed
* @param setting the setting to access the seed
* @return a reproducible source of randomness
*/
public static Random get(Settings settings, Setting<Long> setting) {
if (setting.exists(settings)) {
return new Random(setting.get(settings));
} else {
return get();
}
}
/**
* Provides a source of randomness that is reproducible when
* running under the Elasticsearch test suite, and otherwise
* produces a non-reproducible source of randomness. Reproducible
* sources of randomness are created when the system property
* "tests.seed" is set and the security policy allows reading this
* system property. Otherwise, non-reproducible sources of
* randomness are created.
*
* @return a source of randomness
* @throws IllegalStateException if running tests but was not able
* to acquire an instance of Random from
* RandomizedContext or tests are
* running but tests.seed is not set
*/
public static Random get() {<FILL_FUNCTION_BODY>}
/**
* Provides a secure source of randomness.
*
* This acts exactly similar to {@link #get()}, but returning a new {@link SecureRandom}.
*/
public static SecureRandom createSecure() {
if (CURRENT_METHOD != null && GET_RANDOM_METHOD != null) {
// tests, so just use a seed from the non secure random
byte[] seed = new byte[16];
get().nextBytes(seed);
return new SecureRandom(seed);
} else {
return new SecureRandom();
}
}
@SuppressForbidden(reason = "ThreadLocalRandom is okay when not running tests")
private static Random getWithoutSeed() {
assert CURRENT_METHOD == null && GET_RANDOM_METHOD == null : "running under tests but tried to create non-reproducible random";
return ThreadLocalRandom.current();
}
public static void shuffle(List<?> list) {
Collections.shuffle(list, get());
}
}
|
if (CURRENT_METHOD != null && GET_RANDOM_METHOD != null) {
try {
Object randomizedContext = CURRENT_METHOD.invoke(null);
return (Random) GET_RANDOM_METHOD.invoke(randomizedContext);
} catch (ReflectiveOperationException e) {
// unexpected, bail
throw new IllegalStateException("running tests but failed to invoke RandomizedContext#getRandom", e);
}
} else {
return getWithoutSeed();
}
| 772
| 130
| 902
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/common/bytes/ByteBufferReference.java
|
ByteBufferReference
|
slice
|
class ByteBufferReference extends AbstractBytesReference {
private final ByteBuffer buffer;
private final int length;
ByteBufferReference(ByteBuffer buffer) {
this.buffer = buffer.slice();
this.length = buffer.remaining();
}
@Override
public byte get(int index) {
return buffer.get(index);
}
@Override
public int getInt(int index) {
return buffer.getInt(index);
}
@Override
public int length() {
return length;
}
@Override
public BytesReference slice(int from, int length) {<FILL_FUNCTION_BODY>}
/**
* This will return a bytes ref composed of the bytes. If this is a direct byte buffer, the bytes will
* have to be copied.
*
* @return the bytes ref
*/
@Override
public BytesRef toBytesRef() {
if (buffer.hasArray()) {
return new BytesRef(buffer.array(), buffer.arrayOffset(), length);
}
final byte[] copy = new byte[length];
buffer.get(copy, 0, length);
return new BytesRef(copy);
}
@Override
public long ramBytesUsed() {
return buffer.capacity();
}
}
|
Objects.checkFromIndexSize(from, length, this.length);
buffer.position(from);
buffer.limit(from + length);
ByteBufferReference newByteBuffer = new ByteBufferReference(buffer);
buffer.position(0);
buffer.limit(this.length);
return newByteBuffer;
| 333
| 80
| 413
|
<methods>public non-sealed void <init>() ,public int compareTo(org.elasticsearch.common.bytes.BytesReference) ,public boolean equals(java.lang.Object) ,public int getInt(int) ,public int hashCode() ,public int indexOf(byte, int) ,public BytesRefIterator iterator() ,public org.elasticsearch.common.io.stream.StreamInput streamInput() throws java.io.IOException,public org.elasticsearch.common.xcontent.XContentBuilder toXContent(org.elasticsearch.common.xcontent.XContentBuilder, org.elasticsearch.common.xcontent.ToXContent.Params) throws java.io.IOException,public java.lang.String utf8ToString() ,public void writeTo(java.io.OutputStream) throws java.io.IOException<variables>private java.lang.Integer hash
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/common/bytes/BytesReferenceStreamInput.java
|
BytesReferenceStreamInput
|
read
|
class BytesReferenceStreamInput extends StreamInput {
private final BytesRefIterator iterator;
private int sliceIndex;
private BytesRef slice;
private final int length; // the total size of the stream
private int offset; // the current position of the stream
BytesReferenceStreamInput(BytesRefIterator iterator, final int length) throws IOException {
this.iterator = iterator;
this.slice = iterator.next();
this.length = length;
this.offset = 0;
this.sliceIndex = 0;
}
@Override
public byte readByte() throws IOException {
if (offset >= length) {
throw new EOFException();
}
maybeNextSlice();
byte b = slice.bytes[slice.offset + (sliceIndex++)];
offset++;
return b;
}
private void maybeNextSlice() throws IOException {
while (sliceIndex == slice.length) {
slice = iterator.next();
sliceIndex = 0;
if (slice == null) {
throw new EOFException();
}
}
}
@Override
public void readBytes(byte[] b, int bOffset, int len) throws IOException {
if (offset + len > length) {
throw new IndexOutOfBoundsException("Cannot read " + len + " bytes from stream with length " + length + " at offset " + offset);
}
read(b, bOffset, len);
}
@Override
public int read() throws IOException {
if (offset >= length) {
return -1;
}
return Byte.toUnsignedInt(readByte());
}
@Override
public int read(final byte[] b, final int bOffset, final int len) throws IOException {<FILL_FUNCTION_BODY>}
@Override
public void close() throws IOException {
// do nothing
}
@Override
public int available() throws IOException {
return length - offset;
}
@Override
protected void ensureCanReadBytes(int bytesToRead) throws EOFException {
int bytesAvailable = length - offset;
if (bytesAvailable < bytesToRead) {
throw new EOFException("tried to read: " + bytesToRead + " bytes but only " + bytesAvailable + " remaining");
}
}
@Override
public long skip(long n) throws IOException {
final int skip = (int) Math.min(Integer.MAX_VALUE, n);
final int numBytesSkipped = Math.min(skip, length - offset);
int remaining = numBytesSkipped;
while (remaining > 0) {
maybeNextSlice();
int currentLen = Math.min(remaining, slice.length - sliceIndex);
remaining -= currentLen;
sliceIndex += currentLen;
offset += currentLen;
assert remaining >= 0 : "remaining: " + remaining;
}
return numBytesSkipped;
}
int getOffset() {
return offset;
}
}
|
if (offset >= length) {
return -1;
}
final int numBytesToCopy = Math.min(len, length - offset);
int remaining = numBytesToCopy; // copy the full length or the remaining part
int destOffset = bOffset;
while (remaining > 0) {
maybeNextSlice();
final int currentLen = Math.min(remaining, slice.length - sliceIndex);
assert currentLen > 0 : "length has to be > 0 to make progress but was: " + currentLen;
System.arraycopy(slice.bytes, slice.offset + sliceIndex, b, destOffset, currentLen);
destOffset += currentLen;
remaining -= currentLen;
sliceIndex += currentLen;
offset += currentLen;
assert remaining >= 0 : "remaining: " + remaining;
}
return numBytesToCopy;
| 754
| 215
| 969
|
<methods>public non-sealed void <init>() ,public abstract int available() throws java.io.IOException,public abstract void close() throws java.io.IOException,public org.elasticsearch.Version getVersion() ,public T[] readArray(Reader<T>, IntFunction<T[]>) throws java.io.IOException,public final boolean readBoolean() throws java.io.IOException,public abstract byte readByte() throws java.io.IOException,public byte[] readByteArray() throws java.io.IOException,public abstract void readBytes(byte[], int, int) throws java.io.IOException,public BytesRef readBytesRef() throws java.io.IOException,public BytesRef readBytesRef(int) throws java.io.IOException,public org.elasticsearch.common.bytes.BytesReference readBytesReference() throws java.io.IOException,public org.elasticsearch.common.bytes.BytesReference readBytesReference(int) throws java.io.IOException,public final double readDouble() throws java.io.IOException,public double[] readDoubleArray() throws java.io.IOException,public E readEnum(Class<E>) throws java.io.IOException,public EnumSet<E> readEnumSet(Class<E>) throws java.io.IOException,public T readException() throws java.io.IOException,public final float readFloat() throws java.io.IOException,public float[] readFloatArray() throws java.io.IOException,public void readFully(byte[]) throws java.io.IOException,public java.lang.Object readGenericValue() throws java.io.IOException,public org.elasticsearch.common.geo.GeoPoint readGeoPoint() throws java.io.IOException,public ImmutableOpenMap<K,V> readImmutableMap(Reader<K>, Reader<V>) throws java.io.IOException,public int readInt() throws java.io.IOException,public int[] readIntArray() throws java.io.IOException,public List<T> readList(Reader<T>) throws java.io.IOException,public long readLong() throws java.io.IOException,public long[] readLongArray() throws java.io.IOException,public Map<K,V> readMap(Supplier<Map<K,V>>, Reader<K>, Reader<V>) throws java.io.IOException,public Map<K,V> readMap(Reader<K>, Reader<V>) throws 
java.io.IOException,public Map<java.lang.String,java.lang.Object> readMap() throws java.io.IOException,public Map<K,List<V>> readMapOfLists(Reader<K>, Reader<V>) throws java.io.IOException,public C readNamedWriteable(Class<C>) throws java.io.IOException,public C readNamedWriteable(Class<C>, java.lang.String) throws java.io.IOException,public List<T> readNamedWriteableList(Class<T>) throws java.io.IOException,public T[] readOptionalArray(Reader<T>, IntFunction<T[]>) throws java.io.IOException,public final java.lang.Boolean readOptionalBoolean() throws java.io.IOException,public org.elasticsearch.common.bytes.BytesReference readOptionalBytesReference() throws java.io.IOException,public final java.lang.Double readOptionalDouble() throws java.io.IOException,public java.lang.Float readOptionalFloat() throws java.io.IOException,public java.lang.Long readOptionalLong() throws java.io.IOException,public C readOptionalNamedWriteable(Class<C>) throws java.io.IOException,public java.lang.String readOptionalString() throws java.io.IOException,public java.lang.String[] readOptionalStringArray() throws java.io.IOException,public io.crate.common.unit.TimeValue readOptionalTimeValue() throws java.io.IOException,public DateTimeZone readOptionalTimeZone() throws java.io.IOException,public java.lang.Integer readOptionalVInt() throws java.io.IOException,public T readOptionalWriteable(Reader<T>) throws java.io.IOException,public Set<T> readSet(Reader<T>) throws java.io.IOException,public short readShort() throws java.io.IOException,public java.lang.String readString() throws java.io.IOException,public java.lang.String[] readStringArray() throws java.io.IOException,public List<java.lang.String> readStringList() throws java.io.IOException,public io.crate.common.unit.TimeValue readTimeValue() throws java.io.IOException,public DateTimeZone readTimeZone() throws java.io.IOException,public int readVInt() throws java.io.IOException,public int[] readVIntArray() throws 
java.io.IOException,public long readVLong() throws java.io.IOException,public long[] readVLongArray() throws java.io.IOException,public long readZLong() throws java.io.IOException,public void setVersion(org.elasticsearch.Version) ,public static org.elasticsearch.common.io.stream.StreamInput wrap(byte[]) ,public static org.elasticsearch.common.io.stream.StreamInput wrap(byte[], int, int) <variables>private static final non-sealed Map<java.lang.Byte,java.util.concurrent.TimeUnit> BYTE_TIME_UNIT_MAP,private static final ThreadLocal<CharsRef> SMALL_SPARE,private static final int SMALL_STRING_LIMIT,private static final ThreadLocal<byte[]> STRING_READ_BUFFER,private CharsRef largeSpare,private org.elasticsearch.Version version
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java
|
PagedBytesReference
|
iterator
|
class PagedBytesReference extends AbstractBytesReference {
private static final int PAGE_SIZE = PageCacheRecycler.BYTE_PAGE_SIZE;
private final ByteArray byteArray;
private final int offset;
private final int length;
PagedBytesReference(ByteArray byteArray, int from, int length) {
assert byteArray.hasArray() == false : "use BytesReference#fromByteArray";
this.byteArray = byteArray;
this.offset = from;
this.length = length;
}
@Override
public byte get(int index) {
return byteArray.get(offset + index);
}
@Override
public int length() {
return length;
}
@Override
public BytesReference slice(int from, int length) {
Objects.checkFromIndexSize(from, length, this.length);
return new PagedBytesReference(byteArray, offset + from, length);
}
@Override
public BytesRef toBytesRef() {
BytesRef bref = new BytesRef();
// if length <= pagesize this will dereference the page, or materialize the byte[]
if (byteArray != null) {
byteArray.get(offset, length, bref);
}
return bref;
}
@Override
public final BytesRefIterator iterator() {<FILL_FUNCTION_BODY>}
@Override
public long ramBytesUsed() {
return byteArray.ramBytesUsed();
}
}
|
final int offset = this.offset;
final int length = this.length;
// this iteration is page aligned to ensure we do NOT materialize the pages from the ByteArray
// we calculate the initial fragment size here to ensure that if this reference is a slice we are still page aligned
// across the entire iteration. The first page is smaller if our offset != 0 then we start in the middle of the page
// otherwise we iterate full pages until we reach the last chunk which also might end within a page.
final int initialFragmentSize = offset != 0 ? PAGE_SIZE - (offset % PAGE_SIZE) : PAGE_SIZE;
return new BytesRefIterator() {
int position = 0;
int nextFragmentSize = Math.min(length, initialFragmentSize);
// this BytesRef is reused across the iteration on purpose - BytesRefIterator interface was designed for this
final BytesRef slice = new BytesRef();
@Override
public BytesRef next() throws IOException {
if (nextFragmentSize != 0) {
final boolean materialized = byteArray.get(offset + position, nextFragmentSize, slice);
assert materialized == false : "iteration should be page aligned but array got materialized";
position += nextFragmentSize;
final int remaining = length - position;
nextFragmentSize = Math.min(remaining, PAGE_SIZE);
return slice;
} else {
assert nextFragmentSize == 0 : "fragmentSize expected [0] but was: [" + nextFragmentSize + "]";
return null; // we are done with this iteration
}
}
};
| 386
| 389
| 775
|
<methods>public non-sealed void <init>() ,public int compareTo(org.elasticsearch.common.bytes.BytesReference) ,public boolean equals(java.lang.Object) ,public int getInt(int) ,public int hashCode() ,public int indexOf(byte, int) ,public BytesRefIterator iterator() ,public org.elasticsearch.common.io.stream.StreamInput streamInput() throws java.io.IOException,public org.elasticsearch.common.xcontent.XContentBuilder toXContent(org.elasticsearch.common.xcontent.XContentBuilder, org.elasticsearch.common.xcontent.ToXContent.Params) throws java.io.IOException,public java.lang.String utf8ToString() ,public void writeTo(java.io.OutputStream) throws java.io.IOException<variables>private java.lang.Integer hash
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java
|
AbstractLifecycleComponent
|
start
|
class AbstractLifecycleComponent implements LifecycleComponent {
protected final Lifecycle lifecycle = new Lifecycle();
private final List<LifecycleListener> listeners = new CopyOnWriteArrayList<>();
protected AbstractLifecycleComponent() {
}
@Override
public Lifecycle.State lifecycleState() {
return this.lifecycle.state();
}
@Override
public void addLifecycleListener(LifecycleListener listener) {
listeners.add(listener);
}
@Override
public void removeLifecycleListener(LifecycleListener listener) {
listeners.remove(listener);
}
@Override
public void start() {<FILL_FUNCTION_BODY>}
protected abstract void doStart();
@Override
public void stop() {
synchronized (lifecycle) {
if (!lifecycle.canMoveToStopped()) {
return;
}
for (LifecycleListener listener : listeners) {
listener.beforeStop();
}
lifecycle.moveToStopped();
doStop();
for (LifecycleListener listener : listeners) {
listener.afterStop();
}
}
}
protected abstract void doStop();
@Override
public void close() {
synchronized (lifecycle) {
if (lifecycle.started()) {
stop();
}
if (!lifecycle.canMoveToClosed()) {
return;
}
for (LifecycleListener listener : listeners) {
listener.beforeClose();
}
lifecycle.moveToClosed();
try {
doClose();
} catch (IOException e) {
throw new UncheckedIOException(e);
} finally {
for (LifecycleListener listener : listeners) {
listener.afterClose();
}
}
}
}
protected abstract void doClose() throws IOException;
}
|
synchronized (lifecycle) {
if (!lifecycle.canMoveToStarted()) {
return;
}
for (LifecycleListener listener : listeners) {
listener.beforeStart();
}
doStart();
lifecycle.moveToStarted();
for (LifecycleListener listener : listeners) {
listener.afterStart();
}
}
| 518
| 104
| 622
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java
|
CompressedXContent
|
crc32
|
class CompressedXContent {
private static int crc32(BytesReference data) {<FILL_FUNCTION_BODY>}
private final byte[] bytes;
private final int crc32;
// Used for serialization
private CompressedXContent(byte[] compressed, int crc32) {
this.bytes = compressed;
this.crc32 = crc32;
assertConsistent();
}
/**
* Create a {@link CompressedXContent} out of a {@link ToXContent} instance.
*/
public CompressedXContent(ToXContent xcontent, XContentType type, ToXContent.Params params) throws IOException {
BytesStreamOutput bStream = new BytesStreamOutput();
OutputStream compressedStream = CompressorFactory.COMPRESSOR.threadLocalOutputStream(bStream);
CRC32 crc32 = new CRC32();
try (OutputStream checkedStream = new CheckedOutputStream(compressedStream, crc32)) {
try (XContentBuilder builder = XContentFactory.builder(type, checkedStream)) {
builder.startObject();
xcontent.toXContent(builder, params);
builder.endObject();
}
}
this.bytes = BytesReference.toBytes(bStream.bytes());
this.crc32 = (int) crc32.getValue();
assertConsistent();
}
/**
* Create a {@link CompressedXContent} out of a serialized {@link ToXContent}
* that may already be compressed.
*/
public CompressedXContent(BytesReference data) throws IOException {
Compressor compressor = CompressorFactory.compressor(data);
if (compressor != null) {
// already compressed...
this.bytes = BytesReference.toBytes(data);
this.crc32 = crc32(uncompressed());
} else {
this.bytes = BytesReference.toBytes(CompressorFactory.COMPRESSOR.compress(data));
this.crc32 = crc32(data);
}
assertConsistent();
}
private void assertConsistent() {
assert CompressorFactory.compressor(new BytesArray(bytes)) != null;
assert this.crc32 == crc32(uncompressed());
}
public CompressedXContent(byte[] data) throws IOException {
this(new BytesArray(data));
}
public CompressedXContent(String str) throws IOException {
this(new BytesArray(str.getBytes(StandardCharsets.UTF_8)));
}
/** Return the compressed bytes. */
public byte[] compressed() {
return this.bytes;
}
/** Return the compressed bytes as a {@link BytesReference}. */
public BytesReference compressedReference() {
return new BytesArray(bytes);
}
/** Return the uncompressed bytes. */
public BytesReference uncompressed() {
try {
return CompressorFactory.uncompress(new BytesArray(bytes));
} catch (IOException e) {
throw new IllegalStateException("Cannot decompress compressed string", e);
}
}
public String string() {
return uncompressed().utf8ToString();
}
public static CompressedXContent readCompressedString(StreamInput in) throws IOException {
int crc32 = in.readInt();
return new CompressedXContent(in.readByteArray(), crc32);
}
public void writeTo(StreamOutput out) throws IOException {
out.writeInt(crc32);
out.writeByteArray(bytes);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
CompressedXContent that = (CompressedXContent) o;
if (Arrays.equals(compressed(), that.compressed())) {
return true;
}
if (crc32 != that.crc32) {
return false;
}
return uncompressed().equals(that.uncompressed());
}
@Override
public int hashCode() {
return crc32;
}
@Override
public String toString() {
return string();
}
}
|
CRC32 crc32 = new CRC32();
try {
data.writeTo(new CheckedOutputStream(Streams.NULL_OUTPUT_STREAM, crc32));
} catch (IOException bogus) {
// cannot happen
throw new Error(bogus);
}
return (int) crc32.getValue();
| 1,106
| 95
| 1,201
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/common/hash/MessageDigests.java
|
MessageDigests
|
createThreadLocalMessageDigest
|
class MessageDigests {
private static ThreadLocal<MessageDigest> createThreadLocalMessageDigest(String digest) {<FILL_FUNCTION_BODY>}
private static final ThreadLocal<MessageDigest> MD5_DIGEST = createThreadLocalMessageDigest("MD5");
private static final ThreadLocal<MessageDigest> SHA_1_DIGEST = createThreadLocalMessageDigest("SHA-1");
/**
* Returns a {@link MessageDigest} instance for MD5 digests; note
* that the instance returned is thread local and must not be
* shared amongst threads.
*
* @return a thread local {@link MessageDigest} instance that
* provides MD5 message digest functionality.
*/
public static MessageDigest md5() {
return get(MD5_DIGEST);
}
/**
* Returns a {@link MessageDigest} instance for SHA-1 digests; note
* that the instance returned is thread local and must not be
* shared amongst threads.
*
* @return a thread local {@link MessageDigest} instance that
* provides SHA-1 message digest functionality.
*/
public static MessageDigest sha1() {
return get(SHA_1_DIGEST);
}
private static MessageDigest get(ThreadLocal<MessageDigest> messageDigest) {
MessageDigest instance = messageDigest.get();
instance.reset();
return instance;
}
}
|
return ThreadLocal.withInitial(() -> {
try {
return MessageDigest.getInstance(digest);
} catch (NoSuchAlgorithmException e) {
throw new IllegalStateException("unexpected exception creating MessageDigest instance for [" + digest + "]", e);
}
});
| 365
| 76
| 441
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/common/io/PathUtils.java
|
PathUtils
|
get
|
class PathUtils {
/** no instantiation */
private PathUtils() {
}
/** the actual JDK default */
static final FileSystem ACTUAL_DEFAULT = FileSystems.getDefault();
/** can be changed by tests */
static volatile FileSystem DEFAULT = ACTUAL_DEFAULT;
/**
* Returns a {@code Path} from name components.
* <p>
* This works just like {@code Paths.get()}.
* Remember: just like {@code Paths.get()} this is NOT A STRING CONCATENATION
* UTILITY FUNCTION.
* <p>
* Remember: this should almost never be used. Usually resolve
* a path against an existing one!
*/
public static Path get(String first, String... more) {
return DEFAULT.getPath(first, more);
}
/**
* Returns a {@code Path} from a URI
* <p>
* This works just like {@code Paths.get()}.
* <p>
* Remember: this should almost never be used. Usually resolve
* a path against an existing one!
*/
public static Path get(URI uri) {
if (uri.getScheme().equalsIgnoreCase("file")) {
return DEFAULT.provider().getPath(uri);
} else {
return Paths.get(uri);
}
}
/**
* Tries to resolve the given path against the list of available roots.
*
* If path starts with one of the listed roots, it returned back by this method, otherwise null is returned.
*/
public static Path get(Path[] roots, String path) {<FILL_FUNCTION_BODY>}
/**
* Tries to resolve the given file uri against the list of available roots.
*
* If uri starts with one of the listed roots, it returned back by this method, otherwise null is returned.
*/
public static Path get(Path[] roots, URI uri) {
return get(roots, PathUtils.get(uri).normalize().toString());
}
/**
* Returns the default FileSystem.
*/
public static FileSystem getDefaultFileSystem() {
return DEFAULT;
}
}
|
for (Path root : roots) {
Path normalizedRoot = root.normalize();
Path normalizedPath = normalizedRoot.resolve(path).normalize();
if (normalizedPath.startsWith(normalizedRoot)) {
return normalizedPath;
}
}
return null;
| 551
| 74
| 625
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/common/io/stream/ByteBufferStreamInput.java
|
ByteBufferStreamInput
|
readShort
|
class ByteBufferStreamInput extends StreamInput {
private final ByteBuffer buffer;
public ByteBufferStreamInput(ByteBuffer buffer) {
this.buffer = buffer;
}
@Override
public int read() throws IOException {
if (!buffer.hasRemaining()) {
return -1;
}
return buffer.get() & 0xFF;
}
@Override
public byte readByte() throws IOException {
if (!buffer.hasRemaining()) {
throw new EOFException();
}
return buffer.get();
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
if (!buffer.hasRemaining()) {
return -1;
}
len = Math.min(len, buffer.remaining());
buffer.get(b, off, len);
return len;
}
@Override
public long skip(long n) throws IOException {
if (n > buffer.remaining()) {
int ret = buffer.position();
buffer.position(buffer.limit());
return ret;
}
buffer.position((int) (buffer.position() + n));
return n;
}
@Override
public void readBytes(byte[] b, int offset, int len) throws IOException {
if (buffer.remaining() < len) {
throw new EOFException();
}
buffer.get(b, offset, len);
}
@Override
public short readShort() throws IOException {<FILL_FUNCTION_BODY>}
@Override
public int readInt() throws IOException {
try {
return buffer.getInt();
} catch (BufferUnderflowException ex) {
EOFException eofException = new EOFException();
eofException.initCause(ex);
throw eofException;
}
}
@Override
public long readLong() throws IOException {
try {
return buffer.getLong();
} catch (BufferUnderflowException ex) {
EOFException eofException = new EOFException();
eofException.initCause(ex);
throw eofException;
}
}
@Override
public void reset() throws IOException {
buffer.reset();
}
@Override
public int available() throws IOException {
return buffer.remaining();
}
@Override
protected void ensureCanReadBytes(int length) throws EOFException {
if (buffer.remaining() < length) {
throw new EOFException("tried to read: " + length + " bytes but only " + buffer.remaining() + " remaining");
}
}
@Override
@SuppressWarnings("sync-override")
public void mark(int readlimit) {
buffer.mark();
}
@Override
public boolean markSupported() {
return true;
}
@Override
public void close() throws IOException {
}
}
|
try {
return buffer.getShort();
} catch (BufferUnderflowException ex) {
EOFException eofException = new EOFException();
eofException.initCause(ex);
throw eofException;
}
| 749
| 63
| 812
|
<methods>public non-sealed void <init>() ,public abstract int available() throws java.io.IOException,public abstract void close() throws java.io.IOException,public org.elasticsearch.Version getVersion() ,public T[] readArray(Reader<T>, IntFunction<T[]>) throws java.io.IOException,public final boolean readBoolean() throws java.io.IOException,public abstract byte readByte() throws java.io.IOException,public byte[] readByteArray() throws java.io.IOException,public abstract void readBytes(byte[], int, int) throws java.io.IOException,public BytesRef readBytesRef() throws java.io.IOException,public BytesRef readBytesRef(int) throws java.io.IOException,public org.elasticsearch.common.bytes.BytesReference readBytesReference() throws java.io.IOException,public org.elasticsearch.common.bytes.BytesReference readBytesReference(int) throws java.io.IOException,public final double readDouble() throws java.io.IOException,public double[] readDoubleArray() throws java.io.IOException,public E readEnum(Class<E>) throws java.io.IOException,public EnumSet<E> readEnumSet(Class<E>) throws java.io.IOException,public T readException() throws java.io.IOException,public final float readFloat() throws java.io.IOException,public float[] readFloatArray() throws java.io.IOException,public void readFully(byte[]) throws java.io.IOException,public java.lang.Object readGenericValue() throws java.io.IOException,public org.elasticsearch.common.geo.GeoPoint readGeoPoint() throws java.io.IOException,public ImmutableOpenMap<K,V> readImmutableMap(Reader<K>, Reader<V>) throws java.io.IOException,public int readInt() throws java.io.IOException,public int[] readIntArray() throws java.io.IOException,public List<T> readList(Reader<T>) throws java.io.IOException,public long readLong() throws java.io.IOException,public long[] readLongArray() throws java.io.IOException,public Map<K,V> readMap(Supplier<Map<K,V>>, Reader<K>, Reader<V>) throws java.io.IOException,public Map<K,V> readMap(Reader<K>, Reader<V>) throws 
java.io.IOException,public Map<java.lang.String,java.lang.Object> readMap() throws java.io.IOException,public Map<K,List<V>> readMapOfLists(Reader<K>, Reader<V>) throws java.io.IOException,public C readNamedWriteable(Class<C>) throws java.io.IOException,public C readNamedWriteable(Class<C>, java.lang.String) throws java.io.IOException,public List<T> readNamedWriteableList(Class<T>) throws java.io.IOException,public T[] readOptionalArray(Reader<T>, IntFunction<T[]>) throws java.io.IOException,public final java.lang.Boolean readOptionalBoolean() throws java.io.IOException,public org.elasticsearch.common.bytes.BytesReference readOptionalBytesReference() throws java.io.IOException,public final java.lang.Double readOptionalDouble() throws java.io.IOException,public java.lang.Float readOptionalFloat() throws java.io.IOException,public java.lang.Long readOptionalLong() throws java.io.IOException,public C readOptionalNamedWriteable(Class<C>) throws java.io.IOException,public java.lang.String readOptionalString() throws java.io.IOException,public java.lang.String[] readOptionalStringArray() throws java.io.IOException,public io.crate.common.unit.TimeValue readOptionalTimeValue() throws java.io.IOException,public DateTimeZone readOptionalTimeZone() throws java.io.IOException,public java.lang.Integer readOptionalVInt() throws java.io.IOException,public T readOptionalWriteable(Reader<T>) throws java.io.IOException,public Set<T> readSet(Reader<T>) throws java.io.IOException,public short readShort() throws java.io.IOException,public java.lang.String readString() throws java.io.IOException,public java.lang.String[] readStringArray() throws java.io.IOException,public List<java.lang.String> readStringList() throws java.io.IOException,public io.crate.common.unit.TimeValue readTimeValue() throws java.io.IOException,public DateTimeZone readTimeZone() throws java.io.IOException,public int readVInt() throws java.io.IOException,public int[] readVIntArray() throws 
java.io.IOException,public long readVLong() throws java.io.IOException,public long[] readVLongArray() throws java.io.IOException,public long readZLong() throws java.io.IOException,public void setVersion(org.elasticsearch.Version) ,public static org.elasticsearch.common.io.stream.StreamInput wrap(byte[]) ,public static org.elasticsearch.common.io.stream.StreamInput wrap(byte[], int, int) <variables>private static final non-sealed Map<java.lang.Byte,java.util.concurrent.TimeUnit> BYTE_TIME_UNIT_MAP,private static final ThreadLocal<CharsRef> SMALL_SPARE,private static final int SMALL_STRING_LIMIT,private static final ThreadLocal<byte[]> STRING_READ_BUFFER,private CharsRef largeSpare,private org.elasticsearch.Version version
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableRegistry.java
|
Entry
|
getReader
|
class Entry {
/** The superclass of a {@link NamedWriteable} which will be read by {@link #reader}. */
public final Class<?> categoryClass;
/** A name for the writeable which is unique to the {@link #categoryClass}. */
public final String name;
/** A reader capability of reading*/
public final Writeable.Reader<?> reader;
/** Creates a new entry which can be stored by the registry. */
public <T extends NamedWriteable> Entry(Class<T> categoryClass, String name, Writeable.Reader<? extends T> reader) {
this.categoryClass = Objects.requireNonNull(categoryClass);
this.name = Objects.requireNonNull(name);
this.reader = Objects.requireNonNull(reader);
}
}
/**
* The underlying data of the registry maps from the category to an inner
* map of name unique to that category, to the actual reader.
*/
private final Map<Class<?>, Map<String, Writeable.Reader<?>>> registry;
/**
* Constructs a new registry from the given entries.
*/
@SuppressWarnings("rawtypes")
public NamedWriteableRegistry(List<Entry> entries) {
if (entries.isEmpty()) {
registry = Collections.emptyMap();
return;
}
entries = new ArrayList<>(entries);
entries.sort((e1, e2) -> e1.categoryClass.getName().compareTo(e2.categoryClass.getName()));
Map<Class<?>, Map<String, Writeable.Reader<?>>> registry = new HashMap<>();
Map<String, Writeable.Reader<?>> readers = null;
Class currentCategory = null;
for (Entry entry : entries) {
if (currentCategory != entry.categoryClass) {
if (currentCategory != null) {
// we've seen the last of this category, put it into the big map
registry.put(currentCategory, Collections.unmodifiableMap(readers));
}
readers = new HashMap<>();
currentCategory = entry.categoryClass;
}
Writeable.Reader<?> oldReader = readers.put(entry.name, entry.reader);
if (oldReader != null) {
throw new IllegalArgumentException("NamedWriteable [" + currentCategory.getName() + "][" + entry.name + "]" +
" is already registered for [" + oldReader.getClass().getName() + "]," +
" cannot register [" + entry.reader.getClass().getName() + "]");
}
}
// handle the last category
registry.put(currentCategory, Collections.unmodifiableMap(readers));
this.registry = Collections.unmodifiableMap(registry);
}
/**
* Returns a reader for a {@link NamedWriteable} object identified by the
* name provided as argument and its category.
*/
public <T> Writeable.Reader<? extends T> getReader(Class<T> categoryClass, String name) {<FILL_FUNCTION_BODY>
|
Map<String, Writeable.Reader<?>> readers = registry.get(categoryClass);
if (readers == null) {
throw new IllegalArgumentException("Unknown NamedWriteable category [" + categoryClass.getName() + "]");
}
@SuppressWarnings("unchecked")
Writeable.Reader<? extends T> reader = (Writeable.Reader<? extends T>)readers.get(name);
if (reader == null) {
throw new IllegalArgumentException("Unknown NamedWriteable [" + categoryClass.getName() + "][" + name + "]");
}
return reader;
| 773
| 151
| 924
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java
|
DeprecationLogger
|
deprecated
|
class DeprecationLogger {
private static final ThreadLocal<RingBuffer<String>> RECENT_WARNINGS = ThreadLocal.withInitial(() -> new RingBuffer<String>(20));
private final Logger logger;
/**
* Creates a new deprecation logger based on the parent logger. Automatically
* prefixes the logger name with "deprecation", if it starts with "org.elasticsearch.",
* it replaces "org.elasticsearch" with "org.elasticsearch.deprecation" to maintain
* the "org.elasticsearch" namespace.
*/
public DeprecationLogger(Logger parentLogger) {
String name = parentLogger.getName();
if (name.startsWith("org.elasticsearch")) {
name = name.replace("org.elasticsearch.", "org.elasticsearch.deprecation.");
} else {
name = "deprecation." + name;
}
this.logger = LogManager.getLogger(name);
}
// LRU set of keys used to determine if a deprecation message should be emitted to the deprecation logs
private final Set<String> keys = Collections.newSetFromMap(Collections.synchronizedMap(new LinkedHashMap<String, Boolean>() {
@Override
protected boolean removeEldestEntry(final Map.Entry<String, Boolean> eldest) {
return size() > 128;
}
}));
@VisibleForTesting
public void resetLRU() {
keys.clear();
}
/**
* Adds a formatted warning message as a response header on the thread context, and logs a deprecation message if the associated key has
* not recently been seen.
*
* @param key the key used to determine if this deprecation should be logged
* @param msg the message to log
* @param params parameters to the message
*/
public void deprecatedAndMaybeLog(final String key, final String msg, final Object... params) {
deprecated(msg, keys.add(key), params);
}
@SuppressLoggerChecks(reason = "safely delegates to logger")
void deprecated(final String message, final boolean shouldLog, final Object... params) {<FILL_FUNCTION_BODY>}
public static List<String> getRecentWarnings() {
return Lists.of(RECENT_WARNINGS.get());
}
public static void resetWarnings() {
RECENT_WARNINGS.get().reset();
}
}
|
if (shouldLog) {
logger.warn(message, params);
var msg = LoggerMessageFormat.format(message, params);
RECENT_WARNINGS.get().add(msg);
}
| 628
| 55
| 683
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/common/lucene/MinimumScoreCollector.java
|
MinimumScoreCollector
|
setScorer
|
class MinimumScoreCollector<T extends Collector> extends SimpleCollector {
private final T collector;
private final float minimumScore;
private Scorable scorer;
private LeafCollector leafCollector;
public MinimumScoreCollector(T collector, float minimumScore) {
this.collector = collector;
this.minimumScore = minimumScore;
}
public T delegate() {
return collector;
}
@Override
public void setScorer(Scorable scorer) throws IOException {<FILL_FUNCTION_BODY>}
@Override
public void collect(int doc) throws IOException {
if (scorer.score() >= minimumScore) {
leafCollector.collect(doc);
}
}
@Override
public void doSetNextReader(LeafReaderContext context) throws IOException {
leafCollector = collector.getLeafCollector(context);
}
@Override
public ScoreMode scoreMode() {
return ScoreMode.COMPLETE;
}
}
|
if (!(scorer instanceof ScoreCachingWrappingScorer)) {
scorer = ScoreCachingWrappingScorer.wrap(scorer);
}
this.scorer = scorer;
leafCollector.setScorer(scorer);
| 263
| 66
| 329
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/common/lucene/index/ElasticsearchLeafReader.java
|
ElasticsearchLeafReader
|
getElasticsearchLeafReader
|
class ElasticsearchLeafReader extends SequentialStoredFieldsLeafReader {
private final ShardId shardId;
/**
* <p>Construct a FilterLeafReader based on the specified base reader.
* <p>Note that base reader is closed if this FilterLeafReader is closed.</p>
*
* @param in specified base reader.
*/
public ElasticsearchLeafReader(LeafReader in, ShardId shardId) {
super(in);
this.shardId = shardId;
}
/**
* Returns the shard id this segment belongs to.
*/
public ShardId shardId() {
return this.shardId;
}
@Override
public CacheHelper getCoreCacheHelper() {
return in.getCoreCacheHelper();
}
@Override
public CacheHelper getReaderCacheHelper() {
return in.getReaderCacheHelper();
}
public static ElasticsearchLeafReader getElasticsearchLeafReader(LeafReader reader) {<FILL_FUNCTION_BODY>}
}
|
if (reader instanceof FilterLeafReader) {
if (reader instanceof ElasticsearchLeafReader) {
return (ElasticsearchLeafReader) reader;
} else {
// We need to use FilterLeafReader#getDelegate and not FilterLeafReader#unwrap, because
// If there are multiple levels of filtered leaf readers then with the unwrap() method it immediately
// returns the most inner leaf reader and thus skipping of over any other filtered leaf reader that
// may be instance of ElasticsearchLeafReader. This can cause us to miss the shardId.
return getElasticsearchLeafReader(((FilterLeafReader) reader).getDelegate());
}
}
return null;
| 279
| 172
| 451
|
<methods>public void <init>(LeafReader) ,public StoredFieldsReader getSequentialStoredFieldsReader() throws java.io.IOException<variables>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/common/lucene/index/SequentialStoredFieldsLeafReader.java
|
SequentialStoredFieldsLeafReader
|
getSequentialStoredFieldsReader
|
class SequentialStoredFieldsLeafReader extends FilterLeafReader {
/**
* <p>Construct a StoredFieldsFilterLeafReader based on the specified base reader.
* <p>Note that base reader is closed if this FilterLeafReader is closed.</p>
*
* @param in specified base reader.
*/
public SequentialStoredFieldsLeafReader(LeafReader in) {
super(in);
}
/**
* Returns a {@link StoredFieldsReader} optimized for sequential access (adjacent doc ids).
*/
public StoredFieldsReader getSequentialStoredFieldsReader() throws IOException {<FILL_FUNCTION_BODY>}
}
|
if (in instanceof CodecReader) {
CodecReader reader = (CodecReader) in;
return reader.getFieldsReader().getMergeInstance();
} else if (in instanceof SequentialStoredFieldsLeafReader) {
SequentialStoredFieldsLeafReader reader = (SequentialStoredFieldsLeafReader) in;
return reader.getSequentialStoredFieldsReader();
} else {
throw new IOException("requires a CodecReader or a SequentialStoredFieldsLeafReader, got " + in.getClass());
}
| 176
| 141
| 317
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java
|
Queries
|
applyMinimumShouldMatch
|
class Queries {
public static Query newUnmappedFieldQuery(String field) {
return new MatchNoDocsQuery("unmapped field [" + (field != null ? field : "null") + "]");
}
public static Query newLenientFieldQuery(String field, RuntimeException e) {
String message = ElasticsearchException.getExceptionName(e) + ":[" + e.getMessage() + "]";
return new MatchNoDocsQuery("failed [" + field + "] query, caused by " + message);
}
/** Return a query that matches all documents but those that match the given query. */
public static Query not(Query q) {
return new BooleanQuery.Builder()
.add(new MatchAllDocsQuery(), Occur.MUST)
.add(q, Occur.MUST_NOT)
.build();
}
public static Query applyMinimumShouldMatch(BooleanQuery query, @Nullable String minimumShouldMatch) {<FILL_FUNCTION_BODY>}
/**
* Potentially apply minimum should match value if we have a query that it can be applied to,
* otherwise return the original query.
*/
public static Query maybeApplyMinimumShouldMatch(Query query, @Nullable String minimumShouldMatch) {
if (query instanceof BooleanQuery) {
return applyMinimumShouldMatch((BooleanQuery) query, minimumShouldMatch);
} else if (query instanceof ExtendedCommonTermsQuery) {
((ExtendedCommonTermsQuery)query).setLowFreqMinimumNumberShouldMatch(minimumShouldMatch);
}
return query;
}
private static Pattern spaceAroundLessThanPattern = Pattern.compile("(\\s+<\\s*)|(\\s*<\\s+)");
private static Pattern spacePattern = Pattern.compile(" ");
private static Pattern lessThanPattern = Pattern.compile("<");
public static int calculateMinShouldMatch(int optionalClauseCount, String spec) {
int result = optionalClauseCount;
spec = spec.trim();
if (-1 < spec.indexOf("<")) {
/* we have conditional spec(s) */
spec = spaceAroundLessThanPattern.matcher(spec).replaceAll("<");
for (String s : spacePattern.split(spec)) {
String[] parts = lessThanPattern.split(s, 0);
int upperBound = Integer.parseInt(parts[0]);
if (optionalClauseCount <= upperBound) {
return result;
} else {
result = calculateMinShouldMatch(optionalClauseCount, parts[1]);
}
}
return result;
}
/* otherwise, simple expression */
if (-1 < spec.indexOf('%')) {
/* percentage - assume the % was the last char. If not, let Integer.parseInt fail. */
spec = spec.substring(0, spec.length() - 1);
int percent = Integer.parseInt(spec);
float calc = (result * percent) * (1 / 100f);
result = calc < 0 ? result + (int) calc : (int) calc;
} else {
int calc = Integer.parseInt(spec);
result = calc < 0 ? result + calc : calc;
}
return result < 0 ? 0 : result;
}
}
|
if (minimumShouldMatch == null) {
return query;
}
int optionalClauses = 0;
for (BooleanClause c : query.clauses()) {
if (c.getOccur() == BooleanClause.Occur.SHOULD) {
optionalClauses++;
}
}
int msm = calculateMinShouldMatch(optionalClauses, minimumShouldMatch);
if (0 < msm) {
BooleanQuery.Builder builder = new BooleanQuery.Builder();
for (BooleanClause clause : query) {
builder.add(clause);
}
builder.setMinimumNumberShouldMatch(msm);
return builder.build();
} else {
return query;
}
| 826
| 185
| 1,011
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/common/lucene/store/InputStreamIndexInput.java
|
InputStreamIndexInput
|
read
|
class InputStreamIndexInput extends InputStream {
private final IndexInput indexInput;
private final long limit;
private final long actualSizeToRead;
private long counter = 0;
private long markPointer;
private long markCounter;
public InputStreamIndexInput(IndexInput indexInput, long limit) {
this.indexInput = indexInput;
this.limit = limit;
if ((indexInput.length() - indexInput.getFilePointer()) > limit) {
actualSizeToRead = limit;
} else {
actualSizeToRead = indexInput.length() - indexInput.getFilePointer();
}
}
public long actualSizeToRead() {
return actualSizeToRead;
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
if (b == null) {
throw new NullPointerException();
} else if (off < 0 || len < 0 || len > b.length - off) {
throw new IndexOutOfBoundsException();
}
if (indexInput.getFilePointer() >= indexInput.length()) {
return -1;
}
if (indexInput.getFilePointer() + len > indexInput.length()) {
len = (int) (indexInput.length() - indexInput.getFilePointer());
}
if (counter + len > limit) {
len = (int) (limit - counter);
}
if (len <= 0) {
return -1;
}
indexInput.readBytes(b, off, len, false);
counter += len;
return len;
}
@Override
public int read() throws IOException {<FILL_FUNCTION_BODY>}
@Override
public boolean markSupported() {
return true;
}
@Override
public synchronized void mark(int readlimit) {
markPointer = indexInput.getFilePointer();
markCounter = counter;
}
@Override
public void reset() throws IOException {
indexInput.seek(markPointer);
counter = markCounter;
}
}
|
if (counter++ >= limit) {
return -1;
}
return (indexInput.getFilePointer() < indexInput.length()) ? (indexInput.readByte() & 0xff) : -1;
| 528
| 57
| 585
|
<methods>public void <init>() ,public int available() throws java.io.IOException,public void close() throws java.io.IOException,public synchronized void mark(int) ,public boolean markSupported() ,public static java.io.InputStream nullInputStream() ,public abstract int read() throws java.io.IOException,public int read(byte[]) throws java.io.IOException,public int read(byte[], int, int) throws java.io.IOException,public byte[] readAllBytes() throws java.io.IOException,public byte[] readNBytes(int) throws java.io.IOException,public int readNBytes(byte[], int, int) throws java.io.IOException,public synchronized void reset() throws java.io.IOException,public long skip(long) throws java.io.IOException,public void skipNBytes(long) throws java.io.IOException,public long transferTo(java.io.OutputStream) throws java.io.IOException<variables>private static final int DEFAULT_BUFFER_SIZE,private static final int MAX_BUFFER_SIZE,private static final int MAX_SKIP_BUFFER_SIZE
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java
|
PerThreadIDVersionAndSeqNoLookup
|
lookupSeqNo
|
class PerThreadIDVersionAndSeqNoLookup {
// TODO: do we really need to store all this stuff? some if it might not speed up anything.
// we keep it around for now, to reduce the amount of e.g. hash lookups by field and stuff
/** terms enum for uid field */
final String uidField;
private final TermsEnum termsEnum;
/** Reused for iteration (when the term exists) */
private PostingsEnum docsEnum;
/** used for assertions to make sure class usage meets assumptions */
private final Object readerKey;
/**
* Initialize lookup for the provided segment
*/
PerThreadIDVersionAndSeqNoLookup(LeafReader reader, String uidField) throws IOException {
this.uidField = uidField;
final Terms terms = reader.terms(uidField);
if (terms == null) {
// If a segment contains only no-ops, it does not have _uid but has both _soft_deletes and _tombstone fields.
final NumericDocValues softDeletesDV = reader.getNumericDocValues(Lucene.SOFT_DELETES_FIELD);
final NumericDocValues tombstoneDV = reader.getNumericDocValues(DocSysColumns.Names.TOMBSTONE);
// this is a special case when we pruned away all IDs in a segment since all docs are deleted.
final boolean allDocsDeleted = (softDeletesDV != null && reader.numDocs() == 0);
if ((softDeletesDV == null || tombstoneDV == null) && allDocsDeleted == false) {
throw new IllegalArgumentException("reader does not have _uid terms but not a no-op segment; " +
"_soft_deletes [" + softDeletesDV + "], _tombstone [" + tombstoneDV + "]");
}
termsEnum = null;
} else {
termsEnum = terms.iterator();
}
if (reader.getNumericDocValues(DocSysColumns.VERSION.name()) == null) {
throw new IllegalArgumentException("reader misses the [" + DocSysColumns.VERSION.name() + "] field; _uid terms [" + terms + "]");
}
Object readerKey = null;
assert (readerKey = reader.getCoreCacheHelper().getKey()) != null;
this.readerKey = readerKey;
}
/** Return null if id is not found.
* We pass the {@link LeafReaderContext} as an argument so that things
* still work with reader wrappers that hide some documents while still
* using the same cache key. Otherwise we'd have to disable caching
* entirely for these readers.
*/
public DocIdAndVersion lookupVersion(BytesRef id, boolean loadSeqNo, LeafReaderContext context)
throws IOException {
assert context.reader().getCoreCacheHelper().getKey().equals(readerKey) :
"context's reader is not the same as the reader class was initialized on.";
int docID = getDocID(id, context);
if (docID != DocIdSetIterator.NO_MORE_DOCS) {
final long seqNo;
final long term;
if (loadSeqNo) {
seqNo = readNumericDocValues(context.reader(), DocSysColumns.Names.SEQ_NO, docID);
term = readNumericDocValues(context.reader(), DocSysColumns.Names.PRIMARY_TERM, docID);
} else {
seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
term = 0;
}
final long version = readNumericDocValues(context.reader(), DocSysColumns.VERSION.name(), docID);
return new DocIdAndVersion(docID, version, seqNo, term, context.reader(), context.docBase);
} else {
return null;
}
}
private static long readNumericDocValues(LeafReader reader, String field, int docId) throws IOException {
final NumericDocValues dv = reader.getNumericDocValues(field);
if (dv == null || dv.advanceExact(docId) == false) {
assert false : "document [" + docId + "] does not have docValues for [" + field + "]";
throw new IllegalStateException("document [" + docId + "] does not have docValues for [" + field + "]");
}
return dv.longValue();
}
/**
* returns the internal lucene doc id for the given id bytes.
* {@link DocIdSetIterator#NO_MORE_DOCS} is returned if not found
* */
private int getDocID(BytesRef id, LeafReaderContext context) throws IOException {
// termsEnum can possibly be null here if this leaf contains only no-ops.
if (termsEnum != null && termsEnum.seekExact(id)) {
final Bits liveDocs = context.reader().getLiveDocs();
int docID = DocIdSetIterator.NO_MORE_DOCS;
// there may be more than one matching docID, in the case of nested docs, so we want the last one:
docsEnum = termsEnum.postings(docsEnum, 0);
for (int d = docsEnum.nextDoc(); d != DocIdSetIterator.NO_MORE_DOCS; d = docsEnum.nextDoc()) {
if (liveDocs != null && liveDocs.get(d) == false) {
continue;
}
docID = d;
}
return docID;
} else {
return DocIdSetIterator.NO_MORE_DOCS;
}
}
/** Return null if id is not found. */
DocIdAndSeqNo lookupSeqNo(BytesRef id, LeafReaderContext context) throws IOException {<FILL_FUNCTION_BODY>}
}
|
assert context.reader().getCoreCacheHelper().getKey().equals(readerKey) :
"context's reader is not the same as the reader class was initialized on.";
final int docID = getDocID(id, context);
if (docID != DocIdSetIterator.NO_MORE_DOCS) {
final long seqNo = readNumericDocValues(context.reader(), DocSysColumns.Names.SEQ_NO, docID);
return new DocIdAndSeqNo(docID, seqNo, context);
} else {
return null;
}
| 1,468
| 147
| 1,615
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java
|
VersionsAndSeqNoResolver
|
getLookupState
|
class VersionsAndSeqNoResolver {
static final ConcurrentMap<IndexReader.CacheKey, CloseableThreadLocal<PerThreadIDVersionAndSeqNoLookup[]>> LOOKUP_STATES =
ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();
// Evict this reader from lookupStates once it's closed:
private static final IndexReader.ClosedListener REMOVE_LOOKUP_STATE = key -> {
CloseableThreadLocal<PerThreadIDVersionAndSeqNoLookup[]> ctl = LOOKUP_STATES.remove(key);
if (ctl != null) {
ctl.close();
}
};
private static PerThreadIDVersionAndSeqNoLookup[] getLookupState(IndexReader reader, String uidField) throws IOException {<FILL_FUNCTION_BODY>}
private VersionsAndSeqNoResolver() {
}
/** Wraps an {@link LeafReaderContext}, a doc ID <b>relative to the context doc base</b> and a version. */
public static class DocIdAndVersion {
public final int docId;
public final long version;
public final long seqNo;
public final long primaryTerm;
public final LeafReader reader;
public final int docBase;
public DocIdAndVersion(int docId, long version, long seqNo, long primaryTerm, LeafReader reader, int docBase) {
this.docId = docId;
this.version = version;
this.seqNo = seqNo;
this.primaryTerm = primaryTerm;
this.reader = reader;
this.docBase = docBase;
}
}
/** Wraps an {@link LeafReaderContext}, a doc ID <b>relative to the context doc base</b> and a seqNo. */
public static class DocIdAndSeqNo {
public final int docId;
public final long seqNo;
public final LeafReaderContext context;
DocIdAndSeqNo(int docId, long seqNo, LeafReaderContext context) {
this.docId = docId;
this.seqNo = seqNo;
this.context = context;
}
}
/**
* Load the internal doc ID and version for the uid from the reader, returning<ul>
* <li>null if the uid wasn't found,
* <li>a doc ID and a version otherwise
* </ul>
*/
public static DocIdAndVersion loadDocIdAndVersion(IndexReader reader, Term term, boolean loadSeqNo) throws IOException {
PerThreadIDVersionAndSeqNoLookup[] lookups = getLookupState(reader, term.field());
List<LeafReaderContext> leaves = reader.leaves();
// iterate backwards to optimize for the frequently updated documents
// which are likely to be in the last segments
for (int i = leaves.size() - 1; i >= 0; i--) {
final LeafReaderContext leaf = leaves.get(i);
PerThreadIDVersionAndSeqNoLookup lookup = lookups[leaf.ord];
DocIdAndVersion result = lookup.lookupVersion(term.bytes(), loadSeqNo, leaf);
if (result != null) {
return result;
}
}
return null;
}
/**
* Loads the internal docId and sequence number of the latest copy for a given uid from the provided reader.
* The result is either null or the live and latest version of the given uid.
*/
public static DocIdAndSeqNo loadDocIdAndSeqNo(IndexReader reader, Term term) throws IOException {
final PerThreadIDVersionAndSeqNoLookup[] lookups = getLookupState(reader, term.field());
final List<LeafReaderContext> leaves = reader.leaves();
// iterate backwards to optimize for the frequently updated documents
// which are likely to be in the last segments
for (int i = leaves.size() - 1; i >= 0; i--) {
final LeafReaderContext leaf = leaves.get(i);
final PerThreadIDVersionAndSeqNoLookup lookup = lookups[leaf.ord];
final DocIdAndSeqNo result = lookup.lookupSeqNo(term.bytes(), leaf);
if (result != null) {
return result;
}
}
return null;
}
}
|
// We cache on the top level
// This means cache entries have a shorter lifetime, maybe as low as 1s with the
// default refresh interval and a steady indexing rate, but on the other hand it
// proved to be cheaper than having to perform a CHM and a TL get for every segment.
// See https://github.com/elastic/elasticsearch/pull/19856.
IndexReader.CacheHelper cacheHelper = reader.getReaderCacheHelper();
CloseableThreadLocal<PerThreadIDVersionAndSeqNoLookup[]> ctl = LOOKUP_STATES.get(cacheHelper.getKey());
if (ctl == null) {
// First time we are seeing this reader's core; make a new CTL:
ctl = new CloseableThreadLocal<>();
CloseableThreadLocal<PerThreadIDVersionAndSeqNoLookup[]> other = LOOKUP_STATES.putIfAbsent(cacheHelper.getKey(), ctl);
if (other == null) {
// Our CTL won, we must remove it when the reader is closed:
cacheHelper.addClosedListener(REMOVE_LOOKUP_STATE);
} else {
// Another thread beat us to it: just use their CTL:
ctl = other;
}
}
PerThreadIDVersionAndSeqNoLookup[] lookupState = ctl.get();
if (lookupState == null) {
lookupState = new PerThreadIDVersionAndSeqNoLookup[reader.leaves().size()];
for (LeafReaderContext leaf : reader.leaves()) {
lookupState[leaf.ord] = new PerThreadIDVersionAndSeqNoLookup(leaf.reader(), uidField);
}
ctl.set(lookupState);
}
if (lookupState.length != reader.leaves().size()) {
throw new AssertionError("Mismatched numbers of leaves: " + lookupState.length + " != " + reader.leaves().size());
}
if (lookupState.length > 0 && Objects.equals(lookupState[0].uidField, uidField) == false) {
throw new AssertionError("Index does not consistently use the same uid field: ["
+ uidField + "] != [" + lookupState[0].uidField + "]");
}
return lookupState;
| 1,102
| 584
| 1,686
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/common/network/IfConfig.java
|
IfConfig
|
formatFlags
|
class IfConfig {
private static final Logger LOGGER = LogManager.getLogger(IfConfig.class);
private static final String INDENT = " ";
/** log interface configuration at debug level, if its enabled */
public static void logIfNecessary() {
if (LOGGER.isDebugEnabled()) {
try {
doLogging();
} catch (IOException e) {
LOGGER.warn("unable to gather network information", e);
}
}
}
/** perform actual logging: might throw exception if things go wrong */
private static void doLogging() throws IOException {
StringBuilder msg = new StringBuilder();
for (NetworkInterface nic : NetworkUtils.getInterfaces()) {
msg.append(System.lineSeparator());
// ordinary name
msg.append(nic.getName());
msg.append(System.lineSeparator());
// display name (e.g. on windows)
if (!nic.getName().equals(nic.getDisplayName())) {
msg.append(INDENT);
msg.append(nic.getDisplayName());
msg.append(System.lineSeparator());
}
// addresses: v4 first, then v6
List<InterfaceAddress> addresses = nic.getInterfaceAddresses();
for (InterfaceAddress address : addresses) {
if (address.getAddress() instanceof Inet6Address == false) {
msg.append(INDENT);
msg.append(formatAddress(address));
msg.append(System.lineSeparator());
}
}
for (InterfaceAddress address : addresses) {
if (address.getAddress() instanceof Inet6Address) {
msg.append(INDENT);
msg.append(formatAddress(address));
msg.append(System.lineSeparator());
}
}
// hardware address
byte[] hardware = nic.getHardwareAddress();
if (hardware != null) {
msg.append(INDENT);
msg.append("hardware ");
for (int i = 0; i < hardware.length; i++) {
if (i > 0) {
msg.append(":");
}
msg.append(String.format(Locale.ROOT, "%02X", hardware[i]));
}
msg.append(System.lineSeparator());
}
// attributes
msg.append(INDENT);
msg.append(formatFlags(nic));
msg.append(System.lineSeparator());
}
LOGGER.debug("configuration:{}{}", System.lineSeparator(), msg);
}
/** format internet address: java's default doesn't include everything useful */
private static String formatAddress(InterfaceAddress interfaceAddress) throws IOException {
StringBuilder sb = new StringBuilder();
InetAddress address = interfaceAddress.getAddress();
if (address instanceof Inet6Address) {
sb.append("inet6 ");
sb.append(NetworkAddress.format(address));
sb.append(" prefixlen:");
sb.append(interfaceAddress.getNetworkPrefixLength());
} else {
sb.append("inet ");
sb.append(NetworkAddress.format(address));
int netmask = 0xFFFFFFFF << (32 - interfaceAddress.getNetworkPrefixLength());
sb.append(" netmask:").append(NetworkAddress.format(InetAddress.getByAddress(new byte[]{
(byte) (netmask >>> 24),
(byte) (netmask >>> 16 & 0xFF),
(byte) (netmask >>> 8 & 0xFF),
(byte) (netmask & 0xFF)
})));
InetAddress broadcast = interfaceAddress.getBroadcast();
if (broadcast != null) {
sb.append(" broadcast:").append(NetworkAddress.format(broadcast));
}
}
if (address.isLoopbackAddress()) {
sb.append(" scope:host");
} else if (address.isLinkLocalAddress()) {
sb.append(" scope:link");
} else if (address.isSiteLocalAddress()) {
sb.append(" scope:site");
}
return sb.toString();
}
/** format network interface flags */
private static String formatFlags(NetworkInterface nic) throws SocketException {<FILL_FUNCTION_BODY>}
}
|
StringBuilder flags = new StringBuilder();
if (nic.isUp()) {
flags.append("UP ");
}
if (nic.supportsMulticast()) {
flags.append("MULTICAST ");
}
if (nic.isLoopback()) {
flags.append("LOOPBACK ");
}
if (nic.isPointToPoint()) {
flags.append("POINTOPOINT ");
}
if (nic.isVirtual()) {
flags.append("VIRTUAL ");
}
flags.append("mtu:").append(nic.getMTU());
flags.append(" index:").append(nic.getIndex());
return flags.toString();
| 1,079
| 178
| 1,257
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java
|
NV
|
close
|
class NV<T> implements Recycler.V<T> {
T value;
NV(T value) {
this.value = value;
}
@Override
public T v() {
return value;
}
@Override
public boolean isRecycled() {
return false;
}
@Override
public void close() {<FILL_FUNCTION_BODY>}
}
|
if (value == null) {
throw new IllegalStateException("recycler entry already released...");
}
value = null;
| 112
| 37
| 149
|
<methods><variables>protected final non-sealed C<T> c
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/common/regex/Regex.java
|
Regex
|
simpleMatch
|
class Regex {
/**
* This Regex / {@link Pattern} flag is supported from Java 7 on.
* If set on a Java6 JVM the flag will be ignored.
*/
public static final int UNICODE_CHARACTER_CLASS = 0x100; // supported in JAVA7
/**
* Is the str a simple match pattern.
*/
public static boolean isSimpleMatchPattern(String str) {
return str.indexOf('*') != -1;
}
public static boolean isMatchAllPattern(String str) {
return str.equals("*");
}
/** Return an {@link Automaton} that matches the given pattern. */
public static Automaton simpleMatchToAutomaton(String pattern) {
List<Automaton> automata = new ArrayList<>();
int previous = 0;
for (int i = pattern.indexOf('*'); i != -1; i = pattern.indexOf('*', i + 1)) {
automata.add(Automata.makeString(pattern.substring(previous, i)));
automata.add(Automata.makeAnyString());
previous = i + 1;
}
automata.add(Automata.makeString(pattern.substring(previous)));
return Operations.concatenate(automata);
}
/**
* Return an Automaton that matches the union of the provided patterns.
*/
public static Automaton simpleMatchToAutomaton(String... patterns) {
if (patterns.length < 1) {
throw new IllegalArgumentException("There must be at least one pattern, zero given");
}
List<Automaton> automata = new ArrayList<>();
for (String pattern : patterns) {
automata.add(simpleMatchToAutomaton(pattern));
}
return Operations.union(automata);
}
/**
* Match a String against the given pattern, supporting the following simple
* pattern styles: "xxx*", "*xxx", "*xxx*" and "xxx*yyy" matches (with an
* arbitrary number of pattern parts), as well as direct equality.
*
* @param pattern the pattern to match against
* @param str the String to match
* @return whether the String matches the given pattern
*/
public static boolean simpleMatch(String pattern, String str) {<FILL_FUNCTION_BODY>}
/**
* Match a String against the given patterns, supporting the following simple
* pattern styles: "xxx*", "*xxx", "*xxx*" and "xxx*yyy" matches (with an
* arbitrary number of pattern parts), as well as direct equality.
*
* @param patterns the patterns to match against
* @param str the String to match
* @return whether the String matches any of the given patterns
*/
public static boolean simpleMatch(String[] patterns, String str) {
if (patterns != null) {
for (String pattern : patterns) {
if (simpleMatch(pattern, str)) {
return true;
}
}
}
return false;
}
/**
* Similar to {@link #simpleMatch(String[], String)}, but accepts a list of strings instead of an array of strings for the patterns to
* match.
*/
public static boolean simpleMatch(final List<String> patterns, final String str) {
// #simpleMatch(String[], String) is likely to be inlined into this method
return patterns != null && simpleMatch(patterns.toArray(Strings.EMPTY_ARRAY), str);
}
public static boolean simpleMatch(String[] patterns, String[] types) {
if (patterns != null && types != null) {
for (String type : types) {
for (String pattern : patterns) {
if (simpleMatch(pattern, type)) {
return true;
}
}
}
}
return false;
}
public static Pattern compile(String regex, String flags) {
int pFlags = flags == null ? 0 : flagsFromString(flags);
return Pattern.compile(regex, pFlags);
}
public static int flagsFromString(String flags) {
int pFlags = 0;
for (String s : Strings.delimitedListToStringArray(flags, "|")) {
if (s.isEmpty()) {
continue;
}
s = s.toUpperCase(Locale.ROOT);
if ("CASE_INSENSITIVE".equals(s)) {
pFlags |= Pattern.CASE_INSENSITIVE;
} else if ("MULTILINE".equals(s)) {
pFlags |= Pattern.MULTILINE;
} else if ("DOTALL".equals(s)) {
pFlags |= Pattern.DOTALL;
} else if ("UNICODE_CASE".equals(s)) {
pFlags |= Pattern.UNICODE_CASE;
} else if ("CANON_EQ".equals(s)) {
pFlags |= Pattern.CANON_EQ;
} else if ("UNIX_LINES".equals(s)) {
pFlags |= Pattern.UNIX_LINES;
} else if ("LITERAL".equals(s)) {
pFlags |= Pattern.LITERAL;
} else if ("COMMENTS".equals(s)) {
pFlags |= Pattern.COMMENTS;
} else if (("UNICODE_CHAR_CLASS".equals(s)) || ("UNICODE_CHARACTER_CLASS".equals(s))) {
pFlags |= UNICODE_CHARACTER_CLASS;
} else {
throw new IllegalArgumentException("Unknown regex flag [" + s + "]");
}
}
return pFlags;
}
}
|
if (pattern == null || str == null) {
return false;
}
int firstIndex = pattern.indexOf('*');
if (firstIndex == -1) {
return pattern.equals(str);
}
if (firstIndex == 0) {
if (pattern.length() == 1) {
return true;
}
int nextIndex = pattern.indexOf('*', firstIndex + 1);
if (nextIndex == -1) {
return str.endsWith(pattern.substring(1));
} else if (nextIndex == 1) {
// Double wildcard "**" - skipping the first "*"
return simpleMatch(pattern.substring(1), str);
}
String part = pattern.substring(1, nextIndex);
int partIndex = str.indexOf(part);
while (partIndex != -1) {
if (simpleMatch(pattern.substring(nextIndex), str.substring(partIndex + part.length()))) {
return true;
}
partIndex = str.indexOf(part, partIndex + 1);
}
return false;
}
return (str.length() >= firstIndex &&
pattern.substring(0, firstIndex).equals(str.substring(0, firstIndex)) &&
simpleMatch(pattern.substring(firstIndex), str.substring(firstIndex)));
| 1,437
| 339
| 1,776
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/org/elasticsearch/common/settings/SecureString.java
|
SecureString
|
ensureNotClosed
|
class SecureString implements CharSequence, Closeable {
private char[] chars;
/**
* Constructs a new SecureString which controls the passed in char array.
*
* Note: When this instance is closed, the array will be zeroed out.
*/
public SecureString(char[] chars) {
this.chars = Objects.requireNonNull(chars);
}
/**
* Constructs a new SecureString from an existing String.
*
* NOTE: This is not actually secure, since the provided String cannot be deallocated, but
* this constructor allows for easy compatibility between new and old apis.
*
* @deprecated Only use for compatibility between deprecated string settings and new secure strings
*/
@Deprecated
public SecureString(String s) {
this(s.toCharArray());
}
/** Constant time equality to avoid potential timing attacks. */
@Override
public synchronized boolean equals(Object o) {
ensureNotClosed();
if (this == o) return true;
if (o == null || o instanceof CharSequence == false) return false;
CharSequence that = (CharSequence) o;
if (chars.length != that.length()) {
return false;
}
int equals = 0;
for (int i = 0; i < chars.length; i++) {
equals |= chars[i] ^ that.charAt(i);
}
return equals == 0;
}
@Override
public synchronized int hashCode() {
return Arrays.hashCode(chars);
}
@Override
public synchronized int length() {
ensureNotClosed();
return chars.length;
}
@Override
public synchronized char charAt(int index) {
ensureNotClosed();
return chars[index];
}
@Override
public SecureString subSequence(int start, int end) {
throw new UnsupportedOperationException("Cannot get subsequence of SecureString");
}
/**
* Convert to a {@link String}. This should only be used with APIs that do not take {@link CharSequence}.
*/
@Override
public synchronized String toString() {
return new String(chars);
}
/**
* Closes the string by clearing the underlying char array.
*/
@Override
public synchronized void close() {
if (chars != null) {
Arrays.fill(chars, '\0');
chars = null;
}
}
/**
* Returns a new copy of this object that is backed by its own char array. Closing the new instance has no effect on the instance it
* was created from. This is useful for APIs which accept a char array and you want to be safe about the API potentially modifying the
* char array. For example:
*
* <pre>
* try (SecureString copy = secureString.clone()) {
* // pass thee char[] to a external API
* PasswordAuthentication auth = new PasswordAuthentication(username, copy.getChars());
* ...
* }
* </pre>
*/
@Override
public synchronized SecureString clone() {
ensureNotClosed();
return new SecureString(Arrays.copyOf(chars, chars.length));
}
/**
* Returns the underlying char[]. This is a dangerous operation as the array may be modified while it is being used by other threads
* or a consumer may modify the values in the array. For safety, it is preferable to use {@link #clone()} and pass its chars to the
* consumer when the chars are needed multiple times.
*/
public synchronized char[] getChars() {
ensureNotClosed();
return chars;
}
/** Throw an exception if this string has been closed, indicating something is trying to access the data after being closed. */
private void ensureNotClosed() {<FILL_FUNCTION_BODY>}
}
|
if (chars == null) {
throw new IllegalStateException("SecureString has already been closed");
}
| 1,013
| 32
| 1,045
|
<no_super_class>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.