proj_name
stringclasses
131 values
relative_path
stringlengths
30
228
class_name
stringlengths
1
68
func_name
stringlengths
1
48
masked_class
stringlengths
78
9.82k
func_body
stringlengths
46
9.61k
len_input
int64
29
2.01k
len_output
int64
14
1.94k
total
int64
55
2.05k
relevant_context
stringlengths
0
38.4k
crate_crate
crate/libs/sql-parser/src/main/java/io/crate/sql/tree/SetStatement.java
SetStatement
equals
class SetStatement<T> extends Statement { public enum Scope { GLOBAL, SESSION, LOCAL, TIME_ZONE } public enum SettingType { TRANSIENT, PERSISTENT } private final Scope scope; private final SettingType settingType; private final List<Assignment<T>> assignments; public SetStatement(Scope scope, List<Assignment<T>> assignments) { this(scope, SettingType.TRANSIENT, assignments); } public SetStatement(Scope scope, SettingType settingType, List<Assignment<T>> assignments) { this.scope = scope; this.settingType = settingType; this.assignments = assignments; } public SetStatement(Scope scope, Assignment<T> assignment) { this.scope = scope; this.settingType = SettingType.TRANSIENT; this.assignments = Collections.singletonList(assignment); } public Scope scope() { return scope; } public List<Assignment<T>> assignments() { return assignments; } public SettingType settingType() { return settingType; } public <U> SetStatement<U> map(Function<? super T, ? extends U> mapper) { return new SetStatement<>( scope, settingType, Lists.map(assignments, x -> x.map(mapper)) ); } @Override public boolean equals(Object o) {<FILL_FUNCTION_BODY>} @Override public int hashCode() { return Objects.hash(scope, settingType, assignments); } @Override public String toString() { return "SetStatement{" + "scope=" + scope + ", assignments=" + assignments + ", settingType=" + settingType + '}'; } @Override public <R, C> R accept(AstVisitor<R, C> visitor, C context) { return visitor.visitSetStatement(this, context); } }
if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } SetStatement<?> that = (SetStatement<?>) o; return scope == that.scope && settingType == that.settingType && Objects.equals(assignments, that.assignments);
542
99
641
<methods>public non-sealed void <init>() ,public R accept(AstVisitor<R,C>, C) <variables>
crate_crate
crate/libs/sql-parser/src/main/java/io/crate/sql/tree/SetTransactionStatement.java
Deferrable
toString
class Deferrable implements TransactionMode { private final boolean not; public Deferrable(boolean not) { this.not = not; } @Override public String toString() { return not ? "NOT DEFERRABLE" : "DEFERRABLE"; } } private final List<TransactionMode> transactionModes; public SetTransactionStatement(List<TransactionMode> transactionModes) { this.transactionModes = transactionModes; } public List<TransactionMode> transactionModes() { return transactionModes; } @Override public int hashCode() { return Objects.hashCode(transactionModes); } @Override public boolean equals(Object obj) { return obj instanceof SetTransactionStatement && ((SetTransactionStatement) obj).transactionModes.equals(this.transactionModes); } @Override public <R, C> R accept(AstVisitor<R, C> visitor, C context) { return (R) visitor.visitSetTransaction(this, context); } @Override public String toString() {<FILL_FUNCTION_BODY>
return "SET TRANSACTION " + Lists.joinOn(", ", transactionModes, TransactionMode::toString);
300
32
332
<methods>public non-sealed void <init>() ,public R accept(AstVisitor<R,C>, C) <variables>
crate_crate
crate/libs/sql-parser/src/main/java/io/crate/sql/tree/ShowCreateTable.java
ShowCreateTable
equals
class ShowCreateTable<T> extends Statement { private final Table<T> table; @Override public <R, C> R accept(AstVisitor<R, C> visitor, C context) { return visitor.visitShowCreateTable(this, context); } public ShowCreateTable(Table<T> table) { this.table = table; } @Override public boolean equals(Object o) {<FILL_FUNCTION_BODY>} @Override public int hashCode() { return Objects.hash(table); } @Override public String toString() { return "ShowCreateTable{" + "table=" + table + '}'; } public Table<T> table() { return table; } public <U> ShowCreateTable<U> map(Function<? super T, ? extends U> mapper) { return new ShowCreateTable<>(table.map(mapper)); } }
if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } ShowCreateTable<?> that = (ShowCreateTable<?>) o; return Objects.equals(table, that.table);
258
81
339
<methods>public non-sealed void <init>() ,public R accept(AstVisitor<R,C>, C) <variables>
crate_crate
crate/libs/sql-parser/src/main/java/io/crate/sql/tree/ShowSchemas.java
ShowSchemas
equals
class ShowSchemas extends Statement { @Nullable private final String likePattern; private final Optional<Expression> whereExpression; public ShowSchemas(@Nullable String likePattern, Optional<Expression> whereExpr) { this.likePattern = likePattern; this.whereExpression = whereExpr; } @Nullable public String likePattern() { return likePattern; } public Optional<Expression> whereExpression() { return whereExpression; } @Override public <R, C> R accept(AstVisitor<R, C> visitor, C context) { return visitor.visitShowSchemas(this, context); } @Override public boolean equals(Object o) {<FILL_FUNCTION_BODY>} @Override public int hashCode() { return Objects.hash(likePattern, whereExpression); } @Override public String toString() { return "ShowSchemas{" + "likePattern='" + likePattern + '\'' + ", whereExpression=" + whereExpression + '}'; } }
if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } ShowSchemas that = (ShowSchemas) o; return Objects.equals(likePattern, that.likePattern) && Objects.equals(whereExpression, that.whereExpression);
284
93
377
<methods>public non-sealed void <init>() ,public R accept(AstVisitor<R,C>, C) <variables>
crate_crate
crate/libs/sql-parser/src/main/java/io/crate/sql/tree/ShowTables.java
ShowTables
toString
class ShowTables extends Statement { @Nullable private final QualifiedName schema; @Nullable private final String likePattern; private final Optional<Expression> whereExpression; public ShowTables(@Nullable QualifiedName schema, @Nullable String likePattern, Optional<Expression> whereExpression) { this.schema = schema; this.whereExpression = whereExpression; this.likePattern = likePattern; } @Nullable public QualifiedName schema() { return schema; } @Nullable public String likePattern() { return likePattern; } public Optional<Expression> whereExpression() { return whereExpression; } @Override public <R, C> R accept(AstVisitor<R, C> visitor, C context) { return visitor.visitShowTables(this, context); } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } ShowTables that = (ShowTables) o; return Objects.equals(schema, that.schema) && Objects.equals(likePattern, that.likePattern) && Objects.equals(whereExpression, that.whereExpression); } @Override public int hashCode() { return Objects.hash(schema, likePattern, whereExpression); } @Override public String toString() {<FILL_FUNCTION_BODY>} }
return "ShowTables{" + "schema=" + schema + ", likePattern='" + likePattern + '\'' + ", whereExpression=" + whereExpression + '}';
404
51
455
<methods>public non-sealed void <init>() ,public R accept(AstVisitor<R,C>, C) <variables>
crate_crate
crate/libs/sql-parser/src/main/java/io/crate/sql/tree/SimpleCaseExpression.java
SimpleCaseExpression
equals
class SimpleCaseExpression extends Expression { private final Expression operand; private final List<WhenClause> whenClauses; private final Expression defaultValue; public SimpleCaseExpression(Expression operand, List<WhenClause> whenClauses, Expression defaultValue) { this.operand = requireNonNull(operand, "operand is null"); this.whenClauses = Collections.unmodifiableList(whenClauses); this.defaultValue = defaultValue; } public Expression getOperand() { return operand; } public List<WhenClause> getWhenClauses() { return whenClauses; } public Expression getDefaultValue() { return defaultValue; } @Override public <R, C> R accept(AstVisitor<R, C> visitor, C context) { return visitor.visitSimpleCaseExpression(this, context); } @Override public boolean equals(Object o) {<FILL_FUNCTION_BODY>} @Override public int hashCode() { return Objects.hash(operand, whenClauses, defaultValue); } }
if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } SimpleCaseExpression that = (SimpleCaseExpression) o; return Objects.equals(operand, that.operand) && Objects.equals(whenClauses, that.whenClauses) && Objects.equals(defaultValue, that.defaultValue);
300
111
411
<methods>public non-sealed void <init>() ,public R accept(AstVisitor<R,C>, C) ,public final java.lang.String toString() <variables>
crate_crate
crate/libs/sql-parser/src/main/java/io/crate/sql/tree/SubqueryExpression.java
SubqueryExpression
equals
class SubqueryExpression extends Expression { private final Query query; public SubqueryExpression(Query query) { this.query = query; } public Query getQuery() { return query; } @Override public <R, C> R accept(AstVisitor<R, C> visitor, C context) { return visitor.visitSubqueryExpression(this, context); } @Override public boolean equals(Object o) {<FILL_FUNCTION_BODY>} @Override public int hashCode() { return query.hashCode(); } }
if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } SubqueryExpression that = (SubqueryExpression) o; if (!query.equals(that.query)) { return false; } return true;
159
90
249
<methods>public non-sealed void <init>() ,public R accept(AstVisitor<R,C>, C) ,public final java.lang.String toString() <variables>
crate_crate
crate/libs/sql-parser/src/main/java/io/crate/sql/tree/SwapTable.java
SwapTable
equals
class SwapTable<T> extends Statement { private final QualifiedName source; private final QualifiedName target; private final GenericProperties<T> properties; public SwapTable(QualifiedName source, QualifiedName target, GenericProperties<T> properties) { this.source = source; this.target = target; this.properties = properties; } public QualifiedName source() { return source; } public QualifiedName target() { return target; } public GenericProperties<T> properties() { return properties; } @Override public <R, C> R accept(AstVisitor<R, C> visitor, C context) { return visitor.visitSwapTable(this, context); } @Override public boolean equals(Object o) {<FILL_FUNCTION_BODY>} @Override public int hashCode() { return Objects.hash(source, target, properties); } @Override public String toString() { return "SwapTable{" + "source=" + source + ", target=" + target + ", properties=" + properties + '}'; } }
if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } SwapTable<?> swapTable = (SwapTable<?>) o; return Objects.equals(source, swapTable.source) && Objects.equals(target, swapTable.target) && Objects.equals(properties, swapTable.properties);
319
113
432
<methods>public non-sealed void <init>() ,public R accept(AstVisitor<R,C>, C) <variables>
crate_crate
crate/libs/sql-parser/src/main/java/io/crate/sql/tree/Table.java
Table
equals
class Table<T> extends QueryBody { private final QualifiedName name; private final boolean excludePartitions; private final List<Assignment<T>> partitionProperties; public Table(QualifiedName name) { this(name, true); } public Table(QualifiedName name, boolean excludePartitions) { this.name = name; this.excludePartitions = excludePartitions; this.partitionProperties = List.of(); } public Table(QualifiedName name, List<Assignment<T>> partitionProperties) { this.name = name; this.excludePartitions = false; this.partitionProperties = partitionProperties; } public QualifiedName getName() { return name; } public boolean excludePartitions() { return excludePartitions; } public List<Assignment<T>> partitionProperties() { return partitionProperties; } public <U> Table<U> map(Function<? super T, ? extends U> mapper) { if (partitionProperties.isEmpty()) { return new Table<>(name, excludePartitions); } else { return new Table<>(name, Lists.map(partitionProperties, x -> x.map(mapper))); } } @Override public <R, C> R accept(AstVisitor<R, C> visitor, C context) { return visitor.visitTable(this, context); } @Override public boolean equals(Object o) {<FILL_FUNCTION_BODY>} @Override public int hashCode() { return Objects.hash(name, partitionProperties); } @Override public String toString() { return "Table{" + "only=" + excludePartitions + ", " + name + ", partitionProperties=" + partitionProperties + '}'; } }
if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } Table<?> table = (Table<?>) o; return Objects.equals(name, table.name) && Objects.equals(partitionProperties, table.partitionProperties);
487
93
580
<methods>public non-sealed void <init>() ,public R accept(AstVisitor<R,C>, C) ,public abstract boolean equals(java.lang.Object) ,public abstract int hashCode() ,public abstract java.lang.String toString() <variables>
crate_crate
crate/libs/sql-parser/src/main/java/io/crate/sql/tree/TableFunction.java
TableFunction
equals
class TableFunction extends QueryBody { private final FunctionCall functionCall; public TableFunction(FunctionCall functionCall) { this.functionCall = functionCall; } public String name() { return functionCall.getName().toString(); } public FunctionCall functionCall() { return functionCall; } @Override public boolean equals(Object o) {<FILL_FUNCTION_BODY>} @Override public int hashCode() { return Objects.hash(functionCall); } @Override public String toString() { return "TableFunction{" + "functionCall=" + functionCall + '}'; } @Override public <R, C> R accept(AstVisitor<R, C> visitor, C context) { return visitor.visitTableFunction(this, context); } }
if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } TableFunction that = (TableFunction) o; return Objects.equals(functionCall, that.functionCall);
227
75
302
<methods>public non-sealed void <init>() ,public R accept(AstVisitor<R,C>, C) ,public abstract boolean equals(java.lang.Object) ,public abstract int hashCode() ,public abstract java.lang.String toString() <variables>
crate_crate
crate/libs/sql-parser/src/main/java/io/crate/sql/tree/WhenClause.java
WhenClause
hashCode
class WhenClause extends Expression { private final Expression operand; private final Expression result; public WhenClause(Expression operand, Expression result) { this.operand = operand; this.result = result; } public Expression getOperand() { return operand; } public Expression getResult() { return result; } @Override public <R, C> R accept(AstVisitor<R, C> visitor, C context) { return visitor.visitWhenClause(this, context); } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } WhenClause that = (WhenClause) o; if (!operand.equals(that.operand)) { return false; } if (!result.equals(that.result)) { return false; } return true; } @Override public int hashCode() {<FILL_FUNCTION_BODY>} }
int result1 = operand.hashCode(); result1 = 31 * result1 + result.hashCode(); return result1;
307
37
344
<methods>public non-sealed void <init>() ,public R accept(AstVisitor<R,C>, C) ,public final java.lang.String toString() <variables>
crate_crate
crate/libs/sql-parser/src/main/java/io/crate/sql/tree/Window.java
Window
merge
class Window extends Statement { private final String windowRef; private final List<Expression> partitions; private final List<SortItem> orderBy; private final Optional<WindowFrame> windowFrame; public Window(@Nullable String windowRef, List<Expression> partitions, List<SortItem> orderBy, Optional<WindowFrame> windowFrame) { this.partitions = partitions; this.orderBy = orderBy; this.windowFrame = windowFrame; this.windowRef = windowRef; } @Nullable public String windowRef() { return windowRef; } public List<Expression> getPartitions() { return partitions; } public List<SortItem> getOrderBy() { return orderBy; } public Optional<WindowFrame> getWindowFrame() { return windowFrame; } /** * Merges the provided window definition into the current one * by following the next merge rules: * <ul> * <li> The current window must not specify the partition by clause. * <li> The provided window must not specify the window frame, if * the current window definition is not empty. * <li> The provided window cannot override the order by clause * or window frame. * <ul/> * * @return A new {@link Window} window definition that contains merged * elements of both current and provided windows or a provided * window definition if the current definition is empty. * @throws IllegalArgumentException If the merge rules are violated. 
*/ public Window merge(Window that) {<FILL_FUNCTION_BODY>} private boolean empty() { return partitions.isEmpty() && orderBy.isEmpty() && windowFrame.isEmpty(); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Window window = (Window) o; if (!partitions.equals(window.partitions)) return false; if (!orderBy.equals(window.orderBy)) return false; return windowFrame.equals(window.windowFrame); } @Override public int hashCode() { int result = partitions.hashCode(); result = 31 * result + orderBy.hashCode(); result = 31 * result + windowFrame.hashCode(); return result; } @Override public String toString() { return "Window{" + "partitions=" + partitions + ", orderBy=" + orderBy + ", windowFrame=" + windowFrame + '}'; } @Override public <R, C> R accept(AstVisitor<R, C> visitor, C context) { return visitor.visitWindow(this, context); } }
if (this.empty()) { return that; } final List<Expression> partitionBy; if (!this.partitions.isEmpty()) { throw new IllegalArgumentException( "Cannot override PARTITION BY clause of window " + this.windowRef); } else { partitionBy = that.getPartitions(); } final List<SortItem> orderBy; if (that.getOrderBy().isEmpty()) { orderBy = this.getOrderBy(); } else { if (!this.getOrderBy().isEmpty()) { throw new IllegalArgumentException( "Cannot override ORDER BY clause of window " + this.windowRef); } orderBy = that.getOrderBy(); } if (that.getWindowFrame().isPresent()) { throw new IllegalArgumentException( "Cannot copy window " + this.windowRef() + " because it has a frame clause"); } return new Window(that.windowRef, partitionBy, orderBy, this.getWindowFrame());
725
253
978
<methods>public non-sealed void <init>() ,public R accept(AstVisitor<R,C>, C) <variables>
crate_crate
crate/libs/sql-parser/src/main/java/io/crate/sql/tree/WithQuery.java
WithQuery
toString
class WithQuery extends Node { private final String name; private final Query query; private final List<String> columnNames; public WithQuery(String name, Query query, List<String> columnNames) { this.name = name; this.query = Objects.requireNonNull(query, "query is null"); this.columnNames = columnNames; } public String name() { return name; } public Query query() { return query; } public List<String> columnNames() { return columnNames; } @Override public <R, C> R accept(AstVisitor<R, C> visitor, C context) { return visitor.visitWithQuery(this, context); } @Override public String toString() {<FILL_FUNCTION_BODY>} @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; WithQuery withQuery = (WithQuery) o; return name.equals(withQuery.name) && query.equals(withQuery.query) && Objects.equals(columnNames, withQuery.columnNames); } @Override public int hashCode() { return Objects.hash(name, query, columnNames); } }
return "WithQuery{" + "name=" + name + ", query=" + query + ", columnNames=" + columnNames + '}';
357
45
402
<methods>public non-sealed void <init>() ,public R accept(AstVisitor<R,C>, C) ,public abstract boolean equals(java.lang.Object) ,public abstract int hashCode() ,public abstract java.lang.String toString() <variables>
crate_crate
crate/plugins/cr8-copy-s3/src/main/java/io/crate/copy/s3/S3FileInput.java
S3FileInput
toPreGlobUri
class S3FileInput implements FileInput { private static final Pattern HAS_GLOBS_PATTERN = Pattern.compile("^((s3://)[^\\*]*/)[^\\*]*\\*.*"); private AmazonS3 client; // to prevent early GC during getObjectContent() in getStream() private static final Logger LOGGER = LogManager.getLogger(S3FileInput.class); private final S3ClientHelper clientBuilder; @NotNull private final S3URI normalizedS3URI; @Nullable private final S3URI preGlobUri; @NotNull private final Predicate<S3URI> uriPredicate; @Nullable private final String protocolSetting; public S3FileInput(URI uri, String protocol) { this.clientBuilder = new S3ClientHelper(); this.normalizedS3URI = S3URI.toS3URI(uri); this.preGlobUri = toPreGlobUri(this.normalizedS3URI); this.uriPredicate = new GlobPredicate(this.normalizedS3URI); this.protocolSetting = protocol; } @VisibleForTesting S3FileInput(S3ClientHelper clientBuilder, URI uri, String protocol) { this.clientBuilder = clientBuilder; this.normalizedS3URI = S3URI.toS3URI(uri); this.preGlobUri = toPreGlobUri(this.normalizedS3URI); this.uriPredicate = new GlobPredicate(this.normalizedS3URI); this.protocolSetting = protocol; } @Override public boolean isGlobbed() { return preGlobUri != null; } @Override public URI uri() { return normalizedS3URI.uri(); } @Override public List<URI> expandUri() throws IOException { if (isGlobbed() == false) { return List.of(normalizedS3URI.uri()); } if (client == null) { client = clientBuilder.client(preGlobUri, protocolSetting); } List<URI> uris = new ArrayList<>(); ObjectListing list = client.listObjects(preGlobUri.bucket(), preGlobUri.key()); addKeyUris(uris, list); while (list.isTruncated()) { list = client.listNextBatchOfObjects(list); addKeyUris(uris, list); } return uris; } private void addKeyUris(List<URI> uris, ObjectListing list) { assert preGlobUri != null; List<S3ObjectSummary> summaries = list.getObjectSummaries(); for (S3ObjectSummary summary : summaries) { String key = summary.getKey(); if (!key.endsWith("/")) { S3URI 
keyUri = preGlobUri.replacePath(summary.getBucketName(), key); if (uriPredicate.test(keyUri)) { uris.add(keyUri.uri()); if (LOGGER.isDebugEnabled()) { LOGGER.debug("{}", keyUri); } } } } } @Override public InputStream getStream(URI uri) throws IOException { S3URI s3URI = S3URI.toS3URI(uri); if (client == null) { client = clientBuilder.client(s3URI, protocolSetting); } S3Object object = client.getObject(s3URI.bucket(), s3URI.key()); if (object != null) { return object.getObjectContent(); } throw new IOException("Failed to load S3 URI: " + uri.toString()); } @Override public boolean sharedStorageDefault() { return true; } @VisibleForTesting @Nullable static S3URI toPreGlobUri(S3URI uri) {<FILL_FUNCTION_BODY>} private static class GlobPredicate implements Predicate<S3URI> { private final Pattern globPattern; GlobPredicate(S3URI uri) { this.globPattern = Pattern.compile(Globs.toUnixRegexPattern(uri.toString())); } @Override public boolean test(@Nullable S3URI input) { return input != null && globPattern.matcher(input.toString()).matches(); } } }
Matcher hasGlobMatcher = HAS_GLOBS_PATTERN.matcher(uri.toString()); S3URI preGlobUri = null; if (hasGlobMatcher.matches()) { preGlobUri = S3URI.toS3URI(URI.create(hasGlobMatcher.group(1))); } return preGlobUri;
1,132
98
1,230
<no_super_class>
crate_crate
crate/plugins/cr8-copy-s3/src/main/java/io/crate/copy/s3/S3FileOutput.java
S3OutputStream
doUploadIfNeeded
class S3OutputStream extends OutputStream { private static final int PART_SIZE = 5 * 1024 * 1024; private final AmazonS3 client; private final InitiateMultipartUploadResult multipartUpload; private final Executor executor; private final String bucketName; private final String key; private final List<CompletableFuture<PartETag>> pendingUploads = new ArrayList<>(); private ByteArrayOutputStream outputStream; long currentPartBytes = 0; int partNumber = 1; private S3OutputStream(Executor executor, S3URI s3URI, S3ClientHelper s3ClientHelper, String protocolSetting) throws IOException { this.executor = executor; bucketName = s3URI.bucket(); key = s3URI.key(); outputStream = new ByteArrayOutputStream(); client = s3ClientHelper.client(s3URI, protocolSetting); multipartUpload = client.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucketName, key)); } @Override public void write(byte[] b) throws IOException { outputStream.write(b); currentPartBytes += b.length; doUploadIfNeeded(); } @Override public void write(byte[] b, int off, int len) throws IOException { outputStream.write(b, off, len); currentPartBytes += len; doUploadIfNeeded(); } @Override public void write(int b) throws IOException { outputStream.write(b); currentPartBytes++; doUploadIfNeeded(); } private void doUploadIfNeeded() throws IOException {<FILL_FUNCTION_BODY>} @Override public void close() throws IOException { UploadPartRequest uploadPartRequest = new UploadPartRequest() .withBucketName(bucketName) .withKey(key) .withPartNumber(partNumber) .withPartSize(outputStream.size()) .withUploadId(multipartUpload.getUploadId()) .withInputStream(new ByteArrayInputStream(outputStream.toByteArray())); UploadPartResult uploadPartResult = client.uploadPart(uploadPartRequest); List<PartETag> partETags; try { partETags = CompletableFutures.allAsList(pendingUploads).get(); } catch (InterruptedException | ExecutionException e) { throw new IOException(e); } partETags.add(uploadPartResult.getPartETag()); 
client.completeMultipartUpload( new CompleteMultipartUploadRequest( bucketName, key, multipartUpload.getUploadId(), partETags) ); super.close(); } }
if (currentPartBytes >= PART_SIZE) { final ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray()); final int currentPart = partNumber; final long currentPartSize = currentPartBytes; outputStream.close(); outputStream = new ByteArrayOutputStream(); partNumber++; pendingUploads.add(CompletableFutures.supplyAsync(() -> { UploadPartRequest uploadPartRequest = new UploadPartRequest() .withBucketName(bucketName) .withKey(key) .withPartNumber(currentPart) .withPartSize(currentPartSize) .withUploadId(multipartUpload.getUploadId()) .withInputStream(inputStream); return client.uploadPart(uploadPartRequest).getPartETag(); }, executor)); currentPartBytes = 0; }
700
218
918
<no_super_class>
crate_crate
crate/plugins/cr8-copy-s3/src/main/java/io/crate/copy/s3/common/S3URI.java
S3URI
normalize
class S3URI { private static final String INVALID_URI_MSG = "Invalid URI. Please make sure that given URI is encoded properly."; private final URI uri; private final String accessKey; private final String secretKey; private final String bucket; private final String key; private final String endpoint; private S3URI(URI uri, String accessKey, String secretKey, String bucket, String key, String endpoint) { this.uri = uri; this.accessKey = accessKey; this.secretKey = secretKey; this.bucket = bucket; this.key = key; this.endpoint = endpoint; } public static S3URI toS3URI(URI uri) { URI normalizedURI = normalize(uri); String userInfo = getUserInfo(normalizedURI); String accessKey = null; String secretKey = null; if (userInfo != null) { String[] userInfoParts = userInfo.split(":"); try { accessKey = userInfoParts[0]; secretKey = userInfoParts[1]; } catch (ArrayIndexOutOfBoundsException e) { // ignore } // if the URI contains '@' and ':', a UserInfo is in fact given, but could not // be parsed properly because the URI is not valid (e.g. not properly encoded). } else if (normalizedURI.toString().contains("@") && normalizedURI.toString().contains(":")) { throw new IllegalArgumentException(INVALID_URI_MSG); } String bucket; String key; String path = normalizedURI.getPath().substring(1); int splitIndex = path.indexOf('/'); if (splitIndex == -1) { bucket = path; key = ""; } else { bucket = path.substring(0, splitIndex); key = path.substring(splitIndex + 1); } String endpoint = null; if (normalizedURI.getHost() != null) { endpoint = normalizedURI.getHost() + ":" + normalizedURI.getPort(); } return new S3URI(normalizedURI, accessKey, secretKey, bucket, key, endpoint); } @VisibleForTesting static String getUserInfo(URI uri) { // userInfo is provided but is contained in authority, NOT in userInfo. 
happens when host is not provided String userInfo = null; if (uri.getHost() == null && uri.getPort() == -1 && uri.getUserInfo() == null) { var authority = uri.getAuthority(); if (authority != null) { int idx = authority.indexOf('@'); if (idx != authority.length() - 1) { throw new IllegalArgumentException(INVALID_URI_MSG); } userInfo = uri.getAuthority().substring(0, idx); } } else { userInfo = uri.getUserInfo(); } return userInfo; } /** * As shown in <a href="https://crate.io/docs/crate/reference/en/latest/sql/statements/copy-from.html#sql-copy-from-s3">CrateDB Reference</a>, * the accepted s3 uri format is: * <pre>{@code * s3://[<accesskey>:<secretkey>@][<host>:<port>/]<bucketname>/<path>} * </pre> * which is inconsistent with {@link URI}. * <p> * For example, s3://bucket is an acceptable s3 uri to CrateDB but {@link URI} parses "bucket" as the host instead of the path. * <p> * Another example is, s3://bucket/key1/key2, another valid CrateDB s3 uri. Again, URI parses "bucket" as the host and "/key1/key2" as the path. * <p> * This method resolved this inconsistency such that URI can be utilized. 
* * @param uri a valid s3 uri * @return a normalized uri such that the path component contains the bucket and the key */ private static URI normalize(URI uri) {<FILL_FUNCTION_BODY>} public S3URI replacePath(String bucket, String key) { StringBuilder sb = new StringBuilder(); sb.append("s3://"); if (uri.getRawAuthority() != null) { sb.append(uri.getRawAuthority()); } sb.append("/").append(bucket).append("/").append(key); return new S3URI(URI.create(sb.toString()), accessKey, secretKey, bucket, key, endpoint); } @Override public String toString() { return uri.toString(); } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (!(obj instanceof S3URI that)) { return false; } return uri.equals(that.uri) && Objects.equals(accessKey, that.accessKey) && Objects.equals(secretKey, that.secretKey) && Objects.equals(bucket, that.bucket) && Objects.equals(key, that.key) && Objects.equals(endpoint, that.endpoint); } @Override public int hashCode() { return Objects.hash(uri, accessKey, secretKey, bucket, key, endpoint); } public URI uri() { return uri; } public String bucket() { return bucket; } public String key() { return key; } public String accessKey() { return accessKey; } public String secretKey() { return secretKey; } public String endpoint() { return endpoint; } }
assert "s3".equals(uri.getScheme()); if (uri.getHost() != null) { if (uri.getPath() == null || uri.getPort() == -1) { return URI.create("s3://" + (uri.getRawUserInfo() == null ? "" : uri.getRawUserInfo() + "@") + "/" + uri.getHost() + uri.getPath()); } } return uri;
1,513
124
1,637
<no_super_class>
crate_crate
crate/plugins/es-analysis-common/src/main/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactory.java
CharGroupTokenizerFactory
create
class CharGroupTokenizerFactory extends AbstractTokenizerFactory { private final Set<Integer> tokenizeOnChars = new HashSet<>(); private boolean tokenizeOnSpace = false; private boolean tokenizeOnLetter = false; private boolean tokenizeOnDigit = false; private boolean tokenizeOnPunctuation = false; private boolean tokenizeOnSymbol = false; public CharGroupTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); for (final String c : settings.getAsList("tokenize_on_chars")) { if (c == null || c.length() == 0) { throw new RuntimeException("[tokenize_on_chars] cannot contain empty characters"); } if (c.length() == 1) { tokenizeOnChars.add((int) c.charAt(0)); } else if (c.charAt(0) == '\\') { tokenizeOnChars.add((int) parseEscapedChar(c)); } else { switch (c) { case "letter": tokenizeOnLetter = true; break; case "digit": tokenizeOnDigit = true; break; case "whitespace": tokenizeOnSpace = true; break; case "punctuation": tokenizeOnPunctuation = true; break; case "symbol": tokenizeOnSymbol = true; break; default: throw new RuntimeException("Invalid escaped char in [" + c + "]"); } } } } private char parseEscapedChar(final String s) { int len = s.length(); char c = s.charAt(0); if (c == '\\') { if (1 >= len) throw new RuntimeException("Invalid escaped char in [" + s + "]"); c = s.charAt(1); switch (c) { case '\\': return '\\'; case 'n': return '\n'; case 't': return '\t'; case 'r': return '\r'; case 'b': return '\b'; case 'f': return '\f'; case 'u': if (len > 6) { throw new RuntimeException("Invalid escaped char in [" + s + "]"); } return (char) Integer.parseInt(s.substring(2), 16); default: throw new RuntimeException("Invalid escaped char " + c + " in [" + s + "]"); } } else { throw new RuntimeException("Invalid escaped char [" + s + "]"); } } @Override public Tokenizer create() {<FILL_FUNCTION_BODY>} }
// Builds a CharTokenizer that breaks tokens on every configured character
// class (whitespace/letter/digit/punctuation/symbol) and on each explicitly
// listed code point; all other code points are kept as token characters.
return new CharTokenizer() {
    @Override
    protected boolean isTokenChar(int codePoint) {
        final boolean splitsHere =
            (tokenizeOnSpace && Character.isWhitespace(codePoint))
            || (tokenizeOnLetter && Character.isLetter(codePoint))
            || (tokenizeOnDigit && Character.isDigit(codePoint))
            || (tokenizeOnPunctuation && CharMatcher.Basic.PUNCTUATION.isTokenChar(codePoint))
            || (tokenizeOnSymbol && CharMatcher.Basic.SYMBOL.isTokenChar(codePoint))
            || tokenizeOnChars.contains(codePoint);
        return !splitsHere;
    }
};
717
201
918
<methods>public void <init>(org.elasticsearch.index.IndexSettings, java.lang.String, org.elasticsearch.common.settings.Settings) ,public final Version version() <variables>protected final non-sealed Version version
crate_crate
crate/plugins/es-analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonGramsTokenFilterFactory.java
CommonGramsTokenFilterFactory
create
class CommonGramsTokenFilterFactory extends AbstractTokenFilterFactory { private final CharArraySet words; private final boolean ignoreCase; private final boolean queryMode; CommonGramsTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); this.ignoreCase = settings.getAsBoolean("ignore_case", false); this.queryMode = settings.getAsBoolean("query_mode", false); this.words = Analysis.parseCommonWords(env, settings, null, ignoreCase); if (this.words == null) { throw new IllegalArgumentException( "missing or empty [common_words] or [common_words_path] configuration for common_grams token filter"); } } @Override public TokenStream create(TokenStream tokenStream) {<FILL_FUNCTION_BODY>} }
// Wraps the stream in a CommonGramsFilter; in query mode an additional
// CommonGramsQueryFilter drops plain unigrams of common words in favour
// of the generated bigrams.
final CommonGramsFilter commonGrams = new CommonGramsFilter(tokenStream, words);
return queryMode ? new CommonGramsQueryFilter(commonGrams) : commonGrams;
225
55
280
<methods>public void <init>(org.elasticsearch.index.IndexSettings, java.lang.String, org.elasticsearch.common.settings.Settings) ,public java.lang.String name() ,public final Version version() <variables>private final non-sealed java.lang.String name,protected final non-sealed Version version
crate_crate
crate/plugins/es-analysis-common/src/main/java/org/elasticsearch/analysis/common/FrenchStemTokenFilterFactory.java
FrenchStemTokenFilterFactory
create
class FrenchStemTokenFilterFactory extends AbstractTokenFilterFactory { private final CharArraySet exclusions; FrenchStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); this.exclusions = Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET); } @Override public TokenStream create(TokenStream tokenStream) {<FILL_FUNCTION_BODY>} }
tokenStream = new SetKeywordMarkerFilter(tokenStream, exclusions); return new SnowballFilter(tokenStream, new FrenchStemmer());
124
38
162
<methods>public void <init>(org.elasticsearch.index.IndexSettings, java.lang.String, org.elasticsearch.common.settings.Settings) ,public java.lang.String name() ,public final Version version() <variables>private final non-sealed java.lang.String name,protected final non-sealed Version version
crate_crate
crate/plugins/es-analysis-common/src/main/java/org/elasticsearch/analysis/common/MinHashTokenFilterFactory.java
MinHashTokenFilterFactory
convertSettings
class MinHashTokenFilterFactory extends AbstractTokenFilterFactory { private final MinHashFilterFactory minHashFilterFactory; MinHashTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); minHashFilterFactory = new MinHashFilterFactory(convertSettings(settings)); } @Override public TokenStream create(TokenStream tokenStream) { return minHashFilterFactory.create(tokenStream); } private Map<String, String> convertSettings(Settings settings) {<FILL_FUNCTION_BODY>} }
// Translates the Elasticsearch-style snake_case setting names into the
// camelCase parameter names expected by Lucene's MinHashFilterFactory.
// Only settings that are explicitly present are forwarded.
final Map<String, String> converted = new HashMap<>();
final String[][] keyPairs = {
    {"hash_count", "hashCount"},
    {"bucket_count", "bucketCount"},
    {"hash_set_size", "hashSetSize"},
    {"with_rotation", "withRotation"},
};
for (String[] pair : keyPairs) {
    if (settings.hasValue(pair[0])) {
        converted.put(pair[1], settings.get(pair[0]));
    }
}
return converted;
149
176
325
<methods>public void <init>(org.elasticsearch.index.IndexSettings, java.lang.String, org.elasticsearch.common.settings.Settings) ,public java.lang.String name() ,public final Version version() <variables>private final non-sealed java.lang.String name,protected final non-sealed Version version
crate_crate
crate/plugins/es-analysis-common/src/main/java/org/elasticsearch/analysis/common/PathHierarchyTokenizerFactory.java
PathHierarchyTokenizerFactory
create
class PathHierarchyTokenizerFactory extends AbstractTokenizerFactory { private final int bufferSize; private final char delimiter; private final char replacement; private final int skip; private final boolean reverse; PathHierarchyTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); bufferSize = settings.getAsInt("buffer_size", 1024); String delimiter = settings.get("delimiter"); if (delimiter == null) { this.delimiter = PathHierarchyTokenizer.DEFAULT_DELIMITER; } else if (delimiter.length() != 1) { throw new IllegalArgumentException("delimiter must be a one char value"); } else { this.delimiter = delimiter.charAt(0); } String replacement = settings.get("replacement"); if (replacement == null) { this.replacement = this.delimiter; } else if (replacement.length() != 1) { throw new IllegalArgumentException("replacement must be a one char value"); } else { this.replacement = replacement.charAt(0); } this.skip = settings.getAsInt("skip", PathHierarchyTokenizer.DEFAULT_SKIP); this.reverse = settings.getAsBoolean("reverse", false); } @Override public Tokenizer create() {<FILL_FUNCTION_BODY>} }
if (reverse) { return new ReversePathHierarchyTokenizer(bufferSize, delimiter, replacement, skip); } return new PathHierarchyTokenizer(bufferSize, delimiter, replacement, skip);
386
61
447
<methods>public void <init>(org.elasticsearch.index.IndexSettings, java.lang.String, org.elasticsearch.common.settings.Settings) ,public final Version version() <variables>protected final non-sealed Version version
crate_crate
crate/plugins/es-analysis-common/src/main/java/org/elasticsearch/analysis/common/StandardHtmlStripAnalyzer.java
StandardHtmlStripAnalyzer
createComponents
class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase { /** * @deprecated use {@link StandardHtmlStripAnalyzer#StandardHtmlStripAnalyzer(CharArraySet)} instead */ @Deprecated public StandardHtmlStripAnalyzer() { super(EnglishAnalyzer.ENGLISH_STOP_WORDS_SET); } StandardHtmlStripAnalyzer(CharArraySet stopwords) { super(stopwords); } @Override protected TokenStreamComponents createComponents(final String fieldName) {<FILL_FUNCTION_BODY>} }
// Standard tokenization followed by lower-casing; stop-word removal is
// appended only when a non-empty stopword set was configured.
final Tokenizer source = new StandardTokenizer();
TokenStream sink = new LowerCaseFilter(source);
if (stopwords.isEmpty() == false) {
    sink = new StopFilter(sink, stopwords);
}
return new TokenStreamComponents(source, sink);
153
70
223
<no_super_class>
crate_crate
crate/plugins/es-analysis-common/src/main/java/org/elasticsearch/analysis/common/UniqueTokenFilter.java
UniqueTokenFilter
incrementToken
class UniqueTokenFilter extends TokenFilter { private final CharTermAttribute termAttribute = addAttribute(CharTermAttribute.class); private final PositionIncrementAttribute posIncAttribute = addAttribute(PositionIncrementAttribute.class); private final CharArraySet previous = new CharArraySet(8, false); private final boolean onlyOnSamePosition; UniqueTokenFilter(TokenStream in) { this(in, false); } UniqueTokenFilter(TokenStream in, boolean onlyOnSamePosition) { super(in); this.onlyOnSamePosition = onlyOnSamePosition; } @Override public final boolean incrementToken() throws IOException {<FILL_FUNCTION_BODY>} @Override public final void reset() throws IOException { super.reset(); previous.clear(); } }
// Emits only the first occurrence of each term. When onlyOnSamePosition is
// set, the "seen" set is reset whenever the position advances, so only
// duplicates stacked at the same token position are dropped.
while (input.incrementToken()) {
    final char[] term = termAttribute.buffer();
    final int length = termAttribute.length();
    boolean duplicate;
    if (onlyOnSamePosition) {
        final int posIncrement = posIncAttribute.getPositionIncrement();
        if (posIncrement > 0) {
            // a new position starts: forget terms seen at the previous one
            previous.clear();
        }
        duplicate = (posIncrement == 0 && previous.contains(term, 0, length));
    } else {
        duplicate = previous.contains(term, 0, length);
    }
    // clone the term, and add to the set of seen terms.
    char[] saved = new char[length];
    System.arraycopy(term, 0, saved, 0, length);
    previous.add(saved);
    if (!duplicate) {
        // first sighting of this term: pass it through
        return true;
    }
}
// input exhausted
return false;
211
222
433
<no_super_class>
crate_crate
crate/plugins/es-analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizer.java
XLowerCaseTokenizer
incrementToken
class XLowerCaseTokenizer extends Tokenizer { private int offset = 0; private int bufferIndex = 0; private int dataLen = 0; private int finalOffset = 0; private static final int IO_BUFFER_SIZE = 4096; private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); private final CharacterUtils.CharacterBuffer ioBuffer = CharacterUtils.newCharacterBuffer(IO_BUFFER_SIZE); @Override public final boolean incrementToken() throws IOException {<FILL_FUNCTION_BODY>} @Override public final void end() throws IOException { super.end(); // set final offset offsetAtt.setOffset(finalOffset, finalOffset); } @Override public void reset() throws IOException { super.reset(); bufferIndex = 0; offset = 0; dataLen = 0; finalOffset = 0; ioBuffer.reset(); // make sure to reset the IO buffer!! } }
// Scans the input for the next maximal run of Unicode letters, lower-casing
// each code point (supplementary-pair aware) as it is copied into the term
// buffer. Offsets are tracked in code units relative to the stream start.
// Returns false once the input is exhausted with nothing buffered.
clearAttributes();
int length = 0;
int start = -1; // this variable is always initialized
int end = -1;
char[] buffer = termAtt.buffer();
while (true) {
    // refill the IO buffer once the previous chunk has been consumed
    if (bufferIndex >= dataLen) {
        offset += dataLen;
        CharacterUtils.fill(ioBuffer, input); // read supplementary char aware with CharacterUtils
        if (ioBuffer.getLength() == 0) {
            dataLen = 0; // so next offset += dataLen won't decrement offset
            if (length > 0) {
                // end of stream but a token is buffered: emit it
                break;
            } else {
                finalOffset = correctOffset(offset);
                return false;
            }
        }
        dataLen = ioBuffer.getLength();
        bufferIndex = 0;
    }
    // use CharacterUtils here to support < 3.1 UTF-16 code unit behavior if the char based methods are gone
    final int c = Character.codePointAt(ioBuffer.getBuffer(), bufferIndex, ioBuffer.getLength());
    final int charCount = Character.charCount(c);
    bufferIndex += charCount;
    if (Character.isLetter(c)) { // if it's a token char
        if (length == 0) { // start of token
            assert start == -1;
            start = offset + bufferIndex - charCount;
            end = start;
        } else if (length >= buffer.length - 1) { // check if a supplementary could run out of bounds
            buffer = termAtt.resizeBuffer(2 + length); // make sure a supplementary fits in the buffer
        }
        end += charCount;
        length += Character.toChars(Character.toLowerCase(c), buffer, length); // buffer it, normalized
        int maxTokenLen = CharTokenizer.DEFAULT_MAX_WORD_LEN;
        if (length >= maxTokenLen) { // buffer overflow! make sure to check for >= surrogate pair could break == test
            break;
        }
    } else if (length > 0) { // at non-Letter w/ chars
        break; // return 'em
    }
}
termAtt.setLength(length);
assert start != -1;
offsetAtt.setOffset(correctOffset(start), finalOffset = correctOffset(end));
return true;
273
556
829
<no_super_class>
crate_crate
crate/plugins/es-discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java
AwsEc2ServiceImpl
buildClient
class AwsEc2ServiceImpl implements AwsEc2Service { private static final Logger LOGGER = LogManager.getLogger(AwsEc2ServiceImpl.class); public static final String EC2_METADATA_URL = "http://169.254.169.254/latest/meta-data/"; private final AtomicReference<LazyInitializable<AmazonEc2Reference, ElasticsearchException>> lazyClientReference = new AtomicReference<>(); private AmazonEC2 buildClient(Ec2ClientSettings clientSettings) {<FILL_FUNCTION_BODY>} // proxy for testing AmazonEC2 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration) { final AmazonEC2 client = new AmazonEC2Client(credentials, configuration); return client; } // pkg private for tests static ClientConfiguration buildConfiguration(Ec2ClientSettings clientSettings) { final ClientConfiguration clientConfiguration = new ClientConfiguration(); // the response metadata cache is only there for diagnostics purposes, // but can force objects from every response to the old generation. clientConfiguration.setResponseMetadataCacheSize(0); clientConfiguration.setProtocol(clientSettings.protocol); if (Strings.hasText(clientSettings.proxyHost)) { // TODO: remove this leniency, these settings should exist together and be validated clientConfiguration.setProxyHost(clientSettings.proxyHost); clientConfiguration.setProxyPort(clientSettings.proxyPort); clientConfiguration.setProxyUsername(clientSettings.proxyUsername); clientConfiguration.setProxyPassword(clientSettings.proxyPassword); } // Increase the number of retries in case of 5xx API responses final Random rand = Randomness.get(); final RetryPolicy retryPolicy = new RetryPolicy( RetryPolicy.RetryCondition.NO_RETRY_CONDITION, (originalRequest, exception, retriesAttempted) -> { // with 10 retries the max delay time is 320s/320000ms (10 * 2^5 * 1 * 1000) LOGGER.warn("EC2 API request failed, retry again. 
Reason was:", exception); return 1000L * (long) (10d * Math.pow(2, retriesAttempted / 2.0d) * (1.0d + rand.nextDouble())); }, 10, false); clientConfiguration.setRetryPolicy(retryPolicy); clientConfiguration.setSocketTimeout(clientSettings.readTimeoutMillis); return clientConfiguration; } // pkg private for tests static AWSCredentialsProvider buildCredentials(Logger logger, Ec2ClientSettings clientSettings) { final AWSCredentials credentials = clientSettings.credentials; if (credentials == null) { logger.debug("Using either environment variables, system properties or instance profile credentials"); return new DefaultAWSCredentialsProviderChain(); } else { logger.debug("Using basic key/secret credentials"); return new StaticCredentialsProvider(credentials); } } @Override public AmazonEc2Reference client() { final LazyInitializable<AmazonEc2Reference, ElasticsearchException> clientReference = this.lazyClientReference.get(); if (clientReference == null) { throw new IllegalStateException("Missing ec2 client configs"); } return clientReference.getOrCompute(); } /** * Refreshes the settings for the AmazonEC2 client. The new client will be build * using these new settings. The old client is usable until released. On release it * will be destroyed instead of being returned to the cache. 
*/ @Override public void refreshAndClearCache(Ec2ClientSettings clientSettings) { final LazyInitializable<AmazonEc2Reference, ElasticsearchException> newClient = new LazyInitializable<>( () -> new AmazonEc2Reference(buildClient(clientSettings)), clientReference -> clientReference.incRef(), clientReference -> clientReference.decRef() ); final LazyInitializable<AmazonEc2Reference, ElasticsearchException> oldClient = this.lazyClientReference.getAndSet(newClient); if (oldClient != null) { oldClient.reset(); } } @Override public void close() { final LazyInitializable<AmazonEc2Reference, ElasticsearchException> clientReference = this.lazyClientReference.getAndSet(null); if (clientReference != null) { clientReference.reset(); } // shutdown IdleConnectionReaper background thread // it will be restarted on new client usage IdleConnectionReaper.shutdown(); } }
// Assembles credentials and client configuration from the settings and
// constructs the EC2 client. An explicitly configured endpoint wins;
// otherwise the region reported by the instance metadata is used, if any.
final AWSCredentialsProvider credentialsProvider = buildCredentials(LOGGER, clientSettings);
final ClientConfiguration clientConfiguration = buildConfiguration(clientSettings);
final AmazonEC2 ec2Client = buildClient(credentialsProvider, clientConfiguration);
if (Strings.hasText(clientSettings.endpoint)) {
    LOGGER.debug("using explicit ec2 endpoint [{}]", clientSettings.endpoint);
    ec2Client.setEndpoint(clientSettings.endpoint);
} else {
    final Region detectedRegion = Regions.getCurrentRegion();
    if (detectedRegion != null) {
        LOGGER.debug("using ec2 region [{}]", detectedRegion);
        ec2Client.setRegion(detectedRegion);
    }
}
return ec2Client;
1,203
173
1,376
<no_super_class>
crate_crate
crate/plugins/es-discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java
Ec2ClientSettings
loadCredentials
class Ec2ClientSettings { /** The access key (ie login id) for connecting to ec2. */ static final Setting<SecureString> ACCESS_KEY_SETTING = Setting.maskedString("discovery.ec2.access_key"); /** The secret key (ie password) for connecting to ec2. */ static final Setting<SecureString> SECRET_KEY_SETTING = Setting.maskedString("discovery.ec2.secret_key"); /** The session token for connecting to ec2. */ static final Setting<SecureString> SESSION_TOKEN_SETTING = Setting.maskedString("discovery.ec2.session_token"); /** The host name of a proxy to connect to ec2 through. */ static final Setting<String> PROXY_HOST_SETTING = Setting.simpleString("discovery.ec2.proxy.host", Property.NodeScope); /** The port of a proxy to connect to ec2 through. */ static final Setting<Integer> PROXY_PORT_SETTING = Setting.intSetting("discovery.ec2.proxy.port", 80, 0, 1 << 16, Property.NodeScope); /** An override for the ec2 endpoint to connect to. */ static final Setting<String> ENDPOINT_SETTING = new Setting<>( "discovery.ec2.endpoint", "", s -> s.toLowerCase(Locale.ROOT), DataTypes.STRING, Property.NodeScope ); /** The protocol to use to connect to to ec2. */ static final Setting<Protocol> PROTOCOL_SETTING = new Setting<>( "discovery.ec2.protocol", "https", s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), DataTypes.STRING, Property.NodeScope ); /** The username of a proxy to connect to s3 through. */ static final Setting<SecureString> PROXY_USERNAME_SETTING = Setting.maskedString("discovery.ec2.proxy.username"); /** The password of a proxy to connect to s3 through. */ static final Setting<SecureString> PROXY_PASSWORD_SETTING = Setting.maskedString("discovery.ec2.proxy.password"); /** The socket timeout for connecting to s3. 
*/ static final Setting<TimeValue> READ_TIMEOUT_SETTING = Setting.timeSetting("discovery.ec2.read_timeout", TimeValue.timeValueMillis(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT), Property.NodeScope); private static final Logger LOGGER = LogManager.getLogger(Ec2ClientSettings.class); private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LOGGER); /** Credentials to authenticate with ec2. */ final AWSCredentials credentials; /** * The ec2 endpoint the client should talk to, or empty string to use the * default. */ final String endpoint; /** The protocol to use to talk to ec2. Defaults to https. */ final Protocol protocol; /** An optional proxy host that requests to ec2 should be made through. */ final String proxyHost; /** The port number the proxy host should be connected on. */ final int proxyPort; // these should be "secure" yet the api for the ec2 client only takes String, so // storing them // as SecureString here won't really help with anything /** An optional username for the proxy host, for basic authentication. */ final String proxyUsername; /** An optional password for the proxy host, for basic authentication. */ final String proxyPassword; /** The read timeout for the ec2 client. */ final int readTimeoutMillis; protected Ec2ClientSettings(AWSCredentials credentials, String endpoint, Protocol protocol, String proxyHost, int proxyPort, String proxyUsername, String proxyPassword, int readTimeoutMillis) { this.credentials = credentials; this.endpoint = endpoint; this.protocol = protocol; this.proxyHost = proxyHost; this.proxyPort = proxyPort; this.proxyUsername = proxyUsername; this.proxyPassword = proxyPassword; this.readTimeoutMillis = readTimeoutMillis; } static AWSCredentials loadCredentials(Settings settings) {<FILL_FUNCTION_BODY>} // pkg private for tests /** Parse settings for a single client. 
*/ static Ec2ClientSettings getClientSettings(Settings settings) { final AWSCredentials credentials = loadCredentials(settings); try (SecureString proxyUsername = PROXY_USERNAME_SETTING.get(settings); SecureString proxyPassword = PROXY_PASSWORD_SETTING.get(settings)) { return new Ec2ClientSettings( credentials, ENDPOINT_SETTING.get(settings), PROTOCOL_SETTING.get(settings), PROXY_HOST_SETTING.get(settings), PROXY_PORT_SETTING.get(settings), proxyUsername.toString(), proxyPassword.toString(), (int)READ_TIMEOUT_SETTING.get(settings).millis()); } } }
// Builds AWS credentials from the discovery.ec2.* secure settings. Returns
// null when neither access key nor secret key is configured, signalling the
// caller to fall back to the SDK's default provider chain. A session token
// without key+secret is a hard error; a key without a secret (or vice versa)
// is only deprecated for now.
try (SecureString key = ACCESS_KEY_SETTING.get(settings); SecureString secret = SECRET_KEY_SETTING.get(settings); SecureString sessionToken = SESSION_TOKEN_SETTING.get(settings)) {
    if (key.length() == 0 && secret.length() == 0) {
        // a session token alone is meaningless without key and secret
        if (sessionToken.length() > 0) {
            throw new SettingsException("Setting [{}] is set but [{}] and [{}] are not", SESSION_TOKEN_SETTING.getKey(), ACCESS_KEY_SETTING.getKey(), SECRET_KEY_SETTING.getKey());
        }
        LOGGER.debug("Using either environment variables, system properties or instance profile credentials");
        return null;
    } else {
        // exactly one of key/secret set: tolerated but deprecated
        if (key.length() == 0) {
            DEPRECATION_LOGGER.deprecatedAndMaybeLog("ec2_invalid_settings", "Setting [{}] is set but [{}] is not, which will be unsupported in future", SECRET_KEY_SETTING.getKey(), ACCESS_KEY_SETTING.getKey());
        }
        if (secret.length() == 0) {
            DEPRECATION_LOGGER.deprecatedAndMaybeLog("ec2_invalid_settings", "Setting [{}] is set but [{}] is not, which will be unsupported in future", ACCESS_KEY_SETTING.getKey(), SECRET_KEY_SETTING.getKey());
        }
        final AWSCredentials credentials;
        if (sessionToken.length() == 0) {
            LOGGER.debug("Using basic key/secret credentials");
            credentials = new BasicAWSCredentials(key.toString(), secret.toString());
        } else {
            LOGGER.debug("Using basic session credentials");
            credentials = new BasicSessionCredentials(key.toString(), secret.toString(), sessionToken.toString());
        }
        return credentials;
    }
}
1,332
487
1,819
<no_super_class>
crate_crate
crate/plugins/es-discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2NameResolver.java
Ec2NameResolver
resolve
class Ec2NameResolver implements CustomNameResolver { private static final Logger LOGGER = LogManager.getLogger(Ec2NameResolver.class); /** * enum that can be added to over time with more meta-data types (such as ipv6 when this is available) * * @author Paul_Loy */ private enum Ec2HostnameType { PRIVATE_IPv4("ec2:privateIpv4", "local-ipv4"), PRIVATE_DNS("ec2:privateDns", "local-hostname"), PUBLIC_IPv4("ec2:publicIpv4", "public-ipv4"), PUBLIC_DNS("ec2:publicDns", "public-hostname"), // some less verbose defaults PUBLIC_IP("ec2:publicIp", PUBLIC_IPv4.ec2Name), PRIVATE_IP("ec2:privateIp", PRIVATE_IPv4.ec2Name), EC2("ec2", PRIVATE_IPv4.ec2Name); final String configName; final String ec2Name; Ec2HostnameType(String configName, String ec2Name) { this.configName = configName; this.ec2Name = ec2Name; } } /** * @param type the ec2 hostname type to discover. * @return the appropriate host resolved from ec2 meta-data, or null if it cannot be obtained. * @see CustomNameResolver#resolveIfPossible(String) */ @SuppressForbidden(reason = "We call getInputStream in doPrivileged and provide SocketPermission") public InetAddress[] resolve(Ec2HostnameType type) throws IOException {<FILL_FUNCTION_BODY>} @Override public InetAddress[] resolveDefault() { return null; // using this, one has to explicitly specify _ec2_ in network setting } @Override public InetAddress[] resolveIfPossible(String value) throws IOException { for (Ec2HostnameType type : Ec2HostnameType.values()) { if (type.configName.equals(value)) { return resolve(type); } } return null; } }
// Fetches a single line from the EC2 instance metadata endpoint for the
// requested hostname type and resolves it to an InetAddress.
// NOTE(review): the inner error message says "gce metadata" although this is
// the EC2 resolver — likely a copy/paste from the GCE plugin; as a runtime
// string it is left untouched here.
InputStream in = null;
String metadataUrl = AwsEc2ServiceImpl.EC2_METADATA_URL + type.ec2Name;
try {
    URL url = new URL(metadataUrl);
    LOGGER.debug("obtaining ec2 hostname from ec2 meta-data url {}", url);
    URLConnection urlConnection = url.openConnection();
    urlConnection.setConnectTimeout(2000);
    in = urlConnection.getInputStream();
    try (BufferedReader urlReader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {
        String metadataResult = urlReader.readLine();
        if (metadataResult == null || metadataResult.length() == 0) {
            throw new IOException("no gce metadata returned from [" + url + "] for [" + type.configName + "]");
        }
        // only one address: because we explicitly ask for only one via the Ec2HostnameType
        return new InetAddress[]{InetAddress.getByName(metadataResult)};
    }
} catch (IOException e) {
    // rethrow with the URL for context, preserving the cause
    throw new IOException("IOException caught when fetching InetAddress from [" + metadataUrl + "]", e);
} finally {
    // closing `in` again after try-with-resources closed the reader is a
    // harmless double-close
    IOUtils.closeWhileHandlingException(in);
}
580
309
889
<no_super_class>
crate_crate
crate/plugins/es-repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java
AzureBlobStore
children
class AzureBlobStore implements BlobStore { private final AzureStorageService service; private final String container; private final LocationMode locationMode; public AzureBlobStore(RepositoryMetadata metadata) { this(metadata, new AzureStorageService(AzureStorageSettings.getClientSettings(metadata.settings()))); } @VisibleForTesting AzureBlobStore(RepositoryMetadata metadata, AzureStorageService service) { this.container = Repository.CONTAINER_SETTING.get(metadata.settings()); this.locationMode = Repository.LOCATION_MODE_SETTING.get(metadata.settings()); AzureStorageSettings repositorySettings = AzureStorageSettings .getClientSettings(metadata.settings()); service.refreshSettings(repositorySettings); this.service = service; } @Override public String toString() { return container; } /** * Gets the configured {@link LocationMode} for the Azure storage requests. */ public LocationMode getLocationMode() { return locationMode; } @Override public BlobContainer blobContainer(BlobPath path) { return new AzureBlobContainer(path, this); } @Override public void close() { } public boolean blobExists(String blob) throws URISyntaxException, StorageException { return service.blobExists(container, blob); } public void deleteBlob(String blob) throws URISyntaxException, StorageException { service.deleteBlob(container, blob); } public void deleteBlobDirectory(String keyPath) throws URISyntaxException, StorageException, IOException { service.deleteBlobDirectory(container, keyPath); } public Map<String, BlobContainer> children(BlobPath path) throws URISyntaxException, StorageException {<FILL_FUNCTION_BODY>} public InputStream getInputStream(String blob, long position, @Nullable Long length) throws URISyntaxException, StorageException, IOException { return service.getInputStream(container, blob, position, length); } public Map<String, BlobMetadata> listBlobsByPrefix(String keyPath, String prefix) throws URISyntaxException, StorageException { return service.listBlobsByPrefix(container, 
keyPath, prefix); } public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws URISyntaxException, StorageException, IOException { service.writeBlob(container, blobName, inputStream, blobSize, failIfAlreadyExists); } }
return Collections.unmodifiableMap(service.children(container, path).stream().collect( Collectors.toMap(Function.identity(), name -> new AzureBlobContainer(path.add(name), this))));
669
56
725
<no_super_class>
crate_crate
crate/plugins/es-repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java
AzureStorageSettings
getClientSettings
class AzureStorageSettings { private final String account; private final String key; private final String endpoint; private final String secondaryEndpoint; private final String endpointSuffix; private final TimeValue timeout; private final int maxRetries; private final Proxy proxy; private final LocationMode locationMode; @VisibleForTesting AzureStorageSettings(String account, String key, String endpoint, String secondaryEndpoint, String endpointSuffix, TimeValue timeout, int maxRetries, Proxy proxy, LocationMode locationMode) { this.account = account; this.key = key; this.endpoint = endpoint; this.secondaryEndpoint = secondaryEndpoint; this.endpointSuffix = endpointSuffix; this.timeout = timeout; this.maxRetries = maxRetries; this.proxy = proxy; this.locationMode = locationMode; } private AzureStorageSettings(String account, String key, LocationMode locationMode, String endpoint, String secondaryEndpoint, String endpointSuffix, TimeValue timeout, int maxRetries, Proxy.Type proxyType, String proxyHost, Integer proxyPort) { final boolean hasEndpointSuffix = Strings.hasText(endpointSuffix); final boolean hasEndpoint = Strings.hasText(endpoint); final boolean hasSecondaryEndpoint = Strings.hasText(secondaryEndpoint); if (hasEndpoint && hasEndpointSuffix) { throw new SettingsException("Cannot specify both endpoint and endpoint_suffix parameters"); } if (hasSecondaryEndpoint && hasEndpoint == false) { throw new SettingsException("Cannot specify secondary_endpoint without specifying endpoint"); } this.account = account; this.key = key; this.endpoint = endpoint; this.secondaryEndpoint = secondaryEndpoint; this.endpointSuffix = endpointSuffix; this.timeout = timeout; this.maxRetries = maxRetries; // Register the proxy if we have any // Validate proxy settings if (proxyType.equals(Proxy.Type.DIRECT) && ((proxyPort != 0) || Strings.hasText(proxyHost))) { throw new SettingsException("Azure Proxy port or host have been set but proxy type is not defined."); } if 
((proxyType.equals(Proxy.Type.DIRECT) == false) && ((proxyPort == 0) || Strings.isNullOrEmpty(proxyHost))) { throw new SettingsException("Azure Proxy type has been set but proxy host or port is not defined."); } if (proxyType.equals(Proxy.Type.DIRECT)) { proxy = null; } else { try { proxy = new Proxy(proxyType, new InetSocketAddress(InetAddress.getByName(proxyHost), proxyPort)); } catch (final UnknownHostException e) { throw new SettingsException("Azure proxy host is unknown.", e); } } this.locationMode = locationMode; } public TimeValue getTimeout() { return timeout; } public int getMaxRetries() { return maxRetries; } public Proxy getProxy() { return proxy; } public LocationMode getLocationMode() { return locationMode; } public String buildConnectionString() { final StringBuilder connectionStringBuilder = new StringBuilder(); connectionStringBuilder.append("DefaultEndpointsProtocol=https") .append(";AccountName=") .append(account) .append(";AccountKey=") .append(key); if (Strings.hasText(endpointSuffix)) { connectionStringBuilder.append(";EndpointSuffix=").append(endpointSuffix); } if (Strings.hasText(endpoint)) { connectionStringBuilder.append(";BlobEndpoint=").append(endpoint); } if (Strings.hasText(secondaryEndpoint)) { connectionStringBuilder.append(";BlobSecondaryEndpoint=").append(secondaryEndpoint); } return connectionStringBuilder.toString(); } static AzureStorageSettings getClientSettings(Settings settings) {<FILL_FUNCTION_BODY>} private static <T> T getConfigValue(Settings settings, Setting<T> clientSetting) { return clientSetting.get(settings); } static AzureStorageSettings copy(AzureStorageSettings settings) { return new AzureStorageSettings( settings.account, settings.key, settings.endpoint, settings.secondaryEndpoint, settings.endpointSuffix, settings.timeout, settings.maxRetries, settings.proxy, settings.locationMode); } @Override public String toString() { return "AzureStorageSettings{" + "account='" + account + '\'' + ", key='" + key + '\'' + ", 
timeout=" + timeout + ", endpoint='" + endpoint + '\'' + ", secondaryEndpoint='" + secondaryEndpoint + '\'' + ", endpointSuffix='" + endpointSuffix + '\'' + ", maxRetries=" + maxRetries + ", proxy=" + proxy + ", locationMode='" + locationMode + '\'' + '}'; } }
try (SecureString account = getConfigValue(settings, AzureRepository.Repository.ACCOUNT_SETTING); SecureString key = getConfigValue(settings, AzureRepository.Repository.KEY_SETTING)) { return new AzureStorageSettings( account.toString(), key.toString(), getConfigValue(settings, AzureRepository.Repository.LOCATION_MODE_SETTING), getConfigValue(settings, AzureRepository.Repository.ENDPOINT_SETTING), getConfigValue(settings, AzureRepository.Repository.SECONDARY_ENDPOINT_SETTING), getConfigValue(settings, AzureRepository.Repository.ENDPOINT_SUFFIX_SETTING), getConfigValue(settings, AzureRepository.Repository.TIMEOUT_SETTING), getConfigValue(settings, AzureRepository.Repository.MAX_RETRIES_SETTING), getConfigValue(settings, AzureRepository.Repository.PROXY_TYPE_SETTING), getConfigValue(settings, AzureRepository.Repository.PROXY_HOST_SETTING), getConfigValue(settings, AzureRepository.Repository.PROXY_PORT_SETTING)); }
1,349
282
1,631
<no_super_class>
crate_crate
crate/plugins/es-repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java
S3BlobStore
initStorageClass
class S3BlobStore implements BlobStore { private final S3Service service; private final String bucket; private final ByteSizeValue bufferSize; private final boolean serverSideEncryption; private final CannedAccessControlList cannedACL; private final StorageClass storageClass; private final RepositoryMetadata metadata; S3BlobStore(S3Service service, String bucket, boolean serverSideEncryption, ByteSizeValue bufferSize, String cannedACL, String storageClass, RepositoryMetadata metadata) { this.service = service; this.bucket = bucket; this.serverSideEncryption = serverSideEncryption; this.bufferSize = bufferSize; this.cannedACL = initCannedACL(cannedACL); this.storageClass = initStorageClass(storageClass); this.metadata = metadata; } @Override public String toString() { return bucket; } public AmazonS3Reference clientReference() { return service.client(metadata); } int getMaxRetries() { return MAX_RETRIES_SETTING.get(metadata.settings()); } public String bucket() { return bucket; } public boolean serverSideEncryption() { return serverSideEncryption; } public long bufferSizeInBytes() { return bufferSize.getBytes(); } @Override public BlobContainer blobContainer(BlobPath path) { return new S3BlobContainer(path, this); } @Override public void close() throws IOException { this.service.close(); } public CannedAccessControlList getCannedACL() { return cannedACL; } public StorageClass getStorageClass() { return storageClass; } public static StorageClass initStorageClass(String storageClass) {<FILL_FUNCTION_BODY>} /** * Constructs canned acl from string */ public static CannedAccessControlList initCannedACL(String cannedACL) { if ((cannedACL == null) || cannedACL.equals("")) { return CannedAccessControlList.Private; } for (final CannedAccessControlList cur : CannedAccessControlList.values()) { if (cur.toString().equalsIgnoreCase(cannedACL)) { return cur; } } throw new BlobStoreException("cannedACL is not valid: [" + cannedACL + "]"); } }
if ((storageClass == null) || storageClass.equals("")) { return StorageClass.Standard; } try { final StorageClass _storageClass = StorageClass.fromValue(storageClass.toUpperCase(Locale.ENGLISH)); if (_storageClass.equals(StorageClass.Glacier)) { throw new BlobStoreException("Glacier storage class is not supported"); } return _storageClass; } catch (final IllegalArgumentException illegalArgumentException) { throw new BlobStoreException("`" + storageClass + "` is not a valid S3 Storage Class."); }
659
153
812
<no_super_class>
crate_crate
crate/plugins/es-repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java
S3RepositoryPlugin
getRepositories
class S3RepositoryPlugin extends Plugin implements RepositoryPlugin { static { try { // kick jackson to do some static caching of declared members info Jackson.jsonNodeOf("{}"); // ClientConfiguration clinit has some classloader problems // TODO: fix that Class.forName("com.amazonaws.ClientConfiguration"); } catch (final ClassNotFoundException e) { throw new RuntimeException(e); } } protected final S3Service service; public S3RepositoryPlugin() { this.service = new S3Service(); } @Override public List<Setting<?>> getSettings() { return List.of(ACCESS_KEY_SETTING, SECRET_KEY_SETTING); } @Override public Map<String, Repository.Factory> getRepositories(final Environment env, final NamedXContentRegistry registry, ClusterService clusterService, RecoverySettings recoverySettings) {<FILL_FUNCTION_BODY>} @Override public void close() { service.close(); } }
return Collections.singletonMap( S3Repository.TYPE, new Repository.Factory() { @Override public TypeSettings settings() { return new TypeSettings(List.of(), S3Repository.optionalSettings()); } @Override public Repository create(RepositoryMetadata metadata) throws Exception { return new S3Repository(metadata, registry, service, clusterService, recoverySettings); } } );
282
112
394
<methods>public non-sealed void <init>() ,public org.elasticsearch.common.settings.Settings additionalSettings() ,public void close() throws java.io.IOException,public Collection<java.lang.Object> createComponents(org.elasticsearch.client.Client, org.elasticsearch.cluster.service.ClusterService, org.elasticsearch.threadpool.ThreadPool, org.elasticsearch.common.xcontent.NamedXContentRegistry, org.elasticsearch.env.Environment, org.elasticsearch.env.NodeEnvironment, org.elasticsearch.common.io.stream.NamedWriteableRegistry, Supplier<org.elasticsearch.repositories.RepositoriesService>) ,public Collection<org.elasticsearch.common.inject.Module> createGuiceModules() ,public List<org.elasticsearch.bootstrap.BootstrapCheck> getBootstrapChecks() ,public UnaryOperator<Map<java.lang.String,org.elasticsearch.cluster.metadata.Metadata.Custom>> getCustomMetadataUpgrader() ,public Collection<Class<? extends org.elasticsearch.common.component.LifecycleComponent>> getGuiceServiceClasses() ,public BiFunction<org.elasticsearch.cluster.metadata.IndexMetadata,org.elasticsearch.cluster.metadata.IndexTemplateMetadata,org.elasticsearch.cluster.metadata.IndexMetadata> getIndexMetadataUpgrader() ,public UnaryOperator<Map<java.lang.String,org.elasticsearch.cluster.metadata.IndexTemplateMetadata>> getIndexTemplateMetadataUpgrader() ,public List<org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry> getNamedWriteables() ,public List<org.elasticsearch.common.xcontent.NamedXContentRegistry.Entry> getNamedXContent() ,public Set<org.elasticsearch.cluster.node.DiscoveryNodeRole> getRoles() ,public List<SettingUpgrader<?>> getSettingUpgraders() ,public List<Setting<?>> getSettings() ,public void onIndexModule(org.elasticsearch.index.IndexModule) <variables>
crate_crate
crate/plugins/es-repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java
S3Service
buildClient
class S3Service implements Closeable { private static final Logger LOGGER = LogManager.getLogger(S3Service.class); private volatile Map<S3ClientSettings, AmazonS3Reference> clientsCache = new HashMap<>(); /** * Attempts to retrieve a client by name from the cache. * If the client does not exist it will be created. * * @param metadata {@link RepositoryMetadata} */ public AmazonS3Reference client(RepositoryMetadata metadata) { final S3ClientSettings clientSettings = S3ClientSettings .getClientSettings(metadata.settings()); var client = clientsCache.get(clientSettings); if (client != null && client.tryIncRef()) { return client; } synchronized (this) { var existing = clientsCache.get(clientSettings); if (existing != null && existing.tryIncRef()) { return existing; } final AmazonS3Reference newClientRef = new AmazonS3Reference(buildClient(clientSettings)); newClientRef.incRef(); clientsCache.put(clientSettings, newClientRef); return newClientRef; } } private AmazonS3 buildClient(final S3ClientSettings clientSettings) {<FILL_FUNCTION_BODY>} // pkg private for tests static ClientConfiguration buildConfiguration(S3ClientSettings clientSettings) { final ClientConfiguration clientConfiguration = new ClientConfiguration(); // the response metadata cache is only there for diagnostics purposes, // but can force objects from every response to the old generation. 
clientConfiguration.setResponseMetadataCacheSize(0); clientConfiguration.setProtocol(clientSettings.protocol); if (Strings.hasText(clientSettings.proxyHost)) { // TODO: remove this leniency, these settings should exist together and be validated clientConfiguration.setProxyHost(clientSettings.proxyHost); clientConfiguration.setProxyPort(clientSettings.proxyPort); clientConfiguration.setProxyUsername(clientSettings.proxyUsername); clientConfiguration.setProxyPassword(clientSettings.proxyPassword); } clientConfiguration.setMaxErrorRetry(clientSettings.maxRetries); clientConfiguration.setUseThrottleRetries(clientSettings.throttleRetries); clientConfiguration.setSocketTimeout(clientSettings.readTimeoutMillis); return clientConfiguration; } // pkg private for tests static AWSCredentialsProvider buildCredentials(Logger logger, S3ClientSettings clientSettings) { final AWSCredentials credentials = clientSettings.credentials; if (credentials == null) { logger.debug("Using instance profile credentials"); var ec2ContainerCredentialsProviderWrapper = new EC2ContainerCredentialsProviderWrapper(); try { // Check if credentials are available ec2ContainerCredentialsProviderWrapper.getCredentials(); return ec2ContainerCredentialsProviderWrapper; } catch (SdkClientException e) { throw new InvalidArgumentException( "Cannot find required credentials to create a repository of type s3. " + "Credentials must be provided either as repository options access_key and secret_key or AWS IAM roles." 
); } } else { logger.debug("Using basic key/secret credentials"); return new AWSStaticCredentialsProvider(credentials); } } @Override public void close() { synchronized (this) { // the clients will shutdown when they will not be used anymore for (final AmazonS3Reference clientReference : clientsCache.values()) { clientReference.decRef(); } clientsCache = new HashMap<>(); // shutdown IdleConnectionReaper background thread // it will be restarted on new client usage IdleConnectionReaper.shutdown(); } } }
final AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard(); builder.withCredentials(buildCredentials(LOGGER, clientSettings)); builder.withClientConfiguration(buildConfiguration(clientSettings)); final String endpoint = Strings.hasLength(clientSettings.endpoint) ? clientSettings.endpoint : Constants.S3_HOSTNAME; LOGGER.debug("using endpoint [{}]", endpoint); // If the endpoint configuration isn't set on the builder then the default behaviour is to try // and work out what region we are in and use an appropriate endpoint - see AwsClientBuilder#setRegion. // In contrast, directly-constructed clients use s3.amazonaws.com unless otherwise instructed. We currently // use a directly-constructed client, and need to keep the existing behaviour to avoid a breaking change, // so to move to using the builder we must set it explicitly to keep the existing behaviour. // // We do this because directly constructing the client is deprecated (was already deprecated in 1.1.223 too) // so this change removes that usage of a deprecated API. builder.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, null)); if (clientSettings.pathStyleAccess) { builder.enablePathStyleAccess(); } return builder.build();
937
330
1,267
<no_super_class>
crate_crate
crate/plugins/es-repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java
URLBlobContainer
readBlob
class URLBlobContainer extends AbstractBlobContainer { protected final URLBlobStore blobStore; protected final URL path; /** * Constructs new URLBlobContainer * * @param blobStore blob store * @param blobPath blob path for this container * @param path URL for this container */ public URLBlobContainer(URLBlobStore blobStore, BlobPath blobPath, URL path) { super(blobPath); this.blobStore = blobStore; this.path = path; } /** * Returns URL for this container * * @return URL for this container */ public URL url() { return this.path; } /** * This operation is not supported by URLBlobContainer */ @Override public boolean blobExists(String blobName) { assert false : "should never be called for a read-only url repo"; throw new UnsupportedOperationException("URL repository doesn't support this operation"); } /** * This operation is not supported by URLBlobContainer */ @Override public Map<String, BlobMetadata> listBlobs() throws IOException { throw new UnsupportedOperationException("URL repository doesn't support this operation"); } @Override public Map<String, BlobContainer> children() throws IOException { throw new UnsupportedOperationException("URL repository doesn't support this operation"); } /** * This operation is not supported by URLBlobContainer */ @Override public Map<String, BlobMetadata> listBlobsByPrefix(String blobNamePrefix) throws IOException { throw new UnsupportedOperationException("URL repository doesn't support this operation"); } /** * This operation is not supported by URLBlobContainer */ @Override public void deleteBlobsIgnoringIfNotExists(List<String> blobNames) { throw new UnsupportedOperationException("URL repository is read only"); } @Override public void delete() { throw new UnsupportedOperationException("URL repository is read only"); } @Override public InputStream readBlob(String name) throws IOException {<FILL_FUNCTION_BODY>} @Override public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) 
throws IOException { throw new UnsupportedOperationException("URL repository doesn't support this operation"); } }
URL url; try { url = path.toURI().resolve(name).toURL(); } catch (URISyntaxException e) { throw new RuntimeException(e); } try { return new BufferedInputStream(url.openStream(), blobStore.bufferSizeInBytes()); } catch (FileNotFoundException fnfe) { throw new NoSuchFileException("[" + name + "] blob not found"); }
630
113
743
<methods>public org.elasticsearch.common.blobstore.BlobPath path() <variables>private final non-sealed org.elasticsearch.common.blobstore.BlobPath path
crate_crate
crate/plugins/es-repository-url/src/main/java/org/elasticsearch/plugin/repository/url/URLRepositoryPlugin.java
URLRepositoryPlugin
getRepositories
class URLRepositoryPlugin extends Plugin implements RepositoryPlugin { @Override public List<Setting<?>> getSettings() { return Arrays.asList( URLRepository.ALLOWED_URLS_SETTING, URLRepository.REPOSITORIES_URL_SETTING, URLRepository.SUPPORTED_PROTOCOLS_SETTING ); } @Override public Map<String, Repository.Factory> getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, RecoverySettings recoverySettings) {<FILL_FUNCTION_BODY>} }
return Collections.singletonMap( URLRepository.TYPE, new Repository.Factory() { @Override public TypeSettings settings() { return new TypeSettings(URLRepository.mandatorySettings(), List.of()); } @Override public Repository create(RepositoryMetadata metadata) throws Exception { return new URLRepository(metadata, env, namedXContentRegistry, clusterService, recoverySettings); } } );
167
113
280
<methods>public non-sealed void <init>() ,public org.elasticsearch.common.settings.Settings additionalSettings() ,public void close() throws java.io.IOException,public Collection<java.lang.Object> createComponents(org.elasticsearch.client.Client, org.elasticsearch.cluster.service.ClusterService, org.elasticsearch.threadpool.ThreadPool, org.elasticsearch.common.xcontent.NamedXContentRegistry, org.elasticsearch.env.Environment, org.elasticsearch.env.NodeEnvironment, org.elasticsearch.common.io.stream.NamedWriteableRegistry, Supplier<org.elasticsearch.repositories.RepositoriesService>) ,public Collection<org.elasticsearch.common.inject.Module> createGuiceModules() ,public List<org.elasticsearch.bootstrap.BootstrapCheck> getBootstrapChecks() ,public UnaryOperator<Map<java.lang.String,org.elasticsearch.cluster.metadata.Metadata.Custom>> getCustomMetadataUpgrader() ,public Collection<Class<? extends org.elasticsearch.common.component.LifecycleComponent>> getGuiceServiceClasses() ,public BiFunction<org.elasticsearch.cluster.metadata.IndexMetadata,org.elasticsearch.cluster.metadata.IndexTemplateMetadata,org.elasticsearch.cluster.metadata.IndexMetadata> getIndexMetadataUpgrader() ,public UnaryOperator<Map<java.lang.String,org.elasticsearch.cluster.metadata.IndexTemplateMetadata>> getIndexTemplateMetadataUpgrader() ,public List<org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry> getNamedWriteables() ,public List<org.elasticsearch.common.xcontent.NamedXContentRegistry.Entry> getNamedXContent() ,public Set<org.elasticsearch.cluster.node.DiscoveryNodeRole> getRoles() ,public List<SettingUpgrader<?>> getSettingUpgraders() ,public List<Setting<?>> getSettings() ,public void onIndexModule(org.elasticsearch.index.IndexModule) <variables>
crate_crate
crate/plugins/es-repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java
URLRepository
checkURL
class URLRepository extends BlobStoreRepository { private static final Logger LOGGER = LogManager.getLogger(URLRepository.class); public static final String TYPE = "url"; public static final Setting<List<String>> SUPPORTED_PROTOCOLS_SETTING = Setting.listSetting("repositories.url.supported_protocols", Arrays.asList("http", "https", "ftp", "file", "jar"), Function.identity(), DataTypes.STRING_ARRAY, Property.NodeScope); public static final Setting<List<URIPattern>> ALLOWED_URLS_SETTING = Setting.listSetting("repositories.url.allowed_urls", Collections.emptyList(), URIPattern::new, DataTypes.STRING_ARRAY, Property.NodeScope); public static final Setting<URL> URL_SETTING = new Setting<>("url", "http:///", URLRepository::parseURL, DataTypes.STRING, Property.NodeScope); public static final Setting<URL> REPOSITORIES_URL_SETTING = new Setting<>( "repositories.url.url", (s) -> s.get("repositories.uri.url", "http:///"), URLRepository::parseURL, DataTypes.STRING, Property.NodeScope ); public static List<Setting<?>> mandatorySettings() { return List.of(URL_SETTING); } private final List<String> supportedProtocols; private final URIPattern[] urlWhiteList; private final Environment environment; private final BlobPath basePath; private final URL url; /** * Constructs a read-only URL-based repository */ public URLRepository(RepositoryMetadata metadata, Environment environment, NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, RecoverySettings recoverySettings) { super(metadata, namedXContentRegistry, clusterService, recoverySettings, BlobPath.cleanPath()); if (URL_SETTING.exists(metadata.settings()) == false && REPOSITORIES_URL_SETTING.exists(environment.settings()) == false) { throw new RepositoryException(metadata.name(), "missing url"); } this.environment = environment; supportedProtocols = SUPPORTED_PROTOCOLS_SETTING.get(environment.settings()); urlWhiteList = ALLOWED_URLS_SETTING.get(environment.settings()).toArray(new URIPattern[]{}); basePath = 
BlobPath.cleanPath(); url = URL_SETTING.exists(metadata.settings()) ? URL_SETTING.get(metadata.settings()) : REPOSITORIES_URL_SETTING.get(environment.settings()); } @Override protected BlobStore createBlobStore() { URL normalizedURL = checkURL(url); return new URLBlobStore(environment.settings(), normalizedURL); } // only use for testing @Override protected BlobContainer blobContainer() { return super.blobContainer(); } // only use for testing @Override protected BlobStore getBlobStore() { return super.getBlobStore(); } @Override public BlobPath basePath() { return basePath; } /** * Makes sure that the url is white listed or if it points to the local file system it matches one on of the root path in path.repo */ private URL checkURL(URL url) {<FILL_FUNCTION_BODY>} @Override public boolean isReadOnly() { return true; } private static URL parseURL(String s) { try { return new URI(s).toURL(); } catch (URISyntaxException | MalformedURLException e) { throw new IllegalArgumentException("Unable to parse URL repository setting", e); } } }
String protocol = url.getProtocol(); if (protocol == null) { throw new RepositoryException(getMetadata().name(), "unknown url protocol from URL [" + url + "]"); } for (String supportedProtocol : supportedProtocols) { if (supportedProtocol.equals(protocol)) { try { if (URIPattern.match(urlWhiteList, url.toURI())) { // URL matches white list - no additional processing is needed return url; } } catch (URISyntaxException ex) { LOGGER.warn("cannot parse the specified url [{}]", url); throw new RepositoryException(getMetadata().name(), "cannot parse the specified url [" + url + "]"); } // We didn't match white list - try to resolve against path.repo URL normalizedUrl = environment.resolveRepoURL(url); if (normalizedUrl == null) { String logMessage = "The specified url [{}] doesn't start with any repository paths specified by the " + "path.repo setting or by {} setting: [{}] "; LOGGER.warn(logMessage, url, ALLOWED_URLS_SETTING.getKey(), environment.repoFiles()); String exceptionMessage = "file url [" + url + "] doesn't match any of the locations specified by path.repo or " + ALLOWED_URLS_SETTING.getKey(); throw new RepositoryException(getMetadata().name(), exceptionMessage); } return normalizedUrl; } } throw new RepositoryException(getMetadata().name(), "unsupported url protocol [" + protocol + "] from URL [" + url + "]");
993
410
1,403
<methods>public org.elasticsearch.common.blobstore.BlobPath basePath() ,public org.elasticsearch.common.blobstore.BlobStore blobStore() ,public void deleteSnapshots(Collection<org.elasticsearch.snapshots.SnapshotId>, long, org.elasticsearch.Version, ActionListener<org.elasticsearch.repositories.RepositoryData>) ,public void endVerification(java.lang.String) ,public void executeConsistentStateUpdate(Function<org.elasticsearch.repositories.RepositoryData,org.elasticsearch.cluster.ClusterStateUpdateTask>, java.lang.String, Consumer<java.lang.Exception>) ,public void finalizeSnapshot(org.elasticsearch.repositories.ShardGenerations, long, org.elasticsearch.cluster.metadata.Metadata, org.elasticsearch.snapshots.SnapshotInfo, org.elasticsearch.Version, UnaryOperator<org.elasticsearch.cluster.ClusterState>, ActionListener<org.elasticsearch.repositories.RepositoryData>) ,public org.elasticsearch.cluster.metadata.RepositoryMetadata getMetadata() ,public CompletableFuture<org.elasticsearch.repositories.RepositoryData> getRepositoryData() ,public CompletableFuture<org.elasticsearch.index.snapshots.IndexShardSnapshotStatus> getShardSnapshotStatus(org.elasticsearch.snapshots.SnapshotId, org.elasticsearch.repositories.IndexId, org.elasticsearch.index.shard.ShardId) ,public CompletableFuture<org.elasticsearch.cluster.metadata.Metadata> getSnapshotGlobalMetadata(org.elasticsearch.snapshots.SnapshotId) ,public CompletableFuture<org.elasticsearch.cluster.metadata.IndexMetadata> getSnapshotIndexMetadata(org.elasticsearch.repositories.RepositoryData, org.elasticsearch.snapshots.SnapshotId, org.elasticsearch.repositories.IndexId) ,public CompletableFuture<Collection<org.elasticsearch.cluster.metadata.IndexMetadata>> getSnapshotIndexMetadata(org.elasticsearch.repositories.RepositoryData, org.elasticsearch.snapshots.SnapshotId, Collection<org.elasticsearch.repositories.IndexId>) ,public CompletableFuture<org.elasticsearch.snapshots.SnapshotInfo> 
getSnapshotInfo(org.elasticsearch.snapshots.SnapshotId) ,public boolean isReadOnly() ,public org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot loadShardSnapshot(org.elasticsearch.common.blobstore.BlobContainer, org.elasticsearch.snapshots.SnapshotId) ,public java.io.InputStream maybeRateLimitRestores(java.io.InputStream) ,public java.io.InputStream maybeRateLimitSnapshots(java.io.InputStream) ,public void restoreShard(org.elasticsearch.index.store.Store, org.elasticsearch.snapshots.SnapshotId, org.elasticsearch.repositories.IndexId, org.elasticsearch.index.shard.ShardId, org.elasticsearch.indices.recovery.RecoveryState, ActionListener<java.lang.Void>) ,public org.elasticsearch.common.blobstore.BlobContainer shardContainer(org.elasticsearch.repositories.IndexId, int) ,public void snapshotShard(org.elasticsearch.index.store.Store, org.elasticsearch.snapshots.SnapshotId, org.elasticsearch.repositories.IndexId, IndexCommit, java.lang.String, org.elasticsearch.index.snapshots.IndexShardSnapshotStatus, org.elasticsearch.Version, ActionListener<java.lang.String>) ,public java.lang.String startVerification() ,public static java.lang.String testBlobPrefix(java.lang.String) ,public org.elasticsearch.threadpool.ThreadPool threadPool() ,public java.lang.String toString() ,public void updateState(org.elasticsearch.cluster.ClusterState) ,public void verify(java.lang.String, org.elasticsearch.cluster.node.DiscoveryNode) <variables>public static final Setting<java.lang.Boolean> COMPRESS_SETTING,public static final ChecksumBlobStoreFormat<org.elasticsearch.cluster.metadata.Metadata> GLOBAL_METADATA_FORMAT,public static final java.lang.String INDEX_FILE_PREFIX,public static final java.lang.String INDEX_LATEST_BLOB,public static final ChecksumBlobStoreFormat<org.elasticsearch.cluster.metadata.IndexMetadata> INDEX_METADATA_FORMAT,public static final ChecksumBlobStoreFormat<org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots> 
INDEX_SHARD_SNAPSHOTS_FORMAT,public static final ChecksumBlobStoreFormat<org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot> INDEX_SHARD_SNAPSHOT_FORMAT,private static final Setting<org.elasticsearch.common.unit.ByteSizeValue> IO_BUFFER_SIZE_SETTING,private static final Logger LOGGER,public static final java.lang.String METADATA_NAME_FORMAT,public static final java.lang.String METADATA_PREFIX,private static final java.lang.String SNAPSHOT_CODEC,public static final ChecksumBlobStoreFormat<org.elasticsearch.snapshots.SnapshotInfo> SNAPSHOT_FORMAT,private static final java.lang.String SNAPSHOT_INDEX_NAME_FORMAT,private static final java.lang.String SNAPSHOT_INDEX_PREFIX,public static final java.lang.String SNAPSHOT_NAME_FORMAT,public static final java.lang.String SNAPSHOT_PREFIX,private static final java.lang.String TESTS_FILE,private static final java.lang.String UPLOADED_DATA_BLOB_PREFIX,private static final java.lang.String VIRTUAL_DATA_BLOB_PREFIX,private final non-sealed org.elasticsearch.common.blobstore.BlobPath basePath,private volatile boolean bestEffortConsistency,private final SetOnce<org.elasticsearch.common.blobstore.BlobContainer> blobContainer,private final SetOnce<org.elasticsearch.common.blobstore.BlobStore> blobStore,protected final non-sealed int bufferSize,private final non-sealed org.elasticsearch.cluster.service.ClusterService clusterService,private final non-sealed boolean compress,private final java.util.concurrent.atomic.AtomicLong latestKnownRepoGen,private final java.lang.Object lock,protected volatile org.elasticsearch.cluster.metadata.RepositoryMetadata metadata,private final non-sealed org.elasticsearch.common.xcontent.NamedXContentRegistry namedXContentRegistry,private final non-sealed boolean readOnly,private final non-sealed org.elasticsearch.indices.recovery.RecoverySettings recoverySettings,private final non-sealed RateLimiter restoreRateLimiter,private final org.elasticsearch.common.metrics.CounterMetric 
restoreRateLimitingTimeInNanos,private final non-sealed RateLimiter snapshotRateLimiter,private final org.elasticsearch.common.metrics.CounterMetric snapshotRateLimitingTimeInNanos,protected final non-sealed org.elasticsearch.threadpool.ThreadPool threadPool,private boolean uncleanStart
crate_crate
crate/plugins/repository-gcs/src/main/java/io/crate/gcs/GCSRepository.java
GCSRepository
buildBasePath
class GCSRepository extends BlobStoreRepository { // package private for testing static final ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, ByteSizeUnit.BYTES); /** * Maximum allowed object size in GCS. * * @see <a href="https://cloud.google.com/storage/quotas#objects">GCS documentation</a> for details. */ static final ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue(5, ByteSizeUnit.TB); static final Setting<String> BUCKET_SETTING = simpleString("bucket", Property.NodeScope, Property.Dynamic); static final Setting<String> BASE_PATH_SETTING = simpleString("base_path", Property.NodeScope, Property.Dynamic); static final Setting<ByteSizeValue> CHUNK_SIZE_SETTING = byteSizeSetting( "chunk_size", MAX_CHUNK_SIZE, MIN_CHUNK_SIZE, MAX_CHUNK_SIZE, Property.NodeScope, Property.Dynamic ); private final GCSService service; private final ByteSizeValue chunkSize; private final String bucket; public GCSRepository( final RepositoryMetadata metadata, final NamedXContentRegistry namedXContentRegistry, final ClusterService clusterService, final GCSService service, final RecoverySettings recoverySettings) { super(metadata, namedXContentRegistry, clusterService, recoverySettings, buildBasePath(metadata)); this.service = service; this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); this.bucket = BUCKET_SETTING.get(metadata.settings()); } @Override protected GCSBlobStore createBlobStore() { return new GCSBlobStore(bucket, service, metadata, bufferSize); } private static BlobPath buildBasePath(RepositoryMetadata metadata) {<FILL_FUNCTION_BODY>} @Override protected ByteSizeValue chunkSize() { return chunkSize; } }
String basePath = BASE_PATH_SETTING.get(metadata.settings()); if (Strings.hasLength(basePath)) { BlobPath path = new BlobPath(); for (String elem : basePath.split("/")) { path = path.add(elem); } return path; } else { return BlobPath.cleanPath(); }
537
100
637
<methods>public org.elasticsearch.common.blobstore.BlobPath basePath() ,public org.elasticsearch.common.blobstore.BlobStore blobStore() ,public void deleteSnapshots(Collection<org.elasticsearch.snapshots.SnapshotId>, long, org.elasticsearch.Version, ActionListener<org.elasticsearch.repositories.RepositoryData>) ,public void endVerification(java.lang.String) ,public void executeConsistentStateUpdate(Function<org.elasticsearch.repositories.RepositoryData,org.elasticsearch.cluster.ClusterStateUpdateTask>, java.lang.String, Consumer<java.lang.Exception>) ,public void finalizeSnapshot(org.elasticsearch.repositories.ShardGenerations, long, org.elasticsearch.cluster.metadata.Metadata, org.elasticsearch.snapshots.SnapshotInfo, org.elasticsearch.Version, UnaryOperator<org.elasticsearch.cluster.ClusterState>, ActionListener<org.elasticsearch.repositories.RepositoryData>) ,public org.elasticsearch.cluster.metadata.RepositoryMetadata getMetadata() ,public CompletableFuture<org.elasticsearch.repositories.RepositoryData> getRepositoryData() ,public CompletableFuture<org.elasticsearch.index.snapshots.IndexShardSnapshotStatus> getShardSnapshotStatus(org.elasticsearch.snapshots.SnapshotId, org.elasticsearch.repositories.IndexId, org.elasticsearch.index.shard.ShardId) ,public CompletableFuture<org.elasticsearch.cluster.metadata.Metadata> getSnapshotGlobalMetadata(org.elasticsearch.snapshots.SnapshotId) ,public CompletableFuture<org.elasticsearch.cluster.metadata.IndexMetadata> getSnapshotIndexMetadata(org.elasticsearch.repositories.RepositoryData, org.elasticsearch.snapshots.SnapshotId, org.elasticsearch.repositories.IndexId) ,public CompletableFuture<Collection<org.elasticsearch.cluster.metadata.IndexMetadata>> getSnapshotIndexMetadata(org.elasticsearch.repositories.RepositoryData, org.elasticsearch.snapshots.SnapshotId, Collection<org.elasticsearch.repositories.IndexId>) ,public CompletableFuture<org.elasticsearch.snapshots.SnapshotInfo> 
getSnapshotInfo(org.elasticsearch.snapshots.SnapshotId) ,public boolean isReadOnly() ,public org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot loadShardSnapshot(org.elasticsearch.common.blobstore.BlobContainer, org.elasticsearch.snapshots.SnapshotId) ,public java.io.InputStream maybeRateLimitRestores(java.io.InputStream) ,public java.io.InputStream maybeRateLimitSnapshots(java.io.InputStream) ,public void restoreShard(org.elasticsearch.index.store.Store, org.elasticsearch.snapshots.SnapshotId, org.elasticsearch.repositories.IndexId, org.elasticsearch.index.shard.ShardId, org.elasticsearch.indices.recovery.RecoveryState, ActionListener<java.lang.Void>) ,public org.elasticsearch.common.blobstore.BlobContainer shardContainer(org.elasticsearch.repositories.IndexId, int) ,public void snapshotShard(org.elasticsearch.index.store.Store, org.elasticsearch.snapshots.SnapshotId, org.elasticsearch.repositories.IndexId, IndexCommit, java.lang.String, org.elasticsearch.index.snapshots.IndexShardSnapshotStatus, org.elasticsearch.Version, ActionListener<java.lang.String>) ,public java.lang.String startVerification() ,public static java.lang.String testBlobPrefix(java.lang.String) ,public org.elasticsearch.threadpool.ThreadPool threadPool() ,public java.lang.String toString() ,public void updateState(org.elasticsearch.cluster.ClusterState) ,public void verify(java.lang.String, org.elasticsearch.cluster.node.DiscoveryNode) <variables>public static final Setting<java.lang.Boolean> COMPRESS_SETTING,public static final ChecksumBlobStoreFormat<org.elasticsearch.cluster.metadata.Metadata> GLOBAL_METADATA_FORMAT,public static final java.lang.String INDEX_FILE_PREFIX,public static final java.lang.String INDEX_LATEST_BLOB,public static final ChecksumBlobStoreFormat<org.elasticsearch.cluster.metadata.IndexMetadata> INDEX_METADATA_FORMAT,public static final ChecksumBlobStoreFormat<org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots> 
INDEX_SHARD_SNAPSHOTS_FORMAT,public static final ChecksumBlobStoreFormat<org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot> INDEX_SHARD_SNAPSHOT_FORMAT,private static final Setting<org.elasticsearch.common.unit.ByteSizeValue> IO_BUFFER_SIZE_SETTING,private static final Logger LOGGER,public static final java.lang.String METADATA_NAME_FORMAT,public static final java.lang.String METADATA_PREFIX,private static final java.lang.String SNAPSHOT_CODEC,public static final ChecksumBlobStoreFormat<org.elasticsearch.snapshots.SnapshotInfo> SNAPSHOT_FORMAT,private static final java.lang.String SNAPSHOT_INDEX_NAME_FORMAT,private static final java.lang.String SNAPSHOT_INDEX_PREFIX,public static final java.lang.String SNAPSHOT_NAME_FORMAT,public static final java.lang.String SNAPSHOT_PREFIX,private static final java.lang.String TESTS_FILE,private static final java.lang.String UPLOADED_DATA_BLOB_PREFIX,private static final java.lang.String VIRTUAL_DATA_BLOB_PREFIX,private final non-sealed org.elasticsearch.common.blobstore.BlobPath basePath,private volatile boolean bestEffortConsistency,private final SetOnce<org.elasticsearch.common.blobstore.BlobContainer> blobContainer,private final SetOnce<org.elasticsearch.common.blobstore.BlobStore> blobStore,protected final non-sealed int bufferSize,private final non-sealed org.elasticsearch.cluster.service.ClusterService clusterService,private final non-sealed boolean compress,private final java.util.concurrent.atomic.AtomicLong latestKnownRepoGen,private final java.lang.Object lock,protected volatile org.elasticsearch.cluster.metadata.RepositoryMetadata metadata,private final non-sealed org.elasticsearch.common.xcontent.NamedXContentRegistry namedXContentRegistry,private final non-sealed boolean readOnly,private final non-sealed org.elasticsearch.indices.recovery.RecoverySettings recoverySettings,private final non-sealed RateLimiter restoreRateLimiter,private final org.elasticsearch.common.metrics.CounterMetric 
restoreRateLimitingTimeInNanos,private final non-sealed RateLimiter snapshotRateLimiter,private final org.elasticsearch.common.metrics.CounterMetric snapshotRateLimitingTimeInNanos,protected final non-sealed org.elasticsearch.threadpool.ThreadPool threadPool,private boolean uncleanStart
crate_crate
crate/plugins/repository-gcs/src/main/java/io/crate/gcs/GCSRepositoryPlugin.java
GCSRepositoryPlugin
settings
class GCSRepositoryPlugin extends Plugin implements RepositoryPlugin { private final GCSService service; public GCSRepositoryPlugin() { this.service = new GCSService(); } @Override public List<Setting<?>> getSettings() { return List.of( GCSRepository.COMPRESS_SETTING, GCSRepository.BUCKET_SETTING, GCSRepository.BASE_PATH_SETTING, GCSRepository.CHUNK_SIZE_SETTING, GCSClientSettings.PROJECT_ID_SETTING, GCSClientSettings.PRIVATE_KEY_ID_SETTING, GCSClientSettings.PRIVATE_KEY_SETTING, GCSClientSettings.CLIENT_EMAIL_SETTING, GCSClientSettings.CLIENT_ID_SETTING, GCSClientSettings.ENDPOINT_SETTING, GCSClientSettings.TOKEN_URI_SETTING, GCSClientSettings.CONNECT_TIMEOUT_SETTING, GCSClientSettings.READ_TIMEOUT_SETTING ); } @Override public Map<String, Repository.Factory> getRepositories( Environment environment, NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, RecoverySettings recoverySettings) { return Map.of( "gcs", new Repository.Factory() { @Override public TypeSettings settings() {<FILL_FUNCTION_BODY>} @Override public Repository create(RepositoryMetadata metadata) { return new GCSRepository(metadata, namedXContentRegistry, clusterService, service, recoverySettings); } } ); } }
return new TypeSettings( // Required settings List.of( GCSRepository.BUCKET_SETTING, GCSClientSettings.PROJECT_ID_SETTING, GCSClientSettings.PRIVATE_KEY_ID_SETTING, GCSClientSettings.PRIVATE_KEY_SETTING, GCSClientSettings.CLIENT_ID_SETTING, GCSClientSettings.CLIENT_EMAIL_SETTING ), // Optional settings List.of( GCSRepository.CHUNK_SIZE_SETTING, GCSRepository.COMPRESS_SETTING, GCSRepository.BASE_PATH_SETTING, GCSClientSettings.ENDPOINT_SETTING, GCSClientSettings.TOKEN_URI_SETTING, GCSClientSettings.CONNECT_TIMEOUT_SETTING, GCSClientSettings.READ_TIMEOUT_SETTING ) );
436
248
684
<methods>public non-sealed void <init>() ,public org.elasticsearch.common.settings.Settings additionalSettings() ,public void close() throws java.io.IOException,public Collection<java.lang.Object> createComponents(org.elasticsearch.client.Client, org.elasticsearch.cluster.service.ClusterService, org.elasticsearch.threadpool.ThreadPool, org.elasticsearch.common.xcontent.NamedXContentRegistry, org.elasticsearch.env.Environment, org.elasticsearch.env.NodeEnvironment, org.elasticsearch.common.io.stream.NamedWriteableRegistry, Supplier<org.elasticsearch.repositories.RepositoriesService>) ,public Collection<org.elasticsearch.common.inject.Module> createGuiceModules() ,public List<org.elasticsearch.bootstrap.BootstrapCheck> getBootstrapChecks() ,public UnaryOperator<Map<java.lang.String,org.elasticsearch.cluster.metadata.Metadata.Custom>> getCustomMetadataUpgrader() ,public Collection<Class<? extends org.elasticsearch.common.component.LifecycleComponent>> getGuiceServiceClasses() ,public BiFunction<org.elasticsearch.cluster.metadata.IndexMetadata,org.elasticsearch.cluster.metadata.IndexTemplateMetadata,org.elasticsearch.cluster.metadata.IndexMetadata> getIndexMetadataUpgrader() ,public UnaryOperator<Map<java.lang.String,org.elasticsearch.cluster.metadata.IndexTemplateMetadata>> getIndexTemplateMetadataUpgrader() ,public List<org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry> getNamedWriteables() ,public List<org.elasticsearch.common.xcontent.NamedXContentRegistry.Entry> getNamedXContent() ,public Set<org.elasticsearch.cluster.node.DiscoveryNodeRole> getRoles() ,public List<SettingUpgrader<?>> getSettingUpgraders() ,public List<Setting<?>> getSettings() ,public void onIndexModule(org.elasticsearch.index.IndexModule) <variables>
crate_crate
crate/server/src/main/java/io/crate/action/sql/Sessions.java
Sessions
cancel
class Sessions { public static final Setting<Boolean> NODE_READ_ONLY_SETTING = Setting.boolSetting( "node.sql.read_only", false, Setting.Property.NodeScope); public static final Setting<TimeValue> STATEMENT_TIMEOUT = Setting.timeSetting( "statement_timeout", TimeValue.timeValueMillis(0), Setting.Property.Dynamic, Setting.Property.NodeScope, Setting.Property.Exposed ); public static final Setting<Integer> MEMORY_LIMIT = Setting.intSetting( "memory.operation_limit", 0, Property.Dynamic, Property.NodeScope, Property.Exposed); private static final Logger LOGGER = LogManager.getLogger(Sessions.class); private final NodeContext nodeCtx; private final Analyzer analyzer; private final Planner planner; private final Provider<DependencyCarrier> executorProvider; private final JobsLogs jobsLogs; private final ClusterService clusterService; private final TableStats tableStats; private final boolean isReadOnly; private final AtomicInteger nextSessionId = new AtomicInteger(); private final ConcurrentMap<Integer, Session> sessions = new ConcurrentHashMap<>(); private volatile boolean disabled; private volatile TimeValue defaultStatementTimeout; private volatile int memoryLimit; @Inject public Sessions(NodeContext nodeCtx, Analyzer analyzer, Planner planner, Provider<DependencyCarrier> executorProvider, JobsLogs jobsLogs, Settings settings, ClusterService clusterService, TableStats tableStats) { this.nodeCtx = nodeCtx; this.analyzer = analyzer; this.planner = planner; this.executorProvider = executorProvider; this.jobsLogs = jobsLogs; this.clusterService = clusterService; this.tableStats = tableStats; this.isReadOnly = NODE_READ_ONLY_SETTING.get(settings); this.defaultStatementTimeout = STATEMENT_TIMEOUT.get(settings); this.memoryLimit = MEMORY_LIMIT.get(settings); ClusterSettings clusterSettings = clusterService.getClusterSettings(); clusterSettings.addSettingsUpdateConsumer(STATEMENT_TIMEOUT, statementTimeout -> { this.defaultStatementTimeout = statementTimeout; }); 
clusterSettings.addSettingsUpdateConsumer(MEMORY_LIMIT, newLimit -> { this.memoryLimit = newLimit; }); } private Session newSession(CoordinatorSessionSettings sessionSettings) { if (disabled) { throw new NodeDisconnectedException(clusterService.localNode(), "sql"); } int sessionId = nextSessionId.incrementAndGet(); Session session = new Session( sessionId, nodeCtx, analyzer, planner, jobsLogs, isReadOnly, executorProvider.get(), sessionSettings, tableStats, () -> sessions.remove(sessionId) ); sessions.put(sessionId, session); return session; } public Session newSession(@Nullable String defaultSchema, Role authenticatedUser) { CoordinatorSessionSettings sessionSettings; if (defaultSchema == null) { sessionSettings = new CoordinatorSessionSettings(authenticatedUser); } else { sessionSettings = new CoordinatorSessionSettings(authenticatedUser, defaultSchema); } sessionSettings.statementTimeout(defaultStatementTimeout); sessionSettings.memoryLimit(memoryLimit); return newSession(sessionSettings); } public Session newSystemSession() { return newSession(CoordinatorSessionSettings.systemDefaults()); } /** * Disable processing of new sql statements. * {@link io.crate.cluster.gracefulstop.DecommissioningService} must call this while before starting to decommission. */ public void disable() { disabled = true; } /** * (Re-)Enable processing of new sql statements * {@link io.crate.cluster.gracefulstop.DecommissioningService} must call this when decommissioning is aborted. */ public void enable() { disabled = false; } public boolean isEnabled() { return !disabled; } /** * @return true if a session matches the keyData, false otherwise. 
*/ public boolean cancelLocally(KeyData keyData) { Session session = sessions.get(keyData.pid()); if (session != null && session.secret() == keyData.secretKey()) { session.cancelCurrentJob(); return true; } else { return false; } } public void cancel(KeyData keyData) {<FILL_FUNCTION_BODY>} public Iterable<Session> getActive() { return sessions.values(); } public Iterable<Cursor> getCursors(Role user) { return () -> sessions.values().stream() .filter(session -> nodeCtx.roles().hasPrivilege(user, Permission.AL, Securable.CLUSTER, null) || session.sessionSettings().sessionUser().equals(user)) .flatMap(session -> StreamSupport.stream(session.cursors.spliterator(), false)) .iterator(); } }
boolean cancelled = cancelLocally(keyData); if (!cancelled) { var client = executorProvider.get().client(); CancelRequest request = new CancelRequest(keyData); client.execute(TransportCancelAction.ACTION, request).whenComplete((res, err) -> { if (err != null) { LOGGER.error("Error during cancel broadcast", err); } }); }
1,368
111
1,479
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/action/sql/parser/SQLBulkArgsParseElement.java
SQLBulkArgsParseElement
parseSubArrays
class SQLBulkArgsParseElement extends SQLArgsParseElement { @Override public void parse(XContentParser parser, SQLRequestParseContext context) throws Exception { XContentParser.Token token = parser.currentToken(); if (token != XContentParser.Token.START_ARRAY) { throw new SQLParseSourceException("Field [" + parser.currentName() + "] has an invalid value"); } context.bulkArgs(parseSubArrays(parser)); } private List<List<Object>> parseSubArrays(XContentParser parser) throws IOException {<FILL_FUNCTION_BODY>} }
XContentParser.Token token; ArrayList<List<Object>> bulkArgs = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.START_ARRAY) { bulkArgs.add(parseSubArray(parser)); } else { throw new SQLParseSourceException("Field [" + parser.currentName() + "] has an invalid value"); } } return bulkArgs;
155
125
280
<methods>public void parse(org.elasticsearch.common.xcontent.XContentParser, io.crate.action.sql.parser.SQLRequestParseContext) throws java.lang.Exception<variables>
crate_crate
crate/server/src/main/java/io/crate/action/sql/parser/SQLRequestParser.java
Fields
parse
class Fields { static final String STMT = "stmt"; static final String ARGS = "args"; static final String BULK_ARGS = "bulk_args"; } private static final Map<String, SQLParseElement> ELEMENT_PARSERS = Map.of( Fields.STMT, new SQLStmtParseElement(), Fields.ARGS, new SQLArgsParseElement(), Fields.BULK_ARGS, new SQLBulkArgsParseElement() ); private SQLRequestParser() { } private static void validate(SQLRequestParseContext parseContext) throws SQLParseSourceException { if (parseContext.stmt() == null) { throw new SQLParseSourceException("Field [stmt] was not defined"); } } public static SQLRequestParseContext parseSource(BytesReference source) throws IOException { if (source.length() == 0) { throw new SQLParseException("Missing request body"); } XContentParser parser = null; try { SQLRequestParseContext parseContext = new SQLRequestParseContext(); parser = XContentType.JSON.xContent().createParser( NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, source.streamInput()); parse(parseContext, parser); validate(parseContext); return parseContext; } catch (Exception e) { String sSource = "_na_"; try { sSource = source.utf8ToString(); } catch (Throwable e1) { // ignore } throw new SQLParseException("Failed to parse source [" + sSource + "]", e); } finally { if (parser != null) { parser.close(); } } } public static void parse(SQLRequestParseContext parseContext, XContentParser parser) throws Exception {<FILL_FUNCTION_BODY>
XContentParser.Token token; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { String fieldName = parser.currentName(); parser.nextToken(); SQLParseElement element = ELEMENT_PARSERS.get(fieldName); if (element == null) { throw new SQLParseException("No parser for element [" + fieldName + "]"); } element.parse(parser, parseContext); } else if (token == null) { break; } }
478
155
633
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/AlterTableRerouteAnalyzer.java
RerouteOptionVisitor
visitReroutePromoteReplica
class RerouteOptionVisitor extends AstVisitor<RerouteAnalyzedStatement, Context> { @Override public RerouteAnalyzedStatement visitRerouteMoveShard(RerouteMoveShard<?> node, Context context) { return new AnalyzedRerouteMoveShard( context.tableInfo, Lists.map( context.partitionProperties, p -> p.map(a -> context.exprAnalyzerWithFields.convert(a, context.exprCtx)) ), node.map(x -> context.exprAnalyzer.convert((Expression) x, context.exprCtx)) ); } @Override public RerouteAnalyzedStatement visitRerouteAllocateReplicaShard(RerouteAllocateReplicaShard<?> node, Context context) { return new AnalyzedRerouteAllocateReplicaShard( context.tableInfo, Lists.map( context.partitionProperties, p -> p.map(a -> context.exprAnalyzerWithFields.convert(a, context.exprCtx)) ), node.map(x -> context.exprAnalyzer.convert((Expression) x, context.exprCtx)) ); } @Override public RerouteAnalyzedStatement visitRerouteCancelShard(RerouteCancelShard<?> node, Context context) { return new AnalyzedRerouteCancelShard( context.tableInfo, Lists.map( context.partitionProperties, p -> p.map(a -> context.exprAnalyzerWithFields.convert(a, context.exprCtx)) ), node.map(x -> context.exprAnalyzer.convert((Expression) x, context.exprCtx)) ); } @Override public RerouteAnalyzedStatement visitReroutePromoteReplica(PromoteReplica<?> node, Context context) {<FILL_FUNCTION_BODY>} }
var promoteReplica = node.map(x -> context.exprAnalyzer.convert((Expression) x, context.exprCtx)); HashMap<String, Symbol> properties = new HashMap<>(promoteReplica.properties().properties()); Symbol acceptDataLoss = properties.remove(PromoteReplica.Properties.ACCEPT_DATA_LOSS); if (!properties.isEmpty()) { throw new IllegalArgumentException( "Unsupported options provided to REROUTE PROMOTE REPLICA: " + properties.keySet()); } return new AnalyzedPromoteReplica( context.tableInfo, Lists.map( context.partitionProperties, p -> p.map(a -> context.exprAnalyzerWithFields.convert(a, context.exprCtx)) ), node.map(x -> context.exprAnalyzer.convert((Expression) x, context.exprCtx)), acceptDataLoss == null ? Literal.BOOLEAN_FALSE : acceptDataLoss );
497
254
751
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/AnalyzedAlterTable.java
AnalyzedAlterTable
visitSymbols
class AnalyzedAlterTable implements DDLStatement { private final DocTableInfo tableInfo; private final AlterTable<Symbol> alterTable; public AnalyzedAlterTable(DocTableInfo tableInfo, AlterTable<Symbol> alterTable) { this.tableInfo = tableInfo; this.alterTable = alterTable; } public DocTableInfo tableInfo() { return tableInfo; } public AlterTable<Symbol> alterTable() { return alterTable; } @Override public void visitSymbols(Consumer<? super Symbol> consumer) {<FILL_FUNCTION_BODY>} @Override public <C, R> R accept(AnalyzedStatementVisitor<C, R> visitor, C context) { return visitor.visitAlterTable(this, context); } }
for (Assignment<Symbol> partitionProperty : alterTable.table().partitionProperties()) { consumer.accept(partitionProperty.expression()); partitionProperty.expressions().forEach(consumer); } alterTable.genericProperties().properties().values().forEach(consumer);
216
68
284
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/AnalyzedCopyTo.java
AnalyzedCopyTo
visitSymbols
class AnalyzedCopyTo implements AnalyzedStatement { private final TableInfo tableInfo; private final Table<Symbol> table; private final Symbol uri; private final GenericProperties<Symbol> properties; private final List<Symbol> columns; @Nullable private final Symbol whereClause; AnalyzedCopyTo(TableInfo tableInfo, Table<Symbol> table, Symbol uri, GenericProperties<Symbol> properties, List<Symbol> columns, @Nullable Symbol whereClause) { this.tableInfo = tableInfo; this.table = table; this.uri = uri; this.properties = properties; this.columns = columns; this.whereClause = whereClause; } public TableInfo tableInfo() { return tableInfo; } public Table<Symbol> table() { return table; } public Symbol uri() { return uri; } public GenericProperties<Symbol> properties() { return properties; } public List<Symbol> columns() { return columns; } @Nullable public Symbol whereClause() { return whereClause; } @Override public <C, R> R accept(AnalyzedStatementVisitor<C, R> analyzedStatementVisitor, C context) { return analyzedStatementVisitor.visitCopyToStatement(this, context); } @Override public void visitSymbols(Consumer<? super Symbol> consumer) {<FILL_FUNCTION_BODY>} @Override public boolean isWriteOperation() { return false; } }
for (var partitionProperty : table.partitionProperties()) { consumer.accept(partitionProperty.columnName()); partitionProperty.expressions().forEach(consumer); } columns.forEach(consumer); if (whereClause != null) { consumer.accept(whereClause); } consumer.accept(uri); properties.properties().values().forEach(consumer);
416
100
516
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/AnalyzedCreateBlobTable.java
AnalyzedCreateBlobTable
visitSymbols
class AnalyzedCreateBlobTable implements AnalyzedStatement { private final RelationName relationName; private final CreateBlobTable<Symbol> createBlobTable; AnalyzedCreateBlobTable(RelationName relationName, CreateBlobTable<Symbol> createBlobTable) { this.relationName = relationName; this.createBlobTable = createBlobTable; } public RelationName relationName() { return relationName; } public CreateBlobTable<Symbol> createBlobTable() { return createBlobTable; } @Override public <C, R> R accept(AnalyzedStatementVisitor<C, R> analyzedStatementVisitor, C context) { return analyzedStatementVisitor.visitAnalyzedCreateBlobTable(this, context); } @Override public boolean isWriteOperation() { return true; } @Override public void visitSymbols(Consumer<? super Symbol> consumer) {<FILL_FUNCTION_BODY>} }
ClusteredBy<Symbol> clusteredBy = createBlobTable.clusteredBy(); if (clusteredBy != null) { clusteredBy.column().ifPresent(consumer); clusteredBy.numberOfShards().ifPresent(consumer); } createBlobTable.genericProperties().properties().values().forEach(consumer);
263
93
356
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/AnalyzedUpdateStatement.java
AnalyzedUpdateStatement
visitSymbols
class AnalyzedUpdateStatement implements AnalyzedStatement { private final AbstractTableRelation<?> table; private final LinkedHashMap<Reference, Symbol> assignmentByTargetCol; private final Symbol query; /** * List of values or expressions used to be retrieved from the updated rows. */ @Nullable private final List<Symbol> returnValues; public AnalyzedUpdateStatement(AbstractTableRelation<?> table, LinkedHashMap<Reference, Symbol> assignmentByTargetCol, Symbol query, @Nullable List<Symbol> returnValues) { this.table = table; this.assignmentByTargetCol = assignmentByTargetCol; this.query = query; this.returnValues = returnValues; } public AbstractTableRelation<?> table() { return table; } public LinkedHashMap<Reference, Symbol> assignmentByTargetCol() { return assignmentByTargetCol; } public Symbol query() { return query; } @Nullable @Override public List<Symbol> outputs() { return returnValues; } @Override public <C, R> R accept(AnalyzedStatementVisitor<C, R> visitor, C context) { return visitor.visitAnalyzedUpdateStatement(this, context); } @Override public boolean isWriteOperation() { return true; } @Override public void visitSymbols(Consumer<? super Symbol> consumer) {<FILL_FUNCTION_BODY>} }
consumer.accept(query); for (Symbol sourceExpr : assignmentByTargetCol.values()) { consumer.accept(sourceExpr); } if (returnValues != null) { for (Symbol returningSymbol : returnValues) { consumer.accept(returningSymbol); } }
387
78
465
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/CreateAnalyzerStatementAnalyzer.java
AnalyzerElementsAnalysisVisitor
visitTokenFilters
class AnalyzerElementsAnalysisVisitor extends DefaultTraversalVisitor<Void, CreateAnalyzerStatementAnalyzer.Context> { static final AnalyzerElementsAnalysisVisitor INSTANCE = new AnalyzerElementsAnalysisVisitor(); static Void analyze(AnalyzerElement<Expression> node, Context context) { node.accept(INSTANCE, context); return null; } @Override public Void visitTokenizer(Tokenizer<?> node, Context context) { var tokenizer = (Tokenizer<Expression>) node; GenericProperties<Symbol> properties = tokenizer.properties() .map(p -> context.exprAnalyzerWithFieldsAsString.convert(p, context.exprContext)); context.tokenizer = new Tuple<>(tokenizer.ident(), properties); return null; } @Override public Void visitGenericProperty(GenericProperty<?> node, Context context) { var property = (GenericProperty<Expression>) node; context.genericAnalyzerProperties.put( property.key(), context.exprAnalyzerWithFieldsAsString.convert( property.value(), context.exprContext) ); return null; } @Override public Void visitTokenFilters(TokenFilters<?> node, Context context) {<FILL_FUNCTION_BODY>} @Override public Void visitCharFilters(CharFilters<?> node, Context context) { var charFilters = (CharFilters<Expression>) node; for (NamedProperties<Expression> charFilter : charFilters.charFilters()) { GenericProperties<Symbol> properties = charFilter.properties() .map(p -> context.exprAnalyzerWithFieldsAsString.convert(p, context.exprContext)); context.charFilters.put(charFilter.ident(), properties); } return null; } }
var tokenFilters = (TokenFilters<Expression>) node; for (NamedProperties<Expression> tokenFilter : tokenFilters.tokenFilters()) { GenericProperties<Symbol> properties = tokenFilter.properties() .map(p -> context.exprAnalyzerWithFieldsAsString.convert(p, context.exprContext)); context.tokenFilters.put(tokenFilter.ident(), properties); } return null;
478
110
588
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/CreateSnapshotAnalyzer.java
CreateSnapshotAnalyzer
validateRepository
class CreateSnapshotAnalyzer { private final RepositoryService repositoryService; private final NodeContext nodeCtx; CreateSnapshotAnalyzer(RepositoryService repositoryService, NodeContext nodeCtx) { this.repositoryService = repositoryService; this.nodeCtx = nodeCtx; } public AnalyzedCreateSnapshot analyze(CreateSnapshot<Expression> createSnapshot, ParamTypeHints paramTypeHints, CoordinatorTxnCtx txnCtx) { String repositoryName = createSnapshot.name().getPrefix() .map(name -> { validateRepository(name); return name.toString(); }) .orElseThrow(() -> new IllegalArgumentException( "Snapshot must be specified by \"<repository_name>\".\"<snapshot_name>\"")); String snapshotName = createSnapshot.name().getSuffix(); var exprCtx = new ExpressionAnalysisContext(txnCtx.sessionSettings()); var exprAnalyzerWithoutFields = new ExpressionAnalyzer( txnCtx, nodeCtx, paramTypeHints, FieldProvider.UNSUPPORTED, null); var exprAnalyzerWithFieldsAsString = new ExpressionAnalyzer( txnCtx, nodeCtx, paramTypeHints, FieldProvider.TO_LITERAL_VALIDATE_NAME, null); List<Table<Symbol>> tables = Lists.map( createSnapshot.tables(), (table) -> table.map(x -> exprAnalyzerWithFieldsAsString.convert(x, exprCtx))); GenericProperties<Symbol> properties = createSnapshot.properties() .map(x -> exprAnalyzerWithoutFields.convert(x, exprCtx)); return new AnalyzedCreateSnapshot(repositoryName, snapshotName, tables, properties); } private void validateRepository(QualifiedName name) {<FILL_FUNCTION_BODY>} }
if (name.getParts().size() != 1) { throw new IllegalArgumentException( String.format(Locale.ENGLISH, "Invalid repository name '%s'", name) ); } repositoryService.failIfRepositoryDoesNotExist(name.toString());
475
73
548
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/CreateTableAsAnalyzer.java
CreateTableAsAnalyzer
analyze
class CreateTableAsAnalyzer { private final CreateTableStatementAnalyzer createTableStatementAnalyzer; private final InsertAnalyzer insertAnalyzer; private final RelationAnalyzer relationAnalyzer; public CreateTableAsAnalyzer(CreateTableStatementAnalyzer createTableStatementAnalyzer, InsertAnalyzer insertAnalyzer, RelationAnalyzer relationAnalyzer) { this.createTableStatementAnalyzer = createTableStatementAnalyzer; this.insertAnalyzer = insertAnalyzer; this.relationAnalyzer = relationAnalyzer; } public AnalyzedCreateTableAs analyze(CreateTableAs<Expression> createTableAs, ParamTypeHints paramTypeHints, CoordinatorTxnCtx txnCtx) {<FILL_FUNCTION_BODY>} }
RelationName relationName = RelationName.of( createTableAs.name().getName(), txnCtx.sessionSettings().searchPath().currentSchema()); relationName.ensureValidForRelationCreation(); AnalyzedRelation analyzedSourceQuery = relationAnalyzer.analyze( createTableAs.query(), new StatementAnalysisContext(paramTypeHints, Operation.READ, txnCtx)); List<TableElement<Expression>> tableElements = Lists.map(analyzedSourceQuery.outputs(), Symbols::toColumnDefinition); CreateTable<Expression> createTable = new CreateTable<Expression>( createTableAs.name(), tableElements, Optional.empty(), Optional.empty(), GenericProperties.empty(), false); // This is only a preliminary analysis to to have the source available for privilege checks. // It will be analyzed again with the target columns from the target table once // the table has been created. AnalyzedRelation sourceRelation = relationAnalyzer.analyze( createTableAs.query(), new StatementAnalysisContext(paramTypeHints, Operation.READ, txnCtx) ); //postponing the analysis of the insert statement, since the table has not been created yet. Supplier<AnalyzedInsertStatement> postponedInsertAnalysis = () -> { Insert<Expression> insert = new Insert<Expression>( createTableAs.name(), createTableAs.query(), Collections.emptyList(), Collections.emptyList(), Insert.DuplicateKeyContext.none()); return insertAnalyzer.analyze(insert, paramTypeHints, txnCtx); }; return new AnalyzedCreateTableAs( createTableStatementAnalyzer.analyze(createTable, paramTypeHints, txnCtx), sourceRelation, postponedInsertAnalysis );
196
472
668
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/DeallocateAnalyzer.java
DeallocateAnalyzer
analyze
class DeallocateAnalyzer { private DeallocateAnalyzer() { } public static AnalyzedDeallocate analyze(DeallocateStatement deallocateStatement) {<FILL_FUNCTION_BODY>} }
Expression preparedStmtExpression = deallocateStatement.preparedStmt(); String preparedStmt = null; if (preparedStmtExpression != null) { if (preparedStmtExpression instanceof StringLiteral) { preparedStmt = ((StringLiteral) preparedStmtExpression).getValue(); } else if (preparedStmtExpression instanceof QualifiedNameReference) { preparedStmt = ((QualifiedNameReference) preparedStmtExpression).getName().toString(); } else { throw new AssertionError("Expression " + preparedStmtExpression.toString() + " not supported as " + "preparedStmt expression for DEALLOCATE"); } } return new AnalyzedDeallocate(preparedStmt);
58
181
239
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/DeleteAnalyzer.java
DeleteAnalyzer
analyze
class DeleteAnalyzer { private final NodeContext nodeCtx; private final RelationAnalyzer relationAnalyzer; DeleteAnalyzer(NodeContext nodeCtx, RelationAnalyzer relationAnalyzer) { this.nodeCtx = nodeCtx; this.relationAnalyzer = relationAnalyzer; } public AnalyzedDeleteStatement analyze(Delete delete, ParamTypeHints typeHints, CoordinatorTxnCtx txnContext) {<FILL_FUNCTION_BODY>} }
StatementAnalysisContext stmtCtx = new StatementAnalysisContext(typeHints, Operation.DELETE, txnContext); final RelationAnalysisContext relationCtx = stmtCtx.startRelation(); AnalyzedRelation relation = relationAnalyzer.analyze(delete.getRelation(), stmtCtx); stmtCtx.endRelation(); MaybeAliasedStatement maybeAliasedStatement = MaybeAliasedStatement.analyze(relation); relation = maybeAliasedStatement.nonAliasedRelation(); if (!(relation instanceof DocTableRelation)) { throw new UnsupportedOperationException("Cannot delete from relations other than base tables"); } DocTableRelation table = (DocTableRelation) relation; EvaluatingNormalizer normalizer = new EvaluatingNormalizer(nodeCtx, RowGranularity.CLUSTER, null, table); ExpressionAnalyzer expressionAnalyzer = new ExpressionAnalyzer( txnContext, nodeCtx, typeHints, new FullQualifiedNameFieldProvider( relationCtx.sources(), relationCtx.parentSources(), txnContext.sessionSettings().searchPath().currentSchema()), new SubqueryAnalyzer(relationAnalyzer, new StatementAnalysisContext(typeHints, Operation.READ, txnContext)) ); Symbol query = Objects.requireNonNullElse( expressionAnalyzer.generateQuerySymbol(delete.getWhere(), new ExpressionAnalysisContext(txnContext.sessionSettings())), Literal.BOOLEAN_TRUE ); query = maybeAliasedStatement.maybeMapFields(query); Symbol normalizedQuery = normalizer.normalize(query, txnContext); return new AnalyzedDeleteStatement(table, normalizedQuery);
126
429
555
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/DropAnalyzerStatementAnalyzer.java
DropAnalyzerStatementAnalyzer
analyze
class DropAnalyzerStatementAnalyzer { private final FulltextAnalyzerResolver ftResolver; DropAnalyzerStatementAnalyzer(FulltextAnalyzerResolver ftResolver) { this.ftResolver = ftResolver; } public AnalyzedDropAnalyzer analyze(String analyzerName) {<FILL_FUNCTION_BODY>} }
if (ftResolver.hasBuiltInAnalyzer(analyzerName)) { throw new IllegalArgumentException("Cannot drop a built-in analyzer"); } if (ftResolver.hasCustomAnalyzer(analyzerName) == false) { throw new AnalyzerUnknownException(analyzerName); } return new AnalyzedDropAnalyzer(analyzerName);
85
91
176
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/DropTableAnalyzer.java
DropTableAnalyzer
analyze
class DropTableAnalyzer { private static final Logger LOGGER = LogManager.getLogger(DropTableAnalyzer.class); private final Schemas schemas; private final ClusterService clusterService; DropTableAnalyzer(ClusterService clusterService, Schemas schemas) { this.clusterService = clusterService; this.schemas = schemas; } public AnalyzedDropTable<DocTableInfo> analyze(DropTable<?> node, CoordinatorSessionSettings sessionSettings) { return analyze(node.table().getName(), node.dropIfExists(), sessionSettings); } public AnalyzedDropTable<BlobTableInfo> analyze(DropBlobTable<?> node, CoordinatorSessionSettings sessionSettings) { List<String> parts = node.table().getName().getParts(); if (parts.size() != 1 && !parts.get(0).equals(BlobSchemaInfo.NAME)) { throw new IllegalArgumentException("No blob tables in schema `" + parts.get(0) + "`"); } else { QualifiedName name = new QualifiedName( List.of(BlobSchemaInfo.NAME, node.table().getName().getSuffix())); return analyze(name, node.ignoreNonExistentTable(), sessionSettings); } } private <T extends TableInfo> AnalyzedDropTable<T> analyze(QualifiedName name, boolean dropIfExists, CoordinatorSessionSettings sessionSettings) {<FILL_FUNCTION_BODY>} }
T tableInfo; RelationName tableName; boolean maybeCorrupt = false; try { tableInfo = schemas.findRelation( name, Operation.DROP, sessionSettings.sessionUser(), sessionSettings.searchPath() ); tableName = tableInfo.ident(); } catch (SchemaUnknownException | RelationUnknown e) { tableName = RelationName.of(name, sessionSettings.searchPath().currentSchema()); var metadata = clusterService.state().metadata(); var indexNameOrAlias = tableName.indexNameOrAlias(); if (metadata.hasIndex(indexNameOrAlias) || metadata.templates().containsKey(indexNameOrAlias)) { tableInfo = null; maybeCorrupt = true; } else if (dropIfExists) { tableInfo = null; } else { throw e; } } catch (OperationOnInaccessibleRelationException e) { throw e; } catch (Throwable t) { if (!sessionSettings.sessionUser().isSuperUser()) { throw t; } tableInfo = null; maybeCorrupt = true; tableName = RelationName.of(name, sessionSettings.searchPath().currentSchema()); LOGGER.info( "Unexpected error resolving table during DROP TABLE operation on {}. " + "Proceeding with operation as table schema may be corrupt (error={})", tableName, t ); } return new AnalyzedDropTable<>(tableInfo, dropIfExists, tableName, maybeCorrupt);
377
403
780
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/GeneratedColumnExpander.java
Context
createAdditionalComparison
class Context { private final HashMap<Reference, ArrayList<GeneratedReference>> referencedRefsToGeneratedColumn; private final NodeContext nodeCtx; public Context(HashMap<Reference, ArrayList<GeneratedReference>> referencedRefsToGeneratedColumn, NodeContext nodeCtx) { this.referencedRefsToGeneratedColumn = referencedRefsToGeneratedColumn; this.nodeCtx = nodeCtx; } } ComparisonReplaceVisitor() { super(); } Symbol addComparisons(Symbol symbol, List<GeneratedReference> generatedCols, List<Reference> expansionCandidates, NodeContext nodeCtx) { HashMap<Reference, ArrayList<GeneratedReference>> referencedSingleReferences = extractGeneratedReferences(generatedCols, expansionCandidates); if (referencedSingleReferences.isEmpty()) { return symbol; } else { Context ctx = new Context(referencedSingleReferences, nodeCtx); return symbol.accept(this, ctx); } } @Override public Symbol visitFunction(Function function, Context context) { if (Operators.COMPARISON_OPERATORS.contains(function.name())) { Reference reference = null; Symbol otherSide = null; for (int i = 0; i < function.arguments().size(); i++) { Symbol arg = function.arguments().get(i); arg = Symbols.unwrapReferenceFromCast(arg); if (arg instanceof Reference ref) { reference = ref; } else { otherSide = arg; } } if (reference != null && otherSide != null && !SymbolVisitors.any(Symbols.IS_GENERATED_COLUMN, otherSide)) { return addComparison(function, reference, otherSide, context); } } return super.visitFunction(function, context); } private Symbol addComparison(Function function, Reference reference, Symbol comparedAgainst, Context context) { ArrayList<GeneratedReference> genColInfos = context.referencedRefsToGeneratedColumn .computeIfAbsent(reference, (k) -> new ArrayList<>()); List<Function> comparisonsToAdd = new ArrayList<>(genColInfos.size()); comparisonsToAdd.add(function); for (GeneratedReference genColInfo : genColInfos) { Function comparison = createAdditionalComparison( function, genColInfo, comparedAgainst, 
context.nodeCtx ); if (comparison != null) { comparisonsToAdd.add(comparison); } } return AndOperator.join(comparisonsToAdd); } @Nullable private Function createAdditionalComparison(Function function, GeneratedReference generatedReference, Symbol comparedAgainst, NodeContext nodeCtx) {<FILL_FUNCTION_BODY>
if (generatedReference != null && generatedReference.generatedExpression().symbolType().equals(SymbolType.FUNCTION)) { Function generatedFunction = (Function) generatedReference.generatedExpression(); String operatorName = function.name(); if (!operatorName.equals(EqOperator.NAME)) { if (!generatedFunction.signature().hasFeature(Scalar.Feature.COMPARISON_REPLACEMENT)) { return null; } // rewrite operator if (ROUNDING_FUNCTIONS.contains(generatedFunction.name())) { String replacedOperatorName = ROUNDING_FUNCTION_MAPPING.get(operatorName); if (replacedOperatorName != null) { operatorName = replacedOperatorName; } } } Symbol wrapped = wrapInGenerationExpression(comparedAgainst, generatedReference); var funcImpl = nodeCtx.functions().get( null, operatorName, List.of(generatedReference, wrapped), SearchPath.pathWithPGCatalogAndDoc() ); return new Function( funcImpl.signature(), List.of(generatedReference, wrapped), funcImpl.boundSignature().returnType() ); } return null;
728
308
1,036
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/Id.java
Id
compileWithNullValidation
class Id { private static final Function<List<String>, String> RANDOM_ID = ignored -> UUIDs.base64UUID(); private static final Function<List<String>, String> ONLY_ITEM_NULL_VALIDATION = keyValues -> { return ensureNonNull(getOnlyElement(keyValues)); }; private static final Function<List<String>, String> ONLY_ITEM = Lists::getOnlyElement; /** * generates a function which can be used to generate an id and apply null validation. * <p> * This variant doesn't handle the pk = _id case. */ private static Function<List<String>, String> compileWithNullValidation(final int numPks, final int clusteredByPosition) { switch (numPks) { case 0: return RANDOM_ID; case 1: return ONLY_ITEM_NULL_VALIDATION; default: return keyValues -> { if (keyValues.size() != numPks) { throw new IllegalArgumentException("Missing primary key values"); } return encode(keyValues, clusteredByPosition); }; } } /** * generates a function which can be used to generate an id. * <p> * This variant doesn't handle the pk = _id case. */ public static Function<List<String>, String> compile(final int numPks, final int clusteredByPosition) { if (numPks == 1) { return ONLY_ITEM; } return compileWithNullValidation(numPks, clusteredByPosition); } /** * returns a function which can be used to generate an id with null validation. 
*/ public static Function<List<String>, String> compileWithNullValidation(final List<ColumnIdent> pkColumns, final ColumnIdent clusteredBy) {<FILL_FUNCTION_BODY>} @NotNull private static <T> T ensureNonNull(@Nullable T pkValue) throws IllegalArgumentException { if (pkValue == null) { throw new IllegalArgumentException("A primary key value must not be NULL"); } return pkValue; } public static List<String> decode(List<ColumnIdent> primaryKeys, String id) { if (primaryKeys.isEmpty() || primaryKeys.size() == 1) { return List.of(id); } List<String> pkValues = new ArrayList<>(); byte[] inputBytes = Base64.getDecoder().decode(id); try (var in = StreamInput.wrap(inputBytes)) { int size = in.readVInt(); assert size == primaryKeys.size() : "Encoded primary key values must match size of primaryKey column list"; for (int i = 0; i < size; i++) { BytesRef bytesRef = in.readBytesRef(); pkValues.add(bytesRef.utf8ToString()); } } catch (IOException e) { throw new UncheckedIOException(e); } return pkValues; } public static String encode(List<String> values, int clusteredByPosition) { try (BytesStreamOutput out = new BytesStreamOutput(estimateSize(values))) { int size = values.size(); out.writeVInt(size); if (clusteredByPosition >= 0) { out.writeBytesRef(new BytesRef(ensureNonNull(values.get(clusteredByPosition)))); } for (int i = 0; i < size; i++) { if (i != clusteredByPosition) { out.writeBytesRef(new BytesRef(ensureNonNull(values.get(i)))); } } return Base64.getEncoder().encodeToString(BytesReference.toBytes(out.bytes())); } catch (IOException e) { throw new RuntimeException(e); } } /** * estimates the size the bytesRef values will take if written onto a StreamOutput using the String streamer */ private static int estimateSize(Iterable<String> values) { int expectedEncodedSize = 0; for (String value : values) { // 5 bytes for the value of the length itself using vInt expectedEncodedSize += 5 + (value != null ? value.length() : 0); } return expectedEncodedSize; } }
final int numPks = pkColumns.size(); if (numPks == 1 && getOnlyElement(pkColumns).equals(DocSysColumns.ID)) { return RANDOM_ID; } int idx = -1; if (clusteredBy != null) { idx = pkColumns.indexOf(clusteredBy); } return compileWithNullValidation(numPks, idx);
1,118
109
1,227
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/MatchOptionsAnalysis.java
MatchOptionsAnalysis
validate
class MatchOptionsAnalysis { private static final Predicate<Object> POSITIVE_NUMBER = x -> x instanceof Number && ((Number) x).doubleValue() > 0; private static final Predicate<Object> IS_STRING = x -> x instanceof String; private static final Predicate<Object> IS_NUMBER = x -> x instanceof Number; private static final Predicate<Object> NUMBER_OR_STRING = IS_NUMBER.or(IS_STRING); private static final Predicate<Object> IS_OPERATOR = Set.of("or", "and", "OR", "AND")::contains; private static final Map<String, Predicate<Object>> ALLOWED_SETTINGS = Map.ofEntries( entry("analyzer", IS_STRING), entry("boost", POSITIVE_NUMBER), entry("cutoff_frequency", POSITIVE_NUMBER), entry("fuzziness", NUMBER_OR_STRING), // validated at ES QueryParser entry("fuzzy_rewrite", IS_STRING), entry("max_expansions", POSITIVE_NUMBER), entry("minimum_should_match", NUMBER_OR_STRING), entry("operator", IS_OPERATOR), entry("prefix_length", POSITIVE_NUMBER), entry("rewrite", IS_STRING), entry("slop", POSITIVE_NUMBER), entry("tie_breaker", IS_NUMBER), entry("zero_terms_query", IS_STRING) ); public static void validate(Map<String, Object> options) {<FILL_FUNCTION_BODY>} }
for (Map.Entry<String, Object> e : options.entrySet()) { String optionName = e.getKey(); Predicate<Object> validator = ALLOWED_SETTINGS.get(optionName); if (validator == null) { throw new IllegalArgumentException( String.format(Locale.ENGLISH, "unknown match option '%s'", optionName)); } Object value = e.getValue(); if (!validator.test(value)) { throw new IllegalArgumentException(String.format(Locale.ENGLISH, "invalid value for option '%s': %s", optionName, value)); } }
411
164
575
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/MaybeAliasedStatement.java
MaybeAliasedStatement
analyze
class MaybeAliasedStatement { public static MaybeAliasedStatement analyze(AnalyzedRelation relation) {<FILL_FUNCTION_BODY>} private final AnalyzedRelation relation; private final Function<? super Symbol, ? extends Symbol> mapper; private MaybeAliasedStatement(AnalyzedRelation relation, Function<? super Symbol, ? extends Symbol> mapper) { this.relation = relation; this.mapper = mapper; } AnalyzedRelation nonAliasedRelation() { return relation; } Symbol maybeMapFields(Symbol symbol) { return mapper.apply(symbol); } }
if (relation instanceof AliasedAnalyzedRelation) { AliasedAnalyzedRelation aliasedAnalyzedRelation = (AliasedAnalyzedRelation) relation; return new MaybeAliasedStatement( aliasedAnalyzedRelation.relation(), FieldReplacer.bind(aliasedAnalyzedRelation::resolveField) ); } return new MaybeAliasedStatement(relation, s -> s);
166
103
269
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/NegateLiterals.java
NegateLiterals
visitLiteral
class NegateLiterals extends SymbolVisitor<Void, Symbol> { private static final NegateLiterals INSTANCE = new NegateLiterals(); private NegateLiterals() { } public static Symbol negate(Symbol symbol) { return symbol.accept(INSTANCE, null); } @Override public Literal<?> visitLiteral(Literal<?> symbol, Void context) {<FILL_FUNCTION_BODY>} }
Object value = symbol.value(); if (value == null) { return symbol; } DataType<?> valueType = symbol.valueType(); switch (valueType.id()) { case DoubleType.ID: return Literal.ofUnchecked(valueType, (Double) value * -1); case FloatType.ID: return Literal.ofUnchecked(valueType, (Double) value * -1); case ShortType.ID: return Literal.ofUnchecked(valueType, (Short) value * -1); case IntegerType.ID: return Literal.ofUnchecked(valueType, (Integer) value * -1); case LongType.ID: return Literal.ofUnchecked(valueType, (Long) value * -1); default: throw new UnsupportedOperationException(Symbols.format( "Cannot negate %s. You may need to add explicit type casts", symbol)); }
119
247
366
<methods>public non-sealed void <init>() ,public io.crate.expression.symbol.Symbol visitAggregation(io.crate.expression.symbol.Aggregation, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitAlias(io.crate.expression.symbol.AliasSymbol, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitDynamicReference(io.crate.expression.symbol.DynamicReference, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitFetchMarker(io.crate.expression.symbol.FetchMarker, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitFetchReference(io.crate.expression.symbol.FetchReference, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitFetchStub(io.crate.expression.symbol.FetchStub, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitField(io.crate.expression.symbol.ScopedSymbol, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitFunction(io.crate.expression.symbol.Function, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitInputColumn(io.crate.expression.symbol.InputColumn, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitLiteral(Literal<?>, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitMatchPredicate(io.crate.expression.symbol.MatchPredicate, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitOuterColumn(io.crate.expression.symbol.OuterColumn, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitParameterSymbol(io.crate.expression.symbol.ParameterSymbol, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitReference(io.crate.metadata.Reference, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitSelectSymbol(io.crate.expression.symbol.SelectSymbol, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitVoidReference(io.crate.expression.symbol.VoidReference, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitWindowFunction(io.crate.expression.symbol.WindowFunction, java.lang.Void) <variables>
crate_crate
crate/server/src/main/java/io/crate/analyze/OptimizeTableAnalyzer.java
OptimizeTableAnalyzer
analyze
class OptimizeTableAnalyzer { private final Schemas schemas; private final NodeContext nodeCtx; OptimizeTableAnalyzer(Schemas schemas, NodeContext nodeCtx) { this.schemas = schemas; this.nodeCtx = nodeCtx; } public AnalyzedOptimizeTable analyze(OptimizeStatement<Expression> statement, ParamTypeHints paramTypeHints, CoordinatorTxnCtx txnCtx) {<FILL_FUNCTION_BODY>} }
var exprAnalyzerWithFieldsAsString = new ExpressionAnalyzer( txnCtx, nodeCtx, paramTypeHints, FieldProvider.TO_LITERAL_VALIDATE_NAME, null); var exprCtx = new ExpressionAnalysisContext(txnCtx.sessionSettings()); OptimizeStatement<Symbol> analyzedStatement = statement.map(x -> exprAnalyzerWithFieldsAsString.convert(x, exprCtx)); HashMap<Table<Symbol>, TableInfo> analyzedOptimizeTables = new HashMap<>(); for (Table<Symbol> table : analyzedStatement.tables()) { TableInfo tableInfo = schemas.findRelation( table.getName(), Operation.OPTIMIZE, txnCtx.sessionSettings().sessionUser(), txnCtx.sessionSettings().searchPath() ); analyzedOptimizeTables.put(table, tableInfo); } return new AnalyzedOptimizeTable(analyzedOptimizeTables, analyzedStatement.properties());
135
257
392
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/OutputNameFormatter.java
InnerOutputNameFormatter
visitQualifiedNameReference
class InnerOutputNameFormatter extends ExpressionFormatter.Formatter { @Override protected String visitQualifiedNameReference(QualifiedNameReference node, List<Expression> parameters) {<FILL_FUNCTION_BODY>} @Override protected String visitSubscriptExpression(SubscriptExpression node, List<Expression> parameters) { return node.base().accept(this, null) + '[' + node.index().accept(this, null) + ']'; } @Override public String visitArrayComparisonExpression(ArrayComparisonExpression node, List<Expression> parameters) { return node.getLeft().accept(this, null) + ' ' + node.getType().getValue() + ' ' + node.quantifier().name() + '(' + node.getRight().accept(this, null) + ')'; } @Override protected String visitSubqueryExpression(SubqueryExpression node, List<Expression> parameters) { return super.visitSubqueryExpression(node, parameters).replace("\n", ""); } }
List<String> parts = node.getName().getParts(); if (parts.isEmpty()) { throw new NoSuchElementException("Parts of QualifiedNameReference are empty: " + node.getName()); } return parts.get(parts.size() - 1);
261
71
332
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/PrivilegesAnalyzer.java
PrivilegesAnalyzer
permissionsToPrivileges
class PrivilegesAnalyzer { private final Schemas schemas; private static final String ERROR_MESSAGE = "GRANT/DENY/REVOKE Privileges on information_schema is not supported"; PrivilegesAnalyzer(Schemas schemas) { this.schemas = schemas; } AnalyzedPrivileges analyzeGrant(GrantPrivilege node, Role grantor, SearchPath searchPath) { return buildAnalyzedPrivileges(node, grantor, searchPath); } AnalyzedPrivileges analyzeRevoke(RevokePrivilege node, Role grantor, SearchPath searchPath) { return buildAnalyzedPrivileges(node, grantor, searchPath); } AnalyzedPrivileges analyzeDeny(DenyPrivilege node, Role grantor, SearchPath searchPath) { return buildAnalyzedPrivileges(node, grantor, searchPath); } @NotNull private AnalyzedPrivileges buildAnalyzedPrivileges(PrivilegeStatement node, Role grantor, SearchPath searchPath) { Policy policy; switch (node) { case GrantPrivilege ignored -> policy = Policy.GRANT; case RevokePrivilege ignored -> policy = Policy.REVOKE; case DenyPrivilege ignored -> policy = Policy.DENY; } Securable securable = Securable.valueOf(node.securable()); List<String> idents = validatePrivilegeIdents( grantor, securable, node.privilegeIdents(), policy == Policy.REVOKE, searchPath, schemas); if (securable == Securable.CLUSTER && node.all() == false) { List<Permission> permissions = parsePermissions(node.privileges(), false); if (permissions.isEmpty() == false) { if (permissions.size() != node.privileges().size()) { throw new IllegalArgumentException("Mixing up cluster privileges with roles is not allowed"); } else { return AnalyzedPrivileges.ofPrivileges(node.userNames(), permissionsToPrivileges(getPermissions(node.all(), node.privileges()), grantor, policy, idents, securable)); } } if (policy == Policy.DENY) { throw new IllegalArgumentException("Cannot DENY a role"); } if (node.userNames().contains(Role.CRATE_USER.name())) { throw new IllegalArgumentException("Cannot grant roles to " + Role.CRATE_USER.name() + " superuser"); } if 
(node.privileges().contains(Role.CRATE_USER.name())) { throw new IllegalArgumentException("Cannot grant " + Role.CRATE_USER.name() + " superuser, to other " + "users or roles"); } for (var grantee : node.userNames()) { for (var roleNameToGrant : node.privileges()) { if (roleNameToGrant.equals(grantee)) { throw new IllegalArgumentException(String.format(Locale.ENGLISH, "Cannot grant role %s to itself as a cycle will be created", grantee)); } } } return AnalyzedPrivileges.ofRolePrivileges( node.userNames(), new GrantedRolesChange(policy, new HashSet<>(node.privileges()), grantor.name())); } else { return AnalyzedPrivileges.ofPrivileges(node.userNames(), permissionsToPrivileges( getPermissions(node.all(), node.privileges()), grantor, policy, idents, securable)); } } private static Collection<Permission> getPermissions(boolean all, List<String> permissionNames) { Collection<Permission> permissions; if (all) { permissions = Permission.VALUES; } else { permissions = parsePermissions(permissionNames, true); } return permissions; } private static void validateSchemaNames(List<String> schemaNames) { schemaNames.forEach(PrivilegesAnalyzer::validateSchemaName); } private static void validateSchemaName(String schemaName) { if (InformationSchemaInfo.NAME.equals(schemaName)) { throw new UnsupportedFeatureException(ERROR_MESSAGE); } } private List<String> validatePrivilegeIdents(Role sessionUser, Securable securable, List<QualifiedName> tableOrSchemaNames, boolean isRevoke, SearchPath searchPath, Schemas schemas) { if (Securable.SCHEMA.equals(securable)) { List<String> schemaNames = Lists.map(tableOrSchemaNames, QualifiedName::toString); if (isRevoke) { return schemaNames; } validateSchemaNames(schemaNames); return schemaNames; } else { return resolveAndValidateRelations(tableOrSchemaNames, sessionUser, searchPath, schemas, isRevoke); } } private static List<Permission> parsePermissions(List<String> permissionNames, boolean validate) { List<Permission> permissions = new 
ArrayList<>(permissionNames.size()); for (String permissionName : permissionNames) { Permission permission; try { permission = Permission.valueOf(permissionName.toUpperCase(Locale.ENGLISH)); permissions.add(permission); } catch (IllegalArgumentException e) { if (validate) { throw new IllegalArgumentException(String.format(Locale.ENGLISH, "Unknown permission '%s'", permissionName)); } } } return permissions; } private static Set<Privilege> permissionsToPrivileges(Collection<Permission> permissions, Role grantor, Policy policy, List<String> idents, Securable securable) {<FILL_FUNCTION_BODY>} private static List<String> resolveAndValidateRelations(List<QualifiedName> relations, Role sessionUser, SearchPath searchPath, Schemas schemas, boolean isRevoke) { return Lists.map(relations, q -> { try { RelationInfo relation = schemas.findRelation(q, Operation.READ, sessionUser, searchPath); RelationName relationName = relation.ident(); if (!isRevoke) { validateSchemaName(relationName.schema()); } return relationName.fqn(); } catch (RelationUnknown e) { if (!isRevoke) { throw e; } else { return RelationName.of(q, searchPath.currentSchema()).fqn(); } } }); } }
Set<Privilege> privileges = new HashSet<>(permissions.size()); if (Securable.CLUSTER.equals(securable)) { for (Permission permission : permissions) { Privilege privilege = new Privilege( policy, permission, securable, null, grantor.name() ); privileges.add(privilege); } } else { for (Permission permission : permissions) { for (String ident : idents) { Privilege privilege = new Privilege( policy, permission, securable, ident, grantor.name() ); privileges.add(privilege); } } } return privileges;
1,775
200
1,975
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/QueriedSelectRelation.java
QueriedSelectRelation
visitSymbols
class QueriedSelectRelation implements AnalyzedRelation { private final List<AnalyzedRelation> from; private final List<JoinPair> joinPairs; private final boolean isDistinct; private final List<Symbol> outputs; private final Symbol whereClause; private final List<Symbol> groupBy; @Nullable private final Symbol having; @Nullable private final OrderBy orderBy; @Nullable private final Symbol offset; @Nullable private final Symbol limit; public QueriedSelectRelation(boolean isDistinct, List<AnalyzedRelation> from, List<JoinPair> joinPairs, List<Symbol> outputs, Symbol whereClause, List<Symbol> groupBy, @Nullable Symbol having, @Nullable OrderBy orderBy, @Nullable Symbol limit, @Nullable Symbol offset) { this.outputs = outputs; this.whereClause = whereClause; this.groupBy = groupBy; this.having = having; this.orderBy = orderBy; this.offset = offset; this.limit = limit; assert from.size() >= 1 : "QueriedSelectRelation must have at least 1 relation in FROM"; this.isDistinct = isDistinct; this.from = from; this.joinPairs = joinPairs; } public List<AnalyzedRelation> from() { return from; } public Symbol getField(ColumnIdent column, Operation operation, boolean errorOnUnknownObjectKey) throws AmbiguousColumnException, ColumnUnknownException, UnsupportedOperationException { Symbol match = null; for (Symbol output : outputs()) { ColumnIdent outputName = Symbols.pathFromSymbol(output); if (outputName.equals(column)) { if (match != null) { throw new AmbiguousColumnException(column, output); } match = output; } } if (match != null || column.isRoot()) { return match; } ColumnIdent root = column.getRoot(); // Try to optimize child-column access to use a Reference instead of a subscript function // // E.g. // // SELECT obj['x'] FROM (select...) // // Should use Reference (obj.x) instead of Function subscript(obj, 'x') // Unless an alias shadows the sub-relation column: // // SELECT obj['x'] FROM (SELECT unnest(obj) as obj FROM ...) 
// // -> Resolve both root field and child-field from source again. // If the root field matches output -> it's not shadowed for (AnalyzedRelation source : from) { Symbol field = source.getField(column, operation, errorOnUnknownObjectKey); if (field != null) { if (match != null) { throw new AmbiguousColumnException(column, match); } Symbol rootField = source.getField(root, operation, errorOnUnknownObjectKey); for (Symbol output : outputs()) { Symbol symbol = output; while (symbol instanceof AliasSymbol alias) { symbol = alias.symbol(); } if (symbol.equals(rootField) || output.equals(rootField)) { match = field; break; } } } } return match; } public boolean isDistinct() { return isDistinct; } @Override public <C, R> R accept(AnalyzedRelationVisitor<C, R> visitor, C context) { return visitor.visitQueriedSelectRelation(this, context); } @Override public RelationName relationName() { throw new UnsupportedOperationException( "QueriedSelectRelation has no name. It must be beneath an aliased-relation to be addressable by name"); } @NotNull @Override public List<Symbol> outputs() { return outputs; } public Symbol where() { return whereClause; } public List<Symbol> groupBy() { return groupBy; } @Nullable public Symbol having() { return having; } @Nullable public OrderBy orderBy() { return orderBy; } @Nullable public Symbol limit() { return limit; } @Nullable public Symbol offset() { return offset; } @Override public String toString() { return "SELECT " + Lists.joinOn(", ", outputs(), x -> Symbols.pathFromSymbol(x).sqlFqn()) + " FROM (" + Lists.joinOn(", ", from, x -> x.relationName().toString()) + ')'; } @Override public void visitSymbols(Consumer<? super Symbol> consumer) {<FILL_FUNCTION_BODY>} public List<JoinPair> joinPairs() { return joinPairs; } }
for (Symbol output : outputs) { consumer.accept(output); } consumer.accept(whereClause); for (Symbol groupKey : groupBy) { consumer.accept(groupKey); } if (having != null) { consumer.accept(having); } if (orderBy != null) { orderBy.accept(consumer); } if (limit != null) { consumer.accept(limit); } if (offset != null) { consumer.accept(offset); } for (var joinPair : joinPairs) { if (joinPair.condition() != null) { consumer.accept(joinPair.condition()); } }
1,281
189
1,470
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/RefreshTableAnalyzer.java
RefreshTableAnalyzer
analyze
class RefreshTableAnalyzer { private final NodeContext nodeCtx; private final Schemas schemas; RefreshTableAnalyzer(NodeContext nodeCtx, Schemas schemas) { this.nodeCtx = nodeCtx; this.schemas = schemas; } public AnalyzedRefreshTable analyze(RefreshStatement<Expression> refreshStatement, ParamTypeHints paramTypeHints, CoordinatorTxnCtx txnCtx) {<FILL_FUNCTION_BODY>} }
var exprAnalyzerWithFieldsAsString = new ExpressionAnalyzer( txnCtx, nodeCtx, paramTypeHints, FieldProvider.TO_LITERAL_VALIDATE_NAME, null); var exprCtx = new ExpressionAnalysisContext(txnCtx.sessionSettings()); HashMap<Table<Symbol>, DocTableInfo> analyzedTables = new HashMap<>(); for (var table : refreshStatement.tables()) { var analyzedTable = table.map(t -> exprAnalyzerWithFieldsAsString.convert(t, exprCtx)); DocTableInfo tableInfo = schemas.findRelation( table.getName(), Operation.REFRESH, txnCtx.sessionSettings().sessionUser(), txnCtx.sessionSettings().searchPath() ); analyzedTables.put(analyzedTable, tableInfo); } return new AnalyzedRefreshTable(analyzedTables);
134
234
368
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/RerouteAnalyzedStatement.java
RerouteAnalyzedStatement
visitSymbols
class RerouteAnalyzedStatement implements DDLStatement { private final ShardedTable shardedTable; private final List<Assignment<Symbol>> partitionProperties; RerouteAnalyzedStatement(ShardedTable shardedTable, List<Assignment<Symbol>> partitionProperties) { this.shardedTable = shardedTable; this.partitionProperties = partitionProperties; } public ShardedTable shardedTable() { return shardedTable; } public List<Assignment<Symbol>> partitionProperties() { return partitionProperties; } @Override public void visitSymbols(Consumer<? super Symbol> consumer) {<FILL_FUNCTION_BODY>} }
for (var partitionProperty : partitionProperties) { consumer.accept(partitionProperty.columnName()); partitionProperty.expressions().forEach(consumer); }
174
43
217
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/ResetStatementAnalyzer.java
ResetStatementAnalyzer
analyze
class ResetStatementAnalyzer { private final NodeContext nodeCtx; public ResetStatementAnalyzer(NodeContext nodeCtx) { this.nodeCtx = nodeCtx; } public AnalyzedResetStatement analyze(ResetStatement<Expression> node, ParamTypeHints typeHints, CoordinatorTxnCtx txnCtx) {<FILL_FUNCTION_BODY>} }
var exprAnalyzer = new ExpressionAnalyzer( txnCtx, nodeCtx, typeHints, FieldProvider.TO_LITERAL_UNSAFE, null ); var statement = node.map(x -> exprAnalyzer.convert(x, new ExpressionAnalysisContext(txnCtx.sessionSettings()))); return new AnalyzedResetStatement(new HashSet<>(statement.columns()));
108
112
220
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/ScalarsAndRefsToTrue.java
ScalarsAndRefsToTrue
visitFunction
class ScalarsAndRefsToTrue extends SymbolVisitor<Void, Symbol> { private static final ScalarsAndRefsToTrue INSTANCE = new ScalarsAndRefsToTrue(); private ScalarsAndRefsToTrue() { } public static Symbol rewrite(Symbol symbol) { return symbol.accept(INSTANCE, null); } @Override public Symbol visitFunction(Function symbol, Void context) {<FILL_FUNCTION_BODY>} @Override public Symbol visitMatchPredicate(MatchPredicate matchPredicate, Void context) { return Literal.BOOLEAN_TRUE; } @Override protected Symbol visitSymbol(Symbol symbol, Void context) { if (symbol.valueType().id() == DataTypes.UNDEFINED.id()) { return Literal.NULL; } return Literal.BOOLEAN_TRUE; } @Override public Symbol visitLiteral(Literal<?> symbol, Void context) { return symbol; } }
String functionName = symbol.name(); if (functionName.equals(NotPredicate.NAME)) { Symbol argument = symbol.arguments().get(0); if (argument instanceof Reference) { return argument.accept(this, context); } else if (argument instanceof Function) { if (!Operators.LOGICAL_OPERATORS.contains(((Function) argument).name())) { return argument.accept(this, context); } } } List<Symbol> newArgs = new ArrayList<>(symbol.arguments().size()); boolean allLiterals = true; boolean isNull = false; for (Symbol arg : symbol.arguments()) { Symbol processedArg = arg.accept(this, context); newArgs.add(processedArg); if (!processedArg.symbolType().isValueSymbol()) { allLiterals = false; } if (processedArg.valueType().id() == DataTypes.UNDEFINED.id()) { isNull = true; } } if (allLiterals && !Operators.LOGICAL_OPERATORS.contains(functionName)) { return isNull ? Literal.NULL : Literal.BOOLEAN_TRUE; } return new Function(symbol.signature(), newArgs, symbol.valueType());
266
326
592
<methods>public non-sealed void <init>() ,public io.crate.expression.symbol.Symbol visitAggregation(io.crate.expression.symbol.Aggregation, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitAlias(io.crate.expression.symbol.AliasSymbol, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitDynamicReference(io.crate.expression.symbol.DynamicReference, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitFetchMarker(io.crate.expression.symbol.FetchMarker, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitFetchReference(io.crate.expression.symbol.FetchReference, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitFetchStub(io.crate.expression.symbol.FetchStub, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitField(io.crate.expression.symbol.ScopedSymbol, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitFunction(io.crate.expression.symbol.Function, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitInputColumn(io.crate.expression.symbol.InputColumn, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitLiteral(Literal<?>, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitMatchPredicate(io.crate.expression.symbol.MatchPredicate, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitOuterColumn(io.crate.expression.symbol.OuterColumn, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitParameterSymbol(io.crate.expression.symbol.ParameterSymbol, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitReference(io.crate.metadata.Reference, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitSelectSymbol(io.crate.expression.symbol.SelectSymbol, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitVoidReference(io.crate.expression.symbol.VoidReference, java.lang.Void) ,public io.crate.expression.symbol.Symbol visitWindowFunction(io.crate.expression.symbol.WindowFunction, java.lang.Void) <variables>
crate_crate
crate/server/src/main/java/io/crate/analyze/SubscriptVisitor.java
SubscriptNameVisitor
visitExpression
class SubscriptNameVisitor extends AstVisitor<Void, SubscriptContext> { private static final SubscriptNameVisitor INSTANCE = new SubscriptNameVisitor(); @Override protected Void visitSubscriptExpression(SubscriptExpression node, SubscriptContext context) { node.index().accept(SubscriptIndexVisitor.INSTANCE, context); node.base().accept(this, context); return null; } @Override protected Void visitQualifiedNameReference(QualifiedNameReference node, SubscriptContext context) { context.qualifiedName(node.getName()); return null; } @Override public Void visitArrayLiteral(ArrayLiteral node, SubscriptContext context) { context.expression(node); return null; } @Override public Void visitObjectLiteral(ObjectLiteral node, SubscriptContext context) { context.expression(node); return null; } @Override protected Void visitCast(Cast node, SubscriptContext context) { context.expression(node); return null; } @Override protected Void visitTryCast(TryCast node, SubscriptContext context) { context.expression(node); return null; } @Override protected Void visitFunctionCall(FunctionCall node, SubscriptContext context) { context.expression(node); return null; } @Override protected Void visitSubqueryExpression(SubqueryExpression node, SubscriptContext context) { context.expression(node); return null; } @Override protected Void visitExpression(Expression node, SubscriptContext context) {<FILL_FUNCTION_BODY>} }
throw new UnsupportedOperationException(String.format(Locale.ENGLISH, "An expression of type %s cannot have an index accessor ([])", node.getClass().getSimpleName()));
434
52
486
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/TableIdentsExtractor.java
TableIdentsExtractor
extract
class TableIdentsExtractor { private static final TableIdentRelationVisitor RELATION_TABLE_IDENT_EXTRACTOR = new TableIdentRelationVisitor(); private static final TableIdentSymbolVisitor SYMBOL_TABLE_IDENT_EXTRACTOR = new TableIdentSymbolVisitor(); /** * Extracts all table idents from all given symbols if possible (some symbols don't provide any table ident info) */ public static Collection<RelationName> extract(Iterable<? extends Symbol> symbols) { Collection<RelationName> relationNames = new HashSet<>(); for (Symbol symbol : symbols) { symbol.accept(SYMBOL_TABLE_IDENT_EXTRACTOR, relationNames); } return relationNames; } /** * Extracts all table idents from the given symbol if possible (some symbols don't provide any table ident info) */ public static Iterable<RelationName> extract(Symbol symbol) {<FILL_FUNCTION_BODY>} private static class TableIdentSymbolVisitor extends DefaultTraversalSymbolVisitor<Collection<RelationName>, Void> { @Override protected Void visitSymbol(Symbol symbol, Collection<RelationName> context) { throw new IllegalStateException(String.format(Locale.ENGLISH, "Symbol '%s' not supported", Symbol.class.getName())); } @Override public Void visitAggregation(Aggregation symbol, Collection<RelationName> context) { context.addAll(extract(symbol.inputs())); return null; } @Override public Void visitReference(Reference symbol, Collection<RelationName> context) { context.add(symbol.ident().tableIdent()); return null; } @Override public Void visitDynamicReference(DynamicReference symbol, Collection<RelationName> context) { return visitReference(symbol, context); } @Override public Void visitFunction(Function symbol, Collection<RelationName> context) { return null; } @Override public Void visitLiteral(Literal<?> symbol, Collection<RelationName> context) { return null; } @Override public Void visitInputColumn(InputColumn inputColumn, Collection<RelationName> context) { return null; } @Override public Void visitField(ScopedSymbol field, 
Collection<RelationName> context) { context.add(field.relation()); return null; } @Override public Void visitFetchReference(FetchReference fetchReference, Collection<RelationName> context) { return ((Symbol) fetchReference.ref()).accept(this, context); } @Override public Void visitParameterSymbol(ParameterSymbol parameterSymbol, Collection<RelationName> context) { return null; } @Override public Void visitSelectSymbol(SelectSymbol selectSymbol, Collection<RelationName> context) { return selectSymbol.relation().accept(RELATION_TABLE_IDENT_EXTRACTOR, context); } } private static class TableIdentRelationVisitor extends AnalyzedRelationVisitor<Collection<RelationName>, Void> { @Override protected Void visitAnalyzedRelation(AnalyzedRelation relation, Collection<RelationName> context) { throw new IllegalStateException(String.format(Locale.ENGLISH, "AnalyzedRelation '%s' not supported", relation.getClass())); } @Override public Void visitTableRelation(TableRelation tableRelation, Collection<RelationName> context) { context.add(tableRelation.tableInfo().ident()); return null; } @Override public Void visitDocTableRelation(DocTableRelation relation, Collection<RelationName> context) { context.add(relation.tableInfo().ident()); return null; } @Override public Void visitTableFunctionRelation(TableFunctionRelation tableFunctionRelation, Collection<RelationName> context) { return null; } @Override public Void visitQueriedSelectRelation(QueriedSelectRelation relation, Collection<RelationName> context) { for (AnalyzedRelation analyzedRelation : relation.from()) { analyzedRelation.accept(this, context); } return null; } } }
Set<RelationName> relationNames = new HashSet<>(); symbol.accept(SYMBOL_TABLE_IDENT_EXTRACTOR, relationNames); return relationNames;
1,107
47
1,154
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/TableProperties.java
TableProperties
setDefaults
class TableProperties { private static final String INVALID_MESSAGE = "Invalid property \"%s\" passed to [ALTER | CREATE] TABLE statement"; private TableProperties() { } public static void analyze(TableParameter tableParameter, TableParameters tableParameters, GenericProperties<Object> properties, boolean withDefaults) { Map<String, Setting<?>> settingMap = tableParameters.supportedSettings(); Map<String, Setting<?>> mappingsMap = tableParameters.supportedMappings(); settingsFromProperties( tableParameter.settingsBuilder(), properties, settingMap, withDefaults, mappingsMap::containsKey, INVALID_MESSAGE); settingsFromProperties( tableParameter.mappingsBuilder(), properties, mappingsMap, withDefaults, settingMap::containsKey, INVALID_MESSAGE); } private static void settingsFromProperties(Settings.Builder builder, GenericProperties<Object> properties, Map<String, Setting<?>> supportedSettings, boolean setDefaults, Predicate<String> ignoreProperty, String invalidMessage) { if (setDefaults) { setDefaults(builder, supportedSettings); } for (Map.Entry<String, Object> entry : properties.properties().entrySet()) { String settingName = entry.getKey(); if (ignoreProperty.test(settingName)) { continue; } String groupName = getPossibleGroup(settingName); if (groupName != null && ignoreProperty.test(groupName)) { continue; } SettingHolder settingHolder = getSupportedSetting(supportedSettings, settingName); if (settingHolder == null) { throw new IllegalArgumentException(String.format(Locale.ENGLISH, invalidMessage, entry.getKey())); } Object value = entry.getValue(); if (value == null) { throw new IllegalArgumentException( String.format( Locale.ENGLISH, "Cannot set NULL to property %s.", entry.getKey() ) ); } settingHolder.apply(builder, entry.getValue()); } } /** * Processes the property names which should be reset and updates the settings or mappings with the related * default value. 
*/ public static void analyzeResetProperties(TableParameter tableParameter, TableParameters tableParameters, List<String> properties) { Map<String, Setting<?>> settingMap = tableParameters.supportedSettings(); Map<String, Setting<?>> mappingsMap = tableParameters.supportedMappings(); resetSettingsFromProperties( tableParameter.settingsBuilder(), properties, settingMap, mappingsMap::containsKey, INVALID_MESSAGE); resetSettingsFromProperties( tableParameter.mappingsBuilder(), properties, mappingsMap, settingMap::containsKey, INVALID_MESSAGE); } private static void resetSettingsFromProperties(Settings.Builder builder, List<String> properties, Map<String, Setting<?>> supportedSettings, Predicate<String> ignoreProperty, String invalidMessage) { for (String name : properties) { if (ignoreProperty.test(name)) { continue; } String groupName = getPossibleGroup(name); if (groupName != null && ignoreProperty.test(groupName)) { continue; } SettingHolder settingHolder = getSupportedSetting(supportedSettings, name); if (settingHolder == null) { throw new IllegalArgumentException(String.format(Locale.ENGLISH, invalidMessage, name)); } settingHolder.reset(builder); } } private static void setDefaults(Settings.Builder builder, Map<String, Setting<?>> supportedSettings) {<FILL_FUNCTION_BODY>} @Nullable private static SettingHolder getSupportedSetting(Map<String, Setting<?>> supportedSettings, String settingName) { Setting<?> setting = supportedSettings.get(settingName); if (setting == null) { String groupKey = getPossibleGroup(settingName); if (groupKey != null) { setting = supportedSettings.get(groupKey); if (setting instanceof Setting.AffixSetting<?> affixSetting) { setting = affixSetting.getConcreteSetting(IndexMetadata.INDEX_SETTING_PREFIX + settingName); return new SettingHolder(setting, true); } } } if (setting != null) { return new SettingHolder(setting); } return null; } @Nullable private static String getPossibleGroup(String key) { int idx = key.lastIndexOf('.'); if (idx > 
0) { return key.substring(0, idx); } return null; } private static class SettingHolder { private final Setting<?> setting; private final boolean isAffixSetting; private final boolean isChildOfAffixSetting; SettingHolder(Setting<?> setting) { this(setting, false); } SettingHolder(Setting<?> setting, boolean isChildOfAffixSetting) { this.setting = setting; this.isAffixSetting = setting instanceof Setting.AffixSetting; this.isChildOfAffixSetting = isChildOfAffixSetting; } void apply(Settings.Builder builder, Object valueSymbol) { if (isAffixSetting) { // only concrete affix settings are supported otherwise it's not possible to parse values throw new IllegalArgumentException( "Cannot change a dynamic group setting, only concrete settings allowed."); } Settings.Builder singleSettingBuilder = Settings.builder().put(builder.build()); singleSettingBuilder.putStringOrList(setting.getKey(), valueSymbol); Object value = setting.get(singleSettingBuilder.build()); if (value instanceof Settings settings) { builder.put(settings); } else { builder.put(setting.getKey(), value.toString()); } } void reset(Settings.Builder builder) { if (isAffixSetting) { // only concrete affix settings are supported, ES does not support to reset a whole Affix setting group throw new IllegalArgumentException( "Cannot change a dynamic group setting, only concrete settings allowed."); } Object value = setting.getDefault(Settings.EMPTY); if (isChildOfAffixSetting) { // affix settings should be removed on reset, they don't have a default value builder.putNull(setting.getKey()); } else if (value instanceof Settings settings) { builder.put(settings); } else { builder.put(setting.getKey(), value.toString()); } } } }
for (Map.Entry<String, Setting<?>> entry : supportedSettings.entrySet()) { Setting<?> setting = entry.getValue(); // We'd set the "wrong" default for settings that base their default on other settings if (TableParameters.SETTINGS_NOT_INCLUDED_IN_DEFAULT.contains(setting)) { continue; } if (setting instanceof AffixSetting) { continue; } Object value = setting.getDefault(Settings.EMPTY); if (value instanceof Settings settings) { builder.put(settings); } else { builder.put(setting.getKey(), value.toString()); } }
1,733
172
1,905
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/WindowFrameDefinition.java
WindowFrameDefinition
equals
class WindowFrameDefinition implements Writeable { private final Mode mode; private final FrameBoundDefinition start; private final FrameBoundDefinition end; public WindowFrameDefinition(StreamInput in) throws IOException { mode = in.readEnum(Mode.class); start = new FrameBoundDefinition(in); end = in.readOptionalWriteable(FrameBoundDefinition::new); } public WindowFrameDefinition(Mode mode, FrameBoundDefinition start, @Nullable FrameBoundDefinition end) { this.mode = mode; this.start = start; if (end != null) { this.end = end; } else { this.end = new FrameBoundDefinition(FrameBound.Type.CURRENT_ROW, Literal.NULL); } } public Mode mode() { return mode; } public FrameBoundDefinition start() { return start; } public FrameBoundDefinition end() { return end; } public WindowFrameDefinition map(Function<? super Symbol, ? extends Symbol> mapper) { FrameBoundDefinition newStart = start.map(mapper); FrameBoundDefinition newEnd = end.map(mapper); if (newStart == start && newEnd == end) { return this; } else { return new WindowFrameDefinition(mode, newStart, newEnd); } } @Override public void writeTo(StreamOutput out) throws IOException { out.writeEnum(mode); start.writeTo(out); out.writeOptionalWriteable(end); } @Override public boolean equals(Object o) {<FILL_FUNCTION_BODY>} @Override public int hashCode() { return Objects.hash(mode, start, end); } @Override public String toString() { return "WindowFrame{" + "type=" + mode + ", start=" + start + ", end=" + end + '}'; } }
if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; WindowFrameDefinition that = (WindowFrameDefinition) o; return mode == that.mode && Objects.equals(start, that.start) && Objects.equals(end, that.end);
507
85
592
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/expressions/SubqueryAnalyzer.java
SubqueryAnalyzer
analyze
class SubqueryAnalyzer { private final RelationAnalyzer relationAnalyzer; private final StatementAnalysisContext statementAnalysisContext; public SubqueryAnalyzer(RelationAnalyzer relationAnalyzer, StatementAnalysisContext statementAnalysisContext) { this.relationAnalyzer = relationAnalyzer; this.statementAnalysisContext = statementAnalysisContext; } public AnalyzedRelation analyze(Query query) {<FILL_FUNCTION_BODY>} }
// The only non-queried relations are base tables - which cannot occur as part of a subquery. so this cast is safe. return relationAnalyzer.analyze(query, statementAnalysisContext);
113
49
162
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/relations/AbstractTableRelation.java
AbstractTableRelation
equals
class AbstractTableRelation<T extends TableInfo> implements AnalyzedRelation, FieldResolver { protected final T tableInfo; private final List<Symbol> outputs; private final List<Symbol> hiddenOutputs; public AbstractTableRelation(T tableInfo, List<Symbol> outputs, List<Symbol> hiddenOutputs) { this.tableInfo = tableInfo; this.outputs = outputs; this.hiddenOutputs = hiddenOutputs; } public T tableInfo() { return tableInfo; } @NotNull @Override public List<Symbol> outputs() { return outputs; } @Override public List<Symbol> hiddenOutputs() { return hiddenOutputs; } @Nullable public Reference getField(ColumnIdent path) { return tableInfo.getReadReference(path); } @Override public RelationName relationName() { return tableInfo.ident(); } @Override public String toString() { return getClass().getSimpleName() + '{' + this.tableInfo.ident() + '}'; } @Override public boolean equals(Object o) {<FILL_FUNCTION_BODY>} @Override public int hashCode() { return tableInfo.hashCode(); } @Override @Nullable public Reference resolveField(ScopedSymbol field) { if (field.relation().equals(tableInfo.ident())) { return getField(field.column()); } return null; } }
if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; AbstractTableRelation<?> that = (AbstractTableRelation<?>) o; return tableInfo.equals(that.tableInfo);
398
72
470
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/relations/AliasedAnalyzedRelation.java
AliasedAnalyzedRelation
resolveField
class AliasedAnalyzedRelation implements AnalyzedRelation, FieldResolver { private final AnalyzedRelation relation; private final RelationName alias; private final Map<ColumnIdent, ColumnIdent> aliasToColumnMapping; private final ArrayList<Symbol> outputs; private final ArrayList<ScopedSymbol> scopedSymbols; public AliasedAnalyzedRelation(AnalyzedRelation relation, RelationName alias) { this(relation, alias, List.of()); } AliasedAnalyzedRelation(AnalyzedRelation relation, RelationName alias, List<String> columnAliases) { this.relation = relation; this.alias = alias; aliasToColumnMapping = new HashMap<>(columnAliases.size()); this.outputs = new ArrayList<>(relation.outputs().size()); this.scopedSymbols = new ArrayList<>(relation.outputs().size()); for (int i = 0; i < relation.outputs().size(); i++) { Symbol childOutput = relation.outputs().get(i); ColumnIdent childColumn = Symbols.pathFromSymbol(childOutput); ColumnIdent columnAlias = childColumn; if (i < columnAliases.size()) { columnAlias = new ColumnIdent(columnAliases.get(i)); } aliasToColumnMapping.put(columnAlias, childColumn); var scopedSymbol = new ScopedSymbol(alias, columnAlias, childOutput.valueType()); outputs.add(scopedSymbol); scopedSymbols.add(scopedSymbol); } for (int i = 0; i < relation.hiddenOutputs().size(); i++) { Symbol childOutput = relation.hiddenOutputs().get(i); ColumnIdent childColumn = Symbols.pathFromSymbol(childOutput); ColumnIdent columnAlias = childColumn; if (i + relation.outputs().size() < columnAliases.size()) { columnAlias = new ColumnIdent(columnAliases.get(i)); } aliasToColumnMapping.putIfAbsent(columnAlias, childColumn); } } @Override public Symbol getField(ColumnIdent column, Operation operation, boolean errorOnUnknownObjectKey) throws AmbiguousColumnException, ColumnUnknownException, UnsupportedOperationException { if (operation != Operation.READ) { throw new UnsupportedOperationException(operation + " is not supported on " + alias); } ColumnIdent childColumnName = 
aliasToColumnMapping.get(column); if (childColumnName == null) { if (column.isRoot()) { return null; } childColumnName = aliasToColumnMapping.get(column.getRoot()); if (childColumnName == null) { // The column ident maybe a quoted subscript which points to an alias of a sub relation. // Aliases are always strings but due to the support for quoted subscript expressions, // the select column ident may already be expanded to a subscript. var maybeQuotedSubscriptColumnAlias = new ColumnIdent(column.sqlFqn()); childColumnName = aliasToColumnMapping.get(maybeQuotedSubscriptColumnAlias); if (childColumnName == null) { return null; } column = maybeQuotedSubscriptColumnAlias; } else { childColumnName = new ColumnIdent(childColumnName.name(), column.path()); } } Symbol field = relation.getField(childColumnName, operation, errorOnUnknownObjectKey); if (field == null) { return null; } if (field instanceof VoidReference voidReference) { return new VoidReference( new ReferenceIdent(alias, voidReference.column()), voidReference.position()); } ScopedSymbol scopedSymbol = new ScopedSymbol(alias, column, field.valueType()); // If the scopedSymbol exists already, return that instance. // Otherwise (e.g. lazy-loaded subscript expression) it must be stored to comply with // IdentityHashMap constraints. int i = scopedSymbols.indexOf(scopedSymbol); if (i >= 0) { return scopedSymbols.get(i); } scopedSymbols.add(scopedSymbol); return scopedSymbol; } public AnalyzedRelation relation() { return relation; } @Override public RelationName relationName() { return alias; } @NotNull @Override public List<Symbol> outputs() { return outputs; } @Override public String toString() { return relation + " AS " + alias; } @Override public <C, R> R accept(AnalyzedRelationVisitor<C, R> visitor, C context) { return visitor.visitAliasedAnalyzedRelation(this, context); } @Nullable @Override public Symbol resolveField(ScopedSymbol field) {<FILL_FUNCTION_BODY>} }
if (!field.relation().equals(alias)) { throw new IllegalArgumentException(field + " does not belong to " + relationName()); } ColumnIdent column = field.column(); ColumnIdent childColumnName = aliasToColumnMapping.get(column); if (childColumnName == null && !column.isRoot()) { var childCol = aliasToColumnMapping.get(column.getRoot()); childColumnName = new ColumnIdent(childCol.name(), column.path()); } assert childColumnName != null : "If a ScopedSymbol has been retrieved via `getField`, it must be possible to get the columnIdent"; var result = relation.getField(childColumnName, Operation.READ); if (result == null) { throw new IllegalArgumentException(field + " does not belong to " + relationName()); } return result;
1,237
214
1,451
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/relations/DocTableRelation.java
DocTableRelation
ensureColumnCanBeUpdated
class DocTableRelation extends AbstractTableRelation<DocTableInfo> { public DocTableRelation(DocTableInfo tableInfo) { // System columns are excluded from `tableInfo.columns()` by default, // but parent relations need to be able to see them so that they're selectable. // E.g. in `select a._id from tbl as a` super( tableInfo, List.copyOf(tableInfo.columns()), concat(DocSysColumns.forTable(tableInfo.ident()), tableInfo.indexColumns()) ); } @Override public <C, R> R accept(AnalyzedRelationVisitor<C, R> visitor, C context) { return visitor.visitDocTableRelation(this, context); } @Nullable @Override public Reference getField(ColumnIdent path) { return getField(path, Operation.READ, true); } @Override public Reference getField(ColumnIdent column, Operation operation, boolean errorOnUnknownObjectKey) throws AmbiguousColumnException, ColumnUnknownException, UnsupportedOperationException { if (operation == Operation.UPDATE) { ensureColumnCanBeUpdated(column); } Reference reference = tableInfo.getReadReference(column); if (reference == null) { reference = tableInfo.indexColumn(column); if (reference == null) { reference = tableInfo.getDynamic(column, operation == Operation.INSERT || operation == Operation.UPDATE, errorOnUnknownObjectKey); } } return reference; } /** * @throws io.crate.exceptions.ColumnValidationException if the column cannot be updated */ @VisibleForTesting void ensureColumnCanBeUpdated(ColumnIdent ci) {<FILL_FUNCTION_BODY>} private void ensureNotUpdated(ColumnIdent columnUpdated, ColumnIdent protectedColumnIdent, String errorMessage) { if (columnUpdated.equals(protectedColumnIdent) || protectedColumnIdent.isChildOf(columnUpdated)) { throw new ColumnValidationException(columnUpdated.toString(), tableInfo.ident(), errorMessage); } } }
if (ci.isSystemColumn()) { throw new ColumnValidationException(ci.toString(), tableInfo.ident(), "Updating a system column is not supported"); } for (ColumnIdent pkIdent : tableInfo.primaryKey()) { ensureNotUpdated(ci, pkIdent, "Updating a primary key is not supported"); } if (tableInfo.clusteredBy() != null) { ensureNotUpdated(ci, tableInfo.clusteredBy(), "Updating a clustered-by column is not supported"); } List<GeneratedReference> generatedReferences = tableInfo.generatedColumns(); for (var partitionRef : tableInfo.partitionedByColumns()) { ensureNotUpdated(ci, partitionRef.column(), "Updating a partitioned-by column is not supported"); if (!(partitionRef instanceof GeneratedReference)) { continue; } int idx = generatedReferences.indexOf(partitionRef); if (idx >= 0) { GeneratedReference generatedReference = generatedReferences.get(idx); for (var reference : generatedReference.referencedReferences()) { ensureNotUpdated(ci, reference.column(), "Updating a column which is referenced in a partitioned by generated column expression is not supported"); } } }
536
322
858
<methods>public void <init>(io.crate.metadata.doc.DocTableInfo, List<io.crate.expression.symbol.Symbol>, List<io.crate.expression.symbol.Symbol>) ,public boolean equals(java.lang.Object) ,public io.crate.metadata.Reference getField(io.crate.metadata.ColumnIdent) ,public int hashCode() ,public List<io.crate.expression.symbol.Symbol> hiddenOutputs() ,public List<io.crate.expression.symbol.Symbol> outputs() ,public io.crate.metadata.RelationName relationName() ,public io.crate.metadata.Reference resolveField(io.crate.expression.symbol.ScopedSymbol) ,public io.crate.metadata.doc.DocTableInfo tableInfo() ,public java.lang.String toString() <variables>private final non-sealed List<io.crate.expression.symbol.Symbol> hiddenOutputs,private final non-sealed List<io.crate.expression.symbol.Symbol> outputs,protected final non-sealed io.crate.metadata.doc.DocTableInfo tableInfo
crate_crate
crate/server/src/main/java/io/crate/analyze/relations/JoinPair.java
JoinPair
equals
class JoinPair { private final JoinType joinType; private final RelationName left; private final RelationName right; @Nullable private final Symbol condition; public static JoinPair of(RelationName left, RelationName right, JoinType joinType, Symbol condition) { assert condition != null || joinType == JoinType.CROSS : "condition must be present unless it's a cross-join"; return new JoinPair(left, right, joinType, condition); } private JoinPair(RelationName left, RelationName right, JoinType joinType, @Nullable Symbol condition) { this.left = left; this.right = right; this.joinType = joinType; this.condition = condition; } public RelationName left() { return left; } public RelationName right() { return right; } public JoinType joinType() { return joinType; } @Nullable public Symbol condition() { return condition; } @Override public String toString() { return "Join{" + joinType + " " + left + " ⇔ " + right + '}'; } @Override public boolean equals(Object o) {<FILL_FUNCTION_BODY>} @Override public int hashCode() { return Objects.hash(joinType, left, right, condition); } }
if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } JoinPair joinPair = (JoinPair) o; return joinType == joinPair.joinType && Objects.equals(left, joinPair.left) && Objects.equals(right, joinPair.right) && Objects.equals(condition, joinPair.condition);
370
116
486
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/relations/NameFieldProvider.java
NameFieldProvider
resolveField
class NameFieldProvider implements FieldProvider<Symbol> { private final AnalyzedRelation relation; public NameFieldProvider(AnalyzedRelation relation) { this.relation = relation; } @Override public Symbol resolveField(QualifiedName qualifiedName, @Nullable List<String> path, Operation operation, boolean errorOnUnknownObjectKey) {<FILL_FUNCTION_BODY>} }
List<String> parts = qualifiedName.getParts(); ColumnIdent columnIdent = new ColumnIdent(parts.get(parts.size() - 1), path); if (parts.size() != 1) { throw new IllegalArgumentException(String.format(Locale.ENGLISH, "Column reference \"%s\" has too many parts. " + "A column must not have a schema or a table here.", qualifiedName)); } Symbol field = relation.getField(columnIdent, operation, errorOnUnknownObjectKey); if (field == null) { throw new ColumnUnknownException(columnIdent, relation.relationName()); } return field;
107
166
273
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/relations/TableRelation.java
TableRelation
getField
class TableRelation extends AbstractTableRelation<TableInfo> { public TableRelation(TableInfo tableInfo) { super(tableInfo, List.copyOf(tableInfo.columns()), List.of()); } @Override public <C, R> R accept(AnalyzedRelationVisitor<C, R> visitor, C context) { return visitor.visitTableRelation(this, context); } @Override public Reference getField(ColumnIdent column, Operation operation, boolean errorOnUnknownObjectKey) throws AmbiguousColumnException, ColumnUnknownException, UnsupportedOperationException {<FILL_FUNCTION_BODY>} }
switch (operation) { case READ: case UPDATE: return getField(column); case INSERT: case DELETE: throw new UnsupportedOperationException("getField is only supported for read or update operations on TableRelation"); default: throw new AssertionError("new unhandled operation"); }
160
84
244
<methods>public void <init>(io.crate.metadata.table.TableInfo, List<io.crate.expression.symbol.Symbol>, List<io.crate.expression.symbol.Symbol>) ,public boolean equals(java.lang.Object) ,public io.crate.metadata.Reference getField(io.crate.metadata.ColumnIdent) ,public int hashCode() ,public List<io.crate.expression.symbol.Symbol> hiddenOutputs() ,public List<io.crate.expression.symbol.Symbol> outputs() ,public io.crate.metadata.RelationName relationName() ,public io.crate.metadata.Reference resolveField(io.crate.expression.symbol.ScopedSymbol) ,public io.crate.metadata.table.TableInfo tableInfo() ,public java.lang.String toString() <variables>private final non-sealed List<io.crate.expression.symbol.Symbol> hiddenOutputs,private final non-sealed List<io.crate.expression.symbol.Symbol> outputs,protected final non-sealed io.crate.metadata.table.TableInfo tableInfo
crate_crate
crate/server/src/main/java/io/crate/analyze/relations/UnionSelect.java
UnionSelect
getField
class UnionSelect implements AnalyzedRelation { private final AnalyzedRelation left; private final AnalyzedRelation right; private final List<ScopedSymbol> outputs; private final RelationName name; private final boolean isDistinct; public UnionSelect(AnalyzedRelation left, AnalyzedRelation right, boolean isDistinct) { if (left.outputs().size() != right.outputs().size()) { throw new UnsupportedOperationException("Number of output columns must be the same for all parts of a UNION"); } this.left = left; this.right = right; this.name = new RelationName(null, UUIDs.dirtyUUID().toString()); // SQL semantics dictate that UNION uses the column names from the first relation (top or left side) var leftOutputs = left.outputs(); var rightOutputs = right.outputs(); ArrayList<ScopedSymbol> outputs = new ArrayList<>(leftOutputs.size()); for (int i = 0; i < leftOutputs.size(); i++) { var l = leftOutputs.get(i); var r = rightOutputs.get(i); try { outputs.add(new ScopedSymbol(name, Symbols.pathFromSymbol(l), merge(l.valueType(), r.valueType()))); } catch (IllegalArgumentException e) { throw new UnsupportedOperationException( "Output columns at position " + (i + 1) + " must be compatible for all parts of a UNION. 
" + "Got `" + l.valueType().getName() + "` and `" + r.valueType().getName() + "`"); } } this.outputs = List.copyOf(outputs); this.isDistinct = isDistinct; } public AnalyzedRelation left() { return left; } public AnalyzedRelation right() { return right; } @Override public <C, R> R accept(AnalyzedRelationVisitor<C, R> visitor, C context) { return visitor.visitUnionSelect(this, context); } @Override public Symbol getField(ColumnIdent column, Operation operation, boolean errorOnUnknownObjectKey) throws AmbiguousColumnException, ColumnUnknownException, UnsupportedOperationException {<FILL_FUNCTION_BODY>} @Override public RelationName relationName() { return name; } @SuppressWarnings({"rawtypes", "unchecked"}) @NotNull @Override public List<Symbol> outputs() { return (List<Symbol>)(List) outputs; } @Override public String toString() { return left + " UNION " + (isDistinct ? "DISTINCT " : "ALL ") + right; } public boolean isDistinct() { return isDistinct; } }
Symbol field = null; for (var output : outputs) { if (output.column().equals(column)) { if (field != null) { throw new AmbiguousColumnException(output.column(), output); } field = output; } } return field;
733
79
812
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/relations/select/SelectAnalysis.java
SelectAnalysis
add
class SelectAnalysis { private final Map<RelationName, AnalyzedRelation> sources; private final ExpressionAnalyzer expressionAnalyzer; private final ExpressionAnalysisContext expressionAnalysisContext; private final List<Symbol> outputSymbols; private final Map<String, Set<Symbol>> outputMap; public SelectAnalysis(int expectedItems, Map<RelationName, AnalyzedRelation> sources, ExpressionAnalyzer expressionAnalyzer, ExpressionAnalysisContext expressionAnalysisContext) { this.sources = sources; this.expressionAnalyzer = expressionAnalyzer; this.expressionAnalysisContext = expressionAnalysisContext; outputMap = new HashMap<>(); outputSymbols = new ArrayList<>(expectedItems); } public List<Symbol> outputSymbols() { return outputSymbols; } Symbol toSymbol(Expression expression) { return expressionAnalyzer.convert(expression, expressionAnalysisContext); } public Map<RelationName, AnalyzedRelation> sources() { return sources; } /** * multiMap containing outputNames() as keys and outputSymbols() as values. * Can be used to resolve expressions in ORDER BY or GROUP BY where it is important to know * if a outputName is unique */ public Map<String, Set<Symbol>> outputMultiMap() { return outputMap; } public void add(ColumnIdent path, Symbol symbol) {<FILL_FUNCTION_BODY>} }
outputSymbols.add(symbol); var symbols = outputMap.get(path.sqlFqn()); if (symbols == null) { symbols = new HashSet<>(); } symbols.add(symbol); outputMap.put(path.sqlFqn(), symbols);
367
76
443
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/repositories/RepositoryParamValidator.java
RepositoryParamValidator
validate
class RepositoryParamValidator { private final Map<String, TypeSettings> typeSettings; @Inject public RepositoryParamValidator(Map<String, TypeSettings> repositoryTypeSettings) { typeSettings = repositoryTypeSettings; } public void validate(String type, GenericProperties<?> genericProperties, Settings settings) {<FILL_FUNCTION_BODY>} public TypeSettings settingsForType(String type) { TypeSettings typeSettings = this.typeSettings.get(type); if (typeSettings == null) { throw new IllegalArgumentException(String.format(Locale.ENGLISH, "Invalid repository type \"%s\"", type)); } return typeSettings; } }
TypeSettings typeSettings = settingsForType(type); Map<String, Setting<?>> allSettings = typeSettings.all(); // create string settings for all dynamic settings GenericProperties<?> dynamicProperties = typeSettings.dynamicProperties(genericProperties); if (!dynamicProperties.isEmpty()) { // allSettings are immutable by default, copy map allSettings = new HashMap<>(allSettings); for (String key : dynamicProperties.properties().keySet()) { allSettings.put(key, Setting.simpleString(key)); } } // validate all settings Set<String> names = settings.keySet(); Set<String> missingRequiredSettings = Sets.difference(typeSettings.required().keySet(), names); if (!missingRequiredSettings.isEmpty()) { throw new IllegalArgumentException( String.format( Locale.ENGLISH, "The following required parameters are missing to create a repository of type \"%s\": [%s]", type, String.join(", ", missingRequiredSettings)) ); }
178
265
443
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/validator/GroupBySymbolValidator.java
InnerValidator
visitFunction
class InnerValidator extends SymbolVisitor<Boolean, Void> { @Override public Void visitFunction(Function function, Boolean insideScalar) {<FILL_FUNCTION_BODY>} @Override public Void visitWindowFunction(WindowFunction symbol, Boolean insideScalar) { throw new IllegalArgumentException("Window functions are not allowed in GROUP BY"); } @Override public Void visitMatchPredicate(MatchPredicate matchPredicate, Boolean insideScalar) { throw new UnsupportedOperationException(String.format(Locale.ENGLISH, "%s predicate cannot be used in a GROUP BY clause", io.crate.expression.predicate.MatchPredicate.NAME)); } @Override protected Void visitSymbol(Symbol symbol, Boolean insideScalar) { return null; } @Override public Void visitAlias(AliasSymbol aliasSymbol, Boolean insideScalar) { return aliasSymbol.symbol().accept(this, insideScalar); } }
switch (function.signature().getKind()) { case SCALAR: for (Symbol argument : function.arguments()) { argument.accept(this, true); } break; case AGGREGATE: throw new IllegalArgumentException("Aggregate functions are not allowed in GROUP BY"); case TABLE: if (insideScalar == false) { throw new IllegalArgumentException("Table functions are not allowed in GROUP BY"); } break; default: throw new UnsupportedOperationException( String.format(Locale.ENGLISH, "FunctionInfo.Type %s not handled", function.signature().getKind())); } return null;
252
167
419
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/validator/SelectSymbolValidator.java
InnerValidator
visitFunction
class InnerValidator extends SymbolVisitor<Void, Void> { @Override public Void visitFunction(Function symbol, Void context) {<FILL_FUNCTION_BODY>} @Override public Void visitMatchPredicate(MatchPredicate matchPredicate, Void context) { throw new UnsupportedOperationException("match predicate cannot be selected"); } @Override public Void visitSymbol(Symbol symbol, Void context) { return null; } }
switch (symbol.signature().getKind()) { case SCALAR: case AGGREGATE: case TABLE: break; default: throw new UnsupportedOperationException(String.format(Locale.ENGLISH, "FunctionInfo.Type %s not handled", symbol.signature().getKind())); } for (Symbol arg : symbol.arguments()) { arg.accept(this, context); } return null;
127
115
242
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/analyze/where/DocKeys.java
DocKey
primaryTerm
class DocKey { private final List<Symbol> key; private DocKey(int pos) { key = docKeys.get(pos); } public String getId(TransactionContext txnCtx, NodeContext nodeCtx, Row params, SubQueryResults subQueryResults) { return idFunction.apply( Lists.mapLazy( key.subList(0, width), s -> StringUtils.nullOrString(SymbolEvaluator.evaluate(txnCtx, nodeCtx, s, params, subQueryResults)) ) ); } public Optional<Long> version(TransactionContext txnCtx, NodeContext nodeCtx, Row params, SubQueryResults subQueryResults) { if (withVersions && key.get(width) != null) { Object val = SymbolEvaluator.evaluate(txnCtx, nodeCtx, key.get(width), params, subQueryResults); return Optional.of(DataTypes.LONG.sanitizeValue(val)); } return Optional.empty(); } public Optional<Long> sequenceNo(TransactionContext txnCtx, NodeContext nodeCtx, Row params, SubQueryResults subQueryResults) { if (withSequenceVersioning && key.get(width) != null) { Object val = SymbolEvaluator.evaluate(txnCtx, nodeCtx, key.get(width), params, subQueryResults); return Optional.of(LongType.INSTANCE.sanitizeValue(val)); } return Optional.empty(); } public Optional<Long> primaryTerm(TransactionContext txnCtx, NodeContext nodeCtx, Row params, SubQueryResults subQueryResults) {<FILL_FUNCTION_BODY>} public List<Symbol> values() { return key; } public List<String> getPartitionValues(TransactionContext txnCtx, NodeContext nodeCtx, Row params, SubQueryResults subQueryResults) { if (partitionIdx == null || partitionIdx.isEmpty()) { return Collections.emptyList(); } return Lists.map( partitionIdx, pIdx -> DataTypes.STRING.implicitCast(SymbolEvaluator.evaluate(txnCtx, nodeCtx, key.get(pIdx), params, subQueryResults))); } public String getRouting(TransactionContext txnCtx, NodeContext nodeCtx, Row params, SubQueryResults subQueryResults) { if (clusteredByIdx >= 0) { return SymbolEvaluator.evaluate(txnCtx, nodeCtx, key.get(clusteredByIdx), params, subQueryResults).toString(); } return getId(txnCtx, nodeCtx, params, subQueryResults); } }
if (withSequenceVersioning && key.get(width + 1) != null) { Object val = SymbolEvaluator.evaluate(txnCtx, nodeCtx, key.get(width + 1), params, subQueryResults); return Optional.of(LongType.INSTANCE.sanitizeValue(val)); } return Optional.empty();
701
92
793
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/auth/AuthSettings.java
AuthSettings
resolveClientAuth
class AuthSettings { private AuthSettings() { } public static final Setting<Boolean> AUTH_HOST_BASED_ENABLED_SETTING = Setting.boolSetting( "auth.host_based.enabled", false, Setting.Property.NodeScope ); public static final Setting<Settings> AUTH_HOST_BASED_CONFIG_SETTING = Setting.groupSetting( "auth.host_based.config.", Setting.Property.NodeScope ); // Explicit generic is required for eclipse JDT, otherwise it won't compile public static final Setting<String> AUTH_TRUST_HTTP_DEFAULT_HEADER = new Setting<String>( "auth.trust.http_default_user", "crate", Function.identity(), DataTypes.STRING, Setting.Property.NodeScope ); public static final String HTTP_HEADER_REAL_IP = "X-Real-Ip"; public static final Setting<Boolean> AUTH_TRUST_HTTP_SUPPORT_X_REAL_IP = Setting.boolSetting( "auth.trust.http_support_x_real_ip", false, Setting.Property.NodeScope ); public static ClientAuth resolveClientAuth(Settings settings, Protocol protocol) {<FILL_FUNCTION_BODY>} }
Settings hbaSettings = AUTH_HOST_BASED_CONFIG_SETTING.get(settings); int numMethods = 0; int numCertMethods = 0; for (var entry : hbaSettings.getAsGroups().entrySet()) { Settings entrySettings = entry.getValue(); String protocolEntry = entrySettings.get("protocol"); if (protocolEntry != null && !protocol.name().equalsIgnoreCase(protocolEntry)) { // We need null check for protocolEntry since we want entry without protocol be matched with any protocol // Without it !equalsIgnoreCase returns true and HBA with only 'cert' entries but all without protocol // might end up with NONE while correct value is 'REQUIRED'. continue; } String method = entrySettings.get("method", "trust"); numMethods++; if (method.equals("cert")) { numCertMethods++; } } if (numCertMethods == 0) { return ClientAuth.NONE; } return numCertMethods == numMethods ? ClientAuth.REQUIRE : ClientAuth.OPTIONAL;
338
277
615
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/auth/HostBasedAuthentication.java
Matchers
isValidAddress
class Matchers { // IPv4 127.0.0.1 -> 2130706433 private static final long IPV4_LOCALHOST = inetAddressToInt(InetAddresses.forString("127.0.0.1")); // IPv6 ::1 -> 1 private static final long IPV6_LOCALHOST = inetAddressToInt(InetAddresses.forString("::1")); static boolean isValidUser(Map.Entry<String, Map<String, String>> entry, String user) { String hbaUser = entry.getValue().get(KEY_USER); return hbaUser == null || user.equals(hbaUser); } static boolean isValidAddress(@Nullable String hbaAddressOrHostname, long address, Supplier<String> getHostname, DnsResolver resolver) {<FILL_FUNCTION_BODY>} static boolean isValidAddress(@Nullable String hbaAddressOrHostname, InetAddress address, DnsResolver resolver) { return isValidAddress(hbaAddressOrHostname, inetAddressToInt(address), address::getCanonicalHostName, resolver); } static boolean isValidProtocol(String hbaProtocol, Protocol protocol) { return hbaProtocol == null || hbaProtocol.equals(protocol.toString()); } static boolean isValidConnection(String hbaConnectionMode, ConnectionProperties connectionProperties) { if (hbaConnectionMode == null || hbaConnectionMode.isEmpty()) { return true; } SSL sslMode = SSL.parseValue(hbaConnectionMode); return switch (sslMode) { case OPTIONAL -> true; case NEVER -> !connectionProperties.hasSSL(); case REQUIRED -> connectionProperties.hasSSL(); }; } static long inetAddressToInt(InetAddress address) { long net = 0; for (byte a : address.getAddress()) { net <<= 8; net |= a & 0xFF; } return net; } }
if (hbaAddressOrHostname == null) { // no IP/CIDR --> 0.0.0.0/0 --> match all return true; } if (hbaAddressOrHostname.equals("_local_")) { // special case "_local_" which matches both IPv4 and IPv6 localhost addresses return address == IPV4_LOCALHOST || address == IPV6_LOCALHOST; } int p = hbaAddressOrHostname.indexOf('/'); if (p < 0) { try { if (hbaAddressOrHostname.startsWith(".")) { // not an ip address, subdomain var clientHostName = getHostname.get(); return clientHostName != null ? clientHostName.endsWith(hbaAddressOrHostname) : false; } else { // SystemDefaultDnsResolver is injected here and internally it uses InetAddress.getAllByName // which tries to treat argument as an ip address and then as a hostname. for (var resolvedAddress : resolver.resolve(hbaAddressOrHostname)) { if (inetAddressToInt(resolvedAddress) == address) { return true; } } return false; } } catch (UnknownHostException e) { LOGGER.warn("Cannot resolve hostname {} specified in the HBA configuration.", hbaAddressOrHostname); return false; } } long[] minAndMax = Cidrs.cidrMaskToMinMax(hbaAddressOrHostname); return minAndMax[0] <= address && address < minAndMax[1];
518
408
926
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/auth/PasswordAuthenticationMethod.java
PasswordAuthenticationMethod
authenticate
class PasswordAuthenticationMethod implements AuthenticationMethod { public static final String NAME = "password"; private final Roles roles; PasswordAuthenticationMethod(Roles roles) { this.roles = roles; } @Nullable @Override public Role authenticate(Credentials credentials, ConnectionProperties connProperties) {<FILL_FUNCTION_BODY>} @Override public String name() { return NAME; } }
var username = credentials.username(); var password = credentials.password(); assert username != null : "User name must be not null on password authentication method"; Role user = roles.findUser(username); if (user != null && password != null && !password.isEmpty()) { SecureHash secureHash = user.password(); if (secureHash != null && secureHash.verifyHash(password)) { return user; } } throw new RuntimeException("password authentication failed for user \"" + username + "\"");
117
137
254
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/blob/BlobContainer.java
RecursiveFileIterator
hasNext
class RecursiveFileIterator implements Iterator<File> { private final File[] subDirs; private int subDirIndex = -1; private File[] files = null; private int fileIndex = -1; private RecursiveFileIterator(File[] subDirs) { this.subDirs = subDirs; } /** * Returns {@code true} if the current sub-directory have files to traverse. Otherwise, iterates * until it finds the next sub-directory with files inside it. * Returns {@code false} only after reaching the last file of the last sub-directory. */ @Override public boolean hasNext() {<FILL_FUNCTION_BODY>} @Override public File next() { if (hasNext()) { return files[++fileIndex]; } throw new NoSuchElementException("List of files is empty"); } @Override public void remove() { throw new UnsupportedOperationException("remove is unsupported for " + BlobContainer.class.getSimpleName()); } }
if (files == null || (fileIndex + 1) == files.length) { files = null; fileIndex = -1; while (subDirIndex + 1 < subDirs.length && (files == null || files.length == 0)) { files = subDirs[++subDirIndex].listFiles(); } } return (files != null && fileIndex + 1 < files.length);
275
105
380
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/blob/BlobTransferRequest.java
BlobTransferRequest
toString
class BlobTransferRequest<T extends ReplicationRequest<T>> extends ReplicationRequest<T> implements IPutChunkRequest { private boolean last; private UUID transferId; private BytesReference content; public BytesReference content() { return content; } public boolean isLast() { return last; } public BlobTransferRequest(ShardId shardId, UUID transferId, BytesReference content, boolean last) { super(shardId); this.transferId = transferId; this.content = content; this.last = last; } public BlobTransferRequest() { } public BlobTransferRequest(StreamInput in) throws IOException { super(in); transferId = new UUID(in.readLong(), in.readLong()); content = in.readBytesReference(); last = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeLong(transferId.getMostSignificantBits()); out.writeLong(transferId.getLeastSignificantBits()); out.writeBytesReference(content); out.writeBoolean(last); } public UUID transferId() { return transferId; } @Override public String toString() {<FILL_FUNCTION_BODY>} }
return "BlobTransferRequest{" + "last=" + last + ", transferId=" + transferId + '}';
368
39
407
<methods>public void <init>() ,public void <init>(org.elasticsearch.index.shard.ShardId) ,public void <init>(org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public java.lang.String getDescription() ,public java.lang.String index() ,public final T index(java.lang.String) ,public java.lang.String[] indices() ,public org.elasticsearch.action.support.IndicesOptions indicesOptions() ,public void onRetry() ,public T setShardId(org.elasticsearch.index.shard.ShardId) ,public org.elasticsearch.index.shard.ShardId shardId() ,public final T timeout(io.crate.common.unit.TimeValue) ,public final T timeout(java.lang.String) ,public io.crate.common.unit.TimeValue timeout() ,public abstract java.lang.String toString() ,public org.elasticsearch.action.support.ActiveShardCount waitForActiveShards() ,public final T waitForActiveShards(org.elasticsearch.action.support.ActiveShardCount) ,public final T waitForActiveShards(int) ,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>public static final io.crate.common.unit.TimeValue DEFAULT_TIMEOUT,protected java.lang.String index,private long routedBasedOnClusterVersion,protected org.elasticsearch.index.shard.ShardId shardId,protected io.crate.common.unit.TimeValue timeout,protected org.elasticsearch.action.support.ActiveShardCount waitForActiveShards
crate_crate
crate/server/src/main/java/io/crate/blob/DigestBlob.java
DigestBlob
waitForHead
class DigestBlob implements Closeable { private final String digest; private final BlobContainer container; private final UUID transferId; protected File file; private FileChannel fileChannel; private FileChannel headFileChannel; private int size; private long headLength; private AtomicLong headSize; private MessageDigest md; private long chunks; private CountDownLatch headCatchedUpLatch; private static final Logger LOGGER = LogManager.getLogger(DigestBlob.class); public DigestBlob(BlobContainer container, String digest, UUID transferId) { this.digest = digest; this.container = container; this.size = 0; this.transferId = transferId; } public String getDigest() { return digest; } public int size() { return size; } public File file() { return file; } private static Path getTmpFilePath(BlobContainer blobContainer, String digest, UUID transferId) { return blobContainer.getTmpDirectory().resolve(digest + "." + transferId.toString()); } private File createTmpFile() throws IOException { File tmpFile = getTmpFilePath(container, digest, transferId).toFile(); tmpFile.createNewFile(); return tmpFile; } private void updateDigest(ByteBuffer bbf) throws IOException { if (md == null) { try { md = MessageDigest.getInstance("SHA-1"); } catch (NoSuchAlgorithmException e) { throw new IOException(e); } } md.update(bbf.slice()); } private void addContent(ByteBuf buffer, boolean last) throws IOException { if (buffer != null) { int readableBytes = buffer.readableBytes(); ByteBuffer byteBuffer = buffer.nioBuffer(); if (file == null) { file = createTmpFile(); } if (fileChannel == null) { FileOutputStream outputStream = new FileOutputStream(file); fileChannel = outputStream.getChannel(); } int written = 0; do { if (headLength == 0) { updateDigest(byteBuffer); } written += fileChannel.write(byteBuffer); } while (written < readableBytes); size += readableBytes; buffer.readerIndex(buffer.readerIndex() + written); chunks++; } if (last) { if (file == null) { file = createTmpFile(); } if 
(fileChannel == null) { FileOutputStream outputStream = new FileOutputStream(file); fileChannel = outputStream.getChannel(); } fileChannel.force(false); fileChannel.close(); fileChannel = null; } else { if (buffer == null) { throw new NullPointerException("buffer"); } } } private void calculateDigest() { assert headSize.get() == headLength : "Head hasn't catched up, can't calculate digest"; try (FileInputStream stream = new FileInputStream(file)) { stream.skipNBytes(headLength); byte[] buffer = new byte[4096]; int bytesRead; while ((bytesRead = stream.read(buffer, 0, 4096)) > 0) { md.update(buffer, 0, bytesRead); } } catch (IOException ex) { LOGGER.error("error accessing file to calculate digest", ex); } } public File commit() throws DigestMismatchException, BlobAlreadyExistsException { if (headLength > 0) { calculateDigest(); } assert md != null : "MessageDigest should not be null"; try { String contentDigest = Hex.encodeHexString(md.digest()); if (!contentDigest.equals(digest)) { file.delete(); throw new DigestMismatchException(digest, contentDigest); } } finally { IOUtils.closeWhileHandlingException(headFileChannel); headFileChannel = null; } File newFile = container.getFile(digest); Semaphore semaphore = container.digestCoordinator(digest); try { semaphore.acquire(); try { if (Files.exists(newFile.toPath())) { throw new BlobAlreadyExistsException(digest); } file.renameTo(newFile); file = null; } finally { // semaphore was acquired successfully, release it semaphore.release(); } } catch (InterruptedException e) { LOGGER.error("Unable to commit blob {}", e, file.getName()); throw new IllegalStateException("Unable to commit blob because exclusive execution could not be achieved"); } return newFile; } public File getContainerFile() { return container.getFile(digest); } public void addContent(BytesReference content, boolean last) { try { addContent(Netty4Utils.toByteBuf(content), last); } catch (IOException e) { throw new BlobWriteException(digest, size, e); } } 
public void addToHead(BytesReference content) throws IOException { if (content == null) { return; } int written = 0; ByteBuf byteBuf = Netty4Utils.toByteBuf(content); int readableBytes = byteBuf.readableBytes(); assert readableBytes + headSize.get() <= headLength : "Got too many bytes in addToHead()"; ByteBuffer byteBuffer = byteBuf.nioBuffer(); while (written < readableBytes) { updateDigest(byteBuffer); written += headFileChannel.write(byteBuffer); } headSize.addAndGet(written); if (headSize.get() == headLength) { headCatchedUpLatch.countDown(); } } public long chunks() { return chunks; } public static DigestBlob resumeTransfer(BlobContainer blobContainer, String digest, UUID transferId, long currentPos) { DigestBlob digestBlob = new DigestBlob(blobContainer, digest, transferId); digestBlob.file = getTmpFilePath(blobContainer, digest, transferId).toFile(); try { LOGGER.trace("Resuming DigestBlob {}. CurrentPos {}", digest, currentPos); digestBlob.headFileChannel = new FileOutputStream(digestBlob.file, false).getChannel(); digestBlob.headLength = currentPos; digestBlob.headSize = new AtomicLong(); digestBlob.headCatchedUpLatch = new CountDownLatch(1); RandomAccessFile raf = new RandomAccessFile(digestBlob.file, "rw"); raf.setLength(currentPos); raf.close(); FileOutputStream outputStream = new FileOutputStream(digestBlob.file, true); digestBlob.fileChannel = outputStream.getChannel(); } catch (IOException ex) { LOGGER.error("error resuming transfer of {}, id: {}", ex, digest, transferId); return null; } return digestBlob; } public void waitForHead() {<FILL_FUNCTION_BODY>} @Override public void close() throws IOException { if (file != null) { file.delete(); } } }
if (headLength == 0) { return; } assert headCatchedUpLatch != null : "headCatchedUpLatch should not be null"; try { headCatchedUpLatch.await(); } catch (InterruptedException e) { Thread.interrupted(); }
1,947
83
2,030
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/blob/RemoteDigestBlob.java
RemoteDigestBlob
addContent
class RemoteDigestBlob { public enum Status { FULL((byte) 0), PARTIAL((byte) 1), MISMATCH((byte) 2), EXISTS((byte) 3), FAILED((byte) 4); private final byte id; Status(byte id) { this.id = id; } /** * The internal representation of the status. */ public byte id() { return id; } public static Status fromId(byte id) { switch (id) { case 0: return FULL; case 1: return PARTIAL; case 2: return MISMATCH; case 3: return EXISTS; case 4: return FAILED; default: throw new IllegalArgumentException("No status match for [" + id + "]"); } } } private static final Logger LOGGER = LogManager.getLogger(RemoteDigestBlob.class); private final ShardId shardId; private final String digest; private final Client client; private long size; private StartBlobResponse startResponse; private UUID transferId; private Status status; public RemoteDigestBlob(Client client, ShardId shardId, String digest) { this.digest = digest; this.client = client; this.size = 0; this.shardId = shardId; } public Status status() { return status; } public boolean delete() { LOGGER.trace("delete"); assert transferId == null : "transferId should be null"; DeleteBlobRequest request = new DeleteBlobRequest( shardId, Hex.decodeHex(digest) ); return FutureUtils.get(client.execute(DeleteBlobAction.INSTANCE, request)).deleted; } private Status start(ByteBuf buffer, boolean last) { LOGGER.trace("start blob upload"); assert transferId == null : "transferId should be null"; StartBlobRequest request = new StartBlobRequest( shardId, Hex.decodeHex(digest), Netty4Utils.toBytesReference(buffer), last ); transferId = request.transferId(); size += buffer.readableBytes(); startResponse = FutureUtils.get(client.execute(StartBlobAction.INSTANCE, request)); status = startResponse.status(); return status; } private Status chunk(ByteBuf buffer, boolean last) { assert transferId != null : "transferId should not be null"; PutChunkRequest request = new PutChunkRequest( shardId, Hex.decodeHex(digest), transferId, 
Netty4Utils.toBytesReference(buffer), size, last ); size += buffer.readableBytes(); PutChunkResponse putChunkResponse = FutureUtils.get(client.execute(PutChunkAction.INSTANCE, request)); return putChunkResponse.status(); } public Status addContent(ByteBuf buffer, boolean last) {<FILL_FUNCTION_BODY>} public long size() { return size; } }
if (startResponse == null) { // this is the first call to addContent return start(buffer, last); } else if (status == Status.EXISTS) { // client probably doesn't support 100-continue and is sending chunked requests // need to ignore the content. return status; } else if (status != Status.PARTIAL) { throw new IllegalStateException("Expected Status.PARTIAL for chunk but got: " + status); } else { return chunk(buffer, last); }
850
136
986
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/blob/TransportPutChunkAction.java
TransportPutChunkAction
shardOperationOnReplica
class TransportPutChunkAction extends TransportReplicationAction<PutChunkRequest, PutChunkReplicaRequest, PutChunkResponse> { private final BlobTransferTarget transferTarget; @Inject public TransportPutChunkAction(Settings settings, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, BlobTransferTarget transferTarget) { super( settings, PutChunkAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, PutChunkRequest::new, PutChunkReplicaRequest::new, ThreadPool.Names.WRITE ); this.transferTarget = transferTarget; } @Override protected PutChunkResponse newResponseInstance(StreamInput in) throws IOException { return new PutChunkResponse(in); } @Override protected void shardOperationOnPrimary(PutChunkRequest request, IndexShard primary, ActionListener<PrimaryResult<PutChunkReplicaRequest, PutChunkResponse>> listener) { ActionListener.completeWith(listener, () -> { PutChunkResponse response = new PutChunkResponse(); transferTarget.continueTransfer(request, response); final PutChunkReplicaRequest replicaRequest = new PutChunkReplicaRequest( request.shardId(), clusterService.localNode().getId(), request.transferId(), request.currentPos(), request.content(), request.isLast() ); replicaRequest.index(request.index()); return new PrimaryResult<>(replicaRequest, response); }); } @Override protected ReplicaResult shardOperationOnReplica(PutChunkReplicaRequest shardRequest, IndexShard replica) {<FILL_FUNCTION_BODY>} }
PutChunkResponse response = new PutChunkResponse(); transferTarget.continueTransfer(shardRequest, response); return new ReplicaResult();
484
40
524
<methods>public org.elasticsearch.cluster.block.ClusterBlockLevel indexBlockLevel() <variables>public static final Setting<io.crate.common.unit.TimeValue> REPLICATION_INITIAL_RETRY_BACKOFF_BOUND,public static final Setting<io.crate.common.unit.TimeValue> REPLICATION_RETRY_TIMEOUT,protected final non-sealed org.elasticsearch.cluster.service.ClusterService clusterService,protected final non-sealed java.lang.String executor,protected final non-sealed boolean forceExecutionOnPrimary,protected final non-sealed org.elasticsearch.indices.IndicesService indicesService,private volatile io.crate.common.unit.TimeValue initialRetryBackoffBound,protected final Logger logger,private volatile io.crate.common.unit.TimeValue retryTimeout,protected final non-sealed org.elasticsearch.cluster.action.shard.ShardStateAction shardStateAction,private final non-sealed boolean syncGlobalCheckpointAfterOperation,protected final non-sealed org.elasticsearch.threadpool.ThreadPool threadPool,protected final non-sealed org.elasticsearch.transport.TransportRequestOptions transportOptions,protected final non-sealed java.lang.String transportPrimaryAction,protected final non-sealed java.lang.String transportReplicaAction,protected final non-sealed org.elasticsearch.transport.TransportService transportService
crate_crate
crate/server/src/main/java/io/crate/blob/TransportStartBlobAction.java
TransportStartBlobAction
shardOperationOnReplica
class TransportStartBlobAction extends TransportReplicationAction<StartBlobRequest, StartBlobRequest, StartBlobResponse> { private final BlobTransferTarget transferTarget; @Inject public TransportStartBlobAction(Settings settings, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, BlobTransferTarget transferTarget) { super( settings, StartBlobAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, StartBlobRequest::new, StartBlobRequest::new, ThreadPool.Names.WRITE ); this.transferTarget = transferTarget; logger.trace("Constructor"); } @Override protected StartBlobResponse newResponseInstance(StreamInput in) throws IOException { logger.trace("newResponseInstance"); return new StartBlobResponse(in); } @Override protected void shardOperationOnPrimary(StartBlobRequest request, IndexShard primary, ActionListener<PrimaryResult<StartBlobRequest, StartBlobResponse>> listener) { ActionListener.completeWith(listener, () -> { logger.trace("shardOperationOnPrimary {}", request); final StartBlobResponse response = new StartBlobResponse(); transferTarget.startTransfer(request, response); return new PrimaryResult<>(request, response); }); } @Override protected ReplicaResult shardOperationOnReplica(StartBlobRequest request, IndexShard replica) {<FILL_FUNCTION_BODY>} }
logger.trace("shardOperationOnReplica operating on replica {}", request); final StartBlobResponse response = new StartBlobResponse(); transferTarget.startTransfer(request, response); return new ReplicaResult();
426
58
484
<methods>public org.elasticsearch.cluster.block.ClusterBlockLevel indexBlockLevel() <variables>public static final Setting<io.crate.common.unit.TimeValue> REPLICATION_INITIAL_RETRY_BACKOFF_BOUND,public static final Setting<io.crate.common.unit.TimeValue> REPLICATION_RETRY_TIMEOUT,protected final non-sealed org.elasticsearch.cluster.service.ClusterService clusterService,protected final non-sealed java.lang.String executor,protected final non-sealed boolean forceExecutionOnPrimary,protected final non-sealed org.elasticsearch.indices.IndicesService indicesService,private volatile io.crate.common.unit.TimeValue initialRetryBackoffBound,protected final Logger logger,private volatile io.crate.common.unit.TimeValue retryTimeout,protected final non-sealed org.elasticsearch.cluster.action.shard.ShardStateAction shardStateAction,private final non-sealed boolean syncGlobalCheckpointAfterOperation,protected final non-sealed org.elasticsearch.threadpool.ThreadPool threadPool,protected final non-sealed org.elasticsearch.transport.TransportRequestOptions transportOptions,protected final non-sealed java.lang.String transportPrimaryAction,protected final non-sealed java.lang.String transportReplicaAction,protected final non-sealed org.elasticsearch.transport.TransportService transportService
crate_crate
crate/server/src/main/java/io/crate/blob/transfer/BlobHeadRequestHandler.java
Actions
registerHandler
class Actions { // handlers called on the source node public static final String GET_BLOB_HEAD = "internal:crate:blob/shard/tmp_transfer/get_head"; public static final String GET_TRANSFER_INFO = "internal:crate:blob/shard/tmp_transfer/get_info"; // handlers called on the target node public static final String PUT_BLOB_HEAD_CHUNK = "internal:crate:blob/shard/tmp_transfer/put_head_chunk"; } @Inject public BlobHeadRequestHandler(TransportService transportService, ClusterService clusterService, BlobTransferTarget blobTransferTarget, ThreadPool threadPool) { this.blobTransferTarget = blobTransferTarget; this.clusterService = clusterService; this.transportService = transportService; this.threadPool = threadPool; } public void registerHandler() {<FILL_FUNCTION_BODY>
transportService.registerRequestHandler( Actions.GET_BLOB_HEAD, ThreadPool.Names.GENERIC, GetBlobHeadRequest::new, new GetBlobHeadHandler() ); transportService.registerRequestHandler( Actions.GET_TRANSFER_INFO, ThreadPool.Names.GENERIC, BlobInfoRequest::new, new GetTransferInfoHandler() ); transportService.registerRequestHandler( Actions.PUT_BLOB_HEAD_CHUNK, ThreadPool.Names.GENERIC, PutBlobHeadChunkRequest::new, new PutBlobHeadChunkHandler() );
250
173
423
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/blob/v2/BlobIndicesService.java
BlobIndicesService
afterIndexShardCreated
class BlobIndicesService implements IndexEventListener { private static final Logger LOGGER = LogManager.getLogger(BlobIndicesService.class); public static final Setting<Boolean> SETTING_INDEX_BLOBS_ENABLED = Setting.boolSetting( "index.blobs.enabled", false, Setting.Property.IndexScope); public static final Setting<String> SETTING_INDEX_BLOBS_PATH = Setting.simpleString( "index.blobs.path", Setting.Property.IndexScope); public static final Setting<String> SETTING_BLOBS_PATH = Setting.simpleString( "blobs.path", Setting.Property.NodeScope); private final ClusterService clusterService; final Map<String, BlobIndex> indices = new ConcurrentHashMap<>(); @Nullable private final Path globalBlobPath; public BlobIndicesService(Settings settings, ClusterService clusterService) { this.clusterService = clusterService; globalBlobPath = getGlobalBlobPath(settings); } @Nullable public static Path getGlobalBlobPath(Settings settings) { String customGlobalBlobPathSetting = SETTING_BLOBS_PATH.get(settings); if (Strings.isNullOrEmpty(customGlobalBlobPathSetting)) { return null; } Path globalBlobPath = PathUtils.get(customGlobalBlobPathSetting); ensureExistsAndWritable(globalBlobPath); return globalBlobPath; } @Override public void afterIndexCreated(IndexService indexService) { String indexName = indexService.index().getName(); if (isBlobIndex(indexName)) { BlobIndex oldBlobIndex = indices.put(indexName, new BlobIndex(LOGGER, globalBlobPath)); assert oldBlobIndex == null : "There must not be an index present if a new index is created"; } } @Override public void afterIndexRemoved(Index index, IndexSettings indexSettings, IndicesClusterStateService.AllocatedIndices.IndexRemovalReason reason) { String indexName = index.getName(); if (isBlobIndex(indexName)) { BlobIndex blobIndex = indices.remove(indexName); assert blobIndex != null : "BlobIndex not found on afterIndexDeleted"; } } @Override public void afterIndexShardCreated(IndexShard indexShard) {<FILL_FUNCTION_BODY>} @Override public 
void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState, IndexShardState currentState, @Nullable String reason) { if (currentState == IndexShardState.POST_RECOVERY) { String index = indexShard.shardId().getIndexName(); if (isBlobIndex(index)) { BlobIndex blobIndex = indices.get(index); blobIndex.initializeShard(indexShard); } } } @Override public void afterIndexShardDeleted(ShardId shardId, Settings indexSettings) { String index = shardId.getIndexName(); if (isBlobIndex(index)) { BlobIndex blobIndex = indices.get(index); if (blobIndex != null) { blobIndex.removeShard(shardId); } } } @Nullable public BlobShard blobShard(ShardId shardId) { BlobIndex blobIndex = indices.get(shardId.getIndexName()); if (blobIndex == null) { return null; } return blobIndex.getShard(shardId.id()); } public BlobShard blobShardSafe(ShardId shardId) { String index = shardId.getIndexName(); if (isBlobIndex(index)) { BlobShard blobShard = blobShard(shardId); if (blobShard == null) { throw new ShardNotFoundException(shardId); } return blobShard; } throw new BlobsDisabledException(shardId.getIndex()); } public BlobShard localBlobShard(String index, String digest) { return blobShardSafe(localShardId(index, digest)); } private ShardId localShardId(String index, String digest) { ShardIterator si = clusterService.operationRouting().getShards( clusterService.state(), index, null, digest, "_only_local"); return si.shardId(); } static boolean ensureExistsAndWritable(Path blobsPath) { if (Files.exists(blobsPath)) { if (!Files.isDirectory(blobsPath)) { throw new SettingsException( String.format(Locale.ENGLISH, "blobs path '%s' is a file, must be a directory", blobsPath)); } if (!Files.isWritable(blobsPath)) { throw new SettingsException( String.format(Locale.ENGLISH, "blobs path '%s' is not writable", blobsPath)); } } else { try { Files.createDirectories(blobsPath); } catch (IOException e) { throw new SettingsException( String.format(Locale.ENGLISH, "blobs path '%s' 
could not be created", blobsPath)); } } return true; } }
String index = indexShard.shardId().getIndexName(); if (isBlobIndex(index)) { BlobIndex blobIndex = indices.get(index); assert blobIndex != null : "blobIndex must exists if a shard is created in it"; blobIndex.createShard(indexShard); }
1,413
88
1,501
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/breaker/ConcurrentRamAccounting.java
ConcurrentRamAccounting
forCircuitBreaker
class ConcurrentRamAccounting implements RamAccounting { private final AtomicLong usedBytes = new AtomicLong(0L); private final LongConsumer reserveBytes; private final LongConsumer releaseBytes; private final String label; private final int operationMemoryLimit; public static ConcurrentRamAccounting forCircuitBreaker(String label, CircuitBreaker circuitBreaker, int operationMemoryLimit) {<FILL_FUNCTION_BODY>} public ConcurrentRamAccounting(LongConsumer reserveBytes, LongConsumer releaseBytes, String label, int operationMemoryLimit) { this.reserveBytes = reserveBytes; this.releaseBytes = releaseBytes; this.label = label; this.operationMemoryLimit = operationMemoryLimit; } @Override public void addBytes(long bytes) { long currentUsedBytes = usedBytes.addAndGet(bytes); if (operationMemoryLimit > 0 && currentUsedBytes > operationMemoryLimit) { usedBytes.addAndGet(- bytes); throw new CircuitBreakingException(String.format(Locale.ENGLISH, "\"%s\" reached operation memory limit. Used: %s, Limit: %s", label, new ByteSizeValue(currentUsedBytes), new ByteSizeValue(operationMemoryLimit) )); } try { reserveBytes.accept(bytes); } catch (Exception e) { usedBytes.addAndGet(- bytes); throw e; } } @Override public long totalBytes() { return usedBytes.get(); } @Override public void release() { long prevUsedBytes = usedBytes.getAndSet(0); releaseBytes.accept(prevUsedBytes); } @Override public void close() { release(); } }
return new ConcurrentRamAccounting( bytes -> circuitBreaker.addEstimateBytesAndMaybeBreak(bytes, label), bytes -> circuitBreaker.addWithoutBreaking(- bytes), label, operationMemoryLimit );
460
63
523
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/breaker/TypedCellsAccounting.java
TypedCellsAccounting
accountForAndMaybeBreak
class TypedCellsAccounting implements RowAccounting<Object[]> { private final RamAccounting ramAccounting; private final int extraSizePerRow; private final CellsSizeEstimator sizeEstimator; /** * @param columnTypes Column types to use for size estimation * @param ramAccounting {@link RamAccounting} implementing the CircuitBreaker logic * @param extraSizePerRow Extra size that need to be calculated per row. E.g. {@link HashInnerJoinBatchIterator} * might instantiate an ArrayList per row used for the internal hash->row buffer */ public TypedCellsAccounting(List<? extends DataType<?>> columnTypes, RamAccounting ramAccounting, int extraSizePerRow) { this.sizeEstimator = CellsSizeEstimator.forColumns(columnTypes); this.ramAccounting = ramAccounting; this.extraSizePerRow = extraSizePerRow; } /** * Account for the size of the values of the row cells (this can include all the output cells, not just the source * row). * <p> * This should only be used if the values are stored/buffered in another in-memory data structure. */ @Override public long accountForAndMaybeBreak(Object[] rowCells) {<FILL_FUNCTION_BODY>} @Override public void release() { ramAccounting.release(); } }
long rowBytes = sizeEstimator.estimateSize(rowCells) + extraSizePerRow; ramAccounting.addBytes(rowBytes); return rowBytes;
366
46
412
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/breaker/TypedRowAccounting.java
TypedRowAccounting
accountForAndMaybeBreak
class TypedRowAccounting implements RowAccounting<Row> { private final RamAccounting ramAccounting; private final CellsSizeEstimator estimateRowSize; private int extraSizePerRow = 0; /** * See {@link TypedRowAccounting#TypedRowAccounting(List, RamAccounting, int)} */ public TypedRowAccounting(List<? extends DataType<?>> columnTypes, RamAccounting ramAccounting) { this.estimateRowSize = CellsSizeEstimator.forColumns(columnTypes); this.ramAccounting = ramAccounting; } /** * Create a new instance over a set of column types * * @param columnTypes The column types to use for size estimation * @param ramAccounting {@link RamAccounting} implementing the CircuitBreaker logic * @param extraSizePerRow Extra size that need to be calculated per row. E.g. {@link HashInnerJoinBatchIterator} * might instantiate an ArrayList per row used for the internal hash->row buffer */ public TypedRowAccounting(List<? extends DataType<?>> columnTypes, RamAccounting ramAccounting, int extraSizePerRow) { this(columnTypes, ramAccounting); this.extraSizePerRow = extraSizePerRow; } /** * Account for the size of the values of the row. * * This should only be used if the values are stored/buffered in another in-memory data structure. */ @Override public long accountForAndMaybeBreak(Row row) {<FILL_FUNCTION_BODY>} @Override public void release() { ramAccounting.release(); } }
// Container size of the row is excluded because here it's unknown where the values will be saved to. // As size estimation is generally "best-effort" this should be good enough. long bytes = estimateRowSize.estimateSize(row) + extraSizePerRow; ramAccounting.addBytes(bytes); return bytes;
432
85
517
<no_super_class>
crate_crate
crate/server/src/main/java/io/crate/cluster/gracefulstop/DecommissionAllocationDecider.java
DecommissionAllocationDecider
canAllocate
class DecommissionAllocationDecider extends AllocationDecider { public static final String NAME = "decommission"; private Set<String> decommissioningNodes = Set.of(); private DataAvailability dataAvailability; public DecommissionAllocationDecider(Settings settings, ClusterSettings clusterSettings) { updateDecommissioningNodes(DecommissioningService.DECOMMISSION_INTERNAL_SETTING_GROUP.get(settings)); dataAvailability = DecommissioningService.GRACEFUL_STOP_MIN_AVAILABILITY_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer( DecommissioningService.DECOMMISSION_INTERNAL_SETTING_GROUP, this::updateDecommissioningNodes); clusterSettings.addSettingsUpdateConsumer( DecommissioningService.GRACEFUL_STOP_MIN_AVAILABILITY_SETTING, this::updateMinAvailability); } private void updateDecommissioningNodes(Settings decommissionNodesSettings) { decommissioningNodes = decommissionNodesSettings.keySet(); } private void updateMinAvailability(DataAvailability availability) { dataAvailability = availability; } @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {<FILL_FUNCTION_BODY>} @Override public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { return canRemainOrAllocate(node.nodeId(), shardRouting, allocation); } private Decision canRemainOrAllocate(String nodeId, ShardRouting shardRouting, RoutingAllocation allocation) { if (dataAvailability == DataAvailability.NONE) { return allocation.decision(Decision.YES, NAME, "dataAvailability=none"); } if (decommissioningNodes.contains(nodeId)) { if (dataAvailability == DataAvailability.PRIMARIES && !shardRouting.primary()) { return allocation.decision(Decision.YES, NAME, "dataAvailability=primaries shard=replica decommissioned=true"); } return allocation.decision(Decision.NO, NAME, "node is being decommissioned"); } return allocation.decision(Decision.YES, NAME, "node isn't decommissioned"); } }
if (decommissioningNodes.contains(node.nodeId()) && dataAvailability == DataAvailability.PRIMARIES && !shardRouting.primary()) { // if primaries are removed from this node it will try to re-balance non-primaries onto this node // prevent this - replicas that are already here can remain, but no new replicas should be assigned return allocation.decision(Decision.NO, NAME, "dataAvailability=primaries, shard=replica, decommissioned=true"); } return canRemainOrAllocate(node.nodeId(), shardRouting, allocation);
629
157
786
<methods>public org.elasticsearch.cluster.routing.allocation.decider.Decision canAllocate(org.elasticsearch.cluster.routing.ShardRouting, org.elasticsearch.cluster.routing.RoutingNode, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision canAllocate(org.elasticsearch.cluster.routing.ShardRouting, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision canAllocate(org.elasticsearch.cluster.metadata.IndexMetadata, org.elasticsearch.cluster.routing.RoutingNode, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision canForceAllocatePrimary(org.elasticsearch.cluster.routing.ShardRouting, org.elasticsearch.cluster.routing.RoutingNode, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision canRebalance(org.elasticsearch.cluster.routing.ShardRouting, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision canRebalance(org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision canRemain(org.elasticsearch.cluster.routing.ShardRouting, org.elasticsearch.cluster.routing.RoutingNode, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) ,public org.elasticsearch.cluster.routing.allocation.decider.Decision shouldAutoExpandToNode(org.elasticsearch.cluster.metadata.IndexMetadata, org.elasticsearch.cluster.node.DiscoveryNode, org.elasticsearch.cluster.routing.allocation.RoutingAllocation) <variables>
crate_crate
crate/server/src/main/java/io/crate/exceptions/JobKilledException.java
JobKilledException
of
class JobKilledException extends ElasticsearchException implements UnscopedException { public static final String MESSAGE = "Job killed"; public static JobKilledException of(@Nullable String reason) {<FILL_FUNCTION_BODY>} public JobKilledException(final StreamInput in) throws IOException { super(in); } private JobKilledException(String reason) { super("Job killed. " + reason); } private JobKilledException() { super(MESSAGE); } @Override public HttpErrorStatus httpErrorStatus() { return HttpErrorStatus.QUERY_KILLED_BY_STATEMENT; } }
return reason == null ? new JobKilledException() : new JobKilledException(reason);
174
25
199
<methods>public void <init>(java.lang.Throwable) ,public transient void <init>(java.lang.String, java.lang.Object[]) ,public transient void <init>(java.lang.String, java.lang.Throwable, java.lang.Object[]) ,public void <init>(org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public void addHeader(java.lang.String, List<java.lang.String>) ,public transient void addMetadata(java.lang.String, java.lang.String[]) ,public void addMetadata(java.lang.String, List<java.lang.String>) ,public static org.elasticsearch.ElasticsearchException fromXContent(org.elasticsearch.common.xcontent.XContentParser) throws java.io.IOException,public static void generateThrowableXContent(org.elasticsearch.common.xcontent.XContentBuilder, org.elasticsearch.common.xcontent.ToXContent.Params, java.lang.Throwable) throws java.io.IOException,public java.lang.String getDetailedMessage() ,public static java.lang.String getExceptionName(java.lang.Throwable) ,public List<java.lang.String> getHeader(java.lang.String) ,public Set<java.lang.String> getHeaderKeys() ,public static int getId(Class<? extends org.elasticsearch.ElasticsearchException>) ,public org.elasticsearch.index.Index getIndex() ,public List<java.lang.String> getMetadata(java.lang.String) ,public Set<java.lang.String> getMetadataKeys() ,public java.lang.Throwable getRootCause() ,public org.elasticsearch.index.shard.ShardId getShardId() ,public io.crate.rest.action.HttpErrorStatus httpErrorStatus() ,public static org.elasticsearch.ElasticsearchException innerFromXContent(org.elasticsearch.common.xcontent.XContentParser, boolean) throws java.io.IOException,public static boolean isRegistered(Class<? 
extends java.lang.Throwable>, org.elasticsearch.Version) ,public io.crate.protocols.postgres.PGErrorStatus pgErrorStatus() ,public static org.elasticsearch.ElasticsearchException readException(org.elasticsearch.common.io.stream.StreamInput, int) throws java.io.IOException,public static T readStackTrace(T, org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public void setIndex(org.elasticsearch.index.Index) ,public void setIndex(java.lang.String) ,public transient void setResources(java.lang.String, java.lang.String[]) ,public void setShard(org.elasticsearch.index.shard.ShardId) ,public org.elasticsearch.rest.RestStatus status() ,public java.lang.String toString() ,public org.elasticsearch.common.xcontent.XContentBuilder toXContent(org.elasticsearch.common.xcontent.XContentBuilder, org.elasticsearch.common.xcontent.ToXContent.Params) throws java.io.IOException,public java.lang.Throwable unwrapCause() ,public static T writeStackTraces(T, org.elasticsearch.common.io.stream.StreamOutput, Writer<java.lang.Throwable>) throws java.io.IOException,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>private static final java.lang.String CAUSED_BY,private static final non-sealed Map<Class<? extends org.elasticsearch.ElasticsearchException>,org.elasticsearch.ElasticsearchException.ElasticsearchExceptionHandle> CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE,private static final java.lang.String HEADER,private static final non-sealed Map<java.lang.Integer,CheckedFunction<org.elasticsearch.common.io.stream.StreamInput,? 
extends org.elasticsearch.ElasticsearchException,java.io.IOException>> ID_TO_SUPPLIER,private static final java.lang.String INDEX_METADATA_KEY,private static final java.lang.String INDEX_METADATA_KEY_UUID,private static final java.lang.String REASON,private static final java.lang.String RESOURCE_METADATA_ID_KEY,private static final java.lang.String RESOURCE_METADATA_TYPE_KEY,private static final java.lang.String REST_EXCEPTION_SKIP_CAUSE,private static final boolean REST_EXCEPTION_SKIP_CAUSE_DEFAULT,public static final java.lang.String REST_EXCEPTION_SKIP_STACK_TRACE,public static final boolean REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT,private static final java.lang.String ROOT_CAUSE,private static final java.lang.String SHARD_METADATA_KEY,private static final java.lang.String STACK_TRACE,private static final org.elasticsearch.common.xcontent.ParseField SUPPRESSED,private static final java.lang.String TYPE,private static final org.elasticsearch.Version UNKNOWN_VERSION_ADDED,private final Map<java.lang.String,List<java.lang.String>> headers,private final Map<java.lang.String,List<java.lang.String>> metadata
crate_crate
crate/server/src/main/java/io/crate/exceptions/RelationUnknown.java
RelationUnknown
of
class RelationUnknown extends ElasticsearchException implements ResourceUnknownException, TableScopeException { private final RelationName relationName; public static RelationUnknown of(String relation, List<String> candidates) {<FILL_FUNCTION_BODY>} public RelationUnknown(String tableName, Throwable e) { super(String.format(Locale.ENGLISH, "Relation '%s' unknown", tableName), e); this.relationName = RelationName.fromIndexName(tableName); } public RelationUnknown(String tableName) { this(RelationName.fromIndexName(tableName), String.format(Locale.ENGLISH, "Relation '%s' unknown", tableName)); } public RelationUnknown(RelationName relationName) { this(relationName, String.format(Locale.ENGLISH, "Relation '%s' unknown", relationName)); } private RelationUnknown(RelationName relationName, String errorMessage) { super(errorMessage); this.relationName = relationName; } public RelationUnknown(StreamInput in) throws IOException { super(in); relationName = new RelationName(in); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); relationName.writeTo(out); } @Override public Collection<RelationName> getTableIdents() { return Collections.singletonList(relationName); } @Override public HttpErrorStatus httpErrorStatus() { return HttpErrorStatus.RELATION_UNKNOWN; } }
switch (candidates.size()) { case 0: return new RelationUnknown(relation); case 1: { var name = RelationName.fromIndexName(relation); var msg = "Relation '" + relation + "' unknown. Maybe you meant '" + Identifiers.quoteIfNeeded(candidates.get(0)) + "'"; return new RelationUnknown(name, msg); } default: { var name = RelationName.fromIndexName(relation); var msg = "Relation '" + relation + "' unknown. Maybe you meant one of: " + String.join(", ", Lists.map(candidates, Identifiers::quoteIfNeeded)); return new RelationUnknown(name, msg); } }
418
193
611
<methods>public void <init>(java.lang.Throwable) ,public transient void <init>(java.lang.String, java.lang.Object[]) ,public transient void <init>(java.lang.String, java.lang.Throwable, java.lang.Object[]) ,public void <init>(org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public void addHeader(java.lang.String, List<java.lang.String>) ,public transient void addMetadata(java.lang.String, java.lang.String[]) ,public void addMetadata(java.lang.String, List<java.lang.String>) ,public static org.elasticsearch.ElasticsearchException fromXContent(org.elasticsearch.common.xcontent.XContentParser) throws java.io.IOException,public static void generateThrowableXContent(org.elasticsearch.common.xcontent.XContentBuilder, org.elasticsearch.common.xcontent.ToXContent.Params, java.lang.Throwable) throws java.io.IOException,public java.lang.String getDetailedMessage() ,public static java.lang.String getExceptionName(java.lang.Throwable) ,public List<java.lang.String> getHeader(java.lang.String) ,public Set<java.lang.String> getHeaderKeys() ,public static int getId(Class<? extends org.elasticsearch.ElasticsearchException>) ,public org.elasticsearch.index.Index getIndex() ,public List<java.lang.String> getMetadata(java.lang.String) ,public Set<java.lang.String> getMetadataKeys() ,public java.lang.Throwable getRootCause() ,public org.elasticsearch.index.shard.ShardId getShardId() ,public io.crate.rest.action.HttpErrorStatus httpErrorStatus() ,public static org.elasticsearch.ElasticsearchException innerFromXContent(org.elasticsearch.common.xcontent.XContentParser, boolean) throws java.io.IOException,public static boolean isRegistered(Class<? 
extends java.lang.Throwable>, org.elasticsearch.Version) ,public io.crate.protocols.postgres.PGErrorStatus pgErrorStatus() ,public static org.elasticsearch.ElasticsearchException readException(org.elasticsearch.common.io.stream.StreamInput, int) throws java.io.IOException,public static T readStackTrace(T, org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public void setIndex(org.elasticsearch.index.Index) ,public void setIndex(java.lang.String) ,public transient void setResources(java.lang.String, java.lang.String[]) ,public void setShard(org.elasticsearch.index.shard.ShardId) ,public org.elasticsearch.rest.RestStatus status() ,public java.lang.String toString() ,public org.elasticsearch.common.xcontent.XContentBuilder toXContent(org.elasticsearch.common.xcontent.XContentBuilder, org.elasticsearch.common.xcontent.ToXContent.Params) throws java.io.IOException,public java.lang.Throwable unwrapCause() ,public static T writeStackTraces(T, org.elasticsearch.common.io.stream.StreamOutput, Writer<java.lang.Throwable>) throws java.io.IOException,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>private static final java.lang.String CAUSED_BY,private static final non-sealed Map<Class<? extends org.elasticsearch.ElasticsearchException>,org.elasticsearch.ElasticsearchException.ElasticsearchExceptionHandle> CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE,private static final java.lang.String HEADER,private static final non-sealed Map<java.lang.Integer,CheckedFunction<org.elasticsearch.common.io.stream.StreamInput,? 
extends org.elasticsearch.ElasticsearchException,java.io.IOException>> ID_TO_SUPPLIER,private static final java.lang.String INDEX_METADATA_KEY,private static final java.lang.String INDEX_METADATA_KEY_UUID,private static final java.lang.String REASON,private static final java.lang.String RESOURCE_METADATA_ID_KEY,private static final java.lang.String RESOURCE_METADATA_TYPE_KEY,private static final java.lang.String REST_EXCEPTION_SKIP_CAUSE,private static final boolean REST_EXCEPTION_SKIP_CAUSE_DEFAULT,public static final java.lang.String REST_EXCEPTION_SKIP_STACK_TRACE,public static final boolean REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT,private static final java.lang.String ROOT_CAUSE,private static final java.lang.String SHARD_METADATA_KEY,private static final java.lang.String STACK_TRACE,private static final org.elasticsearch.common.xcontent.ParseField SUPPRESSED,private static final java.lang.String TYPE,private static final org.elasticsearch.Version UNKNOWN_VERSION_ADDED,private final Map<java.lang.String,List<java.lang.String>> headers,private final Map<java.lang.String,List<java.lang.String>> metadata
crate_crate
crate/server/src/main/java/io/crate/exceptions/SchemaUnknownException.java
SchemaUnknownException
of
class SchemaUnknownException extends ElasticsearchException implements ResourceUnknownException, SchemaScopeException { private static final String MESSAGE_TMPL = "Schema '%s' unknown"; private final String schemaName; public static SchemaUnknownException of(String schema, List<String> candidates) {<FILL_FUNCTION_BODY>} public SchemaUnknownException(String schema) { this(schema, String.format(Locale.ENGLISH, MESSAGE_TMPL, schema)); } private SchemaUnknownException(String schema, String errorMessage) { super(errorMessage); this.schemaName = schema; } public SchemaUnknownException(StreamInput in) throws IOException { super(in); schemaName = in.readString(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(schemaName); } @Override public String getSchemaName() { return schemaName; } @Override public HttpErrorStatus httpErrorStatus() { return HttpErrorStatus.SCHEMA_UNKNOWN; } }
switch (candidates.size()) { case 0: return new SchemaUnknownException(schema); case 1: return new SchemaUnknownException( schema, "Schema '" + schema + "' unknown. Maybe you meant '" + Identifiers.quoteIfNeeded(candidates.get(0)) + "'"); default: String errorMsg = "Schema '" + schema + "' unknown. Maybe you meant one of: " + candidates.stream() .map(Identifiers::quoteIfNeeded) .collect(Collectors.joining(", ")); return new SchemaUnknownException(schema, errorMsg); }
300
162
462
<methods>public void <init>(java.lang.Throwable) ,public transient void <init>(java.lang.String, java.lang.Object[]) ,public transient void <init>(java.lang.String, java.lang.Throwable, java.lang.Object[]) ,public void <init>(org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public void addHeader(java.lang.String, List<java.lang.String>) ,public transient void addMetadata(java.lang.String, java.lang.String[]) ,public void addMetadata(java.lang.String, List<java.lang.String>) ,public static org.elasticsearch.ElasticsearchException fromXContent(org.elasticsearch.common.xcontent.XContentParser) throws java.io.IOException,public static void generateThrowableXContent(org.elasticsearch.common.xcontent.XContentBuilder, org.elasticsearch.common.xcontent.ToXContent.Params, java.lang.Throwable) throws java.io.IOException,public java.lang.String getDetailedMessage() ,public static java.lang.String getExceptionName(java.lang.Throwable) ,public List<java.lang.String> getHeader(java.lang.String) ,public Set<java.lang.String> getHeaderKeys() ,public static int getId(Class<? extends org.elasticsearch.ElasticsearchException>) ,public org.elasticsearch.index.Index getIndex() ,public List<java.lang.String> getMetadata(java.lang.String) ,public Set<java.lang.String> getMetadataKeys() ,public java.lang.Throwable getRootCause() ,public org.elasticsearch.index.shard.ShardId getShardId() ,public io.crate.rest.action.HttpErrorStatus httpErrorStatus() ,public static org.elasticsearch.ElasticsearchException innerFromXContent(org.elasticsearch.common.xcontent.XContentParser, boolean) throws java.io.IOException,public static boolean isRegistered(Class<? 
extends java.lang.Throwable>, org.elasticsearch.Version) ,public io.crate.protocols.postgres.PGErrorStatus pgErrorStatus() ,public static org.elasticsearch.ElasticsearchException readException(org.elasticsearch.common.io.stream.StreamInput, int) throws java.io.IOException,public static T readStackTrace(T, org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public void setIndex(org.elasticsearch.index.Index) ,public void setIndex(java.lang.String) ,public transient void setResources(java.lang.String, java.lang.String[]) ,public void setShard(org.elasticsearch.index.shard.ShardId) ,public org.elasticsearch.rest.RestStatus status() ,public java.lang.String toString() ,public org.elasticsearch.common.xcontent.XContentBuilder toXContent(org.elasticsearch.common.xcontent.XContentBuilder, org.elasticsearch.common.xcontent.ToXContent.Params) throws java.io.IOException,public java.lang.Throwable unwrapCause() ,public static T writeStackTraces(T, org.elasticsearch.common.io.stream.StreamOutput, Writer<java.lang.Throwable>) throws java.io.IOException,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>private static final java.lang.String CAUSED_BY,private static final non-sealed Map<Class<? extends org.elasticsearch.ElasticsearchException>,org.elasticsearch.ElasticsearchException.ElasticsearchExceptionHandle> CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE,private static final java.lang.String HEADER,private static final non-sealed Map<java.lang.Integer,CheckedFunction<org.elasticsearch.common.io.stream.StreamInput,? 
extends org.elasticsearch.ElasticsearchException,java.io.IOException>> ID_TO_SUPPLIER,private static final java.lang.String INDEX_METADATA_KEY,private static final java.lang.String INDEX_METADATA_KEY_UUID,private static final java.lang.String REASON,private static final java.lang.String RESOURCE_METADATA_ID_KEY,private static final java.lang.String RESOURCE_METADATA_TYPE_KEY,private static final java.lang.String REST_EXCEPTION_SKIP_CAUSE,private static final boolean REST_EXCEPTION_SKIP_CAUSE_DEFAULT,public static final java.lang.String REST_EXCEPTION_SKIP_STACK_TRACE,public static final boolean REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT,private static final java.lang.String ROOT_CAUSE,private static final java.lang.String SHARD_METADATA_KEY,private static final java.lang.String STACK_TRACE,private static final org.elasticsearch.common.xcontent.ParseField SUPPRESSED,private static final java.lang.String TYPE,private static final org.elasticsearch.Version UNKNOWN_VERSION_ADDED,private final Map<java.lang.String,List<java.lang.String>> headers,private final Map<java.lang.String,List<java.lang.String>> metadata