proj_name
stringclasses 131
values | relative_path
stringlengths 30
228
| class_name
stringlengths 1
68
| func_name
stringlengths 1
48
| masked_class
stringlengths 78
9.82k
| func_body
stringlengths 46
9.61k
| len_input
int64 29
2.01k
| len_output
int64 14
1.94k
| total
int64 55
2.05k
| relevant_context
stringlengths 0
38.4k
|
|---|---|---|---|---|---|---|---|---|---|
crate_crate
|
crate/server/src/main/java/io/crate/metadata/pgcatalog/PgDepend.java
|
PgDepend
|
create
|
class PgDepend {
public static final RelationName NAME = new RelationName(PgCatalogSchemaInfo.NAME, "pg_depend");
private PgDepend() {}
public static SystemTable<Void> create() {<FILL_FUNCTION_BODY>}
}
|
// https://www.postgresql.org/docs/current/catalog-pg-depend.html
return SystemTable.<Void>builder(NAME)
.add("classid", DataTypes.INTEGER, c -> null)
.add("objid", DataTypes.INTEGER, c -> null)
.add("objsubid", DataTypes.INTEGER, c -> null)
.add("refclassid", DataTypes.INTEGER, c -> null)
.add("refobjid", DataTypes.INTEGER, c -> null)
.add("refobjsubid", DataTypes.INTEGER, c -> null)
.add("deptype", DataTypes.CHARACTER, c -> null)
.build();
| 73
| 193
| 266
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/metadata/pgcatalog/PgEnumTable.java
|
PgEnumTable
|
create
|
class PgEnumTable {
public static final RelationName IDENT = new RelationName(PgCatalogSchemaInfo.NAME, "pg_enum");
private PgEnumTable() {}
public static SystemTable<Void> create() {<FILL_FUNCTION_BODY>}
}
|
return SystemTable.<Void>builder(IDENT)
.add("oid", INTEGER, ignored -> null)
.add("enumtypid", INTEGER, ignored -> null)
.add("enumsortorder", DataTypes.FLOAT, ignored -> null)
.add("enumlabel", STRING, ignored -> null)
.build();
| 76
| 94
| 170
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/metadata/pgcatalog/PgIndexTable.java
|
PgIndexTable
|
create
|
class PgIndexTable {
public static final RelationName IDENT = new RelationName(PgCatalogSchemaInfo.NAME, "pg_index");
private PgIndexTable() {}
public static SystemTable<Entry> create() {<FILL_FUNCTION_BODY>}
public static final class Entry {
final Regclass indRelId;
final Regclass indexRelId;
final List<Integer> indKey;
public Entry(Regclass indRelId, Regclass indexRelId, List<Integer> indKey) {
this.indRelId = indRelId;
this.indexRelId = indexRelId;
this.indKey = indKey;
}
}
}
|
return SystemTable.<Entry>builder(IDENT)
.add("indexrelid", REGCLASS, x -> x.indexRelId)
.add("indrelid", REGCLASS, x -> x.indRelId)
.add("indnatts", SHORT, x -> (short) 0)
.add("indnkeyatts", SHORT, x -> (short) 0)
.add("indisunique", BOOLEAN, x -> false)
.add("indisprimary", BOOLEAN, x -> true)
.add("indisexclusion", BOOLEAN, x -> false)
.add("indimmediate", BOOLEAN, x -> true)
.add("indisclustered", BOOLEAN, x -> false)
.add("indisvalid", BOOLEAN, x -> true)
.add("indcheckxmin", BOOLEAN, x -> false)
.add("indisready", BOOLEAN, x -> true)
.add("indislive", BOOLEAN, x -> true)
.add("indisreplident", BOOLEAN, x -> false)
.add("indkey", INTEGER_ARRAY, x -> x.indKey)
.add("indcollation", INTEGER_ARRAY, x -> null)
.add("indclass", INTEGER_ARRAY, x -> null)
.add("indoption", SHORT_ARRAY, x -> null)
.add("indexprs", STRING, x -> null)
.add("indpred", STRING, x -> null)
.build();
| 179
| 421
| 600
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/metadata/pgcatalog/PgLocksTable.java
|
PgLocksTable
|
create
|
class PgLocksTable {
public static final RelationName IDENT = new RelationName(PgCatalogSchemaInfo.NAME, "pg_locks");
private PgLocksTable() {}
public static SystemTable<Void> create() {<FILL_FUNCTION_BODY>}
}
|
return SystemTable.<Void>builder(IDENT)
.add("locktype", DataTypes.STRING, x -> null)
.add("database", DataTypes.INTEGER, x -> null)
.add("relation", DataTypes.INTEGER, x -> null)
.add("page", DataTypes.INTEGER, x -> null)
.add("tuple", DataTypes.SHORT, x -> null)
.add("virtualxid", DataTypes.STRING, x -> null)
.add("transactionid", DataTypes.INTEGER, x -> null)
.add("classid", DataTypes.INTEGER, x -> null)
.add("objid", DataTypes.STRING, x -> null)
.add("objsubid", DataTypes.SHORT, x -> null)
.add("virtualtransaction", DataTypes.STRING, x -> null)
.add("pid", DataTypes.INTEGER, x -> null)
.add("mode", DataTypes.STRING, x -> null)
.add("granted", DataTypes.BOOLEAN, x -> null)
.add("fastpath", DataTypes.BOOLEAN, x -> null)
.add("waitstart", DataTypes.TIMESTAMPZ, x -> null)
.build();
| 78
| 329
| 407
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/metadata/pgcatalog/PgSettingsTable.java
|
PgSettingsTable
|
create
|
class PgSettingsTable {
public static final RelationName IDENT = new RelationName(PgCatalogSchemaInfo.NAME, "pg_settings");
private PgSettingsTable() {}
public static SystemTable<NamedSessionSetting> create() {<FILL_FUNCTION_BODY>}
}
|
return SystemTable.<NamedSessionSetting>builder(IDENT)
.add("name", STRING, NamedSessionSetting::name)
.add("setting", STRING, NamedSessionSetting::value)
.add("unit", STRING, c -> null)
.add("category", STRING, c -> null)
.add("short_desc", STRING, NamedSessionSetting::description)
.add("extra_desc", STRING, c -> null)
.add("context", STRING, c -> null)
.add("vartype", STRING, c -> c.type().getName())
.add("source", STRING, c -> null)
.add("enumvals", STRING_ARRAY, c -> null)
.add("min_val", STRING, c -> null)
.add("max_val", STRING, c -> null)
.add("boot_val", STRING, NamedSessionSetting::defaultValue)
.add("reset_val", STRING, NamedSessionSetting::defaultValue)
.add("sourcefile", STRING, c -> null)
.add("sourceline", INTEGER, c -> null)
.add("pending_restart", BOOLEAN, c -> null)
.build();
| 77
| 322
| 399
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/metadata/pgcatalog/PgShdescriptionTable.java
|
PgShdescriptionTable
|
create
|
class PgShdescriptionTable {
public static final RelationName IDENT = new RelationName(PgCatalogSchemaInfo.NAME, "pg_shdescription");
private PgShdescriptionTable() {}
public static SystemTable<Void> create() {<FILL_FUNCTION_BODY>}
}
|
return SystemTable.<Void>builder(IDENT)
.add("objoid", INTEGER, c -> null)
.add("classoid", INTEGER, c -> null)
.add("description", STRING , c -> null)
.build();
| 79
| 71
| 150
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/metadata/settings/Validators.java
|
StrictStringValidator
|
validate
|
class StrictStringValidator implements Setting.Validator<String> {
private final String key;
StrictStringValidator(String key) {
this.key = key;
}
@Override
public void validate(String value) {<FILL_FUNCTION_BODY>}
}
|
if (Booleans.isBoolean(value)) {
throw new IllegalArgumentException(INVALID_MESSAGE + key + "'");
}
try {
Long.parseLong(value);
throw new IllegalArgumentException(INVALID_MESSAGE + key + "'");
} catch (NumberFormatException e) {
// pass, not a number
}
| 75
| 91
| 166
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/metadata/shard/unassigned/UnassignedShard.java
|
UnassignedShard
|
markAssigned
|
class UnassignedShard {
public static boolean isUnassigned(int shardId) {
return shardId < 0;
}
/**
* valid shard ids are vom 0 to int.max
* this method negates a shard id (0 becomes -1, 1 becomes -2, etc.)
* <p>
* if the given id is already negative it is returned as is
*/
public static int markUnassigned(int id) {
if (id >= 0) {
return (id + 1) * -1;
}
return id;
}
/**
* converts negative shard ids back to positive
* (-1 becomes 0, -2 becomes 1 - the opposite of {@link #markUnassigned(int)}
*/
public static int markAssigned(int shard) {<FILL_FUNCTION_BODY>}
private final String schemaName;
private final String tableName;
private final Boolean primary;
private final int id;
private final String partitionIdent;
private final String state;
private final boolean orphanedPartition;
private static final String UNASSIGNED = "UNASSIGNED";
private static final String INITIALIZING = "INITIALIZING";
public UnassignedShard(int shardId,
String indexName,
ClusterService clusterService,
Boolean primary,
ShardRoutingState state) {
IndexParts indexParts = new IndexParts(indexName);
this.schemaName = indexParts.getSchema();
this.tableName = indexParts.getTable();
this.partitionIdent = indexParts.getPartitionIdent();
this.orphanedPartition = indexParts.isPartitioned()
&& !clusterService.state().metadata().hasConcreteIndex(tableName);
this.primary = primary;
this.id = shardId;
this.state = state == ShardRoutingState.UNASSIGNED ? UNASSIGNED : INITIALIZING;
}
public String tableName() {
return tableName;
}
public int id() {
return id;
}
public String schemaName() {
return schemaName;
}
public String partitionIdent() {
return partitionIdent;
}
public Boolean primary() {
return primary;
}
public String state() {
return state;
}
public Boolean orphanedPartition() {
return orphanedPartition;
}
}
|
if (shard < 0) {
return (shard * -1) - 1;
}
return shard;
| 637
| 36
| 673
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/metadata/sys/ClassifiedMetrics.java
|
Metrics
|
recordValue
|
class Metrics {
private final Classification classification;
private final LongAdder sumOfDurations = new LongAdder();
private final LongAdder failedCount = new LongAdder();
private final Recorder recorder;
private final Histogram totalHistogram = new Histogram(HIGHEST_TRACKABLE_VALUE, NUMBER_OF_SIGNIFICANT_VALUE_DIGITS);
public Metrics(Classification classification) {
this.classification = classification;
this.recorder = new Recorder(HIGHEST_TRACKABLE_VALUE, NUMBER_OF_SIGNIFICANT_VALUE_DIGITS);
}
public void recordValue(long duration) {<FILL_FUNCTION_BODY>}
public void recordFailedExecution(long duration) {
recordValue(duration);
failedCount.increment();
}
public MetricsView createMetricsView() {
Histogram histogram;
synchronized (totalHistogram) {
// getIntervalHistogram resets the internal histogram afterwards;
// so we keep `totalHistogram` to not lose any measurements.
Histogram intervalHistogram = recorder.getIntervalHistogram();
totalHistogram.add(intervalHistogram);
histogram = totalHistogram.copy();
}
return new MetricsView(
histogram,
sumOfDurations.longValue(),
failedCount.longValue(),
classification
);
}
}
|
// We use start and end time to calculate the duration (since we track them anyway)
// If the system time is adjusted this can lead to negative durations
// so we protect here against it.
recorder.recordValue(Math.min(Math.max(0, duration), HIGHEST_TRACKABLE_VALUE));
// we record the real duration (with no upper capping) in the sum of durations as there are no upper limits
// for the values we record as it is the case with the histogram
sumOfDurations.add(Math.max(0, duration));
| 368
| 142
| 510
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/metadata/sys/SysJobsTableInfo.java
|
SysJobsTableInfo
|
create
|
class SysJobsTableInfo {
public static final RelationName IDENT = new RelationName(SysSchemaInfo.NAME, "jobs");
public static SystemTable<JobContext> create(Supplier<DiscoveryNode> localNode) {<FILL_FUNCTION_BODY>}
}
|
return SystemTable.<JobContext>builder(IDENT)
.add("id", STRING, c -> c.id().toString())
.add("username", STRING, JobContext::username)
.startObject("node")
.add("id", STRING, ignored -> localNode.get().getId())
.add("name", STRING, ignored -> localNode.get().getName())
.endObject()
.add("stmt", STRING, JobContext::stmt)
.add("started", TIMESTAMPZ, JobContext::started)
.withRouting((state, routingProvider, sessionSettings) -> Routing.forTableOnAllNodes(IDENT, state.nodes()))
.setPrimaryKeys(new ColumnIdent("id"))
.build();
| 75
| 190
| 265
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/metadata/sys/SysMetricsTableInfo.java
|
SysMetricsTableInfo
|
create
|
class SysMetricsTableInfo {
public static final RelationName NAME = new RelationName(SysSchemaInfo.NAME, "jobs_metrics");
public static SystemTable<MetricsView> create(Supplier<DiscoveryNode> localNode) {<FILL_FUNCTION_BODY>}
}
|
return SystemTable.<MetricsView>builder(NAME)
.add("total_count", LONG, MetricsView::totalCount)
.add("sum_of_durations", LONG, MetricsView::sumOfDurations)
.add("failed_count", LONG, MetricsView::failedCount)
.add("mean", DOUBLE, MetricsView::mean)
.add("stdev", DOUBLE, MetricsView::stdDeviation)
.add("max", LONG, MetricsView::maxValue)
.add("min", LONG, MetricsView::minValue)
.startObject("percentiles")
.add("25", LONG, x -> x.getValueAtPercentile(25.0))
.add("50", LONG, x -> x.getValueAtPercentile(50.0))
.add("75", LONG, x -> x.getValueAtPercentile(75.0))
.add("90", LONG, x -> x.getValueAtPercentile(90.0))
.add("95", LONG, x -> x.getValueAtPercentile(95.0))
.add("99", LONG, x -> x.getValueAtPercentile(99.0))
.endObject()
.startObject("node")
.add("id", STRING, ignored -> localNode.get().getId())
.add("name", STRING, ignored -> localNode.get().getName())
.endObject()
.startObject("classification")
.add("type", STRING, x -> x.classification().type().name())
.add("labels", STRING_ARRAY, x -> List.copyOf(x.classification().labels()))
.endObject()
.withRouting((state, routingProvider, sessionSettings) -> Routing.forTableOnAllNodes(NAME, state.nodes()))
.build();
| 76
| 485
| 561
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/metadata/sys/SysRepositoriesTableInfo.java
|
SysRepositoriesTableInfo
|
create
|
class SysRepositoriesTableInfo {
public static final RelationName IDENT = new RelationName(SysSchemaInfo.NAME, "repositories");
public static SystemTable<Repository> create(List<Setting<?>> maskedSettings) {<FILL_FUNCTION_BODY>}
}
|
var maskedSettingNames = maskedSettings.stream().map(Setting::getKey).collect(Collectors.toSet());
return SystemTable.<Repository>builder(IDENT)
.add("name", STRING, (Repository r) -> r.getMetadata().name())
.add("type", STRING, (Repository r) -> r.getMetadata().type())
.addDynamicObject("settings", STRING, r -> r.getMetadata().settings().getAsStructuredMap(maskedSettingNames))
.setPrimaryKeys(new ColumnIdent("name"))
.withRouting((state, routingProvider, sessionSettings) -> routingProvider.forRandomMasterOrDataNode(IDENT, state.nodes()))
.build();
| 77
| 175
| 252
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/metadata/sys/SysTableDefinitions.java
|
SysTableDefinitions
|
registerTableDefinition
|
class SysTableDefinitions {
private final Map<RelationName, StaticTableDefinition<?>> tableDefinitions = new HashMap<>();
@Inject
public SysTableDefinitions(ClusterService clusterService,
Roles roles,
JobsLogs jobsLogs,
SysSchemaInfo sysSchemaInfo,
Set<SysCheck> sysChecks,
SysNodeChecks sysNodeChecks,
RepositoriesService repositoriesService,
SysSnapshots sysSnapshots,
SysAllocations sysAllocations,
ShardSegments shardSegmentInfos) {
Supplier<DiscoveryNode> localNode = clusterService::localNode;
var sysClusterTableInfo = (SystemTable<Void>) sysSchemaInfo.getTableInfo(SysClusterTableInfo.IDENT.name());
assert sysClusterTableInfo != null : "sys.cluster table must exist in sys schema";
tableDefinitions.put(SysClusterTableInfo.IDENT, new StaticTableDefinition<>(
() -> completedFuture(Collections.singletonList(null)),
sysClusterTableInfo.expressions(),
false
));
var sysJobsTable = SysJobsTableInfo.create(localNode);
tableDefinitions.put(SysJobsTableInfo.IDENT, new StaticTableDefinition<>(
(txnCtx, user) -> completedFuture(
() -> StreamSupport.stream(jobsLogs.activeJobs().spliterator(), false)
.filter(x ->
user.isSuperUser()
|| user.name().equals(x.username())
|| roles.hasPrivilege(user, Permission.AL, Securable.CLUSTER, null))
.iterator()
),
sysJobsTable.expressions(),
false));
var sysJobsLogTable = SysJobsLogTableInfo.create(localNode);
tableDefinitions.put(SysJobsLogTableInfo.IDENT, new StaticTableDefinition<>(
(txnCtx, user) -> completedFuture(
() -> StreamSupport.stream(jobsLogs.jobsLog().spliterator(), false)
.filter(x ->
user.isSuperUser()
|| user.name().equals(x.username())
|| roles.hasPrivilege(user, Permission.AL, Securable.CLUSTER, null))
.iterator()
),
sysJobsLogTable.expressions(),
false));
tableDefinitions.put(SysOperationsTableInfo.IDENT, new StaticTableDefinition<>(
() -> completedFuture(jobsLogs.activeOperations()),
SysOperationsTableInfo.create(localNode).expressions(),
false));
tableDefinitions.put(SysOperationsLogTableInfo.IDENT, new StaticTableDefinition<>(
() -> completedFuture(jobsLogs.operationsLog()),
SysOperationsLogTableInfo.create().expressions(),
false));
SysChecker<SysCheck> sysChecker = new SysChecker<>(sysChecks);
tableDefinitions.put(SysChecksTableInfo.IDENT, new StaticTableDefinition<>(
sysChecker::computeResultAndGet,
SysChecksTableInfo.create().expressions(),
true));
tableDefinitions.put(SysNodeChecksTableInfo.IDENT, new StaticTableDefinition<>(
() -> completedFuture(sysNodeChecks),
SysNodeChecksTableInfo.create().expressions(),
true));
tableDefinitions.put(SysRepositoriesTableInfo.IDENT, new StaticTableDefinition<>(
() -> completedFuture(repositoriesService.getRepositoriesList()),
SysRepositoriesTableInfo.create(clusterService.getClusterSettings().maskedSettings()).expressions(),
false));
tableDefinitions.put(SysSnapshotsTableInfo.IDENT, new StaticTableDefinition<>(
sysSnapshots::currentSnapshots,
SysSnapshotsTableInfo.create().expressions(),
true));
tableDefinitions.put(SysSnapshotRestoreTableInfo.IDENT, new StaticTableDefinition<>(
() -> completedFuture(SysSnapshotRestoreTableInfo.snapshotsRestoreInProgress(
clusterService.state().custom(RestoreInProgress.TYPE))
),
SysSnapshotRestoreTableInfo.create().expressions(),
true));
tableDefinitions.put(SysAllocationsTableInfo.IDENT, new StaticTableDefinition<>(
() -> sysAllocations,
(user, allocation) -> roles.hasAnyPrivilege(user, Securable.TABLE, allocation.fqn()),
SysAllocationsTableInfo.create().expressions()
));
SummitsIterable summits = new SummitsIterable();
tableDefinitions.put(SysSummitsTableInfo.IDENT, new StaticTableDefinition<>(
() -> completedFuture(summits),
SysSummitsTableInfo.create().expressions(),
false));
SystemTable<TableHealth> sysHealth = SysHealth.create();
tableDefinitions.put(SysHealth.IDENT, new StaticTableDefinition<>(
() -> TableHealth.compute(clusterService.state()),
sysHealth.expressions(),
(user, tableHealth) -> roles.hasAnyPrivilege(user, Securable.TABLE, tableHealth.fqn()),
true)
);
tableDefinitions.put(SysMetricsTableInfo.NAME, new StaticTableDefinition<>(
() -> completedFuture(jobsLogs.metrics()),
SysMetricsTableInfo.create(localNode).expressions(),
false));
tableDefinitions.put(SysSegmentsTableInfo.IDENT, new StaticTableDefinition<>(
() -> completedFuture(shardSegmentInfos),
SysSegmentsTableInfo.create(clusterService::localNode).expressions(),
true));
}
public StaticTableDefinition<?> get(RelationName relationName) {
return tableDefinitions.get(relationName);
}
public <R> void registerTableDefinition(RelationName relationName, StaticTableDefinition<R> definition) {<FILL_FUNCTION_BODY>}
}
|
StaticTableDefinition<?> existingDefinition = tableDefinitions.putIfAbsent(relationName, definition);
assert existingDefinition == null : "A static table definition is already registered for ident=" + relationName.toString();
| 1,554
| 56
| 1,610
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/metadata/tablefunctions/TableFunctionImplementation.java
|
TableFunctionImplementation
|
normalizeSymbol
|
class TableFunctionImplementation<T> extends Scalar<Iterable<Row>, T> {
protected TableFunctionImplementation(Signature signature, BoundSignature boundSignature) {
super(signature, boundSignature);
}
/**
* An ObjectType which describes the result of the table function.
*
* This can be the same as {@link #boundSignature()#returnType()},
* but if there is only a single inner type, then {@link #boundSignature()#returnType()} will return that inner-type directly.
*
* See the class documentation for more information about that behavior.
*/
public abstract RowType returnType();
/**
* @return true if the records returned by this table function are generated on-demand.
* See also {@link BatchIterator#hasLazyResultSet()}
*/
public abstract boolean hasLazyResultSet();
@Override
public Symbol normalizeSymbol(Function function, TransactionContext txnCtx, NodeContext nodeCtx) {<FILL_FUNCTION_BODY>}
}
|
// Never normalize table functions;
// The RelationAnalyzer expects a function symbol and can't deal with Literals
return function;
| 260
| 37
| 297
|
<methods>public BoundSignature boundSignature() ,public Scalar<Iterable<io.crate.data.Row>,T> compile(List<io.crate.expression.symbol.Symbol>, java.lang.String, io.crate.role.Roles) ,public transient abstract Iterable<io.crate.data.Row> evaluate(io.crate.metadata.TransactionContext, io.crate.metadata.NodeContext, Input<T>[]) ,public io.crate.expression.symbol.Symbol normalizeSymbol(io.crate.expression.symbol.Function, io.crate.metadata.TransactionContext, io.crate.metadata.NodeContext) ,public io.crate.metadata.functions.Signature signature() <variables>public static final Set<io.crate.metadata.Scalar.Feature> DETERMINISTIC_AND_COMPARISON_REPLACEMENT,public static final Set<io.crate.metadata.Scalar.Feature> DETERMINISTIC_ONLY,public static final Set<io.crate.metadata.Scalar.Feature> NO_FEATURES,protected final non-sealed BoundSignature boundSignature,protected final non-sealed io.crate.metadata.functions.Signature signature
|
crate_crate
|
crate/server/src/main/java/io/crate/monitor/FsInfoHelpers.java
|
Stats
|
bytesRead
|
class Stats {
public static Long readOperations(FsInfo.IoStats ioStats) {
if (ioStats != null) {
return ioStats.getTotalReadOperations();
}
return -1L;
}
public static Long bytesRead(FsInfo.IoStats ioStats) {<FILL_FUNCTION_BODY>}
public static Long writeOperations(FsInfo.IoStats ioStats) {
if (ioStats != null) {
return ioStats.getTotalWriteOperations();
}
return -1L;
}
public static Long bytesWritten(FsInfo.IoStats ioStats) {
if (ioStats != null) {
return ioStats.getTotalWriteKilobytes() * 1024L;
}
return -1L;
}
}
|
if (ioStats != null) {
return ioStats.getTotalReadKilobytes() * 1024L;
}
return -1L;
| 222
| 47
| 269
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/operation/collect/files/CSVLineParser.java
|
CSVLineParser
|
parse
|
class CSVLineParser {
private final ByteArrayOutputStream out = new ByteArrayOutputStream();
private final ArrayList<String> headerKeyList = new ArrayList<>();
private String[] columnNamesArray;
private final List<String> targetColumns;
private final ObjectReader csvReader;
public CSVLineParser(CopyFromParserProperties properties, List<String> columns) {
targetColumns = columns;
if (!properties.fileHeader()) {
columnNamesArray = new String[targetColumns.size()];
for (int i = 0; i < targetColumns.size(); i++) {
columnNamesArray[i] = targetColumns.get(i);
}
}
var mapper = new CsvMapper()
.enable(CsvParser.Feature.TRIM_SPACES);
if (properties.emptyStringAsNull()) {
mapper.enable(CsvParser.Feature.EMPTY_STRING_AS_NULL);
}
var csvSchema = mapper
.typedSchemaFor(String.class)
.withColumnSeparator(properties.columnSeparator());
csvReader = mapper
.readerWithTypedSchemaFor(Object.class)
.with(csvSchema);
}
public void parseHeader(String header) throws IOException {
MappingIterator<String> iterator = csvReader.readValues(header.getBytes(StandardCharsets.UTF_8));
iterator.readAll(headerKeyList);
columnNamesArray = new String[headerKeyList.size()];
for (int i = 0; i < headerKeyList.size(); i++) {
String headerKey = headerKeyList.get(i);
if (targetColumns.isEmpty() || targetColumns.contains(headerKey)) {
columnNamesArray[i] = headerKey;
}
}
HashSet<String> keySet = new HashSet<>(headerKeyList);
keySet.remove("");
if (keySet.size() != headerKeyList.size() || keySet.size() == 0) {
throw new IllegalArgumentException("Invalid header: duplicate entries or no entries present");
}
}
public byte[] parse(String row, long rowNumber) throws IOException {<FILL_FUNCTION_BODY>}
public byte[] parseWithoutHeader(String row, long rowNumber) throws IOException {
MappingIterator<String> iterator = csvReader.readValues(row.getBytes(StandardCharsets.UTF_8));
out.reset();
XContentBuilder jsonBuilder = new XContentBuilder(JsonXContent.JSON_XCONTENT, out).startObject();
int i = 0;
while (iterator.hasNext()) {
if (i >= columnNamesArray.length) {
break;
}
var key = columnNamesArray[i];
var value = iterator.next();
i++;
if (key != null) {
jsonBuilder.field(key, value);
}
}
if (columnNamesArray.length > i) {
throw new IllegalArgumentException(String.format(Locale.ENGLISH, "Expected %d values, " +
"encountered %d at line %d. This is not allowed when there " +
"is no header provided)",columnNamesArray.length, i, rowNumber));
}
jsonBuilder.endObject().close();
return out.toByteArray();
}
}
|
MappingIterator<Object> iterator = csvReader.readValues(row.getBytes(StandardCharsets.UTF_8));
out.reset();
XContentBuilder jsonBuilder = new XContentBuilder(JsonXContent.JSON_XCONTENT, out).startObject();
int i = 0, j = 0;
while (iterator.hasNext()) {
if (i >= headerKeyList.size()) {
throw new IllegalArgumentException(String.format(Locale.ENGLISH, "Number of values exceeds " +
"number of keys in csv file at line %d", rowNumber));
}
if (columnNamesArray.length == j || i >= columnNamesArray.length) {
break;
}
var key = columnNamesArray[i];
var value = iterator.next();
i++;
if (key != null) {
jsonBuilder.field(key, value);
j++;
}
}
jsonBuilder.endObject().close();
return out.toByteArray();
| 832
| 254
| 1,086
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/AnalyzePlan.java
|
AnalyzePlan
|
executeOrFail
|
class AnalyzePlan implements Plan {
AnalyzePlan() {
}
@Override
public StatementType type() {
return StatementType.MANAGEMENT;
}
@Override
public void executeOrFail(DependencyCarrier dependencies,
PlannerContext plannerContext,
RowConsumer consumer,
Row params,
SubQueryResults subQueryResults) throws Exception {<FILL_FUNCTION_BODY>}
}
|
OneRowActionListener<AcknowledgedResponse> listener = new OneRowActionListener<>(
consumer,
req -> req.isAcknowledged() ? new Row1(1L) : new Row1(0L));
dependencies.analyzeAction().fetchSamplesThenGenerateAndPublishStats().whenComplete(listener);
| 112
| 84
| 196
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/CreateServerPlan.java
|
CreateServerPlan
|
executeOrFail
|
class CreateServerPlan implements Plan {
private final ForeignDataWrappers foreignDataWrappers;
private final AnalyzedCreateServer createServer;
public CreateServerPlan(ForeignDataWrappers foreignDataWrappers,
AnalyzedCreateServer createServer) {
this.foreignDataWrappers = foreignDataWrappers;
this.createServer = createServer;
}
@Override
public StatementType type() {
return StatementType.DDL;
}
@Override
public void executeOrFail(DependencyCarrier dependencies,
PlannerContext plannerContext,
RowConsumer consumer,
Row params,
SubQueryResults subQueryResults) throws Exception {<FILL_FUNCTION_BODY>}
}
|
CoordinatorTxnCtx transactionContext = plannerContext.transactionContext();
Function<Symbol, Object> convert = new SymbolEvaluator(
transactionContext,
plannerContext.nodeContext(),
subQueryResults
).bind(params);
Settings.Builder optionsBuilder = Settings.builder();
Map<String, Symbol> options = new HashMap<>(createServer.options());
var foreignDataWrapper = foreignDataWrappers.get(createServer.fdw());
for (var option : foreignDataWrapper.mandatoryServerOptions()) {
String optionName = option.getKey();
Symbol symbol = options.remove(optionName);
if (symbol == null) {
throw new IllegalArgumentException(String.format(
Locale.ENGLISH,
"Mandatory server option `%s` for foreign data wrapper `%s` is missing",
optionName,
createServer.fdw()
));
}
optionsBuilder.put(optionName, convert.apply(symbol));
}
if (!options.isEmpty()) {
throw new IllegalArgumentException(String.format(
Locale.ENGLISH,
"Unsupported server options for foreign data wrapper `%s`: %s. Valid options are: %s",
createServer.fdw(),
String.join(", ", options.keySet()),
foreignDataWrapper.mandatoryServerOptions().stream()
.map(x -> x.getKey())
.collect(Collectors.joining(", "))
));
}
CreateServerRequest request = new CreateServerRequest(
createServer.name(),
createServer.fdw(),
transactionContext.sessionSettings().sessionUser().name(),
createServer.ifNotExists(),
optionsBuilder.build()
);
dependencies.client()
.execute(TransportCreateServerAction.ACTION, request)
.whenComplete(OneRowActionListener.oneIfAcknowledged(consumer));
| 186
| 474
| 660
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/DecommissionNodePlan.java
|
DecommissionNodePlan
|
boundNodeIdOrName
|
class DecommissionNodePlan implements Plan {
private final AnalyzedDecommissionNode analyzedDecommissionNode;
DecommissionNodePlan(AnalyzedDecommissionNode analyzedDecommissionNode) {
this.analyzedDecommissionNode = analyzedDecommissionNode;
}
@Override
public StatementType type() {
return StatementType.MANAGEMENT;
}
@Override
public void executeOrFail(DependencyCarrier dependencies,
PlannerContext plannerContext,
RowConsumer consumer,
Row params,
SubQueryResults subQueryResults) {
var boundedNodeIdOrName = boundNodeIdOrName(
analyzedDecommissionNode,
plannerContext.transactionContext(),
plannerContext.nodeContext(),
params,
subQueryResults);
String targetNodeId = NodeSelection.resolveNodeId(
dependencies.clusterService().state().nodes(),
boundedNodeIdOrName
);
dependencies.client()
.execute(DecommissionNodeAction.INSTANCE, DecommissionRequest.of(targetNodeId))
.whenComplete(new OneRowActionListener<>(
consumer,
r -> r.isAcknowledged() ? new Row1(1L) : new Row1(0L)));
}
@VisibleForTesting
public static String boundNodeIdOrName(AnalyzedDecommissionNode decommissionNode,
CoordinatorTxnCtx txnCtx,
NodeContext nodeCtx,
Row parameters,
SubQueryResults subQueryResults) {<FILL_FUNCTION_BODY>}
}
|
var boundedNodeIdOrName = SymbolEvaluator.evaluate(
txnCtx,
nodeCtx,
decommissionNode.nodeIdOrName(),
parameters,
subQueryResults
);
return DataTypes.STRING.sanitizeValue(boundedNodeIdOrName);
| 399
| 77
| 476
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/GCDangingArtifactsPlan.java
|
GCDangingArtifactsPlan
|
executeOrFail
|
class GCDangingArtifactsPlan implements Plan {
@Override
public StatementType type() {
return StatementType.MANAGEMENT;
}
@Override
public void executeOrFail(DependencyCarrier dependencies,
PlannerContext plannerContext,
RowConsumer consumer,
Row params,
SubQueryResults subQueryResults) {<FILL_FUNCTION_BODY>}
}
|
var listener = OneRowActionListener.oneIfAcknowledged(consumer);
DeleteIndexRequest deleteRequest = new DeleteIndexRequest(IndexParts.DANGLING_INDICES_PREFIX_PATTERNS.toArray(new String[0]));
dependencies.client().execute(DeleteIndexAction.INSTANCE, deleteRequest).whenComplete(listener);
| 103
| 90
| 193
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/MultiPhasePlan.java
|
MultiPhasePlan
|
executeOrFail
|
class MultiPhasePlan implements Plan {
@VisibleForTesting
protected final Plan rootPlan;
@VisibleForTesting
protected final Map<LogicalPlan, SelectSymbol> dependencies;
public static Plan createIfNeeded(Plan rootPlan, Map<LogicalPlan, SelectSymbol> dependencies) {
if (dependencies.isEmpty()) {
return rootPlan;
}
return new MultiPhasePlan(rootPlan, dependencies);
}
private MultiPhasePlan(Plan rootPlan, Map<LogicalPlan, SelectSymbol> dependencies) {
this.rootPlan = rootPlan;
this.dependencies = dependencies;
}
@Override
public StatementType type() {
return rootPlan.type();
}
@Override
public void executeOrFail(DependencyCarrier dependencyCarrier,
PlannerContext plannerContext,
RowConsumer consumer,
Row params,
SubQueryResults subQueryResults) {<FILL_FUNCTION_BODY>}
}
|
MultiPhaseExecutor.execute(dependencies, dependencyCarrier, plannerContext, params)
.whenComplete((subQueryValues, failure) -> {
if (failure == null) {
rootPlan.execute(dependencyCarrier, plannerContext, consumer, params, subQueryValues);
} else {
consumer.accept(null, failure);
}
});
| 247
| 94
| 341
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/UnionExecutionPlan.java
|
UnionExecutionPlan
|
equals
|
class UnionExecutionPlan implements ExecutionPlan, ResultDescription {
private final ExecutionPlan left;
private final ExecutionPlan right;
private final MergePhase mergePhase;
private int unfinishedLimit;
private int unfinishedOffset;
private int numOutputs;
private final int maxRowsPerNode;
@Nullable
private PositionalOrderBy orderBy;
/**
* Create a Union Plan
*
* @param unfinishedLimit the limit a parent must apply after a merge to get the correct result
* @param unfinishedOffset the offset a parent must apply after a merge to get the correct result
* <p>
* If the data should be limited as part of the Merge, add a {@link LimitAndOffsetProjection},
* if possible. If the limit of the {@link LimitAndOffsetProjection} is final, unfinishedLimit here
* should be set to NO_LIMIT (-1)
* </p>
* <p>
* See also: {@link ResultDescription}
* </p>
*/
public UnionExecutionPlan(ExecutionPlan left,
ExecutionPlan right,
MergePhase mergePhase,
int unfinishedLimit,
int unfinishedOffset,
int numOutputs,
int maxRowsPerNode,
@Nullable PositionalOrderBy orderBy) {
this.left = left;
this.right = right;
if (mergePhase.numInputs() != 2) {
throw new IllegalArgumentException("Number of inputs of MergePhase needs to be two.");
}
this.mergePhase = mergePhase;
this.unfinishedLimit = unfinishedLimit;
this.unfinishedOffset = unfinishedOffset;
this.numOutputs = numOutputs;
this.maxRowsPerNode = maxRowsPerNode;
this.orderBy = orderBy;
}
public MergePhase mergePhase() {
return mergePhase;
}
public ExecutionPlan left() {
return left;
}
public ExecutionPlan right() {
return right;
}
@Override
public <C, R> R accept(ExecutionPlanVisitor<C, R> visitor, C context) {
return visitor.visitUnionPlan(this, context);
}
@Override
public void addProjection(Projection projection) {
mergePhase.addProjection(projection);
numOutputs = projection.outputs().size();
}
@Override
public void addProjection(Projection projection,
int unfinishedLimit,
int unfinishedOffset,
@Nullable PositionalOrderBy unfinishedOrderBy) {
addProjection(projection);
this.unfinishedLimit = unfinishedLimit;
this.unfinishedOffset = unfinishedOffset;
this.orderBy = unfinishedOrderBy;
}
@Override
public ResultDescription resultDescription() {
return this;
}
@Override
public void setDistributionInfo(DistributionInfo distributionInfo) {
mergePhase.distributionInfo(distributionInfo);
}
@Override
public Collection<String> nodeIds() {
return mergePhase.nodeIds();
}
@Nullable
@Override
public PositionalOrderBy orderBy() {
return orderBy;
}
@Override
public int limit() {
return unfinishedLimit;
}
@Override
public int maxRowsPerNode() {
return maxRowsPerNode;
}
@Override
public int offset() {
return unfinishedOffset;
}
@Override
public int numOutputs() {
return numOutputs;
}
@Override
public List<DataType<?>> streamOutputs() {
return mergePhase.outputTypes();
}
@Override
public boolean equals(Object o) {<FILL_FUNCTION_BODY>}
@Override
public int hashCode() {
return Objects.hash(left, right, mergePhase, unfinishedLimit, unfinishedOffset, numOutputs,
maxRowsPerNode, orderBy);
}
}
|
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
UnionExecutionPlan that = (UnionExecutionPlan) o;
return unfinishedLimit == that.unfinishedLimit &&
unfinishedOffset == that.unfinishedOffset &&
numOutputs == that.numOutputs &&
maxRowsPerNode == that.maxRowsPerNode &&
Objects.equals(left, that.left) &&
Objects.equals(right, that.right) &&
Objects.equals(mergePhase, that.mergePhase) &&
Objects.equals(orderBy, that.orderBy);
| 1,032
| 172
| 1,204
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/WhereClauseOptimizer.java
|
DetailedQuery
|
toBoundWhereClause
|
class DetailedQuery {
private final Symbol query;
private final DocKeys docKeys;
private final List<List<Symbol>> partitions;
private final Set<Symbol> clusteredByValues;
private final boolean queryHasPkSymbolsOnly;
DetailedQuery(Symbol query,
DocKeys docKeys,
List<List<Symbol>> partitionValues,
Set<Symbol> clusteredByValues,
boolean queryHasPkSymbolsOnly) {
this.query = query;
this.docKeys = docKeys;
this.partitions = Objects.requireNonNullElse(partitionValues, Collections.emptyList());
this.clusteredByValues = clusteredByValues;
this.queryHasPkSymbolsOnly = queryHasPkSymbolsOnly;
}
public Optional<DocKeys> docKeys() {
return Optional.ofNullable(docKeys);
}
/**
* @return Symbols "pointing" to the values of any `partition_col = S` expressions:
* The outer list contains 1 entry per "equals pair" (e.g. `pcol = ? or pcol = ?` -> 2 entries
*
* The inner list contains 1 entry per partitioned by column.
* The order matches the order of the partitioned by column definition.
*/
public List<List<Symbol>> partitions() {
return partitions;
}
public Symbol query() {
return query;
}
public Set<Symbol> clusteredBy() {
return clusteredByValues;
}
public WhereClause toBoundWhereClause(DocTableInfo table,
Row params,
SubQueryResults subQueryResults,
CoordinatorTxnCtx txnCtx,
NodeContext nodeCtx) {<FILL_FUNCTION_BODY>}
public boolean queryHasPkSymbolsOnly() {
return queryHasPkSymbolsOnly;
}
}
|
SubQueryAndParamBinder binder = new SubQueryAndParamBinder(params, subQueryResults);
Symbol boundQuery = binder.apply(query);
HashSet<Symbol> clusteredBy = HashSet.newHashSet(clusteredByValues.size());
for (Symbol clusteredByValue : clusteredByValues) {
clusteredBy.add(binder.apply(clusteredByValue));
}
if (table.isPartitioned()) {
if (table.partitions().isEmpty()) {
return WhereClause.NO_MATCH;
}
WhereClauseAnalyzer.PartitionResult partitionResult =
WhereClauseAnalyzer.resolvePartitions(boundQuery, table, txnCtx, nodeCtx);
return new WhereClause(
partitionResult.query,
partitionResult.partitions,
clusteredBy
);
} else {
return new WhereClause(
boundQuery,
Collections.emptyList(),
clusteredBy
);
}
| 487
| 250
| 737
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/consumer/CreateTableAsPlan.java
|
CreateTableAsPlan
|
of
|
class CreateTableAsPlan implements Plan {
private final AnalyzedCreateTable analyzedCreateTable;
private final Supplier<LogicalPlan> postponedInsertPlan;
private final TableCreator tableCreator;
private final NumberOfShards numberOfShards;
public static CreateTableAsPlan of(AnalyzedCreateTableAs analyzedCreateTableAs,
NumberOfShards numberOfShards,
TableCreator tableCreator,
PlannerContext context,
LogicalPlanner logicalPlanner) {<FILL_FUNCTION_BODY>}
public CreateTableAsPlan(AnalyzedCreateTable analyzedCreateTable,
Supplier<LogicalPlan> postponedInsertPlan,
TableCreator tableCreator,
NumberOfShards numberOfShards) {
this.analyzedCreateTable = analyzedCreateTable;
this.postponedInsertPlan = postponedInsertPlan;
this.tableCreator = tableCreator;
this.numberOfShards = numberOfShards;
}
@Override
public StatementType type() {
return StatementType.DDL;
}
@Override
public void executeOrFail(DependencyCarrier dependencies,
PlannerContext plannerContext,
RowConsumer consumer,
Row params,
SubQueryResults subQueryResults) {
BoundCreateTable boundCreateTable = analyzedCreateTable.bind(
numberOfShards,
dependencies.fulltextAnalyzerResolver(),
plannerContext.nodeContext(),
plannerContext.transactionContext(),
params,
subQueryResults
);
tableCreator.create(boundCreateTable, plannerContext.clusterState().nodes().getMinNodeVersion())
.whenComplete((rowCount, err) -> {
if (err == null) {
postponedInsertPlan.get().execute(
dependencies,
plannerContext,
consumer,
params,
subQueryResults
);
} else {
consumer.accept(null, err);
}
});
}
}
|
Supplier<LogicalPlan> postponedInsertPlan =
() -> logicalPlanner.plan(analyzedCreateTableAs.analyzePostponedInsertStatement(), context);
return new CreateTableAsPlan(
analyzedCreateTableAs.analyzedCreateTable(),
postponedInsertPlan,
tableCreator,
numberOfShards
);
| 496
| 89
| 585
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/consumer/OrderByPositionVisitor.java
|
Context
|
orderByPositionsOrNull
|
class Context {
final List<? extends Symbol> sourceSymbols;
IntArrayList orderByPositions;
public Context(List<? extends Symbol> sourceSymbols) {
this.sourceSymbols = sourceSymbols;
this.orderByPositions = new IntArrayList();
}
public int[] orderByPositions() {
return orderByPositions.toArray();
}
}
private OrderByPositionVisitor() {
}
@Nullable
public static int[] orderByPositionsOrNull(Collection<? extends Symbol> orderBySymbols,
List<? extends Symbol> outputSymbols) {<FILL_FUNCTION_BODY>
|
Context context = new Context(outputSymbols);
for (Symbol orderBySymbol : orderBySymbols) {
orderBySymbol.accept(INSTANCE, context);
}
if (context.orderByPositions.size() == orderBySymbols.size()) {
return context.orderByPositions();
}
return null;
| 168
| 86
| 254
|
<methods>public non-sealed void <init>() ,public java.lang.Void visitAggregation(io.crate.expression.symbol.Aggregation, io.crate.planner.consumer.OrderByPositionVisitor.Context) ,public java.lang.Void visitAlias(io.crate.expression.symbol.AliasSymbol, io.crate.planner.consumer.OrderByPositionVisitor.Context) ,public java.lang.Void visitDynamicReference(io.crate.expression.symbol.DynamicReference, io.crate.planner.consumer.OrderByPositionVisitor.Context) ,public java.lang.Void visitFetchMarker(io.crate.expression.symbol.FetchMarker, io.crate.planner.consumer.OrderByPositionVisitor.Context) ,public java.lang.Void visitFetchReference(io.crate.expression.symbol.FetchReference, io.crate.planner.consumer.OrderByPositionVisitor.Context) ,public java.lang.Void visitFetchStub(io.crate.expression.symbol.FetchStub, io.crate.planner.consumer.OrderByPositionVisitor.Context) ,public java.lang.Void visitField(io.crate.expression.symbol.ScopedSymbol, io.crate.planner.consumer.OrderByPositionVisitor.Context) ,public java.lang.Void visitFunction(io.crate.expression.symbol.Function, io.crate.planner.consumer.OrderByPositionVisitor.Context) ,public java.lang.Void visitInputColumn(io.crate.expression.symbol.InputColumn, io.crate.planner.consumer.OrderByPositionVisitor.Context) ,public java.lang.Void visitLiteral(Literal<?>, io.crate.planner.consumer.OrderByPositionVisitor.Context) ,public java.lang.Void visitMatchPredicate(io.crate.expression.symbol.MatchPredicate, io.crate.planner.consumer.OrderByPositionVisitor.Context) ,public java.lang.Void visitOuterColumn(io.crate.expression.symbol.OuterColumn, io.crate.planner.consumer.OrderByPositionVisitor.Context) ,public java.lang.Void visitParameterSymbol(io.crate.expression.symbol.ParameterSymbol, io.crate.planner.consumer.OrderByPositionVisitor.Context) ,public java.lang.Void visitReference(io.crate.metadata.Reference, io.crate.planner.consumer.OrderByPositionVisitor.Context) ,public java.lang.Void 
visitSelectSymbol(io.crate.expression.symbol.SelectSymbol, io.crate.planner.consumer.OrderByPositionVisitor.Context) ,public java.lang.Void visitVoidReference(io.crate.expression.symbol.VoidReference, io.crate.planner.consumer.OrderByPositionVisitor.Context) ,public java.lang.Void visitWindowFunction(io.crate.expression.symbol.WindowFunction, io.crate.planner.consumer.OrderByPositionVisitor.Context) <variables>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/distribution/DistributionInfo.java
|
DistributionInfo
|
hashCode
|
class DistributionInfo implements Writeable {
public static final DistributionInfo DEFAULT_BROADCAST = new DistributionInfo(DistributionType.BROADCAST);
public static final DistributionInfo DEFAULT_SAME_NODE = new DistributionInfo(DistributionType.SAME_NODE);
public static final DistributionInfo DEFAULT_MODULO = new DistributionInfo(DistributionType.MODULO);
private DistributionType distributionType;
private int distributeByColumn;
public DistributionInfo(DistributionType distributionType, int distributeByColumn) {
this.distributionType = distributionType;
this.distributeByColumn = distributeByColumn;
}
public DistributionInfo(StreamInput in) throws IOException {
distributionType = DistributionType.values()[in.readVInt()];
distributeByColumn = in.readVInt();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(distributionType.ordinal());
out.writeVInt(distributeByColumn);
}
public DistributionInfo(DistributionType distributionType) {
this(distributionType, 0);
}
public DistributionType distributionType() {
return distributionType;
}
public int distributeByColumn() {
return distributeByColumn;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
DistributionInfo that = (DistributionInfo) o;
return distributeByColumn == that.distributeByColumn && distributionType == that.distributionType;
}
@Override
public int hashCode() {<FILL_FUNCTION_BODY>}
@Override
public String toString() {
return "DistributionInfo{" +
"distributionType=" + distributionType +
", distributeByColumn=" + distributeByColumn +
'}';
}
}
|
int result = distributionType.hashCode();
result = 31 * result + distributeByColumn;
return result;
| 496
| 32
| 528
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/fetch/IndexBaseBuilder.java
|
IndexBaseBuilder
|
allocate
|
class IndexBaseBuilder {
private final TreeMap<String, Integer> baseByIndex = new TreeMap<>();
public void allocate(String index, IntIndexedContainer shards) {<FILL_FUNCTION_BODY>}
public TreeMap<String, Integer> build() {
int currentBase = 0;
for (Map.Entry<String, Integer> entry : baseByIndex.entrySet()) {
Integer maxId = entry.getValue();
entry.setValue(currentBase);
currentBase += maxId + 1;
}
return baseByIndex;
}
private static int getMax(IntIndexedContainer shards) {
int max = -1;
for (IntCursor shard: shards) {
if (shard.value > max) {
max = shard.value;
}
}
return max;
}
}
|
if (shards.isEmpty()) {
return;
}
Integer currentMax = baseByIndex.get(index);
int newMax = getMax(shards);
if (currentMax == null || currentMax < newMax) {
baseByIndex.put(index, newMax);
}
| 220
| 78
| 298
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/node/dcl/GenericDCLPlan.java
|
GenericDCLPlan
|
executeOrFail
|
class GenericDCLPlan implements Plan {
private final DCLStatement statement;
public GenericDCLPlan(DCLStatement statement) {
this.statement = statement;
}
@Override
public StatementType type() {
return StatementType.MANAGEMENT;
}
@Override
public void executeOrFail(DependencyCarrier executor,
PlannerContext plannerContext,
RowConsumer consumer,
Row params,
SubQueryResults subQueryResults) {<FILL_FUNCTION_BODY>}
}
|
executor.dclAction().apply(statement, params)
.whenComplete(new OneRowActionListener<>(consumer, rCount -> new Row1(rCount == null ? -1 : rCount)));
| 139
| 53
| 192
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/node/ddl/AlterBlobTablePlan.java
|
AlterBlobTablePlan
|
executeOrFail
|
class AlterBlobTablePlan implements Plan {
private final AnalyzedAlterBlobTable analyzedAlterTable;
public AlterBlobTablePlan(AnalyzedAlterBlobTable analyzedAlterTable) {
this.analyzedAlterTable = analyzedAlterTable;
}
@Override
public StatementType type() {
return StatementType.DDL;
}
@Override
public void executeOrFail(DependencyCarrier dependencies,
PlannerContext plannerContext,
RowConsumer consumer,
Row params, SubQueryResults subQueryResults) {<FILL_FUNCTION_BODY>}
}
|
Function<? super Symbol, Object> eval = x -> SymbolEvaluator.evaluate(
plannerContext.transactionContext(),
plannerContext.nodeContext(),
x,
params,
subQueryResults
);
TableInfo tableInfo = analyzedAlterTable.tableInfo();
AlterTable<Object> alterTable = analyzedAlterTable.alterTable().map(eval);
TableParameter tableParameter = getTableParameter(alterTable, TableParameters.ALTER_BLOB_TABLE_PARAMETERS);
maybeRaiseBlockedException(tableInfo, tableParameter.settings());
BoundAlterTable stmt = new BoundAlterTable(
tableInfo,
null,
tableParameter,
true,
false);
dependencies.alterTableOperation().executeAlterTable(stmt)
.whenComplete(new OneRowActionListener<>(consumer, rCount -> new Row1(rCount == null ? -1 : rCount)));
| 157
| 237
| 394
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/node/ddl/AlterTableDropColumnPlan.java
|
AlterTableDropColumnPlan
|
executeOrFail
|
class AlterTableDropColumnPlan implements Plan {
private final AnalyzedAlterTableDropColumn alterTable;
public AlterTableDropColumnPlan(AnalyzedAlterTableDropColumn alterTable) {
this.alterTable = alterTable;
}
@Override
public StatementType type() {
return StatementType.DDL;
}
@Override
public void executeOrFail(DependencyCarrier dependencies,
PlannerContext plannerContext,
RowConsumer consumer,
Row params,
SubQueryResults subQueryResults) throws Exception {<FILL_FUNCTION_BODY>}
}
|
var dropColumnRequest = new DropColumnRequest(alterTable.table().ident(), alterTable.columns());
dependencies.alterTableOperation().executeAlterTableDropColumn(dropColumnRequest)
.whenComplete(new OneRowActionListener<>(consumer, rCount -> new Row1(rCount == null ? -1 : rCount)));
| 152
| 81
| 233
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/node/ddl/AlterTableRenameColumnPlan.java
|
AlterTableRenameColumnPlan
|
executeOrFail
|
class AlterTableRenameColumnPlan implements Plan {
private final AnalyzedAlterTableRenameColumn renameColumn;
public AlterTableRenameColumnPlan(AnalyzedAlterTableRenameColumn alterTable) {
this.renameColumn = alterTable;
}
@Override
public Plan.StatementType type() {
return StatementType.DDL;
}
@Override
public void executeOrFail(DependencyCarrier dependencies,
PlannerContext plannerContext,
RowConsumer consumer,
Row params,
SubQueryResults subQueryResults) throws Exception {<FILL_FUNCTION_BODY>}
}
|
var renameColumnRequest = new RenameColumnRequest(renameColumn.table(), renameColumn.refToRename(), renameColumn.newName());
dependencies.client()
.execute(RenameColumnAction.INSTANCE, renameColumnRequest)
.whenComplete(new OneRowActionListener<>(consumer, r -> r.isAcknowledged() ? new Row1(-1L) : new Row1(0L)));
| 158
| 106
| 264
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/node/ddl/CreateRepositoryPlan.java
|
CreateRepositoryPlan
|
createRequest
|
class CreateRepositoryPlan implements Plan {
private final AnalyzedCreateRepository createRepository;
public CreateRepositoryPlan(AnalyzedCreateRepository createRepository) {
this.createRepository = createRepository;
}
@Override
public StatementType type() {
return StatementType.DDL;
}
@Override
public void executeOrFail(DependencyCarrier dependencies,
PlannerContext plannerContext,
RowConsumer consumer,
Row parameters,
SubQueryResults subQueryResults) {
PutRepositoryRequest request = createRequest(
createRepository,
plannerContext.transactionContext(),
dependencies.nodeContext(),
parameters,
subQueryResults,
dependencies.repositoryParamValidator());
dependencies.repositoryService()
.execute(request)
.whenComplete(new OneRowActionListener<>(consumer, rCount -> new Row1(rCount == null ? -1 : rCount)));
}
@VisibleForTesting
public static PutRepositoryRequest createRequest(AnalyzedCreateRepository createRepository,
CoordinatorTxnCtx txnCtx,
NodeContext nodeCtx,
Row parameters,
SubQueryResults subQueryResults,
RepositoryParamValidator repositoryParamValidator) {<FILL_FUNCTION_BODY>}
}
|
Function<? super Symbol, Object> eval = x -> SymbolEvaluator.evaluate(
txnCtx,
nodeCtx,
x,
parameters,
subQueryResults
);
var genericProperties = createRepository.properties().map(eval);
Map<String, Setting<?>> supportedSettings = repositoryParamValidator.settingsForType(createRepository.type()).all();
genericProperties.ensureContainsOnly(supportedSettings.keySet());
var settings = Settings.builder().put(genericProperties).build();
repositoryParamValidator.validate(
createRepository.type(), createRepository.properties(), settings);
PutRepositoryRequest request = new PutRepositoryRequest(createRepository.name());
request.type(createRepository.type());
request.settings(settings);
return request;
| 317
| 197
| 514
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/node/ddl/CreateRolePlan.java
|
CreateRolePlan
|
executeOrFail
|
class CreateRolePlan implements Plan {
public static final String PASSWORD_PROPERTY_KEY = "password";
public static final String JWT_PROPERTY_KEY = "jwt";
private final AnalyzedCreateRole createRole;
private final RoleManager roleManager;
public CreateRolePlan(AnalyzedCreateRole createRole, RoleManager roleManager) {
this.createRole = createRole;
this.roleManager = roleManager;
}
@Override
public StatementType type() {
return StatementType.DDL;
}
@Override
public void executeOrFail(DependencyCarrier dependencies,
PlannerContext plannerContext,
RowConsumer consumer,
Row params,
SubQueryResults subQueryResults) throws Exception {<FILL_FUNCTION_BODY>}
public static Map<String, Object> parse(GenericProperties<Symbol> genericProperties,
TransactionContext txnCtx,
NodeContext nodeContext,
Row params,
SubQueryResults subQueryResults) {
Function<? super Symbol, Object> eval = x -> SymbolEvaluator.evaluate(
txnCtx,
nodeContext,
x,
params,
subQueryResults
);
Map<String, Object> parsedProperties = genericProperties.map(eval).properties();
for (var property : parsedProperties.keySet()) {
if (PASSWORD_PROPERTY_KEY.equals(property) == false && JWT_PROPERTY_KEY.equals(property) == false) {
throw new IllegalArgumentException(
String.format(Locale.ENGLISH, "\"%s\" is not a valid user property", property));
}
}
return parsedProperties;
}
}
|
Map<String, Object> properties = parse(
createRole.properties(),
plannerContext.transactionContext(),
plannerContext.nodeContext(),
params,
subQueryResults
);
SecureHash newPassword = UserActions.generateSecureHash(properties);
if (createRole.isUser() == false && newPassword != null) {
throw new UnsupportedOperationException("Creating a ROLE with a password is not allowed, " +
"use CREATE USER instead");
}
JwtProperties jwtProperties = JwtProperties.fromMap(Maps.get(properties, JWT_PROPERTY_KEY));
roleManager.createRole(createRole.roleName(), createRole.isUser(), newPassword, jwtProperties)
.whenComplete(new OneRowActionListener<>(consumer, rCount -> new Row1(rCount == null ? -1 : rCount)));
| 437
| 226
| 663
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/node/ddl/DropFunctionPlan.java
|
DropFunctionPlan
|
executeOrFail
|
class DropFunctionPlan implements Plan {
private final AnalyzedDropFunction analyzedDropFunction;
public DropFunctionPlan(AnalyzedDropFunction analyzedDropFunction) {
this.analyzedDropFunction = analyzedDropFunction;
}
@Override
public StatementType type() {
return StatementType.DDL;
}
@Override
public void executeOrFail(DependencyCarrier dependencies,
PlannerContext plannerContext,
RowConsumer consumer,
Row params, SubQueryResults subQueryResults) throws Exception {<FILL_FUNCTION_BODY>}
}
|
DropUserDefinedFunctionRequest request = new DropUserDefinedFunctionRequest(
analyzedDropFunction.schema(),
analyzedDropFunction.name(),
analyzedDropFunction.argumentTypes(),
analyzedDropFunction.ifExists()
);
OneRowActionListener<AcknowledgedResponse> listener = new OneRowActionListener<>(consumer, r -> new Row1(1L));
dependencies.dropFunctionAction().execute(request).whenComplete(listener);
| 143
| 111
| 254
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/node/ddl/DropRepositoryPlan.java
|
DropRepositoryPlan
|
executeOrFail
|
class DropRepositoryPlan implements Plan {
private final AnalyzedDropRepository dropRepository;
public DropRepositoryPlan(AnalyzedDropRepository dropRepository) {
this.dropRepository = dropRepository;
}
@Override
public StatementType type() {
return StatementType.DDL;
}
@Override
public void executeOrFail(DependencyCarrier dependencies,
PlannerContext plannerContext,
RowConsumer consumer,
Row parameters,
SubQueryResults subQueryResults) {<FILL_FUNCTION_BODY>}
}
|
dependencies.repositoryService().execute(new DeleteRepositoryRequest(dropRepository.name()))
.whenComplete(new OneRowActionListener<>(consumer, rCount -> new Row1(rCount == null ? -1 : rCount)));
| 138
| 57
| 195
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/node/ddl/ResetSettingsPlan.java
|
ResetSettingsPlan
|
buildSettingsFrom
|
class ResetSettingsPlan implements Plan {
private final AnalyzedResetStatement resetAnalyzedStatement;
public ResetSettingsPlan(AnalyzedResetStatement resetAnalyzedStatement) {
this.resetAnalyzedStatement = resetAnalyzedStatement;
}
@Override
public StatementType type() {
return StatementType.MANAGEMENT;
}
@Override
public void executeOrFail(DependencyCarrier dependencies,
PlannerContext plannerContext,
RowConsumer consumer,
Row params,
SubQueryResults subQueryResults) {
Function<? super Symbol, Object> eval = x -> SymbolEvaluator.evaluate(plannerContext.transactionContext(),
plannerContext.nodeContext(),
x,
params,
subQueryResults);
Settings settings = buildSettingsFrom(resetAnalyzedStatement.settingsToRemove(), eval);
ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest()
.persistentSettings(settings)
.transientSettings(settings);
OneRowActionListener<ClusterUpdateSettingsResponse> actionListener = new OneRowActionListener<>(
consumer,
r -> r.isAcknowledged() ? new Row1(1L) : new Row1(0L));
dependencies.client().execute(ClusterUpdateSettingsAction.INSTANCE, request).whenComplete(actionListener);
}
@VisibleForTesting
static Settings buildSettingsFrom(Set<Symbol> settings, Function<? super Symbol, Object> eval) {<FILL_FUNCTION_BODY>}
}
|
Settings.Builder settingsBuilder = Settings.builder();
for (Symbol symbol : settings) {
String settingsName = eval.apply(symbol).toString();
List<String> settingNames = CrateSettings.settingNamesByPrefix(settingsName);
if (settingNames.size() == 0) {
throw new IllegalArgumentException(String.format(Locale.ENGLISH,
"Setting '%s' is not supported",
settingsName));
}
for (String name : settingNames) {
CrateSettings.checkIfRuntimeSetting(name);
if (CrateSettings.isValidSetting(name) == false) {
throw new IllegalArgumentException("Setting '" + settingNames + "' is not supported");
}
settingsBuilder.put(name, (String) null);
}
}
return settingsBuilder.build();
| 382
| 202
| 584
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/node/ddl/UpdateSettingsPlan.java
|
UpdateSettingsPlan
|
buildSettingsFrom
|
class UpdateSettingsPlan implements Plan {
private final Collection<Assignment<Symbol>> settings;
private final boolean isPersistent;
public UpdateSettingsPlan(Collection<Assignment<Symbol>> settings, boolean isPersistent) {
this.settings = settings;
this.isPersistent = isPersistent;
}
@VisibleForTesting
public Collection<Assignment<Symbol>> settings() {
return settings;
}
@VisibleForTesting
public boolean isPersistent() {
return isPersistent;
}
@Override
public StatementType type() {
return StatementType.MANAGEMENT;
}
@Override
public void executeOrFail(DependencyCarrier dependencies,
PlannerContext plannerContext,
RowConsumer consumer,
Row params,
SubQueryResults subQueryResults) {
Function<? super Symbol, Object> eval = x -> SymbolEvaluator.evaluate(plannerContext.transactionContext(),
plannerContext.nodeContext(),
x,
params,
subQueryResults);
ClusterUpdateSettingsRequest request = isPersistent
? new ClusterUpdateSettingsRequest().persistentSettings(buildSettingsFrom(settings, eval))
: new ClusterUpdateSettingsRequest().transientSettings(buildSettingsFrom(settings, eval));
OneRowActionListener<ClusterUpdateSettingsResponse> actionListener = new OneRowActionListener<>(
consumer,
r -> r.isAcknowledged() ? new Row1(1L) : new Row1(0L));
dependencies.client().execute(ClusterUpdateSettingsAction.INSTANCE, request)
.whenComplete(actionListener);
}
@VisibleForTesting
static Settings buildSettingsFrom(Collection<Assignment<Symbol>> assignments,
Function<? super Symbol, Object> eval) {<FILL_FUNCTION_BODY>}
}
|
Settings.Builder settingsBuilder = Settings.builder();
for (Assignment<Symbol> entry : assignments) {
String settingsName = eval.apply(entry.columnName()).toString();
if (CrateSettings.isValidSetting(settingsName) == false) {
throw new IllegalArgumentException("Setting '" + settingsName + "' is not supported");
}
Symbol expression = Lists.getOnlyElement(entry.expressions());
Object value = eval.apply(expression);
CrateSettings.flattenSettings(settingsBuilder, settingsName, value);
}
Settings settings = settingsBuilder.build();
for (String checkForRuntime : settings.keySet()) {
CrateSettings.checkIfRuntimeSetting(checkForRuntime);
}
return settings;
| 466
| 187
| 653
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/node/dml/DeleteById.java
|
DeleteRequests
|
addItem
|
class DeleteRequests implements ShardRequestExecutor.RequestGrouper<ShardDeleteRequest> {
private final UUID jobId;
private final TimeValue requestTimeout;
DeleteRequests(UUID jobId, TimeValue requestTimeout) {
this.jobId = jobId;
this.requestTimeout = requestTimeout;
}
@Override
public ShardDeleteRequest newRequest(ShardId shardId) {
ShardDeleteRequest request = new ShardDeleteRequest(shardId, jobId);
request.timeout(requestTimeout);
return request;
}
@Override
public void bind(Row parameters, SubQueryResults subQueryResults) {
}
@Override
public void addItem(ShardDeleteRequest request,
int location,
String id,
long version,
long seqNo,
long primaryTerm) {<FILL_FUNCTION_BODY>}
}
|
ShardDeleteRequest.Item item = new ShardDeleteRequest.Item(id);
item.version(version);
item.seqNo(seqNo);
item.primaryTerm(primaryTerm);
request.add(location, item);
| 228
| 61
| 289
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/node/dql/GroupByConsumer.java
|
GroupByConsumer
|
groupedByPrimaryKeys
|
class GroupByConsumer {
public static boolean groupedByClusteredColumnOrPrimaryKeys(DocTableInfo tableInfo,
WhereClause whereClause,
List<Symbol> groupBySymbols) {
if (groupBySymbols.size() > 1) {
return groupedByPrimaryKeys(tableInfo.primaryKey(), groupBySymbols);
}
/**
* if the table has more than one partition there are multiple shards which might even be on different nodes
* so one shard doesn't contain all "clustered by" values
* -> need to use a distributed group by.
*/
if (tableInfo.isPartitioned() && whereClause.partitions().size() != 1) {
return false;
}
// this also handles the case if there is only one primary key.
// as clustered by column == pk column in that case
Symbol groupByKey = groupBySymbols.get(0);
return groupByKey instanceof Reference ref && ref.column().equals(tableInfo.clusteredBy());
}
private static boolean groupedByPrimaryKeys(List<ColumnIdent> primaryKeys, List<Symbol> groupBy) {<FILL_FUNCTION_BODY>}
}
|
if (groupBy.size() != primaryKeys.size()) {
return false;
}
for (int i = 0, groupBySize = groupBy.size(); i < groupBySize; i++) {
Symbol groupBySymbol = groupBy.get(i);
if (groupBySymbol instanceof Reference ref) {
ColumnIdent columnIdent = ref.column();
ColumnIdent pkIdent = primaryKeys.get(i);
if (!pkIdent.equals(columnIdent)) {
return false;
}
} else {
return false;
}
}
return true;
| 304
| 150
| 454
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/node/management/AlterTableReroutePlan.java
|
InnerVisitor
|
visitReroutePromoteReplica
|
class InnerVisitor extends AnalyzedStatementVisitor<Context, AllocationCommand> {
@Override
protected AllocationCommand visitAnalyzedStatement(AnalyzedStatement analyzedStatement, Context context) {
throw new UnsupportedOperationException(
String.format(Locale.ENGLISH, "Can't handle \"%s\"", analyzedStatement));
}
@Override
public AllocationCommand visitReroutePromoteReplica(AnalyzedPromoteReplica statement,
Context context) {<FILL_FUNCTION_BODY>}
@Override
protected AllocationCommand visitRerouteMoveShard(AnalyzedRerouteMoveShard statement,
Context context) {
var boundedMoveShard = statement.rerouteMoveShard().map(context.eval);
validateShardId(boundedMoveShard.shardId());
String index = getRerouteIndex(
statement.shardedTable(),
Lists.map(statement.partitionProperties(), x -> x.map(context.eval)));
String toNodeId = resolveNodeId(
context.nodes,
DataTypes.STRING.sanitizeValue(boundedMoveShard.toNodeIdOrName()));
return new MoveAllocationCommand(
index,
DataTypes.INTEGER.sanitizeValue(boundedMoveShard.shardId()),
DataTypes.STRING.sanitizeValue(boundedMoveShard.fromNodeIdOrName()),
toNodeId
);
}
@Override
protected AllocationCommand visitRerouteAllocateReplicaShard(AnalyzedRerouteAllocateReplicaShard statement,
Context context) {
var boundedRerouteAllocateReplicaShard = statement
.rerouteAllocateReplicaShard()
.map(context.eval);
validateShardId(boundedRerouteAllocateReplicaShard.shardId());
String index = getRerouteIndex(
statement.shardedTable(),
Lists.map(statement.partitionProperties(), x -> x.map(context.eval)));
String toNodeId = resolveNodeId(
context.nodes,
DataTypes.STRING.sanitizeValue(boundedRerouteAllocateReplicaShard.nodeIdOrName()));
return new AllocateReplicaAllocationCommand(
index,
DataTypes.INTEGER.sanitizeValue(boundedRerouteAllocateReplicaShard.shardId()),
toNodeId
);
}
@Override
protected AllocationCommand visitRerouteCancelShard(AnalyzedRerouteCancelShard statement,
Context context) {
var boundedRerouteCancelShard = statement
.rerouteCancelShard()
.map(context.eval);
validateShardId(boundedRerouteCancelShard.shardId());
boolean allowPrimary = validateCancelRerouteProperty(
"allow_primary", boundedRerouteCancelShard.properties());
String index = getRerouteIndex(
statement.shardedTable(),
Lists.map(statement.partitionProperties(), x -> x.map(context.eval)));
String nodeId = resolveNodeId(
context.nodes,
DataTypes.STRING.sanitizeValue(boundedRerouteCancelShard.nodeIdOrName()));
return new CancelAllocationCommand(
index,
DataTypes.INTEGER.sanitizeValue(boundedRerouteCancelShard.shardId()),
nodeId,
allowPrimary
);
}
private static String getRerouteIndex(ShardedTable shardedTable,
List<Assignment<Object>> partitionsProperties) {
if (shardedTable instanceof DocTableInfo) {
DocTableInfo docTableInfo = (DocTableInfo) shardedTable;
String indexName = docTableInfo.ident().indexNameOrAlias();
PartitionName partitionName = PartitionPropertiesAnalyzer
.createPartitionName(partitionsProperties, docTableInfo);
if (partitionName != null) {
indexName = partitionName.asIndexName();
} else if (docTableInfo.isPartitioned()) {
throw new IllegalArgumentException(
"table is partitioned however no partition clause has been specified");
}
return indexName;
}
// Table is a blob table
assert shardedTable.concreteIndices().length == 1 : "table has to contain only 1 index name";
return shardedTable.concreteIndices()[0];
}
private static boolean validateCancelRerouteProperty(String propertyKey,
GenericProperties<Object> properties) {
for (String key : properties.keys()) {
if (propertyKey.equals(key)) {
return DataTypes.BOOLEAN.sanitizeValue(properties.get(propertyKey));
} else {
throw new IllegalArgumentException(
String.format(Locale.ENGLISH, "\"%s\" is not a valid setting for CANCEL SHARD", key));
}
}
return false;
}
private void validateShardId(Object shardId) {
if (shardId == null) {
throw new IllegalArgumentException("Shard Id cannot be [null]");
}
}
}
|
var boundedPromoteReplica = statement.promoteReplica().map(context.eval);
validateShardId(boundedPromoteReplica.shardId());
String index = getRerouteIndex(
statement.shardedTable(),
Lists.map(statement.partitionProperties(), x -> x.map(context.eval)));
String toNodeId = resolveNodeId(
context.nodes,
DataTypes.STRING.sanitizeValue(boundedPromoteReplica.node()));
return new AllocateStalePrimaryAllocationCommand(
index,
DataTypes.INTEGER.sanitizeValue(boundedPromoteReplica.shardId()),
toNodeId,
DataTypes.BOOLEAN.sanitizeValue(context.eval.apply(statement.acceptDataLoss()))
);
| 1,294
| 206
| 1,500
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/node/management/KillPlan.java
|
KillPlan
|
execute
|
class KillPlan implements Plan {
@Nullable
private final Symbol jobId;
public KillPlan(@Nullable Symbol jobId) {
this.jobId = jobId;
}
@Override
public StatementType type() {
return StatementType.MANAGEMENT;
}
@Override
public void executeOrFail(DependencyCarrier dependencies,
PlannerContext plannerContext,
RowConsumer consumer,
Row params,
SubQueryResults subQueryResults) {
execute(
boundJobId(
jobId,
plannerContext.transactionContext(),
dependencies.nodeContext(),
params,
subQueryResults),
plannerContext.transactionContext().sessionSettings().userName(),
req -> dependencies.client().execute(KillJobsNodeAction.INSTANCE, req),
req -> dependencies.client().execute(KillAllNodeAction.INSTANCE, req),
consumer
);
}
@VisibleForTesting
@Nullable
public static UUID boundJobId(@Nullable Symbol jobId,
CoordinatorTxnCtx txnCtx,
NodeContext nodeCtx,
Row parameters,
SubQueryResults subQueryResults) {
if (jobId != null) {
try {
return UUID.fromString(
DataTypes.STRING.sanitizeValue(
SymbolEvaluator.evaluate(
txnCtx,
nodeCtx,
jobId,
parameters,
subQueryResults
)));
} catch (Exception e) {
throw new IllegalArgumentException("Can not parse job ID: " + jobId, e);
}
}
return null;
}
@VisibleForTesting
void execute(@Nullable UUID jobId,
String userName,
ActionExecutor<KillJobsNodeRequest, KillResponse> killJobsNodeAction,
ActionExecutor<KillAllRequest, KillResponse> killAllNodeAction,
RowConsumer consumer) {<FILL_FUNCTION_BODY>}
}
|
if (jobId != null) {
killJobsNodeAction
.execute(
new KillJobsNodeRequest(
List.of(),
List.of(jobId),
userName,
"KILL invoked by user: " + userName))
.whenComplete(
new OneRowActionListener<>(
consumer,
killResponse -> new Row1(killResponse.numKilled()))
);
} else {
killAllNodeAction
.execute(
new KillAllRequest(userName))
.whenComplete(
new OneRowActionListener<>(
consumer,
killResponse -> new Row1(killResponse.numKilled()))
);
}
| 504
| 179
| 683
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/operators/Distinct.java
|
Distinct
|
create
|
class Distinct {
public static LogicalPlan create(LogicalPlan source, boolean distinct, List<Symbol> outputs) {<FILL_FUNCTION_BODY>}
}
|
if (!distinct) {
return source;
}
return new GroupHashAggregate(source, outputs, Collections.emptyList());
| 44
| 38
| 82
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/operators/EquiJoinDetector.java
|
Visitor
|
visitFunction
|
class Visitor extends SymbolVisitor<Context, Void> {
@Override
public Void visitFunction(Function function, Context context) {<FILL_FUNCTION_BODY>}
@Override
public Void visitField(ScopedSymbol field, Context context) {
context.relations.add(field.relation());
return null;
}
@Override
public Void visitReference(Reference ref, Context context) {
context.relations.add(ref.ident().tableIdent());
return null;
}
}
|
if (context.exit) {
return null;
}
String functionName = function.name();
switch (functionName) {
case NotPredicate.NAME -> {
return null;
}
case OrOperator.NAME -> {
context.isHashJoinPossible = false;
context.exit = true;
return null;
}
case EqOperator.NAME -> {
List<Symbol> arguments = function.arguments();
var left = arguments.get(0);
var leftContext = new Context();
left.accept(this, leftContext);
var right = arguments.get(1);
var rightContext = new Context();
right.accept(this, rightContext);
if (leftContext.relations.size() == 1 &&
rightContext.relations.size() == 1 &&
!leftContext.relations.equals(rightContext.relations)) {
context.isHashJoinPossible = true;
}
}
default -> {
for (Symbol arg : function.arguments()) {
arg.accept(this, context);
}
}
}
return null;
| 135
| 283
| 418
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/operators/Eval.java
|
Eval
|
create
|
class Eval extends ForwardingLogicalPlan {
private final List<Symbol> outputs;
public static LogicalPlan create(LogicalPlan source, List<Symbol> outputs) {<FILL_FUNCTION_BODY>}
Eval(LogicalPlan source, List<Symbol> outputs) {
super(source);
this.outputs = outputs;
}
@Override
public ExecutionPlan build(DependencyCarrier executor,
PlannerContext plannerContext,
Set<PlanHint> planHints,
ProjectionBuilder projectionBuilder,
int limit,
int offset,
@Nullable OrderBy order,
@Nullable Integer pageSizeHint,
Row params,
SubQueryResults subQueryResults) {
ExecutionPlan executionPlan = source.build(
executor, plannerContext, planHints, projectionBuilder, limit, offset, null, pageSizeHint, params, subQueryResults);
if (outputs.equals(source.outputs())) {
return executionPlan;
}
return addEvalProjection(plannerContext, executionPlan, params, subQueryResults);
}
@Override
public List<Symbol> outputs() {
return outputs;
}
@Override
public LogicalPlan replaceSources(List<LogicalPlan> sources) {
return new Eval(Lists.getOnlyElement(sources), outputs);
}
@Override
public LogicalPlan pruneOutputsExcept(SequencedCollection<Symbol> outputsToKeep) {
LogicalPlan newSource = source.pruneOutputsExcept(outputsToKeep);
if (source == newSource) {
return this;
}
return new Eval(newSource, List.copyOf(outputsToKeep));
}
@Nullable
@Override
public FetchRewrite rewriteToFetch(Collection<Symbol> usedColumns) {
FetchRewrite fetchRewrite = source.rewriteToFetch(usedColumns);
if (fetchRewrite == null) {
return null;
}
LogicalPlan newSource = fetchRewrite.newPlan();
UnaryOperator<Symbol> mapToFetchStubs = fetchRewrite.mapToFetchStubs();
LinkedHashMap<Symbol, Symbol> newReplacedOutputs = new LinkedHashMap<>();
ArrayList<Symbol> newOutputs = new ArrayList<>();
for (Symbol sourceOutput : newSource.outputs()) {
if (sourceOutput instanceof FetchMarker) {
newOutputs.add(sourceOutput);
}
}
for (Symbol output : outputs) {
newReplacedOutputs.put(output, mapToFetchStubs.apply(output));
if (SymbolVisitors.any(newSource.outputs()::contains, output)) {
newOutputs.add(output);
}
}
return new FetchRewrite(newReplacedOutputs, Eval.create(newSource, newOutputs));
}
private ExecutionPlan addEvalProjection(PlannerContext plannerContext,
ExecutionPlan executionPlan,
Row params,
SubQueryResults subQueryResults) {
PositionalOrderBy orderBy = executionPlan.resultDescription().orderBy();
PositionalOrderBy newOrderBy = null;
SubQueryAndParamBinder binder = new SubQueryAndParamBinder(params, subQueryResults);
List<Symbol> boundOutputs = Lists.map(outputs, binder);
if (orderBy != null) {
newOrderBy = orderBy.tryMapToNewOutputs(source.outputs(), boundOutputs);
if (newOrderBy == null) {
executionPlan = Merge.ensureOnHandler(executionPlan, plannerContext);
}
}
InputColumns.SourceSymbols ctx = new InputColumns.SourceSymbols(Lists.map(source.outputs(), binder));
EvalProjection projection = new EvalProjection(InputColumns.create(boundOutputs, ctx));
executionPlan.addProjection(
projection,
executionPlan.resultDescription().limit(),
executionPlan.resultDescription().offset(),
newOrderBy
);
return executionPlan;
}
@Override
public String toString() {
return "Eval{" +
"src=" + source +
", out=" + outputs +
'}';
}
@Override
public <C, R> R accept(LogicalPlanVisitor<C, R> visitor, C context) {
return visitor.visitEval(this, context);
}
}
|
if (source.outputs().equals(outputs)) {
return source;
}
return new Eval(source, outputs);
| 1,132
| 37
| 1,169
|
<methods>public void <init>(io.crate.planner.operators.LogicalPlan) ,public Map<io.crate.planner.operators.LogicalPlan,io.crate.expression.symbol.SelectSymbol> dependencies() ,public List<io.crate.expression.symbol.Symbol> outputs() ,public io.crate.planner.operators.LogicalPlan pruneOutputsExcept(SequencedCollection<io.crate.expression.symbol.Symbol>) ,public List<io.crate.metadata.RelationName> relationNames() ,public io.crate.planner.operators.LogicalPlan source() ,public List<io.crate.planner.operators.LogicalPlan> sources() ,public boolean supportsDistributedReads() <variables>final non-sealed io.crate.planner.operators.LogicalPlan source,private final non-sealed List<io.crate.planner.operators.LogicalPlan> sources
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/operators/ForwardingLogicalPlan.java
|
ForwardingLogicalPlan
|
pruneOutputsExcept
|
class ForwardingLogicalPlan implements LogicalPlan {
private final List<LogicalPlan> sources;
final LogicalPlan source;
public ForwardingLogicalPlan(LogicalPlan source) {
this.source = source;
this.sources = List.of(source);
}
public LogicalPlan source() {
return source;
}
@Override
public LogicalPlan pruneOutputsExcept(SequencedCollection<Symbol> outputsToKeep) {<FILL_FUNCTION_BODY>}
@Override
public List<Symbol> outputs() {
return source.outputs();
}
@Override
public List<RelationName> relationNames() {
return source.relationNames();
}
@Override
public List<LogicalPlan> sources() {
return sources;
}
@Override
public Map<LogicalPlan, SelectSymbol> dependencies() {
return source.dependencies();
}
@Override
public boolean supportsDistributedReads() {
return source.supportsDistributedReads();
}
}
|
LogicalPlan newSource = source.pruneOutputsExcept(outputsToKeep);
if (newSource == source) {
return this;
}
return replaceSources(List.of(newSource));
| 278
| 57
| 335
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/operators/HashAggregate.java
|
HashAggregate
|
pruneOutputsExcept
|
class HashAggregate extends ForwardingLogicalPlan {
private static final String MERGE_PHASE_NAME = "mergeOnHandler";
final List<Function> aggregates;
HashAggregate(LogicalPlan source, List<Function> aggregates) {
super(source);
this.aggregates = aggregates;
}
@Override
public ExecutionPlan build(DependencyCarrier executor,
PlannerContext plannerContext,
Set<PlanHint> planHints,
ProjectionBuilder projectionBuilder,
int limit,
int offset,
@Nullable OrderBy order,
@Nullable Integer pageSizeHint,
Row params,
SubQueryResults subQueryResults) {
// Avoid source look-ups for performance reasons. Global aggregations are a pipeline breaker using all data.
// So use column store instead, because it is likely more efficient.
if (planHints.contains(PlanHint.PREFER_SOURCE_LOOKUP)) {
planHints = new HashSet<>(planHints);
planHints.remove(PlanHint.PREFER_SOURCE_LOOKUP);
}
ExecutionPlan executionPlan = source.build(
executor, plannerContext, planHints, projectionBuilder, NO_LIMIT, 0, null, null, params, subQueryResults);
AggregationOutputValidator.validateOutputs(aggregates);
var paramBinder = new SubQueryAndParamBinder(params, subQueryResults);
var sourceOutputs = source.outputs();
if (executionPlan.resultDescription().hasRemainingLimitOrOffset()) {
executionPlan = Merge.ensureOnHandler(executionPlan, plannerContext);
}
if (ExecutionPhases.executesOnHandler(plannerContext.handlerNode(), executionPlan.resultDescription().nodeIds())) {
if (source.preferShardProjections()) {
executionPlan.addProjection(
projectionBuilder.aggregationProjection(
sourceOutputs,
aggregates,
paramBinder,
AggregateMode.ITER_PARTIAL,
RowGranularity.SHARD,
plannerContext.transactionContext().sessionSettings().searchPath()
)
);
executionPlan.addProjection(
projectionBuilder.aggregationProjection(
aggregates,
aggregates,
paramBinder,
AggregateMode.PARTIAL_FINAL,
RowGranularity.CLUSTER,
plannerContext.transactionContext().sessionSettings().searchPath()
)
);
return executionPlan;
}
AggregationProjection fullAggregation = projectionBuilder.aggregationProjection(
sourceOutputs,
aggregates,
paramBinder,
AggregateMode.ITER_FINAL,
RowGranularity.CLUSTER,
plannerContext.transactionContext().sessionSettings().searchPath()
);
executionPlan.addProjection(fullAggregation);
return executionPlan;
}
AggregationProjection toPartial = projectionBuilder.aggregationProjection(
sourceOutputs,
aggregates,
paramBinder,
AggregateMode.ITER_PARTIAL,
source.preferShardProjections() ? RowGranularity.SHARD : RowGranularity.NODE,
plannerContext.transactionContext().sessionSettings().searchPath()
);
executionPlan.addProjection(toPartial);
AggregationProjection toFinal = projectionBuilder.aggregationProjection(
aggregates,
aggregates,
paramBinder,
AggregateMode.PARTIAL_FINAL,
RowGranularity.CLUSTER,
plannerContext.transactionContext().sessionSettings().searchPath()
);
return new Merge(
executionPlan,
new MergePhase(
plannerContext.jobId(),
plannerContext.nextExecutionPhaseId(),
MERGE_PHASE_NAME,
executionPlan.resultDescription().nodeIds().size(),
1,
Collections.singletonList(plannerContext.handlerNode()),
executionPlan.resultDescription().streamOutputs(),
Collections.singletonList(toFinal),
DistributionInfo.DEFAULT_BROADCAST,
null
),
NO_LIMIT,
0,
aggregates.size(),
1,
null
);
}
public List<Function> aggregates() {
return aggregates;
}
@Override
public List<Symbol> outputs() {
return new ArrayList<>(aggregates);
}
@Override
public LogicalPlan replaceSources(List<LogicalPlan> sources) {
return new HashAggregate(Lists.getOnlyElement(sources), aggregates);
}
@Override
public LogicalPlan pruneOutputsExcept(SequencedCollection<Symbol> outputsToKeep) {<FILL_FUNCTION_BODY>}
@Override
public <C, R> R accept(LogicalPlanVisitor<C, R> visitor, C context) {
return visitor.visitHashAggregate(this, context);
}
private static class OutputValidatorContext {
private boolean insideAggregation = false;
}
public static class AggregationOutputValidator extends SymbolVisitor<OutputValidatorContext, Void> {
private static final AggregationOutputValidator INSTANCE = new AggregationOutputValidator();
public static void validateOutputs(Collection<? extends Symbol> outputs) {
OutputValidatorContext ctx = new OutputValidatorContext();
for (Symbol output : outputs) {
ctx.insideAggregation = false;
output.accept(INSTANCE, ctx);
}
}
@Override
public Void visitFunction(Function symbol, OutputValidatorContext context) {
context.insideAggregation =
context.insideAggregation || symbol.signature().getKind().equals(FunctionType.AGGREGATE);
for (Symbol argument : symbol.arguments()) {
argument.accept(this, context);
}
context.insideAggregation = false;
return null;
}
@Override
public Void visitReference(Reference symbol, OutputValidatorContext context) {
if (context.insideAggregation) {
IndexType indexType = symbol.indexType();
if (indexType == IndexType.FULLTEXT) {
throw new IllegalArgumentException(Symbols.format(
"Cannot select analyzed column '%s' within grouping or aggregations", symbol));
}
}
return null;
}
@Override
protected Void visitSymbol(Symbol symbol, OutputValidatorContext context) {
return null;
}
}
}
|
ArrayList<Function> newAggregates = new ArrayList<>();
for (Symbol outputToKeep : outputsToKeep) {
SymbolVisitors.intersection(outputToKeep, aggregates, newAggregates::add);
}
LinkedHashSet<Symbol> toKeep = new LinkedHashSet<>();
for (Function newAggregate : newAggregates) {
SymbolVisitors.intersection(newAggregate, source.outputs(), toKeep::add);
}
LogicalPlan newSource = source.pruneOutputsExcept(toKeep);
if (source == newSource && newAggregates == aggregates) {
return this;
}
return new HashAggregate(newSource, newAggregates);
| 1,660
| 173
| 1,833
|
<methods>public void <init>(io.crate.planner.operators.LogicalPlan) ,public Map<io.crate.planner.operators.LogicalPlan,io.crate.expression.symbol.SelectSymbol> dependencies() ,public List<io.crate.expression.symbol.Symbol> outputs() ,public io.crate.planner.operators.LogicalPlan pruneOutputsExcept(SequencedCollection<io.crate.expression.symbol.Symbol>) ,public List<io.crate.metadata.RelationName> relationNames() ,public io.crate.planner.operators.LogicalPlan source() ,public List<io.crate.planner.operators.LogicalPlan> sources() ,public boolean supportsDistributedReads() <variables>final non-sealed io.crate.planner.operators.LogicalPlan source,private final non-sealed List<io.crate.planner.operators.LogicalPlan> sources
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/operators/JoinPlan.java
|
JoinPlan
|
replaceSources
|
class JoinPlan extends AbstractJoinPlan {
private final boolean isFiltered;
private final boolean rewriteFilterOnOuterJoinToInnerJoinDone;
private final boolean lookUpJoinRuleApplied;
private final boolean moveConstantJoinConditionRuleApplied;
public JoinPlan(LogicalPlan lhs,
LogicalPlan rhs,
JoinType joinType,
@Nullable Symbol joinCondition,
boolean isFiltered,
boolean rewriteFilterOnOuterJoinToInnerJoinDone,
boolean lookUpJoinRuleApplied) {
this(lhs, rhs, joinType, joinCondition, isFiltered, rewriteFilterOnOuterJoinToInnerJoinDone, lookUpJoinRuleApplied, false);
}
@VisibleForTesting
public JoinPlan(LogicalPlan lhs,
LogicalPlan rhs,
JoinType joinType,
@Nullable Symbol joinCondition) {
this(lhs, rhs, joinType, joinCondition, false, false, false, false);
}
private JoinPlan(LogicalPlan lhs,
LogicalPlan rhs,
JoinType joinType,
@Nullable Symbol joinCondition,
boolean isFiltered,
boolean rewriteFilterOnOuterJoinToInnerJoinDone,
boolean lookUpJoinRuleApplied,
boolean moveConstantJoinConditionRuleApplied) {
super(lhs, rhs, joinCondition, joinType);
this.isFiltered = isFiltered;
this.rewriteFilterOnOuterJoinToInnerJoinDone = rewriteFilterOnOuterJoinToInnerJoinDone;
this.lookUpJoinRuleApplied = lookUpJoinRuleApplied;
this.moveConstantJoinConditionRuleApplied = moveConstantJoinConditionRuleApplied;
}
public boolean isLookUpJoinRuleApplied() {
return lookUpJoinRuleApplied;
}
public boolean isFiltered() {
return isFiltered;
}
public boolean isRewriteFilterOnOuterJoinToInnerJoinDone() {
return rewriteFilterOnOuterJoinToInnerJoinDone;
}
public boolean moveConstantJoinConditionRuleApplied() {
return moveConstantJoinConditionRuleApplied;
}
public JoinPlan withMoveConstantJoinConditionRuleApplied(boolean moveConstantJoinConditionRuleApplied) {
return new JoinPlan(
lhs,
rhs,
joinType,
joinCondition,
isFiltered,
rewriteFilterOnOuterJoinToInnerJoinDone,
lookUpJoinRuleApplied,
moveConstantJoinConditionRuleApplied);
}
@Override
public ExecutionPlan build(DependencyCarrier dependencyCarrier,
PlannerContext plannerContext,
Set<PlanHint> planHints,
ProjectionBuilder projectionBuilder,
int limit,
int offset,
@Nullable OrderBy order,
@Nullable Integer pageSizeHint,
Row params,
SubQueryResults subQueryResults) {
throw new UnsupportedOperationException(
"JoinPlan cannot be build, it needs to be converted to a NestedLoop/HashJoin");
}
@Override
public <C, R> R accept(LogicalPlanVisitor<C, R> visitor, C context) {
return visitor.visitJoinPlan(this, context);
}
@Override
public LogicalPlan pruneOutputsExcept(SequencedCollection<Symbol> outputsToKeep) {
LinkedHashSet<Symbol> lhsToKeep = new LinkedHashSet<>();
LinkedHashSet<Symbol> rhsToKeep = new LinkedHashSet<>();
for (Symbol outputToKeep : outputsToKeep) {
SymbolVisitors.intersection(outputToKeep, lhs.outputs(), lhsToKeep::add);
SymbolVisitors.intersection(outputToKeep, rhs.outputs(), rhsToKeep::add);
}
if (joinCondition != null) {
SymbolVisitors.intersection(joinCondition, lhs.outputs(), lhsToKeep::add);
SymbolVisitors.intersection(joinCondition, rhs.outputs(), rhsToKeep::add);
}
LogicalPlan newLhs = lhs.pruneOutputsExcept(lhsToKeep);
LogicalPlan newRhs = rhs.pruneOutputsExcept(rhsToKeep);
if (newLhs == lhs && newRhs == rhs) {
return this;
}
return new JoinPlan(
newLhs,
newRhs,
joinType,
joinCondition,
isFiltered,
rewriteFilterOnOuterJoinToInnerJoinDone,
lookUpJoinRuleApplied,
moveConstantJoinConditionRuleApplied
);
}
@Override
public void print(PrintContext printContext) {
printContext
.text("Join[")
.text(joinType.toString());
if (joinCondition != null) {
printContext
.text(" | ")
.text(joinCondition.toString());
}
printContext.text("]");
printStats(printContext);
printContext.nest(Lists.map(sources(), x -> x::print));
}
@Override
public LogicalPlan replaceSources(List<LogicalPlan> sources) {<FILL_FUNCTION_BODY>}
}
|
return new JoinPlan(
sources.get(0),
sources.get(1),
joinType,
joinCondition,
isFiltered,
rewriteFilterOnOuterJoinToInnerJoinDone,
lookUpJoinRuleApplied,
moveConstantJoinConditionRuleApplied
);
| 1,299
| 76
| 1,375
|
<methods>public Map<io.crate.planner.operators.LogicalPlan,io.crate.expression.symbol.SelectSymbol> dependencies() ,public io.crate.expression.symbol.Symbol joinCondition() ,public io.crate.sql.tree.JoinType joinType() ,public io.crate.planner.operators.LogicalPlan lhs() ,public List<io.crate.expression.symbol.Symbol> outputs() ,public List<io.crate.metadata.RelationName> relationNames() ,public io.crate.planner.operators.LogicalPlan rhs() ,public List<io.crate.planner.operators.LogicalPlan> sources() ,public boolean supportsDistributedReads() <variables>protected final non-sealed io.crate.expression.symbol.Symbol joinCondition,protected final non-sealed io.crate.sql.tree.JoinType joinType,protected final non-sealed io.crate.planner.operators.LogicalPlan lhs,protected final non-sealed io.crate.planner.operators.LogicalPlan rhs
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/operators/Limit.java
|
Limit
|
create
|
class Limit extends ForwardingLogicalPlan {
final Symbol limit;
final Symbol offset;
static LogicalPlan create(LogicalPlan source, @Nullable Symbol limit, @Nullable Symbol offset) {<FILL_FUNCTION_BODY>}
public Limit(LogicalPlan source, Symbol limit, Symbol offset) {
super(source);
this.limit = limit;
this.offset = offset;
}
public Symbol limit() {
return limit;
}
public Symbol offset() {
return offset;
}
@Override
public ExecutionPlan build(DependencyCarrier executor,
PlannerContext plannerContext,
Set<PlanHint> planHints,
ProjectionBuilder projectionBuilder,
int limitHint,
int offsetHint,
@Nullable OrderBy order,
@Nullable Integer pageSizeHint,
Row params,
SubQueryResults subQueryResults) {
int limit = Objects.requireNonNullElse(
DataTypes.INTEGER.sanitizeValue(evaluate(
plannerContext.transactionContext(),
plannerContext.nodeContext(),
this.limit,
params,
subQueryResults)),
NO_LIMIT);
int offset = Objects.requireNonNullElse(
DataTypes.INTEGER.sanitizeValue(evaluate(
plannerContext.transactionContext(),
plannerContext.nodeContext(),
this.offset,
params,
subQueryResults)),
NO_OFFSET);
ExecutionPlan executionPlan = source.build(
executor, plannerContext, planHints, projectionBuilder, limit, offset, order, pageSizeHint, params, subQueryResults);
List<DataType<?>> sourceTypes = Symbols.typeView(source.outputs());
ResultDescription resultDescription = executionPlan.resultDescription();
if (limit == NO_LIMIT && offset == NO_OFFSET) {
return executionPlan;
}
if (resultDescription.hasRemainingLimitOrOffset()
&& (resultDescription.limit() != limit || resultDescription.offset() != offset)) {
executionPlan = Merge.ensureOnHandler(executionPlan, plannerContext);
resultDescription = executionPlan.resultDescription();
}
if (ExecutionPhases.executesOnHandler(plannerContext.handlerNode(), resultDescription.nodeIds())) {
executionPlan.addProjection(
new LimitAndOffsetProjection(limit, offset, sourceTypes), LimitAndOffset.NO_LIMIT, 0, resultDescription.orderBy());
} else if (resultDescription.limit() != limit || resultDescription.offset() != 0) {
executionPlan.addProjection(
new LimitAndOffsetProjection(limit + offset, 0, sourceTypes), limit, offset, resultDescription.orderBy());
}
return executionPlan;
}
@Override
public LogicalPlan replaceSources(List<LogicalPlan> sources) {
return new Limit(Lists.getOnlyElement(sources), limit, offset);
}
@Override
public @Nullable FetchRewrite rewriteToFetch(Collection<Symbol> usedColumns) {
FetchRewrite fetchRewrite = source.rewriteToFetch(usedColumns);
if (fetchRewrite == null) {
return null;
}
return new FetchRewrite(
fetchRewrite.replacedOutputs(),
new Limit(fetchRewrite.newPlan(), this.limit, this.offset)
);
}
@Override
public Map<LogicalPlan, SelectSymbol> dependencies() {
return source.dependencies();
}
@Override
public String toString() {
return "Limit{" +
"source=" + source +
", limit=" + limit +
", offset=" + offset +
'}';
}
@Override
public <C, R> R accept(LogicalPlanVisitor<C, R> visitor, C context) {
return visitor.visitLimit(this, context);
}
@Override
public void print(PrintContext printContext) {
printContext
.text("Limit[")
.text(limit.toString())
.text(";")
.text(offset.toString())
.text("]");
printStats(printContext);
printContext.nest(source::print);
}
static int limitAndOffset(int limit, int offset) {
if (limit == LimitAndOffset.NO_LIMIT) {
return limit;
}
return limit + offset;
}
}
|
if (limit == null && offset == null) {
return source;
} else {
return new Limit(
source,
Objects.requireNonNullElse(limit, Literal.of(-1L)),
Objects.requireNonNullElse(offset, Literal.of(0)));
}
| 1,147
| 81
| 1,228
|
<methods>public void <init>(io.crate.planner.operators.LogicalPlan) ,public Map<io.crate.planner.operators.LogicalPlan,io.crate.expression.symbol.SelectSymbol> dependencies() ,public List<io.crate.expression.symbol.Symbol> outputs() ,public io.crate.planner.operators.LogicalPlan pruneOutputsExcept(SequencedCollection<io.crate.expression.symbol.Symbol>) ,public List<io.crate.metadata.RelationName> relationNames() ,public io.crate.planner.operators.LogicalPlan source() ,public List<io.crate.planner.operators.LogicalPlan> sources() ,public boolean supportsDistributedReads() <variables>final non-sealed io.crate.planner.operators.LogicalPlan source,private final non-sealed List<io.crate.planner.operators.LogicalPlan> sources
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/operators/LimitDistinct.java
|
LimitDistinct
|
build
|
class LimitDistinct extends ForwardingLogicalPlan {
private final Symbol limit;
private final List<Symbol> outputs;
private final Symbol offset;
public LimitDistinct(LogicalPlan source, Symbol limit, Symbol offset, List<Symbol> outputs) {
super(source);
this.limit = limit;
this.offset = offset;
this.outputs = outputs;
}
@Override
public List<Symbol> outputs() {
return outputs;
}
public Symbol limit() {
return this.limit;
}
@Override
public ExecutionPlan build(DependencyCarrier executor,
PlannerContext plannerContext,
Set<PlanHint> planHints,
ProjectionBuilder projectionBuilder,
int limitHint,
int offsetHint,
@Nullable OrderBy order,
@Nullable Integer pageSizeHint,
Row params,
SubQueryResults subQueryResults) {<FILL_FUNCTION_BODY>}
@Override
public LogicalPlan pruneOutputsExcept(SequencedCollection<Symbol> outputsToKeep) {
LinkedHashSet<Symbol> toKeep = new LinkedHashSet<>();
Consumer<Symbol> keep = toKeep::add;
// Pruning unused outputs would change semantics. Need to keep all in any case
for (var output : outputs) {
SymbolVisitors.intersection(output, source.outputs(), keep);
}
LogicalPlan prunedSource = source.pruneOutputsExcept(toKeep);
if (prunedSource == source) {
return this;
}
return new LimitDistinct(prunedSource, limit, offset, outputs);
}
@Override
public LogicalPlan replaceSources(List<LogicalPlan> sources) {
var source = Lists.getOnlyElement(sources);
return new LimitDistinct(source, limit, offset, outputs);
}
@Override
public <C, R> R accept(LogicalPlanVisitor<C, R> visitor, C context) {
return visitor.visitLimitDistinct(this, context);
}
@Override
public void print(PrintContext printContext) {
printContext
.text("LimitDistinct[")
.text(limit.toString())
.text(";")
.text(offset.toString())
.text(" | [")
.text(Lists.joinOn(", ", outputs, Symbol::toString))
.text("]]");
printStats(printContext);
printContext.nest(source::print);
}
}
|
var executionPlan = source.build(
executor,
plannerContext,
planHints,
projectionBuilder,
LimitAndOffset.NO_LIMIT,
LimitAndOffset.NO_OFFSET,
null,
null,
params,
subQueryResults
);
if (executionPlan.resultDescription().hasRemainingLimitOrOffset()) {
executionPlan = Merge.ensureOnHandler(executionPlan, plannerContext);
}
if (!source.outputs().equals(outputs)) {
EvalProjection evalProjection = new EvalProjection(
InputColumns.create(outputs, new InputColumns.SourceSymbols(source.outputs()))
);
executionPlan.addProjection(evalProjection);
}
int limit = DataTypes.INTEGER.sanitizeValue(
evaluate(
plannerContext.transactionContext(),
plannerContext.nodeContext(),
this.limit,
params,
subQueryResults
)
);
int offset = DataTypes.INTEGER.sanitizeValue(
evaluate(
plannerContext.transactionContext(),
plannerContext.nodeContext(),
this.offset,
params,
subQueryResults
)
);
var inputColOutputs = InputColumn.mapToInputColumns(outputs);
executionPlan.addProjection(
new LimitDistinctProjection(
limit + offset,
inputColOutputs,
source.preferShardProjections() ? RowGranularity.SHARD : RowGranularity.CLUSTER
)
);
boolean onHandler = ExecutionPhases.executesOnHandler(
plannerContext.handlerNode(), executionPlan.resultDescription().nodeIds());
if (!onHandler || source.preferShardProjections()) {
if (!onHandler) {
executionPlan = Merge.ensureOnHandler(executionPlan, plannerContext);
}
LimitDistinctProjection limitDistinct = new LimitDistinctProjection(
limit + offset,
inputColOutputs,
RowGranularity.CLUSTER
);
executionPlan.addProjection(limitDistinct);
}
if (offset > 0) {
// LimitDistinctProjection outputs a distinct result-set,
// That allows us to use the LimitAndOffsetProjection to apply the offset
executionPlan.addProjection(
new LimitAndOffsetProjection(limit, offset, Symbols.typeView(inputColOutputs))
);
}
return executionPlan;
| 646
| 636
| 1,282
|
<methods>public void <init>(io.crate.planner.operators.LogicalPlan) ,public Map<io.crate.planner.operators.LogicalPlan,io.crate.expression.symbol.SelectSymbol> dependencies() ,public List<io.crate.expression.symbol.Symbol> outputs() ,public io.crate.planner.operators.LogicalPlan pruneOutputsExcept(SequencedCollection<io.crate.expression.symbol.Symbol>) ,public List<io.crate.metadata.RelationName> relationNames() ,public io.crate.planner.operators.LogicalPlan source() ,public List<io.crate.planner.operators.LogicalPlan> sources() ,public boolean supportsDistributedReads() <variables>final non-sealed io.crate.planner.operators.LogicalPlan source,private final non-sealed List<io.crate.planner.operators.LogicalPlan> sources
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/operators/MultiPhase.java
|
MultiPhase
|
build
|
/**
 * Logical plan operator that attaches uncorrelated sub-queries to a source plan.
 * The sub-queries are exposed as additional {@link #dependencies()}; building
 * the plan itself delegates to the source (see the masked build method).
 * NOTE(review): presumably the executor runs the dependency sub-queries before
 * the source plan and feeds their results in via SubQueryResults — confirm in
 * DependencyCarrier.
 */
class MultiPhase extends ForwardingLogicalPlan {
// Uncorrelated sub-query plan -> the SelectSymbol whose value it produces.
private final Map<LogicalPlan, SelectSymbol> subQueries;
/**
 * Wraps {@code source} in a MultiPhase when there are uncorrelated
 * sub-queries; otherwise returns {@code source} unchanged.
 */
public static LogicalPlan createIfNeeded(Map<LogicalPlan, SelectSymbol> uncorrelatedSubQueries, LogicalPlan source) {
if (uncorrelatedSubQueries.isEmpty()) {
return source;
} else {
return new MultiPhase(source, uncorrelatedSubQueries);
}
}
// Private: instances are only created via createIfNeeded.
private MultiPhase(LogicalPlan source, Map<LogicalPlan, SelectSymbol> subQueries) {
super(source);
this.subQueries = subQueries;
}
@Override
public ExecutionPlan build(DependencyCarrier executor,
PlannerContext plannerContext,
Set<PlanHint> planHints,
ProjectionBuilder projectionBuilder,
int limit,
int offset,
@Nullable OrderBy order,
@Nullable Integer pageSizeHint,
Row params,
SubQueryResults subQueryResults) {<FILL_FUNCTION_BODY>}
@Override
public LogicalPlan replaceSources(List<LogicalPlan> sources) {
// Exactly one source is expected; the sub-queries are kept as-is.
return new MultiPhase(Lists.getOnlyElement(sources), subQueries);
}
@Override
public Map<LogicalPlan, SelectSymbol> dependencies() {
// Expose both the source's dependencies and this node's own sub-queries.
return Maps.concat(source.dependencies(), subQueries);
}
@Override
public <C, R> R accept(LogicalPlanVisitor<C, R> visitor, C context) {
return visitor.visitMultiPhase(this, context);
}
@Override
public void print(PrintContext printContext) {
printContext
.text("MultiPhase");
printStats(printContext);
// Print the source first, then each sub-query plan as a nested child.
printContext.nest(source::print)
.nest(Lists.map(subQueries.keySet(), x -> x::print));
}
}
|
return source.build(
executor, plannerContext, planHints, projectionBuilder, limit, offset, order, pageSizeHint, params, subQueryResults);
| 480
| 43
| 523
|
<methods>public void <init>(io.crate.planner.operators.LogicalPlan) ,public Map<io.crate.planner.operators.LogicalPlan,io.crate.expression.symbol.SelectSymbol> dependencies() ,public List<io.crate.expression.symbol.Symbol> outputs() ,public io.crate.planner.operators.LogicalPlan pruneOutputsExcept(SequencedCollection<io.crate.expression.symbol.Symbol>) ,public List<io.crate.metadata.RelationName> relationNames() ,public io.crate.planner.operators.LogicalPlan source() ,public List<io.crate.planner.operators.LogicalPlan> sources() ,public boolean supportsDistributedReads() <variables>final non-sealed io.crate.planner.operators.LogicalPlan source,private final non-sealed List<io.crate.planner.operators.LogicalPlan> sources
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/operators/Rename.java
|
Rename
|
pruneOutputsExcept
|
/**
 * Operator that renames the outputs of its source, e.g. for table aliases
 * ({@code tbl AS t1}). Holds exactly one output symbol per source output
 * (asserted in the constructor) and resolves {@link ScopedSymbol}s via the
 * given {@link FieldResolver}.
 */
class Rename extends ForwardingLogicalPlan implements FieldResolver {
// Outputs in this relation's scope; positionally aligned with source.outputs().
private final List<Symbol> outputs;
private final FieldResolver fieldResolver;
// The alias / new relation name this operator introduces.
final RelationName name;
public Rename(List<Symbol> outputs, RelationName name, FieldResolver fieldResolver, LogicalPlan source) {
super(source);
this.outputs = outputs;
this.name = name;
this.fieldResolver = fieldResolver;
assert this.outputs.size() == source.outputs().size()
: "Rename operator must have the same number of outputs as the source. Got " + outputs + " and " + source.outputs();
}
@Override
public boolean preferShardProjections() {
// Renaming does not affect where projections should run; delegate.
return source.preferShardProjections();
}
public RelationName name() {
return name;
}
@Override
public List<Symbol> outputs() {
return outputs;
}
@Override
public LogicalPlan pruneOutputsExcept(SequencedCollection<Symbol> outputsToKeep) {<FILL_FUNCTION_BODY>}
/**
 * Maps the used columns into the source's scope, asks the source for a fetch
 * rewrite and — if one is possible — translates the rewritten outputs back
 * into this relation's scope. Returns null when the source cannot be
 * rewritten to fetch.
 */
@Nullable
@Override
public FetchRewrite rewriteToFetch(Collection<Symbol> usedColumns) {
// IdentityHashMap on purpose: in self-joins the ScopedSymbols of the two
// sides can be equal but distinct objects; identity keeps them apart.
IdentityHashMap<Symbol, Symbol> parentToChildMap = new IdentityHashMap<>(outputs.size());
IdentityHashMap<Symbol, Symbol> childToParentMap = new IdentityHashMap<>(outputs.size());
for (int i = 0; i < outputs.size(); i++) {
parentToChildMap.put(outputs.get(i), source.outputs().get(i));
childToParentMap.put(source.outputs().get(i), outputs.get(i));
}
ArrayList<Symbol> mappedUsedColumns = new ArrayList<>();
for (Symbol usedColumn : usedColumns) {
SymbolVisitors.intersection(usedColumn, outputs, s -> {
Symbol childSymbol = parentToChildMap.get(s);
assert childSymbol != null : "There must be a mapping available for symbol " + s;
mappedUsedColumns.add(childSymbol);
});
}
FetchRewrite fetchRewrite = source.rewriteToFetch(mappedUsedColumns);
if (fetchRewrite == null) {
return null;
}
LogicalPlan newSource = fetchRewrite.newPlan();
ArrayList<Symbol> newOutputs = new ArrayList<>();
for (Symbol output : newSource.outputs()) {
if (output instanceof FetchMarker) {
// Re-scope fetch markers under this relation's name.
FetchMarker marker = (FetchMarker) output;
FetchMarker newMarker = new FetchMarker(name, marker.fetchRefs(), marker.fetchId());
newOutputs.add(newMarker);
childToParentMap.put(marker, newMarker);
} else {
Symbol mappedOutput = requireNonNull(
childToParentMap.get(output),
() -> "Mapping must exist for output from source. `" + output + "` is missing in " + childToParentMap
);
newOutputs.add(mappedOutput);
}
}
// LinkedHashMap keeps the replaced-output order of the source rewrite.
LinkedHashMap<Symbol, Symbol> replacedOutputs = new LinkedHashMap<>();
UnaryOperator<Symbol> convertChildrenToScopedSymbols = s -> MapBackedSymbolReplacer.convert(s, childToParentMap);
for (var entry : fetchRewrite.replacedOutputs().entrySet()) {
Symbol key = entry.getKey();
Symbol value = entry.getValue();
Symbol parentSymbolForKey = requireNonNull(
childToParentMap.get(key),
() -> "Mapping must exist for output from source. `" + key + "` is missing in " + childToParentMap
);
replacedOutputs.put(parentSymbolForKey, convertChildrenToScopedSymbols.apply(value));
}
Rename newRename = new Rename(newOutputs, name, fieldResolver, newSource);
return new FetchRewrite(replacedOutputs, newRename);
}
@Override
public ExecutionPlan build(DependencyCarrier executor,
PlannerContext plannerContext,
Set<PlanHint> hints,
ProjectionBuilder projectionBuilder,
int limit,
int offset,
@Nullable OrderBy order,
@Nullable Integer pageSizeHint,
Row params,
SubQueryResults subQueryResults) {
// Rename is a no-op at execution time; simply build the source.
return source.build(
executor, plannerContext, hints, projectionBuilder, limit, offset, order, pageSizeHint, params, subQueryResults);
}
@Override
public LogicalPlan replaceSources(List<LogicalPlan> sources) {
return new Rename(outputs, name, fieldResolver, Lists.getOnlyElement(sources));
}
@Override
public <C, R> R accept(LogicalPlanVisitor<C, R> visitor, C context) {
return visitor.visitRename(this, context);
}
@Nullable
@Override
public Symbol resolveField(ScopedSymbol field) {
return fieldResolver.resolveField(field);
}
@Override
public List<RelationName> relationNames() {
// Only the alias introduced by this operator is visible to parents.
return List.of(name);
}
@Override
public void print(PrintContext printContext) {
printContext
.text("Rename[")
.text(Lists.joinOn(", ", outputs, Symbol::toString))
.text("] AS ")
.text(name.toString());
printStats(printContext);
printContext.nest(source::print);
}
@Override
public String toString() {
return "Rename{name=" + name + ", outputs=" + outputs + ", src=" + source + "}";
}
}
|
/* In `SELECT * FROM (SELECT t1.*, t2.* FROM tbl AS t1, tbl AS t2) AS tjoin`
* The `ScopedSymbol`s are ambiguous; To map them correctly this uses a IdentityHashMap
*/
IdentityHashMap<Symbol, Symbol> parentToChildMap = new IdentityHashMap<>(outputs.size());
IdentityHashMap<Symbol, Symbol> childToParentMap = new IdentityHashMap<>(outputs.size());
for (int i = 0; i < outputs.size(); i++) {
parentToChildMap.put(outputs.get(i), source.outputs().get(i));
childToParentMap.put(source.outputs().get(i), outputs.get(i));
}
ArrayList<Symbol> mappedToKeep = new ArrayList<>();
for (Symbol outputToKeep : outputsToKeep) {
SymbolVisitors.intersection(outputToKeep, outputs, s -> {
Symbol childSymbol = parentToChildMap.get(s);
assert childSymbol != null : "There must be a mapping available for symbol " + s;
mappedToKeep.add(childSymbol);
});
}
LogicalPlan newSource = source.pruneOutputsExcept(mappedToKeep);
if (newSource == source) {
return this;
}
ArrayList<Symbol> newOutputs = new ArrayList<>(newSource.outputs().size());
for (Symbol sourceOutput : newSource.outputs()) {
newOutputs.add(childToParentMap.get(sourceOutput));
}
return new Rename(
newOutputs,
name,
fieldResolver,
newSource
);
| 1,428
| 420
| 1,848
|
<methods>public void <init>(io.crate.planner.operators.LogicalPlan) ,public Map<io.crate.planner.operators.LogicalPlan,io.crate.expression.symbol.SelectSymbol> dependencies() ,public List<io.crate.expression.symbol.Symbol> outputs() ,public io.crate.planner.operators.LogicalPlan pruneOutputsExcept(SequencedCollection<io.crate.expression.symbol.Symbol>) ,public List<io.crate.metadata.RelationName> relationNames() ,public io.crate.planner.operators.LogicalPlan source() ,public List<io.crate.planner.operators.LogicalPlan> sources() ,public boolean supportsDistributedReads() <variables>final non-sealed io.crate.planner.operators.LogicalPlan source,private final non-sealed List<io.crate.planner.operators.LogicalPlan> sources
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/operators/StatementClassifier.java
|
Classification
|
toString
|
/**
 * Immutable classification of a statement: its {@link Plan.StatementType}
 * plus an optional set of labels (collected by the statement-classifier
 * visitors). Implements value-based equals/hashCode over both fields.
 */
class Classification {
// Descriptive labels; empty when none were collected.
private final Set<String> labels;
private final Plan.StatementType type;
public Classification(Plan.StatementType type, Set<String> labels) {
this.type = type;
this.labels = labels;
}
// Convenience constructor for a classification without labels.
public Classification(Plan.StatementType type) {
this.type = type;
this.labels = Collections.emptySet();
}
public Set<String> labels() {
return labels;
}
public Plan.StatementType type() {
return type;
}
@Override
public String toString() {<FILL_FUNCTION_BODY>}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Classification that = (Classification) o;
return Objects.equals(labels, that.labels) &&
type == that.type;
}
@Override
public int hashCode() {
return Objects.hash(labels, type);
}
}
|
return "Classification{type=" + type + ", labels=" + labels + "}";
| 281
| 26
| 307
|
<methods>public non-sealed void <init>() ,public java.lang.Void visitCollect(io.crate.planner.operators.Collect, Set<java.lang.String>) ,public java.lang.Void visitCorrelatedJoin(io.crate.planner.operators.CorrelatedJoin, Set<java.lang.String>) ,public java.lang.Void visitCount(io.crate.planner.operators.Count, Set<java.lang.String>) ,public java.lang.Void visitEval(io.crate.planner.operators.Eval, Set<java.lang.String>) ,public java.lang.Void visitFetch(io.crate.planner.operators.Fetch, Set<java.lang.String>) ,public java.lang.Void visitFilter(io.crate.planner.operators.Filter, Set<java.lang.String>) ,public java.lang.Void visitForeignCollect(io.crate.planner.operators.ForeignCollect, Set<java.lang.String>) ,public java.lang.Void visitGet(io.crate.planner.operators.Get, Set<java.lang.String>) ,public java.lang.Void visitGroupHashAggregate(io.crate.planner.operators.GroupHashAggregate, Set<java.lang.String>) ,public java.lang.Void visitGroupReference(io.crate.planner.optimizer.iterative.GroupReference, Set<java.lang.String>) ,public java.lang.Void visitHashAggregate(io.crate.planner.operators.HashAggregate, Set<java.lang.String>) ,public java.lang.Void visitHashJoin(io.crate.planner.operators.HashJoin, Set<java.lang.String>) ,public java.lang.Void visitInsert(io.crate.planner.operators.Insert, Set<java.lang.String>) ,public java.lang.Void visitInsert(io.crate.planner.operators.InsertFromValues, Set<java.lang.String>) ,public java.lang.Void visitJoinPlan(io.crate.planner.operators.JoinPlan, Set<java.lang.String>) ,public java.lang.Void visitLimit(io.crate.planner.operators.Limit, Set<java.lang.String>) ,public java.lang.Void visitLimitDistinct(io.crate.planner.operators.LimitDistinct, Set<java.lang.String>) ,public java.lang.Void visitMultiPhase(io.crate.planner.operators.MultiPhase, Set<java.lang.String>) ,public java.lang.Void visitNestedLoopJoin(io.crate.planner.operators.NestedLoopJoin, Set<java.lang.String>) ,public java.lang.Void 
visitOrder(io.crate.planner.operators.Order, Set<java.lang.String>) ,public java.lang.Void visitPlan(io.crate.planner.operators.LogicalPlan, Set<java.lang.String>) ,public java.lang.Void visitProjectSet(io.crate.planner.operators.ProjectSet, Set<java.lang.String>) ,public java.lang.Void visitRename(io.crate.planner.operators.Rename, Set<java.lang.String>) ,public java.lang.Void visitRootRelationBoundary(io.crate.planner.operators.RootRelationBoundary, Set<java.lang.String>) ,public java.lang.Void visitTableFunction(io.crate.planner.operators.TableFunction, Set<java.lang.String>) ,public java.lang.Void visitUnion(io.crate.planner.operators.Union, Set<java.lang.String>) ,public java.lang.Void visitWindowAgg(io.crate.planner.operators.WindowAgg, Set<java.lang.String>) <variables>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/optimizer/Optimizer.java
|
Optimizer
|
tryApplyRules
|
/**
 * Rule-based optimizer that repeatedly applies a fixed list of {@link Rule}s
 * to a logical plan — and, recursively, to its sources — until a fixpoint is
 * reached.
 */
class Optimizer {
private final List<Rule<?>> rules;
// Rules may require a minimum cluster version; checked before applying a rule.
private final Supplier<Version> minNodeVersionInCluster;
private final NodeContext nodeCtx;
public Optimizer(NodeContext nodeCtx,
Supplier<Version> minNodeVersionInCluster,
List<Rule<?>> rules) {
this.rules = rules;
this.minNodeVersionInCluster = minNodeVersionInCluster;
this.nodeCtx = nodeCtx;
}
/**
 * Optimizes the root, then the sources recursively, then the root once more
 * — a rule may only become applicable after the sources changed.
 */
public LogicalPlan optimize(LogicalPlan plan,
PlanStats planStats,
CoordinatorTxnCtx txnCtx,
OptimizerTracer tracer) {
var applicableRules = removeExcludedRules(rules, txnCtx.sessionSettings().excludedOptimizerRules());
LogicalPlan optimizedRoot = tryApplyRules(applicableRules, plan, planStats, txnCtx, tracer);
var optimizedSources = Lists.mapIfChange(optimizedRoot.sources(), x -> optimize(x, planStats, txnCtx, tracer));
return tryApplyRules(
applicableRules,
optimizedSources == optimizedRoot.sources() ? optimizedRoot : optimizedRoot.replaceSources(optimizedSources),
planStats,
txnCtx,
tracer
);
}
/**
 * Filters out session-excluded rules. Mandatory rules can never be excluded.
 * Returns the input list unchanged when there are no exclusions.
 */
public static List<Rule<?>> removeExcludedRules(List<Rule<?>> rules, Set<Class<? extends Rule<?>>> excludedRules) {
if (excludedRules.isEmpty()) {
return rules;
}
var result = new ArrayList<Rule<?>>(rules.size());
for (var rule : rules) {
if (rule.mandatory() == false &&
excludedRules.contains(rule.getClass())) {
continue;
}
result.add(rule);
}
return result;
}
private LogicalPlan tryApplyRules(List<Rule<?>> rules,
LogicalPlan plan,
PlanStats planStats,
TransactionContext txnCtx,
OptimizerTracer tracer) {<FILL_FUNCTION_BODY>}
/**
 * Applies {@code rule} to {@code node} if its pattern matches.
 *
 * @return the transformed plan, or {@code null} when the pattern did not match
 */
@Nullable
public static <T> LogicalPlan tryMatchAndApply(Rule<T> rule,
LogicalPlan node,
PlanStats planStats,
NodeContext nodeCtx,
TransactionContext txnCtx,
UnaryOperator<LogicalPlan> resolvePlan,
OptimizerTracer tracer) {
Match<T> match = rule.pattern().accept(node, Captures.empty(), resolvePlan);
if (match.isPresent()) {
tracer.ruleMatched(rule);
return rule.apply(match.value(), match.captures(), planStats, txnCtx, nodeCtx, resolvePlan);
}
return null;
}
}
|
LogicalPlan node = plan;
// Some rules may only become applicable after another rule triggered, so we keep
// trying to re-apply the rules as long as at least one plan was transformed.
boolean done = false;
int numIterations = 0;
UnaryOperator<LogicalPlan> resolvePlan = UnaryOperator.identity();
Version minVersion = minNodeVersionInCluster.get();
while (!done && numIterations < 10_000) {
done = true;
for (Rule<?> rule : rules) {
if (minVersion.before(rule.requiredVersion())) {
continue;
}
LogicalPlan transformedPlan = tryMatchAndApply(rule, node, planStats, nodeCtx, txnCtx, resolvePlan, tracer);
if (transformedPlan != null) {
tracer.ruleApplied(rule, transformedPlan, planStats);
node = transformedPlan;
done = false;
}
}
numIterations++;
}
assert numIterations < 10_000
: "Optimizer reached 10_000 iterations safety guard. This is an indication of a broken rule that matches again and again";
return node;
| 710
| 301
| 1,011
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/optimizer/iterative/IterativeOptimizer.java
|
IterativeOptimizer
|
exploreGroup
|
/**
 * Iterative, memo-based optimizer: the plan tree is stored in a {@link Memo}
 * so single nodes can be replaced without rebuilding the whole tree. Groups
 * are explored top-down and re-explored whenever a child group changed, until
 * no rule matches anymore.
 */
class IterativeOptimizer {
private final List<Rule<?>> rules;
// Rules may require a minimum cluster version; checked in exploreNode.
private final Supplier<Version> minNodeVersionInCluster;
private final NodeContext nodeCtx;
public IterativeOptimizer(NodeContext nodeCtx, Supplier<Version> minNodeVersionInCluster, List<Rule<?>> rules) {
this.rules = rules;
this.minNodeVersionInCluster = minNodeVersionInCluster;
this.nodeCtx = nodeCtx;
}
public LogicalPlan optimize(LogicalPlan plan, PlanStats planStats, CoordinatorTxnCtx txnCtx, OptimizerTracer tracer) {
var memo = new Memo(plan);
var planStatsWithMemo = planStats.withMemo(memo);
// Memo is used to have a mutable view over the tree so it can change nodes without
// having to re-build the full tree all the time.`GroupReference` is used as place-holder
// or proxy that must be resolved to the real plan node
UnaryOperator<LogicalPlan> groupReferenceResolver = node -> {
if (node instanceof GroupReference g) {
return memo.resolve(g.groupId());
}
// not a group reference, return same node
return node;
};
tracer.optimizationStarted(plan, planStatsWithMemo);
var applicableRules = removeExcludedRules(rules, txnCtx.sessionSettings().excludedOptimizerRules());
exploreGroup(memo.getRootGroup(), new Context(memo, groupReferenceResolver, applicableRules, txnCtx, planStatsWithMemo, tracer));
return memo.extract();
}
/**
 *
 * This processes a group by trying to apply all the rules of the optimizer to the given group and its children.
 * If any children are changed by a rule, the given group will be reprocessed to check if additional rules
 * can be matched until a fixpoint is reached.
 *
 * @param group the id of the group to explore
 * @param context the context of the optimizer
 * @return true if there were any changes of plans on the node or it's children or false if not
 */
private boolean exploreGroup(int group, Context context) {<FILL_FUNCTION_BODY>}
// Re-applies all rules to the node of the given group until none matches
// anymore; returns true if the node was transformed at least once. Bounded
// by maxIterations to guard against rules that keep matching their own output.
private boolean exploreNode(int group, Context context) {
var rules = context.rules;
var resolvePlan = context.groupReferenceResolver;
var node = context.memo.resolve(group);
int numIteration = 0;
int maxIterations = 10_000;
boolean progress = false;
boolean done = false;
var minVersion = minNodeVersionInCluster.get();
while (!done && numIteration < maxIterations) {
numIteration++;
done = true;
for (Rule<?> rule : rules) {
// Skip rules requiring a newer version than the oldest node in the cluster.
if (minVersion.before(rule.requiredVersion())) {
continue;
}
LogicalPlan transformed = Optimizer.tryMatchAndApply(
rule,
node,
context.planStats,
nodeCtx,
context.txnCtx,
resolvePlan,
context.tracer
);
if (transformed != null) {
// the plan changed, update memo to reference to the new plan
context.memo.replace(group, transformed);
node = transformed;
done = false;
progress = true;
var tracer = context.tracer;
if (tracer.isActive()) {
tracer.ruleApplied(rule, context.memo.extract(), context.planStats);
}
}
}
}
assert numIteration < maxIterations
: "Optimizer reached 10_000 iterations safety guard. This is an indication of a broken rule that matches again and again";
return progress;
}
// Recurses into every child group; in a memo all children must be GroupReferences.
private boolean exploreChildren(int group, Context context) {
boolean progress = false;
var expression = context.memo.resolve(group);
for (var child : expression.sources()) {
if (child instanceof GroupReference g) {
if (exploreGroup(g.groupId(), context)) {
progress = true;
}
} else {
throw new IllegalStateException("Expected child to be a group reference. Found: " + child.getClass().getName());
}
}
return progress;
}
// Immutable bundle of everything the explore* methods need.
private record Context(
Memo memo,
UnaryOperator<LogicalPlan> groupReferenceResolver,
List<Rule<?>> rules,
CoordinatorTxnCtx txnCtx,
PlanStats planStats,
OptimizerTracer tracer
) {}
}
|
// tracks whether this group or any children groups change as
// this method executes
var progress = exploreNode(group, context);
while (exploreChildren(group, context)) {
progress = true;
// This is an important part! We keep track
// if the children changed and try again the
// current group in case we can match additional rules
if (!exploreNode(group, context)) {
// no additional matches, so bail out
break;
}
}
return progress;
| 1,167
| 129
| 1,296
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/optimizer/joinorder/JoinGraph.java
|
GraphBuilder
|
visitJoinPlan
|
/**
 * Builds a {@link JoinGraph} from a logical plan tree: leaf plans become
 * graph nodes, equi-join conditions between two relations become edges, and
 * other predicates are kept as filters. The context map records which plan
 * produces which output symbol.
 */
class GraphBuilder extends LogicalPlanVisitor<Map<Symbol, LogicalPlan>, JoinGraph> {
// Resolves GroupReference place-holders (from the memo) to the real plan node.
private final UnaryOperator<LogicalPlan> resolvePlan;
GraphBuilder(UnaryOperator<LogicalPlan> resolvePlan) {
this.resolvePlan = resolvePlan;
}
@Override
public JoinGraph visitPlan(LogicalPlan logicalPlan, Map<Symbol, LogicalPlan> context) {
// Leaf case: remember which plan produces each output symbol and
// create a single-node graph without edges or filters.
for (Symbol output : logicalPlan.outputs()) {
context.put(output, logicalPlan);
}
return new JoinGraph(List.of(logicalPlan), Map.of(), List.of(), false);
}
@Override
public JoinGraph visitGroupReference(GroupReference groupReference, Map<Symbol, LogicalPlan> context) {
// Resolve the proxy and visit the actual plan node.
return resolvePlan.apply(groupReference).accept(this, context);
}
@Override
public JoinGraph visitFilter(Filter filter, Map<Symbol, LogicalPlan> context) {
var source = filter.source().accept(this, context);
return source.withFilters(List.of(filter.query()));
}
@Override
public JoinGraph visitJoinPlan(JoinPlan joinPlan, Map<Symbol, LogicalPlan> context) {<FILL_FUNCTION_BODY>}
// Collects join edges from equi-join conditions. Visiting a field/reference
// appends the plan that produces it (looked up in the context map) to `sources`.
private static class EdgeCollector extends SymbolVisitor<Map<Symbol, LogicalPlan>, Void> {
private final Map<LogicalPlan, Set<Edge>> edges = new HashMap<>();
private final List<LogicalPlan> sources = new ArrayList<>();
@Override
public Void visitField(ScopedSymbol s, Map<Symbol, LogicalPlan> context) {
sources.add(context.get(s));
return null;
}
@Override
public Void visitReference(Reference ref, Map<Symbol, LogicalPlan> context) {
sources.add(context.get(ref));
return null;
}
@Override
public Void visitFunction(io.crate.expression.symbol.Function f, Map<Symbol, LogicalPlan> context) {
var sizeSource = sources.size();
f.arguments().forEach(x -> x.accept(this, context));
if (f.name().equals(EqOperator.NAME)) {
assert sources.size() == sizeSource + 2 : "Source must be collected for each argument";
var fromSymbol = f.arguments().get(0);
var toSymbol = f.arguments().get(1);
var fromRelation = sources.get(sources.size() - 2);
var toRelation = sources.get(sources.size() - 1);
if (fromRelation != null && toRelation != null) {
// Edges are created and indexed for each equi-join condition
// from both directions e.g.:
// a.x = b.y
// becomes:
// a -> Edge[b, a.x, b.y]
// b -> Edge[a, a.x, b.y]
addEdge(fromRelation, new Edge(toRelation, fromSymbol, toSymbol));
addEdge(toRelation, new Edge(fromRelation, fromSymbol, toSymbol));
}
}
return null;
}
// Copy-on-write insert: stored edge sets stay immutable.
private void addEdge(LogicalPlan from, Edge edge) {
var values = edges.get(from);
if (values == null) {
values = Set.of(edge);
} else {
values = new HashSet<>(values);
values.add(edge);
}
edges.put(from, values);
}
}
}
|
var left = joinPlan.lhs().accept(this, context);
var right = joinPlan.rhs().accept(this, context);
if (joinPlan.joinType() == JoinType.CROSS) {
return left.joinWith(right).withCrossJoin(true);
}
if (joinPlan.joinType() != JoinType.INNER) {
return left.joinWith(right);
}
var joinCondition = joinPlan.joinCondition();
var edgeCollector = new EdgeCollector();
var filters = new ArrayList<Symbol>();
if (joinCondition != null) {
var split = QuerySplitter.split(joinCondition);
for (var entry : split.entrySet()) {
// we are only interested in equi-join conditions between
// two tables e.g.: a.x = b.y will result in
// (a,b) -> (a.x = b.y) and we can ignore any other
// filters. Therefore, we only want entries where we have
// two keys.
if (entry.getKey().size() == 2) {
entry.getValue().accept(edgeCollector, context);
} else {
filters.add(entry.getValue());
}
}
}
return left.joinWith(right).withEdges(edgeCollector.edges).withFilters(filters);
| 889
| 343
| 1,232
|
<methods>public non-sealed void <init>() ,public JoinGraph visitCollect(io.crate.planner.operators.Collect, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitCorrelatedJoin(io.crate.planner.operators.CorrelatedJoin, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitCount(io.crate.planner.operators.Count, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitEval(io.crate.planner.operators.Eval, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitFetch(io.crate.planner.operators.Fetch, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitFilter(io.crate.planner.operators.Filter, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitForeignCollect(io.crate.planner.operators.ForeignCollect, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitGet(io.crate.planner.operators.Get, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitGroupHashAggregate(io.crate.planner.operators.GroupHashAggregate, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitGroupReference(io.crate.planner.optimizer.iterative.GroupReference, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitHashAggregate(io.crate.planner.operators.HashAggregate, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitHashJoin(io.crate.planner.operators.HashJoin, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitInsert(io.crate.planner.operators.Insert, 
Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitInsert(io.crate.planner.operators.InsertFromValues, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitJoinPlan(io.crate.planner.operators.JoinPlan, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitLimit(io.crate.planner.operators.Limit, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitLimitDistinct(io.crate.planner.operators.LimitDistinct, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitMultiPhase(io.crate.planner.operators.MultiPhase, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitNestedLoopJoin(io.crate.planner.operators.NestedLoopJoin, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitOrder(io.crate.planner.operators.Order, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitPlan(io.crate.planner.operators.LogicalPlan, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitProjectSet(io.crate.planner.operators.ProjectSet, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitRename(io.crate.planner.operators.Rename, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitRootRelationBoundary(io.crate.planner.operators.RootRelationBoundary, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitTableFunction(io.crate.planner.operators.TableFunction, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitUnion(io.crate.planner.operators.Union, 
Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) ,public JoinGraph visitWindowAgg(io.crate.planner.operators.WindowAgg, Map<io.crate.expression.symbol.Symbol,io.crate.planner.operators.LogicalPlan>) <variables>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/optimizer/matcher/CapturePattern.java
|
CapturePattern
|
accept
|
/**
 * Pattern decorator that, on a successful match of the wrapped pattern,
 * records the matched value under the given {@link Capture} so that rules can
 * later retrieve it from the {@link Captures}.
 */
class CapturePattern<T> extends Pattern<T> {
private final Capture<T> capture;
private final Pattern<T> pattern;
CapturePattern(Capture<T> capture, Pattern<T> pattern) {
this.capture = capture;
this.pattern = pattern;
}
@Override
public Match<T> accept(Object object, Captures captures, UnaryOperator<LogicalPlan> resolvePlan) {<FILL_FUNCTION_BODY>}
}
|
Match<T> match = pattern.accept(object, captures, resolvePlan);
return match.flatMap(val -> Match.of(val, captures.add(Captures.of(capture, val))));
| 128
| 55
| 183
|
<methods>public non-sealed void <init>() ,public abstract Match<T> accept(java.lang.Object, io.crate.planner.optimizer.matcher.Captures, UnaryOperator<io.crate.planner.operators.LogicalPlan>) ,public Match<T> accept(java.lang.Object, io.crate.planner.optimizer.matcher.Captures) ,public Pattern<T> capturedAs(Capture<T>) ,public static Pattern<T> typeOf(Class<T>) ,public Pattern<T> with(Function<? super T,Optional<U>>, Pattern<V>) ,public Pattern<T> with(Predicate<? super T>) <variables>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/optimizer/matcher/WithPropertyPattern.java
|
WithPropertyPattern
|
accept
|
/**
 * Pattern decorator that narrows the wrapped pattern with an additional
 * predicate on the matched value; the match is rejected when the predicate
 * returns false.
 */
class WithPropertyPattern<T> extends Pattern<T> {
private final Pattern<T> pattern;
private final Predicate<? super T> propertyPredicate;
WithPropertyPattern(Pattern<T> pattern, Predicate<? super T> propertyPredicate) {
this.pattern = pattern;
this.propertyPredicate = propertyPredicate;
}
@Override
public Match<T> accept(Object object, Captures captures, UnaryOperator<LogicalPlan> resolvePlan) {<FILL_FUNCTION_BODY>}
}
|
Match<T> match = pattern.accept(object, captures, resolvePlan);
return match.flatMap(matchedValue -> {
if (propertyPredicate.test(matchedValue)) {
return match;
} else {
return Match.empty();
}
});
| 138
| 74
| 212
|
<methods>public non-sealed void <init>() ,public abstract Match<T> accept(java.lang.Object, io.crate.planner.optimizer.matcher.Captures, UnaryOperator<io.crate.planner.operators.LogicalPlan>) ,public Match<T> accept(java.lang.Object, io.crate.planner.optimizer.matcher.Captures) ,public Pattern<T> capturedAs(Capture<T>) ,public static Pattern<T> typeOf(Class<T>) ,public Pattern<T> with(Function<? super T,Optional<U>>, Pattern<V>) ,public Pattern<T> with(Predicate<? super T>) <variables>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/optimizer/rule/MoveFilterBeneathJoin.java
|
MoveFilterBeneathJoin
|
apply
|
/**
 * Optimizer rule that pushes a {@link Filter} sitting on top of a join down
 * into the join's sources where possible. The pattern only matches joins of
 * the supported types (INNER, LEFT, RIGHT, CROSS).
 */
class MoveFilterBeneathJoin implements Rule<Filter> {
private final Capture<AbstractJoinPlan> joinCapture;
private final Pattern<Filter> pattern;
// Push-down is only attempted for these join types (see pattern predicate).
private static final Set<JoinType> SUPPORTED_JOIN_TYPES = EnumSet.of(INNER, LEFT, RIGHT, CROSS);
public MoveFilterBeneathJoin() {
this.joinCapture = new Capture<>();
// Match: Filter -> (source) AbstractJoinPlan of a supported join type,
// capturing the join so apply() can rewrite it.
this.pattern = typeOf(Filter.class)
.with(source(),
typeOf(AbstractJoinPlan.class)
.capturedAs(joinCapture)
.with(join -> SUPPORTED_JOIN_TYPES.contains(join.joinType()))
);
}
@Override
public Pattern<Filter> pattern() {
return pattern;
}
@Override
public LogicalPlan apply(Filter filter,
Captures captures,
PlanStats planStats,
TransactionContext txnCtx,
NodeContext nodeCtx,
UnaryOperator<LogicalPlan> resolvePlan) {<FILL_FUNCTION_BODY>}
/** Wraps {@code source} in a Filter for {@code splitQuery}; returns it unchanged when the query is null. */
static LogicalPlan getNewSource(@Nullable Symbol splitQuery, LogicalPlan source) {
return splitQuery == null ? source : new Filter(source, splitQuery);
}
}
|
var join = captures.get(joinCapture);
var query = filter.query();
if (!WhereClause.canMatch(query)) {
return join.replaceSources(List.of(
getNewSource(query, join.lhs()),
getNewSource(query, join.rhs())
));
}
var splitQueries = QuerySplitter.split(query);
final int initialParts = splitQueries.size();
if (splitQueries.size() == 1 && splitQueries.keySet().iterator().next().size() > 1) {
return null;
}
var lhs = join.lhs();
var rhs = join.rhs();
var lhsRelations = new HashSet<>(lhs.relationNames());
var rhsRelations = new HashSet<>(rhs.relationNames());
var leftQuery = splitQueries.remove(lhsRelations);
var rightQuery = splitQueries.remove(rhsRelations);
if (leftQuery == null && rightQuery == null) {
// we don't have a match for the filter on rhs/lhs yet
// let's see if we have partial match with a subsection of the relations
var it = splitQueries.entrySet().iterator();
while (it.hasNext()) {
var entry = it.next();
var relationNames = entry.getKey();
var splitQuery = entry.getValue();
var matchesLhs = Sets.intersection(lhsRelations, relationNames);
var matchesRhs = Sets.intersection(rhsRelations, relationNames);
if (matchesRhs.isEmpty() == false && matchesLhs.isEmpty()) {
rightQuery = rightQuery == null ? splitQuery : AndOperator.of(rightQuery, splitQuery);
it.remove();
} else if (matchesRhs.isEmpty() && matchesLhs.isEmpty() == false) {
leftQuery = leftQuery == null ? splitQuery : AndOperator.of(leftQuery, splitQuery);
it.remove();
}
}
}
var newLhs = lhs;
var newRhs = rhs;
var joinType = join.joinType();
if (joinType == JoinType.INNER || joinType == JoinType.CROSS) {
newLhs = getNewSource(leftQuery, lhs);
newRhs = getNewSource(rightQuery, rhs);
} else {
if (joinType == JoinType.LEFT) {
newLhs = getNewSource(leftQuery, lhs);
if (rightQuery != null) {
splitQueries.put(rhsRelations, rightQuery);
}
} else if (joinType == JoinType.RIGHT) {
newRhs = getNewSource(rightQuery, rhs);
if (leftQuery != null) {
splitQueries.put(lhsRelations, leftQuery);
}
}
}
if (newLhs == lhs && newRhs == rhs) {
return null;
}
var newJoin = join.replaceSources(List.of(newLhs, newRhs));
if (splitQueries.isEmpty()) {
return newJoin;
} else if (initialParts == splitQueries.size()) {
return null;
} else {
return new Filter(newJoin, AndOperator.join(splitQueries.values()));
}
| 323
| 854
| 1,177
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/optimizer/rule/MoveFilterBeneathUnion.java
|
MoveFilterBeneathUnion
|
createNewFilter
|
class MoveFilterBeneathUnion implements Rule<Filter> {
private final Capture<Union> unionCapture;
private final Pattern<Filter> pattern;
public MoveFilterBeneathUnion() {
this.unionCapture = new Capture<>();
this.pattern = typeOf(Filter.class)
.with(source(), typeOf(Union.class).capturedAs(unionCapture));
}
@Override
public Pattern<Filter> pattern() {
return pattern;
}
@Override
public LogicalPlan apply(Filter filter,
Captures captures,
PlanStats planStats,
TransactionContext txnCtx,
NodeContext nodeCtx,
UnaryOperator<LogicalPlan> resolvePlan) {
Union union = captures.get(unionCapture);
LogicalPlan lhs = union.sources().get(0);
LogicalPlan rhs = union.sources().get(1);
return union.replaceSources(List.of(
createNewFilter(filter, lhs),
createNewFilter(filter, rhs)
));
}
private static Filter createNewFilter(Filter filter, LogicalPlan newSource) {<FILL_FUNCTION_BODY>}
}
|
Symbol newQuery = FieldReplacer.replaceFields(filter.query(), f -> {
int idx = filter.source().outputs().indexOf(f);
if (idx < 0) {
throw new IllegalArgumentException(
"Field used in filter must be present in its source outputs." +
f + " missing in " + filter.source().outputs());
}
return newSource.outputs().get(idx);
});
return new Filter(newSource, newQuery);
| 313
| 120
| 433
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/optimizer/rule/MoveOrderBeneathNestedLoop.java
|
MoveOrderBeneathNestedLoop
|
apply
|
class MoveOrderBeneathNestedLoop implements Rule<Order> {
private final Capture<NestedLoopJoin> nlCapture;
private final Pattern<Order> pattern;
public MoveOrderBeneathNestedLoop() {
this.nlCapture = new Capture<>();
this.pattern = typeOf(Order.class)
.with(source(),
typeOf(NestedLoopJoin.class)
.capturedAs(nlCapture)
.with(nl -> !nl.joinType().isOuter())
);
}
@Override
public Pattern<Order> pattern() {
return pattern;
}
@Override
public LogicalPlan apply(Order order,
Captures captures,
PlanStats planStats,
TransactionContext txnCtx,
NodeContext nodeCtx,
UnaryOperator<LogicalPlan> resolvePlan) {<FILL_FUNCTION_BODY>}
}
|
NestedLoopJoin nestedLoop = captures.get(nlCapture);
Set<RelationName> relationsInOrderBy =
Collections.newSetFromMap(new IdentityHashMap<>());
Consumer<ScopedSymbol> gatherRelationsFromField = f -> relationsInOrderBy.add(f.relation());
Consumer<Reference> gatherRelationsFromRef = r -> relationsInOrderBy.add(r.ident().tableIdent());
OrderBy orderBy = order.orderBy();
for (Symbol orderExpr : orderBy.orderBySymbols()) {
FieldsVisitor.visitFields(orderExpr, gatherRelationsFromField);
RefVisitor.visitRefs(orderExpr, gatherRelationsFromRef);
}
if (relationsInOrderBy.size() == 1) {
var relationInOrderBy = relationsInOrderBy.iterator().next();
var topMostLeftRelation = nestedLoop.relationNames().get(0);
if (relationInOrderBy.equals(topMostLeftRelation)) {
LogicalPlan lhs = nestedLoop.sources().get(0);
LogicalPlan newLhs = order.replaceSources(List.of(lhs));
return new NestedLoopJoin(
newLhs,
nestedLoop.sources().get(1),
nestedLoop.joinType(),
nestedLoop.joinCondition(),
nestedLoop.isFiltered(),
true,
nestedLoop.isRewriteNestedLoopJoinToHashJoinDone()
);
}
}
return null;
| 239
| 376
| 615
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/optimizer/rule/MoveOrderBeneathRename.java
|
MoveOrderBeneathRename
|
apply
|
class MoveOrderBeneathRename implements Rule<Order> {
private final Capture<Rename> renameCapture;
private final Pattern<Order> pattern;
public MoveOrderBeneathRename() {
this.renameCapture = new Capture<>();
this.pattern = typeOf(Order.class)
.with(source(), typeOf(Rename.class).capturedAs(renameCapture));
}
@Override
public Pattern<Order> pattern() {
return pattern;
}
@Override
public LogicalPlan apply(Order plan,
Captures captures,
PlanStats planStats,
TransactionContext txnCtx,
NodeContext nodeCtx,
UnaryOperator<LogicalPlan> resolvePlan) {<FILL_FUNCTION_BODY>}
}
|
Rename rename = captures.get(renameCapture);
Function<? super Symbol, ? extends Symbol> mapField = FieldReplacer.bind(rename::resolveField);
OrderBy mappedOrderBy = plan.orderBy().map(mapField);
if (rename.source().outputs().containsAll(mappedOrderBy.orderBySymbols())) {
Order newOrder = new Order(rename.source(), mappedOrderBy);
return rename.replaceSources(List.of(newOrder));
} else {
return null;
}
| 209
| 139
| 348
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/optimizer/rule/OptimizeCollectWhereClauseAccess.java
|
OptimizeCollectWhereClauseAccess
|
apply
|
class OptimizeCollectWhereClauseAccess implements Rule<Collect> {
private final Pattern<Collect> pattern;
public OptimizeCollectWhereClauseAccess() {
this.pattern = typeOf(Collect.class)
.with(collect ->
collect.relation() instanceof DocTableRelation
&& collect.where().hasQuery()
&& !Symbols.containsColumn(collect.outputs(), DocSysColumns.FETCHID)
);
}
@Override
public Pattern<Collect> pattern() {
return pattern;
}
@Override
public LogicalPlan apply(Collect collect,
Captures captures,
PlanStats planStats,
TransactionContext txnCtx,
NodeContext nodeCtx,
UnaryOperator<LogicalPlan> resolvePlan) {<FILL_FUNCTION_BODY>}
}
|
var relation = (DocTableRelation) collect.relation();
var normalizer = new EvaluatingNormalizer(nodeCtx, RowGranularity.CLUSTER, null, relation);
WhereClause where = collect.where();
var detailedQuery = WhereClauseOptimizer.optimize(
normalizer,
where.queryOrFallback(),
relation.tableInfo(),
txnCtx,
nodeCtx
);
Optional<DocKeys> docKeys = detailedQuery.docKeys();
//noinspection OptionalIsPresent no capturing lambda allocation
if (docKeys.isPresent()) {
return new Get(
relation,
docKeys.get(),
detailedQuery.query(),
collect.outputs(),
detailedQuery.queryHasPkSymbolsOnly()
);
} else if (!detailedQuery.clusteredBy().isEmpty() && collect.detailedQuery() == null) {
return new Collect(collect, detailedQuery);
} else {
return null;
}
| 212
| 254
| 466
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/optimizer/rule/ReorderNestedLoopJoin.java
|
ReorderNestedLoopJoin
|
apply
|
class ReorderNestedLoopJoin implements Rule<NestedLoopJoin> {
private final Pattern<NestedLoopJoin> pattern = typeOf(NestedLoopJoin.class)
.with(j -> j.orderByWasPushedDown() == false &&
j.joinType().supportsInversion() == true);
@Override
public Pattern<NestedLoopJoin> pattern() {
return pattern;
}
@Override
public LogicalPlan apply(NestedLoopJoin nestedLoop,
Captures captures,
PlanStats planStats,
TransactionContext txnCtx,
NodeContext nodeCtx,
UnaryOperator<LogicalPlan> resolvePlan) {<FILL_FUNCTION_BODY>}
}
|
// We move the smaller table to the right side since benchmarking
// revealed that this improves performance in most cases.
var lhStats = planStats.get(nestedLoop.lhs());
var rhStats = planStats.get(nestedLoop.rhs());
boolean expectedRowsAvailable = lhStats.numDocs() != -1 && rhStats.numDocs() != -1;
if (expectedRowsAvailable) {
if (lhStats.numDocs() < rhStats.numDocs()) {
// We need to keep the same order of the output symbols when lhs/rhs are swapped
// therefore we add an Eval on top with original output order
return Eval.create(
new NestedLoopJoin(
nestedLoop.rhs(),
nestedLoop.lhs(),
nestedLoop.joinType().invert(),
nestedLoop.joinCondition(),
nestedLoop.isFiltered(),
nestedLoop.orderByWasPushedDown(),
nestedLoop.isRewriteNestedLoopJoinToHashJoinDone()
),
nestedLoop.outputs());
}
}
return null;
| 184
| 279
| 463
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/optimizer/rule/RewriteGroupByKeysLimitToLimitDistinct.java
|
RewriteGroupByKeysLimitToLimitDistinct
|
eagerTerminateIsLikely
|
class RewriteGroupByKeysLimitToLimitDistinct implements Rule<Limit> {
private final Pattern<Limit> pattern;
private final Capture<GroupHashAggregate> groupCapture;
public RewriteGroupByKeysLimitToLimitDistinct() {
this.groupCapture = new Capture<>();
this.pattern = typeOf(Limit.class)
.with(
source(),
typeOf(GroupHashAggregate.class)
.capturedAs(groupCapture)
.with(groupAggregate -> groupAggregate.aggregates().isEmpty())
);
}
private static boolean eagerTerminateIsLikely(Limit limit,
GroupHashAggregate groupAggregate,
PlanStats planStats) {<FILL_FUNCTION_BODY>}
@Override
public Pattern<Limit> pattern() {
return pattern;
}
@Override
public LogicalPlan apply(Limit limit,
Captures captures,
PlanStats planStats,
TransactionContext txnCtx,
NodeContext nodeCtx,
UnaryOperator<LogicalPlan> resolvePlan) {
GroupHashAggregate groupBy = captures.get(groupCapture);
if (!eagerTerminateIsLikely(limit, groupBy, planStats)) {
return null;
}
return new LimitDistinct(
groupBy.source(),
limit.limit(),
limit.offset(),
groupBy.outputs()
);
}
@Override
public Version requiredVersion() {
return Version.V_4_1_0;
}
}
|
if (groupAggregate.outputs().size() > 1 || !groupAggregate.outputs().get(0).valueType().equals(DataTypes.STRING)) {
// `GroupByOptimizedIterator` can only be used for single text columns.
// If that is not the case we can always use LimitDistinct even if a eagerTerminate isn't likely
// because a regular GROUP BY would have to do at least the same amount of work in any case.
return true;
}
Stats groupHashAggregateStats = planStats.get(groupAggregate);
var limitSymbol = limit.limit();
if (limitSymbol instanceof Literal) {
var limitVal = DataTypes.INTEGER.sanitizeValue(((Literal<?>) limitSymbol).value());
// Would consume all source rows -> prefer default group by implementation which has other optimizations
// which are more beneficial in this scenario
if (limitVal > groupHashAggregateStats.numDocs()) {
return false;
}
}
long sourceRows = planStats.get(groupAggregate.source()).numDocs();
if (sourceRows == 0) {
return false;
}
var cardinalityRatio = groupHashAggregateStats.numDocs() / sourceRows;
/*
* The threshold was chosen after comparing `with limitDistinct` vs. `without limitDistinct`
*
* create table ids_with_tags (id text primary key, tag text not null)
*
* Running:
* select distinct tag from ids_with_tags limit 5
* `ids_with_tags` containing 5000 rows
*
* Data having been generated using:
*
* mkjson --num <num_unique_tags> tag="ulid()" | jq -r '.tag' >! tags.txt
* mkjson --num 5000 id="ulid()" tag="oneOf(fromFile('tags.txt'))" >! specs/data/ids_with_tag.json
*
* Number of unique tags: 1
* median: + 50.91%
* median: + 46.94%
* Number of unique tags: 2
* median: + 10.30%
* median: + 69.29%
* Number of unique tags: 3
* median: + 51.35%
* median: - 15.93%
* Number of unique tags: 4
* median: + 4.35%
* median: + 18.13%
* Number of unique tags: 5
* median: - 157.07%
* median: - 120.55%
* Number of unique tags: 6
* median: - 129.60%
* median: - 150.84%
* Number of unique tags: 7
*
* With 500_000 rows:
*
* Number of unique tags: 1
* median: + 98.69%
* median: + 81.54%
* Number of unique tags: 2
* median: + 73.22%
* median: + 82.40%
* Number of unique tags: 3
* median: + 124.86%
* median: + 107.27%
* Number of unique tags: 4
* median: + 102.73%
* median: + 69.48%
* Number of unique tags: 5
* median: - 199.17%
* median: - 199.36%
* Number of unique tags: 6
*
*/
double threshold = 0.001;
return cardinalityRatio > threshold;
| 403
| 1,005
| 1,408
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/optimizer/symbol/rule/MoveArrayLengthOnReferenceCastToLiteralCastInsideOperators.java
|
MoveArrayLengthOnReferenceCastToLiteralCastInsideOperators
|
apply
|
class MoveArrayLengthOnReferenceCastToLiteralCastInsideOperators implements Rule<Function> {
private final Capture<Function> castCapture;
private final Pattern<Function> pattern;
private final FunctionSymbolResolver functionResolver;
public MoveArrayLengthOnReferenceCastToLiteralCastInsideOperators(FunctionSymbolResolver functionResolver) {
this.functionResolver = functionResolver;
this.castCapture = new Capture<>();
this.pattern = typeOf(Function.class)
.with(f -> COMPARISON_OPERATORS.contains(f.name()))
.with(f -> f.arguments().get(1).symbolType().isValueOrParameterSymbol())
.with(f -> Optional.of(f.arguments().get(0)), typeOf(Function.class).capturedAs(castCapture)
.with(f -> f.isCast())
.with(f -> Optional.of(f.arguments().get(0)), typeOf(Function.class)
.with(f -> f.name().equals(ArrayUpperFunction.ARRAY_LENGTH)
|| f.name().equals(ArrayUpperFunction.ARRAY_UPPER))
.with(f -> f.arguments().get(0).symbolType() == SymbolType.REFERENCE)
)
);
}
@Override
public Pattern<Function> pattern() {
return pattern;
}
@Override
public Symbol apply(Function operator,
Captures captures,
NodeContext nodeCtx,
Symbol parentNode) {<FILL_FUNCTION_BODY>}
}
|
var literalOrParam = operator.arguments().get(1);
var castFunction = captures.get(castCapture);
var function = castFunction.arguments().get(0);
DataType<?> targetType = function.valueType();
return functionResolver.apply(
operator.name(),
List.of(function, literalOrParam.cast(targetType))
);
| 396
| 96
| 492
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/optimizer/symbol/rule/SwapCastsInLikeOperators.java
|
SwapCastsInLikeOperators
|
apply
|
class SwapCastsInLikeOperators implements Rule<Function> {
private final Set<String> LIKE_OPERATORS = Set.of(LikeOperators.OP_LIKE, LikeOperators.OP_ILIKE);
private final Capture<Function> castCapture;
private final Pattern<Function> pattern;
public SwapCastsInLikeOperators(FunctionSymbolResolver functionResolver) {
this.castCapture = new Capture<>();
this.pattern = typeOf(Function.class)
.with(f -> LIKE_OPERATORS.contains(f.name()))
.with(f -> f.arguments().get(1).symbolType().isValueOrParameterSymbol())
.with(f -> Optional.of(f.arguments().get(0)), typeOf(Function.class).capturedAs(castCapture)
.with(f -> f.isCast())
.with(f -> f.arguments().get(0) instanceof Reference ref && ref.valueType().id() == StringType.ID)
);
}
@Override
public Pattern<Function> pattern() {
return pattern;
}
@Override
public Symbol apply(Function likeFunction, Captures captures, NodeContext nodeCtx, Symbol parentNode) {<FILL_FUNCTION_BODY>}
}
|
var literalOrParam = likeFunction.arguments().get(1);
var castFunction = captures.get(castCapture);
var reference = castFunction.arguments().get(0);
CastMode castMode = castFunction.castMode();
assert castMode != null : "Pattern matched, function must be a cast";
Symbol castedLiteral = literalOrParam.cast(StringType.INSTANCE, castMode);
List<Symbol> newArgs;
if (likeFunction.arguments().size() == 3) {
// Don't lose ESCAPE character.
newArgs = List.of(reference, castedLiteral, likeFunction.arguments().get(2));
} else {
newArgs = List.of(reference, castedLiteral);
}
return new Function(
likeFunction.signature(),
newArgs,
likeFunction.valueType()
);
| 330
| 214
| 544
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/statement/DeletePlanner.java
|
DeletePlanner
|
planDelete
|
class DeletePlanner {
public static Plan planDelete(AnalyzedDeleteStatement delete,
SubqueryPlanner subqueryPlanner,
PlannerContext context) {
Plan plan = planDelete(delete, context);
return MultiPhasePlan.createIfNeeded(plan, subqueryPlanner.planSubQueries(delete).uncorrelated());
}
private static Plan planDelete(AnalyzedDeleteStatement delete, PlannerContext context) {<FILL_FUNCTION_BODY>}
@VisibleForTesting
public static class Delete implements Plan {
private final DocTableRelation table;
private final WhereClauseOptimizer.DetailedQuery detailedQuery;
public Delete(DocTableRelation table, WhereClauseOptimizer.DetailedQuery detailedQuery) {
this.table = table;
this.detailedQuery = detailedQuery;
}
@Override
public StatementType type() {
return StatementType.DELETE;
}
@Override
public void executeOrFail(DependencyCarrier executor,
PlannerContext plannerContext,
RowConsumer consumer,
Row params,
SubQueryResults subQueryResults) {
WhereClause where = detailedQuery.toBoundWhereClause(
table.tableInfo(),
params,
subQueryResults,
plannerContext.transactionContext(),
executor.nodeContext());
if (!where.partitions().isEmpty()
&& (!where.hasQuery() || Literal.BOOLEAN_TRUE.equals(where.query()))) {
DeleteIndexRequest request = new DeleteIndexRequest(where.partitions().toArray(new String[0]));
request.indicesOptions(IndicesOptions.lenientExpandOpen());
executor.client().execute(DeleteIndexAction.INSTANCE, request)
.whenComplete(new OneRowActionListener<>(consumer, o -> new Row1(-1L)));
return;
}
ExecutionPlan executionPlan = deleteByQuery(table, plannerContext, where);
NodeOperationTree nodeOpTree = NodeOperationTreeGenerator.fromPlan(executionPlan, executor.localNodeId());
executor.phasesTaskFactory()
.create(plannerContext.jobId(), Collections.singletonList(nodeOpTree))
.execute(consumer, plannerContext.transactionContext());
}
@Override
public List<CompletableFuture<Long>> executeBulk(DependencyCarrier executor,
PlannerContext plannerContext,
List<Row> bulkParams,
SubQueryResults subQueryResults) {
ArrayList<NodeOperationTree> nodeOperationTreeList = new ArrayList<>(bulkParams.size());
for (Row params : bulkParams) {
WhereClause where = detailedQuery.toBoundWhereClause(
table.tableInfo(),
params,
subQueryResults,
plannerContext.transactionContext(),
executor.nodeContext());
ExecutionPlan executionPlan = deleteByQuery(table, plannerContext, where);
nodeOperationTreeList.add(NodeOperationTreeGenerator.fromPlan(executionPlan, executor.localNodeId()));
}
return executor.phasesTaskFactory()
.create(plannerContext.jobId(), nodeOperationTreeList)
.executeBulk(plannerContext.transactionContext());
}
}
private static ExecutionPlan deleteByQuery(DocTableRelation table, PlannerContext context, WhereClause where) {
DocTableInfo tableInfo = table.tableInfo();
Reference idReference = requireNonNull(tableInfo.getReference(DocSysColumns.ID), "Table has to have a _id reference");
DeleteProjection deleteProjection = new DeleteProjection(new InputColumn(0, idReference.valueType()));
var sessionSettings = context.transactionContext().sessionSettings();
Routing routing = context.allocateRouting(
tableInfo, where, RoutingProvider.ShardSelection.PRIMARIES, sessionSettings);
RoutedCollectPhase collectPhase = new RoutedCollectPhase(
context.jobId(),
context.nextExecutionPhaseId(),
"collect",
routing,
tableInfo.rowGranularity(),
List.of(idReference),
List.of(deleteProjection),
Optimizer.optimizeCasts(where.queryOrFallback(), context),
DistributionInfo.DEFAULT_BROADCAST
);
Collect collect = new Collect(collectPhase, LimitAndOffset.NO_LIMIT, 0, 1, 1, null);
return Merge.ensureOnHandler(collect, context, Collections.singletonList(MergeCountProjection.INSTANCE));
}
}
|
DocTableRelation tableRel = delete.relation();
DocTableInfo table = tableRel.tableInfo();
EvaluatingNormalizer normalizer = EvaluatingNormalizer.functionOnlyNormalizer(context.nodeContext());
WhereClauseOptimizer.DetailedQuery detailedQuery = WhereClauseOptimizer.optimize(
normalizer, delete.query(), table, context.transactionContext(), context.nodeContext());
Symbol query = detailedQuery.query();
if (!detailedQuery.partitions().isEmpty()) {
// deleting whole partitions is only valid if the query only contains filters based on partition-by cols
var hasNonPartitionReferences = SymbolVisitors.any(
s -> s instanceof Reference && table.partitionedByColumns().contains(s) == false,
query
);
if (hasNonPartitionReferences == false) {
return new DeletePartitions(table.ident(), detailedQuery.partitions());
}
}
if (detailedQuery.docKeys().isPresent() && detailedQuery.queryHasPkSymbolsOnly()) {
return new DeleteById(tableRel.tableInfo(), detailedQuery.docKeys().get());
}
if (table.isPartitioned() && query instanceof Input<?> input && DataTypes.BOOLEAN.sanitizeValue(input.value())) {
return new DeleteAllPartitions(Lists.map(table.partitions(), IndexParts::toIndexName));
}
return new Delete(tableRel, detailedQuery);
| 1,142
| 361
| 1,503
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/statement/SetSessionAuthorizationPlan.java
|
SetSessionAuthorizationPlan
|
executeOrFail
|
class SetSessionAuthorizationPlan implements Plan {
private final AnalyzedSetSessionAuthorizationStatement setSessionAuthorization;
private final Roles roles;
public SetSessionAuthorizationPlan(AnalyzedSetSessionAuthorizationStatement setSessionAuthorization,
Roles roles) {
this.setSessionAuthorization = setSessionAuthorization;
this.roles = roles;
}
@Override
public StatementType type() {
return StatementType.MANAGEMENT;
}
@Override
public void executeOrFail(DependencyCarrier executor,
PlannerContext plannerContext,
RowConsumer consumer,
Row params,
SubQueryResults subQueryResults) throws Exception {<FILL_FUNCTION_BODY>}
}
|
var sessionSettings = plannerContext.transactionContext().sessionSettings();
String userName = setSessionAuthorization.user();
Role user;
if (userName != null) {
user = roles.getUser(userName);
} else {
user = sessionSettings.authenticatedUser();
}
sessionSettings.setSessionUser(user);
consumer.accept(InMemoryBatchIterator.empty(SENTINEL), null);
| 184
| 112
| 296
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/planner/statement/SetSessionPlan.java
|
SetSessionPlan
|
executeOrFail
|
class SetSessionPlan implements Plan {
private static final Logger LOGGER = LogManager.getLogger(SetSessionPlan.class);
private final List<Assignment<Symbol>> settings;
private final SessionSettingRegistry sessionSettingRegistry;
public SetSessionPlan(List<Assignment<Symbol>> settings, SessionSettingRegistry sessionSettingRegistry) {
this.settings = settings;
this.sessionSettingRegistry = sessionSettingRegistry;
}
@Override
public StatementType type() {
return StatementType.MANAGEMENT;
}
@Override
public void executeOrFail(DependencyCarrier executor,
PlannerContext plannerContext,
RowConsumer consumer,
Row params,
SubQueryResults subQueryResults) throws Exception {<FILL_FUNCTION_BODY>}
@VisibleForTesting
static void ensureNotGlobalSetting(String settingName) {
List<String> nameParts = CrateSettings.settingNamesByPrefix(settingName);
if (nameParts.size() != 0) {
throw new IllegalArgumentException(String.format(
Locale.ENGLISH,
"GLOBAL Cluster setting '%s' cannot be used with SET SESSION / LOCAL",
settingName));
}
}
}
|
Function<? super Symbol, Object> eval = x -> SymbolEvaluator.evaluate(
plannerContext.transactionContext(),
plannerContext.nodeContext(),
x,
params,
subQueryResults
);
var sessionSettings = plannerContext.transactionContext().sessionSettings();
Assignment<Symbol> assignment = settings.get(0);
String settingName = eval.apply(assignment.columnName()).toString();
SessionSetting<?> sessionSetting = sessionSettingRegistry.settings().get(settingName);
if (sessionSetting == null) {
LOGGER.info("SET SESSION STATEMENT WILL BE IGNORED: {}", settingName);
ensureNotGlobalSetting(settingName);
} else {
sessionSetting.apply(sessionSettings, assignment.expressions(), eval);
}
consumer.accept(InMemoryBatchIterator.empty(SENTINEL), null);
| 312
| 225
| 537
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/profile/ProfilingContext.java
|
ProfilingContext
|
resultAsMap
|
class ProfilingContext {
private static final double NS_TO_MS_FACTOR = 1_000_000.0d;
private final HashMap<String, Double> durationInMSByTimer;
private final List<QueryProfiler> profilers;
public ProfilingContext(List<QueryProfiler> profilers) {
this.profilers = profilers;
this.durationInMSByTimer = new HashMap<>();
}
public Map<String, Object> getDurationInMSByTimer() {
HashMap<String, Object> builder = new HashMap<>(durationInMSByTimer);
ArrayList<Map<String, Object>> queryTimings = new ArrayList<>();
for (var profiler : profilers) {
for (var profileResult : profiler.getTree()) {
queryTimings.add(resultAsMap(profileResult));
}
}
if (!queryTimings.isEmpty()) {
builder.put("QueryBreakdown", queryTimings);
}
return Collections.unmodifiableMap(builder);
}
private static Map<String, Object> resultAsMap(ProfileResult profileResult) {<FILL_FUNCTION_BODY>}
public Timer createAndStartTimer(String name) {
Timer timer = createTimer(name);
timer.start();
return timer;
}
public void stopTimerAndStoreDuration(Timer timer) {
timer.stop();
durationInMSByTimer.put(timer.name(), timer.durationNanos() / NS_TO_MS_FACTOR);
}
public Timer createTimer(String name) {
return new Timer(name);
}
public static String generateProfilingKey(int id, String name) {
return id + "-" + name;
}
}
|
HashMap<String, Object> queryTimingsBuilder = new HashMap<>();
queryTimingsBuilder.put("QueryName", profileResult.getQueryName());
queryTimingsBuilder.put("QueryDescription", profileResult.getLuceneDescription());
queryTimingsBuilder.put("Time", profileResult.getTime() / NS_TO_MS_FACTOR);
queryTimingsBuilder.put("BreakDown", profileResult.getTimeBreakdown().entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
e -> e.getKey().endsWith("_count") ? e.getValue() : e.getValue() / NS_TO_MS_FACTOR))
);
List<Map<String, Object>> children = profileResult.getProfiledChildren().stream()
.map(ProfilingContext::resultAsMap)
.collect(Collectors.toList());
if (!children.isEmpty()) {
queryTimingsBuilder.put("Children", children);
}
return Collections.unmodifiableMap(queryTimingsBuilder);
| 459
| 263
| 722
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/protocols/SSL.java
|
SSL
|
extractCN
|
class SSL {
/**
* Extract the common name from the subjectDN of a X509 Certificate
*/
@Nullable
public static String extractCN(Certificate certificate) {
if (certificate instanceof X509Certificate) {
return extractCN(((X509Certificate) certificate).getSubjectX500Principal().getName());
}
return null;
}
@Nullable
public static SSLSession getSession(Channel channel) {
SslHandler sslHandler = channel.pipeline().get(SslHandler.class);
if (sslHandler != null) {
return sslHandler.engine().getSession();
}
return null;
}
private static String extractCN(String subjectDN) {<FILL_FUNCTION_BODY>}
}
|
/*
* Get commonName using LdapName API
* The DN of X509 certificates are in rfc2253 format. Ldap uses the same format.
*
* Doesn't use X500Name because it's internal API
*/
try {
LdapName ldapName = new LdapName(subjectDN);
for (Rdn rdn : ldapName.getRdns()) {
if ("CN".equalsIgnoreCase(rdn.getType())) {
return rdn.getValue().toString();
}
}
throw new RuntimeException("Could not extract commonName from certificate subjectDN: " + subjectDN);
} catch (InvalidNameException e) {
throw new RuntimeException("Could not extract commonName from certificate", e);
}
| 205
| 207
| 412
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/protocols/http/MainAndStaticFileHandler.java
|
MainAndStaticFileHandler
|
writeResponse
|
class MainAndStaticFileHandler extends SimpleChannelInboundHandler<FullHttpRequest> {
private final Path sitePath;
private final NodeClient client;
private final Netty4CorsConfig corsConfig;
private final String nodeName;
public MainAndStaticFileHandler(String nodeName, Path home, NodeClient client, Netty4CorsConfig corsConfig) {
this.nodeName = nodeName;
this.sitePath = home.resolve("lib").resolve("site");
this.client = client;
this.corsConfig = corsConfig;
}
@Override
protected void channelRead0(ChannelHandlerContext ctx, FullHttpRequest msg) throws Exception {
switch (msg.uri().trim().toLowerCase(Locale.ENGLISH)) {
case "/admin":
case "/_plugin/crate-admin":
writeResponse(ctx, msg, redirectTo("/"));
break;
case "/index.html":
writeResponse(ctx, msg, StaticSite.serveSite(sitePath, msg, ctx.alloc()));
break;
default:
serveJsonOrSite(msg, ctx.alloc())
.whenComplete((resp, err) -> {
if (err == null) {
writeResponse(ctx, msg, resp);
} else {
var errResp = contentResponse(HttpResponseStatus.BAD_REQUEST, ctx.alloc(), err.getMessage());
writeResponse(ctx, msg, errResp);
}
});
break;
}
}
private void writeResponse(ChannelHandlerContext ctx, FullHttpRequest req, FullHttpResponse resp) {<FILL_FUNCTION_BODY>}
private CompletableFuture<FullHttpResponse> serveJsonOrSite(FullHttpRequest request, ByteBufAllocator alloc) throws IOException {
HttpHeaders headers = request.headers();
String userAgent = headers.get(HttpHeaderNames.USER_AGENT);
String accept = headers.get(HttpHeaderNames.ACCEPT);
if (shouldServeJSON(userAgent, accept, request.uri())) {
return serveJSON(request.method(), alloc);
} else {
return completedFuture(StaticSite.serveSite(sitePath, request, alloc));
}
}
private CompletableFuture<FullHttpResponse> serveJSON(HttpMethod method, ByteBufAllocator alloc) {
var requestClusterState = new ClusterStateRequest()
.blocks(true)
.metadata(false)
.nodes(false)
.local(true);
return client.execute(ClusterStateAction.INSTANCE, requestClusterState)
.thenApply(resp -> clusterStateRespToHttpResponse(method, resp, alloc, nodeName));
}
private static FullHttpResponse clusterStateRespToHttpResponse(HttpMethod method,
ClusterStateResponse response,
ByteBufAllocator alloc,
@Nullable String nodeName) {
var httpStatus = response.getState().blocks().hasGlobalBlockWithStatus(RestStatus.SERVICE_UNAVAILABLE)
? HttpResponseStatus.SERVICE_UNAVAILABLE
: HttpResponseStatus.OK;
try {
DefaultFullHttpResponse resp;
if (method == HttpMethod.HEAD) {
resp = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, httpStatus);
HttpUtil.setContentLength(resp, 0);
} else {
var buffer = alloc.buffer();
try (var outputStream = new ByteBufOutputStream(buffer)) {
writeJSON(outputStream, response, httpStatus, nodeName);
}
resp = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, httpStatus, buffer);
resp.headers().set(HttpHeaderNames.CONTENT_TYPE, "application/json");
HttpUtil.setContentLength(resp, buffer.readableBytes());
}
return resp;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private static void writeJSON(OutputStream outputStream,
ClusterStateResponse response,
HttpResponseStatus status,
@Nullable String nodeName) throws IOException {
var builder = new XContentBuilder(JsonXContent.JSON_XCONTENT, outputStream);
builder.prettyPrint().lfAtEnd();
builder.startObject();
builder.field("ok", status == HttpResponseStatus.OK);
builder.field("status", HttpResponseStatus.OK.code());
if (nodeName != null && !nodeName.isEmpty()) {
builder.field("name", nodeName);
}
builder.field("cluster_name", response.getClusterName().value());
builder.startObject("version")
.field("number", Version.CURRENT.externalNumber())
.field("build_hash", Build.CURRENT.hash())
.field("build_timestamp", Build.CURRENT.timestamp())
.field("build_snapshot", Version.CURRENT.isSnapshot())
.field("lucene_version", org.apache.lucene.util.Version.LATEST.toString())
.endObject();
builder.endObject();
builder.flush();
builder.close();
}
private static boolean shouldServeJSON(String userAgent, String accept, String uri) {
boolean isRoot = uri.equals("/");
boolean forceJson = isAcceptJson(accept);
return isRoot && (forceJson || !isBrowser(userAgent));
}
}
|
Netty4CorsHandler.setCorsResponseHeaders(req, resp, corsConfig);
ChannelPromise promise = ctx.newPromise();
if (isCloseConnection(req)) {
promise.addListener(ChannelFutureListener.CLOSE);
} else {
Headers.setKeepAlive(req.protocolVersion(), resp);
}
ctx.channel().writeAndFlush(resp, promise);
| 1,325
| 103
| 1,428
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/protocols/postgres/ConnectionProperties.java
|
ConnectionProperties
|
clientCert
|
class ConnectionProperties {
private static final Logger LOGGER = LogManager.getLogger(ConnectionProperties.class);
private final InetAddress address;
private final Protocol protocol;
private final boolean hasSSL;
@Nullable
private final SSLSession sslSession;
public ConnectionProperties(InetAddress address, Protocol protocol, @Nullable SSLSession sslSession) {
this.address = address;
this.protocol = protocol;
this.hasSSL = sslSession != null;
this.sslSession = sslSession;
}
public boolean hasSSL() {
return hasSSL;
}
public InetAddress address() {
return address;
}
public Protocol protocol() {
return protocol;
}
public Certificate clientCert() {<FILL_FUNCTION_BODY>}
}
|
// This logic isn't in the constructor to prevent logging in case of SSL without (expected) client-certificate auth
if (sslSession != null) {
try {
return sslSession.getPeerCertificates()[0];
} catch (ArrayIndexOutOfBoundsException | SSLPeerUnverifiedException e) {
LOGGER.debug("Client certificate not available", e);
}
}
return null;
| 214
| 108
| 322
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/protocols/postgres/ResultSetReceiver.java
|
ResultSetReceiver
|
batchFinished
|
class ResultSetReceiver extends BaseResultReceiver {
private final String query;
private final DelayableWriteChannel channel;
private final List<PGType<?>> columnTypes;
private final TransactionState transactionState;
private final AccessControl accessControl;
private final Channel directChannel;
private final DelayedWrites delayedWrites;
@Nullable
private final FormatCodes.FormatCode[] formatCodes;
private long rowCount = 0;
ResultSetReceiver(String query,
DelayableWriteChannel channel,
DelayedWrites delayedWrites,
TransactionState transactionState,
AccessControl accessControl,
List<PGType<?>> columnTypes,
@Nullable FormatCodes.FormatCode[] formatCodes) {
this.query = query;
this.channel = channel;
this.delayedWrites = delayedWrites;
this.directChannel = channel.bypassDelay();
this.transactionState = transactionState;
this.accessControl = accessControl;
this.columnTypes = columnTypes;
this.formatCodes = formatCodes;
}
@Override
public void setNextRow(Row row) {
rowCount++;
Messages.sendDataRow(directChannel, row, columnTypes, formatCodes);
if (rowCount % 1000 == 0) {
directChannel.flush();
}
}
@Override
public void batchFinished() {<FILL_FUNCTION_BODY>}
@Override
public void allFinished() {
ChannelFuture sendCommandComplete = Messages.sendCommandComplete(directChannel, query, rowCount);
channel.writePendingMessages(delayedWrites);
channel.flush();
sendCommandComplete.addListener(f -> super.allFinished());
}
@Override
public void fail(@NotNull Throwable throwable) {
ChannelFuture sendErrorResponse = Messages.sendErrorResponse(directChannel, accessControl, throwable);
channel.writePendingMessages(delayedWrites);
channel.flush();
sendErrorResponse.addListener(f -> super.fail(throwable));
}
}
|
ChannelFuture sendPortalSuspended = Messages.sendPortalSuspended(directChannel);
channel.writePendingMessages(delayedWrites);
channel.flush();
// Trigger the completion future but by-pass `sendCompleteComplete`
// This resultReceiver shouldn't be used anymore. The next `execute` message
// from the client will create a new one.
sendPortalSuspended.addListener(f -> super.allFinished());
| 541
| 118
| 659
|
<methods>public non-sealed void <init>() ,public void allFinished() ,public void batchFinished() ,public CompletableFuture<java.lang.Void> completionFuture() ,public void fail(java.lang.Throwable) ,public void setNextRow(io.crate.data.Row) <variables>private CompletableFuture<java.lang.Void> completionFuture
|
crate_crate
|
crate/server/src/main/java/io/crate/protocols/postgres/RetryOnFailureResultReceiver.java
|
RetryOnFailureResultReceiver
|
fail
|
class RetryOnFailureResultReceiver<T> implements ResultReceiver<T> {
private static final Logger LOGGER = LogManager.getLogger(RetryOnFailureResultReceiver.class);
private final ClusterService clusterService;
private final ClusterState initialState;
private final Predicate<String> hasIndex;
private final ResultReceiver<T> delegate;
private final UUID jobId;
private final BiConsumer<UUID, ResultReceiver<T>> retryAction;
private int attempt = 1;
public RetryOnFailureResultReceiver(ClusterService clusterService,
ClusterState initialState,
Predicate<String> hasIndex,
ResultReceiver<T> delegate,
UUID jobId,
BiConsumer<UUID, ResultReceiver<T>> retryAction) {
this.clusterService = clusterService;
this.initialState = initialState;
this.hasIndex = hasIndex;
this.delegate = delegate;
this.jobId = jobId;
this.retryAction = retryAction;
}
@Override
public void setNextRow(Row row) {
delegate.setNextRow(row);
}
@Override
public void batchFinished() {
delegate.batchFinished();
}
@Override
public void allFinished() {
delegate.allFinished();
}
@Override
public void fail(Throwable wrappedError) {<FILL_FUNCTION_BODY>}
private boolean indexWasTemporaryUnavailable(Throwable t) {
return t instanceof IndexNotFoundException && hasIndex.test(((IndexNotFoundException) t).getIndex().getName());
}
private void retry() {
UUID newJobId = UUIDs.dirtyUUID();
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Retrying statement due to a shard failure, attempt={}, jobId={}->{}", attempt, jobId, newJobId);
}
retryAction.accept(newJobId, this);
}
@Override
public CompletableFuture<T> completionFuture() {
return delegate.completionFuture();
}
@Override
public String toString() {
return "RetryOnFailureResultReceiver{" +
"delegate=" + delegate +
", jobId=" + jobId +
", attempt=" + attempt +
'}';
}
}
|
final Throwable error = SQLExceptions.unwrap(wrappedError);
if (attempt <= Constants.MAX_SHARD_MISSING_RETRIES &&
(SQLExceptions.isShardFailure(error) || error instanceof ConnectTransportException || indexWasTemporaryUnavailable(error))) {
if (clusterService.state().blocks().hasGlobalBlockWithStatus(RestStatus.SERVICE_UNAVAILABLE)) {
delegate.fail(error);
} else {
ClusterStateObserver clusterStateObserver = new ClusterStateObserver(initialState, clusterService, null, LOGGER);
clusterStateObserver.waitForNextChange(new ClusterStateObserver.Listener() {
@Override
public void onNewClusterState(ClusterState state) {
attempt += 1;
retry();
}
@Override
public void onClusterServiceClose() {
delegate.fail(error);
}
@Override
public void onTimeout(TimeValue timeout) {
delegate.fail(error);
}
});
}
} else {
delegate.fail(error);
}
| 606
| 275
| 881
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/protocols/postgres/types/BooleanType.java
|
BooleanType
|
writeAsBinary
|
class BooleanType extends PGType<Boolean> {
public static final BooleanType INSTANCE = new BooleanType();
static final int OID = 16;
private static final int TYPE_LEN = 1;
private static final int TYPE_MOD = -1;
private static final byte[] TEXT_TRUE = new byte[]{'t'};
private static final byte[] TEXT_FALSE = new byte[]{'f'};
private static final Collection<ByteBuffer> TRUTH_VALUES = Set.of(
ByteBuffer.wrap(new byte[]{'1'}),
ByteBuffer.wrap(new byte[]{'t'}),
ByteBuffer.wrap(new byte[]{'T'}),
ByteBuffer.wrap(new byte[]{'t', 'r', 'u', 'e'}),
ByteBuffer.wrap(new byte[]{'T', 'R', 'U', 'E'})
);
private BooleanType() {
super(OID, TYPE_LEN, TYPE_MOD, "bool");
}
@Override
public int typArray() {
return PGArray.BOOL_ARRAY.oid();
}
@Override
public String typeCategory() {
return TypeCategory.NUMERIC.code();
}
@Override
public String type() {
return Type.BASE.code();
}
@Override
public int writeAsBinary(ByteBuf buffer, @NotNull Boolean value) {<FILL_FUNCTION_BODY>}
@Override
byte[] encodeAsUTF8Text(@NotNull Boolean value) {
return value ? TEXT_TRUE : TEXT_FALSE;
}
@Override
public Boolean readBinaryValue(ByteBuf buffer, int valueLength) {
assert valueLength == TYPE_LEN : "length should be " + TYPE_LEN +
" because boolean is just a byte. Actual length: " + valueLength;
byte value = buffer.readByte();
switch (value) {
case 0:
return false;
case 1:
return true;
default:
throw new IllegalArgumentException("Unsupported binary bool: " + value);
}
}
@Override
Boolean decodeUTF8Text(byte[] bytes) {
return TRUTH_VALUES.contains(ByteBuffer.wrap(bytes));
}
}
|
byte byteValue = (byte) (value ? 1 : 0);
buffer.writeInt(TYPE_LEN);
buffer.writeByte(byteValue);
return INT32_BYTE_SIZE + TYPE_LEN;
| 573
| 60
| 633
|
<methods>public int oid() ,public abstract java.lang.Boolean readBinaryValue(ByteBuf, int) ,public java.lang.Boolean readTextValue(ByteBuf, int) ,public abstract int typArray() ,public java.lang.String typDelim() ,public int typElem() ,public io.crate.types.Regproc typInput() ,public java.lang.String typName() ,public io.crate.types.Regproc typOutput() ,public io.crate.types.Regproc typReceive() ,public io.crate.types.Regproc typSend() ,public abstract java.lang.String type() ,public abstract java.lang.String typeCategory() ,public short typeLen() ,public int typeMod() ,public abstract int writeAsBinary(ByteBuf, java.lang.Boolean) ,public int writeAsText(ByteBuf, java.lang.Boolean) <variables>static final int INT32_BYTE_SIZE,private static final Logger LOGGER,private final non-sealed int oid,private final non-sealed java.lang.String typName,private final non-sealed int typeLen,private final non-sealed int typeMod
|
crate_crate
|
crate/server/src/main/java/io/crate/protocols/postgres/types/IntegerType.java
|
IntegerType
|
readBinaryValue
|
class IntegerType extends PGType<Integer> {
static final int OID = 23;
private static final int TYPE_LEN = 4;
private static final int TYPE_MOD = -1;
public static final IntegerType INSTANCE = new IntegerType();
private IntegerType() {
super(OID, TYPE_LEN, TYPE_MOD, "int4");
}
@Override
public int typArray() {
return PGArray.INT4_ARRAY.oid();
}
@Override
public String typeCategory() {
return TypeCategory.NUMERIC.code();
}
@Override
public String type() {
return Type.BASE.code();
}
@Override
public int writeAsBinary(ByteBuf buffer, @NotNull Integer value) {
buffer.writeInt(TYPE_LEN);
buffer.writeInt(value);
return INT32_BYTE_SIZE + TYPE_LEN;
}
@Override
protected byte[] encodeAsUTF8Text(@NotNull Integer value) {
return Integer.toString(value).getBytes(StandardCharsets.UTF_8);
}
@Override
public Integer readBinaryValue(ByteBuf buffer, int valueLength) {<FILL_FUNCTION_BODY>}
@Override
Integer decodeUTF8Text(byte[] bytes) {
return Integer.parseInt(new String(bytes, StandardCharsets.UTF_8));
}
}
|
assert valueLength == TYPE_LEN
: "length should be " + TYPE_LEN + " because int is int32. Actual length: " + valueLength;
return buffer.readInt();
| 368
| 52
| 420
|
<methods>public int oid() ,public abstract java.lang.Integer readBinaryValue(ByteBuf, int) ,public java.lang.Integer readTextValue(ByteBuf, int) ,public abstract int typArray() ,public java.lang.String typDelim() ,public int typElem() ,public io.crate.types.Regproc typInput() ,public java.lang.String typName() ,public io.crate.types.Regproc typOutput() ,public io.crate.types.Regproc typReceive() ,public io.crate.types.Regproc typSend() ,public abstract java.lang.String type() ,public abstract java.lang.String typeCategory() ,public short typeLen() ,public int typeMod() ,public abstract int writeAsBinary(ByteBuf, java.lang.Integer) ,public int writeAsText(ByteBuf, java.lang.Integer) <variables>static final int INT32_BYTE_SIZE,private static final Logger LOGGER,private final non-sealed int oid,private final non-sealed java.lang.String typName,private final non-sealed int typeLen,private final non-sealed int typeMod
|
crate_crate
|
crate/server/src/main/java/io/crate/protocols/postgres/types/JsonType.java
|
JsonType
|
encodeAsUTF8Text
|
class JsonType extends PGType<Object> {
public static final JsonType INSTANCE = new JsonType();
static final int OID = 114;
private static final int TYPE_LEN = -1;
private static final int TYPE_MOD = -1;
private JsonType() {
super(OID, TYPE_LEN, TYPE_MOD, "json");
}
@Override
public int typArray() {
return PGArray.JSON_ARRAY.oid();
}
@Override
public String typeCategory() {
return TypeCategory.USER_DEFINED_TYPES.code();
}
@Override
public String type() {
return Type.BASE.code();
}
@Override
public Regproc typSend() {
return Regproc.of("json_send");
}
@Override
public Regproc typReceive() {
return Regproc.of("json_recv");
}
@Override
public int writeAsBinary(ByteBuf buffer, @NotNull Object value) {
byte[] bytes = encodeAsUTF8Text(value);
buffer.writeInt(bytes.length);
buffer.writeBytes(bytes);
return INT32_BYTE_SIZE + bytes.length;
}
@Override
protected byte[] encodeAsUTF8Text(@NotNull Object value) {<FILL_FUNCTION_BODY>}
@Override
public Object readBinaryValue(ByteBuf buffer, int valueLength) {
byte[] bytes = new byte[valueLength];
buffer.readBytes(bytes);
return decodeUTF8Text(bytes);
}
@Override
Object decodeUTF8Text(byte[] bytes) {
try {
XContentParser parser = JsonXContent.JSON_XCONTENT.createParser(
NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, bytes);
if (bytes.length > 1 && bytes[0] == '[') {
parser.nextToken();
return parser.list();
}
return parser.map();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
|
if (value instanceof String str) {
return str.getBytes(StandardCharsets.UTF_8);
}
try {
XContentBuilder builder = JsonXContent.builder();
if (value instanceof List<?> values) {
builder.startArray();
for (Object o : values) {
builder.value(o);
}
builder.endArray();
} else {
builder.map((Map) value);
}
builder.close();
return BytesReference.toBytes(BytesReference.bytes(builder));
} catch (IOException e) {
throw new RuntimeException(e);
}
| 555
| 159
| 714
|
<methods>public int oid() ,public abstract java.lang.Object readBinaryValue(ByteBuf, int) ,public java.lang.Object readTextValue(ByteBuf, int) ,public abstract int typArray() ,public java.lang.String typDelim() ,public int typElem() ,public io.crate.types.Regproc typInput() ,public java.lang.String typName() ,public io.crate.types.Regproc typOutput() ,public io.crate.types.Regproc typReceive() ,public io.crate.types.Regproc typSend() ,public abstract java.lang.String type() ,public abstract java.lang.String typeCategory() ,public short typeLen() ,public int typeMod() ,public abstract int writeAsBinary(ByteBuf, java.lang.Object) ,public int writeAsText(ByteBuf, java.lang.Object) <variables>static final int INT32_BYTE_SIZE,private static final Logger LOGGER,private final non-sealed int oid,private final non-sealed java.lang.String typName,private final non-sealed int typeLen,private final non-sealed int typeMod
|
crate_crate
|
crate/server/src/main/java/io/crate/protocols/postgres/types/OidType.java
|
OidType
|
readBinaryValue
|
class OidType extends PGType<Integer> {
public static final OidType INSTANCE = new OidType();
static final int OID = 26;
private static final int TYPE_LEN = 4;
private static final int TYPE_MOD = -1;
OidType() {
super(OID, TYPE_LEN, TYPE_MOD, "oid");
}
@Override
public int typArray() {
return PGArray.OID_ARRAY.oid();
}
@Override
public String typeCategory() {
return TypeCategory.NUMERIC.code();
}
@Override
public String type() {
return Type.BASE.code();
}
@Override
public int writeAsBinary(ByteBuf buffer, Integer value) {
buffer.writeInt(TYPE_LEN);
buffer.writeInt(value);
return INT32_BYTE_SIZE + TYPE_LEN;
}
@Override
public Integer readBinaryValue(ByteBuf buffer, int valueLength) {<FILL_FUNCTION_BODY>}
@Override
byte[] encodeAsUTF8Text(Integer value) {
return Integer.toString(value).getBytes(StandardCharsets.UTF_8);
}
@Override
Integer decodeUTF8Text(byte[] bytes) {
return Integer.parseInt(new String(bytes, StandardCharsets.UTF_8));
}
}
|
assert valueLength == TYPE_LEN
: "length should be " + TYPE_LEN + " because oid is int32. Actual length: " + valueLength;
return buffer.readInt();
| 366
| 53
| 419
|
<methods>public int oid() ,public abstract java.lang.Integer readBinaryValue(ByteBuf, int) ,public java.lang.Integer readTextValue(ByteBuf, int) ,public abstract int typArray() ,public java.lang.String typDelim() ,public int typElem() ,public io.crate.types.Regproc typInput() ,public java.lang.String typName() ,public io.crate.types.Regproc typOutput() ,public io.crate.types.Regproc typReceive() ,public io.crate.types.Regproc typSend() ,public abstract java.lang.String type() ,public abstract java.lang.String typeCategory() ,public short typeLen() ,public int typeMod() ,public abstract int writeAsBinary(ByteBuf, java.lang.Integer) ,public int writeAsText(ByteBuf, java.lang.Integer) <variables>static final int INT32_BYTE_SIZE,private static final Logger LOGGER,private final non-sealed int oid,private final non-sealed java.lang.String typName,private final non-sealed int typeLen,private final non-sealed int typeMod
|
crate_crate
|
crate/server/src/main/java/io/crate/protocols/postgres/types/PGFloatVectorType.java
|
PGFloatVectorType
|
writeAsBinary
|
class PGFloatVectorType extends PGType<float[]> {
public static final PGFloatVectorType INSTANCE = new PGFloatVectorType();
PGFloatVectorType() {
super(PGArray.FLOAT4_ARRAY.oid(), -1, -1, PGArray.FLOAT4_ARRAY.typName());
}
@Override
public int typArray() {
return 0;
}
@Override
public int typElem() {
return PGArray.FLOAT4_ARRAY.typElem();
}
@Override
public String typeCategory() {
return PGArray.FLOAT4_ARRAY.typeCategory();
}
@Override
public String type() {
return PGArray.FLOAT4_ARRAY.type();
}
@Override
public int writeAsBinary(ByteBuf buffer, @NotNull float[] value) {<FILL_FUNCTION_BODY>}
@Override
public float[] readBinaryValue(ByteBuf buffer, int valueLength) {
int dimensions = buffer.readInt();
assert dimensions == 1 : "float_vector should have only 1 dimension";
buffer.readInt(); // flags bit 0: 0=no-nulls, 1=has-nulls
buffer.readInt(); // element oid
int dimension = buffer.readInt();
buffer.readInt(); // lowerBound ignored
return readArrayAsBinary(buffer, dimension);
}
@Override
byte[] encodeAsUTF8Text(float[] value) {
ByteArrayList encodedValues = new ByteArrayList();
encodedValues.add((byte) '{');
for (int i = 0; i < value.length; i++) {
var f = value[i];
if (i > 0) {
encodedValues.add((byte) ',');
}
byte[] bytes = RealType.INSTANCE.encodeAsUTF8Text(f);
encodedValues.add((byte) '"');
encodedValues.add(bytes);
encodedValues.add((byte) '"');
}
encodedValues.add((byte) '}');
return Arrays.copyOfRange(encodedValues.buffer, 0, encodedValues.elementsCount);
}
@SuppressWarnings("unchecked")
@Override
float[] decodeUTF8Text(byte[] bytes) {
var list = (List<Object>) PgArrayParser.parse(bytes, RealType.INSTANCE::decodeUTF8Text);
float[] vector = new float[list.size()];
for (int i = 0; i < vector.length; i++) {
vector[i] = (float) list.get(i);
}
return vector;
}
private int writeArrayAsBinary(ByteBuf buffer, @NotNull float[] array) {
int bytesWritten = 0;
for (float f : array) {
bytesWritten += RealType.INSTANCE.writeAsBinary(buffer, f);
}
return bytesWritten;
}
static float[] readArrayAsBinary(ByteBuf buffer, final int dimension) {
float[] array = new float[dimension];
for (int i = 0; i < dimension; ++i) {
int len = buffer.readInt();
array[i] = RealType.INSTANCE.readBinaryValue(buffer, len);
}
return array;
}
}
|
int arrayLength = value.length;
final int lenIndex = buffer.writerIndex();
buffer.writeInt(0);
buffer.writeInt(1); // one dimension
buffer.writeInt(0); // flags bit 0: 0=no-nulls, 1=has-nulls
buffer.writeInt(typElem());
buffer.writeInt(arrayLength); // upper bound
buffer.writeInt(arrayLength); // lower bound
int bytesWritten = 4 + 4 + 4 + 8;
int len = bytesWritten + writeArrayAsBinary(buffer, value);
buffer.setInt(lenIndex, len);
return INT32_BYTE_SIZE + len; // add also the size of the length itself
| 854
| 184
| 1,038
|
<methods>public int oid() ,public abstract float[] readBinaryValue(ByteBuf, int) ,public float[] readTextValue(ByteBuf, int) ,public abstract int typArray() ,public java.lang.String typDelim() ,public int typElem() ,public io.crate.types.Regproc typInput() ,public java.lang.String typName() ,public io.crate.types.Regproc typOutput() ,public io.crate.types.Regproc typReceive() ,public io.crate.types.Regproc typSend() ,public abstract java.lang.String type() ,public abstract java.lang.String typeCategory() ,public short typeLen() ,public int typeMod() ,public abstract int writeAsBinary(ByteBuf, float[]) ,public int writeAsText(ByteBuf, float[]) <variables>static final int INT32_BYTE_SIZE,private static final Logger LOGGER,private final non-sealed int oid,private final non-sealed java.lang.String typName,private final non-sealed int typeLen,private final non-sealed int typeMod
|
crate_crate
|
crate/server/src/main/java/io/crate/protocols/postgres/types/RecordType.java
|
RecordType
|
encodeAsUTF8Text
|
class RecordType extends PGType<Row> {
static final int OID = 2249;
static final String NAME = "record";
static final RecordType EMPTY_RECORD = new RecordType(List.of());
private final List<PGType<?>> fieldTypes;
RecordType(List<PGType<?>> fieldTypes) {
super(OID, -1, -1, NAME);
this.fieldTypes = fieldTypes;
}
@Override
public int typArray() {
return 2287;
}
@Override
public String typeCategory() {
return TypeCategory.PSEUDO.code();
}
@Override
public String type() {
return Type.PSEUDO.code();
}
@Override
public Regproc typSend() {
return Regproc.of(NAME + "_send");
}
@Override
public Regproc typReceive() {
return Regproc.of(NAME + "_recv");
}
@SuppressWarnings({"unchecked", "rawtypes"})
@Override
public int writeAsBinary(ByteBuf buffer, Row record) {
final int startWriterIndex = buffer.writerIndex();
buffer.writeInt(0); // reserve space for the length of the record; updated later
buffer.writeInt(fieldTypes.size());
int bytesWritten = 4;
for (int i = 0; i < fieldTypes.size(); i++) {
PGType fieldType = fieldTypes.get(i);
buffer.writeInt(fieldType.oid());
bytesWritten += 4;
var value = record.get(i);
if (value == null) {
buffer.writeInt(-1); // -1 data length signals a NULL
continue;
}
bytesWritten += fieldType.writeAsBinary(buffer, value);
}
buffer.setInt(startWriterIndex, bytesWritten);
return 4 + bytesWritten;
}
@Override
public Row readBinaryValue(ByteBuf buffer, int valueLength) {
throw new UnsupportedOperationException("Input of anonymous record type values is not implemented");
}
@SuppressWarnings({"unchecked", "rawtypes"})
@Override
byte[] encodeAsUTF8Text(Row record) {<FILL_FUNCTION_BODY>}
@Override
Row decodeUTF8Text(byte[] bytes) {
throw new UnsupportedOperationException("Input of record type values is not implemented");
}
}
|
ByteArrayList bytes = new ByteArrayList();
// See PostgreSQL src/backend/utils/adt/rowtypes.c record_out(PG_FUNCTION_ARGS)
bytes.add((byte) '(');
for (int i = 0; i < record.numColumns(); i++) {
PGType fieldType = fieldTypes.get(i);
var value = record.get(i);
if (i > 0) {
bytes.add((byte) ',');
}
if (value == null) {
continue;
}
byte[] encodedValue = fieldType.encodeAsUTF8Text(value);
boolean needQuotes = encodedValue.length == 0;
for (int j = 0; j < encodedValue.length; j++) {
char c = (char) encodedValue[j];
if (c == '"' || c == '\\' || c == '(' || c == ')' || c == ',' || Character.isWhitespace(c)) {
needQuotes = true;
break;
}
}
if (needQuotes) {
bytes.add((byte) '\"');
}
bytes.add(encodedValue);
if (needQuotes) {
bytes.add((byte) '\"');
}
}
bytes.add((byte) ')');
return bytes.toArray();
| 641
| 342
| 983
|
<methods>public int oid() ,public abstract io.crate.data.Row readBinaryValue(ByteBuf, int) ,public io.crate.data.Row readTextValue(ByteBuf, int) ,public abstract int typArray() ,public java.lang.String typDelim() ,public int typElem() ,public io.crate.types.Regproc typInput() ,public java.lang.String typName() ,public io.crate.types.Regproc typOutput() ,public io.crate.types.Regproc typReceive() ,public io.crate.types.Regproc typSend() ,public abstract java.lang.String type() ,public abstract java.lang.String typeCategory() ,public short typeLen() ,public int typeMod() ,public abstract int writeAsBinary(ByteBuf, io.crate.data.Row) ,public int writeAsText(ByteBuf, io.crate.data.Row) <variables>static final int INT32_BYTE_SIZE,private static final Logger LOGGER,private final non-sealed int oid,private final non-sealed java.lang.String typName,private final non-sealed int typeLen,private final non-sealed int typeMod
|
crate_crate
|
crate/server/src/main/java/io/crate/protocols/postgres/types/RegclassType.java
|
RegclassType
|
decodeUTF8Text
|
class RegclassType extends PGType<Regclass> {
static final int OID = 2205;
private static final int TYPE_LEN = 4;
private static final int TYPE_MOD = -1;
public static final RegclassType INSTANCE = new RegclassType();
private RegclassType() {
super(OID, TYPE_LEN, TYPE_MOD, "regclass");
}
@Override
public int typArray() {
return PGArray.REGCLASS_ARRAY.oid();
}
@Override
public String typeCategory() {
return TypeCategory.NUMERIC.code();
}
@Override
public String type() {
return Type.BASE.code();
}
@Override
public int writeAsBinary(ByteBuf buffer, Regclass value) {
buffer.writeInt(TYPE_LEN);
buffer.writeInt(value.oid());
return INT32_BYTE_SIZE + TYPE_LEN;
}
@Override
public Regclass readBinaryValue(ByteBuf buffer, int valueLength) {
int oid = buffer.readInt();
return new Regclass(oid, String.valueOf(oid));
}
@Override
byte[] encodeAsUTF8Text(Regclass value) {
return String.valueOf(value.oid()).getBytes(StandardCharsets.UTF_8);
}
@Override
Regclass decodeUTF8Text(byte[] bytes) {<FILL_FUNCTION_BODY>}
}
|
String oidStr = new String(bytes, StandardCharsets.UTF_8);
try {
int oid = Integer.parseInt(oidStr);
return new Regclass(oid, oidStr);
} catch (NumberFormatException e) {
var indexParts = new IndexParts(oidStr);
return Regclass.fromRelationName(indexParts.toRelationName());
}
| 387
| 103
| 490
|
<methods>public int oid() ,public abstract io.crate.types.Regclass readBinaryValue(ByteBuf, int) ,public io.crate.types.Regclass readTextValue(ByteBuf, int) ,public abstract int typArray() ,public java.lang.String typDelim() ,public int typElem() ,public io.crate.types.Regproc typInput() ,public java.lang.String typName() ,public io.crate.types.Regproc typOutput() ,public io.crate.types.Regproc typReceive() ,public io.crate.types.Regproc typSend() ,public abstract java.lang.String type() ,public abstract java.lang.String typeCategory() ,public short typeLen() ,public int typeMod() ,public abstract int writeAsBinary(ByteBuf, io.crate.types.Regclass) ,public int writeAsText(ByteBuf, io.crate.types.Regclass) <variables>static final int INT32_BYTE_SIZE,private static final Logger LOGGER,private final non-sealed int oid,private final non-sealed java.lang.String typName,private final non-sealed int typeLen,private final non-sealed int typeMod
|
crate_crate
|
crate/server/src/main/java/io/crate/protocols/postgres/types/TimeTZType.java
|
TimeTZType
|
readBinaryValue
|
class TimeTZType extends PGType<TimeTZ> {
public static final PGType<TimeTZ> INSTANCE = new TimeTZType();
private static final int OID = 1266;
private static final String OID_TYPE_NAME = "timetz";
private static final int TYPE_MOD = -1;
TimeTZType() {
super(OID, TYPE_SIZE, TYPE_MOD, OID_TYPE_NAME);
}
@Override
public int typArray() {
return PGArray.TIMETZ_ARRAY.oid();
}
@Override
public String typeCategory() {
return TypeCategory.DATETIME.code();
}
@Override
public String type() {
return Type.BASE.code();
}
@Override
public Regproc typSend() {
return Regproc.of("timetz_send");
}
@Override
public Regproc typReceive() {
return Regproc.of("timetz_recv");
}
@Override
public int writeAsBinary(ByteBuf buffer, @NotNull TimeTZ value) {
buffer.writeInt(TYPE_SIZE);
buffer.writeLong(value.getMicrosFromMidnight());
buffer.writeInt(value.getSecondsFromUTC());
return INT32_BYTE_SIZE + TYPE_SIZE;
}
@Override
public TimeTZ readBinaryValue(ByteBuf buffer, int valueLength) {<FILL_FUNCTION_BODY>}
@Override
byte[] encodeAsUTF8Text(@NotNull TimeTZ time) {
return formatTime(time).getBytes(StandardCharsets.UTF_8);
}
@Override
TimeTZ decodeUTF8Text(byte[] bytes) {
return parse(new String(bytes, StandardCharsets.UTF_8));
}
}
|
assert valueLength == TYPE_SIZE : String.format(
Locale.ENGLISH,
"valueLength must be %d because timetz is a 12 byte structure. Actual length: %d",
TYPE_SIZE, valueLength);
return new TimeTZ(buffer.readLong(), buffer.readInt());
| 483
| 81
| 564
|
<methods>public int oid() ,public abstract io.crate.types.TimeTZ readBinaryValue(ByteBuf, int) ,public io.crate.types.TimeTZ readTextValue(ByteBuf, int) ,public abstract int typArray() ,public java.lang.String typDelim() ,public int typElem() ,public io.crate.types.Regproc typInput() ,public java.lang.String typName() ,public io.crate.types.Regproc typOutput() ,public io.crate.types.Regproc typReceive() ,public io.crate.types.Regproc typSend() ,public abstract java.lang.String type() ,public abstract java.lang.String typeCategory() ,public short typeLen() ,public int typeMod() ,public abstract int writeAsBinary(ByteBuf, io.crate.types.TimeTZ) ,public int writeAsText(ByteBuf, io.crate.types.TimeTZ) <variables>static final int INT32_BYTE_SIZE,private static final Logger LOGGER,private final non-sealed int oid,private final non-sealed java.lang.String typName,private final non-sealed int typeLen,private final non-sealed int typeMod
|
crate_crate
|
crate/server/src/main/java/io/crate/protocols/postgres/types/TimestampZType.java
|
TimestampZType
|
encodeAsUTF8Text
|
class TimestampZType extends BaseTimestampType {
public static final PGType INSTANCE = new TimestampZType();
private static final int OID = 1184;
private static final String NAME = "timestamptz";
// For Golang if date is AD (after Christ), era abbreviation is not parsed.
private static final DateTimeFormatter ISO_FORMATTER =
DateTimeFormat.forPattern("YYYY-MM-dd HH:mm:ss.SSS+00").withZoneUTC().withLocale(Locale.ENGLISH);
// For Golang if date is BC (before Christ), era abbreviation needs to be appended.
private static final DateTimeFormatter ISO_FORMATTER_WITH_ERA =
DateTimeFormat.forPattern("YYYY-MM-dd HH:mm:ss.SSS+00 G").withZoneUTC().withLocale(Locale.ENGLISH);
private static final DateTimeFormatter[] PARSERS_WITHOUT_ERA = generateParseFormatters(false);
private static final DateTimeFormatter[] PARSERS_WITH_ERA = generateParseFormatters(true);
private static DateTimeFormatter[] generateParseFormatters(boolean withEra) {
DateTimeFormatter[] formatters = new DateTimeFormatter[10];
String prefix = "YYYY-MM-dd HH:mm:ss";
String suffix = "ZZ";
if (withEra) {
suffix = "ZZ G";
}
formatters[0] = DateTimeFormat.forPattern(prefix + suffix).withLocale(Locale.ENGLISH);
for (int i = 1; i < 10; i++) { // 1-9 digits for fraction of second
StringBuilder pattern = new StringBuilder(prefix);
pattern.append('.');
for (int j = 1; j <= i; j++) {
pattern.append('S');
}
pattern.append(suffix);
formatters[i] = DateTimeFormat.forPattern(pattern.toString()).withLocale(Locale.ENGLISH);
}
return formatters;
}
private TimestampZType() {
super(OID, TYPE_LEN, TYPE_MOD, NAME);
}
@Override
public int typArray() {
return PGArray.TIMESTAMPZ_ARRAY.oid();
}
@Override
public Regproc typSend() {
return Regproc.of("timestamptz_send");
}
@Override
public Regproc typReceive() {
return Regproc.of("timestamptz_recv");
}
@Override
byte[] encodeAsUTF8Text(@NotNull Object value) {<FILL_FUNCTION_BODY>}
@Override
Object decodeUTF8Text(byte[] bytes) {
// Currently seems that only GoLang prepared statements are sent as TimestampType with time zone
// Other PostgreSQL clients send the parameter as Bigint or Varchar
String s = new String(bytes, StandardCharsets.UTF_8);
try {
return DataTypes.TIMESTAMPZ.explicitCast(s, CoordinatorTxnCtx.systemTransactionContext().sessionSettings());
} catch (Exception e) {
int endOfSeconds = s.indexOf(".");
int idx = endOfSeconds;
if (endOfSeconds > 0) {
idx++;
while (s.charAt(idx) != '+' && s.charAt(idx) != '-') { // start of timezone
idx++;
}
}
int fractionDigits = idx - endOfSeconds - 1;
fractionDigits = fractionDigits < 0 ? 0 : fractionDigits;
if (fractionDigits > 9) {
throw new IllegalArgumentException("Cannot parse more than 9 digits for fraction of a second");
}
boolean withEra = s.endsWith("BC") || s.endsWith("AD");
if (withEra) {
return PARSERS_WITH_ERA[fractionDigits].parseMillis(s);
}
return PARSERS_WITHOUT_ERA[fractionDigits].parseMillis(s);
}
}
}
|
long msecs = (long) value;
if (msecs >= FIRST_MSEC_AFTER_CHRIST) {
return ISO_FORMATTER.print(msecs).getBytes(StandardCharsets.UTF_8);
} else {
return ISO_FORMATTER_WITH_ERA.print(msecs).getBytes(StandardCharsets.UTF_8);
}
| 1,048
| 101
| 1,149
|
<methods>public java.lang.Object readBinaryValue(ByteBuf, int) ,public java.lang.String type() ,public java.lang.String typeCategory() ,public int writeAsBinary(ByteBuf, java.lang.Object) <variables>private static final long EPOCH_DIFF_IN_MS,protected static final long FIRST_MSEC_AFTER_CHRIST,protected static final int TYPE_LEN,protected static final int TYPE_MOD
|
crate_crate
|
crate/server/src/main/java/io/crate/protocols/postgres/types/VarCharType.java
|
VarCharType
|
readBinaryValue
|
class VarCharType extends PGType<Object> {
static final int OID = 1043;
private static final int ARRAY_OID = 1015;
private static final int TYPE_LEN = -1;
private static final int TYPE_MOD = -1;
public static final VarCharType INSTANCE = new VarCharType(ARRAY_OID);
private final int typArray;
private VarCharType(int typArray) {
super(OID, TYPE_LEN, TYPE_MOD, "varchar");
this.typArray = typArray;
}
private VarCharType(int oid, int typArray, int maxLength, String aliasName) {
super(oid, maxLength, TYPE_MOD, aliasName);
this.typArray = typArray;
}
@Override
public int typArray() {
return typArray;
}
@Override
public String type() {
return Type.BASE.code();
}
@Override
public String typeCategory() {
return TypeCategory.STRING.code();
}
@Override
public int writeAsBinary(ByteBuf buffer, @NotNull Object value) {
String string = DataTypes.STRING.implicitCast(value);
int writerIndex = buffer.writerIndex();
buffer.writeInt(0);
int bytesWritten = buffer.writeCharSequence(string, StandardCharsets.UTF_8);
buffer.setInt(writerIndex, bytesWritten);
return INT32_BYTE_SIZE + bytesWritten;
}
@Override
public int writeAsText(ByteBuf buffer, @NotNull Object value) {
return writeAsBinary(buffer, value);
}
@Override
protected byte[] encodeAsUTF8Text(@NotNull Object value) {
return DataTypes.STRING.implicitCast(value).getBytes(StandardCharsets.UTF_8);
}
@Override
public String readTextValue(ByteBuf buffer, int valueLength) {
return readBinaryValue(buffer, valueLength);
}
@Override
public String readBinaryValue(ByteBuf buffer, int valueLength) {<FILL_FUNCTION_BODY>}
@Override
String decodeUTF8Text(byte[] bytes) {
return new String(bytes, StandardCharsets.UTF_8);
}
static class NameType {
static final int OID = 19;
private static final int ARRAY_OID = -1;
private static final int TYPE_LEN = 64;
static final VarCharType INSTANCE = new VarCharType(OID, ARRAY_OID, TYPE_LEN, "name");
}
static class TextType {
static final int OID = 25;
static final int TEXT_ARRAY_OID = 1009;
static final VarCharType INSTANCE = new VarCharType(OID, TEXT_ARRAY_OID, -1, "text");
}
}
|
int readerIndex = buffer.readerIndex();
buffer.readerIndex(readerIndex + valueLength);
return buffer.toString(readerIndex, valueLength, StandardCharsets.UTF_8);
| 748
| 48
| 796
|
<methods>public int oid() ,public abstract java.lang.Object readBinaryValue(ByteBuf, int) ,public java.lang.Object readTextValue(ByteBuf, int) ,public abstract int typArray() ,public java.lang.String typDelim() ,public int typElem() ,public io.crate.types.Regproc typInput() ,public java.lang.String typName() ,public io.crate.types.Regproc typOutput() ,public io.crate.types.Regproc typReceive() ,public io.crate.types.Regproc typSend() ,public abstract java.lang.String type() ,public abstract java.lang.String typeCategory() ,public short typeLen() ,public int typeMod() ,public abstract int writeAsBinary(ByteBuf, java.lang.Object) ,public int writeAsText(ByteBuf, java.lang.Object) <variables>static final int INT32_BYTE_SIZE,private static final Logger LOGGER,private final non-sealed int oid,private final non-sealed java.lang.String typName,private final non-sealed int typeLen,private final non-sealed int typeMod
|
crate_crate
|
crate/server/src/main/java/io/crate/replication/logical/MetadataTracker.java
|
AckMetadataUpdateRequest
|
updateIndexMetadata
|
class AckMetadataUpdateRequest extends AcknowledgedRequest<AckMetadataUpdateRequest> {
}
@VisibleForTesting
static ClusterState updateIndexMetadata(String subscriptionName,
Subscription subscription,
ClusterState subscriberClusterState,
Response publicationsState,
IndexScopedSettings indexScopedSettings) {<FILL_FUNCTION_BODY>
|
// Check for all the subscribed tables if the index metadata and settings changed and if so apply
// the changes from the publisher cluster state to the subscriber cluster state
var updatedMetadataBuilder = Metadata.builder(subscriberClusterState.metadata());
var updateClusterState = false;
for (var followedTable : subscription.relations().keySet()) {
RelationMetadata relationMetadata = publicationsState.relationsInPublications().get(followedTable);
Map<String, IndexMetadata> publisherIndices = relationMetadata == null
? Map.of()
: relationMetadata.indices()
.stream()
.collect(Collectors.toMap(x -> x.getIndex().getName(), x -> x));
var publisherIndexMetadata = publisherIndices.get(followedTable.indexNameOrAlias());
var subscriberIndexMetadata = subscriberClusterState.metadata().index(followedTable.indexNameOrAlias());
if (publisherIndexMetadata != null && subscriberIndexMetadata != null) {
var updatedIndexMetadataBuilder = IndexMetadata.builder(subscriberIndexMetadata);
var updatedMapping = updateIndexMetadataMappings(publisherIndexMetadata, subscriberIndexMetadata);
if (updatedMapping != null) {
updatedIndexMetadataBuilder.putMapping(updatedMapping).mappingVersion(publisherIndexMetadata.getMappingVersion());
}
var updatedSettings = updateIndexMetadataSettings(
publisherIndexMetadata.getSettings(),
subscriberIndexMetadata.getSettings(),
indexScopedSettings
);
if (updatedSettings != null) {
updatedIndexMetadataBuilder.settings(updatedSettings).settingsVersion(subscriberIndexMetadata.getSettingsVersion() + 1L);
}
if (updatedMapping != null || updatedSettings != null) {
updatedMetadataBuilder.put(updatedIndexMetadataBuilder.build(), true);
updateClusterState = true;
}
}
}
if (updateClusterState) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Updated index metadata for subscription {}", subscriptionName);
}
return ClusterState.builder(subscriberClusterState).metadata(updatedMetadataBuilder).build();
} else {
return subscriberClusterState;
}
| 91
| 536
| 627
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/replication/logical/action/GetFileChunkAction.java
|
TransportAction
|
shardOperation
|
class TransportAction extends TransportSingleShardAction<Request, Response> {
private final IndicesService indicesService;
private final PublisherRestoreService publisherRestoreService;
@Inject
public TransportAction(ThreadPool threadPool,
ClusterService clusterService,
TransportService transportService,
IndicesService indicesService,
PublisherRestoreService publisherRestoreService) {
super(
NAME,
threadPool,
clusterService,
transportService,
Request::new,
ThreadPool.Names.GET
);
this.indicesService = indicesService;
this.publisherRestoreService = publisherRestoreService;
TransportActionProxy.registerProxyAction(transportService, NAME, Response::new);
}
@Override
protected Response shardOperation(Request request,
ShardId shardId) throws IOException {<FILL_FUNCTION_BODY>}
@Override
protected Writeable.Reader<Response> getResponseReader() {
return Response::new;
}
@Override
protected boolean resolveIndex(Request request) {
return true;
}
@Nullable
@Override
protected ShardsIterator shards(ClusterState state,
InternalRequest request) {
return state.routingTable().shardRoutingTable(request.request().shardId()).primaryShardIt();
}
}
|
var indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id());
var store = indexShard.store();
var buffer = new byte[request.length()];
var bytesRead = 0;
store.incRef();
var fileMetadata = request.storeFileMetadata();
try (var currentInput = publisherRestoreService.openInputStream(
request.restoreUUID(), request, fileMetadata.name(), fileMetadata.length())) {
var offset = request.offset();
if (offset < fileMetadata.length()) {
currentInput.skip(offset);
bytesRead = currentInput.read(buffer);
}
} finally {
store.decRef();
}
return new Response(
request.storeFileMetadata(),
request.offset(),
new BytesArray(buffer, 0, bytesRead)
);
| 339
| 222
| 561
|
<methods>public void <init>(java.lang.String) ,public boolean equals(java.lang.Object) ,public Reader<io.crate.replication.logical.action.GetFileChunkAction.Response> getResponseReader() ,public int hashCode() ,public java.lang.String name() ,public org.elasticsearch.transport.TransportRequestOptions transportOptions(org.elasticsearch.common.settings.Settings) <variables>private final non-sealed java.lang.String name
|
crate_crate
|
crate/server/src/main/java/io/crate/replication/logical/action/PublicationsStateAction.java
|
TransportAction
|
masterOperation
|
class TransportAction extends TransportMasterNodeReadAction<Request, Response> {
private final Roles roles;
@Inject
public TransportAction(TransportService transportService,
ClusterService clusterService,
ThreadPool threadPool,
Roles roles) {
super(Settings.EMPTY,
NAME,
false,
transportService,
clusterService,
threadPool,
Request::new);
this.roles = roles;
TransportActionProxy.registerProxyAction(transportService, NAME, Response::new);
}
@Override
protected String executor() {
return ThreadPool.Names.SAME;
}
@Override
protected Response read(StreamInput in) throws IOException {
return new Response(in);
}
@Override
protected void masterOperation(Request request,
ClusterState state,
ActionListener<Response> listener) throws Exception {<FILL_FUNCTION_BODY>}
@Override
protected ClusterBlockException checkBlock(Request request,
ClusterState state) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
}
}
|
// Ensure subscribing user was not dropped after remote connection was established on another side.
// Subscribing users must be checked on a publisher side as they belong to the publishing cluster.
Role subscriber = roles.findUser(request.subscribingUserName());
if (subscriber == null) {
throw new IllegalStateException(
String.format(
Locale.ENGLISH, "Cannot build publication state, subscribing user '%s' was not found.",
request.subscribingUserName()
)
);
}
PublicationsMetadata publicationsMetadata = state.metadata().custom(PublicationsMetadata.TYPE);
if (publicationsMetadata == null) {
LOGGER.trace("No publications found on remote cluster.");
throw new IllegalStateException("Cannot build publication state, no publications found");
}
Map<RelationName, RelationMetadata> allRelationsInPublications = new HashMap<>();
List<String> unknownPublications = new ArrayList<>();
for (var publicationName : request.publications()) {
var publication = publicationsMetadata.publications().get(publicationName);
if (publication == null) {
unknownPublications.add(publicationName);
continue;
}
// Publication owner cannot be null as we ensure that users who owns publication cannot be dropped.
// Also, before creating publication or subscription we check that owner was not dropped right before creation.
Role publicationOwner = roles.findUser(publication.owner());
allRelationsInPublications.putAll(
publication.resolveCurrentRelations(state, roles, publicationOwner, subscriber, publicationName));
}
listener.onResponse(new Response(allRelationsInPublications, unknownPublications));
| 293
| 420
| 713
|
<methods>public void <init>(java.lang.String) ,public boolean equals(java.lang.Object) ,public Reader<io.crate.replication.logical.action.PublicationsStateAction.Response> getResponseReader() ,public int hashCode() ,public java.lang.String name() ,public org.elasticsearch.transport.TransportRequestOptions transportOptions(org.elasticsearch.common.settings.Settings) <variables>private final non-sealed java.lang.String name
|
crate_crate
|
crate/server/src/main/java/io/crate/replication/logical/action/ReleasePublisherResourcesAction.java
|
TransportAction
|
shardOperation
|
class TransportAction extends TransportSingleShardAction<Request, AcknowledgedResponse> {
private static final Logger LOGGER = LogManager.getLogger(TransportAction.class);
private final PublisherRestoreService publisherRestoreService;
@Inject
public TransportAction(ThreadPool threadPool,
ClusterService clusterService,
TransportService transportService,
PublisherRestoreService publisherRestoreService) {
super(
NAME,
threadPool,
clusterService,
transportService,
Request::new,
ThreadPool.Names.GET
);
this.publisherRestoreService = publisherRestoreService;
TransportActionProxy.registerProxyAction(
transportService,
NAME,
AcknowledgedResponse::new
);
}
@Override
protected AcknowledgedResponse shardOperation(Request request,
ShardId shardId) throws IOException {<FILL_FUNCTION_BODY>}
@Override
protected Writeable.Reader<AcknowledgedResponse> getResponseReader() {
return AcknowledgedResponse::new;
}
@Override
protected boolean resolveIndex(Request request) {
return true;
}
@Nullable
@Override
protected ShardsIterator shards(ClusterState state,
InternalRequest request) {
return state.routingTable().shardRoutingTable(request.request().shardId()).primaryShardIt();
}
}
|
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Releasing resources for {} with restore-id as {}", shardId, request.restoreUUID());
}
publisherRestoreService.removeRestoreContext(request.restoreUUID());
return new AcknowledgedResponse(true);
| 361
| 78
| 439
|
<methods>public void <init>(java.lang.String) ,public boolean equals(java.lang.Object) ,public Reader<org.elasticsearch.action.support.master.AcknowledgedResponse> getResponseReader() ,public int hashCode() ,public java.lang.String name() ,public org.elasticsearch.transport.TransportRequestOptions transportOptions(org.elasticsearch.common.settings.Settings) <variables>private final non-sealed java.lang.String name
|
crate_crate
|
crate/server/src/main/java/io/crate/replication/logical/action/RestoreShardRequest.java
|
RestoreShardRequest
|
toString
|
class RestoreShardRequest<T extends SingleShardRequest<T>> extends SingleShardRequest<T>
implements RemoteClusterAwareRequest {
private final String restoreUUID;
private final DiscoveryNode node;
private final ShardId shardId;
private final String subscriberClusterName;
public RestoreShardRequest(String restoreUUID,
DiscoveryNode node,
ShardId shardId,
String subscriberClusterName) {
super(shardId.getIndexName());
this.restoreUUID = restoreUUID;
this.node = node;
this.shardId = shardId;
this.subscriberClusterName = subscriberClusterName;
}
public RestoreShardRequest(StreamInput in) throws IOException {
super(in);
this.restoreUUID = in.readString();
this.node = new DiscoveryNode(in);
this.shardId = new ShardId(in);
this.subscriberClusterName = in.readString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(restoreUUID);
node.writeTo(out);
shardId.writeTo(out);
out.writeString(subscriberClusterName);
}
@Override
public DiscoveryNode getPreferredTargetNode() {
return node;
}
public ShardId shardId() {
return shardId;
}
public String subscriberClusterName() {
return subscriberClusterName;
}
public String restoreUUID() {
return restoreUUID;
}
@Override
public String toString() {<FILL_FUNCTION_BODY>}
}
|
return "RestoreShardRequest{" +
"restoreUUID='" + restoreUUID + '\'' +
", node=" + node +
", shardId=" + shardId +
", subscriberClusterName='" + subscriberClusterName + '\'' +
", index='" + index + '\'' +
'}';
| 445
| 86
| 531
|
<methods>public void <init>() ,public void <init>(org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public java.lang.String index() ,public final T index(java.lang.String) ,public java.lang.String[] indices() ,public org.elasticsearch.action.support.IndicesOptions indicesOptions() ,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>public static final org.elasticsearch.action.support.IndicesOptions INDICES_OPTIONS,protected java.lang.String index,org.elasticsearch.index.shard.ShardId internalShardId
|
crate_crate
|
crate/server/src/main/java/io/crate/replication/logical/action/TransportCreateSubscriptionAction.java
|
TransportCreateSubscriptionAction
|
checkVersionCompatibility
|
class TransportCreateSubscriptionAction extends TransportMasterNodeAction<CreateSubscriptionRequest, AcknowledgedResponse> {
public static final String ACTION_NAME = "internal:crate:replication/logical/subscription/create";
private final String source;
private final LogicalReplicationService logicalReplicationService;
private final Roles roles;
@Inject
public TransportCreateSubscriptionAction(TransportService transportService,
ClusterService clusterService,
LogicalReplicationService logicalReplicationService,
ThreadPool threadPool,
Roles roles) {
super(ACTION_NAME,
transportService,
clusterService,
threadPool,
CreateSubscriptionRequest::new);
this.logicalReplicationService = logicalReplicationService;
this.roles = roles;
this.source = "create-subscription";
}
@Override
protected String executor() {
return ThreadPool.Names.SAME;
}
@Override
protected AcknowledgedResponse read(StreamInput in) throws IOException {
return new AcknowledgedResponse(in);
}
@Override
protected void masterOperation(CreateSubscriptionRequest request,
ClusterState state,
ActionListener<AcknowledgedResponse> listener) throws Exception {
// Ensure subscription owner exists
if (roles.findUser(request.owner()) == null) {
throw new IllegalStateException(
String.format(
Locale.ENGLISH, "Subscription '%s' cannot be created as the user '%s' owning the subscription has been dropped.",
request.name(),
request.owner()
)
);
}
logicalReplicationService.getPublicationState(
request.name(),
request.publications(),
request.connectionInfo()
)
.thenCompose(
response -> {
if (response.unknownPublications().isEmpty() == false) {
throw new PublicationUnknownException(response.unknownPublications().get(0));
}
// Published tables can have metadata or documents which subscriber with a lower version might not process.
// We check published tables version and not publisher cluster's MinNodeVersion.
// Publisher cluster can have a higher version but contain old tables, restored from a snapshot,
// in this case subscription works fine.
for (RelationMetadata relationMetadata: response.relationsInPublications().values()) {
if (relationMetadata.template() != null) {
checkVersionCompatibility(
relationMetadata.name().fqn(),
state.nodes().getMinNodeVersion(),
relationMetadata.template().settings()
);
}
if (!relationMetadata.indices().isEmpty()) {
// All indices belong to the same table and has same metadata.
IndexMetadata indexMetadata = relationMetadata.indices().get(0);
checkVersionCompatibility(
relationMetadata.name().fqn(),
state.nodes().getMinNodeVersion(),
indexMetadata.getSettings()
);
}
}
logicalReplicationService.verifyTablesDoNotExist(request.name(), response);
return submitClusterStateTask(request, response);
}
)
.whenComplete(
(acknowledgedResponse, err) -> {
if (err == null) {
listener.onResponse(acknowledgedResponse);
} else {
listener.onFailure(Exceptions.toException(err));
}
}
);
}
private static void checkVersionCompatibility(String tableFqn, Version subscriberMinNodeVersion, Settings settings) {<FILL_FUNCTION_BODY>}
private CompletableFuture<AcknowledgedResponse> submitClusterStateTask(CreateSubscriptionRequest request,
PublicationsStateAction.Response publicationsStateResponse) {
AckedClusterStateUpdateTask<AcknowledgedResponse> task = new AckedClusterStateUpdateTask<>(request) {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
Metadata currentMetadata = currentState.metadata();
Metadata.Builder mdBuilder = Metadata.builder(currentMetadata);
var oldMetadata = (SubscriptionsMetadata) mdBuilder.getCustom(SubscriptionsMetadata.TYPE);
if (oldMetadata != null && oldMetadata.subscription().containsKey(request.name())) {
throw new SubscriptionAlreadyExistsException(request.name());
}
HashMap<RelationName, Subscription.RelationState> relations = new HashMap<>();
for (var relation : publicationsStateResponse.tables()) {
relations.put(
relation,
new Subscription.RelationState(Subscription.State.INITIALIZING, null)
);
}
Subscription subscription = new Subscription(
request.owner(),
request.connectionInfo(),
request.publications(),
request.settings(),
relations
);
var newMetadata = SubscriptionsMetadata.newInstance(oldMetadata);
newMetadata.subscription().put(request.name(), subscription);
assert !newMetadata.equals(oldMetadata) : "must not be equal to guarantee the cluster change action";
mdBuilder.putCustom(SubscriptionsMetadata.TYPE, newMetadata);
return ClusterState.builder(currentState).metadata(mdBuilder).build();
}
@Override
protected AcknowledgedResponse newResponse(boolean acknowledged) {
return new AcknowledgedResponse(acknowledged);
}
};
clusterService.submitStateUpdateTask(source, task);
return task.completionFuture();
}
@Override
protected ClusterBlockException checkBlock(CreateSubscriptionRequest request, ClusterState state) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
}
}
|
Version publishedTableVersion = settings.getAsVersion(IndexMetadata.SETTING_VERSION_CREATED, null);
assert publishedTableVersion != null : "All published tables must have version created setting";
if (subscriberMinNodeVersion.beforeMajorMinor(publishedTableVersion)) {
throw new IllegalStateException(String.format(
Locale.ENGLISH,
"One of the published tables has version higher than subscriber's minimal node version." +
" Table=%s, Table-Version=%s, Local-Minimal-Version: %s",
tableFqn,
publishedTableVersion,
subscriberMinNodeVersion
));
}
| 1,418
| 166
| 1,584
|
<methods><variables>protected final non-sealed org.elasticsearch.cluster.service.ClusterService clusterService,private final non-sealed java.lang.String executor,protected final non-sealed org.elasticsearch.threadpool.ThreadPool threadPool,protected final non-sealed org.elasticsearch.transport.TransportService transportService
|
crate_crate
|
crate/server/src/main/java/io/crate/replication/logical/action/TransportDropPublicationAction.java
|
TransportDropPublicationAction
|
execute
|
class TransportDropPublicationAction extends AbstractDDLTransportAction<DropPublicationRequest, AcknowledgedResponse> {
public static final String ACTION_NAME = "internal:crate:replication/logical/publication/drop";
@Inject
public TransportDropPublicationAction(TransportService transportService,
ClusterService clusterService,
ThreadPool threadPool) {
super(ACTION_NAME,
transportService,
clusterService,
threadPool,
DropPublicationRequest::new,
AcknowledgedResponse::new,
AcknowledgedResponse::new,
"drop-publication");
}
@Override
public ClusterStateTaskExecutor<DropPublicationRequest> clusterStateTaskExecutor(DropPublicationRequest request) {
return new DDLClusterStateTaskExecutor<>() {
@Override
protected ClusterState execute(ClusterState currentState, DropPublicationRequest request) throws Exception {<FILL_FUNCTION_BODY>}
};
}
@Override
protected ClusterBlockException checkBlock(DropPublicationRequest request,
ClusterState state) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
}
}
|
Metadata currentMetadata = currentState.metadata();
Metadata.Builder mdBuilder = Metadata.builder(currentMetadata);
PublicationsMetadata oldMetadata = (PublicationsMetadata) mdBuilder.getCustom(PublicationsMetadata.TYPE);
if (oldMetadata == null && request.ifExists() == false) {
throw new PublicationUnknownException(request.name());
} else if (oldMetadata != null) {
if (oldMetadata.publications().containsKey(request.name())) {
PublicationsMetadata newMetadata = PublicationsMetadata.newInstance(oldMetadata);
newMetadata.publications().remove(request.name());
assert !newMetadata.equals(oldMetadata) : "must not be equal to guarantee the cluster change action";
mdBuilder.putCustom(PublicationsMetadata.TYPE, newMetadata);
return ClusterState.builder(currentState).metadata(mdBuilder).build();
} else if (request.ifExists() == false) {
throw new PublicationUnknownException(request.name());
}
}
return currentState;
| 303
| 256
| 559
|
<methods>public void <init>(java.lang.String, org.elasticsearch.transport.TransportService, org.elasticsearch.cluster.service.ClusterService, org.elasticsearch.threadpool.ThreadPool, Reader<io.crate.replication.logical.action.DropPublicationRequest>, Reader<org.elasticsearch.action.support.master.AcknowledgedResponse>, Function<java.lang.Boolean,org.elasticsearch.action.support.master.AcknowledgedResponse>, java.lang.String) ,public abstract ClusterStateTaskExecutor<io.crate.replication.logical.action.DropPublicationRequest> clusterStateTaskExecutor(io.crate.replication.logical.action.DropPublicationRequest) <variables>private final non-sealed Function<java.lang.Boolean,org.elasticsearch.action.support.master.AcknowledgedResponse> ackedResponseFunction,private final non-sealed Reader<org.elasticsearch.action.support.master.AcknowledgedResponse> reader,private final non-sealed java.lang.String source
|
crate_crate
|
crate/server/src/main/java/io/crate/replication/logical/analyze/LogicalReplicationAnalyzer.java
|
LogicalReplicationAnalyzer
|
analyze
|
class LogicalReplicationAnalyzer {
private final Schemas schemas;
private final LogicalReplicationService logicalReplicationService;
private final NodeContext nodeCtx;
public LogicalReplicationAnalyzer(Schemas schemas,
LogicalReplicationService logicalReplicationService,
NodeContext nodeCtx) {
this.schemas = schemas;
this.logicalReplicationService = logicalReplicationService;
this.nodeCtx = nodeCtx;
}
public AnalyzedCreatePublication analyze(CreatePublication createPublication, CoordinatorTxnCtx txnCtx) {
if (logicalReplicationService.publications().containsKey(createPublication.name())) {
throw new PublicationAlreadyExistsException(createPublication.name());
}
var tables = Lists.map(
createPublication.tables(),
q -> {
CoordinatorSessionSettings sessionSettings = txnCtx.sessionSettings();
DocTableInfo tableInfo = schemas.findRelation(
q,
Operation.CREATE_PUBLICATION,
sessionSettings.sessionUser(),
sessionSettings.searchPath()
);
boolean softDeletes = IndexSettings.INDEX_SOFT_DELETES_SETTING.get(tableInfo.parameters());
if (softDeletes == false) {
throw new UnsupportedOperationException(
String.format(
Locale.ENGLISH,
"Tables included in a publication must have the table setting " +
"'soft_deletes.enabled' set to `true`, current setting for table '%s': %b",
tableInfo.ident(),
softDeletes)
);
}
return tableInfo.ident();
}
);
return new AnalyzedCreatePublication(createPublication.name(), createPublication.isForAllTables(), tables);
}
public AnalyzedDropPublication analyze(DropPublication dropPublication,
CoordinatorSessionSettings sessionSettings) {
var publication = logicalReplicationService.publications().get(dropPublication.name());
if (dropPublication.ifExists() == false && publication == null) {
throw new PublicationUnknownException(dropPublication.name());
}
if (publication != null
&& publication.owner().equals(sessionSettings.sessionUser().name()) == false
&& sessionSettings.sessionUser().isSuperUser() == false) {
throw new UnauthorizedException("A publication can only be dropped by the owner or a superuser");
}
return new AnalyzedDropPublication(dropPublication.name(), dropPublication.ifExists());
}
public AnalyzedAlterPublication analyze(AlterPublication alterPublication,
CoordinatorSessionSettings sessionSettings) {<FILL_FUNCTION_BODY>}
public AnalyzedCreateSubscription analyze(CreateSubscription<Expression> createSubscription,
ParamTypeHints paramTypeHints,
CoordinatorTxnCtx txnCtx) {
if (logicalReplicationService.subscriptions().containsKey(createSubscription.name())) {
throw new SubscriptionAlreadyExistsException(createSubscription.name());
}
var expressionAnalyzer = new ExpressionAnalyzer(
txnCtx, nodeCtx, paramTypeHints, FieldProvider.TO_LITERAL_VALIDATE_NAME, null);
var exprCtx = new ExpressionAnalysisContext(txnCtx.sessionSettings());
GenericProperties<Symbol> genericProperties = createSubscription.properties()
.map(p -> expressionAnalyzer.convert(p, exprCtx));
Symbol connectionInfo = expressionAnalyzer.convert(createSubscription.connectionInfo(), exprCtx);
return new AnalyzedCreateSubscription(
createSubscription.name(),
connectionInfo,
createSubscription.publications(),
genericProperties
);
}
public AnalyzedDropSubscription analyze(DropSubscription dropSubscription, CoordinatorSessionSettings sessionSettings) {
var subscription = logicalReplicationService.subscriptions().get(dropSubscription.name());
if (dropSubscription.ifExists() == false && subscription == null) {
throw new SubscriptionUnknownException(dropSubscription.name());
}
if (subscription != null
&& subscription.owner().equals(sessionSettings.sessionUser().name()) == false
&& sessionSettings.sessionUser().isSuperUser() == false) {
throw new UnauthorizedException("A subscription can only be dropped by the owner or a superuser");
}
return new AnalyzedDropSubscription(dropSubscription.name(), subscription, dropSubscription.ifExists());
}
public AnalyzedAlterSubscription analyze(AlterSubscription alterSubscription, CoordinatorSessionSettings sessionSettings) {
var subscription = logicalReplicationService.subscriptions().get(alterSubscription.name());
if (subscription == null) {
throw new SubscriptionUnknownException(alterSubscription.name());
}
if (subscription.owner().equals(sessionSettings.sessionUser().name()) == false
&& sessionSettings.sessionUser().isSuperUser() == false) {
throw new UnauthorizedException("A subscription can only be altered by the owner or a superuser");
}
return new AnalyzedAlterSubscription(
alterSubscription.name(),
alterSubscription.mode() == AlterSubscription.Mode.ENABLE
);
}
}
|
var publication = logicalReplicationService.publications().get(alterPublication.name());
if (publication == null) {
throw new PublicationUnknownException(alterPublication.name());
}
if (publication.owner().equals(sessionSettings.sessionUser().name()) == false
&& sessionSettings.sessionUser().isSuperUser() == false) {
throw new UnauthorizedException("A publication can only be altered by the owner or a superuser");
}
if (publication.isForAllTables()) {
throw new InvalidArgumentException(
"Publication '" + alterPublication.name() + "' is defined as FOR ALL TABLES," +
" adding or dropping tables is not supported"
);
}
var defaultSchema = sessionSettings.searchPath().currentSchema();
var tables = Lists.map(
alterPublication.tables(),
q -> {
var relation = RelationName.of(q, defaultSchema);
if (schemas.tableExists(relation) == false) {
throw new RelationUnknown(relation);
}
return relation;
}
);
return new AnalyzedAlterPublication(alterPublication.name(), alterPublication.operation(), tables);
| 1,321
| 297
| 1,618
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/replication/logical/metadata/pgcatalog/PgSubscriptionRelTable.java
|
PgSubscriptionRelTable
|
create
|
class PgSubscriptionRelTable {
public static final RelationName IDENT = new RelationName(PgCatalogSchemaInfo.NAME, "pg_subscription_rel");
public static SystemTable<PgSubscriptionRelTable.PgSubscriptionRelRow> create() {<FILL_FUNCTION_BODY>}
public static Iterable<PgSubscriptionRelTable.PgSubscriptionRelRow> rows(LogicalReplicationService logicalReplicationService) {
return () -> {
Stream<PgSubscriptionRelTable.PgSubscriptionRelRow> s = logicalReplicationService.subscriptions().entrySet().stream()
.mapMulti(
(e, c) -> {
var sub = e.getValue();
sub.relations().forEach(
(r, rs) -> c.accept(
new PgSubscriptionRelRow(
OidHash.subscriptionOid(e.getKey(), sub),
new Regclass(OidHash.relationOid(OidHash.Type.TABLE, r), r.fqn()),
sub.owner(),
rs.state().pg_state(),
rs.reason()
)
)
);
}
);
return s.iterator();
};
}
public record PgSubscriptionRelRow(int subOid, Regclass relOid, String owner, String state, String state_reason) {}
}
|
return SystemTable.<PgSubscriptionRelTable.PgSubscriptionRelRow>builder(IDENT)
.add("srsubid", INTEGER, PgSubscriptionRelRow::subOid)
.add("srrelid", REGCLASS, PgSubscriptionRelRow::relOid)
.add("srsubstate", STRING, PgSubscriptionRelRow::state)
.add("srsubstate_reason", STRING, PgSubscriptionRelRow::state_reason)
// CrateDB doesn't have Log Sequence Number per table, only a seqNo per shard (see SysShardsTable)
.add("srsublsn", LONG, ignored -> null)
.build();
| 349
| 178
| 527
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/replication/logical/metadata/pgcatalog/PgSubscriptionTable.java
|
PgSubscriptionTable
|
create
|
class PgSubscriptionTable {
public static final RelationName IDENT = new RelationName(PgCatalogSchemaInfo.NAME, "pg_subscription");
public static SystemTable<SubscriptionRow> create() {<FILL_FUNCTION_BODY>}
public record SubscriptionRow(String name, Subscription subscription) {
}
}
|
return SystemTable.<SubscriptionRow>builder(IDENT)
.add("oid", INTEGER, r -> OidHash.subscriptionOid(r.name, r.subscription))
.add("subdbid", INTEGER, ignored -> 0)
.add("subname", STRING, r -> r.name)
.add("subowner", STRING, r -> r.subscription.owner())
.add("subenabled", BOOLEAN, r -> true)
.add("subbinary", BOOLEAN, r -> true)
.add("substream", BOOLEAN, r -> false)
.add("subconninfo", STRING, r -> r.subscription.connectionInfo().safeConnectionString())
.add("subslotname", STRING, ignored -> null)
.add("subsynccommit", STRING, ignored -> null)
.add("subpublications", STRING_ARRAY, r -> r.subscription.publications())
.build();
| 89
| 256
| 345
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/replication/logical/repository/RemoteClusterMultiChunkTransfer.java
|
RemoteClusterMultiChunkTransfer
|
executeChunkRequest
|
class RemoteClusterMultiChunkTransfer extends MultiChunkTransfer<StoreFileMetadata, RemoteClusterRepositoryFileChunk> {
private static final String RESTORE_SHARD_TEMP_FILE_PREFIX = "CLUSTER_REPO_TEMP_";
private static final Logger LOGGER = LogManager.getLogger(RemoteClusterMultiChunkTransfer.class);
private final DiscoveryNode remoteNode;
private final String restoreUUID;
private final ShardId remoteShardId;
private final String localClusterName;
private final Client client;
private final ThreadPool threadPool;
private final ByteSizeValue chunkSize;
private final String tempFilePrefix;
private final MultiFileWriter multiFileWriter;
private long offset = 0L;
public RemoteClusterMultiChunkTransfer(Logger logger,
String localClusterName,
Store localStore,
int maxConcurrentChunks,
String restoreUUID,
DiscoveryNode remoteNode,
ShardId remoteShardId,
List<StoreFileMetadata> remoteFiles,
Client client,
ThreadPool threadPool,
RecoveryState recoveryState,
ByteSizeValue chunkSize,
ActionListener<Void> listener) {
super(logger, listener, maxConcurrentChunks, remoteFiles);
this.localClusterName = localClusterName;
this.restoreUUID = restoreUUID;
this.remoteNode = remoteNode;
this.remoteShardId = remoteShardId;
this.client = client;
this.threadPool = threadPool;
this.chunkSize = chunkSize;
this.tempFilePrefix = RESTORE_SHARD_TEMP_FILE_PREFIX + restoreUUID + ".";
this.multiFileWriter = new MultiFileWriter(localStore, recoveryState.getIndex(), tempFilePrefix, logger, () -> {});
// Add all the available files to show the recovery status
for (var fileMetadata : remoteFiles) {
recoveryState.getIndex().addFileDetail(fileMetadata.name(), fileMetadata.length(), false);
}
}
@Override
protected RemoteClusterRepositoryFileChunk nextChunkRequest(StoreFileMetadata resource) throws IOException {
var chunkReq = new RemoteClusterRepositoryFileChunk(resource, offset, chunkSize.bytesAsInt());
offset += chunkSize.bytesAsInt();
return chunkReq;
}
@Override
protected void executeChunkRequest(RemoteClusterRepositoryFileChunk request,
ActionListener<Void> listener) {<FILL_FUNCTION_BODY>}
@Override
protected void handleError(StoreFileMetadata resource, Exception e) throws Exception {
LOGGER.error("Error while transferring segments ", e);
}
@Override
protected void onNewResource(StoreFileMetadata resource) throws IOException {
offset = 0L;
}
@Override
public void close() throws IOException {
multiFileWriter.renameAllTempFiles();
multiFileWriter.close();
}
}
|
var getFileChunkRequest = new GetFileChunkAction.Request(
restoreUUID,
remoteNode,
remoteShardId,
localClusterName,
request.storeFileMetadata(),
request.offset(),
request.length()
);
var fileChunkResponse = client.execute(GetFileChunkAction.INSTANCE, getFileChunkRequest);
fileChunkResponse.whenComplete((response, err) -> {
if (err == null) {
LOGGER.debug("Filename: {}, response_size: {}, response_offset: {}",
request.storeFileMetadata().name(),
response.data().length(),
response.offset()
);
try {
threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> {
try {
multiFileWriter.writeFileChunk(
response.storeFileMetadata(),
response.offset(),
response.data(),
request.lastChunk()
);
listener.onResponse(null);
} catch (IOException e) {
listener.onFailure(new UncheckedIOException(e));
}
});
} catch (EsRejectedExecutionException e) {
listener.onFailure(e);
}
} else {
LOGGER.error(
"Failed to fetch file chunk for {} with offset {}",
request.storeFileMetadata().name(),
request.offset(),
err
);
}
});
| 748
| 360
| 1,108
|
<methods>public final void start() <variables>private org.elasticsearch.index.store.StoreFileMetadata currentSource,private final non-sealed ActionListener<java.lang.Void> listener,private final non-sealed Logger logger,private final non-sealed int maxConcurrentChunks,private final non-sealed AsyncIOProcessor<FileChunkResponseItem<org.elasticsearch.index.store.StoreFileMetadata>> processor,private Tuple<org.elasticsearch.index.store.StoreFileMetadata,io.crate.replication.logical.repository.RemoteClusterRepositoryFileChunk> readAheadRequest,private final non-sealed Iterator<org.elasticsearch.index.store.StoreFileMetadata> remainingSources,private final org.elasticsearch.index.seqno.LocalCheckpointTracker requestSeqIdTracker,private org.elasticsearch.indices.recovery.MultiChunkTransfer.Status status
|
crate_crate
|
crate/server/src/main/java/io/crate/replication/logical/seqno/RetentionLeaseHelper.java
|
RetentionLeaseHelper
|
addRetentionLease
|
class RetentionLeaseHelper {
private static final Logger LOGGER = LogManager.getLogger(RetentionLeaseHelper.class);
private static String retentionLeaseSource(String subscriberClusterName) {
return "logical_replication:" + subscriberClusterName;
}
private static String retentionLeaseIdForShard(String subscriberClusterName, ShardId shardId) {
var retentionLeaseSource = retentionLeaseSource(subscriberClusterName);
return retentionLeaseSource + ":" + shardId;
}
public static void addRetentionLease(ShardId shardId,
long seqNo,
String subscriberClusterName,
Client client,
ActionListener<RetentionLeaseActions.Response> listener) {<FILL_FUNCTION_BODY>}
public static void renewRetentionLease(ShardId shardId,
long seqNo,
String subscriberClusterName,
Client client,
ActionListener<RetentionLeaseActions.Response> listener) {
var retentionLeaseId = retentionLeaseIdForShard(subscriberClusterName, shardId);
var request = new RetentionLeaseActions.AddOrRenewRequest(
shardId,
retentionLeaseId,
seqNo,
retentionLeaseSource(subscriberClusterName)
);
client.execute(RetentionLeaseActions.Renew.INSTANCE, request).whenComplete(listener);
}
public static void attemptRetentionLeaseRemoval(ShardId shardId,
String subscriberClusterName,
Client client,
ActionListener<RetentionLeaseActions.Response> listener) {
var retentionLeaseId = retentionLeaseIdForShard(subscriberClusterName, shardId);
var request = new RetentionLeaseActions.RemoveRequest(shardId, retentionLeaseId);
client.execute(RetentionLeaseActions.Remove.INSTANCE, request)
.whenComplete((response, err) -> {
if (err == null) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Removed retention lease with id - {}", retentionLeaseId);
}
listener.onResponse(response);
} else {
var e = Exceptions.toException(SQLExceptions.unwrap(err));
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Exception in removing retention lease", e);
}
listener.onFailure(e);
}
});
}
}
|
var retentionLeaseId = retentionLeaseIdForShard(subscriberClusterName, shardId);
var request = new RetentionLeaseActions.AddOrRenewRequest(
shardId,
retentionLeaseId,
seqNo,
retentionLeaseSource(subscriberClusterName)
);
client.execute(RetentionLeaseActions.Add.INSTANCE, request)
.whenComplete((response, err) -> {
if (err == null) {
listener.onResponse(response);
} else {
var t = SQLExceptions.unwrap(err);
if (t instanceof RetentionLeaseAlreadyExistsException) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug(
"Renew retention lease as it already exists {} with {}",
retentionLeaseId,
seqNo
);
}
// Only one retention lease should exists for the follower shard
// Ideally, this should have got cleaned-up
renewRetentionLease(shardId, seqNo, subscriberClusterName, client, listener);
} else {
listener.onFailure(Exceptions.toException(t));
}
}
});
| 651
| 310
| 961
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/rest/action/RestRowCountReceiver.java
|
RestRowCountReceiver
|
finishBuilder
|
class RestRowCountReceiver implements ResultReceiver<XContentBuilder> {
private final long startTimeNs;
private final boolean includeTypes;
private final ResultToXContentBuilder builder;
private final CompletableFuture<XContentBuilder> result = new CompletableFuture<>();
private long rowCount;
RestRowCountReceiver(XContentBuilder builder,
long startTimeNs,
boolean includeTypes) throws IOException {
this.startTimeNs = startTimeNs;
this.includeTypes = includeTypes;
this.builder = ResultToXContentBuilder.builder(builder);
}
@Override
public void setNextRow(Row row) {
rowCount = (long) row.get(0);
}
@Override
public void batchFinished() {
fail(new IllegalStateException("Incremental result streaming not supported via HTTP"));
}
XContentBuilder finishBuilder() throws IOException {<FILL_FUNCTION_BODY>}
@Override
public void allFinished() {
try {
result.complete(finishBuilder());
} catch (IOException e) {
result.completeExceptionally(e);
}
}
@Override
public void fail(Throwable t) {
result.completeExceptionally(t);
}
@Override
public CompletableFuture<XContentBuilder> completionFuture() {
return result;
}
}
|
builder.cols(Collections.emptyList());
if (includeTypes) {
builder.colTypes(Collections.emptyList());
}
builder.startRows()
.addRow(Row.EMPTY, 0)
.finishRows()
.rowCount(rowCount)
.duration(startTimeNs);
return builder.build();
| 356
| 93
| 449
|
<no_super_class>
|
crate_crate
|
crate/server/src/main/java/io/crate/role/AlterRoleRequest.java
|
AlterRoleRequest
|
writeTo
|
class AlterRoleRequest extends AcknowledgedRequest<AlterRoleRequest> {
private final String roleName;
private final SecureHash secureHash;
@Nullable
private final JwtProperties jwtProperties;
private final boolean resetPassword;
private final boolean resetJwtProperties;
public AlterRoleRequest(String roleName,
@Nullable SecureHash secureHash,
@Nullable JwtProperties jwtProperties,
boolean resetPassword,
boolean resetJwtProperties) {
this.roleName = roleName;
this.secureHash = secureHash;
this.jwtProperties = jwtProperties;
this.resetPassword = resetPassword;
this.resetJwtProperties = resetJwtProperties;
}
public String roleName() {
return roleName;
}
@Nullable
public SecureHash secureHash() {
return secureHash;
}
@Nullable
public JwtProperties jwtProperties() {
return jwtProperties;
}
public boolean resetPassword() {
return resetPassword;
}
public boolean resetJwtProperties() {
return resetJwtProperties;
}
public AlterRoleRequest(StreamInput in) throws IOException {
super(in);
roleName = in.readString();
secureHash = in.readOptionalWriteable(SecureHash::readFrom);
if (in.getVersion().onOrAfter(Version.V_5_7_0)) {
this.jwtProperties = in.readOptionalWriteable(JwtProperties::readFrom);
this.resetPassword = in.readBoolean();
this.resetJwtProperties = in.readBoolean();
} else {
this.jwtProperties = null;
this.resetPassword = false;
this.resetJwtProperties = false;
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {<FILL_FUNCTION_BODY>}
}
|
super.writeTo(out);
out.writeString(roleName);
out.writeOptionalWriteable(secureHash);
if (out.getVersion().onOrAfter(Version.V_5_7_0)) {
out.writeOptionalWriteable(jwtProperties);
out.writeBoolean(resetPassword);
out.writeBoolean(resetJwtProperties);
}
| 488
| 96
| 584
|
<methods>public void <init>(org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public io.crate.common.unit.TimeValue ackTimeout() ,public final io.crate.role.AlterRoleRequest timeout(java.lang.String) ,public final io.crate.role.AlterRoleRequest timeout(io.crate.common.unit.TimeValue) ,public final io.crate.common.unit.TimeValue timeout() ,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>public static final io.crate.common.unit.TimeValue DEFAULT_ACK_TIMEOUT,protected io.crate.common.unit.TimeValue timeout
|
crate_crate
|
crate/server/src/main/java/io/crate/role/PrivilegesRequest.java
|
PrivilegesRequest
|
writeTo
|
class PrivilegesRequest extends AcknowledgedRequest<PrivilegesRequest> {
private final Collection<String> roleNames;
private final Collection<Privilege> privileges;
@Nullable
private final GrantedRolesChange grantedRolesChange;
PrivilegesRequest(Collection<String> roleNames,
Collection<Privilege> privileges,
@Nullable GrantedRolesChange grantedRolesChange) {
this.roleNames = roleNames;
this.privileges = privileges;
this.grantedRolesChange = grantedRolesChange;
}
Collection<String> roleNames() {
return roleNames;
}
public Collection<Privilege> privileges() {
return privileges;
}
@Nullable
public GrantedRolesChange rolePrivilege() {
return grantedRolesChange;
}
public PrivilegesRequest(StreamInput in) throws IOException {
super(in);
int roleNamesSize = in.readVInt();
roleNames = new ArrayList<>(roleNamesSize);
for (int i = 0; i < roleNamesSize; i++) {
roleNames.add(in.readString());
}
int privilegesSize = in.readVInt();
privileges = new ArrayList<>(privilegesSize);
for (int i = 0; i < privilegesSize; i++) {
privileges.add(new Privilege(in));
}
if (in.getVersion().onOrAfter(V_5_6_0)) {
grantedRolesChange = in.readOptionalWriteable(GrantedRolesChange::new);
} else {
grantedRolesChange = null;
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {<FILL_FUNCTION_BODY>}
}
|
super.writeTo(out);
out.writeVInt(roleNames.size());
for (String roleName : roleNames) {
out.writeString(roleName);
}
out.writeVInt(privileges.size());
for (Privilege privilege : privileges) {
privilege.writeTo(out);
}
if (out.getVersion().onOrAfter(V_5_6_0)) {
out.writeOptionalWriteable(grantedRolesChange);
}
| 458
| 129
| 587
|
<methods>public void <init>(org.elasticsearch.common.io.stream.StreamInput) throws java.io.IOException,public io.crate.common.unit.TimeValue ackTimeout() ,public final io.crate.role.PrivilegesRequest timeout(java.lang.String) ,public final io.crate.role.PrivilegesRequest timeout(io.crate.common.unit.TimeValue) ,public final io.crate.common.unit.TimeValue timeout() ,public void writeTo(org.elasticsearch.common.io.stream.StreamOutput) throws java.io.IOException<variables>public static final io.crate.common.unit.TimeValue DEFAULT_ACK_TIMEOUT,protected io.crate.common.unit.TimeValue timeout
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.