repo stringlengths 7 58 | path stringlengths 12 218 | func_name stringlengths 3 140 | original_string stringlengths 73 34.1k | language stringclasses 1 value | code stringlengths 73 34.1k | code_tokens list | docstring stringlengths 3 16k | docstring_tokens list | sha stringlengths 40 40 | url stringlengths 105 339 | partition stringclasses 1 value |
|---|---|---|---|---|---|---|---|---|---|---|---|
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/StatementDML.java | StatementDML.executeMergeStatement | Result executeMergeStatement(Session session) {
Result resultOut = null;
RowSetNavigator generatedNavigator = null;
PersistentStore store = session.sessionData.getRowStore(baseTable);
if (generatedIndexes != null) {
resultOut = Result.newUpdateCountResult(generatedResultMetaData,
0);
generatedNavigator = resultOut.getChainedResult().getNavigator();
}
int count = 0;
// data generated for non-matching rows
RowSetNavigatorClient newData = new RowSetNavigatorClient(8);
// rowset for update operation
HashMappedList updateRowSet = new HashMappedList();
RangeVariable[] joinRangeIterators = targetRangeVariables;
// populate insert and update lists
RangeIterator[] rangeIterators =
new RangeIterator[joinRangeIterators.length];
for (int i = 0; i < joinRangeIterators.length; i++) {
rangeIterators[i] = joinRangeIterators[i].getIterator(session);
}
for (int currentIndex = 0; 0 <= currentIndex; ) {
RangeIterator it = rangeIterators[currentIndex];
boolean beforeFirst = it.isBeforeFirst();
if (it.next()) {
if (currentIndex < joinRangeIterators.length - 1) {
currentIndex++;
continue;
}
} else {
if (currentIndex == 1 && beforeFirst) {
Object[] data = getMergeInsertData(session);
if (data != null) {
newData.add(data);
}
}
it.reset();
currentIndex--;
continue;
}
// row matches!
if (updateExpressions != null) {
Row row = it.getCurrentRow(); // this is always the second iterator
Object[] data = getUpdatedData(session, baseTable,
updateColumnMap,
updateExpressions,
baseTable.getColumnTypes(),
row.getData());
updateRowSet.add(row, data);
}
}
// run the transaction as a whole, updating and inserting where needed
// update any matched rows
if (updateRowSet.size() > 0) {
count = update(session, baseTable, updateRowSet);
}
// insert any non-matched rows
newData.beforeFirst();
while (newData.hasNext()) {
Object[] data = newData.getNext();
baseTable.insertRow(session, store, data);
if (generatedNavigator != null) {
Object[] generatedValues = getGeneratedColumns(data);
generatedNavigator.add(generatedValues);
}
}
baseTable.fireAfterTriggers(session, Trigger.INSERT_AFTER, newData);
count += newData.getSize();
if (resultOut == null) {
return Result.getUpdateCountResult(count);
} else {
resultOut.setUpdateCount(count);
return resultOut;
}
} | java | Result executeMergeStatement(Session session) {
Result resultOut = null;
RowSetNavigator generatedNavigator = null;
PersistentStore store = session.sessionData.getRowStore(baseTable);
if (generatedIndexes != null) {
resultOut = Result.newUpdateCountResult(generatedResultMetaData,
0);
generatedNavigator = resultOut.getChainedResult().getNavigator();
}
int count = 0;
// data generated for non-matching rows
RowSetNavigatorClient newData = new RowSetNavigatorClient(8);
// rowset for update operation
HashMappedList updateRowSet = new HashMappedList();
RangeVariable[] joinRangeIterators = targetRangeVariables;
// populate insert and update lists
RangeIterator[] rangeIterators =
new RangeIterator[joinRangeIterators.length];
for (int i = 0; i < joinRangeIterators.length; i++) {
rangeIterators[i] = joinRangeIterators[i].getIterator(session);
}
for (int currentIndex = 0; 0 <= currentIndex; ) {
RangeIterator it = rangeIterators[currentIndex];
boolean beforeFirst = it.isBeforeFirst();
if (it.next()) {
if (currentIndex < joinRangeIterators.length - 1) {
currentIndex++;
continue;
}
} else {
if (currentIndex == 1 && beforeFirst) {
Object[] data = getMergeInsertData(session);
if (data != null) {
newData.add(data);
}
}
it.reset();
currentIndex--;
continue;
}
// row matches!
if (updateExpressions != null) {
Row row = it.getCurrentRow(); // this is always the second iterator
Object[] data = getUpdatedData(session, baseTable,
updateColumnMap,
updateExpressions,
baseTable.getColumnTypes(),
row.getData());
updateRowSet.add(row, data);
}
}
// run the transaction as a whole, updating and inserting where needed
// update any matched rows
if (updateRowSet.size() > 0) {
count = update(session, baseTable, updateRowSet);
}
// insert any non-matched rows
newData.beforeFirst();
while (newData.hasNext()) {
Object[] data = newData.getNext();
baseTable.insertRow(session, store, data);
if (generatedNavigator != null) {
Object[] generatedValues = getGeneratedColumns(data);
generatedNavigator.add(generatedValues);
}
}
baseTable.fireAfterTriggers(session, Trigger.INSERT_AFTER, newData);
count += newData.getSize();
if (resultOut == null) {
return Result.getUpdateCountResult(count);
} else {
resultOut.setUpdateCount(count);
return resultOut;
}
} | [
"Result",
"executeMergeStatement",
"(",
"Session",
"session",
")",
"{",
"Result",
"resultOut",
"=",
"null",
";",
"RowSetNavigator",
"generatedNavigator",
"=",
"null",
";",
"PersistentStore",
"store",
"=",
"session",
".",
"sessionData",
".",
"getRowStore",
"(",
"ba... | Executes a MERGE statement. It is assumed that the argument
is of the correct type.
@return Result object | [
"Executes",
"a",
"MERGE",
"statement",
".",
"It",
"is",
"assumed",
"that",
"the",
"argument",
"is",
"of",
"the",
"correct",
"type",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/StatementDML.java#L498-L598 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/StatementDML.java | StatementDML.executeDeleteStatement | Result executeDeleteStatement(Session session) {
int count = 0;
RowSetNavigatorLinkedList oldRows = new RowSetNavigatorLinkedList();
RangeIterator it = RangeVariable.getIterator(session,
targetRangeVariables);
while (it.next()) {
Row currentRow = it.getCurrentRow();
oldRows.add(currentRow);
}
count = delete(session, baseTable, oldRows);
if (restartIdentity && targetTable.identitySequence != null) {
targetTable.identitySequence.reset();
}
return Result.getUpdateCountResult(count);
} | java | Result executeDeleteStatement(Session session) {
int count = 0;
RowSetNavigatorLinkedList oldRows = new RowSetNavigatorLinkedList();
RangeIterator it = RangeVariable.getIterator(session,
targetRangeVariables);
while (it.next()) {
Row currentRow = it.getCurrentRow();
oldRows.add(currentRow);
}
count = delete(session, baseTable, oldRows);
if (restartIdentity && targetTable.identitySequence != null) {
targetTable.identitySequence.reset();
}
return Result.getUpdateCountResult(count);
} | [
"Result",
"executeDeleteStatement",
"(",
"Session",
"session",
")",
"{",
"int",
"count",
"=",
"0",
";",
"RowSetNavigatorLinkedList",
"oldRows",
"=",
"new",
"RowSetNavigatorLinkedList",
"(",
")",
";",
"RangeIterator",
"it",
"=",
"RangeVariable",
".",
"getIterator",
... | Executes a DELETE statement. It is assumed that the argument is
of the correct type.
@return the result of executing the statement | [
"Executes",
"a",
"DELETE",
"statement",
".",
"It",
"is",
"assumed",
"that",
"the",
"argument",
"is",
"of",
"the",
"correct",
"type",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/StatementDML.java#L706-L726 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/StatementDML.java | StatementDML.delete | int delete(Session session, Table table, RowSetNavigator oldRows) {
if (table.fkMainConstraints.length == 0) {
deleteRows(session, table, oldRows);
oldRows.beforeFirst();
if (table.hasTrigger(Trigger.DELETE_AFTER)) {
table.fireAfterTriggers(session, Trigger.DELETE_AFTER,
oldRows);
}
return oldRows.getSize();
}
HashSet path = session.sessionContext.getConstraintPath();
HashMappedList tableUpdateList =
session.sessionContext.getTableUpdateList();
if (session.database.isReferentialIntegrity()) {
oldRows.beforeFirst();
while (oldRows.hasNext()) {
oldRows.next();
Row row = oldRows.getCurrentRow();
path.clear();
checkCascadeDelete(session, table, tableUpdateList, row,
false, path);
}
}
if (session.database.isReferentialIntegrity()) {
oldRows.beforeFirst();
while (oldRows.hasNext()) {
oldRows.next();
Row row = oldRows.getCurrentRow();
path.clear();
checkCascadeDelete(session, table, tableUpdateList, row, true,
path);
}
}
oldRows.beforeFirst();
while (oldRows.hasNext()) {
oldRows.next();
Row row = oldRows.getCurrentRow();
if (!row.isDeleted(session)) {
table.deleteNoRefCheck(session, row);
}
}
for (int i = 0; i < tableUpdateList.size(); i++) {
Table targetTable = (Table) tableUpdateList.getKey(i);
HashMappedList updateList =
(HashMappedList) tableUpdateList.get(i);
if (updateList.size() > 0) {
targetTable.updateRowSet(session, updateList, null, true);
updateList.clear();
}
}
oldRows.beforeFirst();
if (table.hasTrigger(Trigger.DELETE_AFTER)) {
table.fireAfterTriggers(session, Trigger.DELETE_AFTER, oldRows);
}
path.clear();
return oldRows.getSize();
} | java | int delete(Session session, Table table, RowSetNavigator oldRows) {
if (table.fkMainConstraints.length == 0) {
deleteRows(session, table, oldRows);
oldRows.beforeFirst();
if (table.hasTrigger(Trigger.DELETE_AFTER)) {
table.fireAfterTriggers(session, Trigger.DELETE_AFTER,
oldRows);
}
return oldRows.getSize();
}
HashSet path = session.sessionContext.getConstraintPath();
HashMappedList tableUpdateList =
session.sessionContext.getTableUpdateList();
if (session.database.isReferentialIntegrity()) {
oldRows.beforeFirst();
while (oldRows.hasNext()) {
oldRows.next();
Row row = oldRows.getCurrentRow();
path.clear();
checkCascadeDelete(session, table, tableUpdateList, row,
false, path);
}
}
if (session.database.isReferentialIntegrity()) {
oldRows.beforeFirst();
while (oldRows.hasNext()) {
oldRows.next();
Row row = oldRows.getCurrentRow();
path.clear();
checkCascadeDelete(session, table, tableUpdateList, row, true,
path);
}
}
oldRows.beforeFirst();
while (oldRows.hasNext()) {
oldRows.next();
Row row = oldRows.getCurrentRow();
if (!row.isDeleted(session)) {
table.deleteNoRefCheck(session, row);
}
}
for (int i = 0; i < tableUpdateList.size(); i++) {
Table targetTable = (Table) tableUpdateList.getKey(i);
HashMappedList updateList =
(HashMappedList) tableUpdateList.get(i);
if (updateList.size() > 0) {
targetTable.updateRowSet(session, updateList, null, true);
updateList.clear();
}
}
oldRows.beforeFirst();
if (table.hasTrigger(Trigger.DELETE_AFTER)) {
table.fireAfterTriggers(session, Trigger.DELETE_AFTER, oldRows);
}
path.clear();
return oldRows.getSize();
} | [
"int",
"delete",
"(",
"Session",
"session",
",",
"Table",
"table",
",",
"RowSetNavigator",
"oldRows",
")",
"{",
"if",
"(",
"table",
".",
"fkMainConstraints",
".",
"length",
"==",
"0",
")",
"{",
"deleteRows",
"(",
"session",
",",
"table",
",",
"oldRows",
... | Highest level multiple row delete method. Corresponds to an SQL
DELETE. | [
"Highest",
"level",
"multiple",
"row",
"delete",
"method",
".",
"Corresponds",
"to",
"an",
"SQL",
"DELETE",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/StatementDML.java#L736-L814 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/StatementDML.java | StatementDML.mergeUpdate | static void mergeUpdate(HashMappedList rowSet, Row row, Object[] newData,
int[] cols) {
Object[] data = (Object[]) rowSet.get(row);
if (data != null) {
for (int j = 0; j < cols.length; j++) {
data[cols[j]] = newData[cols[j]];
}
} else {
rowSet.add(row, newData);
}
} | java | static void mergeUpdate(HashMappedList rowSet, Row row, Object[] newData,
int[] cols) {
Object[] data = (Object[]) rowSet.get(row);
if (data != null) {
for (int j = 0; j < cols.length; j++) {
data[cols[j]] = newData[cols[j]];
}
} else {
rowSet.add(row, newData);
}
} | [
"static",
"void",
"mergeUpdate",
"(",
"HashMappedList",
"rowSet",
",",
"Row",
"row",
",",
"Object",
"[",
"]",
"newData",
",",
"int",
"[",
"]",
"cols",
")",
"{",
"Object",
"[",
"]",
"data",
"=",
"(",
"Object",
"[",
"]",
")",
"rowSet",
".",
"get",
"(... | Merges a triggered change with a previous triggered change, or adds to
list. | [
"Merges",
"a",
"triggered",
"change",
"with",
"a",
"previous",
"triggered",
"change",
"or",
"adds",
"to",
"list",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/StatementDML.java#L1243-L1255 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/StatementDML.java | StatementDML.mergeKeepUpdate | static boolean mergeKeepUpdate(Session session, HashMappedList rowSet,
int[] cols, Type[] colTypes, Row row,
Object[] newData) {
Object[] data = (Object[]) rowSet.get(row);
if (data != null) {
if (IndexAVL
.compareRows(row
.getData(), newData, cols, colTypes) != 0 && IndexAVL
.compareRows(newData, data, cols, colTypes) != 0) {
return false;
}
for (int j = 0; j < cols.length; j++) {
newData[cols[j]] = data[cols[j]];
}
rowSet.put(row, newData);
} else {
rowSet.add(row, newData);
}
return true;
} | java | static boolean mergeKeepUpdate(Session session, HashMappedList rowSet,
int[] cols, Type[] colTypes, Row row,
Object[] newData) {
Object[] data = (Object[]) rowSet.get(row);
if (data != null) {
if (IndexAVL
.compareRows(row
.getData(), newData, cols, colTypes) != 0 && IndexAVL
.compareRows(newData, data, cols, colTypes) != 0) {
return false;
}
for (int j = 0; j < cols.length; j++) {
newData[cols[j]] = data[cols[j]];
}
rowSet.put(row, newData);
} else {
rowSet.add(row, newData);
}
return true;
} | [
"static",
"boolean",
"mergeKeepUpdate",
"(",
"Session",
"session",
",",
"HashMappedList",
"rowSet",
",",
"int",
"[",
"]",
"cols",
",",
"Type",
"[",
"]",
"colTypes",
",",
"Row",
"row",
",",
"Object",
"[",
"]",
"newData",
")",
"{",
"Object",
"[",
"]",
"d... | Merge the full triggered change with the updated row, or add to list.
Return false if changes conflict. | [
"Merge",
"the",
"full",
"triggered",
"change",
"with",
"the",
"updated",
"row",
"or",
"add",
"to",
"list",
".",
"Return",
"false",
"if",
"changes",
"conflict",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/StatementDML.java#L1261-L1285 | train |
VoltDB/voltdb | src/frontend/org/voltdb/exportclient/ExportDecoderBase.java | ExportDecoderBase.decodeRow | protected ExportRowData decodeRow(byte[] rowData) throws IOException {
ExportRow row = ExportRow.decodeRow(m_legacyRow, getPartition(), m_startTS, rowData);
return new ExportRowData(row.values, row.partitionValue, row.partitionId);
} | java | protected ExportRowData decodeRow(byte[] rowData) throws IOException {
ExportRow row = ExportRow.decodeRow(m_legacyRow, getPartition(), m_startTS, rowData);
return new ExportRowData(row.values, row.partitionValue, row.partitionId);
} | [
"protected",
"ExportRowData",
"decodeRow",
"(",
"byte",
"[",
"]",
"rowData",
")",
"throws",
"IOException",
"{",
"ExportRow",
"row",
"=",
"ExportRow",
".",
"decodeRow",
"(",
"m_legacyRow",
",",
"getPartition",
"(",
")",
",",
"m_startTS",
",",
"rowData",
")",
... | Decode a byte array of row data into ExportRowData
@param rowData
@return ExportRowData
@throws IOException | [
"Decode",
"a",
"byte",
"array",
"of",
"row",
"data",
"into",
"ExportRowData"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/exportclient/ExportDecoderBase.java#L119-L122 | train |
VoltDB/voltdb | src/frontend/org/voltdb/exportclient/ExportDecoderBase.java | ExportDecoderBase.writeRow | public boolean writeRow(Object row[], CSVWriter writer, boolean skipinternal,
BinaryEncoding binaryEncoding, SimpleDateFormat dateFormatter) {
int firstfield = getFirstField(skipinternal);
try {
String[] fields = new String[m_tableSchema.size() - firstfield];
for (int i = firstfield; i < m_tableSchema.size(); i++) {
if (row[i] == null) {
fields[i - firstfield] = "NULL";
} else if (m_tableSchema.get(i) == VoltType.VARBINARY && binaryEncoding != null) {
if (binaryEncoding == BinaryEncoding.HEX) {
fields[i - firstfield] = Encoder.hexEncode((byte[]) row[i]);
} else {
fields[i - firstfield] = Encoder.base64Encode((byte[]) row[i]);
}
} else if (m_tableSchema.get(i) == VoltType.STRING) {
fields[i - firstfield] = (String) row[i];
} else if (m_tableSchema.get(i) == VoltType.TIMESTAMP && dateFormatter != null) {
TimestampType timestamp = (TimestampType) row[i];
fields[i - firstfield] = dateFormatter.format(timestamp.asApproximateJavaDate());
} else {
fields[i - firstfield] = row[i].toString();
}
}
writer.writeNext(fields);
} catch (Exception x) {
x.printStackTrace();
return false;
}
return true;
} | java | public boolean writeRow(Object row[], CSVWriter writer, boolean skipinternal,
BinaryEncoding binaryEncoding, SimpleDateFormat dateFormatter) {
int firstfield = getFirstField(skipinternal);
try {
String[] fields = new String[m_tableSchema.size() - firstfield];
for (int i = firstfield; i < m_tableSchema.size(); i++) {
if (row[i] == null) {
fields[i - firstfield] = "NULL";
} else if (m_tableSchema.get(i) == VoltType.VARBINARY && binaryEncoding != null) {
if (binaryEncoding == BinaryEncoding.HEX) {
fields[i - firstfield] = Encoder.hexEncode((byte[]) row[i]);
} else {
fields[i - firstfield] = Encoder.base64Encode((byte[]) row[i]);
}
} else if (m_tableSchema.get(i) == VoltType.STRING) {
fields[i - firstfield] = (String) row[i];
} else if (m_tableSchema.get(i) == VoltType.TIMESTAMP && dateFormatter != null) {
TimestampType timestamp = (TimestampType) row[i];
fields[i - firstfield] = dateFormatter.format(timestamp.asApproximateJavaDate());
} else {
fields[i - firstfield] = row[i].toString();
}
}
writer.writeNext(fields);
} catch (Exception x) {
x.printStackTrace();
return false;
}
return true;
} | [
"public",
"boolean",
"writeRow",
"(",
"Object",
"row",
"[",
"]",
",",
"CSVWriter",
"writer",
",",
"boolean",
"skipinternal",
",",
"BinaryEncoding",
"binaryEncoding",
",",
"SimpleDateFormat",
"dateFormatter",
")",
"{",
"int",
"firstfield",
"=",
"getFirstField",
"("... | This is for legacy connector. | [
"This",
"is",
"for",
"legacy",
"connector",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/exportclient/ExportDecoderBase.java#L125-L155 | train |
VoltDB/voltdb | src/frontend/org/voltdb/exportclient/ExportDecoderBase.java | ExportDecoderBase.setPartitionColumnName | public final int setPartitionColumnName(String partitionColumnName) {
if (partitionColumnName == null || partitionColumnName.trim().isEmpty()) {
return PARTITION_ID_INDEX;
}
int idx = -1;
for (String name : m_source.columnNames) {
if (name.equalsIgnoreCase(partitionColumnName)) {
idx = m_source.columnNames.indexOf(name);
break;
}
}
if (idx == -1) {
m_partitionColumnIndex = PARTITION_ID_INDEX;
m_logger.error("Export configuration error: specified " + m_source.tableName + "." + partitionColumnName
+ " does not exist. A default partition or routing key will be used.");
} else {
m_partitionColumnIndex = idx;
}
return m_partitionColumnIndex;
} | java | public final int setPartitionColumnName(String partitionColumnName) {
if (partitionColumnName == null || partitionColumnName.trim().isEmpty()) {
return PARTITION_ID_INDEX;
}
int idx = -1;
for (String name : m_source.columnNames) {
if (name.equalsIgnoreCase(partitionColumnName)) {
idx = m_source.columnNames.indexOf(name);
break;
}
}
if (idx == -1) {
m_partitionColumnIndex = PARTITION_ID_INDEX;
m_logger.error("Export configuration error: specified " + m_source.tableName + "." + partitionColumnName
+ " does not exist. A default partition or routing key will be used.");
} else {
m_partitionColumnIndex = idx;
}
return m_partitionColumnIndex;
} | [
"public",
"final",
"int",
"setPartitionColumnName",
"(",
"String",
"partitionColumnName",
")",
"{",
"if",
"(",
"partitionColumnName",
"==",
"null",
"||",
"partitionColumnName",
".",
"trim",
"(",
")",
".",
"isEmpty",
"(",
")",
")",
"{",
"return",
"PARTITION_ID_IN... | Used for override of column for partitioning. This is for legacy connector only. | [
"Used",
"for",
"override",
"of",
"column",
"for",
"partitioning",
".",
"This",
"is",
"for",
"legacy",
"connector",
"only",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/exportclient/ExportDecoderBase.java#L192-L211 | train |
VoltDB/voltdb | src/frontend/org/voltcore/utils/ShutdownHooks.java | ShutdownHooks.registerShutdownHook | public static void registerShutdownHook(int priority, boolean runOnCrash, Runnable action)
{
m_instance.addHook(priority, runOnCrash, action);
//Any hook registered lets print crash messsage.
ShutdownHooks.m_crashMessage = true;
} | java | public static void registerShutdownHook(int priority, boolean runOnCrash, Runnable action)
{
m_instance.addHook(priority, runOnCrash, action);
//Any hook registered lets print crash messsage.
ShutdownHooks.m_crashMessage = true;
} | [
"public",
"static",
"void",
"registerShutdownHook",
"(",
"int",
"priority",
",",
"boolean",
"runOnCrash",
",",
"Runnable",
"action",
")",
"{",
"m_instance",
".",
"addHook",
"(",
"priority",
",",
"runOnCrash",
",",
"action",
")",
";",
"//Any hook registered lets pr... | Register an action to be run when the JVM exits.
@param priority The priority level at which this action should be run. Lower values will run earlier.
@param runOnCrash Whether or not this action should be performed if the server is shutting down
due to a call to crashVoltDB()
@param action A Runnable containing the action to be run on shutdown. | [
"Register",
"an",
"action",
"to",
"be",
"run",
"when",
"the",
"JVM",
"exits",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltcore/utils/ShutdownHooks.java#L67-L72 | train |
VoltDB/voltdb | third_party/java/src/org/apache/zookeeper_voltpatches/ClientCnxn.java | ClientCnxn.getRemoteSocketAddress | SocketAddress getRemoteSocketAddress() {
// a lot could go wrong here, so rather than put in a bunch of code
// to check for nulls all down the chain let's do it the simple
// yet bulletproof way
try {
return ((SocketChannel) sendThread.sockKey.channel()).socket()
.getRemoteSocketAddress();
} catch (NullPointerException e) {
return null;
}
} | java | SocketAddress getRemoteSocketAddress() {
// a lot could go wrong here, so rather than put in a bunch of code
// to check for nulls all down the chain let's do it the simple
// yet bulletproof way
try {
return ((SocketChannel) sendThread.sockKey.channel()).socket()
.getRemoteSocketAddress();
} catch (NullPointerException e) {
return null;
}
} | [
"SocketAddress",
"getRemoteSocketAddress",
"(",
")",
"{",
"// a lot could go wrong here, so rather than put in a bunch of code",
"// to check for nulls all down the chain let's do it the simple",
"// yet bulletproof way",
"try",
"{",
"return",
"(",
"(",
"SocketChannel",
")",
"sendThrea... | Returns the address to which the socket is connected.
@return ip address of the remote side of the connection or null if not
connected | [
"Returns",
"the",
"address",
"to",
"which",
"the",
"socket",
"is",
"connected",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/third_party/java/src/org/apache/zookeeper_voltpatches/ClientCnxn.java#L210-L220 | train |
VoltDB/voltdb | third_party/java/src/org/apache/zookeeper_voltpatches/ClientCnxn.java | ClientCnxn.getLocalSocketAddress | SocketAddress getLocalSocketAddress() {
// a lot could go wrong here, so rather than put in a bunch of code
// to check for nulls all down the chain let's do it the simple
// yet bulletproof way
try {
return ((SocketChannel) sendThread.sockKey.channel()).socket()
.getLocalSocketAddress();
} catch (NullPointerException e) {
return null;
}
} | java | SocketAddress getLocalSocketAddress() {
// a lot could go wrong here, so rather than put in a bunch of code
// to check for nulls all down the chain let's do it the simple
// yet bulletproof way
try {
return ((SocketChannel) sendThread.sockKey.channel()).socket()
.getLocalSocketAddress();
} catch (NullPointerException e) {
return null;
}
} | [
"SocketAddress",
"getLocalSocketAddress",
"(",
")",
"{",
"// a lot could go wrong here, so rather than put in a bunch of code",
"// to check for nulls all down the chain let's do it the simple",
"// yet bulletproof way",
"try",
"{",
"return",
"(",
"(",
"SocketChannel",
")",
"sendThread... | Returns the local address to which the socket is bound.
@return ip address of the remote side of the connection or null if not
connected | [
"Returns",
"the",
"local",
"address",
"to",
"which",
"the",
"socket",
"is",
"bound",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/third_party/java/src/org/apache/zookeeper_voltpatches/ClientCnxn.java#L228-L238 | train |
VoltDB/voltdb | third_party/java/src/org/apache/zookeeper_voltpatches/ClientCnxn.java | ClientCnxn.makeThreadName | private static String makeThreadName(String suffix) {
String name = Thread.currentThread().getName()
.replaceAll("-EventThread", "");
return name + suffix;
} | java | private static String makeThreadName(String suffix) {
String name = Thread.currentThread().getName()
.replaceAll("-EventThread", "");
return name + suffix;
} | [
"private",
"static",
"String",
"makeThreadName",
"(",
"String",
"suffix",
")",
"{",
"String",
"name",
"=",
"Thread",
".",
"currentThread",
"(",
")",
".",
"getName",
"(",
")",
".",
"replaceAll",
"(",
"\"-EventThread\"",
",",
"\"\"",
")",
";",
"return",
"nam... | Guard against creating "-EventThread-EventThread-EventThread-..." thread
names when ZooKeeper object is being created from within a watcher. See
ZOOKEEPER-795 for details. | [
"Guard",
"against",
"creating",
"-",
"EventThread",
"-",
"EventThread",
"-",
"EventThread",
"-",
"...",
"thread",
"names",
"when",
"ZooKeeper",
"object",
"is",
"being",
"created",
"from",
"within",
"a",
"watcher",
".",
"See",
"ZOOKEEPER",
"-",
"795",
"for",
... | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/third_party/java/src/org/apache/zookeeper_voltpatches/ClientCnxn.java#L451-L455 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/jdbc/pool/JDBCXAResource.java | JDBCXAResource.commit | public void commit(Xid xid, boolean onePhase) throws XAException {
// Comment out following debug statement before public release:
System.err.println("Performing a " + (onePhase ? "1-phase"
: "2-phase") + " commit on "
+ xid);
JDBCXAResource resource = xaDataSource.getResource(xid);
if (resource == null) {
throw new XAException("The XADataSource has no such Xid: " + xid);
}
resource.commitThis(onePhase);
} | java | public void commit(Xid xid, boolean onePhase) throws XAException {
// Comment out following debug statement before public release:
System.err.println("Performing a " + (onePhase ? "1-phase"
: "2-phase") + " commit on "
+ xid);
JDBCXAResource resource = xaDataSource.getResource(xid);
if (resource == null) {
throw new XAException("The XADataSource has no such Xid: " + xid);
}
resource.commitThis(onePhase);
} | [
"public",
"void",
"commit",
"(",
"Xid",
"xid",
",",
"boolean",
"onePhase",
")",
"throws",
"XAException",
"{",
"// Comment out following debug statement before public release:",
"System",
".",
"err",
".",
"println",
"(",
"\"Performing a \"",
"+",
"(",
"onePhase",
"?",
... | Per the JDBC 3.0 spec, this commits the transaction for the
specified Xid, not necessarily for the transaction associated
with this XAResource object. | [
"Per",
"the",
"JDBC",
"3",
".",
"0",
"spec",
"this",
"commits",
"the",
"transaction",
"for",
"the",
"specified",
"Xid",
"not",
"necessarily",
"for",
"the",
"transaction",
"associated",
"with",
"this",
"XAResource",
"object",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/jdbc/pool/JDBCXAResource.java#L150-L164 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/jdbc/pool/JDBCXAResource.java | JDBCXAResource.isSameRM | public boolean isSameRM(XAResource xares) throws XAException {
if (!(xares instanceof JDBCXAResource)) {
return false;
}
return xaDataSource == ((JDBCXAResource) xares).getXADataSource();
} | java | public boolean isSameRM(XAResource xares) throws XAException {
if (!(xares instanceof JDBCXAResource)) {
return false;
}
return xaDataSource == ((JDBCXAResource) xares).getXADataSource();
} | [
"public",
"boolean",
"isSameRM",
"(",
"XAResource",
"xares",
")",
"throws",
"XAException",
"{",
"if",
"(",
"!",
"(",
"xares",
"instanceof",
"JDBCXAResource",
")",
")",
"{",
"return",
"false",
";",
"}",
"return",
"xaDataSource",
"==",
"(",
"(",
"JDBCXAResourc... | Stub. See implementation comment in the method for why this is
not implemented yet.
@return false. | [
"Stub",
".",
"See",
"implementation",
"comment",
"in",
"the",
"method",
"for",
"why",
"this",
"is",
"not",
"implemented",
"yet",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/jdbc/pool/JDBCXAResource.java#L280-L287 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/jdbc/pool/JDBCXAResource.java | JDBCXAResource.prepare | public int prepare(Xid xid) throws XAException {
validateXid(xid);
/**
* @todo: This is where the real 2-phase work should be done to
* determine if a commit done here would succeed or not.
*/
/**
* @todo: May improve performance to return XA_RDONLY whenever
* possible, but I don't know.
* Could determine this by checking if DB instance is in RO mode,
* or perhaps (with much difficulty) to determine if there have
* been any modifications performed.
*/
if (state != XA_STATE_ENDED) {
throw new XAException("Invalid XAResource state");
}
// throw new XAException(
// "Sorry. HSQLDB has not implemented 2-phase commits yet");
state = XA_STATE_PREPARED;
return XA_OK; // As noted above, should check non-committed work.
} | java | public int prepare(Xid xid) throws XAException {
validateXid(xid);
/**
* @todo: This is where the real 2-phase work should be done to
* determine if a commit done here would succeed or not.
*/
/**
* @todo: May improve performance to return XA_RDONLY whenever
* possible, but I don't know.
* Could determine this by checking if DB instance is in RO mode,
* or perhaps (with much difficulty) to determine if there have
* been any modifications performed.
*/
if (state != XA_STATE_ENDED) {
throw new XAException("Invalid XAResource state");
}
// throw new XAException(
// "Sorry. HSQLDB has not implemented 2-phase commits yet");
state = XA_STATE_PREPARED;
return XA_OK; // As noted above, should check non-committed work.
} | [
"public",
"int",
"prepare",
"(",
"Xid",
"xid",
")",
"throws",
"XAException",
"{",
"validateXid",
"(",
"xid",
")",
";",
"/**\n * @todo: This is where the real 2-phase work should be done to\n * determine if a commit done here would succeed or not.\n */",
"/**\... | Vote on whether to commit the global transaction.
@throws XAException to vote negative.
@return commitType of XA_RDONLY or XA_OK. (Actually only XA_OK now). | [
"Vote",
"on",
"whether",
"to",
"commit",
"the",
"global",
"transaction",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/jdbc/pool/JDBCXAResource.java#L295-L320 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/jdbc/pool/JDBCXAResource.java | JDBCXAResource.rollback | public void rollback(Xid xid) throws XAException {
JDBCXAResource resource = xaDataSource.getResource(xid);
if (resource == null) {
throw new XAException(
"The XADataSource has no such Xid in prepared state: " + xid);
}
resource.rollbackThis();
} | java | public void rollback(Xid xid) throws XAException {
JDBCXAResource resource = xaDataSource.getResource(xid);
if (resource == null) {
throw new XAException(
"The XADataSource has no such Xid in prepared state: " + xid);
}
resource.rollbackThis();
} | [
"public",
"void",
"rollback",
"(",
"Xid",
"xid",
")",
"throws",
"XAException",
"{",
"JDBCXAResource",
"resource",
"=",
"xaDataSource",
".",
"getResource",
"(",
"xid",
")",
";",
"if",
"(",
"resource",
"==",
"null",
")",
"{",
"throw",
"new",
"XAException",
"... | Per the JDBC 3.0 spec, this rolls back the transaction for the
specified Xid, not necessarily for the transaction associated
with this XAResource object. | [
"Per",
"the",
"JDBC",
"3",
".",
"0",
"spec",
"this",
"rolls",
"back",
"the",
"transaction",
"for",
"the",
"specified",
"Xid",
"not",
"necessarily",
"for",
"the",
"transaction",
"associated",
"with",
"this",
"XAResource",
"object",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/jdbc/pool/JDBCXAResource.java#L338-L348 | train |
VoltDB/voltdb | third_party/java/src/org/apache/commons_voltpatches/cli/Option.java | Option.processValue | private void processValue(String value)
{
// this Option has a separator character
if (hasValueSeparator())
{
// get the separator character
char sep = getValueSeparator();
// store the index for the value separator
int index = value.indexOf(sep);
// while there are more value separators
while (index != -1)
{
// next value to be added
if (values.size() == numberOfArgs - 1)
{
break;
}
// store
add(value.substring(0, index));
// parse
value = value.substring(index + 1);
// get new index
index = value.indexOf(sep);
}
}
// store the actual value or the last value that has been parsed
add(value);
} | java | private void processValue(String value)
{
// this Option has a separator character
if (hasValueSeparator())
{
// get the separator character
char sep = getValueSeparator();
// store the index for the value separator
int index = value.indexOf(sep);
// while there are more value separators
while (index != -1)
{
// next value to be added
if (values.size() == numberOfArgs - 1)
{
break;
}
// store
add(value.substring(0, index));
// parse
value = value.substring(index + 1);
// get new index
index = value.indexOf(sep);
}
}
// store the actual value or the last value that has been parsed
add(value);
} | [
"private",
"void",
"processValue",
"(",
"String",
"value",
")",
"{",
"// this Option has a separator character",
"if",
"(",
"hasValueSeparator",
"(",
")",
")",
"{",
"// get the separator character",
"char",
"sep",
"=",
"getValueSeparator",
"(",
")",
";",
"// store the... | Processes the value. If this Option has a value separator
the value will have to be parsed into individual tokens. When
n-1 tokens have been processed and there are more value separators
in the value, parsing is ceased and the remaining characters are
added as a single token.
@param value The String to be processed.
@since 1.0.1 | [
"Processes",
"the",
"value",
".",
"If",
"this",
"Option",
"has",
"a",
"value",
"separator",
"the",
"value",
"will",
"have",
"to",
"be",
"parsed",
"into",
"individual",
"tokens",
".",
"When",
"n",
"-",
"1",
"tokens",
"have",
"been",
"processed",
"and",
"t... | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/third_party/java/src/org/apache/commons_voltpatches/cli/Option.java#L451-L484 | train |
VoltDB/voltdb | third_party/java/src/com/google_voltpatches/common/util/concurrent/Monitor.java | Monitor.enterWhenUninterruptibly | public boolean enterWhenUninterruptibly(Guard guard, long time, TimeUnit unit) {
final long timeoutNanos = toSafeNanos(time, unit);
if (guard.monitor != this) {
throw new IllegalMonitorStateException();
}
final ReentrantLock lock = this.lock;
long startTime = 0L;
boolean signalBeforeWaiting = lock.isHeldByCurrentThread();
boolean interrupted = Thread.interrupted();
try {
if (fair || !lock.tryLock()) {
startTime = initNanoTime(timeoutNanos);
for (long remainingNanos = timeoutNanos; ; ) {
try {
if (lock.tryLock(remainingNanos, TimeUnit.NANOSECONDS)) {
break;
} else {
return false;
}
} catch (InterruptedException interrupt) {
interrupted = true;
remainingNanos = remainingNanos(startTime, timeoutNanos);
}
}
}
boolean satisfied = false;
try {
while (true) {
try {
if (guard.isSatisfied()) {
satisfied = true;
} else {
final long remainingNanos;
if (startTime == 0L) {
startTime = initNanoTime(timeoutNanos);
remainingNanos = timeoutNanos;
} else {
remainingNanos = remainingNanos(startTime, timeoutNanos);
}
satisfied = awaitNanos(guard, remainingNanos, signalBeforeWaiting);
}
return satisfied;
} catch (InterruptedException interrupt) {
interrupted = true;
signalBeforeWaiting = false;
}
}
} finally {
if (!satisfied) {
lock.unlock(); // No need to signal if timed out
}
}
} finally {
if (interrupted) {
Thread.currentThread().interrupt();
}
}
} | java | public boolean enterWhenUninterruptibly(Guard guard, long time, TimeUnit unit) {
final long timeoutNanos = toSafeNanos(time, unit);
if (guard.monitor != this) {
throw new IllegalMonitorStateException();
}
final ReentrantLock lock = this.lock;
long startTime = 0L;
boolean signalBeforeWaiting = lock.isHeldByCurrentThread();
boolean interrupted = Thread.interrupted();
try {
if (fair || !lock.tryLock()) {
startTime = initNanoTime(timeoutNanos);
for (long remainingNanos = timeoutNanos; ; ) {
try {
if (lock.tryLock(remainingNanos, TimeUnit.NANOSECONDS)) {
break;
} else {
return false;
}
} catch (InterruptedException interrupt) {
interrupted = true;
remainingNanos = remainingNanos(startTime, timeoutNanos);
}
}
}
boolean satisfied = false;
try {
while (true) {
try {
if (guard.isSatisfied()) {
satisfied = true;
} else {
final long remainingNanos;
if (startTime == 0L) {
startTime = initNanoTime(timeoutNanos);
remainingNanos = timeoutNanos;
} else {
remainingNanos = remainingNanos(startTime, timeoutNanos);
}
satisfied = awaitNanos(guard, remainingNanos, signalBeforeWaiting);
}
return satisfied;
} catch (InterruptedException interrupt) {
interrupted = true;
signalBeforeWaiting = false;
}
}
} finally {
if (!satisfied) {
lock.unlock(); // No need to signal if timed out
}
}
} finally {
if (interrupted) {
Thread.currentThread().interrupt();
}
}
} | [
"public",
"boolean",
"enterWhenUninterruptibly",
"(",
"Guard",
"guard",
",",
"long",
"time",
",",
"TimeUnit",
"unit",
")",
"{",
"final",
"long",
"timeoutNanos",
"=",
"toSafeNanos",
"(",
"time",
",",
"unit",
")",
";",
"if",
"(",
"guard",
".",
"monitor",
"!=... | Enters this monitor when the guard is satisfied. Blocks at most the given time, including both
the time to acquire the lock and the time to wait for the guard to be satisfied.
@return whether the monitor was entered, which guarantees that the guard is now satisfied | [
"Enters",
"this",
"monitor",
"when",
"the",
"guard",
"is",
"satisfied",
".",
"Blocks",
"at",
"most",
"the",
"given",
"time",
"including",
"both",
"the",
"time",
"to",
"acquire",
"the",
"lock",
"and",
"the",
"time",
"to",
"wait",
"for",
"the",
"guard",
"t... | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/third_party/java/src/com/google_voltpatches/common/util/concurrent/Monitor.java#L543-L601 | train |
VoltDB/voltdb | third_party/java/src/com/google_voltpatches/common/util/concurrent/Monitor.java | Monitor.enterIfInterruptibly | public boolean enterIfInterruptibly(Guard guard, long time, TimeUnit unit)
throws InterruptedException {
if (guard.monitor != this) {
throw new IllegalMonitorStateException();
}
final ReentrantLock lock = this.lock;
if (!lock.tryLock(time, unit)) {
return false;
}
boolean satisfied = false;
try {
return satisfied = guard.isSatisfied();
} finally {
if (!satisfied) {
lock.unlock();
}
}
} | java | public boolean enterIfInterruptibly(Guard guard, long time, TimeUnit unit)
throws InterruptedException {
if (guard.monitor != this) {
throw new IllegalMonitorStateException();
}
final ReentrantLock lock = this.lock;
if (!lock.tryLock(time, unit)) {
return false;
}
boolean satisfied = false;
try {
return satisfied = guard.isSatisfied();
} finally {
if (!satisfied) {
lock.unlock();
}
}
} | [
"public",
"boolean",
"enterIfInterruptibly",
"(",
"Guard",
"guard",
",",
"long",
"time",
",",
"TimeUnit",
"unit",
")",
"throws",
"InterruptedException",
"{",
"if",
"(",
"guard",
".",
"monitor",
"!=",
"this",
")",
"{",
"throw",
"new",
"IllegalMonitorStateExceptio... | Enters this monitor if the guard is satisfied. Blocks at most the given time acquiring the
lock, but does not wait for the guard to be satisfied, and may be interrupted.
@return whether the monitor was entered, which guarantees that the guard is now satisfied | [
"Enters",
"this",
"monitor",
"if",
"the",
"guard",
"is",
"satisfied",
".",
"Blocks",
"at",
"most",
"the",
"given",
"time",
"acquiring",
"the",
"lock",
"but",
"does",
"not",
"wait",
"for",
"the",
"guard",
"to",
"be",
"satisfied",
"and",
"may",
"be",
"inte... | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/third_party/java/src/com/google_voltpatches/common/util/concurrent/Monitor.java#L680-L698 | train |
VoltDB/voltdb | third_party/java/src/com/google_voltpatches/common/util/concurrent/Monitor.java | Monitor.waitFor | public boolean waitFor(Guard guard, long time, TimeUnit unit) throws InterruptedException {
final long timeoutNanos = toSafeNanos(time, unit);
if (!((guard.monitor == this) & lock.isHeldByCurrentThread())) {
throw new IllegalMonitorStateException();
}
if (guard.isSatisfied()) {
return true;
}
if (Thread.interrupted()) {
throw new InterruptedException();
}
return awaitNanos(guard, timeoutNanos, true);
} | java | public boolean waitFor(Guard guard, long time, TimeUnit unit) throws InterruptedException {
final long timeoutNanos = toSafeNanos(time, unit);
if (!((guard.monitor == this) & lock.isHeldByCurrentThread())) {
throw new IllegalMonitorStateException();
}
if (guard.isSatisfied()) {
return true;
}
if (Thread.interrupted()) {
throw new InterruptedException();
}
return awaitNanos(guard, timeoutNanos, true);
} | [
"public",
"boolean",
"waitFor",
"(",
"Guard",
"guard",
",",
"long",
"time",
",",
"TimeUnit",
"unit",
")",
"throws",
"InterruptedException",
"{",
"final",
"long",
"timeoutNanos",
"=",
"toSafeNanos",
"(",
"time",
",",
"unit",
")",
";",
"if",
"(",
"!",
"(",
... | Waits for the guard to be satisfied. Waits at most the given time, and may be interrupted. May
be called only by a thread currently occupying this monitor.
@return whether the guard is now satisfied
@throws InterruptedException if interrupted while waiting | [
"Waits",
"for",
"the",
"guard",
"to",
"be",
"satisfied",
".",
"Waits",
"at",
"most",
"the",
"given",
"time",
"and",
"may",
"be",
"interrupted",
".",
"May",
"be",
"called",
"only",
"by",
"a",
"thread",
"currently",
"occupying",
"this",
"monitor",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/third_party/java/src/com/google_voltpatches/common/util/concurrent/Monitor.java#L762-L774 | train |
VoltDB/voltdb | src/frontend/org/voltdb/rejoin/StreamSnapshotAckSender.java | StreamSnapshotAckSender.ack | public void ack(long hsId, boolean isEOS, long targetId, int blockIndex) {
rejoinLog.debug("Queue ack for hsId:" + hsId + " isEOS: " +
isEOS + " targetId:" + targetId + " blockIndex: " + blockIndex);
m_blockIndices.offer(Pair.of(hsId, new RejoinDataAckMessage(isEOS, targetId, blockIndex)));
} | java | public void ack(long hsId, boolean isEOS, long targetId, int blockIndex) {
rejoinLog.debug("Queue ack for hsId:" + hsId + " isEOS: " +
isEOS + " targetId:" + targetId + " blockIndex: " + blockIndex);
m_blockIndices.offer(Pair.of(hsId, new RejoinDataAckMessage(isEOS, targetId, blockIndex)));
} | [
"public",
"void",
"ack",
"(",
"long",
"hsId",
",",
"boolean",
"isEOS",
",",
"long",
"targetId",
",",
"int",
"blockIndex",
")",
"{",
"rejoinLog",
".",
"debug",
"(",
"\"Queue ack for hsId:\"",
"+",
"hsId",
"+",
"\" isEOS: \"",
"+",
"isEOS",
"+",
"\" targetId:\... | Ack with a positive block index.
@param hsId The mailbox to send the ack to
@param blockIndex | [
"Ack",
"with",
"a",
"positive",
"block",
"index",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/rejoin/StreamSnapshotAckSender.java#L50-L54 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.absolute | @Override
public boolean absolute(int row) throws SQLException {
checkClosed();
if (rowCount == 0) {
if (row == 0) {
return true;
}
return false;
}
if (row == 0) {
beforeFirst();
return true;
}
if (rowCount + row < 0) {
beforeFirst();
return false;
}
if (row > rowCount) {
cursorPosition = Position.afterLast;
if(row == rowCount+1) {
return true;
}
else {
return false;
}
}
try {
// for negative row numbers or row numbers lesser then activeRowIndex, resetRowPosition
// method is called and the cursor advances to the desired row from top of the table
if(row < 0) {
row += rowCount;
row++;
}
if(table.getActiveRowIndex() > row || cursorPosition != Position.middle) {
table.resetRowPosition();
table.advanceToRow(0);
}
cursorPosition = Position.middle;
return table.advanceToRow(row-1);
} catch (Exception x) {
throw SQLError.get(x);
}
} | java | @Override
public boolean absolute(int row) throws SQLException {
checkClosed();
if (rowCount == 0) {
if (row == 0) {
return true;
}
return false;
}
if (row == 0) {
beforeFirst();
return true;
}
if (rowCount + row < 0) {
beforeFirst();
return false;
}
if (row > rowCount) {
cursorPosition = Position.afterLast;
if(row == rowCount+1) {
return true;
}
else {
return false;
}
}
try {
// for negative row numbers or row numbers lesser then activeRowIndex, resetRowPosition
// method is called and the cursor advances to the desired row from top of the table
if(row < 0) {
row += rowCount;
row++;
}
if(table.getActiveRowIndex() > row || cursorPosition != Position.middle) {
table.resetRowPosition();
table.advanceToRow(0);
}
cursorPosition = Position.middle;
return table.advanceToRow(row-1);
} catch (Exception x) {
throw SQLError.get(x);
}
} | [
"@",
"Override",
"public",
"boolean",
"absolute",
"(",
"int",
"row",
")",
"throws",
"SQLException",
"{",
"checkClosed",
"(",
")",
";",
"if",
"(",
"rowCount",
"==",
"0",
")",
"{",
"if",
"(",
"row",
"==",
"0",
")",
"{",
"return",
"true",
";",
"}",
"r... | Moves the cursor to the given row number in this ResultSet object. | [
"Moves",
"the",
"cursor",
"to",
"the",
"given",
"row",
"number",
"in",
"this",
"ResultSet",
"object",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L99-L141 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.findColumn | @Override
public int findColumn(String columnLabel) throws SQLException {
checkClosed();
try {
return table.getColumnIndex(columnLabel) + 1;
} catch (IllegalArgumentException iax) {
throw SQLError.get(iax, SQLError.COLUMN_NOT_FOUND, columnLabel);
} catch (Exception x) {
throw SQLError.get(x);
}
} | java | @Override
public int findColumn(String columnLabel) throws SQLException {
checkClosed();
try {
return table.getColumnIndex(columnLabel) + 1;
} catch (IllegalArgumentException iax) {
throw SQLError.get(iax, SQLError.COLUMN_NOT_FOUND, columnLabel);
} catch (Exception x) {
throw SQLError.get(x);
}
} | [
"@",
"Override",
"public",
"int",
"findColumn",
"(",
"String",
"columnLabel",
")",
"throws",
"SQLException",
"{",
"checkClosed",
"(",
")",
";",
"try",
"{",
"return",
"table",
".",
"getColumnIndex",
"(",
"columnLabel",
")",
"+",
"1",
";",
"}",
"catch",
"(",... | Maps the given ResultSet column label to its ResultSet column index. | [
"Maps",
"the",
"given",
"ResultSet",
"column",
"label",
"to",
"its",
"ResultSet",
"column",
"index",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L187-L197 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.getBigDecimal | @Override
public BigDecimal getBigDecimal(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
final VoltType type = table.getColumnType(columnIndex - 1);
BigDecimal decimalValue = null;
switch(type) {
case TINYINT:
decimalValue = new BigDecimal(table.getLong(columnIndex - 1));
break;
case SMALLINT:
decimalValue = new BigDecimal(table.getLong(columnIndex - 1));
break;
case INTEGER:
decimalValue = new BigDecimal(table.getLong(columnIndex - 1));
break;
case BIGINT:
decimalValue = new BigDecimal(table.getLong(columnIndex - 1));
break;
case FLOAT:
decimalValue = new BigDecimal(table.getDouble(columnIndex - 1));
break;
case DECIMAL:
decimalValue = table.getDecimalAsBigDecimal(columnIndex - 1);
break;
default:
throw new IllegalArgumentException("Cannot get BigDecimal value for column type '" + type + "'");
}
return table.wasNull() ? null : decimalValue;
} catch (Exception x) {
throw SQLError.get(x);
}
} | java | @Override
public BigDecimal getBigDecimal(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
final VoltType type = table.getColumnType(columnIndex - 1);
BigDecimal decimalValue = null;
switch(type) {
case TINYINT:
decimalValue = new BigDecimal(table.getLong(columnIndex - 1));
break;
case SMALLINT:
decimalValue = new BigDecimal(table.getLong(columnIndex - 1));
break;
case INTEGER:
decimalValue = new BigDecimal(table.getLong(columnIndex - 1));
break;
case BIGINT:
decimalValue = new BigDecimal(table.getLong(columnIndex - 1));
break;
case FLOAT:
decimalValue = new BigDecimal(table.getDouble(columnIndex - 1));
break;
case DECIMAL:
decimalValue = table.getDecimalAsBigDecimal(columnIndex - 1);
break;
default:
throw new IllegalArgumentException("Cannot get BigDecimal value for column type '" + type + "'");
}
return table.wasNull() ? null : decimalValue;
} catch (Exception x) {
throw SQLError.get(x);
}
} | [
"@",
"Override",
"public",
"BigDecimal",
"getBigDecimal",
"(",
"int",
"columnIndex",
")",
"throws",
"SQLException",
"{",
"checkColumnBounds",
"(",
"columnIndex",
")",
";",
"try",
"{",
"final",
"VoltType",
"type",
"=",
"table",
".",
"getColumnType",
"(",
"columnI... | ResultSet object as a java.math.BigDecimal with full precision. | [
"ResultSet",
"object",
"as",
"a",
"java",
".",
"math",
".",
"BigDecimal",
"with",
"full",
"precision",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L256-L289 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.getBinaryStream | @Override
public InputStream getBinaryStream(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
return new ByteArrayInputStream(
table.getStringAsBytes(columnIndex - 1));
} catch (Exception x) {
throw SQLError.get(x);
}
} | java | @Override
public InputStream getBinaryStream(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
return new ByteArrayInputStream(
table.getStringAsBytes(columnIndex - 1));
} catch (Exception x) {
throw SQLError.get(x);
}
} | [
"@",
"Override",
"public",
"InputStream",
"getBinaryStream",
"(",
"int",
"columnIndex",
")",
"throws",
"SQLException",
"{",
"checkColumnBounds",
"(",
"columnIndex",
")",
";",
"try",
"{",
"return",
"new",
"ByteArrayInputStream",
"(",
"table",
".",
"getStringAsBytes",... | ResultSet object as a stream of uninterpreted bytes. | [
"ResultSet",
"object",
"as",
"a",
"stream",
"of",
"uninterpreted",
"bytes",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L317-L326 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.getBlob | @Override
public Blob getBlob(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
return new SerialBlob(table.getStringAsBytes(columnIndex - 1));
} catch (Exception x) {
throw SQLError.get(x);
}
} | java | @Override
public Blob getBlob(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
return new SerialBlob(table.getStringAsBytes(columnIndex - 1));
} catch (Exception x) {
throw SQLError.get(x);
}
} | [
"@",
"Override",
"public",
"Blob",
"getBlob",
"(",
"int",
"columnIndex",
")",
"throws",
"SQLException",
"{",
"checkColumnBounds",
"(",
"columnIndex",
")",
";",
"try",
"{",
"return",
"new",
"SerialBlob",
"(",
"table",
".",
"getStringAsBytes",
"(",
"columnIndex",
... | ResultSet object as a Blob object in the Java programming language. | [
"ResultSet",
"object",
"as",
"a",
"Blob",
"object",
"in",
"the",
"Java",
"programming",
"language",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L337-L345 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.getBoolean | @Override
public boolean getBoolean(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
// TODO: Tempting to apply a != 0 operation on numbers and
// .equals("true") on strings, but... hacky
try {
return (new Long(table.getLong(columnIndex - 1))).intValue() == 1;
} catch (Exception x) {
throw SQLError.get(x);
}
} | java | @Override
public boolean getBoolean(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
// TODO: Tempting to apply a != 0 operation on numbers and
// .equals("true") on strings, but... hacky
try {
return (new Long(table.getLong(columnIndex - 1))).intValue() == 1;
} catch (Exception x) {
throw SQLError.get(x);
}
} | [
"@",
"Override",
"public",
"boolean",
"getBoolean",
"(",
"int",
"columnIndex",
")",
"throws",
"SQLException",
"{",
"checkColumnBounds",
"(",
"columnIndex",
")",
";",
"// TODO: Tempting to apply a != 0 operation on numbers and",
"// .equals(\"true\") on strings, but... hacky",
"... | ResultSet object as a boolean in the Java programming language. | [
"ResultSet",
"object",
"as",
"a",
"boolean",
"in",
"the",
"Java",
"programming",
"language",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L396-L406 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.getByte | @Override
public byte getByte(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
Long longValue = getPrivateInteger(columnIndex);
if (longValue > Byte.MAX_VALUE || longValue < Byte.MIN_VALUE) {
throw new SQLException("Value out of byte range");
}
return longValue.byteValue();
} catch (Exception x) {
throw SQLError.get(x);
}
} | java | @Override
public byte getByte(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
Long longValue = getPrivateInteger(columnIndex);
if (longValue > Byte.MAX_VALUE || longValue < Byte.MIN_VALUE) {
throw new SQLException("Value out of byte range");
}
return longValue.byteValue();
} catch (Exception x) {
throw SQLError.get(x);
}
} | [
"@",
"Override",
"public",
"byte",
"getByte",
"(",
"int",
"columnIndex",
")",
"throws",
"SQLException",
"{",
"checkColumnBounds",
"(",
"columnIndex",
")",
";",
"try",
"{",
"Long",
"longValue",
"=",
"getPrivateInteger",
"(",
"columnIndex",
")",
";",
"if",
"(",
... | ResultSet object as a byte in the Java programming language. | [
"ResultSet",
"object",
"as",
"a",
"byte",
"in",
"the",
"Java",
"programming",
"language",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L417-L429 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.getBytes | @Override
public byte[] getBytes(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
if (table.getColumnType(columnIndex - 1) == VoltType.STRING)
return table.getStringAsBytes(columnIndex - 1);
else if (table.getColumnType(columnIndex - 1) == VoltType.VARBINARY)
return table.getVarbinary(columnIndex - 1);
else
throw SQLError.get(SQLError.CONVERSION_NOT_FOUND,
table.getColumnType(columnIndex - 1), "byte[]");
} catch (SQLException x) {
throw x;
} catch (Exception x) {
throw SQLError.get(x);
}
} | java | @Override
public byte[] getBytes(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
if (table.getColumnType(columnIndex - 1) == VoltType.STRING)
return table.getStringAsBytes(columnIndex - 1);
else if (table.getColumnType(columnIndex - 1) == VoltType.VARBINARY)
return table.getVarbinary(columnIndex - 1);
else
throw SQLError.get(SQLError.CONVERSION_NOT_FOUND,
table.getColumnType(columnIndex - 1), "byte[]");
} catch (SQLException x) {
throw x;
} catch (Exception x) {
throw SQLError.get(x);
}
} | [
"@",
"Override",
"public",
"byte",
"[",
"]",
"getBytes",
"(",
"int",
"columnIndex",
")",
"throws",
"SQLException",
"{",
"checkColumnBounds",
"(",
"columnIndex",
")",
";",
"try",
"{",
"if",
"(",
"table",
".",
"getColumnType",
"(",
"columnIndex",
"-",
"1",
"... | ResultSet object as a byte array in the Java programming language. | [
"ResultSet",
"object",
"as",
"a",
"byte",
"array",
"in",
"the",
"Java",
"programming",
"language",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L440-L456 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.getClob | @Override
public Clob getClob(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
return new SerialClob(table.getString(columnIndex - 1)
.toCharArray());
} catch (Exception x) {
throw SQLError.get(x);
}
} | java | @Override
public Clob getClob(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
return new SerialClob(table.getString(columnIndex - 1)
.toCharArray());
} catch (Exception x) {
throw SQLError.get(x);
}
} | [
"@",
"Override",
"public",
"Clob",
"getClob",
"(",
"int",
"columnIndex",
")",
"throws",
"SQLException",
"{",
"checkColumnBounds",
"(",
"columnIndex",
")",
";",
"try",
"{",
"return",
"new",
"SerialClob",
"(",
"table",
".",
"getString",
"(",
"columnIndex",
"-",
... | ResultSet object as a Clob object in the Java programming language. | [
"ResultSet",
"object",
"as",
"a",
"Clob",
"object",
"in",
"the",
"Java",
"programming",
"language",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L489-L498 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.getFloat | @Override
public float getFloat(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
final VoltType type = table.getColumnType(columnIndex - 1);
Double doubleValue = null;
switch(type) {
case TINYINT:
doubleValue = new Double(table.getLong(columnIndex - 1));
break;
case SMALLINT:
doubleValue = new Double(table.getLong(columnIndex - 1));
break;
case INTEGER:
doubleValue = new Double(table.getLong(columnIndex - 1));
break;
case BIGINT:
doubleValue = new Double(table.getLong(columnIndex - 1));
break;
case FLOAT:
doubleValue = new Double(table.getDouble(columnIndex - 1));
break;
case DECIMAL:
doubleValue = table.getDecimalAsBigDecimal(columnIndex - 1).doubleValue();
break;
default:
throw new IllegalArgumentException("Cannot get float value for column type '" + type + "'");
}
if (table.wasNull()) {
doubleValue = new Double(0);
} else if (Math.abs(doubleValue) > new Double(Float.MAX_VALUE)) {
throw new SQLException("Value out of float range");
}
return doubleValue.floatValue();
} catch (Exception x) {
throw SQLError.get(x);
}
} | java | @Override
public float getFloat(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
final VoltType type = table.getColumnType(columnIndex - 1);
Double doubleValue = null;
switch(type) {
case TINYINT:
doubleValue = new Double(table.getLong(columnIndex - 1));
break;
case SMALLINT:
doubleValue = new Double(table.getLong(columnIndex - 1));
break;
case INTEGER:
doubleValue = new Double(table.getLong(columnIndex - 1));
break;
case BIGINT:
doubleValue = new Double(table.getLong(columnIndex - 1));
break;
case FLOAT:
doubleValue = new Double(table.getDouble(columnIndex - 1));
break;
case DECIMAL:
doubleValue = table.getDecimalAsBigDecimal(columnIndex - 1).doubleValue();
break;
default:
throw new IllegalArgumentException("Cannot get float value for column type '" + type + "'");
}
if (table.wasNull()) {
doubleValue = new Double(0);
} else if (Math.abs(doubleValue) > new Double(Float.MAX_VALUE)) {
throw new SQLException("Value out of float range");
}
return doubleValue.floatValue();
} catch (Exception x) {
throw SQLError.get(x);
}
} | [
"@",
"Override",
"public",
"float",
"getFloat",
"(",
"int",
"columnIndex",
")",
"throws",
"SQLException",
"{",
"checkColumnBounds",
"(",
"columnIndex",
")",
";",
"try",
"{",
"final",
"VoltType",
"type",
"=",
"table",
".",
"getColumnType",
"(",
"columnIndex",
"... | ResultSet object as a float in the Java programming language. | [
"ResultSet",
"object",
"as",
"a",
"float",
"in",
"the",
"Java",
"programming",
"language",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L623-L661 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.getInt | @Override
public int getInt(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
Long longValue = getPrivateInteger(columnIndex);
if (longValue > Integer.MAX_VALUE || longValue < Integer.MIN_VALUE) {
throw new SQLException("Value out of int range");
}
return longValue.intValue();
} catch (Exception x) {
throw SQLError.get(x);
}
} | java | @Override
public int getInt(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
Long longValue = getPrivateInteger(columnIndex);
if (longValue > Integer.MAX_VALUE || longValue < Integer.MIN_VALUE) {
throw new SQLException("Value out of int range");
}
return longValue.intValue();
} catch (Exception x) {
throw SQLError.get(x);
}
} | [
"@",
"Override",
"public",
"int",
"getInt",
"(",
"int",
"columnIndex",
")",
"throws",
"SQLException",
"{",
"checkColumnBounds",
"(",
"columnIndex",
")",
";",
"try",
"{",
"Long",
"longValue",
"=",
"getPrivateInteger",
"(",
"columnIndex",
")",
";",
"if",
"(",
... | ResultSet object as an int in the Java programming language. | [
"ResultSet",
"object",
"as",
"an",
"int",
"in",
"the",
"Java",
"programming",
"language",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L678-L690 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.getLong | @Override
public long getLong(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
Long longValue = getPrivateInteger(columnIndex);
return longValue;
} catch (Exception x) {
throw SQLError.get(x);
}
} | java | @Override
public long getLong(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
Long longValue = getPrivateInteger(columnIndex);
return longValue;
} catch (Exception x) {
throw SQLError.get(x);
}
} | [
"@",
"Override",
"public",
"long",
"getLong",
"(",
"int",
"columnIndex",
")",
"throws",
"SQLException",
"{",
"checkColumnBounds",
"(",
"columnIndex",
")",
";",
"try",
"{",
"Long",
"longValue",
"=",
"getPrivateInteger",
"(",
"columnIndex",
")",
";",
"return",
"... | ResultSet object as a long in the Java programming language. | [
"ResultSet",
"object",
"as",
"a",
"long",
"in",
"the",
"Java",
"programming",
"language",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L701-L710 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.getNCharacterStream | @Override
public Reader getNCharacterStream(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
String value = table.getString(columnIndex - 1);
if (!wasNull())
return new StringReader(value);
return null;
} catch (Exception x) {
throw SQLError.get(x);
}
} | java | @Override
public Reader getNCharacterStream(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
String value = table.getString(columnIndex - 1);
if (!wasNull())
return new StringReader(value);
return null;
} catch (Exception x) {
throw SQLError.get(x);
}
} | [
"@",
"Override",
"public",
"Reader",
"getNCharacterStream",
"(",
"int",
"columnIndex",
")",
"throws",
"SQLException",
"{",
"checkColumnBounds",
"(",
"columnIndex",
")",
";",
"try",
"{",
"String",
"value",
"=",
"table",
".",
"getString",
"(",
"columnIndex",
"-",
... | ResultSet object as a java.io.Reader object. | [
"ResultSet",
"object",
"as",
"a",
"java",
".",
"io",
".",
"Reader",
"object",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L729-L740 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.getNClob | @Override
public NClob getNClob(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
return new JDBC4NClob(table.getString(columnIndex - 1)
.toCharArray());
} catch (Exception x) {
throw SQLError.get(x);
}
} | java | @Override
public NClob getNClob(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
return new JDBC4NClob(table.getString(columnIndex - 1)
.toCharArray());
} catch (Exception x) {
throw SQLError.get(x);
}
} | [
"@",
"Override",
"public",
"NClob",
"getNClob",
"(",
"int",
"columnIndex",
")",
"throws",
"SQLException",
"{",
"checkColumnBounds",
"(",
"columnIndex",
")",
";",
"try",
"{",
"return",
"new",
"JDBC4NClob",
"(",
"table",
".",
"getString",
"(",
"columnIndex",
"-"... | ResultSet object as a NClob object in the Java programming language. | [
"ResultSet",
"object",
"as",
"a",
"NClob",
"object",
"in",
"the",
"Java",
"programming",
"language",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L751-L760 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.getObject | @Override
public Object getObject(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
VoltType type = table.getColumnType(columnIndex - 1);
if (type == VoltType.TIMESTAMP)
return getTimestamp(columnIndex);
else
return table.get(columnIndex - 1, type);
} catch (Exception x) {
throw SQLError.get(x);
}
} | java | @Override
public Object getObject(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
VoltType type = table.getColumnType(columnIndex - 1);
if (type == VoltType.TIMESTAMP)
return getTimestamp(columnIndex);
else
return table.get(columnIndex - 1, type);
} catch (Exception x) {
throw SQLError.get(x);
}
} | [
"@",
"Override",
"public",
"Object",
"getObject",
"(",
"int",
"columnIndex",
")",
"throws",
"SQLException",
"{",
"checkColumnBounds",
"(",
"columnIndex",
")",
";",
"try",
"{",
"VoltType",
"type",
"=",
"table",
".",
"getColumnType",
"(",
"columnIndex",
"-",
"1"... | ResultSet object as an Object in the Java programming language. | [
"ResultSet",
"object",
"as",
"an",
"Object",
"in",
"the",
"Java",
"programming",
"language",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L790-L802 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.getShort | @Override
public short getShort(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
Long longValue = getPrivateInteger(columnIndex);
if (longValue > Short.MAX_VALUE || longValue < Short.MIN_VALUE) {
throw new SQLException("Value out of short range");
}
return longValue.shortValue();
} catch (Exception x) {
throw SQLError.get(x);
}
} | java | @Override
public short getShort(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
try {
Long longValue = getPrivateInteger(columnIndex);
if (longValue > Short.MAX_VALUE || longValue < Short.MIN_VALUE) {
throw new SQLException("Value out of short range");
}
return longValue.shortValue();
} catch (Exception x) {
throw SQLError.get(x);
}
} | [
"@",
"Override",
"public",
"short",
"getShort",
"(",
"int",
"columnIndex",
")",
"throws",
"SQLException",
"{",
"checkColumnBounds",
"(",
"columnIndex",
")",
";",
"try",
"{",
"Long",
"longValue",
"=",
"getPrivateInteger",
"(",
"columnIndex",
")",
";",
"if",
"("... | ResultSet object as a short in the Java programming language. | [
"ResultSet",
"object",
"as",
"a",
"short",
"in",
"the",
"Java",
"programming",
"language",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L879-L891 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.getUnicodeStream | @Override
@Deprecated
public InputStream getUnicodeStream(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
throw SQLError.noSupport();
} | java | @Override
@Deprecated
public InputStream getUnicodeStream(int columnIndex) throws SQLException {
checkColumnBounds(columnIndex);
throw SQLError.noSupport();
} | [
"@",
"Override",
"@",
"Deprecated",
"public",
"InputStream",
"getUnicodeStream",
"(",
"int",
"columnIndex",
")",
"throws",
"SQLException",
"{",
"checkColumnBounds",
"(",
"columnIndex",
")",
";",
"throw",
"SQLError",
".",
"noSupport",
"(",
")",
";",
"}"
] | Deprecated. use getCharacterStream in place of getUnicodeStream | [
"Deprecated",
".",
"use",
"getCharacterStream",
"in",
"place",
"of",
"getUnicodeStream"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L1039-L1044 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.getUnicodeStream | @Override
@Deprecated
public InputStream getUnicodeStream(String columnLabel) throws SQLException {
return getUnicodeStream(findColumn(columnLabel));
} | java | @Override
@Deprecated
public InputStream getUnicodeStream(String columnLabel) throws SQLException {
return getUnicodeStream(findColumn(columnLabel));
} | [
"@",
"Override",
"@",
"Deprecated",
"public",
"InputStream",
"getUnicodeStream",
"(",
"String",
"columnLabel",
")",
"throws",
"SQLException",
"{",
"return",
"getUnicodeStream",
"(",
"findColumn",
"(",
"columnLabel",
")",
")",
";",
"}"
] | Deprecated. use getCharacterStream instead | [
"Deprecated",
".",
"use",
"getCharacterStream",
"instead"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L1047-L1051 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.last | @Override
public boolean last() throws SQLException {
checkClosed();
if (rowCount == 0) {
return false;
}
try {
if (cursorPosition != Position.middle) {
cursorPosition = Position.middle;
table.resetRowPosition();
table.advanceToRow(0);
}
return table.advanceToRow(rowCount - 1);
} catch (Exception x) {
throw SQLError.get(x);
}
} | java | @Override
public boolean last() throws SQLException {
checkClosed();
if (rowCount == 0) {
return false;
}
try {
if (cursorPosition != Position.middle) {
cursorPosition = Position.middle;
table.resetRowPosition();
table.advanceToRow(0);
}
return table.advanceToRow(rowCount - 1);
} catch (Exception x) {
throw SQLError.get(x);
}
} | [
"@",
"Override",
"public",
"boolean",
"last",
"(",
")",
"throws",
"SQLException",
"{",
"checkClosed",
"(",
")",
";",
"if",
"(",
"rowCount",
"==",
"0",
")",
"{",
"return",
"false",
";",
"}",
"try",
"{",
"if",
"(",
"cursorPosition",
"!=",
"Position",
"."... | Moves the cursor to the last row in this ResultSet object. | [
"Moves",
"the",
"cursor",
"to",
"the",
"last",
"row",
"in",
"this",
"ResultSet",
"object",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L1145-L1161 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.next | @Override
public boolean next() throws SQLException {
checkClosed();
if (cursorPosition == Position.afterLast || table.getActiveRowIndex() == rowCount - 1) {
cursorPosition = Position.afterLast;
return false;
}
if (cursorPosition == Position.beforeFirst) {
cursorPosition = Position.middle;
}
try {
return table.advanceRow();
} catch (Exception x) {
throw SQLError.get(x);
}
} | java | @Override
public boolean next() throws SQLException {
checkClosed();
if (cursorPosition == Position.afterLast || table.getActiveRowIndex() == rowCount - 1) {
cursorPosition = Position.afterLast;
return false;
}
if (cursorPosition == Position.beforeFirst) {
cursorPosition = Position.middle;
}
try {
return table.advanceRow();
} catch (Exception x) {
throw SQLError.get(x);
}
} | [
"@",
"Override",
"public",
"boolean",
"next",
"(",
")",
"throws",
"SQLException",
"{",
"checkClosed",
"(",
")",
";",
"if",
"(",
"cursorPosition",
"==",
"Position",
".",
"afterLast",
"||",
"table",
".",
"getActiveRowIndex",
"(",
")",
"==",
"rowCount",
"-",
... | Moves the cursor forward one row from its current position. | [
"Moves",
"the",
"cursor",
"forward",
"one",
"row",
"from",
"its",
"current",
"position",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L1179-L1194 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.previous | @Override
public boolean previous() throws SQLException {
checkClosed();
if (cursorPosition == Position.afterLast) {
return last();
}
if (cursorPosition == Position.beforeFirst || table.getActiveRowIndex() <= 0) {
beforeFirst();
return false;
}
try {
int tempRowIndex = table.getActiveRowIndex();
table.resetRowPosition();
table.advanceToRow(0);
return table.advanceToRow(tempRowIndex - 1);
} catch (Exception x) {
throw SQLError.get(x);
}
} | java | @Override
public boolean previous() throws SQLException {
checkClosed();
if (cursorPosition == Position.afterLast) {
return last();
}
if (cursorPosition == Position.beforeFirst || table.getActiveRowIndex() <= 0) {
beforeFirst();
return false;
}
try {
int tempRowIndex = table.getActiveRowIndex();
table.resetRowPosition();
table.advanceToRow(0);
return table.advanceToRow(tempRowIndex - 1);
} catch (Exception x) {
throw SQLError.get(x);
}
} | [
"@",
"Override",
"public",
"boolean",
"previous",
"(",
")",
"throws",
"SQLException",
"{",
"checkClosed",
"(",
")",
";",
"if",
"(",
"cursorPosition",
"==",
"Position",
".",
"afterLast",
")",
"{",
"return",
"last",
"(",
")",
";",
"}",
"if",
"(",
"cursorPo... | Moves the cursor to the previous row in this ResultSet object. | [
"Moves",
"the",
"cursor",
"to",
"the",
"previous",
"row",
"in",
"this",
"ResultSet",
"object",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L1197-L1215 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.relative | @Override
public boolean relative(int rows) throws SQLException {
checkClosed();
if (rowCount == 0) {
return false;
}
if (cursorPosition == Position.afterLast && rows > 0) {
return false;
}
if (cursorPosition == Position.beforeFirst && rows <= 0) {
return false;
}
if (table.getActiveRowIndex() + rows >= rowCount) {
cursorPosition = Position.afterLast;
if (table.getActiveRowIndex() + rows == rowCount) {
return true;
}
return false;
}
try {
// for negative row numbers, resetRowPosition method is called
// and the cursor advances to the desired row from top of the table
int rowsToMove = table.getActiveRowIndex() + rows;
if (cursorPosition == Position.beforeFirst || rows < 0) {
if(cursorPosition == Position.afterLast) {
rowsToMove = rowCount + rows;
}
else if(cursorPosition == Position.beforeFirst) {
rowsToMove = rows - 1;
}
else {
rowsToMove = table.getActiveRowIndex() + rows;
}
if(rowsToMove < 0){
beforeFirst();
return false;
}
table.resetRowPosition();
table.advanceToRow(0);
}
cursorPosition = Position.middle;
return table.advanceToRow(rowsToMove);
} catch (Exception x) {
throw SQLError.get(x);
}
} | java | @Override
public boolean relative(int rows) throws SQLException {
checkClosed();
if (rowCount == 0) {
return false;
}
if (cursorPosition == Position.afterLast && rows > 0) {
return false;
}
if (cursorPosition == Position.beforeFirst && rows <= 0) {
return false;
}
if (table.getActiveRowIndex() + rows >= rowCount) {
cursorPosition = Position.afterLast;
if (table.getActiveRowIndex() + rows == rowCount) {
return true;
}
return false;
}
try {
// for negative row numbers, resetRowPosition method is called
// and the cursor advances to the desired row from top of the table
int rowsToMove = table.getActiveRowIndex() + rows;
if (cursorPosition == Position.beforeFirst || rows < 0) {
if(cursorPosition == Position.afterLast) {
rowsToMove = rowCount + rows;
}
else if(cursorPosition == Position.beforeFirst) {
rowsToMove = rows - 1;
}
else {
rowsToMove = table.getActiveRowIndex() + rows;
}
if(rowsToMove < 0){
beforeFirst();
return false;
}
table.resetRowPosition();
table.advanceToRow(0);
}
cursorPosition = Position.middle;
return table.advanceToRow(rowsToMove);
} catch (Exception x) {
throw SQLError.get(x);
}
} | [
"@",
"Override",
"public",
"boolean",
"relative",
"(",
"int",
"rows",
")",
"throws",
"SQLException",
"{",
"checkClosed",
"(",
")",
";",
"if",
"(",
"rowCount",
"==",
"0",
")",
"{",
"return",
"false",
";",
"}",
"if",
"(",
"cursorPosition",
"==",
"Position"... | Moves the cursor a relative number of rows, either positive or negative. | [
"Moves",
"the",
"cursor",
"a",
"relative",
"number",
"of",
"rows",
"either",
"positive",
"or",
"negative",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L1224-L1270 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.setFetchDirection | @Override
public void setFetchDirection(int direction) throws SQLException {
if ((direction != FETCH_FORWARD) && (direction != FETCH_REVERSE)
&& (direction != FETCH_UNKNOWN))
throw SQLError.get(SQLError.ILLEGAL_STATEMENT, direction);
this.fetchDirection = direction;
} | java | @Override
public void setFetchDirection(int direction) throws SQLException {
if ((direction != FETCH_FORWARD) && (direction != FETCH_REVERSE)
&& (direction != FETCH_UNKNOWN))
throw SQLError.get(SQLError.ILLEGAL_STATEMENT, direction);
this.fetchDirection = direction;
} | [
"@",
"Override",
"public",
"void",
"setFetchDirection",
"(",
"int",
"direction",
")",
"throws",
"SQLException",
"{",
"if",
"(",
"(",
"direction",
"!=",
"FETCH_FORWARD",
")",
"&&",
"(",
"direction",
"!=",
"FETCH_REVERSE",
")",
"&&",
"(",
"direction",
"!=",
"F... | object will be processed. | [
"object",
"will",
"be",
"processed",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L1292-L1298 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.updateAsciiStream | @Override
public void updateAsciiStream(String columnLabel, InputStream x, long length)
throws SQLException {
throw SQLError.noSupport();
} | java | @Override
public void updateAsciiStream(String columnLabel, InputStream x, long length)
throws SQLException {
throw SQLError.noSupport();
} | [
"@",
"Override",
"public",
"void",
"updateAsciiStream",
"(",
"String",
"columnLabel",
",",
"InputStream",
"x",
",",
"long",
"length",
")",
"throws",
"SQLException",
"{",
"throw",
"SQLError",
".",
"noSupport",
"(",
")",
";",
"}"
] | the specified number of bytes. | [
"the",
"specified",
"number",
"of",
"bytes",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L1362-L1366 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.updateBlob | @Override
public void updateBlob(String columnLabel, InputStream inputStream,
long length) throws SQLException {
throw SQLError.noSupport();
} | java | @Override
public void updateBlob(String columnLabel, InputStream inputStream,
long length) throws SQLException {
throw SQLError.noSupport();
} | [
"@",
"Override",
"public",
"void",
"updateBlob",
"(",
"String",
"columnLabel",
",",
"InputStream",
"inputStream",
",",
"long",
"length",
")",
"throws",
"SQLException",
"{",
"throw",
"SQLError",
".",
"noSupport",
"(",
")",
";",
"}"
] | have the specified number of bytes. | [
"have",
"the",
"specified",
"number",
"of",
"bytes",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L1464-L1468 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.updateNClob | @Override
public void updateNClob(String columnLabel, Reader reader, long length)
throws SQLException {
throw SQLError.noSupport();
} | java | @Override
public void updateNClob(String columnLabel, Reader reader, long length)
throws SQLException {
throw SQLError.noSupport();
} | [
"@",
"Override",
"public",
"void",
"updateNClob",
"(",
"String",
"columnLabel",
",",
"Reader",
"reader",
",",
"long",
"length",
")",
"throws",
"SQLException",
"{",
"throw",
"SQLError",
".",
"noSupport",
"(",
")",
";",
"}"
] | given number of characters long. | [
"given",
"number",
"of",
"characters",
"long",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L1721-L1725 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.updateObject | @Override
public void updateObject(String columnLabel, Object x, int scaleOrLength)
throws SQLException {
throw SQLError.noSupport();
} | java | @Override
public void updateObject(String columnLabel, Object x, int scaleOrLength)
throws SQLException {
throw SQLError.noSupport();
} | [
"@",
"Override",
"public",
"void",
"updateObject",
"(",
"String",
"columnLabel",
",",
"Object",
"x",
",",
"int",
"scaleOrLength",
")",
"throws",
"SQLException",
"{",
"throw",
"SQLError",
".",
"noSupport",
"(",
")",
";",
"}"
] | Updates the designated column with an Object value. | [
"Updates",
"the",
"designated",
"column",
"with",
"an",
"Object",
"value",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L1773-L1777 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.wasNull | @Override
public boolean wasNull() throws SQLException {
checkClosed();
try {
return table.wasNull();
} catch (Exception x) {
throw SQLError.get(x);
}
} | java | @Override
public boolean wasNull() throws SQLException {
checkClosed();
try {
return table.wasNull();
} catch (Exception x) {
throw SQLError.get(x);
}
} | [
"@",
"Override",
"public",
"boolean",
"wasNull",
"(",
")",
"throws",
"SQLException",
"{",
"checkClosed",
"(",
")",
";",
"try",
"{",
"return",
"table",
".",
"wasNull",
"(",
")",
";",
"}",
"catch",
"(",
"Exception",
"x",
")",
"{",
"throw",
"SQLError",
".... | Reports whether the last column read had a value of SQL NULL. | [
"Reports",
"whether",
"the",
"last",
"column",
"read",
"had",
"a",
"value",
"of",
"SQL",
"NULL",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L1875-L1883 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java | JDBC4ResultSet.getRowData | public Object[] getRowData() throws SQLException {
Object[] row = new Object[columnCount];
for (int i = 1; i < columnCount + 1; i++) {
row[i - 1] = getObject(i);
}
return row;
} | java | public Object[] getRowData() throws SQLException {
Object[] row = new Object[columnCount];
for (int i = 1; i < columnCount + 1; i++) {
row[i - 1] = getObject(i);
}
return row;
} | [
"public",
"Object",
"[",
"]",
"getRowData",
"(",
")",
"throws",
"SQLException",
"{",
"Object",
"[",
"]",
"row",
"=",
"new",
"Object",
"[",
"columnCount",
"]",
";",
"for",
"(",
"int",
"i",
"=",
"1",
";",
"i",
"<",
"columnCount",
"+",
"1",
";",
"i",
... | Retrieve the raw row data as an array | [
"Retrieve",
"the",
"raw",
"row",
"data",
"as",
"an",
"array"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSet.java#L1909-L1915 | train |
VoltDB/voltdb | examples/callcenter/client/callcenter/NetworkSadnessTransformer.java | NetworkSadnessTransformer.transformAndQueue | void transformAndQueue(T event, long systemCurrentTimeMillis) {
// if you're super unlucky, this blows up the stack
if (rand.nextDouble() < 0.05) {
// duplicate this message (note recursion means maybe more than duped)
transformAndQueue(event, systemCurrentTimeMillis);
}
long delayms = nextZipfDelay();
delayed.add(systemCurrentTimeMillis + delayms, event);
} | java | void transformAndQueue(T event, long systemCurrentTimeMillis) {
// if you're super unlucky, this blows up the stack
if (rand.nextDouble() < 0.05) {
// duplicate this message (note recursion means maybe more than duped)
transformAndQueue(event, systemCurrentTimeMillis);
}
long delayms = nextZipfDelay();
delayed.add(systemCurrentTimeMillis + delayms, event);
} | [
"void",
"transformAndQueue",
"(",
"T",
"event",
",",
"long",
"systemCurrentTimeMillis",
")",
"{",
"// if you're super unlucky, this blows up the stack",
"if",
"(",
"rand",
".",
"nextDouble",
"(",
")",
"<",
"0.05",
")",
"{",
"// duplicate this message (note recursion means... | Possibly duplicate and delay by some random amount. | [
"Possibly",
"duplicate",
"and",
"delay",
"by",
"some",
"random",
"amount",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/examples/callcenter/client/callcenter/NetworkSadnessTransformer.java#L85-L94 | train |
VoltDB/voltdb | examples/callcenter/client/callcenter/NetworkSadnessTransformer.java | NetworkSadnessTransformer.next | @Override
public T next(long systemCurrentTimeMillis) {
// drain all the waiting messages from the source (up to 10k)
while (delayed.size() < 10000) {
T event = source.next(systemCurrentTimeMillis);
if (event == null) {
break;
}
transformAndQueue(event, systemCurrentTimeMillis);
}
return delayed.nextReady(systemCurrentTimeMillis);
} | java | @Override
public T next(long systemCurrentTimeMillis) {
// drain all the waiting messages from the source (up to 10k)
while (delayed.size() < 10000) {
T event = source.next(systemCurrentTimeMillis);
if (event == null) {
break;
}
transformAndQueue(event, systemCurrentTimeMillis);
}
return delayed.nextReady(systemCurrentTimeMillis);
} | [
"@",
"Override",
"public",
"T",
"next",
"(",
"long",
"systemCurrentTimeMillis",
")",
"{",
"// drain all the waiting messages from the source (up to 10k)",
"while",
"(",
"delayed",
".",
"size",
"(",
")",
"<",
"10000",
")",
"{",
"T",
"event",
"=",
"source",
".",
"... | Return the next event that is safe for delivery or null
if there are no safe objects to deliver.
Null response could mean no events, or could mean all events
are scheduled for the future.
@param systemCurrentTimeMillis The current time. | [
"Return",
"the",
"next",
"event",
"that",
"is",
"safe",
"for",
"delivery",
"or",
"null",
"if",
"there",
"are",
"no",
"safe",
"objects",
"to",
"deliver",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/examples/callcenter/client/callcenter/NetworkSadnessTransformer.java#L105-L117 | train |
VoltDB/voltdb | src/frontend/org/voltdb/plannodes/SchemaColumn.java | SchemaColumn.compareNames | public int compareNames(SchemaColumn that) {
String thatTbl;
String thisTbl;
if (m_tableAlias != null && that.m_tableAlias != null) {
thisTbl = m_tableAlias;
thatTbl = that.m_tableAlias;
}
else {
thisTbl = m_tableName;
thatTbl = that.m_tableName;
}
int tblCmp = nullSafeStringCompareTo(thisTbl, thatTbl);
if (tblCmp != 0) {
return tblCmp;
}
String thisCol;
String thatCol;
if (m_columnName != null && that.m_columnName != null) {
thisCol = m_columnName;
thatCol = that.m_columnName;
}
else {
thisCol = m_columnAlias;
thatCol = that.m_columnAlias;
}
int colCmp = nullSafeStringCompareTo(thisCol, thatCol);
return colCmp;
} | java | public int compareNames(SchemaColumn that) {
String thatTbl;
String thisTbl;
if (m_tableAlias != null && that.m_tableAlias != null) {
thisTbl = m_tableAlias;
thatTbl = that.m_tableAlias;
}
else {
thisTbl = m_tableName;
thatTbl = that.m_tableName;
}
int tblCmp = nullSafeStringCompareTo(thisTbl, thatTbl);
if (tblCmp != 0) {
return tblCmp;
}
String thisCol;
String thatCol;
if (m_columnName != null && that.m_columnName != null) {
thisCol = m_columnName;
thatCol = that.m_columnName;
}
else {
thisCol = m_columnAlias;
thatCol = that.m_columnAlias;
}
int colCmp = nullSafeStringCompareTo(thisCol, thatCol);
return colCmp;
} | [
"public",
"int",
"compareNames",
"(",
"SchemaColumn",
"that",
")",
"{",
"String",
"thatTbl",
";",
"String",
"thisTbl",
";",
"if",
"(",
"m_tableAlias",
"!=",
"null",
"&&",
"that",
".",
"m_tableAlias",
"!=",
"null",
")",
"{",
"thisTbl",
"=",
"m_tableAlias",
... | Compare this schema column to the input.
Two SchemaColumns are compared thus:
- Compare the table aliases or names, preferring to compare aliases if
not null for both sides.
- Compare the column names or aliases, preferring to compare names if
not null for both sides. | [
"Compare",
"this",
"schema",
"column",
"to",
"the",
"input",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/plannodes/SchemaColumn.java#L145-L176 | train |
VoltDB/voltdb | src/frontend/org/voltdb/plannodes/SchemaColumn.java | SchemaColumn.copyAndReplaceWithTVE | public SchemaColumn copyAndReplaceWithTVE(int colIndex) {
TupleValueExpression newTve;
if (m_expression instanceof TupleValueExpression) {
newTve = (TupleValueExpression) m_expression.clone();
newTve.setColumnIndex(colIndex);
}
else {
newTve = new TupleValueExpression(m_tableName, m_tableAlias,
m_columnName, m_columnAlias,
m_expression, colIndex);
}
return new SchemaColumn(m_tableName, m_tableAlias,
m_columnName, m_columnAlias,
newTve, m_differentiator);
} | java | public SchemaColumn copyAndReplaceWithTVE(int colIndex) {
TupleValueExpression newTve;
if (m_expression instanceof TupleValueExpression) {
newTve = (TupleValueExpression) m_expression.clone();
newTve.setColumnIndex(colIndex);
}
else {
newTve = new TupleValueExpression(m_tableName, m_tableAlias,
m_columnName, m_columnAlias,
m_expression, colIndex);
}
return new SchemaColumn(m_tableName, m_tableAlias,
m_columnName, m_columnAlias,
newTve, m_differentiator);
} | [
"public",
"SchemaColumn",
"copyAndReplaceWithTVE",
"(",
"int",
"colIndex",
")",
"{",
"TupleValueExpression",
"newTve",
";",
"if",
"(",
"m_expression",
"instanceof",
"TupleValueExpression",
")",
"{",
"newTve",
"=",
"(",
"TupleValueExpression",
")",
"m_expression",
".",... | Return a copy of this SchemaColumn, but with the input expression
replaced by an appropriate TupleValueExpression.
@param colIndex | [
"Return",
"a",
"copy",
"of",
"this",
"SchemaColumn",
"but",
"with",
"the",
"input",
"expression",
"replaced",
"by",
"an",
"appropriate",
"TupleValueExpression",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/plannodes/SchemaColumn.java#L201-L215 | train |
VoltDB/voltdb | src/frontend/org/voltdb/iv2/MpTransactionTaskQueue.java | MpTransactionTaskQueue.offer | @Override
synchronized void offer(TransactionTask task)
{
Iv2Trace.logTransactionTaskQueueOffer(task);
m_backlog.addLast(task);
taskQueueOffer();
} | java | @Override
synchronized void offer(TransactionTask task)
{
Iv2Trace.logTransactionTaskQueueOffer(task);
m_backlog.addLast(task);
taskQueueOffer();
} | [
"@",
"Override",
"synchronized",
"void",
"offer",
"(",
"TransactionTask",
"task",
")",
"{",
"Iv2Trace",
".",
"logTransactionTaskQueueOffer",
"(",
"task",
")",
";",
"m_backlog",
".",
"addLast",
"(",
"task",
")",
";",
"taskQueueOffer",
"(",
")",
";",
"}"
] | Stick this task in the backlog.
Many network threads may be racing to reach here, synchronize to
serialize queue order.
Always returns true in this case, side effect of extending
TransactionTaskQueue. | [
"Stick",
"this",
"task",
"in",
"the",
"backlog",
".",
"Many",
"network",
"threads",
"may",
"be",
"racing",
"to",
"reach",
"here",
"synchronize",
"to",
"serialize",
"queue",
"order",
".",
"Always",
"returns",
"true",
"in",
"this",
"case",
"side",
"effect",
... | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/iv2/MpTransactionTaskQueue.java#L88-L94 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/persist/NIOLockFile.java | NIOLockFile.aquireFileLock | private boolean aquireFileLock() {
// PRE:
//
// raf is never null and is never closed upon entry.
//
// Rhetorical question to self: How does one tell if a RandomAccessFile
// is closed, short of invoking an operation and getting an IOException
// the says its closed (assuming you can control the Locale of the error
// message)?
//
final RandomAccessFile lraf = super.raf;
// In an ideal world, we would use a lock region back off approach,
// starting with region MAX_LOCK_REGION, then MAX_NFS_LOCK_REGION,
// then MIN_LOCK_REGION.
//
// In practice, however, it is just generally unwise to mount network
// file system database instances. Be warned.
//
// In general, it is probably also unwise to mount removable media
// database instances that are not read-only.
boolean success = false;
try {
if (this.fileLock != null) {
// API says never throws exception, but I suspect
// it's quite possible some research / FOSS JVMs might
// still throw unsupported operation exceptions on certain
// NIO classes...better to be safe than sorry.
if (this.fileLock.isValid()) {
return true;
} else {
// It's not valid, so releasing it is a no-op.
//
// However, we should still clean up the referenceand hope
// no previous complications exist (a hung FileLock in a
// flaky JVM) or that gc kicks in and saves the day...
// (unlikely, though).
this.releaseFileLock();
}
}
if (isPosixManditoryFileLock()) {
try {
Runtime.getRuntime().exec(new String[] {
"chmod", "g+s,g-x", file.getPath()
});
} catch (Exception ex) {
//ex.printStackTrace();
}
}
// Note: from FileChannel.tryLock(...) JavaDoc:
//
// @return A lock object representing the newly-acquired lock,
// or <tt>null</tt> if the lock could not be acquired
// because another program holds an overlapping lock
this.fileLock = lraf.getChannel().tryLock(0, MIN_LOCK_REGION,
false);
// According to the API, if it's non-null, it must be valid.
// This may not actually yet be the full truth of the matter under
// all commonly available JVM implementations.
// fileLock.isValid() API says it never throws, though, so
// with fingers crossed...
success = (this.fileLock != null && this.fileLock.isValid());
} catch (Exception e) {}
if (!success) {
this.releaseFileLock();
}
return success;
} | java | private boolean aquireFileLock() {
// PRE:
//
// raf is never null and is never closed upon entry.
//
// Rhetorical question to self: How does one tell if a RandomAccessFile
// is closed, short of invoking an operation and getting an IOException
// the says its closed (assuming you can control the Locale of the error
// message)?
//
final RandomAccessFile lraf = super.raf;
// In an ideal world, we would use a lock region back off approach,
// starting with region MAX_LOCK_REGION, then MAX_NFS_LOCK_REGION,
// then MIN_LOCK_REGION.
//
// In practice, however, it is just generally unwise to mount network
// file system database instances. Be warned.
//
// In general, it is probably also unwise to mount removable media
// database instances that are not read-only.
boolean success = false;
try {
if (this.fileLock != null) {
// API says never throws exception, but I suspect
// it's quite possible some research / FOSS JVMs might
// still throw unsupported operation exceptions on certain
// NIO classes...better to be safe than sorry.
if (this.fileLock.isValid()) {
return true;
} else {
// It's not valid, so releasing it is a no-op.
//
// However, we should still clean up the referenceand hope
// no previous complications exist (a hung FileLock in a
// flaky JVM) or that gc kicks in and saves the day...
// (unlikely, though).
this.releaseFileLock();
}
}
if (isPosixManditoryFileLock()) {
try {
Runtime.getRuntime().exec(new String[] {
"chmod", "g+s,g-x", file.getPath()
});
} catch (Exception ex) {
//ex.printStackTrace();
}
}
// Note: from FileChannel.tryLock(...) JavaDoc:
//
// @return A lock object representing the newly-acquired lock,
// or <tt>null</tt> if the lock could not be acquired
// because another program holds an overlapping lock
this.fileLock = lraf.getChannel().tryLock(0, MIN_LOCK_REGION,
false);
// According to the API, if it's non-null, it must be valid.
// This may not actually yet be the full truth of the matter under
// all commonly available JVM implementations.
// fileLock.isValid() API says it never throws, though, so
// with fingers crossed...
success = (this.fileLock != null && this.fileLock.isValid());
} catch (Exception e) {}
if (!success) {
this.releaseFileLock();
}
return success;
} | [
"private",
"boolean",
"aquireFileLock",
"(",
")",
"{",
"// PRE:",
"//",
"// raf is never null and is never closed upon entry.",
"//",
"// Rhetorical question to self: How does one tell if a RandomAccessFile",
"// is closed, short of invoking an operation and getting an IOException",
"// the ... | does the real work of aquiring the FileLock | [
"does",
"the",
"real",
"work",
"of",
"aquiring",
"the",
"FileLock"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/persist/NIOLockFile.java#L370-L447 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/persist/NIOLockFile.java | NIOLockFile.releaseFileLock | private boolean releaseFileLock() {
// Note: Closing the super class RandomAccessFile has the
// side-effect of closing the file lock's FileChannel,
// so we do not deal with this here.
boolean success = false;
if (this.fileLock == null) {
success = true;
} else {
try {
this.fileLock.release();
success = true;
} catch (Exception e) {}
finally {
this.fileLock = null;
}
}
return success;
} | java | private boolean releaseFileLock() {
// Note: Closing the super class RandomAccessFile has the
// side-effect of closing the file lock's FileChannel,
// so we do not deal with this here.
boolean success = false;
if (this.fileLock == null) {
success = true;
} else {
try {
this.fileLock.release();
success = true;
} catch (Exception e) {}
finally {
this.fileLock = null;
}
}
return success;
} | [
"private",
"boolean",
"releaseFileLock",
"(",
")",
"{",
"// Note: Closing the super class RandomAccessFile has the",
"// side-effect of closing the file lock's FileChannel,",
"// so we do not deal with this here.",
"boolean",
"success",
"=",
"false",
";",
"if",
"(",
"t... | does the real work of releasing the FileLock | [
"does",
"the",
"real",
"work",
"of",
"releasing",
"the",
"FileLock"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/persist/NIOLockFile.java#L450-L471 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/store/BaseHashMap.java | BaseHashMap.addOrRemove | protected Object addOrRemove(int intKey, Object objectValue,
boolean remove) {
int hash = intKey;
int index = hashIndex.getHashIndex(hash);
int lookup = hashIndex.hashTable[index];
int lastLookup = -1;
Object returnValue = null;
for (; lookup >= 0;
lastLookup = lookup,
lookup = hashIndex.getNextLookup(lookup)) {
if (intKey == intKeyTable[lookup]) {
break;
}
}
if (lookup >= 0) {
if (remove) {
if (intKey == 0) {
hasZeroKey = false;
zeroKeyIndex = -1;
}
intKeyTable[lookup] = 0;
returnValue = objectValueTable[lookup];
objectValueTable[lookup] = null;
hashIndex.unlinkNode(index, lastLookup, lookup);
if (accessTable != null) {
accessTable[lookup] = 0;
}
return returnValue;
}
if (isObjectValue) {
returnValue = objectValueTable[lookup];
objectValueTable[lookup] = objectValue;
}
if (accessTable != null) {
accessTable[lookup] = accessCount++;
}
return returnValue;
}
// not found
if (remove) {
return returnValue;
}
if (hashIndex.elementCount >= threshold) {
if (reset()) {
return addOrRemove(intKey, objectValue, remove);
} else {
return null;
}
}
lookup = hashIndex.linkNode(index, lastLookup);
intKeyTable[lookup] = intKey;
if (intKey == 0) {
hasZeroKey = true;
zeroKeyIndex = lookup;
}
objectValueTable[lookup] = objectValue;
if (accessTable != null) {
accessTable[lookup] = accessCount++;
}
return returnValue;
} | java | protected Object addOrRemove(int intKey, Object objectValue,
boolean remove) {
int hash = intKey;
int index = hashIndex.getHashIndex(hash);
int lookup = hashIndex.hashTable[index];
int lastLookup = -1;
Object returnValue = null;
for (; lookup >= 0;
lastLookup = lookup,
lookup = hashIndex.getNextLookup(lookup)) {
if (intKey == intKeyTable[lookup]) {
break;
}
}
if (lookup >= 0) {
if (remove) {
if (intKey == 0) {
hasZeroKey = false;
zeroKeyIndex = -1;
}
intKeyTable[lookup] = 0;
returnValue = objectValueTable[lookup];
objectValueTable[lookup] = null;
hashIndex.unlinkNode(index, lastLookup, lookup);
if (accessTable != null) {
accessTable[lookup] = 0;
}
return returnValue;
}
if (isObjectValue) {
returnValue = objectValueTable[lookup];
objectValueTable[lookup] = objectValue;
}
if (accessTable != null) {
accessTable[lookup] = accessCount++;
}
return returnValue;
}
// not found
if (remove) {
return returnValue;
}
if (hashIndex.elementCount >= threshold) {
if (reset()) {
return addOrRemove(intKey, objectValue, remove);
} else {
return null;
}
}
lookup = hashIndex.linkNode(index, lastLookup);
intKeyTable[lookup] = intKey;
if (intKey == 0) {
hasZeroKey = true;
zeroKeyIndex = lookup;
}
objectValueTable[lookup] = objectValue;
if (accessTable != null) {
accessTable[lookup] = accessCount++;
}
return returnValue;
} | [
"protected",
"Object",
"addOrRemove",
"(",
"int",
"intKey",
",",
"Object",
"objectValue",
",",
"boolean",
"remove",
")",
"{",
"int",
"hash",
"=",
"intKey",
";",
"int",
"index",
"=",
"hashIndex",
".",
"getHashIndex",
"(",
"hash",
")",
";",
"int",
"lookup",
... | type-specific method for adding or removing keys in int->Object maps | [
"type",
"-",
"specific",
"method",
"for",
"adding",
"or",
"removing",
"keys",
"in",
"int",
"-",
">",
"Object",
"maps"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/store/BaseHashMap.java#L603-L680 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/store/BaseHashMap.java | BaseHashMap.removeObject | protected Object removeObject(Object objectKey, boolean removeRow) {
if (objectKey == null) {
return null;
}
int hash = objectKey.hashCode();
int index = hashIndex.getHashIndex(hash);
int lookup = hashIndex.hashTable[index];
int lastLookup = -1;
Object returnValue = null;
for (; lookup >= 0;
lastLookup = lookup,
lookup = hashIndex.getNextLookup(lookup)) {
if (objectKeyTable[lookup].equals(objectKey)) {
objectKeyTable[lookup] = null;
hashIndex.unlinkNode(index, lastLookup, lookup);
if (isObjectValue) {
returnValue = objectValueTable[lookup];
objectValueTable[lookup] = null;
}
if (removeRow) {
removeRow(lookup);
}
return returnValue;
}
}
// not found
return returnValue;
} | java | protected Object removeObject(Object objectKey, boolean removeRow) {
if (objectKey == null) {
return null;
}
int hash = objectKey.hashCode();
int index = hashIndex.getHashIndex(hash);
int lookup = hashIndex.hashTable[index];
int lastLookup = -1;
Object returnValue = null;
for (; lookup >= 0;
lastLookup = lookup,
lookup = hashIndex.getNextLookup(lookup)) {
if (objectKeyTable[lookup].equals(objectKey)) {
objectKeyTable[lookup] = null;
hashIndex.unlinkNode(index, lastLookup, lookup);
if (isObjectValue) {
returnValue = objectValueTable[lookup];
objectValueTable[lookup] = null;
}
if (removeRow) {
removeRow(lookup);
}
return returnValue;
}
}
// not found
return returnValue;
} | [
"protected",
"Object",
"removeObject",
"(",
"Object",
"objectKey",
",",
"boolean",
"removeRow",
")",
"{",
"if",
"(",
"objectKey",
"==",
"null",
")",
"{",
"return",
"null",
";",
"}",
"int",
"hash",
"=",
"objectKey",
".",
"hashCode",
"(",
")",
";",
"int",
... | type specific method for Object sets or Object->Object maps | [
"type",
"specific",
"method",
"for",
"Object",
"sets",
"or",
"Object",
"-",
">",
"Object",
"maps"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/store/BaseHashMap.java#L685-L720 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/store/BaseHashMap.java | BaseHashMap.clear | public void clear() {
if (hashIndex.modified) {
accessCount = 0;
accessMin = accessCount;
hasZeroKey = false;
zeroKeyIndex = -1;
clearElementArrays(0, hashIndex.linkTable.length);
hashIndex.clear();
if (minimizeOnEmpty) {
rehash(initialCapacity);
}
}
} | java | public void clear() {
if (hashIndex.modified) {
accessCount = 0;
accessMin = accessCount;
hasZeroKey = false;
zeroKeyIndex = -1;
clearElementArrays(0, hashIndex.linkTable.length);
hashIndex.clear();
if (minimizeOnEmpty) {
rehash(initialCapacity);
}
}
} | [
"public",
"void",
"clear",
"(",
")",
"{",
"if",
"(",
"hashIndex",
".",
"modified",
")",
"{",
"accessCount",
"=",
"0",
";",
"accessMin",
"=",
"accessCount",
";",
"hasZeroKey",
"=",
"false",
";",
"zeroKeyIndex",
"=",
"-",
"1",
";",
"clearElementArrays",
"(... | Clear the map completely. | [
"Clear",
"the",
"map",
"completely",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/store/BaseHashMap.java#L1077-L1092 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/store/BaseHashMap.java | BaseHashMap.getAccessCountCeiling | public int getAccessCountCeiling(int count, int margin) {
return ArrayCounter.rank(accessTable, hashIndex.newNodePointer, count,
accessMin + 1, accessCount, margin);
} | java | public int getAccessCountCeiling(int count, int margin) {
return ArrayCounter.rank(accessTable, hashIndex.newNodePointer, count,
accessMin + 1, accessCount, margin);
} | [
"public",
"int",
"getAccessCountCeiling",
"(",
"int",
"count",
",",
"int",
"margin",
")",
"{",
"return",
"ArrayCounter",
".",
"rank",
"(",
"accessTable",
",",
"hashIndex",
".",
"newNodePointer",
",",
"count",
",",
"accessMin",
"+",
"1",
",",
"accessCount",
"... | Return the max accessCount value for count elements with the lowest
access count. Always return at least accessMin + 1 | [
"Return",
"the",
"max",
"accessCount",
"value",
"for",
"count",
"elements",
"with",
"the",
"lowest",
"access",
"count",
".",
"Always",
"return",
"at",
"least",
"accessMin",
"+",
"1"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/store/BaseHashMap.java#L1098-L1101 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/store/BaseHashMap.java | BaseHashMap.clear | protected void clear(int count, int margin) {
if (margin < 64) {
margin = 64;
}
int maxlookup = hashIndex.newNodePointer;
int accessBase = getAccessCountCeiling(count, margin);
for (int lookup = 0; lookup < maxlookup; lookup++) {
Object o = objectKeyTable[lookup];
if (o != null && accessTable[lookup] < accessBase) {
removeObject(o, false);
}
}
accessMin = accessBase;
} | java | protected void clear(int count, int margin) {
if (margin < 64) {
margin = 64;
}
int maxlookup = hashIndex.newNodePointer;
int accessBase = getAccessCountCeiling(count, margin);
for (int lookup = 0; lookup < maxlookup; lookup++) {
Object o = objectKeyTable[lookup];
if (o != null && accessTable[lookup] < accessBase) {
removeObject(o, false);
}
}
accessMin = accessBase;
} | [
"protected",
"void",
"clear",
"(",
"int",
"count",
",",
"int",
"margin",
")",
"{",
"if",
"(",
"margin",
"<",
"64",
")",
"{",
"margin",
"=",
"64",
";",
"}",
"int",
"maxlookup",
"=",
"hashIndex",
".",
"newNodePointer",
";",
"int",
"accessBase",
"=",
"g... | Clear approximately count elements from the map, starting with
those with low accessTable ranking.
Only for maps with Object key table | [
"Clear",
"approximately",
"count",
"elements",
"from",
"the",
"map",
"starting",
"with",
"those",
"with",
"low",
"accessTable",
"ranking",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/store/BaseHashMap.java#L1121-L1139 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/SubQuery.java | SubQuery.materialise | public void materialise(Session session) {
PersistentStore store;
// table constructors
if (isDataExpression) {
store = session.sessionData.getSubqueryRowStore(table);
dataExpression.insertValuesIntoSubqueryTable(session, store);
return;
}
Result result = queryExpression.getResult(session,
isExistsPredicate ? 1
: 0);
RowSetNavigatorData navigator =
((RowSetNavigatorData) result.getNavigator());
if (uniqueRows) {
navigator.removeDuplicates();
}
store = session.sessionData.getSubqueryRowStore(table);
table.insertResult(store, result);
result.getNavigator().close();
} | java | public void materialise(Session session) {
PersistentStore store;
// table constructors
if (isDataExpression) {
store = session.sessionData.getSubqueryRowStore(table);
dataExpression.insertValuesIntoSubqueryTable(session, store);
return;
}
Result result = queryExpression.getResult(session,
isExistsPredicate ? 1
: 0);
RowSetNavigatorData navigator =
((RowSetNavigatorData) result.getNavigator());
if (uniqueRows) {
navigator.removeDuplicates();
}
store = session.sessionData.getSubqueryRowStore(table);
table.insertResult(store, result);
result.getNavigator().close();
} | [
"public",
"void",
"materialise",
"(",
"Session",
"session",
")",
"{",
"PersistentStore",
"store",
";",
"// table constructors",
"if",
"(",
"isDataExpression",
")",
"{",
"store",
"=",
"session",
".",
"sessionData",
".",
"getSubqueryRowStore",
"(",
"table",
")",
"... | Fills the table with a result set | [
"Fills",
"the",
"table",
"with",
"a",
"result",
"set"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/SubQuery.java#L176-L203 | train |
VoltDB/voltdb | src/frontend/org/voltdb/exportclient/ExportEncoder.java | ExportEncoder.encodeDecimal | static public void encodeDecimal(final FastSerializer fs, BigDecimal value)
throws IOException {
fs.write((byte)VoltDecimalHelper.kDefaultScale);
fs.write((byte)16);
fs.write(VoltDecimalHelper.serializeBigDecimal(value));
} | java | static public void encodeDecimal(final FastSerializer fs, BigDecimal value)
throws IOException {
fs.write((byte)VoltDecimalHelper.kDefaultScale);
fs.write((byte)16);
fs.write(VoltDecimalHelper.serializeBigDecimal(value));
} | [
"static",
"public",
"void",
"encodeDecimal",
"(",
"final",
"FastSerializer",
"fs",
",",
"BigDecimal",
"value",
")",
"throws",
"IOException",
"{",
"fs",
".",
"write",
"(",
"(",
"byte",
")",
"VoltDecimalHelper",
".",
"kDefaultScale",
")",
";",
"fs",
".",
"writ... | Read a decimal according to the Export encoding specification.
@param fds
Fastdeserializer containing Export stream data
@return decoded BigDecimal value
@throws IOException | [
"Read",
"a",
"decimal",
"according",
"to",
"the",
"Export",
"encoding",
"specification",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/exportclient/ExportEncoder.java#L222-L227 | train |
VoltDB/voltdb | src/frontend/org/voltdb/exportclient/ExportEncoder.java | ExportEncoder.encodeGeographyPoint | static public void encodeGeographyPoint(final FastSerializer fs, GeographyPointValue value)
throws IOException {
final int length = GeographyPointValue.getLengthInBytes();
ByteBuffer bb = ByteBuffer.allocate(length);
bb.order(ByteOrder.nativeOrder());
value.flattenToBuffer(bb);
byte[] array = bb.array();
assert(array.length == length);
fs.write(array);
} | java | static public void encodeGeographyPoint(final FastSerializer fs, GeographyPointValue value)
throws IOException {
final int length = GeographyPointValue.getLengthInBytes();
ByteBuffer bb = ByteBuffer.allocate(length);
bb.order(ByteOrder.nativeOrder());
value.flattenToBuffer(bb);
byte[] array = bb.array();
assert(array.length == length);
fs.write(array);
} | [
"static",
"public",
"void",
"encodeGeographyPoint",
"(",
"final",
"FastSerializer",
"fs",
",",
"GeographyPointValue",
"value",
")",
"throws",
"IOException",
"{",
"final",
"int",
"length",
"=",
"GeographyPointValue",
".",
"getLengthInBytes",
"(",
")",
";",
"ByteBuffe... | Encode a GEOGRAPHY_POINT according to the Export encoding specification.
@param fs The serializer to serialize to
@throws IOException | [
"Encode",
"a",
"GEOGRAPHY_POINT",
"according",
"to",
"the",
"Export",
"encoding",
"specification",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/exportclient/ExportEncoder.java#L315-L326 | train |
VoltDB/voltdb | src/frontend/org/voltdb/exportclient/ExportEncoder.java | ExportEncoder.encodeGeography | static public void encodeGeography(final FastSerializer fs, GeographyValue value)
throws IOException {
ByteBuffer bb = ByteBuffer.allocate(value.getLengthInBytes());
bb.order(ByteOrder.nativeOrder());
value.flattenToBuffer(bb);
byte[] array = bb.array();
fs.writeInt(array.length);
fs.write(array);
} | java | static public void encodeGeography(final FastSerializer fs, GeographyValue value)
throws IOException {
ByteBuffer bb = ByteBuffer.allocate(value.getLengthInBytes());
bb.order(ByteOrder.nativeOrder());
value.flattenToBuffer(bb);
byte[] array = bb.array();
fs.writeInt(array.length);
fs.write(array);
} | [
"static",
"public",
"void",
"encodeGeography",
"(",
"final",
"FastSerializer",
"fs",
",",
"GeographyValue",
"value",
")",
"throws",
"IOException",
"{",
"ByteBuffer",
"bb",
"=",
"ByteBuffer",
".",
"allocate",
"(",
"value",
".",
"getLengthInBytes",
"(",
")",
")",
... | Encode a GEOGRAPHY according to the Export encoding specification.
@param fs The serializer to serialize to
@throws IOException | [
"Encode",
"a",
"GEOGRAPHY",
"according",
"to",
"the",
"Export",
"encoding",
"specification",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/exportclient/ExportEncoder.java#L334-L343 | train |
VoltDB/voltdb | src/frontend/org/voltdb/NTProcedureService.java | NTProcedureService.loadSystemProcedures | @SuppressWarnings("unchecked")
private ImmutableMap<String, ProcedureRunnerNTGenerator> loadSystemProcedures(boolean startup) {
ImmutableMap.Builder<String, ProcedureRunnerNTGenerator> builder =
ImmutableMap.<String, ProcedureRunnerNTGenerator>builder();
Set<Entry<String,Config>> entrySet = SystemProcedureCatalog.listing.entrySet();
for (Entry<String, Config> entry : entrySet) {
String procName = entry.getKey();
Config sysProc = entry.getValue();
// transactional sysprocs handled by LoadedProcedureSet
if (sysProc.transactional) {
continue;
}
final String className = sysProc.getClassname();
Class<? extends VoltNonTransactionalProcedure> procClass = null;
// this check is for sysprocs that don't have a procedure class
if (className != null) {
try {
procClass = (Class<? extends VoltNonTransactionalProcedure>) Class.forName(className);
}
catch (final ClassNotFoundException e) {
if (sysProc.commercial) {
continue;
}
VoltDB.crashLocalVoltDB("Missing Java class for NT System Procedure: " + procName);
}
if (startup) {
// This is a startup-time check to make sure we can instantiate
try {
if ((procClass.newInstance() instanceof VoltNTSystemProcedure) == false) {
VoltDB.crashLocalVoltDB("NT System Procedure is incorrect class type: " + procName);
}
}
catch (InstantiationException | IllegalAccessException e) {
VoltDB.crashLocalVoltDB("Unable to instantiate NT System Procedure: " + procName);
}
}
ProcedureRunnerNTGenerator prntg = new ProcedureRunnerNTGenerator(procClass);
builder.put(procName, prntg);
}
}
return builder.build();
} | java | @SuppressWarnings("unchecked")
private ImmutableMap<String, ProcedureRunnerNTGenerator> loadSystemProcedures(boolean startup) {
ImmutableMap.Builder<String, ProcedureRunnerNTGenerator> builder =
ImmutableMap.<String, ProcedureRunnerNTGenerator>builder();
Set<Entry<String,Config>> entrySet = SystemProcedureCatalog.listing.entrySet();
for (Entry<String, Config> entry : entrySet) {
String procName = entry.getKey();
Config sysProc = entry.getValue();
// transactional sysprocs handled by LoadedProcedureSet
if (sysProc.transactional) {
continue;
}
final String className = sysProc.getClassname();
Class<? extends VoltNonTransactionalProcedure> procClass = null;
// this check is for sysprocs that don't have a procedure class
if (className != null) {
try {
procClass = (Class<? extends VoltNonTransactionalProcedure>) Class.forName(className);
}
catch (final ClassNotFoundException e) {
if (sysProc.commercial) {
continue;
}
VoltDB.crashLocalVoltDB("Missing Java class for NT System Procedure: " + procName);
}
if (startup) {
// This is a startup-time check to make sure we can instantiate
try {
if ((procClass.newInstance() instanceof VoltNTSystemProcedure) == false) {
VoltDB.crashLocalVoltDB("NT System Procedure is incorrect class type: " + procName);
}
}
catch (InstantiationException | IllegalAccessException e) {
VoltDB.crashLocalVoltDB("Unable to instantiate NT System Procedure: " + procName);
}
}
ProcedureRunnerNTGenerator prntg = new ProcedureRunnerNTGenerator(procClass);
builder.put(procName, prntg);
}
}
return builder.build();
} | [
"@",
"SuppressWarnings",
"(",
"\"unchecked\"",
")",
"private",
"ImmutableMap",
"<",
"String",
",",
"ProcedureRunnerNTGenerator",
">",
"loadSystemProcedures",
"(",
"boolean",
"startup",
")",
"{",
"ImmutableMap",
".",
"Builder",
"<",
"String",
",",
"ProcedureRunnerNTGen... | Load the system procedures.
Optionally don't load UAC but use parameter instead. | [
"Load",
"the",
"system",
"procedures",
".",
"Optionally",
"don",
"t",
"load",
"UAC",
"but",
"use",
"parameter",
"instead",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/NTProcedureService.java#L252-L299 | train |
VoltDB/voltdb | src/frontend/org/voltdb/NTProcedureService.java | NTProcedureService.update | @SuppressWarnings("unchecked")
synchronized void update(CatalogContext catalogContext) {
CatalogMap<Procedure> procedures = catalogContext.database.getProcedures();
Map<String, ProcedureRunnerNTGenerator> runnerGeneratorMap = new TreeMap<>();
for (Procedure procedure : procedures) {
if (procedure.getTransactional()) {
continue;
}
// this code is mostly lifted from transactional procedures
String className = procedure.getClassname();
Class<? extends VoltNonTransactionalProcedure> clz = null;
try {
clz = (Class<? extends VoltNonTransactionalProcedure>) catalogContext.classForProcedureOrUDF(className);
} catch (ClassNotFoundException e) {
if (className.startsWith("org.voltdb.")) {
String msg = String.format(LoadedProcedureSet.ORGVOLTDB_PROCNAME_ERROR_FMT, className);
VoltDB.crashLocalVoltDB(msg, false, null);
}
else {
String msg = String.format(LoadedProcedureSet.UNABLETOLOAD_ERROR_FMT, className);
VoltDB.crashLocalVoltDB(msg, false, null);
}
}
// The ProcedureRunnerNTGenerator has all of the dangerous and slow
// stuff in it. Like classfinding, instantiation, and reflection.
ProcedureRunnerNTGenerator prntg = new ProcedureRunnerNTGenerator(clz);
runnerGeneratorMap.put(procedure.getTypeName(), prntg);
}
m_procs = ImmutableMap.<String, ProcedureRunnerNTGenerator>builder().putAll(runnerGeneratorMap).build();
// reload all sysprocs
loadSystemProcedures(false);
// Set the system to start accepting work again now that ebertything is updated.
// We had to stop because stats would be wonky if we called a proc while updating
// this stuff.
m_paused = false;
// release all of the pending invocations into the real queue
m_pendingInvocations
.forEach(pi -> callProcedureNT(pi.ciHandle, pi.user, pi.ccxn, pi.isAdmin,
pi.ntPriority, pi.task));
m_pendingInvocations.clear();
} | java | @SuppressWarnings("unchecked")
synchronized void update(CatalogContext catalogContext) {
CatalogMap<Procedure> procedures = catalogContext.database.getProcedures();
Map<String, ProcedureRunnerNTGenerator> runnerGeneratorMap = new TreeMap<>();
for (Procedure procedure : procedures) {
if (procedure.getTransactional()) {
continue;
}
// this code is mostly lifted from transactional procedures
String className = procedure.getClassname();
Class<? extends VoltNonTransactionalProcedure> clz = null;
try {
clz = (Class<? extends VoltNonTransactionalProcedure>) catalogContext.classForProcedureOrUDF(className);
} catch (ClassNotFoundException e) {
if (className.startsWith("org.voltdb.")) {
String msg = String.format(LoadedProcedureSet.ORGVOLTDB_PROCNAME_ERROR_FMT, className);
VoltDB.crashLocalVoltDB(msg, false, null);
}
else {
String msg = String.format(LoadedProcedureSet.UNABLETOLOAD_ERROR_FMT, className);
VoltDB.crashLocalVoltDB(msg, false, null);
}
}
// The ProcedureRunnerNTGenerator has all of the dangerous and slow
// stuff in it. Like classfinding, instantiation, and reflection.
ProcedureRunnerNTGenerator prntg = new ProcedureRunnerNTGenerator(clz);
runnerGeneratorMap.put(procedure.getTypeName(), prntg);
}
m_procs = ImmutableMap.<String, ProcedureRunnerNTGenerator>builder().putAll(runnerGeneratorMap).build();
// reload all sysprocs
loadSystemProcedures(false);
// Set the system to start accepting work again now that ebertything is updated.
// We had to stop because stats would be wonky if we called a proc while updating
// this stuff.
m_paused = false;
// release all of the pending invocations into the real queue
m_pendingInvocations
.forEach(pi -> callProcedureNT(pi.ciHandle, pi.user, pi.ccxn, pi.isAdmin,
pi.ntPriority, pi.task));
m_pendingInvocations.clear();
} | [
"@",
"SuppressWarnings",
"(",
"\"unchecked\"",
")",
"synchronized",
"void",
"update",
"(",
"CatalogContext",
"catalogContext",
")",
"{",
"CatalogMap",
"<",
"Procedure",
">",
"procedures",
"=",
"catalogContext",
".",
"database",
".",
"getProcedures",
"(",
")",
";",... | Refresh the NT procedures when the catalog changes. | [
"Refresh",
"the",
"NT",
"procedures",
"when",
"the",
"catalog",
"changes",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/NTProcedureService.java#L312-L360 | train |
VoltDB/voltdb | src/frontend/org/voltdb/NTProcedureService.java | NTProcedureService.callProcedureNT | synchronized void callProcedureNT(final long ciHandle,
final AuthUser user,
final Connection ccxn,
final boolean isAdmin,
final boolean ntPriority,
final StoredProcedureInvocation task)
{
// If paused, stuff a record of the invocation into a queue that gets
// drained when un-paused. We're counting on regular upstream backpressure
// to prevent this from getting too out of hand.
if (m_paused) {
PendingInvocation pi = new PendingInvocation(ciHandle, user, ccxn, isAdmin, ntPriority, task);
m_pendingInvocations.add(pi);
return;
}
String procName = task.getProcName();
final ProcedureRunnerNTGenerator prntg;
if (procName.startsWith("@")) {
prntg = m_sysProcs.get(procName);
}
else {
prntg = m_procs.get(procName);
}
final ProcedureRunnerNT runner;
try {
runner = prntg.generateProcedureRunnerNT(user, ccxn, isAdmin, ciHandle, task.getClientHandle(), task.getBatchTimeout());
} catch (InstantiationException | IllegalAccessException e1) {
// I don't expect to hit this, but it's here...
// must be done as IRM to CI mailbox for backpressure accounting
ClientResponseImpl response = new ClientResponseImpl(ClientResponseImpl.UNEXPECTED_FAILURE,
new VoltTable[0],
"Could not create running context for " + procName + ".",
task.getClientHandle());
InitiateResponseMessage irm = InitiateResponseMessage.messageForNTProcResponse(ciHandle,
ccxn.connectionId(),
response);
m_mailbox.deliver(irm);
return;
}
m_outstanding.put(runner.m_id, runner);
Runnable invocationRunnable = new Runnable() {
@Override
public void run() {
try {
runner.call(task.getParams().toArray());
}
catch (Throwable ex) {
ex.printStackTrace();
throw ex;
}
}
};
try {
// pick the executor service based on priority
// - new (from user) txns get regular one
// - sub tasks and sub procs generated by nt procs get
// immediate exec service (priority)
if (ntPriority) {
m_priorityExecutorService.submit(invocationRunnable);
}
else {
m_primaryExecutorService.submit(invocationRunnable);
}
}
catch (RejectedExecutionException e) {
handleNTProcEnd(runner);
// I really don't expect this to happen... but it's here.
// must be done as IRM to CI mailbox for backpressure accounting
ClientResponseImpl response = new ClientResponseImpl(ClientResponseImpl.UNEXPECTED_FAILURE,
new VoltTable[0],
"Could not submit NT procedure " + procName + " to exec service for .",
task.getClientHandle());
InitiateResponseMessage irm = InitiateResponseMessage.messageForNTProcResponse(ciHandle,
ccxn.connectionId(),
response);
m_mailbox.deliver(irm);
return;
}
} | java | synchronized void callProcedureNT(final long ciHandle,
final AuthUser user,
final Connection ccxn,
final boolean isAdmin,
final boolean ntPriority,
final StoredProcedureInvocation task)
{
// If paused, stuff a record of the invocation into a queue that gets
// drained when un-paused. We're counting on regular upstream backpressure
// to prevent this from getting too out of hand.
if (m_paused) {
PendingInvocation pi = new PendingInvocation(ciHandle, user, ccxn, isAdmin, ntPriority, task);
m_pendingInvocations.add(pi);
return;
}
String procName = task.getProcName();
final ProcedureRunnerNTGenerator prntg;
if (procName.startsWith("@")) {
prntg = m_sysProcs.get(procName);
}
else {
prntg = m_procs.get(procName);
}
final ProcedureRunnerNT runner;
try {
runner = prntg.generateProcedureRunnerNT(user, ccxn, isAdmin, ciHandle, task.getClientHandle(), task.getBatchTimeout());
} catch (InstantiationException | IllegalAccessException e1) {
// I don't expect to hit this, but it's here...
// must be done as IRM to CI mailbox for backpressure accounting
ClientResponseImpl response = new ClientResponseImpl(ClientResponseImpl.UNEXPECTED_FAILURE,
new VoltTable[0],
"Could not create running context for " + procName + ".",
task.getClientHandle());
InitiateResponseMessage irm = InitiateResponseMessage.messageForNTProcResponse(ciHandle,
ccxn.connectionId(),
response);
m_mailbox.deliver(irm);
return;
}
m_outstanding.put(runner.m_id, runner);
Runnable invocationRunnable = new Runnable() {
@Override
public void run() {
try {
runner.call(task.getParams().toArray());
}
catch (Throwable ex) {
ex.printStackTrace();
throw ex;
}
}
};
try {
// pick the executor service based on priority
// - new (from user) txns get regular one
// - sub tasks and sub procs generated by nt procs get
// immediate exec service (priority)
if (ntPriority) {
m_priorityExecutorService.submit(invocationRunnable);
}
else {
m_primaryExecutorService.submit(invocationRunnable);
}
}
catch (RejectedExecutionException e) {
handleNTProcEnd(runner);
// I really don't expect this to happen... but it's here.
// must be done as IRM to CI mailbox for backpressure accounting
ClientResponseImpl response = new ClientResponseImpl(ClientResponseImpl.UNEXPECTED_FAILURE,
new VoltTable[0],
"Could not submit NT procedure " + procName + " to exec service for .",
task.getClientHandle());
InitiateResponseMessage irm = InitiateResponseMessage.messageForNTProcResponse(ciHandle,
ccxn.connectionId(),
response);
m_mailbox.deliver(irm);
return;
}
} | [
"synchronized",
"void",
"callProcedureNT",
"(",
"final",
"long",
"ciHandle",
",",
"final",
"AuthUser",
"user",
",",
"final",
"Connection",
"ccxn",
",",
"final",
"boolean",
"isAdmin",
",",
"final",
"boolean",
"ntPriority",
",",
"final",
"StoredProcedureInvocation",
... | Invoke an NT procedure asynchronously on one of the exec services.
@return ClientResponseImpl if something goes wrong. | [
"Invoke",
"an",
"NT",
"procedure",
"asynchronously",
"on",
"one",
"of",
"the",
"exec",
"services",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/NTProcedureService.java#L366-L450 | train |
VoltDB/voltdb | src/frontend/org/voltdb/NTProcedureService.java | NTProcedureService.handleCallbacksForFailedHosts | void handleCallbacksForFailedHosts(final Set<Integer> failedHosts) {
for (ProcedureRunnerNT runner : m_outstanding.values()) {
runner.processAnyCallbacksFromFailedHosts(failedHosts);
}
} | java | void handleCallbacksForFailedHosts(final Set<Integer> failedHosts) {
for (ProcedureRunnerNT runner : m_outstanding.values()) {
runner.processAnyCallbacksFromFailedHosts(failedHosts);
}
} | [
"void",
"handleCallbacksForFailedHosts",
"(",
"final",
"Set",
"<",
"Integer",
">",
"failedHosts",
")",
"{",
"for",
"(",
"ProcedureRunnerNT",
"runner",
":",
"m_outstanding",
".",
"values",
"(",
")",
")",
"{",
"runner",
".",
"processAnyCallbacksFromFailedHosts",
"("... | For all-host NT procs, use site failures to call callbacks for hosts
that will obviously never respond.
ICH and the other plumbing should handle regular, txn procs. | [
"For",
"all",
"-",
"host",
"NT",
"procs",
"use",
"site",
"failures",
"to",
"call",
"callbacks",
"for",
"hosts",
"that",
"will",
"obviously",
"never",
"respond",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/NTProcedureService.java#L466-L470 | train |
VoltDB/voltdb | src/frontend/org/voltdb/compiler/statements/CreateFunctionFromMethod.java | CreateFunctionFromMethod.isDefinedFunctionName | private boolean isDefinedFunctionName(String functionName) {
return FunctionForVoltDB.isFunctionNameDefined(functionName)
|| FunctionSQL.isFunction(functionName)
|| FunctionCustom.getFunctionId(functionName) != ID_NOT_DEFINED
|| (null != m_schema.findChild("ud_function", functionName));
} | java | private boolean isDefinedFunctionName(String functionName) {
return FunctionForVoltDB.isFunctionNameDefined(functionName)
|| FunctionSQL.isFunction(functionName)
|| FunctionCustom.getFunctionId(functionName) != ID_NOT_DEFINED
|| (null != m_schema.findChild("ud_function", functionName));
} | [
"private",
"boolean",
"isDefinedFunctionName",
"(",
"String",
"functionName",
")",
"{",
"return",
"FunctionForVoltDB",
".",
"isFunctionNameDefined",
"(",
"functionName",
")",
"||",
"FunctionSQL",
".",
"isFunction",
"(",
"functionName",
")",
"||",
"FunctionCustom",
"."... | Find out if the function is defined. It might be defined in the
FunctionForVoltDB table. It also might be in the VoltXML.
@param functionName
@return | [
"Find",
"out",
"if",
"the",
"function",
"is",
"defined",
".",
"It",
"might",
"be",
"defined",
"in",
"the",
"FunctionForVoltDB",
"table",
".",
"It",
"also",
"might",
"be",
"in",
"the",
"VoltXML",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/compiler/statements/CreateFunctionFromMethod.java#L86-L91 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/types/CharacterType.java | CharacterType.upper | public Object upper(Session session, Object data) {
if (data == null) {
return null;
}
if (typeCode == Types.SQL_CLOB) {
String result = ((ClobData) data).getSubString(session, 0,
(int) ((ClobData) data).length(session));
result = collation.toUpperCase(result);
ClobData clob = session.createClob(result.length());
clob.setString(session, 0, result);
return clob;
}
return collation.toUpperCase((String) data);
} | java | public Object upper(Session session, Object data) {
if (data == null) {
return null;
}
if (typeCode == Types.SQL_CLOB) {
String result = ((ClobData) data).getSubString(session, 0,
(int) ((ClobData) data).length(session));
result = collation.toUpperCase(result);
ClobData clob = session.createClob(result.length());
clob.setString(session, 0, result);
return clob;
}
return collation.toUpperCase((String) data);
} | [
"public",
"Object",
"upper",
"(",
"Session",
"session",
",",
"Object",
"data",
")",
"{",
"if",
"(",
"data",
"==",
"null",
")",
"{",
"return",
"null",
";",
"}",
"if",
"(",
"typeCode",
"==",
"Types",
".",
"SQL_CLOB",
")",
"{",
"String",
"result",
"=",
... | Memory limits apply to Upper and Lower implementations with Clob data | [
"Memory",
"limits",
"apply",
"to",
"Upper",
"and",
"Lower",
"implementations",
"with",
"Clob",
"data"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/types/CharacterType.java#L794-L814 | train |
VoltDB/voltdb | third_party/java/src/org/HdrHistogram_voltpatches/HistogramLogWriter.java | HistogramLogWriter.outputStartTime | public void outputStartTime(final long startTimeMsec) {
log.format(Locale.US, "#[StartTime: %.3f (seconds since epoch), %s]\n",
startTimeMsec / 1000.0,
(new Date(startTimeMsec)).toString());
} | java | public void outputStartTime(final long startTimeMsec) {
log.format(Locale.US, "#[StartTime: %.3f (seconds since epoch), %s]\n",
startTimeMsec / 1000.0,
(new Date(startTimeMsec)).toString());
} | [
"public",
"void",
"outputStartTime",
"(",
"final",
"long",
"startTimeMsec",
")",
"{",
"log",
".",
"format",
"(",
"Locale",
".",
"US",
",",
"\"#[StartTime: %.3f (seconds since epoch), %s]\\n\"",
",",
"startTimeMsec",
"/",
"1000.0",
",",
"(",
"new",
"Date",
"(",
"... | Log a start time in the log.
@param startTimeMsec time (in milliseconds) since the absolute start time (the epoch) | [
"Log",
"a",
"start",
"time",
"in",
"the",
"log",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/third_party/java/src/org/HdrHistogram_voltpatches/HistogramLogWriter.java#L159-L163 | train |
VoltDB/voltdb | src/frontend/org/voltdb/client/ClientStats.java | ClientStats.latencyHistoReport | public String latencyHistoReport() {
ByteArrayOutputStream baos= new ByteArrayOutputStream();
PrintStream pw = null;
try {
pw = new PrintStream(baos, false, Charsets.UTF_8.name());
} catch (UnsupportedEncodingException e) {
Throwables.propagate(e);
}
//Get a latency report in milliseconds
m_latencyHistogram.outputPercentileDistributionVolt(pw, 1, 1000.0);
return new String(baos.toByteArray(), Charsets.UTF_8);
} | java | public String latencyHistoReport() {
ByteArrayOutputStream baos= new ByteArrayOutputStream();
PrintStream pw = null;
try {
pw = new PrintStream(baos, false, Charsets.UTF_8.name());
} catch (UnsupportedEncodingException e) {
Throwables.propagate(e);
}
//Get a latency report in milliseconds
m_latencyHistogram.outputPercentileDistributionVolt(pw, 1, 1000.0);
return new String(baos.toByteArray(), Charsets.UTF_8);
} | [
"public",
"String",
"latencyHistoReport",
"(",
")",
"{",
"ByteArrayOutputStream",
"baos",
"=",
"new",
"ByteArrayOutputStream",
"(",
")",
";",
"PrintStream",
"pw",
"=",
"null",
";",
"try",
"{",
"pw",
"=",
"new",
"PrintStream",
"(",
"baos",
",",
"false",
",",
... | Generate a human-readable report of latencies in the form of a histogram. Latency is
in milliseconds
@return String containing human-readable report. | [
"Generate",
"a",
"human",
"-",
"readable",
"report",
"of",
"latencies",
"in",
"the",
"form",
"of",
"a",
"histogram",
".",
"Latency",
"is",
"in",
"milliseconds"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/client/ClientStats.java#L499-L512 | train |
VoltDB/voltdb | src/frontend/org/voltdb/sysprocs/saverestore/TableSaveFile.java | TableSaveFile.getNextChunk | public synchronized BBContainer getNextChunk() throws IOException
{
if (m_chunkReaderException != null) {
throw m_chunkReaderException;
}
if (!m_hasMoreChunks.get()) {
final Container c = m_availableChunks.poll();
return c;
}
if (m_chunkReader == null) {
m_chunkReader = new ChunkReader();
m_chunkReaderThread = new Thread(m_chunkReader, "ChunkReader");
m_chunkReaderThread.start();
}
Container c = null;
while (c == null && (m_hasMoreChunks.get() || !m_availableChunks.isEmpty())) {
c = m_availableChunks.poll();
if (c == null) {
try {
wait();
} catch (InterruptedException e) {
throw new IOException(e);
}
}
}
if (c != null) {
m_chunkReads.release();
} else {
if (m_chunkReaderException != null) {
throw m_chunkReaderException;
}
}
return c;
} | java | public synchronized BBContainer getNextChunk() throws IOException
{
if (m_chunkReaderException != null) {
throw m_chunkReaderException;
}
if (!m_hasMoreChunks.get()) {
final Container c = m_availableChunks.poll();
return c;
}
if (m_chunkReader == null) {
m_chunkReader = new ChunkReader();
m_chunkReaderThread = new Thread(m_chunkReader, "ChunkReader");
m_chunkReaderThread.start();
}
Container c = null;
while (c == null && (m_hasMoreChunks.get() || !m_availableChunks.isEmpty())) {
c = m_availableChunks.poll();
if (c == null) {
try {
wait();
} catch (InterruptedException e) {
throw new IOException(e);
}
}
}
if (c != null) {
m_chunkReads.release();
} else {
if (m_chunkReaderException != null) {
throw m_chunkReaderException;
}
}
return c;
} | [
"public",
"synchronized",
"BBContainer",
"getNextChunk",
"(",
")",
"throws",
"IOException",
"{",
"if",
"(",
"m_chunkReaderException",
"!=",
"null",
")",
"{",
"throw",
"m_chunkReaderException",
";",
"}",
"if",
"(",
"!",
"m_hasMoreChunks",
".",
"get",
"(",
")",
... | Will get the next chunk of the table that is just over the chunk size | [
"Will",
"get",
"the",
"next",
"chunk",
"of",
"the",
"table",
"that",
"is",
"just",
"over",
"the",
"chunk",
"size"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/sysprocs/saverestore/TableSaveFile.java#L430-L465 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/jdbc/pool/JDBCConnectionPoolDataSource.java | JDBCConnectionPoolDataSource.validateSpecifiedUserAndPassword | protected void validateSpecifiedUserAndPassword(String user,
String password) throws SQLException {
String configuredUser = connProperties.getProperty("user");
String configuredPassword = connProperties.getProperty("password");
if (((user == null && configuredUser != null) || (user != null && configuredUser == null))
|| (user != null && !user.equals(configuredUser))
|| ((password == null && configuredPassword != null) || (password != null && configuredPassword == null))
|| (password != null
&& !password.equals(configuredPassword))) {
throw new SQLException("Given user name or password does not "
+ "match those configured for this object");
}
} | java | protected void validateSpecifiedUserAndPassword(String user,
String password) throws SQLException {
String configuredUser = connProperties.getProperty("user");
String configuredPassword = connProperties.getProperty("password");
if (((user == null && configuredUser != null) || (user != null && configuredUser == null))
|| (user != null && !user.equals(configuredUser))
|| ((password == null && configuredPassword != null) || (password != null && configuredPassword == null))
|| (password != null
&& !password.equals(configuredPassword))) {
throw new SQLException("Given user name or password does not "
+ "match those configured for this object");
}
} | [
"protected",
"void",
"validateSpecifiedUserAndPassword",
"(",
"String",
"user",
",",
"String",
"password",
")",
"throws",
"SQLException",
"{",
"String",
"configuredUser",
"=",
"connProperties",
".",
"getProperty",
"(",
"\"user\"",
")",
";",
"String",
"configuredPasswo... | Throws a SQLException if given user name or password are not same
as those configured for this object.
@throws SQLException if given user name or password is wrong. | [
"Throws",
"a",
"SQLException",
"if",
"given",
"user",
"name",
"or",
"password",
"are",
"not",
"same",
"as",
"those",
"configured",
"for",
"this",
"object",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/jdbc/pool/JDBCConnectionPoolDataSource.java#L164-L178 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/jdbc/pool/JDBCConnectionPoolDataSource.java | JDBCConnectionPoolDataSource.setConnectionProperty | public Object setConnectionProperty(String name, String value) {
return connProperties.setProperty(name, value);
} | java | public Object setConnectionProperty(String name, String value) {
return connProperties.setProperty(name, value);
} | [
"public",
"Object",
"setConnectionProperty",
"(",
"String",
"name",
",",
"String",
"value",
")",
"{",
"return",
"connProperties",
".",
"setProperty",
"(",
"name",
",",
"value",
")",
";",
"}"
] | Sets JDBC Connection Properties to be used when physical
connections are obtained for the pool. | [
"Sets",
"JDBC",
"Connection",
"Properties",
"to",
"be",
"used",
"when",
"physical",
"connections",
"are",
"obtained",
"for",
"the",
"pool",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/jdbc/pool/JDBCConnectionPoolDataSource.java#L230-L232 | train |
VoltDB/voltdb | src/frontend/org/voltcore/zk/MapCache.java | MapCache.start | @Override
public void start(boolean block) throws InterruptedException, ExecutionException {
Future<?> task = m_es.submit(new ParentEvent(null));
if (block) {
task.get();
}
} | java | @Override
public void start(boolean block) throws InterruptedException, ExecutionException {
Future<?> task = m_es.submit(new ParentEvent(null));
if (block) {
task.get();
}
} | [
"@",
"Override",
"public",
"void",
"start",
"(",
"boolean",
"block",
")",
"throws",
"InterruptedException",
",",
"ExecutionException",
"{",
"Future",
"<",
"?",
">",
"task",
"=",
"m_es",
".",
"submit",
"(",
"new",
"ParentEvent",
"(",
"null",
")",
")",
";",
... | Initialize and start watching the cache. | [
"Initialize",
"and",
"start",
"watching",
"the",
"cache",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltcore/zk/MapCache.java#L80-L86 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/ExpressionAggregate.java | ExpressionAggregate.getAggregatedValue | public Object getAggregatedValue(Session session, Object currValue) {
if (currValue == null) {
// A VoltDB extension APPROX_COUNT_DISTINCT
return opType == OpTypes.COUNT || opType == OpTypes.APPROX_COUNT_DISTINCT ?
ValuePool.INTEGER_0: null;
/* disable 2 lines...
return opType == OpTypes.COUNT ? ValuePool.INTEGER_0
: null;
...disabled 2 lines */
// End of VoltDB extension
}
return ((SetFunction) currValue).getValue();
} | java | public Object getAggregatedValue(Session session, Object currValue) {
if (currValue == null) {
// A VoltDB extension APPROX_COUNT_DISTINCT
return opType == OpTypes.COUNT || opType == OpTypes.APPROX_COUNT_DISTINCT ?
ValuePool.INTEGER_0: null;
/* disable 2 lines...
return opType == OpTypes.COUNT ? ValuePool.INTEGER_0
: null;
...disabled 2 lines */
// End of VoltDB extension
}
return ((SetFunction) currValue).getValue();
} | [
"public",
"Object",
"getAggregatedValue",
"(",
"Session",
"session",
",",
"Object",
"currValue",
")",
"{",
"if",
"(",
"currValue",
"==",
"null",
")",
"{",
"// A VoltDB extension APPROX_COUNT_DISTINCT",
"return",
"opType",
"==",
"OpTypes",
".",
"COUNT",
"||",
"opTy... | Get the result of a SetFunction or an ordinary value
@param currValue instance of set function or value
@param session context
@return object | [
"Get",
"the",
"result",
"of",
"a",
"SetFunction",
"or",
"an",
"ordinary",
"value"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/ExpressionAggregate.java#L293-L307 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/PlanAssembler.java | PlanAssembler.tableListIncludesReadOnlyView | private boolean tableListIncludesReadOnlyView(List<Table> tableList) {
for (Table table : tableList) {
if (table.getMaterializer() != null && !TableType.isStream(table.getMaterializer().getTabletype())) {
return true;
}
}
return false;
} | java | private boolean tableListIncludesReadOnlyView(List<Table> tableList) {
for (Table table : tableList) {
if (table.getMaterializer() != null && !TableType.isStream(table.getMaterializer().getTabletype())) {
return true;
}
}
return false;
} | [
"private",
"boolean",
"tableListIncludesReadOnlyView",
"(",
"List",
"<",
"Table",
">",
"tableList",
")",
"{",
"for",
"(",
"Table",
"table",
":",
"tableList",
")",
"{",
"if",
"(",
"table",
".",
"getMaterializer",
"(",
")",
"!=",
"null",
"&&",
"!",
"TableTyp... | Return true if tableList includes at least one matview. | [
"Return",
"true",
"if",
"tableList",
"includes",
"at",
"least",
"one",
"matview",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/PlanAssembler.java#L165-L172 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/PlanAssembler.java | PlanAssembler.tableListIncludesExportOnly | private boolean tableListIncludesExportOnly(List<Table> tableList) {
// list of all export tables (assume uppercase)
NavigableSet<String> exportTables = CatalogUtil.getExportTableNames(m_catalogDb);
// this loop is O(number-of-joins * number-of-export-tables)
// which seems acceptable if not great. Probably faster than
// re-hashing the export only tables for faster lookup.
for (Table table : tableList) {
if (exportTables.contains(table.getTypeName()) && TableType.isStream(table.getTabletype())) {
return true;
}
}
return false;
} | java | private boolean tableListIncludesExportOnly(List<Table> tableList) {
// list of all export tables (assume uppercase)
NavigableSet<String> exportTables = CatalogUtil.getExportTableNames(m_catalogDb);
// this loop is O(number-of-joins * number-of-export-tables)
// which seems acceptable if not great. Probably faster than
// re-hashing the export only tables for faster lookup.
for (Table table : tableList) {
if (exportTables.contains(table.getTypeName()) && TableType.isStream(table.getTabletype())) {
return true;
}
}
return false;
} | [
"private",
"boolean",
"tableListIncludesExportOnly",
"(",
"List",
"<",
"Table",
">",
"tableList",
")",
"{",
"// list of all export tables (assume uppercase)",
"NavigableSet",
"<",
"String",
">",
"exportTables",
"=",
"CatalogUtil",
".",
"getExportTableNames",
"(",
"m_catal... | Return true if tableList includes at least one export table. | [
"Return",
"true",
"if",
"tableList",
"includes",
"at",
"least",
"one",
"export",
"table",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/PlanAssembler.java#L177-L191 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/PlanAssembler.java | PlanAssembler.getBestCostPlanForEphemeralScans | private ParsedResultAccumulator getBestCostPlanForEphemeralScans(List<StmtEphemeralTableScan> scans) {
int nextPlanId = m_planSelector.m_planId;
boolean orderIsDeterministic = true;
boolean hasSignificantOffsetOrLimit = false;
String contentNonDeterminismMessage = null;
for (StmtEphemeralTableScan scan : scans) {
if (scan instanceof StmtSubqueryScan) {
nextPlanId = planForParsedSubquery((StmtSubqueryScan)scan, nextPlanId);
// If we can't plan this, then give up.
if (((StmtSubqueryScan) scan).getBestCostPlan() == null) {
return null;
}
}
else if (scan instanceof StmtCommonTableScan) {
nextPlanId = planForCommonTableQuery((StmtCommonTableScan)scan, nextPlanId);
if (((StmtCommonTableScan) scan).getBestCostBasePlan() == null) {
return null;
}
}
else {
throw new PlanningErrorException("Unknown scan plan type.");
}
orderIsDeterministic = scan.isOrderDeterministic(orderIsDeterministic);
contentNonDeterminismMessage = scan.contentNonDeterminismMessage(contentNonDeterminismMessage);
hasSignificantOffsetOrLimit = scan.hasSignificantOffsetOrLimit(hasSignificantOffsetOrLimit);
}
// need to reset plan id for the entire SQL
m_planSelector.m_planId = nextPlanId;
return new ParsedResultAccumulator(orderIsDeterministic,
hasSignificantOffsetOrLimit,
contentNonDeterminismMessage);
} | java | private ParsedResultAccumulator getBestCostPlanForEphemeralScans(List<StmtEphemeralTableScan> scans) {
int nextPlanId = m_planSelector.m_planId;
boolean orderIsDeterministic = true;
boolean hasSignificantOffsetOrLimit = false;
String contentNonDeterminismMessage = null;
for (StmtEphemeralTableScan scan : scans) {
if (scan instanceof StmtSubqueryScan) {
nextPlanId = planForParsedSubquery((StmtSubqueryScan)scan, nextPlanId);
// If we can't plan this, then give up.
if (((StmtSubqueryScan) scan).getBestCostPlan() == null) {
return null;
}
}
else if (scan instanceof StmtCommonTableScan) {
nextPlanId = planForCommonTableQuery((StmtCommonTableScan)scan, nextPlanId);
if (((StmtCommonTableScan) scan).getBestCostBasePlan() == null) {
return null;
}
}
else {
throw new PlanningErrorException("Unknown scan plan type.");
}
orderIsDeterministic = scan.isOrderDeterministic(orderIsDeterministic);
contentNonDeterminismMessage = scan.contentNonDeterminismMessage(contentNonDeterminismMessage);
hasSignificantOffsetOrLimit = scan.hasSignificantOffsetOrLimit(hasSignificantOffsetOrLimit);
}
// need to reset plan id for the entire SQL
m_planSelector.m_planId = nextPlanId;
return new ParsedResultAccumulator(orderIsDeterministic,
hasSignificantOffsetOrLimit,
contentNonDeterminismMessage);
} | [
"private",
"ParsedResultAccumulator",
"getBestCostPlanForEphemeralScans",
"(",
"List",
"<",
"StmtEphemeralTableScan",
">",
"scans",
")",
"{",
"int",
"nextPlanId",
"=",
"m_planSelector",
".",
"m_planId",
";",
"boolean",
"orderIsDeterministic",
"=",
"true",
";",
"boolean"... | Generate best cost plans for a list of derived tables, which
we call FROM sub-queries and common table queries.
@param subqueryNodes - list of FROM sub-queries.
@return ParsedResultAccumulator | [
"Generate",
"best",
"cost",
"plans",
"for",
"a",
"list",
"of",
"derived",
"tables",
"which",
"we",
"call",
"FROM",
"sub",
"-",
"queries",
"and",
"common",
"table",
"queries",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/PlanAssembler.java#L602-L634 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/PlanAssembler.java | PlanAssembler.getBestCostPlanForExpressionSubQueries | private boolean getBestCostPlanForExpressionSubQueries(Set<AbstractExpression> subqueryExprs) {
int nextPlanId = m_planSelector.m_planId;
for (AbstractExpression expr : subqueryExprs) {
assert(expr instanceof SelectSubqueryExpression);
if (!(expr instanceof SelectSubqueryExpression)) {
continue; // DEAD CODE?
}
SelectSubqueryExpression subqueryExpr = (SelectSubqueryExpression) expr;
StmtSubqueryScan subqueryScan = subqueryExpr.getSubqueryScan();
nextPlanId = planForParsedSubquery(subqueryScan, nextPlanId);
CompiledPlan bestPlan = subqueryScan.getBestCostPlan();
if (bestPlan == null) {
return false;
}
subqueryExpr.setSubqueryNode(bestPlan.rootPlanGraph);
// The subquery plan must not contain Receive/Send nodes because it will be executed
// multiple times during the parent statement execution.
if (bestPlan.rootPlanGraph.hasAnyNodeOfType(PlanNodeType.SEND)) {
// fail the whole plan
m_recentErrorMsg = IN_EXISTS_SCALAR_ERROR_MESSAGE;
return false;
}
}
// need to reset plan id for the entire SQL
m_planSelector.m_planId = nextPlanId;
return true;
} | java | private boolean getBestCostPlanForExpressionSubQueries(Set<AbstractExpression> subqueryExprs) {
int nextPlanId = m_planSelector.m_planId;
for (AbstractExpression expr : subqueryExprs) {
assert(expr instanceof SelectSubqueryExpression);
if (!(expr instanceof SelectSubqueryExpression)) {
continue; // DEAD CODE?
}
SelectSubqueryExpression subqueryExpr = (SelectSubqueryExpression) expr;
StmtSubqueryScan subqueryScan = subqueryExpr.getSubqueryScan();
nextPlanId = planForParsedSubquery(subqueryScan, nextPlanId);
CompiledPlan bestPlan = subqueryScan.getBestCostPlan();
if (bestPlan == null) {
return false;
}
subqueryExpr.setSubqueryNode(bestPlan.rootPlanGraph);
// The subquery plan must not contain Receive/Send nodes because it will be executed
// multiple times during the parent statement execution.
if (bestPlan.rootPlanGraph.hasAnyNodeOfType(PlanNodeType.SEND)) {
// fail the whole plan
m_recentErrorMsg = IN_EXISTS_SCALAR_ERROR_MESSAGE;
return false;
}
}
// need to reset plan id for the entire SQL
m_planSelector.m_planId = nextPlanId;
return true;
} | [
"private",
"boolean",
"getBestCostPlanForExpressionSubQueries",
"(",
"Set",
"<",
"AbstractExpression",
">",
"subqueryExprs",
")",
"{",
"int",
"nextPlanId",
"=",
"m_planSelector",
".",
"m_planId",
";",
"for",
"(",
"AbstractExpression",
"expr",
":",
"subqueryExprs",
")"... | Generate best cost plans for each Subquery expression from the list
@param subqueryExprs - list of subquery expressions
@return true if a best plan was generated for each subquery, false otherwise | [
"Generate",
"best",
"cost",
"plans",
"for",
"each",
"Subquery",
"expression",
"from",
"the",
"list"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/PlanAssembler.java#L642-L672 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/PlanAssembler.java | PlanAssembler.getNextPlan | private CompiledPlan getNextPlan() {
CompiledPlan retval;
AbstractParsedStmt nextStmt = null;
if (m_parsedSelect != null) {
nextStmt = m_parsedSelect;
retval = getNextSelectPlan();
} else if (m_parsedInsert != null) {
nextStmt = m_parsedInsert;
retval = getNextInsertPlan();
} else if (m_parsedDelete != null) {
nextStmt = m_parsedDelete;
retval = getNextDeletePlan();
// note that for replicated tables, multi-fragment plans
// need to divide the result by the number of partitions
} else if (m_parsedUpdate != null) {
nextStmt = m_parsedUpdate;
retval = getNextUpdatePlan();
} else if (m_parsedUnion != null) {
nextStmt = m_parsedUnion;
retval = getNextUnionPlan();
} else if (m_parsedSwap != null) {
nextStmt = m_parsedSwap;
retval = getNextSwapPlan();
} else if (m_parsedMigrate != null) {
nextStmt = m_parsedMigrate;
retval = getNextMigratePlan();
} else {
throw new RuntimeException(
"setupForNewPlans encountered unsupported statement type.");
}
if (retval == null || retval.rootPlanGraph == null) {
return null;
}
assert (nextStmt != null);
retval.setParameters(nextStmt.getParameters());
return retval;
} | java | private CompiledPlan getNextPlan() {
CompiledPlan retval;
AbstractParsedStmt nextStmt = null;
if (m_parsedSelect != null) {
nextStmt = m_parsedSelect;
retval = getNextSelectPlan();
} else if (m_parsedInsert != null) {
nextStmt = m_parsedInsert;
retval = getNextInsertPlan();
} else if (m_parsedDelete != null) {
nextStmt = m_parsedDelete;
retval = getNextDeletePlan();
// note that for replicated tables, multi-fragment plans
// need to divide the result by the number of partitions
} else if (m_parsedUpdate != null) {
nextStmt = m_parsedUpdate;
retval = getNextUpdatePlan();
} else if (m_parsedUnion != null) {
nextStmt = m_parsedUnion;
retval = getNextUnionPlan();
} else if (m_parsedSwap != null) {
nextStmt = m_parsedSwap;
retval = getNextSwapPlan();
} else if (m_parsedMigrate != null) {
nextStmt = m_parsedMigrate;
retval = getNextMigratePlan();
} else {
throw new RuntimeException(
"setupForNewPlans encountered unsupported statement type.");
}
if (retval == null || retval.rootPlanGraph == null) {
return null;
}
assert (nextStmt != null);
retval.setParameters(nextStmt.getParameters());
return retval;
} | [
"private",
"CompiledPlan",
"getNextPlan",
"(",
")",
"{",
"CompiledPlan",
"retval",
";",
"AbstractParsedStmt",
"nextStmt",
"=",
"null",
";",
"if",
"(",
"m_parsedSelect",
"!=",
"null",
")",
"{",
"nextStmt",
"=",
"m_parsedSelect",
";",
"retval",
"=",
"getNextSelect... | Generate a unique and correct plan for the current SQL statement context.
This method gets called repeatedly until it returns null, meaning there
are no more plans.
@return A not-previously returned query plan or null if no more
computable plans. | [
"Generate",
"a",
"unique",
"and",
"correct",
"plan",
"for",
"the",
"current",
"SQL",
"statement",
"context",
".",
"This",
"method",
"gets",
"called",
"repeatedly",
"until",
"it",
"returns",
"null",
"meaning",
"there",
"are",
"no",
"more",
"plans",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/PlanAssembler.java#L683-L721 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/PlanAssembler.java | PlanAssembler.connectChildrenBestPlans | private void connectChildrenBestPlans(AbstractPlanNode parentPlan) {
if (parentPlan instanceof AbstractScanPlanNode) {
AbstractScanPlanNode scanNode = (AbstractScanPlanNode) parentPlan;
StmtTableScan tableScan = scanNode.getTableScan();
if (tableScan instanceof StmtSubqueryScan) {
CompiledPlan bestCostPlan = ((StmtSubqueryScan)tableScan).getBestCostPlan();
assert (bestCostPlan != null);
AbstractPlanNode subQueryRoot = bestCostPlan.rootPlanGraph;
subQueryRoot.disconnectParents();
scanNode.clearChildren();
scanNode.addAndLinkChild(subQueryRoot);
}
else if (tableScan instanceof StmtCommonTableScan) {
assert(parentPlan instanceof SeqScanPlanNode);
SeqScanPlanNode scanPlanNode = (SeqScanPlanNode)parentPlan;
StmtCommonTableScan cteScan = (StmtCommonTableScan)tableScan;
CompiledPlan bestCostBasePlan = cteScan.getBestCostBasePlan();
CompiledPlan bestCostRecursivePlan = cteScan.getBestCostRecursivePlan();
assert(bestCostBasePlan != null);
AbstractPlanNode basePlanRoot = bestCostBasePlan.rootPlanGraph;
scanPlanNode.setCTEBaseNode(basePlanRoot);
if (bestCostRecursivePlan != null) {
// Either the CTE is not recursive, or this is a recursive CTE but we
// got here during the planning of the recurse query when the recurse
// query plan is still being worked on.
AbstractPlanNode recursePlanRoot = bestCostRecursivePlan.rootPlanGraph;
assert(basePlanRoot instanceof CommonTablePlanNode);
CommonTablePlanNode ctePlanNode = (CommonTablePlanNode)basePlanRoot;
ctePlanNode.setRecursiveNode(recursePlanRoot);
}
}
}
else {
for (int i = 0; i < parentPlan.getChildCount(); ++i) {
connectChildrenBestPlans(parentPlan.getChild(i));
}
}
    } | java | private void connectChildrenBestPlans(AbstractPlanNode parentPlan) {
        // Recursively walk the plan tree. Only scan nodes can reference a
        // subquery or common-table (CTE) whose separately-planned subtree
        // must be attached here.
        if (parentPlan instanceof AbstractScanPlanNode) {
            AbstractScanPlanNode scanNode = (AbstractScanPlanNode) parentPlan;
            StmtTableScan tableScan = scanNode.getTableScan();
            if (tableScan instanceof StmtSubqueryScan) {
                // Splice the subquery's best-cost plan in as the scan's only child.
                CompiledPlan bestCostPlan = ((StmtSubqueryScan)tableScan).getBestCostPlan();
                assert (bestCostPlan != null);
                AbstractPlanNode subQueryRoot = bestCostPlan.rootPlanGraph;
                subQueryRoot.disconnectParents();
                scanNode.clearChildren();
                scanNode.addAndLinkChild(subQueryRoot);
            }
            else if (tableScan instanceof StmtCommonTableScan) {
                assert(parentPlan instanceof SeqScanPlanNode);
                SeqScanPlanNode scanPlanNode = (SeqScanPlanNode)parentPlan;
                StmtCommonTableScan cteScan = (StmtCommonTableScan)tableScan;
                CompiledPlan bestCostBasePlan = cteScan.getBestCostBasePlan();
                CompiledPlan bestCostRecursivePlan = cteScan.getBestCostRecursivePlan();
                assert(bestCostBasePlan != null);
                AbstractPlanNode basePlanRoot = bestCostBasePlan.rootPlanGraph;
                scanPlanNode.setCTEBaseNode(basePlanRoot);
                // NOTE(review): the comment below appears to describe the case
                // where bestCostRecursivePlan is null, yet it sits in the
                // non-null branch — confirm against the original intent.
                if (bestCostRecursivePlan != null) {
                    // Either the CTE is not recursive, or this is a recursive CTE but we
                    // got here during the planning of the recurse query when the recurse
                    // query plan is still being worked on.
                    AbstractPlanNode recursePlanRoot = bestCostRecursivePlan.rootPlanGraph;
                    assert(basePlanRoot instanceof CommonTablePlanNode);
                    CommonTablePlanNode ctePlanNode = (CommonTablePlanNode)basePlanRoot;
                    ctePlanNode.setRecursiveNode(recursePlanRoot);
                }
            }
        }
        else {
            // Non-scan node: recurse into every child.
            for (int i = 0; i < parentPlan.getChildCount(); ++i) {
                connectChildrenBestPlans(parentPlan.getChild(i));
            }
        }
    } | [
"private",
"void",
"connectChildrenBestPlans",
"(",
"AbstractPlanNode",
"parentPlan",
")",
"{",
"if",
"(",
"parentPlan",
"instanceof",
"AbstractScanPlanNode",
")",
"{",
"AbstractScanPlanNode",
"scanNode",
"=",
"(",
"AbstractScanPlanNode",
")",
"parentPlan",
";",
"StmtTa... | For each sub-query or CTE node in the plan tree,
attach the corresponding plans to the parent node.
@param parentPlan the root of the plan subtree to process | [
"For",
"each",
"sub",
"-",
"query",
"or",
"CTE",
"node",
"in",
"the",
"plan",
"tree",
"attach",
"the",
"corresponding",
"plans",
"to",
"the",
"parent",
"node",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/PlanAssembler.java#L1013-L1050 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/PlanAssembler.java | PlanAssembler.needProjectionNode | private boolean needProjectionNode (AbstractPlanNode root) {
if (!root.planNodeClassNeedsProjectionNode()) {
return false;
}
// If there is a complexGroupby at his point, it means that
// display columns contain all the order by columns and
// does not require another projection node on top of sort node.
// If there is a complex aggregation case, the projection plan node is already added
// right above the group by plan node. In future, we may inline that projection node.
if (m_parsedSelect.hasComplexGroupby() || m_parsedSelect.hasComplexAgg()) {
return false;
}
if (root instanceof AbstractReceivePlanNode &&
m_parsedSelect.hasPartitionColumnInGroupby()) {
// Top aggregate has been removed, its schema is exactly the same to
// its local aggregate node.
return false;
}
return true;
    } | java | private boolean needProjectionNode (AbstractPlanNode root) {
        // Decide whether the plan rooted at "root" needs a projection node
        // appended on top to produce the statement's display columns.
        if (!root.planNodeClassNeedsProjectionNode()) {
            return false;
        }
        // If there is a complexGroupby at this point, it means that
        // display columns contain all the order by columns and
        // does not require another projection node on top of sort node.
        // If there is a complex aggregation case, the projection plan node is already added
        // right above the group by plan node. In future, we may inline that projection node.
        if (m_parsedSelect.hasComplexGroupby() || m_parsedSelect.hasComplexAgg()) {
            return false;
        }
        if (root instanceof AbstractReceivePlanNode &&
                m_parsedSelect.hasPartitionColumnInGroupby()) {
            // Top aggregate has been removed, its schema is exactly the same to
            // its local aggregate node.
            return false;
        }
        return true;
    } | [
"private",
"boolean",
"needProjectionNode",
"(",
"AbstractPlanNode",
"root",
")",
"{",
"if",
"(",
"!",
"root",
".",
"planNodeClassNeedsProjectionNode",
"(",
")",
")",
"{",
"return",
"false",
";",
"}",
"// If there is a complexGroupby at his point, it means that",
"// di... | Return true if the plan referenced by root node needs a
projection node appended to the top.
This method does a lot of "if this node is an
instance of this class.... else if this node is an
instance of this other class..." Perhaps it could be replaced
by a virtual method on AbstractPlanNode?
@param root The root node of a plan
@return true if a project node is required | [
"Return",
"true",
"if",
"the",
"plan",
"referenced",
"by",
"root",
"node",
"needs",
"a",
"projection",
"node",
"appended",
"to",
"the",
"top",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/PlanAssembler.java#L1263-L1285 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/PlanAssembler.java | PlanAssembler.deleteIsTruncate | static private boolean deleteIsTruncate(ParsedDeleteStmt stmt, AbstractPlanNode plan) {
if (!(plan instanceof SeqScanPlanNode)) {
return false;
}
// Assume all index scans have filters in this context, so only consider seq scans.
SeqScanPlanNode seqScanNode = (SeqScanPlanNode)plan;
if (seqScanNode.getPredicate() != null) {
return false;
}
if (stmt.hasLimitOrOffset()) {
return false;
}
return true;
    } | java | static private boolean deleteIsTruncate(ParsedDeleteStmt stmt, AbstractPlanNode plan) {
        // A DELETE qualifies as a truncate only when it is an unfiltered
        // sequential scan with no LIMIT/OFFSET, i.e. it removes every row.
        if (!(plan instanceof SeqScanPlanNode)) {
            return false;
        }
        // Assume all index scans have filters in this context, so only consider seq scans.
        SeqScanPlanNode seqScanNode = (SeqScanPlanNode)plan;
        if (seqScanNode.getPredicate() != null) {
            return false;
        }
        if (stmt.hasLimitOrOffset()) {
            return false;
        }
        return true;
    } | [
"static",
"private",
"boolean",
"deleteIsTruncate",
"(",
"ParsedDeleteStmt",
"stmt",
",",
"AbstractPlanNode",
"plan",
")",
"{",
"if",
"(",
"!",
"(",
"plan",
"instanceof",
"SeqScanPlanNode",
")",
")",
"{",
"return",
"false",
";",
"}",
"// Assume all index scans hav... | Returns true if this DELETE can be executed in the EE as a truncate operation | [
"Returns",
"true",
"if",
"this",
"DELETE",
"can",
"be",
"executed",
"in",
"the",
"EE",
"as",
"a",
"truncate",
"operation"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/PlanAssembler.java#L1298-L1314 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/PlanAssembler.java | PlanAssembler.addCoordinatorToDMLNode | private static AbstractPlanNode addCoordinatorToDMLNode(
AbstractPlanNode dmlRoot, boolean isReplicated) {
dmlRoot = SubPlanAssembler.addSendReceivePair(dmlRoot);
AbstractPlanNode sumOrLimitNode;
if (isReplicated) {
// Replicated table DML result doesn't need to be summed. All partitions should
// modify the same number of tuples in replicated table, so just pick the result from
// any partition.
LimitPlanNode limitNode = new LimitPlanNode();
sumOrLimitNode = limitNode;
limitNode.setLimit(1);
}
else {
// create the nodes being pushed on top of dmlRoot.
AggregatePlanNode countNode = new AggregatePlanNode();
sumOrLimitNode = countNode;
// configure the count aggregate (sum) node to produce a single
// output column containing the result of the sum.
// Create a TVE that should match the tuple count input column
// This TVE is magic.
// really really need to make this less hard-wired
TupleValueExpression count_tve = new TupleValueExpression(
AbstractParsedStmt.TEMP_TABLE_NAME,
AbstractParsedStmt.TEMP_TABLE_NAME,
"modified_tuples",
"modified_tuples",
0);
count_tve.setValueType(VoltType.BIGINT);
count_tve.setValueSize(VoltType.BIGINT.getLengthInBytesForFixedTypes());
countNode.addAggregate(ExpressionType.AGGREGATE_SUM, false, 0, count_tve);
// The output column. Not really based on a TVE (it is really the
// count expression represented by the count configured above). But
// this is sufficient for now. This looks identical to the above
// TVE but it's logically different so we'll create a fresh one.
TupleValueExpression tve = new TupleValueExpression(
AbstractParsedStmt.TEMP_TABLE_NAME,
AbstractParsedStmt.TEMP_TABLE_NAME,
"modified_tuples",
"modified_tuples",
0);
tve.setValueType(VoltType.BIGINT);
tve.setValueSize(VoltType.BIGINT.getLengthInBytesForFixedTypes());
NodeSchema count_schema = new NodeSchema();
count_schema.addColumn(
AbstractParsedStmt.TEMP_TABLE_NAME,
AbstractParsedStmt.TEMP_TABLE_NAME,
"modified_tuples",
"modified_tuples",
tve);
countNode.setOutputSchema(count_schema);
}
// connect the nodes to build the graph
sumOrLimitNode.addAndLinkChild(dmlRoot);
SendPlanNode sendNode = new SendPlanNode();
sendNode.addAndLinkChild(sumOrLimitNode);
return sendNode;
    } | java | private static AbstractPlanNode addCoordinatorToDMLNode(
            AbstractPlanNode dmlRoot, boolean isReplicated) {
        // Insert the fragment boundary (SEND/RECEIVE pair) below the
        // coordinator-side result aggregation built here.
        dmlRoot = SubPlanAssembler.addSendReceivePair(dmlRoot);
        AbstractPlanNode sumOrLimitNode;
        if (isReplicated) {
            // Replicated table DML result doesn't need to be summed. All partitions should
            // modify the same number of tuples in replicated table, so just pick the result from
            // any partition.
            LimitPlanNode limitNode = new LimitPlanNode();
            sumOrLimitNode = limitNode;
            limitNode.setLimit(1);
        }
        else {
            // Partitioned table: SUM the per-partition modified-tuple counts.
            // create the nodes being pushed on top of dmlRoot.
            AggregatePlanNode countNode = new AggregatePlanNode();
            sumOrLimitNode = countNode;
            // configure the count aggregate (sum) node to produce a single
            // output column containing the result of the sum.
            // Create a TVE that should match the tuple count input column
            // This TVE is magic.
            // really really need to make this less hard-wired
            TupleValueExpression count_tve = new TupleValueExpression(
                    AbstractParsedStmt.TEMP_TABLE_NAME,
                    AbstractParsedStmt.TEMP_TABLE_NAME,
                    "modified_tuples",
                    "modified_tuples",
                    0);
            count_tve.setValueType(VoltType.BIGINT);
            count_tve.setValueSize(VoltType.BIGINT.getLengthInBytesForFixedTypes());
            countNode.addAggregate(ExpressionType.AGGREGATE_SUM, false, 0, count_tve);
            // The output column. Not really based on a TVE (it is really the
            // count expression represented by the count configured above). But
            // this is sufficient for now. This looks identical to the above
            // TVE but it's logically different so we'll create a fresh one.
            TupleValueExpression tve = new TupleValueExpression(
                    AbstractParsedStmt.TEMP_TABLE_NAME,
                    AbstractParsedStmt.TEMP_TABLE_NAME,
                    "modified_tuples",
                    "modified_tuples",
                    0);
            tve.setValueType(VoltType.BIGINT);
            tve.setValueSize(VoltType.BIGINT.getLengthInBytesForFixedTypes());
            NodeSchema count_schema = new NodeSchema();
            count_schema.addColumn(
                    AbstractParsedStmt.TEMP_TABLE_NAME,
                    AbstractParsedStmt.TEMP_TABLE_NAME,
                    "modified_tuples",
                    "modified_tuples",
                    tve);
            countNode.setOutputSchema(count_schema);
        }
        // connect the nodes to build the graph
        sumOrLimitNode.addAndLinkChild(dmlRoot);
        SendPlanNode sendNode = new SendPlanNode();
        sendNode.addAndLinkChild(sumOrLimitNode);
        return sendNode;
    } | [
"private",
"static",
"AbstractPlanNode",
"addCoordinatorToDMLNode",
"(",
"AbstractPlanNode",
"dmlRoot",
",",
"boolean",
"isReplicated",
")",
"{",
"dmlRoot",
"=",
"SubPlanAssembler",
".",
"addSendReceivePair",
"(",
"dmlRoot",
")",
";",
"AbstractPlanNode",
"sumOrLimitNode",... | Add a receive node, a sum or limit node, and a send node to the given DML node.
If the DML target is a replicated table, it will add a limit node,
otherwise it adds a sum node.
@param dmlRoot the root of the distributed DML plan fragment to be capped
@param isReplicated Whether or not the target table is a replicated table.
@return | [
"Add",
"a",
"receive",
"node",
"a",
"sum",
"or",
"limit",
"node",
"and",
"a",
"send",
"node",
"to",
"the",
"given",
"DML",
"node",
".",
"If",
"the",
"DML",
"target",
"is",
"a",
"replicated",
"table",
"it",
"will",
"add",
"a",
"limit",
"node",
"otherw... | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/PlanAssembler.java#L1826-L1886 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/PlanAssembler.java | PlanAssembler.buildOrderByPlanNode | private static OrderByPlanNode buildOrderByPlanNode(List<ParsedColInfo> cols) {
OrderByPlanNode n = new OrderByPlanNode();
for (ParsedColInfo col : cols) {
n.addSortExpression(col.m_expression,
col.m_ascending ? SortDirectionType.ASC
: SortDirectionType.DESC);
}
return n;
    } | java | private static OrderByPlanNode buildOrderByPlanNode(List<ParsedColInfo> cols) {
        // Translate each parsed ORDER BY column into a sort expression on a
        // fresh OrderByPlanNode, preserving the requested ASC/DESC direction.
        OrderByPlanNode n = new OrderByPlanNode();
        for (ParsedColInfo col : cols) {
            n.addSortExpression(col.m_expression,
                                col.m_ascending ? SortDirectionType.ASC
                                                : SortDirectionType.DESC);
        }
        return n;
    } | [
"private",
"static",
"OrderByPlanNode",
"buildOrderByPlanNode",
"(",
"List",
"<",
"ParsedColInfo",
">",
"cols",
")",
"{",
"OrderByPlanNode",
"n",
"=",
"new",
"OrderByPlanNode",
"(",
")",
";",
"for",
"(",
"ParsedColInfo",
"col",
":",
"cols",
")",
"{",
"n",
".... | Given a list of ORDER BY columns, construct and return an OrderByPlanNode. | [
"Given",
"a",
"list",
"of",
"ORDER",
"BY",
"columns",
"construct",
"and",
"return",
"an",
"OrderByPlanNode",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/PlanAssembler.java#L1942-L1952 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/PlanAssembler.java | PlanAssembler.isOrderByNodeRequired | private static boolean isOrderByNodeRequired(AbstractParsedStmt parsedStmt, AbstractPlanNode root) {
// Only sort when the statement has an ORDER BY.
if ( ! parsedStmt.hasOrderByColumns()) {
return false;
}
// Skip the explicit ORDER BY plan step if an IndexScan is already providing the equivalent ordering.
// Note that even tree index scans that produce values in their own "key order" only report
// their sort direction != SortDirectionType.INVALID
// when they enforce an ordering equivalent to the one requested in the ORDER BY
// or window function clause. Even an intervening non-hash aggregate will not interfere
// in this optimization.
// Is there a window function between the root and the
// scan or join nodes? Also, does this window function
// use the index.
int numberWindowFunctions = 0;
int numberReceiveNodes = 0;
int numberHashAggregates = 0;
// EE keeps the insertion ORDER so that ORDER BY could apply before DISTINCT.
// However, this probably is not optimal if there are low cardinality results.
// Again, we have to replace the TVEs for ORDER BY clause for these cases in planning.
//
// Find the scan or join node.
AbstractPlanNode probe;
for (probe = root;
! ((probe instanceof AbstractJoinPlanNode)
|| (probe instanceof AbstractScanPlanNode))
&& (probe != null);
probe = (probe.getChildCount() > 0) ? probe.getChild(0) : null) {
// Count the number of window functions between the
// root and the join/scan node. Note that we know we
// have a statement level order by (SLOB) here. If the SLOB
// can use the index for ordering the scan or join node,
// we will have recorded it in the scan or join node.
if (probe.getPlanNodeType() == PlanNodeType.WINDOWFUNCTION) {
numberWindowFunctions += 1;
}
// Also, see if there are receive nodes. We need to
// generate an ORDERBY node if there are RECEIVE nodes,
// because the RECEIVE->MERGERECEIVE microoptimization
// needs them.
if (probe.getPlanNodeType() == PlanNodeType.RECEIVE) {
numberReceiveNodes += 1;
}
// Finally, count the number of non-serial aggregate
// nodes. A hash or partial aggregate operation invalidates
// the ordering, but a serial aggregation does not.
if ((probe.getPlanNodeType() == PlanNodeType.HASHAGGREGATE)
|| (probe.getPlanNodeType() == PlanNodeType.PARTIALAGGREGATE)) {
numberHashAggregates += 1;
}
}
if (probe == null) {
// No idea what happened here. We can't find a
// scan or join node at all. This seems unlikely
// to be right. Maybe this should be an assert?
return true;
}
//
// o If the SLOB cannot use the index, then we
// need an order by node always.
// o If there are zero window functions, then
// - If the SLOB cannot use the index than we
// need an order by node.
// - If the SLOB can use the index, then
// = If the statement is a single fragment
// statement then we don't need an order by
// node.
// = If the statement is a two fragment
// statement then we need an order by node.
// This is because we will convert the RECEIVE
// node into a MERGERECEIVE node in the
// microoptimizer, and the MERGERECEIVE
// node needs an inline order by node to do
// the merge.
// o If there is only one window function, then
// - If the window function does not use the index
// then we always need an order by node.
// - If the window function can use the index but
// the SLOB can't use the index, then we need an
// order by node.
// - If both the SLOB and the window function can
// use the index, then we don't need an order
// by, no matter how many fragments this statement
// has. This is because any RECEIVE node will be
// a descendent of the window function node. So
// the RECEIVE to MERGERECEIVE conversion happens
// in the window function and not the order by.
// o If there is more than one window function then
// we always need an order by node. The second
// window function will invalidate the ordering of
// the first one. (Actually, if the SLOB order is
// compatible with the last window function then
// the situation is like the one-window function
// below.)
//
if ( ! (probe instanceof IndexSortablePlanNode)) {
return true;
}
IndexUseForOrderBy indexUse = ((IndexSortablePlanNode)probe).indexUse();
if (indexUse.getSortOrderFromIndexScan() == SortDirectionType.INVALID) {
return true;
}
// Hash aggregates and partial aggregates
// invalidate the index ordering. So, we will need
// an ORDERBY node.
if (numberHashAggregates > 0) {
return true;
}
if ( numberWindowFunctions == 0 ) {
if ( indexUse.getWindowFunctionUsesIndex() == SubPlanAssembler.NO_INDEX_USE ) {
return true;
}
assert( indexUse.getWindowFunctionUsesIndex() == SubPlanAssembler.STATEMENT_LEVEL_ORDER_BY_INDEX );
// Return true for MP (numberReceiveNodes > 0) and
// false for SP (numberReceiveNodes == 0);
return numberReceiveNodes > 0;
}
if (numberWindowFunctions == 1) {
// If the WF uses the index then getWindowFunctionUsesIndex()
// will return 0.
if ( ( indexUse.getWindowFunctionUsesIndex() != 0 )
|| ( ! indexUse.isWindowFunctionCompatibleWithOrderBy() ) ) {
return true;
}
// Both the WF and the SLOB can use the index. Since the
// window function will have the order by node, the SLOB
// does not need one. So this is a false.
return false;
}
// This can actually never happen now,
// because we only support one window function.
return true;
    } | java | private static boolean isOrderByNodeRequired(AbstractParsedStmt parsedStmt, AbstractPlanNode root) {
        // Walk from root down to the first scan/join node, counting window
        // functions, RECEIVE nodes and non-serial aggregates, then decide
        // whether an explicit ORDER BY node is still needed to satisfy the
        // statement-level ORDER BY (SLOB).
        // Only sort when the statement has an ORDER BY.
        if ( ! parsedStmt.hasOrderByColumns()) {
            return false;
        }
        // Skip the explicit ORDER BY plan step if an IndexScan is already providing the equivalent ordering.
        // Note that even tree index scans that produce values in their own "key order" only report
        // their sort direction != SortDirectionType.INVALID
        // when they enforce an ordering equivalent to the one requested in the ORDER BY
        // or window function clause. Even an intervening non-hash aggregate will not interfere
        // in this optimization.
        // Is there a window function between the root and the
        // scan or join nodes? Also, does this window function
        // use the index.
        int numberWindowFunctions = 0;
        int numberReceiveNodes = 0;
        int numberHashAggregates = 0;
        // EE keeps the insertion ORDER so that ORDER BY could apply before DISTINCT.
        // However, this probably is not optimal if there are low cardinality results.
        // Again, we have to replace the TVEs for ORDER BY clause for these cases in planning.
        //
        // Find the scan or join node.
        AbstractPlanNode probe;
        for (probe = root;
                ! ((probe instanceof AbstractJoinPlanNode)
                        || (probe instanceof AbstractScanPlanNode))
                && (probe != null);
                probe = (probe.getChildCount() > 0) ? probe.getChild(0) : null) {
            // Count the number of window functions between the
            // root and the join/scan node. Note that we know we
            // have a statement level order by (SLOB) here. If the SLOB
            // can use the index for ordering the scan or join node,
            // we will have recorded it in the scan or join node.
            if (probe.getPlanNodeType() == PlanNodeType.WINDOWFUNCTION) {
                numberWindowFunctions += 1;
            }
            // Also, see if there are receive nodes. We need to
            // generate an ORDERBY node if there are RECEIVE nodes,
            // because the RECEIVE->MERGERECEIVE microoptimization
            // needs them.
            if (probe.getPlanNodeType() == PlanNodeType.RECEIVE) {
                numberReceiveNodes += 1;
            }
            // Finally, count the number of non-serial aggregate
            // nodes. A hash or partial aggregate operation invalidates
            // the ordering, but a serial aggregation does not.
            if ((probe.getPlanNodeType() == PlanNodeType.HASHAGGREGATE)
                    || (probe.getPlanNodeType() == PlanNodeType.PARTIALAGGREGATE)) {
                numberHashAggregates += 1;
            }
        }
        if (probe == null) {
            // No idea what happened here. We can't find a
            // scan or join node at all. This seems unlikely
            // to be right. Maybe this should be an assert?
            return true;
        }
        //
        // o If the SLOB cannot use the index, then we
        //   need an order by node always.
        // o If there are zero window functions, then
        //   - If the SLOB cannot use the index then we
        //     need an order by node.
        //   - If the SLOB can use the index, then
        //     = If the statement is a single fragment
        //       statement then we don't need an order by
        //       node.
        //     = If the statement is a two fragment
        //       statement then we need an order by node.
        //       This is because we will convert the RECEIVE
        //       node into a MERGERECEIVE node in the
        //       microoptimizer, and the MERGERECEIVE
        //       node needs an inline order by node to do
        //       the merge.
        // o If there is only one window function, then
        //   - If the window function does not use the index
        //     then we always need an order by node.
        //   - If the window function can use the index but
        //     the SLOB can't use the index, then we need an
        //     order by node.
        //   - If both the SLOB and the window function can
        //     use the index, then we don't need an order
        //     by, no matter how many fragments this statement
        //     has. This is because any RECEIVE node will be
        //     a descendent of the window function node. So
        //     the RECEIVE to MERGERECEIVE conversion happens
        //     in the window function and not the order by.
        // o If there is more than one window function then
        //   we always need an order by node. The second
        //   window function will invalidate the ordering of
        //   the first one. (Actually, if the SLOB order is
        //   compatible with the last window function then
        //   the situation is like the one-window function
        //   below.)
        //
        if ( ! (probe instanceof IndexSortablePlanNode)) {
            return true;
        }
        IndexUseForOrderBy indexUse = ((IndexSortablePlanNode)probe).indexUse();
        if (indexUse.getSortOrderFromIndexScan() == SortDirectionType.INVALID) {
            return true;
        }
        // Hash aggregates and partial aggregates
        // invalidate the index ordering. So, we will need
        // an ORDERBY node.
        if (numberHashAggregates > 0) {
            return true;
        }
        if ( numberWindowFunctions == 0 ) {
            if ( indexUse.getWindowFunctionUsesIndex() == SubPlanAssembler.NO_INDEX_USE ) {
                return true;
            }
            assert( indexUse.getWindowFunctionUsesIndex() == SubPlanAssembler.STATEMENT_LEVEL_ORDER_BY_INDEX );
            // Return true for MP (numberReceiveNodes > 0) and
            // false for SP (numberReceiveNodes == 0);
            return numberReceiveNodes > 0;
        }
        if (numberWindowFunctions == 1) {
            // If the WF uses the index then getWindowFunctionUsesIndex()
            // will return 0.
            if ( ( indexUse.getWindowFunctionUsesIndex() != 0 )
                    || ( ! indexUse.isWindowFunctionCompatibleWithOrderBy() ) ) {
                return true;
            }
            // Both the WF and the SLOB can use the index. Since the
            // window function will have the order by node, the SLOB
            // does not need one. So this is a false.
            return false;
        }
        // This can actually never happen now,
        // because we only support one window function.
        return true;
    } | [
"private",
"static",
"boolean",
"isOrderByNodeRequired",
"(",
"AbstractParsedStmt",
"parsedStmt",
",",
"AbstractPlanNode",
"root",
")",
"{",
"// Only sort when the statement has an ORDER BY.",
"if",
"(",
"!",
"parsedStmt",
".",
"hasOrderByColumns",
"(",
")",
")",
"{",
"... | Determine if an OrderByPlanNode is needed. This may return false if the
statement has no ORDER BY clause, or if the subtree is already producing
rows in the correct order. Note that a hash aggregate node will cause this
to return true, and a serial or partial aggregate node may cause this
to return true.
@param parsedStmt The statement whose plan may need an OrderByPlanNode
@param root The subtree which may need its output tuples ordered
@return true if the plan needs an OrderByPlanNode, false otherwise | [
"Determine",
"if",
"an",
"OrderByPlanNode",
"is",
"needed",
".",
"This",
"may",
"return",
"false",
"if",
"the",
"statement",
"has",
"no",
"ORDER",
"BY",
"clause",
"or",
"if",
"the",
"subtree",
"is",
"already",
"producing",
"rows",
"in",
"the",
"correct",
"... | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/PlanAssembler.java#L1965-L2102 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/PlanAssembler.java | PlanAssembler.handleOrderBy | private static AbstractPlanNode handleOrderBy(AbstractParsedStmt parsedStmt, AbstractPlanNode root) {
assert (parsedStmt instanceof ParsedSelectStmt || parsedStmt instanceof ParsedUnionStmt ||
parsedStmt instanceof ParsedDeleteStmt);
if (! isOrderByNodeRequired(parsedStmt, root)) {
return root;
}
OrderByPlanNode orderByNode = buildOrderByPlanNode(parsedStmt.orderByColumns());
orderByNode.addAndLinkChild(root);
return orderByNode;
    } | java | private static AbstractPlanNode handleOrderBy(AbstractParsedStmt parsedStmt, AbstractPlanNode root) {
        // Append an OrderByPlanNode above root only when the statement
        // actually requires an explicit sort (see isOrderByNodeRequired);
        // otherwise return root unchanged.
        assert (parsedStmt instanceof ParsedSelectStmt || parsedStmt instanceof ParsedUnionStmt ||
                parsedStmt instanceof ParsedDeleteStmt);
        if (! isOrderByNodeRequired(parsedStmt, root)) {
            return root;
        }
        OrderByPlanNode orderByNode = buildOrderByPlanNode(parsedStmt.orderByColumns());
        orderByNode.addAndLinkChild(root);
        return orderByNode;
    } | [
"private",
"static",
"AbstractPlanNode",
"handleOrderBy",
"(",
"AbstractParsedStmt",
"parsedStmt",
",",
"AbstractPlanNode",
"root",
")",
"{",
"assert",
"(",
"parsedStmt",
"instanceof",
"ParsedSelectStmt",
"||",
"parsedStmt",
"instanceof",
"ParsedUnionStmt",
"||",
"parsedS... | Create an order by node as required by the statement and make it a parent of root.
@param parsedStmt Parsed statement, for context
@param root The root of the plan needing ordering
@return new orderByNode (the new root) or the original root if no orderByNode was required. | [
"Create",
"an",
"order",
"by",
"node",
"as",
"required",
"by",
"the",
"statement",
"and",
"make",
"it",
"a",
"parent",
"of",
"root",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/PlanAssembler.java#L2110-L2121 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/PlanAssembler.java | PlanAssembler.handleSelectLimitOperator | private AbstractPlanNode handleSelectLimitOperator(AbstractPlanNode root)
    {
        // Attach the statement's LIMIT/OFFSET to the plan, pushing a
        // combined limit down to the partition fragment when it is safe.
        // The coordinator's top limit graph fragment for a MP plan.
        // If planning "order by ... limit", getNextSelectPlan()
        // will have already added an order by to the coordinator frag.
        // This is the only limit node in a SP plan
        LimitPlanNode topLimit = m_parsedSelect.getLimitNodeTop();
        assert(topLimit != null);
        /*
         * TODO: allow push down limit with distinct (select distinct C from T limit 5)
         * , DISTINCT in aggregates and DISTINCT PUSH DOWN with partition column included.
         */
        AbstractPlanNode sendNode = null;
        // Whether or not we can push the limit node down
        boolean canPushDown = ! m_parsedSelect.hasDistinctWithGroupBy();
        if (canPushDown) {
            sendNode = checkLimitPushDownViability(root);
            if (sendNode == null) {
                canPushDown = false;
            }
            else {
                canPushDown = m_parsedSelect.getCanPushdownLimit();
            }
        }
        if (m_parsedSelect.m_mvFixInfo.needed()) {
            // Do not push down limit for mv based distributed query.
            canPushDown = false;
        }
        /*
         * Push down the limit plan node when possible even if offset is set. If
         * the plan is for a partitioned table, do the push down. Otherwise,
         * there is no need to do the push down work, the limit plan node will
         * be run in the partition.
         */
        if (canPushDown) {
            /*
             * For partitioned table, the pushed-down limit plan node has a limit based
             * on the combined limit and offset, which may require an expression if either of these
             * was not a hard-coded constant and didn't get parameterized.
             * The top level limit plan node remains the same, with the original limit and offset values.
             */
            LimitPlanNode distLimit = m_parsedSelect.getLimitNodeDist();
            // Disconnect the distributed parts of the plan below the SEND node
            AbstractPlanNode distributedPlan = sendNode.getChild(0);
            distributedPlan.clearParents();
            sendNode.clearChildren();
            // If the distributed limit must be performed on ordered input,
            // ensure the order of the data on each partition.
            if (m_parsedSelect.hasOrderByColumns()) {
                distributedPlan = handleOrderBy(m_parsedSelect, distributedPlan);
            }
            if (isInlineLimitPlanNodePossible(distributedPlan)) {
                // Inline the distributed limit.
                distributedPlan.addInlinePlanNode(distLimit);
                sendNode.addAndLinkChild(distributedPlan);
            }
            else {
                distLimit.addAndLinkChild(distributedPlan);
                // Add the distributed work back to the plan
                sendNode.addAndLinkChild(distLimit);
            }
        }
        // In future, inline LIMIT for join, Receive
        // Then we do not need to distinguish the order by node.
        return inlineLimitOperator(root, topLimit);
    } | java | private AbstractPlanNode handleSelectLimitOperator(AbstractPlanNode root)
{
// The coordinator's top limit graph fragment for a MP plan.
// If planning "order by ... limit", getNextSelectPlan()
// will have already added an order by to the coordinator frag.
// This is the only limit node in a SP plan
LimitPlanNode topLimit = m_parsedSelect.getLimitNodeTop();
assert(topLimit != null);
/*
* TODO: allow push down limit with distinct (select distinct C from T limit 5)
* , DISTINCT in aggregates and DISTINCT PUSH DOWN with partition column included.
*/
AbstractPlanNode sendNode = null;
// Whether or not we can push the limit node down
boolean canPushDown = ! m_parsedSelect.hasDistinctWithGroupBy();
if (canPushDown) {
sendNode = checkLimitPushDownViability(root);
if (sendNode == null) {
canPushDown = false;
}
else {
canPushDown = m_parsedSelect.getCanPushdownLimit();
}
}
if (m_parsedSelect.m_mvFixInfo.needed()) {
// Do not push down limit for mv based distributed query.
canPushDown = false;
}
/*
* Push down the limit plan node when possible even if offset is set. If
* the plan is for a partitioned table, do the push down. Otherwise,
* there is no need to do the push down work, the limit plan node will
* be run in the partition.
*/
if (canPushDown) {
/*
* For partitioned table, the pushed-down limit plan node has a limit based
* on the combined limit and offset, which may require an expression if either of these
* was not a hard-coded constant and didn't get parameterized.
* The top level limit plan node remains the same, with the original limit and offset values.
*/
LimitPlanNode distLimit = m_parsedSelect.getLimitNodeDist();
// Disconnect the distributed parts of the plan below the SEND node
AbstractPlanNode distributedPlan = sendNode.getChild(0);
distributedPlan.clearParents();
sendNode.clearChildren();
// If the distributed limit must be performed on ordered input,
// ensure the order of the data on each partition.
if (m_parsedSelect.hasOrderByColumns()) {
distributedPlan = handleOrderBy(m_parsedSelect, distributedPlan);
}
if (isInlineLimitPlanNodePossible(distributedPlan)) {
// Inline the distributed limit.
distributedPlan.addInlinePlanNode(distLimit);
sendNode.addAndLinkChild(distributedPlan);
}
else {
distLimit.addAndLinkChild(distributedPlan);
// Add the distributed work back to the plan
sendNode.addAndLinkChild(distLimit);
}
}
// In future, inline LIMIT for join, Receive
// Then we do not need to distinguish the order by node.
return inlineLimitOperator(root, topLimit);
} | [
"private",
"AbstractPlanNode",
"handleSelectLimitOperator",
"(",
"AbstractPlanNode",
"root",
")",
"{",
"// The coordinator's top limit graph fragment for a MP plan.",
"// If planning \"order by ... limit\", getNextSelectPlan()",
"// will have already added an order by to the coordinator frag.",
... | Add a limit, pushed-down if possible, and return the new root.
@param root top of the original plan
@return new plan's root node | [
"Add",
"a",
"limit",
"pushed",
"-",
"down",
"if",
"possible",
"and",
"return",
"the",
"new",
"root",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/PlanAssembler.java#L2128-L2199 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/PlanAssembler.java | PlanAssembler.handleUnionLimitOperator | private AbstractPlanNode handleUnionLimitOperator(AbstractPlanNode root) {
// The coordinator's top limit graph fragment for a MP plan.
// If planning "order by ... limit", getNextUnionPlan()
// will have already added an order by to the coordinator frag.
// This is the only limit node in a SP plan
LimitPlanNode topLimit = m_parsedUnion.getLimitNodeTop();
assert(topLimit != null);
return inlineLimitOperator(root, topLimit);
} | java | private AbstractPlanNode handleUnionLimitOperator(AbstractPlanNode root) {
// The coordinator's top limit graph fragment for a MP plan.
// If planning "order by ... limit", getNextUnionPlan()
// will have already added an order by to the coordinator frag.
// This is the only limit node in a SP plan
LimitPlanNode topLimit = m_parsedUnion.getLimitNodeTop();
assert(topLimit != null);
return inlineLimitOperator(root, topLimit);
} | [
"private",
"AbstractPlanNode",
"handleUnionLimitOperator",
"(",
"AbstractPlanNode",
"root",
")",
"{",
"// The coordinator's top limit graph fragment for a MP plan.",
"// If planning \"order by ... limit\", getNextUnionPlan()",
"// will have already added an order by to the coordinator frag.",
... | Add a limit, and return the new root.
@param root top of the original plan
@return new plan's root node | [
"Add",
"a",
"limit",
"and",
"return",
"the",
"new",
"root",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/PlanAssembler.java#L2206-L2214 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/PlanAssembler.java | PlanAssembler.inlineLimitOperator | private AbstractPlanNode inlineLimitOperator(AbstractPlanNode root,
LimitPlanNode topLimit) {
if (isInlineLimitPlanNodePossible(root)) {
root.addInlinePlanNode(topLimit);
}
else if (root instanceof ProjectionPlanNode &&
isInlineLimitPlanNodePossible(root.getChild(0)) ) {
// In future, inlined this projection node for OrderBy and Aggregate
// Then we could delete this ELSE IF block.
root.getChild(0).addInlinePlanNode(topLimit);
}
else {
topLimit.addAndLinkChild(root);
root = topLimit;
}
return root;
} | java | private AbstractPlanNode inlineLimitOperator(AbstractPlanNode root,
LimitPlanNode topLimit) {
if (isInlineLimitPlanNodePossible(root)) {
root.addInlinePlanNode(topLimit);
}
else if (root instanceof ProjectionPlanNode &&
isInlineLimitPlanNodePossible(root.getChild(0)) ) {
// In future, inlined this projection node for OrderBy and Aggregate
// Then we could delete this ELSE IF block.
root.getChild(0).addInlinePlanNode(topLimit);
}
else {
topLimit.addAndLinkChild(root);
root = topLimit;
}
return root;
} | [
"private",
"AbstractPlanNode",
"inlineLimitOperator",
"(",
"AbstractPlanNode",
"root",
",",
"LimitPlanNode",
"topLimit",
")",
"{",
"if",
"(",
"isInlineLimitPlanNodePossible",
"(",
"root",
")",
")",
"{",
"root",
".",
"addInlinePlanNode",
"(",
"topLimit",
")",
";",
... | Inline Limit plan node if possible
@param root
@param topLimit
@return | [
"Inline",
"Limit",
"plan",
"node",
"if",
"possible"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/PlanAssembler.java#L2222-L2238 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/PlanAssembler.java | PlanAssembler.isInlineLimitPlanNodePossible | static private boolean isInlineLimitPlanNodePossible(AbstractPlanNode pn) {
if (pn instanceof OrderByPlanNode ||
pn.getPlanNodeType() == PlanNodeType.AGGREGATE) {
return true;
}
return false;
} | java | static private boolean isInlineLimitPlanNodePossible(AbstractPlanNode pn) {
if (pn instanceof OrderByPlanNode ||
pn.getPlanNodeType() == PlanNodeType.AGGREGATE) {
return true;
}
return false;
} | [
"static",
"private",
"boolean",
"isInlineLimitPlanNodePossible",
"(",
"AbstractPlanNode",
"pn",
")",
"{",
"if",
"(",
"pn",
"instanceof",
"OrderByPlanNode",
"||",
"pn",
".",
"getPlanNodeType",
"(",
")",
"==",
"PlanNodeType",
".",
"AGGREGATE",
")",
"{",
"return",
... | Inline limit plan node can be applied with ORDER BY node
and serial aggregation node
@param pn
@return | [
"Inline",
"limit",
"plan",
"node",
"can",
"be",
"applied",
"with",
"ORDER",
"BY",
"node",
"and",
"serial",
"aggregation",
"node"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/PlanAssembler.java#L2246-L2252 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/PlanAssembler.java | PlanAssembler.switchToIndexScanForGroupBy | private boolean switchToIndexScanForGroupBy(AbstractPlanNode candidate,
IndexGroupByInfo gbInfo) {
if (! m_parsedSelect.isGrouped()) {
return false;
}
if (candidate instanceof IndexScanPlanNode) {
calculateIndexGroupByInfo((IndexScanPlanNode) candidate, gbInfo);
if (gbInfo.m_coveredGroupByColumns != null &&
!gbInfo.m_coveredGroupByColumns.isEmpty()) {
// The candidate index does cover all or some
// of the GROUP BY columns and can be serialized
gbInfo.m_indexAccess = candidate;
return true;
}
return false;
}
AbstractPlanNode sourceSeqScan = findSeqScanCandidateForGroupBy(candidate);
if (sourceSeqScan == null) {
return false;
}
assert(sourceSeqScan instanceof SeqScanPlanNode);
AbstractPlanNode parent = null;
if (sourceSeqScan.getParentCount() > 0) {
parent = sourceSeqScan.getParent(0);
}
AbstractPlanNode indexAccess = indexAccessForGroupByExprs(
(SeqScanPlanNode)sourceSeqScan, gbInfo);
if (indexAccess.getPlanNodeType() != PlanNodeType.INDEXSCAN) {
// does not find proper index to replace sequential scan
return false;
}
gbInfo.m_indexAccess = indexAccess;
if (parent != null) {
// have a parent and would like to replace
// the sequential scan with an index scan
indexAccess.clearParents();
// For two children join node, index 0 is its outer side
parent.replaceChild(0, indexAccess);
return false;
}
// parent is null and switched to index scan from sequential scan
return true;
} | java | private boolean switchToIndexScanForGroupBy(AbstractPlanNode candidate,
IndexGroupByInfo gbInfo) {
if (! m_parsedSelect.isGrouped()) {
return false;
}
if (candidate instanceof IndexScanPlanNode) {
calculateIndexGroupByInfo((IndexScanPlanNode) candidate, gbInfo);
if (gbInfo.m_coveredGroupByColumns != null &&
!gbInfo.m_coveredGroupByColumns.isEmpty()) {
// The candidate index does cover all or some
// of the GROUP BY columns and can be serialized
gbInfo.m_indexAccess = candidate;
return true;
}
return false;
}
AbstractPlanNode sourceSeqScan = findSeqScanCandidateForGroupBy(candidate);
if (sourceSeqScan == null) {
return false;
}
assert(sourceSeqScan instanceof SeqScanPlanNode);
AbstractPlanNode parent = null;
if (sourceSeqScan.getParentCount() > 0) {
parent = sourceSeqScan.getParent(0);
}
AbstractPlanNode indexAccess = indexAccessForGroupByExprs(
(SeqScanPlanNode)sourceSeqScan, gbInfo);
if (indexAccess.getPlanNodeType() != PlanNodeType.INDEXSCAN) {
// does not find proper index to replace sequential scan
return false;
}
gbInfo.m_indexAccess = indexAccess;
if (parent != null) {
// have a parent and would like to replace
// the sequential scan with an index scan
indexAccess.clearParents();
// For two children join node, index 0 is its outer side
parent.replaceChild(0, indexAccess);
return false;
}
// parent is null and switched to index scan from sequential scan
return true;
} | [
"private",
"boolean",
"switchToIndexScanForGroupBy",
"(",
"AbstractPlanNode",
"candidate",
",",
"IndexGroupByInfo",
"gbInfo",
")",
"{",
"if",
"(",
"!",
"m_parsedSelect",
".",
"isGrouped",
"(",
")",
")",
"{",
"return",
"false",
";",
"}",
"if",
"(",
"candidate",
... | For a seqscan feeding a GROUP BY, consider substituting an IndexScan
that pre-sorts by the GROUP BY keys.
If a candidate is already an indexscan,
simply calculate GROUP BY column coverage
@param candidate
@param gbInfo
@return true when planner can switch to index scan
from a sequential scan, and when the index scan
has no parent plan node or the candidate is already
an indexscan and covers all or some GROUP BY columns | [
"For",
"a",
"seqscan",
"feeding",
"a",
"GROUP",
"BY",
"consider",
"substituting",
"an",
"IndexScan",
"that",
"pre",
"-",
"sorts",
"by",
"the",
"GROUP",
"BY",
"keys",
".",
"If",
"a",
"candidate",
"is",
"already",
"an",
"indexscan",
"simply",
"calculate",
"G... | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/PlanAssembler.java#L2419-L2468 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/PlanAssembler.java | PlanAssembler.handleWindowedOperators | private AbstractPlanNode handleWindowedOperators(AbstractPlanNode root) {
// Get the windowed expression. We need to set its output
// schema from the display list.
WindowFunctionExpression winExpr = m_parsedSelect.getWindowFunctionExpressions().get(0);
assert(winExpr != null);
// This will set the output schema to contain the
// windowed schema column only. In generateOutputSchema
// we will add the input columns.
WindowFunctionPlanNode pnode = new WindowFunctionPlanNode();
pnode.setWindowFunctionExpression(winExpr);
// We always need an order by plan node, even if the sort
// is optimized away by an index. This may be turned
// into an inline order by in a MergeReceivePlanNode.
IndexUseForOrderBy scanNode = findScanNodeForWindowFunction(root);
AbstractPlanNode cnode = null;
int winfunc = (scanNode == null) ? SubPlanAssembler.NO_INDEX_USE : scanNode.getWindowFunctionUsesIndex();
// If we have an index which is compatible with the statement
// level order by, and we have a window function which can't
// use the index we have to ignore the statement level order by
// index use. We will need to order the input according to the
// window function first, and that will in general invalidate the
// statement level order by ordering.
if ((SubPlanAssembler.STATEMENT_LEVEL_ORDER_BY_INDEX == winfunc)
|| (SubPlanAssembler.NO_INDEX_USE == winfunc)) {
// No index. Calculate the expression order here and stuff it into
// the order by node. Note that if we support more than one window
// function this would be the case when scanNode.getWindowFunctionUsesIndex()
// returns a window function number which is different from the number
// of winExpr.
List<AbstractExpression> partitionByExpressions = winExpr.getPartitionByExpressions();
// If the order by expression list contains a partition by expression then
// we won't have to sort by it twice. We sort by the partition by expressions
// first, and we don't care what order we sort by them. So, find the
// sort direction in the order by list and use that in the partition by
// list, and then mark that it was deleted in the order by
// list.
//
// We choose to make this dontsort rather than dosort because the
// Java default value for boolean is false, and we want to sort by
// default.
boolean dontsort[] = new boolean[winExpr.getOrderbySize()];
List<AbstractExpression> orderByExpressions = winExpr.getOrderByExpressions();
List<SortDirectionType> orderByDirections = winExpr.getOrderByDirections();
OrderByPlanNode onode = new OrderByPlanNode();
for (int idx = 0; idx < winExpr.getPartitionbySize(); ++idx) {
SortDirectionType pdir = SortDirectionType.ASC;
AbstractExpression partitionByExpression = partitionByExpressions.get(idx);
int sidx = winExpr.getSortIndexOfOrderByExpression(partitionByExpression);
if (0 <= sidx) {
pdir = orderByDirections.get(sidx);
dontsort[sidx] = true;
}
onode.addSortExpression(partitionByExpression, pdir);
}
for (int idx = 0; idx < winExpr.getOrderbySize(); ++idx) {
if (!dontsort[idx]) {
AbstractExpression orderByExpr = orderByExpressions.get(idx);
SortDirectionType orderByDir = orderByDirections.get(idx);
onode.addSortExpression(orderByExpr, orderByDir);
}
}
onode.addAndLinkChild(root);
cnode = onode;
} else {
assert(scanNode != null);
// This means the index is good for this window function.
// If this is an MP statement we still need to generate the
// order by node, because we may need to turn it into an
// inline order by node of a MergeReceive node.
assert( 0 == scanNode.getWindowFunctionUsesIndex() );
if (m_partitioning.requiresTwoFragments()) {
OrderByPlanNode onode = new OrderByPlanNode();
SortDirectionType dir = scanNode.getSortOrderFromIndexScan();
assert(dir != SortDirectionType.INVALID);
// This was created when the index was determined.
// We cached it in the scan node.
List<AbstractExpression> orderExprs = scanNode.getFinalExpressionOrderFromIndexScan();
assert(orderExprs != null);
for (AbstractExpression ae : orderExprs) {
onode.addSortExpression(ae, dir);
}
// Link in the OrderByNode.
onode.addAndLinkChild(root);
cnode = onode;
} else {
// Don't create and link in the order by node.
cnode = root;
}
}
pnode.addAndLinkChild(cnode);
return pnode;
} | java | private AbstractPlanNode handleWindowedOperators(AbstractPlanNode root) {
// Get the windowed expression. We need to set its output
// schema from the display list.
WindowFunctionExpression winExpr = m_parsedSelect.getWindowFunctionExpressions().get(0);
assert(winExpr != null);
// This will set the output schema to contain the
// windowed schema column only. In generateOutputSchema
// we will add the input columns.
WindowFunctionPlanNode pnode = new WindowFunctionPlanNode();
pnode.setWindowFunctionExpression(winExpr);
// We always need an order by plan node, even if the sort
// is optimized away by an index. This may be turned
// into an inline order by in a MergeReceivePlanNode.
IndexUseForOrderBy scanNode = findScanNodeForWindowFunction(root);
AbstractPlanNode cnode = null;
int winfunc = (scanNode == null) ? SubPlanAssembler.NO_INDEX_USE : scanNode.getWindowFunctionUsesIndex();
// If we have an index which is compatible with the statement
// level order by, and we have a window function which can't
// use the index we have to ignore the statement level order by
// index use. We will need to order the input according to the
// window function first, and that will in general invalidate the
// statement level order by ordering.
if ((SubPlanAssembler.STATEMENT_LEVEL_ORDER_BY_INDEX == winfunc)
|| (SubPlanAssembler.NO_INDEX_USE == winfunc)) {
// No index. Calculate the expression order here and stuff it into
// the order by node. Note that if we support more than one window
// function this would be the case when scanNode.getWindowFunctionUsesIndex()
// returns a window function number which is different from the number
// of winExpr.
List<AbstractExpression> partitionByExpressions = winExpr.getPartitionByExpressions();
// If the order by expression list contains a partition by expression then
// we won't have to sort by it twice. We sort by the partition by expressions
// first, and we don't care what order we sort by them. So, find the
// sort direction in the order by list and use that in the partition by
// list, and then mark that it was deleted in the order by
// list.
//
// We choose to make this dontsort rather than dosort because the
// Java default value for boolean is false, and we want to sort by
// default.
boolean dontsort[] = new boolean[winExpr.getOrderbySize()];
List<AbstractExpression> orderByExpressions = winExpr.getOrderByExpressions();
List<SortDirectionType> orderByDirections = winExpr.getOrderByDirections();
OrderByPlanNode onode = new OrderByPlanNode();
for (int idx = 0; idx < winExpr.getPartitionbySize(); ++idx) {
SortDirectionType pdir = SortDirectionType.ASC;
AbstractExpression partitionByExpression = partitionByExpressions.get(idx);
int sidx = winExpr.getSortIndexOfOrderByExpression(partitionByExpression);
if (0 <= sidx) {
pdir = orderByDirections.get(sidx);
dontsort[sidx] = true;
}
onode.addSortExpression(partitionByExpression, pdir);
}
for (int idx = 0; idx < winExpr.getOrderbySize(); ++idx) {
if (!dontsort[idx]) {
AbstractExpression orderByExpr = orderByExpressions.get(idx);
SortDirectionType orderByDir = orderByDirections.get(idx);
onode.addSortExpression(orderByExpr, orderByDir);
}
}
onode.addAndLinkChild(root);
cnode = onode;
} else {
assert(scanNode != null);
// This means the index is good for this window function.
// If this is an MP statement we still need to generate the
// order by node, because we may need to turn it into an
// inline order by node of a MergeReceive node.
assert( 0 == scanNode.getWindowFunctionUsesIndex() );
if (m_partitioning.requiresTwoFragments()) {
OrderByPlanNode onode = new OrderByPlanNode();
SortDirectionType dir = scanNode.getSortOrderFromIndexScan();
assert(dir != SortDirectionType.INVALID);
// This was created when the index was determined.
// We cached it in the scan node.
List<AbstractExpression> orderExprs = scanNode.getFinalExpressionOrderFromIndexScan();
assert(orderExprs != null);
for (AbstractExpression ae : orderExprs) {
onode.addSortExpression(ae, dir);
}
// Link in the OrderByNode.
onode.addAndLinkChild(root);
cnode = onode;
} else {
// Don't create and link in the order by node.
cnode = root;
}
}
pnode.addAndLinkChild(cnode);
return pnode;
} | [
"private",
"AbstractPlanNode",
"handleWindowedOperators",
"(",
"AbstractPlanNode",
"root",
")",
"{",
"// Get the windowed expression. We need to set its output",
"// schema from the display list.",
"WindowFunctionExpression",
"winExpr",
"=",
"m_parsedSelect",
".",
"getWindowFunctionEx... | Create nodes for windowed operations.
@param root
@return | [
"Create",
"nodes",
"for",
"windowed",
"operations",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/PlanAssembler.java#L2476-L2568 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/PlanAssembler.java | PlanAssembler.updatePartialIndex | private static void updatePartialIndex(IndexScanPlanNode scan) {
if (scan.getPredicate() == null && scan.getPartialIndexPredicate() != null) {
if (scan.isForSortOrderOnly()) {
scan.setPredicate(Collections.singletonList(scan.getPartialIndexPredicate()));
}
scan.setForPartialIndexOnly();
}
} | java | private static void updatePartialIndex(IndexScanPlanNode scan) {
if (scan.getPredicate() == null && scan.getPartialIndexPredicate() != null) {
if (scan.isForSortOrderOnly()) {
scan.setPredicate(Collections.singletonList(scan.getPartialIndexPredicate()));
}
scan.setForPartialIndexOnly();
}
} | [
"private",
"static",
"void",
"updatePartialIndex",
"(",
"IndexScanPlanNode",
"scan",
")",
"{",
"if",
"(",
"scan",
".",
"getPredicate",
"(",
")",
"==",
"null",
"&&",
"scan",
".",
"getPartialIndexPredicate",
"(",
")",
"!=",
"null",
")",
"{",
"if",
"(",
"scan... | Check if the index for the scan node is a partial index, and if so, make sure that the
scan contains index predicate, and update index reason as needed for @Explain.
@param scan index scan plan node | [
"Check",
"if",
"the",
"index",
"for",
"the",
"scan",
"node",
"is",
"a",
"partial",
"index",
"and",
"if",
"so",
"make",
"sure",
"that",
"the",
"scan",
"contains",
"index",
"predicate",
"and",
"update",
"index",
"reason",
"as",
"needed",
"for"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/PlanAssembler.java#L2594-L2601 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/PlanAssembler.java | PlanAssembler.calculateIndexGroupByInfo | private void calculateIndexGroupByInfo(IndexScanPlanNode root,
IndexGroupByInfo gbInfo) {
String fromTableAlias = root.getTargetTableAlias();
assert(fromTableAlias != null);
Index index = root.getCatalogIndex();
if ( ! IndexType.isScannable(index.getType())) {
return;
}
ArrayList<AbstractExpression> bindings = new ArrayList<>();
gbInfo.m_coveredGroupByColumns = calculateGroupbyColumnsCovered(
index, fromTableAlias, bindings);
gbInfo.m_canBeFullySerialized =
(gbInfo.m_coveredGroupByColumns.size() ==
m_parsedSelect.groupByColumns().size());
} | java | private void calculateIndexGroupByInfo(IndexScanPlanNode root,
IndexGroupByInfo gbInfo) {
String fromTableAlias = root.getTargetTableAlias();
assert(fromTableAlias != null);
Index index = root.getCatalogIndex();
if ( ! IndexType.isScannable(index.getType())) {
return;
}
ArrayList<AbstractExpression> bindings = new ArrayList<>();
gbInfo.m_coveredGroupByColumns = calculateGroupbyColumnsCovered(
index, fromTableAlias, bindings);
gbInfo.m_canBeFullySerialized =
(gbInfo.m_coveredGroupByColumns.size() ==
m_parsedSelect.groupByColumns().size());
} | [
"private",
"void",
"calculateIndexGroupByInfo",
"(",
"IndexScanPlanNode",
"root",
",",
"IndexGroupByInfo",
"gbInfo",
")",
"{",
"String",
"fromTableAlias",
"=",
"root",
".",
"getTargetTableAlias",
"(",
")",
";",
"assert",
"(",
"fromTableAlias",
"!=",
"null",
")",
"... | Sets IndexGroupByInfo for an IndexScan | [
"Sets",
"IndexGroupByInfo",
"for",
"an",
"IndexScan"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/PlanAssembler.java#L2836-L2852 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/PlanAssembler.java | PlanAssembler.indexAccessForGroupByExprs | private AbstractPlanNode indexAccessForGroupByExprs(SeqScanPlanNode root,
IndexGroupByInfo gbInfo) {
if (! root.isPersistentTableScan()) {
// subquery and common tables are not handled
return root;
}
String fromTableAlias = root.getTargetTableAlias();
assert(fromTableAlias != null);
List<ParsedColInfo> groupBys = m_parsedSelect.groupByColumns();
Table targetTable = m_catalogDb.getTables().get(root.getTargetTableName());
assert(targetTable != null);
CatalogMap<Index> allIndexes = targetTable.getIndexes();
List<Integer> maxCoveredGroupByColumns = new ArrayList<>();
ArrayList<AbstractExpression> maxCoveredBindings = null;
Index pickedUpIndex = null;
boolean foundAllGroupByCoveredIndex = false;
for (Index index : allIndexes) {
if ( ! IndexType.isScannable(index.getType())) {
continue;
}
if ( ! index.getPredicatejson().isEmpty()) {
// do not try to look at Partial/Sparse index
continue;
}
ArrayList<AbstractExpression> bindings = new ArrayList<>();
List<Integer> coveredGroupByColumns = calculateGroupbyColumnsCovered(
index, fromTableAlias, bindings);
if (coveredGroupByColumns.size() > maxCoveredGroupByColumns.size()) {
maxCoveredGroupByColumns = coveredGroupByColumns;
pickedUpIndex = index;
maxCoveredBindings = bindings;
if (maxCoveredGroupByColumns.size() == groupBys.size()) {
foundAllGroupByCoveredIndex = true;
break;
}
}
}
if (pickedUpIndex == null) {
return root;
}
IndexScanPlanNode indexScanNode = new IndexScanPlanNode(
root, null, pickedUpIndex, SortDirectionType.INVALID);
indexScanNode.setForGroupingOnly();
indexScanNode.setBindings(maxCoveredBindings);
gbInfo.m_coveredGroupByColumns = maxCoveredGroupByColumns;
gbInfo.m_canBeFullySerialized = foundAllGroupByCoveredIndex;
return indexScanNode;
} | java | private AbstractPlanNode indexAccessForGroupByExprs(SeqScanPlanNode root,
IndexGroupByInfo gbInfo) {
if (! root.isPersistentTableScan()) {
// subquery and common tables are not handled
return root;
}
String fromTableAlias = root.getTargetTableAlias();
assert(fromTableAlias != null);
List<ParsedColInfo> groupBys = m_parsedSelect.groupByColumns();
Table targetTable = m_catalogDb.getTables().get(root.getTargetTableName());
assert(targetTable != null);
CatalogMap<Index> allIndexes = targetTable.getIndexes();
List<Integer> maxCoveredGroupByColumns = new ArrayList<>();
ArrayList<AbstractExpression> maxCoveredBindings = null;
Index pickedUpIndex = null;
boolean foundAllGroupByCoveredIndex = false;
for (Index index : allIndexes) {
if ( ! IndexType.isScannable(index.getType())) {
continue;
}
if ( ! index.getPredicatejson().isEmpty()) {
// do not try to look at Partial/Sparse index
continue;
}
ArrayList<AbstractExpression> bindings = new ArrayList<>();
List<Integer> coveredGroupByColumns = calculateGroupbyColumnsCovered(
index, fromTableAlias, bindings);
if (coveredGroupByColumns.size() > maxCoveredGroupByColumns.size()) {
maxCoveredGroupByColumns = coveredGroupByColumns;
pickedUpIndex = index;
maxCoveredBindings = bindings;
if (maxCoveredGroupByColumns.size() == groupBys.size()) {
foundAllGroupByCoveredIndex = true;
break;
}
}
}
if (pickedUpIndex == null) {
return root;
}
IndexScanPlanNode indexScanNode = new IndexScanPlanNode(
root, null, pickedUpIndex, SortDirectionType.INVALID);
indexScanNode.setForGroupingOnly();
indexScanNode.setBindings(maxCoveredBindings);
gbInfo.m_coveredGroupByColumns = maxCoveredGroupByColumns;
gbInfo.m_canBeFullySerialized = foundAllGroupByCoveredIndex;
return indexScanNode;
} | [
"private",
"AbstractPlanNode",
"indexAccessForGroupByExprs",
"(",
"SeqScanPlanNode",
"root",
",",
"IndexGroupByInfo",
"gbInfo",
")",
"{",
"if",
"(",
"!",
"root",
".",
"isPersistentTableScan",
"(",
")",
")",
"{",
"// subquery and common tables are not handled",
"return",
... | Turn sequential scan to index scan for group by if possible | [
"Turn",
"sequential",
"scan",
"to",
"index",
"scan",
"for",
"group",
"by",
"if",
"possible"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/PlanAssembler.java#L2855-L2912 | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.