repo stringlengths 7 58 | path stringlengths 12 218 | func_name stringlengths 3 140 | original_string stringlengths 73 34.1k | language stringclasses 1 value | code stringlengths 73 34.1k | code_tokens list | docstring stringlengths 3 16k | docstring_tokens list | sha stringlengths 40 40 | url stringlengths 105 339 | partition stringclasses 1 value |
|---|---|---|---|---|---|---|---|---|---|---|---|
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/persist/Log.java | Log.processScript | private void processScript() {
ScriptReaderBase scr = null;
try {
if (database.isFilesInJar()
|| fa.isStreamElement(scriptFileName)) {
scr = ScriptReaderBase.newScriptReader(database,
scriptFileName,
scriptFormat);
Session session =
database.sessionManager.getSysSessionForScript(database);
scr.readAll(session);
scr.close();
}
} catch (Throwable e) {
if (scr != null) {
scr.close();
if (cache != null) {
cache.close(false);
}
closeAllTextCaches(false);
}
database.logger.appLog.logContext(e, null);
if (e instanceof HsqlException) {
throw (HsqlException) e;
} else if (e instanceof IOException) {
throw Error.error(ErrorCode.FILE_IO_ERROR, e.toString());
} else if (e instanceof OutOfMemoryError) {
throw Error.error(ErrorCode.OUT_OF_MEMORY);
} else {
throw Error.error(ErrorCode.GENERAL_ERROR, e.toString());
}
}
} | java | private void processScript() {
ScriptReaderBase scr = null;
try {
if (database.isFilesInJar()
|| fa.isStreamElement(scriptFileName)) {
scr = ScriptReaderBase.newScriptReader(database,
scriptFileName,
scriptFormat);
Session session =
database.sessionManager.getSysSessionForScript(database);
scr.readAll(session);
scr.close();
}
} catch (Throwable e) {
if (scr != null) {
scr.close();
if (cache != null) {
cache.close(false);
}
closeAllTextCaches(false);
}
database.logger.appLog.logContext(e, null);
if (e instanceof HsqlException) {
throw (HsqlException) e;
} else if (e instanceof IOException) {
throw Error.error(ErrorCode.FILE_IO_ERROR, e.toString());
} else if (e instanceof OutOfMemoryError) {
throw Error.error(ErrorCode.OUT_OF_MEMORY);
} else {
throw Error.error(ErrorCode.GENERAL_ERROR, e.toString());
}
}
} | [
"private",
"void",
"processScript",
"(",
")",
"{",
"ScriptReaderBase",
"scr",
"=",
"null",
";",
"try",
"{",
"if",
"(",
"database",
".",
"isFilesInJar",
"(",
")",
"||",
"fa",
".",
"isStreamElement",
"(",
"scriptFileName",
")",
")",
"{",
"scr",
"=",
"Scrip... | Performs all the commands in the .script file. | [
"Performs",
"all",
"the",
"commands",
"in",
"the",
".",
"script",
"file",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/persist/Log.java#L768-L808 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/persist/Log.java | Log.processDataFile | private void processDataFile() {
// OOo related code
if (database.isStoredFileAccess()) {
return;
}
// OOo end
if (cache == null || filesReadOnly
|| !fa.isStreamElement(logFileName)) {
return;
}
File file = new File(logFileName);
long logLength = file.length();
long dataLength = cache.getFileFreePos();
if (logLength + dataLength > cache.maxDataFileSize) {
database.logger.needsCheckpoint = true;
}
} | java | private void processDataFile() {
// OOo related code
if (database.isStoredFileAccess()) {
return;
}
// OOo end
if (cache == null || filesReadOnly
|| !fa.isStreamElement(logFileName)) {
return;
}
File file = new File(logFileName);
long logLength = file.length();
long dataLength = cache.getFileFreePos();
if (logLength + dataLength > cache.maxDataFileSize) {
database.logger.needsCheckpoint = true;
}
} | [
"private",
"void",
"processDataFile",
"(",
")",
"{",
"// OOo related code",
"if",
"(",
"database",
".",
"isStoredFileAccess",
"(",
")",
")",
"{",
"return",
";",
"}",
"// OOo end",
"if",
"(",
"cache",
"==",
"null",
"||",
"filesReadOnly",
"||",
"!",
"fa",
".... | Defrag large data files when the sum of .log and .data files is large. | [
"Defrag",
"large",
"data",
"files",
"when",
"the",
"sum",
"of",
".",
"log",
"and",
".",
"data",
"files",
"is",
"large",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/persist/Log.java#L813-L833 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/persist/Log.java | Log.processLog | private void processLog() {
if (!database.isFilesInJar() && fa.isStreamElement(logFileName)) {
ScriptRunner.runScript(database, logFileName,
ScriptWriterBase.SCRIPT_TEXT_170);
}
} | java | private void processLog() {
if (!database.isFilesInJar() && fa.isStreamElement(logFileName)) {
ScriptRunner.runScript(database, logFileName,
ScriptWriterBase.SCRIPT_TEXT_170);
}
} | [
"private",
"void",
"processLog",
"(",
")",
"{",
"if",
"(",
"!",
"database",
".",
"isFilesInJar",
"(",
")",
"&&",
"fa",
".",
"isStreamElement",
"(",
"logFileName",
")",
")",
"{",
"ScriptRunner",
".",
"runScript",
"(",
"database",
",",
"logFileName",
",",
... | Performs all the commands in the .log file. | [
"Performs",
"all",
"the",
"commands",
"in",
"the",
".",
"log",
"file",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/persist/Log.java#L838-L844 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/persist/Log.java | Log.restoreBackup | private void restoreBackup() {
if (incBackup) {
restoreBackupIncremental();
return;
}
// in case data file cannot be deleted, reset it
DataFileCache.deleteOrResetFreePos(database, fileName + ".data");
try {
FileArchiver.unarchive(fileName + ".backup", fileName + ".data",
database.getFileAccess(),
FileArchiver.COMPRESSION_ZIP);
} catch (Exception e) {
throw Error.error(ErrorCode.FILE_IO_ERROR,
ErrorCode.M_Message_Pair, new Object[] {
fileName + ".backup", e.toString()
});
}
} | java | private void restoreBackup() {
if (incBackup) {
restoreBackupIncremental();
return;
}
// in case data file cannot be deleted, reset it
DataFileCache.deleteOrResetFreePos(database, fileName + ".data");
try {
FileArchiver.unarchive(fileName + ".backup", fileName + ".data",
database.getFileAccess(),
FileArchiver.COMPRESSION_ZIP);
} catch (Exception e) {
throw Error.error(ErrorCode.FILE_IO_ERROR,
ErrorCode.M_Message_Pair, new Object[] {
fileName + ".backup", e.toString()
});
}
} | [
"private",
"void",
"restoreBackup",
"(",
")",
"{",
"if",
"(",
"incBackup",
")",
"{",
"restoreBackupIncremental",
"(",
")",
";",
"return",
";",
"}",
"// in case data file cannot be deleted, reset it",
"DataFileCache",
".",
"deleteOrResetFreePos",
"(",
"database",
",",
... | Restores a compressed backup or the .data file. | [
"Restores",
"a",
"compressed",
"backup",
"or",
"the",
".",
"data",
"file",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/persist/Log.java#L849-L870 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/persist/Log.java | Log.restoreBackupIncremental | private void restoreBackupIncremental() {
try {
if (fa.isStreamElement(fileName + ".backup")) {
RAShadowFile.restoreFile(fileName + ".backup",
fileName + ".data");
} else {
/*
// this is to ensure file has been written fully but it is not necessary
// as semantics dictate that if a backup does not exist, the file
// was never changed or was fully written to
if (FileUtil.exists(cacheFileName)) {
int flags = DataFileCache.getFlags(cacheFileName);
if (!BitMap.isSet(flags, DataFileCache.FLAG_ISSAVED)) {
FileUtil.delete(cacheFileName);
}
}
*/
}
deleteBackup();
} catch (IOException e) {
throw Error.error(ErrorCode.FILE_IO_ERROR, fileName + ".backup");
}
} | java | private void restoreBackupIncremental() {
try {
if (fa.isStreamElement(fileName + ".backup")) {
RAShadowFile.restoreFile(fileName + ".backup",
fileName + ".data");
} else {
/*
// this is to ensure file has been written fully but it is not necessary
// as semantics dictate that if a backup does not exist, the file
// was never changed or was fully written to
if (FileUtil.exists(cacheFileName)) {
int flags = DataFileCache.getFlags(cacheFileName);
if (!BitMap.isSet(flags, DataFileCache.FLAG_ISSAVED)) {
FileUtil.delete(cacheFileName);
}
}
*/
}
deleteBackup();
} catch (IOException e) {
throw Error.error(ErrorCode.FILE_IO_ERROR, fileName + ".backup");
}
} | [
"private",
"void",
"restoreBackupIncremental",
"(",
")",
"{",
"try",
"{",
"if",
"(",
"fa",
".",
"isStreamElement",
"(",
"fileName",
"+",
"\".backup\"",
")",
")",
"{",
"RAShadowFile",
".",
"restoreFile",
"(",
"fileName",
"+",
"\".backup\"",
",",
"fileName",
"... | Restores in from an incremental backup | [
"Restores",
"in",
"from",
"an",
"incremental",
"backup"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/persist/Log.java#L875-L900 | train |
VoltDB/voltdb | src/frontend/org/voltdb/iv2/MpInitiator.java | MpInitiator.updateCatalog | public void updateCatalog(String diffCmds, CatalogContext context, boolean isReplay,
boolean requireCatalogDiffCmdsApplyToEE, boolean requiresNewExportGeneration)
{
// note this will never require snapshot isolation because the MPI has no snapshot funtionality
m_executionSite.updateCatalog(diffCmds, context, false, true, Long.MIN_VALUE, Long.MIN_VALUE, Long.MIN_VALUE,
isReplay, requireCatalogDiffCmdsApplyToEE, requiresNewExportGeneration);
m_scheduler.updateCatalog(diffCmds, context);
} | java | public void updateCatalog(String diffCmds, CatalogContext context, boolean isReplay,
boolean requireCatalogDiffCmdsApplyToEE, boolean requiresNewExportGeneration)
{
// note this will never require snapshot isolation because the MPI has no snapshot funtionality
m_executionSite.updateCatalog(diffCmds, context, false, true, Long.MIN_VALUE, Long.MIN_VALUE, Long.MIN_VALUE,
isReplay, requireCatalogDiffCmdsApplyToEE, requiresNewExportGeneration);
m_scheduler.updateCatalog(diffCmds, context);
} | [
"public",
"void",
"updateCatalog",
"(",
"String",
"diffCmds",
",",
"CatalogContext",
"context",
",",
"boolean",
"isReplay",
",",
"boolean",
"requireCatalogDiffCmdsApplyToEE",
",",
"boolean",
"requiresNewExportGeneration",
")",
"{",
"// note this will never require snapshot is... | Update the MPI's Site's catalog. Unlike the SPI, this is not going to
run from the same Site's thread; this is actually going to run from some
other local SPI's Site thread. Since the MPI's site thread is going to
be blocked running the EveryPartitionTask for the catalog update, this
is currently safe with no locking. And yes, I'm a horrible person. | [
"Update",
"the",
"MPI",
"s",
"Site",
"s",
"catalog",
".",
"Unlike",
"the",
"SPI",
"this",
"is",
"not",
"going",
"to",
"run",
"from",
"the",
"same",
"Site",
"s",
"thread",
";",
"this",
"is",
"actually",
"going",
"to",
"run",
"from",
"some",
"other",
"... | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/iv2/MpInitiator.java#L216-L223 | train |
VoltDB/voltdb | src/frontend/org/voltdb/AbstractTopology.java | AbstractTopology.mutateAddNewHosts | public static Pair<AbstractTopology, ImmutableList<Integer>> mutateAddNewHosts(AbstractTopology currentTopology,
Map<Integer, HostInfo> newHostInfos) {
int startingPartitionId = getNextFreePartitionId(currentTopology);
TopologyBuilder topologyBuilder = addPartitionsToHosts(newHostInfos, Collections.emptySet(),
currentTopology.getReplicationFactor(), startingPartitionId);
ImmutableList.Builder<Integer> newPartitions = ImmutableList.builder();
for (PartitionBuilder pb : topologyBuilder.m_partitions) {
newPartitions.add(pb.m_id);
}
return Pair.of(new AbstractTopology(currentTopology, topologyBuilder), newPartitions.build());
} | java | public static Pair<AbstractTopology, ImmutableList<Integer>> mutateAddNewHosts(AbstractTopology currentTopology,
Map<Integer, HostInfo> newHostInfos) {
int startingPartitionId = getNextFreePartitionId(currentTopology);
TopologyBuilder topologyBuilder = addPartitionsToHosts(newHostInfos, Collections.emptySet(),
currentTopology.getReplicationFactor(), startingPartitionId);
ImmutableList.Builder<Integer> newPartitions = ImmutableList.builder();
for (PartitionBuilder pb : topologyBuilder.m_partitions) {
newPartitions.add(pb.m_id);
}
return Pair.of(new AbstractTopology(currentTopology, topologyBuilder), newPartitions.build());
} | [
"public",
"static",
"Pair",
"<",
"AbstractTopology",
",",
"ImmutableList",
"<",
"Integer",
">",
">",
"mutateAddNewHosts",
"(",
"AbstractTopology",
"currentTopology",
",",
"Map",
"<",
"Integer",
",",
"HostInfo",
">",
"newHostInfos",
")",
"{",
"int",
"startingPartit... | Add new hosts to an existing topology and layout partitions on those hosts
@param currentTopology to extend
@param newHostInfos new hosts to add to topology
@return update {@link AbstractTopology} with new hosts and {@link ImmutableList} of new partition IDs
@throws RuntimeException if hosts are not valid for topology | [
"Add",
"new",
"hosts",
"to",
"an",
"existing",
"topology",
"and",
"layout",
"partitions",
"on",
"those",
"hosts"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/AbstractTopology.java#L923-L936 | train |
VoltDB/voltdb | src/frontend/org/voltdb/AbstractTopology.java | AbstractTopology.mutateRemoveHosts | public static Pair<AbstractTopology, Set<Integer>> mutateRemoveHosts(AbstractTopology currentTopology,
Set<Integer> removalHosts) {
Set<Integer> removalPartitionIds = getPartitionIdsForHosts(currentTopology, removalHosts);
return Pair.of(new AbstractTopology(currentTopology, removalHosts, removalPartitionIds), removalPartitionIds);
} | java | public static Pair<AbstractTopology, Set<Integer>> mutateRemoveHosts(AbstractTopology currentTopology,
Set<Integer> removalHosts) {
Set<Integer> removalPartitionIds = getPartitionIdsForHosts(currentTopology, removalHosts);
return Pair.of(new AbstractTopology(currentTopology, removalHosts, removalPartitionIds), removalPartitionIds);
} | [
"public",
"static",
"Pair",
"<",
"AbstractTopology",
",",
"Set",
"<",
"Integer",
">",
">",
"mutateRemoveHosts",
"(",
"AbstractTopology",
"currentTopology",
",",
"Set",
"<",
"Integer",
">",
"removalHosts",
")",
"{",
"Set",
"<",
"Integer",
">",
"removalPartitionId... | Remove hosts from an existing topology
@param currentTopology to extend
@param removalHostInfos hosts to be removed from topology
@return update {@link AbstractTopology} with remaining hosts and removed partition IDs
@throws RuntimeException if hosts are not valid for topology | [
"Remove",
"hosts",
"from",
"an",
"existing",
"topology"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/AbstractTopology.java#L946-L950 | train |
VoltDB/voltdb | src/frontend/org/voltdb/AbstractTopology.java | AbstractTopology.getPartitionGroupPeers | public Set<Integer> getPartitionGroupPeers(int hostId) {
Set<Integer> peers = Sets.newHashSet();
for (Partition p : hostsById.get(hostId).partitions) {
peers.addAll(p.hostIds);
}
return peers;
} | java | public Set<Integer> getPartitionGroupPeers(int hostId) {
Set<Integer> peers = Sets.newHashSet();
for (Partition p : hostsById.get(hostId).partitions) {
peers.addAll(p.hostIds);
}
return peers;
} | [
"public",
"Set",
"<",
"Integer",
">",
"getPartitionGroupPeers",
"(",
"int",
"hostId",
")",
"{",
"Set",
"<",
"Integer",
">",
"peers",
"=",
"Sets",
".",
"newHashSet",
"(",
")",
";",
"for",
"(",
"Partition",
"p",
":",
"hostsById",
".",
"get",
"(",
"hostId... | get all the hostIds in the partition group where the host with the given host id belongs
@param hostId the given hostId
@return all the hostIds in the partition group | [
"get",
"all",
"the",
"hostIds",
"in",
"the",
"partition",
"group",
"where",
"the",
"host",
"with",
"the",
"given",
"host",
"id",
"belongs"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/AbstractTopology.java#L1393-L1399 | train |
VoltDB/voltdb | src/frontend/org/voltdb/AbstractTopology.java | AbstractTopology.sortHostIdByHGDistance | public static List<Collection<Integer>> sortHostIdByHGDistance(int hostId, Map<Integer, String> hostGroups) {
String localHostGroup = hostGroups.get(hostId);
Preconditions.checkArgument(localHostGroup != null);
HAGroup localHaGroup = new HAGroup(localHostGroup);
// Memorize the distance, map the distance to host ids.
Multimap<Integer, Integer> distanceMap = MultimapBuilder.treeKeys(Comparator.<Integer>naturalOrder().reversed())
.arrayListValues().build();
for (Map.Entry<Integer, String> entry : hostGroups.entrySet()) {
if (hostId == entry.getKey()) {
continue;
}
distanceMap.put(localHaGroup.getRelationshipTo(entry.getValue()).m_distance, entry.getKey());
}
return new ArrayList<>(distanceMap.asMap().values());
} | java | public static List<Collection<Integer>> sortHostIdByHGDistance(int hostId, Map<Integer, String> hostGroups) {
String localHostGroup = hostGroups.get(hostId);
Preconditions.checkArgument(localHostGroup != null);
HAGroup localHaGroup = new HAGroup(localHostGroup);
// Memorize the distance, map the distance to host ids.
Multimap<Integer, Integer> distanceMap = MultimapBuilder.treeKeys(Comparator.<Integer>naturalOrder().reversed())
.arrayListValues().build();
for (Map.Entry<Integer, String> entry : hostGroups.entrySet()) {
if (hostId == entry.getKey()) {
continue;
}
distanceMap.put(localHaGroup.getRelationshipTo(entry.getValue()).m_distance, entry.getKey());
}
return new ArrayList<>(distanceMap.asMap().values());
} | [
"public",
"static",
"List",
"<",
"Collection",
"<",
"Integer",
">",
">",
"sortHostIdByHGDistance",
"(",
"int",
"hostId",
",",
"Map",
"<",
"Integer",
",",
"String",
">",
"hostGroups",
")",
"{",
"String",
"localHostGroup",
"=",
"hostGroups",
".",
"get",
"(",
... | Sort all nodes in reverse hostGroup distance order, then group by rack-aware group, local host id is excluded.
@param hostId the local host id
@param hostGroups a host id to group map
@return sorted grouped host ids from farthest to nearest | [
"Sort",
"all",
"nodes",
"in",
"reverse",
"hostGroup",
"distance",
"order",
"then",
"group",
"by",
"rack",
"-",
"aware",
"group",
"local",
"host",
"id",
"is",
"excluded",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/AbstractTopology.java#L1435-L1452 | train |
VoltDB/voltdb | src/frontend/org/voltdb/importer/ImporterStatsCollector.java | ImporterStatsCollector.reportQueued | public void reportQueued(String importerName, String procName) {
StatsInfo statsInfo = getStatsInfo(importerName, procName);
statsInfo.m_pendingCount.incrementAndGet();
} | java | public void reportQueued(String importerName, String procName) {
StatsInfo statsInfo = getStatsInfo(importerName, procName);
statsInfo.m_pendingCount.incrementAndGet();
} | [
"public",
"void",
"reportQueued",
"(",
"String",
"importerName",
",",
"String",
"procName",
")",
"{",
"StatsInfo",
"statsInfo",
"=",
"getStatsInfo",
"(",
"importerName",
",",
"procName",
")",
";",
"statsInfo",
".",
"m_pendingCount",
".",
"incrementAndGet",
"(",
... | An insert request was queued | [
"An",
"insert",
"request",
"was",
"queued"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/importer/ImporterStatsCollector.java#L74-L77 | train |
VoltDB/voltdb | src/frontend/org/voltdb/importer/ImporterStatsCollector.java | ImporterStatsCollector.reportFailure | public void reportFailure(String importerName, String procName, boolean decrementPending) {
StatsInfo statsInfo = getStatsInfo(importerName, procName);
if (decrementPending) {
statsInfo.m_pendingCount.decrementAndGet();
}
statsInfo.m_failureCount.incrementAndGet();
} | java | public void reportFailure(String importerName, String procName, boolean decrementPending) {
StatsInfo statsInfo = getStatsInfo(importerName, procName);
if (decrementPending) {
statsInfo.m_pendingCount.decrementAndGet();
}
statsInfo.m_failureCount.incrementAndGet();
} | [
"public",
"void",
"reportFailure",
"(",
"String",
"importerName",
",",
"String",
"procName",
",",
"boolean",
"decrementPending",
")",
"{",
"StatsInfo",
"statsInfo",
"=",
"getStatsInfo",
"(",
"importerName",
",",
"procName",
")",
";",
"if",
"(",
"decrementPending",... | Use this when the insert fails even before the request is queued by the InternalConnectionHandler | [
"Use",
"this",
"when",
"the",
"insert",
"fails",
"even",
"before",
"the",
"request",
"is",
"queued",
"by",
"the",
"InternalConnectionHandler"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/importer/ImporterStatsCollector.java#L85-L91 | train |
VoltDB/voltdb | src/frontend/org/voltdb/importer/ImporterStatsCollector.java | ImporterStatsCollector.reportSuccess | private void reportSuccess(String importerName, String procName) {
StatsInfo statsInfo = getStatsInfo(importerName, procName);
statsInfo.m_pendingCount.decrementAndGet();
statsInfo.m_successCount.incrementAndGet();
} | java | private void reportSuccess(String importerName, String procName) {
StatsInfo statsInfo = getStatsInfo(importerName, procName);
statsInfo.m_pendingCount.decrementAndGet();
statsInfo.m_successCount.incrementAndGet();
} | [
"private",
"void",
"reportSuccess",
"(",
"String",
"importerName",
",",
"String",
"procName",
")",
"{",
"StatsInfo",
"statsInfo",
"=",
"getStatsInfo",
"(",
"importerName",
",",
"procName",
")",
";",
"statsInfo",
".",
"m_pendingCount",
".",
"decrementAndGet",
"(",
... | One insert succeeded | [
"One",
"insert",
"succeeded"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/importer/ImporterStatsCollector.java#L99-L103 | train |
VoltDB/voltdb | src/frontend/org/voltdb/importer/ImporterStatsCollector.java | ImporterStatsCollector.reportRetry | private void reportRetry(String importerName, String procName) {
StatsInfo statsInfo = getStatsInfo(importerName, procName);
statsInfo.m_retryCount.incrementAndGet();
} | java | private void reportRetry(String importerName, String procName) {
StatsInfo statsInfo = getStatsInfo(importerName, procName);
statsInfo.m_retryCount.incrementAndGet();
} | [
"private",
"void",
"reportRetry",
"(",
"String",
"importerName",
",",
"String",
"procName",
")",
"{",
"StatsInfo",
"statsInfo",
"=",
"getStatsInfo",
"(",
"importerName",
",",
"procName",
")",
";",
"statsInfo",
".",
"m_retryCount",
".",
"incrementAndGet",
"(",
")... | One insert was retried | [
"One",
"insert",
"was",
"retried"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/importer/ImporterStatsCollector.java#L106-L109 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/lib/tar/TarFileOutputStream.java | TarFileOutputStream.writeBlock | public void writeBlock(byte[] block) throws IOException {
if (block.length != 512) {
throw new IllegalArgumentException(
RB.singleton.getString(RB.BAD_BLOCK_WRITE_LEN, block.length));
}
write(block, block.length);
} | java | public void writeBlock(byte[] block) throws IOException {
if (block.length != 512) {
throw new IllegalArgumentException(
RB.singleton.getString(RB.BAD_BLOCK_WRITE_LEN, block.length));
}
write(block, block.length);
} | [
"public",
"void",
"writeBlock",
"(",
"byte",
"[",
"]",
"block",
")",
"throws",
"IOException",
"{",
"if",
"(",
"block",
".",
"length",
"!=",
"512",
")",
"{",
"throw",
"new",
"IllegalArgumentException",
"(",
"RB",
".",
"singleton",
".",
"getString",
"(",
"... | Write a user-specified 512-byte block.
For efficiency, write(int) should be used when writing file body content.
@see #write(int) | [
"Write",
"a",
"user",
"-",
"specified",
"512",
"-",
"byte",
"block",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/lib/tar/TarFileOutputStream.java#L219-L227 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/lib/tar/TarFileOutputStream.java | TarFileOutputStream.writePadBlocks | public void writePadBlocks(int blockCount) throws IOException {
for (int i = 0; i < blockCount; i++) {
write(ZERO_BLOCK, ZERO_BLOCK.length);
}
} | java | public void writePadBlocks(int blockCount) throws IOException {
for (int i = 0; i < blockCount; i++) {
write(ZERO_BLOCK, ZERO_BLOCK.length);
}
} | [
"public",
"void",
"writePadBlocks",
"(",
"int",
"blockCount",
")",
"throws",
"IOException",
"{",
"for",
"(",
"int",
"i",
"=",
"0",
";",
"i",
"<",
"blockCount",
";",
"i",
"++",
")",
"{",
"write",
"(",
"ZERO_BLOCK",
",",
"ZERO_BLOCK",
".",
"length",
")",... | Writes the specified quantity of zero'd blocks. | [
"Writes",
"the",
"specified",
"quantity",
"of",
"zero",
"d",
"blocks",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/lib/tar/TarFileOutputStream.java#L232-L237 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/util/ZaurusEditor.java | ZaurusEditor.getAllTables | private Vector getAllTables() {
Vector result = new Vector(20);
try {
if (cConn == null) {
return null;
}
dbmeta = cConn.getMetaData();
String[] tableTypes = { "TABLE" };
ResultSet allTables = dbmeta.getTables(null, null, null,
tableTypes);
while (allTables.next()) {
String aktTable = allTables.getString("TABLE_NAME");
ResultSet primKeys = dbmeta.getPrimaryKeys(null, null,
aktTable);
// take only table with a primary key
if (primKeys.next()) {
result.addElement(aktTable);
}
primKeys.close();
}
allTables.close();
} catch (SQLException e) {
// System.out.println("SQL Exception: " + e.getMessage());
}
return result;
} | java | private Vector getAllTables() {
Vector result = new Vector(20);
try {
if (cConn == null) {
return null;
}
dbmeta = cConn.getMetaData();
String[] tableTypes = { "TABLE" };
ResultSet allTables = dbmeta.getTables(null, null, null,
tableTypes);
while (allTables.next()) {
String aktTable = allTables.getString("TABLE_NAME");
ResultSet primKeys = dbmeta.getPrimaryKeys(null, null,
aktTable);
// take only table with a primary key
if (primKeys.next()) {
result.addElement(aktTable);
}
primKeys.close();
}
allTables.close();
} catch (SQLException e) {
// System.out.println("SQL Exception: " + e.getMessage());
}
return result;
} | [
"private",
"Vector",
"getAllTables",
"(",
")",
"{",
"Vector",
"result",
"=",
"new",
"Vector",
"(",
"20",
")",
";",
"try",
"{",
"if",
"(",
"cConn",
"==",
"null",
")",
"{",
"return",
"null",
";",
"}",
"dbmeta",
"=",
"cConn",
".",
"getMetaData",
"(",
... | exclude tables without primary key | [
"exclude",
"tables",
"without",
"primary",
"key"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/util/ZaurusEditor.java#L572-L607 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/util/ZaurusEditor.java | ZaurusEditor.getChoosenTableIndex | private int getChoosenTableIndex() {
String tableName = cTables.getSelectedItem();
// System.out.println("in getChoosenTableIndex, selected Item is "+tableName);
int index = getTableIndex(tableName);
if (index >= 0) {
// System.out.println("table found, index: " + index);
return index;
} // end of if (index >= 0)
ZaurusTableForm tableForm = new ZaurusTableForm(tableName, cConn);
pForm.add(tableName, tableForm);
vHoldTableNames.addElement(tableName);
vHoldForms.addElement(tableForm);
// System.out.println("new tableform for table "+tableName+", index: " + index);
return vHoldTableNames.size() - 1;
} | java | private int getChoosenTableIndex() {
String tableName = cTables.getSelectedItem();
// System.out.println("in getChoosenTableIndex, selected Item is "+tableName);
int index = getTableIndex(tableName);
if (index >= 0) {
// System.out.println("table found, index: " + index);
return index;
} // end of if (index >= 0)
ZaurusTableForm tableForm = new ZaurusTableForm(tableName, cConn);
pForm.add(tableName, tableForm);
vHoldTableNames.addElement(tableName);
vHoldForms.addElement(tableForm);
// System.out.println("new tableform for table "+tableName+", index: " + index);
return vHoldTableNames.size() - 1;
} | [
"private",
"int",
"getChoosenTableIndex",
"(",
")",
"{",
"String",
"tableName",
"=",
"cTables",
".",
"getSelectedItem",
"(",
")",
";",
"// System.out.println(\"in getChoosenTableIndex, selected Item is \"+tableName);",
"int",
"index",
"=",
"getTableIndex",
"(",
"tableName",... | if the table name is not in vHoldTableNames, create a ZaurusTableForm for it | [
"if",
"the",
"table",
"name",
"is",
"not",
"in",
"vHoldTableNames",
"create",
"a",
"ZaurusTableForm",
"for",
"it"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/util/ZaurusEditor.java#L611-L632 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/util/ZaurusEditor.java | ZaurusEditor.getTableIndex | private int getTableIndex(String tableName) {
int index;
// System.out.println("begin searching for "+tableName);
for (index = 0; index < vHoldTableNames.size(); index++) {
// System.out.println("in getTableIndex searching for "+tableName+", index: "+index);
if (tableName.equals((String) vHoldTableNames.elementAt(index))) {
return index;
} // end of if (tableName.equals(vHoldTableNames.elementAt(index)))
} // end of for (index = 0; index < vHoldTableNames.size(); index ++)
return -1;
} | java | private int getTableIndex(String tableName) {
int index;
// System.out.println("begin searching for "+tableName);
for (index = 0; index < vHoldTableNames.size(); index++) {
// System.out.println("in getTableIndex searching for "+tableName+", index: "+index);
if (tableName.equals((String) vHoldTableNames.elementAt(index))) {
return index;
} // end of if (tableName.equals(vHoldTableNames.elementAt(index)))
} // end of for (index = 0; index < vHoldTableNames.size(); index ++)
return -1;
} | [
"private",
"int",
"getTableIndex",
"(",
"String",
"tableName",
")",
"{",
"int",
"index",
";",
"// System.out.println(\"begin searching for \"+tableName);",
"for",
"(",
"index",
"=",
"0",
";",
"index",
"<",
"vHoldTableNames",
".",
"size",
"(",
")",
";",
"index",
... | if the name is not in vHoldTableNames, answer -1 | [
"if",
"the",
"name",
"is",
"not",
"in",
"vHoldTableNames",
"answer",
"-",
"1"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/util/ZaurusEditor.java#L636-L650 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/util/ZaurusEditor.java | ZaurusEditor.getWords | private String[] getWords() {
StringTokenizer tokenizer =
new StringTokenizer(fSearchWords.getText());
String[] result = new String[tokenizer.countTokens()];
int i = 0;
while (tokenizer.hasMoreTokens()) {
result[i++] = tokenizer.nextToken();
} // end of while ((tokenizer.hasMoreTokens()))
return result;
} | java | private String[] getWords() {
StringTokenizer tokenizer =
new StringTokenizer(fSearchWords.getText());
String[] result = new String[tokenizer.countTokens()];
int i = 0;
while (tokenizer.hasMoreTokens()) {
result[i++] = tokenizer.nextToken();
} // end of while ((tokenizer.hasMoreTokens()))
return result;
} | [
"private",
"String",
"[",
"]",
"getWords",
"(",
")",
"{",
"StringTokenizer",
"tokenizer",
"=",
"new",
"StringTokenizer",
"(",
"fSearchWords",
".",
"getText",
"(",
")",
")",
";",
"String",
"[",
"]",
"result",
"=",
"new",
"String",
"[",
"tokenizer",
".",
"... | convert the search words in the textfield to an array of words | [
"convert",
"the",
"search",
"words",
"in",
"the",
"textfield",
"to",
"an",
"array",
"of",
"words"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/util/ZaurusEditor.java#L653-L665 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/util/ZaurusEditor.java | ZaurusEditor.initButtons | private void initButtons() {
// the buttons for the search form
bSearchRow = new Button("Search Rows");
bNewRow = new Button("Insert New Row");
bSearchRow.addActionListener(this);
bNewRow.addActionListener(this);
pSearchButs = new Panel();
pSearchButs.setLayout(new GridLayout(1, 0, 4, 4));
pSearchButs.add(bSearchRow);
pSearchButs.add(bNewRow);
// the buttons for editing a row
bCancel1 = new Button("Cancel");
bPrev = new Button("Prev");
bNext = new Button("Next");
bDelete = new Button("Delete");
lastButtonDelete = false;
bNewSearch = new Button("Search");
bCancel1.addActionListener(this);
bPrev.addActionListener(this);
bNext.addActionListener(this);
bDelete.addActionListener(this);
bNewSearch.addActionListener(this);
pEditButs = new Panel();
pEditButs.setLayout(new GridLayout(1, 0, 4, 4));
pEditButs.add(bCancel1);
pEditButs.add(bPrev);
pEditButs.add(bNext);
pEditButs.add(bDelete);
pEditButs.add(bNewSearch);
// the buttons for inserting a new row
pInsertButs = new Panel();
pInsertButs.setLayout(new GridLayout(1, 0, 4, 4));
bCancel2 = new Button("Cancel Insert");
bNewInsert = new Button("New Insert");
bNewSearch1 = new Button("Search");
bCancel2.addActionListener(this);
bNewInsert.addActionListener(this);
bNewSearch1.addActionListener(this);
pInsertButs.add(bCancel2);
pInsertButs.add(bNewInsert);
pInsertButs.add(bNewSearch1);
} | java | private void initButtons() {
// the buttons for the search form
bSearchRow = new Button("Search Rows");
bNewRow = new Button("Insert New Row");
bSearchRow.addActionListener(this);
bNewRow.addActionListener(this);
pSearchButs = new Panel();
pSearchButs.setLayout(new GridLayout(1, 0, 4, 4));
pSearchButs.add(bSearchRow);
pSearchButs.add(bNewRow);
// the buttons for editing a row
bCancel1 = new Button("Cancel");
bPrev = new Button("Prev");
bNext = new Button("Next");
bDelete = new Button("Delete");
lastButtonDelete = false;
bNewSearch = new Button("Search");
bCancel1.addActionListener(this);
bPrev.addActionListener(this);
bNext.addActionListener(this);
bDelete.addActionListener(this);
bNewSearch.addActionListener(this);
pEditButs = new Panel();
pEditButs.setLayout(new GridLayout(1, 0, 4, 4));
pEditButs.add(bCancel1);
pEditButs.add(bPrev);
pEditButs.add(bNext);
pEditButs.add(bDelete);
pEditButs.add(bNewSearch);
// the buttons for inserting a new row
pInsertButs = new Panel();
pInsertButs.setLayout(new GridLayout(1, 0, 4, 4));
bCancel2 = new Button("Cancel Insert");
bNewInsert = new Button("New Insert");
bNewSearch1 = new Button("Search");
bCancel2.addActionListener(this);
bNewInsert.addActionListener(this);
bNewSearch1.addActionListener(this);
pInsertButs.add(bCancel2);
pInsertButs.add(bNewInsert);
pInsertButs.add(bNewSearch1);
} | [
"private",
"void",
"initButtons",
"(",
")",
"{",
"// the buttons for the search form",
"bSearchRow",
"=",
"new",
"Button",
"(",
"\"Search Rows\"",
")",
";",
"bNewRow",
"=",
"new",
"Button",
"(",
"\"Insert New Row\"",
")",
";",
"bSearchRow",
".",
"addActionListener",... | init the three boxes for buttons | [
"init",
"the",
"three",
"boxes",
"for",
"buttons"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/util/ZaurusEditor.java#L668-L721 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/util/ZaurusEditor.java | ZaurusEditor.resetTableForms | private void resetTableForms() {
lForm.show(pForm, "search");
lButton.show(pButton, "search");
Vector vAllTables = getAllTables();
// fill the drop down list again
// get all table names and show a drop down list of them in cTables
cTables.removeAll();
for (Enumeration e = vAllTables.elements(); e.hasMoreElements(); ) {
cTables.addItem((String) e.nextElement());
}
// remove all form panels from pForm
for (Enumeration e = vHoldForms.elements(); e.hasMoreElements(); ) {
pForm.remove((ZaurusTableForm) e.nextElement());
} // end of while (Enumeration e = vHoldForms.elements(); e.hasMoreElements();)
// initialize a new list for the table names which have a form in pForm
vHoldTableNames = new Vector(20);
vHoldForms = new Vector(20);
} | java | private void resetTableForms() {
lForm.show(pForm, "search");
lButton.show(pButton, "search");
Vector vAllTables = getAllTables();
// fill the drop down list again
// get all table names and show a drop down list of them in cTables
cTables.removeAll();
for (Enumeration e = vAllTables.elements(); e.hasMoreElements(); ) {
cTables.addItem((String) e.nextElement());
}
// remove all form panels from pForm
for (Enumeration e = vHoldForms.elements(); e.hasMoreElements(); ) {
pForm.remove((ZaurusTableForm) e.nextElement());
} // end of while (Enumeration e = vHoldForms.elements(); e.hasMoreElements();)
// initialize a new list for the table names which have a form in pForm
vHoldTableNames = new Vector(20);
vHoldForms = new Vector(20);
} | [
"private",
"void",
"resetTableForms",
"(",
")",
"{",
"lForm",
".",
"show",
"(",
"pForm",
",",
"\"search\"",
")",
";",
"lButton",
".",
"show",
"(",
"pButton",
",",
"\"search\"",
")",
";",
"Vector",
"vAllTables",
"=",
"getAllTables",
"(",
")",
";",
"// fil... | reset everything after changes in the database | [
"reset",
"everything",
"after",
"changes",
"in",
"the",
"database"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/util/ZaurusEditor.java#L735-L758 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/ParsedUnionStmt.java | ParsedUnionStmt.getLeftmostSelectStmt | private ParsedSelectStmt getLeftmostSelectStmt() {
assert (!m_children.isEmpty());
AbstractParsedStmt firstChild = m_children.get(0);
if (firstChild instanceof ParsedSelectStmt) {
return (ParsedSelectStmt) firstChild;
} else {
assert(firstChild instanceof ParsedUnionStmt);
return ((ParsedUnionStmt)firstChild).getLeftmostSelectStmt();
}
} | java | private ParsedSelectStmt getLeftmostSelectStmt() {
assert (!m_children.isEmpty());
AbstractParsedStmt firstChild = m_children.get(0);
if (firstChild instanceof ParsedSelectStmt) {
return (ParsedSelectStmt) firstChild;
} else {
assert(firstChild instanceof ParsedUnionStmt);
return ((ParsedUnionStmt)firstChild).getLeftmostSelectStmt();
}
} | [
"private",
"ParsedSelectStmt",
"getLeftmostSelectStmt",
"(",
")",
"{",
"assert",
"(",
"!",
"m_children",
".",
"isEmpty",
"(",
")",
")",
";",
"AbstractParsedStmt",
"firstChild",
"=",
"m_children",
".",
"get",
"(",
"0",
")",
";",
"if",
"(",
"firstChild",
"inst... | Return the leftmost child SELECT statement
@return ParsedSelectStmt | [
"Return",
"the",
"leftmost",
"child",
"SELECT",
"statement"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/ParsedUnionStmt.java#L353-L362 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/ParsedUnionStmt.java | ParsedUnionStmt.calculateContentDeterminismMessage | @Override
public String calculateContentDeterminismMessage() {
String ans = null;
for (AbstractParsedStmt child : m_children) {
ans = child.getContentDeterminismMessage();
if (ans != null) {
return ans;
}
}
return null;
} | java | @Override
public String calculateContentDeterminismMessage() {
String ans = null;
for (AbstractParsedStmt child : m_children) {
ans = child.getContentDeterminismMessage();
if (ans != null) {
return ans;
}
}
return null;
} | [
"@",
"Override",
"public",
"String",
"calculateContentDeterminismMessage",
"(",
")",
"{",
"String",
"ans",
"=",
"null",
";",
"for",
"(",
"AbstractParsedStmt",
"child",
":",
"m_children",
")",
"{",
"ans",
"=",
"child",
".",
"getContentDeterminismMessage",
"(",
")... | Here we search all the children, finding if each is content
deterministic. If it is we return right away. | [
"Here",
"we",
"search",
"all",
"the",
"children",
"finding",
"if",
"each",
"is",
"content",
"deterministic",
".",
"If",
"it",
"is",
"we",
"return",
"right",
"away",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/ParsedUnionStmt.java#L477-L487 | train |
VoltDB/voltdb | third_party/java/src/org/apache/zookeeper_voltpatches/server/persistence/FileTxnLog.java | FileTxnLog.rollLog | public synchronized void rollLog() throws IOException {
if (logStream != null) {
this.logStream.flush();
this.logStream = null;
oa = null;
}
} | java | public synchronized void rollLog() throws IOException {
if (logStream != null) {
this.logStream.flush();
this.logStream = null;
oa = null;
}
} | [
"public",
"synchronized",
"void",
"rollLog",
"(",
")",
"throws",
"IOException",
"{",
"if",
"(",
"logStream",
"!=",
"null",
")",
"{",
"this",
".",
"logStream",
".",
"flush",
"(",
")",
";",
"this",
".",
"logStream",
"=",
"null",
";",
"oa",
"=",
"null",
... | rollover the current log file to a new one.
@throws IOException | [
"rollover",
"the",
"current",
"log",
"file",
"to",
"a",
"new",
"one",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/third_party/java/src/org/apache/zookeeper_voltpatches/server/persistence/FileTxnLog.java#L163-L169 | train |
VoltDB/voltdb | third_party/java/src/org/apache/zookeeper_voltpatches/server/persistence/FileTxnLog.java | FileTxnLog.close | public synchronized void close() throws IOException {
if (logStream != null) {
logStream.close();
}
for (FileOutputStream log : streamsToFlush) {
log.close();
}
} | java | public synchronized void close() throws IOException {
if (logStream != null) {
logStream.close();
}
for (FileOutputStream log : streamsToFlush) {
log.close();
}
} | [
"public",
"synchronized",
"void",
"close",
"(",
")",
"throws",
"IOException",
"{",
"if",
"(",
"logStream",
"!=",
"null",
")",
"{",
"logStream",
".",
"close",
"(",
")",
";",
"}",
"for",
"(",
"FileOutputStream",
"log",
":",
"streamsToFlush",
")",
"{",
"log... | close all the open file handles
@throws IOException | [
"close",
"all",
"the",
"open",
"file",
"handles"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/third_party/java/src/org/apache/zookeeper_voltpatches/server/persistence/FileTxnLog.java#L175-L182 | train |
VoltDB/voltdb | third_party/java/src/org/apache/zookeeper_voltpatches/server/persistence/FileTxnLog.java | FileTxnLog.append | public synchronized boolean append(TxnHeader hdr, Record txn)
throws IOException
{
if (hdr != null) {
if (hdr.getZxid() <= lastZxidSeen) {
LOG.warn("Current zxid " + hdr.getZxid()
+ " is <= " + lastZxidSeen + " for "
+ hdr.getType());
}
if (logStream==null) {
if(LOG.isInfoEnabled()){
LOG.info("Creating new log file: log." +
Long.toHexString(hdr.getZxid()));
}
logFileWrite = new File(logDir, ("log." +
Long.toHexString(hdr.getZxid())));
fos = new FileOutputStream(logFileWrite);
logStream=new BufferedOutputStream(fos);
oa = BinaryOutputArchive.getArchive(logStream);
FileHeader fhdr = new FileHeader(TXNLOG_MAGIC,VERSION, dbId);
fhdr.serialize(oa, "fileheader");
currentSize = fos.getChannel().position();
streamsToFlush.add(fos);
}
padFile(fos);
byte[] buf = Util.marshallTxnEntry(hdr, txn);
if (buf == null || buf.length == 0) {
throw new IOException("Faulty serialization for header " +
"and txn");
}
Checksum crc = makeChecksumAlgorithm();
crc.update(buf, 0, buf.length);
oa.writeLong(crc.getValue(), "txnEntryCRC");
Util.writeTxnBytes(oa, buf);
return true;
}
return false;
} | java | public synchronized boolean append(TxnHeader hdr, Record txn)
throws IOException
{
if (hdr != null) {
if (hdr.getZxid() <= lastZxidSeen) {
LOG.warn("Current zxid " + hdr.getZxid()
+ " is <= " + lastZxidSeen + " for "
+ hdr.getType());
}
if (logStream==null) {
if(LOG.isInfoEnabled()){
LOG.info("Creating new log file: log." +
Long.toHexString(hdr.getZxid()));
}
logFileWrite = new File(logDir, ("log." +
Long.toHexString(hdr.getZxid())));
fos = new FileOutputStream(logFileWrite);
logStream=new BufferedOutputStream(fos);
oa = BinaryOutputArchive.getArchive(logStream);
FileHeader fhdr = new FileHeader(TXNLOG_MAGIC,VERSION, dbId);
fhdr.serialize(oa, "fileheader");
currentSize = fos.getChannel().position();
streamsToFlush.add(fos);
}
padFile(fos);
byte[] buf = Util.marshallTxnEntry(hdr, txn);
if (buf == null || buf.length == 0) {
throw new IOException("Faulty serialization for header " +
"and txn");
}
Checksum crc = makeChecksumAlgorithm();
crc.update(buf, 0, buf.length);
oa.writeLong(crc.getValue(), "txnEntryCRC");
Util.writeTxnBytes(oa, buf);
return true;
}
return false;
} | [
"public",
"synchronized",
"boolean",
"append",
"(",
"TxnHeader",
"hdr",
",",
"Record",
"txn",
")",
"throws",
"IOException",
"{",
"if",
"(",
"hdr",
"!=",
"null",
")",
"{",
"if",
"(",
"hdr",
".",
"getZxid",
"(",
")",
"<=",
"lastZxidSeen",
")",
"{",
"LOG"... | append an entry to the transaction log
@param hdr the header of the transaction
@param txn the transaction part of the entry
returns true iff something appended, otw false | [
"append",
"an",
"entry",
"to",
"the",
"transaction",
"log"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/third_party/java/src/org/apache/zookeeper_voltpatches/server/persistence/FileTxnLog.java#L190-L229 | train |
VoltDB/voltdb | third_party/java/src/org/apache/zookeeper_voltpatches/server/persistence/FileTxnLog.java | FileTxnLog.padFile | private void padFile(FileOutputStream out) throws IOException {
currentSize = Util.padLogFile(out, currentSize, preAllocSize);
} | java | private void padFile(FileOutputStream out) throws IOException {
currentSize = Util.padLogFile(out, currentSize, preAllocSize);
} | [
"private",
"void",
"padFile",
"(",
"FileOutputStream",
"out",
")",
"throws",
"IOException",
"{",
"currentSize",
"=",
"Util",
".",
"padLogFile",
"(",
"out",
",",
"currentSize",
",",
"preAllocSize",
")",
";",
"}"
] | pad the current file to increase its size
@param out the outputstream to be padded
@throws IOException | [
"pad",
"the",
"current",
"file",
"to",
"increase",
"its",
"size"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/third_party/java/src/org/apache/zookeeper_voltpatches/server/persistence/FileTxnLog.java#L236-L238 | train |
VoltDB/voltdb | third_party/java/src/org/apache/zookeeper_voltpatches/server/persistence/FileTxnLog.java | FileTxnLog.getLogFiles | public static File[] getLogFiles(File[] logDirList,long snapshotZxid) {
List<File> files = Util.sortDataDir(logDirList, "log", true);
long logZxid = 0;
// Find the log file that starts before or at the same time as the
// zxid of the snapshot
for (File f : files) {
long fzxid = Util.getZxidFromName(f.getName(), "log");
if (fzxid > snapshotZxid) {
continue;
}
// the files
// are sorted with zxid's
if (fzxid > logZxid) {
logZxid = fzxid;
}
}
List<File> v=new ArrayList<File>(5);
for (File f : files) {
long fzxid = Util.getZxidFromName(f.getName(), "log");
if (fzxid < logZxid) {
continue;
}
v.add(f);
}
return v.toArray(new File[0]);
} | java | public static File[] getLogFiles(File[] logDirList,long snapshotZxid) {
List<File> files = Util.sortDataDir(logDirList, "log", true);
long logZxid = 0;
// Find the log file that starts before or at the same time as the
// zxid of the snapshot
for (File f : files) {
long fzxid = Util.getZxidFromName(f.getName(), "log");
if (fzxid > snapshotZxid) {
continue;
}
// the files
// are sorted with zxid's
if (fzxid > logZxid) {
logZxid = fzxid;
}
}
List<File> v=new ArrayList<File>(5);
for (File f : files) {
long fzxid = Util.getZxidFromName(f.getName(), "log");
if (fzxid < logZxid) {
continue;
}
v.add(f);
}
return v.toArray(new File[0]);
} | [
"public",
"static",
"File",
"[",
"]",
"getLogFiles",
"(",
"File",
"[",
"]",
"logDirList",
",",
"long",
"snapshotZxid",
")",
"{",
"List",
"<",
"File",
">",
"files",
"=",
"Util",
".",
"sortDataDir",
"(",
"logDirList",
",",
"\"log\"",
",",
"true",
")",
";... | Find the log file that starts at, or just before, the snapshot. Return
this and all subsequent logs. Results are ordered by zxid of file,
ascending order.
@param logDirList array of files
@param snapshotZxid return files at, or before this zxid
@return | [
"Find",
"the",
"log",
"file",
"that",
"starts",
"at",
"or",
"just",
"before",
"the",
"snapshot",
".",
"Return",
"this",
"and",
"all",
"subsequent",
"logs",
".",
"Results",
"are",
"ordered",
"by",
"zxid",
"of",
"file",
"ascending",
"order",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/third_party/java/src/org/apache/zookeeper_voltpatches/server/persistence/FileTxnLog.java#L248-L274 | train |
VoltDB/voltdb | third_party/java/src/org/apache/zookeeper_voltpatches/server/persistence/FileTxnLog.java | FileTxnLog.getLastLoggedZxid | public long getLastLoggedZxid() {
File[] files = getLogFiles(logDir.listFiles(), 0);
long maxLog=files.length>0?
Util.getZxidFromName(files[files.length-1].getName(),"log"):-1;
// if a log file is more recent we must scan it to find
// the highest zxid
long zxid = maxLog;
try {
FileTxnLog txn = new FileTxnLog(logDir);
TxnIterator itr = txn.read(maxLog);
while (true) {
if(!itr.next())
break;
TxnHeader hdr = itr.getHeader();
zxid = hdr.getZxid();
}
} catch (IOException e) {
LOG.warn("Unexpected exception", e);
}
return zxid;
} | java | public long getLastLoggedZxid() {
File[] files = getLogFiles(logDir.listFiles(), 0);
long maxLog=files.length>0?
Util.getZxidFromName(files[files.length-1].getName(),"log"):-1;
// if a log file is more recent we must scan it to find
// the highest zxid
long zxid = maxLog;
try {
FileTxnLog txn = new FileTxnLog(logDir);
TxnIterator itr = txn.read(maxLog);
while (true) {
if(!itr.next())
break;
TxnHeader hdr = itr.getHeader();
zxid = hdr.getZxid();
}
} catch (IOException e) {
LOG.warn("Unexpected exception", e);
}
return zxid;
} | [
"public",
"long",
"getLastLoggedZxid",
"(",
")",
"{",
"File",
"[",
"]",
"files",
"=",
"getLogFiles",
"(",
"logDir",
".",
"listFiles",
"(",
")",
",",
"0",
")",
";",
"long",
"maxLog",
"=",
"files",
".",
"length",
">",
"0",
"?",
"Util",
".",
"getZxidFro... | get the last zxid that was logged in the transaction logs
@return the last zxid logged in the transaction logs | [
"get",
"the",
"last",
"zxid",
"that",
"was",
"logged",
"in",
"the",
"transaction",
"logs"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/third_party/java/src/org/apache/zookeeper_voltpatches/server/persistence/FileTxnLog.java#L280-L301 | train |
VoltDB/voltdb | third_party/java/src/org/apache/zookeeper_voltpatches/server/persistence/FileTxnLog.java | FileTxnLog.commit | public synchronized void commit() throws IOException {
if (logStream != null) {
logStream.flush();
}
for (FileOutputStream log : streamsToFlush) {
log.flush();
if (forceSync) {
log.getChannel().force(false);
}
}
while (streamsToFlush.size() > 1) {
streamsToFlush.removeFirst().close();
}
} | java | public synchronized void commit() throws IOException {
if (logStream != null) {
logStream.flush();
}
for (FileOutputStream log : streamsToFlush) {
log.flush();
if (forceSync) {
log.getChannel().force(false);
}
}
while (streamsToFlush.size() > 1) {
streamsToFlush.removeFirst().close();
}
} | [
"public",
"synchronized",
"void",
"commit",
"(",
")",
"throws",
"IOException",
"{",
"if",
"(",
"logStream",
"!=",
"null",
")",
"{",
"logStream",
".",
"flush",
"(",
")",
";",
"}",
"for",
"(",
"FileOutputStream",
"log",
":",
"streamsToFlush",
")",
"{",
"lo... | commit the logs. make sure that evertyhing hits the
disk | [
"commit",
"the",
"logs",
".",
"make",
"sure",
"that",
"evertyhing",
"hits",
"the",
"disk"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/third_party/java/src/org/apache/zookeeper_voltpatches/server/persistence/FileTxnLog.java#L307-L320 | train |
VoltDB/voltdb | third_party/java/src/org/apache/zookeeper_voltpatches/server/persistence/FileTxnLog.java | FileTxnLog.truncate | public boolean truncate(long zxid) throws IOException {
FileTxnIterator itr = new FileTxnIterator(this.logDir, zxid);
PositionInputStream input = itr.inputStream;
long pos = input.getPosition();
// now, truncate at the current position
RandomAccessFile raf=new RandomAccessFile(itr.logFile,"rw");
raf.setLength(pos);
raf.close();
while(itr.goToNextLog()) {
if (!itr.logFile.delete()) {
LOG.warn("Unable to truncate " + itr.logFile);
}
}
return true;
} | java | public boolean truncate(long zxid) throws IOException {
FileTxnIterator itr = new FileTxnIterator(this.logDir, zxid);
PositionInputStream input = itr.inputStream;
long pos = input.getPosition();
// now, truncate at the current position
RandomAccessFile raf=new RandomAccessFile(itr.logFile,"rw");
raf.setLength(pos);
raf.close();
while(itr.goToNextLog()) {
if (!itr.logFile.delete()) {
LOG.warn("Unable to truncate " + itr.logFile);
}
}
return true;
} | [
"public",
"boolean",
"truncate",
"(",
"long",
"zxid",
")",
"throws",
"IOException",
"{",
"FileTxnIterator",
"itr",
"=",
"new",
"FileTxnIterator",
"(",
"this",
".",
"logDir",
",",
"zxid",
")",
";",
"PositionInputStream",
"input",
"=",
"itr",
".",
"inputStream",... | truncate the current transaction logs
@param zxid the zxid to truncate the logs to
@return true if successful false if not | [
"truncate",
"the",
"current",
"transaction",
"logs"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/third_party/java/src/org/apache/zookeeper_voltpatches/server/persistence/FileTxnLog.java#L337-L351 | train |
VoltDB/voltdb | third_party/java/src/org/apache/zookeeper_voltpatches/server/persistence/FileTxnLog.java | FileTxnLog.readHeader | private static FileHeader readHeader(File file) throws IOException {
InputStream is =null;
try {
is = new BufferedInputStream(new FileInputStream(file));
InputArchive ia=BinaryInputArchive.getArchive(is);
FileHeader hdr = new FileHeader();
hdr.deserialize(ia, "fileheader");
return hdr;
} finally {
try {
if (is != null) is.close();
} catch (IOException e) {
LOG.warn("Ignoring exception during close", e);
}
}
} | java | private static FileHeader readHeader(File file) throws IOException {
InputStream is =null;
try {
is = new BufferedInputStream(new FileInputStream(file));
InputArchive ia=BinaryInputArchive.getArchive(is);
FileHeader hdr = new FileHeader();
hdr.deserialize(ia, "fileheader");
return hdr;
} finally {
try {
if (is != null) is.close();
} catch (IOException e) {
LOG.warn("Ignoring exception during close", e);
}
}
} | [
"private",
"static",
"FileHeader",
"readHeader",
"(",
"File",
"file",
")",
"throws",
"IOException",
"{",
"InputStream",
"is",
"=",
"null",
";",
"try",
"{",
"is",
"=",
"new",
"BufferedInputStream",
"(",
"new",
"FileInputStream",
"(",
"file",
")",
")",
";",
... | read the header of the transaction file
@param file the transaction file to read
@return header that was read fomr the file
@throws IOException | [
"read",
"the",
"header",
"of",
"the",
"transaction",
"file"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/third_party/java/src/org/apache/zookeeper_voltpatches/server/persistence/FileTxnLog.java#L359-L374 | train |
VoltDB/voltdb | third_party/java/src/org/apache/zookeeper_voltpatches/server/persistence/FileTxnLog.java | FileTxnLog.getDbId | public long getDbId() throws IOException {
FileTxnIterator itr = new FileTxnIterator(logDir, 0);
FileHeader fh=readHeader(itr.logFile);
itr.close();
if(fh==null)
throw new IOException("Unsupported Format.");
return fh.getDbid();
} | java | public long getDbId() throws IOException {
FileTxnIterator itr = new FileTxnIterator(logDir, 0);
FileHeader fh=readHeader(itr.logFile);
itr.close();
if(fh==null)
throw new IOException("Unsupported Format.");
return fh.getDbid();
} | [
"public",
"long",
"getDbId",
"(",
")",
"throws",
"IOException",
"{",
"FileTxnIterator",
"itr",
"=",
"new",
"FileTxnIterator",
"(",
"logDir",
",",
"0",
")",
";",
"FileHeader",
"fh",
"=",
"readHeader",
"(",
"itr",
".",
"logFile",
")",
";",
"itr",
".",
"clo... | the dbid of this transaction database
@return the dbid of this database | [
"the",
"dbid",
"of",
"this",
"transaction",
"database"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/third_party/java/src/org/apache/zookeeper_voltpatches/server/persistence/FileTxnLog.java#L380-L387 | train |
VoltDB/voltdb | src/frontend/org/voltdb/exportclient/decode/EndpointExpander.java | EndpointExpander.verifyForHdfsUse | public static void verifyForHdfsUse(String sb) throws IllegalArgumentException {
Preconditions.checkArgument(
sb != null && !sb.trim().isEmpty(),
"null or empty hdfs endpoint"
);
int mask = conversionMaskFor(sb);
boolean hasDateConversion = (mask & DATE) == DATE;
Preconditions.checkArgument(
(mask & HDFS_MASK) == HDFS_MASK,
"hdfs endpoint \"" + sb
+ "\" must contain the (%t)able, the (%p)artition, and the (%g) generation conversions"
);
final String tn = "__IMPROBABLE_TABLE_NAME__";
final int pn = Integer.MIN_VALUE;
final long gn = Long.MIN_VALUE;
final Date dt = new Date(0);
final String fmtd = hasDateConversion ? new SimpleDateFormat(DATE_FORMAT).format(dt) : "";
URI uri = URI.create(expand(sb, tn, pn, gn, dt));
String path = uri.getPath();
List<String> missing = new ArrayList<>();
if (!path.contains(tn)) missing.add("%t");
if (!path.contains(Integer.toString(pn))) missing.add("%p");
if (!path.contains(Long.toString(gn,Character.MAX_RADIX))) missing.add("%g");
if (hasDateConversion && !path.contains(fmtd)) missing.add("%d");
if (!missing.isEmpty()) {
String notInPath = Joiner.on(", ").join(missing);
throw new IllegalArgumentException(
"hdfs enpoint \"" + sb
+ "\" does not contain conversion(s) " + notInPath
+ " in the path element of the URL");
}
} | java | public static void verifyForHdfsUse(String sb) throws IllegalArgumentException {
Preconditions.checkArgument(
sb != null && !sb.trim().isEmpty(),
"null or empty hdfs endpoint"
);
int mask = conversionMaskFor(sb);
boolean hasDateConversion = (mask & DATE) == DATE;
Preconditions.checkArgument(
(mask & HDFS_MASK) == HDFS_MASK,
"hdfs endpoint \"" + sb
+ "\" must contain the (%t)able, the (%p)artition, and the (%g) generation conversions"
);
final String tn = "__IMPROBABLE_TABLE_NAME__";
final int pn = Integer.MIN_VALUE;
final long gn = Long.MIN_VALUE;
final Date dt = new Date(0);
final String fmtd = hasDateConversion ? new SimpleDateFormat(DATE_FORMAT).format(dt) : "";
URI uri = URI.create(expand(sb, tn, pn, gn, dt));
String path = uri.getPath();
List<String> missing = new ArrayList<>();
if (!path.contains(tn)) missing.add("%t");
if (!path.contains(Integer.toString(pn))) missing.add("%p");
if (!path.contains(Long.toString(gn,Character.MAX_RADIX))) missing.add("%g");
if (hasDateConversion && !path.contains(fmtd)) missing.add("%d");
if (!missing.isEmpty()) {
String notInPath = Joiner.on(", ").join(missing);
throw new IllegalArgumentException(
"hdfs enpoint \"" + sb
+ "\" does not contain conversion(s) " + notInPath
+ " in the path element of the URL");
}
} | [
"public",
"static",
"void",
"verifyForHdfsUse",
"(",
"String",
"sb",
")",
"throws",
"IllegalArgumentException",
"{",
"Preconditions",
".",
"checkArgument",
"(",
"sb",
"!=",
"null",
"&&",
"!",
"sb",
".",
"trim",
"(",
")",
".",
"isEmpty",
"(",
")",
",",
"\"n... | Verifies that given endpoint format string specifies all the required hdfs
conversions in the path portion of the endpoint.
@param sb format string
@throws IllegalArgumentException when verification fails | [
"Verifies",
"that",
"given",
"endpoint",
"format",
"string",
"specifies",
"all",
"the",
"required",
"hdfs",
"conversions",
"in",
"the",
"path",
"portion",
"of",
"the",
"endpoint",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/exportclient/decode/EndpointExpander.java#L259-L296 | train |
VoltDB/voltdb | src/frontend/org/voltdb/exportclient/decode/EndpointExpander.java | EndpointExpander.verifyForBatchUse | public static void verifyForBatchUse(String sb) throws IllegalArgumentException {
Preconditions.checkArgument(
sb != null && !sb.trim().isEmpty(),
"null or empty hdfs endpoint"
);
int mask = conversionMaskFor(sb);
Preconditions.checkArgument(
(mask & HDFS_MASK) == HDFS_MASK,
"batch mode endpoint \"" + sb
+ "\" must contain the (%t)able, the (%p)artition, and the (%g) generation conversions"
);
} | java | public static void verifyForBatchUse(String sb) throws IllegalArgumentException {
Preconditions.checkArgument(
sb != null && !sb.trim().isEmpty(),
"null or empty hdfs endpoint"
);
int mask = conversionMaskFor(sb);
Preconditions.checkArgument(
(mask & HDFS_MASK) == HDFS_MASK,
"batch mode endpoint \"" + sb
+ "\" must contain the (%t)able, the (%p)artition, and the (%g) generation conversions"
);
} | [
"public",
"static",
"void",
"verifyForBatchUse",
"(",
"String",
"sb",
")",
"throws",
"IllegalArgumentException",
"{",
"Preconditions",
".",
"checkArgument",
"(",
"sb",
"!=",
"null",
"&&",
"!",
"sb",
".",
"trim",
"(",
")",
".",
"isEmpty",
"(",
")",
",",
"\"... | Verifies that given endpoint format string specifies all the required batch mode
conversions.
@param sb format string
@throws IllegalArgumentException when verification fails | [
"Verifies",
"that",
"given",
"endpoint",
"format",
"string",
"specifies",
"all",
"the",
"required",
"batch",
"mode",
"conversions",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/exportclient/decode/EndpointExpander.java#L305-L317 | train |
VoltDB/voltdb | src/frontend/org/voltdb/SystemCatalogAgent.java | SystemCatalogAgent.handleJSONMessage | @Override
protected void handleJSONMessage(JSONObject obj) throws Exception {
hostLog.warn("SystemCatalogAgent received a JSON message, which should be impossible.");
VoltTable[] results = null;
sendOpsResponse(results, obj);
} | java | @Override
protected void handleJSONMessage(JSONObject obj) throws Exception {
hostLog.warn("SystemCatalogAgent received a JSON message, which should be impossible.");
VoltTable[] results = null;
sendOpsResponse(results, obj);
} | [
"@",
"Override",
"protected",
"void",
"handleJSONMessage",
"(",
"JSONObject",
"obj",
")",
"throws",
"Exception",
"{",
"hostLog",
".",
"warn",
"(",
"\"SystemCatalogAgent received a JSON message, which should be impossible.\"",
")",
";",
"VoltTable",
"[",
"]",
"results",
... | SystemCatalog shouldn't currently get here, make it so we don't die or do anything | [
"SystemCatalog",
"shouldn",
"t",
"currently",
"get",
"here",
"make",
"it",
"so",
"we",
"don",
"t",
"die",
"or",
"do",
"anything"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/SystemCatalogAgent.java#L87-L92 | train |
VoltDB/voltdb | third_party/java/src/com/google_voltpatches/common/collect/MapConstraints.java | MapConstraints.constrainedMap | public static <K, V> Map<K, V> constrainedMap(
Map<K, V> map, MapConstraint<? super K, ? super V> constraint) {
return new ConstrainedMap<K, V>(map, constraint);
} | java | public static <K, V> Map<K, V> constrainedMap(
Map<K, V> map, MapConstraint<? super K, ? super V> constraint) {
return new ConstrainedMap<K, V>(map, constraint);
} | [
"public",
"static",
"<",
"K",
",",
"V",
">",
"Map",
"<",
"K",
",",
"V",
">",
"constrainedMap",
"(",
"Map",
"<",
"K",
",",
"V",
">",
"map",
",",
"MapConstraint",
"<",
"?",
"super",
"K",
",",
"?",
"super",
"V",
">",
"constraint",
")",
"{",
"retur... | Returns a constrained view of the specified map, using the specified
constraint. Any operations that add new mappings will call the provided
constraint. However, this method does not verify that existing mappings
satisfy the constraint.
<p>The returned map is not serializable.
@param map the map to constrain
@param constraint the constraint that validates added entries
@return a constrained view of the specified map | [
"Returns",
"a",
"constrained",
"view",
"of",
"the",
"specified",
"map",
"using",
"the",
"specified",
"constraint",
".",
"Any",
"operations",
"that",
"add",
"new",
"mappings",
"will",
"call",
"the",
"provided",
"constraint",
".",
"However",
"this",
"method",
"d... | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/third_party/java/src/com/google_voltpatches/common/collect/MapConstraints.java#L65-L68 | train |
VoltDB/voltdb | third_party/java/src/com/google_voltpatches/common/collect/MapConstraints.java | MapConstraints.constrainedListMultimap | public static <K, V> ListMultimap<K, V> constrainedListMultimap(
ListMultimap<K, V> multimap, MapConstraint<? super K, ? super V> constraint) {
return new ConstrainedListMultimap<K, V>(multimap, constraint);
} | java | public static <K, V> ListMultimap<K, V> constrainedListMultimap(
ListMultimap<K, V> multimap, MapConstraint<? super K, ? super V> constraint) {
return new ConstrainedListMultimap<K, V>(multimap, constraint);
} | [
"public",
"static",
"<",
"K",
",",
"V",
">",
"ListMultimap",
"<",
"K",
",",
"V",
">",
"constrainedListMultimap",
"(",
"ListMultimap",
"<",
"K",
",",
"V",
">",
"multimap",
",",
"MapConstraint",
"<",
"?",
"super",
"K",
",",
"?",
"super",
"V",
">",
"con... | Returns a constrained view of the specified list multimap, using the
specified constraint. Any operations that add new mappings will call the
provided constraint. However, this method does not verify that existing
mappings satisfy the constraint.
<p>Note that the generated multimap's {@link Multimap#removeAll} and
{@link Multimap#replaceValues} methods return collections that are not
constrained.
<p>The returned multimap is not serializable.
@param multimap the multimap to constrain
@param constraint the constraint that validates added entries
@return a constrained view of the specified multimap | [
"Returns",
"a",
"constrained",
"view",
"of",
"the",
"specified",
"list",
"multimap",
"using",
"the",
"specified",
"constraint",
".",
"Any",
"operations",
"that",
"add",
"new",
"mappings",
"will",
"call",
"the",
"provided",
"constraint",
".",
"However",
"this",
... | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/third_party/java/src/com/google_voltpatches/common/collect/MapConstraints.java#L86-L89 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/ExpressionWindowed.java | ExpressionWindowed.validateWindowedSyntax | private void validateWindowedSyntax() {
// Check that the aggregate is one of the supported ones, and
// that the number of aggregate parameters is right.
switch (opType) {
case OpTypes.WINDOWED_RANK:
case OpTypes.WINDOWED_DENSE_RANK:
case OpTypes.WINDOWED_ROW_NUMBER:
if (nodes.length != 0) {
throw Error.error("Windowed Aggregate " + OpTypes.aggregateName(opType) + " expects no arguments.", "", 0);
}
break;
case OpTypes.WINDOWED_COUNT:
case OpTypes.WINDOWED_MIN:
case OpTypes.WINDOWED_MAX:
case OpTypes.WINDOWED_SUM:
break;
default:
throw Error.error("Unsupported window function " + OpTypes.aggregateName(opType), "", 0);
}
} | java | private void validateWindowedSyntax() {
// Check that the aggregate is one of the supported ones, and
// that the number of aggregate parameters is right.
switch (opType) {
case OpTypes.WINDOWED_RANK:
case OpTypes.WINDOWED_DENSE_RANK:
case OpTypes.WINDOWED_ROW_NUMBER:
if (nodes.length != 0) {
throw Error.error("Windowed Aggregate " + OpTypes.aggregateName(opType) + " expects no arguments.", "", 0);
}
break;
case OpTypes.WINDOWED_COUNT:
case OpTypes.WINDOWED_MIN:
case OpTypes.WINDOWED_MAX:
case OpTypes.WINDOWED_SUM:
break;
default:
throw Error.error("Unsupported window function " + OpTypes.aggregateName(opType), "", 0);
}
} | [
"private",
"void",
"validateWindowedSyntax",
"(",
")",
"{",
"// Check that the aggregate is one of the supported ones, and",
"// that the number of aggregate parameters is right.",
"switch",
"(",
"opType",
")",
"{",
"case",
"OpTypes",
".",
"WINDOWED_RANK",
":",
"case",
"OpTypes... | Validate that this is a collection of values. | [
"Validate",
"that",
"this",
"is",
"a",
"collection",
"of",
"values",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/ExpressionWindowed.java#L68-L87 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/StatementInsert.java | StatementInsert.getResult | Result getResult(Session session) {
Table table = baseTable;
Result resultOut = null;
RowSetNavigator generatedNavigator = null;
PersistentStore store = session.sessionData.getRowStore(baseTable);
if (generatedIndexes != null) {
resultOut = Result.newUpdateCountResult(generatedResultMetaData,
0);
generatedNavigator = resultOut.getChainedResult().getNavigator();
}
RowSetNavigator newDataNavigator = queryExpression == null
? getInsertValuesNavigator(session)
: getInsertSelectNavigator(session);
Expression checkCondition = null;
RangeIteratorBase checkIterator = null;
if (targetTable != baseTable) {
QuerySpecification select =
((TableDerived) targetTable).getQueryExpression()
.getMainSelect();
checkCondition = select.checkQueryCondition;
if (checkCondition != null) {
checkIterator = select.rangeVariables[0].getIterator(session);
}
}
while (newDataNavigator.hasNext()) {
Object[] data = newDataNavigator.getNext();
if (checkCondition != null) {
checkIterator.currentData = data;
boolean check = checkCondition.testCondition(session);
if (!check) {
throw Error.error(ErrorCode.X_44000);
}
}
table.insertRow(session, store, data);
if (generatedNavigator != null) {
Object[] generatedValues = getGeneratedColumns(data);
generatedNavigator.add(generatedValues);
}
}
newDataNavigator.beforeFirst();
table.fireAfterTriggers(session, Trigger.INSERT_AFTER,
newDataNavigator);
if (resultOut == null) {
resultOut =
Result.getUpdateCountResult(newDataNavigator.getSize());
} else {
resultOut.setUpdateCount(newDataNavigator.getSize());
}
return resultOut;
} | java | Result getResult(Session session) {
Table table = baseTable;
Result resultOut = null;
RowSetNavigator generatedNavigator = null;
PersistentStore store = session.sessionData.getRowStore(baseTable);
if (generatedIndexes != null) {
resultOut = Result.newUpdateCountResult(generatedResultMetaData,
0);
generatedNavigator = resultOut.getChainedResult().getNavigator();
}
RowSetNavigator newDataNavigator = queryExpression == null
? getInsertValuesNavigator(session)
: getInsertSelectNavigator(session);
Expression checkCondition = null;
RangeIteratorBase checkIterator = null;
if (targetTable != baseTable) {
QuerySpecification select =
((TableDerived) targetTable).getQueryExpression()
.getMainSelect();
checkCondition = select.checkQueryCondition;
if (checkCondition != null) {
checkIterator = select.rangeVariables[0].getIterator(session);
}
}
while (newDataNavigator.hasNext()) {
Object[] data = newDataNavigator.getNext();
if (checkCondition != null) {
checkIterator.currentData = data;
boolean check = checkCondition.testCondition(session);
if (!check) {
throw Error.error(ErrorCode.X_44000);
}
}
table.insertRow(session, store, data);
if (generatedNavigator != null) {
Object[] generatedValues = getGeneratedColumns(data);
generatedNavigator.add(generatedValues);
}
}
newDataNavigator.beforeFirst();
table.fireAfterTriggers(session, Trigger.INSERT_AFTER,
newDataNavigator);
if (resultOut == null) {
resultOut =
Result.getUpdateCountResult(newDataNavigator.getSize());
} else {
resultOut.setUpdateCount(newDataNavigator.getSize());
}
return resultOut;
} | [
"Result",
"getResult",
"(",
"Session",
"session",
")",
"{",
"Table",
"table",
"=",
"baseTable",
";",
"Result",
"resultOut",
"=",
"null",
";",
"RowSetNavigator",
"generatedNavigator",
"=",
"null",
";",
"PersistentStore",
"store",
"=",
"session",
".",
"sessionData... | Executes an INSERT_SELECT statement. It is assumed that the argument
is of the correct type.
@return the result of executing the statement | [
"Executes",
"an",
"INSERT_SELECT",
"statement",
".",
"It",
"is",
"assumed",
"that",
"the",
"argument",
"is",
"of",
"the",
"correct",
"type",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/StatementInsert.java#L99-L164 | train |
VoltDB/voltdb | src/frontend/org/voltdb/expressions/TupleValueExpression.java | TupleValueExpression.resolveForTable | @Override
public void resolveForTable(Table table) {
assert(table != null);
// It MAY be that for the case in which this function is called (expression indexes), the column's
// table name is not specified (and not missed?).
// It is possible to "correct" that here by cribbing it from the supplied table (base table for the index)
// -- not bothering for now.
Column column = table.getColumns().getExact(m_columnName);
assert(column != null);
m_tableName = table.getTypeName();
m_columnIndex = column.getIndex();
setTypeSizeAndInBytes(column);
} | java | @Override
public void resolveForTable(Table table) {
assert(table != null);
// It MAY be that for the case in which this function is called (expression indexes), the column's
// table name is not specified (and not missed?).
// It is possible to "correct" that here by cribbing it from the supplied table (base table for the index)
// -- not bothering for now.
Column column = table.getColumns().getExact(m_columnName);
assert(column != null);
m_tableName = table.getTypeName();
m_columnIndex = column.getIndex();
setTypeSizeAndInBytes(column);
} | [
"@",
"Override",
"public",
"void",
"resolveForTable",
"(",
"Table",
"table",
")",
"{",
"assert",
"(",
"table",
"!=",
"null",
")",
";",
"// It MAY be that for the case in which this function is called (expression indexes), the column's",
"// table name is not specified (and not mi... | Resolve a TVE in the context of the given table. Since
this is a TVE, it is a leaf node in the expression tree.
We just look up the metadata from the table and copy it
here, to this object. | [
"Resolve",
"a",
"TVE",
"in",
"the",
"context",
"of",
"the",
"given",
"table",
".",
"Since",
"this",
"is",
"a",
"TVE",
"it",
"is",
"a",
"leaf",
"node",
"in",
"the",
"expression",
"tree",
".",
"We",
"just",
"look",
"up",
"the",
"metadata",
"from",
"the... | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/expressions/TupleValueExpression.java#L400-L412 | train |
VoltDB/voltdb | src/frontend/org/voltdb/expressions/TupleValueExpression.java | TupleValueExpression.setColumnIndexUsingSchema | public int setColumnIndexUsingSchema(NodeSchema inputSchema) {
int index = inputSchema.getIndexOfTve(this);
if (index < 0) {
//* enable to debug*/ System.out.println("DEBUG: setColumnIndex miss: " + this);
//* enable to debug*/ System.out.println("DEBUG: setColumnIndex candidates: " + inputSchema);
return index;
}
setColumnIndex(index);
if (getValueType() == null) {
// In case of sub-queries the TVE may not have its
// value type and size resolved yet. Try to resolve it now
SchemaColumn inputColumn = inputSchema.getColumn(index);
setTypeSizeAndInBytes(inputColumn);
}
return index;
} | java | public int setColumnIndexUsingSchema(NodeSchema inputSchema) {
int index = inputSchema.getIndexOfTve(this);
if (index < 0) {
//* enable to debug*/ System.out.println("DEBUG: setColumnIndex miss: " + this);
//* enable to debug*/ System.out.println("DEBUG: setColumnIndex candidates: " + inputSchema);
return index;
}
setColumnIndex(index);
if (getValueType() == null) {
// In case of sub-queries the TVE may not have its
// value type and size resolved yet. Try to resolve it now
SchemaColumn inputColumn = inputSchema.getColumn(index);
setTypeSizeAndInBytes(inputColumn);
}
return index;
} | [
"public",
"int",
"setColumnIndexUsingSchema",
"(",
"NodeSchema",
"inputSchema",
")",
"{",
"int",
"index",
"=",
"inputSchema",
".",
"getIndexOfTve",
"(",
"this",
")",
";",
"if",
"(",
"index",
"<",
"0",
")",
"{",
"//* enable to debug*/ System.out.println(\"DEBUG: setC... | Given an input schema, resolve this TVE expression. | [
"Given",
"an",
"input",
"schema",
"resolve",
"this",
"TVE",
"expression",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/expressions/TupleValueExpression.java#L417-L433 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSetMetaData.java | JDBC4ResultSetMetaData.getColumnClassName | public String getColumnClassName(int column) throws SQLException
{
sourceResultSet.checkColumnBounds(column);
VoltType type = sourceResultSet.table.getColumnType(column - 1);
String result = type.getJdbcClass();
if (result == null) {
throw SQLError.get(SQLError.TRANSLATION_NOT_FOUND, type);
}
return result;
} | java | public String getColumnClassName(int column) throws SQLException
{
sourceResultSet.checkColumnBounds(column);
VoltType type = sourceResultSet.table.getColumnType(column - 1);
String result = type.getJdbcClass();
if (result == null) {
throw SQLError.get(SQLError.TRANSLATION_NOT_FOUND, type);
}
return result;
} | [
"public",
"String",
"getColumnClassName",
"(",
"int",
"column",
")",
"throws",
"SQLException",
"{",
"sourceResultSet",
".",
"checkColumnBounds",
"(",
"column",
")",
";",
"VoltType",
"type",
"=",
"sourceResultSet",
".",
"table",
".",
"getColumnType",
"(",
"column",... | Returns the fully-qualified name of the Java class whose instances are manufactured if the method ResultSet.getObject is called to retrieve a value from the column. | [
"Returns",
"the",
"fully",
"-",
"qualified",
"name",
"of",
"the",
"Java",
"class",
"whose",
"instances",
"are",
"manufactured",
"if",
"the",
"method",
"ResultSet",
".",
"getObject",
"is",
"called",
"to",
"retrieve",
"a",
"value",
"from",
"the",
"column",
"."... | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSetMetaData.java#L42-L51 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSetMetaData.java | JDBC4ResultSetMetaData.getPrecision | public int getPrecision(int column) throws SQLException
{
sourceResultSet.checkColumnBounds(column);
VoltType type = sourceResultSet.table.getColumnType(column - 1);
Integer result = type.getTypePrecisionAndRadix()[0];
if (result == null) {
result = 0;
}
return result;
} | java | public int getPrecision(int column) throws SQLException
{
sourceResultSet.checkColumnBounds(column);
VoltType type = sourceResultSet.table.getColumnType(column - 1);
Integer result = type.getTypePrecisionAndRadix()[0];
if (result == null) {
result = 0;
}
return result;
} | [
"public",
"int",
"getPrecision",
"(",
"int",
"column",
")",
"throws",
"SQLException",
"{",
"sourceResultSet",
".",
"checkColumnBounds",
"(",
"column",
")",
";",
"VoltType",
"type",
"=",
"sourceResultSet",
".",
"table",
".",
"getColumnType",
"(",
"column",
"-",
... | Get the designated column's specified column size. | [
"Get",
"the",
"designated",
"column",
"s",
"specified",
"column",
"size",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSetMetaData.java#L133-L142 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSetMetaData.java | JDBC4ResultSetMetaData.getScale | public int getScale(int column) throws SQLException
{
sourceResultSet.checkColumnBounds(column);
VoltType type = sourceResultSet.table.getColumnType(column - 1);
Integer result = type.getMaximumScale();
if (result == null) {
result = 0;
}
return result;
} | java | public int getScale(int column) throws SQLException
{
sourceResultSet.checkColumnBounds(column);
VoltType type = sourceResultSet.table.getColumnType(column - 1);
Integer result = type.getMaximumScale();
if (result == null) {
result = 0;
}
return result;
} | [
"public",
"int",
"getScale",
"(",
"int",
"column",
")",
"throws",
"SQLException",
"{",
"sourceResultSet",
".",
"checkColumnBounds",
"(",
"column",
")",
";",
"VoltType",
"type",
"=",
"sourceResultSet",
".",
"table",
".",
"getColumnType",
"(",
"column",
"-",
"1"... | Gets the designated column's number of digits to right of the decimal point. | [
"Gets",
"the",
"designated",
"column",
"s",
"number",
"of",
"digits",
"to",
"right",
"of",
"the",
"decimal",
"point",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSetMetaData.java#L145-L154 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSetMetaData.java | JDBC4ResultSetMetaData.isCaseSensitive | public boolean isCaseSensitive(int column) throws SQLException
{
sourceResultSet.checkColumnBounds(column);
VoltType type = sourceResultSet.table.getColumnType(column - 1);
return type.isCaseSensitive();
} | java | public boolean isCaseSensitive(int column) throws SQLException
{
sourceResultSet.checkColumnBounds(column);
VoltType type = sourceResultSet.table.getColumnType(column - 1);
return type.isCaseSensitive();
} | [
"public",
"boolean",
"isCaseSensitive",
"(",
"int",
"column",
")",
"throws",
"SQLException",
"{",
"sourceResultSet",
".",
"checkColumnBounds",
"(",
"column",
")",
";",
"VoltType",
"type",
"=",
"sourceResultSet",
".",
"table",
".",
"getColumnType",
"(",
"column",
... | Indicates whether a column's case matters. | [
"Indicates",
"whether",
"a",
"column",
"s",
"case",
"matters",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSetMetaData.java#L180-L185 | train |
VoltDB/voltdb | src/frontend/org/voltdb/jdbc/JDBC4ResultSetMetaData.java | JDBC4ResultSetMetaData.isSigned | public boolean isSigned(int column) throws SQLException
{
sourceResultSet.checkColumnBounds(column);
VoltType type = sourceResultSet.table.getColumnType(column - 1);
Boolean result = type.isUnsigned();
if (result == null) {
// Null return value means 'not signed' as far as this interface goes
return false;
}
return !result;
} | java | public boolean isSigned(int column) throws SQLException
{
sourceResultSet.checkColumnBounds(column);
VoltType type = sourceResultSet.table.getColumnType(column - 1);
Boolean result = type.isUnsigned();
if (result == null) {
// Null return value means 'not signed' as far as this interface goes
return false;
}
return !result;
} | [
"public",
"boolean",
"isSigned",
"(",
"int",
"column",
")",
"throws",
"SQLException",
"{",
"sourceResultSet",
".",
"checkColumnBounds",
"(",
"column",
")",
";",
"VoltType",
"type",
"=",
"sourceResultSet",
".",
"table",
".",
"getColumnType",
"(",
"column",
"-",
... | Indicates whether values in the designated column are signed numbers. | [
"Indicates",
"whether",
"values",
"in",
"the",
"designated",
"column",
"are",
"signed",
"numbers",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/jdbc/JDBC4ResultSetMetaData.java#L222-L232 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/microoptimizations/InlineOrderByIntoMergeReceive.java | InlineOrderByIntoMergeReceive.applyOptimization | private AbstractPlanNode applyOptimization(WindowFunctionPlanNode plan) {
assert(plan.getChildCount() == 1);
assert(plan.getChild(0) != null);
AbstractPlanNode child = plan.getChild(0);
assert(child != null);
// SP Plans which have an index which can provide
// the window function ordering don't create
// an order by node.
if ( ! ( child instanceof OrderByPlanNode ) ) {
return plan;
}
OrderByPlanNode onode = (OrderByPlanNode)child;
child = onode.getChild(0);
// The order by node needs a RECEIVE node child
// for this optimization to work.
if ( ! ( child instanceof ReceivePlanNode)) {
return plan;
}
ReceivePlanNode receiveNode = (ReceivePlanNode)child;
assert(receiveNode.getChildCount() == 1);
child = receiveNode.getChild(0);
// The Receive node needs a send node child.
assert( child instanceof SendPlanNode );
SendPlanNode sendNode = (SendPlanNode)child;
child = sendNode.getChild(0);
// If this window function does not use the
// index then this optimization is not possible.
// We've recorded a number of the window function
// in the root of the subplan, which will be
// the first child of the send node.
//
// Right now the only window function has number
// 0, and we don't record that in the
// WINDOWFUNCTION plan node. If there were
// more than one window function we would need
// to record a number in the plan node and
// then check that child.getWindowFunctionUsesIndex()
// returns the number in the plan node.
if ( ! ( child instanceof IndexSortablePlanNode)) {
return plan;
}
IndexSortablePlanNode indexed = (IndexSortablePlanNode)child;
if (indexed.indexUse().getWindowFunctionUsesIndex() != 0) {
return plan;
}
// Remove the Receive node and the Order by node
// and replace them with a MergeReceive node. Leave
// the order by node inline in the MergeReceive node,
// since we need it to calculate the merge.
plan.clearChildren();
receiveNode.removeFromGraph();
MergeReceivePlanNode mrnode = new MergeReceivePlanNode();
mrnode.addInlinePlanNode(onode);
mrnode.addAndLinkChild(sendNode);
plan.addAndLinkChild(mrnode);
return plan;
} | java | private AbstractPlanNode applyOptimization(WindowFunctionPlanNode plan) {
assert(plan.getChildCount() == 1);
assert(plan.getChild(0) != null);
AbstractPlanNode child = plan.getChild(0);
assert(child != null);
// SP Plans which have an index which can provide
// the window function ordering don't create
// an order by node.
if ( ! ( child instanceof OrderByPlanNode ) ) {
return plan;
}
OrderByPlanNode onode = (OrderByPlanNode)child;
child = onode.getChild(0);
// The order by node needs a RECEIVE node child
// for this optimization to work.
if ( ! ( child instanceof ReceivePlanNode)) {
return plan;
}
ReceivePlanNode receiveNode = (ReceivePlanNode)child;
assert(receiveNode.getChildCount() == 1);
child = receiveNode.getChild(0);
// The Receive node needs a send node child.
assert( child instanceof SendPlanNode );
SendPlanNode sendNode = (SendPlanNode)child;
child = sendNode.getChild(0);
// If this window function does not use the
// index then this optimization is not possible.
// We've recorded a number of the window function
// in the root of the subplan, which will be
// the first child of the send node.
//
// Right now the only window function has number
// 0, and we don't record that in the
// WINDOWFUNCTION plan node. If there were
// more than one window function we would need
// to record a number in the plan node and
// then check that child.getWindowFunctionUsesIndex()
// returns the number in the plan node.
if ( ! ( child instanceof IndexSortablePlanNode)) {
return plan;
}
IndexSortablePlanNode indexed = (IndexSortablePlanNode)child;
if (indexed.indexUse().getWindowFunctionUsesIndex() != 0) {
return plan;
}
// Remove the Receive node and the Order by node
// and replace them with a MergeReceive node. Leave
// the order by node inline in the MergeReceive node,
// since we need it to calculate the merge.
plan.clearChildren();
receiveNode.removeFromGraph();
MergeReceivePlanNode mrnode = new MergeReceivePlanNode();
mrnode.addInlinePlanNode(onode);
mrnode.addAndLinkChild(sendNode);
plan.addAndLinkChild(mrnode);
return plan;
} | [
"private",
"AbstractPlanNode",
"applyOptimization",
"(",
"WindowFunctionPlanNode",
"plan",
")",
"{",
"assert",
"(",
"plan",
".",
"getChildCount",
"(",
")",
"==",
"1",
")",
";",
"assert",
"(",
"plan",
".",
"getChild",
"(",
"0",
")",
"!=",
"null",
")",
";",
... | Convert ReceivePlanNodes into MergeReceivePlanNodes when the
RECEIVE node's nearest parent is a window function. We won't
have any inline limits or aggregates here, so this is somewhat
simpler than the order by case.
@param plan
@return | [
"Convert",
"ReceivePlanNodes",
"into",
"MergeReceivePlanNodes",
"when",
"the",
"RECEIVE",
"node",
"s",
"nearest",
"parent",
"is",
"a",
"window",
"function",
".",
"We",
"won",
"t",
"have",
"any",
"inline",
"limits",
"or",
"aggregates",
"here",
"so",
"this",
"is... | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/microoptimizations/InlineOrderByIntoMergeReceive.java#L113-L169 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/microoptimizations/InlineOrderByIntoMergeReceive.java | InlineOrderByIntoMergeReceive.convertToSerialAggregation | AbstractPlanNode convertToSerialAggregation(AbstractPlanNode aggregateNode, OrderByPlanNode orderbyNode) {
assert(aggregateNode instanceof HashAggregatePlanNode);
HashAggregatePlanNode hashAggr = (HashAggregatePlanNode) aggregateNode;
List<AbstractExpression> groupbys = new ArrayList<>(hashAggr.getGroupByExpressions());
List<AbstractExpression> orderbys = new ArrayList<>(orderbyNode.getSortExpressions());
Set<Integer> coveredGroupByColumns = new HashSet<>();
Iterator<AbstractExpression> orderbyIt = orderbys.iterator();
while (orderbyIt.hasNext()) {
AbstractExpression orderby = orderbyIt.next();
int idx = 0;
for (AbstractExpression groupby : groupbys) {
if (!coveredGroupByColumns.contains(idx)) {
if (orderby.equals(groupby)) {
orderbyIt.remove();
coveredGroupByColumns.add(idx);
break;
}
}
++idx;
}
}
if (orderbys.isEmpty() && groupbys.size() == coveredGroupByColumns.size()) {
// All GROUP BY expressions are also ORDER BY - Serial aggregation
return AggregatePlanNode.convertToSerialAggregatePlanNode(hashAggr);
}
if (orderbys.isEmpty() && !coveredGroupByColumns.isEmpty() ) {
// Partial aggregation
List<Integer> coveredGroupByColumnList = new ArrayList<>();
coveredGroupByColumnList.addAll(coveredGroupByColumns);
return AggregatePlanNode.convertToPartialAggregatePlanNode(hashAggr, coveredGroupByColumnList);
}
return aggregateNode;
} | java | AbstractPlanNode convertToSerialAggregation(AbstractPlanNode aggregateNode, OrderByPlanNode orderbyNode) {
assert(aggregateNode instanceof HashAggregatePlanNode);
HashAggregatePlanNode hashAggr = (HashAggregatePlanNode) aggregateNode;
List<AbstractExpression> groupbys = new ArrayList<>(hashAggr.getGroupByExpressions());
List<AbstractExpression> orderbys = new ArrayList<>(orderbyNode.getSortExpressions());
Set<Integer> coveredGroupByColumns = new HashSet<>();
Iterator<AbstractExpression> orderbyIt = orderbys.iterator();
while (orderbyIt.hasNext()) {
AbstractExpression orderby = orderbyIt.next();
int idx = 0;
for (AbstractExpression groupby : groupbys) {
if (!coveredGroupByColumns.contains(idx)) {
if (orderby.equals(groupby)) {
orderbyIt.remove();
coveredGroupByColumns.add(idx);
break;
}
}
++idx;
}
}
if (orderbys.isEmpty() && groupbys.size() == coveredGroupByColumns.size()) {
// All GROUP BY expressions are also ORDER BY - Serial aggregation
return AggregatePlanNode.convertToSerialAggregatePlanNode(hashAggr);
}
if (orderbys.isEmpty() && !coveredGroupByColumns.isEmpty() ) {
// Partial aggregation
List<Integer> coveredGroupByColumnList = new ArrayList<>();
coveredGroupByColumnList.addAll(coveredGroupByColumns);
return AggregatePlanNode.convertToPartialAggregatePlanNode(hashAggr, coveredGroupByColumnList);
}
return aggregateNode;
} | [
"AbstractPlanNode",
"convertToSerialAggregation",
"(",
"AbstractPlanNode",
"aggregateNode",
",",
"OrderByPlanNode",
"orderbyNode",
")",
"{",
"assert",
"(",
"aggregateNode",
"instanceof",
"HashAggregatePlanNode",
")",
";",
"HashAggregatePlanNode",
"hashAggr",
"=",
"(",
"Hash... | The Hash aggregate can be converted to a Serial or Partial aggregate if
- all GROUP BY and ORDER BY expressions bind to each other - Serial Aggregate
- a subset of the GROUP BY expressions covers all of the ORDER BY - Partial
- anything else - remains a Hash Aggregate
@param aggregateNode
@param orderbyNode
@return new aggregate node if the conversion is possible or the original hash aggregate otherwise | [
"The",
"Hash",
"aggregate",
"can",
"be",
"converted",
"to",
"a",
"Serial",
"or",
"Partial",
"aggregate",
"if",
"-",
"all",
"GROUP",
"BY",
"and",
"ORDER",
"BY",
"expressions",
"bind",
"to",
"each",
"other",
"-",
"Serial",
"Aggregate",
"-",
"a",
"subset",
... | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/microoptimizations/InlineOrderByIntoMergeReceive.java#L311-L344 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/persist/LockFile.java | LockFile.startHeartbeat | private final void startHeartbeat() {
if (timerTask == null || HsqlTimer.isCancelled(timerTask)) {
Runnable runner = new HeartbeatRunner();
timerTask = timer.schedulePeriodicallyAfter(0, HEARTBEAT_INTERVAL,
runner, true);
}
} | java | private final void startHeartbeat() {
if (timerTask == null || HsqlTimer.isCancelled(timerTask)) {
Runnable runner = new HeartbeatRunner();
timerTask = timer.schedulePeriodicallyAfter(0, HEARTBEAT_INTERVAL,
runner, true);
}
} | [
"private",
"final",
"void",
"startHeartbeat",
"(",
")",
"{",
"if",
"(",
"timerTask",
"==",
"null",
"||",
"HsqlTimer",
".",
"isCancelled",
"(",
"timerTask",
")",
")",
"{",
"Runnable",
"runner",
"=",
"new",
"HeartbeatRunner",
"(",
")",
";",
"timerTask",
"=",... | Schedules the lock heartbeat task. | [
"Schedules",
"the",
"lock",
"heartbeat",
"task",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/persist/LockFile.java#L1177-L1185 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/persist/LockFile.java | LockFile.stopHeartbeat | private final void stopHeartbeat() {
if (timerTask != null && !HsqlTimer.isCancelled(timerTask)) {
HsqlTimer.cancel(timerTask);
timerTask = null;
}
} | java | private final void stopHeartbeat() {
if (timerTask != null && !HsqlTimer.isCancelled(timerTask)) {
HsqlTimer.cancel(timerTask);
timerTask = null;
}
} | [
"private",
"final",
"void",
"stopHeartbeat",
"(",
")",
"{",
"if",
"(",
"timerTask",
"!=",
"null",
"&&",
"!",
"HsqlTimer",
".",
"isCancelled",
"(",
"timerTask",
")",
")",
"{",
"HsqlTimer",
".",
"cancel",
"(",
"timerTask",
")",
";",
"timerTask",
"=",
"null... | Cancels the lock heartbeat task. | [
"Cancels",
"the",
"lock",
"heartbeat",
"task",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/persist/LockFile.java#L1190-L1197 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/persist/LockFile.java | LockFile.isLocked | public final static boolean isLocked(final String path) {
boolean locked = true;
try {
LockFile lockFile = LockFile.newLockFile(path);
lockFile.checkHeartbeat(false);
locked = false;
} catch (Exception e) {}
return locked;
} | java | public final static boolean isLocked(final String path) {
boolean locked = true;
try {
LockFile lockFile = LockFile.newLockFile(path);
lockFile.checkHeartbeat(false);
locked = false;
} catch (Exception e) {}
return locked;
} | [
"public",
"final",
"static",
"boolean",
"isLocked",
"(",
"final",
"String",
"path",
")",
"{",
"boolean",
"locked",
"=",
"true",
";",
"try",
"{",
"LockFile",
"lockFile",
"=",
"LockFile",
".",
"newLockFile",
"(",
"path",
")",
";",
"lockFile",
".",
"checkHear... | Retrieves whether there is potentially already a cooperative lock,
operating system lock or some other situation preventing a cooperative
lock condition from being aquired using the specified path.
@param path the path to test
@return <tt>true</tt> if there is currently something preventing the
acquisition of a cooperative lock condition using the specified
<tt>path</tt>, else <tt>false</tt> | [
"Retrieves",
"whether",
"there",
"is",
"potentially",
"already",
"a",
"cooperative",
"lock",
"operating",
"system",
"lock",
"or",
"some",
"other",
"situation",
"preventing",
"a",
"cooperative",
"lock",
"condition",
"from",
"being",
"aquired",
"using",
"the",
"spec... | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/persist/LockFile.java#L1384-L1397 | train |
VoltDB/voltdb | src/frontend/org/voltdb/probe/MeshProber.java | MeshProber.hosts | public static ImmutableSortedSet<String> hosts(String option) {
checkArgument(option != null, "option is null");
if (option.trim().isEmpty()) {
return ImmutableSortedSet.of(
HostAndPort.fromParts("", Constants.DEFAULT_INTERNAL_PORT).toString());
}
Splitter commaSplitter = Splitter.on(',').omitEmptyStrings().trimResults();
ImmutableSortedSet.Builder<String> sbld = ImmutableSortedSet.naturalOrder();
for (String h: commaSplitter.split(option)) {
checkArgument(isValidCoordinatorSpec(h), "%s is not a valid host spec", h);
sbld.add(HostAndPort.fromString(h).withDefaultPort(Constants.DEFAULT_INTERNAL_PORT).toString());
}
return sbld.build();
} | java | public static ImmutableSortedSet<String> hosts(String option) {
checkArgument(option != null, "option is null");
if (option.trim().isEmpty()) {
return ImmutableSortedSet.of(
HostAndPort.fromParts("", Constants.DEFAULT_INTERNAL_PORT).toString());
}
Splitter commaSplitter = Splitter.on(',').omitEmptyStrings().trimResults();
ImmutableSortedSet.Builder<String> sbld = ImmutableSortedSet.naturalOrder();
for (String h: commaSplitter.split(option)) {
checkArgument(isValidCoordinatorSpec(h), "%s is not a valid host spec", h);
sbld.add(HostAndPort.fromString(h).withDefaultPort(Constants.DEFAULT_INTERNAL_PORT).toString());
}
return sbld.build();
} | [
"public",
"static",
"ImmutableSortedSet",
"<",
"String",
">",
"hosts",
"(",
"String",
"option",
")",
"{",
"checkArgument",
"(",
"option",
"!=",
"null",
",",
"\"option is null\"",
")",
";",
"if",
"(",
"option",
".",
"trim",
"(",
")",
".",
"isEmpty",
"(",
... | Helper method that takes a comma delimited list of host specs, validates it,
and converts it to a set of valid coordinators
@param option a string that contains comma delimited list of host specs
@return a set of valid coordinators | [
"Helper",
"method",
"that",
"takes",
"a",
"comma",
"delimited",
"list",
"of",
"host",
"specs",
"validates",
"it",
"and",
"converts",
"it",
"to",
"a",
"set",
"of",
"valid",
"coordinators"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/probe/MeshProber.java#L104-L117 | train |
VoltDB/voltdb | src/frontend/org/voltdb/probe/MeshProber.java | MeshProber.hosts | public static ImmutableSortedSet<String> hosts(int...ports) {
if (ports.length == 0) {
return ImmutableSortedSet.of(
HostAndPort.fromParts("", Constants.DEFAULT_INTERNAL_PORT).toString());
}
ImmutableSortedSet.Builder<String> sbld = ImmutableSortedSet.naturalOrder();
for (int p: ports) {
sbld.add(HostAndPort.fromParts("", p).toString());
}
return sbld.build();
} | java | public static ImmutableSortedSet<String> hosts(int...ports) {
if (ports.length == 0) {
return ImmutableSortedSet.of(
HostAndPort.fromParts("", Constants.DEFAULT_INTERNAL_PORT).toString());
}
ImmutableSortedSet.Builder<String> sbld = ImmutableSortedSet.naturalOrder();
for (int p: ports) {
sbld.add(HostAndPort.fromParts("", p).toString());
}
return sbld.build();
} | [
"public",
"static",
"ImmutableSortedSet",
"<",
"String",
">",
"hosts",
"(",
"int",
"...",
"ports",
")",
"{",
"if",
"(",
"ports",
".",
"length",
"==",
"0",
")",
"{",
"return",
"ImmutableSortedSet",
".",
"of",
"(",
"HostAndPort",
".",
"fromParts",
"(",
"\"... | Convenience method mainly used in local cluster testing
@param ports a list of ports
@return a set of coordinator specs | [
"Convenience",
"method",
"mainly",
"used",
"in",
"local",
"cluster",
"testing"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/probe/MeshProber.java#L125-L135 | train |
VoltDB/voltdb | src/frontend/org/voltdb/ProcedureRunner.java | ProcedureRunner.call | public ClientResponseImpl call(Object... paramListIn) {
m_perCallStats = m_statsCollector.beginProcedure();
// if we're keeping track, calculate parameter size
if (m_perCallStats != null) {
StoredProcedureInvocation invoc = (m_txnState != null ? m_txnState.getInvocation() : null);
ParameterSet params = (invoc != null ? invoc.getParams() : ParameterSet.fromArrayNoCopy(paramListIn));
m_perCallStats.setParameterSize(params.getSerializedSize());
}
ClientResponseImpl result = coreCall(paramListIn);
// if we're keeping track, calculate result size
if (m_perCallStats != null) {
m_perCallStats.setResultSize(result.getResults());
}
m_statsCollector.endProcedure(result.getStatus() == ClientResponse.USER_ABORT,
(result.getStatus() != ClientResponse.USER_ABORT) &&
(result.getStatus() != ClientResponse.SUCCESS),
m_perCallStats);
// allow the GC to collect per-call stats if this proc isn't called for a while
m_perCallStats = null;
return result;
/**
 * Wraps {@link #coreCall} with statistics code: records parameter size before
 * the call, result size after it, and reports success/abort/failure status to
 * the stats collector.
 *
 * @param paramListIn the invocation parameters; used directly for sizing when
 *        no transaction-state invocation is available
 * @return the client response produced by coreCall
 */
public ClientResponseImpl call(Object... paramListIn) {
    m_perCallStats = m_statsCollector.beginProcedure();

    // if we're keeping track, calculate parameter size
    if (m_perCallStats != null) {
        // Prefer the params carried by the txn's invocation; fall back to
        // wrapping the raw argument array (no copy) when there is none.
        StoredProcedureInvocation invoc = (m_txnState != null ? m_txnState.getInvocation() : null);
        ParameterSet params = (invoc != null ? invoc.getParams() : ParameterSet.fromArrayNoCopy(paramListIn));
        m_perCallStats.setParameterSize(params.getSerializedSize());
    }

    ClientResponseImpl result = coreCall(paramListIn);

    // if we're keeping track, calculate result size
    if (m_perCallStats != null) {
        m_perCallStats.setResultSize(result.getResults());
    }
    // Report: first flag = user abort, second flag = any non-abort failure.
    m_statsCollector.endProcedure(result.getStatus() == ClientResponse.USER_ABORT,
                                  (result.getStatus() != ClientResponse.USER_ABORT) &&
                                  (result.getStatus() != ClientResponse.SUCCESS),
                                  m_perCallStats);

    // allow the GC to collect per-call stats if this proc isn't called for a while
    m_perCallStats = null;

    return result;
}
"public",
"ClientResponseImpl",
"call",
"(",
"Object",
"...",
"paramListIn",
")",
"{",
"m_perCallStats",
"=",
"m_statsCollector",
".",
"beginProcedure",
"(",
")",
";",
"// if we're keeping track, calculate parameter size",
"if",
"(",
"m_perCallStats",
"!=",
"null",
")",... | Wraps coreCall with statistics code. | [
"Wraps",
"coreCall",
"with",
"statistics",
"code",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/ProcedureRunner.java#L279-L304 | train |
VoltDB/voltdb | src/frontend/org/voltdb/ProcedureRunner.java | ProcedureRunner.checkPartition | public boolean checkPartition(TransactionState txnState, TheHashinator hashinator) {
if (m_isSinglePartition) {
// can happen when a proc changes from multi-to-single after it's routed
if (hashinator == null) {
return false; // this will kick it back to CI for re-routing
}
if (m_site.getCorrespondingPartitionId() == MpInitiator.MP_INIT_PID) {
// SP txn misrouted to MPI, possible to happen during catalog update
throw new ExpectedProcedureException("Single-partition procedure routed to multi-partition initiator");
}
StoredProcedureInvocation invocation = txnState.getInvocation();
VoltType parameterType;
Object parameterAtIndex;
// check if AdHoc_RO_SP or AdHoc_RW_SP
if (m_procedure instanceof AdHocBase) {
// ClientInterface should pre-validate this param is valid
parameterAtIndex = invocation.getParameterAtIndex(0);
parameterType = VoltType.get((Byte) invocation.getParameterAtIndex(1));
if (parameterAtIndex == null && m_isReadOnly) {
assert (m_procedure instanceof AdHoc_RO_SP);
// Replicated table reads can run on any partition, skip check
return true;
}
} else {
parameterType = m_partitionColumnType;
parameterAtIndex = invocation.getParameterAtIndex(m_partitionColumn);
}
// Note that @LoadSinglepartitionTable has problems if the parititoning param
// uses integers as bytes and isn't padded to 8b or using the right byte order.
// Since this is not exposed to users, we're ok for now. The right fix is to probably
// accept the right partitioning type from the user, then rewrite the params internally
// before we initiate the proc (like adhocs).
try {
int partition = hashinator.getHashedPartitionForParameter(parameterType, parameterAtIndex);
if (partition == m_site.getCorrespondingPartitionId()) {
return true;
} else {
// Wrong partition, should restart the txn
if (HOST_TRACE_ENABLED) {
log.trace("Txn " + txnState.getInvocation().getProcName() +
" will be restarted");
}
}
} catch (Exception e) {
log.warn("Unable to check partitioning of transaction " + txnState.m_spHandle, e);
}
return false;
} else {
if (!m_catProc.getEverysite() && m_site.getCorrespondingPartitionId() != MpInitiator.MP_INIT_PID) {
log.warn("Detected MP transaction misrouted to SPI. This can happen during a schema update. " +
"Otherwise, it is unexpected behavior. " +
"Please report the following information to support@voltdb.com");
log.warn("procedure name: " + m_catProc.getTypeName() +
", site partition id: " + m_site.getCorrespondingPartitionId() +
", site HSId: " + m_site.getCorrespondingHostId() + ":" + m_site.getCorrespondingSiteId() +
", txnState initiatorHSId: " + CoreUtils.hsIdToString(txnState.initiatorHSId));
if (txnState.getNotice() instanceof Iv2InitiateTaskMessage) {
Iv2InitiateTaskMessage initiateTaskMessage = (Iv2InitiateTaskMessage) txnState.getNotice();
log.warn("Iv2InitiateTaskMessage: sourceHSId: " +
CoreUtils.hsIdToString(initiateTaskMessage.m_sourceHSId) +
", dump: " + initiateTaskMessage);
}
// MP txn misrouted to SPI, possible to happen during catalog update
throw new ExpectedProcedureException("Multi-partition procedure routed to single-partition initiator");
}
// For n-partition transactions, we need to rehash the partitioning values and check
// if they still hash to the assigned partitions.
//
// Note that when n-partition transaction runs, it's run on the MPI site, so calling
// m_site.getCorrespondingPartitionId() will return the MPI's partition ID. We need
// another way of getting what partitions were assigned to this transaction.
return true;
}
/**
 * Check if the txn hashes to this site's partition. If not, it should be
 * restarted (single-partition case returns false) or was misrouted entirely
 * (an {@link ExpectedProcedureException} is thrown).
 *
 * @param txnState   the transaction being checked
 * @param hashinator the current hashinator; may be null, which forces a
 *                   re-route for single-partition procedures
 * @return true if the txn hashes to the current partition, false otherwise
 * @throws ExpectedProcedureException if an SP txn reached the MPI, or an MP
 *         (non-everysite) txn reached an SPI — both possible during a catalog update
 */
public boolean checkPartition(TransactionState txnState, TheHashinator hashinator) {
    if (m_isSinglePartition) {
        // can happen when a proc changes from multi-to-single after it's routed
        if (hashinator == null) {
            return false; // this will kick it back to CI for re-routing
        }

        if (m_site.getCorrespondingPartitionId() == MpInitiator.MP_INIT_PID) {
            // SP txn misrouted to MPI, possible to happen during catalog update
            throw new ExpectedProcedureException("Single-partition procedure routed to multi-partition initiator");
        }

        StoredProcedureInvocation invocation = txnState.getInvocation();
        VoltType parameterType;
        Object parameterAtIndex;

        // check if AdHoc_RO_SP or AdHoc_RW_SP
        if (m_procedure instanceof AdHocBase) {
            // ClientInterface should pre-validate this param is valid
            // Ad-hoc SP convention: param 0 is the partitioning value,
            // param 1 its VoltType byte code.
            parameterAtIndex = invocation.getParameterAtIndex(0);
            parameterType = VoltType.get((Byte) invocation.getParameterAtIndex(1));
            if (parameterAtIndex == null && m_isReadOnly) {
                assert (m_procedure instanceof AdHoc_RO_SP);
                // Replicated table reads can run on any partition, skip check
                return true;
            }
        } else {
            parameterType = m_partitionColumnType;
            parameterAtIndex = invocation.getParameterAtIndex(m_partitionColumn);
        }

        // Note that @LoadSinglepartitionTable has problems if the partitioning param
        // uses integers as bytes and isn't padded to 8b or using the right byte order.
        // Since this is not exposed to users, we're ok for now. The right fix is to probably
        // accept the right partitioning type from the user, then rewrite the params internally
        // before we initiate the proc (like adhocs).
        try {
            int partition = hashinator.getHashedPartitionForParameter(parameterType, parameterAtIndex);
            if (partition == m_site.getCorrespondingPartitionId()) {
                return true;
            } else {
                // Wrong partition, should restart the txn
                if (HOST_TRACE_ENABLED) {
                    log.trace("Txn " + txnState.getInvocation().getProcName() +
                            " will be restarted");
                }
            }
        } catch (Exception e) {
            // Hashing failed: log and fall through to a restart (return false).
            log.warn("Unable to check partitioning of transaction " + txnState.m_spHandle, e);
        }
        return false;
    } else {
        if (!m_catProc.getEverysite() && m_site.getCorrespondingPartitionId() != MpInitiator.MP_INIT_PID) {
            log.warn("Detected MP transaction misrouted to SPI. This can happen during a schema update. " +
                    "Otherwise, it is unexpected behavior. " +
                    "Please report the following information to support@voltdb.com");
            log.warn("procedure name: " + m_catProc.getTypeName() +
                    ", site partition id: " + m_site.getCorrespondingPartitionId() +
                    ", site HSId: " + m_site.getCorrespondingHostId() + ":" + m_site.getCorrespondingSiteId() +
                    ", txnState initiatorHSId: " + CoreUtils.hsIdToString(txnState.initiatorHSId));
            if (txnState.getNotice() instanceof Iv2InitiateTaskMessage) {
                Iv2InitiateTaskMessage initiateTaskMessage = (Iv2InitiateTaskMessage) txnState.getNotice();
                log.warn("Iv2InitiateTaskMessage: sourceHSId: " +
                        CoreUtils.hsIdToString(initiateTaskMessage.m_sourceHSId) +
                        ", dump: " + initiateTaskMessage);
            }
            // MP txn misrouted to SPI, possible to happen during catalog update
            throw new ExpectedProcedureException("Multi-partition procedure routed to single-partition initiator");
        }
        // For n-partition transactions, we need to rehash the partitioning values and check
        // if they still hash to the assigned partitions.
        //
        // Note that when n-partition transaction runs, it's run on the MPI site, so calling
        // m_site.getCorrespondingPartitionId() will return the MPI's partition ID. We need
        // another way of getting what partitions were assigned to this transaction.
        return true;
    }
}
"public",
"boolean",
"checkPartition",
"(",
"TransactionState",
"txnState",
",",
"TheHashinator",
"hashinator",
")",
"{",
"if",
"(",
"m_isSinglePartition",
")",
"{",
"// can happen when a proc changes from multi-to-single after it's routed",
"if",
"(",
"hashinator",
"==",
"... | Check if the txn hashes to this partition. If not, it should be restarted.
@param txnState
@return true if the txn hashes to the current partition, false otherwise | [
"Check",
"if",
"the",
"txn",
"hashes",
"to",
"this",
"partition",
".",
"If",
"not",
"it",
"should",
"be",
"restarted",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/ProcedureRunner.java#L517-L598 | train |
VoltDB/voltdb | src/frontend/org/voltdb/ProcedureRunner.java | ProcedureRunner.isProcedureStackTraceElement | public static boolean isProcedureStackTraceElement(String procedureName, StackTraceElement stel) {
int lastPeriodPos = stel.getClassName().lastIndexOf('.');
if (lastPeriodPos == -1) {
lastPeriodPos = 0;
} else {
++lastPeriodPos;
}
// Account for inner classes too. Inner classes names comprise of the parent
// class path followed by a dollar sign
String simpleName = stel.getClassName().substring(lastPeriodPos);
return simpleName.equals(procedureName)
|| (simpleName.startsWith(procedureName) && simpleName.charAt(procedureName.length()) == '$');
} | java | public static boolean isProcedureStackTraceElement(String procedureName, StackTraceElement stel) {
int lastPeriodPos = stel.getClassName().lastIndexOf('.');
if (lastPeriodPos == -1) {
lastPeriodPos = 0;
} else {
++lastPeriodPos;
}
// Account for inner classes too. Inner classes names comprise of the parent
// class path followed by a dollar sign
String simpleName = stel.getClassName().substring(lastPeriodPos);
return simpleName.equals(procedureName)
|| (simpleName.startsWith(procedureName) && simpleName.charAt(procedureName.length()) == '$');
} | [
"public",
"static",
"boolean",
"isProcedureStackTraceElement",
"(",
"String",
"procedureName",
",",
"StackTraceElement",
"stel",
")",
"{",
"int",
"lastPeriodPos",
"=",
"stel",
".",
"getClassName",
"(",
")",
".",
"lastIndexOf",
"(",
"'",
"'",
")",
";",
"if",
"(... | Test whether or not the given stack frame is within a procedure invocation
@param The name of the procedure
@param stel a stack trace element
@return true if it is, false it is not | [
"Test",
"whether",
"or",
"not",
"the",
"given",
"stack",
"frame",
"is",
"within",
"a",
"procedure",
"invocation"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/ProcedureRunner.java#L1174-L1188 | train |
VoltDB/voltdb | src/frontend/org/voltdb/utils/DeploymentRequestServlet.java | DeploymentRequestServlet.handleUpdateDeployment | public void handleUpdateDeployment(String jsonp,
HttpServletRequest request,
HttpServletResponse response, AuthenticationResult ar)
throws IOException, ServletException {
String deployment = request.getParameter("deployment");
if (deployment == null || deployment.length() == 0) {
response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "Failed to get deployment information."));
response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
return;
}
try {
DeploymentType newDeployment = m_mapper.readValue(deployment, DeploymentType.class);
if (newDeployment == null) {
response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "Failed to parse deployment information."));
return;
}
DeploymentType currentDeployment = this.getDeployment();
if (currentDeployment.getUsers() != null) {
newDeployment.setUsers(currentDeployment.getUsers());
}
// reset the host count so that it wont fail the deployment checks
newDeployment.getCluster().setHostcount(currentDeployment.getCluster().getHostcount());
String dep = CatalogUtil.getDeployment(newDeployment);
if (dep == null || dep.trim().length() <= 0) {
response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "Failed to build deployment information."));
return;
}
Object[] params = new Object[]{null, dep};
SyncCallback cb = new SyncCallback();
httpClientInterface.callProcedure(request.getRemoteHost(), ar, BatchTimeoutOverrideType.NO_TIMEOUT, cb, "@UpdateApplicationCatalog", params);
cb.waitForResponse();
ClientResponseImpl r = ClientResponseImpl.class.cast(cb.getResponse());
if (r.getStatus() == ClientResponse.SUCCESS) {
response.getWriter().print(buildClientResponse(jsonp, ClientResponse.SUCCESS, "Deployment Updated."));
} else {
response.getWriter().print(HTTPClientInterface.asJsonp(jsonp, r.toJSONString()));
}
} catch (JsonParseException e) {
response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "Unparsable JSON"));
} catch (Exception ex) {
m_log.error("Failed to update deployment from API", ex);
response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, Throwables.getStackTraceAsString(ex)));
}
/**
 * Update the deployment: parses the "deployment" request parameter as JSON,
 * carries over the current users and host count, rebuilds the deployment XML,
 * and applies it via a synchronous @UpdateApplicationCatalog call.
 *
 * Responses are written as JSON(P) client responses; HTTP status is set to
 * 400 for missing/unparsable input and 500 for unexpected failures.
 *
 * @param jsonp    optional JSONP callback name, or null for plain JSON
 * @param request  the servlet request carrying the "deployment" parameter
 * @param response the servlet response the result is written to
 * @param ar       the authentication result of the calling user
 */
public void handleUpdateDeployment(String jsonp,
                   HttpServletRequest request,
                   HttpServletResponse response, AuthenticationResult ar)
                   throws IOException, ServletException {
    String deployment = request.getParameter("deployment");

    if (deployment == null || deployment.length() == 0) {
        response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "Failed to get deployment information."));
        response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
        return;
    }
    try {
        DeploymentType newDeployment = m_mapper.readValue(deployment, DeploymentType.class);
        if (newDeployment == null) {
            response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "Failed to parse deployment information."));
            return;
        }

        DeploymentType currentDeployment = this.getDeployment();
        // Users are managed separately; keep the currently deployed ones.
        if (currentDeployment.getUsers() != null) {
            newDeployment.setUsers(currentDeployment.getUsers());
        }
        // reset the host count so that it wont fail the deployment checks
        newDeployment.getCluster().setHostcount(currentDeployment.getCluster().getHostcount());

        String dep = CatalogUtil.getDeployment(newDeployment);
        if (dep == null || dep.trim().length() <= 0) {
            response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "Failed to build deployment information."));
            return;
        }
        // First param (catalog jar) is null: deployment-only update.
        Object[] params = new Object[]{null, dep};
        SyncCallback cb = new SyncCallback();
        httpClientInterface.callProcedure(request.getRemoteHost(), ar, BatchTimeoutOverrideType.NO_TIMEOUT, cb, "@UpdateApplicationCatalog", params);
        cb.waitForResponse();
        ClientResponseImpl r = ClientResponseImpl.class.cast(cb.getResponse());
        if (r.getStatus() == ClientResponse.SUCCESS) {
            response.getWriter().print(buildClientResponse(jsonp, ClientResponse.SUCCESS, "Deployment Updated."));
        } else {
            // Propagate the procedure's own response JSON on failure.
            response.getWriter().print(HTTPClientInterface.asJsonp(jsonp, r.toJSONString()));
        }
    } catch (JsonParseException e) {
        response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
        response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "Unparsable JSON"));
    } catch (Exception ex) {
        m_log.error("Failed to update deployment from API", ex);
        response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
        response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, Throwables.getStackTraceAsString(ex)));
    }
}
"public",
"void",
"handleUpdateDeployment",
"(",
"String",
"jsonp",
",",
"HttpServletRequest",
"request",
",",
"HttpServletResponse",
"response",
",",
"AuthenticationResult",
"ar",
")",
"throws",
"IOException",
",",
"ServletException",
"{",
"String",
"deployment",
"=",
... | Update the deployment | [
"Update",
"the",
"deployment"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/utils/DeploymentRequestServlet.java#L296-L343 | train |
VoltDB/voltdb | src/frontend/org/voltdb/utils/DeploymentRequestServlet.java | DeploymentRequestServlet.handleRemoveUser | public void handleRemoveUser(String jsonp, String target,
HttpServletRequest request,
HttpServletResponse response, AuthenticationResult ar)
throws IOException, ServletException {
try {
DeploymentType newDeployment = CatalogUtil.getDeployment(new ByteArrayInputStream(getDeploymentBytes()));
User user = null;
String[] splitTarget = target.split("/");
if (splitTarget.length == 3) {
user = findUser(splitTarget[2], newDeployment);
}
if (user == null) {
response.setStatus(HttpServletResponse.SC_NOT_FOUND);
response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "User not found"));
return;
}
if (newDeployment.getUsers().getUser().size() == 1) {
newDeployment.setUsers(null);
} else {
newDeployment.getUsers().getUser().remove(user);
}
String dep = CatalogUtil.getDeployment(newDeployment);
if (dep == null || dep.trim().length() <= 0) {
response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "Failed to build deployment information."));
return;
}
Object[] params = new Object[]{null, dep};
//Call sync as nothing else can happen when this is going on.
SyncCallback cb = new SyncCallback();
httpClientInterface.callProcedure(request.getRemoteHost(), ar, BatchTimeoutOverrideType.NO_TIMEOUT, cb, "@UpdateApplicationCatalog", params);
cb.waitForResponse();
ClientResponseImpl r = ClientResponseImpl.class.cast(cb.getResponse());
response.setStatus(HttpServletResponse.SC_NO_CONTENT);
if (r.getStatus() == ClientResponse.SUCCESS) {
response.getWriter().print(buildClientResponse(jsonp, ClientResponse.SUCCESS, "User Removed."));
} else {
response.getWriter().print(HTTPClientInterface.asJsonp(jsonp, r.toJSONString()));
}
} catch (Exception ex) {
m_log.error("Failed to update role from API", ex);
response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, Throwables.getStackTraceAsString(ex)));
}
/**
 * Handle DELETE for users: removes the user named in the request target
 * ("deployment/users/&lt;name&gt;") from the current deployment and applies the
 * result via a synchronous @UpdateApplicationCatalog call.
 *
 * Writes a JSON(P) client response; 404 if the user does not exist,
 * 204 (no content) on a completed update attempt, 400 on other failures.
 *
 * @param jsonp    optional JSONP callback name, or null for plain JSON
 * @param target   the request path, expected to split into 3 segments with the
 *                 user name last
 * @param request  the servlet request
 * @param response the servlet response the result is written to
 * @param ar       the authentication result of the calling user
 */
public void handleRemoveUser(String jsonp, String target,
                   HttpServletRequest request,
                   HttpServletResponse response, AuthenticationResult ar)
                   throws IOException, ServletException {
    try {
        DeploymentType newDeployment = CatalogUtil.getDeployment(new ByteArrayInputStream(getDeploymentBytes()));
        User user = null;
        String[] splitTarget = target.split("/");
        if (splitTarget.length == 3) {
            user = findUser(splitTarget[2], newDeployment);
        }
        if (user == null) {
            response.setStatus(HttpServletResponse.SC_NOT_FOUND);
            response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "User not found"));
            return;
        }
        // Removing the last user clears the <users> element entirely.
        if (newDeployment.getUsers().getUser().size() == 1) {
            newDeployment.setUsers(null);
        } else {
            newDeployment.getUsers().getUser().remove(user);
        }

        String dep = CatalogUtil.getDeployment(newDeployment);
        if (dep == null || dep.trim().length() <= 0) {
            response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
            response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "Failed to build deployment information."));
            return;
        }
        Object[] params = new Object[]{null, dep};
        //Call sync as nothing else can happen when this is going on.
        SyncCallback cb = new SyncCallback();
        httpClientInterface.callProcedure(request.getRemoteHost(), ar, BatchTimeoutOverrideType.NO_TIMEOUT, cb, "@UpdateApplicationCatalog", params);
        cb.waitForResponse();
        ClientResponseImpl r = ClientResponseImpl.class.cast(cb.getResponse());
        response.setStatus(HttpServletResponse.SC_NO_CONTENT);
        if (r.getStatus() == ClientResponse.SUCCESS) {
            response.getWriter().print(buildClientResponse(jsonp, ClientResponse.SUCCESS, "User Removed."));
        } else {
            // Propagate the procedure's own response JSON on failure.
            response.getWriter().print(HTTPClientInterface.asJsonp(jsonp, r.toJSONString()));
        }
    } catch (Exception ex) {
        m_log.error("Failed to update role from API", ex);
        response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
        response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, Throwables.getStackTraceAsString(ex)));
    }
}
"public",
"void",
"handleRemoveUser",
"(",
"String",
"jsonp",
",",
"String",
"target",
",",
"HttpServletRequest",
"request",
",",
"HttpServletResponse",
"response",
",",
"AuthenticationResult",
"ar",
")",
"throws",
"IOException",
",",
"ServletException",
"{",
"try",
... | Handle DELETE for users | [
"Handle",
"DELETE",
"for",
"users"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/utils/DeploymentRequestServlet.java#L475-L520 | train |
VoltDB/voltdb | src/frontend/org/voltdb/utils/DeploymentRequestServlet.java | DeploymentRequestServlet.handleGetUsers | public void handleGetUsers(String jsonp, String target,
HttpServletRequest request,
HttpServletResponse response)
throws IOException, ServletException {
ObjectMapper mapper = new ObjectMapper();
User user = null;
String[] splitTarget = target.split("/");
if (splitTarget.length < 3 || splitTarget[2].isEmpty()) {
if (jsonp != null) {
response.getWriter().write(jsonp + "(");
}
if (getDeployment().getUsers() != null) {
List<IdUser> id = new ArrayList<>();
for (UsersType.User u : getDeployment().getUsers().getUser()) {
id.add(new IdUser(u, getHostHeader()));
}
mapper.writeValue(response.getWriter(), id);
} else {
response.getWriter().write("[]");
}
if (jsonp != null) {
response.getWriter().write(")");
}
return;
}
user = findUser(splitTarget[2], getDeployment());
if (user == null) {
response.setStatus(HttpServletResponse.SC_NOT_FOUND);
response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "User not found"));
return;
} else {
if (jsonp != null) {
response.getWriter().write(jsonp + "(");
}
mapper.writeValue(response.getWriter(), new IdUser(user, getHostHeader()));
if (jsonp != null) {
response.getWriter().write(")");
}
}
} | java | public void handleGetUsers(String jsonp, String target,
HttpServletRequest request,
HttpServletResponse response)
throws IOException, ServletException {
ObjectMapper mapper = new ObjectMapper();
User user = null;
String[] splitTarget = target.split("/");
if (splitTarget.length < 3 || splitTarget[2].isEmpty()) {
if (jsonp != null) {
response.getWriter().write(jsonp + "(");
}
if (getDeployment().getUsers() != null) {
List<IdUser> id = new ArrayList<>();
for (UsersType.User u : getDeployment().getUsers().getUser()) {
id.add(new IdUser(u, getHostHeader()));
}
mapper.writeValue(response.getWriter(), id);
} else {
response.getWriter().write("[]");
}
if (jsonp != null) {
response.getWriter().write(")");
}
return;
}
user = findUser(splitTarget[2], getDeployment());
if (user == null) {
response.setStatus(HttpServletResponse.SC_NOT_FOUND);
response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "User not found"));
return;
} else {
if (jsonp != null) {
response.getWriter().write(jsonp + "(");
}
mapper.writeValue(response.getWriter(), new IdUser(user, getHostHeader()));
if (jsonp != null) {
response.getWriter().write(")");
}
}
} | [
"public",
"void",
"handleGetUsers",
"(",
"String",
"jsonp",
",",
"String",
"target",
",",
"HttpServletRequest",
"request",
",",
"HttpServletResponse",
"response",
")",
"throws",
"IOException",
",",
"ServletException",
"{",
"ObjectMapper",
"mapper",
"=",
"new",
"Obje... | Handle GET for users | [
"Handle",
"GET",
"for",
"users"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/utils/DeploymentRequestServlet.java#L523-L562 | train |
VoltDB/voltdb | src/frontend/org/voltdb/utils/DeploymentRequestServlet.java | DeploymentRequestServlet.handleGetExportTypes | public void handleGetExportTypes(String jsonp, HttpServletResponse response)
throws IOException, ServletException {
if (jsonp != null) {
response.getWriter().write(jsonp + "(");
}
JSONObject exportTypes = new JSONObject();
HashSet<String> exportList = new HashSet<>();
for (ServerExportEnum type : ServerExportEnum.values()) {
exportList.add(type.value().toUpperCase());
}
try {
exportTypes.put("types", exportList);
} catch (JSONException e) {
m_log.error("Failed to generate exportTypes JSON: ", e);
response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "Type list failed to build"));
return;
}
response.getWriter().write(exportTypes.toString());
if (jsonp != null) {
response.getWriter().write(")");
}
} | java | public void handleGetExportTypes(String jsonp, HttpServletResponse response)
throws IOException, ServletException {
if (jsonp != null) {
response.getWriter().write(jsonp + "(");
}
JSONObject exportTypes = new JSONObject();
HashSet<String> exportList = new HashSet<>();
for (ServerExportEnum type : ServerExportEnum.values()) {
exportList.add(type.value().toUpperCase());
}
try {
exportTypes.put("types", exportList);
} catch (JSONException e) {
m_log.error("Failed to generate exportTypes JSON: ", e);
response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "Type list failed to build"));
return;
}
response.getWriter().write(exportTypes.toString());
if (jsonp != null) {
response.getWriter().write(")");
}
} | [
"public",
"void",
"handleGetExportTypes",
"(",
"String",
"jsonp",
",",
"HttpServletResponse",
"response",
")",
"throws",
"IOException",
",",
"ServletException",
"{",
"if",
"(",
"jsonp",
"!=",
"null",
")",
"{",
"response",
".",
"getWriter",
"(",
")",
".",
"writ... | Handle GET for export types | [
"Handle",
"GET",
"for",
"export",
"types"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/utils/DeploymentRequestServlet.java#L565-L587 | train |
VoltDB/voltdb | src/frontend/org/voltdb/RestoreAgent.java | RestoreAgent.createZKDirectory | void createZKDirectory(String path) {
try {
try {
m_zk.create(path, new byte[0],
Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
} catch (KeeperException e) {
if (e.code() != Code.NODEEXISTS) {
throw e;
}
}
} catch (Exception e) {
VoltDB.crashGlobalVoltDB("Failed to create Zookeeper node: " + e.getMessage(),
false, e);
}
} | java | void createZKDirectory(String path) {
try {
try {
m_zk.create(path, new byte[0],
Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
} catch (KeeperException e) {
if (e.code() != Code.NODEEXISTS) {
throw e;
}
}
} catch (Exception e) {
VoltDB.crashGlobalVoltDB("Failed to create Zookeeper node: " + e.getMessage(),
false, e);
}
} | [
"void",
"createZKDirectory",
"(",
"String",
"path",
")",
"{",
"try",
"{",
"try",
"{",
"m_zk",
".",
"create",
"(",
"path",
",",
"new",
"byte",
"[",
"0",
"]",
",",
"Ids",
".",
"OPEN_ACL_UNSAFE",
",",
"CreateMode",
".",
"PERSISTENT",
")",
";",
"}",
"cat... | Creates a ZooKeeper directory if it doesn't exist. Crashes VoltDB if the
creation fails for any reason other then the path already existing.
@param path | [
"Creates",
"a",
"ZooKeeper",
"directory",
"if",
"it",
"doesn",
"t",
"exist",
".",
"Crashes",
"VoltDB",
"if",
"the",
"creation",
"fails",
"for",
"any",
"reason",
"other",
"then",
"the",
"path",
"already",
"existing",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/RestoreAgent.java#L420-L434 | train |
VoltDB/voltdb | src/frontend/org/voltdb/RestoreAgent.java | RestoreAgent.findRestoreCatalog | public Pair<Integer, String> findRestoreCatalog() {
enterRestore();
try {
m_snapshotToRestore = generatePlans();
} catch (Exception e) {
VoltDB.crashGlobalVoltDB(e.getMessage(), true, e);
}
if (m_snapshotToRestore != null) {
int hostId = m_snapshotToRestore.hostId;
File file = new File(m_snapshotToRestore.path,
m_snapshotToRestore.nonce + ".jar");
String path = file.getPath();
return Pair.of(hostId, path);
}
return null;
} | java | public Pair<Integer, String> findRestoreCatalog() {
enterRestore();
try {
m_snapshotToRestore = generatePlans();
} catch (Exception e) {
VoltDB.crashGlobalVoltDB(e.getMessage(), true, e);
}
if (m_snapshotToRestore != null) {
int hostId = m_snapshotToRestore.hostId;
File file = new File(m_snapshotToRestore.path,
m_snapshotToRestore.nonce + ".jar");
String path = file.getPath();
return Pair.of(hostId, path);
}
return null;
} | [
"public",
"Pair",
"<",
"Integer",
",",
"String",
">",
"findRestoreCatalog",
"(",
")",
"{",
"enterRestore",
"(",
")",
";",
"try",
"{",
"m_snapshotToRestore",
"=",
"generatePlans",
"(",
")",
";",
"}",
"catch",
"(",
"Exception",
"e",
")",
"{",
"VoltDB",
"."... | Generate restore and replay plans and return the catalog associated with
the snapshot to restore if there is anything to restore.
@return The (host ID, catalog path) pair, or null if there is no snapshot
to restore. | [
"Generate",
"restore",
"and",
"replay",
"plans",
"and",
"return",
"the",
"catalog",
"associated",
"with",
"the",
"snapshot",
"to",
"restore",
"if",
"there",
"is",
"anything",
"to",
"restore",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/RestoreAgent.java#L529-L547 | train |
VoltDB/voltdb | src/frontend/org/voltdb/RestoreAgent.java | RestoreAgent.enterRestore | void enterRestore() {
createZKDirectory(VoltZK.restore);
createZKDirectory(VoltZK.restore_barrier);
createZKDirectory(VoltZK.restore_barrier2);
try {
m_generatedRestoreBarrier2 = m_zk.create(VoltZK.restore_barrier2 + "/counter", null,
Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL);
} catch (Exception e) {
VoltDB.crashGlobalVoltDB("Failed to create Zookeeper node: " + e.getMessage(),
false, e);
}
} | java | void enterRestore() {
createZKDirectory(VoltZK.restore);
createZKDirectory(VoltZK.restore_barrier);
createZKDirectory(VoltZK.restore_barrier2);
try {
m_generatedRestoreBarrier2 = m_zk.create(VoltZK.restore_barrier2 + "/counter", null,
Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL);
} catch (Exception e) {
VoltDB.crashGlobalVoltDB("Failed to create Zookeeper node: " + e.getMessage(),
false, e);
}
} | [
"void",
"enterRestore",
"(",
")",
"{",
"createZKDirectory",
"(",
"VoltZK",
".",
"restore",
")",
";",
"createZKDirectory",
"(",
"VoltZK",
".",
"restore_barrier",
")",
";",
"createZKDirectory",
"(",
"VoltZK",
".",
"restore_barrier2",
")",
";",
"try",
"{",
"m_gen... | Enters the restore process. Creates ZooKeeper barrier node for this host. | [
"Enters",
"the",
"restore",
"process",
".",
"Creates",
"ZooKeeper",
"barrier",
"node",
"for",
"this",
"host",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/RestoreAgent.java#L552-L564 | train |
VoltDB/voltdb | src/frontend/org/voltdb/RestoreAgent.java | RestoreAgent.exitRestore | void exitRestore() {
try {
m_zk.delete(m_generatedRestoreBarrier2, -1);
} catch (Exception e) {
VoltDB.crashLocalVoltDB("Unable to delete zk node " + m_generatedRestoreBarrier2, false, e);
}
if (m_callback != null) {
m_callback.onSnapshotRestoreCompletion();
}
LOG.debug("Waiting for all hosts to complete restore");
List<String> children = null;
while (true) {
try {
children = m_zk.getChildren(VoltZK.restore_barrier2, false);
} catch (KeeperException e2) {
VoltDB.crashGlobalVoltDB(e2.getMessage(), false, e2);
} catch (InterruptedException e2) {
continue;
}
if (children.size() > 0) {
try {
Thread.sleep(500);
} catch (InterruptedException e) {}
} else {
break;
}
}
// Clean up the ZK snapshot ID node so that we're good for next time.
try
{
m_zk.delete(VoltZK.restore_snapshot_id, -1);
}
catch (Exception ignore) {}
} | java | void exitRestore() {
try {
m_zk.delete(m_generatedRestoreBarrier2, -1);
} catch (Exception e) {
VoltDB.crashLocalVoltDB("Unable to delete zk node " + m_generatedRestoreBarrier2, false, e);
}
if (m_callback != null) {
m_callback.onSnapshotRestoreCompletion();
}
LOG.debug("Waiting for all hosts to complete restore");
List<String> children = null;
while (true) {
try {
children = m_zk.getChildren(VoltZK.restore_barrier2, false);
} catch (KeeperException e2) {
VoltDB.crashGlobalVoltDB(e2.getMessage(), false, e2);
} catch (InterruptedException e2) {
continue;
}
if (children.size() > 0) {
try {
Thread.sleep(500);
} catch (InterruptedException e) {}
} else {
break;
}
}
// Clean up the ZK snapshot ID node so that we're good for next time.
try
{
m_zk.delete(VoltZK.restore_snapshot_id, -1);
}
catch (Exception ignore) {}
} | [
"void",
"exitRestore",
"(",
")",
"{",
"try",
"{",
"m_zk",
".",
"delete",
"(",
"m_generatedRestoreBarrier2",
",",
"-",
"1",
")",
";",
"}",
"catch",
"(",
"Exception",
"e",
")",
"{",
"VoltDB",
".",
"crashLocalVoltDB",
"(",
"\"Unable to delete zk node \"",
"+",
... | Exists the restore process. Waits for all other hosts to complete first.
This method blocks. | [
"Exists",
"the",
"restore",
"process",
".",
"Waits",
"for",
"all",
"other",
"hosts",
"to",
"complete",
"first",
".",
"This",
"method",
"blocks",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/RestoreAgent.java#L570-L607 | train |
VoltDB/voltdb | src/frontend/org/voltdb/RestoreAgent.java | RestoreAgent.consolidateSnapshotInfos | static SnapshotInfo consolidateSnapshotInfos(Collection<SnapshotInfo> lastSnapshot)
{
SnapshotInfo chosen = null;
if (lastSnapshot != null) {
Iterator<SnapshotInfo> i = lastSnapshot.iterator();
while (i.hasNext()) {
SnapshotInfo next = i.next();
if (chosen == null) {
chosen = next;
} else if (next.hostId < chosen.hostId) {
next.partitionToTxnId.putAll(chosen.partitionToTxnId);
chosen = next;
}
else {
// create a full mapping of txn ids to partition ids.
chosen.partitionToTxnId.putAll(next.partitionToTxnId);
}
}
}
return chosen;
} | java | static SnapshotInfo consolidateSnapshotInfos(Collection<SnapshotInfo> lastSnapshot)
{
SnapshotInfo chosen = null;
if (lastSnapshot != null) {
Iterator<SnapshotInfo> i = lastSnapshot.iterator();
while (i.hasNext()) {
SnapshotInfo next = i.next();
if (chosen == null) {
chosen = next;
} else if (next.hostId < chosen.hostId) {
next.partitionToTxnId.putAll(chosen.partitionToTxnId);
chosen = next;
}
else {
// create a full mapping of txn ids to partition ids.
chosen.partitionToTxnId.putAll(next.partitionToTxnId);
}
}
}
return chosen;
} | [
"static",
"SnapshotInfo",
"consolidateSnapshotInfos",
"(",
"Collection",
"<",
"SnapshotInfo",
">",
"lastSnapshot",
")",
"{",
"SnapshotInfo",
"chosen",
"=",
"null",
";",
"if",
"(",
"lastSnapshot",
"!=",
"null",
")",
"{",
"Iterator",
"<",
"SnapshotInfo",
">",
"i",... | Picks a snapshot info for restore. A single snapshot might have different
files scattered across multiple machines. All nodes must pick the same
SnapshotInfo or different nodes will pick different catalogs to restore.
Pick one SnapshotInfo and consolidate the per-node state into it. | [
"Picks",
"a",
"snapshot",
"info",
"for",
"restore",
".",
"A",
"single",
"snapshot",
"might",
"have",
"different",
"files",
"scattered",
"across",
"multiple",
"machines",
".",
"All",
"nodes",
"must",
"pick",
"the",
"same",
"SnapshotInfo",
"or",
"different",
"no... | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/RestoreAgent.java#L906-L926 | train |
VoltDB/voltdb | src/frontend/org/voltdb/RestoreAgent.java | RestoreAgent.sendSnapshotTxnId | private void sendSnapshotTxnId(SnapshotInfo toRestore) {
long txnId = toRestore != null ? toRestore.txnId : 0;
String jsonData = toRestore != null ? toRestore.toJSONObject().toString() : "{}";
LOG.debug("Sending snapshot ID " + txnId + " for restore to other nodes");
try {
m_zk.create(VoltZK.restore_snapshot_id, jsonData.getBytes(Constants.UTF8ENCODING),
Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
} catch (Exception e) {
VoltDB.crashGlobalVoltDB("Failed to create Zookeeper node: " + e.getMessage(),
false, e);
}
} | java | private void sendSnapshotTxnId(SnapshotInfo toRestore) {
long txnId = toRestore != null ? toRestore.txnId : 0;
String jsonData = toRestore != null ? toRestore.toJSONObject().toString() : "{}";
LOG.debug("Sending snapshot ID " + txnId + " for restore to other nodes");
try {
m_zk.create(VoltZK.restore_snapshot_id, jsonData.getBytes(Constants.UTF8ENCODING),
Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
} catch (Exception e) {
VoltDB.crashGlobalVoltDB("Failed to create Zookeeper node: " + e.getMessage(),
false, e);
}
} | [
"private",
"void",
"sendSnapshotTxnId",
"(",
"SnapshotInfo",
"toRestore",
")",
"{",
"long",
"txnId",
"=",
"toRestore",
"!=",
"null",
"?",
"toRestore",
".",
"txnId",
":",
"0",
";",
"String",
"jsonData",
"=",
"toRestore",
"!=",
"null",
"?",
"toRestore",
".",
... | Send the txnId of the snapshot that was picked to restore from to the
other hosts. If there was no snapshot to restore from, send 0.
@param txnId | [
"Send",
"the",
"txnId",
"of",
"the",
"snapshot",
"that",
"was",
"picked",
"to",
"restore",
"from",
"to",
"the",
"other",
"hosts",
".",
"If",
"there",
"was",
"no",
"snapshot",
"to",
"restore",
"from",
"send",
"0",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/RestoreAgent.java#L934-L945 | train |
VoltDB/voltdb | src/frontend/org/voltdb/RestoreAgent.java | RestoreAgent.sendLocalRestoreInformation | private void sendLocalRestoreInformation(Long max, Set<SnapshotInfo> snapshots) {
String jsonData = serializeRestoreInformation(max, snapshots);
String zkNode = VoltZK.restore + "/" + m_hostId;
try {
m_zk.create(zkNode, jsonData.getBytes(StandardCharsets.UTF_8),
Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
} catch (Exception e) {
throw new RuntimeException("Failed to create Zookeeper node: " +
e.getMessage(), e);
}
} | java | private void sendLocalRestoreInformation(Long max, Set<SnapshotInfo> snapshots) {
String jsonData = serializeRestoreInformation(max, snapshots);
String zkNode = VoltZK.restore + "/" + m_hostId;
try {
m_zk.create(zkNode, jsonData.getBytes(StandardCharsets.UTF_8),
Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
} catch (Exception e) {
throw new RuntimeException("Failed to create Zookeeper node: " +
e.getMessage(), e);
}
} | [
"private",
"void",
"sendLocalRestoreInformation",
"(",
"Long",
"max",
",",
"Set",
"<",
"SnapshotInfo",
">",
"snapshots",
")",
"{",
"String",
"jsonData",
"=",
"serializeRestoreInformation",
"(",
"max",
",",
"snapshots",
")",
";",
"String",
"zkNode",
"=",
"VoltZK"... | Send the information about the local snapshot files to the other hosts to
generate restore plan.
@param max
The maximum txnId of the last txn across all initiators in the
local command log.
@param snapshots
The information of the local snapshot files. | [
"Send",
"the",
"information",
"about",
"the",
"local",
"snapshot",
"files",
"to",
"the",
"other",
"hosts",
"to",
"generate",
"restore",
"plan",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/RestoreAgent.java#L987-L997 | train |
VoltDB/voltdb | src/frontend/org/voltdb/RestoreAgent.java | RestoreAgent.deserializeRestoreInformation | private Long deserializeRestoreInformation(List<String> children,
Map<String, Set<SnapshotInfo>> snapshotFragments) throws Exception
{
try {
int recover = m_action.ordinal();
Long clStartTxnId = null;
for (String node : children) {
//This might be created before we are done fetching the restore info
if (node.equals("snapshot_id")) {
continue;
}
byte[] data = null;
data = m_zk.getData(VoltZK.restore + "/" + node, false, null);
String jsonData = new String(data, "UTF8");
JSONObject json = new JSONObject(jsonData);
long maxTxnId = json.optLong("max", Long.MIN_VALUE);
if (maxTxnId != Long.MIN_VALUE) {
if (clStartTxnId == null || maxTxnId > clStartTxnId) {
clStartTxnId = maxTxnId;
}
}
int remoteRecover = json.getInt("action");
if (remoteRecover != recover) {
String msg = "Database actions are not consistent. Remote node action is not 'recover'. " +
"Please enter the same database action on the command-line.";
VoltDB.crashLocalVoltDB(msg, false, null);
}
JSONArray snapInfos = json.getJSONArray("snapInfos");
int snapInfoCnt = snapInfos.length();
for (int i=0; i < snapInfoCnt; i++) {
JSONObject jsonInfo = snapInfos.getJSONObject(i);
SnapshotInfo info = new SnapshotInfo(jsonInfo);
Set<SnapshotInfo> fragments = snapshotFragments.get(info.nonce);
if (fragments == null) {
fragments = new HashSet<SnapshotInfo>();
snapshotFragments.put(info.nonce, fragments);
}
fragments.add(info);
}
}
return clStartTxnId;
} catch (JSONException je) {
VoltDB.crashLocalVoltDB("Error exchanging snapshot information", true, je);
}
throw new RuntimeException("impossible");
} | java | private Long deserializeRestoreInformation(List<String> children,
Map<String, Set<SnapshotInfo>> snapshotFragments) throws Exception
{
try {
int recover = m_action.ordinal();
Long clStartTxnId = null;
for (String node : children) {
//This might be created before we are done fetching the restore info
if (node.equals("snapshot_id")) {
continue;
}
byte[] data = null;
data = m_zk.getData(VoltZK.restore + "/" + node, false, null);
String jsonData = new String(data, "UTF8");
JSONObject json = new JSONObject(jsonData);
long maxTxnId = json.optLong("max", Long.MIN_VALUE);
if (maxTxnId != Long.MIN_VALUE) {
if (clStartTxnId == null || maxTxnId > clStartTxnId) {
clStartTxnId = maxTxnId;
}
}
int remoteRecover = json.getInt("action");
if (remoteRecover != recover) {
String msg = "Database actions are not consistent. Remote node action is not 'recover'. " +
"Please enter the same database action on the command-line.";
VoltDB.crashLocalVoltDB(msg, false, null);
}
JSONArray snapInfos = json.getJSONArray("snapInfos");
int snapInfoCnt = snapInfos.length();
for (int i=0; i < snapInfoCnt; i++) {
JSONObject jsonInfo = snapInfos.getJSONObject(i);
SnapshotInfo info = new SnapshotInfo(jsonInfo);
Set<SnapshotInfo> fragments = snapshotFragments.get(info.nonce);
if (fragments == null) {
fragments = new HashSet<SnapshotInfo>();
snapshotFragments.put(info.nonce, fragments);
}
fragments.add(info);
}
}
return clStartTxnId;
} catch (JSONException je) {
VoltDB.crashLocalVoltDB("Error exchanging snapshot information", true, je);
}
throw new RuntimeException("impossible");
} | [
"private",
"Long",
"deserializeRestoreInformation",
"(",
"List",
"<",
"String",
">",
"children",
",",
"Map",
"<",
"String",
",",
"Set",
"<",
"SnapshotInfo",
">",
">",
"snapshotFragments",
")",
"throws",
"Exception",
"{",
"try",
"{",
"int",
"recover",
"=",
"m... | This function, like all good functions, does three things.
It produces the command log start transaction Id.
It produces a map of SnapshotInfo objects.
And, it errors if the remote start action does not match the local action. | [
"This",
"function",
"like",
"all",
"good",
"functions",
"does",
"three",
"things",
".",
"It",
"produces",
"the",
"command",
"log",
"start",
"transaction",
"Id",
".",
"It",
"produces",
"a",
"map",
"of",
"SnapshotInfo",
"objects",
".",
"And",
"it",
"errors",
... | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/RestoreAgent.java#L1180-L1228 | train |
VoltDB/voltdb | src/frontend/org/voltdb/RestoreAgent.java | RestoreAgent.changeState | private void changeState() {
if (m_state == State.RESTORE) {
fetchSnapshotTxnId();
exitRestore();
m_state = State.REPLAY;
/*
* Add the interest here so that we can use the barriers in replay
* agent to synchronize.
*/
m_snapshotMonitor.addInterest(this);
m_replayAgent.replay();
} else if (m_state == State.REPLAY) {
m_state = State.TRUNCATE;
} else if (m_state == State.TRUNCATE) {
m_snapshotMonitor.removeInterest(this);
if (m_callback != null) {
m_callback.onReplayCompletion(m_truncationSnapshot, m_truncationSnapshotPerPartition);
}
// Call balance partitions after enabling transactions on the node to shorten the recovery time
if (m_isLeader) {
m_replayAgent.resumeElasticOperationIfNecessary();
}
}
} | java | private void changeState() {
if (m_state == State.RESTORE) {
fetchSnapshotTxnId();
exitRestore();
m_state = State.REPLAY;
/*
* Add the interest here so that we can use the barriers in replay
* agent to synchronize.
*/
m_snapshotMonitor.addInterest(this);
m_replayAgent.replay();
} else if (m_state == State.REPLAY) {
m_state = State.TRUNCATE;
} else if (m_state == State.TRUNCATE) {
m_snapshotMonitor.removeInterest(this);
if (m_callback != null) {
m_callback.onReplayCompletion(m_truncationSnapshot, m_truncationSnapshotPerPartition);
}
// Call balance partitions after enabling transactions on the node to shorten the recovery time
if (m_isLeader) {
m_replayAgent.resumeElasticOperationIfNecessary();
}
}
} | [
"private",
"void",
"changeState",
"(",
")",
"{",
"if",
"(",
"m_state",
"==",
"State",
".",
"RESTORE",
")",
"{",
"fetchSnapshotTxnId",
"(",
")",
";",
"exitRestore",
"(",
")",
";",
"m_state",
"=",
"State",
".",
"REPLAY",
";",
"/*\n * Add the intere... | Change the state of the restore agent based on the current state. | [
"Change",
"the",
"state",
"of",
"the",
"restore",
"agent",
"based",
"on",
"the",
"current",
"state",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/RestoreAgent.java#L1285-L1311 | train |
VoltDB/voltdb | src/frontend/org/voltdb/RestoreAgent.java | RestoreAgent.getSnapshots | private Map<String, Snapshot> getSnapshots() {
/*
* Use the individual snapshot directories instead of voltroot, because
* they can be set individually
*/
Map<String, SnapshotPathType> paths = new HashMap<String, SnapshotPathType>();
if (VoltDB.instance().getConfig().m_isEnterprise) {
if (m_clSnapshotPath != null) {
paths.put(m_clSnapshotPath, SnapshotPathType.SNAP_CL);
}
}
if (m_snapshotPath != null) {
paths.put(m_snapshotPath, SnapshotPathType.SNAP_AUTO);
}
HashMap<String, Snapshot> snapshots = new HashMap<String, Snapshot>();
FileFilter filter = new SnapshotUtil.SnapshotFilter();
for (String path : paths.keySet()) {
SnapshotUtil.retrieveSnapshotFiles(new File(path), snapshots, filter, false, paths.get(path), LOG);
}
return snapshots;
} | java | private Map<String, Snapshot> getSnapshots() {
/*
* Use the individual snapshot directories instead of voltroot, because
* they can be set individually
*/
Map<String, SnapshotPathType> paths = new HashMap<String, SnapshotPathType>();
if (VoltDB.instance().getConfig().m_isEnterprise) {
if (m_clSnapshotPath != null) {
paths.put(m_clSnapshotPath, SnapshotPathType.SNAP_CL);
}
}
if (m_snapshotPath != null) {
paths.put(m_snapshotPath, SnapshotPathType.SNAP_AUTO);
}
HashMap<String, Snapshot> snapshots = new HashMap<String, Snapshot>();
FileFilter filter = new SnapshotUtil.SnapshotFilter();
for (String path : paths.keySet()) {
SnapshotUtil.retrieveSnapshotFiles(new File(path), snapshots, filter, false, paths.get(path), LOG);
}
return snapshots;
} | [
"private",
"Map",
"<",
"String",
",",
"Snapshot",
">",
"getSnapshots",
"(",
")",
"{",
"/*\n * Use the individual snapshot directories instead of voltroot, because\n * they can be set individually\n */",
"Map",
"<",
"String",
",",
"SnapshotPathType",
">",
"... | Finds all the snapshots in all the places we know of which could possibly
store snapshots, like command log snapshots, auto snapshots, etc.
@return All snapshots | [
"Finds",
"all",
"the",
"snapshots",
"in",
"all",
"the",
"places",
"we",
"know",
"of",
"which",
"could",
"possibly",
"store",
"snapshots",
"like",
"command",
"log",
"snapshots",
"auto",
"snapshots",
"etc",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/RestoreAgent.java#L1383-L1405 | train |
VoltDB/voltdb | src/frontend/org/voltdb/RestoreAgent.java | RestoreAgent.snapshotCompleted | @Override
public CountDownLatch snapshotCompleted(SnapshotCompletionEvent event) {
if (!event.truncationSnapshot || !event.didSucceed) {
VoltDB.crashGlobalVoltDB("Failed to truncate command logs by snapshot",
false, null);
} else {
m_truncationSnapshot = event.multipartTxnId;
m_truncationSnapshotPerPartition = event.partitionTxnIds;
m_replayAgent.returnAllSegments();
changeState();
}
return new CountDownLatch(0);
} | java | @Override
public CountDownLatch snapshotCompleted(SnapshotCompletionEvent event) {
if (!event.truncationSnapshot || !event.didSucceed) {
VoltDB.crashGlobalVoltDB("Failed to truncate command logs by snapshot",
false, null);
} else {
m_truncationSnapshot = event.multipartTxnId;
m_truncationSnapshotPerPartition = event.partitionTxnIds;
m_replayAgent.returnAllSegments();
changeState();
}
return new CountDownLatch(0);
} | [
"@",
"Override",
"public",
"CountDownLatch",
"snapshotCompleted",
"(",
"SnapshotCompletionEvent",
"event",
")",
"{",
"if",
"(",
"!",
"event",
".",
"truncationSnapshot",
"||",
"!",
"event",
".",
"didSucceed",
")",
"{",
"VoltDB",
".",
"crashGlobalVoltDB",
"(",
"\"... | All nodes will be notified about the completion of the truncation
snapshot. | [
"All",
"nodes",
"will",
"be",
"notified",
"about",
"the",
"completion",
"of",
"the",
"truncation",
"snapshot",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/RestoreAgent.java#L1411-L1423 | train |
VoltDB/voltdb | src/frontend/org/voltcore/network/VoltNetwork.java | VoltNetwork.shutdown | void shutdown() throws InterruptedException {
m_shouldStop = true;
if (m_thread != null) {
m_selector.wakeup();
m_thread.join();
}
} | java | void shutdown() throws InterruptedException {
m_shouldStop = true;
if (m_thread != null) {
m_selector.wakeup();
m_thread.join();
}
} | [
"void",
"shutdown",
"(",
")",
"throws",
"InterruptedException",
"{",
"m_shouldStop",
"=",
"true",
";",
"if",
"(",
"m_thread",
"!=",
"null",
")",
"{",
"m_selector",
".",
"wakeup",
"(",
")",
";",
"m_thread",
".",
"join",
"(",
")",
";",
"}",
"}"
] | Instruct the network to stop after the current loop | [
"Instruct",
"the",
"network",
"to",
"stop",
"after",
"the",
"current",
"loop"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltcore/network/VoltNetwork.java#L148-L154 | train |
VoltDB/voltdb | src/frontend/org/voltcore/network/VoltNetwork.java | VoltNetwork.registerChannel | Connection registerChannel(
final SocketChannel channel,
final InputHandler handler,
final int interestOps,
final ReverseDNSPolicy dns,
final CipherExecutor cipherService,
final SSLEngine sslEngine) throws IOException {
synchronized(channel.blockingLock()) {
channel.configureBlocking (false);
channel.socket().setKeepAlive(true);
}
Callable<Connection> registerTask = new Callable<Connection>() {
@Override
public Connection call() throws Exception {
final VoltPort port = VoltPortFactory.createVoltPort(
channel,
VoltNetwork.this,
handler,
(InetSocketAddress)channel.socket().getRemoteSocketAddress(),
m_pool,
cipherService,
sslEngine);
port.registering();
/*
* This means we are used by a client. No need to wait then, trigger
* the reverse DNS lookup now.
*/
if (dns != ReverseDNSPolicy.NONE) {
port.resolveHostname(dns == ReverseDNSPolicy.SYNCHRONOUS);
}
try {
SelectionKey key = channel.register (m_selector, interestOps, null);
port.setKey (key);
port.registered();
//Fix a bug witnessed on the mini where the registration lock and the selector wakeup contained
//within was not enough to prevent the selector from returning the port after it was registered,
//but before setKey was called. Suspect a bug in the selector.wakeup() or register() implementation
//on the mac.
//The null check in invokeCallbacks will catch the null attachment, continue, and do the work
//next time through the selection loop
key.attach(port);
return port;
} finally {
m_ports.add(port);
m_numPorts.incrementAndGet();
}
}
};
FutureTask<Connection> ft = new FutureTask<Connection>(registerTask);
m_tasks.offer(ft);
m_selector.wakeup();
try {
return ft.get();
} catch (Exception e) {
throw new IOException(e);
}
} | java | Connection registerChannel(
final SocketChannel channel,
final InputHandler handler,
final int interestOps,
final ReverseDNSPolicy dns,
final CipherExecutor cipherService,
final SSLEngine sslEngine) throws IOException {
synchronized(channel.blockingLock()) {
channel.configureBlocking (false);
channel.socket().setKeepAlive(true);
}
Callable<Connection> registerTask = new Callable<Connection>() {
@Override
public Connection call() throws Exception {
final VoltPort port = VoltPortFactory.createVoltPort(
channel,
VoltNetwork.this,
handler,
(InetSocketAddress)channel.socket().getRemoteSocketAddress(),
m_pool,
cipherService,
sslEngine);
port.registering();
/*
* This means we are used by a client. No need to wait then, trigger
* the reverse DNS lookup now.
*/
if (dns != ReverseDNSPolicy.NONE) {
port.resolveHostname(dns == ReverseDNSPolicy.SYNCHRONOUS);
}
try {
SelectionKey key = channel.register (m_selector, interestOps, null);
port.setKey (key);
port.registered();
//Fix a bug witnessed on the mini where the registration lock and the selector wakeup contained
//within was not enough to prevent the selector from returning the port after it was registered,
//but before setKey was called. Suspect a bug in the selector.wakeup() or register() implementation
//on the mac.
//The null check in invokeCallbacks will catch the null attachment, continue, and do the work
//next time through the selection loop
key.attach(port);
return port;
} finally {
m_ports.add(port);
m_numPorts.incrementAndGet();
}
}
};
FutureTask<Connection> ft = new FutureTask<Connection>(registerTask);
m_tasks.offer(ft);
m_selector.wakeup();
try {
return ft.get();
} catch (Exception e) {
throw new IOException(e);
}
} | [
"Connection",
"registerChannel",
"(",
"final",
"SocketChannel",
"channel",
",",
"final",
"InputHandler",
"handler",
",",
"final",
"int",
"interestOps",
",",
"final",
"ReverseDNSPolicy",
"dns",
",",
"final",
"CipherExecutor",
"cipherService",
",",
"final",
"SSLEngine",... | Register a channel with the selector and create a Connection that will pass incoming events
to the provided handler.
@param channel
@param handler
@throws IOException | [
"Register",
"a",
"channel",
"with",
"the",
"selector",
"and",
"create",
"a",
"Connection",
"that",
"will",
"pass",
"incoming",
"events",
"to",
"the",
"provided",
"handler",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltcore/network/VoltNetwork.java#L170-L235 | train |
VoltDB/voltdb | src/frontend/org/voltcore/network/VoltNetwork.java | VoltNetwork.unregisterChannel | Future<?> unregisterChannel (Connection c) {
FutureTask<Object> ft = new FutureTask<Object>(getUnregisterRunnable(c), null);
m_tasks.offer(ft);
m_selector.wakeup();
return ft;
} | java | Future<?> unregisterChannel (Connection c) {
FutureTask<Object> ft = new FutureTask<Object>(getUnregisterRunnable(c), null);
m_tasks.offer(ft);
m_selector.wakeup();
return ft;
} | [
"Future",
"<",
"?",
">",
"unregisterChannel",
"(",
"Connection",
"c",
")",
"{",
"FutureTask",
"<",
"Object",
">",
"ft",
"=",
"new",
"FutureTask",
"<",
"Object",
">",
"(",
"getUnregisterRunnable",
"(",
"c",
")",
",",
"null",
")",
";",
"m_tasks",
".",
"o... | Unregister a channel. The connections streams are not drained before finishing.
@param c | [
"Unregister",
"a",
"channel",
".",
"The",
"connections",
"streams",
"are",
"not",
"drained",
"before",
"finishing",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltcore/network/VoltNetwork.java#L271-L276 | train |
VoltDB/voltdb | src/frontend/org/voltcore/network/VoltNetwork.java | VoltNetwork.addToChangeList | void addToChangeList(final VoltPort port, final boolean runFirst) {
if (runFirst) {
m_tasks.offer(new Runnable() {
@Override
public void run() {
callPort(port);
}
});
} else {
m_tasks.offer(new Runnable() {
@Override
public void run() {
installInterests(port);
}
});
}
m_selector.wakeup();
} | java | void addToChangeList(final VoltPort port, final boolean runFirst) {
if (runFirst) {
m_tasks.offer(new Runnable() {
@Override
public void run() {
callPort(port);
}
});
} else {
m_tasks.offer(new Runnable() {
@Override
public void run() {
installInterests(port);
}
});
}
m_selector.wakeup();
} | [
"void",
"addToChangeList",
"(",
"final",
"VoltPort",
"port",
",",
"final",
"boolean",
"runFirst",
")",
"{",
"if",
"(",
"runFirst",
")",
"{",
"m_tasks",
".",
"offer",
"(",
"new",
"Runnable",
"(",
")",
"{",
"@",
"Override",
"public",
"void",
"run",
"(",
... | Set interest registrations for a port | [
"Set",
"interest",
"registrations",
"for",
"a",
"port"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltcore/network/VoltNetwork.java#L283-L300 | train |
VoltDB/voltdb | src/frontend/org/voltcore/network/VoltNetwork.java | VoltNetwork.invokeCallbacks | protected void invokeCallbacks(ThreadLocalRandom r) {
final Set<SelectionKey> selectedKeys = m_selector.selectedKeys();
final int keyCount = selectedKeys.size();
int startInx = r.nextInt(keyCount);
int itInx = 0;
Iterator<SelectionKey> it = selectedKeys.iterator();
while(itInx < startInx) {
it.next();
itInx++;
}
while(itInx < keyCount) {
final Object obj = it.next().attachment();
if (obj == null) {
continue;
}
final VoltPort port = (VoltPort)obj;
callPort(port);
itInx++;
}
itInx = 0;
it = selectedKeys.iterator();
while(itInx < startInx) {
final Object obj = it.next().attachment();
if (obj == null) {
continue;
}
final VoltPort port = (VoltPort)obj;
callPort(port);
itInx++;
}
selectedKeys.clear();
} | java | protected void invokeCallbacks(ThreadLocalRandom r) {
final Set<SelectionKey> selectedKeys = m_selector.selectedKeys();
final int keyCount = selectedKeys.size();
int startInx = r.nextInt(keyCount);
int itInx = 0;
Iterator<SelectionKey> it = selectedKeys.iterator();
while(itInx < startInx) {
it.next();
itInx++;
}
while(itInx < keyCount) {
final Object obj = it.next().attachment();
if (obj == null) {
continue;
}
final VoltPort port = (VoltPort)obj;
callPort(port);
itInx++;
}
itInx = 0;
it = selectedKeys.iterator();
while(itInx < startInx) {
final Object obj = it.next().attachment();
if (obj == null) {
continue;
}
final VoltPort port = (VoltPort)obj;
callPort(port);
itInx++;
}
selectedKeys.clear();
} | [
"protected",
"void",
"invokeCallbacks",
"(",
"ThreadLocalRandom",
"r",
")",
"{",
"final",
"Set",
"<",
"SelectionKey",
">",
"selectedKeys",
"=",
"m_selector",
".",
"selectedKeys",
"(",
")",
";",
"final",
"int",
"keyCount",
"=",
"selectedKeys",
".",
"size",
"(",... | Set the selected interest set on the port and run it. | [
"Set",
"the",
"selected",
"interest",
"set",
"on",
"the",
"port",
"and",
"run",
"it",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltcore/network/VoltNetwork.java#L444-L475 | train |
VoltDB/voltdb | src/frontend/org/voltcore/zk/ZKUtil.java | ZKUtil.path | public static String path(String... components)
{
String path = components[0];
for (int i=1; i < components.length; i++) {
path = ZKUtil.joinZKPath(path, components[i]);
}
return path;
} | java | public static String path(String... components)
{
String path = components[0];
for (int i=1; i < components.length; i++) {
path = ZKUtil.joinZKPath(path, components[i]);
}
return path;
} | [
"public",
"static",
"String",
"path",
"(",
"String",
"...",
"components",
")",
"{",
"String",
"path",
"=",
"components",
"[",
"0",
"]",
";",
"for",
"(",
"int",
"i",
"=",
"1",
";",
"i",
"<",
"components",
".",
"length",
";",
"i",
"++",
")",
"{",
"... | Helper to produce a valid path from variadic strings. | [
"Helper",
"to",
"produce",
"a",
"valid",
"path",
"from",
"variadic",
"strings",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltcore/zk/ZKUtil.java#L78-L85 | train |
VoltDB/voltdb | src/frontend/org/voltdb/utils/PersistentBinaryDeque.java | PersistentBinaryDeque.getSegmentFileName | private String getSegmentFileName(long currentId, long previousId) {
return PbdSegmentName.createName(m_nonce, currentId, previousId, false);
} | java | private String getSegmentFileName(long currentId, long previousId) {
return PbdSegmentName.createName(m_nonce, currentId, previousId, false);
} | [
"private",
"String",
"getSegmentFileName",
"(",
"long",
"currentId",
",",
"long",
"previousId",
")",
"{",
"return",
"PbdSegmentName",
".",
"createName",
"(",
"m_nonce",
",",
"currentId",
",",
"previousId",
",",
"false",
")",
";",
"}"
] | Return a segment file name from m_nonce and current + previous segment ids.
@see parseFiles for file name structure
@param currentId current segment id
@param previousId previous segment id
@return segment file name | [
"Return",
"a",
"segment",
"file",
"name",
"from",
"m_nonce",
"and",
"current",
"+",
"previous",
"segment",
"ids",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/utils/PersistentBinaryDeque.java#L463-L465 | train |
VoltDB/voltdb | src/frontend/org/voltdb/utils/PersistentBinaryDeque.java | PersistentBinaryDeque.getPreviousSegmentId | private long getPreviousSegmentId(File file) {
PbdSegmentName segmentName = PbdSegmentName.parseFile(m_usageSpecificLog, file);
if (segmentName.m_result != PbdSegmentName.Result.OK) {
throw new IllegalStateException("Invalid file name: " + file.getName());
}
return segmentName.m_prevId;
} | java | private long getPreviousSegmentId(File file) {
PbdSegmentName segmentName = PbdSegmentName.parseFile(m_usageSpecificLog, file);
if (segmentName.m_result != PbdSegmentName.Result.OK) {
throw new IllegalStateException("Invalid file name: " + file.getName());
}
return segmentName.m_prevId;
} | [
"private",
"long",
"getPreviousSegmentId",
"(",
"File",
"file",
")",
"{",
"PbdSegmentName",
"segmentName",
"=",
"PbdSegmentName",
".",
"parseFile",
"(",
"m_usageSpecificLog",
",",
"file",
")",
";",
"if",
"(",
"segmentName",
".",
"m_result",
"!=",
"PbdSegmentName",... | Extract the previous segment id from a file name.
Note that the filename is assumed valid at this point.
@see parseFiles for file name structure
@param file
@return | [
"Extract",
"the",
"previous",
"segment",
"id",
"from",
"a",
"file",
"name",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/utils/PersistentBinaryDeque.java#L476-L482 | train |
VoltDB/voltdb | src/frontend/org/voltdb/utils/PersistentBinaryDeque.java | PersistentBinaryDeque.deleteStalePbdFile | private void deleteStalePbdFile(File file) throws IOException {
try {
PBDSegment.setFinal(file, false);
if (m_usageSpecificLog.isDebugEnabled()) {
m_usageSpecificLog.debug("Segment " + file.getName()
+ " (final: " + PBDSegment.isFinal(file) + "), will be closed and deleted during init");
}
file.delete();
} catch (Exception e) {
if (e instanceof NoSuchFileException) {
// Concurrent delete, noop
} else {
throw e;
}
}
} | java | private void deleteStalePbdFile(File file) throws IOException {
try {
PBDSegment.setFinal(file, false);
if (m_usageSpecificLog.isDebugEnabled()) {
m_usageSpecificLog.debug("Segment " + file.getName()
+ " (final: " + PBDSegment.isFinal(file) + "), will be closed and deleted during init");
}
file.delete();
} catch (Exception e) {
if (e instanceof NoSuchFileException) {
// Concurrent delete, noop
} else {
throw e;
}
}
} | [
"private",
"void",
"deleteStalePbdFile",
"(",
"File",
"file",
")",
"throws",
"IOException",
"{",
"try",
"{",
"PBDSegment",
".",
"setFinal",
"(",
"file",
",",
"false",
")",
";",
"if",
"(",
"m_usageSpecificLog",
".",
"isDebugEnabled",
"(",
")",
")",
"{",
"m_... | Delete a PBD segment that was identified as 'stale' i.e. produced by earlier VoltDB releases
Note that this file may be concurrently deleted from multiple instances so we ignore
NoSuchFileException.
@param file
@throws IOException | [
"Delete",
"a",
"PBD",
"segment",
"that",
"was",
"identified",
"as",
"stale",
"i",
".",
"e",
".",
"produced",
"by",
"earlier",
"VoltDB",
"releases"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/utils/PersistentBinaryDeque.java#L595-L610 | train |
VoltDB/voltdb | src/frontend/org/voltdb/utils/PersistentBinaryDeque.java | PersistentBinaryDeque.recoverSegment | private void recoverSegment(long segmentIndex, long segmentId, PbdSegmentName segmentName) throws IOException {
PBDSegment segment;
if (segmentName.m_quarantined) {
segment = new PbdQuarantinedSegment(segmentName.m_file, segmentIndex, segmentId);
} else {
segment = newSegment(segmentIndex, segmentId, segmentName.m_file);
try {
if (segment.getNumEntries() == 0) {
if (m_usageSpecificLog.isDebugEnabled()) {
m_usageSpecificLog.debug("Found Empty Segment with entries: " + segment.getNumEntries()
+ " For: " + segment.file().getName());
m_usageSpecificLog.debug("Segment " + segment.file() + " (final: " + segment.isFinal()
+ "), will be closed and deleted during init");
}
segment.closeAndDelete();
return;
}
// Any recovered segment that is not final should be checked
// for internal consistency.
if (!segment.isFinal()) {
m_usageSpecificLog.warn("Segment " + segment.file() + " (final: " + segment.isFinal()
+ "), has been recovered but is not in a final state");
} else if (m_usageSpecificLog.isDebugEnabled()) {
m_usageSpecificLog.debug(
"Segment " + segment.file() + " (final: " + segment.isFinal() + "), has been recovered");
}
m_segments.put(segment.segmentIndex(), segment);
} catch (IOException e) {
m_usageSpecificLog.warn(
"Failed to retrieve entry count from segment " + segment.file() + ". Quarantining segment", e);
quarantineSegment(segment);
return;
} finally {
segment.close();
}
}
m_segments.put(segment.segmentIndex(), segment);
} | java | private void recoverSegment(long segmentIndex, long segmentId, PbdSegmentName segmentName) throws IOException {
PBDSegment segment;
if (segmentName.m_quarantined) {
segment = new PbdQuarantinedSegment(segmentName.m_file, segmentIndex, segmentId);
} else {
segment = newSegment(segmentIndex, segmentId, segmentName.m_file);
try {
if (segment.getNumEntries() == 0) {
if (m_usageSpecificLog.isDebugEnabled()) {
m_usageSpecificLog.debug("Found Empty Segment with entries: " + segment.getNumEntries()
+ " For: " + segment.file().getName());
m_usageSpecificLog.debug("Segment " + segment.file() + " (final: " + segment.isFinal()
+ "), will be closed and deleted during init");
}
segment.closeAndDelete();
return;
}
// Any recovered segment that is not final should be checked
// for internal consistency.
if (!segment.isFinal()) {
m_usageSpecificLog.warn("Segment " + segment.file() + " (final: " + segment.isFinal()
+ "), has been recovered but is not in a final state");
} else if (m_usageSpecificLog.isDebugEnabled()) {
m_usageSpecificLog.debug(
"Segment " + segment.file() + " (final: " + segment.isFinal() + "), has been recovered");
}
m_segments.put(segment.segmentIndex(), segment);
} catch (IOException e) {
m_usageSpecificLog.warn(
"Failed to retrieve entry count from segment " + segment.file() + ". Quarantining segment", e);
quarantineSegment(segment);
return;
} finally {
segment.close();
}
}
m_segments.put(segment.segmentIndex(), segment);
} | [
"private",
"void",
"recoverSegment",
"(",
"long",
"segmentIndex",
",",
"long",
"segmentId",
",",
"PbdSegmentName",
"segmentName",
")",
"throws",
"IOException",
"{",
"PBDSegment",
"segment",
";",
"if",
"(",
"segmentName",
".",
"m_quarantined",
")",
"{",
"segment",
... | Recover a PBD segment and add it to m_segments
@param segment
@param deleteEmpty
@throws IOException | [
"Recover",
"a",
"PBD",
"segment",
"and",
"add",
"it",
"to",
"m_segments"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/utils/PersistentBinaryDeque.java#L663-L702 | train |
VoltDB/voltdb | src/frontend/org/voltdb/utils/PersistentBinaryDeque.java | PersistentBinaryDeque.numOpenSegments | int numOpenSegments() {
int numOpen = 0;
for (PBDSegment segment : m_segments.values()) {
if (!segment.isClosed()) {
numOpen++;
}
}
return numOpen;
} | java | int numOpenSegments() {
int numOpen = 0;
for (PBDSegment segment : m_segments.values()) {
if (!segment.isClosed()) {
numOpen++;
}
}
return numOpen;
} | [
"int",
"numOpenSegments",
"(",
")",
"{",
"int",
"numOpen",
"=",
"0",
";",
"for",
"(",
"PBDSegment",
"segment",
":",
"m_segments",
".",
"values",
"(",
")",
")",
"{",
"if",
"(",
"!",
"segment",
".",
"isClosed",
"(",
")",
")",
"{",
"numOpen",
"++",
";... | Used by test only | [
"Used",
"by",
"test",
"only"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/utils/PersistentBinaryDeque.java#L1252-L1261 | train |
VoltDB/voltdb | third_party/java/src/com/google_voltpatches/common/cache/CacheBuilder.java | CacheBuilder.expireAfterWrite | public CacheBuilder<K, V> expireAfterWrite(long duration, TimeUnit unit) {
checkState(
expireAfterWriteNanos == UNSET_INT,
"expireAfterWrite was already set to %s ns",
expireAfterWriteNanos);
checkArgument(duration >= 0, "duration cannot be negative: %s %s", duration, unit);
this.expireAfterWriteNanos = unit.toNanos(duration);
return this;
} | java | public CacheBuilder<K, V> expireAfterWrite(long duration, TimeUnit unit) {
checkState(
expireAfterWriteNanos == UNSET_INT,
"expireAfterWrite was already set to %s ns",
expireAfterWriteNanos);
checkArgument(duration >= 0, "duration cannot be negative: %s %s", duration, unit);
this.expireAfterWriteNanos = unit.toNanos(duration);
return this;
} | [
"public",
"CacheBuilder",
"<",
"K",
",",
"V",
">",
"expireAfterWrite",
"(",
"long",
"duration",
",",
"TimeUnit",
"unit",
")",
"{",
"checkState",
"(",
"expireAfterWriteNanos",
"==",
"UNSET_INT",
",",
"\"expireAfterWrite was already set to %s ns\"",
",",
"expireAfterWri... | Specifies that each entry should be automatically removed from the cache once a fixed duration
has elapsed after the entry's creation, or the most recent replacement of its value.
<p>When {@code duration} is zero, this method hands off to {@link #maximumSize(long)
maximumSize}{@code (0)}, ignoring any otherwise-specificed maximum size or weight. This can be
useful in testing, or to disable caching temporarily without a code change.
<p>Expired entries may be counted in {@link Cache#size}, but will never be visible to read or
write operations. Expired entries are cleaned up as part of the routine maintenance described
in the class javadoc.
@param duration the length of time after an entry is created that it should be automatically
removed
@param unit the unit that {@code duration} is expressed in
@return this {@code CacheBuilder} instance (for chaining)
@throws IllegalArgumentException if {@code duration} is negative
@throws IllegalStateException if the time to live or time to idle was already set | [
"Specifies",
"that",
"each",
"entry",
"should",
"be",
"automatically",
"removed",
"from",
"the",
"cache",
"once",
"a",
"fixed",
"duration",
"has",
"elapsed",
"after",
"the",
"entry",
"s",
"creation",
"or",
"the",
"most",
"recent",
"replacement",
"of",
"its",
... | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/third_party/java/src/com/google_voltpatches/common/cache/CacheBuilder.java#L621-L629 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/rights/Grantee.java | Grantee.revoke | public void revoke(Grantee role) {
if (!hasRoleDirect(role)) {
throw Error.error(ErrorCode.X_0P503, role.getNameString());
}
roles.remove(role);
} | java | public void revoke(Grantee role) {
if (!hasRoleDirect(role)) {
throw Error.error(ErrorCode.X_0P503, role.getNameString());
}
roles.remove(role);
} | [
"public",
"void",
"revoke",
"(",
"Grantee",
"role",
")",
"{",
"if",
"(",
"!",
"hasRoleDirect",
"(",
"role",
")",
")",
"{",
"throw",
"Error",
".",
"error",
"(",
"ErrorCode",
".",
"X_0P503",
",",
"role",
".",
"getNameString",
"(",
")",
")",
";",
"}",
... | Revoke a direct role only | [
"Revoke",
"a",
"direct",
"role",
"only"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/rights/Grantee.java#L224-L231 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/rights/Grantee.java | Grantee.addGranteeAndRoles | private OrderedHashSet addGranteeAndRoles(OrderedHashSet set) {
Grantee candidateRole;
set.add(this);
for (int i = 0; i < roles.size(); i++) {
candidateRole = (Grantee) roles.get(i);
if (!set.contains(candidateRole)) {
candidateRole.addGranteeAndRoles(set);
}
}
return set;
} | java | private OrderedHashSet addGranteeAndRoles(OrderedHashSet set) {
Grantee candidateRole;
set.add(this);
for (int i = 0; i < roles.size(); i++) {
candidateRole = (Grantee) roles.get(i);
if (!set.contains(candidateRole)) {
candidateRole.addGranteeAndRoles(set);
}
}
return set;
} | [
"private",
"OrderedHashSet",
"addGranteeAndRoles",
"(",
"OrderedHashSet",
"set",
")",
"{",
"Grantee",
"candidateRole",
";",
"set",
".",
"add",
"(",
"this",
")",
";",
"for",
"(",
"int",
"i",
"=",
"0",
";",
"i",
"<",
"roles",
".",
"size",
"(",
")",
";",
... | Adds to given Set this.sName plus all roles and nested roles.
@return Given role with new elements added. | [
"Adds",
"to",
"given",
"Set",
"this",
".",
"sName",
"plus",
"all",
"roles",
"and",
"nested",
"roles",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/rights/Grantee.java#L302-L317 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/rights/Grantee.java | Grantee.addAllRoles | public void addAllRoles(HashMap map) {
for (int i = 0; i < roles.size(); i++) {
Grantee role = (Grantee) roles.get(i);
map.put(role.granteeName.name, role.roles);
}
} | java | public void addAllRoles(HashMap map) {
for (int i = 0; i < roles.size(); i++) {
Grantee role = (Grantee) roles.get(i);
map.put(role.granteeName.name, role.roles);
}
} | [
"public",
"void",
"addAllRoles",
"(",
"HashMap",
"map",
")",
"{",
"for",
"(",
"int",
"i",
"=",
"0",
";",
"i",
"<",
"roles",
".",
"size",
"(",
")",
";",
"i",
"++",
")",
"{",
"Grantee",
"role",
"=",
"(",
"Grantee",
")",
"roles",
".",
"get",
"(",
... | returns a map with grantee name keys and sets of granted roles as value | [
"returns",
"a",
"map",
"with",
"grantee",
"name",
"keys",
"and",
"sets",
"of",
"granted",
"roles",
"as",
"value"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/rights/Grantee.java#L322-L329 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/rights/Grantee.java | Grantee.clearPrivileges | void clearPrivileges() {
roles.clear();
directRightsMap.clear();
grantedRightsMap.clear();
fullRightsMap.clear();
isAdmin = false;
} | java | void clearPrivileges() {
roles.clear();
directRightsMap.clear();
grantedRightsMap.clear();
fullRightsMap.clear();
isAdmin = false;
} | [
"void",
"clearPrivileges",
"(",
")",
"{",
"roles",
".",
"clear",
"(",
")",
";",
"directRightsMap",
".",
"clear",
"(",
")",
";",
"grantedRightsMap",
".",
"clear",
"(",
")",
";",
"fullRightsMap",
".",
"clear",
"(",
")",
";",
"isAdmin",
"=",
"false",
";",... | Revokes all rights from this Grantee object. The map is cleared and
the database administrator role attribute is set false. | [
"Revokes",
"all",
"rights",
"from",
"this",
"Grantee",
"object",
".",
"The",
"map",
"is",
"cleared",
"and",
"the",
"database",
"administrator",
"role",
"attribute",
"is",
"set",
"false",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/rights/Grantee.java#L473-L481 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/rights/Grantee.java | Grantee.updateNestedRoles | boolean updateNestedRoles(Grantee role) {
boolean hasNested = false;
if (role != this) {
for (int i = 0; i < roles.size(); i++) {
Grantee currentRole = (Grantee) roles.get(i);
hasNested |= currentRole.updateNestedRoles(role);
}
}
if (hasNested) {
updateAllRights();
}
return hasNested || role == this;
} | java | boolean updateNestedRoles(Grantee role) {
boolean hasNested = false;
if (role != this) {
for (int i = 0; i < roles.size(); i++) {
Grantee currentRole = (Grantee) roles.get(i);
hasNested |= currentRole.updateNestedRoles(role);
}
}
if (hasNested) {
updateAllRights();
}
return hasNested || role == this;
} | [
"boolean",
"updateNestedRoles",
"(",
"Grantee",
"role",
")",
"{",
"boolean",
"hasNested",
"=",
"false",
";",
"if",
"(",
"role",
"!=",
"this",
")",
"{",
"for",
"(",
"int",
"i",
"=",
"0",
";",
"i",
"<",
"roles",
".",
"size",
"(",
")",
";",
"i",
"++... | Recursive method used with ROLE Grantee objects to set the fullRightsMap
and admin flag for all the roles.
If a new ROLE is granted to a ROLE Grantee object, the ROLE should first
be added to the Set of ROLE Grantee objects (roles) for the grantee.
The grantee will be the parameter.
If the direct permissions granted to an existing ROLE Grentee is
modified no extra initial action is necessary.
The existing Grantee will be the parameter.
If an existing ROLE is REVOKEed from a ROLE, it should first be removed
from the set of ROLE Grantee objects in the containing ROLE.
The containing ROLE will be the parameter.
If an existing ROLE is DROPped, all its privileges should be cleared
first. The ROLE will be the parameter. After calling this method on
all other roles, the DROPped role should be removed from all grantees.
After the initial modification, this method should be called iteratively
on all the ROLE Grantee objects contained in RoleManager.
The updateAllRights() method is then called iteratively on all the
USER Grantee objects contained in UserManager.
@param role a modified, revoked or dropped role.
@return true if this Grantee has possibly changed as a result | [
"Recursive",
"method",
"used",
"with",
"ROLE",
"Grantee",
"objects",
"to",
"set",
"the",
"fullRightsMap",
"and",
"admin",
"flag",
"for",
"all",
"the",
"roles",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/rights/Grantee.java#L805-L822 | train |
VoltDB/voltdb | src/hsqldb19b3/org/hsqldb_voltpatches/rights/Grantee.java | Grantee.addToFullRights | void addToFullRights(HashMap map) {
Iterator it = map.keySet().iterator();
while (it.hasNext()) {
Object key = it.next();
Right add = (Right) map.get(key);
Right existing = (Right) fullRightsMap.get(key);
if (existing == null) {
existing = add.duplicate();
fullRightsMap.put(key, existing);
} else {
existing.add(add);
}
if (add.grantableRights == null) {
continue;
}
if (existing.grantableRights == null) {
existing.grantableRights = add.grantableRights.duplicate();
} else {
existing.grantableRights.add(add.grantableRights);
}
}
} | java | void addToFullRights(HashMap map) {
Iterator it = map.keySet().iterator();
while (it.hasNext()) {
Object key = it.next();
Right add = (Right) map.get(key);
Right existing = (Right) fullRightsMap.get(key);
if (existing == null) {
existing = add.duplicate();
fullRightsMap.put(key, existing);
} else {
existing.add(add);
}
if (add.grantableRights == null) {
continue;
}
if (existing.grantableRights == null) {
existing.grantableRights = add.grantableRights.duplicate();
} else {
existing.grantableRights.add(add.grantableRights);
}
}
} | [
"void",
"addToFullRights",
"(",
"HashMap",
"map",
")",
"{",
"Iterator",
"it",
"=",
"map",
".",
"keySet",
"(",
")",
".",
"iterator",
"(",
")",
";",
"while",
"(",
"it",
".",
"hasNext",
"(",
")",
")",
"{",
"Object",
"key",
"=",
"it",
".",
"next",
"(... | Full or partial rights are added to existing | [
"Full",
"or",
"partial",
"rights",
"are",
"added",
"to",
"existing"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/hsqldb19b3/org/hsqldb_voltpatches/rights/Grantee.java#L858-L885 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/parseinfo/BranchNode.java | BranchNode.toLeftJoin | public void toLeftJoin() {
assert((m_leftNode != null && m_rightNode != null) || (m_leftNode == null && m_rightNode == null));
if (m_leftNode == null && m_rightNode == null) {
// End of recursion
return;
}
// recursive calls
if (m_leftNode instanceof BranchNode) {
((BranchNode)m_leftNode).toLeftJoin();
}
if (m_rightNode instanceof BranchNode) {
((BranchNode)m_rightNode).toLeftJoin();
}
// Swap own children
if (m_joinType == JoinType.RIGHT) {
JoinNode node = m_rightNode;
m_rightNode = m_leftNode;
m_leftNode = node;
m_joinType = JoinType.LEFT;
}
} | java | public void toLeftJoin() {
assert((m_leftNode != null && m_rightNode != null) || (m_leftNode == null && m_rightNode == null));
if (m_leftNode == null && m_rightNode == null) {
// End of recursion
return;
}
// recursive calls
if (m_leftNode instanceof BranchNode) {
((BranchNode)m_leftNode).toLeftJoin();
}
if (m_rightNode instanceof BranchNode) {
((BranchNode)m_rightNode).toLeftJoin();
}
// Swap own children
if (m_joinType == JoinType.RIGHT) {
JoinNode node = m_rightNode;
m_rightNode = m_leftNode;
m_leftNode = node;
m_joinType = JoinType.LEFT;
}
} | [
"public",
"void",
"toLeftJoin",
"(",
")",
"{",
"assert",
"(",
"(",
"m_leftNode",
"!=",
"null",
"&&",
"m_rightNode",
"!=",
"null",
")",
"||",
"(",
"m_leftNode",
"==",
"null",
"&&",
"m_rightNode",
"==",
"null",
")",
")",
";",
"if",
"(",
"m_leftNode",
"==... | Transform all RIGHT joins from the tree into the LEFT ones by swapping the nodes and their join types | [
"Transform",
"all",
"RIGHT",
"joins",
"from",
"the",
"tree",
"into",
"the",
"LEFT",
"ones",
"by",
"swapping",
"the",
"nodes",
"and",
"their",
"join",
"types"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/parseinfo/BranchNode.java#L321-L342 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/parseinfo/BranchNode.java | BranchNode.extractSubTree | @Override
protected void extractSubTree(List<JoinNode> leafNodes) {
JoinNode[] children = {m_leftNode, m_rightNode};
for (JoinNode child : children) {
// Leaf nodes don't have a significant join type,
// test for them first and never attempt to start a new tree at a leaf.
if ( ! (child instanceof BranchNode)) {
continue;
}
if (((BranchNode)child).m_joinType == m_joinType) {
// The join type for this node is the same as the root's one
// Keep walking down the tree
child.extractSubTree(leafNodes);
} else {
// The join type for this join differs from the root's one
// Terminate the sub-tree
leafNodes.add(child);
// Replace the join node with the temporary node having the id negated
JoinNode tempNode = new TableLeafNode(
-child.m_id, child.m_joinExpr, child.m_whereExpr, null);
if (child == m_leftNode) {
m_leftNode = tempNode;
} else {
m_rightNode = tempNode;
}
}
}
} | java | @Override
protected void extractSubTree(List<JoinNode> leafNodes) {
JoinNode[] children = {m_leftNode, m_rightNode};
for (JoinNode child : children) {
// Leaf nodes don't have a significant join type,
// test for them first and never attempt to start a new tree at a leaf.
if ( ! (child instanceof BranchNode)) {
continue;
}
if (((BranchNode)child).m_joinType == m_joinType) {
// The join type for this node is the same as the root's one
// Keep walking down the tree
child.extractSubTree(leafNodes);
} else {
// The join type for this join differs from the root's one
// Terminate the sub-tree
leafNodes.add(child);
// Replace the join node with the temporary node having the id negated
JoinNode tempNode = new TableLeafNode(
-child.m_id, child.m_joinExpr, child.m_whereExpr, null);
if (child == m_leftNode) {
m_leftNode = tempNode;
} else {
m_rightNode = tempNode;
}
}
}
} | [
"@",
"Override",
"protected",
"void",
"extractSubTree",
"(",
"List",
"<",
"JoinNode",
">",
"leafNodes",
")",
"{",
"JoinNode",
"[",
"]",
"children",
"=",
"{",
"m_leftNode",
",",
"m_rightNode",
"}",
";",
"for",
"(",
"JoinNode",
"child",
":",
"children",
")",... | Starting from the root recurse to its children stopping at the first join node
of the different type and discontinue the tree at this point by replacing the join node with
the temporary node which id matches the join node id. This join node is the root of the next
sub-tree.
@param root - The root of the join tree
@param leafNodes - the list of the root nodes of the next sub-trees | [
"Starting",
"from",
"the",
"root",
"recurse",
"to",
"its",
"children",
"stopping",
"at",
"the",
"first",
"join",
"node",
"of",
"the",
"different",
"type",
"and",
"discontinue",
"the",
"tree",
"at",
"this",
"point",
"by",
"replacing",
"the",
"join",
"node",
... | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/parseinfo/BranchNode.java#L352-L381 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/parseinfo/BranchNode.java | BranchNode.hasOuterJoin | @Override
public boolean hasOuterJoin() {
assert(m_leftNode != null && m_rightNode != null);
return m_joinType != JoinType.INNER ||
m_leftNode.hasOuterJoin() || m_rightNode.hasOuterJoin();
} | java | @Override
public boolean hasOuterJoin() {
assert(m_leftNode != null && m_rightNode != null);
return m_joinType != JoinType.INNER ||
m_leftNode.hasOuterJoin() || m_rightNode.hasOuterJoin();
} | [
"@",
"Override",
"public",
"boolean",
"hasOuterJoin",
"(",
")",
"{",
"assert",
"(",
"m_leftNode",
"!=",
"null",
"&&",
"m_rightNode",
"!=",
"null",
")",
";",
"return",
"m_joinType",
"!=",
"JoinType",
".",
"INNER",
"||",
"m_leftNode",
".",
"hasOuterJoin",
"(",... | Returns true if one of the tree nodes has outer join | [
"Returns",
"true",
"if",
"one",
"of",
"the",
"tree",
"nodes",
"has",
"outer",
"join"
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/parseinfo/BranchNode.java#L386-L391 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/parseinfo/BranchNode.java | BranchNode.extractEphemeralTableQueries | @Override
public void extractEphemeralTableQueries(List<StmtEphemeralTableScan> scans) {
if (m_leftNode != null) {
m_leftNode.extractEphemeralTableQueries(scans);
}
if (m_rightNode != null) {
m_rightNode.extractEphemeralTableQueries(scans);
}
} | java | @Override
public void extractEphemeralTableQueries(List<StmtEphemeralTableScan> scans) {
if (m_leftNode != null) {
m_leftNode.extractEphemeralTableQueries(scans);
}
if (m_rightNode != null) {
m_rightNode.extractEphemeralTableQueries(scans);
}
} | [
"@",
"Override",
"public",
"void",
"extractEphemeralTableQueries",
"(",
"List",
"<",
"StmtEphemeralTableScan",
">",
"scans",
")",
"{",
"if",
"(",
"m_leftNode",
"!=",
"null",
")",
"{",
"m_leftNode",
".",
"extractEphemeralTableQueries",
"(",
"scans",
")",
";",
"}"... | Returns a list of immediate sub-queries which are part of this query.
@return List<AbstractParsedStmt> - list of sub-queries from this query | [
"Returns",
"a",
"list",
"of",
"immediate",
"sub",
"-",
"queries",
"which",
"are",
"part",
"of",
"this",
"query",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/parseinfo/BranchNode.java#L397-L405 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/parseinfo/BranchNode.java | BranchNode.allInnerJoins | @Override
public boolean allInnerJoins() {
return m_joinType == JoinType.INNER &&
(m_leftNode == null || m_leftNode.allInnerJoins()) &&
(m_rightNode == null || m_rightNode.allInnerJoins());
} | java | @Override
public boolean allInnerJoins() {
return m_joinType == JoinType.INNER &&
(m_leftNode == null || m_leftNode.allInnerJoins()) &&
(m_rightNode == null || m_rightNode.allInnerJoins());
} | [
"@",
"Override",
"public",
"boolean",
"allInnerJoins",
"(",
")",
"{",
"return",
"m_joinType",
"==",
"JoinType",
".",
"INNER",
"&&",
"(",
"m_leftNode",
"==",
"null",
"||",
"m_leftNode",
".",
"allInnerJoins",
"(",
")",
")",
"&&",
"(",
"m_rightNode",
"==",
"n... | Returns if all the join operations within this join tree are inner joins.
@return true or false. | [
"Returns",
"if",
"all",
"the",
"join",
"operations",
"within",
"this",
"join",
"tree",
"are",
"inner",
"joins",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/parseinfo/BranchNode.java#L455-L460 | train |
VoltDB/voltdb | src/frontend/org/voltdb/planner/ScanDeterminizer.java | ScanDeterminizer.apply | public static void apply(CompiledPlan plan, DeterminismMode detMode)
{
if (detMode == DeterminismMode.FASTER) {
return;
}
if (plan.hasDeterministicStatement()) {
return;
}
AbstractPlanNode planGraph = plan.rootPlanGraph;
if (planGraph.isOrderDeterministic()) {
return;
}
AbstractPlanNode root = plan.rootPlanGraph;
root = recursivelyApply(root);
plan.rootPlanGraph = root;
} | java | public static void apply(CompiledPlan plan, DeterminismMode detMode)
{
if (detMode == DeterminismMode.FASTER) {
return;
}
if (plan.hasDeterministicStatement()) {
return;
}
AbstractPlanNode planGraph = plan.rootPlanGraph;
if (planGraph.isOrderDeterministic()) {
return;
}
AbstractPlanNode root = plan.rootPlanGraph;
root = recursivelyApply(root);
plan.rootPlanGraph = root;
} | [
"public",
"static",
"void",
"apply",
"(",
"CompiledPlan",
"plan",
",",
"DeterminismMode",
"detMode",
")",
"{",
"if",
"(",
"detMode",
"==",
"DeterminismMode",
".",
"FASTER",
")",
"{",
"return",
";",
"}",
"if",
"(",
"plan",
".",
"hasDeterministicStatement",
"(... | Only applies when stronger determinism is needed. | [
"Only",
"applies",
"when",
"stronger",
"determinism",
"is",
"needed",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/planner/ScanDeterminizer.java#L45-L61 | train |
VoltDB/voltdb | src/frontend/org/voltdb/iv2/Scheduler.java | Scheduler.updateLastSeenUniqueIds | public void updateLastSeenUniqueIds(VoltMessage message)
{
long sequenceWithUniqueId = Long.MIN_VALUE;
boolean commandLog = (message instanceof TransactionInfoBaseMessage &&
(((TransactionInfoBaseMessage)message).isForReplay()));
boolean sentinel = message instanceof MultiPartitionParticipantMessage;
// if replay
if (commandLog || sentinel) {
sequenceWithUniqueId = ((TransactionInfoBaseMessage)message).getUniqueId();
// Update last seen and last polled txnId for replicas
m_replaySequencer.updateLastSeenUniqueId(sequenceWithUniqueId,
(TransactionInfoBaseMessage) message);
m_replaySequencer.updateLastPolledUniqueId(sequenceWithUniqueId,
(TransactionInfoBaseMessage) message);
}
} | java | public void updateLastSeenUniqueIds(VoltMessage message)
{
long sequenceWithUniqueId = Long.MIN_VALUE;
boolean commandLog = (message instanceof TransactionInfoBaseMessage &&
(((TransactionInfoBaseMessage)message).isForReplay()));
boolean sentinel = message instanceof MultiPartitionParticipantMessage;
// if replay
if (commandLog || sentinel) {
sequenceWithUniqueId = ((TransactionInfoBaseMessage)message).getUniqueId();
// Update last seen and last polled txnId for replicas
m_replaySequencer.updateLastSeenUniqueId(sequenceWithUniqueId,
(TransactionInfoBaseMessage) message);
m_replaySequencer.updateLastPolledUniqueId(sequenceWithUniqueId,
(TransactionInfoBaseMessage) message);
}
} | [
"public",
"void",
"updateLastSeenUniqueIds",
"(",
"VoltMessage",
"message",
")",
"{",
"long",
"sequenceWithUniqueId",
"=",
"Long",
".",
"MIN_VALUE",
";",
"boolean",
"commandLog",
"=",
"(",
"message",
"instanceof",
"TransactionInfoBaseMessage",
"&&",
"(",
"(",
"(",
... | Update last seen uniqueIds in the replay sequencer. This is used on MPI repair.
@param message | [
"Update",
"last",
"seen",
"uniqueIds",
"in",
"the",
"replay",
"sequencer",
".",
"This",
"is",
"used",
"on",
"MPI",
"repair",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/iv2/Scheduler.java#L185-L203 | train |
VoltDB/voltdb | src/frontend/org/voltdb/sysprocs/SnapshotRestoreResultSet.java | SnapshotRestoreResultSet.parseRestoreResultRow | public void parseRestoreResultRow(VoltTable vt)
{
RestoreResultKey key = new RestoreResultKey(
(int)vt.getLong("HOST_ID"),
(int)vt.getLong("PARTITION_ID"),
vt.getString("TABLE"));
if (containsKey(key)) {
get(key).mergeData(vt.getString("RESULT").equals("SUCCESS"),
vt.getString("ERR_MSG"));
}
else {
put(key, new RestoreResultValue((int)vt.getLong("SITE_ID"),
vt.getString("RESULT").equals("SUCCESS"),
vt.getString("HOSTNAME"),
vt.getString("ERR_MSG")));
}
} | java | public void parseRestoreResultRow(VoltTable vt)
{
RestoreResultKey key = new RestoreResultKey(
(int)vt.getLong("HOST_ID"),
(int)vt.getLong("PARTITION_ID"),
vt.getString("TABLE"));
if (containsKey(key)) {
get(key).mergeData(vt.getString("RESULT").equals("SUCCESS"),
vt.getString("ERR_MSG"));
}
else {
put(key, new RestoreResultValue((int)vt.getLong("SITE_ID"),
vt.getString("RESULT").equals("SUCCESS"),
vt.getString("HOSTNAME"),
vt.getString("ERR_MSG")));
}
} | [
"public",
"void",
"parseRestoreResultRow",
"(",
"VoltTable",
"vt",
")",
"{",
"RestoreResultKey",
"key",
"=",
"new",
"RestoreResultKey",
"(",
"(",
"int",
")",
"vt",
".",
"getLong",
"(",
"\"HOST_ID\"",
")",
",",
"(",
"int",
")",
"vt",
".",
"getLong",
"(",
... | Parse a restore result table row and add to the set.
@param vt restore result table | [
"Parse",
"a",
"restore",
"result",
"table",
"row",
"and",
"add",
"to",
"the",
"set",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/src/frontend/org/voltdb/sysprocs/SnapshotRestoreResultSet.java#L157-L173 | train |
VoltDB/voltdb | third_party/java/src/com/google_voltpatches/common/collect/SortedLists.java | SortedLists.binarySearch | public static <E extends Comparable> int binarySearch(
List<? extends E> list,
E e,
KeyPresentBehavior presentBehavior,
KeyAbsentBehavior absentBehavior) {
checkNotNull(e);
return binarySearch(list, e, Ordering.natural(), presentBehavior, absentBehavior);
} | java | public static <E extends Comparable> int binarySearch(
List<? extends E> list,
E e,
KeyPresentBehavior presentBehavior,
KeyAbsentBehavior absentBehavior) {
checkNotNull(e);
return binarySearch(list, e, Ordering.natural(), presentBehavior, absentBehavior);
} | [
"public",
"static",
"<",
"E",
"extends",
"Comparable",
">",
"int",
"binarySearch",
"(",
"List",
"<",
"?",
"extends",
"E",
">",
"list",
",",
"E",
"e",
",",
"KeyPresentBehavior",
"presentBehavior",
",",
"KeyAbsentBehavior",
"absentBehavior",
")",
"{",
"checkNotN... | Searches the specified naturally ordered list for the specified object using the binary search
algorithm.
<p>Equivalent to {@link #binarySearch(List, Function, Object, Comparator, KeyPresentBehavior,
KeyAbsentBehavior)} using {@link Ordering#natural}. | [
"Searches",
"the",
"specified",
"naturally",
"ordered",
"list",
"for",
"the",
"specified",
"object",
"using",
"the",
"binary",
"search",
"algorithm",
"."
] | 8afc1031e475835344b5497ea9e7203bc95475ac | https://github.com/VoltDB/voltdb/blob/8afc1031e475835344b5497ea9e7203bc95475ac/third_party/java/src/com/google_voltpatches/common/collect/SortedLists.java#L188-L195 | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.