| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q162400 | BookKeeperJournalManager.purgeLogsOlderThan | train | @Override
public void purgeLogsOlderThan(long minTxIdToKeep) throws IOException {
checkEnv();
Collection<EditLogLedgerMetadata> ledgers =
metadataManager.listLedgers(false); // Don't list in-progress ledgers
for (EditLogLedgerMetadata ledger : ledgers) {
if (ledger.getFirstTxId() < minTxIdToKeep &&
ledger.getLastTxId() < minTxIdToKeep) {
LOG.info("Purging edit log segment: " + ledger);
// Try to delete the associated ZooKeeper metadata
if (!metadataManager.deleteLedgerMetadata(ledger, -1)) {
// It's possible that another process has already deleted the
// metadata
LOG.warn(ledger + " has already been purged!");
} else {
try {
// Remove the ledger from BookKeeper itself to reclaim disk space.
bookKeeperClient.deleteLedger(ledger.getLedgerId());
} catch (BKException e) {
bkException("Unrecoverable error deleting " + ledger +
" from BookKeeper", e);
} catch (InterruptedException e) {
interruptedException("Interrupted deleting " + ledger +
" from BookKeeper", e);
}
}
}
}
} | java | {
"resource": ""
} |
q162401 | CoronaSerializer.readToken | train | public void readToken(String parentFieldName, JsonToken expectedToken)
throws IOException {
JsonToken currentToken = jsonParser.nextToken();
if (currentToken != expectedToken) {
throw new IOException("Expected a " + expectedToken.toString() +
" token when reading the value of the field: " +
parentFieldName +
" but found a " +
currentToken.toString() + " token");
}
} | java | {
"resource": ""
} |
q162402 | CoronaSerializer.getFieldName | train | public String getFieldName() throws IOException {
if (jsonParser.getCurrentToken() != JsonToken.FIELD_NAME) {
throw new IOException("Expected a field of type " + JsonToken.FIELD_NAME +
", but found a field of type " +
jsonParser.getCurrentToken());
}
return jsonParser.getCurrentName();
} | java | {
"resource": ""
} |
q162403 | BlockListAsLongs.setBlock | train | void setBlock(final int index, final Block b) {
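// Each block occupies three consecutive longs in the array: its id, its length in bytes, and its generation stamp.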
blockList[index2BlockId(index)] = b.getBlockId();
blockList[index2BlockLen(index)] = b.getNumBytes();
blockList[index2BlockGenStamp(index)] = b.getGenerationStamp();
} | java | {
"resource": ""
} |
q162404 | UtilizationReporter.main | train | public static void main(String argv[]) throws Exception {
try {
Configuration conf = new Configuration();
UtilizationReporter utilizationReporter =
new UtilizationReporter(conf);
utilizationReporter.start();
} catch (Throwable e) {
System.err.println(e);
LOG.error(StringUtils.stringifyException(e));
System.exit(-1);
}
} | java | {
"resource": ""
} |
q162405 | AvatarDataNode.getNameNodeAddress | train | static InetSocketAddress getNameNodeAddress(Configuration conf,
String cname, String rpcKey, String cname2) {
String fs = conf.get(cname);
String fs1 = conf.get(rpcKey);
String fs2 = conf.get(cname2);
Configuration newconf = new Configuration(conf);
newconf.set("fs.default.name", fs);
if (fs1 != null) {
newconf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, fs1);
}
if (fs2 != null) {
newconf.set("dfs.namenode.dn-address", fs2);
}
return DataNode.getNameNodeAddress(newconf);
} | java | {
"resource": ""
} |
q162406 | MD5MD5CRC32FileChecksum.write | train | public static void write(XMLOutputter xml, MD5MD5CRC32FileChecksum that
) throws IOException {
xml.startTag(MD5MD5CRC32FileChecksum.class.getName());
if (that != null) {
xml.attribute("bytesPerCRC", "" + that.bytesPerCRC);
xml.attribute("crcPerBlock", "" + that.crcPerBlock);
xml.attribute("md5", "" + that.md5);
}
xml.endTag();
} | java | {
"resource": ""
} |
q162407 | MD5MD5CRC32FileChecksum.valueOf | train | public static MD5MD5CRC32FileChecksum valueOf(Attributes attrs
) throws SAXException {
final String bytesPerCRC = attrs.getValue("bytesPerCRC");
final String crcPerBlock = attrs.getValue("crcPerBlock");
final String md5 = attrs.getValue("md5");
if (bytesPerCRC == null || crcPerBlock == null || md5 == null) {
return null;
}
try {
return new MD5MD5CRC32FileChecksum(Integer.valueOf(bytesPerCRC),
Integer.valueOf(crcPerBlock), new MD5Hash(md5));
} catch(Exception e) {
throw new SAXException("Invalid attributes: bytesPerCRC=" + bytesPerCRC
+ ", crcPerBlock=" + crcPerBlock + ", md5=" + md5, e);
}
} | java | {
"resource": ""
} |
q162408 | XmlTokenizer.getNextElementsValue | train | private String getNextElementsValue(String wantedName) throws IOException {
boolean gotSTART_ELEMENT = false;
try {
int eventType = in.getEventType();
while(true) {
switch(eventType) {
case XMLStreamConstants.CHARACTERS: // 4
if(gotSTART_ELEMENT) {
// XML returns "\n" instead of empty (zero-length) string
// for elements like <x></x>
return in.getText().trim();
}
break;
case XMLStreamConstants.END_DOCUMENT: // 8
throw new IOException("End of XML while looking for element [" +
wantedName + "]");
// break;
case XMLStreamConstants.START_ELEMENT : // 1
if(gotSTART_ELEMENT) {
throw new IOException("START_ELEMENT [" +
in.getName() +
" event when expecting CHARACTERS event for [" +
wantedName + "]");
} else if(in.getName().toString().equals(wantedName)) {
gotSTART_ELEMENT = true;
} else {
throw new IOException("unexpected element name [" +
in.getName() + "], was expecting [" +
wantedName + "]");
}
break;
case XMLStreamConstants.COMMENT:
case XMLStreamConstants.END_ELEMENT: // 2
case XMLStreamConstants.SPACE:
case XMLStreamConstants.START_DOCUMENT: // 7
// these are in XML but we don't need them
break;
// these should never appear in edits XML
case XMLStreamConstants.ATTRIBUTE:
case XMLStreamConstants.CDATA:
case XMLStreamConstants.DTD:
case XMLStreamConstants.ENTITY_DECLARATION:
case XMLStreamConstants.ENTITY_REFERENCE:
case XMLStreamConstants.NAMESPACE:
case XMLStreamConstants.NOTATION_DECLARATION:
case XMLStreamConstants.PROCESSING_INSTRUCTION:
default:
throw new IOException("Unsupported event type [" +
eventType + "] (see XMLStreamConstants)");
}
if(!in.hasNext()) { break; }
eventType = in.next();
}
} catch(XMLStreamException e) {
throw new IOException("Error reading XML stream", e);
}
throw new IOException(
"Error reading XML stream, should never reach this line, " +
"most likely the XML does not have the elements we are looking for");
} | java | {
"resource": ""
} |
q162409 | CurrentInProgressMetadata.update | train | public void update(String newPath) throws IOException {
String id = hostname + Thread.currentThread().getId();
CurrentInProgressMetadataWritable cip = localWritable.get();
cip.set(id, newPath);
byte[] data = WritableUtil.writableToByteArray(cip);
try {
zooKeeper.setData(fullyQualifiedZNode, data, expectedZNodeVersion.get());
if (LOG.isDebugEnabled()) {
LOG.debug("Set " + fullyQualifiedZNode + " to point to " + newPath);
}
} catch (KeeperException.BadVersionException e) {
// Throw an exception if we try to update without having read the
// current version
LOG.error(fullyQualifiedZNode + " has been updated by another process",
e);
throw new StaleVersionException(fullyQualifiedZNode +
" has been updated by another process!");
} catch (KeeperException e) {
keeperException("Unrecoverable ZooKeeper error updating " +
fullyQualifiedZNode, e);
} catch (InterruptedException e) {
interruptedException("Interrupted updating " + fullyQualifiedZNode, e);
}
} | java | {
"resource": ""
} |
q162410 | CurrentInProgressMetadata.read | train | public String read() throws IOException {
CurrentInProgressMetadataWritable cip = localWritable.get();
if (readAndUpdateVersion(cip)) {
return cip.getPath();
} else {
if (LOG.isDebugEnabled()) {
LOG.debug(fullyQualifiedZNode + " is currently clear.");
}
}
return null;
} | java | {
"resource": ""
} |
q162411 | CurrentInProgressMetadata.clear | train | public void clear() throws IOException {
try {
zooKeeper.setData(fullyQualifiedZNode, null, expectedZNodeVersion.get());
} catch (KeeperException.BadVersionException e) {
LOG.error(fullyQualifiedZNode + " has been updated by another process",
e);
throw new StaleVersionException(fullyQualifiedZNode +
" has been updated by another process!");
} catch (KeeperException e) {
keeperException("Unrecoverable ZooKeeper error clearing " +
fullyQualifiedZNode, e);
} catch (InterruptedException e) {
interruptedException("Interrupted clearing " + fullyQualifiedZNode, e);
}
} | java | {
"resource": ""
} |
q162412 | ProgressWheel.onSizeChanged | train | @Override protected void onSizeChanged(int w, int h, int oldw, int oldh) {
super.onSizeChanged(w, h, oldw, oldh);
setupBounds(w, h);
setupPaints();
invalidate();
} | java | {
"resource": ""
} |
q162413 | ProgressWheel.setupPaints | train | private void setupPaints() {
barPaint.setColor(barColor);
barPaint.setAntiAlias(true);
barPaint.setStyle(Style.STROKE);
barPaint.setStrokeWidth(barWidth);
rimPaint.setColor(rimColor);
rimPaint.setAntiAlias(true);
rimPaint.setStyle(Style.STROKE);
rimPaint.setStrokeWidth(rimWidth);
} | java | {
"resource": ""
} |
q162414 | ProgressWheel.setupBounds | train | private void setupBounds(int layout_width, int layout_height) {
int paddingTop = getPaddingTop();
int paddingBottom = getPaddingBottom();
int paddingLeft = getPaddingLeft();
int paddingRight = getPaddingRight();
if (!fillRadius) {
// Width should equal height; use the smaller dimension to size the circle
int minValue = Math.min(layout_width - paddingLeft - paddingRight,
layout_height - paddingBottom - paddingTop);
int circleDiameter = Math.min(minValue, circleRadius * 2 - barWidth * 2);
// Calc the Offset if needed for centering the wheel in the available space
int xOffset = (layout_width - paddingLeft - paddingRight - circleDiameter) / 2 + paddingLeft;
int yOffset = (layout_height - paddingTop - paddingBottom - circleDiameter) / 2 + paddingTop;
circleBounds =
new RectF(xOffset + barWidth, yOffset + barWidth, xOffset + circleDiameter - barWidth,
yOffset + circleDiameter - barWidth);
} else {
circleBounds = new RectF(paddingLeft + barWidth, paddingTop + barWidth,
layout_width - paddingRight - barWidth, layout_height - paddingBottom - barWidth);
}
} | java | {
"resource": ""
} |
q162415 | ProgressWheel.parseAttributes | train | private void parseAttributes(TypedArray a) {
// We transform the default values from DIP to pixels
DisplayMetrics metrics = getContext().getResources().getDisplayMetrics();
barWidth = (int) TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_DIP, barWidth, metrics);
rimWidth = (int) TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_DIP, rimWidth, metrics);
circleRadius =
(int) TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_DIP, circleRadius, metrics);
circleRadius =
(int) a.getDimension(R.styleable.ProgressWheel_matProg_circleRadius, circleRadius);
fillRadius = a.getBoolean(R.styleable.ProgressWheel_matProg_fillRadius, false);
barWidth = (int) a.getDimension(R.styleable.ProgressWheel_matProg_barWidth, barWidth);
rimWidth = (int) a.getDimension(R.styleable.ProgressWheel_matProg_rimWidth, rimWidth);
float baseSpinSpeed =
a.getFloat(R.styleable.ProgressWheel_matProg_spinSpeed, spinSpeed / 360.0f);
spinSpeed = baseSpinSpeed * 360;
barSpinCycleTime =
a.getInt(R.styleable.ProgressWheel_matProg_barSpinCycleTime, (int) barSpinCycleTime);
barColor = a.getColor(R.styleable.ProgressWheel_matProg_barColor, barColor);
rimColor = a.getColor(R.styleable.ProgressWheel_matProg_rimColor, rimColor);
linearProgress = a.getBoolean(R.styleable.ProgressWheel_matProg_linearProgress, false);
if (a.getBoolean(R.styleable.ProgressWheel_matProg_progressIndeterminate, false)) {
spin();
}
// Recycle
a.recycle();
} | java | {
"resource": ""
} |
q162416 | ProgressWheel.setInstantProgress | train | public void setInstantProgress(float progress) {
if (isSpinning) {
mProgress = 0.0f;
isSpinning = false;
}
if (progress > 1.0f) {
progress -= 1.0f;
} else if (progress < 0) {
progress = 0;
}
if (progress == mTargetProgress) {
return;
}
mTargetProgress = Math.min(progress * 360.0f, 360.0f);
mProgress = mTargetProgress;
lastTimeAnimated = SystemClock.uptimeMillis();
invalidate();
} | java | {
"resource": ""
} |
q162417 | ProgressWheel.setProgress | train | public void setProgress(float progress) {
if (isSpinning) {
mProgress = 0.0f;
isSpinning = false;
runCallback();
}
if (progress > 1.0f) {
progress -= 1.0f;
} else if (progress < 0) {
progress = 0;
}
if (progress == mTargetProgress) {
return;
}
// If we are already at the target position, reset the last
// animated timestamp so the new animation starts smoothly
// from here
if (mProgress == mTargetProgress) {
lastTimeAnimated = SystemClock.uptimeMillis();
}
mTargetProgress = Math.min(progress * 360.0f, 360.0f);
invalidate();
} | java | {
"resource": ""
} |
q162418 | AbstractRefreshWebappMojo.execute | train | @Override
public void execute() throws MojoExecutionException {
if (StringUtils.isBlank(this.refreshPort)) {
this.refreshPort = "8080";
}
// Ping the server to see if it is up; if not, log a warning and exit
if (!ping()) {
getLog().warn("Connection failed to " + this.refreshHost + ":" + this.refreshPort + ", " + getAbortedMsg());
return;
}
executeRefresh();
} | java | {
"resource": ""
} |
q162419 | AbstractRefreshWebappMojo.refreshWebScripts | train | protected void refreshWebScripts(String url) {
// Create the Refresh URL for the Alfresco Tomcat server
URL alfrescoTomcatUrl = buildFinalUrl(url);
if (alfrescoTomcatUrl == null) {
getLog().error("Could not build refresh URL for " + refreshWebappName + ", " + getAbortedMsg());
return;
}
// Set up the data we need to POST to the server for the refresh to work
List<NameValuePair> postData = new ArrayList<NameValuePair>();
postData.add(new BasicNameValuePair("reset", "on"));
postData.add(new BasicNameValuePair("submit", "Refresh Web Scripts"));
// Do the refresh
makePostCall(alfrescoTomcatUrl, postData, "Refresh Web Scripts");
} | java | {
"resource": ""
} |
q162420 | AbstractRefreshWebappMojo.clearDependencyCaches | train | protected void clearDependencyCaches(String url) {
// Create the Clear Cache URL for the Alfresco Tomcat server
URL alfrescoTomcatUrl = buildFinalUrl(url);
if (alfrescoTomcatUrl == null) {
getLog().error("Could not build clear dependency caches URL for " +
refreshWebappName + ", " + getAbortedMsg());
return;
}
// Do the refresh
makePostCall(alfrescoTomcatUrl, null, "Clear Dependency Caches");
} | java | {
"resource": ""
} |
q162421 | AbstractRefreshWebappMojo.makePostCall | train | private void makePostCall(URL alfrescoTomcatUrl, List<NameValuePair> postData, String operation) {
CloseableHttpClient client = null;
CloseableHttpResponse response = null;
try {
// Set up an HTTP POST request to the Alfresco Webapp we are targeting
HttpHost targetHost = new HttpHost(
alfrescoTomcatUrl.getHost(), alfrescoTomcatUrl.getPort(), alfrescoTomcatUrl.getProtocol());
// Set up authentication parameters
CredentialsProvider credsProvider = new BasicCredentialsProvider();
credsProvider.setCredentials(new AuthScope(targetHost.getHostName(), targetHost.getPort()),
new UsernamePasswordCredentials(refreshUsername, refreshPassword));
// Create the HTTP Client we will use to make the call
client = HttpClients.custom().setDefaultCredentialsProvider(credsProvider).build();
// Create AuthCache instance
AuthCache authCache = new BasicAuthCache();
// Generate BASIC scheme object and add it to the local auth cache
BasicScheme basicAuth = new BasicScheme();
authCache.put(targetHost, basicAuth);
// Add AuthCache to the execution context
HttpClientContext localContext = HttpClientContext.create();
localContext.setAuthCache(authCache);
// Make the call to Refresh the Alfresco Webapp
HttpPost httpPost = new HttpPost(alfrescoTomcatUrl.toURI());
if (postData != null) {
UrlEncodedFormEntity entity = new UrlEncodedFormEntity(postData, "UTF-8");
httpPost.setEntity(entity);
}
httpPost.setHeader("Accept-Charset", "iso-8859-1,utf-8");
httpPost.setHeader("Accept-Language", "en-us");
response = client.execute(targetHost, httpPost, localContext);
// If there is no response, the request failed outright
if (response == null) {
getLog().error("POST request failed to " + alfrescoTomcatUrl.toString() + ", " + getAbortedMsg());
return;
}
// Check if we got a successful response or not
int statusCode = response.getStatusLine().getStatusCode();
if (statusCode == HttpStatus.SC_OK) {
getLog().info("Successfull " + operation + " for " + refreshWebappName);
} else {
String reasonPhrase = response.getStatusLine().getReasonPhrase();
getLog().warn("Failed to " + operation + " for " + refreshWebappName + ". Response status: " +
statusCode + ", message: " + reasonPhrase);
}
} catch (Exception ex) {
getLog().error("POST request failed to " + alfrescoTomcatUrl.toString() + ", " + getAbortedMsg());
getLog().error("Exception Msg: " + ex.getMessage());
} finally {
closeQuietly(response);
closeQuietly(client);
}
} | java | {
"resource": ""
} |
q162422 | AbstractRunMojo.getPort | train | protected String getPort() {
String port = tomcatPort;
if (mavenTomcatPort != null) {
port = mavenTomcatPort;
getLog().info( "Tomcat Port overridden by property maven.tomcat.port");
}
return port;
} | java | {
"resource": ""
} |
q162423 | AbstractRunMojo.tomcatIsRunning | train | protected boolean tomcatIsRunning() {
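// Probe the Alfresco webapp over HTTP; any successful round trip means Tomcat is up on the configured port.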
CloseableHttpClient client= HttpClients.createDefault();
CloseableHttpResponse response = null;
try {
HttpGet httpget = new HttpGet("http://localhost:" + getPort() + "/alfresco");
response = client.execute(httpget);
getLog().info("Tomcat is running on port "+ getPort());
return true;
} catch (Exception ex) {
getLog().info("Tomcat is not running on port " + getPort() );
return false;
}
} | java | {
"resource": ""
} |
q162424 | AbstractRunMojo.copyAlfrescoLicense | train | protected void copyAlfrescoLicense() throws MojoExecutionException {
final String warOutputDir = getWarOutputDir(PLATFORM_WAR_PREFIX_NAME);
final String licDestDir = warOutputDir + "/WEB-INF/classes/alfresco/extension/license";
getLog().info("Copying Alfresco Enterprise license to: " + licDestDir);
executeMojo(
plugin(
groupId("org.apache.maven.plugins"),
artifactId("maven-resources-plugin"),
version(MAVEN_RESOURCE_PLUGIN_VERSION)
),
goal("copy-resources"),
configuration(
element(name("outputDirectory"), licDestDir),
element(name("resources"),
element(name("resource"),
element(name("directory"), "src/test/license"),
element(name("includes"),
element(name("include"), "*.lic")
),
element(name("filtering"), "false")
)
)
),
execEnv
);
} | java | {
"resource": ""
} |
q162425 | AbstractRunMojo.copyShareConfigCustom | train | protected void copyShareConfigCustom() throws MojoExecutionException {
final String warOutputDir = getWarOutputDir(SHARE_WAR_PREFIX_NAME);
final String distDir = warOutputDir + "/WEB-INF/classes/alfresco/web-extension/";
String repoUrl = project.getProperties().getProperty("alfresco.repo.url");
if (repoUrl == null) {
project.getProperties().setProperty("alfresco.repo.url", "http://localhost:" + getPort() + "/alfresco");
}
getLog().info("Copying Share config custom to: " + distDir);
executeMojo(
plugin(
groupId("org.apache.maven.plugins"),
artifactId("maven-resources-plugin"),
version(MAVEN_RESOURCE_PLUGIN_VERSION)
),
goal("copy-resources"),
configuration(
element(name("outputDirectory"), distDir),
element(name("resources"),
element(name("resource"),
element(name("directory"), "src/test/resources/share"),
element(name("includes"),
element(name("include"), "*.xml")
),
element(name("filtering"), "true")
)
)
),
execEnv
);
} | java | {
"resource": ""
} |
q162426 | AbstractRunMojo.copyHotswapAgentProperties | train | protected void copyHotswapAgentProperties(String warPrefix) throws MojoExecutionException {
if (!copyHotswapAgentConfig) {
return;
}
final String warOutputDir = getWarOutputDir(warPrefix) + "/WEB-INF/classes/";
getLog().info("Copying " + warPrefix + "-hotswap-agent.properties to " + warOutputDir);
executeMojo(
plugin(
groupId("com.coderplus.maven.plugins"),
artifactId("copy-rename-maven-plugin"),
version("1.0")
),
goal("rename"),
configuration(
element(name("sourceFile"), project.getBuild().getTestOutputDirectory() + "/" + warPrefix + "-hotswap-agent.properties"),
element(name("destinationFile"), warOutputDir + "hotswap-agent.properties")
),
execEnv
);
} | java | {
"resource": ""
} |
q162427 | AbstractRunMojo.packageAndInstallCustomWar | train | protected String packageAndInstallCustomWar(String warName) throws MojoExecutionException {
final String warArtifactId = "${project.artifactId}-" + warName;
final String warSourceDir = getWarOutputDir(warName);
// Package the customized war file
// Note. don't use the maven-war-plugin here as it will package the module files twice, once from the
// target/classes dir and once via the JAR
String warPath = project.getBuild().getDirectory() + "/" + warName + ".war";
ZipUtil.pack(new File(warSourceDir), new File(warPath));
// Install the customized war file in the local maven repo
executeMojo(
plugin(
groupId("org.apache.maven.plugins"),
artifactId("maven-install-plugin"),
version(MAVEN_INSTALL_PLUGIN_VERSION)
),
goal("install-file"),
configuration(
element(name("file"), warPath),
element(name("groupId"), "${project.groupId}"),
element(name("artifactId"), warArtifactId),
element(name("version"), "${project.version}"),
element(name("packaging"), "war") // Don't forget, otherwise installed as .POM
)
, execEnv
);
return warArtifactId;
} | java | {
"resource": ""
} |
q162428 | DatasetTarget.outputBundle | train | private static FormatBundle<DatasetKeyOutputFormat> outputBundle(Configuration conf) {
FormatBundle<DatasetKeyOutputFormat> bundle = FormatBundle
.forOutput(DatasetKeyOutputFormat.class);
for (Map.Entry<String, String> entry : conf) {
bundle.set(entry.getKey(), entry.getValue());
}
return bundle;
} | java | {
"resource": ""
} |
q162429 | SchemaValidationUtil.hasErrors | train | private static boolean hasErrors(Symbol symbol) {
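// Recursively walk the Avro parser symbol tree; an ErrorAction anywhere below means schema resolution failed.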
switch(symbol.kind) {
case ALTERNATIVE:
return hasErrors(symbol, ((Symbol.Alternative) symbol).symbols);
case EXPLICIT_ACTION:
return false;
case IMPLICIT_ACTION:
return symbol instanceof Symbol.ErrorAction;
case REPEATER:
Symbol.Repeater r = (Symbol.Repeater) symbol;
return hasErrors(r.end) || hasErrors(symbol, r.production);
case ROOT:
case SEQUENCE:
return hasErrors(symbol, symbol.production);
case TERMINAL:
return false;
default:
throw new RuntimeException("unknown symbol kind: " + symbol.kind);
}
} | java | {
"resource": ""
} |
q162430 | JSONSchemaCommand.run | train | @Override
public int run() throws IOException {
Preconditions.checkArgument(samplePaths != null && !samplePaths.isEmpty(),
"Sample JSON path is required");
Preconditions.checkArgument(samplePaths.size() == 1,
"Only one JSON sample can be given");
// assume fields are nullable by default, users can easily change this
Schema sampleSchema = JsonUtil.inferSchema(
open(samplePaths.get(0)), recordName, numRecords);
if (sampleSchema != null) {
output(sampleSchema.toString(!minimize), console, outputPath);
return 0;
} else {
console.error("Sample file did not contain any records");
return 1;
}
} | java | {
"resource": ""
} |
q162431 | KiteConfigurationService.loadKiteConf | train | private void loadKiteConf(Services services) throws IOException {
String[] paths = services.getConf().getStrings(KITE_CONFIGURATION);
if (paths != null && paths.length != 0) {
kiteConf = new Configuration(false);
for(String path : paths) {
if (path.startsWith("hdfs")) {
Path p = new Path(path);
HadoopAccessorService has = services.get(HadoopAccessorService.class);
try {
FileSystem fs = has.createFileSystem(
System.getProperty("user.name"), p.toUri(), has.createJobConf(p.toUri().getAuthority()));
if (fs.exists(p)) {
FSDataInputStream is = null;
try {
is = fs.open(p);
Configuration partialConf = new XConfiguration(is);
kiteConf = merge(kiteConf, partialConf);
} finally {
if (is != null) {
is.close();
}
}
LOG.info("Loaded Kite Configuration: " + path);
} else {
LOG.warn("Kite Configuration could not be found at [" + path + "]");
}
} catch (HadoopAccessorException hae) {
throw new IOException(hae);
}
} else {
File f = new File(path);
if (f.exists()) {
InputStream is = null;
try {
is = new FileInputStream(f);
Configuration partialConf = new XConfiguration(is);
kiteConf = merge(kiteConf, partialConf);
} finally {
if (is != null) {
is.close();
}
}
LOG.info("Loaded Kite Configuration: " + path);
} else {
LOG.warn("Kite Configuration could not be found at [" + path + "]");
}
}
}
} else {
LOG.info("Kite Configuration not specified");
}
} | java | {
"resource": ""
} |
q162432 | HBaseClientTemplate.get | train | public Result get(Get get) {
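// Check a table out of the pool, apply all registered Get modifiers, and always return the table to the pool.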
HTableInterface table = pool.getTable(tableName);
try {
for (GetModifier getModifier : getModifiers) {
get = getModifier.modifyGet(get);
}
try {
return table.get(get);
} catch (IOException e) {
throw new DatasetIOException("Error performing get", e);
}
} finally {
if (table != null) {
try {
table.close();
} catch (IOException e) {
throw new DatasetIOException("Error putting table back into pool", e);
}
}
}
} | java | {
"resource": ""
} |
q162433 | HBaseClientTemplate.get | train | public Result get(Get get, GetModifier getModifier) {
if (getModifier != null) {
get = getModifier.modifyGet(get);
}
return get(get);
} | java | {
"resource": ""
} |
q162434 | HBaseClientTemplate.put | train | public boolean put(PutAction putAction) {
HTableInterface table = pool.getTable(tableName);
try {
return put(putAction, table);
} finally {
if (table != null) {
try {
table.close();
} catch (IOException e) {
throw new DatasetIOException("Error putting table back into pool", e);
}
}
}
} | java | {
"resource": ""
} |
q162435 | HBaseClientTemplate.put | train | public boolean put(PutAction putAction, HTableInterface table) {
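// If a version check is attached, use checkAndPut on the system version column for optimistic concurrency control; otherwise do a plain put.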
for (PutActionModifier putActionModifier : putActionModifiers) {
putAction = putActionModifier.modifyPutAction(putAction);
}
Put put = putAction.getPut();
if (putAction.getVersionCheckAction() != null) {
byte[] versionBytes = null;
long version = putAction.getVersionCheckAction().getVersion();
if (version != 0) {
versionBytes = Bytes.toBytes(version);
}
try {
return table.checkAndPut(put.getRow(), Constants.SYS_COL_FAMILY,
Constants.VERSION_CHECK_COL_QUALIFIER, versionBytes, put);
} catch (IOException e) {
throw new DatasetIOException(
"Error putting row into table with checkAndPut", e);
}
} else {
try {
table.put(put);
return true;
} catch (IOException e) {
throw new DatasetIOException("Error putting row from table", e);
}
}
} | java | {
"resource": ""
} |
q162436 | HBaseClientTemplate.put | train | public boolean put(PutAction putAction, PutActionModifier putActionModifier) {
if (putActionModifier != null) {
putAction = putActionModifier.modifyPutAction(putAction);
}
return put(putAction);
} | java | {
"resource": ""
} |
q162437 | HBaseClientTemplate.put | train | public <E> boolean put(E entity, EntityMapper<E> entityMapper) {
return put(entity, null, entityMapper);
} | java | {
"resource": ""
} |
q162438 | HBaseClientTemplate.put | train | public <E> boolean put(E entity, PutActionModifier putActionModifier,
EntityMapper<E> entityMapper) {
PutAction putAction = entityMapper.mapFromEntity(entity);
return put(putAction, putActionModifier);
} | java | {
"resource": ""
} |
q162439 | HBaseClientTemplate.increment | train | public <E> long increment(PartitionKey key, String fieldName, long amount,
EntityMapper<E> entityMapper) {
Increment increment = entityMapper.mapToIncrement(key, fieldName, amount);
HTableInterface table = pool.getTable(tableName);
Result result;
try {
result = table.increment(increment);
} catch (IOException e) {
throw new DatasetIOException("Error incrementing field.", e);
} finally {
// Return the table to the pool, mirroring the other template methods
try {
table.close();
} catch (IOException e) {
throw new DatasetIOException("Error putting table back into pool", e);
}
}
return entityMapper.mapFromIncrementResult(result, fieldName);
} | java | {
"resource": ""
} |
q162440 | HBaseClientTemplate.delete | train | public boolean delete(DeleteAction deleteAction) {
HTableInterface table = pool.getTable(tableName);
try {
for (DeleteActionModifier deleteActionModifier : deleteActionModifiers) {
deleteAction = deleteActionModifier.modifyDeleteAction(deleteAction);
}
Delete delete = deleteAction.getDelete();
if (deleteAction.getVersionCheckAction() != null) {
byte[] versionBytes = Bytes.toBytes(deleteAction
.getVersionCheckAction().getVersion());
try {
return table.checkAndDelete(delete.getRow(),
Constants.SYS_COL_FAMILY, Constants.VERSION_CHECK_COL_QUALIFIER,
versionBytes, delete);
} catch (IOException e) {
throw new DatasetIOException(
"Error deleting row from table with checkAndDelete", e);
}
} else {
try {
table.delete(delete);
return true;
} catch (IOException e) {
throw new DatasetIOException("Error deleteing row from table", e);
}
}
} finally {
if (table != null) {
try {
table.close();
} catch (IOException e) {
throw new DatasetIOException("Error putting table back into pool", e);
}
}
}
} | java | {
"resource": ""
} |
q162441 | HBaseClientTemplate.delete | train | public boolean delete(DeleteAction deleteAction,
DeleteActionModifier deleteActionModifier) {
if (deleteActionModifier != null) {
deleteAction = deleteActionModifier.modifyDeleteAction(deleteAction);
}
return delete(deleteAction);
} | java | {
"resource": ""
} |
q162442 | HBaseClientTemplate.delete | train | public boolean delete(PartitionKey key, Set<String> columns,
VersionCheckAction checkAction, KeySerDe keySerDe) {
return delete(key, columns, checkAction, null, keySerDe);
} | java | {
"resource": ""
} |
q162443 | HBaseClientTemplate.delete | train | public boolean delete(PartitionKey key, Set<String> columns,
VersionCheckAction checkAction,
DeleteActionModifier deleteActionModifier, KeySerDe keySerDe) {
byte[] keyBytes = keySerDe.serialize(key);
Delete delete = new Delete(keyBytes);
for (String requiredColumn : columns) {
String[] familyAndColumn = requiredColumn.split(":");
if (familyAndColumn.length == 1) {
delete.deleteFamily(Bytes.toBytes(familyAndColumn[0]));
} else {
delete.deleteColumns(Bytes.toBytes(familyAndColumn[0]),
Bytes.toBytes(familyAndColumn[1]));
}
}
return delete(new DeleteAction(delete, checkAction), deleteActionModifier);
} | java | {
"resource": ""
} |
q162444 | HBaseClientTemplate.getScannerBuilder | train | public <E> EntityScannerBuilder<E> getScannerBuilder(
EntityMapper<E> entityMapper) {
EntityScannerBuilder<E> builder = new BaseEntityScanner.Builder<E>(pool,
tableName, entityMapper);
for (ScanModifier scanModifier : scanModifiers) {
builder.addScanModifier(scanModifier);
}
return builder;
} | java | {
"resource": ""
} |
q162445 | HBaseClientTemplate.createBatch | train | public <E> EntityBatch<E> createBatch(EntityMapper<E> entityMapper,
long writeBufferSize) {
return new BaseEntityBatch<E>(this, entityMapper, pool, tableName,
writeBufferSize);
} | java | {
"resource": ""
} |
q162446 | PartialPathConversion.toKey | train | public StorageKey toKey(Path fromPath, StorageKey storage) {
final List<FieldPartitioner> partitioners =
Accessor.getDefault().getFieldPartitioners(storage.getPartitionStrategy());
//Strip off the root directory to get partition segments
String truncatedPath = fromPath.toString();
if(truncatedPath.startsWith(rootPath.toString())){
truncatedPath = truncatedPath.substring(rootPath.toString().length());
}
List<String> pathParts = new LinkedList<String>();
//Check that there are segments to parse.
if(!truncatedPath.isEmpty()) {
Path currentPath = new Path(truncatedPath);
while (currentPath != null) {
String name = currentPath.getName();
if(!name.isEmpty()) {
pathParts.add(currentPath.getName());
}
currentPath = currentPath.getParent();
}
//list is now last -> first so reverse the list to be first -> last
Collections.reverse(pathParts);
}
final List<Object> values = Lists.newArrayList(
new Object[pathParts.size()]);
//for each segment we have get the value for the key
for(int i = 0; i < pathParts.size(); i++){
values.set(i, valueForDirname(
(FieldPartitioner<?, ?>) partitioners.get(i),
pathParts.get(i)));
}
storage.replaceValues(values);
return storage;
} | java | {
"resource": ""
} |
q162447 | FluentIterable.toArray | train | @GwtIncompatible("Array.newArray(Class, int)")
public final E[] toArray(Class<E> type) {
return Iterables.toArray(iterable, type);
} | java | {
"resource": ""
} |
q162448 | KeyRangeIterable.start | train | private static <T> Iterator<T> start(T singleton) {
return Collections.singleton(singleton).iterator();
} | java | {
"resource": ""
} |
q162449 | CSVProperties.coalesce | train | private static <T> T coalesce(T... values) {
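// Return the first non-null value, like SQL's COALESCE; null if every value is null.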
for (T value : values) {
if (value != null) {
return value;
}
}
return null;
} | java | {
"resource": ""
} |
q162450 | MemcmpEncoder.writeInt | train | @Override
public void writeInt(int n) throws IOException {
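// Write big-endian bytes with the sign bit flipped so encoded ints sort in numeric order under byte-wise (memcmp) comparison.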
byte[] intBytes = new byte[] { (byte) ((n >>> 24) ^ 0x80),
(byte) (n >>> 16), (byte) (n >>> 8), (byte) n };
out.write(intBytes);
} | java | {
"resource": ""
} |
q162451 | MemcmpEncoder.writeLong | train | @Override
public void writeLong(long n) throws IOException {
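// Same sign-bit flip as writeInt, extended across all eight bytes of the long.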
byte[] intBytes = new byte[] { (byte) ((n >>> 56) ^ 0x80),
(byte) (n >>> 48), (byte) (n >>> 40), (byte) (n >>> 32),
(byte) (n >>> 24), (byte) (n >>> 16), (byte) (n >>> 8), (byte) n };
out.write(intBytes);
} | java | {
"resource": ""
} |
q162452 | UserProfileExample.printUserProfies | train | public void printUserProfies() {
EntityScanner<UserProfileModel> scanner = userProfileDao.getScanner();
scanner.initialize();
try {
for (UserProfileModel entity : scanner) {
System.out.println(entity.toString());
}
} finally {
// scanners need to be closed.
scanner.close();
}
} | java | {
"resource": ""
} |
q162453 | UserProfileExample.printUserProfileActionsForLastName | train | public void printUserProfileActionsForLastName(String lastName) {
// Create a partial key that will allow us to start the scanner from the
// first user record that has last name equal to the one provided.
PartitionKey startKey = new PartitionKey("lastName");
// Get the scanner with the start key. Null for stopKey in the getScanner
// method indicates that the scanner will scan to the end of the table. Our
// loop will break out when it encounters a record without the last name.
EntityScanner<UserProfileActionsModel> scanner = userProfileActionsDao
.getScanner(startKey, null);
scanner.initialize();
try {
// scan until we find a last name not equal to the one provided
for (UserProfileActionsModel entity : scanner) {
if (!entity.getUserProfileModel().getLastName().equals(lastName)) {
// last name of row different, break out of the scan.
break;
}
System.out.println(entity.toString());
}
} finally {
// scanners need to be closed.
scanner.close();
}
} | java | {
"resource": ""
} |
q162454 | UserProfileExample.create | train | public void create(String firstName, String lastName, boolean married) {
long ts = System.currentTimeMillis();
UserProfileModel profileModel = UserProfileModel.newBuilder()
.setFirstName(firstName).setLastName(lastName).setMarried(married)
.setCreated(ts).build();
UserActionsModel actionsModel = UserActionsModel.newBuilder()
.setFirstName(firstName).setLastName(lastName)
.setActions(new HashMap<String, String>()).build();
actionsModel.getActions().put("profile_created", Long.toString(ts));
UserProfileActionsModel profileActionsModel = UserProfileActionsModel
.newBuilder().setUserProfileModel(profileModel)
.setUserActionsModel(actionsModel).build();
if (!userProfileActionsDao.put(profileActionsModel)) {
// If put returns false, a user already existed at this row
System.out
.println("Creating a new user profile failed due to a write conflict.");
}
} | java | {
"resource": ""
} |
q162455 | UserProfileExample.updateUserProfile | train | public void updateUserProfile(String firstName, String lastName,
boolean married) {
// Get the timestamp we'll use to set the value of the profile_updated
// action.
long ts = System.currentTimeMillis();
// Construct the key we'll use to fetch the user.
PartitionKey key = new PartitionKey(lastName, firstName);
// Get the profile and actions entity from the composite dao.
UserProfileActionsModel profileActionsModel = userProfileActionsDao
.get(key);
// Updating the married status is hairy since our avro compiler isn't set up
// to compile setters for fields. We have to construct a clone through the
// builder.
UserProfileActionsModel updatedProfileActionsModel = UserProfileActionsModel
.newBuilder(profileActionsModel)
.setUserProfileModel(
UserProfileModel
.newBuilder(profileActionsModel.getUserProfileModel())
.setMarried(married).build()).build();
// Since maps are mutable, we can update the actions map without having to
// go through the builder like above.
updatedProfileActionsModel.getUserActionsModel().getActions()
.put("profile_updated", Long.toString(ts));
if (!userProfileActionsDao.put(updatedProfileActionsModel)) {
// If put returns false, a write conflict occurred where someone else
// updated the row between the times we did the get and put.
System.out
.println("Updating the user profile failed due to a write conflict");
}
} | java | {
"resource": ""
} |
q162456 | UserProfileExample.addAction | train | public void addAction(String firstName, String lastName, String actionType,
String actionValue) {
// Create a new UserActionsModel, and add a new actions map to it with a
// single action value. Even if one exists in this row, since it has a lone
// keyAsColumn field, it won't remove any actions that already exist in the
// actions column family.
UserActionsModel actionsModel = UserActionsModel.newBuilder()
.setLastName(lastName).setFirstName(firstName)
.setActions(new HashMap<String, String>()).build();
actionsModel.getActions().put(actionType, actionValue);
// Perform the put.
userActionsDao.put(actionsModel);
} | java | {
"resource": ""
} |
q162457 | UserProfileExample.registerSchemas | train | private void registerSchemas(Configuration conf, SchemaManager schemaManager)
throws InterruptedException {
HBaseAdmin admin;
try {
// Construct an HBaseAdmin object (required by schema tool), and delete it
// if it exists so we start fresh.
admin = new HBaseAdmin(conf);
if (admin.tableExists("kite_example_user_profiles")) {
admin.disableTable("kite_example_user_profiles");
admin.deleteTable("kite_example_user_profiles");
}
} catch (IOException e) {
throw new RuntimeException(e);
}
// Use the SchemaTool to create the schemas that are in the example-models
// directory, and create the table and column families required by those
// schemas.
SchemaTool tool = new SchemaTool(admin, schemaManager);
tool.createOrMigrateSchemaDirectory("classpath:example-models", true);
} | java | {
"resource": ""
} |
q162458 | UserProfileExample.main | train | public static void main(String[] args) throws InterruptedException {
UserProfileExample example = new UserProfileExample();
// Let's create some user profiles
example.create("John", "Doe", true);
example.create("Jane", "Doe", false);
example.create("Foo", "Bar", false);
// Now print those user profiles. This doesn't include actions
example.printUserProfies();
// Now we'll add some user actions to each user
example.addAction("Jane", "Doe", "last_login", "2013-07-30 00:00:00");
example.addAction("Jane", "Doe", "ad_click", "example.com_ad_id");
example.addAction("Foo", "Bar", "last_login", "2013-07-30 00:00:00");
// Print the user profiles and actions for the Does. This will include the
// above actions, as well as a profile_created action set when creating the
// user profiles.
example.printUserProfileActionsForLastName("Doe");
// Update Jane to a married status.
example.updateUserProfile("Jane", "Doe", true);
// Reprint the user profiles and actions. Jane should now have married true,
// as well as a new profile_updated timestamp.
example.printUserProfileActionsForLastName("Doe");
} | java | {
"resource": ""
} |
q162459 | FastJavaScriptEngine.parse | train | private Class parse(String str, ScriptContext ctx) throws ScriptException {
String fileName = getFileName(ctx);
String sourcePath = getSourcePath(ctx);
String classPath = getClassPath(ctx);
Map<String, byte[]> classBytes = compiler.compile(fileName, str,
ctx.getErrorWriter(), sourcePath, classPath);
if (classBytes == null) {
throw new ScriptException("compilation failed");
}
// create a ClassLoader to load classes from MemoryJavaFileManager
MemoryClassLoader loader = new MemoryClassLoader(classBytes, classPath,
getParentLoader(ctx));
String mainClassName = getMainClassName(ctx);
if (mainClassName != null) {
try {
Class clazz = loader.load(mainClassName);
Method mainMethod = findMainMethod(clazz);
if (mainMethod == null) {
throw new ScriptException("no main method in " + mainClassName);
}
return clazz;
} catch (ClassNotFoundException cnfe) {
throw new ScriptException(cnfe);
}
}
// no main class configured - load all compiled classes
Iterable<Class> classes;
try {
classes = loader.loadAll();
} catch (ClassNotFoundException exp) {
throw new ScriptException(exp);
}
// search for class with main method
Class c = findMainClass(classes);
if (c != null) {
return c;
} else {
// if class with "main" method, then
// return first class
Iterator<Class> itr = classes.iterator();
if (itr.hasNext()) {
return itr.next();
} else {
return null;
}
}
} | java | {
"resource": ""
} |
q162460 | AvroEntitySerDe.getColumnDecoder | train | private Decoder getColumnDecoder(Schema writtenFieldAvroSchema, InputStream in) {
// Use a special Avro decoder that has special handling for int, long,
// and String types. See ColumnDecoder for more information.
if (writtenFieldAvroSchema.getType() == Type.INT
|| writtenFieldAvroSchema.getType() == Type.LONG
|| writtenFieldAvroSchema.getType() == Type.STRING) {
return new ColumnDecoder(in);
} else {
return DecoderFactory.get().binaryDecoder(in, null);
}
} | java | {
"resource": ""
} |
q162461 | AvroEntitySerDe.getColumnEncoder | train | private Encoder getColumnEncoder(Schema fieldAvroSchema, OutputStream out) {
// Use a special Avro encoder that has special handling for int, long,
// and String types. See ColumnEncoder for more information.
if (fieldAvroSchema.getType() == Type.INT
|| fieldAvroSchema.getType() == Type.LONG
|| fieldAvroSchema.getType() == Type.STRING) {
return new ColumnEncoder(out);
} else {
return EncoderFactory.get().binaryEncoder(out, null);
}
} | java | {
"resource": ""
} |
q162462 | EntityScannerBuilder.addEqualFilter | train | public EntityScannerBuilder<E> addEqualFilter(String fieldName,
Object filterValue) {
SingleFieldEntityFilter singleFieldEntityFilter = new SingleFieldEntityFilter(
entityMapper.getEntitySchema(), entityMapper.getEntitySerDe(),
fieldName, filterValue, CompareFilter.CompareOp.EQUAL);
filterList.add(singleFieldEntityFilter.getFilter());
return this;
} | java | {
"resource": ""
} |
q162463 | EntityScannerBuilder.addRegexMatchFilter | train | public EntityScannerBuilder<E> addRegexMatchFilter(String fieldName,
String regexString) {
RegexEntityFilter regexEntityFilter = new RegexEntityFilter(
entityMapper.getEntitySchema(), entityMapper.getEntitySerDe(),
fieldName, regexString);
filterList.add(regexEntityFilter.getFilter());
return this;
} | java | {
"resource": ""
} |
q162464 | EntityScannerBuilder.addIsMissingFilter | train | public EntityScannerBuilder<E> addIsMissingFilter(String fieldName) {
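// Compare against a sentinel value that should never match a real one; with setFilterIfMissing(false), rows that lack the column still pass the filter.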
SingleFieldEntityFilter singleFieldEntityFilter = new SingleFieldEntityFilter(
entityMapper.getEntitySchema(), entityMapper.getEntitySerDe(),
fieldName, "++++NON_SHALL_PASS++++", CompareFilter.CompareOp.EQUAL);
SingleColumnValueFilter filter = (SingleColumnValueFilter) singleFieldEntityFilter
.getFilter();
filter.setFilterIfMissing(false);
filterList.add(filter);
return this;
} | java | {
"resource": ""
} |
q162465 | AvroEntityComposer.initRecordBuilderFactories | train | private void initRecordBuilderFactories() {
for (FieldMapping fieldMapping : avroSchema.getColumnMappingDescriptor().getFieldMappings()) {
if (fieldMapping.getMappingType() == MappingType.KEY_AS_COLUMN) {
String fieldName = fieldMapping.getFieldName();
Schema fieldSchema = avroSchema.getAvroSchema().getField(fieldName)
.schema();
Schema.Type fieldSchemaType = fieldSchema.getType();
if (fieldSchemaType == Schema.Type.RECORD) {
AvroRecordBuilderFactory<E> factory = buildAvroRecordBuilderFactory(fieldSchema);
kacRecordBuilderFactories.put(fieldName, factory);
}
}
}
} | java | {
"resource": ""
} |
q162466 | ColumnMapping.getRequiredColumns | train | public Set<String> getRequiredColumns() {
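// Collect the family:qualifier pairs this mapping reads or writes. KEY fields live in the row key, and KEY_AS_COLUMN fields cover an entire family.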
Set<String> set = new HashSet<String>();
for (FieldMapping fieldMapping : fieldMappings) {
if (FieldMapping.MappingType.KEY == fieldMapping.getMappingType()) {
continue;
} else if (FieldMapping.MappingType.KEY_AS_COLUMN == fieldMapping.getMappingType()) {
set.add(fieldMapping.getFamilyAsString() + ":");
} else {
set.add(fieldMapping.getFamilyAsString() + ":"
+ fieldMapping.getQualifierAsString());
}
}
return set;
} | java | {
"resource": ""
} |
q162467 | ColumnMapping.getRequiredColumnFamilies | train | public Set<String> getRequiredColumnFamilies() {
Set<String> set = new HashSet<String>();
for (FieldMapping mapping : fieldMappings) {
if (FieldMapping.MappingType.KEY != mapping.getMappingType())
set.add(mapping.getFamilyAsString());
}
return set;
} | java | {
"resource": ""
} |
q162468 | HdfsService.shouldFormatDFSCluster | train | private static boolean shouldFormatDFSCluster(String localDFSLocation,
boolean clean) {
boolean format = true;
File f = new File(localDFSLocation);
if (f.exists() && f.isDirectory() && !clean) {
format = false;
}
return format;
} | java | {
"resource": ""
} |
q162469 | HdfsService.configureDFSCluster | train | private static Configuration configureDFSCluster(Configuration config,
String localDFSLocation, String bindIP, int namenodeRpcPort,
int namenodeHttpPort, int datanodePort, int datanodeIpcPort,
int datanodeHttpPort) {
logger.info("HDFS force binding to ip: " + bindIP);
config = new KiteCompatibleConfiguration(config, bindIP, namenodeRpcPort,
namenodeHttpPort);
config.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + bindIP + ":"
+ namenodeRpcPort);
config.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, bindIP + ":"
+ datanodePort);
config.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, bindIP + ":"
+ datanodeIpcPort);
config.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, bindIP + ":"
+ datanodeHttpPort);
// When a datanode registers with the namenode, the Namenode does a hostname
// check of the datanode which will fail on OpenShift due to reverse DNS
// issues with the internal IP addresses. This config disables that check,
// and will allow a datanode to connect regardless.
config.setBoolean("dfs.namenode.datanode.registration.ip-hostname-check",
false);
config.set("hdfs.minidfs.basedir", localDFSLocation);
// allow current user to impersonate others
String user = System.getProperty("user.name");
config.set("hadoop.proxyuser." + user + ".groups", "*");
config.set("hadoop.proxyuser." + user + ".hosts", "*");
return config;
} | java | {
"resource": ""
} |
q162470 | Constraints.filter | train | public <E> Iterator<E> filter(Iterator<E> iterator, EntityAccessor<E> accessor) {
return Iterators.filter(iterator, toEntityPredicate(accessor));
} | java | {
"resource": ""
} |
q162471 | Constraints.alignedWithBoundaries | train | @SuppressWarnings("unchecked")
public boolean alignedWithBoundaries() {
if (constraints.isEmpty()) {
return true;
} else if (strategy == null) {
// constraints must align with partitions, which requires a strategy
return false;
}
Multimap<String, FieldPartitioner> partitioners = HashMultimap.create();
Set<String> partitionFields = Sets.newHashSet();
for (FieldPartitioner fp : Accessor.getDefault().getFieldPartitioners(strategy)) {
partitioners.put(fp.getSourceName(), fp);
partitionFields.add(fp.getName());
}
// The key predicate is equivalent to a constraint set when the permissive
// projection for each predicate can be used in its place. This happens if
// fp.project(predicate) == fp.projectStrict(predicate):
//
// let D = some value domain
// let pred : D -> {0, 1}
// let D_{pred} = {x \in D | pred(x) == 1} (a subset of D selected by pred)
//
// let fp : D -> S (also a value domain)
// let fp.strict(pred) = pred_{fp.strict} : S -> {0, 1} (project strict)
// s.t. pred_{fp.strict}(fp(x)) == 1 => pred(x) == 1
// let fp.project(pred) = pred_{fp.project} : S -> {0, 1} (project)
// s.t. pred(x) == 1 => pred_{fp.project}(fp(x)) == 1
//
// lemma. {x \in D | pred_{fp.strict}(fp(x))} is a subset of D_{pred}
// pred_{fp.strict}(fp(x)) == 1 => pred(x) == 1 => x \in D_{pred}
//
// theorem. (pred_{fp.project}(s) => pred_{fp.strict}(s)) =>
// D_{pred} == {x \in D | pred_{fp.strict}(fp(x))}
//
// => let x \in D_{pred}. then pred_{fp.project}(fp(x)) == 1 by def
// then pred_{fp.strict(fp(x)) == 1 by premise
// therefore {x \in D | pred_{fp.strict}(fp(x))} \subsetOf D_{pred}
// <= by previous lemma
//
// Note: if projectStrict is too conservative or project is too permissive,
// then this logic cannot determine that that the original predicate is
// satisfied
for (Map.Entry<String, Predicate> entry : constraints.entrySet()) {
if (partitionFields.contains(entry.getKey())) {
// constraint is against partition values and aligned by definition
continue;
}
Collection<FieldPartitioner> fps = partitioners.get(entry.getKey());
if (fps.isEmpty()) {
LOG.debug("No field partitioners for key {}", entry.getKey());
return false;
}
Predicate predicate = entry.getValue();
if (!(predicate instanceof Exists)) {
boolean satisfied = false;
for (FieldPartitioner fp : fps) {
if (fp instanceof CalendarFieldPartitioner) {
TimeDomain domain = TimeDomain.get(strategy, entry.getKey());
Predicate strict = domain.projectStrict(predicate);
Predicate permissive = domain.project(predicate);
LOG.debug("Time predicate strict: {}", strict);
LOG.debug("Time predicate permissive: {}", permissive);
satisfied = strict != null && strict.equals(permissive);
break;
} else {
Predicate strict = fp.projectStrict(predicate);
Predicate permissive = fp.project(predicate);
if (strict != null && strict.equals(permissive)) {
satisfied = true;
break;
}
}
}
// this predicate cannot be satisfied by the partition information
if (!satisfied) {
LOG.debug("Predicate not satisfied: {}", predicate);
return false;
}
}
}
return true;
} | java | {
"resource": ""
} |
q162472 | Constraints.toNormalizedQueryMap | train | public Map<String, String> toNormalizedQueryMap() {
Map<String, String> query = Maps.newTreeMap();
return toQueryMap(query, true);
} | java | {
"resource": ""
} |
q162473 | HiveSchemaConverter.startsWith | train | private static boolean startsWith(String[] left, List<String> right) {
// short circuit if a match isn't possible
if (left.length < right.size()) {
return false;
}
for (int i = 0; i < right.size(); i += 1) {
if (!left[i].equals(right.get(i))) {
return false;
}
}
return true;
} | java | {
"resource": ""
} |
q162474 | KeySchema.position | train | public int position(String fieldName) {
if (fieldPositions.containsKey(fieldName)) {
return fieldPositions.get(fieldName);
} else {
throw new DatasetException("Cannot recover " + fieldName + " from key");
}
} | java | {
"resource": ""
} |
q162475 | PartitionExpression.toExpression | train | public static String toExpression(PartitionStrategy partitionStrategy) {
List<FieldPartitioner> fieldPartitioners = partitionStrategy
.getFieldPartitioners();
if (fieldPartitioners.size() == 1) {
return PartitionFunctions.toExpression(fieldPartitioners.get(0));
}
StringBuilder sb = new StringBuilder();
sb.append("[");
for (FieldPartitioner fieldPartitioner : fieldPartitioners) {
if (sb.length() > 1) {
sb.append(", ");
}
sb.append(PartitionFunctions.toExpression(fieldPartitioner));
}
sb.append("]");
return sb.toString();
} | java | {
"resource": ""
} |
q162476 | VersionedAvroEntityMapper.initializeEntityVersionEntityMapper | train | private void initializeEntityVersionEntityMapper() {
AvroEntitySchema avroEntitySchema = schemaParser
.parseEntitySchema(managedSchemaEntityVersionSchema);
avroEntitySchema = AvroUtils.mergeSpecificStringTypes(
ManagedSchemaEntityVersion.class, avroEntitySchema);
AvroEntityComposer<ManagedSchemaEntityVersion> entityComposer = new AvroEntityComposer<ManagedSchemaEntityVersion>(
avroEntitySchema, true);
AvroEntitySerDe<ManagedSchemaEntityVersion> entitySerDe = new AvroEntitySerDe<ManagedSchemaEntityVersion>(
entityComposer, avroEntitySchema, avroEntitySchema, true);
this.managedSchemaEntityVersionEntityMapper = new BaseEntityMapper<ManagedSchemaEntityVersion>(
avroEntitySchema, entitySerDe);
} | java | {
"resource": ""
} |
q162477 | VersionedAvroEntityMapper.updateEntityMappers | train | private void updateEntityMappers() {
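// Build mappers for any schema versions we have not seen yet so that rows written with older schemas can still be read.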
for (Entry<Integer, EntitySchema> entry : schemaManager.getEntitySchemas(
tableName, entityName).entrySet()) {
if (!entityMappers.containsKey(entry.getKey())) {
AvroEntitySchema writtenSchema = (AvroEntitySchema) entry.getValue();
EntityMapper<ENTITY> entityMapper = constructWrappedEntityMapper(
keySchema, entitySchema, writtenSchema, entityClass);
entityMappers.put(entry.getKey(), entityMapper);
}
}
} | java | {
"resource": ""
} |
q162478 | RangeFieldPartitioner.transformClosed | train | private Range<String> transformClosed(Range<String> range) {
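// Strictly project the source range into the partition domain: shrink the bounds so every partition in the result lies entirely inside the original range.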
if (range.hasLowerBound()) {
String lower = range.lowerEndpoint();
// the special case, (a, _] and apply(a) == a is handled by skipping a
String afterLower = domain.next(apply(lower));
if (afterLower != null) {
if (range.hasUpperBound()) {
String upper = range.upperEndpoint();
String upperImage = apply(upper);
// meaning: at the endpoint
if (upper.equals(upperImage) && range.isUpperBoundClosed()) {
// include upper
return Ranges.closed(afterLower, upperImage);
} else {
String beforeUpper = domain.previous(upperImage);
if (afterLower.compareTo(beforeUpper) <= 0) {
return Ranges.closed(afterLower, beforeUpper);
}
}
} else {
return Ranges.atLeast(afterLower);
}
}
} else if (range.hasUpperBound()) {
String upper = range.upperEndpoint();
String upperImage = apply(upper);
if (upper.equals(upperImage) && range.isUpperBoundClosed()) {
// include upper
return Ranges.atMost(upperImage);
} else {
String beforeUpper = domain.previous(upperImage);
if (beforeUpper != null) {
return Ranges.atMost(beforeUpper);
}
}
}
return null;
} | java | {
"resource": ""
} |
q162479 | FileSystemUtil.deleteParentDirectoriesIfEmpty | train | static boolean deleteParentDirectoriesIfEmpty(FileSystem fs, Path root, Path path) throws IOException {
boolean deleted = false;
try {
for (Path current = path.getParent();
!current.equals(root) && !(current.getParent() == null);
current = current.getParent()) {
final FileStatus[] stats = fs.listStatus(current);
if (stats == null || stats.length == 0) {
// dir is empty and should be removed
LOG.debug("Deleting empty path {}", current);
deleted = fs.delete(current, true) || deleted;
} else {
// all parent directories will be non-empty
break;
}
}
} catch (FileNotFoundException e) {
LOG.debug("Path does not exist it may have been deleted by another process.", e);
}
return deleted;
} | java | {
"resource": ""
} |
q162480 | FileSystemUtil.supportsRename | train | public static boolean supportsRename(URI fsUri, Configuration conf) {
String fsUriScheme = fsUri.getScheme();
// Only S3 is known to not support renaming, but allow configuration override.
// This logic is intended as a temporary placeholder solution and should
// be revisited once HADOOP-9565 has been completed.
return conf.getBoolean(FileSystemProperties.SUPPORTS_RENAME_PROP,
!(fsUriScheme.equalsIgnoreCase("s3n") || fsUriScheme.equalsIgnoreCase("s3a")));
} | java | {
"resource": ""
} |
q162481 | CSVAppender.valueString | train | private static String valueString(Object value, Schema schema) {
if (value == null || schema.getType() == Schema.Type.NULL) {
return null;
}
switch (schema.getType()) {
case BOOLEAN:
case FLOAT:
case DOUBLE:
case INT:
case LONG:
case STRING:
return value.toString();
case ENUM:
// serialize as the ordinal from the schema
return String.valueOf(schema.getEnumOrdinal(value.toString()));
case UNION:
int index = ReflectData.get().resolveUnion(schema, value);
return valueString(value, schema.getTypes().get(index));
default:
// FIXED, BYTES, MAP, ARRAY, RECORD are not supported
throw new DatasetOperationException(
"Unsupported field type: " + schema.getType());
}
} | java | {
"resource": ""
} |
q162482 | DatasetKeyOutputFormat.loadOrCreateJobDataset | train | @SuppressWarnings("unchecked")
private static <E> Dataset<E> loadOrCreateJobDataset(JobContext jobContext) {
Dataset<Object> dataset = load(jobContext).getDataset();
String jobDatasetName = getJobDatasetName(jobContext);
DatasetRepository repo = getDatasetRepository(jobContext);
if (repo.exists(TEMP_NAMESPACE, jobDatasetName)) {
Dataset<E> tempDataset = repo.load(TEMP_NAMESPACE, jobDatasetName,
DatasetKeyOutputFormat.<E>getType(jobContext));
try {
Compatibility.checkCompatible(dataset.getDescriptor(),
tempDataset.getDescriptor());
return tempDataset;
} catch (RuntimeException ex) {
// swallow
}
}
return repo.create(TEMP_NAMESPACE, jobDatasetName,
copy(dataset.getDescriptor()),
DatasetKeyOutputFormat.<E>getType(jobContext));
} | java | {
"resource": ""
} |
q162483 | SchemaTool.createOrMigrateSchemaDirectory | train | public void createOrMigrateSchemaDirectory(String schemaDirectory,
boolean createTableAndFamilies) throws InterruptedException {
List<String> schemaStrings;
if (schemaDirectory.startsWith(CLASSPATH_PREFIX)) {
URL dirURL = getClass().getClassLoader().getResource(
schemaDirectory.substring(CLASSPATH_PREFIX.length()));
if (dirURL != null && dirURL.getProtocol().equals("file")) {
try {
schemaStrings = getSchemaStringsFromDir(new File(dirURL.toURI()));
} catch (URISyntaxException e) {
throw new DatasetException(e);
}
} else if (dirURL != null && dirURL.getProtocol().equals("jar")) {
String jarPath = dirURL.getPath().substring(5,
dirURL.getPath().indexOf("!"));
schemaStrings = getSchemaStringsFromJar(jarPath,
schemaDirectory.substring(CLASSPATH_PREFIX.length()));
} else {
String msg = "Could not find classpath resource: " + schemaDirectory;
LOG.error(msg);
throw new DatasetException(msg);
}
} else {
schemaStrings = getSchemaStringsFromDir(new File(schemaDirectory));
}
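// Group each entity schema string under every table it maps to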
Map<String, List<String>> tableEntitySchemaMap = new HashMap<String, List<String>>();
for (String schemaString : schemaStrings) {
List<String> tables = getTablesFromSchemaString(schemaString);
for (String table : tables) {
if (tableEntitySchemaMap.containsKey(table)) {
tableEntitySchemaMap.get(table).add(schemaString);
} else {
List<String> entityList = new ArrayList<String>();
entityList.add(schemaString);
tableEntitySchemaMap.put(table, entityList);
}
}
}
// Validate that every requested table has at least one entity schema
for (Entry<String, List<String>> entry : tableEntitySchemaMap.entrySet()) {
String table = entry.getKey();
List<String> entitySchemas = entry.getValue();
if (entitySchemas.isEmpty()) {
String msg =
"Table requested, but no entity schemas found for table: " + table;
LOG.error(msg);
throw new ValidationException(msg);
}
}
// Migrate the schemas in a batch, collect all the table descriptors
// that require a schema migration
Collection<HTableDescriptor> tableDescriptors = Lists.newArrayList();
for (Entry<String, List<String>> entry : tableEntitySchemaMap.entrySet()) {
String table = entry.getKey();
for (String entitySchemaString : entry.getValue()) {
boolean migrationRequired = prepareManagedSchema(table, entitySchemaString);
// Optimization: if no migration is required, the table is unchanged
if (migrationRequired) {
tableDescriptors.add(
prepareTableDescriptor(table, entitySchemaString));
}
}
}
if (createTableAndFamilies) {
createTables(tableDescriptors);
}
} | java | {
"resource": ""
} |
q162484 | SchemaTool.prepareManagedSchema | train | private boolean prepareManagedSchema(String tableName,
String entitySchemaString) {
String entityName = getEntityNameFromSchemaString(entitySchemaString);
AvroEntitySchema entitySchema = parser
.parseEntitySchema(entitySchemaString);
AvroKeySchema keySchema = parser.parseKeySchema(entitySchemaString);
// Verify there are no ambiguities with the managed schemas
if (schemaManager.hasManagedSchema(tableName, entityName)) {
KeySchema currentKeySchema = schemaManager
.getKeySchema(tableName, entityName);
if (!keySchema.equals(currentKeySchema)) {
String msg =
"Migrating schema with different keys. Current: " + currentKeySchema
.getRawSchema() + " New: " + keySchema.getRawSchema();
LOG.error(msg);
throw new ValidationException(msg);
}
if (!schemaManager
.hasSchemaVersion(tableName, entityName, entitySchema)) {
LOG.info("Migrating Schema: (" + tableName + ", " + entityName + ")");
schemaManager.migrateSchema(tableName, entityName, entitySchemaString);
} else {
LOG.info("Schema hasn't changed, not migrating: (" + tableName + ", "
+ entityName + ")");
return false;
}
} else {
LOG.info("Creating Schema: (" + tableName + ", " + entityName + ")");
parser.parseEntitySchema(entitySchemaString).getColumnMappingDescriptor()
.getRequiredColumnFamilies();
schemaManager.createSchema(tableName, entityName, entitySchemaString,
"org.kitesdk.data.hbase.avro.AvroKeyEntitySchemaParser",
"org.kitesdk.data.hbase.avro.AvroKeySerDe",
"org.kitesdk.data.hbase.avro.AvroEntitySerDe");
}
return true;
} | java | {
"resource": ""
} |
q162485 | SchemaTool.prepareTableDescriptor | train | private HTableDescriptor prepareTableDescriptor(String tableName,
String entitySchemaString) {
HTableDescriptor descriptor = new HTableDescriptor(
Bytes.toBytes(tableName));
AvroEntitySchema entitySchema = parser
.parseEntitySchema(entitySchemaString);
Set<String> familiesToAdd = entitySchema.getColumnMappingDescriptor()
.getRequiredColumnFamilies();
familiesToAdd.add(new String(Constants.SYS_COL_FAMILY));
familiesToAdd.add(new String(Constants.OBSERVABLE_COL_FAMILY));
for (String familyToAdd : familiesToAdd) {
if (!descriptor.hasFamily(familyToAdd.getBytes())) {
descriptor.addFamily(new HColumnDescriptor(familyToAdd));
}
}
return descriptor;
} | java | {
"resource": ""
} |
q162486 | SchemaTool.createTables | train | private void createTables(Collection<HTableDescriptor> tableDescriptors)
throws InterruptedException {
try {
Set<String> tablesCreated = Sets.newHashSet();
Multimap<String, HTableDescriptor> pendingTableUpdates = ArrayListMultimap
.create();
for (HTableDescriptor tableDescriptor : tableDescriptors) {
String tableName = Bytes.toString(tableDescriptor.getName());
if (tablesCreated.contains(tableName)) {
// Table creation is already in flight; queue this descriptor so the
// additional column families can be added once the table is online
pendingTableUpdates.put(tableName, tableDescriptor);
} else {
LOG.info("Creating table " + tableName);
hbaseAdmin.createTableAsync(tableDescriptor, new byte[][] {});
tablesCreated.add(tableName);
}
}
// Wait for the tables to be online
for (int waitCount = 0;
waitCount < MAX_SECOND_WAIT_FOR_TABLE_CREATION; waitCount++) {
Iterator<String> iterator = tablesCreated.iterator();
while (iterator.hasNext()) {
String table = iterator.next();
if (hbaseAdmin.isTableAvailable(table)) {
// Perform any updates scheduled on the table
if (pendingTableUpdates.containsKey(table)) {
for (HTableDescriptor tableDescriptor : pendingTableUpdates
.get(table)) {
// Add the new columns - synchronous calls
modifyTable(table, tableDescriptor);
}
}
iterator.remove();
}
}
// If all tables are available, then break
if (tablesCreated.isEmpty()) {
break;
}
// Sleep for a second before checking again
Thread.sleep(1000);
}
} catch (IOException e) {
throw new DatasetException(e);
}
} | java | {
"resource": ""
} |
q162487 | SchemaTool.modifyTable | train | private void modifyTable(String tableName, HTableDescriptor newDescriptor) {
LOG.info("Modifying table " + tableName);
HColumnDescriptor[] newFamilies = newDescriptor.getColumnFamilies();
try {
List<HColumnDescriptor> columnsToAdd = Lists.newArrayList();
HTableDescriptor currentDescriptor = hbaseAdmin
.getTableDescriptor(Bytes.toBytes(tableName));
for (HColumnDescriptor newFamily : newFamilies) {
if (!currentDescriptor.hasFamily(newFamily.getName())) {
columnsToAdd.add(new HColumnDescriptor(newFamily.getName()));
}
}
// Add all the necessary column families
if (!columnsToAdd.isEmpty()) {
hbaseAdmin.disableTable(tableName);
try {
for (HColumnDescriptor columnToAdd : columnsToAdd) {
hbaseAdmin.addColumn(tableName, columnToAdd);
}
} finally {
hbaseAdmin.enableTable(tableName);
}
}
} catch (IOException e) {
throw new DatasetException(e);
}
} | java | {
"resource": ""
} |
q162488 | SchemaTool.getSchemaStringFromFile | train | private String getSchemaStringFromFile(File schemaFile) {
String schemaString;
FileInputStream fis = null;
try {
fis = new FileInputStream(schemaFile);
schemaString = AvroUtils.inputStreamToString(fis);
} catch (IOException e) {
throw new DatasetException(e);
} finally {
if (fis != null) {
try {
fis.close();
} catch (IOException e) {
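// best-effort close; failures here are safe to ignore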
}
}
}
return schemaString;
} | java | {
"resource": ""
} |
q162489 | SchemaTool.getSchemaStringsFromDir | train | private List<String> getSchemaStringsFromDir(File dir) {
List<String> schemaStrings = new ArrayList<String>();
Collection<File> schemaFiles = FileUtils.listFiles(dir,
new SuffixFileFilter(".avsc"), TrueFileFilter.INSTANCE);
for (File schemaFile : schemaFiles) {
schemaStrings.add(getSchemaStringFromFile(schemaFile));
}
return schemaStrings;
} | java | {
"resource": ""
} |
q162490 | SchemaTool.getSchemaStringsFromJar | train | private List<String> getSchemaStringsFromJar(String jarPath,
String directoryPath) {
LOG.info("Getting schema strings in: " + directoryPath + ", from jar: "
+ jarPath);
JarFile jar;
try {
jar = new JarFile(URLDecoder.decode(jarPath, "UTF-8"));
} catch (IOException e) {
// UnsupportedEncodingException is an IOException, so one catch covers both
throw new DatasetException(e);
}
Enumeration<JarEntry> entries = jar.entries();
List<String> schemaStrings = new ArrayList<String>();
while (entries.hasMoreElements()) {
JarEntry jarEntry = entries.nextElement();
if (jarEntry.getName().startsWith(directoryPath)
&& jarEntry.getName().endsWith(".avsc")) {
LOG.info("Found schema: " + jarEntry.getName());
InputStream inputStream;
try {
inputStream = jar.getInputStream(jarEntry);
} catch (IOException e) {
throw new DatasetException(e);
}
String schemaString = AvroUtils.inputStreamToString(inputStream);
schemaStrings.add(schemaString);
}
}
return schemaStrings;
} | java | {
"resource": ""
} |
q162491 | Loader.setMetaStoreURI | train | private static void setMetaStoreURI(
Configuration conf, Map<String, String> match) {
try {
// If the host is set, construct a new MetaStore URI and set the property
// in the Configuration. Otherwise, do not change the MetaStore URI.
String host = match.get(URIPattern.HOST);
if (host != null && !NOT_SET.equals(host)) {
int port;
try {
port = Integer.parseInt(match.get(URIPattern.PORT));
} catch (NumberFormatException e) {
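// no parseable port in the match; fall back to the unspecified port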
port = UNSPECIFIED_PORT;
}
conf.set(HIVE_METASTORE_URI_PROP,
new URI("thrift", null, host, port, null, null, null).toString());
}
} catch (URISyntaxException ex) {
throw new DatasetOperationException(
"Could not build metastore URI", ex);
}
} | java | {
"resource": ""
} |
q162492 | XMLStreamCopier.copy | train | public void copy(boolean isFragmentMode) throws XMLStreamException {
int ev = isFragmentMode ?
XMLStreamConstants.START_ELEMENT : XMLStreamConstants.START_DOCUMENT;
reader.require(ev, null, null);
int depth = 0;
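// element nesting depth; lets fragment mode stop at the matching end element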
ev = reader.getEventType();
while (true) {
switch (ev) {
case XMLStreamConstants.START_ELEMENT: {
writer.writeStartElement(
nonNull(reader.getPrefix()), // fixup bug where woodstox-3.2.7 returns null
reader.getLocalName(),
nonNull(reader.getNamespaceURI())); // Saxon requires nonNull
copyAttributes();
copyNamespaces();
depth++;
break;
}
case XMLStreamConstants.END_ELEMENT: {
writer.writeEndElement();
depth--;
if (isFragmentMode && depth == 0) {
writer.flush();
return; // we're done
}
break;
}
case XMLStreamConstants.ATTRIBUTE: {
// can happen as part of an XPath result sequence, or similar
copyAttribute(0);
break;
}
case XMLStreamConstants.START_DOCUMENT: {
copyStartDocument();
break;
}
case XMLStreamConstants.END_DOCUMENT: {
writer.writeEndDocument();
writer.flush();
return; // we're done
}
case XMLStreamConstants.PROCESSING_INSTRUCTION: {
writer.writeProcessingInstruction(
reader.getPITarget(), reader.getPIData());
break;
}
case XMLStreamConstants.COMMENT: {
writer.writeComment(reader.getText());
break;
}
case XMLStreamConstants.CDATA: {
writer.writeCData(reader.getText());
break;
}
case XMLStreamConstants.SPACE:
case XMLStreamConstants.CHARACTERS: {
copyText();
break;
}
case XMLStreamConstants.ENTITY_REFERENCE: {
// writer.writeEntityRef(reader.getLocalName()); // don't expand the ref
copyText(); // expand the ref (safer)
break;
}
case XMLStreamConstants.DTD: {
copyDTD();
break;
}
case XMLStreamConstants.ENTITY_DECLARATION:
break; // ignore (handled by XMLStreamConstants.DTD)
case XMLStreamConstants.NOTATION_DECLARATION:
break; // ignore (handled by XMLStreamConstants.DTD)
case XMLStreamConstants.NAMESPACE: {
// can happen as part of an XPath result sequence, or similar
writer.writeNamespace(reader.getPrefix(), reader.getNamespaceURI());
break;
}
default: {
throw new XMLStreamException("Unrecognized event type: "
+ reader.getEventType());
}
}
ev = reader.next();
}
} | java | {
"resource": ""
} |
q162493 | HBaseUtils.mergePuts | train | public static Put mergePuts(byte[] keyBytes, List<Put> putList) {
Put put = new Put(keyBytes);
for (Put putToMerge : putList) {
Map<byte[], List<KeyValue>> familyMap =
(Map<byte[], List<KeyValue>>) GET_FAMILY_MAP_METHOD.invoke(putToMerge);
for (List<KeyValue> keyValueList : familyMap.values()) {
for (KeyValue keyValue : keyValueList) {
// don't use put.add(KeyValue) since it doesn't work with HBase 0.96 onwards
put.add(keyValue.getFamily(), keyValue.getQualifier(),
keyValue.getTimestamp(), keyValue.getValue());
}
}
}
return put;
} | java | {
"resource": ""
} |
q162494 | HBaseUtils.mergePutActions | train | public static PutAction mergePutActions(byte[] keyBytes,
List<PutAction> putActionList) {
VersionCheckAction checkAction = null;
List<Put> putsToMerge = new ArrayList<Put>();
for (PutAction putActionToMerge : putActionList) {
putsToMerge.add(putActionToMerge.getPut());
VersionCheckAction checkActionToMerge = putActionToMerge.getVersionCheckAction();
if (checkActionToMerge != null) {
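// later non-null version checks override earlier ones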
checkAction = checkActionToMerge;
}
}
Put put = mergePuts(keyBytes, putsToMerge);
return new PutAction(put, checkAction);
} | java | {
"resource": ""
} |
q162495 | HBaseUtils.addColumnsToOperation | train | private static void addColumnsToOperation(Collection<String> columns, Operation operation) {
// Keep track of whole family additions
Set<String> familySet = new HashSet<String>();
// Iterate through each of the required columns
for (String column : columns) {
// Split the column by : (family : column)
String[] familyAndColumn = column.split(":");
// Check if this is a family only
if (familyAndColumn.length == 1) {
// Add family to whole family additions, and add to scanner
familySet.add(familyAndColumn[0]);
operation.addFamily(Bytes.toBytes(familyAndColumn[0]));
} else {
// Add this column, as long as its entire family wasn't added.
if (!familySet.contains(familyAndColumn[0])) {
operation.addColumn(Bytes.toBytes(familyAndColumn[0]), Bytes.toBytes(familyAndColumn[1]));
}
}
}
} | java | {
"resource": ""
} |
q162496 | HBaseUtils.addColumnsToScan | train | public static void addColumnsToScan(Collection<String> columns, final Scan scan) {
addColumnsToOperation(columns, new Operation() {
@Override
public void addColumn(byte[] family, byte[] column) {
scan.addColumn(family, column);
}
@Override
public void addFamily(byte[] family) {
scan.addFamily(family);
}
});
} | java | {
"resource": ""
} |
q162497 | HBaseUtils.addColumnsToGet | train | public static void addColumnsToGet(Collection<String> columns, final Get get) {
addColumnsToOperation(columns, new Operation() {
@Override
public void addColumn(byte[] family, byte[] column) {
get.addColumn(family, column);
}
@Override
public void addFamily(byte[] family) {
get.addFamily(family);
}
});
} | java | {
"resource": ""
} |
q162498 | XMLInputFactoryCreator.setupProperties | train | protected void setupProperties(XMLInputFactory factory) {
factory.setProperty(XMLInputFactory.IS_NAMESPACE_AWARE, Boolean.TRUE);
factory.setProperty(XMLInputFactory.IS_COALESCING, Boolean.TRUE);
factory.setProperty(XMLInputFactory.SUPPORT_DTD, Boolean.TRUE);
try {
factory.setProperty(XMLInputFactory.IS_VALIDATING, Boolean.FALSE);
} catch (IllegalArgumentException e) {
; // we can live with that
}
try {
factory.setProperty(
XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, Boolean.TRUE);
} catch (IllegalArgumentException e) {
; // we can live with that
}
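// Resolve external entities to an empty stream so parsing never reads external resources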
factory.setXMLResolver(new XMLResolver() {
@Override
public InputStream resolveEntity(String publicID, String systemID, String baseURI, String namespace) {
return new InputStream() {
@Override
public int read() { return -1; }
};
}
});
String factoryName = factory.getClass().getName();
if (factoryName.equals("com.ctc.wstx.stax.WstxInputFactory")) {
try {
// it's safer to disable woodstox lazy parsing, in particular with DTDs
// see http://woodstox.codehaus.org/ConfiguringStreamReaders
// see com.ctc.wstx.api.WstxInputProperties
String P_LAZY_PARSING = "com.ctc.wstx.lazyParsing";
factory.setProperty(P_LAZY_PARSING, Boolean.FALSE);
} catch (IllegalArgumentException e) {
; // shouldn't happen, but we can live with that
}
try {
// enable/disable DTD caching (wstx default is to enable it)
String P_CACHE_DTDS = "com.ctc.wstx.cacheDTDs";
factory.setProperty(P_CACHE_DTDS, Boolean.valueOf(CACHE_DTDS));
} catch (IllegalArgumentException e) {
; // shouldn't happen, but we can live with that
}
// } else if (factory.isPropertySupported("report-cdata-event")) {}
} else if (factoryName.equals("com.sun.xml.stream.ZephyrParserFactory")) {
try {
// workaround to tell sjsxp to not ignore CDATA events
// see sjsxp-1_0/docs/ReleaseNotes.html
String P_REPORT_CDATA = "report-cdata-event";
// String P_REPORT_CDATA = "http://java.sun.com/xml/stream/properties/report-cdata-event";
factory.setProperty(P_REPORT_CDATA, Boolean.TRUE);
} catch (IllegalArgumentException e) {
; // shouldn't happen, but we can live with that
}
}
} | java | {
"resource": ""
} |
q162499 | FileSystemMetadataProvider.pathForMetadata | train | private Path pathForMetadata(String namespace, String name) {
return pathForMetadata(rootDirectory, namespace, name);
} | java | {
"resource": ""
} |