_id
stringlengths
2
7
title
stringlengths
3
140
partition
stringclasses
3 values
text
stringlengths
73
34.1k
language
stringclasses
1 value
meta_information
dict
q162500
FileSystemMetadataProvider.pathForMetadata
train
/**
 * Returns the metadata directory path for the named dataset under the repository root.
 *
 * @param root repository root path
 * @param namespace namespace containing the dataset
 * @param name dataset name
 * @return the dataset's metadata directory path
 */
private static Path pathForMetadata(Path root, String namespace, String name) {
  Path datasetPath = FileSystemDatasetRepository.pathForDataset(root, namespace, name);
  return new Path(datasetPath, METADATA_DIRECTORY);
}
java
{ "resource": "" }
q162501
FileSystemMetadataProvider.checkExists
train
/**
 * Verifies that a descriptor location exists on the given file system.
 *
 * @throws DatasetNotFoundException if the location does not exist
 * @throws DatasetIOException if the file system cannot be queried
 */
private static void checkExists(FileSystem fs, Path location) {
  boolean exists;
  try {
    exists = fs.exists(location);
  } catch (IOException ex) {
    throw new DatasetIOException(
        "Cannot access descriptor location: " + location, ex);
  }
  if (!exists) {
    throw new DatasetNotFoundException(
        "Descriptor location does not exist: " + location);
  }
}
java
{ "resource": "" }
q162502
SpecificAvroDao.buildCompositeDaoWithEntityManager
train
/**
 * Builds a composite DAO that maps a single HBase table to multiple specific Avro
 * sub-entity types; the resulting DAO's entities are maps keyed by sub-entity name.
 *
 * <p>For each sub-entity class, an {@link EntityMapper} is built via
 * {@code VersionedAvroEntityMapper.Builder} using the record name from the class's
 * Avro schema, with specific-record mapping enabled.
 *
 * @param tablePool pool of HBase table connections
 * @param tableName name of the HBase table backing all sub-entities
 * @param subEntityClasses specific-record classes making up the composite entity
 * @param schemaManager schema manager supplied to each entity mapper
 * @return a composite DAO over {@code Map<String, S>} entities
 */
public static <K extends SpecificRecord, S extends SpecificRecord> Dao<Map<String, S>>
    buildCompositeDaoWithEntityManager(HTablePool tablePool, String tableName,
        List<Class<S>> subEntityClasses, SchemaManager schemaManager) {
  List<EntityMapper<S>> entityMappers = new ArrayList<EntityMapper<S>>();
  for (Class<S> subEntityClass : subEntityClasses) {
    String entityName = getSchemaFromEntityClass(subEntityClass).getName();
    entityMappers.add(new VersionedAvroEntityMapper.Builder()
        .setSchemaManager(schemaManager).setTableName(tableName)
        .setEntityName(entityName).setSpecific(true)
        .<S> build());
  }
  return new SpecificMapCompositeAvroDao<S>(tablePool, tableName, entityMappers);
}
java
{ "resource": "" }
q162503
MultiLevelIterator.advance
train
/**
 * Advances the stack of per-level iterators until there are {@code depth} iterators
 * and the deepest one has a next item; {@code current} tracks the value chosen at
 * each level along the way.
 *
 * <p>Each loop iteration either descends (consume the next value from the last
 * iterator and push the next level's iterator) or backtracks (pop an exhausted
 * iterator and its value from {@code current}).
 *
 * @return true if an item is available at full depth; false when every level is
 *     exhausted
 */
private boolean advance() {
  // done when there are depth iterators and the last iterator has an item
  while (iterators.size() < depth || !iterators.getLast().hasNext()) {
    // each iteration: add an iterator for the next level from the current
    // last iterator, or remove the last iterator because it is empty
    if (iterators.getLast().hasNext()) {
      current.add(iterators.getLast().next());
      iterators.add(getLevel(current).iterator());
    } else {
      iterators.removeLast();
      if (iterators.isEmpty()) {
        return false;
      } else {
        current.removeLast();
      }
    }
  }
  return true;
}
java
{ "resource": "" }
q162504
DatasetSourceTarget.inputBundle
train
/**
 * Builds a {@link FormatBundle} for {@link DatasetKeyInputFormat}, copying every
 * entry of the given configuration into the bundle.
 */
private static FormatBundle<DatasetKeyInputFormat> inputBundle(Configuration conf) {
  FormatBundle<DatasetKeyInputFormat> bundle =
      FormatBundle.forInput(DatasetKeyInputFormat.class);
  for (Map.Entry<String, String> confEntry : conf) {
    bundle.set(confEntry.getKey(), confEntry.getValue());
  }
  return bundle;
}
java
{ "resource": "" }
q162505
Validator.validateEnum
train
/**
 * Parses {@code value} as a constant of {@code type} and validates that it is one
 * of the permitted {@code choices} (all constants of the enum when no explicit
 * choices are given).
 *
 * @return the matching enum constant
 * @throws MorphlineCompilationException if the value is not a valid choice
 */
public <T extends Enum<T>> T validateEnum(Config config, String value, Class<T> type, T... choices) {
  if (choices.length == 0) {
    choices = type.getEnumConstants();
  }
  Preconditions.checkArgument(choices.length > 0);
  T result = null;
  try {
    result = Enum.valueOf(type, value);
  } catch (IllegalArgumentException e) {
    // value is not a constant of this enum type; reported below
  }
  // previously the "not a permitted choice" case threw and re-caught an
  // IllegalArgumentException; use a plain condition instead of
  // exception-as-control-flow
  if (result == null || !Arrays.asList(choices).contains(result)) {
    throw new MorphlineCompilationException(
        String.format("Invalid choice: '%s' (choose from {%s})",
            value, Joiner.on(",").join(choices)),
        config);
  }
  return result;
}
java
{ "resource": "" }
q162506
ScalableStatistics.add
train
/**
 * Adds one sample, updating count, sum, sum of squares, min, max, and the
 * quantile estimation structures.
 */
public void add(double value) {
  count++;
  sum += value;
  sumOfSquares += value * value;
  min = Math.min(min, value);
  max = Math.max(max, value);
  addQuantileValue(value);
}
java
{ "resource": "" }
q162507
ScalableStatistics.add
train
/**
 * Merges another {@link ScalableStatistics} into this one: basic aggregates, the
 * t-digests, and any exact values still buffered on the other side.
 */
public void add(ScalableStatistics other) {
  count += other.count;
  sum += other.sum;
  sumOfSquares += other.sumOfSquares;
  min = Math.min(min, other.min);
  max = Math.max(max, other.max);
  tdigest.add(other.tdigest);
  if (other.exactValues != null) {
    for (int i = 0; i < other.numExactValues; i++) {
      addQuantileValue(other.exactValues[i]);
    }
  }
}
java
{ "resource": "" }
q162508
ScalableStatistics.asBytes
train
/**
 * Serializes this object to a byte array.
 *
 * <p>Layout: magic code (int), count (long), min/max/sum/sumOfSquares (4 doubles),
 * exactQuantilesThreshold (int), number of exact values (int; -1 when the exact
 * value buffer has been dropped), the exact values (doubles), t-digest class name
 * length plus its UTF-8 bytes, the t-digest's own serialized form, and a trailing
 * magic code. {@code fromBytes(byte[])} is the inverse.
 */
public byte[] asBytes() {
  byte[] className = tdigest.getClass().getName().getBytes(Charsets.UTF_8);
  int vlen = exactValues == null ? 0 : numExactValues;
  ByteBuffer buf = ByteBuffer.allocate(
      4 + 8*5 + 4 + 4 + 8*vlen + 4 + className.length + tdigest.byteSize() + 4);
  buf.putInt(MAGIC_CODE); // for sanity checks
  // write basic descriptive stats
  buf.putLong(count);
  buf.putDouble(min);
  buf.putDouble(max);
  buf.putDouble(sum);
  buf.putDouble(sumOfSquares);
  // write exact values, if any
  buf.putInt(exactQuantilesThreshold);
  buf.putInt(exactValues == null ? -1 : numExactValues);
  if (exactValues != null) {
    for (int i = 0; i < numExactValues; i++) {
      buf.putDouble(exactValues[i]);
    }
  }
  // write tdigest
  buf.putInt(className.length);
  buf.put(className);
  tdigest.asBytes(buf);
  buf.putInt(MAGIC_CODE); // for sanity checks
  Preconditions.checkArgument(buf.position() == buf.capacity());
  return buf.array();
}
java
{ "resource": "" }
q162509
ScalableStatistics.fromBytes
train
/**
 * Deserializes a {@link ScalableStatistics} previously produced by {@code asBytes()}.
 *
 * <p>Reads the same field layout, then reconstructs the t-digest by reflectively
 * invoking the static {@code fromBytes(ByteBuffer)} method of the recorded t-digest
 * class. Magic codes at both ends and full buffer consumption are verified as
 * sanity checks.
 *
 * @throws RuntimeException wrapping any reflection failure
 */
public static ScalableStatistics fromBytes(byte[] bytes) {
  Preconditions.checkArgument(bytes.length > 0);
  ByteBuffer buf = ByteBuffer.wrap(bytes);
  ScalableStatistics stats = new ScalableStatistics();
  Preconditions.checkArgument(buf.getInt() == MAGIC_CODE);
  // read basic descriptive stats
  stats.count = buf.getLong();
  stats.min = buf.getDouble();
  stats.max = buf.getDouble();
  stats.sum = buf.getDouble();
  stats.sumOfSquares = buf.getDouble();
  // read exact values, if any
  stats.exactQuantilesThreshold = buf.getInt();
  int vlen = buf.getInt();
  if (vlen >= 0) {
    stats.numExactValues = vlen;
    stats.exactValues = new double[vlen];
    for (int i = 0; i < vlen; i++) {
      stats.exactValues[i] = buf.getDouble();
    }
  } else {
    stats.exactValues = null;
  }
  // read tdigest
  byte[] classNameBytes = new byte[buf.getInt()];
  buf.get(classNameBytes);
  String className = new String(classNameBytes, Charsets.UTF_8);
  try {
    Method method = Class.forName(className).getMethod("fromBytes", ByteBuffer.class);
    Preconditions.checkArgument(Modifier.isStatic(method.getModifiers()));
    stats.tdigest = (TDigest) method.invoke(null, buf);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
  Preconditions.checkArgument(buf.getInt() == MAGIC_CODE);
  Preconditions.checkArgument(buf.position() == buf.capacity());
  return stats;
}
java
{ "resource": "" }
q162510
JobClasspathHelper.createMd5SumFile
train
/**
 * Writes the given MD5 checksum string to {@code remoteMd5Path}, overwriting any
 * existing file.
 *
 * <p>Fix: the previous version caught {@code Exception} and only logged it
 * ({@code LOG.error("{}", e)}), silently leaving a missing or stale checksum file
 * even though the method already declared {@code throws IOException}. Failures are
 * now propagated to the caller, and the stream is always closed.
 *
 * @throws IOException if the checksum file cannot be created or written
 */
private void createMd5SumFile(FileSystem fs, String md5sum, Path remoteMd5Path) throws IOException {
  FSDataOutputStream os = fs.create(remoteMd5Path, true);
  try {
    os.writeBytes(md5sum);
    os.flush();
  } finally {
    os.close();
  }
}
java
{ "resource": "" }
q162511
HBaseService.configureHBaseCluster
train
/**
 * Configures an embedded HBase cluster: the zookeeper client port, the HBase root
 * directory on the given HDFS, bind addresses and ports for master and
 * regionserver, and disables the master/regionserver info web UIs.
 *
 * <p>Also forces the hostname that HBase daemons report to zookeeper by using
 * reflection to overwrite the private static {@code cachedHostname} field of the
 * Hadoop {@code DNS} class with the bind IP. NOTE(review): as the inline comment
 * warns, this reflection hack may break on newer Hadoop versions.
 *
 * @return the same {@code config} instance, mutated with the cluster settings
 * @throws IOException if HDFS operations fail or the reflection override fails
 */
private static Configuration configureHBaseCluster(Configuration config, int zkClientPort,
    FileSystem hdfsFs, String bindIP, int masterPort, int regionserverPort)
    throws IOException {
  // Configure the zookeeper port
  config
      .set(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(zkClientPort));
  // Initialize HDFS path configurations required by HBase
  Path hbaseDir = new Path(hdfsFs.makeQualified(hdfsFs.getHomeDirectory()), "hbase");
  FSUtils.setRootDir(config, hbaseDir);
  hdfsFs.mkdirs(hbaseDir);
  config.set("fs.defaultFS", hdfsFs.getUri().toString());
  config.set("fs.default.name", hdfsFs.getUri().toString());
  FSUtils.setVersion(hdfsFs, hbaseDir);
  // Configure the bind addresses and ports. If running in Openshift, we only
  // have permission to bind to the private IP address, accessible through an
  // environment variable.
  logger.info("HBase force binding to ip: " + bindIP);
  config.set("hbase.master.ipc.address", bindIP);
  config.set(HConstants.MASTER_PORT, Integer.toString(masterPort));
  config.set("hbase.regionserver.ipc.address", bindIP);
  config
      .set(HConstants.REGIONSERVER_PORT, Integer.toString(regionserverPort));
  config.set(HConstants.ZOOKEEPER_QUORUM, bindIP);
  // By default, the HBase master and regionservers will report to zookeeper
  // that its hostname is what it determines by reverse DNS lookup, and not
  // what we use as the bind address. This means when we set the bind
  // address, daemons won't actually be able to connect to each other if they
  // are different. Here, we do something that's illegal in 48 states - use
  // reflection to override a private static final field in the DNS class
  // that is a cachedHostname. This way, we are forcing the hostname that
  // reverse dns finds. This may not be compatible with newer versions of
  // Hadoop.
  try {
    Field cachedHostname = DNS.class.getDeclaredField("cachedHostname");
    cachedHostname.setAccessible(true);
    Field modifiersField = Field.class.getDeclaredField("modifiers");
    modifiersField.setAccessible(true);
    modifiersField.setInt(cachedHostname, cachedHostname.getModifiers() & ~Modifier.FINAL);
    cachedHostname.set(null, bindIP);
  } catch (Exception e) {
    // Reflection can throw so many checked exceptions. Let's wrap in an
    // IOException.
    throw new IOException(e);
  }
  // By setting the info ports to -1 for, we won't launch the master or
  // regionserver info web interfaces
  config.set(HConstants.MASTER_INFO_PORT, "-1");
  config.set(HConstants.REGIONSERVER_INFO_PORT, "-1");
  return config;
}
java
{ "resource": "" }
q162512
HBaseService.waitForHBaseToComeOnline
train
private static void waitForHBaseToComeOnline(MiniHBaseCluster hbaseCluster) throws IOException, InterruptedException { // Wait for the master to be initialized. This is required because even // before it's initialized, the regionserver can come online and the meta // table can be scannable. If the cluster is quickly shut down after all of // this before the master is initialized, it can cause the shutdown to hang // indefinitely as initialization tasks will block forever. // // Unfortunately, no method available to wait for master to come online like // regionservers, so we use a while loop with a sleep so we don't hammer the // isInitialized method. while (!hbaseCluster.getMaster().isInitialized()) { Thread.sleep(1000); } // Now wait for the regionserver to come online. hbaseCluster.getRegionServer(0).waitForServerOnline(); // Don't leave here till we've done a successful scan of the hbase:meta // This validates that not only is the regionserver up, but that the // meta region is online so there are no race conditions where operations // requiring the meta region might run before it's available. Otherwise, // operations are susceptible to region not online errors. HTable t = new HTable(hbaseCluster.getConf(), HBASE_META_TABLE); ResultScanner s = t.getScanner(new Scan()); while (s.next() != null) { continue; } s.close(); t.close(); }
java
{ "resource": "" }
q162513
SignalManager.signalReady
train
public void signalReady(Constraints viewConstraints) { try { rootFileSystem.mkdirs(signalDirectory); } catch (IOException e) { throw new DatasetIOException("Unable to create signal manager directory: " + signalDirectory, e); } String normalizedConstraints = getNormalizedConstraints(viewConstraints); Path signalPath = new Path(signalDirectory, normalizedConstraints); try{ // create the output stream to overwrite the current contents, if the directory or file // exists it will be overwritten to get a new timestamp FSDataOutputStream os = rootFileSystem.create(signalPath, true); os.close(); } catch (IOException e) { throw new DatasetIOException("Could not access signal path: " + signalPath, e); } }
java
{ "resource": "" }
q162514
SignalManager.getReadyTimestamp
train
public long getReadyTimestamp(Constraints viewConstraints) { String normalizedConstraints = getNormalizedConstraints(viewConstraints); Path signalPath = new Path(signalDirectory, normalizedConstraints); // check if the signal exists try { try { FileStatus signalStatus = rootFileSystem.getFileStatus(signalPath); return signalStatus.getModificationTime(); } catch (final FileNotFoundException ex) { // empty, will be thrown when the signal path doesn't exist } return -1; } catch (IOException e) { throw new DatasetIOException("Could not access signal path: " + signalPath, e); } }
java
{ "resource": "" }
q162515
FileSystemDataset.viewForUri
train
/**
 * Returns the view corresponding to a partition location URI; when the location
 * resolves to the unbounded view, this dataset itself is returned.
 */
View<E> viewForUri(URI location) {
  Preconditions.checkNotNull(location, "Partition location cannot be null");
  PartitionView<E> partitionView = getPartitionView(location);
  return (partitionView == unbounded) ? this : partitionView;
}
java
{ "resource": "" }
q162516
HiveAbstractMetadataProvider.isExternal
train
/**
 * Returns whether the named dataset is backed by an external Hive table; false
 * when the namespace cannot be resolved.
 */
protected boolean isExternal(String namespace, String name) {
  String resolved = resolveNamespace(namespace, name);
  if (resolved == null) {
    return false;
  }
  return isExternal(getMetaStoreUtil().getTable(resolved, name));
}
java
{ "resource": "" }
q162517
HiveAbstractMetadataProvider.isNamespace
train
/**
 * Returns true when the Hive database contains at least one readable table.
 */
private boolean isNamespace(String database) {
  for (String table : getMetaStoreUtil().getAllTables(database)) {
    if (isReadable(database, table)) {
      return true;
    }
  }
  return false;
}
java
{ "resource": "" }
q162518
HiveAbstractMetadataProvider.isReadable
train
private boolean isReadable(String namespace, String name) { Table table = getMetaStoreUtil().getTable(namespace, name); if (isManaged(table) || isExternal(table)) { // readable table types try { // get a descriptor for the table. if this succeeds, it is readable HiveUtils.descriptorForTable(conf, table); return true; } catch (DatasetException e) { // not a readable table } catch (IllegalStateException e) { // not a readable table } catch (IllegalArgumentException e) { // not a readable table } catch (UnsupportedOperationException e) { // not a readable table } } return false; }
java
{ "resource": "" }
q162519
Log4jAppender.append
train
/**
 * Sends a log4j {@link LoggingEvent} to Flume as an event.
 *
 * <p>Fails when the RPC client was never set up or has been closed; reconnects
 * first when the client is inactive. Logger name, timestamp and log level are
 * attached as Flume headers. The body is the Avro-serialized message when it is a
 * {@code GenericRecord}, or a {@code SpecificRecord}/reflection-enabled object;
 * otherwise the layout-formatted (or {@code toString}'d) message as UTF-8 text.
 *
 * @throws FlumeException if the client is unusable or delivery fails
 */
@Override public synchronized void append(LoggingEvent event) throws FlumeException{
  // If rpcClient is null, it means either this appender object was never
  // setup by setting hostname and port and then calling activateOptions
  // or this appender object was closed by calling close(), so we throw an
  // exception to show the appender is no longer accessible.
  if(rpcClient == null){
    throw new FlumeException("Cannot Append to Appender!" +
        "Appender either closed or not setup correctly!");
  }
  if(!rpcClient.isActive()){
    reconnect();
  }
  // Client created first time append is called.
  Map<String, String> hdrs = new HashMap<String, String>();
  hdrs.put(Log4jAvroHeaders.LOGGER_NAME.toString(), event.getLoggerName());
  hdrs.put(Log4jAvroHeaders.TIMESTAMP.toString(),
      String.valueOf(event.getTimeStamp()));
  // To get the level back simply use
  // LoggerEvent.toLevel(hdrs.get(Integer.parseInt(
  // Log4jAvroHeaders.LOG_LEVEL.toString()))
  hdrs.put(Log4jAvroHeaders.LOG_LEVEL.toString(),
      String.valueOf(event.getLevel().toInt()));
  Event flumeEvent;
  Object message = event.getMessage();
  if (message instanceof GenericRecord) {
    GenericRecord record = (GenericRecord) message;
    populateAvroHeaders(hdrs, record.getSchema(), message);
    flumeEvent = EventBuilder.withBody(serialize(record, record.getSchema()), hdrs);
  } else if (message instanceof SpecificRecord || avroReflectionEnabled) {
    Schema schema = ReflectData.get().getSchema(message.getClass());
    populateAvroHeaders(hdrs, schema, message);
    flumeEvent = EventBuilder.withBody(serialize(message, schema), hdrs);
  } else {
    hdrs.put(Log4jAvroHeaders.MESSAGE_ENCODING.toString(), "UTF8");
    String msg = layout != null ? layout.format(event) : message.toString();
    flumeEvent = EventBuilder.withBody(msg, Charset.forName("UTF8"), hdrs);
  }
  try {
    rpcClient.append(flumeEvent);
  } catch (EventDeliveryException e) {
    String msg = "Flume append() failed.";
    LogLog.error(msg);
    throw new FlumeException(msg + " Exception follows.", e);
  }
}
java
{ "resource": "" }
q162520
JarFinder.getJar
train
/**
 * Returns the path of the jar containing {@code klass}; when the class is loaded
 * from a plain directory (e.g. a build output directory), that directory is
 * packaged into a temporary jar under {@code test.build.dir} and the temp jar's
 * path is returned instead.
 *
 * <p>Returns null when the class has no classloader or no matching resource is
 * found.
 */
public static String getJar(Class<?> klass) {
  Preconditions.checkNotNull(klass, "klass");
  ClassLoader loader = klass.getClassLoader();
  if (loader != null) {
    String class_file = klass.getName().replaceAll("\\.", "/") + ".class";
    try {
      for (Enumeration<?> itr = loader.getResources(class_file); itr.hasMoreElements();) {
        URL url = (URL) itr.nextElement();
        String path = url.getPath();
        if (path.startsWith("file:")) {
          path = path.substring("file:".length());
        }
        path = URLDecoder.decode(path, "UTF-8");
        if ("jar".equals(url.getProtocol())) {
          // NOTE(review): path was already decoded once above; this second decode
          // presumably handles doubly-encoded jar URLs -- confirm.
          path = URLDecoder.decode(path, "UTF-8");
          // strip the "!/inner/entry" suffix of the jar URL
          return path.replaceAll("!.*$", "");
        } else if ("file".equals(url.getProtocol())) {
          // class was loaded from a directory: strip the class file's relative
          // path to find the classpath root
          String klassName = klass.getName();
          klassName = klassName.replace(".", "/") + ".class";
          path = path.substring(0, path.length() - klassName.length());
          File baseDir = new File(path);
          File testDir = new File(System.getProperty("test.build.dir", "target/test-dir"));
          testDir = testDir.getAbsoluteFile();
          if (!testDir.exists()) {
            if (!testDir.mkdirs()) {
              throw new IOException("Unable to create directory :"+testDir.toString());
            }
          }
          File tempJar = File.createTempFile(TMP_HADOOP, "", testDir);
          tempJar = new File(tempJar.getAbsolutePath() + ".jar");
          createJar(baseDir, tempJar);
          return tempJar.getAbsolutePath();
        }
      }
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
  return null;
}
java
{ "resource": "" }
q162521
ZooKeeperDownloader.readConfigName
train
/**
 * Looks up the ZooKeeper config name associated with a Solr collection.
 *
 * <p>Resolves a collection alias first (rejecting aliases that map to multiple
 * collections), then reads the collection's znode and extracts its configName
 * property, failing if the named config does not exist in ZooKeeper.
 *
 * @return the config name, or null when the collection znode carries no data
 * @throws IllegalArgumentException for a null collection, a multi-collection
 *     alias, or a config name missing from ZooKeeper
 */
public String readConfigName(SolrZkClient zkClient, String collection)
    throws KeeperException, InterruptedException {
  if (collection == null) {
    throw new IllegalArgumentException("collection must not be null");
  }
  String configName = null;
  // first check for alias
  byte[] aliasData = zkClient.getData(ZkStateReader.ALIASES, null, null, true);
  Aliases aliases = ClusterState.load(aliasData);
  String alias = aliases.getCollectionAlias(collection);
  if (alias != null) {
    List<String> aliasList = StrUtils.splitSmart(alias, ",", true);
    if (aliasList.size() > 1) {
      throw new IllegalArgumentException("collection cannot be an alias that maps to multiple collections");
    }
    collection = aliasList.get(0);
  }
  String path = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection;
  if (LOG.isInfoEnabled()) {
    LOG.info("Load collection config from:" + path);
  }
  byte[] data = zkClient.getData(path, null, null, true);
  if(data != null) {
    ZkNodeProps props = ZkNodeProps.load(data);
    configName = props.getStr(ZkController.CONFIGNAME_PROP);
  }
  if (configName != null && !zkClient.exists(ZkConfigManager.CONFIGS_ZKNODE + "/" + configName, true)) {
    LOG.error("Specified config does not exist in ZooKeeper:" + configName);
    throw new IllegalArgumentException("Specified config does not exist in ZooKeeper:" + configName);
  }
  return configName;
}
java
{ "resource": "" }
q162522
ZooKeeperDownloader.downloadConfigDir
train
/**
 * Downloads a Solr config set from ZooKeeper into {@code dir} and guarantees the
 * returned directory has a {@code conf/} subdirectory containing the verified
 * config.
 *
 * <p>If the downloaded layout lacks a {@code conf/} dir, the contents are moved
 * under a fresh temp directory's {@code conf} child (see the CDH-11188 note in the
 * inline comment) and that temp directory is returned instead of {@code dir}.
 */
public File downloadConfigDir(SolrZkClient zkClient, String configName, File dir)
    throws IOException, InterruptedException, KeeperException {
  Preconditions.checkArgument(dir.exists());
  Preconditions.checkArgument(dir.isDirectory());
  ZkConfigManager manager = new ZkConfigManager(zkClient);
  manager.downloadConfigDir(configName, dir.toPath());
  File confDir = new File(dir, "conf");
  if (!confDir.isDirectory()) {
    // create a temporary directory with "conf" subdir and mv the config in there. This is
    // necessary because of CDH-11188; solrctl does not generate nor accept directories with e.g.
    // conf/solrconfig.xml which is necessary for proper solr operation. This should work
    // even if solrctl changes.
    confDir = new File(Files.createTempDir().getAbsolutePath(), "conf");
    confDir.getParentFile().deleteOnExit();
    Files.move(dir, confDir);
    dir = confDir.getParentFile();
  }
  verifyConfigDir(confDir);
  return dir;
}
java
{ "resource": "" }
q162523
Compatibility.check
train
/**
 * Validates a dataset's name/namespace and its descriptor for compatibility.
 */
public static void check(String namespace, String name, DatasetDescriptor descriptor) {
  checkDatasetName(namespace, name);
  checkDescriptor(descriptor);
}
java
{ "resource": "" }
q162524
Compatibility.checkAndWarn
train
/**
 * Runs the name and schema compatibility checks, logging a warning instead of
 * failing when a check does not pass.
 *
 * <p>Fix: the two identical catch blocks are collapsed into one multi-catch
 * (Java 7+); behavior is unchanged.
 */
public static void checkAndWarn(String namespace, String datasetName, Schema schema) {
  try {
    checkDatasetName(namespace, datasetName);
    checkSchema(schema);
  } catch (IllegalArgumentException | IllegalStateException e) {
    LOG.warn(e.getMessage());
  }
}
java
{ "resource": "" }
q162525
Compatibility.checkDatasetName
train
/**
 * Validates that both namespace and dataset name are non-null and contain only
 * alphanumeric characters and underscores.
 *
 * @throws ValidationException if either name is not compatible
 */
public static void checkDatasetName(String namespace, String name) {
  Preconditions.checkNotNull(namespace, "Namespace cannot be null");
  Preconditions.checkNotNull(name, "Dataset name cannot be null");
  ValidationException.check(Compatibility.isCompatibleName(namespace),
      "Namespace %s is not alphanumeric (plus '_')", namespace);
  ValidationException.check(Compatibility.isCompatibleName(name),
      "Dataset name %s is not alphanumeric (plus '_')", name);
}
java
{ "resource": "" }
q162526
Compatibility.checkDescriptor
train
/**
 * Validates a dataset descriptor: its schema must pass {@code checkSchema}, and,
 * for partitioned datasets, every partition field name must be alphanumeric (plus
 * underscore) and must not duplicate an existing data field name.
 *
 * <p>Valid new partition names are added to the seen-name set so later
 * partitioners cannot reuse them either.
 *
 * @throws ValidationException listing incompatible or duplicate partition names
 */
public static void checkDescriptor(DatasetDescriptor descriptor) {
  Preconditions.checkNotNull(descriptor, "Descriptor cannot be null");
  Schema schema = descriptor.getSchema();
  checkSchema(schema);
  if (descriptor.isPartitioned()) {
    // marked as [BUG] because this is checked in DatasetDescriptor
    Preconditions.checkArgument(schema.getType() == Schema.Type.RECORD,
        "[BUG] Partitioned datasets must have record schemas");
    Set<String> names = Sets.newHashSet();
    for (Schema.Field field : schema.getFields()) {
      names.add(field.name());
    }
    List<String> incompatible = Lists.newArrayList();
    List<String> duplicates = Lists.newArrayList();
    for (FieldPartitioner fp :
        Accessor.getDefault().getFieldPartitioners(descriptor.getPartitionStrategy())) {
      String name = fp.getName();
      if (!isCompatibleName(name)) {
        incompatible.add(name);
      } else if (names.contains(name)) {
        duplicates.add(name);
      } else {
        names.add(name);
      }
    }
    ValidationException.check(incompatible.isEmpty(),
        "Partition names are not alphanumeric (plus '_'): %s",
        Joiner.on(", ").join(incompatible));
    ValidationException.check(duplicates.isEmpty(),
        "Partition names duplicate data fields: %s",
        Joiner.on(", ").join(duplicates));
  }
}
java
{ "resource": "" }
q162527
Compatibility.getIncompatibleNames
train
/**
 * Collects the schema names that fail name validation by visiting the schema tree.
 */
private static List<String> getIncompatibleNames(Schema schema) {
  NameValidation validator = new NameValidation();
  SchemaUtil.visit(schema, validator);
  return validator.getIncompatibleNames();
}
java
{ "resource": "" }
q162528
Record.copy
train
public Record copy() { //return new Record(ArrayListMultimap.create(fields)); // adding fields later causes (slow) rehashing ArrayListMultimap<String,Object> copy = ArrayListMultimap.create(fields.size() + 16, 10); copy.putAll(fields); return new Record(copy); }
java
{ "resource": "" }
q162529
Record.getFirstValue
train
/**
 * Returns the first value stored under {@code key}, or null when the key has no
 * values.
 *
 * <p>Fix: replaced the raw {@code List} with {@code List<Object>} and the
 * {@code size() > 0} check with {@code isEmpty()}.
 */
public Object getFirstValue(String key) {
  List<Object> values = fields.get(key);
  return values.isEmpty() ? null : values.get(0);
}
java
{ "resource": "" }
q162530
Record.replaceValues
train
public void replaceValues(String key, Object value) { // fields.replaceValues(key, Collections.singletonList(value)); // unnecessarily slow List<Object> list = fields.get(key); list.clear(); list.add(value); }
java
{ "resource": "" }
q162531
Record.putIfAbsent
train
/**
 * Adds the key/value pair unless that exact entry is already present.
 */
public void putIfAbsent(String key, Object value) {
  boolean alreadyPresent = fields.containsEntry(key, value);
  if (!alreadyPresent) {
    fields.put(key, value);
  }
}
java
{ "resource": "" }
q162532
URIBuilder.build
train
/**
 * Builds a dataset URI from a repository URI string, namespace, and dataset name.
 */
public static URI build(String repoUri, String namespace, String dataset) {
  URI parsed = URI.create(repoUri);
  return build(parsed, namespace, dataset);
}
java
{ "resource": "" }
q162533
URIBuilder.build
train
/**
 * Builds a dataset URI from a repository URI, namespace, and dataset name.
 */
public static URI build(URI repoUri, String namespace, String dataset) {
  URIBuilder builder = new URIBuilder(repoUri, namespace, dataset);
  return builder.build();
}
java
{ "resource": "" }
q162534
SchemaUtil.isConsistentWithExpectedType
train
/**
 * Returns whether the Java class mapped to the given Avro type is assignable to
 * the expected class; false for unmapped types.
 */
public static boolean isConsistentWithExpectedType(Schema.Type type,
    Class<?> expectedClass) {
  Class<?> mapped = TYPE_TO_CLASS.get(type);
  if (mapped == null) {
    return false;
  }
  return expectedClass.isAssignableFrom(mapped);
}
java
{ "resource": "" }
q162535
SchemaUtil.partitionFieldSchema
train
public static Schema partitionFieldSchema(FieldPartitioner<?, ?> fp, Schema schema) { if (fp instanceof IdentityFieldPartitioner) { // copy the schema directly from the entity to preserve annotations return fieldSchema(schema, fp.getSourceName()); } else { Class<?> fieldType = getPartitionType(fp, schema); if (fieldType == Integer.class) { return Schema.create(Schema.Type.INT); } else if (fieldType == Long.class) { return Schema.create(Schema.Type.LONG); } else if (fieldType == String.class) { return Schema.create(Schema.Type.STRING); } else { throw new ValidationException( "Cannot encode partition " + fp.getName() + " with type " + fp.getSourceType() ); } } }
java
{ "resource": "" }
q162536
SchemaUtil.partitionField
train
/**
 * Builds an Avro field for the given partitioner, with no doc and no default.
 */
private static Schema.Field partitionField(FieldPartitioner<?, ?> fp, Schema schema) {
  Schema partitionSchema = partitionFieldSchema(fp, schema);
  return new Schema.Field(fp.getName(), partitionSchema, null, null);
}
java
{ "resource": "" }
q162537
MediaType.set
train
/**
 * Returns an unmodifiable set of the given media types, skipping nulls.
 */
public static Set<MediaType> set(MediaType... types) {
  Set<MediaType> result = new HashSet<MediaType>();
  for (MediaType candidate : types) {
    if (candidate != null) {
      result.add(candidate);
    }
  }
  return Collections.unmodifiableSet(result);
}
java
{ "resource": "" }
q162538
MediaType.set
train
/**
 * Returns an unmodifiable set of the media types parsed from the given strings;
 * strings that fail to parse are skipped.
 */
public static Set<MediaType> set(String... types) {
  Set<MediaType> result = new HashSet<MediaType>();
  for (String candidate : types) {
    MediaType parsed = parse(candidate);
    if (parsed != null) {
      result.add(parsed);
    }
  }
  return Collections.unmodifiableSet(result);
}
java
{ "resource": "" }
q162539
MediaType.unquote
train
/**
 * Strips any run of leading and trailing single or double quote characters.
 */
private static String unquote(String s) {
  int start = 0;
  int end = s.length();
  while (start < end && (s.charAt(start) == '"' || s.charAt(start) == '\'')) {
    start++;
  }
  while (end > start && (s.charAt(end - 1) == '"' || s.charAt(end - 1) == '\'')) {
    end--;
  }
  return s.substring(start, end);
}
java
{ "resource": "" }
q162540
AvroUtils.readAvroEntity
train
/**
 * Deserializes an Avro entity from bytes using binary decoding.
 *
 * <p>Fix: use the {@code DecoderFactory.get()} shared singleton instead of
 * allocating a new factory on every call, as recommended by the Avro API.
 */
public static <T> T readAvroEntity(byte[] bytes, DatumReader<T> reader) {
  Decoder decoder = DecoderFactory.get().binaryDecoder(bytes, null);
  return AvroUtils.<T> readAvroEntity(decoder, reader);
}
java
{ "resource": "" }
q162541
AvroUtils.readAvroEntity
train
/**
 * Reads one Avro entity from the decoder.
 *
 * @throws SerializationException if deserialization fails
 */
public static <T> T readAvroEntity(Decoder decoder, DatumReader<T> reader) {
  T entity;
  try {
    entity = reader.read(null, decoder);
  } catch (IOException e) {
    throw new SerializationException("Could not deserialize Avro entity", e);
  }
  return entity;
}
java
{ "resource": "" }
q162542
AvroUtils.writeAvroEntity
train
/**
 * Serializes an Avro entity to a byte array using binary encoding.
 *
 * <p>Fix: use the {@code EncoderFactory.get()} shared singleton instead of
 * allocating a new factory on every call, as recommended by the Avro API.
 */
public static <T> byte[] writeAvroEntity(T entity, DatumWriter<T> writer) {
  ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
  Encoder encoder = EncoderFactory.get().binaryEncoder(outputStream, null);
  writeAvroEntity(entity, encoder, writer);
  return outputStream.toByteArray();
}
java
{ "resource": "" }
q162543
AvroUtils.writeAvroEntity
train
/**
 * Writes one Avro entity to the encoder and flushes it.
 *
 * @throws SerializationException if serialization fails
 */
public static <T> void writeAvroEntity(T entity, Encoder encoder, DatumWriter<T> writer) {
  try {
    writer.write(entity, encoder);
    encoder.flush();
  } catch (IOException e) {
    throw new SerializationException("Could not serialize Avro entity", e);
  }
}
java
{ "resource": "" }
q162544
AvroUtils.cloneField
train
/**
 * Returns a new Field with the same name, schema, doc, and default value as the
 * given one (a Field instance cannot be reused across schemas).
 */
public static Field cloneField(Field field) {
  String name = field.name();
  Schema schema = field.schema();
  String doc = field.doc();
  return new Field(name, schema, doc, field.defaultValue());
}
java
{ "resource": "" }
q162545
AvroUtils.inputStreamToString
train
/**
 * Reads the entire input stream as UTF-8 text and returns it as a String.
 *
 * @throws DatasetException if UTF-8 is unsupported or reading fails
 */
public static String inputStreamToString(InputStream in) {
  final int BUFFER_SIZE = 1024;
  Reader reader;
  try {
    reader = new BufferedReader(new InputStreamReader(in, "UTF-8"));
  } catch (UnsupportedEncodingException e) {
    throw new DatasetException("Platform doesn't support UTF-8. It must!", e);
  }
  StringBuilder result = new StringBuilder(BUFFER_SIZE);
  char[] chunk = new char[BUFFER_SIZE];
  try {
    int charsRead;
    while ((charsRead = reader.read(chunk, 0, BUFFER_SIZE)) > 0) {
      result.append(chunk, 0, charsRead);
    }
  } catch (IOException e) {
    throw new DatasetException("Error reading from input stream", e);
  }
  return result.toString();
}
java
{ "resource": "" }
q162546
AvroUtils.getDefaultValueMap
train
/**
 * Computes the default value of every field in the given record schema that
 * declares one, keyed by field name.
 *
 * <p>Works by writing an empty record and reading it back with a reader schema
 * consisting only of the defaulted fields, so Avro schema resolution materializes
 * each default as its correct runtime type.
 */
public static Map<String, Object> getDefaultValueMap(Schema avroRecordSchema) {
  List<Field> defaultFields = new ArrayList<Field>();
  for (Field f : avroRecordSchema.getFields()) {
    if (f.defaultValue() != null) {
      // Need to create a new Field here or we will get
      // org.apache.avro.AvroRuntimeException: Field already used:
      // schemaVersion
      defaultFields.add(new Field(f.name(), f.schema(), f.doc(),
          f.defaultValue(), f.order()));
    }
  }
  Schema defaultSchema = Schema.createRecord(defaultFields);
  Schema emptyRecordSchema = Schema.createRecord(new ArrayList<Field>());
  DatumWriter<GenericRecord> writer = new GenericDatumWriter<GenericRecord>(
      emptyRecordSchema);
  DatumReader<GenericRecord> reader = new GenericDatumReader<GenericRecord>(
      emptyRecordSchema, defaultSchema);
  GenericRecord emptyRecord = new GenericData.Record(emptyRecordSchema);
  GenericRecord defaultRecord = AvroUtils.readAvroEntity(
      AvroUtils.writeAvroEntity(emptyRecord, writer), reader);
  Map<String, Object> defaultValueMap = new HashMap<String, Object>();
  for (Field f : defaultFields) {
    defaultValueMap.put(f.name(), defaultRecord.get(f.name()));
  }
  return defaultValueMap;
}
java
{ "resource": "" }
q162547
JavaCompiler.compile
train
public Map<String, byte[]> compile(String fileName, String source, Writer err, String sourcePath, String classPath) { // to collect errors, warnings etc. DiagnosticCollector<JavaFileObject> diagnostics = new DiagnosticCollector<JavaFileObject>(); // create a new memory JavaFileManager MemoryJavaFileManager manager = new MemoryJavaFileManager(stdManager); // prepare the compilation unit List<JavaFileObject> compUnits = new ArrayList<JavaFileObject>(1); compUnits.add(manager.makeStringSource(fileName, source)); // javac options List<String> options = new ArrayList<String>(); options.add("-Xlint:all"); // options.add("-g:none"); options.add("-deprecation"); if (sourcePath != null) { options.add("-sourcepath"); options.add(sourcePath); } if (classPath != null) { options.add("-classpath"); options.add(classPath); } // create a compilation task javax.tools.JavaCompiler.CompilationTask task = tool.getTask(err, manager, diagnostics, options, null, compUnits); if (task.call() == false) { PrintWriter perr = new PrintWriter(err); for (Diagnostic diagnostic : diagnostics.getDiagnostics()) { perr.println(diagnostic.getMessage(null)); } perr.flush(); return null; } Map<String, byte[]> classBytes = manager.getClassBytes(); try { manager.close(); } catch (IOException exp) { } return classBytes; }
java
{ "resource": "" }
q162548
DataModelUtil.getDataModelForType
train
public static <E> GenericData getDataModelForType(Class<E> type) { // Need to check if SpecificRecord first because specific records also // implement GenericRecord if (SpecificRecord.class.isAssignableFrom(type)) { return new SpecificData(type.getClassLoader()); } else if (IndexedRecord.class.isAssignableFrom(type)) { return GenericData.get(); } else { return AllowNulls.get(); } }
java
{ "resource": "" }
q162549
DataModelUtil.getDatumReaderForType
train
/**
 * Returns a datum reader for the given entity class, choosing reflect, specific,
 * or generic reading to match the class's data model.
 */
@SuppressWarnings("unchecked")
public static <E> DatumReader<E> getDatumReaderForType(Class<E> type, Schema writerSchema) {
  Schema readerSchema = getReaderSchema(type, writerSchema);
  GenericData dataModel = getDataModelForType(type);
  if (dataModel instanceof ReflectData) {
    return new ReflectDatumReader<E>(writerSchema, readerSchema, (ReflectData) dataModel);
  }
  if (dataModel instanceof SpecificData) {
    return new SpecificDatumReader<E>(writerSchema, readerSchema, (SpecificData) dataModel);
  }
  return new GenericDatumReader<E>(writerSchema, readerSchema, dataModel);
}
java
{ "resource": "" }
q162550
DataModelUtil.getReaderSchema
train
/**
 * Returns the reader schema for the given type: the type's own generated schema
 * when it has a specific data model, otherwise the provided schema.
 */
public static <E> Schema getReaderSchema(Class<E> type, Schema schema) {
  GenericData dataModel = getDataModelForType(type);
  if (dataModel instanceof SpecificData) {
    return ((SpecificData) dataModel).getSchema(type);
  }
  return schema;
}
java
{ "resource": "" }
q162551
DataModelUtil.getWriterSchema
train
public static <E> Schema getWriterSchema(Class<E> type, Schema schema) { Schema writerSchema = schema; GenericData dataModel = getDataModelForType(type); if (dataModel instanceof AllowNulls) { // assume fields are non-null by default to avoid schema conflicts dataModel = ReflectData.get(); } if (dataModel instanceof SpecificData) { writerSchema = ((SpecificData)dataModel).getSchema(type); } return writerSchema; }
java
{ "resource": "" }
q162552
DataModelUtil.createRecord
train
@SuppressWarnings("unchecked") public static <E> E createRecord(Class<E> type, Schema schema) { // Don't instantiate SpecificRecords or interfaces. if (isGeneric(type) && !type.isInterface()) { if (GenericData.Record.class.equals(type)) { return (E) GenericData.get().newRecord(null, schema); } return (E) ReflectData.newInstance(type, schema); } return null; }
java
{ "resource": "" }
q162553
HiveAbstractDatasetRepository.getHiveMetastoreUri
train
String getHiveMetastoreUri(Configuration conf) { String metastoreUris = conf.get(Loader.HIVE_METASTORE_URI_PROP); if (metastoreUris == null) { return null; } String[] uriArray = metastoreUris.split(HIVE_METASTORE_URIS_SEPARATOR); return uriArray[0]; }
java
{ "resource": "" }
q162554
BaseNCodec.isInAlphabet
train
public boolean isInAlphabet(final byte[] arrayOctet, final boolean allowWSPad) { for (int i = 0; i < arrayOctet.length; i++) { if (!isInAlphabet(arrayOctet[i]) && (!allowWSPad || (arrayOctet[i] != PAD) && !isWhiteSpace(arrayOctet[i]))) { return false; } } return true; }
java
{ "resource": "" }
q162555
BaseNCodec.containsAlphabetOrPad
train
protected boolean containsAlphabetOrPad(final byte[] arrayOctet) { if (arrayOctet == null) { return false; } for (final byte element : arrayOctet) { if (PAD == element || isInAlphabet(element)) { return true; } } return false; }
java
{ "resource": "" }
q162556
BaseNCodec.getEncodedLength
train
public long getEncodedLength(final byte[] pArray) { // Calculate non-chunked size - rounded up to allow for padding // cast to long is needed to avoid possibility of overflow long len = ((pArray.length + unencodedBlockSize-1) / unencodedBlockSize) * (long) encodedBlockSize; if (lineLength > 0) { // We're using chunking // Round up to nearest multiple len += ((len + lineLength-1) / lineLength) * chunkSeparatorLength; } return len; }
java
{ "resource": "" }
q162557
EntitySerDe.serialize
train
public PutAction serialize(byte[] keyBytes, FieldMapping fieldMapping, Object fieldValue) { Put put = new Put(keyBytes); PutAction putAction = new PutAction(put); String fieldName = fieldMapping.getFieldName(); if (fieldMapping.getMappingType() == MappingType.COLUMN || fieldMapping.getMappingType() == MappingType.COUNTER) { serializeColumn(fieldName, fieldMapping.getFamily(), fieldMapping.getQualifier(), fieldValue, put); } else if (fieldMapping.getMappingType() == MappingType.KEY_AS_COLUMN) { serializeKeyAsColumn(fieldName, fieldMapping.getFamily(), fieldMapping.getPrefix(), fieldValue, put); } else if (fieldMapping.getMappingType() == MappingType.OCC_VERSION) { serializeOCCColumn(fieldValue, putAction); } else { throw new ValidationException( "Invalid field mapping for field with name: " + fieldMapping.getFieldName()); } return putAction; }
java
{ "resource": "" }
q162558
EntitySerDe.deserialize
train
public Object deserialize(FieldMapping fieldMapping, Result result) { String fieldName = fieldMapping.getFieldName(); MappingType mappingType = fieldMapping.getMappingType(); if (mappingType == MappingType.COLUMN || mappingType == MappingType.COUNTER) { return deserializeColumn(fieldMapping.getFieldName(), fieldMapping.getFamily(), fieldMapping.getQualifier(), result); } else if (mappingType == MappingType.KEY_AS_COLUMN) { return deserializeKeyAsColumn(fieldMapping.getFieldName(), fieldMapping.getFamily(), fieldMapping.getPrefix(), result); } else if (mappingType == MappingType.OCC_VERSION) { return deserializeOCCColumn(result); } else { throw new ValidationException( "Invalid field mapping for field with name: " + fieldName); } }
java
{ "resource": "" }
q162559
EntitySerDe.serializeColumn
train
private void serializeColumn(String fieldName, byte[] family, byte[] qualifier, Object fieldValue, Put put) { // column mapping, so simply serialize the value and add the bytes // to the put. byte[] bytes = serializeColumnValueToBytes(fieldName, fieldValue); put.add(family, qualifier, bytes); }
java
{ "resource": "" }
q162560
EntitySerDe.serializeKeyAsColumn
train
private void serializeKeyAsColumn(String fieldName, byte[] family, String prefix, Object fieldValue, Put put) { // keyAsColumn mapping, so extract each value from the keyAsColumn field // using the entityComposer, serialize them, and them to the put. Map<CharSequence, Object> keyAsColumnValues = entityComposer .extractKeyAsColumnValues(fieldName, fieldValue); for (Entry<CharSequence, Object> entry : keyAsColumnValues.entrySet()) { CharSequence qualifier = entry.getKey(); byte[] qualifierBytes; byte[] columnKeyBytes = serializeKeyAsColumnKeyToBytes(fieldName, qualifier); if (prefix != null) { byte[] prefixBytes = prefix.getBytes(); qualifierBytes = new byte[prefixBytes.length + columnKeyBytes.length]; System.arraycopy(prefixBytes, 0, qualifierBytes, 0, prefixBytes.length); System.arraycopy(columnKeyBytes, 0, qualifierBytes, prefixBytes.length, columnKeyBytes.length); } else { qualifierBytes = columnKeyBytes; } // serialize the value, and add it to the put. byte[] bytes = serializeKeyAsColumnValueToBytes(fieldName, qualifier, entry.getValue()); put.add(family, qualifierBytes, bytes); } }
java
{ "resource": "" }
q162561
EntitySerDe.serializeOCCColumn
train
private void serializeOCCColumn(Object fieldValue, PutAction putAction) { // OCC Version mapping, so serialize as a long to the version check // column qualifier in the system column family. Long currVersion = (Long) fieldValue; VersionCheckAction versionCheckAction = new VersionCheckAction(currVersion); putAction.getPut().add(Constants.SYS_COL_FAMILY, Constants.VERSION_CHECK_COL_QUALIFIER, Bytes.toBytes(currVersion + 1)); putAction.setVersionCheckAction(versionCheckAction); }
java
{ "resource": "" }
q162562
EntitySerDe.deserializeColumn
train
private Object deserializeColumn(String fieldName, byte[] family, byte[] qualifier, Result result) { byte[] bytes = result.getValue(family, qualifier); if (bytes == null) { return getDefaultValue(fieldName); } else { return deserializeColumnValueFromBytes(fieldName, bytes); } }
java
{ "resource": "" }
q162563
EntitySerDe.deserializeKeyAsColumn
train
private Object deserializeKeyAsColumn(String fieldName, byte[] family, String prefix, Result result) { // Construct a map of keyAsColumn field values. From this we'll be able // to use the entityComposer to construct the entity field value. byte[] prefixBytes = prefix != null ? prefix.getBytes() : null; Map<CharSequence, Object> fieldValueAsMap = new HashMap<CharSequence, Object>(); Map<byte[], byte[]> familyMap = result.getFamilyMap(family); for (Map.Entry<byte[], byte[]> entry : familyMap.entrySet()) { byte[] qualifier = entry.getKey(); // if the qualifier of this column has a prefix that matches the // field prefix, then remove the prefix from the qualifier. if (prefixBytes != null && qualifier.length > prefixBytes.length && Arrays.equals(Arrays.copyOf(qualifier, prefixBytes.length), prefixBytes)) { qualifier = Arrays.copyOfRange(qualifier, prefixBytes.length, qualifier.length); } byte[] columnBytes = entry.getValue(); CharSequence keyAsColumnKey = deserializeKeyAsColumnKeyFromBytes( fieldName, qualifier); Object keyAsColumnValue = deserializeKeyAsColumnValueFromBytes(fieldName, qualifier, columnBytes); fieldValueAsMap.put(keyAsColumnKey, keyAsColumnValue); } // Now build the entity field from the fieldValueAsMap. return entityComposer.buildKeyAsColumnField(fieldName, fieldValueAsMap); }
java
{ "resource": "" }
q162564
EntitySerDe.deserializeOCCColumn
train
private Object deserializeOCCColumn(Result result) { byte[] versionBytes = result.getValue(Constants.SYS_COL_FAMILY, Constants.VERSION_CHECK_COL_QUALIFIER); if (versionBytes == null) { return null; } else { return Bytes.toLong(versionBytes); } }
java
{ "resource": "" }
q162565
AvroEntitySchema.mappingCompatible
train
private static boolean mappingCompatible(EntitySchema oldSchema, EntitySchema newSchema) { for (FieldMapping oldFieldMapping : oldSchema.getColumnMappingDescriptor() .getFieldMappings()) { FieldMapping newFieldMapping = newSchema.getColumnMappingDescriptor() .getFieldMapping(oldFieldMapping.getFieldName()); if (newFieldMapping != null) { if (!oldFieldMapping.equals(newFieldMapping)) { return false; } } } return true; }
java
{ "resource": "" }
q162566
Compiler.compile
train
public Command compile(File morphlineFile, String morphlineId, MorphlineContext morphlineContext, Command finalChild, Config... overrides) { Config config; try { config = parse(morphlineFile, overrides); } catch (IOException e) { throw new MorphlineCompilationException("Cannot parse morphline file: " + morphlineFile, null, e); } Config morphlineConfig = find(morphlineId, config, morphlineFile.getPath()); Command morphlineCommand = compile(morphlineConfig, morphlineContext, finalChild); return morphlineCommand; }
java
{ "resource": "" }
q162567
Compiler.parse
train
public Config parse(File file, Config... overrides) throws IOException { if (file == null || file.getPath().trim().length() == 0) { throw new MorphlineCompilationException("Missing morphlineFile parameter", null); } if (!file.exists()) { throw new FileNotFoundException("File not found: " + file); } if (!file.canRead()) { throw new IOException("Insufficient permissions to read file: " + file); } Config config = ConfigFactory.parseFile(file); for (Config override : overrides) { config = override.withFallback(config); } synchronized (LOCK) { ConfigFactory.invalidateCaches(); config = ConfigFactory.load(config); config.checkValid(ConfigFactory.defaultReference()); // eagerly validate aspects of tree config } return config; }
java
{ "resource": "" }
q162568
Compiler.find
train
public Config find(String morphlineId, Config config, String nameForErrorMsg) { List<? extends Config> morphlineConfigs = config.getConfigList("morphlines"); if (morphlineConfigs.size() == 0) { throw new MorphlineCompilationException( "Morphline file must contain at least one morphline: " + nameForErrorMsg, null); } if (morphlineId != null) { morphlineId = morphlineId.trim(); } if (morphlineId != null && morphlineId.length() == 0) { morphlineId = null; } Config morphlineConfig = null; if (morphlineId == null) { morphlineConfig = morphlineConfigs.get(0); Preconditions.checkNotNull(morphlineConfig); } else { for (Config candidate : morphlineConfigs) { if (morphlineId.equals(new Configs().getString(candidate, "id", null))) { morphlineConfig = candidate; break; } } if (morphlineConfig == null) { throw new MorphlineCompilationException( "Morphline id '" + morphlineId + "' not found in morphline file: " + nameForErrorMsg, null); } } return morphlineConfig; }
java
{ "resource": "" }
q162569
MiniCluster.start
train
public void start() throws IOException, InterruptedException { for (Service service : services) { service.configure(serviceConfig); logger.info("Running Minicluster Service: " + service.getClass().getName()); service.start(); serviceConfig.setHadoopConf(service.getHadoopConf()); // set the default configuration so that the minicluster is used DefaultConfiguration.set(serviceConfig.getHadoopConf()); } logger.info("All Minicluster Services running."); }
java
{ "resource": "" }
q162570
MiniCluster.stop
train
public void stop() throws IOException, InterruptedException { for (int i = services.size() - 1; i >= 0; i--) { Service service = services.get(i); logger.info("Stopping Minicluster Service: " + service.getClass().getName()); service.stop(); } logger.info("All Minicluster Services stopped."); }
java
{ "resource": "" }
q162571
DefaultSchemaManager.refreshManagedSchemaCache
train
@Override public void refreshManagedSchemaCache(String tableName, String entityName) { ManagedSchema managedSchema = managedSchemaDao.getManagedSchema(tableName, entityName); if (managedSchema != null) { getManagedSchemaMap().put( getManagedSchemaMapKey(managedSchema.getTable(), managedSchema.getName()), managedSchema); } }
java
{ "resource": "" }
q162572
DefaultSchemaManager.getManagedSchemaMap
train
private ConcurrentHashMap<String, ManagedSchema> getManagedSchemaMap() { if (managedSchemaMap == null) { synchronized (this) { if (managedSchemaMap == null) { managedSchemaMap = new ConcurrentHashMap<String, ManagedSchema>(); populateManagedSchemaMap(); } } } return managedSchemaMap; }
java
{ "resource": "" }
q162573
DefaultSchemaManager.populateManagedSchemaMap
train
private void populateManagedSchemaMap() { Collection<ManagedSchema> schemas = managedSchemaDao.getManagedSchemas(); for (ManagedSchema managedSchema : schemas) { getManagedSchemaMap().put( getManagedSchemaMapKey(managedSchema.getTable(), managedSchema.getName()), managedSchema); } }
java
{ "resource": "" }
q162574
DefaultSchemaManager.getSchemaParser
train
@SuppressWarnings("unchecked") private KeyEntitySchemaParser<?, ?> getSchemaParser( String schemaParserClassName) { if (schemaParsers.contains(schemaParserClassName)) { return schemaParsers.get(schemaParserClassName); } else { try { Class<KeyEntitySchemaParser<?, ?>> schemaParserClass = (Class<KeyEntitySchemaParser<?, ?>>) Class .forName(schemaParserClassName); KeyEntitySchemaParser<?, ?> schemaParser = schemaParserClass .getConstructor().newInstance(); schemaParsers.putIfAbsent(schemaParserClassName, schemaParser); return schemaParser; } catch (Exception e) { throw new DatasetException( "Could not instantiate schema parser class: " + schemaParserClassName, e); } } }
java
{ "resource": "" }
q162575
DefaultSchemaManager.getManagedSchemaVersions
train
private Map<Integer, String> getManagedSchemaVersions(String tableName, String entityName) { ManagedSchema managedSchema = getManagedSchema(tableName, entityName); Map<Integer, String> returnMap = new HashMap<Integer, String>(); for (Map.Entry<String, String> versionsEntry : managedSchema .getEntitySchemas().entrySet()) { returnMap.put(Integer.parseInt(versionsEntry.getKey()), versionsEntry.getValue()); } return returnMap; }
java
{ "resource": "" }
q162576
DefaultSchemaManager.getManagedSchema
train
private ManagedSchema getManagedSchema(String tableName, String entityName) { ManagedSchema managedSchema = getManagedSchemaFromSchemaMap(tableName, entityName); if (managedSchema == null) { refreshManagedSchemaCache(tableName, entityName); managedSchema = getManagedSchemaFromSchemaMap(tableName, entityName); if (managedSchema == null) { String msg = "Could not find managed schemas for " + tableName + ", " + entityName; throw new SchemaNotFoundException(msg); } } return managedSchema; }
java
{ "resource": "" }
q162577
DefaultSchemaManager.validateCompatibleWithTableSchemas
train
private void validateCompatibleWithTableSchemas(String tableName, KeySchema keySchema, EntitySchema entitySchema) { List<ManagedSchema> entitiesForTable = new ArrayList<ManagedSchema>(); for (Entry<String, ManagedSchema> entry : getManagedSchemaMap().entrySet()) { if (entry.getKey().startsWith(tableName + ":")) { entitiesForTable.add(entry.getValue()); } } for (ManagedSchema managedSchema : entitiesForTable) { if (!managedSchema.getName().equals(entitySchema.getName())) { KeyEntitySchemaParser<?, ?> parser = getSchemaParser(managedSchema .getSchemaType()); for (String schema : managedSchema.getEntitySchemas().values()) { EntitySchema otherEntitySchema = parser.parseEntitySchema(schema); KeySchema otherKeySchema = parser.parseKeySchema(schema); if (!keySchema.compatible(otherKeySchema)) { String msg = "StorageKey fields of schema not compatible with other schema for the table. " + "Table: " + tableName + ". Other schema: " + otherEntitySchema.getRawSchema() + " New schema: " + entitySchema.getRawSchema(); throw new IncompatibleSchemaException(msg); } if (!validateCompatibleWithTableColumns(entitySchema, otherEntitySchema)) { String msg = "Column mappings of schema not compatible with other schema for the table. " + "Table: " + tableName + ". Other schema: " + otherEntitySchema.getRawSchema() + " New schema: " + entitySchema.getRawSchema(); throw new IncompatibleSchemaException(msg); } if (!validateCompatibleWithTableOccVersion(entitySchema, otherEntitySchema)) { String msg = "OCCVersion mapping of schema not compatible with other schema for the table. " + "Only one schema in the table can have one." + "Table: " + tableName + ". Other schema: " + otherEntitySchema.getRawSchema() + " New schema: " + entitySchema.getRawSchema(); throw new IncompatibleSchemaException(msg); } } } } }
java
{ "resource": "" }
q162578
DefaultSchemaManager.validateCompatibleWithTableColumns
train
private boolean validateCompatibleWithTableColumns( EntitySchema entitySchema1, EntitySchema entitySchema2) { // Populate two collections of field mappings. One that contains all // of the column mappings, and one that contains the keyAsColumn // mappings from the first schema. These will be used to compare // against the second schema. Set<String> entitySchema1Columns = new HashSet<String>(); List<String> entitySchema1KeyAsColumns = new ArrayList<String>(); for (FieldMapping fieldMapping1 : entitySchema1 .getColumnMappingDescriptor().getFieldMappings()) { if (fieldMapping1.getMappingType() == MappingType.COLUMN || fieldMapping1.getMappingType() == MappingType.COUNTER) { entitySchema1Columns.add(getColumnValue(fieldMapping1)); } else if (fieldMapping1.getMappingType() == MappingType.KEY_AS_COLUMN) { entitySchema1KeyAsColumns.add(getColumnValue(fieldMapping1)); } } // For each field mapping in the second entity schema, we want to // validate the following: // // 1. That each column mapping in it doesn't map to the same column // as a column mapping in the first schema. // // 2. That each column mapping in it doesn't "startsWith()" with a // keyAsColumn mapping in the first schema, where the keyAsColumn // mapping value is "columnfamily:prefix". // // 3. That each keyAsColumn mapping in it isn't a prefix of one of // the first schema's column mappings. // // 4. That each keyAsColumn mapping in it isn't a prefix of one fo // the first schema's keyAsColumn mappings, and one of the first // schema's mappings isn't a prefix of this schema's keyAsColumn // mappings. 
for (FieldMapping fieldMapping2 : entitySchema2 .getColumnMappingDescriptor().getFieldMappings()) { if (fieldMapping2.getMappingType() == MappingType.COLUMN || fieldMapping2.getMappingType() == MappingType.COUNTER) { String value = getColumnValue(fieldMapping2); if (entitySchema1Columns.contains(value)) { LOG.warn("Field: " + fieldMapping2.getFieldName() + " has a table column conflict with a column mapped field in " + entitySchema1.getName()); return false; } for (String keyAsColumn : entitySchema1KeyAsColumns) { if (value.startsWith(keyAsColumn)) { LOG.warn("Field: " + fieldMapping2.getFieldName() + " has a table column conflict with a keyAsColumn mapped field in " + entitySchema1.getName()); return false; } } } else if (fieldMapping2.getMappingType() == MappingType.KEY_AS_COLUMN) { String entitySchema2KeyAsColumn = getColumnValue(fieldMapping2); for (String entitySchema1KeyAsColumn : entitySchema1KeyAsColumns) { if (entitySchema1KeyAsColumn.startsWith(entitySchema2KeyAsColumn)) { LOG.warn("Field " + fieldMapping2.getFieldName() + " has a table keyAsColumn conflict with a keyAsColumn mapped field in " + entitySchema1.getName()); return false; } } for (String entitySchema1Column : entitySchema1Columns) { if (entitySchema1Column.startsWith(entitySchema2KeyAsColumn)) { LOG.warn("Field " + fieldMapping2.getFieldName() + " has a table keyAsColumn conflict with a column mapped field in " + entitySchema1.getName()); return false; } } } } return true; }
java
{ "resource": "" }
q162579
DefaultSchemaManager.validateCompatibleWithTableOccVersion
train
private boolean validateCompatibleWithTableOccVersion( EntitySchema entitySchema1, EntitySchema entitySchema2) { boolean foundOccMapping = false; for (FieldMapping fieldMapping : entitySchema1.getColumnMappingDescriptor() .getFieldMappings()) { if (fieldMapping.getMappingType() == MappingType.OCC_VERSION) { foundOccMapping = true; break; } } if (foundOccMapping) { for (FieldMapping fieldMapping : entitySchema2 .getColumnMappingDescriptor().getFieldMappings()) { if (fieldMapping.getMappingType() == MappingType.OCC_VERSION) { LOG.warn("Field: " + fieldMapping.getFieldName() + " in schema " + entitySchema2.getName() + " conflicts with an occVersion field in " + entitySchema1.getName()); return false; } } } return true; }
java
{ "resource": "" }
q162580
MemcmpDecoder.readInt
train
@Override public int readInt() throws IOException { byte[] intBytes = new byte[4]; int i = in.read(intBytes); if (i < 4) { throw new EOFException(); } intBytes[0] = (byte) (intBytes[0] ^ 0x80); int value = 0; for (int j = 0; j < intBytes.length; ++j) { value = (value << 8) + (intBytes[j] & 0xff); } return value; }
java
{ "resource": "" }
q162581
MemcmpDecoder.readLong
train
@Override public long readLong() throws IOException { byte[] longBytes = new byte[8]; int i = in.read(longBytes); if (i < 8) { throw new EOFException(); } longBytes[0] = (byte) (longBytes[0] ^ 0x80); long value = 0; for (int j = 0; j < longBytes.length; ++j) { value = (value << 8) + (longBytes[j] & 0xff); } return value; }
java
{ "resource": "" }
q162582
MemcmpDecoder.readString
train
@Override public Utf8 readString(Utf8 old) throws IOException { ByteBuffer stringBytes = readBytes(null); return new Utf8(stringBytes.array()); }
java
{ "resource": "" }
q162583
MemcmpDecoder.readBytes
train
@Override public ByteBuffer readBytes(ByteBuffer old) throws IOException { ByteArrayOutputStream bytesOut = new ByteArrayOutputStream(); while (true) { int byteRead = in.read(); if (byteRead < 0) { throw new EOFException(); } if (byteRead == 0) { int secondByteRead = in.read(); if (byteRead < 0) { throw new EOFException(); } if (secondByteRead == 0) { break; } else if (secondByteRead == 1) { bytesOut.write(0); } else { String msg = "Illegal encoding. 0 byte cannot be followed by " + "anything other than 0 or 1. It was followed by " + Integer.toString(byteRead); throw new IOException(msg); } } else { bytesOut.write(byteRead); } } return ByteBuffer.wrap(bytesOut.toByteArray()); }
java
{ "resource": "" }
q162584
MemcmpDecoder.readFixed
train
@Override public void readFixed(byte[] bytes, int start, int length) throws IOException { int i = in.read(bytes, start, length); if (i < length) { throw new EOFException(); } }
java
{ "resource": "" }
q162585
KerberosUtil.runPrivileged
train
public static <T> T runPrivileged(UserGroupInformation login, PrivilegedExceptionAction<T> action) { try { if (login == null) { return action.run(); } else { return login.doAs(action); } } catch (IOException ex) { throw new DatasetIOException("Privileged action failed", ex); } catch (InterruptedException ex) { Thread.interrupted(); throw new DatasetException(ex); } catch (Exception ex) { throw Throwables.propagate(ex); } }
java
{ "resource": "" }
q162586
AbstractCommand.buildCommandChain
train
protected List<Command> buildCommandChain(Config rootConfig, String configKey, Command finalChild, boolean ignoreNotifications) { Preconditions.checkNotNull(rootConfig); Preconditions.checkNotNull(configKey); Preconditions.checkNotNull(finalChild); List<? extends Config> commandConfigs = new Configs().getConfigList(rootConfig, configKey, Collections.<Config>emptyList()); List<Command> commands = Lists.newArrayList(); Command currentParent = this; Connector lastConnector = null; for (int i = 0; i < commandConfigs.size(); i++) { boolean isLast = (i == commandConfigs.size() - 1); Connector connector = new Connector(ignoreNotifications && isLast); if (isLast) { connector.setChild(finalChild); } Config cmdConfig = commandConfigs.get(i); Command cmd = buildCommand(cmdConfig, currentParent, connector); commands.add(cmd); if (i > 0) { lastConnector.setChild(cmd); } connector.setParent(cmd); currentParent = connector; lastConnector = connector; } return commands; }
java
{ "resource": "" }
q162587
AbstractCommand.buildCommand
train
protected Command buildCommand(Config cmdConfig, Command currentParent, Command finalChild) { Preconditions.checkNotNull(cmdConfig); Preconditions.checkNotNull(currentParent); Preconditions.checkNotNull(finalChild); Set<Map.Entry<String, Object>> entries = cmdConfig.root().unwrapped().entrySet(); if (entries.size() != 1) { throw new MorphlineCompilationException("Illegal number of entries: " + entries.size(), cmdConfig); } Map.Entry<String, Object> entry = entries.iterator().next(); String cmdName = entry.getKey(); Class cmdClass; LOG.trace("Building command: {}", cmdName); if (!cmdName.contains(".") && !cmdName.contains("/")) { cmdClass = getContext().getCommandBuilder(cmdName); if (cmdClass == null) { throw new MorphlineCompilationException("No command builder registered for name: " + cmdName, cmdConfig); } } else { String className = cmdName.replace('/', '.'); try { cmdClass = Class.forName(className); } catch (ClassNotFoundException e) { throw new MorphlineCompilationException("Cannot find command class: " + className, cmdConfig, e); } } Object obj; try { obj = cmdClass.newInstance(); } catch (Exception e) { throw new MorphlineCompilationException("Cannot instantiate command class: " + cmdClass.getName(), cmdConfig, e); } if (!(obj instanceof CommandBuilder)) { throw new MorphlineCompilationException("Type of command " + cmdName + " must be an instance of " + CommandBuilder.class.getName() + " but is: " + cmdClass.getName(), cmdConfig); } CommandBuilder builder = (CommandBuilder) obj; Command cmd = builder.build(cmdConfig.getConfig(cmdName), currentParent, finalChild, getContext()); return cmd; }
java
{ "resource": "" }
q162588
RetryingSolrServer.limitStringLength
train
private String limitStringLength(String str) { if (str.length() > MAX_STRING_LENGTH) { str = str.substring(0, MAX_STRING_LENGTH) + " ..."; } return str; }
java
{ "resource": "" }
q162589
SmtpMailer.getRateLimitTemplateProperties
train
private Map<String, Object> getRateLimitTemplateProperties(SingularityRequest request, final SingularityEmailType emailType) { final Builder<String, Object> templateProperties = ImmutableMap.<String, Object>builder(); templateProperties.put("singularityRequestLink", mailTemplateHelpers.getSingularityRequestLink(request.getId())); templateProperties.put("rateLimitAfterNotifications", Integer.toString(smtpConfiguration.getRateLimitAfterNotifications())); templateProperties.put("rateLimitPeriodFormat", DurationFormatUtils.formatDurationHMS(smtpConfiguration.getRateLimitPeriodMillis())); templateProperties.put("rateLimitCooldownFormat", DurationFormatUtils.formatDurationHMS(smtpConfiguration.getRateLimitCooldownMillis())); templateProperties.put("emailType", emailType.name()); templateProperties.put("requestId", request.getId()); templateProperties.put("color", emailType.getColor()); return templateProperties.build(); }
java
{ "resource": "" }
q162590
SmtpMailer.queueMail
train
private void queueMail(final Collection<SingularityEmailDestination> destination, final SingularityRequest request, final SingularityEmailType emailType, final Optional<String> actionTaker, String subject, String body) { RateLimitResult result = checkRateLimitForMail(request, emailType); if (result == RateLimitResult.DONT_SEND_MAIL_IN_COOLDOWN) { return; } if (result == RateLimitResult.SEND_COOLDOWN_STARTED_MAIL) { subject = String.format("%s notifications for %s are being rate limited", emailType.name(), request.getId()); body = Jade4J.render(rateLimitedTemplate, getRateLimitTemplateProperties(request, emailType)); } final Set<String> toList = new HashSet<>(); final Set<String> ccList = new HashSet<>(); // Decide where to send this email. if (destination.contains(SingularityEmailDestination.OWNERS) && request.getOwners().isPresent() && !request.getOwners().get().isEmpty()) { toList.addAll(request.getOwners().get()); } if (destination.contains(SingularityEmailDestination.ADMINS) && !smtpConfiguration.getAdmins().isEmpty()) { if (toList.isEmpty()) { toList.addAll(smtpConfiguration.getAdmins()); } else { ccList.addAll(smtpConfiguration.getAdmins()); } } if (actionTaker.isPresent() && !Strings.isNullOrEmpty(actionTaker.get())) { if (destination.contains(SingularityEmailDestination.ACTION_TAKER)) { toList.add(actionTaker.get()); } else { final Iterator<String> i = toList.iterator(); while (i.hasNext()) { if (actionTaker.get().equalsIgnoreCase(i.next())) { i.remove(); } } } } Set<String> emailBlacklist = Sets.newHashSet(notificationsManager.getBlacklist()); toList.removeAll(emailBlacklist); ccList.removeAll(emailBlacklist); smtpSender.queueMail(Lists.newArrayList(toList), Lists.newArrayList(ccList), subject, body); }
java
{ "resource": "" }
q162591
SingularityClient.getSingularityRequests
train
public Collection<SingularityRequestParent> getSingularityRequests() { final Function<String, String> requestUri = (host) -> String.format(REQUESTS_FORMAT, getApiBase(host)); return getCollection(requestUri, "[ACTIVE, PAUSED, COOLDOWN] requests", REQUESTS_COLLECTION); }
java
{ "resource": "" }
q162592
SingularityClient.getActiveSingularityRequests
train
public Collection<SingularityRequestParent> getActiveSingularityRequests() { final Function<String, String> requestUri = (host) -> String.format(REQUESTS_GET_ACTIVE_FORMAT, getApiBase(host)); return getCollection(requestUri, "ACTIVE requests", REQUESTS_COLLECTION); }
java
{ "resource": "" }
q162593
SingularityClient.getPausedSingularityRequests
train
public Collection<SingularityRequestParent> getPausedSingularityRequests() { final Function<String, String> requestUri = (host) -> String.format(REQUESTS_GET_PAUSED_FORMAT, getApiBase(host)); return getCollection(requestUri, "PAUSED requests", REQUESTS_COLLECTION); }
java
{ "resource": "" }
q162594
SingularityClient.getCoolDownSingularityRequests
train
public Collection<SingularityRequestParent> getCoolDownSingularityRequests() { final Function<String, String> requestUri = (host) -> String.format(REQUESTS_GET_COOLDOWN_FORMAT, getApiBase(host)); return getCollection(requestUri, "COOLDOWN requests", REQUESTS_COLLECTION); }
java
{ "resource": "" }
q162595
SingularityClient.getPendingSingularityRequests
train
public Collection<SingularityPendingRequest> getPendingSingularityRequests() { final Function<String, String> requestUri = (host) -> String.format(REQUESTS_GET_PENDING_FORMAT, getApiBase(host)); return getCollection(requestUri, "pending requests", PENDING_REQUESTS_COLLECTION); }
java
{ "resource": "" }
q162596
SingularityClient.getSlaves
train
public Collection<SingularitySlave> getSlaves(Optional<MachineState> slaveState) { final Function<String, String> requestUri = (host) -> String.format(SLAVES_FORMAT, getApiBase(host)); Optional<Map<String, Object>> maybeQueryParams = Optional.absent(); String type = "slaves"; if (slaveState.isPresent()) { maybeQueryParams = Optional.of(ImmutableMap.of("state", slaveState.get().toString())); type = String.format("%s slaves", slaveState.get().toString()); } return getCollectionWithParams(requestUri, type, maybeQueryParams, SLAVES_COLLECTION); }
java
{ "resource": "" }
q162597
SingularityClient.getSlave
train
public Optional<SingularitySlave> getSlave(String slaveId) { final Function<String, String> requestUri = (host) -> String.format(SLAVE_DETAIL_FORMAT, getApiBase(host), slaveId); return getSingle(requestUri, "slave", slaveId, SingularitySlave.class); }
java
{ "resource": "" }
q162598
SingularityClient.getHistoryForTask
train
public Optional<SingularityTaskHistory> getHistoryForTask(String taskId) { final Function<String, String> requestUri = (host) -> String.format(TASK_HISTORY_FORMAT, getApiBase(host), taskId); return getSingle(requestUri, "task history", taskId, SingularityTaskHistory.class); }
java
{ "resource": "" }
q162599
SingularityClient.getHistoryForTask
train
@Deprecated public Optional<SingularityTaskIdHistory> getHistoryForTask(String requestId, String runId) { return getTaskIdHistoryByRunId(requestId, runId); }
java
{ "resource": "" }