| name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, 3.26–3.68) |
|---|---|---|
hadoop_BalanceProcedureScheduler_remove | /**
* Remove the job from scheduler if it finishes.
*/
public BalanceJob remove(BalanceJob job) {
BalanceJob inner = findJob(job);
if (inner == null) {
return null;
} else if (job.isJobDone()) {
synchronized (this) {
return jobSet.remove(inner);
}
}
return null;
} | 3.68 |
hbase_StorageClusterStatusModel_getRegions | /** Returns the total number of regions served by the cluster */
@XmlAttribute
public int getRegions() {
return regions;
} | 3.68 |
flink_TableConfig_setRootConfiguration | /**
* Sets the given configuration as {@link #rootConfiguration}, which contains any configuration
* set in the execution context. See the docs of {@link TableConfig} for more information.
*
* @param rootConfiguration root configuration to be set
*/
@Internal
public void setRootConfiguration(ReadableConfig rootCon... | 3.68 |
flink_ListTypeInfo_getElementTypeInfo | /** Gets the type information for the elements contained in the list */
public TypeInformation<T> getElementTypeInfo() {
return elementTypeInfo;
} | 3.68 |
morf_SqlServer_openSchema | /**
* @see org.alfasoftware.morf.jdbc.DatabaseType#openSchema(java.sql.Connection,
* java.lang.String, java.lang.String)
*/
@Override
public Schema openSchema(Connection connection, String databaseName, String schemaName) {
return new SqlServerMetaDataProvider(connection, schemaName);
} | 3.68 |
hadoop_ReconfigurationException_constructMessage | /**
* Construct the exception message.
*/
private static String constructMessage(String property,
String newVal, String oldVal) {
String message = "Could not change property " + property;
if (oldVal != null) {
message += " from \'" + oldVal;
}
if (newVal != null) {
... | 3.68 |
framework_GridSortOrder_asc | /**
* Creates a new grid sort builder with given sorting using ascending sort
* direction.
*
* @param by
* the column to sort by
* @param <T>
* the grid type
*
* @return the grid sort builder
*/
public static <T> GridSortOrderBuilder<T> asc(Column<T, ?> by) {
return new GridSortOrder... | 3.68 |
hadoop_SnappyCodec_getCompressorType | /**
* Get the type of {@link Compressor} needed by this {@link CompressionCodec}.
*
* @return the type of compressor needed by this codec.
*/
@Override
public Class<? extends Compressor> getCompressorType() {
return SnappyCompressor.class;
} | 3.68 |
hbase_MultiByteBuff_skip | /**
* Jumps the current position of this MBB by specified length.
*/
@Override
public MultiByteBuff skip(int length) {
checkRefCount();
// Get available bytes from this item and remaining from next
int jump = 0;
while (true) {
jump = this.curItem.remaining();
if (jump >= length) {
this.curItem.p... | 3.68 |
hbase_BucketCache_blockEvicted | /**
* This method is invoked after the bucketEntry is removed from {@link BucketCache#backingMap}
*/
void blockEvicted(BlockCacheKey cacheKey, BucketEntry bucketEntry, boolean decrementBlockNumber,
boolean evictedByEvictionProcess) {
bucketEntry.markAsEvicted();
blocksByHFile.remove(cacheKey);
if (decrementBl... | 3.68 |
hadoop_ZKClient_unregisterService | /**
* unregister the service.
*
* @param path the path at which the service was registered
* @throws IOException if there are I/O errors.
* @throws InterruptedException if any thread has interrupted.
*/
public void unregisterService(String path) throws IOException,
InterruptedException {
try {
zkClient.... | 3.68 |
hbase_Mutation_isEmpty | /**
* Method to check if the familyMap is empty
* @return true if empty, false otherwise
*/
public boolean isEmpty() {
return getFamilyCellMap().isEmpty();
} | 3.68 |
hbase_ProcedureExecutor_getCorePoolSize | /** Returns the core pool size settings. */
public int getCorePoolSize() {
return corePoolSize;
} | 3.68 |
framework_SliderTooltip_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
* VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
Slider slider = new Slider();
slider.setDescription("Tooltip");
addComponent(slider);
} | 3.68 |
hibernate-validator_ConstraintDefinitionContribution_includeExisting | /**
 * Whether or not the existing constraint validators should be kept.
*
* @return {@code true} if the existing constraint validators for the constraint type wrapped by this
* instance should be kept, {@code false} otherwise.
*/
public boolean includeExisting() {
return includeExisting;
} | 3.68 |
flink_PythonOperatorChainingOptimizer_optimize | /**
 * Perform chaining optimization. It will return the chained transformations and the
* transformation after chaining optimization for the given transformation.
*/
public static Tuple2<List<Transformation<?>>, Transformation<?>> optimize(
List<Transformation<?>> transformations, Transformation<?> targetTr... | 3.68 |
querydsl_AbstractMongodbQuery_fetchOne | /**
* Fetch one with the specific fields
*
* @param paths fields to return
* @return first result
*/
public K fetchOne(Path<?>... paths) {
queryMixin.setProjection(paths);
return fetchOne();
} | 3.68 |
morf_DataValueLookupMetadataRegistry_appendAndIntern | /**
* Given an existing (interned) metadata descriptor, appends the given column and
* returns the interned result.
*
* <p>Used when adding a new value to an existing {@link DataValueLookupBuilderImpl}.</p>
*
* <p>This call pattern means we can avoid constructing the combined {@link DataValueLookupMetadata}
* si... | 3.68 |
framework_FileDownloader_isOverrideContentType | /**
* Checks whether the content type should be overridden.
*
* @return <code>true</code> if the content type will be overridden when
* possible; <code>false</code> if the original content type will be
* used.
* @see #setOverrideContentType(boolean)
*/
public boolean isOverrideContentType() {
... | 3.68 |
querydsl_SQLExpressions_max | /**
* Start a window function expression
*
* @param expr expression
* @return max(expr)
*/
public static <T extends Comparable> WindowOver<T> max(Expression<T> expr) {
return new WindowOver<T>(expr.getType(), Ops.AggOps.MAX_AGG, expr);
} | 3.68 |
flink_PbCodegenUtils_flinkContainerElementCode | /**
* @param flinkContainerCode code phrase which represent flink container type like row/array in
* codegen sections
* @param index the index number in flink container type
* @param eleType the element type
*/
public static String flinkContainerElementCode(
String flinkContainerCode, String index, Log... | 3.68 |
flink_ResourceProfile_merge | /**
* Calculates the sum of two resource profiles.
*
* @param other The other resource profile to add.
* @return The merged resource profile.
*/
@Nonnull
public ResourceProfile merge(final ResourceProfile other) {
checkNotNull(other, "Cannot merge with null resources");
if (equals(ANY) || other.equals(ANY... | 3.68 |
hadoop_StagingCommitter_getCommittedTaskPath | /**
* Compute the path where the output of a committed task is stored until the
* entire job is committed for a specific application attempt.
* @param appAttemptId the ID of the application attempt to use
* @param context the context of any task.
* @return the path where the output of a committed task is stored.
... | 3.68 |
streampipes_MigrateExtensionsResource_handleMigration | /**
* Migrates a pipeline element instance based on the provided {@link MigrationRequest}.
* The outcome of the migration is described in {@link MigrationResult}.
* The result is always part of the response.
* Independent, of the migration outcome, the returned response always has OK as status code.
* It is the re... | 3.68 |
hudi_LSMTimeline_isFileFromLayer | /**
* Returns whether a file belongs to the specified layer {@code layer} within the LSM layout.
*/
public static boolean isFileFromLayer(String fileName, int layer) {
return getFileLayer(fileName) == layer;
} | 3.68 |
hbase_MetricsHeapMemoryManager_updateUnblockedFlushCount | /**
* Update/Set the unblocked flush count histogram/gauge
* @param unblockedFlushCount the number of unblocked memstore flush since last tuning.
*/
public void updateUnblockedFlushCount(final long unblockedFlushCount) {
source.updateUnblockedFlushCount(unblockedFlushCount);
} | 3.68 |
flink_MemorySegment_putLong | /**
* Writes the given long value (64bit, 8 bytes) to the given position in the system's native
* byte order. This method offers the best speed for long integer writing and should be used
* unless a specific byte order is required. In most cases, it suffices to know that the byte
* order in which the value is writt... | 3.68 |
hadoop_NamenodeStatusReport_getNumOfBlocksPendingReplication | /**
* Get the number of pending replication blocks.
*
* @return Number of pending replication blocks.
*/
public long getNumOfBlocksPendingReplication() {
return this.numOfBlocksPendingReplication;
} | 3.68 |
hadoop_ECPolicyLoader_loadSchemas | /**
* Load schemas from root element in the XML configuration file.
* @param root root element
* @return EC schema map
*/
private Map<String, ECSchema> loadSchemas(Element root) {
NodeList elements = root.getElementsByTagName("schemas")
.item(0).getChildNodes();
Map<String, ECSchema> schemas = new HashMap... | 3.68 |
hibernate-validator_ReflectionHelper_getCollectionElementType | /**
* Determines the type of the elements of an {@code Iterable}, array or the value of a {@code Map}.
*/
public static Type getCollectionElementType(Type type) {
Type indexedType = null;
if ( isIterable( type ) && type instanceof ParameterizedType ) {
ParameterizedType paramType = (ParameterizedType) type;
ind... | 3.68 |
flink_FlinkRelUtil_merge | /**
* Merges the programs of two {@link Calc} instances and returns a new {@link Calc} instance
* with the merged program.
*/
public static Calc merge(Calc topCalc, Calc bottomCalc) {
RexProgram topProgram = topCalc.getProgram();
RexBuilder rexBuilder = topCalc.getCluster().getRexBuilder();
// Merge the... | 3.68 |
hbase_WALEntryBatch_getLastSeqIds | /** Returns the last sequenceid for each region if the table has serial-replication scope */
public Map<String, Long> getLastSeqIds() {
return lastSeqIds;
} | 3.68 |
flink_WatermarkOutputMultiplexer_updateCombinedWatermark | /**
* Checks whether we need to update the combined watermark. Should be called when a newly
* emitted per-output watermark is higher than the max so far or if we need to combined the
* deferred per-output updates.
*/
private void updateCombinedWatermark() {
if (combinedWatermarkStatus.updateCombinedWatermark()... | 3.68 |
morf_AbstractSqlDialectTest_differentSchemaTableName | /**
* For tests using tables from different schema values.
*
* @param baseName Base table name.
* @return Decorated name.
*/
protected String differentSchemaTableName(String baseName) {
return "MYSCHEMA." + baseName;
} | 3.68 |
graphhopper_ResponsePath_hasErrors | /**
* @return true if this alternative response contains one or more errors
*/
public boolean hasErrors() {
return !errors.isEmpty();
} | 3.68 |
flink_BufferManager_releaseAll | /**
* The floating buffer is recycled to local buffer pool directly, and the exclusive buffer
* will be gathered to return to global buffer pool later.
*
* @param exclusiveSegments The list that we will add exclusive segments into.
*/
void releaseAll(List<MemorySegment> exclusiveSegments) {
Buffer buffer;
... | 3.68 |
dubbo_CacheFilter_invoke | /**
* If cache is configured, dubbo will invoke method on each method call. If cache value is returned by cache store
* then it will return otherwise call the remote method and return value. If remote method's return value has error
* then it will not cache the value.
* @param invoker service
* @param invocatio... | 3.68 |
flink_ExtractionUtils_extractionError | /** Helper method for creating consistent exceptions during extraction. */
static ValidationException extractionError(Throwable cause, String message, Object... args) {
return new ValidationException(String.format(message, args), cause);
} | 3.68 |
dubbo_RequestEvent_toRequestErrorEvent | /**
* Acts on MetricsClusterFilter to monitor exceptions that occur before request execution
*/
public static RequestEvent toRequestErrorEvent(
ApplicationModel applicationModel,
String appName,
MetricsDispatcher metricsDispatcher,
Invocation invocation,
String side,
in... | 3.68 |
framework_AbstractSelect_setNullSelectionAllowed | /**
* Allow or disallow empty selection by the user. If the select is in
* single-select mode, you can make an item represent the empty selection by
* calling <code>setNullSelectionItemId()</code>. This way you can for
* instance set an icon and caption for the null selection item.
*
* @param nullSelectionAllowed... | 3.68 |
pulsar_AuthenticationProvider_newHttpAuthState | /**
* Create an http authentication data State use passed in AuthenticationDataSource.
* @deprecated implementations that previously relied on this should update their implementation of
* {@link #authenticateHttpRequest(HttpServletRequest, HttpServletResponse)} or of
* {@link #authenticateHttpRequestAsync(HttpServl... | 3.68 |
framework_VPanel_setIconUri | /** For internal use only. May be removed or replaced in the future. */
public void setIconUri(String iconUri, ApplicationConnection client) {
if (icon != null) {
captionNode.removeChild(icon.getElement());
}
icon = client.getIcon(iconUri);
if (icon != null) {
DOM.insertChild(captionNode... | 3.68 |
hadoop_JobBase_configure | /**
* Initializes a new instance from a {@link JobConf}.
*
* @param job
* the configuration
*/
public void configure(JobConf job) {
this.longCounters = new TreeMap<Object, Long>();
this.doubleCounters = new TreeMap<Object, Double>();
} | 3.68 |
hibernate-validator_BeanMetaDataManagerImpl_getBeanConfigurationForHierarchy | /**
* Returns a list with the configurations for all types contained in the given type's hierarchy (including
* implemented interfaces) starting at the specified type.
*
* @param beanClass The type of interest.
* @param <T> The type of the class to get the configurations for.
* @return A set with the configuratio... | 3.68 |
hudi_AdbSyncTool_syncSchema | /**
 * Get the latest schema from the last commit and check if it is in sync with the ADB
* table schema. If not, evolves the table schema.
*
* @param tableName The table to be synced
* @param tableExists Whether target table exists
* @param useRealTimeInputFormat Whether using realtime input... | 3.68 |
hadoop_Cluster_getJobHistoryUrl | /**
* Get the job history file path for a given job id. The job history file at
* this path may or may not be existing depending on the job completion state.
* The file is present only for the completed jobs.
* @param jobId the JobID of the job submitted by the current user.
* @return the file path of the job his... | 3.68 |
hbase_SnapshotInfo_getArchivedStoreFilesCount | /** Returns the number of available store files in the archive */
public int getArchivedStoreFilesCount() {
return hfilesArchiveCount.get();
} | 3.68 |
graphhopper_TarjanSCC_getSingleNodeComponents | /**
* The set of nodes that form their own (single-node) component. If {@link TarjanSCC#excludeSingleNodeComponents}
* is enabled this set will be empty.
*/
public BitSet getSingleNodeComponents() {
return singleNodeComponents;
} | 3.68 |
framework_VComboBox_setPrevButtonActive | /**
* Should the previous page button be visible to the user
*
* @param active
*/
private void setPrevButtonActive(boolean active) {
if (enableDebug) {
debug("VComboBox.SP: setPrevButtonActive(" + active + ")");
}
if (active) {
DOM.sinkEvents(up, Event.ONCLICK);
up.setClassName(... | 3.68 |
flink_TaskExecutorMemoryConfiguration_getFrameworkOffHeap | /** Returns the configured off-heap size used by the framework. */
public Long getFrameworkOffHeap() {
return frameworkOffHeap;
} | 3.68 |
flink_ComponentClosingUtils_tryShutdownExecutorElegantly | /**
* A util method that tries to shut down an {@link ExecutorService} elegantly within the given
* timeout. If the executor has not been shut down before it hits timeout or the thread is
* interrupted when waiting for the termination, a forceful shutdown will be attempted on the
* executor.
*
* @param executor t... | 3.68 |
flink_EmptyMutableObjectIterator_get | /**
* Gets a singleton instance of the empty iterator.
*
* @param <E> The type of the objects (not) returned by the iterator.
* @return An instance of the iterator.
*/
public static <E> MutableObjectIterator<E> get() {
@SuppressWarnings("unchecked")
MutableObjectIterator<E> iter = (MutableObjectIterator<E>... | 3.68 |
hbase_FileLink_getBackReferenceFileName | /**
* Get the referenced file name from the reference link directory path.
* @param dirPath Link references directory path
* @return Name of the file referenced
*/
public static String getBackReferenceFileName(final Path dirPath) {
return dirPath.getName().substring(BACK_REFERENCES_DIRECTORY_PREFIX.length());
} | 3.68 |
hbase_MetricsMasterFileSystem_addMetaWALSplit | /**
* Record a single instance of a split
* @param time time that the split took
* @param size length of original WALs that were split
*/
public synchronized void addMetaWALSplit(long time, long size) {
source.updateMetaWALSplitTime(time);
source.updateMetaWALSplitSize(size);
} | 3.68 |
hadoop_BinaryPartitioner_setOffsets | /**
* Set the subarray to be used for partitioning to
* <code>bytes[left:(right+1)]</code> in Python syntax.
*
* @param conf configuration object
* @param left left Python-style offset
* @param right right Python-style offset
*/
public static void setOffsets(Configuration conf, int left, int right) {
conf.se... | 3.68 |
flink_MapView_get | /**
* Return the value for the specified key or {@code null} if the key is not in the map view.
*
* @param key The look up key.
* @return The value for the specified key.
* @throws Exception Thrown if the system cannot get data.
*/
public V get(K key) throws Exception {
return map.get(key);
} | 3.68 |
hbase_RowMutations_add | /**
* Add a list of mutations
* @param mutations The data to send.
* @throws IOException if the row of added mutation doesn't match the original row
*/
public RowMutations add(List<? extends Mutation> mutations) throws IOException {
for (Mutation mutation : mutations) {
if (!Bytes.equals(row, mutation.getRow(... | 3.68 |
hbase_MetricsSource_setOldestWalAge | /*
* Sets the age of oldest log file just for source.
*/
public void setOldestWalAge(long age) {
singleSourceSource.setOldestWalAge(age);
} | 3.68 |
hadoop_RMContainerTokenSecretManager_createContainerToken | /**
* Helper function for creating ContainerTokens.
*
* @param containerId Container Id
* @param containerVersion Container version
* @param nodeId Node Id
* @param appSubmitter App Submitter
* @param capability Capability
* @param priority Priority
* @param createTime Create Time
* @param logAggregationConte... | 3.68 |
morf_SchemaEditor_removePrimaryKey | /**
* Drop the primary key of a table.
*
* @param tableName The original table name
* @param oldPrimaryKeyColumns The current/old primary key columns for the table.
*/
default void removePrimaryKey(String tableName, List<String> oldPrimaryKeyColumns){
changePrimaryKeyColumns(tableName, oldPrimaryKeyColumns, Coll... | 3.68 |
hbase_InternalScan_checkOnlyMemStore | /**
* StoreFiles will not be scanned. Only MemStore will be scanned.
*/
public void checkOnlyMemStore() {
memOnly = true;
filesOnly = false;
} | 3.68 |
hadoop_LogParserUtil_setLogParser | /**
* Set the {@link LogParser} to use.
*
* @param logParser the {@link LogParser} to use.
*/
public void setLogParser(final LogParser logParser) {
this.logParser = logParser;
} | 3.68 |
hbase_CoprocessorRpcUtils_getControllerException | /**
 * Retrieves exception stored during RPC invocation.
* @param controller the controller instance provided by the client when calling the service
* @return exception if any, or null; Will return DoNotRetryIOException for string represented
* failure causes in controller.
*/
@Nullable
public static IOExc... | 3.68 |
morf_ChangePrimaryKeyColumns_isApplied | /**
* @see org.alfasoftware.morf.upgrade.SchemaChange#isApplied(org.alfasoftware.morf.metadata.Schema, org.alfasoftware.morf.jdbc.ConnectionResources)
*/
@Override
public boolean isApplied(Schema schema, ConnectionResources database) {
List<String> actual = namesOfColumns(primaryKeysForTable(schema.getTable(tableNa... | 3.68 |
morf_Table_primaryKey | /**
* @return The column definitions of the columns in the primary key.
*/
public default List<Column> primaryKey() {
return columns().stream()
.filter(Column::isPrimaryKey)
.collect(Collectors.toList());
} | 3.68 |
morf_RenameIndex_applyChange | /**
* Renames an index from the name specified to the new name.
*
* @param schema {@link Schema} to apply the change against resulting in new
* metadata.
* @param indexStartName the starting name for the index
* @param indexEndName the end name for the index
* @return MetaData with {@link SchemaChange} ... | 3.68 |
dubbo_GenericBeanPostProcessorAdapter_processBeforeInitialization | /**
* Process {@link T Bean} with name without return value before initialization,
* <p>
* This method will be invoked by BeanPostProcessor#postProcessBeforeInitialization(Object, String)
*
* @param bean Bean Object
* @param beanName Bean Name
* @throws BeansException in case of errors
*/
protected void pr... | 3.68 |
morf_UpgradePathFinder_readOnlyWithUUID | /**
* Reads the {@link OnlyWith} UUID from a class, doing some sanity checking.
*
* @param upgradeStepClass The upgrade step class.
* @return The UUID of the referenced class; null if no annotation is present on the given class.
*/
public static java.util.UUID readOnlyWithUUID(Class<? extends UpgradeStep> upgradeS... | 3.68 |
hbase_AvlUtil_readNext | /**
* Return the successor of the current node
* @param node the current node
* @return the successor of the current node
*/
public static <TNode extends AvlLinkedNode> TNode readNext(TNode node) {
return (TNode) node.iterNext;
} | 3.68 |
morf_SqlDialect_extractParameters | /**
* Extracts the parameters from a SQL statement.
*
* @param statement the SQL statement.
* @return the list of parameters.
*/
public List<SqlParameter> extractParameters(InsertStatement statement) {
SqlParameterExtractor extractor = new SqlParameterExtractor();
ObjectTreeTraverser.forCallback(extractor).dis... | 3.68 |
hbase_HBaseTestingUtility_startMiniMapReduceCluster | /**
* Starts a <code>MiniMRCluster</code>. Call {@link #setFileSystemURI(String)} to use a different
* filesystem.
* @param servers The number of <code>TaskTracker</code>'s to start.
* @throws IOException When starting the cluster fails.
*/
private void startMiniMapReduceCluster(final int servers) throws IOExcepti... | 3.68 |
flink_DualInputOperator_setSecondInput | /**
* Sets the second input to the union of the given operators.
*
* @param inputs The operator(s) that form the second input.
* @deprecated This method will be removed in future versions. Use the {@link Union} operator
* instead.
*/
@Deprecated
public void setSecondInput(Operator<IN2>... inputs) {
this.i... | 3.68 |
hadoop_CryptoUtils_createIV | /**
* This method creates and initializes an IV (Initialization Vector)
*
* @param conf configuration
* @return byte[] initialization vector
* @throws IOException exception in case of error
*/
public static byte[] createIV(Configuration conf) throws IOException {
CryptoCodec cryptoCodec = CryptoCodec.getInstan... | 3.68 |
hbase_QuotaFilter_setNamespaceFilter | /**
* Set the namespace filter regex
* @param regex the namespace filter
* @return the quota filter object
*/
public QuotaFilter setNamespaceFilter(final String regex) {
this.namespaceRegex = regex;
hasFilters |= StringUtils.isNotEmpty(regex);
return this;
} | 3.68 |
framework_Tree_getItemDescriptionGenerator | /**
* Get the item description generator which generates tooltips for tree
* items.
*
* @return the item description generator
*/
public ItemDescriptionGenerator getItemDescriptionGenerator() {
return itemDescriptionGenerator;
} | 3.68 |
flink_DataTypeFactoryImpl_createSerializerExecutionConfig | /**
* Creates a lazy {@link ExecutionConfig} that contains options for {@link TypeSerializer}s with
* information from existing {@link ExecutionConfig} (if available) enriched with table {@link
* ReadableConfig}.
*/
private static Supplier<ExecutionConfig> createSerializerExecutionConfig(
ClassLoader classL... | 3.68 |
hadoop_RouterQuotaUsage_verifyNamespaceQuota | /**
* Verify if namespace quota is violated once quota is set. Relevant
* method {@link DirectoryWithQuotaFeature#verifyNamespaceQuota}.
* @throws NSQuotaExceededException If the quota is exceeded.
*/
public void verifyNamespaceQuota() throws NSQuotaExceededException {
long quota = getQuota();
long fileAndDirec... | 3.68 |
querydsl_MetaDataExporter_setExportAll | /**
* Set whether all table types should be exported
*
* @param exportAll
*/
public void setExportAll(boolean exportAll) {
this.exportAll = exportAll;
} | 3.68 |
hadoop_JobHistoryServer_getBindAddress | /**
* Retrieve JHS bind address from configuration
*
* @param conf
* @return InetSocketAddress
*/
public static InetSocketAddress getBindAddress(Configuration conf) {
return conf.getSocketAddr(JHAdminConfig.MR_HISTORY_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_POR... | 3.68 |
hadoop_SysInfoWindows_getNumCores | /** {@inheritDoc} */
@Override
public int getNumCores() {
return getNumProcessors();
} | 3.68 |
hbase_AsyncConnectionImpl_getNonceGenerator | // ditto
NonceGenerator getNonceGenerator() {
return nonceGenerator;
} | 3.68 |
flink_ResourceInformationReflector_getExternalResourcesUnSafe | /**
* Same as {@link #getExternalResources(Resource)} but allows to pass objects that are not of
* type {@link Resource}.
*/
@VisibleForTesting
Map<String, Long> getExternalResourcesUnSafe(Object resource) {
if (!isYarnResourceTypesAvailable) {
return Collections.emptyMap();
}
final Map<String, ... | 3.68 |
hadoop_ApplicationServiceRecordProcessor_createAInfo | /**
* Create an application A record descriptor.
*
* @param record the service record.
* @throws Exception if there is an issue during descriptor creation.
*/
protected void createAInfo(ServiceRecord record) throws Exception {
AApplicationRecordDescriptor recordInfo = new AApplicationRecordDescriptor(
getP... | 3.68 |
dubbo_ReferenceBeanBuilder_setInjvm | /**
* @param injvm
* @deprecated instead, use the parameter <b>scope</b> to judge if it's in jvm, scope=local
*/
@Deprecated
public ReferenceBeanBuilder setInjvm(Boolean injvm) {
attributes.put(ReferenceAttributes.INJVM, injvm);
return this;
} | 3.68 |
framework_CheckBox_writeDesign | /*
* (non-Javadoc)
*
* @see com.vaadin.ui.AbstractField#writeDesign(org.jsoup.nodes.Element,
* com.vaadin.ui.declarative.DesignContext)
*/
@Override
public void writeDesign(Element design, DesignContext designContext) {
super.writeDesign(design, designContext);
CheckBox def = designContext.getDefaultInstan... | 3.68 |
pulsar_ConsumerStats_getPartitionStats | /**
* @return stats for each partition if topic is partitioned topic
*/
default Map<String, ConsumerStats> getPartitionStats() {
return Collections.emptyMap();
} | 3.68 |
flink_NetUtils_validateHostPortString | /**
* Validates if the given String represents a hostname:port.
*
* <p>Works also for ipv6.
*
* <p>See:
* http://stackoverflow.com/questions/2345063/java-common-way-to-validate-and-convert-hostport-to-inetsocketaddress
*
* @return URL object for accessing host and port
*/
private static URL validateHostPortStr... | 3.68 |
hadoop_Cluster_renewDelegationToken | /**
* Renew a delegation token
* @param token the token to renew
* @return the new expiration time
* @throws InvalidToken
* @throws IOException
* @deprecated Use {@link Token#renew} instead
*/
public long renewDelegationToken(Token<DelegationTokenIdentifier> token
) throws Invali... | 3.68 |
hadoop_FederationStateStoreFacade_addReservationHomeSubCluster | /**
* Save Reservation And HomeSubCluster Mapping.
*
* @param reservationId reservationId
* @param homeSubCluster homeSubCluster
* @throws YarnException on failure
*/
public void addReservationHomeSubCluster(ReservationId reservationId,
ReservationHomeSubCluster homeSubCluster) throws YarnException {
try {
... | 3.68 |
morf_AbstractSqlDialectTest_testTrim | /**
* Tests that Trim functionality works.
*/
@Test
public void testTrim() {
// Given
Function trim = trim(new FieldReference("field1"));
SelectStatement selectStatement = new SelectStatement(trim).from(new TableReference("schedule"));
// When
String result = testDialect.convertStatementToSQL(selectStateme... | 3.68 |
flink_CatalogContext_getConfiguration | /**
* TODO After https://issues.apache.org/jira/browse/FLINK-32427 is finished, we can get
* configuration for catalog.
*/
@Override
public Configuration getConfiguration() {
throw new UnsupportedOperationException();
} | 3.68 |
hadoop_RouterQuotaUpdateService_generateNewQuota | /**
* Generate a new quota based on old quota and current quota usage value.
* @param oldQuota Old quota stored in State Store.
* @param currentQuotaUsage Current quota usage value queried from
* subcluster.
* @return A new RouterQuotaUsage.
*/
private RouterQuotaUsage generateNewQuota(RouterQuotaUsage old... | 3.68 |
querydsl_MetaDataExporter_setPackageName | /**
* Set the package name
*
* @param packageName package name for sources
*/
public void setPackageName(String packageName) {
module.bind(SQLCodegenModule.PACKAGE_NAME, packageName);
} | 3.68 |
flink_ScalarFunction_getResultType | /**
* Returns the result type of the evaluation method with a given signature.
*
* @deprecated This method uses the old type system and is based on the old reflective
* extraction logic. The method will be removed in future versions and is only called when
* using the deprecated {@code TableEnvironment.reg... | 3.68 |
pulsar_ProducerConfiguration_getMaxPendingMessagesAcrossPartitions | /**
*
* @return the maximum number of pending messages allowed across all the partitions
*/
public int getMaxPendingMessagesAcrossPartitions() {
return conf.getMaxPendingMessagesAcrossPartitions();
} | 3.68 |
dubbo_AbstractJSONImpl_getNumberAsDouble | /**
* Gets a number from an object for the given key. If the key is not present, this returns null.
* If the value does not represent a double, throws an exception.
*/
@Override
public Double getNumberAsDouble(Map<String, ?> obj, String key) {
assert obj != null;
assert key != null;
if (!obj.containsKey... | 3.68 |
hadoop_LongValueSum_reset | /**
* reset the aggregator
*/
public void reset() {
sum = 0;
} | 3.68 |
hadoop_FileSubclusterResolver_getMountPoints | /**
* Get a list of mount points for a path.
*
* @param path Path to get the mount points under.
* @param mountPoints the mount points to choose.
* @return Return empty list if the path is a mount point but there are no
* mount points under the path. Return null if the path is not a mount
* point... | 3.68 |
hadoop_StoreContext_pathToKey | /**
* Turns a path (relative or otherwise) into an S3 key.
*
* @param path input path, may be relative to the working dir
* @return a key excluding the leading "/", or, if it is the root path, ""
*/
public String pathToKey(Path path) {
return contextAccessors.pathToKey(path);
} | 3.68 |
morf_ChangelogStatementConsumer_writeWrapped | /**
* Writes one or more lines of text, applying line wrapping.
*/
private void writeWrapped(final String text) {
// Handle the case of multiple lines
if (text.contains(System.lineSeparator())) {
for (String line : text.split(System.lineSeparator())) {
writeWrapped(line);
}
return;
}
// Wr... | 3.68 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.