name stringlengths 12 178 | code_snippet stringlengths 8 36.5k | score float64 3.26 3.68 |
|---|---|---|
hadoop_HsController_taskPage | /*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.webapp.AppController#taskPage()
*/
@Override
protected Class<? extends View> taskPage() {
return HsTaskPage.class;
} | 3.68 |
hbase_HBaseTestingUtility_memStoreTSAndTagsCombination | /**
* Create combination of memstoreTS and tags
*/
private static List<Object[]> memStoreTSAndTagsCombination() {
List<Object[]> configurations = new ArrayList<>();
configurations.add(new Object[] { false, false });
configurations.add(new Object[] { false, true });
configurations.add(new Object[] { true, fals... | 3.68 |
flink_JoinedStreams_where | /**
* Specifies a {@link KeySelector} for elements from the first input with explicit type
* information for the key type.
*
* @param keySelector The KeySelector to be used for extracting the first input's key for
* partitioning.
* @param keyType The type information describing the key type.
*/
public <KEY> ... | 3.68 |
flink_SavepointMetadata_getExistingOperators | /** @return List of {@link OperatorState} that already exists within the savepoint. */
public List<OperatorState> getExistingOperators() {
return operatorStateIndex.values().stream()
.filter(OperatorStateSpec::isExistingState)
.map(OperatorStateSpec::asExistingState)
.collect(Col... | 3.68 |
hbase_Increment_getTimeRange | /**
* Gets the TimeRange used for this increment.
*/
public TimeRange getTimeRange() {
return this.tr;
} | 3.68 |
querydsl_EnumExpression_ordinal | /**
* Get the ordinal of this enum
*
* @return ordinal number
*/
public NumberExpression<Integer> ordinal() {
if (ordinal == null) {
ordinal = Expressions.numberOperation(Integer.class, Ops.ORDINAL, mixin);
}
return ordinal;
} | 3.68 |
dubbo_ServiceNameMapping_getDefaultExtension | /**
* Get the default extension of {@link ServiceNameMapping}
*
* @return non-null {@link ServiceNameMapping}
*/
static ServiceNameMapping getDefaultExtension(ScopeModel scopeModel) {
return ScopeModelUtil.getApplicationModel(scopeModel).getDefaultExtension(ServiceNameMapping.class);
} | 3.68 |
pulsar_ComponentImpl_allowFunctionOps | /**
* @deprecated use {@link #isSuperUser(AuthenticationParameters)}
*/
@Deprecated
public boolean allowFunctionOps(NamespaceName namespaceName, String role,
AuthenticationDataSource authenticationData) {
AuthenticationParameters authParams = AuthenticationParameters.builder().clie... | 3.68 |
flink_SqlGatewayOpenApiSpecGenerator_main | /**
* Generates the Sql Gateway REST API OpenAPI spec.
*
* @param args args[0] contains the directory into which the generated files are placed
* @throws IOException if any file operation failed
*/
public static void main(String[] args) throws IOException, ConfigurationException {
String outputDirectory = args... | 3.68 |
framework_ColorPickerSelect_createColors | /**
* Creates the color.
*
* @param color
* the color
* @param rows
* the rows
* @param columns
* the columns
*
* @return the color[][]
*/
private Color[][] createColors(Color color, int rows, int columns) {
Color[][] colors = new Color[rows][columns];
float[] hsv = ... | 3.68 |
framework_DataCommunicator_getKeyMapper | /**
* Gets the {@link DataKeyMapper} used by this {@link DataCommunicator}. Key
* mapper can be used to map keys sent to the client-side back to their
* respective data objects.
*
* @return key mapper
*/
public DataKeyMapper<T> getKeyMapper() {
return keyMapper;
} | 3.68 |
hadoop_ECBlockGroup_getDataBlocks | /**
* Get data blocks
* @return data blocks
*/
public ECBlock[] getDataBlocks() {
return dataBlocks;
} | 3.68 |
hbase_HBaseTestingUtility_predicateTableAvailable | /**
* Returns a {@link Predicate} for checking that table is enabled
*/
public Waiter.Predicate<IOException> predicateTableAvailable(final TableName tableName) {
return new ExplainingPredicate<IOException>() {
@Override
public String explainFailure() throws IOException {
return explainTableAvailabilit... | 3.68 |
hbase_AccessController_checkPermissions | /**
* @deprecated since 2.2.0 and will be removed 4.0.0. Use {@link Admin#hasUserPermissions(List)}
* instead.
* @see Admin#hasUserPermissions(List)
* @see <a href="https://issues.apache.org/jira/browse/HBASE-22117">HBASE-22117</a>
*/
@Deprecated
@Override
public void checkPermissions(RpcController con... | 3.68 |
flink_JobEdge_isBroadcast | /** Gets whether the edge is broadcast edge. */
public boolean isBroadcast() {
return isBroadcast;
} | 3.68 |
flink_SavepointWriter_withConfiguration | /**
* Sets a configuration that will be applied to the stream operators used to bootstrap a new
* savepoint.
*
* @param option metadata information
* @param value value to be stored
* @param <T> type of the value to be stored
* @return The modified savepoint.
*/
public <T> SavepointWriter withConfiguration(Conf... | 3.68 |
framework_AbstractMedia_setSource | /**
* Sets a single media file as the source of the media component.
*
* @param source
*/
public void setSource(Resource source) {
clearSources();
addSource(source);
} | 3.68 |
hbase_ProcedureCoordinator_memberFinishedBarrier | /**
* Notification that the procedure had another member finished executing its in-barrier subproc
* via {@link Subprocedure#insideBarrier()}.
* @param procName name of the subprocedure that finished
* @param member name of the member that executed and released its barrier
* @param dataFromMember the... | 3.68 |
framework_BeanUtil_getMethodFromBridge | /**
* Return declared method for which {@code bridgeMethod} is generated using
* its {@code paramTypes}. If {@code bridgeMethod} is not a bridge method
* then return null.
*/
private static Method getMethodFromBridge(Method bridgeMethod,
Class<?>... paramTypes) throws SecurityException {
if (bridgeMetho... | 3.68 |
hadoop_StringValueMax_getReport | /**
* @return the string representation of the aggregated value
*/
public String getReport() {
return maxVal;
} | 3.68 |
hadoop_AbfsClient_getAbfsRestOperation | /**
* Creates an AbfsRestOperation with parameters including request headers and SAS token.
*
* @param operationType The type of the operation.
* @param httpMethod The HTTP method of the operation.
* @param url The URL associated with the operation.
* @param requestHeaders The list of HTTP... | 3.68 |
hmily_SubCoordinator_setRollbackOnly | /**
* Sets rollback only.
*/
public void setRollbackOnly() {
if (state == XaState.STATUS_PREPARING) {
state = XaState.STATUS_MARKED_ROLLBACK;
}
} | 3.68 |
MagicPlugin_BufferedMapCanvas_drawImage | // Shamelessly stolen from CraftMapCanvas.... wish they'd give us
// an extendible version or just let us create them at least :)
@Override
@SuppressWarnings("deprecation")
public void drawImage(int x, int y, Image image) {
byte[] bytes = MapPalette.imageToBytes(image);
for (int x2 = 0; x2 < image.getWidth(null... | 3.68 |
hudi_HoodieTableMetadataUtil_getPartitionIdentifier | /**
* Returns partition name for the given path.
*/
public static String getPartitionIdentifier(@Nonnull String relativePartitionPath) {
return EMPTY_PARTITION_NAME.equals(relativePartitionPath) ? NON_PARTITIONED_NAME : relativePartitionPath;
} | 3.68 |
hbase_LazyInitializedWALProvider_getProviderNoCreate | /**
* Get the provider if it already initialized, otherwise just return {@code null} instead of
* creating it.
*/
WALProvider getProviderNoCreate() {
return holder.get();
} | 3.68 |
hadoop_AbstractStoreOperation_getStoreContext | /**
* Get the store context.
* @return the context.
*/
public final StoreContext getStoreContext() {
return storeContext;
} | 3.68 |
Activiti_TablePage_getFirstResult | /**
* @return the start index of this page (ie the index of the first element in the page)
*/
public long getFirstResult() {
return firstResult;
} | 3.68 |
flink_DeclarativeAggregateFunction_mergeOperands | /**
* Merge inputs of {@link #mergeExpressions()}, these inputs are agg buffer generated by user
* definition.
*/
public final UnresolvedReferenceExpression[] mergeOperands() {
UnresolvedReferenceExpression[] aggBuffers = aggBufferAttributes();
UnresolvedReferenceExpression[] ret = new UnresolvedReferenceExp... | 3.68 |
zxing_BitSource_available | /**
* @return number of bits that can be read successfully
*/
public int available() {
return 8 * (bytes.length - byteOffset) - bitOffset;
} | 3.68 |
hibernate-validator_ConfigurationSource_getPriority | /**
* Returns this sources priority. Can be used to determine which
* configuration shall apply in case of conflicting configurations by
* several providers.
*
* @return This source's priority.
*/
public int getPriority() {
return priority;
} | 3.68 |
hudi_JavaExecutionStrategy_runClusteringForGroup | /**
* Executes clustering for the group.
*/
private List<WriteStatus> runClusteringForGroup(
HoodieClusteringGroup clusteringGroup, Map<String, String> strategyParams,
boolean preserveHoodieMetadata, String instantTime) {
List<HoodieRecord<T>> inputRecords = readRecordsForGroup(clusteringGroup, instantTime)... | 3.68 |
hbase_BucketCache_putIfAbsent | /**
* Return the previous associated value, or null if absent. It has the same meaning as
* {@link ConcurrentMap#putIfAbsent(Object, Object)}
*/
public RAMQueueEntry putIfAbsent(BlockCacheKey key, RAMQueueEntry entry) {
AtomicBoolean absent = new AtomicBoolean(false);
RAMQueueEntry re = delegate.computeIfAbsent(... | 3.68 |
framework_TabSheet_hideTabs | /**
* Hides or shows the tab selection parts ("tabs").
*
* @param tabsHidden
* true if the tabs should be hidden
* @deprecated as of 7.5, use {@link #setTabsVisible(boolean)} instead
*/
@Deprecated
public void hideTabs(boolean tabsHidden) {
setTabsVisible(!tabsHidden);
} | 3.68 |
graphhopper_VectorTileDecoder_setAutoScale | /**
* Set the autoScale setting.
*
* @param autoScale
* when true, the encoder automatically scale and return all coordinates in the 0..255 range.
* when false, the encoder returns all coordinates in the 0..extent-1 range as they are encoded.
*
*/
public void setAutoScale(boolean autoScale)... | 3.68 |
framework_HtmlInTabCaption_setup | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
* VaadinRequest)
*/
@Override
protected void setup(VaadinRequest request) {
getLayout().setSpacing(true);
TabSheet ts = new TabSheet();
ts.setCaption("TabSheet - no <u>html</u> in tab captions");
ts.setC... | 3.68 |
hbase_ChaosAgent_createEphemeralZNode | /***
* Function to create EPHEMERAL ZNODE with given path and data as params.
* @param path Path at which Ephemeral ZNode to create
* @param data Data to put under ZNode
*/
public void createEphemeralZNode(String path, byte[] data) {
zk.create(path, data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL,
cre... | 3.68 |
hudi_OrcUtils_filterRowKeys | /**
* Read the rowKey list matching the given filter, from the given ORC file. If the filter is empty, then this will
* return all the rowkeys.
*
* @param conf configuration to build fs object.
* @param filePath The ORC file path.
* @param filter record keys filter
* @return Set Set of pairs of row key and... | 3.68 |
querydsl_NumberExpression_sum | /**
* Create a {@code sum(this)} expression
*
* <p>Get the sum of this expression (aggregation)</p>
*
* @return sum(this)
*/
public NumberExpression<T> sum() {
if (sum == null) {
sum = Expressions.numberOperation(getType(), Ops.AggOps.SUM_AGG, mixin);
}
return sum;
} | 3.68 |
flink_StreamExecutionEnvironment_registerSlotSharingGroup | /**
* Register a slot sharing group with its resource spec.
*
* <p>Note that a slot sharing group hints the scheduler that the grouped operators CAN be
* deployed into a shared slot. There's no guarantee that the scheduler always deploy the
* grouped operators together. In cases grouped operators are deployed into... | 3.68 |
hmily_PropertyName_isAncestorOf | /**
* Determine if the node name of the pointing is a parent. If yes, return true.
*
* @param name name.
* @return boolean boolean
*/
public boolean isAncestorOf(final PropertyName name) {
if (this.getElements().length >= name.getElements().length) {
return false;
}
for (int i = 0; i < this.el... | 3.68 |
hadoop_TFile_main | /**
* Dumping the TFile information.
*
* @param args
* A list of TFile paths.
*/
public static void main(String[] args) {
System.out.printf("TFile Dumper (TFile %s, BCFile %s)%n", TFile.API_VERSION
.toString(), BCFile.API_VERSION.toString());
if (args.length == 0) {
System.out
.prin... | 3.68 |
hbase_HFile_getAndResetChecksumFailuresCount | /**
* Number of checksum verification failures. It also clears the counter.
*/
public static final long getAndResetChecksumFailuresCount() {
return CHECKSUM_FAILURES.sumThenReset();
} | 3.68 |
hadoop_MoveStep_getMaxDiskErrors | /**
* Gets Maximum numbers of errors to be tolerated before this
* move operation is aborted.
* @return long.
*/
@Override
public long getMaxDiskErrors() {
return maxDiskErrors;
} | 3.68 |
hbase_SnapshotQuotaObserverChore_getPeriod | /**
* Extracts the period for the chore from the configuration.
* @param conf The configuration object.
* @return The configured chore period or the default value.
*/
static int getPeriod(Configuration conf) {
return conf.getInt(SNAPSHOT_QUOTA_CHORE_PERIOD_KEY, SNAPSHOT_QUOTA_CHORE_PERIOD_DEFAULT);
} | 3.68 |
cron-utils_FieldConstraintsBuilder_monthsMapping | /**
* Creates months mapping.
*
* @return Map where strings are month names in EEE format, and integers correspond to their 1-12 mappings
*/
private static Map<String, Integer> monthsMapping() {
final Map<String, Integer> stringMapping = new HashMap<>();
stringMapping.put("JAN", 1);
stringMapping.put("FEB",... | 3.68 |
morf_TableReference_setName | /**
* @param name the name to set
* @deprecated Do not modify {@link TableReference} instances. This will be removed very soon.
*/
@Deprecated
public void setName(String name) {
this.name = name;
} | 3.68 |
druid_SQLAggregateExpr_getWithinGroup | //为了兼容之前的逻辑
@Deprecated
public SQLOrderBy getWithinGroup() {
return orderBy;
} | 3.68 |
hbase_ScannerContext_clearProgress | /**
* Clear away any progress that has been made so far. All progress fields are reset to initial
* values. Only clears progress that should reset between rows. {@link #getBlockSizeProgress()} is
* not reset because it increments for all blocks scanned whether the result is included or
* filtered.
*/
void clearPro... | 3.68 |
hbase_MiniHBaseCluster_waitOnMaster | /**
* Wait for the specified master to stop. Removes this thread from list of running threads.
* @return Name of master that just went down.
*/
public String waitOnMaster(final int serverNumber) {
return this.hbaseCluster.waitOnMaster(serverNumber);
} | 3.68 |
hadoop_ReferenceCountMap_remove | /**
* Delete the reference. Decrease the reference count for the instance, if
* any. On all references removal delete the instance from the map.
*
* @param key Key to remove the reference.
*/
public void remove(E key) {
E value = referenceMap.get(key);
if (value != null && value.decrementAndGetRefCount() == 0... | 3.68 |
flink_Path_getFileSystem | /**
* Returns the FileSystem that owns this Path.
*
* @return the FileSystem that owns this Path
* @throws IOException thrown if the file system could not be retrieved
*/
public FileSystem getFileSystem() throws IOException {
return FileSystem.get(this.toUri());
} | 3.68 |
hbase_MBeanSourceImpl_register | /**
* Register an mbean with the underlying metrics system
* @param serviceName Metrics service/system name
* @param metricsName name of the metrics object to expose
* @param theMbean the actual MBean
* @return ObjectName from jmx
*/
@Override
public ObjectName register(String serviceName, String metricsName, ... | 3.68 |
flink_Tuple11_of | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
pu... | 3.68 |
hadoop_MapReduceJobPropertiesParser_extractMaxHeapOpts | /**
* Extracts the -Xmx heap option from the specified string.
*/
public static void extractMaxHeapOpts(final String javaOptions,
List<String> heapOpts,
List<String> others) {
for (String opt : javaOptions.split(" ")) {
Matcher matcher... | 3.68 |
hbase_MasterProcedureScheduler_wakeRegions | /**
* Wake the procedures waiting for the specified regions
* @param procedure the procedure that was holding the regions
* @param regionInfos the list of regions the procedure was holding
*/
public void wakeRegions(final Procedure<?> procedure, final TableName table,
final RegionInfo... regionInfos) {
Arrays... | 3.68 |
framework_WebBrowser_getTimezoneOffset | /**
* Returns the browser-reported TimeZone offset in milliseconds from GMT.
* This includes possible daylight saving adjustments, to figure out which
* TimeZone the user actually might be in, see
* {@link #getRawTimezoneOffset()}.
*
* @see WebBrowser#getRawTimezoneOffset()
* @return timezone offset in milliseco... | 3.68 |
hadoop_AbfsOutputStream_getOutputStreamStatistics | /**
* Getter method for AbfsOutputStream statistics.
*
* @return statistics for AbfsOutputStream.
*/
@VisibleForTesting
public AbfsOutputStreamStatistics getOutputStreamStatistics() {
return outputStreamStatistics;
} | 3.68 |
hbase_Scan_getBatch | /** Returns maximum number of values to return for a single call to next() */
public int getBatch() {
return this.batch;
} | 3.68 |
hadoop_ManifestSuccessData_putDiagnostic | /**
* Add a diagnostics entry.
* @param key name
* @param value value
*/
public void putDiagnostic(String key, String value) {
diagnostics.put(key, value);
} | 3.68 |
hbase_MasterRpcServices_rpcPreCheck | /**
* Checks for the following pre-checks in order:
* <ol>
* <li>Master is initialized</li>
* <li>Rpc caller has admin permissions</li>
* </ol>
* @param requestName name of rpc request. Used in reporting failures to provide context.
* @throws ServiceException If any of the above listed pre-check fails.
*/
priva... | 3.68 |
hudi_BaseRollbackHelper_performRollback | /**
* Performs all rollback actions that we have collected in parallel.
*/
public List<HoodieRollbackStat> performRollback(HoodieEngineContext context, HoodieInstant instantToRollback,
List<HoodieRollbackRequest> rollbackRequests) {
int parallelism = Math.max(Math.min... | 3.68 |
framework_DragSourceExtensionConnector_addDraggable | /**
* Makes the given element draggable and adds class name.
*
* @param element
* Element to be set draggable.
*/
protected void addDraggable(Element element) {
element.setDraggable(Element.DRAGGABLE_TRUE);
element.addClassName(
getStylePrimaryName(element) + STYLE_SUFFIX_DRAGSOURCE)... | 3.68 |
hbase_ZKUtil_createWithParents | /**
* Creates the specified node and all parent nodes required for it to exist. The creation of
* parent znodes is not atomic with the leaf znode creation but the data is written atomically
* when the leaf node is created. No watches are set and no errors are thrown if the node already
* exists. The nodes created ... | 3.68 |
flink_InMemoryPartition_getBlockCount | /** @return number of segments owned by partition */
public int getBlockCount() {
return this.partitionPages.size();
} | 3.68 |
hadoop_MountVolumeMap_getCapacityRatioByMountAndStorageType | /**
* Return capacity ratio.
* If not exists, return 1 to use full capacity.
*/
double getCapacityRatioByMountAndStorageType(String mount,
StorageType storageType) {
if (mountVolumeMapping.containsKey(mount)) {
return mountVolumeMapping.get(mount).getCapacityRatio(storageType);
}
return 1;
} | 3.68 |
flink_BatchTask_initLocalStrategies | /**
* NOTE: This method must be invoked after the invocation of {@code #initInputReaders()} and
* {@code #initInputSerializersAndComparators(int)}!
*/
protected void initLocalStrategies(int numInputs) throws Exception {
final MemoryManager memMan = getMemoryManager();
final IOManager ioMan = getIOManager();... | 3.68 |
hadoop_NMTokenCache_removeToken | /**
* Removes NMToken for specified node manager
* @param nodeAddr node address (host:port)
*/
@Private
@VisibleForTesting
public void removeToken(String nodeAddr) {
nmTokens.remove(nodeAddr);
} | 3.68 |
framework_AbsoluteLayout_setCSSString | /**
* Sets the position attributes using CSS syntax. Attributes not
* included in the string are reset to their unset states.
*
* <code><pre>
* setCSSString("top:10px;left:20%;z-index:16;");
* </pre></code>
*
* @param css
*/
public void setCSSString(String css) {
topValue = rightValue = bottomValue = leftV... | 3.68 |
hbase_Procedure_setParentProcId | /**
* Called by the ProcedureExecutor to assign the parent to the newly created procedure.
*/
protected void setParentProcId(long parentProcId) {
this.parentProcId = parentProcId;
} | 3.68 |
hadoop_JobMetaData_getRecurrenceId | /**
* Get {@link RecurrenceId}.
*
* @return {@link RecurrenceId}.
*/
public final RecurrenceId getRecurrenceId() {
return recurrenceId;
} | 3.68 |
hadoop_SchedulingRequest_allocationTags | /**
* Set the <code>allocationTags</code> of the request.
*
* @see SchedulingRequest#setAllocationTags(Set)
* @param allocationTags <code>allocationsTags</code> of the request
* @return {@link SchedulingRequest.SchedulingRequestBuilder}
*/
@Public
@Unstable
public SchedulingRequestBuilder allocationTags(Set<Strin... | 3.68 |
pulsar_TopicsBase_buildMessage | // Build pulsar message from REST request.
private List<Message> buildMessage(ProducerMessages producerMessages, Schema schema,
String producerName, TopicName topicName) {
List<ProducerMessage> messages;
List<Message> pulsarMessages = new ArrayList<>();
messages = produce... | 3.68 |
hadoop_AbfsHttpOperation_getConnProperty | /**
* Gets the connection request property for a key.
* @param key The request property key.
* @return request property value.
*/
String getConnProperty(String key) {
return connection.getRequestProperty(key);
} | 3.68 |
hadoop_DockerCommandExecutor_executeDockerCommand | /**
* Execute a docker command and return the output.
*
* @param dockerCommand the docker command to run.
* @param containerId the id of the container.
* @param env environment for the container.
* @param privilegedOperationExecutor the privileged operations e... | 3.68 |
framework_VAbstractCalendarPanel_onMouseOut | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.MouseOutHandler#onMouseOut(com.google
* .gwt.event.dom.client.MouseOutEvent)
*/
@Override
public void onMouseOut(MouseOutEvent event) {
if (mouseTimer != null) {
mouseTimer.cancel();
}
} | 3.68 |
hbase_ClientSnapshotDescriptionUtils_assertSnapshotRequestIsValid | /**
* Check to make sure that the description of the snapshot requested is valid
* @param snapshot description of the snapshot
* @throws IllegalArgumentException if the name of the snapshot or the name of the table to
* snapshot are not valid names
*/
public static void assertSnaps... | 3.68 |
framework_FocusableScrollPanel_getScrollPosition | /**
* Gets the vertical scroll position.
*
* @return the vertical scroll position, in pixels
*/
public int getScrollPosition() {
if (getElement().getPropertyJSO("_vScrollTop") != null) {
return getElement().getPropertyInt("_vScrollTop");
} else {
return getElement().getScrollTop();
}
} | 3.68 |
flink_ManuallyTriggeredScheduledExecutorService_trigger | /**
* Triggers the next queued runnable and executes it synchronously. This method throws an
* exception if no Runnable is currently queued.
*/
public void trigger() {
final Runnable next;
synchronized (queuedRunnables) {
next = queuedRunnables.removeFirst();
}
next.run();
} | 3.68 |
flink_KryoSerializer_getKryoInstance | /**
* Returns the Chill Kryo Serializer which is implicitly added to the classpath via
* flink-runtime. Falls back to the default Kryo serializer if it can't be found.
*
* @return The Kryo serializer instance.
*/
private Kryo getKryoInstance() {
try {
// check if ScalaKryoInstantiator is in class path... | 3.68 |
framework_Navigator_addView | /**
* Registers a view class for a view name.
* <p>
* Registering another view with a name that is already registered
* overwrites the old registration of the same type.
* <p>
* A new view instance is created every time a view is requested.
*
* @param viewName
* String that identifies a view (not nu... | 3.68 |
dubbo_RpcContext_asyncCall | /**
* one way async call, send request only, and result is not required
*
* @param runnable
*/
public void asyncCall(Runnable runnable) {
try {
setAttachment(Constants.RETURN_KEY, Boolean.FALSE.toString());
runnable.run();
} catch (Throwable e) {
// FIXME should put exception in futu... | 3.68 |
hadoop_StageConfig_getIoProcessors | /**
* Submitter for doing IO against the store other than
* manifest processing.
*/
public TaskPool.Submitter getIoProcessors() {
return ioProcessors;
} | 3.68 |
querydsl_JDOQueryFactory_selectFrom | /**
* Create a new {@link JDOQuery} instance with the given projection
*
* @param expr projection and source
* @param <T>
* @return select(expr).from(expr)
*/
public <T> JDOQuery<T> selectFrom(EntityPath<T> expr) {
return select(expr).from(expr);
} | 3.68 |
hbase_Client_removeExtraHeader | /**
* Remove an extra header.
*/
public void removeExtraHeader(final String name) {
extraHeaders.remove(name);
} | 3.68 |
hbase_ReplicationSourceManager_refreshSources | /**
* Close the previous replication sources of this peer id and open new sources to trigger the new
* replication state changes or new replication config changes. Here we don't need to change
* replication queue storage and only to enqueue all logs to the new replication source
* @param peerId the id of the replic... | 3.68 |
graphhopper_AbstractSRTMElevationProvider_calcIntKey | // use int key instead of string for lower memory usage
int calcIntKey(double lat, double lon) {
// we could use LinearKeyAlgo but this is simpler as we only need integer precision:
return (down(lat) + 90) * 1000 + down(lon) + 180;
} | 3.68 |
morf_AbstractSqlDialectTest_testSelectWithConcatenation | /**
* Tests concatenation in a select with {@linkplain FieldReference}s and
* {@linkplain FieldLiteral}s.
*/
@Test
public void testSelectWithConcatenation() {
SelectStatement stmt = new SelectStatement(new ConcatenatedField(new FieldReference("assetDescriptionLine1"), new FieldLiteral(
" "), new FieldReferen... | 3.68 |
hbase_JmxCacheBuster_clearJmxCache | /**
* For JMX to forget about all previously exported metrics.
*/
public static void clearJmxCache() {
if (LOG.isTraceEnabled()) {
LOG.trace("clearing JMX Cache" + StringUtils.stringifyException(new Exception()));
}
// If there are more then 100 ms before the executor will run then everything should be merg... | 3.68 |
flink_FsStateBackend_getWriteBufferSize | /**
* Gets the write buffer size for created checkpoint stream.
*
* <p>If not explicitly configured, this is the default value of {@link
* CheckpointingOptions#FS_WRITE_BUFFER_SIZE}.
*
* @return The write buffer size, in bytes.
*/
public int getWriteBufferSize() {
return writeBufferSize >= 0
? wr... | 3.68 |
hadoop_FederationNamespaceInfo_getClusterId | /**
* The HDFS cluster id for this namespace.
*
* @return Cluster identifier.
*/
public String getClusterId() {
return this.clusterId;
} | 3.68 |
hadoop_CommitContext_getIOStatisticsContext | /**
* IOStatistics context of the created thread.
* @return the IOStatistics.
*/
public IOStatisticsContext getIOStatisticsContext() {
return ioStatisticsContext;
} | 3.68 |
flink_ExecNodeUtil_createOneInputTransformation | /** Create a {@link OneInputTransformation} with memoryBytes. */
public static <I, O> OneInputTransformation<I, O> createOneInputTransformation(
Transformation<I> input,
TransformationMetadata transformationMeta,
StreamOperatorFactory<O> operatorFactory,
TypeInformation<O> outputType,
... | 3.68 |
hbase_RequestConverter_buildMoveRegionRequest | /**
* Create a protocol buffer MoveRegionRequest
* @return A MoveRegionRequest
*/
public static MoveRegionRequest buildMoveRegionRequest(byte[] encodedRegionName,
ServerName destServerName) {
MoveRegionRequest.Builder builder = MoveRegionRequest.newBuilder();
builder
.setRegion(buildRegionSpecifier(RegionS... | 3.68 |
pulsar_SingletonCleanerListener_objectMapperFactoryClearCaches | // Call ObjectMapperFactory.clearCaches() using reflection to clear up classes held in
// the singleton Jackson ObjectMapper instances
private static void objectMapperFactoryClearCaches() {
if (OBJECTMAPPERFACTORY_CLEARCACHES_METHOD != null) {
try {
OBJECTMAPPERFACTORY_CLEARCACHES_METHOD.invoke(... | 3.68 |
hudi_TransactionUtils_getLastCompletedTxnInstantAndMetadata | /**
* Get the last completed transaction hoodie instant and {@link HoodieCommitMetadata#getExtraMetadata()}.
*
* @param metaClient
* @return
*/
public static Option<Pair<HoodieInstant, Map<String, String>>> getLastCompletedTxnInstantAndMetadata(
HoodieTableMetaClient metaClient) {
Option<HoodieInstant> hoodi... | 3.68 |
hadoop_NamenodeStatusReport_setNamenodeInfo | /**
* Set the namenode blocks information.
*
* @param numCorruptFiles number of corrupt files.
* @param numOfMissingBlocksWithReplicationFactorOne number of missing
* blocks with rep one.
* @param highestPriorityLowRedundancyRepBlocks number of high priority low
* redundancy rep blocks.
* @param highPriorityLow... | 3.68 |
hadoop_DatanodeLocalInfo_getSoftwareVersion | /** get software version */
public String getSoftwareVersion() {
return this.softwareVersion;
} | 3.68 |
pulsar_SchemaDataValidator_validateSchemaData | /**
* Validate if the schema data is well formed.
*
* @param schemaData schema data to validate
* @throws InvalidSchemaDataException if the schema data is not in a valid form.
*/
static void validateSchemaData(SchemaData schemaData) throws InvalidSchemaDataException {
switch (schemaData.getType()) {
ca... | 3.68 |
flink_JobSchedulingPlan_empty | /**
* Create an empty {@link JobSchedulingPlan} with no information about vertices or allocations.
*/
public static JobSchedulingPlan empty() {
return new JobSchedulingPlan(VertexParallelism.empty(), Collections.emptyList());
} | 3.68 |
querydsl_GeometryExpression_srid | /**
* Returns the Spatial Reference System ID for this geometric object. This will normally be a
* foreign key to an index of reference systems stored in either the same or some other datastore.
*
* @return SRID
*/
public NumberExpression<Integer> srid() {
if (srid == null) {
srid = Expressions.numberO... | 3.68 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.