| name (string, lengths 12–178) | code_snippet (string, lengths 8–36.5k) | score (float64, range 3.26–3.68) |
|---|---|---|
zxing_CameraManager_setManualFramingRect | /**
 * Allows third-party apps to specify the scanning rectangle dimensions, rather than determining
 * them automatically based on screen resolution.
*
* @param width The width in pixels to scan.
* @param height The height in pixels to scan.
*/
public synchronized void setManualFramingRect(int width, int height) {
... | 3.68 |
hadoop_AppIdKeyConverter_decode | /*
* (non-Javadoc)
*
 * Converts/decodes a 12-byte representation of an app id for (row) keys to an
 * app id in string format which can be returned back to the client.
 * For decoding, the 12 bytes are interpreted as 8 bytes of inverted cluster
 * timestamp (long) followed by 4 bytes of inverted sequence id (int). Calls
* Appli... | 3.68 |
hudi_InternalSchemaUtils_searchSchema | /**
 * Searches for the target internalSchema by version number.
 *
 * @param versionId the internalSchema version to be searched.
 * @param treeMap the collection of internalSchemas to be searched.
 * @return an internalSchema.
*/
public static InternalSchema searchSchema(long versionId, TreeMap<Long, InternalSchema> treeMap) {
if (t... | 3.68 |
hbase_SimpleMutableByteRange_putVLong | // Copied from com.google.protobuf.CodedOutputStream v2.5.0 writeRawVarint64
@Override
public int putVLong(int index, long val) {
int rPos = 0;
while (true) {
if ((val & ~0x7F) == 0) {
bytes[offset + index + rPos] = (byte) val;
break;
} else {
bytes[offset + index + rPos] = (byte) ((val & ... | 3.68 |
hmily_HmilyXaStatement_getXaConnection | /**
* Gets xa connection.
*
* @return the xa connection
*/
public synchronized XAConnection getXaConnection() {
if (this.xaConnection == null) {
      throw new IllegalArgumentException("connection does not implement XAConnection");
}
return xaConnection;
} | 3.68 |
flink_PrintStyle_tableauWithTypeInferredColumnWidths | /**
* Create a new {@link TableauStyle} using column widths computed from the type.
*
* @param schema the schema of the data to print
* @param converter the converter to use to convert field values to string
 * @param maxColumnWidth the maximum column width
* @param printNullAsEmpty A flag to indicate whether null should ... | 3.68 |
hadoop_EntityGroupFSTimelineStoreMetrics_incrNoRefreshCacheRead | // Cache related
public void incrNoRefreshCacheRead() {
noRefreshCacheRead.incr();
} | 3.68 |
hadoop_UnmanagedApplicationManager_forceKillApplication | /**
* Force kill the UAM.
*
* @return kill response
 * @throws IOException if it fails to create the rmProxy
* @throws YarnException if force kill fails
*/
public KillApplicationResponse forceKillApplication()
throws IOException, YarnException {
shutDownConnections();
KillApplicationRequest request =
KillA... | 3.68 |
hbase_AsyncTable_getScanner | /**
* Gets a scanner on the current table for the given family and qualifier.
* @param family The column family to scan.
* @param qualifier The column qualifier to scan.
* @return A scanner.
*/
default ResultScanner getScanner(byte[] family, byte[] qualifier) {
return getScanner(new Scan().addColumn(family, q... | 3.68 |
hbase_ConcurrentMapUtils_computeIfAbsent | /**
* In HBASE-16648 we found that ConcurrentHashMap.get is much faster than computeIfAbsent if the
* value already exists. Notice that the implementation does not guarantee that the supplier will
* only be executed once.
*/
public static <K, V> V computeIfAbsent(ConcurrentMap<K, V> map, K key, Supplier<V> supplier... | 3.68 |
framework_DataCommunicator_getActiveDataHandler | /**
* Returns the active data handler.
*
* @return the active data handler
* @since 8.0.6
*/
protected ActiveDataHandler getActiveDataHandler() {
return handler;
} | 3.68 |
hbase_ZNodeClearer_writeMyEphemeralNodeOnDisk | /**
* Logs the errors without failing on exception.
*/
public static void writeMyEphemeralNodeOnDisk(String fileContent) {
String fileName = ZNodeClearer.getMyEphemeralNodeFileName();
if (fileName == null) {
LOG.warn("Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared "
+ "on cras... | 3.68 |
hadoop_KeyProviderCache_invalidateCache | /**
* Invalidate cache. KeyProviders in the cache will be closed by cache hook.
*/
@VisibleForTesting
synchronized void invalidateCache() {
LOG.debug("Invalidating all cached KeyProviders.");
if (cache != null) {
cache.invalidateAll();
}
} | 3.68 |
framework_DragSourceExtension_setEffectAllowed | /**
* Sets the allowed effects for the current drag source element. Used for
* setting client side {@code DataTransfer.effectAllowed} parameter for the
* drag event.
* <p>
* By default the value is {@link EffectAllowed#UNINITIALIZED} which is
* equivalent to {@link EffectAllowed#ALL}.
*
* @param effect
* ... | 3.68 |
dubbo_DubboCertManager_signWithEcdsa | /**
* Generate key pair with ECDSA
*
* @return key pair
*/
protected static KeyPair signWithEcdsa() {
KeyPair keyPair = null;
try {
ECGenParameterSpec ecSpec = new ECGenParameterSpec("secp256r1");
KeyPairGenerator g = KeyPairGenerator.getInstance("EC");
g.initialize(ecSpec, new Secur... | 3.68 |
flink_Channel_getReplicationFactor | /**
* Returns the replication factor of the connection.
*
* @return The replication factor of the connection.
*/
public int getReplicationFactor() {
return this.replicationFactor;
} | 3.68 |
hibernate-validator_InheritedMethodsHelper_run | /**
* Runs the given privileged action, using a privileged block if required.
* <p>
* <b>NOTE:</b> This must never be changed into a publicly available method to avoid execution of arbitrary
* privileged actions within HV's protection domain.
*/
@IgnoreForbiddenApisErrors(reason = "SecurityManager is deprecated in... | 3.68 |
Activiti_TreeMethodExpression_getMethodInfo | /**
* Evaluates the expression and answers information about the method
* @param context used to resolve properties (<code>base.property</code> and <code>base[property]</code>)
* @return method information or <code>null</code> for literal expressions
* @throws ELException if evaluation fails (e.g. suitable method n... | 3.68 |
flink_NFACompiler_headOfGroup | /**
* Checks if the given pattern is the head pattern of the current group pattern.
*
* @param pattern the pattern to be checked
* @return {@code true} iff the given pattern is in a group pattern and it is the head
* pattern of the group pattern, {@code false} otherwise
*/
private boolean headOfGroup(Pattern<... | 3.68 |
shardingsphere-elasticjob_LeaderService_removeLeader | /**
* Remove leader and trigger leader election.
*/
public void removeLeader() {
jobNodeStorage.removeJobNodeIfExisted(LeaderNode.INSTANCE);
} | 3.68 |
hbase_AssignmentVerificationReport_getDispersionInformation | /**
* Return a list which contains 3 elements: average dispersion score, max dispersion score and min
* dispersion score as first, second and third elements, respectively.
*/
public List<Float> getDispersionInformation() {
List<Float> dispersion = new ArrayList<>();
dispersion.add(avgDispersionScore);
dispersi... | 3.68 |
hbase_ChaosAgent_createZNode | /***
 * Creates a PERSISTENT ZNode with the given path and data.
 * @param path Path at which to create the ZNode
 * @param data Data to put under the ZNode
*/
public void createZNode(String path, byte[] data) {
zk.create(path, data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT, createZNodeCallback... | 3.68 |
flink_ExecutionVertex_notifyStateTransition | /** Simply forward this notification. */
void notifyStateTransition(
Execution execution, ExecutionState previousState, ExecutionState newState) {
// only forward this notification if the execution is still the current execution
// otherwise we have an outdated execution
if (isCurrentExecution(execu... | 3.68 |
hadoop_ExitUtil_terminate | /**
* Terminate the current process. Note that terminate is the *only* method
* that should be used to terminate the daemon processes.
*
* @param status exit code
* @param msg message used to create the {@code ExitException}
* @throws ExitException if {@link System#exit(int)} is disabled.
*/
public static void t... | 3.68 |
flink_NFAState_resetStateChanged | /** Reset the changed bit checked via {@link #isStateChanged()} to {@code false}. */
public void resetStateChanged() {
this.stateChanged = false;
} | 3.68 |
hbase_ReplicationSourceManager_getFs | /**
* Get the handle on the local file system
* @return Handle on the local file system
*/
public FileSystem getFs() {
return this.fs;
} | 3.68 |
zxing_DecoderResult_getErasures | /**
* @return number of erasures corrected, or {@code null} if not applicable
*/
public Integer getErasures() {
return erasures;
} | 3.68 |
flink_CoGroupOperator_with | /**
* Finalizes a CoGroup transformation by applying a {@link
* org.apache.flink.api.common.functions.RichCoGroupFunction} to groups of elements
* with identical keys.
*
 * <p>Each CoGroupFunction call returns an arbitrary number of elements.
*
* @param function The CoGroupFunction that is called for all groups of el... | 3.68 |
hbase_DefaultMetricsSystemHelper_removeSourceName | /**
* Unfortunately Hadoop tries to be too-clever and permanently keeps track of all names registered
* so far as a Source, thus preventing further re-registration of the source with the same name.
* In case of dynamic metrics tied to region-lifecycles, this becomes a problem because we would
* like to be able to r... | 3.68 |
hbase_NamespaceStateManager_checkAndUpdateNamespaceRegionCount | /**
 * Checks and updates the region count for an existing table, to handle scenarios like restoring a snapshot.
 * @param name name of the table whose region count needs to be checked and updated
* @param incr count of regions
* @throws QuotaExceededException if quota exceeds for the number of regions allowed in a
* ... | 3.68 |
hadoop_NMTokenCache_clearCache | /**
 * Removes all the NM tokens from the cache.
*/
@Private
@VisibleForTesting
public void clearCache() {
nmTokens.clear();
} | 3.68 |
framework_Tree_getVisibleItemIds | /**
* Gets the visible item ids.
*
* @see Select#getVisibleItemIds()
*/
@Override
public Collection<?> getVisibleItemIds() {
final LinkedList<Object> visible = new LinkedList<Object>();
// Iterates through the hierarchical tree using a stack of iterators
final Stack<Iterator<?>> iteratorStack = new Stack<I... | 3.68 |
flink_BlobUtils_readFully | /**
* Auxiliary method to read a particular number of bytes from an input stream. This method
* blocks until the requested number of bytes have been read from the stream. If the stream
* cannot offer enough data, an {@link EOFException} is thrown.
*
* @param inputStream The input stream to read the data from.
* @... | 3.68 |
flink_CheckpointedPosition_getRecordsAfterOffset | /** Gets the records to skip after the offset. */
public long getRecordsAfterOffset() {
return recordsAfterOffset;
} | 3.68 |
flink_ReusingBuildFirstReOpenableHashJoinIterator_reopenProbe | /**
 * Sets a new input for the probe side.
*
* @throws IOException
*/
public void reopenProbe(MutableObjectIterator<V2> probeInput) throws IOException {
reopenHashTable.reopenProbe(probeInput);
} | 3.68 |
hadoop_HdfsDataOutputStream_getCurrentBlockReplication | /**
* Get the actual number of replicas of the current block.
*
* This can be different from the designated replication factor of the file
* because the namenode does not maintain replication for the blocks which are
* currently being written to. Depending on the configuration, the client may
* continue to write ... | 3.68 |
flink_TaskSlot_generateSlotOffer | /**
* Generate the slot offer from this TaskSlot.
*
 * @return The slot offer which this task slot can provide
*/
public SlotOffer generateSlotOffer() {
Preconditions.checkState(
TaskSlotState.ACTIVE == state || TaskSlotState.ALLOCATED == state,
"The task slot is not in state active or all... | 3.68 |
hudi_JavaExecutionStrategy_readRecordsForGroupBaseFiles | /**
* Read records from baseFiles.
*/
private List<HoodieRecord<T>> readRecordsForGroupBaseFiles(List<ClusteringOperation> clusteringOps) {
List<HoodieRecord<T>> records = new ArrayList<>();
clusteringOps.forEach(clusteringOp -> {
try (HoodieFileReader baseFileReader = HoodieFileReaderFactory.getReaderFactory... | 3.68 |
querydsl_JTSMultiCurveExpression_isClosed | /**
* Returns 1 (TRUE) if this MultiCurve is closed [StartPoint ( ) = EndPoint ( ) for each
* Curve in this MultiCurve].
*
* @return closed
*/
public BooleanExpression isClosed() {
if (closed == null) {
closed = Expressions.booleanOperation(SpatialOps.IS_CLOSED, mixin);
}
return closed;
} | 3.68 |
hudi_HoodieTable_getCompletedCleanTimeline | /**
* Get only the completed (no-inflights) clean timeline.
*/
public HoodieTimeline getCompletedCleanTimeline() {
return getActiveTimeline().getCleanerTimeline().filterCompletedInstants();
} | 3.68 |
querydsl_AbstractSQLClause_addListener | /**
* Add a listener
*
* @param listener listener to add
*/
public void addListener(SQLListener listener) {
listeners.add(listener);
} | 3.68 |
flink_KeyGroupRangeAssignment_computeKeyGroupForKeyHash | /**
* Assigns the given key to a key-group index.
*
* @param keyHash the hash of the key to assign
* @param maxParallelism the maximum supported parallelism, aka the number of key-groups.
* @return the key-group to which the given key is assigned
*/
public static int computeKeyGroupForKeyHash(int keyHash, int max... | 3.68 |
rocketmq-connect_WorkerDirectTask_assignment | /**
 * The partitions currently assigned to this task for processing.
*
* @return the partition list
*/
@Override
public Set<RecordPartition> assignment() {
return null;
} | 3.68 |
hadoop_Hadoop20JHParser_canParse | /**
* Can this parser parse the input?
*
* @param input
* @return Whether this parser can parse the input.
* @throws IOException
*
* We will deem a stream to be a good 0.20 job history stream if the
* first line is exactly "Meta VERSION=\"1\" ."
*/
public static boolean canParse(InputStre... | 3.68 |
hbase_ProcedureEvent_getSuspendedProcedures | /**
* Access to suspendedProcedures is 'synchronized' on this object, but it's fine to return it here
* for tests.
*/
public ProcedureDeque getSuspendedProcedures() {
return suspendedProcedures;
} | 3.68 |
morf_CompositeSchema_tableNames | /**
* @see org.alfasoftware.morf.metadata.Schema#tableNames()
*/
@Override
public Collection<String> tableNames() {
Set<String> result = Sets.newHashSet();
Set<String> seenTables = Sets.newHashSet();
for (Schema schema : delegates) {
for (Table table : schema.tables()) {
if (seenTables.add(table.getNa... | 3.68 |
hbase_ZKProcedureCoordinator_sendAbortToMembers | /**
 * This is the abort message sent by the coordinator to members. TODO: this code isn't actually
 * used, but can be used to issue a cancellation from the coordinator.
*/
@Override
final public void sendAbortToMembers(Procedure proc, ForeignException ee) {
String procName = proc.getName();
LOG.debug("Aborting... | 3.68 |
morf_AliasedField_minus | /**
* @param expression value to subtract from this field.
* @return A new expression using {@link MathsField} and {@link MathsOperator#MINUS}.
*/
public final MathsField minus(AliasedField expression) {
return new MathsField(this, MathsOperator.MINUS, potentiallyBracketExpression(expression));
} | 3.68 |
hadoop_LocalResolver_getNamenodesSubcluster | /**
* Get the Namenode mapping from the subclusters from the Membership store. As
* the Routers are usually co-located with Namenodes, we also check for the
* local address for this Router here.
*
* @return NN IP -> Subcluster.
*/
private Map<String, String> getNamenodesSubcluster(
MembershipStore membershipS... | 3.68 |
hbase_RollingStatCalculator_getMean | /** Returns mean of the data values that are in the current list of data values */
public double getMean() {
return this.currentSum / (double) numberOfDataValues;
} | 3.68 |
framework_HierarchicalDataCommunicator_setDataProvider | /**
* Set the current hierarchical data provider for this communicator.
*
* @param dataProvider
* the data provider to set, must extend
* {@link HierarchicalDataProvider}, not <code>null</code>
* @param initialFilter
* the initial filter value to use, or <code>null</code> to not
... | 3.68 |
hadoop_Times_parseISO8601ToLocalTimeInMillis | /**
 * Given an ISO-formatted string with format "yyyy-MM-dd'T'HH:mm:ss.SSSZ", returns the
 * epoch time for the local time zone.
* @param isoString in format of "yyyy-MM-dd'T'HH:mm:ss.SSSZ".
* @return epoch time for local time zone.
* @throws ParseException if given ISO formatted string can not be parsed.
*/
public static lon... | 3.68 |
flink_FlatMapNode_computeOperatorSpecificDefaultEstimates | /**
 * Computes the estimates for the FlatMap operator. Since it un-nests, we assume a cardinality
 * increase. To give the system a hint at the data increase, we take a default magic number of a
 * 5x increase.
*/
@Override
protected void computeOperatorSpecificDefaultEstimates(DataStatistics statistics) {
this.... | 3.68 |
flink_SharedSlot_allocateLogicalSlot | /**
* Registers an allocation request for a logical slot.
*
* <p>The logical slot request is complete once the underlying physical slot request is
* complete.
*
* @param executionVertexId {@link ExecutionVertexID} of the execution for which to allocate the
* logical slot
* @return the logical slot future
*... | 3.68 |
pulsar_ObjectMapperFactory_replaceSingletonInstances | /*
* Replaces the existing singleton ObjectMapper instances with new instances.
* This is used in tests to ensure that classloaders and class references don't leak between tests.
*/
private static void replaceSingletonInstances() {
MAPPER_REFERENCE.set(new MapperReference(createObjectMapperInstance()));
INST... | 3.68 |
hadoop_SCMStore_createAppCheckerService | /**
* Create an instance of the AppChecker service via reflection based on the
* {@link YarnConfiguration#SCM_APP_CHECKER_CLASS} parameter.
*
* @param conf
* @return an instance of the AppChecker class
*/
@Private
@SuppressWarnings("unchecked")
public static AppChecker createAppCheckerService(Configuration conf)... | 3.68 |
AreaShop_BuyRegion_isInResellingMode | /**
* Check if the region is being resold.
* @return true if the region is available for reselling, otherwise false
*/
public boolean isInResellingMode() {
return config.getBoolean("buy.resellMode");
} | 3.68 |
flink_YarnApplicationFileUploader_registerSingleLocalResource | /**
* Register a single local/remote resource and adds it to <tt>localResources</tt>.
*
* @param key the key to add the resource under
* @param resourcePath path of the resource to be registered
* @param relativeDstPath the relative path at the target location (this will be prefixed by the
* application-speci... | 3.68 |
MagicPlugin_BaseSpell_onPlayerDeath | /**
 * Listener method, called on player death for registered spells.
*
* @param event The original entity death event
*/
public void onPlayerDeath(EntityDeathEvent event)
{
} | 3.68 |
morf_DataValueLookup_getValues | /**
* Returns all the key/value pairs stored.
*
* @return An iterable of data values.
*/
public default Iterable<? extends DataValue> getValues() {
throw new UnsupportedOperationException(
"Data value lookup type " + getClass().getName() + " currently lacks supported for getValues()");
} | 3.68 |
hadoop_CloseableTaskPoolSubmitter_close | /**
* Shut down the pool.
*/
@Override
public void close() {
if (pool != null) {
pool.shutdown();
pool = null;
}
} | 3.68 |
pulsar_PulsarClientImplementationBindingImpl_convertKeyValueDataStringToSchemaInfoSchema | /**
* Convert the key/value schema info data json bytes to key/value schema info data bytes.
*
* @param keyValueSchemaInfoDataJsonBytes the key/value schema info data json bytes
* @return the key/value schema info data bytes
*/
public byte[] convertKeyValueDataStringToSchemaInfoSchema(byte[] keyValueSchemaInfoData... | 3.68 |
hbase_ZkSplitLogWorkerCoordination_taskLoop | /**
* Wait for tasks to become available at /hbase/splitlog zknode. Grab a task one at a time. This
* policy puts an upper-limit on the number of simultaneous log splitting that could be happening
* in a cluster.
* <p>
* Synchronization using <code>taskReadySeq</code> ensures that it will try to grab every task
*... | 3.68 |
pulsar_AuthorizationService_allowNamespaceOperationAsync | /**
* Grant authorization-action permission on a namespace to the given client.
*
* @param namespaceName
* @param operation
* @param role
* @param authData
* additional authdata in json for targeted authorization provider
 * @throws IllegalArgumentException when the namespace is not found
* @throws IllegalS... | 3.68 |
hadoop_BlockManagerParameters_getMaxBlocksCount | /**
* @return The max blocks count to be kept in cache at any time.
*/
public int getMaxBlocksCount() {
return maxBlocksCount;
} | 3.68 |
streampipes_ZipFileExtractor_extractZipToMap | // TODO used by export feature - extend this to support binaries
public Map<String, byte[]> extractZipToMap() throws IOException {
byte[] buffer = new byte[1024];
Map<String, byte[]> entries = new HashMap<>();
ZipInputStream zis = new ZipInputStream(zipInputStream);
ZipEntry zipEntry = zis.getNextEntry();
whi... | 3.68 |
flink_CheckedThread_run | /** This method is final - thread work should go into the {@link #go()} method instead. */
@Override
public final void run() {
try {
go();
} catch (Throwable t) {
error = t;
}
} | 3.68 |
pulsar_SchemaUtils_jsonifySchemaInfoWithVersion | /**
* Jsonify the schema info with version.
*
* @param schemaInfoWithVersion the schema info
* @return the jsonified schema info with version
*/
public static String jsonifySchemaInfoWithVersion(SchemaInfoWithVersion schemaInfoWithVersion) {
GsonBuilder gsonBuilder = new GsonBuilder()
.setPrettyPri... | 3.68 |
framework_VScrollTable_prepareRow | /**
* This method is used to instantiate new rows for this table. It
* automatically sets correct widths to rows cells and assigns correct
* client reference for child widgets.
*
 * This method can be called only after the table has been initialized
*
* @param uidl
*/
private VScrollTableRow prepareRow(UIDL uidl) {
... | 3.68 |
hmily_CreateSQLUtil_getInsertValuesClause | /**
* Get insert values clause.
*
* @param keySet key set
* @return insert values clause
*/
public static String getInsertValuesClause(final Set<String> keySet) {
Map<String, String> map = Maps.asMap(keySet, input -> "?");
return String.format("(%s) VALUES (%s)", Joiner.on(",").join(map.keySet()), Joiner.o... | 3.68 |
flink_CommonTestUtils_createCopySerializable | /**
* Creates a copy of an object via Java Serialization.
*
* @param original The original object.
* @return The copied object.
*/
public static <T extends java.io.Serializable> T createCopySerializable(T original)
throws IOException {
if (original == null) {
throw new IllegalArgumentException(... | 3.68 |
flink_ExternalResourceUtils_getExternalResourceConfigurationKeys | /**
* Get the external resource configuration keys map, indexed by the resource name. The
* configuration key should be used for deployment specific container request.
*
* @param config Configurations
* @param suffix suffix of config option for deployment specific configuration key
* @return external resource con... | 3.68 |
querydsl_BeanMap_getTypeFunction | /**
* Returns a transformer for the given primitive type.
*
* @param aType the primitive type whose transformer to return
* @return a transformer that will convert strings into that type,
* or null if the given type is not a primitive type
*/
protected Function<?,?> getTypeFunction(Class<?> aType) {
r... | 3.68 |
dubbo_ServiceInvokeRestFilter_acceptSupportJudge | /**
 * Throws UnSupportAcceptException if the accept header cannot be supported.
*
* @param requestFacade
*/
private void acceptSupportJudge(RequestFacade requestFacade, Class<?> returnType) {
try {
// media type judge
getAcceptMediaType(requestFacade, returnType);
} catch (UnSupportContentTypeException e) ... | 3.68 |
hadoop_CandidateNodeSetUtils_getSingleNode | /*
* If the {@link CandidateNodeSet} only has one entry, return it. Otherwise,
* return null.
*/
public static <N extends SchedulerNode> N getSingleNode(
CandidateNodeSet<N> candidates) {
N node = null;
if (1 == candidates.getAllNodes().size()) {
node = candidates.getAllNodes().values().iterator().next()... | 3.68 |
hadoop_DataNodeVolumeMetrics_getNativeCopyIoSampleCount | // Based on nativeCopyIoRate
public long getNativeCopyIoSampleCount() {
return nativeCopyIoRate.lastStat().numSamples();
} | 3.68 |
flink_PageSizeUtil_getSystemPageSize | /**
* Tries to get the system page size. If the page size cannot be determined, this returns -1.
*
* <p>This internally relies on the presence of "unsafe" and the resolution via some Netty
* utilities.
*/
public static int getSystemPageSize() {
try {
return PageSizeUtilInternal.getSystemPageSize();
... | 3.68 |
hbase_RegionSizeCalculator_getRegionSize | /**
* Returns size of given region in bytes. Returns 0 if region was not found.
*/
public long getRegionSize(byte[] regionId) {
Long size = sizeMap.get(regionId);
if (size == null) {
LOG.debug("Unknown region:" + Arrays.toString(regionId));
return 0;
} else {
return size;
}
} | 3.68 |
hbase_ZKWatcher_interruptedExceptionNoThrow | /**
* Log the InterruptedException and interrupt current thread
 * @param ie The InterruptedException to log
 * @param throwLater Whether we will throw the exception later
*/
public void interruptedExceptionNoThrow(InterruptedException ie, boolean throwLater) {
LOG.debug(prefix("Received InterruptedExceptio... | 3.68 |
hudi_HiveSchemaUtils_splitSchemaByPartitionKeys | /**
* Split the field schemas by given partition keys.
*
* @param fieldSchemas The Hive field schemas.
* @param partitionKeys The partition keys.
* @return The pair of (regular columns, partition columns) schema fields
*/
public static Pair<List<FieldSchema>, List<FieldSchema>> splitSchemaByPartitionKeys(
Li... | 3.68 |
flink_WindowSavepointReader_aggregate | /**
* Reads window state generated using an {@link AggregateFunction}.
*
* @param uid The uid of the operator.
* @param aggregateFunction The aggregate function used to create the window.
* @param readerFunction The window reader function.
* @param keyType The key type of the window.
* @param accType The type in... | 3.68 |
hadoop_AbfsConfiguration_getLong | /**
* Returns the account-specific value if it exists, then looks for an
* account-agnostic value, and finally tries the default value.
* @param key Account-agnostic configuration key
* @param defaultValue Value returned if none is configured
* @return value if one exists, else the default value
*/
public long ge... | 3.68 |
hadoop_Time_now | /**
* Current system time. Do not use this to calculate a duration or interval
* to sleep, because it will be broken by settimeofday. Instead, use
* monotonicNow.
* @return current time in msec.
*/
public static long now() {
return System.currentTimeMillis();
} | 3.68 |
morf_UpdateStatementBuilder_where | /**
* Specifies the where criteria
*
* <blockquote><pre>
* update([table])
* .set([fields])
* .where([criteria]);</pre></blockquote>
*
* @param criterion the criteria to filter the results by
* @return this, for method chaining.
*/
public UpdateStatementBuilder where(Criterion criterion) {
if (... | 3.68 |
AreaShop_RegionSign_getProfile | /**
* Get the ConfigurationSection defining the sign layout.
* @return The sign layout config
*/
public ConfigurationSection getProfile() {
return getRegion().getConfigurationSectionSetting("general.signProfile", "signProfiles", getRegion().getConfig().get("general.signs." + key + ".profile"));
} | 3.68 |
flink_SkipListKeySerializer_deserializeKey | /**
 * Deserializes the partition key from the memory segment which stores the skip list key.
 *
 * @param memorySegment the memory segment which stores the skip list key.
 * @param offset the start position of the skip list key in the memory segment.
 * @param len length of the skip list key.
*/
K deserializeKey(MemorySegment m... | 3.68 |
flink_NettyShuffleMetricFactory_registerLegacyNetworkMetrics | /**
* Registers legacy network metric groups before shuffle service refactoring.
*
* <p>Registers legacy metric groups if shuffle service implementation is original default one.
*
* @deprecated should be removed in future
*/
@SuppressWarnings("DeprecatedIsStillUsed")
@Deprecated
public static void registerLegacyN... | 3.68 |
framework_Navigator_parseParameterStringToMap | /**
* Parses the given parameter string to a map using the given separator
* string.
*
* @param parameterString
* the parameter string to parse
* @param separator
* the string (typically one character) used to separate values
* from each other
* @return The navigation state as ... | 3.68 |
framework_LayoutDependencyTree_setNeedsHorizontalMeasure | /**
* @param connectorId
* the connector id of the component whose horizontal size might
* have changed
* @param needsMeasure
* {@code true} if measuring should be enabled, {@code false} if
* measuring should be disabled (disabling is only effective if
* the... | 3.68 |
hadoop_MountTableRefresherService_getClientCreator | /**
* Creates RouterClient and caches it.
*/
private CacheLoader<String, RouterClient> getClientCreator() {
return new CacheLoader<String, RouterClient>() {
public RouterClient load(String adminAddress) throws IOException {
InetSocketAddress routerSocket =
NetUtils.createSocketAddr(adminAddress)... | 3.68 |
hudi_JsonEncoder_configure | /**
* Reconfigures this JsonEncoder to output to the JsonGenerator provided.
* <p/>
* If the JsonGenerator provided is null, a NullPointerException is thrown.
* <p/>
* Otherwise, this JsonEncoder will flush its current output and then
* reconfigure its output to use the provided JsonGenerator.
*
* @param genera... | 3.68 |
framework_Table_setSpanColumns | /**
* If set to true, only one string will be rendered, spanning the entire
* row.
*
* @param spanColumns
*/
public void setSpanColumns(boolean spanColumns) {
this.spanColumns = spanColumns;
} | 3.68 |
flink_DeduplicateFunctionHelper_processLastRowOnChangelog | /**
* Processes element to deduplicate on keys, sends current element as last row, retracts
* previous element if needed.
*
 * <p>Note: we don't support stateless mode yet, because this is not safe for Kafka tombstone
 * messages, which don't contain full content. This can be a future improvement if the
* downstre... | 3.68 |
framework_VAbstractCalendarPanel_getResetKey | /**
* Returns the reset key which will reset the calendar to the previous
* selection. By default this is backspace but it can be overridden to
* change the key to whatever you want.
*
* @return the reset key
*/
protected int getResetKey() {
return KeyCodes.KEY_BACKSPACE;
} | 3.68 |
hudi_FailSafeConsistencyGuard_waitForFilesVisibility | /**
* Helper function to wait for all files belonging to single directory to appear.
*
* @param dirPath Dir Path
* @param files Files to appear/disappear
* @param event Appear/Disappear
* @throws TimeoutException
*/
public void waitForFilesVisibility(String dirPath, List<String> files, FileVisibility event) thro... | 3.68 |
flink_StateBackend_useManagedMemory | /** Whether the state backend uses Flink's managed memory. */
default boolean useManagedMemory() {
return false;
} | 3.68 |
hbase_RestoreSnapshotHelper_hasRegionsToAdd | /** Returns true if there're new regions */
public boolean hasRegionsToAdd() {
return this.regionsToAdd != null && this.regionsToAdd.size() > 0;
} | 3.68 |
flink_TableSource_explainSource | /**
* Describes the table source.
*
* @return A String explaining the {@link TableSource}.
*/
default String explainSource() {
return TableConnectorUtils.generateRuntimeName(
getClass(), getTableSchema().getFieldNames());
} | 3.68 |
framework_AbstractTestUI_setTransport | /**
* Sets the push transport according to the transport= URL parameter if such
* is given. Supports transport=xhr (disables push), transport=websocket
* (forces websocket into use), transport=streaming (forces streaming into
* use). Using ?transport=xyz disables the fallback transport.
*
* @param request
* ... | 3.68 |
hudi_DFSHoodieDatasetInputReader_iteratorLimit | /**
* Creates an iterator returning the first {@code limitSize} elements of the given iterator. If the original iterator does not contain that many elements, the returned iterator will have the same
* behavior as the original iterator. The returned iterator supports {@code remove()} if the original iterator does.
*
... | 3.68 |
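
Three of the truncated snippets above describe patterns worth seeing in full. The sketches below are illustrative reconstructions under stated assumptions, not the projects' actual source. First, the get-then-putIfAbsent pattern from the hbase_ConcurrentMapUtils_computeIfAbsent row: per HBASE-16648, ConcurrentHashMap.get() is much faster than computeIfAbsent() when the value already exists, and the supplier is not guaranteed to run only once. The wrapper class and main method here are hypothetical.

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Supplier;

// Hypothetical wrapper class; not HBase's actual ConcurrentMapUtils.
public final class ComputeIfAbsentSketch {

    // Fast path: try a plain get() first, because ConcurrentHashMap.get() is
    // lock-free on hits, whereas computeIfAbsent() may lock the bin even when
    // the value already exists. The supplier may run more than once under
    // contention; putIfAbsent() decides which result wins.
    public static <K, V> V computeIfAbsent(ConcurrentMap<K, V> map, K key, Supplier<V> supplier) {
        V value = map.get(key);
        if (value != null) {
            return value;
        }
        V newValue = supplier.get();
        V existing = map.putIfAbsent(key, newValue);
        return existing != null ? existing : newValue;
    }

    public static void main(String[] args) {
        ConcurrentMap<String, Integer> cache = new ConcurrentHashMap<>();
        System.out.println(computeIfAbsent(cache, "answer", () -> 42)); // 42
        System.out.println(computeIfAbsent(cache, "answer", () -> -1)); // still 42
    }
}
```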
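
Next, a minimal sketch of the protobuf-style varint64 write loop that the hbase_SimpleMutableByteRange_putVLong row shows truncated (copied in HBase from CodedOutputStream writeRawVarint64). The standalone class, parameter layout, and returned byte count are assumptions for illustration; the visible portion of the loop is preserved.

```java
// Hypothetical standalone class; not HBase's actual SimpleMutableByteRange.
public final class VarintSketch {

    // Writes val into bytes as a protobuf-style unsigned varint64: seven
    // payload bits per byte, with the high bit set on every byte except the
    // last. Returns the number of bytes written.
    public static int putVLong(byte[] bytes, int offset, int index, long val) {
        int rPos = 0;
        while (true) {
            if ((val & ~0x7FL) == 0) {
                bytes[offset + index + rPos] = (byte) val;
                break;
            } else {
                bytes[offset + index + rPos] = (byte) ((val & 0x7F) | 0x80);
                val >>>= 7;
            }
            rPos++;
        }
        return rPos + 1;
    }

    public static void main(String[] args) {
        byte[] buf = new byte[10];
        int n = putVLong(buf, 0, 0, 300L);
        System.out.printf("300 -> %d bytes: %02x %02x%n", n, buf[0], buf[1]); // ac 02
    }
}
```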
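
Finally, a sketch of the blocking read loop described in the flink_BlobUtils_readFully row: loop over InputStream.read() until the requested number of bytes has arrived, and throw EOFException if the stream ends early. The signature and message text are illustrative, not Flink's exact code.

```java
import java.io.ByteArrayInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

// Hypothetical standalone class; not Flink's actual BlobUtils.
public final class ReadFullySketch {

    // Blocks until exactly len bytes have been read into buf starting at off.
    // InputStream.read() may return fewer bytes than requested, so we loop;
    // a negative return means the stream ended early, hence EOFException.
    public static void readFully(InputStream in, byte[] buf, int off, int len) throws IOException {
        int read = 0;
        while (read < len) {
            int n = in.read(buf, off + read, len - read);
            if (n < 0) {
                throw new EOFException("Stream ended after " + read + " of " + len + " bytes");
            }
            read += n;
        }
    }

    public static void main(String[] args) throws IOException {
        byte[] buf = new byte[5];
        readFully(new ByteArrayInputStream("hello!".getBytes()), buf, 0, 5);
        System.out.println(new String(buf)); // hello
    }
}
```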