| name (string, 12–178 chars) | code_snippet (string, 8–36.5k chars) | score (float64, 3.26–3.68) |
|---|---|---|
hadoop_SignerManager_maybeRegisterSigner | /**
* Make sure the signer class is registered once with the AWS SDK.
* @param signerName signer name
* @param signerClassName classname
* @param conf source configuration
* @throws RuntimeException if the class is not found
*/
private static void maybeRegisterSigner(String signerName,
String signerClassName,... | 3.68 |
shardingsphere-elasticjob_JobItemExecutorFactory_getExecutor | /**
* Get executor.
*
* @param elasticJobClass elastic job class
* @return job item executor
*/
@SuppressWarnings("unchecked")
public static JobItemExecutor getExecutor(final Class<? extends ElasticJob> elasticJobClass) {
for (ClassedJobItemExecutor each : ShardingSphereServiceLoader.getServiceInstances(Class... | 3.68 |
hadoop_TaskPool_revertWith | /**
* Task to revert with after another task failed.
* @param task task to execute
* @return the builder
*/
public Builder<I> revertWith(Task<I, ?> task) {
this.revertTask = task;
return this;
} | 3.68 |
graphhopper_OSMNodeData_addCoordinatesIfMapped | /**
* Stores the given coordinates for the given OSM node ID, but only if a non-empty node type was set for this
* OSM node ID previously.
*
* @return the node type this OSM node was associated with before this method was called
*/
public long addCoordinatesIfMapped(long osmNodeId, double lat, double lon, DoubleSu... | 3.68 |
hbase_BucketCache_cacheBlock | /**
* Cache the block with the specified name and buffer.
* @param cacheKey block's cache key
* @param cachedItem block buffer
* @param inMemory if block is in-memory
*/
@Override
public void cacheBlock(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory,
boolean waitWhenCache) {
cacheBlockWithW... | 3.68 |
flink_ResettableExternalBuffer_close | /** Delete all files and release the memory. */
@Override
public void close() {
clearChannels();
inMemoryBuffer.close();
pool.close();
} | 3.68 |
morf_AbstractSqlDialectTest_expectedSqlForMathOperations13 | /**
* @return expected SQL for math operation 13
*/
protected String expectedSqlForMathOperations13() {
return "a + b + c / 2";
} | 3.68 |
flink_CompositeTypeSerializerUtil_setNestedSerializersSnapshots | /**
* Overrides the existing nested serializers' snapshots with the provided {@code
* nestedSnapshots}.
*
* @param compositeSnapshot the composite snapshot whose nested serializer snapshots are overwritten.
* @param nestedSnapshots the nested snapshots to overwrite with.
*/
public static void setNestedSerializersSnapshots(
... | 3.68 |
morf_ConnectionResourcesBean_setSchemaName | /**
* @see org.alfasoftware.morf.jdbc.AbstractConnectionResources#setSchemaName(java.lang.String)
*/
@Override
public void setSchemaName(String schemaName) {
this.schemaName = schemaName;
} | 3.68 |
framework_ColorUtil_getHSLPatternColor | /**
* Parses {@link Color} from matched HSL {@link Matcher}.
*
* @param matcher
* {@link Matcher} matching HSL pattern with named regex groups
* {@code hue}, {@code saturation}, and {@code light}
* @return {@link Color} parsed from {@link Matcher}
*/
public static Color getHSLPatternColor(M... | 3.68 |
framework_LocaleService_createLocaleData | /**
* Creates a LocaleData instance for transportation to the client.
*
* @since 7.1
* @param locale
* The locale for which to create a LocaleData object
* @return A LocaleData object with information about the given locale
*/
protected LocaleData createLocaleData(Locale locale) {
LocaleData local... | 3.68 |
querydsl_StringExpression_substring | /**
* Create a {@code this.substring(beginIndex, endIndex)} expression
*
* @param beginIndex inclusive start index
* @param endIndex exclusive end index
* @return this.substring(beginIndex, endIndex)
* @see java.lang.String#substring(int, int)
*/
public StringExpression substring(Expression<Integer> beginIndex, ... | 3.68 |
hbase_HRegion_attachRegionReplicationInWALAppend | /**
* Attach {@link RegionReplicationSink#add} to the mvcc writeEntry for replicating to region
* replica.
*/
private void attachRegionReplicationInWALAppend(BatchOperation<?> batchOp,
MiniBatchOperationInProgress<Mutation> miniBatchOp, WALKeyImpl walKey, WALEdit walEdit,
WriteEntry writeEntry) {
if (!regionRe... | 3.68 |
pulsar_ManagedLedgerImpl_getPreviousPosition | /**
* Get the entry position that comes before the specified position in the message stream, using information from the
* ledger list and each ledger's entry count.
*
* @param position
* the current position
* @return the previous position
*/
public PositionImpl getPreviousPosition(PositionImpl positio... | 3.68 |
rocketmq-connect_AbstractConfigManagementService_mergeConnectConfig | /**
* Merge newly received configs with the configs in memory.
*
* @param connectName connector name
* @param schemaAndValue schema and value of the newly received config
* @return true if the in-memory config was changed
*/
private boolean mergeConnectConfig(String connectName, SchemaAndValue schemaAndValue) {
Struct value = (Struct) schemaAndValue.value();
Object targetState = value.get(FIELD_STATE);
... | 3.68 |
hadoop_AzureFileSystemInstrumentation_setAccountName | /**
* Sets the account name to tag all the metrics with.
* @param accountName The account name.
*/
public void setAccountName(String accountName) {
registry.tag("accountName",
"Name of the Azure Storage account that these metrics are going against",
accountName);
} | 3.68 |
hbase_StoreFileInfo_isReference | /**
* @param name file name to check.
* @return True if the path has format of a HStoreFile reference.
*/
public static boolean isReference(final String name) {
Matcher m = REF_NAME_PATTERN.matcher(name);
return m.matches() && m.groupCount() > 1;
} | 3.68 |
framework_TooltipDelay_getTicketNumber | /*
* (non-Javadoc)
*
* @see com.vaadin.tests.components.AbstractTestUI#getTicketNumber()
*/
@Override
protected Integer getTicketNumber() {
return 13695;
} | 3.68 |
hbase_MultiByteBuff_getLong | /**
* Returns the long value at the current position. Also advances the position by the size of a long.
* @return the long value at the current position
*/
@Override
public long getLong() {
checkRefCount();
int remaining = this.curItem.remaining();
if (remaining >= Bytes.SIZEOF_LONG) {
return this.curItem.get... | 3.68 |
flink_DataStream_transform | /**
* Method for passing user defined operators created by the given factory along with the type
* information that will transform the DataStream.
*
* <p>This method uses the rather new operator factories and should only be used when custom
* factories are needed.
*
* @param operatorName name of the operator, fo... | 3.68 |
hudi_HoodieLogFormatWriter_getOutputStream | /**
* Lazily opens the output stream if needed for writing.
* @return OutputStream for writing to current log file.
* @throws IOException if the log file cannot be opened or created
*/
private FSDataOutputStream getOutputStream() throws IOException {
if (this.output == null) {
boolean created = false;
while (!created) {
try {
// Block... | 3.68 |
hbase_CloneSnapshotProcedure_postCloneSnapshot | /**
* Action after cloning from snapshot.
* @param env MasterProcedureEnv
*/
private void postCloneSnapshot(final MasterProcedureEnv env)
throws IOException, InterruptedException {
final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
if (cpHost != null) {
final RegionInfo[] regions =
... | 3.68 |
hbase_SpaceLimitSettings_validateProtoArguments | /**
* Validates that the provided protobuf SpaceQuota has the necessary information to construct a
* {@link SpaceLimitSettings}.
* @param proto The protobuf message to validate.
*/
static void validateProtoArguments(final QuotaProtos.SpaceQuota proto) {
if (!Objects.requireNonNull(proto).hasSoftLimit()) {
thr... | 3.68 |
querydsl_JTSGeometryExpression_union | /**
* Returns a geometric object that represents the Point set
* union of this geometric object with anotherGeometry.
*
* @param geometry other geometry
* @return union of this and the other geometry
*/
public JTSGeometryExpression<Geometry> union(Expression<? extends Geometry> geometry) {
return JTSGeometryE... | 3.68 |
framework_Dependency_getType | /**
* Gets the type of the dependency.
*
* @return the type of the dependency
*/
public Type getType() {
return type;
} | 3.68 |
hadoop_S3AReadOpContext_getReadInvoker | /**
* Get invoker to use for read operations.
* @return invoker to use for read codepaths
*/
public Invoker getReadInvoker() {
return invoker;
} | 3.68 |
hbase_ColumnSchemaModel___getInMemory | /** Returns true if the IN_MEMORY attribute is present and true */
public boolean __getInMemory() {
Object o = attrs.get(IN_MEMORY);
return o != null
? Boolean.parseBoolean(o.toString())
: ColumnFamilyDescriptorBuilder.DEFAULT_IN_MEMORY;
} | 3.68 |
hbase_OperationStatus_getOperationStatusCode | /** Returns the operation status code. */
public OperationStatusCode getOperationStatusCode() {
return code;
} | 3.68 |
hadoop_DataNodeVolumeMetrics_getReadIoSampleCount | // Based on readIoRate
public long getReadIoSampleCount() {
return readIoRate.lastStat().numSamples();
} | 3.68 |
morf_UpgradeHelper_preSchemaUpgrade | /**
* preSchemaUpgrade - generates a collection of SQL statements to run before the upgrade.
* @param upgradeSchemas - Holds the source and target schema.
* @param viewChanges - Changes to be made to views.
* @param viewChangesDeploymentHelper - Deployment helper for the view changes.
* @return - Collection of SQL State... | 3.68 |
flink_TransientBlobCache_getBlobExpiryTimes | /**
* Returns the blob expiry times - for testing purposes only!
*
* @return blob expiry times (internal state!)
*/
@VisibleForTesting
ConcurrentMap<Tuple2<JobID, TransientBlobKey>, Long> getBlobExpiryTimes() {
return blobExpiryTimes;
} | 3.68 |
querydsl_AbstractHibernateSQLQuery_setFetchSize | /**
* Set a fetch size for the underlying JDBC query.
* @param fetchSize the fetch size
*/
@SuppressWarnings("unchecked")
public Q setFetchSize(int fetchSize) {
this.fetchSize = fetchSize;
return (Q) this;
} | 3.68 |
hadoop_YarnClientUtils_getRmPrincipal | /**
* Perform the <code>_HOST</code> replacement in the {@code principal},
* returning the result. Correctly handles HA resource manager configurations.
*
* @param rmPrincipal the principal string to prepare
* @param conf the configuration
* @return the prepared principal string
* @throws IOException thrown if t... | 3.68 |
morf_OracleDialect_rebuildTriggers | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#rebuildTriggers(org.alfasoftware.morf.metadata.Table)
*/
@Override
public Collection<String> rebuildTriggers(Table table) {
return rebuildSequenceAndTrigger(table, getAutoIncrementColumnForTable(table));
} | 3.68 |
dubbo_AdaptiveClassCodeGenerator_generateInvocationArgumentNullCheck | /**
* generate code that checks whether an argument of type <code>Invocation</code> is null
*/
private String generateInvocationArgumentNullCheck(Method method) {
Class<?>[] pts = method.getParameterTypes();
return IntStream.range(0, pts.length)
.filter(i -> CLASS_NAME_INVOCATION.equals(pts[i].getName()))
... | 3.68 |
hadoop_NMClientAsync_onRollbackLastReInitializationError | /**
* Error Callback for rollback of last re-initialization.
*
* @param containerId the Id of the container to restart.
* @param t a Throwable.
*/
public void onRollbackLastReInitializationError(ContainerId containerId,
Throwable t) {} | 3.68 |
flink_LimitedConnectionsFileSystem_getStreamInactivityTimeout | /**
* Gets the milliseconds that a stream may spend not writing any bytes before it is closed as
* inactive.
*/
public long getStreamInactivityTimeout() {
return streamInactivityTimeoutNanos / 1_000_000;
} | 3.68 |
hbase_HRegion_getOpenSeqNum | /** Returns the latest sequence number that was read from storage when this region was opened */
public long getOpenSeqNum() {
return this.openSeqNum;
} | 3.68 |
hbase_HBaseServerBase_updateConfiguration | /**
* Reload the configuration from disk.
*/
public void updateConfiguration() {
LOG.info("Reloading the configuration from disk.");
// Reload the configuration from disk.
conf.reloadConfiguration();
configurationManager.notifyAllObservers(conf);
} | 3.68 |
flink_ExecNodeUtil_setManagedMemoryWeight | /**
* Sets {@link Transformation#declareManagedMemoryUseCaseAtOperatorScope(ManagedMemoryUseCase, int)}
* using the given bytes for {@link ManagedMemoryUseCase#OPERATOR}.
*/
public static <T> void setManagedMemoryWeight(
Transformation<T> transformation, long memoryBytes) {
if (memoryBytes > 0) {
final... | 3.68 |
framework_DragAndDropHandler_onDragStartOnDraggableElement | /**
* This method can be called to trigger drag and drop on any grid element
* that can be dragged and dropped.
*
* @param dragStartingEvent
* the drag triggering event, usually a {@link Event#ONMOUSEDOWN}
* or {@link Event#ONTOUCHSTART} event on the draggable element
*
* @param callback
... | 3.68 |
querydsl_ComparableExpression_max | /**
* Create a {@code max(this)} expression
*
* <p>Get the maximum value of this expression (aggregation)</p>
*
* @return max(this)
*/
@Override
public ComparableExpression<T> max() {
return Expressions.comparableOperation(getType(), Ops.AggOps.MAX_AGG, mixin);
} | 3.68 |
hudi_HoodieFlinkWriteClient_startAsyncCleaning | /**
* Starts async cleaning service for finished commits.
*
* <p>The Flink write client is designed to write data set as buckets
* but cleaning action should trigger after all the write actions within a
* checkpoint finish.
*/
public void startAsyncCleaning() {
tableServiceClient.startAsyncCleanerService(this);... | 3.68 |
morf_OracleMetaDataProvider_expensiveReadTableNames | /**
* A table name reading method which is more efficient than the Oracle driver meta-data version.
*
* @see <a href="http://download.oracle.com/docs/cd/B19306_01/server.102/b14237/statviews_2094.htm">ALL_TAB_COLUMNS specification</a>
*/
private void expensiveReadTableNames() {
log.info("Starting read of table de... | 3.68 |
hbase_HRegionFileSystem_loadRegionInfoFileContent | /**
* Create a {@link RegionInfo} from the serialized version on-disk.
* @param fs {@link FileSystem} that contains the Region Info file
* @param regionDir {@link Path} to the Region Directory that contains the Info file
* @return An {@link RegionInfo} instance gotten from the Region Info file.
* @throws IO... | 3.68 |
morf_WithMetaDataAdapter_close | /**
* {@inheritDoc}
*
* @see org.alfasoftware.morf.dataset.DataSetProducer#close()
*/
@Override
public void close() {
super.close();
schemaProducer.close();
} | 3.68 |
hbase_SnapshotInfo_getSnapshotFilesMap | /**
* Gets the store files map for snapshot
* @param conf the {@link Configuration} to use
* @param snapshot {@link SnapshotDescription} to get stats from
* @param exec the {@link ExecutorService} to use
* @param filesMap {@link Map} the map to pu... | 3.68 |
pulsar_TimeAverageMessageData_update | /**
* Update using a new bundle sample.
*
* @param newSample
* Most recently observed bundle stats.
*/
public void update(final NamespaceBundleStats newSample) {
update(newSample.msgThroughputIn, newSample.msgThroughputOut, newSample.msgRateIn, newSample.msgRateOut);
} | 3.68 |
graphhopper_LandmarkStorage_createLandmarksForSubnetwork | /**
* This method creates landmarks for the specified subnetwork (integer list)
*
* @return landmark mapping
*/
private boolean createLandmarksForSubnetwork(final int startNode, final byte[] subnetworks, EdgeFilter accessFilter) {
final int subnetworkId = landmarkIDs.size();
int[] tmpLandmarkNodeIds = new i... | 3.68 |
flink_PojoSerializerSnapshotData_createFrom | /**
* Creates a {@link PojoSerializerSnapshotData} from existing snapshotted configuration of a
* {@link PojoSerializer}.
*/
static <T> PojoSerializerSnapshotData<T> createFrom(
Class<T> pojoClass,
Field[] fields,
TypeSerializerSnapshot<?>[] existingFieldSerializerSnapshots,
LinkedHas... | 3.68 |
hbase_HashTable_getCurrentKey | /**
* Get the current key
* @return the current key or null if there is no current key
*/
public ImmutableBytesWritable getCurrentKey() {
return key;
} | 3.68 |
hbase_ZKWatcher_registerListenerFirst | /**
* Register the specified listener to receive ZooKeeper events and add it as the first in the list
* of current listeners.
* @param listener the listener to register
*/
public void registerListenerFirst(ZKListener listener) {
listeners.add(0, listener);
} | 3.68 |
flink_StreamOperatorWrapper_endOperatorInput | /**
* Ends an input of the operator contained by this wrapper.
*
* @param inputId the input ID starts from 1 which indicates the first input.
*/
public void endOperatorInput(int inputId) throws Exception {
if (wrapped instanceof BoundedOneInput) {
((BoundedOneInput) wrapped).endInput();
} else if (w... | 3.68 |
zxing_MathUtils_sum | /**
* @param array values to sum
* @return sum of values in array
*/
public static int sum(int[] array) {
int count = 0;
for (int a : array) {
count += a;
}
return count;
} | 3.68 |
flink_GenericArrayData_isPrimitiveArray | /**
* Returns true if this is a primitive array.
*
* <p>A primitive array is an array whose elements are of primitive type.
*/
public boolean isPrimitiveArray() {
return isPrimitiveArray;
} | 3.68 |
morf_ResolvedTables_addModifiedTable | /**
* Store information about modification of the given table.
*
* @param tableName modified table
*/
public void addModifiedTable(String tableName) {
modifiedTables.add(tableName.toUpperCase());
readTables.remove(tableName.toUpperCase());
} | 3.68 |
hadoop_AllocateResponse_nmTokens | /**
* Set the <code>nmTokens</code> of the response.
* @see AllocateResponse#setNMTokens(List)
* @param nmTokens <code>nmTokens</code> of the response
* @return {@link AllocateResponseBuilder}
*/
@Private
@Unstable
public AllocateResponseBuilder nmTokens(List<NMToken> nmTokens) {
allocateResponse.setNMTokens(nmT... | 3.68 |
framework_VDateField_getDate | /**
* Returns a copy of the current date. Modifying the returned date will not
* modify the value of this VDateField. Use {@link #setDate(Date)} to change
* the current date.
* <p>
* For internal use only. May be removed or replaced in the future.
*
* @return A copy of the current date
*/
public Date getDate() ... | 3.68 |
framework_VFilterSelect_setSelectedItemIcon | /**
* Sets the icon URI of the selected item. The icon is shown on the left
* side of the item caption text. Set the URI to null to remove the icon.
*
* @param iconUri
* The URI of the icon
*/
public void setSelectedItemIcon(String iconUri) {
if (iconUri == null || iconUri.isEmpty()) {
if ... | 3.68 |
hadoop_RollingFileSystemSink_rollLogDirIfNeeded | /**
* Check the current directory against the time stamp. If they're not
* the same, create a new directory and a new log file in that directory.
*
* @throws MetricsException thrown if an error occurs while creating the
* new directory or new log file
*/
private void rollLogDirIfNeeded() throws MetricsException ... | 3.68 |
hadoop_MkdirOperation_probePathStatusOrNull | /**
* Get the status of a path, downgrading FNFE to null result.
* @param path path to probe.
* @param probes probes to execute
* @return the status or null
* @throws IOException failure other than FileNotFound
*/
private S3AFileStatus probePathStatusOrNull(final Path path,
final Set<StatusProbeEnum> probes) thr... | 3.68 |
flink_MemorySegment_getHeapMemory | /**
* Get the heap byte array object.
*
* @return Return non-null if the memory is on the heap, and return null if the memory is off
* the heap.
*/
public byte[] getHeapMemory() {
return heapMemory;
} | 3.68 |
hadoop_ExecutionSummarizer_getTraceSignature | // Generates a signature for the trace file based on
// - filename
// - modification time
// - file length
// - owner
protected static String getTraceSignature(String input) throws IOException {
Path inputPath = new Path(input);
FileSystem fs = inputPath.getFileSystem(new Configuration());
FileStatus stat... | 3.68 |
hadoop_OBSInputStream_remainingInFile | /**
* Bytes left in stream.
*
* @return how many bytes are left to read
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public synchronized long remainingInFile() {
return this.contentLength - this.streamCurrentPos;
} | 3.68 |
morf_AbstractSqlDialectTest_stringLiteralPrefix | /**
* On some databases our string literals need prefixing with N to be
* correctly typed as a unicode string.
*
* @return prefix to insert before quoted string literal.
*/
protected String stringLiteralPrefix() {
return "";
} | 3.68 |
framework_Range_endsBefore | /**
* Checks whether this range ends before the start of another range.
*
* @param other
* the other range to compare against
* @return <code>true</code> if this range ends before the
* <code>other</code>
*/
public boolean endsBefore(final Range other) {
return getEnd() <= other.getStart()... | 3.68 |
hudi_BaseHoodieWriteClient_completeCompaction | /**
* Commit Compaction and track metrics.
*/
protected void completeCompaction(HoodieCommitMetadata metadata, HoodieTable table, String compactionCommitTime) {
tableServiceClient.completeCompaction(metadata, table, compactionCommitTime);
} | 3.68 |
flink_Tuple25_of | /**
* Creates a new tuple and assigns the given values to the tuple's fields. This is more
* convenient than using the constructor, because the compiler can infer the generic type
* arguments implicitly. For example: {@code Tuple3.of(n, x, s)} instead of {@code new
* Tuple3<Integer, Double, String>(n, x, s)}
*/
pu... | 3.68 |
streampipes_StreamPipesClient_pipelineElementTemplates | /**
* Get API to work with pipeline element templates
*
* @return {@link org.apache.streampipes.client.api.PipelineElementTemplateApi}
*/
@Override
public IPipelineElementTemplateApi pipelineElementTemplates() {
return new PipelineElementTemplateApi(config);
} | 3.68 |
hmily_ConsulClient_pull | /**
* Pulls configuration content from Consul.
* @param consulConfig consul config
* @return InputStream
*/
public InputStream pull(final ConsulConfig consulConfig) {
if (consul == null) {
if (StringUtils.isNoneBlank(consulConfig.getHostAndPorts())) {
consul = Consul.builder().withMultipleHostAndPort(buildHostAndPortList(cons... | 3.68 |
shardingsphere-elasticjob_JobNodePath_getConfigNodePath | /**
* Get configuration node path.
*
* @return configuration node path
*/
public String getConfigNodePath() {
return String.format("/%s/%s", jobName, CONFIG_NODE);
} | 3.68 |
flink_BufferManager_recycle | /**
* An exclusive buffer is recycled to this channel manager directly, which may trigger the return
* of an extra floating buffer based on <tt>numRequiredBuffers</tt>.
*
* @param segment The exclusive segment of this channel.
*/
@Override
public void recycle(MemorySegment segment) {
@Nullable Buffer releasedFloatingBuffer... | 3.68 |
framework_Tree_areChildrenAllowed | /**
* Tests if the Item with the given ID can have any children.
*
* @see Container.Hierarchical#areChildrenAllowed(Object)
*/
@Override
public boolean areChildrenAllowed(Object itemId) {
return ((Container.Hierarchical) items).areChildrenAllowed(itemId);
} | 3.68 |
framework_ContainerEventProvider_getContainerDataSource | /**
* Returns the container used as data source.
*
*/
public Container.Indexed getContainerDataSource() {
return container;
} | 3.68 |
hadoop_SharedKeyCredentials_initializeMac | /**
* Initialize the HmacSha256 associated with the account key.
*/
private void initializeMac() {
// Initializes the HMAC-SHA256 Mac and SecretKey.
try {
hmacSha256 = Mac.getInstance(HMAC_SHA256);
hmacSha256.init(new SecretKeySpec(accountKey, HMAC_SHA256));
} catch (final Exception e) {
throw new I... | 3.68 |
hmily_OriginTrackedPropertiesLoader_isPropertyDelimiter | /**
* Checks whether the current character is a property delimiter.
*
* @return true if the current character is an unescaped '=' or ':'
*/
public boolean isPropertyDelimiter() {
return !this.escaped && (this.character == '=' || this.character == ':');
} | 3.68 |
flink_TypeSerializerSnapshotSerializationUtil_deserializeV2 | /** Deserialization path for Flink versions 1.7+. */
@VisibleForTesting
static <T> TypeSerializerSnapshot<T> deserializeV2(DataInputView in, ClassLoader cl)
throws IOException {
return TypeSerializerSnapshot.readVersionedSnapshot(in, cl);
} | 3.68 |
hadoop_ActiveUsersManager_getNumActiveUsers | /**
* Get number of active users i.e. users with applications which have pending
* resource requests.
* @return number of active users
*/
@Lock({Queue.class, SchedulerApplicationAttempt.class})
@Override
synchronized public int getNumActiveUsers() {
return activeUsers;
} | 3.68 |
dubbo_JValidator_generateMethodParameterClass | /**
* try to generate methodParameterClass.
*
* @param clazz interface class
* @param method invoke method
* @param parameterClassName generated parameterClassName
* @return Class<?> generated methodParameterClass
*/
private static Class<?> generateMethodParameterClass(Class<?> clazz, Me... | 3.68 |
hbase_HFileReaderImpl_blockSeek | /**
* Within a loaded block, seek looking for the last key that is smaller than (or equal to?) the
* key we are interested in. A note on the seekBefore: if you have seekBefore = true, AND the
* first key in the block = key, then you'll get thrown exceptions. The caller has to check for
* that case and load the prev... | 3.68 |
zxing_QRCodeEncoder_encodeContentsFromZXingIntent | // It would be nice if the string encoding lived in the core ZXing library,
// but we use platform specific code like PhoneNumberUtils, so it can't.
private void encodeContentsFromZXingIntent(Intent intent) {
// Default to QR_CODE if no format given.
String formatString = intent.getStringExtra(Intents.Encode.FORMA... | 3.68 |
hadoop_AbfsHttpOperation_sendRequest | /**
* Sends the HTTP request. Note that HttpUrlConnection requires that an
* empty buffer be sent in order to set the "Content-Length: 0" header, which
* is required by our endpoint.
*
* @param buffer the request entity body.
* @param offset an offset into the buffer where the data begins.
* @p... | 3.68 |
hadoop_StartupProgress_endStep | /**
* Ends execution of the specified step within the specified phase. This is
* a no-op if the phase is already completed.
*
* @param phase Phase within which the step should be ended
* @param step Step to end
*/
public void endStep(Phase phase, Step step) {
if (!isComplete(phase)) {
lazyInitStep(phase, st... | 3.68 |
hbase_HFileWriterImpl_appendMetaBlock | /**
* Add a meta block to the end of the file. Call before close(). Metadata blocks are expensive.
* Fill one with a bunch of serialized data rather than do a metadata block per metadata instance.
* If metadata is small, consider adding to file info using
* {@link #appendFileInfo(byte[], byte[])} name of the block ... | 3.68 |
flink_AbstractStreamOperator_getRuntimeContext | /**
* Returns a context that allows the operator to query information about the execution and also
* to interact with systems such as broadcast variables and managed state. This also allows to
* register timers.
*/
@VisibleForTesting
public StreamingRuntimeContext getRuntimeContext() {
return runtimeContext;
} | 3.68 |
hbase_QuotaTableUtil_quotasFromData | /*
* ========================================================================= Quotas protobuf
* helpers
*/
protected static Quotas quotasFromData(final byte[] data) throws IOException {
return quotasFromData(data, 0, data.length);
} | 3.68 |
morf_SelectStatementBuilder_except | /**
* Perform an EXCEPT set operation with another {@code selectStatement}, retaining
* all rows which exist in the top select statement only.
*
* @param selectStatement the other select statement that contains entries that
* will not be present in the final result set.
* @return this, for me... | 3.68 |
AreaShop_TeleportFeature_hasTeleportLocation | /**
* Check if the region has a teleportLocation specified.
* @return true if the region has a teleport location, false otherwise
*/
public boolean hasTeleportLocation() {
return getRegion().getConfigurationSectionSetting("general.teleportLocation") != null;
} | 3.68 |
hadoop_SWebHdfs_createSWebHdfsFileSystem | /**
* Returns a new {@link SWebHdfsFileSystem}, with the given configuration.
*
* @param conf configuration
* @return new SWebHdfsFileSystem
*/
private static SWebHdfsFileSystem createSWebHdfsFileSystem(
Configuration conf) {
SWebHdfsFileSystem fs = new SWebHdfsFileSystem();
fs.setConf(conf);
return fs;
... | 3.68 |
flink_BlobServerConnection_run | /** Main connection work method. Accepts requests until the other side closes the connection. */
@Override
public void run() {
try {
final InputStream inputStream = this.clientSocket.getInputStream();
final OutputStream outputStream = this.clientSocket.getOutputStream();
while (true) {
... | 3.68 |
zxing_AlignmentPatternFinder_centerFromEnd | /**
* Given a count of black/white/black pixels just seen and an end position,
* figures the location of the center of this black/white/black run.
*/
private static float centerFromEnd(int[] stateCount, int end) {
return (end - stateCount[2]) - stateCount[1] / 2.0f;
} | 3.68 |
querydsl_NumberExpression_mod | /**
* Create a {@code mod(this, num)} expression
*
* @param num
* @return mod(this, num)
*/
public NumberExpression<T> mod(T num) {
return Expressions.numberOperation(getType(), Ops.MOD, mixin, ConstantImpl.create(num));
} | 3.68 |
flink_TableChange_modifyColumnPosition | /**
* A table change to modify the column position.
*
* <p>It is equal to the following statement:
*
* <pre>
* ALTER TABLE <table_name> MODIFY <column_name> <original_column_type> <column_position>
* </pre>
*
* @param oldColumn the definition of the old column.
* @param columnPositi... | 3.68 |
morf_DeleteStatement_shallowCopy | /**
* Performs a shallow copy to a builder, allowing a duplicate
* to be created and modified.
*
* @return A builder, initialised as a duplicate of this statement.
*/
@Override
public DeleteStatementBuilder shallowCopy() {
return new DeleteStatementBuilder(this);
} | 3.68 |
framework_AbstractComponentTest_constructComponent | /**
* Construct the component that is to be tested. This method uses a no-arg
* constructor by default. Override to customize.
*
* @return Instance of the component that is to be tested.
* @throws IllegalAccessException
* @throws InstantiationException
*/
protected T constructComponent() {
try {
retu... | 3.68 |
hbase_CompositeImmutableSegment_close | /**
* Closes a segment before it is discarded.
*/
@Override
public void close() {
for (ImmutableSegment s : segments) {
s.close();
}
} | 3.68 |
flink_HiveParserDefaultGraphWalker_startWalking | // starting point for walking.
public void startWalking(Collection<Node> startNodes, HashMap<Node, Object> nodeOutput)
throws SemanticException {
toWalk.addAll(startNodes);
while (toWalk.size() > 0) {
Node nd = toWalk.remove(0);
walk(nd);
// Some walkers extending DefaultGraphWal... | 3.68 |
flink_AdaptiveScheduler_computeVertexParallelismStoreForExecution | /**
* Creates the parallelism store that should be used to build the {@link ExecutionGraph}, which
* will respect the vertex parallelism of the passed {@link JobGraph} in all execution modes.
*
* @param jobGraph The job graph for execution.
* @param executionMode The mode of scheduler execution.
* @param defaultM... | 3.68 |
flink_JobManagerSharedServices_shutdown | /**
* Shutdown the {@link JobMaster} services.
*
* <p>This method makes sure all services are closed or shut down, even when an exception
* occurred in the shutdown of one component. The first encountered exception is thrown, with
* successive exceptions added as suppressed exceptions.
*
* @throws Exception The ... | 3.68 |
flink_TypeSerializerSchemaCompatibility_isCompatibleAsIs | /**
* Returns whether or not the type of the compatibility is {@link Type#COMPATIBLE_AS_IS}.
*
* @return whether or not the type of the compatibility is {@link Type#COMPATIBLE_AS_IS}.
*/
public boolean isCompatibleAsIs() {
return resultType == Type.COMPATIBLE_AS_IS;
} | 3.68 |