| name (string, 12-178 chars) | code_snippet (string, 8-36.5k chars) | score (float64, 3.26-3.68) |
|---|---|---|
framework_ApplicationConnection_flushActiveConnector | /**
* Calls {@link ComponentConnector#flush()} on the active connector. Does
* nothing if there is no active (focused) connector.
*/
public void flushActiveConnector() {
ComponentConnector activeConnector = getActiveConnector();
if (activeConnector == null) {
return;
}
activeConnector.flush()... | 3.68 |
hadoop_DirectBufferPool_countBuffersOfSize | /**
* Return the number of available buffers of a given size.
* This is used only for tests.
*/
@VisibleForTesting
int countBuffersOfSize(int size) {
Queue<WeakReference<ByteBuffer>> list = buffersBySize.get(size);
if (list == null) {
return 0;
}
return list.size();
} | 3.68 |
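Since the counter is test-only, a typical use is asserting pool behavior around a borrow-and-return cycle. A minimal sketch, assuming the `getBuffer`/`returnBuffer` methods on the same class:

```java
import java.nio.ByteBuffer;

DirectBufferPool pool = new DirectBufferPool();
ByteBuffer buf = pool.getBuffer(1024);     // borrowed, so not counted as available
assert pool.countBuffersOfSize(1024) == 0;
pool.returnBuffer(buf);                    // returned buffers become available again
assert pool.countBuffersOfSize(1024) == 1;
```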
flink_SqlJsonUtils_createArrayNode | /** Returns a new {@link ArrayNode}. */
public static ArrayNode createArrayNode() {
return MAPPER.createArrayNode();
} | 3.68 |
flink_PythonFunction_takesRowAsInput | /** Returns whether the Python function takes a row as input instead of each column of a row. */
default boolean takesRowAsInput() {
return false;
} | 3.68 |
hbase_RecoverableZooKeeper_setAcl | /**
* setAcl is an idempotent operation. Retries before throwing an exception.
* @return list of ACLs
*/
public Stat setAcl(String path, List<ACL> acls, int version)
throws KeeperException, InterruptedException {
final Span span = TraceUtil.createSpan("RecoverableZookeeper.setAcl");
try (Scope ignored = span.makeCur... | 3.68 |
framework_CheckBoxGroupElement_getOptionElements | /**
* Gets the list of option elements for this check box group.
*
* @return list of option elements
*/
public List<WebElement> getOptionElements() {
return findElements(bySelectOption);
} | 3.68 |
framework_WindowConnector_setWindowOrderAndPosition | /**
* Gives the WindowConnector an order number. As a side effect, moves the
* window according to its order number so the windows are stacked. This
* method should be called for each window in the order they should appear.
*/
public void setWindowOrderAndPosition() {
getWidget().setWindowOrderAndPosition();
} | 3.68 |
flink_StreamGraphHasherV2_traverseStreamGraphAndGenerateHashes | /**
* Returns a map with a hash for each {@link StreamNode} of the {@link StreamGraph}. The hash is
* used as the {@link JobVertexID} in order to identify nodes across job submissions if they
* didn't change.
*
* <p>The complete {@link StreamGraph} is traversed. The hash is either computed from the
* transformati... | 3.68 |
graphhopper_BBox_createInverse | /**
* Prefills BBox with minimum values so that it can increase.
*/
public static BBox createInverse(boolean elevation) {
if (elevation) {
return new BBox(Double.MAX_VALUE, -Double.MAX_VALUE, Double.MAX_VALUE, -Double.MAX_VALUE,
Double.MAX_VALUE, -Double.MAX_VALUE, true);
} else {
... | 3.68 |
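Why the inverse prefill works: every real coordinate is below Double.MAX_VALUE and above -Double.MAX_VALUE, so folding points into the box always tightens it. A minimal 2D sketch, assuming BBox's public min/max fields:

```java
BBox bounds = BBox.createInverse(false);
double[][] points = { { 49.01, 8.40 }, { 52.52, 13.40 } };
for (double[] p : points) {
    bounds.minLat = Math.min(bounds.minLat, p[0]);
    bounds.maxLat = Math.max(bounds.maxLat, p[0]);
    bounds.minLon = Math.min(bounds.minLon, p[1]);
    bounds.maxLon = Math.max(bounds.maxLon, p[1]);
}
// bounds now tightly encloses all points
```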
morf_CaseStatement_drive | /**
* @see org.alfasoftware.morf.util.ObjectTreeTraverser.Driver#drive(ObjectTreeTraverser)
*/
@Override
public void drive(ObjectTreeTraverser traverser) {
traverser
.dispatch(getWhenConditions())
.dispatch(getDefaultValue());
} | 3.68 |
hbase_RegionPlan_setDestination | /**
* Set the destination server for the plan for this region.
*/
public void setDestination(ServerName dest) {
this.dest = dest;
} | 3.68 |
querydsl_DateExpression_currentDate | /**
* Create an expression representing the current date as a {@code DateExpression} instance
*
* @param cl type of expression
* @return current date
*/
public static <T extends Comparable> DateExpression<T> currentDate(Class<T> cl) {
return Expressions.dateOperation(cl, Ops.DateTimeOps.CURRENT_DATE);
} | 3.68 |
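A hypothetical usage; the `order.dueDate` path is an illustrative query type, not from the snippet:

```java
DateExpression<java.sql.Date> today = DateExpression.currentDate(java.sql.Date.class);
// e.g. filter rows whose dueDate is before today:
// query.where(order.dueDate.before(today));
```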
graphhopper_SubnetworkStorage_setSubnetwork | /**
* This method sets the subnetwork id of the specified nodeId. Default is 0 and means the subnetwork
* was too small to be usefully stored.
*/
public void setSubnetwork(int nodeId, int subnetwork) {
if (subnetwork > 127)
throw new IllegalArgumentException("Number of subnetworks is currently limited to... | 3.68 |
hadoop_GangliaMetricVisitor_getType | /**
* @return the type of a visited metric
*/
String getType() {
return type;
} | 3.68 |
framework_CssLayoutConnector_makeCamelCase | /**
* Converts a css property string to CamelCase
*
* @param cssProperty
* The property string
* @return A string converted to camelcase
*/
private static final String makeCamelCase(String cssProperty) {
cssProperty = SharedUtil.dashSeparatedToCamelCase(cssProperty);
if ("float".equals(cssPrope... | 3.68 |
hibernate-validator_TraversableResolvers_wrapWithCachingForSingleValidation | /**
* Potentially wrap the {@link TraversableResolver} into a caching one.
* <p>
* If {@code traversableResolver} is {@code TraverseAllTraversableResolver.INSTANCE}, we don't wrap it and it is
* returned directly. Same if the caching is explicitly disabled.
* <p>
* If {@code traversableResolver} is an instance of... | 3.68 |
hadoop_TypedBytesInput_readLong | /**
* Reads the long following a <code>Type.LONG</code> code.
* @return the obtained long
* @throws IOException
*/
public long readLong() throws IOException {
return in.readLong();
} | 3.68 |
hbase_HRegionServer_buildRegionSpaceUseReportRequest | /**
* Builds a {@link RegionSpaceUseReportRequest} protobuf message from the region size map.
* @param regionSizes The size in bytes of regions
* @return The corresponding protocol buffer message.
*/
RegionSpaceUseReportRequest buildRegionSpaceUseReportRequest(RegionSizeStore regionSizes) {
RegionSpaceUseReportRe... | 3.68 |
pulsar_BaseContext_getStateStore | /**
* Get the state store with the provided store name.
*
* @param tenant the state tenant name
* @param ns the state namespace name
* @param name the state store name
* @param <X> the type of interface of the store to return
* @return the state store instance.
*
* @throws ClassCastException if the return type... | 3.68 |
hadoop_AbstractS3ACommitter_loadAndAbort | /**
* Load a pendingset file and abort all of its contents.
* Invoked within a parallel run; the commitContext thread
* pool is already busy/possibly full, so do not
* execute work through the same submitter.
* @param commitContext context to commit through
* @param activeCommit commit state
* @param status stat... | 3.68 |
hbase_HFileWriterImpl_checkBlockBoundary | /**
* At a block boundary, write all the inline blocks and opens new block.
*/
protected void checkBlockBoundary() throws IOException {
boolean shouldFinishBlock = false;
// This means hbase.writer.unified.encoded.blocksize.ratio was set to something different from 0
// and we should use the encoding ratio
if... | 3.68 |
flink_ViewUpdater_notifyOfAddedView | /**
* Notifies this ViewUpdater of a new metric that should be regularly updated.
*
* @param view metric that should be regularly updated
*/
public void notifyOfAddedView(View view) {
synchronized (lock) {
toAdd.add(view);
}
} | 3.68 |
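The `toAdd` set is presumably drained by a periodic pass that also calls Flink's `View#update()`; a minimal sketch of such a loop (field names besides the lock are assumptions):

```java
private void updateViews() {
    synchronized (lock) {
        views.addAll(toAdd);   // pick up views registered since the last pass
        toAdd.clear();
        for (View view : views) {
            view.update();     // let each view recompute its derived value, e.g. a rate
        }
    }
}
```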
flink_TableFactoryService_filterByFactoryClass | /** Filters factories with matching context by factory class. */
@SuppressWarnings("unchecked")
private static <T> List<T> filterByFactoryClass(
Class<T> factoryClass,
Map<String, String> properties,
List<TableFactory> foundFactories) {
List<TableFactory> classFactories =
foundF... | 3.68 |
flink_ExecutionConfig_disableForceKryo | /** Disable use of Kryo serializer for all POJOs. */
public void disableForceKryo() {
setForceKryo(false);
} | 3.68 |
zxing_CameraManager_stopPreview | /**
* Tells the camera to stop drawing preview frames.
*/
public synchronized void stopPreview() {
if (autoFocusManager != null) {
autoFocusManager.stop();
autoFocusManager = null;
}
if (camera != null && previewing) {
camera.getCamera().stopPreview();
previewCallback.setHandler(null, 0);
pr... | 3.68 |
zxing_ProductResultParser_parse | // Treat all UPC and EAN variants as UPCs, in the sense that they are all product barcodes.
@Override
public ProductParsedResult parse(Result result) {
BarcodeFormat format = result.getBarcodeFormat();
if (!(format == BarcodeFormat.UPC_A || format == BarcodeFormat.UPC_E ||
format == BarcodeFormat.EAN_8 || f... | 3.68 |
framework_Flash_removeParameter | /**
* Removes an object parameter from the list.
*
* @param name
* the name of the parameter to remove.
*/
public void removeParameter(String name) {
if (getState().embedParams == null) {
return;
}
getState().embedParams.remove(name);
requestRepaint();
} | 3.68 |
hbase_ActiveMasterManager_handleMasterNodeChange | /**
* Handle a change in the master node. Doesn't matter whether this was called from a nodeCreated
* or nodeDeleted event because there are no guarantees that the current state of the master node
* matches the event at the time of our next ZK request.
* <p>
* Uses the watchAndCheckExists method which watches the ... | 3.68 |
flink_TieredStorageMemoryManagerImpl_recycleBuffer | /** Note that this method may be called by the netty thread. */
private void recycleBuffer(Object owner, MemorySegment buffer) {
bufferPool.recycle(buffer);
decNumRequestedBuffer(owner);
} | 3.68 |
open-banking-gateway_PaymentAccessFactory_paymentForPsuAndAspsp | /**
* Create a {@code PaymentAccess} object that is similar to a consent, facing the PSU/Fintech user and ASPSP pair.
* @param psu Payee/authorizer of this payment
* @param aspsp ASPSP/Bank that is going to perform the payment
* @param session Session that identifies the payment.
* @return Payment context to authorize
... | 3.68 |
morf_Function_countDistinct | /**
* Helper method to create an instance of the "count(distinct)" SQL function.
*
* @param field the field to evaluate in the count function.
*
* @return an instance of a count function
*/
public static Function countDistinct(AliasedField field) {
return new Function(FunctionType.COUNT_DISTINCT, field);
} | 3.68 |
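A hypothetical usage; the `field(...)` helper (from morf's SqlUtils) and the column name are assumptions:

```java
// renders as COUNT(DISTINCT customerId) in the generated SQL
Function distinctCustomers = Function.countDistinct(field("customerId"));
```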
framework_InMemoryDataProviderHelpers_filteringByCaseInsensitiveString | /**
* Wraps a given data provider so that its filter tests the given predicate
* with the lower case string provided by the given value provider.
*
* @param dataProvider
* the data provider to wrap
* @param valueProvider
* the value provider for providing string values to filter
* @param p... | 3.68 |
framework_FocusableComplexPanel_addKeyDownHandler | /*
* (non-Javadoc)
*
* @see
* com.google.gwt.event.dom.client.HasKeyDownHandlers#addKeyDownHandler(
* com.google.gwt.event.dom.client.KeyDownHandler)
*/
@Override
public HandlerRegistration addKeyDownHandler(KeyDownHandler handler) {
return addDomHandler(handler, KeyDownEvent.getType());
} | 3.68 |
hbase_RegionCoprocessorHost_preMemStoreCompactionCompactScannerOpen | /**
* Invoked before creating a StoreScanner for in-memory compaction.
*/
public ScanInfo preMemStoreCompactionCompactScannerOpen(HStore store) throws IOException {
CustomizedScanInfoBuilder builder = new CustomizedScanInfoBuilder(store.getScanInfo());
execOperation(coprocEnvironments.isEmpty() ? null : new RegionObs... | 3.68 |
framework_AbstractSelect_isNewItemsAllowed | /**
* Does the select allow adding new options by the user? If true, the new
* options can be added to the Container. The text entered by the user is
* used as id. Note that data-source must allow adding new items.
*
* @return True if additions are allowed.
*/
public boolean isNewItemsAllowed() {
return allow... | 3.68 |
zxing_OpenCameraInterface_open | /**
* Opens the requested camera with {@link Camera#open(int)}, if one exists.
*
* @param cameraId camera ID of the camera to use. A negative value
* or {@link #NO_REQUESTED_CAMERA} means "no preference", in which case a rear-facing
* camera is returned if possible or else any camera
* @return handle to {@link ... | 3.68 |
cron-utils_CronDefinitionBuilder_quartz | /**
* Creates CronDefinition instance matching Quartz specification.
*
* <p>The cron expression is expected to be a string comprised of 6 or 7
* fields separated by white space. Fields can contain any of the allowed
* values, along with various combinations of the allowed special characters
* for that field. The ... | 3.68 |
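A typical way to use the resulting definition with cron-utils' parser; the expression itself is illustrative:

```java
import com.cronutils.model.Cron;
import com.cronutils.model.CronType;
import com.cronutils.model.definition.CronDefinition;
import com.cronutils.model.definition.CronDefinitionBuilder;
import com.cronutils.parser.CronParser;

CronDefinition quartz = CronDefinitionBuilder.instanceDefinitionFor(CronType.QUARTZ);
CronParser parser = new CronParser(quartz);
Cron cron = parser.parse("0 0 12 * * ?"); // 6 fields: fire at 12:00 every day
cron.validate();
```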
pulsar_PulsarClientException_getPreviousExceptions | /**
* Get the collection of previous exceptions which have caused retries
* for this operation.
*
* @return a collection of exceptions, ordered as they occurred
*/
public Collection<Throwable> getPreviousExceptions() {
return this.previous;
} | 3.68 |
hbase_MetricSampleQuantiles_compress | /**
* Try to remove extraneous items from the set of sampled items. This checks if an item is
* unnecessary based on the desired error bounds, and merges it with the adjacent item if it is.
*/
private void compress() {
if (samples.size() < 2) {
return;
}
ListIterator<SampleItem> it = samples.listIterator(... | 3.68 |
hbase_StoreFileTrackerValidationUtils_checkForCreateTable | /**
* Pre check when creating a new table.
* <p/>
* For now, only make sure that we do not use {@link Trackers#MIGRATION} for newly created tables.
* @throws IOException when there are check errors, the upper layer should fail the
* {@code CreateTableProcedure}.
*/
public static void checkForC... | 3.68 |
flink_AsyncWaitOperator_timerTriggered | /** Rewrite the timeout process to deal with retry state. */
private void timerTriggered() throws Exception {
if (!resultHandler.completed.get()) {
// cancel delayed retry timer first
cancelRetryTimer();
// force reset retryAwaiting to prevent the handler to trigger retry unnecessarily
... | 3.68 |
hadoop_CacheStats_roundUpPageSize | /**
* Round up to the OS page size.
*/
long roundUpPageSize(long count) {
return usedBytesCount.rounder.roundUp(count);
} | 3.68 |
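The rounder itself is hidden behind `usedBytesCount`; the usual integer arithmetic for rounding up to a page boundary looks like this (a generic sketch, not the Hadoop implementation):

```java
static long roundUp(long count, long pageSize) {
    // add pageSize - 1 so any partial page spills into the next one
    return ((count + pageSize - 1) / pageSize) * pageSize;
}
// roundUp(5000, 4096) == 8192; roundUp(4096, 4096) == 4096
```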
hbase_ClusterStatusPublisher_getDeadServers | /**
* Get the servers which died since a given timestamp. Protected because it can be subclassed by
* the tests.
*/
protected List<Pair<ServerName, Long>> getDeadServers(long since) {
if (master.getServerManager() == null) {
return Collections.emptyList();
}
return master.getServerManager().getDeadServers... | 3.68 |
flink_NetUtils_getAvailablePort | /**
* Find a non-occupied port.
*
* @return A non-occupied port.
*/
public static Port getAvailablePort() {
for (int i = 0; i < 50; i++) {
try (ServerSocket serverSocket = new ServerSocket(0)) {
int port = serverSocket.getLocalPort();
if (port != 0) {
FileLock fil... | 3.68 |
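The core trick, minus the file-lock bookkeeping visible in the truncated snippet, is binding to port 0 so the OS assigns a free ephemeral port (a simplified sketch):

```java
import java.io.IOException;
import java.net.ServerSocket;

static int findFreePort() throws IOException {
    try (ServerSocket socket = new ServerSocket(0)) {
        return socket.getLocalPort(); // OS-assigned free port
    }
}
```

Note the inherent race: the port can be re-taken between closing the socket and binding to it again, which is presumably what the file lock in the real code mitigates.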
hadoop_EmptyIOStatisticsContextImpl_snapshot | /**
* Create a new empty snapshot.
* A new one is always created for isolation.
*
* @return a statistics snapshot
*/
@Override
public IOStatisticsSnapshot snapshot() {
return new IOStatisticsSnapshot();
} | 3.68 |
flink_SortMergeResultPartitionReadScheduler_release | /**
* Releases this read scheduler and returns a {@link CompletableFuture} which will be completed
* when all resources are released.
*/
CompletableFuture<?> release() {
List<SortMergeSubpartitionReader> pendingReaders;
synchronized (lock) {
if (isReleased) {
return releaseFuture;
... | 3.68 |
rocketmq-connect_Base64Util_base64Decode | /**
 * Decodes a base64 string.
 *
 * @param in the base64 string to decode
 * @return the decoded bytes, or null if the input is empty
 */
public static byte[] base64Decode(String in) {
if (StringUtils.isEmpty(in)) {
return null;
}
return Base64.getDecoder().decode(in);
} | 3.68 |
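Usage, for illustration:

```java
byte[] bytes = Base64Util.base64Decode("aGVsbG8="); // the bytes of "hello"
byte[] none  = Base64Util.base64Decode("");         // null: empty input short-circuits
```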
hadoop_TimelineEntity_getStartTime | /**
* Get the start time of the entity
*
* @return the start time of the entity
*/
@XmlElement(name = "starttime")
public Long getStartTime() {
return startTime;
} | 3.68 |
hadoop_TimelineEntity_setRelatedEntities | /**
* Set the related entity map to the given map of related entities
*
* @param relatedEntities
* a map of related entities
*/
public void setRelatedEntities(
Map<String, Set<String>> relatedEntities) {
this.relatedEntities = TimelineServiceHelper.mapCastToHashMap(
relatedEntities);
} | 3.68 |
flink_MetricRegistryConfiguration_fromConfiguration | /**
* Create a metric registry configuration object from the given {@link Configuration}.
*
* @param configuration to generate the metric registry configuration from
* @param maximumFrameSize the maximum message size that the RPC system supports
* @return Metric registry configuration generated from the configurat... | 3.68 |
druid_DruidAbstractDataSource_setInitExceptionThrow | /**
* @since 1.1.11
*/
public void setInitExceptionThrow(boolean initExceptionThrow) {
this.initExceptionThrow = initExceptionThrow;
} | 3.68 |
framework_TabSheet_addSelectedTabChangeListener | /**
* Adds a tab selection listener.
*
* @see Registration
*
* @param listener
* the Listener to be added, not null
* @return a registration object for removing the listener
* @since 8.0
*/
public Registration addSelectedTabChangeListener(
SelectedTabChangeListener listener) {
return add... | 3.68 |
pulsar_NonPersistentSubscription_delete | /**
* Delete the subscription by closing and deleting its managed cursor. Handle unsubscribe call from admin layer.
*
* @param closeIfConsumersConnected
*            Flag indicating whether to explicitly close connected consumers before trying to delete the subscription. If
* any consumer is connected to it and ... | 3.68 |
hudi_HoodieBackedTableMetadataWriter_getRecordIndexUpdates | /**
* Return records that represent update to the record index due to write operation on the dataset.
*
* @param writeStatuses {@code WriteStatus} from the write operation
*/
private HoodieData<HoodieRecord> getRecordIndexUpdates(HoodieData<WriteStatus> writeStatuses) {
HoodiePairData<String, HoodieRecordDelegate... | 3.68 |
morf_DatabaseMetaDataProvider_createIndexFrom | /**
* Creates an index from given info.
*
* @param indexName The name of the index.
* @param isUnique Whether to mark this index as unique.
* @param columnNames The column names for the index.
* @return An {@link IndexBuilder} for the index.
*/
protected static Index createIndexFrom(RealName indexName, boolean i... | 3.68 |
flink_FlinkContainersSettings_getLogProperties | /**
* Gets logging properties.
*
* @return The logging properties.
*/
public Properties getLogProperties() {
return logProperties;
} | 3.68 |
hudi_HoodieSyncClient_getPartitionEvents | /**
* Iterate over the storage partitions and find if there are any new partitions that need to be added or updated.
* Generate a list of PartitionEvent based on the changes required.
*/
public List<PartitionEvent> getPartitionEvents(List<Partition> partitionsInMetastore,
... | 3.68 |
hbase_MasterObserver_postSnapshot | /**
* Called after the snapshot operation has been requested. Called as part of snapshot RPC call.
* @param ctx the environment to interact with the framework and master
* @param snapshot the SnapshotDescriptor for the snapshot
* @param tableDescriptor the TableDescriptor of the table to snapshot... | 3.68 |
morf_ExistingViewHashLoader_loadViewHashes | /**
* Loads the hashes for the deployed views, or empty if the hashes cannot be loaded
* (e.g. if the deployed views table does not exist in the existing schema).
*
* @param schema The existing database schema.
* @return The deployed view hashes.
*/
Optional<Map<String, String>> loadViewHashes(Schema schema) {
... | 3.68 |
flink_FileInputFormat_initDefaultsFromConfiguration | /**
* Initialize defaults for input format. Needs to be a static method because it is configured
* for local cluster execution.
*
* @param configuration The configuration to load defaults from
*/
private static void initDefaultsFromConfiguration(Configuration configuration) {
final long to =
config... | 3.68 |
dubbo_SimpleReferenceCache_get | /**
* Check and return existing ReferenceConfig and its corresponding proxy instance.
*
* @param type service interface class
* @param <T> service interface type
* @return the existing proxy instance of the same interface definition
*/
@Override
@SuppressWarnings("unchecked")
public <T> T get(Class<T> type) {
... | 3.68 |
flink_SinkModifyOperation_getTargetColumns | /** Returns null when no column list is specified. */
@Nullable
public int[][] getTargetColumns() {
return targetColumns;
} | 3.68 |
flink_KubernetesUtils_resolveDNSPolicy | /**
* Resolves the defined DNS policy value. Returns DNS_POLICY_HOSTNETWORK if host network is
* enabled; otherwise, checks whether a DNS policy is overridden in the pod template.
*
* @param dnsPolicy DNS policy defined in pod template spec
* @param hostNetworkEnabled Host network enabled or not
* @return the resolved v... | 3.68 |
flink_BlobUtils_getStorageLocationPath | /**
* Returns the path for the given blob key.
*
* <p>The returned path can be used with the (local or HA) BLOB store file system back-end for
* recovery purposes and follows the same scheme as {@link #getStorageLocation(File, JobID,
* BlobKey)}.
*
* @param storageDir storage directory used be the BLOB service
... | 3.68 |
flink_BoundedBlockingSubpartitionReader_notifyDataAvailable | /**
* This method is actually only meaningful for the {@link BoundedBlockingSubpartitionType#FILE}.
*
* <p>For the other types the {@link #nextBuffer} can never be set to null, so there is no need
* to notify availability via this method. But the implementation is also compatible with other
* types even though called... | 3.68 |
hadoop_NativeTaskOutputFiles_getSpillIndexFile | /**
* Return a local map spill index file created earlier
*
* @param spillNumber the number
*/
public Path getSpillIndexFile(int spillNumber) throws IOException {
String path = String
.format(SPILL_INDEX_FILE_FORMAT_STRING, id, TASKTRACKER_OUTPUT, spillNumber);
return lDirAlloc.getLocalPathToRead(path, c... | 3.68 |
flink_CheckpointProperties_discardOnSubsumed | /**
* Returns whether the checkpoint should be discarded when it is subsumed.
*
* <p>A checkpoint is subsumed when the maximum number of retained checkpoints is reached and a
* more recent checkpoint completes.
*
* @return <code>true</code> if the checkpoint should be discarded when it is subsumed; <code>
* ... | 3.68 |
hbase_ZKTableArchiveClient_getArchiveZNode | /**
* @param conf conf to read for the base archive node
* @param zooKeeper zookeeper to used for building the full path
* @return the znode for long-term archival of a table
*/
public static String getArchiveZNode(Configuration conf, ZKWatcher zooKeeper) {
return ZNodePaths.joinZNode(zooKeeper.getZN... | 3.68 |
framework_DragAndDropWrapper_setHTML5DataFlavor | /**
* Sets the data flavors available when the DragAndDropWrapper is used to start
* HTML5 style drags. Most commonly the "Text" flavor should be set.
* Multiple data types can be set.
*
* @param type
* the string identifier of the drag "payload". E.g. "Text" or
* "text/html"
* @param value
... | 3.68 |
flink_SupportsRowLevelUpdate_getRowLevelUpdateMode | /**
* The planner will rewrite the update statement to a query based on the {@link
* RowLevelUpdateMode}, keeping the update query unchanged by default (in `UPDATED_ROWS`
* mode), or changing the query to union the updated rows and the other rows (in `ALL_ROWS`
* mode).
*
* <p>Take the following SQL as an example:
*
... | 3.68 |
framework_VCalendar_areDatesEqualToSecond | /**
* Are the dates equal (uses second resolution).
*
* @param date1
*            The first date to compare
* @param date2
* The second date to compare
* @return true if the dates are equal to second resolution
*/
@SuppressWarnings("deprecation")
public static boolean areDatesEqualToSecond(Date date1, Date date2) {
return date1.getYear() == date... | 3.68 |
morf_ConcatenatedField_toString | /**
*
* @see org.alfasoftware.morf.sql.element.AliasedField#toString()
*/
@Override
public String toString() {
return "CONCAT(" + StringUtils.join(fields, ", ") + ")" + super.toString();
} | 3.68 |
hbase_ZKMainServer_hasCommandLineArguments | /**
* @param args the arguments to check for command-line arguments
* @return True if command-line arguments were passed.
*/
private static boolean hasCommandLineArguments(final String[] args) {
if (hasServer(args)) {
if (args.length < 2) {
throw new IllegalStateException("-server param but no value");
... | 3.68 |
framework_Flash_getStandby | /**
* Returns standby.
*
* @since 7.4.1
* @return Standby string.
*/
public String getStandby() {
return getState(false).standby;
} | 3.68 |
hadoop_OBSFileSystem_isReadTransformEnabled | /**
* Gets the read transform switch state.
*
* @return is read transform enabled
*/
boolean isReadTransformEnabled() {
return readTransformEnable;
} | 3.68 |
hudi_HoodieFlinkCopyOnWriteTable_handleUpdate | // -------------------------------------------------------------------------
// Used for compaction
// -------------------------------------------------------------------------
@Override
public Iterator<List<WriteStatus>> handleUpdate(
String instantTime, String partitionPath, String fileId,
Map<String, Hoodie... | 3.68 |
flink_DelimitedInputFormat_configure | /**
* Configures this input format by reading the path to the file from the configuration and the
* string that defines the record delimiter.
*
* @param parameters The configuration object to read the parameters from.
*/
@Override
public void configure(Configuration parameters) {
super.configure(parameters);
... | 3.68 |
hbase_HBaseCommonTestingUtility_randomPort | /**
* Returns a random port. These ports cannot be registered with IANA and are intended for
* dynamic allocation (see http://bit.ly/dynports).
*/
private int randomPort() {
return MIN_RANDOM_PORT
+ ThreadLocalRandom.current().nextInt(MAX_RANDOM_PORT - MIN_RANDOM_PORT);
} | 3.68 |
hadoop_JsonSerDeser_save | /**
* Save an instance to a file
* @param instance instance to save
* @param file file
* @throws IOException
*/
public void save(T instance, File file) throws
IOException {
writeJsonAsBytes(instance, new FileOutputStream(file.getAbsoluteFile()));
} | 3.68 |
morf_SqlServerDialect_getColumnRepresentation | /**
* @see org.alfasoftware.morf.jdbc.SqlDialect#getColumnRepresentation(org.alfasoftware.morf.metadata.DataType,
* int, int)
*/
@Override
protected String getColumnRepresentation(DataType dataType, int width, int scale) {
if (needsCollation(dataType)) {
return String.format("%s %s", getInternalColumnRepr... | 3.68 |
flink_IntervalJoinOperator_processElement1 | /**
* Process a {@link StreamRecord} from the left stream. Whenever an {@link StreamRecord} arrives
* at the left stream, it will get added to the left buffer. Possible join candidates for that
* element will be looked up from the right buffer and if the pair lies within the user defined
* boundaries, it gets passe... | 3.68 |
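Conceptually, the lookup into the right buffer is a range query over timestamps. A self-contained sketch of that membership test, with buffer layout and cleanup timers simplified away (all names are assumptions):

```java
import java.util.List;
import java.util.NavigableMap;
import java.util.function.BiConsumer;

static <L, R> void joinCandidates(L left, long leftTs,
        NavigableMap<Long, List<R>> rightBuffer,
        long lowerBound, long upperBound, BiConsumer<L, R> out) {
    // only right elements with timestamp in [leftTs + lowerBound, leftTs + upperBound] can join
    rightBuffer.subMap(leftTs + lowerBound, true, leftTs + upperBound, true)
            .values()
            .forEach(bucket -> bucket.forEach(right -> out.accept(left, right)));
}
```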
framework_AbstractSplitPanelConnector_handleSingleComponentMove | /**
* Handles the case when there is only one child component and that
* component is moved between first <-> second. This does not trigger a
* hierarchy change event as the list of children contains the same
* component in both cases.
*/
private void handleSingleComponentMove() {
if (getChildComponents().size... | 3.68 |
framework_LayoutDemo_fillLayout | /**
* Adds multiple demo components to the given layout.
*
* @param layout
* where components are added
* @param numberOfComponents
* to add
*/
private void fillLayout(Layout layout, int numberOfComponents) {
for (int i = 1; i <= numberOfComponents; i++) {
layout.addComponent(getExamp... | 3.68 |
streampipes_Protocols_mqtt | /**
* Defines the transport protocol MQTT used by a data stream at runtime.
*
* @param mqttHost The hostname of any MQTT broker
* @param mqttPort The port of any MQTT broker
* @param topic The topic identifier
* @return The {@link org.apache.streampipes.model.grounding.MqttTransportProtocol}
* containing URL ... | 3.68 |
hbase_MergeTableRegionsProcedure_preMergeRegions | /**
* Pre merge region action
* @param env MasterProcedureEnv
**/
private void preMergeRegions(final MasterProcedureEnv env) throws IOException {
final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
if (cpHost != null) {
cpHost.preMergeRegionsAction(regionsToMerge, getUser());
}
// TODO: ... | 3.68 |
hbase_CatalogJanitor_getLastReport | /** Returns the last published Report that comes from the last successful scan of hbase:meta. */
public CatalogJanitorReport getLastReport() {
return this.lastReport;
} | 3.68 |
hbase_MetaTableAccessor_makeDeleteFromRegionInfo | /**
* Generates and returns a Delete containing the region info for the catalog table
*/
public static Delete makeDeleteFromRegionInfo(RegionInfo regionInfo, long ts) {
if (regionInfo == null) {
throw new IllegalArgumentException("Can't make a delete for null region");
}
if (regionInfo.getReplicaId() != Reg... | 3.68 |
hbase_RestCsrfPreventionFilter_getFilterParams | /**
* Constructs a mapping of configuration properties to be used for filter initialization. The
* mapping includes all properties that start with the specified configuration prefix. Property
* names in the mapping are trimmed to remove the configuration prefix.
* @param conf configuration to read
* @param c... | 3.68 |
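A minimal sketch of that prefix-trim extraction; Hadoop's Configuration is iterable over its entries, and the variable names are assumptions:

```java
import java.util.HashMap;
import java.util.Map;

Map<String, String> filterParams = new HashMap<>();
for (Map.Entry<String, String> entry : conf) {
    String name = entry.getKey();
    if (name.startsWith(confPrefix)) {
        // strip the prefix so the filter sees plain parameter names
        filterParams.put(name.substring(confPrefix.length()), entry.getValue());
    }
}
```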
flink_DataViewUtils_createDistinctViewDataType | /** Creates a special {@link DataType} for DISTINCT aggregates. */
public static DataType createDistinctViewDataType(
DataType keyDataType, int filterArgs, int filterArgsLimit) {
final DataType valueDataType;
if (filterArgs <= filterArgsLimit) {
valueDataType = DataTypes.BIGINT().notNull();
... | 3.68 |
flink_SegmentsUtil_hash | /**
* hash segments to int.
*
* @param segments Source segments.
* @param offset Source segments offset.
* @param numBytes the number bytes to hash.
*/
public static int hash(MemorySegment[] segments, int offset, int numBytes) {
if (inFirstSegment(segments, offset, numBytes)) {
return MurmurHashUtil.h... | 3.68 |
hadoop_StartupProgressServlet_writeNumberFieldIfDefined | /**
* Writes a JSON number field only if the value is defined.
*
* @param json JsonGenerator to receive output
* @param key String key to put
* @param value long value to put
* @throws IOException if there is an I/O error
*/
private static void writeNumberFieldIfDefined(JsonGenerator json, String key,
long ... | 3.68 |
graphhopper_GraphHopper_setAllowWrites | /**
* Specifies if it is allowed for GraphHopper to write. E.g. for read only filesystems it is not
* possible to create a lock file and so we can avoid write locks.
*/
public GraphHopper setAllowWrites(boolean allowWrites) {
this.allowWrites = allowWrites;
return this;
} | 3.68 |
hbase_ServerManager_getVersionNumber | /**
* May return 0 when server is not online.
*/
public int getVersionNumber(ServerName serverName) {
ServerMetrics serverMetrics = onlineServers.get(serverName);
return serverMetrics != null ? serverMetrics.getVersionNumber() : 0;
} | 3.68 |
flink_OperationUtils_indent | /**
* Increases the indentation of the description string of a child {@link Operation}. The input can
* already contain indentation. This will increase all the indentations by one level.
*
* @param item result of {@link Operation#asSummaryString()}
* @return string with increased indentation
*/
static String indent(Str... | 3.68 |
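The snippet is cut off, but the described behavior amounts to prefixing every line with one more indentation unit; a plausible sketch (the four-space unit is an assumption):

```java
static String indent(String item) {
    // indent the first line, then every subsequent line after a newline
    return "    " + item.replace("\n", "\n    ");
}
```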
hmily_HmilyRepositoryFacade_removeHmilyParticipantUndo | /**
* Remove hmily participant undo.
*
* @param undoId the undo id
*/
public void removeHmilyParticipantUndo(final Long undoId) {
if (hmilyConfig.isPhyDeleted()) {
checkRows(hmilyRepository.removeHmilyParticipantUndo(undoId));
} else {
updateHmilyParticipantUndoStatus(undoId, HmilyActionEnum... | 3.68 |
flink_RestServerEndpointConfiguration_getMaxContentLength | /**
* Returns the max content length that the REST server endpoint could handle.
*
* @return max content length that the REST server endpoint could handle
*/
public int getMaxContentLength() {
return maxContentLength;
} | 3.68 |
hudi_HoodieWriteHandle_doWrite | /**
* Perform the actual writing of the given record into the backing file.
*/
protected void doWrite(HoodieRecord record, Schema schema, TypedProperties props) {
// NO_OP
} | 3.68 |
flink_WindowedStream_trigger | /** Sets the {@code Trigger} that should be used to trigger window emission. */
@PublicEvolving
public WindowedStream<T, K, W> trigger(Trigger<? super T, ? super W> trigger) {
builder.trigger(trigger);
return this;
} | 3.68 |
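A hypothetical pipeline that swaps the default time trigger for a count-based one; `TumblingProcessingTimeWindows` and `CountTrigger` are standard Flink classes, while the source and reduce function are illustrative:

```java
stream.keyBy(event -> event.getKey())
      .window(TumblingProcessingTimeWindows.of(Time.seconds(10)))
      .trigger(CountTrigger.of(100)) // fire every 100 elements instead of at window end
      .reduce((a, b) -> a.merge(b));
```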
flink_DateTimeUtils_parseFraction | /**
* Parses a fraction, multiplying the first character by {@code multiplier}, the second
* character by {@code multiplier / 10}, the third character by {@code multiplier / 100}, and so
* forth.
*
* <p>For example, {@code parseFraction("1234", 100)} yields {@code 123}.
*/
private static int parseFraction(String ... | 3.68 |
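A direct sketch of the described algorithm (not necessarily Flink's exact implementation):

```java
static int parseFraction(String v, int multiplier) {
    int result = 0;
    for (int i = 0; i < v.length() && multiplier > 0; i++) {
        // multiply the current digit by the multiplier, then shrink it tenfold
        result += (v.charAt(i) - '0') * multiplier;
        multiplier /= 10;
    }
    return result;
}
// parseFraction("1234", 100) -> 1*100 + 2*10 + 3*1 = 123
```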
pulsar_Topics_deletePartitionedTopic | /**
* @see Topics#deletePartitionedTopic(String, boolean, boolean)
* IMPORTANT NOTICE: the application is not able to connect to the topic (delete then re-create with the same name) again
* if schema auto uploading is disabled. Besides, users should use the truncate method to clean up
* data of the topic instead ... | 3.68 |