| name (string, length 12–178) | code_snippet (string, length 8–36.5k) | score (float64, range 3.26–3.68) |
|---|---|---|
hbase_ColumnSchemaModel___setBlockcache | /**
* @param value the desired value of the BLOCKCACHE attribute
*/
public void __setBlockcache(boolean value) {
attrs.put(BLOCKCACHE, Boolean.toString(value));
} | 3.68 |
framework_FocusUtil_setTabIndex | /**
* Sets the widget's position in the tab index. If more than one widget has
* the same tab index, each such widget will receive focus in an arbitrary
* order. Setting the tab index to <code>-1</code> will cause the widget to
* be removed from the tab order.
*
* @param focusable
* The widget
* @par... | 3.68 |
flink_HiveParserTypeCheckProcFactory_getDefaultExprProcessor | /** Factory method to get DefaultExprProcessor. */
public HiveParserTypeCheckProcFactory.DefaultExprProcessor getDefaultExprProcessor() {
return new HiveParserTypeCheckProcFactory.DefaultExprProcessor();
} | 3.68 |
flink_HiveServer2Endpoint_waitUntilOperationIsTerminated | /**
* Similar solution comparing to the {@code
* org.apache.hive.jdbc.HiveStatement#waitForOperationToComplete}.
*
* <p>The better solution is to introduce an interface similar to {@link TableResult#await()}.
*/
private void waitUntilOperationIsTerminated(
SessionHandle sessionHandle, OperationHandle opera... | 3.68 |
morf_UpdateStatementBuilder_getFields | /**
* Gets the list of fields
*
* @return the fields
*/
List<AliasedField> getFields() {
return fields;
} | 3.68 |
hadoop_RollingWindowAverage_cleanupOldPoints | /**
* Clean up points that don't count any more (are before our
* rolling window) from our current queue of points.
*/
private void cleanupOldPoints() {
Date cutoffTime = new Date(new Date().getTime() - windowSizeMs);
while (!currentPoints.isEmpty()
&& currentPoints.peekFirst().getEventTime().before(cutof... | 3.68 |
hadoop_LoggedJob_getJobProperties | /**
* Get the configuration properties of the job.
*/
public JobProperties getJobProperties() {
return jobProperties;
} | 3.68 |
AreaShop_SignLinkerManager_isInSignLinkMode | /**
* Check if the player is in sign linking mode.
* @param player The player to check
* @return true if the player is in sign linking mode, otherwise false
*/
public boolean isInSignLinkMode(Player player) {
return signLinkers.containsKey(player.getUniqueId());
} | 3.68 |
framework_StaticSection_removeColumn | /**
* Removes the cell corresponding to the given column id.
*
* @param columnId
* the id of the column whose cell to remove
*/
public void removeColumn(String columnId) {
for (ROW row : rows) {
row.removeCell(columnId);
}
markAsDirty();
} | 3.68 |
shardingsphere-elasticjob_ShardingService_getShardingItems | /**
* Get sharding items.
*
* @param jobInstanceId job instance ID
* @return sharding items
*/
public List<Integer> getShardingItems(final String jobInstanceId) {
JobInstance jobInstance = YamlEngine.unmarshal(jobNodeStorage.getJobNodeData(instanceNode.getInstancePath(jobInstanceId)), JobInstance.class);
i... | 3.68 |
hadoop_TimelinePutResponse_setEntityType | /**
* Set the entity type
*
* @param entityType
* the entity type
*/
public void setEntityType(String entityType) {
this.entityType = entityType;
} | 3.68 |
graphhopper_GHPoint_toGeoJson | /**
* Attention: geoJson is LON,LAT
*/
public Double[] toGeoJson() {
return new Double[]{lon, lat};
} | 3.68 |
hadoop_AbfsInputStream_seek | /**
* Seek to given position in stream.
* @param n position to seek to
* @throws IOException if there is an error
* @throws EOFException if attempting to seek past end of file
*/
@Override
public synchronized void seek(long n) throws IOException {
LOG.debug("requested seek to position {}", n);
if (closed) {
... | 3.68 |
framework_AbstractInMemoryContainer_hasContainerFilters | /**
* Returns true if any filters have been applied to the container.
*
* @return true if the container has filters applied, false otherwise
* @since 7.1
*/
protected boolean hasContainerFilters() {
return !getContainerFilters().isEmpty();
} | 3.68 |
hudi_BufferedRandomAccessFile_spaceAvailableInBuffer | /**
* @return - whether space is available at the end of the buffer.
*/
private boolean spaceAvailableInBuffer() {
return (this.isEOF && (this.validLastPosition < this.endPosition()));
} | 3.68 |
dubbo_DubboBeanUtils_registerCommonBeans | /**
* Register the common beans
*
* @param registry {@link BeanDefinitionRegistry}
* @see ReferenceAnnotationBeanPostProcessor
* @see DubboConfigDefaultPropertyValueBeanPostProcessor
* @see DubboConfigAliasPostProcessor
*/
static void registerCommonBeans(BeanDefinitionRegistry registry) {
registerInfrastruc... | 3.68 |
morf_Function_dateToYyyymmdd | /**
* Helper method to create an instance of the "DATE_TO_YYYYMMDD" SQL function.
* {@code expression} must result in a string.
*
* @see Cast
* @param expression the expression to evaluate
* @return an instance of the DATE_TO_YYYYMMDD function
*/
public static Function dateToYyyymmdd(AliasedField expression) {
... | 3.68 |
hbase_HRegion_getBaseConf | /**
* A split takes the config from the parent region & passes it to the daughter region's
* constructor. If 'conf' was passed, you would end up using the HTD of the parent region in
* addition to the new daughter HTD. Pass 'baseConf' to the daughter regions to avoid this tricky
* dedupe problem.
* @return Configu... | 3.68 |
flink_TableConfig_addConfiguration | /**
* Adds the given key-value configuration to the underlying application-specific configuration.
* It overwrites existing keys.
*
* @param configuration key-value configuration to be added
*/
public void addConfiguration(Configuration configuration) {
Preconditions.checkNotNull(configuration);
this.confi... | 3.68 |
graphhopper_CHMeasurement_testPerformanceAutomaticNodeOrdering | /**
* Parses a given osm file, contracts the graph and runs random routing queries on it. This is useful to test
* the node contraction heuristics with regards to the performance of the automatic graph contraction (the node
* contraction order determines how many and which shortcuts will be introduced) and the resul... | 3.68 |
hadoop_FileIoProvider_exists | /**
* Check for file existence using {@link File#exists()}.
*
* @param volume target volume. null if unavailable.
* @param f file object.
* @return true if the file exists.
*/
public boolean exists(@Nullable FsVolumeSpi volume, File f) {
final long begin = profilingEventHook.beforeMetadataOp(volume, EXISTS);
... | 3.68 |
framework_FieldBinder_getUnboundFields | /**
* Returns a collection of field names that are not bound.
*
* @return a collection of fields assignable to Component that are not bound
*/
public Collection<String> getUnboundFields() throws FieldBindingException {
List<String> unboundFields = new ArrayList<>();
for (Field f : fieldMap.values()) {
... | 3.68 |
hadoop_SchedulerHealth_getResourcesReleased | /**
* Get the resources released in the last scheduler run.
*
* @return resources released
*/
public Resource getResourcesReleased() {
return getResourceDetails(Operation.RELEASE);
} | 3.68 |
flink_CopyOnWriteSkipListStateMap_totalSize | /** Returns total size of this map, including logically removed state. */
int totalSize() {
return totalSize;
} | 3.68 |
flink_JobResultDeserializer_assertNotEndOfInput | /** Asserts that the provided JsonToken is not null, i.e., not at the end of the input. */
private static void assertNotEndOfInput(
final JsonParser p, @Nullable final JsonToken jsonToken) {
checkState(jsonToken != null, "Unexpected end of input at %s", p.getCurrentLocation());
} | 3.68 |
hbase_RegionGroupingProvider_getStrategy | /**
* instantiate a strategy from a config property. requires conf to have already been set (as well
* as anything the provider might need to read).
*/
RegionGroupingStrategy getStrategy(final Configuration conf, final String key,
final String defaultValue) throws IOException {
Class<? extends RegionGroupingStra... | 3.68 |
hbase_StoreUtils_getSplitPoint | /**
* Gets the mid point of the largest file passed in as split point.
*/
static Optional<byte[]> getSplitPoint(Collection<HStoreFile> storefiles,
CellComparator comparator) throws IOException {
Optional<HStoreFile> largestFile = StoreUtils.getLargestFile(storefiles);
return largestFile.isPresent()
? StoreU... | 3.68 |
framework_VaadinPortlet_getCurrent | /**
* Gets the currently used Vaadin portlet. The current portlet is
* automatically defined when processing requests related to the service
* (see {@link ThreadLocal}) and in {@link VaadinSession#access(Runnable)}
* and {@link UI#access(Runnable)}. In other cases, (e.g. from background
* threads, the current serv... | 3.68 |
flink_QuickSort_fix | /**
* Fix the records into sorted order, swapping when the first record is greater than the second
* record.
*
* @param s paged sortable
* @param pN page number of first record
* @param pO page offset of first record
* @param rN page number of second record
* @param rO page offset of second record
*/
private s... | 3.68 |
framework_BasicEventProvider_containsEvent | /**
* Does this event provider container this event.
*
* @param event
* The event to check for
* @return If this provider has the event then true is returned, else false
*/
public boolean containsEvent(BasicEvent event) {
return eventList.contains(event);
} | 3.68 |
hbase_MemStoreFlusher_getRequeueCount | /**
* @return Count of times {@link #requeue(long)} was called; i.e this is number of times we've
* been requeued.
*/
public int getRequeueCount() {
return this.requeueCount;
} | 3.68 |
framework_AbstractComponent_getIcon | /*
* Gets the component's icon resource. Don't add a JavaDoc comment here, we
* use the default documentation from implemented interface.
*/
@Override
public Resource getIcon() {
return getResource(ComponentConstants.ICON_RESOURCE);
} | 3.68 |
flink_SourceReader_handleSourceEvents | /**
* Handle a custom source event sent by the {@link SplitEnumerator}. This method is called when
* the enumerator sends an event via {@link SplitEnumeratorContext#sendEventToSourceReader(int,
* SourceEvent)}.
*
* <p>This method has a default implementation that does nothing, because most sources do not
* requir... | 3.68 |
querydsl_BeanMap_valueIterator | /**
* Convenience method for getting an iterator over the values.
*
* @return an iterator over the values
*/
public Iterator<Object> valueIterator() {
final Iterator<String> iter = keyIterator();
return new Iterator<Object>() {
@Override
public boolean hasNext() {
return iter.has... | 3.68 |
hadoop_RMContainerTokenSecretManager_activateNextMasterKey | /**
* Activate the new master-key
*/
@Private
public void activateNextMasterKey() {
super.writeLock.lock();
try {
LOG.info("Activating next master key with id: "
+ this.nextMasterKey.getMasterKey().getKeyId());
this.currentMasterKey = this.nextMasterKey;
this.nextMasterKey = null;
} finally ... | 3.68 |
hudi_HoodiePartitionMetadata_readFromFS | /**
* Read out the metadata for this partition.
*/
public void readFromFS() throws IOException {
// first try reading the text format (legacy, currently widespread)
boolean readFile = readTextFormatMetaFile();
if (!readFile) {
// now try reading the base file formats.
readFile = readBaseFormatMetaFile()... | 3.68 |
hbase_Scan_isReversed | /**
* Get whether this scan is a reversed one.
* @return true if backward scan, false if forward(default) scan
*/
public boolean isReversed() {
return reversed;
} | 3.68 |
hbase_KeyValue_getSequenceId | /**
* used to achieve atomic operations in the memstore.
*/
@Override
public long getSequenceId() {
return seqId;
} | 3.68 |
hadoop_SecureableZone_removeRecord | /**
* Removes a record from the Zone.
* @param r The record to be removed
* @see Record
*/
@Override public void removeRecord(Record r) {
if (records == null) {
records = new ArrayList<Record>();
}
super.removeRecord(r);
records.remove(r);
} | 3.68 |
pulsar_PulsarAdminImpl_getServiceUrl | /**
* @return the service HTTP URL that is being used
*/
public String getServiceUrl() {
return serviceUrl;
} | 3.68 |
flink_SqlReplaceTableAs_getFullConstraints | /** Returns the column constraints plus the table constraints. */
public List<SqlTableConstraint> getFullConstraints() {
return SqlConstraintValidator.getFullConstraints(tableConstraints, columnList);
} | 3.68 |
hmily_ZookeeperRepository_hasNext | /**
* Has next boolean.
*
* @return the boolean
*/
public boolean hasNext() {
return index < nodes.length;
} | 3.68 |
flink_StringUtils_concatenateWithAnd | /**
* If both string arguments are non-null, this method concatenates them with ' and '. If only
* one of the arguments is non-null, this method returns the non-null argument. If both
* arguments are null, this method returns null.
*
* @param s1 The first string argument
* @param s2 The second string argument
* ... | 3.68 |
hbase_TransitRegionStateProcedure_serverCrashed | // Should be called with RegionStateNode locked
public void serverCrashed(MasterProcedureEnv env, RegionStateNode regionNode,
ServerName serverName, boolean forceNewPlan) throws IOException {
this.forceNewPlan = forceNewPlan;
if (remoteProc != null) {
// this means we are waiting for the sub procedure, so wak... | 3.68 |
flink_StateTtlConfig_setTtlTimeCharacteristic | /**
* Sets the time characteristic.
*
* @param ttlTimeCharacteristic The time characteristic configures time scale to use for
* ttl.
*/
@Nonnull
public Builder setTtlTimeCharacteristic(
@Nonnull TtlTimeCharacteristic ttlTimeCharacteristic) {
this.ttlTimeCharacteristic = ttlTimeCharacteristic;
r... | 3.68 |
framework_TreeGridConnector_isCollapseAllowed | /**
* Checks if the item can be collapsed.
*
* @param row
* the item row
* @return {@code true} if the item is allowed to be collapsed,
* {@code false} otherwise.
*/
public static boolean isCollapseAllowed(JsonObject row) {
return row.getBoolean(
HierarchicalDataCommunicatorCon... | 3.68 |
hbase_StateMachineProcedure_getCurrentStateId | /**
* This method is used from test code as it cannot be assumed that state transition will happen
* sequentially. Some procedures may skip steps/ states, some may add intermediate steps in
* future.
*/
public int getCurrentStateId() {
return getStateId(getCurrentState());
} | 3.68 |
hbase_Mutation_has | /**
* Private method to determine if this object's familyMap contains the given value assigned to the
* given family, qualifier and timestamp, respecting the 2 boolean arguments.
*/
protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, boolean ignoreTS,
boolean ignoreValue) {
List<Cell> l... | 3.68 |
morf_ParallelQueryHint_toString | /**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return degreeOfParallelism == null ? getClass().getSimpleName() : format("ParallelQueryHint [degreeOfParallelism=%s]", degreeOfParallelism.toString());
} | 3.68 |
flink_CommittableCollector_getCheckpointCommittablesUpTo | /**
* Returns all {@link CheckpointCommittableManager} until the requested checkpoint id.
*
* @param checkpointId counter
* @return collection of {@link CheckpointCommittableManager}
*/
public Collection<? extends CheckpointCommittableManager<CommT>> getCheckpointCommittablesUpTo(
long checkpointId) {
... | 3.68 |
framework_StaticSection_getComponent | /**
* Returns the component displayed in this cell.
*
* @return the component
*/
public Component getComponent() {
if (cellState.type != GridStaticCellType.WIDGET) {
throw new IllegalStateException(
"Cannot fetch Component from a cell with type "
+ cellState.type)... | 3.68 |
hadoop_JsonSerialization_toJson | /**
* Convert an instance to a JSON string.
* @param instance instance to convert
* @return a JSON string description
* @throws JsonProcessingException Json generation problems
*/
public synchronized String toJson(T instance) throws JsonProcessingException {
return mapper.writeValueAsString(instance);
} | 3.68 |
morf_SqlDialect_getSqlForMin | /**
* Converts the min function into SQL.
*
* @param function the function details
* @return a string representation of the SQL
*/
protected String getSqlForMin(Function function) {
return "MIN(" + getSqlFrom(function.getArguments().get(0)) + ")";
} | 3.68 |
hudi_ClusteringCommitSink_validateWriteResult | /**
* Validate actions taken by clustering. In the first implementation, we validate at least one new file is written.
* But we can extend this to add more validation. E.g. number of records read = number of records written etc.
* We can also make these validations in BaseCommitActionExecutor to reuse pre-commit hoo... | 3.68 |
flink_JavaFieldPredicates_annotatedWith | /**
* Match the single Annotation of the {@link JavaField}.
*
* @return A {@link DescribedPredicate} returning true, if the tested {@link JavaField} is
* annotated with the annotation identified by the fully qualified name {@code
* fqAnnotationTypeName}.
*/
public static DescribedPredicate<JavaField> anno... | 3.68 |
framework_VCheckBoxGroup_buildOptions | /*
* Build all the options
*/
public void buildOptions(List<JsonObject> items) {
Roles.getGroupRole().set(getElement());
int i = 0;
int widgetsToRemove = getWidget().getWidgetCount() - items.size();
if (widgetsToRemove < 0) {
widgetsToRemove = 0;
}
List<Widget> remove = new ArrayList<>... | 3.68 |
hibernate-validator_ExecutableMetaData_findParameterMetaData | /**
* Finds the one executable from the underlying hierarchy with parameter
* constraints. If no executable in the hierarchy is parameter constrained,
* the parameter meta data from this builder's base executable is returned.
*
* @return The parameter meta data for this builder's executable.
*/
private List<Param... | 3.68 |
hadoop_PipesPartitioner_getPartition | /**
* If a partition result was set manually, return it. Otherwise, we call
* the Java partitioner.
* @param key the key to partition
* @param value the value to partition
* @param numPartitions the number of reduces
*/
public int getPartition(K key, V value,
int numPartitions) {
Intege... | 3.68 |
hadoop_StagingCommitter_cleanup | /**
* Staging committer cleanup includes calling wrapped committer's
* cleanup method, and removing staging uploads path and all
* destination paths in the final filesystem.
* @param commitContext commit context
* @param suppressExceptions should exceptions be suppressed?
* @throws IOException IO failures if exce... | 3.68 |
hadoop_ServletUtil_parseLongParam | /**
* parseLongParam.
*
* @param request request.
* @param param param.
* @return a long value as passed in the given parameter, throwing
* an exception if it is not present or if it is not a valid number.
* @throws IOException raised on errors performing I/O.
*/
public static long parseLongParam(ServletRequest... | 3.68 |
pulsar_StreamingDataBlockHeaderImpl_toStream | /**
* Get the content of the data block header as InputStream.
* Read out in format:
* [ magic_word -- int ][ block_len -- int ][ first_entry_id -- long] [padding zeros]
*/
@Override
public InputStream toStream() {
ByteBuf out = PulsarByteBufAllocator.DEFAULT.buffer(HEADER_MAX_SIZE, HEADER_MAX_SIZE);
out... | 3.68 |
hudi_OptionsResolver_sortClusteringEnabled | /**
* Returns whether the clustering sort is enabled.
*/
public static boolean sortClusteringEnabled(Configuration conf) {
return !StringUtils.isNullOrEmpty(conf.getString(FlinkOptions.CLUSTERING_SORT_COLUMNS));
} | 3.68 |
hadoop_StepType_getDescription | /**
* Returns step type description.
*
* @return String step type description
*/
public String getDescription() {
return description;
} | 3.68 |
rocketmq-connect_AbstractConfigManagementService_putTaskConfigs | /**
* remove and add
*
* @param connectorName
* @param configs
*/
protected void putTaskConfigs(String connectorName, List<ConnectKeyValue> configs) {
List<ConnectKeyValue> exist = taskKeyValueStore.get(connectorName);
if (null != exist && exist.size() > 0) {
taskKeyValueStore.remove(connectorName)... | 3.68 |
hbase_ByteBufferArray_asSubByteBuffers | /**
* Creates a sub-array from a given array of ByteBuffers from the given offset to the length
* specified. For eg, if there are 4 buffers forming an array each with length 10 and if we call
* asSubByteBuffers(5, 10) then we will create an sub-array consisting of two BBs and the first
* one be a BB from 'position'... | 3.68 |
AreaShop_TeleportFeature_setTeleport | /**
* Set the teleport location of this region.
* @param location The location to set as teleport location
*/
public void setTeleport(Location location) {
if(location == null) {
getRegion().setSetting("general.teleportLocation", null);
} else {
getRegion().setSetting("general.teleportLocation", Utils.locationT... | 3.68 |
hadoop_NamedCommitterFactory_loadCommitterClass | /**
* Load the class named in {@link #NAMED_COMMITTER_CLASS}.
* @param context job or task context
* @return the committer class
* @throws IOException if no committer was defined.
*/
private Class<? extends PathOutputCommitter> loadCommitterClass(
JobContext context) throws IOException {
Preconditions.checkN... | 3.68 |
streampipes_TextDocumentStatistics_getNumWords | /**
* Returns the overall number of words in all blocks.
*
* @return Sum
*/
public int getNumWords() {
return numWords;
} | 3.68 |
flink_PekkoInvocationHandler_invokeRpc | /**
* Invokes a RPC method by sending the RPC invocation details to the rpc endpoint.
*
* @param method to call
* @param args of the method call
* @return result of the RPC; the result future is completed with a {@link TimeoutException} if
* the requests times out; if the recipient is not reachable, then the ... | 3.68 |
hbase_RowResource_updateBinary | // This currently supports only update of one row at a time.
Response updateBinary(final byte[] message, final HttpHeaders headers, final boolean replace) {
servlet.getMetrics().incrementRequests(1);
if (servlet.isReadOnly()) {
servlet.getMetrics().incrementFailedPutRequests(1);
return Response.status(Respo... | 3.68 |
graphhopper_VectorTile_setExtent | /**
* <pre>
* Although this is an "optional" field it is required by the specification.
* See https://github.com/mapbox/vector-tile-spec/issues/47
* </pre>
*
* <code>optional uint32 extent = 5 [default = 4096];</code>
*/
public Builder setExtent(int value) {
bitField0_ |= 0x00000020;
extent_ = value;
onCha... | 3.68 |
hadoop_Chain_setReducer | /**
* Sets the Reducer class to the chain job.
*
* <p>
* The configuration properties of the chain job have precedence over the
* configuration properties of the Reducer.
*
* @param job
* the chain job.
* @param klass
* the Reducer class to add.
* @param inputKeyClass
* reducer ... | 3.68 |
flink_AvroWriters_forGenericRecord | /**
* Creates an {@link AvroWriterFactory} that accepts and writes Avro generic types. The Avro
* writers will use the given schema to build and write the records.
*
* @param schema The schema of the generic type.
*/
public static AvroWriterFactory<GenericRecord> forGenericRecord(Schema schema) {
String schema... | 3.68 |
flink_Catalog_bulkGetPartitionColumnStatistics | /**
* Get a list of column statistics for given partitions.
*
* @param tablePath path of the table
* @param partitionSpecs partition specs of partitions that will be used to filter out all other
* unrelated statistics, i.e. the statistics fetch will be limited within the given
* partitions
* @return list... | 3.68 |
framework_QuerySortOrder_asc | /**
* Creates a new query sort builder with given sorting using ascending sort
* direction.
*
* @param by
* the string to sort by
*
* @return the query sort builder
*/
public static QuerySortOrderBuilder asc(String by) {
return new QuerySortOrderBuilder().thenAsc(by);
} | 3.68 |
framework_TabSheet_addTab | /**
* Adds a new tab into TabSheet. Component caption and icon are copied to
* the tab metadata at creation time.
*
* If the tab sheet already contains the component, its tab is returned.
*
* @param component
* the component to be added onto tab - should not be null.
* @param position
* T... | 3.68 |
morf_DataValueLookup_getObject | /**
* Gets the value as either a long, integer, boolean, date, local date, big decimal,
* byte array or string according to the type definition when called.
*
* <p>Just dispatches to the corresponding typed method (e.g. {@link #getBoolean(String)}).
*
* <p>Most useful when interacting with {@link ResultSet}. In o... | 3.68 |
flink_AbstractParameterTool_getBoolean | /**
* Returns the Boolean value for the given key. If the key does not exists it will return the
* default value given. The method returns whether the string of the value is "true" ignoring
* cases.
*/
public boolean getBoolean(String key, boolean defaultValue) {
addToDefaults(key, Boolean.toString(defaultValue... | 3.68 |
framework_VRichTextToolbar_updateStatus | /**
* Updates the status of all the stateful buttons.
*/
@SuppressWarnings("deprecation")
private void updateStatus() {
if (basic != null) {
bold.setDown(basic.isBold());
italic.setDown(basic.isItalic());
underline.setDown(basic.isUnderlined());
subscript.setDown(basic.isSubscript(... | 3.68 |
hadoop_CRC64_init | /*
* Initialize a table constructed from POLY (0x9a6c9329ac4bc9b5L).
* */
private void init() {
value = -1;
for (int n = 0; n < TABLE_LENGTH; ++n) {
long crc = n;
for (int i = 0; i < 8; ++i) {
if ((crc & 1) == 1) {
crc = (crc >>> 1) ^ POLY;
} else {
crc >>>= 1;
}
}
... | 3.68 |
flink_FutureUtils_throwIfCompletedExceptionally | /**
* Throws the causing exception if the given future is completed exceptionally, otherwise do
* nothing.
*
* @param future the future to check.
* @throws Exception when the future is completed exceptionally.
*/
public static void throwIfCompletedExceptionally(CompletableFuture<?> future) throws Exception {
... | 3.68 |
querydsl_MathExpressions_coth | /**
* Create a {@code coth(num)} expression
*
* <p>Returns the hyperbolic cotangent of num.</p>
*
* @param num numeric expression
* @return coth(num)
*/
public static <A extends Number & Comparable<?>> NumberExpression<Double> coth(Expression<A> num) {
return Expressions.numberOperation(Double.class, Ops.Mat... | 3.68 |
pulsar_ProducerImpl_getCnxIfReady | /**
* Hook method for testing. By returning null, it's possible to prevent messages
* being delivered to the broker.
*
* @return cnx if OpSend messages should be written to open connection. Caller must
* verify that the returned cnx is not null before using reference.
*/
protected ClientCnx getCnxIfReady() {
... | 3.68 |
rocketmq-connect_PluginUtils_isConcrete | /**
* Verify the given class corresponds to a concrete class and not to an abstract class or
*/
public static boolean isConcrete(Class<?> klass) {
int mod = klass.getModifiers();
return !Modifier.isAbstract(mod) && !Modifier.isInterface(mod);
} | 3.68 |
hbase_MetricsMaster_incrementRequests | /**
* @param inc How much to add to requests.
*/
public void incrementRequests(final long inc) {
masterSource.incRequests(inc);
} | 3.68 |
hadoop_ApplicationEntity_isApplicationEntity | /**
* Checks if the input TimelineEntity object is an ApplicationEntity.
*
* @param te TimelineEntity object.
* @return true if input is an ApplicationEntity, false otherwise
*/
public static boolean isApplicationEntity(TimelineEntity te) {
return (te == null ? false
: te.getType().equals(TimelineEntityTyp... | 3.68 |
hbase_BufferedDataBlockEncoder_ensureSpace | /**
* Asserts that there is at least the given amount of unfilled space remaining in the given
* buffer.
* @param out typically, the buffer we are writing to
* @param length the required space in the buffer
* @throws EncoderBufferTooSmallException If there are no enough bytes.
*/
protected static void ensureSp... | 3.68 |
framework_ViewChangeListener_getNewView | /**
* Returns the view being activated.
*
* @return new View
*/
public View getNewView() {
return newView;
} | 3.68 |
incubator-hugegraph-toolchain_HugeGraphLoader_loadStruct | /**
* TODO: Separate classes: ReadHandler -> ParseHandler -> InsertHandler
* Let load task worked in pipeline mode
*/
private void loadStruct(InputStruct struct, InputReader reader) {
LOG.info("Start loading '{}'", struct);
LoadMetrics metrics = this.context.summary().metrics(struct);
metrics.startInFlig... | 3.68 |
pulsar_PulsarAdminImpl_close | /**
* Close the Pulsar admin client to release all the resources.
*/
@Override
public void close() {
try {
auth.close();
} catch (IOException e) {
LOG.error("Failed to close the authentication service", e);
}
client.close();
asyncHttpConnector.close();
} | 3.68 |
flink_FlinkRelMetadataQuery_getFilteredColumnInterval | /**
* Returns the {@link FlinkMetadata.ColumnInterval} of the given column under the given filter
* argument.
*
* @param rel the relational expression
* @param columnIndex the index of the given column
* @param filterArg the index of the filter argument
* @return the interval of the given column of a specified r... | 3.68 |
streampipes_MD5_crypt | /**
* Encodes a string
*
* @param str String to encode
* @return Encoded String
*/
public static String crypt(String str) {
if (str == null || str.length() == 0) {
throw new IllegalArgumentException("String to encrypt cannot be null or zero length");
}
StringBuilder hexString = new StringBuilder();
t... | 3.68 |
hbase_BackupManifest_addDependentImage | /**
* Add dependent backup image for this backup.
* @param image The direct dependent backup image
*/
public void addDependentImage(BackupImage image) {
this.backupImage.addAncestor(image);
} | 3.68 |
hbase_RequestControllerFactory_create | /**
* Constructs a {@link org.apache.hadoop.hbase.client.RequestController}.
* @param conf The {@link Configuration} to use.
* @return A RequestController which is built according to the configuration.
*/
public static RequestController create(Configuration conf) {
Class<? extends RequestController> clazz = conf.... | 3.68 |
hmily_HmilyUndoContext_getHmilyLocks | /**
* Get hmily locks.
*
* @return hmily locks
*/
public Collection<HmilyLock> getHmilyLocks() {
return dataSnapshot.getTuples().stream()
.map(tuple -> new HmilyLock(transId, participantId, resourceId, tuple.getTableName(), Joiner.on("_").join(tuple.getPrimaryKeyValues()))).collect(Collectors.toList());... | 3.68 |
framework_LegacyWindow_addComponent | /**
* Adds a component to this UI. The component is not added directly to the
* UI, but instead to the content container ({@link #getContent()}).
*
* This method should only be called when the content is a
* {@link ComponentContainer} (default {@link VerticalLayout} or explicitly
* set).
*
* @param component
*... | 3.68 |
flink_ExpressionBuilder_aggDecimalPlus | /**
* Used only for implementing SUM/AVG aggregations (with and without retractions) on a Decimal
* type to avoid overriding decimal precision/scale calculation for sum/avg with the rules
* applied for the normal plus.
*/
@Internal
public static UnresolvedCallExpression aggDecimalPlus(Expression input1, Expression ... | 3.68 |
morf_AnalyseTable_apply | /**
* Before an upgrade step is run, if the table for analysis is not present, an illegal argument exception is thrown
* to prevent the upgrade step from starting.
*/
@Override
public Schema apply(Schema schema) {
if (!schema.tableExists(tableName.toUpperCase())) {
throw new IllegalArgumentException("Cannot a... | 3.68 |
hudi_TimelineUtils_getAffectedPartitions | /**
* Returns partitions that have been modified including internal operations such as clean in the passed timeline.
*/
public static List<String> getAffectedPartitions(HoodieTimeline timeline) {
return timeline.filterCompletedInstants().getInstantsAsStream().flatMap(s -> {
switch (s.getAction()) {
case C... | 3.68 |
flink_ExtractionUtils_collectAnnotationsOfClass | /**
* Collects all annotations of the given type defined in the current class or superclasses.
* Duplicates are ignored.
*/
static <T extends Annotation> Set<T> collectAnnotationsOfClass(
Class<T> annotation, Class<?> annotatedClass) {
final List<Class<?>> classHierarchy = new ArrayList<>();
Class<?>... | 3.68 |
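
The table above is a flat dump of a three-column dataset: `name`, `code_snippet`, and `score` (the header reports scores ranging 3.26–3.68; every row shown here sits at the 3.68 top end). As a minimal sketch of how such a dump might be consumed, assuming it were published on the Hugging Face Hub — the dataset id `org/scored-code-snippets` below is a hypothetical placeholder, since this page does not name the dataset — the rows could be loaded and filtered by score:

```python
# Minimal sketch, assuming the table above is published as a Hugging Face
# dataset. The dataset id is a hypothetical placeholder; this page does not
# name the actual dataset.
from datasets import load_dataset

ds = load_dataset("org/scored-code-snippets", split="train")  # hypothetical id

# Keep only the top-scoring rows (3.68 is the upper end of the
# 3.26-3.68 range reported in the table header).
top = ds.filter(lambda row: row["score"] >= 3.68)

# Print the method name and score for a few of the filtered rows.
for row in top.select(range(min(3, len(top)))):
    print(row["name"], row["score"])
```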