function_name
stringlengths
1
57
function_code
stringlengths
20
4.99k
documentation
stringlengths
50
2k
language
stringclasses
5 values
file_path
stringlengths
8
166
line_number
int32
4
16.7k
parameters
listlengths
0
20
return_type
stringlengths
0
131
has_type_hints
bool
2 classes
complexity
int32
1
51
quality_score
float32
6
9.68
repo_name
stringclasses
34 values
repo_stars
int32
2.9k
242k
docstring_style
stringclasses
7 values
is_async
bool
2 classes
entries
@Override public ImmutableSet<Entry<K, V>> entries() { ImmutableSet<Entry<K, V>> result = entries; return result == null ? (entries = new EntrySet<>(this)) : result; }
Returns an immutable collection of all key-value pairs in the multimap. Its iterator traverses the values for the first key, the values for the second key, and so on.
java
android/guava/src/com/google/common/collect/ImmutableSetMultimap.java
604
[]
true
2
6.88
google/guava
51,352
javadoc
false
equals
@Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; ReplicaState that = (ReplicaState) o; return replicaId == that.replicaId && Objects.equals(replicaDirectoryId, that.replicaDirectoryId) && logEndOffset == that.logEndOffset && lastFetchTimestamp.equals(that.lastFetchTimestamp) && lastCaughtUpTimestamp.equals(that.lastCaughtUpTimestamp); }
Compares this ReplicaState with another object for equality, based on the replica id, replica directory id, log end offset, last fetch timestamp, and last caught-up timestamp. @param o the object to compare with @return true if the given object is a ReplicaState with the same field values
java
clients/src/main/java/org/apache/kafka/clients/admin/QuorumInfo.java
176
[ "o" ]
true
8
6.56
apache/kafka
31,560
javadoc
false
afterPropertiesSet
@Override public void afterPropertiesSet() throws Exception { if (isSingleton()) { this.initialized = true; this.singletonInstance = createInstance(); this.earlySingletonInstance = null; } }
Eagerly create the singleton instance, if necessary.
java
spring-beans/src/main/java/org/springframework/beans/factory/config/AbstractFactoryBean.java
134
[]
void
true
2
6.4
spring-projects/spring-framework
59,386
javadoc
false
addPartitionsToTransactionHandler
private TxnRequestHandler addPartitionsToTransactionHandler() { pendingPartitionsInTransaction.addAll(newPartitionsInTransaction); newPartitionsInTransaction.clear(); AddPartitionsToTxnRequest.Builder builder = AddPartitionsToTxnRequest.Builder.forClient(transactionalId, producerIdAndEpoch.producerId, producerIdAndEpoch.epoch, new ArrayList<>(pendingPartitionsInTransaction)); return new AddPartitionsToTxnHandler(builder); }
Build a handler for an AddPartitionsToTxn request covering every partition newly added to the transaction, moving those partitions from the new set to the pending set. @return the request handler to enqueue
java
clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java
1,212
[]
TxnRequestHandler
true
1
7.04
apache/kafka
31,560
javadoc
false
always
public Always<T> always() { Supplier<@Nullable T> getValue = this::getValue; return new Always<>(getValue, this::test); }
Return a version of this source that can be used to always complete mappings, even if values are {@code null}. @return a new {@link Always} instance @since 4.0.0
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/PropertyMapper.java
352
[]
true
1
6.96
spring-projects/spring-boot
79,428
javadoc
false
evictEntries
@GuardedBy("this") void evictEntries(ReferenceEntry<K, V> newest) { if (!map.evictsBySize()) { return; } drainRecencyQueue(); // If the newest entry by itself is too heavy for the segment, don't bother evicting // anything else, just that if (newest.getValueReference().getWeight() > maxSegmentWeight) { if (!removeEntry(newest, newest.getHash(), RemovalCause.SIZE)) { throw new AssertionError(); } } while (totalWeight > maxSegmentWeight) { ReferenceEntry<K, V> e = getNextEvictable(); if (!removeEntry(e, e.getHash(), RemovalCause.SIZE)) { throw new AssertionError(); } } }
Performs eviction if the segment is over capacity. Avoids flushing the entire cache if the newest entry exceeds the maximum weight all on its own. @param newest the most recently added entry
java
android/guava/src/com/google/common/cache/LocalCache.java
2,558
[ "newest" ]
void
true
6
7.04
google/guava
51,352
javadoc
false
zipContent
ZipContent zipContent() { return this.zipContent; }
Return the underlying {@link ZipContent}. @return the zip content
java
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/jar/NestedJarFileResources.java
69
[]
ZipContent
true
1
6.16
spring-projects/spring-boot
79,428
javadoc
false
bindIndexed
private void bindIndexed(ConfigurationPropertySource source, ConfigurationPropertyName root, Bindable<?> target, AggregateElementBinder elementBinder, IndexedCollectionSupplier collection, ResolvableType aggregateType, ResolvableType elementType) { ConfigurationProperty property = source.getConfigurationProperty(root); if (property != null) { getContext().setConfigurationProperty(property); bindValue(target, collection.get(), aggregateType, elementType, property.getValue()); } else { bindIndexed(source, root, elementBinder, collection, elementType); } }
Bind indexed elements to the supplied collection. @param source the configuration property source @param root the configuration property name root @param target the target bindable @param elementBinder the binder to use for elements @param collection the collection supplier @param aggregateType the aggregate type, may be a collection or an array @param elementType the element type
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/IndexedElementsBinder.java
83
[ "source", "root", "target", "elementBinder", "collection", "aggregateType", "elementType" ]
void
true
2
6.4
spring-projects/spring-boot
79,428
javadoc
false
reschedule
@CanIgnoreReturnValue Cancellable reschedule() { // invoke the callback outside the lock, prevents some shenanigans. Schedule schedule; try { schedule = CustomScheduler.this.getNextSchedule(); } catch (Throwable t) { restoreInterruptIfIsInterruptedException(t); service.notifyFailed(t); return new FutureAsCancellable(immediateCancelledFuture()); } // We reschedule ourselves with a lock held for two reasons. 1. we want to make sure that // cancel calls cancel on the correct future. 2. we want to make sure that the assignment // to currentFuture doesn't race with itself so that currentFuture is assigned in the // correct order. Throwable scheduleFailure = null; Cancellable toReturn; lock.lock(); try { toReturn = initializeOrUpdateCancellationDelegate(schedule); } catch (Throwable e) { // Any Exception is either a RuntimeException or sneaky checked exception. // // If an exception is thrown by the subclass then we need to make sure that the service // notices and transitions to the FAILED state. We do it by calling notifyFailed directly // because the service does not monitor the state of the future so if the exception is not // caught and forwarded to the service the task would stop executing but the service would // have no idea. // TODO(lukes): consider building everything in terms of ListenableScheduledFuture then // the AbstractService could monitor the future directly. Rescheduling is still hard... // but it would help with some of these lock ordering issues. scheduleFailure = e; toReturn = new FutureAsCancellable(immediateCancelledFuture()); } finally { lock.unlock(); } // Call notifyFailed outside the lock to avoid lock ordering issues. if (scheduleFailure != null) { service.notifyFailed(scheduleFailure); } return toReturn; }
Atomically reschedules this task and assigns the new future to {@link #cancellationDelegate}.
java
android/guava/src/com/google/common/util/concurrent/AbstractScheduledService.java
593
[]
Cancellable
true
4
6.24
google/guava
51,352
javadoc
false
format
StringBuffer format(Object obj, StringBuffer toAppendTo, FieldPosition pos);
Formats a {@link Date}, {@link Calendar} or {@link Long} (milliseconds) object. @param obj the object to format. @param toAppendTo the buffer to append to. @param pos the position - ignored. @return the buffer passed in. @see java.text.DateFormat#format(Object, StringBuffer, FieldPosition)
java
src/main/java/org/apache/commons/lang3/time/DatePrinter.java
152
[ "obj", "toAppendTo", "pos" ]
StringBuffer
true
1
6.16
apache/commons-lang
2,896
javadoc
false
determinePropertySourceName
private String determinePropertySourceName(ConfigurationPropertySource source) { if (source.getUnderlyingSource() instanceof PropertySource<?> underlyingSource) { return underlyingSource.getName(); } Object underlyingSource = source.getUnderlyingSource(); Assert.state(underlyingSource != null, "'underlyingSource' must not be null"); return underlyingSource.toString(); }
Determine the name of the given configuration property source, using the underlying {@link PropertySource} name when available and falling back to the underlying source's string representation. @param source the configuration property source @return the property source name
java
core/spring-boot-properties-migrator/src/main/java/org/springframework/boot/context/properties/migrator/PropertiesMigrationReporter.java
111
[ "source" ]
String
true
2
6.08
spring-projects/spring-boot
79,428
javadoc
false
_record_count
def _record_count(self) -> int: """ Get number of records in file. This is maybe suboptimal because we have to seek to the end of the file. Side effect: returns file position to record_start. """ self.filepath_or_buffer.seek(0, 2) total_records_length = self.filepath_or_buffer.tell() - self.record_start if total_records_length % 80 != 0: warnings.warn( "xport file may be corrupted.", stacklevel=find_stack_level(), ) if self.record_length > 80: self.filepath_or_buffer.seek(self.record_start) return total_records_length // self.record_length self.filepath_or_buffer.seek(-80, 2) last_card_bytes = self.filepath_or_buffer.read(80) last_card = np.frombuffer(last_card_bytes, dtype=np.uint64) # 8 byte blank ix = np.flatnonzero(last_card == 2314885530818453536) if len(ix) == 0: tail_pad = 0 else: tail_pad = 8 * len(ix) self.filepath_or_buffer.seek(self.record_start) return (total_records_length - tail_pad) // self.record_length
Get number of records in file. This is maybe suboptimal because we have to seek to the end of the file. Side effect: returns file position to record_start.
python
pandas/io/sas/sas_xport.py
386
[ "self" ]
int
true
5
6
pandas-dev/pandas
47,362
unknown
false
negate
default FailableIntPredicate<E> negate() { return t -> !test(t); }
Returns a predicate that negates this predicate. @return a predicate that negates this predicate.
java
src/main/java/org/apache/commons/lang3/function/FailableIntPredicate.java
79
[]
true
1
6.48
apache/commons-lang
2,896
javadoc
false
splitPreserveAllTokens
public static String[] splitPreserveAllTokens(final String str, final String separatorChars) { return splitWorker(str, separatorChars, -1, true); }
Splits the provided text into an array, separators specified, preserving all tokens, including empty tokens created by adjacent separators. This is an alternative to using StringTokenizer. <p> The separator is not included in the returned String array. Adjacent separators are treated as separators for empty tokens. For more control over the split use the StrTokenizer class. </p> <p> A {@code null} input String returns {@code null}. A {@code null} separatorChars splits on whitespace. </p> <pre> StringUtils.splitPreserveAllTokens(null, *) = null StringUtils.splitPreserveAllTokens("", *) = [] StringUtils.splitPreserveAllTokens("abc def", null) = ["abc", "def"] StringUtils.splitPreserveAllTokens("abc def", " ") = ["abc", "def"] StringUtils.splitPreserveAllTokens("abc  def", " ") = ["abc", "", "def"] StringUtils.splitPreserveAllTokens("ab:cd:ef", ":") = ["ab", "cd", "ef"] StringUtils.splitPreserveAllTokens("ab:cd:ef:", ":") = ["ab", "cd", "ef", ""] StringUtils.splitPreserveAllTokens("ab:cd:ef::", ":") = ["ab", "cd", "ef", "", ""] StringUtils.splitPreserveAllTokens("ab::cd:ef", ":") = ["ab", "", "cd", "ef"] StringUtils.splitPreserveAllTokens(":cd:ef", ":") = ["", "cd", "ef"] StringUtils.splitPreserveAllTokens("::cd:ef", ":") = ["", "", "cd", "ef"] StringUtils.splitPreserveAllTokens(":cd:ef:", ":") = ["", "cd", "ef", ""] </pre> @param str the String to parse, may be {@code null}. @param separatorChars the characters used as the delimiters, {@code null} splits on whitespace. @return an array of parsed Strings, {@code null} if null String input. @since 2.1
java
src/main/java/org/apache/commons/lang3/StringUtils.java
7,509
[ "str", "separatorChars" ]
true
1
6.16
apache/commons-lang
2,896
javadoc
false
systemDefault
static SslBundle systemDefault() { try { KeyManagerFactory keyManagerFactory = KeyManagerFactory .getInstance(KeyManagerFactory.getDefaultAlgorithm()); keyManagerFactory.init(null, null); TrustManagerFactory trustManagerFactory = TrustManagerFactory .getInstance(TrustManagerFactory.getDefaultAlgorithm()); trustManagerFactory.init((KeyStore) null); SSLContext sslContext = SSLContext.getDefault(); return of(null, null, null, null, new SslManagerBundle() { @Override public KeyManagerFactory getKeyManagerFactory() { return keyManagerFactory; } @Override public TrustManagerFactory getTrustManagerFactory() { return trustManagerFactory; } @Override public SSLContext createSslContext(String protocol) { return sslContext; } }); } catch (NoSuchAlgorithmException | KeyStoreException | UnrecoverableKeyException ex) { throw new IllegalStateException("Could not initialize system default SslBundle: " + ex.getMessage(), ex); } }
Factory method to create a new {@link SslBundle} which uses the system defaults. @return a new {@link SslBundle} instance @since 3.5.0
java
core/spring-boot/src/main/java/org/springframework/boot/ssl/SslBundle.java
193
[]
SslBundle
true
2
8.08
spring-projects/spring-boot
79,428
javadoc
false
analyzeForMissingParameters
static @Nullable FailureAnalysis analyzeForMissingParameters(Throwable failure) { return analyzeForMissingParameters(failure, failure, new HashSet<>()); }
Analyze the given failure for missing parameter name exceptions. @param failure the failure to analyze @return a failure analysis or {@code null}
java
core/spring-boot/src/main/java/org/springframework/boot/diagnostics/analyzer/MissingParameterNamesFailureAnalyzer.java
60
[ "failure" ]
FailureAnalysis
true
1
6.64
spring-projects/spring-boot
79,428
javadoc
false
matches
boolean matches(Class<?> clazz);
Should the pointcut apply to the given interface or target class? @param clazz the candidate target class @return whether the advice should apply to the given target class
java
spring-aop/src/main/java/org/springframework/aop/ClassFilter.java
48
[ "clazz" ]
true
1
6.8
spring-projects/spring-framework
59,386
javadoc
false
checkJarHell
public static void checkJarHell(Consumer<String> output) throws IOException { ClassLoader loader = JarHell.class.getClassLoader(); output.accept("java.class.path: " + System.getProperty("java.class.path")); output.accept("sun.boot.class.path: " + System.getProperty("sun.boot.class.path")); if (loader instanceof URLClassLoader urlClassLoader) { output.accept("classloader urls: " + Arrays.toString(urlClassLoader.getURLs())); } checkJarHell(parseClassPath(), output); }
Checks the current classpath for duplicate classes @param output A {@link String} {@link Consumer} to which debug output will be sent @throws IllegalStateException if jar hell was found
java
libs/core/src/main/java/org/elasticsearch/jdk/JarHell.java
78
[ "output" ]
void
true
2
6.08
elastic/elasticsearch
75,680
javadoc
false
findAllAnnotationsOnBean
@Override public <A extends Annotation> Set<A> findAllAnnotationsOnBean( String beanName, Class<A> annotationType, boolean allowFactoryBeanInit) throws NoSuchBeanDefinitionException { Set<A> annotations = new LinkedHashSet<>(); Class<?> beanType = getType(beanName, allowFactoryBeanInit); if (beanType != null) { MergedAnnotations.from(beanType, MergedAnnotations.SearchStrategy.TYPE_HIERARCHY) .stream(annotationType) .filter(MergedAnnotation::isPresent) .forEach(mergedAnnotation -> annotations.add(mergedAnnotation.synthesize())); } if (containsBeanDefinition(beanName)) { RootBeanDefinition bd = getMergedLocalBeanDefinition(beanName); // Check raw bean class, for example, in case of a proxy. if (bd.hasBeanClass() && bd.getFactoryMethodName() == null) { Class<?> beanClass = bd.getBeanClass(); if (beanClass != beanType) { MergedAnnotations.from(beanClass, MergedAnnotations.SearchStrategy.TYPE_HIERARCHY) .stream(annotationType) .filter(MergedAnnotation::isPresent) .forEach(mergedAnnotation -> annotations.add(mergedAnnotation.synthesize())); } } // Check annotations declared on factory method, if any. Method factoryMethod = bd.getResolvedFactoryMethod(); if (factoryMethod != null) { MergedAnnotations.from(factoryMethod, MergedAnnotations.SearchStrategy.TYPE_HIERARCHY) .stream(annotationType) .filter(MergedAnnotation::isPresent) .forEach(mergedAnnotation -> annotations.add(mergedAnnotation.synthesize())); } } return annotations; }
Check whether the specified bean would need to be eagerly initialized in order to determine its type. @param factoryBeanName a factory-bean reference that the bean definition defines a factory method for @return whether eager initialization is necessary
java
spring-beans/src/main/java/org/springframework/beans/factory/support/DefaultListableBeanFactory.java
845
[ "beanName", "annotationType", "allowFactoryBeanInit" ]
true
7
7.76
spring-projects/spring-framework
59,386
javadoc
false
_encode
def _encode(values, *, uniques, check_unknown=True): """Helper function to encode values into [0, n_uniques - 1]. Uses pure python method for object dtype, and numpy method for all other dtypes. The numpy method has the limitation that the `uniques` need to be sorted. Importantly, this is not checked but assumed to already be the case. The calling method needs to ensure this for all non-object values. Parameters ---------- values : ndarray Values to encode. uniques : ndarray The unique values in `values`. If the dtype is not object, then `uniques` needs to be sorted. check_unknown : bool, default=True If True, check for values in `values` that are not in `unique` and raise an error. This is ignored for object dtype, and treated as True in this case. This parameter is useful for _BaseEncoder._transform() to avoid calling _check_unknown() twice. Returns ------- encoded : ndarray Encoded values """ xp, _ = get_namespace(values, uniques) if not xp.isdtype(values.dtype, "numeric"): try: return _map_to_integer(values, uniques) except KeyError as e: raise ValueError(f"y contains previously unseen labels: {e}") else: if check_unknown: diff = _check_unknown(values, uniques) if diff: raise ValueError(f"y contains previously unseen labels: {diff}") return xp.searchsorted(uniques, values)
Helper function to encode values into [0, n_uniques - 1]. Uses pure python method for object dtype, and numpy method for all other dtypes. The numpy method has the limitation that the `uniques` need to be sorted. Importantly, this is not checked but assumed to already be the case. The calling method needs to ensure this for all non-object values. Parameters ---------- values : ndarray Values to encode. uniques : ndarray The unique values in `values`. If the dtype is not object, then `uniques` needs to be sorted. check_unknown : bool, default=True If True, check for values in `values` that are not in `unique` and raise an error. This is ignored for object dtype, and treated as True in this case. This parameter is useful for _BaseEncoder._transform() to avoid calling _check_unknown() twice. Returns ------- encoded : ndarray Encoded values
python
sklearn/utils/_encode.py
197
[ "values", "uniques", "check_unknown" ]
false
5
6.08
scikit-learn/scikit-learn
64,340
numpy
false
supportedResourceTypes
public Set<Byte> supportedResourceTypes() { return version() == 0 ? Set.of(ConfigResource.Type.CLIENT_METRICS.id()) : Set.of( ConfigResource.Type.TOPIC.id(), ConfigResource.Type.BROKER.id(), ConfigResource.Type.BROKER_LOGGER.id(), ConfigResource.Type.CLIENT_METRICS.id(), ConfigResource.Type.GROUP.id() ); }
Return the supported config resource types in different request version. If there is a new config resource type, the ListConfigResourcesRequest should bump a new request version to include it. For v0, the supported config resource types contain CLIENT_METRICS (16). For v1, the supported config resource types contain TOPIC (2), BROKER (4), BROKER_LOGGER (8), CLIENT_METRICS (16), and GROUP (32).
java
clients/src/main/java/org/apache/kafka/common/requests/ListConfigResourcesRequest.java
95
[]
true
2
6.72
apache/kafka
31,560
javadoc
false
toString
@Override public String toString() { if (count() > 0) { return MoreObjects.toStringHelper(this) .add("xStats", xStats) .add("yStats", yStats) .add("populationCovariance", populationCovariance()) .toString(); } else { return MoreObjects.toStringHelper(this) .add("xStats", xStats) .add("yStats", yStats) .toString(); } }
Returns a string representation of this object, always including the x and y statistics and, when at least one pair of values has been accumulated, the population covariance as well.
java
android/guava/src/com/google/common/math/PairedStats.java
240
[]
String
true
2
6.24
google/guava
51,352
javadoc
false
NativeStyleEditor
function NativeStyleEditor() { const {inspectedElementID} = useContext(TreeStateContext); const inspectedElementStyleAndLayout = useContext(NativeStyleContext); if (inspectedElementID === null) { return null; } if (inspectedElementStyleAndLayout === null) { return null; } const {layout, style} = inspectedElementStyleAndLayout; if (layout === null && style === null) { return null; } return ( <div className={styles.Stack}> {layout !== null && ( <LayoutViewer id={inspectedElementID} layout={layout} /> )} {style !== null && <StyleEditor id={inspectedElementID} style={style} />} </div> ); }
Copyright (c) Meta Platforms, Inc. and affiliates. This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree. @flow
javascript
packages/react-devtools-shared/src/devtools/views/Components/NativeStyleEditor/index.js
45
[]
false
7
6.24
facebook/react
241,750
jsdoc
false
ActualOpenInEditorButton
function ActualOpenInEditorButton({ editorURL, source, className, }: Props): React.Node { let disable; if (source == null) { disable = true; } else { const staleLocation: ReactFunctionLocation = [ '', source.url, // This is not live but we just use any line/column to validate whether this can be opened. // We'll call checkConditions again when we click it to get the latest line number. source.selectionRef.line, source.selectionRef.column, ]; disable = checkConditions(editorURL, staleLocation).shouldDisableButton; } return ( <Button disabled={disable} className={className} onClick={() => { if (source == null) { return; } const latestLocation: ReactFunctionLocation = [ '', source.url, // These might have changed since we last read it. source.selectionRef.line, source.selectionRef.column, ]; const {url, shouldDisableButton} = checkConditions( editorURL, latestLocation, ); if (!shouldDisableButton) { window.open(url); } }}> <ButtonIcon type="editor" /> <ButtonLabel>Open in editor</ButtonLabel> </Button> ); }
Copyright (c) Meta Platforms, Inc. and affiliates. This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree. @flow
javascript
packages/react-devtools-shared/src/devtools/views/Editor/OpenInEditorButton.js
27
[]
false
5
6.4
facebook/react
241,750
jsdoc
false
close
@Override public void close() throws IOException { synchronized (this.lock) { if (this.thread != null) { this.thread.close(); this.thread.interrupt(); try { this.thread.join(); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); } this.thread = null; } } }
Closes the watcher, stopping its background thread if one is running: the thread is closed, interrupted, and joined, re-asserting the interrupt flag if the join is itself interrupted. @throws IOException if an I/O error occurs while closing
java
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/ssl/FileWatcher.java
144
[]
void
true
3
7.76
spring-projects/spring-boot
79,428
javadoc
false
getExcludeAutoConfigurationsProperty
protected List<String> getExcludeAutoConfigurationsProperty() { Environment environment = getEnvironment(); if (environment == null) { return Collections.emptyList(); } if (environment instanceof ConfigurableEnvironment) { Binder binder = Binder.get(environment); return binder.bind(PROPERTY_NAME_AUTOCONFIGURE_EXCLUDE, String[].class) .map(Arrays::asList) .orElse(Collections.emptyList()); } String[] excludes = environment.getProperty(PROPERTY_NAME_AUTOCONFIGURE_EXCLUDE, String[].class); return (excludes != null) ? Arrays.asList(excludes) : Collections.emptyList(); }
Returns the auto-configurations excluded by the {@code spring.autoconfigure.exclude} property. @return excluded auto-configurations @since 2.3.2
java
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/AutoConfigurationImportSelector.java
263
[]
true
4
7.6
spring-projects/spring-boot
79,428
javadoc
false
getEcCurveNameFromOid
private static String getEcCurveNameFromOid(String oidString) throws GeneralSecurityException { return switch (oidString) { // see https://tools.ietf.org/html/rfc5480#section-2.1.1.1 case "1.2.840.10045.3.1" -> "secp192r1"; case "1.3.132.0.1" -> "sect163k1"; case "1.3.132.0.15" -> "sect163r2"; case "1.3.132.0.33" -> "secp224r1"; case "1.3.132.0.26" -> "sect233k1"; case "1.3.132.0.27" -> "sect233r1"; case "1.2.840.10045.3.1.7" -> "secp256r1"; case "1.3.132.0.16" -> "sect283k1"; case "1.3.132.0.17" -> "sect283r1"; case "1.3.132.0.34" -> "secp384r1"; case "1.3.132.0.36" -> "sect409k1"; case "1.3.132.0.37" -> "sect409r1"; case "1.3.132.0.35" -> "secp521r1"; case "1.3.132.0.38" -> "sect571k1"; case "1.3.132.0.39" -> "sect571r1"; default -> throw new GeneralSecurityException( "Error parsing EC named curve identifier. Named curve with OID: " + oidString + " is not supported" ); }; }
Parses a DER encoded private key and reads its algorithm identifier Object OID. @param keyBytes the private key raw bytes @return A string identifier for the key algorithm (RSA, DSA, or EC) @throws GeneralSecurityException if the algorithm oid that is parsed from ASN.1 is unknown @throws IOException if the DER encoded key can't be parsed
java
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemUtils.java
728
[ "oidString" ]
String
true
1
6.72
elastic/elasticsearch
75,680
javadoc
false
refreshAndGetPartitionsToValidate
Map<TopicPartition, SubscriptionState.FetchPosition> refreshAndGetPartitionsToValidate() { return positionsValidator.refreshAndGetPartitionsToValidate(apiVersions); }
Refresh the set of partitions whose fetch positions need to be validated against the leader epoch, and return those partitions with their current fetch positions. @return the partitions requiring offset validation, keyed by topic partition
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherUtils.java
175
[]
true
1
6.32
apache/kafka
31,560
javadoc
false
of
static <T> ValueProcessor<T> of(Class<? extends T> type, UnaryOperator<@Nullable T> action) { return of(action).whenInstanceOf(type); }
Factory method to crate a new {@link ValueProcessor} that applies the given action. @param <T> the value type @param type the value type @param action the action to apply @return a new {@link ValueProcessor} instance
java
core/spring-boot/src/main/java/org/springframework/boot/json/JsonWriter.java
1,046
[ "type", "action" ]
true
1
6.48
spring-projects/spring-boot
79,428
javadoc
false
dag_next_execution
def dag_next_execution(args) -> None: """ Return the next logical datetime of a DAG at the command line. >>> airflow dags next-execution tutorial 2018-08-31 10:38:00 """ from airflow.models.serialized_dag import SerializedDagModel with create_session() as session: dag = SerializedDagModel.get_dag(args.dag_id, session=session) last_parsed_dag: DagModel | None = session.scalars( select(DagModel).where(DagModel.dag_id == args.dag_id) ).one_or_none() if not dag or not last_parsed_dag: raise SystemExit(f"DAG: {args.dag_id} does not exist in the database") if last_parsed_dag.is_paused: print("[INFO] Please be reminded this DAG is PAUSED now.", file=sys.stderr) def print_execution_interval(interval: DataInterval | None): if interval is None: print( "[WARN] No following schedule can be found. " "This DAG may have schedule interval '@once' or `None`.", file=sys.stderr, ) print(None) return print(interval.start.isoformat()) next_interval = get_next_data_interval(dag.timetable, last_parsed_dag) print_execution_interval(next_interval) for _ in range(1, args.num_executions): next_info = dag.next_dagrun_info(next_interval, restricted=False) next_interval = None if next_info is None else next_info.data_interval print_execution_interval(next_interval)
Return the next logical datetime of a DAG at the command line. >>> airflow dags next-execution tutorial 2018-08-31 10:38:00
python
airflow-core/src/airflow/cli/commands/dag_command.py
308
[ "args" ]
None
true
7
7.28
apache/airflow
43,597
unknown
false
write
public static long write(DataOutputStream out, byte magic, long timestamp, byte[] key, byte[] value, CompressionType compressionType, TimestampType timestampType) throws IOException { return write(out, magic, timestamp, wrapNullable(key), wrapNullable(value), compressionType, timestampType); }
Write the record data with the given compression type and return the computed crc. @param out The output stream to write to @param magic The magic value to be used @param timestamp The timestamp of the record @param key The record key @param value The record value @param compressionType The compression type @param timestampType The timestamp type @return the computed CRC for this record. @throws IOException for any IO errors writing to the output stream.
java
clients/src/main/java/org/apache/kafka/common/record/LegacyRecord.java
400
[ "out", "magic", "timestamp", "key", "value", "compressionType", "timestampType" ]
true
1
6.72
apache/kafka
31,560
javadoc
false
hermder
def hermder(c, m=1, scl=1, axis=0): """ Differentiate a Hermite series. Returns the Hermite series coefficients `c` differentiated `m` times along `axis`. At each iteration the result is multiplied by `scl` (the scaling factor is for use in a linear change of variable). The argument `c` is an array of coefficients from low to high degree along each axis, e.g., [1,2,3] represents the series ``1*H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. Parameters ---------- c : array_like Array of Hermite series coefficients. If `c` is multidimensional the different axis correspond to different variables with the degree in each axis given by the corresponding index. m : int, optional Number of derivatives taken, must be non-negative. (Default: 1) scl : scalar, optional Each differentiation is multiplied by `scl`. The end result is multiplication by ``scl**m``. This is for use in a linear change of variable. (Default: 1) axis : int, optional Axis over which the derivative is taken. (Default: 0). Returns ------- der : ndarray Hermite series of the derivative. See Also -------- hermint Notes ----- In general, the result of differentiating a Hermite series does not resemble the same operation on a power series. Thus the result of this function may be "unintuitive," albeit correct; see Examples section below. Examples -------- >>> from numpy.polynomial.hermite import hermder >>> hermder([ 1. 
, 0.5, 0.5, 0.5]) array([1., 2., 3.]) >>> hermder([-0.5, 1./2., 1./8., 1./12., 1./16.], m=2) array([1., 2., 3.]) """ c = np.array(c, ndmin=1, copy=True) if c.dtype.char in '?bBhHiIlLqQpP': c = c.astype(np.double) cnt = pu._as_int(m, "the order of derivation") iaxis = pu._as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") iaxis = np.lib.array_utils.normalize_axis_index(iaxis, c.ndim) if cnt == 0: return c c = np.moveaxis(c, iaxis, 0) n = len(c) if cnt >= n: c = c[:1] * 0 else: for i in range(cnt): n = n - 1 c *= scl der = np.empty((n,) + c.shape[1:], dtype=c.dtype) for j in range(n, 0, -1): der[j - 1] = (2 * j) * c[j] c = der c = np.moveaxis(c, 0, iaxis) return c
Differentiate a Hermite series. Returns the Hermite series coefficients `c` differentiated `m` times along `axis`. At each iteration the result is multiplied by `scl` (the scaling factor is for use in a linear change of variable). The argument `c` is an array of coefficients from low to high degree along each axis, e.g., [1,2,3] represents the series ``1*H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. Parameters ---------- c : array_like Array of Hermite series coefficients. If `c` is multidimensional the different axis correspond to different variables with the degree in each axis given by the corresponding index. m : int, optional Number of derivatives taken, must be non-negative. (Default: 1) scl : scalar, optional Each differentiation is multiplied by `scl`. The end result is multiplication by ``scl**m``. This is for use in a linear change of variable. (Default: 1) axis : int, optional Axis over which the derivative is taken. (Default: 0). Returns ------- der : ndarray Hermite series of the derivative. See Also -------- hermint Notes ----- In general, the result of differentiating a Hermite series does not resemble the same operation on a power series. Thus the result of this function may be "unintuitive," albeit correct; see Examples section below. Examples -------- >>> from numpy.polynomial.hermite import hermder >>> hermder([ 1. , 0.5, 0.5, 0.5]) array([1., 2., 3.]) >>> hermder([-0.5, 1./2., 1./8., 1./12., 1./16.], m=2) array([1., 2., 3.])
python
numpy/polynomial/hermite.py
596
[ "c", "m", "scl", "axis" ]
false
8
7.44
numpy/numpy
31,054
numpy
false
memory_efficient_fusion
def memory_efficient_fusion( fn: Union[Callable, nn.Module], **kwargs, ): """ Wrapper function over :func:`aot_function` and :func:`aot_module` to perform memory efficient fusion. It uses the :func:`min_cut_rematerialization_partition` partitioner to perform efficient recomputation. It uses NVFuser to compile the generated forward and backward graphs. .. warning:: This API is experimental and likely to change. Args: fn (Union[Callable, nn.Module]): A Python function or a ``nn.Module`` that takes one or more arguments. Must return one or more Tensors. **kwargs: Any other overrides you want to make to the settings Returns: Returns a ``Callable`` or ``nn.Module`` that retains the eager behavior of the original :attr:`fn`, but whose forward and backward graphs have gone through recomputation optimizations, and the graphs have been compiled with nvfuser. """ config = { "fw_compiler": ts_compile, "bw_compiler": ts_compile, "partition_fn": min_cut_rematerialization_partition, "decompositions": default_decompositions, } config.update(kwargs) if isinstance(fn, torch.nn.Module): return aot_module(fn, **config) else: return aot_function(fn, **config)
Wrapper function over :func:`aot_function` and :func:`aot_module` to perform memory efficient fusion. It uses the :func:`min_cut_rematerialization_partition` partitioner to perform efficient recomputation. It uses NVFuser to compile the generated forward and backward graphs. .. warning:: This API is experimental and likely to change. Args: fn (Union[Callable, nn.Module]): A Python function or a ``nn.Module`` that takes one or more arguments. Must return one or more Tensors. **kwargs: Any other overrides you want to make to the settings Returns: Returns a ``Callable`` or ``nn.Module`` that retains the eager behavior of the original :attr:`fn`, but whose forward and backward graphs have gone through recomputation optimizations, and the graphs have been compiled with nvfuser.
python
torch/_functorch/compilers.py
237
[ "fn" ]
true
3
7.44
pytorch/pytorch
96,034
google
false
__iter__
def __iter__(self) -> Iterator: """ Return an iterator over the boxed values Yields ------ tstamp : Timestamp """ if self.ndim > 1: for i in range(len(self)): yield self[i] else: # convert in chunks of 10k for efficiency data = self.asi8 length = len(self) chunksize = _ITER_CHUNKSIZE chunks = (length // chunksize) + 1 for i in range(chunks): start_i = i * chunksize end_i = min((i + 1) * chunksize, length) converted = ints_to_pydatetime( data[start_i:end_i], tz=self.tz, box="timestamp", reso=self._creso, ) yield from converted
Return an iterator over the boxed values Yields ------ tstamp : Timestamp
python
pandas/core/arrays/datetimes.py
670
[ "self" ]
Iterator
true
5
6.56
pandas-dev/pandas
47,362
unknown
false
toCalendar
public static Calendar toCalendar(final Date date, final TimeZone tz) { final Calendar c = Calendar.getInstance(tz); c.setTime(Objects.requireNonNull(date, "date")); return c; }
Converts a {@link Date} of a given {@link TimeZone} into a {@link Calendar}. @param date the date to convert to a Calendar. @param tz the time zone of the {@code date}. @return the created Calendar. @throws NullPointerException if {@code date} or {@code tz} is null.
java
src/main/java/org/apache/commons/lang3/time/DateUtils.java
1,626
[ "date", "tz" ]
Calendar
true
1
6.88
apache/commons-lang
2,896
javadoc
false
check_job_success
def check_job_success(self, job_id: str) -> bool: """ Check the final status of the Batch job. Return True if the job 'SUCCEEDED', else raise an AirflowException. :param job_id: a Batch job ID :raises: AirflowException """ job = self.get_job_description(job_id) job_status = job.get("status") if job_status == self.SUCCESS_STATE: self.log.info("AWS Batch job (%s) succeeded: %s", job_id, job) return True if job_status == self.FAILURE_STATE: raise AirflowException(f"AWS Batch job ({job_id}) failed: {job}") if job_status in self.INTERMEDIATE_STATES: raise AirflowException(f"AWS Batch job ({job_id}) is not complete: {job}") raise AirflowException(f"AWS Batch job ({job_id}) has unknown status: {job}")
Check the final status of the Batch job. Return True if the job 'SUCCEEDED', else raise an AirflowException. :param job_id: a Batch job ID :raises: AirflowException
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/batch_client.py
256
[ "self", "job_id" ]
bool
true
4
6.72
apache/airflow
43,597
sphinx
false
_validate_binary_probabilistic_prediction
def _validate_binary_probabilistic_prediction(y_true, y_prob, sample_weight, pos_label): r"""Convert y_true and y_prob in binary classification to shape (n_samples, 2) Parameters ---------- y_true : array-like of shape (n_samples,) True labels. y_prob : array-like of shape (n_samples,) Probabilities of the positive class. sample_weight : array-like of shape (n_samples,), default=None Sample weights. pos_label : int, float, bool or str, default=None Label of the positive class. If None, `pos_label` will be inferred in the following manner: * if `y_true` in {-1, 1} or {0, 1}, `pos_label` defaults to 1; * else if `y_true` contains string, an error will be raised and `pos_label` should be explicitly specified; * otherwise, `pos_label` defaults to the greater label, i.e. `np.unique(y_true)[-1]`. Returns ------- transformed_labels : array of shape (n_samples, 2) y_prob : array of shape (n_samples, 2) """ # sanity checks on y_true and y_prob y_true = column_or_1d(y_true) y_prob = column_or_1d(y_prob) assert_all_finite(y_true) assert_all_finite(y_prob) check_consistent_length(y_prob, y_true, sample_weight) if sample_weight is not None: _check_sample_weight(sample_weight, y_prob, force_float_dtype=False) y_type = type_of_target(y_true, input_name="y_true") if y_type != "binary": raise ValueError( f"The type of the target inferred from y_true is {y_type} but should be " "binary according to the shape of y_prob." 
) xp, _, device_ = get_namespace_and_device(y_prob) if xp.max(y_prob) > 1: raise ValueError(f"y_prob contains values greater than 1: {xp.max(y_prob)}") if xp.min(y_prob) < 0: raise ValueError(f"y_prob contains values less than 0: {xp.min(y_prob)}") # check that pos_label is consistent with y_true try: pos_label = _check_pos_label_consistency(pos_label, y_true) except ValueError: classes = np.unique(y_true) if classes.dtype.kind not in ("O", "U", "S"): # for backward compatibility, if classes are not string then # `pos_label` will correspond to the greater label pos_label = classes[-1] else: raise # convert (n_samples,) to (n_samples, 2) shape transformed_labels = _one_hot_encoding_binary_target( y_true=y_true, pos_label=pos_label, target_xp=xp, target_device=device_ ) y_prob = xp.stack((1 - y_prob, y_prob), axis=1) return transformed_labels, y_prob
r"""Convert y_true and y_prob in binary classification to shape (n_samples, 2) Parameters ---------- y_true : array-like of shape (n_samples,) True labels. y_prob : array-like of shape (n_samples,) Probabilities of the positive class. sample_weight : array-like of shape (n_samples,), default=None Sample weights. pos_label : int, float, bool or str, default=None Label of the positive class. If None, `pos_label` will be inferred in the following manner: * if `y_true` in {-1, 1} or {0, 1}, `pos_label` defaults to 1; * else if `y_true` contains string, an error will be raised and `pos_label` should be explicitly specified; * otherwise, `pos_label` defaults to the greater label, i.e. `np.unique(y_true)[-1]`. Returns ------- transformed_labels : array of shape (n_samples, 2) y_prob : array of shape (n_samples, 2)
python
sklearn/metrics/_classification.py
3,559
[ "y_true", "y_prob", "sample_weight", "pos_label" ]
false
7
6
scikit-learn/scikit-learn
64,340
numpy
false
getInputStream
@Override public InputStream getInputStream(ZipEntry entry) throws IOException { Objects.requireNonNull(entry, "entry"); if (entry instanceof NestedJarEntry nestedJarEntry && nestedJarEntry.isOwnedBy(this)) { return getInputStream(nestedJarEntry.contentEntry()); } return getInputStream(getNestedJarEntry(entry.getName()).contentEntry()); }
Return if an entry with the given name exists. @param name the name to check @return if the entry exists
java
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/jar/NestedJarFile.java
343
[ "entry" ]
InputStream
true
3
8.08
spring-projects/spring-boot
79,428
javadoc
false
rackId
public Optional<String> rackId() { return rackId; }
@return Instance ID used by the member when joining the group. If non-empty, it will indicate that this is a static member.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManager.java
207
[]
true
1
6.96
apache/kafka
31,560
javadoc
false
sample
def sample( obj_len: int, size: int, replace: bool, weights: np.ndarray | None, random_state: np.random.RandomState | np.random.Generator, ) -> np.ndarray: """ Randomly sample `size` indices in `np.arange(obj_len)`. Parameters ---------- obj_len : int The length of the indices being considered size : int The number of values to choose replace : bool Allow or disallow sampling of the same row more than once. weights : np.ndarray[np.float64] or None If None, equal probability weighting, otherwise weights according to the vector normalized random_state: np.random.RandomState or np.random.Generator State used for the random sampling Returns ------- np.ndarray[np.intp] """ if weights is not None: weight_sum = weights.sum() if weight_sum != 0: weights = weights / weight_sum else: raise ValueError("Invalid weights: weights sum to zero") assert weights is not None # for mypy if not replace and size * weights.max() > 1: raise ValueError( "Weighted sampling cannot be achieved with replace=False. Either " "set replace=True or use smaller weights. See the docstring of " "sample for details." ) return random_state.choice(obj_len, size=size, replace=replace, p=weights).astype( np.intp, copy=False )
Randomly sample `size` indices in `np.arange(obj_len)`. Parameters ---------- obj_len : int The length of the indices being considered size : int The number of values to choose replace : bool Allow or disallow sampling of the same row more than once. weights : np.ndarray[np.float64] or None If None, equal probability weighting, otherwise weights according to the vector normalized random_state: np.random.RandomState or np.random.Generator State used for the random sampling Returns ------- np.ndarray[np.intp]
python
pandas/core/sample.py
118
[ "obj_len", "size", "replace", "weights", "random_state" ]
np.ndarray
true
6
6.24
pandas-dev/pandas
47,362
numpy
false
getMatchData
function getMatchData(object) { var result = keys(object), length = result.length; while (length--) { var key = result[length], value = object[key]; result[length] = [key, value, isStrictComparable(value)]; } return result; }
Gets the property names, values, and compare flags of `object`. @private @param {Object} object The object to query. @returns {Array} Returns the match data of `object`.
javascript
lodash.js
6,054
[ "object" ]
false
2
6.24
lodash/lodash
61,490
jsdoc
false
memberEquals
private static boolean memberEquals(final Class<?> type, final Object o1, final Object o2) { if (o1 == o2) { return true; } if (o1 == null || o2 == null) { return false; } if (type.isArray()) { return arrayMemberEquals(type.getComponentType(), o1, o2); } if (type.isAnnotation()) { return equals((Annotation) o1, (Annotation) o2); } return o1.equals(o2); }
Helper method for checking whether two objects of the given type are equal. This method is used to compare the parameters of two annotation instances. @param type the type of the objects to be compared @param o1 the first object @param o2 the second object @return a flag whether these objects are equal
java
src/main/java/org/apache/commons/lang3/AnnotationUtils.java
307
[ "type", "o1", "o2" ]
true
6
8
apache/commons-lang
2,896
javadoc
false
protocolType
protected abstract String protocolType();
Unique identifier for the class of supported protocols (e.g. "consumer" or "connect"). @return Non-null protocol type name
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
204
[]
String
true
1
6.32
apache/kafka
31,560
javadoc
false
write
def write( self, log: str, remote_log_location: str, append: bool = True, max_retry: int = 1, ) -> bool: """ Write the log to the remote_log_location; return `True` or fails silently and return `False`. :param log: the contents to write to the remote_log_location :param remote_log_location: the log's location in remote storage :param append: if False, any existing log file is overwritten. If True, the new log is appended to any existing logs. :param max_retry: Maximum number of times to retry on upload failure :return: whether the log is successfully written to remote location or not. """ try: if append and self.s3_log_exists(remote_log_location): old_log = self.s3_read(remote_log_location) log = f"{old_log}\n{log}" if old_log else log except Exception: self.log.exception("Could not verify previous log to append") return False # Default to a single retry attempt because s3 upload failures are # rare but occasionally occur. Multiple retry attempts are unlikely # to help as they usually indicate non-ephemeral errors. for try_num in range(1 + max_retry): try: self.hook.load_string( log, key=remote_log_location, replace=True, encrypt=conf.getboolean("logging", "ENCRYPT_S3_LOGS"), ) break except Exception: if try_num < max_retry: self.log.warning( "Failed attempt to write logs to %s, will retry", remote_log_location, ) else: self.log.exception("Could not write logs to %s", remote_log_location) return False return True
Write the log to the remote_log_location; return `True` or fails silently and return `False`. :param log: the contents to write to the remote_log_location :param remote_log_location: the log's location in remote storage :param append: if False, any existing log file is overwritten. If True, the new log is appended to any existing logs. :param max_retry: Maximum number of times to retry on upload failure :return: whether the log is successfully written to remote location or not.
python
providers/amazon/src/airflow/providers/amazon/aws/log/s3_task_handler.py
102
[ "self", "log", "remote_log_location", "append", "max_retry" ]
bool
true
7
8.08
apache/airflow
43,597
sphinx
false
getInfo
@SuppressWarnings("unchecked") public <I> I getInfo(Class<I> type, Function<ZipContent, I> function) { Map<Class<?>, Object> info = (this.info != null) ? this.info.get() : null; if (info == null) { info = new ConcurrentHashMap<>(); this.info = new SoftReference<>(info); } return (I) info.computeIfAbsent(type, (key) -> { debug.log("Getting %s info from zip '%s'", type.getName(), this); return function.apply(this); }); }
Get or compute information based on the {@link ZipContent}. @param <I> the info type to get or compute @param type the info type to get or compute @param function the function used to compute the information @return the computed or existing information
java
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipContent.java
317
[ "type", "function" ]
I
true
3
7.76
spring-projects/spring-boot
79,428
javadoc
false
initializeGpuInfo
private static long initializeGpuInfo() { try { var gpuInfoProvider = CuVSProvider.provider().gpuInfoProvider(); var availableGPUs = gpuInfoProvider.availableGPUs(); if (availableGPUs.isEmpty()) { LOG.warn("No GPU found"); return -1L; } for (var gpu : availableGPUs) { int major = gpu.computeCapabilityMajor(); int minor = gpu.computeCapabilityMinor(); boolean hasRequiredCapability = major >= GPUInfoProvider.MIN_COMPUTE_CAPABILITY_MAJOR && (major > GPUInfoProvider.MIN_COMPUTE_CAPABILITY_MAJOR || minor >= GPUInfoProvider.MIN_COMPUTE_CAPABILITY_MINOR); boolean hasRequiredMemory = gpu.totalDeviceMemoryInBytes() >= MIN_DEVICE_MEMORY_IN_BYTES; if (hasRequiredCapability == false) { LOG.warn( "GPU [{}] does not have the minimum compute capabilities (required: [{}.{}], found: [{}.{}])", gpu.name(), GPUInfoProvider.MIN_COMPUTE_CAPABILITY_MAJOR, GPUInfoProvider.MIN_COMPUTE_CAPABILITY_MINOR, gpu.computeCapabilityMajor(), gpu.computeCapabilityMinor() ); } else if (hasRequiredMemory == false) { LOG.warn( "GPU [{}] does not have minimum memory required (required: [{}], found: [{}])", gpu.name(), MIN_DEVICE_MEMORY_IN_BYTES, gpu.totalDeviceMemoryInBytes() ); } else { LOG.info("Found compatible GPU [{}] (id: [{}])", gpu.name(), gpu.gpuId()); return gpu.totalDeviceMemoryInBytes(); } } return -1L; } catch (UnsupportedOperationException uoe) { final String msg; if (uoe.getMessage() == null) { msg = Strings.format( "runtime Java version [%d], OS [%s], arch [%s]", Runtime.version().feature(), System.getProperty("os.name"), System.getProperty("os.arch") ); } else { msg = uoe.getMessage(); } LOG.warn("GPU based vector indexing is not supported on this platform; " + msg); return -1L; } catch (Throwable t) { if (t instanceof ExceptionInInitializerError ex) { t = ex.getCause(); } LOG.warn("Exception occurred during creation of cuvs resources", t); return -1L; } }
Initializes GPU support information by finding the first compatible GPU. Returns the total GPU memory in bytes, or -1 if GPU is not found or supported.
java
libs/gpu-codec/src/main/java/org/elasticsearch/gpu/GPUSupport.java
41
[]
true
10
6.8
elastic/elasticsearch
75,680
javadoc
false
removePattern
@Deprecated public static String removePattern(final String source, final String regex) { return RegExUtils.removePattern(source, regex); }
Removes each substring of the source String that matches the given regular expression using the DOTALL option. This call is a {@code null} safe equivalent to: <ul> <li>{@code source.replaceAll(&quot;(?s)&quot; + regex, StringUtils.EMPTY)}</li> <li>{@code Pattern.compile(regex, Pattern.DOTALL).matcher(source).replaceAll(StringUtils.EMPTY)}</li> </ul> <p> A {@code null} reference passed to this method is a no-op. </p> <pre>{@code StringUtils.removePattern(null, *) = null StringUtils.removePattern("any", (String) null) = "any" StringUtils.removePattern("A<__>\n<__>B", "<.*>") = "AB" StringUtils.removePattern("ABCabc123", "[a-z]") = "ABC123" }</pre> @param source the source string. @param regex the regular expression to which this string is to be matched. @return The resulting {@link String}. @see #replacePattern(String, String, String) @see String#replaceAll(String, String) @see Pattern#DOTALL @since 3.2 @since 3.5 Changed {@code null} reference passed to this method is a no-op. @deprecated Use {@link RegExUtils#removePattern(CharSequence, String)}.
java
src/main/java/org/apache/commons/lang3/StringUtils.java
5,928
[ "source", "regex" ]
String
true
1
6.32
apache/commons-lang
2,896
javadoc
false
_boost
def _boost(self, iboost, X, y, sample_weight, random_state): """Implement a single boost. Perform a single boost according to the discrete SAMME algorithm and return the updated sample weights. Parameters ---------- iboost : int The index of the current boost iteration. X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. y : array-like of shape (n_samples,) The target values (class labels). sample_weight : array-like of shape (n_samples,) The current sample weights. random_state : RandomState instance The RandomState instance used if the base estimator accepts a `random_state` attribute. Returns ------- sample_weight : array-like of shape (n_samples,) or None The reweighted sample weights. If None then boosting has terminated early. estimator_weight : float The weight for the current boost. If None then boosting has terminated early. estimator_error : float The classification error for the current boost. If None then boosting has terminated early. """ estimator = self._make_estimator(random_state=random_state) estimator.fit(X, y, sample_weight=sample_weight) y_predict = estimator.predict(X) if iboost == 0: self.classes_ = getattr(estimator, "classes_", None) self.n_classes_ = len(self.classes_) # Instances incorrectly classified incorrect = y_predict != y # Error fraction estimator_error = np.mean(np.average(incorrect, weights=sample_weight, axis=0)) # Stop if classification is perfect if estimator_error <= 0: return sample_weight, 1.0, 0.0 n_classes = self.n_classes_ # Stop if the error is at least as bad as random guessing if estimator_error >= 1.0 - (1.0 / n_classes): self.estimators_.pop(-1) if len(self.estimators_) == 0: raise ValueError( "BaseClassifier in AdaBoostClassifier " "ensemble is worse than random, ensemble " "can not be fit." 
) return None, None, None # Boost weight using multi-class AdaBoost SAMME alg estimator_weight = self.learning_rate * ( np.log((1.0 - estimator_error) / estimator_error) + np.log(n_classes - 1.0) ) # Only boost the weights if it will fit again if not iboost == self.n_estimators - 1: # Only boost positive weights sample_weight = np.exp( np.log(sample_weight) + estimator_weight * incorrect * (sample_weight > 0) ) return sample_weight, estimator_weight, estimator_error
Implement a single boost. Perform a single boost according to the discrete SAMME algorithm and return the updated sample weights. Parameters ---------- iboost : int The index of the current boost iteration. X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. y : array-like of shape (n_samples,) The target values (class labels). sample_weight : array-like of shape (n_samples,) The current sample weights. random_state : RandomState instance The RandomState instance used if the base estimator accepts a `random_state` attribute. Returns ------- sample_weight : array-like of shape (n_samples,) or None The reweighted sample weights. If None then boosting has terminated early. estimator_weight : float The weight for the current boost. If None then boosting has terminated early. estimator_error : float The classification error for the current boost. If None then boosting has terminated early.
python
sklearn/ensemble/_weight_boosting.py
486
[ "self", "iboost", "X", "y", "sample_weight", "random_state" ]
false
6
6
scikit-learn/scikit-learn
64,340
numpy
false
get_k8s_pod_yaml
def get_k8s_pod_yaml(cls, ti: TaskInstance, session: Session = NEW_SESSION) -> dict | None: """ Get rendered Kubernetes Pod Yaml for a TaskInstance from the RenderedTaskInstanceFields table. :param ti: Task Instance :param session: SqlAlchemy Session :return: Kubernetes Pod Yaml """ result = session.scalar( select(cls).where( cls.dag_id == ti.dag_id, cls.task_id == ti.task_id, cls.run_id == ti.run_id, cls.map_index == ti.map_index, ) ) return result.k8s_pod_yaml if result else None
Get rendered Kubernetes Pod Yaml for a TaskInstance from the RenderedTaskInstanceFields table. :param ti: Task Instance :param session: SqlAlchemy Session :return: Kubernetes Pod Yaml
python
airflow-core/src/airflow/models/renderedtifields.py
227
[ "cls", "ti", "session" ]
dict | None
true
2
7.44
apache/airflow
43,597
sphinx
false
generate_kernel_call
def generate_kernel_call( self, kernel_name: str, call_args, *, device=None, triton=True, arg_types=None, raw_keys=None, raw_args=None, triton_meta=None, original_fxnode_name=None, ): """ Generates kernel call code. triton: Defines whether the backend uses Triton for codegen. Otherwise it uses the CUDA language when gpu=True, and C++ when gpu=False. """ # Store buffers corresponding to each call arg. # This is used to generate example args for autotuning later on. self.args_to_buffers.update( { arg: V.graph.try_get_buffer(arg) for arg in call_args if isinstance(arg, str) } ) device = device or V.graph.get_current_device_or_throw() self.writeline( KernelCallLine( self, kernel_name=kernel_name, call_args=call_args, # pyrefly: ignore [bad-argument-type] raw_keys=raw_keys, # pyrefly: ignore [bad-argument-type] raw_args=raw_args, # pyrefly: ignore [bad-argument-type] arg_types=arg_types, triton=triton, # pyrefly: ignore [bad-argument-type] triton_meta=triton_meta, device=device, graph_name=V.graph.name, # pyrefly: ignore [bad-argument-type] original_fxnode_name=original_fxnode_name, ) )
Generates kernel call code. triton: Defines whether the backend uses Triton for codegen. Otherwise it uses the CUDA language when gpu=True, and C++ when gpu=False.
python
torch/_inductor/codegen/wrapper.py
2,848
[ "self", "kernel_name", "call_args", "device", "triton", "arg_types", "raw_keys", "raw_args", "triton_meta", "original_fxnode_name" ]
true
2
6.88
pytorch/pytorch
96,034
unknown
false
afterPropertiesSet
@Override public void afterPropertiesSet() { if (isSingleton()) { this.map = createMap(); } }
Set if a singleton should be created, or a new object on each request otherwise. Default is {@code true} (a singleton).
java
spring-beans/src/main/java/org/springframework/beans/factory/config/YamlMapFactoryBean.java
94
[]
void
true
2
7.04
spring-projects/spring-framework
59,386
javadoc
false
validIndex
public static <T> T[] validIndex(final T[] array, final int index) { return validIndex(array, index, DEFAULT_VALID_INDEX_ARRAY_EX_MESSAGE, Integer.valueOf(index)); }
Validates that the index is within the bounds of the argument array; otherwise throwing an exception. <pre>Validate.validIndex(myArray, 2);</pre> <p>If the array is {@code null}, then the message of the exception is &quot;The validated object is null&quot;.</p> <p>If the index is invalid, then the message of the exception is &quot;The validated array index is invalid: &quot; followed by the index.</p> @param <T> the array type. @param array the array to check, validated not null by this method. @param index the index to check. @return the validated array (never {@code null} for method chaining). @throws NullPointerException if the array is {@code null}. @throws IndexOutOfBoundsException if the index is invalid. @see #validIndex(Object[], int, String, Object...) @since 3.0
java
src/main/java/org/apache/commons/lang3/Validate.java
1,197
[ "array", "index" ]
true
1
6.64
apache/commons-lang
2,896
javadoc
false
noNullElements
public static <T> T[] noNullElements(final T[] array, final String message, final Object... values) { Objects.requireNonNull(array, "array"); for (int i = 0; i < array.length; i++) { if (array[i] == null) { final Object[] values2 = ArrayUtils.add(values, Integer.valueOf(i)); throw new IllegalArgumentException(getMessage(message, values2)); } } return array; }
Validate that the specified argument array is neither {@code null} nor contains any elements that are {@code null}; otherwise throwing an exception with the specified message. <pre>Validate.noNullElements(myArray, "The array contain null at position %d");</pre> <p>If the array is {@code null}, then the message in the exception is &quot;The validated object is null&quot;. <p>If the array has a {@code null} element, then the iteration index of the invalid element is appended to the {@code values} argument.</p> @param <T> the array type. @param array the array to check, validated not null by this method. @param message the {@link String#format(String, Object...)} exception message if invalid, not null. @param values the optional values for the formatted exception message, null array not recommended. @return the validated array (never {@code null} method for chaining). @throws NullPointerException if the array is {@code null}. @throws IllegalArgumentException if an element is {@code null}. @see #noNullElements(Object[])
java
src/main/java/org/apache/commons/lang3/Validate.java
751
[ "array", "message" ]
true
3
7.6
apache/commons-lang
2,896
javadoc
false
getOverrideHierarchy
public static Set<Method> getOverrideHierarchy(final Method method, final Interfaces interfacesBehavior) { Objects.requireNonNull(method, "method"); final Set<Method> result = new LinkedHashSet<>(); result.add(method); final Class<?>[] parameterTypes = method.getParameterTypes(); final Class<?> declaringClass = method.getDeclaringClass(); final Iterator<Class<?>> hierarchy = ClassUtils.hierarchy(declaringClass, interfacesBehavior).iterator(); //skip the declaring class :P hierarchy.next(); hierarchyTraversal: while (hierarchy.hasNext()) { final Class<?> c = hierarchy.next(); final Method m = getMatchingAccessibleMethod(c, method.getName(), parameterTypes); if (m == null) { continue; } if (Arrays.equals(m.getParameterTypes(), parameterTypes)) { // matches without generics result.add(m); continue; } // necessary to get arguments every time in the case that we are including interfaces final Map<TypeVariable<?>, Type> typeArguments = TypeUtils.getTypeArguments(declaringClass, m.getDeclaringClass()); for (int i = 0; i < parameterTypes.length; i++) { final Type childType = TypeUtils.unrollVariables(typeArguments, method.getGenericParameterTypes()[i]); final Type parentType = TypeUtils.unrollVariables(typeArguments, m.getGenericParameterTypes()[i]); if (!TypeUtils.equals(childType, parentType)) { continue hierarchyTraversal; } } result.add(m); } return result; }
Gets the hierarchy of overridden methods down to {@code result} respecting generics. @param method lowest to consider. @param interfacesBehavior whether to search interfaces, {@code null} {@code implies} false. @return a {@code Set<Method>} in ascending order from subclass to superclass. @throws NullPointerException if the specified method is {@code null}. @throws SecurityException if an underlying accessible object's method denies the request. @see SecurityManager#checkPermission @since 3.2
java
src/main/java/org/apache/commons/lang3/reflect/MethodUtils.java
501
[ "method", "interfacesBehavior" ]
true
6
7.6
apache/commons-lang
2,896
javadoc
false
compression
@Override public double compression() { if (mergingDigest != null) { return mergingDigest.compression(); } return sortingDigest.compression(); }
Similar to the constructor above. The limit for switching from a {@link SortingDigest} to a {@link MergingDigest} implementation is calculated based on the passed compression factor. @param compression The compression factor for the MergingDigest
java
libs/tdigest/src/main/java/org/elasticsearch/tdigest/HybridDigest.java
182
[]
true
2
6.24
elastic/elasticsearch
75,680
javadoc
false
generate_copies_of_performance_dag
def generate_copies_of_performance_dag( performance_dag_path: str, performance_dag_conf: dict[str, str] ) -> tuple[str, list[str]]: """ Create context manager that creates copies of DAG. Contextmanager that creates copies of performance DAG inside temporary directory using the dag prefix env variable as a base for filenames. :param performance_dag_path: path to the performance DAG that should be copied. :param performance_dag_conf: dict with environment variables as keys and their values as values. :yields: a pair consisting of path to the temporary directory and a list with paths to copies of performance DAG :type: tuple[str, list[str]] """ dag_files_count = int( get_performance_dag_environment_variable(performance_dag_conf, "PERF_DAG_FILES_COUNT") ) safe_dag_prefix = get_dag_prefix(performance_dag_conf) with tempfile.TemporaryDirectory() as temp_dir: performance_dag_copies = [] for i in range(1, dag_files_count + 1): destination_filename = f"{safe_dag_prefix}_{i}.py" destination_path = os.path.join(temp_dir, destination_filename) copyfile(performance_dag_path, destination_path) performance_dag_copies.append(destination_path) yield temp_dir, performance_dag_copies
Create context manager that creates copies of DAG. Contextmanager that creates copies of performance DAG inside temporary directory using the dag prefix env variable as a base for filenames. :param performance_dag_path: path to the performance DAG that should be copied. :param performance_dag_conf: dict with environment variables as keys and their values as values. :yields: a pair consisting of path to the temporary directory and a list with paths to copies of performance DAG :type: tuple[str, list[str]]
python
performance/src/performance_dags/performance_dag/performance_dag_utils.py
414
[ "performance_dag_path", "performance_dag_conf" ]
tuple[str, list[str]]
true
2
6.72
apache/airflow
43,597
sphinx
false
nanAwareAggregate
private static double nanAwareAggregate(double a, double b, DoubleBinaryOperator aggregator) { if (Double.isNaN(a)) { return b; } if (Double.isNaN(b)) { return a; } return aggregator.applyAsDouble(a, b); }
Merges the given histogram into the current result. The histogram might be upscaled if needed. @param toAdd the histogram to merge
java
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogramMerger.java
298
[ "a", "b", "aggregator" ]
true
3
6.72
elastic/elasticsearch
75,680
javadoc
false
poll
@Override public NetworkClientDelegate.PollResult poll(long currentTimeMs) { if (coordinatorRequestManager.coordinator().isEmpty() || membershipManager().shouldSkipHeartbeat()) { membershipManager().onHeartbeatRequestSkipped(); maybePropagateCoordinatorFatalErrorEvent(); return NetworkClientDelegate.PollResult.EMPTY; } pollTimer.update(currentTimeMs); if (pollTimer.isExpired() && !membershipManager().isLeavingGroup()) { logger.warn("Consumer poll timeout has expired. This means the time between " + "subsequent calls to poll() was longer than the configured max.poll.interval.ms, " + "which typically implies that the poll loop is spending too much time processing " + "messages. You can address this either by increasing max.poll.interval.ms or by " + "reducing the maximum size of batches returned in poll() with max.poll.records."); membershipManager().transitionToSendingLeaveGroup(true); NetworkClientDelegate.UnsentRequest leaveHeartbeat = makeHeartbeatRequest(currentTimeMs, true); // We can ignore the leave response because we can join before or after receiving the response. heartbeatRequestState.reset(); resetHeartbeatState(); return new NetworkClientDelegate.PollResult(heartbeatRequestState.heartbeatIntervalMs(), Collections.singletonList(leaveHeartbeat)); } // Case 1: The member state is LEAVING - if the member is a share consumer, we should immediately send leave; // if the member is an async consumer, this will also depend on leavingGroupOperation. 
boolean heartbeatNow = shouldSendLeaveHeartbeatNow() || // Case 2: The member state indicates it should send a heartbeat without waiting for the interval, // and there is no heartbeat request currently in-flight (membershipManager().shouldHeartbeatNow() && !heartbeatRequestState.requestInFlight()); if (!heartbeatRequestState.canSendRequest(currentTimeMs) && !heartbeatNow) { return new NetworkClientDelegate.PollResult(heartbeatRequestState.timeToNextHeartbeatMs(currentTimeMs)); } NetworkClientDelegate.UnsentRequest request = makeHeartbeatRequest(currentTimeMs, false); return new NetworkClientDelegate.PollResult(heartbeatRequestState.heartbeatIntervalMs(), Collections.singletonList(request)); }
This will build a heartbeat request if one must be sent, determined based on the member state. A heartbeat is sent in the following situations: <ol> <li>Member is part of the consumer group or wants to join it.</li> <li>The heartbeat interval has expired, or the member is in a state that indicates that it should heartbeat without waiting for the interval.</li> </ol> This will also determine the maximum wait time until the next poll based on the member's state. <ol> <li>If the member is without a coordinator or is in a failed state, the timer is set to Long.MAX_VALUE, as there's no need to send a heartbeat.</li> <li>If the member cannot send a heartbeat due to either exponential backoff, it will return the remaining time left on the backoff timer.</li> <li>If the member's heartbeat timer has not expired, It will return the remaining time left on the heartbeat timer.</li> <li>If the member can send a heartbeat, the timer is set to the current heartbeat interval.</li> </ol> @return {@link PollResult} that includes a heartbeat request if one must be sent, and the time to wait until the next poll.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractHeartbeatRequestManager.java
163
[ "currentTimeMs" ]
true
9
7.04
apache/kafka
31,560
javadoc
false
all_functions
def all_functions(): """Get a list of all functions from `sklearn`. Returns ------- functions : list of tuples List of (name, function), where ``name`` is the function name as string and ``function`` is the actual function. Examples -------- >>> from sklearn.utils.discovery import all_functions >>> functions = all_functions() >>> name, function = functions[0] >>> name 'accuracy_score' """ # lazy import to avoid circular imports from sklearn.base from sklearn.utils._testing import ignore_warnings all_functions = [] root = str(Path(__file__).parent.parent) # sklearn package # Ignore deprecation warnings triggered at import time and from walking # packages with ignore_warnings(category=FutureWarning): for _, module_name, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."): module_parts = module_name.split(".") if ( any(part in _MODULE_TO_IGNORE for part in module_parts) or "._" in module_name ): continue module = import_module(module_name) functions = inspect.getmembers(module, _is_checked_function) functions = [ (func.__name__, func) for name, func in functions if not name.startswith("_") ] all_functions.extend(functions) # drop duplicates, sort for reproducibility # itemgetter is used to ensure the sort does not extend to the 2nd item of # the tuple return sorted(set(all_functions), key=itemgetter(0))
Get a list of all functions from `sklearn`. Returns ------- functions : list of tuples List of (name, function), where ``name`` is the function name as string and ``function`` is the actual function. Examples -------- >>> from sklearn.utils.discovery import all_functions >>> functions = all_functions() >>> name, function = functions[0] >>> name 'accuracy_score'
python
sklearn/utils/discovery.py
210
[]
false
4
7.36
scikit-learn/scikit-learn
64,340
unknown
false
hasNonStaticBeanMethods
boolean hasNonStaticBeanMethods() { for (BeanMethod beanMethod : this.beanMethods) { if (!beanMethod.getMetadata().isStatic()) { return true; } } return false; }
Return the configuration classes that imported this class, or an empty Set if this configuration was not imported. @since 4.0.5 @see #isImported()
java
spring-context/src/main/java/org/springframework/context/annotation/ConfigurationClass.java
211
[]
true
2
6.72
spring-projects/spring-framework
59,386
javadoc
false
reallocateResultWithCapacity
private void reallocateResultWithCapacity(int newCapacity, boolean copyBucketsFromPreviousResult) { FixedCapacityExponentialHistogram newResult = FixedCapacityExponentialHistogram.create(newCapacity, breaker); if (copyBucketsFromPreviousResult && result != null) { BucketIterator it = result.negativeBuckets().iterator(); while (it.hasNext()) { boolean added = newResult.tryAddBucket(it.peekIndex(), it.peekCount(), false); assert added : "Output histogram should have enough capacity"; it.advance(); } it = result.positiveBuckets().iterator(); while (it.hasNext()) { boolean added = newResult.tryAddBucket(it.peekIndex(), it.peekCount(), true); assert added : "Output histogram should have enough capacity"; it.advance(); } } if (result != null && resultAlreadyReturned == false) { Releasables.close(result); } resultAlreadyReturned = false; result = newResult; }
Sets the given bucket of the negative buckets. If the bucket already exists, it will be replaced. Buckets may be set in arbitrary order. However, for best performance and minimal allocations, buckets should be set in order of increasing index and all negative buckets should be set before positive buckets. @param index the index of the bucket @param count the count of the bucket, must be at least 1 @return the builder
java
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogramBuilder.java
221
[ "newCapacity", "copyBucketsFromPreviousResult" ]
void
true
7
8.24
elastic/elasticsearch
75,680
javadoc
false
warnDisablingExponentialBackoff
public static void warnDisablingExponentialBackoff(AbstractConfig config) { long retryBackoffMs = config.getLong(RETRY_BACKOFF_MS_CONFIG); long retryBackoffMaxMs = config.getLong(RETRY_BACKOFF_MAX_MS_CONFIG); if (retryBackoffMs > retryBackoffMaxMs) { log.warn("Configuration '{}' with value '{}' is greater than configuration '{}' with value '{}'. " + "A static backoff with value '{}' will be applied.", RETRY_BACKOFF_MS_CONFIG, retryBackoffMs, RETRY_BACKOFF_MAX_MS_CONFIG, retryBackoffMaxMs, retryBackoffMaxMs); } long connectionSetupTimeoutMs = config.getLong(SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG); long connectionSetupTimeoutMaxMs = config.getLong(SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG); if (connectionSetupTimeoutMs > connectionSetupTimeoutMaxMs) { log.warn("Configuration '{}' with value '{}' is greater than configuration '{}' with value '{}'. " + "A static connection setup timeout with value '{}' will be applied.", SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG, connectionSetupTimeoutMs, SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG, connectionSetupTimeoutMaxMs, connectionSetupTimeoutMaxMs); } }
Log warning if the exponential backoff is disabled due to initial backoff value is greater than max backoff value. @param config The config object.
java
clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java
277
[ "config" ]
void
true
3
6.56
apache/kafka
31,560
javadoc
false
controller
public Node controller() { return holder().controller; }
The controller node returned in metadata response @return the controller node or null if it doesn't exist
java
clients/src/main/java/org/apache/kafka/common/requests/MetadataResponse.java
249
[]
Node
true
1
6.32
apache/kafka
31,560
javadoc
false
as
@SuppressWarnings("unchecked") public <R> Member<R> as(Extractor<T, R> extractor) { Assert.notNull(extractor, "'adapter' must not be null"); Member<R> result = (Member<R>) this; result.valueExtractor = this.valueExtractor.as(extractor::extract); return result; }
Adapt the value by applying the given {@link Function}. @param <R> the result type @param extractor a {@link Extractor} to adapt the value @return a {@link Member} which may be configured further
java
core/spring-boot/src/main/java/org/springframework/boot/json/JsonWriter.java
452
[ "extractor" ]
true
1
6.88
spring-projects/spring-boot
79,428
javadoc
false
hasReadyNodes
public boolean hasReadyNodes(long now) { for (Map.Entry<String, NodeConnectionState> entry : nodeState.entrySet()) { if (isReady(entry.getValue(), now)) { return true; } } return false; }
Return true if there is at least one node with connection in the READY state and not throttled. Returns false otherwise. @param now the current time in ms
java
clients/src/main/java/org/apache/kafka/clients/ClusterConnectionStates.java
300
[ "now" ]
true
2
6.88
apache/kafka
31,560
javadoc
false
doShutdown
protected void doShutdown() throws IOException { IOUtils.close(databaseReader.get()); int numEntriesEvicted = cache.purgeCacheEntriesForDatabase(projectId, databasePath); logger.info("evicted [{}] entries from cache after reloading database [{}]", numEntriesEvicted, databasePath); if (deleteDatabaseFileOnShutdown) { logger.info("deleting [{}]", databasePath); Files.delete(databasePath); } }
Prepares the database for lookup by incrementing the usage count. If the usage count is already negative, it indicates that the database is being closed, and this method will return false to indicate that no lookup should be performed. @return true if the database is ready for lookup, false if it is being closed
java
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseReaderLazyLoader.java
156
[]
void
true
2
8.08
elastic/elasticsearch
75,680
javadoc
false
serverAuthenticationSessionExpired
public boolean serverAuthenticationSessionExpired(long nowNanos) { Long serverSessionExpirationTimeNanos = authenticator.serverSessionExpirationTimeNanos(); return serverSessionExpirationTimeNanos != null && nowNanos - serverSessionExpirationTimeNanos > 0; }
Return true if this is a server-side channel and the given time is past the session expiration time, if any, otherwise false @param nowNanos the current time in nanoseconds as per {@code System.nanoTime()} @return true if this is a server-side channel and the given time is past the session expiration time, if any, otherwise false
java
clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java
641
[ "nowNanos" ]
true
2
7.36
apache/kafka
31,560
javadoc
false
topicPartitions
public ListConsumerGroupOffsetsSpec topicPartitions(Collection<TopicPartition> topicPartitions) { this.topicPartitions = topicPartitions; return this; }
Set the topic partitions whose offsets are to be listed for a consumer group. {@code null} includes all topic partitions. @param topicPartitions List of topic partitions to include @return This ListConsumerGroupOffsetSpec
java
clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupOffsetsSpec.java
39
[ "topicPartitions" ]
ListConsumerGroupOffsetsSpec
true
1
6.16
apache/kafka
31,560
javadoc
false
growIfNeeded
private void growIfNeeded() { if (size > queue.length) { int newCapacity = calculateNewCapacity(); Object[] newQueue = new Object[newCapacity]; arraycopy(queue, 0, newQueue, 0, queue.length); queue = newQueue; } }
Returns the comparator used to order the elements in this queue. Obeys the general contract of {@link PriorityQueue#comparator}, but returns {@link Ordering#natural} instead of {@code null} to indicate natural ordering.
java
android/guava/src/com/google/common/collect/MinMaxPriorityQueue.java
957
[]
void
true
2
6.08
google/guava
51,352
javadoc
false
synchronizedSetMultimap
@J2ktIncompatible // Synchronized public static <K extends @Nullable Object, V extends @Nullable Object> SetMultimap<K, V> synchronizedSetMultimap(SetMultimap<K, V> multimap) { return Synchronized.setMultimap(multimap, null); }
Returns a synchronized (thread-safe) {@code SetMultimap} backed by the specified multimap. <p>You must follow the warnings described in {@link #synchronizedMultimap}. <p>The returned multimap will be serializable if the specified multimap is serializable. @param multimap the multimap to be wrapped @return a synchronized view of the specified multimap
java
android/guava/src/com/google/common/collect/Multimaps.java
899
[ "multimap" ]
true
1
6.4
google/guava
51,352
javadoc
false
get_cmake_cache_variables
def get_cmake_cache_variables(self) -> dict[str, CMakeValue]: r"""Gets values in CMakeCache.txt into a dictionary. Returns: dict: A ``dict`` containing the value of cached CMake variables. """ with open(self._cmake_cache_file) as f: return get_cmake_cache_variables_from_file(f)
r"""Gets values in CMakeCache.txt into a dictionary. Returns: dict: A ``dict`` containing the value of cached CMake variables.
python
tools/setup_helpers/cmake.py
159
[ "self" ]
dict[str, CMakeValue]
true
1
6.4
pytorch/pytorch
96,034
unknown
false
addIfAbsent
public boolean addIfAbsent(long offset, AcknowledgeType type) { return acknowledgements.putIfAbsent(offset, type) == null; }
Adds an acknowledgement for a specific offset. Will <b>not</b> overwrite an existing acknowledgement for the same offset. @param offset The record offset. @param type The AcknowledgeType. @return Whether the acknowledgement was added.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/Acknowledgements.java
75
[ "offset", "type" ]
true
1
6.32
apache/kafka
31,560
javadoc
false
invokeBeanDefiningMethod
private GroovyBeanDefinitionWrapper invokeBeanDefiningMethod(String beanName, Object[] args) { boolean hasClosureArgument = (args[args.length - 1] instanceof Closure); if (args[0] instanceof Class<?> beanClass) { if (hasClosureArgument) { if (args.length - 1 != 1) { this.currentBeanDefinition = new GroovyBeanDefinitionWrapper( beanName, beanClass, resolveConstructorArguments(args, 1, args.length - 1)); } else { this.currentBeanDefinition = new GroovyBeanDefinitionWrapper(beanName, beanClass); } } else { this.currentBeanDefinition = new GroovyBeanDefinitionWrapper( beanName, beanClass, resolveConstructorArguments(args, 1, args.length)); } } else if (args[0] instanceof RuntimeBeanReference runtimeBeanReference) { this.currentBeanDefinition = new GroovyBeanDefinitionWrapper(beanName); this.currentBeanDefinition.getBeanDefinition().setFactoryBeanName(runtimeBeanReference.getBeanName()); } else if (args[0] instanceof Map<?, ?> namedArgs) { // named constructor arguments if (args.length > 1 && args[1] instanceof Class<?> clazz) { List<Object> constructorArgs = resolveConstructorArguments(args, 2, (hasClosureArgument ? args.length - 1 : args.length)); this.currentBeanDefinition = new GroovyBeanDefinitionWrapper(beanName, clazz, constructorArgs); for (Map.Entry<?, ?> entity : namedArgs.entrySet()) { String propName = (String) entity.getKey(); setProperty(propName, entity.getValue()); } } // factory method syntax else { this.currentBeanDefinition = new GroovyBeanDefinitionWrapper(beanName); // First arg is the map containing factoryBean : factoryMethod Map.Entry<?, ?> factoryBeanEntry = namedArgs.entrySet().iterator().next(); // If we have a closure body, that will be the last argument. // In between are the constructor args int constructorArgsTest = (hasClosureArgument ? 2 : 1); // If we have more than this number of args, we have constructor args if (args.length > constructorArgsTest){ // factory-method requires args int endOfConstructArgs = (hasClosureArgument ? 
args.length - 1 : args.length); this.currentBeanDefinition = new GroovyBeanDefinitionWrapper(beanName, null, resolveConstructorArguments(args, 1, endOfConstructArgs)); } else { this.currentBeanDefinition = new GroovyBeanDefinitionWrapper(beanName); } this.currentBeanDefinition.getBeanDefinition().setFactoryBeanName(factoryBeanEntry.getKey().toString()); this.currentBeanDefinition.getBeanDefinition().setFactoryMethodName(factoryBeanEntry.getValue().toString()); } } else if (args[0] instanceof Closure) { this.currentBeanDefinition = new GroovyBeanDefinitionWrapper(beanName); this.currentBeanDefinition.getBeanDefinition().setAbstract(true); } else { List<Object> constructorArgs = resolveConstructorArguments(args, 0, (hasClosureArgument ? args.length - 1 : args.length)); this.currentBeanDefinition = new GroovyBeanDefinitionWrapper(beanName, null, constructorArgs); } if (hasClosureArgument) { Closure<?> callable = (Closure<?>) args[args.length - 1]; callable.setDelegate(this); callable.setResolveStrategy(Closure.DELEGATE_FIRST); callable.call(this.currentBeanDefinition); } GroovyBeanDefinitionWrapper beanDefinition = this.currentBeanDefinition; this.currentBeanDefinition = null; beanDefinition.getBeanDefinition().setAttribute(GroovyBeanDefinitionWrapper.class.getName(), beanDefinition); getRegistry().registerBeanDefinition(beanName, beanDefinition.getBeanDefinition()); return beanDefinition; }
This method is called when a bean definition node is called. @param beanName the name of the bean to define @param args the arguments to the bean. The first argument is the class name, the last argument is sometimes a closure. All the arguments in between are constructor arguments. @return the bean definition wrapper
java
spring-beans/src/main/java/org/springframework/beans/factory/groovy/GroovyBeanDefinitionReader.java
466
[ "beanName", "args" ]
GroovyBeanDefinitionWrapper
true
15
8.16
spring-projects/spring-framework
59,386
javadoc
false
square
private static double square(double x) { return x * x; }
Square of a number @param x The input number. @return The square of the input number.
java
libs/h3/src/main/java/org/elasticsearch/h3/Vec3d.java
126
[ "x" ]
true
1
6.8
elastic/elasticsearch
75,680
javadoc
false
unassignUnsentCalls
private void unassignUnsentCalls(Predicate<Node> shouldUnassign) { for (Iterator<Map.Entry<Node, List<Call>>> iter = callsToSend.entrySet().iterator(); iter.hasNext(); ) { Map.Entry<Node, List<Call>> entry = iter.next(); Node node = entry.getKey(); List<Call> awaitingCalls = entry.getValue(); if (awaitingCalls.isEmpty()) { iter.remove(); } else if (shouldUnassign.test(node)) { nodeReadyDeadlines.remove(node); transitionToPendingAndClearList(awaitingCalls); iter.remove(); } } }
Unassign calls that have not yet been sent based on some predicate. For example, this is used to reassign the calls that have been assigned to a disconnected node. @param shouldUnassign Condition for reassignment. If the predicate is true, then the calls will be put back in the pendingCalls collection and they will be reassigned
java
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
1,402
[ "shouldUnassign" ]
void
true
4
6.88
apache/kafka
31,560
javadoc
false
encodeRealpathResult
function encodeRealpathResult(result, options) { if (!options || !options.encoding || options.encoding === 'utf8') return result; const asBuffer = Buffer.from(result); if (options.encoding === 'buffer') { return asBuffer; } return asBuffer.toString(options.encoding); }
Stops watching for changes on `filename`. @param {string | Buffer | URL} filename @param {() => any} [listener] @returns {void}
javascript
lib/fs.js
2,642
[ "result", "options" ]
false
5
6.24
nodejs/node
114,839
jsdoc
false
add
@CanIgnoreReturnValue @Override public Builder<E> add(E element) { super.add(element); return this; }
Adds {@code element} to the {@code ImmutableList}. @param element the element to add @return this {@code Builder} object @throws NullPointerException if {@code element} is null
java
android/guava/src/com/google/common/collect/ImmutableList.java
786
[ "element" ]
true
1
6.24
google/guava
51,352
javadoc
false
createTypeFiltersFor
public static List<TypeFilter> createTypeFiltersFor(AnnotationAttributes filterAttributes, Environment environment, ResourceLoader resourceLoader, BeanDefinitionRegistry registry) { List<TypeFilter> typeFilters = new ArrayList<>(); FilterType filterType = filterAttributes.getEnum("type"); for (Class<?> filterClass : filterAttributes.getClassArray("classes")) { switch (filterType) { case ANNOTATION -> { Assert.isAssignable(Annotation.class, filterClass, "@ComponentScan ANNOTATION type filter requires an annotation type"); @SuppressWarnings("unchecked") Class<Annotation> annotationType = (Class<Annotation>) filterClass; typeFilters.add(new AnnotationTypeFilter(annotationType)); } case ASSIGNABLE_TYPE -> typeFilters.add(new AssignableTypeFilter(filterClass)); case CUSTOM -> { Assert.isAssignable(TypeFilter.class, filterClass, "@ComponentScan CUSTOM type filter requires a TypeFilter implementation"); TypeFilter filter = ParserStrategyUtils.instantiateClass(filterClass, TypeFilter.class, environment, resourceLoader, registry); typeFilters.add(filter); } default -> throw new IllegalArgumentException("Filter type not supported with Class value: " + filterType); } } for (String expression : filterAttributes.getStringArray("pattern")) { switch (filterType) { case ASPECTJ -> typeFilters.add(new AspectJTypeFilter(expression, resourceLoader.getClassLoader())); case REGEX -> typeFilters.add(new RegexPatternTypeFilter(Pattern.compile(expression))); default -> throw new IllegalArgumentException("Filter type not supported with String pattern: " + filterType); } } return typeFilters; }
Create {@linkplain TypeFilter type filters} from the supplied {@link AnnotationAttributes}, such as those sourced from {@link ComponentScan#includeFilters()} or {@link ComponentScan#excludeFilters()}. <p>Each {@link TypeFilter} will be instantiated using an appropriate constructor, with {@code BeanClassLoaderAware}, {@code BeanFactoryAware}, {@code EnvironmentAware}, and {@code ResourceLoaderAware} contracts invoked if they are implemented by the type filter. @param filterAttributes {@code AnnotationAttributes} for a {@link ComponentScan.Filter @Filter} declaration @param environment the {@code Environment} to make available to filters @param resourceLoader the {@code ResourceLoader} to make available to filters @param registry the {@code BeanDefinitionRegistry} to make available to filters as a {@link org.springframework.beans.factory.BeanFactory} if applicable @return a list of instantiated and configured type filters @see TypeFilter @see AnnotationTypeFilter @see AssignableTypeFilter @see AspectJTypeFilter @see RegexPatternTypeFilter @see org.springframework.beans.factory.BeanClassLoaderAware @see org.springframework.beans.factory.BeanFactoryAware @see org.springframework.context.EnvironmentAware @see org.springframework.context.ResourceLoaderAware
java
spring-context/src/main/java/org/springframework/context/annotation/TypeFilterUtils.java
73
[ "filterAttributes", "environment", "resourceLoader", "registry" ]
true
1
6.08
spring-projects/spring-framework
59,386
javadoc
false
__init__
def __init__( self, len_or_dims: Optional[Union[int, Sequence]] = None, name: Optional[str] = None, ): """ Initialize a new DimList object. Args: len_or_dims: Optional length (int) or sequence of dimensions/sizes name: Optional name for the dimension list """ # Initialize attributes self._name = name self._dims: list = [] self._bound = False if isinstance(len_or_dims, int): self.bind_len(len_or_dims) elif len_or_dims is not None: dims = [] for i, item in enumerate(len_or_dims): if isinstance(item, int): dim_name = f"{self._name}{i}" if self._name else f"dim{i}" dims.append(Dim(dim_name, item)) else: dims.append(Dim(item)) self._set_dims(dims)
Initialize a new DimList object. Args: len_or_dims: Optional length (int) or sequence of dimensions/sizes name: Optional name for the dimension list
python
functorch/dim/__init__.py
155
[ "self", "len_or_dims", "name" ]
true
7
6.4
pytorch/pytorch
96,034
google
false
forbid_nonstring_types
def forbid_nonstring_types( forbidden: list[str] | None, name: str | None = None ) -> Callable[[F], F]: """ Decorator to forbid specific types for a method of StringMethods. For calling `.str.{method}` on a Series or Index, it is necessary to first initialize the :class:`StringMethods` object, and then call the method. However, different methods allow different input types, and so this can not be checked during :meth:`StringMethods.__init__`, but must be done on a per-method basis. This decorator exists to facilitate this process, and make it explicit which (inferred) types are disallowed by the method. :meth:`StringMethods.__init__` allows the *union* of types its different methods allow (after skipping NaNs; see :meth:`StringMethods._validate`), namely: ['string', 'empty', 'bytes', 'mixed', 'mixed-integer']. The default string types ['string', 'empty'] are allowed for all methods. For the additional types ['bytes', 'mixed', 'mixed-integer'], each method then needs to forbid the types it is not intended for. Parameters ---------- forbidden : list-of-str or None List of forbidden non-string types, may be one or more of `['bytes', 'mixed', 'mixed-integer']`. name : str, default None Name of the method to use in the error message. By default, this is None, in which case the name from the method being wrapped will be copied. However, for working with further wrappers (like _pat_wrapper and _noarg_wrapper), it is necessary to specify the name. Returns ------- func : wrapper The method to which the decorator is applied, with an added check that enforces the inferred type to not be in the list of forbidden types. Raises ------ TypeError If the inferred type of the underlying data is in `forbidden`. 
""" # deal with None forbidden = [] if forbidden is None else forbidden allowed_types = {"string", "empty", "bytes", "mixed", "mixed-integer"} - set( forbidden ) def _forbid_nonstring_types(func: F) -> F: func_name = func.__name__ if name is None else name @wraps(func) def wrapper(self, *args, **kwargs): if self._inferred_dtype not in allowed_types: msg = ( f"Cannot use .str.{func_name} with values of " f"inferred dtype '{self._inferred_dtype}'." ) raise TypeError(msg) return func(self, *args, **kwargs) wrapper.__name__ = func_name return cast(F, wrapper) return _forbid_nonstring_types
Decorator to forbid specific types for a method of StringMethods. For calling `.str.{method}` on a Series or Index, it is necessary to first initialize the :class:`StringMethods` object, and then call the method. However, different methods allow different input types, and so this can not be checked during :meth:`StringMethods.__init__`, but must be done on a per-method basis. This decorator exists to facilitate this process, and make it explicit which (inferred) types are disallowed by the method. :meth:`StringMethods.__init__` allows the *union* of types its different methods allow (after skipping NaNs; see :meth:`StringMethods._validate`), namely: ['string', 'empty', 'bytes', 'mixed', 'mixed-integer']. The default string types ['string', 'empty'] are allowed for all methods. For the additional types ['bytes', 'mixed', 'mixed-integer'], each method then needs to forbid the types it is not intended for. Parameters ---------- forbidden : list-of-str or None List of forbidden non-string types, may be one or more of `['bytes', 'mixed', 'mixed-integer']`. name : str, default None Name of the method to use in the error message. By default, this is None, in which case the name from the method being wrapped will be copied. However, for working with further wrappers (like _pat_wrapper and _noarg_wrapper), it is necessary to specify the name. Returns ------- func : wrapper The method to which the decorator is applied, with an added check that enforces the inferred type to not be in the list of forbidden types. Raises ------ TypeError If the inferred type of the underlying data is in `forbidden`.
python
pandas/core/strings/accessor.py
83
[ "forbidden", "name" ]
Callable[[F], F]
true
4
6.8
pandas-dev/pandas
47,362
numpy
false
setExcludedPatterns
public void setExcludedPatterns(String... excludedPatterns) { Assert.notEmpty(excludedPatterns, "'excludedPatterns' must not be empty"); this.excludedPatterns = new String[excludedPatterns.length]; for (int i = 0; i < excludedPatterns.length; i++) { this.excludedPatterns[i] = excludedPatterns[i].strip(); } initExcludedPatternRepresentation(this.excludedPatterns); }
Set the regular expressions defining methods to match for exclusion. Matching will be the union of all these; if any match, the pointcut matches. @see #setExcludedPattern
java
spring-aop/src/main/java/org/springframework/aop/support/AbstractRegexpMethodPointcut.java
110
[]
void
true
2
6.56
spring-projects/spring-framework
59,386
javadoc
false
readSchemaFromDirectory
async function readSchemaFromDirectory(schemaPath: string): Promise<LookupResult> { debug('Reading schema from multiple files', schemaPath) const typeError = await ensureType(schemaPath, 'directory') if (typeError) { return { ok: false, error: typeError } } const files = await loadSchemaFiles(schemaPath) return { ok: true, schema: { schemaPath, schemaRootDir: schemaPath, schemas: files } } }
Loads the schema, returns null if it is not found Throws an error if schema is specified explicitly in any of the available ways (argument, package.json config), but can not be loaded @param schemaPathFromArgs @param schemaPathFromConfig @param opts @returns
typescript
packages/internals/src/cli/getSchema.ts
130
[ "schemaPath" ]
true
2
7.44
prisma/prisma
44,834
jsdoc
true
fit_transform
def fit_transform(self, X, y=None): """Fit the model with X and apply the dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Ignored. Returns ------- X_new : ndarray of shape (n_samples, n_components) Transformed values. Notes ----- This method returns a Fortran-ordered array. To convert it to a C-ordered array, use 'np.ascontiguousarray'. """ U, S, _, X, x_is_centered, xp = self._fit(X) if U is not None: U = U[:, : self.n_components_] if self.whiten: # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples) U *= sqrt(X.shape[0] - 1) else: # X_new = X * V = U * S * Vt * V = U * S U *= S[: self.n_components_] return U else: # solver="covariance_eigh" does not compute U at fit time. return self._transform(X, xp, x_is_centered=x_is_centered)
Fit the model with X and apply the dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Ignored. Returns ------- X_new : ndarray of shape (n_samples, n_components) Transformed values. Notes ----- This method returns a Fortran-ordered array. To convert it to a C-ordered array, use 'np.ascontiguousarray'.
python
sklearn/decomposition/_pca.py
444
[ "self", "X", "y" ]
false
5
6.08
scikit-learn/scikit-learn
64,340
numpy
false
isPossiblyBitMask
static bool isPossiblyBitMask(const EnumDecl *EnumDec) { const ValueRange VR(EnumDec); const int EnumLen = enumLength(EnumDec); const int NonPowOfTwoCounter = countNonPowOfTwoLiteralNum(EnumDec); return NonPowOfTwoCounter >= 1 && NonPowOfTwoCounter <= 2 && NonPowOfTwoCounter < EnumLen / 2 && (VR.MaxVal - VR.MinVal != EnumLen - 1) && !(NonPowOfTwoCounter == 1 && isMaxValAllBitSetLiteral(EnumDec)); }
literal) or when it could contain consecutive values.
cpp
clang-tools-extra/clang-tidy/bugprone/SuspiciousEnumUsageCheck.cpp
100
[]
true
6
6.88
llvm/llvm-project
36,021
doxygen
false
count
private int count() { return buffer.getInt(RECORDS_COUNT_OFFSET); }
Gets the base timestamp of the batch which is used to calculate the record timestamps from the deltas. @return The base timestamp
java
clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java
226
[]
true
1
6.8
apache/kafka
31,560
javadoc
false
compare
public static int compare(final String str1, final String str2, final boolean nullIsLess) { if (str1 == str2) { // NOSONARLINT this intentionally uses == to allow for both null return 0; } if (str1 == null) { return nullIsLess ? -1 : 1; } if (str2 == null) { return nullIsLess ? 1 : -1; } return str1.compareTo(str2); }
Compares two Strings lexicographically, as per {@link String#compareTo(String)}, returning : <ul> <li>{@code int = 0}, if {@code str1} is equal to {@code str2} (or both {@code null})</li> <li>{@code int < 0}, if {@code str1} is less than {@code str2}</li> <li>{@code int > 0}, if {@code str1} is greater than {@code str2}</li> </ul> <p> This is a {@code null} safe version of : </p> <pre> str1.compareTo(str2) </pre> <p> {@code null} inputs are handled according to the {@code nullIsLess} parameter. Two {@code null} references are considered equal. </p> <pre>{@code StringUtils.compare(null, null, *) = 0 StringUtils.compare(null , "a", true) < 0 StringUtils.compare(null , "a", false) > 0 StringUtils.compare("a", null, true) > 0 StringUtils.compare("a", null, false) < 0 StringUtils.compare("abc", "abc", *) = 0 StringUtils.compare("a", "b", *) < 0 StringUtils.compare("b", "a", *) > 0 StringUtils.compare("a", "B", *) > 0 StringUtils.compare("ab", "abc", *) < 0 }</pre> @param str1 the String to compare from. @param str2 the String to compare to. @param nullIsLess whether consider {@code null} value less than non-{@code null} value. @return &lt; 0, 0, &gt; 0, if {@code str1} is respectively less, equal ou greater than {@code str2}. @see String#compareTo(String) @since 3.5
java
src/main/java/org/apache/commons/lang3/StringUtils.java
853
[ "str1", "str2", "nullIsLess" ]
true
6
8.08
apache/commons-lang
2,896
javadoc
false
lookup
function lookup(hostname, options) { let hints = 0; let family = 0; let all = false; let dnsOrder = getDefaultResultOrder(); // Parse arguments if (hostname) { validateString(hostname, 'hostname'); } if (typeof options === 'number') { validateOneOf(options, 'family', validFamilies); family = options; } else if (options !== undefined && typeof options !== 'object') { throw new ERR_INVALID_ARG_TYPE('options', ['integer', 'object'], options); } else { if (options?.hints != null) { validateNumber(options.hints, 'options.hints'); hints = options.hints >>> 0; validateHints(hints); } if (options?.family != null) { validateOneOf(options.family, 'options.family', validFamilies); family = options.family; } if (options?.all != null) { validateBoolean(options.all, 'options.all'); all = options.all; } if (options?.verbatim != null) { validateBoolean(options.verbatim, 'options.verbatim'); dnsOrder = options.verbatim ? 'verbatim' : 'ipv4first'; } if (options?.order != null) { validateOneOf(options.order, 'options.order', validDnsOrders); dnsOrder = options.order; } } return createLookupPromise(family, hostname, all, hints, dnsOrder); }
Get the IP address for a given hostname. @param {string} hostname - The hostname to resolve (ex. 'nodejs.org'). @param {object} [options] - Optional settings. @param {boolean} [options.all] - Whether to return all or just the first resolved address. @param {0 | 4 | 6} [options.family] - The record family. Must be 4, 6, or 0 (for both). @param {number} [options.hints] - One or more supported getaddrinfo flags (supply multiple via bitwise OR). @param {'ipv4first' | 'ipv6first' | 'verbatim'} [options.order] - Return results in same order DNS resolved them; New code should supply `verbatim`. @returns {Promise<object>}
javascript
lib/internal/dns/promises.js
195
[ "hostname", "options" ]
false
13
6.08
nodejs/node
114,839
jsdoc
false
tryGetConstEnumValue
function tryGetConstEnumValue(node: Node): string | number | undefined { if (getIsolatedModules(compilerOptions)) { return undefined; } return isPropertyAccessExpression(node) || isElementAccessExpression(node) ? resolver.getConstantValue(node) : undefined; }
Hooks node substitutions. @param hint A hint as to the intended usage of the node. @param node The node to substitute.
typescript
src/compiler/transformers/ts.ts
2,735
[ "node" ]
true
4
6.72
microsoft/TypeScript
107,154
jsdoc
false
hashCode
@InlineMe(replacement = "Byte.hashCode(value)") @InlineMeValidationDisabled( "The hash code of a byte is the int version of the byte itself, so it's simplest to return" + " that.") public static int hashCode(byte value) { return value; }
Returns a hash code for {@code value}; obsolete alternative to {@link Byte#hashCode(byte)}. @param value a primitive {@code byte} value @return a hash code for the value
java
android/guava/src/com/google/common/primitives/Bytes.java
60
[ "value" ]
true
1
6.56
google/guava
51,352
javadoc
false
addAdvisorOnChainCreation
private void addAdvisorOnChainCreation(Object next) { // We need to convert to an Advisor if necessary so that our source reference // matches what we find from superclass interceptors. addAdvisor(namedBeanToAdvisor(next)); }
Invoked when advice chain is created. <p>Add the given advice, advisor or object to the interceptor list. Because of these three possibilities, we can't type the signature more strongly. @param next advice, advisor or target object
java
spring-aop/src/main/java/org/springframework/aop/framework/ProxyFactoryBean.java
517
[ "next" ]
void
true
1
6
spring-projects/spring-framework
59,386
javadoc
false
scanJsxIdentifier
function scanJsxIdentifier(): SyntaxKind { if (tokenIsIdentifierOrKeyword(token)) { // An identifier or keyword has already been parsed - check for a `-` or a single instance of `:` and then append it and // everything after it to the token // Do note that this means that `scanJsxIdentifier` effectively _mutates_ the visible token without advancing to a new token // Any caller should be expecting this behavior and should only read the pos or token value after calling it. while (pos < end) { const ch = charCodeUnchecked(pos); if (ch === CharacterCodes.minus) { tokenValue += "-"; pos++; continue; } const oldPos = pos; tokenValue += scanIdentifierParts(); // reuse `scanIdentifierParts` so unicode escapes are handled if (pos === oldPos) { break; } } return getIdentifierToken(); } return token; }
Unconditionally back up and scan a template expression portion.
typescript
src/compiler/scanner.ts
3,770
[]
true
5
6.56
microsoft/TypeScript
107,154
jsdoc
false
indexOf
public abstract int indexOf(CharSequence seq, CharSequence searchSeq, int startPos);
Finds the first index within a CharSequence, handling {@code null}. This method uses {@link String#indexOf(String, int)} if possible. <p> A {@code null} CharSequence will return {@code -1}. A negative start position is treated as zero. An empty ("") search CharSequence always matches. A start position greater than the string length only matches an empty search CharSequence. </p> <p> Case-sensitive examples </p> <pre> Strings.CS.indexOf(null, *, *) = -1 Strings.CS.indexOf(*, null, *) = -1 Strings.CS.indexOf("", "", 0) = 0 Strings.CS.indexOf("", *, 0) = -1 (except when * = "") Strings.CS.indexOf("aabaabaa", "a", 0) = 0 Strings.CS.indexOf("aabaabaa", "b", 0) = 2 Strings.CS.indexOf("aabaabaa", "ab", 0) = 1 Strings.CS.indexOf("aabaabaa", "b", 3) = 5 Strings.CS.indexOf("aabaabaa", "b", 9) = -1 Strings.CS.indexOf("aabaabaa", "b", -1) = 2 Strings.CS.indexOf("aabaabaa", "", 2) = 2 Strings.CS.indexOf("abc", "", 9) = 3 </pre> <p> Case-insensitive examples </p> <pre> Strings.CI.indexOf(null, *, *) = -1 Strings.CI.indexOf(*, null, *) = -1 Strings.CI.indexOf("", "", 0) = 0 Strings.CI.indexOf("aabaabaa", "A", 0) = 0 Strings.CI.indexOf("aabaabaa", "B", 0) = 2 Strings.CI.indexOf("aabaabaa", "AB", 0) = 1 Strings.CI.indexOf("aabaabaa", "B", 3) = 5 Strings.CI.indexOf("aabaabaa", "B", 9) = -1 Strings.CI.indexOf("aabaabaa", "B", -1) = 2 Strings.CI.indexOf("aabaabaa", "", 2) = 2 Strings.CI.indexOf("abc", "", 9) = -1 </pre> @param seq the CharSequence to check, may be null @param searchSeq the CharSequence to find, may be null @param startPos the start position, negative treated as zero @return the first index of the search CharSequence (always &ge; startPos), -1 if no match or {@code null} string input
java
src/main/java/org/apache/commons/lang3/Strings.java
860
[ "seq", "searchSeq", "startPos" ]
true
1
6
apache/commons-lang
2,896
javadoc
false
gh_summary_path
def gh_summary_path() -> Path | None: """Return the Path to the GitHub step summary file, or None if not set.""" p = os.environ.get("GITHUB_STEP_SUMMARY") return Path(p) if p else None
Return the Path to the GitHub step summary file, or None if not set.
python
.ci/lumen_cli/cli/lib/common/gh_summary.py
55
[]
Path | None
true
2
6.48
pytorch/pytorch
96,034
unknown
false
format
public static String format(final Date date, final String pattern, final TimeZone timeZone) { return format(date, pattern, timeZone, null); }
Formats a date/time into a specific pattern in a time zone. @param date the date to format, not null. @param pattern the pattern to use to format the date, not null. @param timeZone the time zone to use, may be {@code null}. @return the formatted date.
java
src/main/java/org/apache/commons/lang3/time/DateFormatUtils.java
290
[ "date", "pattern", "timeZone" ]
String
true
1
6.64
apache/commons-lang
2,896
javadoc
false
bindIndexed
private void bindIndexed(ConfigurationPropertySource source, ConfigurationPropertyName root, AggregateElementBinder elementBinder, IndexedCollectionSupplier collection, ResolvableType elementType) { Set<String> knownIndexedChildren = new HashSet<>(); if (source instanceof IterableConfigurationPropertySource iterableSource) { knownIndexedChildren = getKnownIndexedChildren(iterableSource, root); } for (int i = 0; i < Integer.MAX_VALUE; i++) { ConfigurationPropertyName name = appendIndex(root, i); Object value = elementBinder.bind(name, Bindable.of(elementType), source); if (value == null) { break; } knownIndexedChildren.remove(name.getLastElement(Form.UNIFORM)); collection.get().add(value); } if (source instanceof IterableConfigurationPropertySource iterableSource) { assertNoUnboundChildren(knownIndexedChildren, iterableSource, root); } }
Bind indexed elements to the supplied collection. @param name the name of the property to bind @param target the target bindable @param elementBinder the binder to use for elements @param aggregateType the aggregate type, may be a collection or an array @param elementType the element type @param result the destination for results
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/IndexedElementsBinder.java
109
[ "source", "root", "elementBinder", "collection", "elementType" ]
void
true
5
6.4
spring-projects/spring-boot
79,428
javadoc
false
newInstance
public static @Nullable EnclosedInSquareBracketsConverter newInstance(@Nullable Configuration config, String[] options) { if (options.length < 1) { LOGGER.error("Incorrect number of options on style. Expected at least 1, received {}", options.length); return null; } PatternParser parser = PatternLayout.createPatternParser(config); List<PatternFormatter> formatters = parser.parse(options[0]); return new EnclosedInSquareBracketsConverter(formatters); }
Creates a new instance of the class. Required by Log4J2. @param config the configuration @param options the options @return a new instance, or {@code null} if the options are invalid
java
core/spring-boot/src/main/java/org/springframework/boot/logging/log4j2/EnclosedInSquareBracketsConverter.java
70
[ "config", "options" ]
EnclosedInSquareBracketsConverter
true
2
8.24
spring-projects/spring-boot
79,428
javadoc
false
_merge_dictionaries
def _merge_dictionaries(d1, d2, aggregate_duplicates=True): """Merge two dictionaries recursively into the first one. Example: >>> d1 = {'dict': {'a': 1}, 'list': [1, 2], 'tuple': (1, 2)} >>> d2 = {'dict': {'b': 2}, 'list': [3, 4], 'set': {'a', 'b'}} >>> _merge_dictionaries(d1, d2) d1 will be modified to: { 'dict': {'a': 1, 'b': 2}, 'list': [1, 2, 3, 4], 'tuple': (1, 2), 'set': {'a', 'b'} } Arguments: d1 (dict): Dictionary to merge into. d2 (dict): Dictionary to merge from. aggregate_duplicates (bool): If True, aggregate duplicated items (by key) into a list of all values in d1 in the same key. If False, duplicate keys will be taken from d2 and override the value in d1. """ if not d2: return for key, value in d1.items(): if key in d2: if isinstance(value, dict): _merge_dictionaries(d1[key], d2[key]) else: if isinstance(value, (int, float, str)): d1[key] = [value] if aggregate_duplicates else value if isinstance(d2[key], list) and isinstance(d1[key], list): d1[key].extend(d2[key]) elif aggregate_duplicates: if d1[key] is None: d1[key] = [] else: d1[key] = list(d1[key]) d1[key].append(d2[key]) for key, value in d2.items(): if key not in d1: d1[key] = value
Merge two dictionaries recursively into the first one. Example: >>> d1 = {'dict': {'a': 1}, 'list': [1, 2], 'tuple': (1, 2)} >>> d2 = {'dict': {'b': 2}, 'list': [3, 4], 'set': {'a', 'b'}} >>> _merge_dictionaries(d1, d2) d1 will be modified to: { 'dict': {'a': 1, 'b': 2}, 'list': [1, 2, 3, 4], 'tuple': (1, 2), 'set': {'a', 'b'} } Arguments: d1 (dict): Dictionary to merge into. d2 (dict): Dictionary to merge from. aggregate_duplicates (bool): If True, aggregate duplicated items (by key) into a list of all values in d1 in the same key. If False, duplicate keys will be taken from d2 and override the value in d1.
python
celery/canvas.py
73
[ "d1", "d2", "aggregate_duplicates" ]
false
15
7.44
celery/celery
27,741
google
false
_build_chime_payload
def _build_chime_payload(self, message: str) -> str: """ Build payload for Chime and ensures messages do not exceed max length allowed. :param message: The message you want to send to your Chime room. (max 4096 characters) """ payload: dict[str, Any] = {} # We need to make sure that the message does not exceed the max length for Chime if len(message) > 4096: raise AirflowException("Chime message must be 4096 characters or less.") payload["Content"] = message return json.dumps(payload)
Build payload for Chime and ensures messages do not exceed max length allowed. :param message: The message you want to send to your Chime room. (max 4096 characters)
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/chime.py
86
[ "self", "message" ]
str
true
2
7.04
apache/airflow
43,597
sphinx
false
determinePrimaryCandidate
protected @Nullable String determinePrimaryCandidate(Map<String, Object> candidates, Class<?> requiredType) { String primaryBeanName = null; // First pass: identify unique primary candidate for (Map.Entry<String, Object> entry : candidates.entrySet()) { String candidateBeanName = entry.getKey(); Object beanInstance = entry.getValue(); if (isPrimary(candidateBeanName, beanInstance)) { if (primaryBeanName != null) { boolean candidateLocal = containsBeanDefinition(candidateBeanName); boolean primaryLocal = containsBeanDefinition(primaryBeanName); if (candidateLocal == primaryLocal) { String message = "more than one 'primary' bean found among candidates: " + candidates.keySet(); logger.trace(message); throw new NoUniqueBeanDefinitionException(requiredType, candidates.size(), message); } else if (candidateLocal) { primaryBeanName = candidateBeanName; } } else { primaryBeanName = candidateBeanName; } } } // Second pass: identify unique non-fallback candidate if (primaryBeanName == null) { for (String candidateBeanName : candidates.keySet()) { if (!isFallback(candidateBeanName)) { if (primaryBeanName != null) { return null; } primaryBeanName = candidateBeanName; } } } return primaryBeanName; }
Determine the primary candidate in the given set of beans. @param candidates a Map of candidate names and candidate instances (or candidate classes if not created yet) that match the required type @param requiredType the target dependency type to match against @return the name of the primary candidate, or {@code null} if none found @see #isPrimary(String, Object)
java
spring-beans/src/main/java/org/springframework/beans/factory/support/DefaultListableBeanFactory.java
2,085
[ "candidates", "requiredType" ]
String
true
8
7.76
spring-projects/spring-framework
59,386
javadoc
false
get
public static Path get(URI uri) { if (uri.getScheme().equalsIgnoreCase("file")) { return DEFAULT.provider().getPath(uri); } else { return Paths.get(uri); } }
Returns a {@code Path} from a URI <p> This works just like {@code Paths.get()}. <p> Remember: this should almost never be used. Usually resolve a path against an existing one!
java
libs/core/src/main/java/org/elasticsearch/core/PathUtils.java
59
[ "uri" ]
Path
true
2
7.04
elastic/elasticsearch
75,680
javadoc
false