function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
describe_categorical_1d
|
def describe_categorical_1d(
data: Series,
percentiles_ignored: Sequence[float],
) -> Series:
"""Describe series containing categorical data.
Parameters
----------
data : Series
Series to be described.
percentiles_ignored : list-like of numbers
Ignored, but in place to unify interface.
"""
names = ["count", "unique", "top", "freq"]
objcounts = data.value_counts()
count_unique = len(objcounts[objcounts != 0])
if count_unique > 0:
top, freq = objcounts.index[0], objcounts.iloc[0]
dtype = None
else:
# If the DataFrame is empty, set 'top' and 'freq' to None
# to maintain output shape consistency
top, freq = np.nan, np.nan
dtype = "object"
result = [data.count(), count_unique, top, freq]
from pandas import Series
return Series(result, index=names, name=data.name, dtype=dtype)
|
Describe series containing categorical data.
Parameters
----------
data : Series
Series to be described.
percentiles_ignored : list-like of numbers
Ignored, but in place to unify interface.
|
python
|
pandas/core/methods/describe.py
| 267
|
[
"data",
"percentiles_ignored"
] |
Series
| true
| 3
| 6.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
appendAll
|
public <T> StrBuilder appendAll(@SuppressWarnings("unchecked") final T... array) {
/*
* @SuppressWarnings used to hide warning about vararg usage. We cannot
* use @SafeVarargs, since this method is not final. Using @SuppressWarnings
* is fine, because it isn't inherited by subclasses, so each subclass must
* vouch for itself whether its use of 'array' is safe.
*/
if (ArrayUtils.isNotEmpty(array)) {
for (final Object element : array) {
append(element);
}
}
return this;
}
|
Appends each item in an array to the builder without any separators.
Appending a null array will have no effect.
Each object is appended using {@link #append(Object)}.
@param <T> the element type
@param array the array to append
@return {@code this} instance.
@since 2.3
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 821
|
[] |
StrBuilder
| true
| 2
| 8.4
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
__init__
|
def __init__(self, name: str = "", rules=None) -> None:
"""
Initializes holiday object with a given set a rules. Normally
classes just have the rules defined within them.
Parameters
----------
name : str
Name of the holiday calendar, defaults to class name
rules : array of Holiday objects
A set of rules used to create the holidays.
"""
super().__init__()
if not name:
name = type(self).__name__
self.name = name
if rules is not None:
self.rules = rules
|
Initializes holiday object with a given set a rules. Normally
classes just have the rules defined within them.
Parameters
----------
name : str
Name of the holiday calendar, defaults to class name
rules : array of Holiday objects
A set of rules used to create the holidays.
|
python
|
pandas/tseries/holiday.py
| 469
|
[
"self",
"name",
"rules"
] |
None
| true
| 3
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
_initial_imputation
|
def _initial_imputation(self, X, in_fit=False):
"""Perform initial imputation for input `X`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
in_fit : bool, default=False
Whether function is called in :meth:`fit`.
Returns
-------
Xt : ndarray of shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
X_filled : ndarray of shape (n_samples, n_features)
Input data with the most recent imputations.
mask_missing_values : ndarray of shape (n_samples, n_features)
Input data's missing indicator matrix, where `n_samples` is the
number of samples and `n_features` is the number of features,
masked by non-missing features.
X_missing_mask : ndarray, shape (n_samples, n_features)
Input data's mask matrix indicating missing datapoints, where
`n_samples` is the number of samples and `n_features` is the
number of features.
"""
if is_scalar_nan(self.missing_values):
ensure_all_finite = "allow-nan"
else:
ensure_all_finite = True
X = validate_data(
self,
X,
dtype=FLOAT_DTYPES,
order="F",
reset=in_fit,
ensure_all_finite=ensure_all_finite,
)
_check_inputs_dtype(X, self.missing_values)
X_missing_mask = _get_mask(X, self.missing_values)
mask_missing_values = X_missing_mask.copy()
if self.initial_imputer_ is None:
self.initial_imputer_ = SimpleImputer(
missing_values=self.missing_values,
strategy=self.initial_strategy,
fill_value=self.fill_value,
keep_empty_features=self.keep_empty_features,
).set_output(transform="default")
X_filled = self.initial_imputer_.fit_transform(X)
else:
X_filled = self.initial_imputer_.transform(X)
if in_fit:
self._is_empty_feature = np.all(mask_missing_values, axis=0)
if not self.keep_empty_features:
# drop empty features
Xt = X[:, ~self._is_empty_feature]
mask_missing_values = mask_missing_values[:, ~self._is_empty_feature]
else:
# mark empty features as not missing and keep the original
# imputation
mask_missing_values[:, self._is_empty_feature] = False
Xt = X
Xt[:, self._is_empty_feature] = X_filled[:, self._is_empty_feature]
return Xt, X_filled, mask_missing_values, X_missing_mask
|
Perform initial imputation for input `X`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
in_fit : bool, default=False
Whether function is called in :meth:`fit`.
Returns
-------
Xt : ndarray of shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
X_filled : ndarray of shape (n_samples, n_features)
Input data with the most recent imputations.
mask_missing_values : ndarray of shape (n_samples, n_features)
Input data's missing indicator matrix, where `n_samples` is the
number of samples and `n_features` is the number of features,
masked by non-missing features.
X_missing_mask : ndarray, shape (n_samples, n_features)
Input data's mask matrix indicating missing datapoints, where
`n_samples` is the number of samples and `n_features` is the
number of features.
|
python
|
sklearn/impute/_iterative.py
| 591
|
[
"self",
"X",
"in_fit"
] | false
| 8
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
update_source_code
|
def update_source_code(cls, dag_id: str, fileloc: str, session: Session = NEW_SESSION) -> None:
"""
Check if the source code of the DAG has changed and update it if needed.
:param dag_id: Dag ID
:param fileloc: The path of code file to read the code from
:param session: The database session.
:return: None
"""
latest_dagcode = cls.get_latest_dagcode(dag_id, session)
if not latest_dagcode:
return
new_source_code = cls.get_code_from_file(fileloc)
new_source_code_hash = cls.dag_source_hash(new_source_code)
if new_source_code_hash != latest_dagcode.source_code_hash:
latest_dagcode.source_code = new_source_code
latest_dagcode.source_code_hash = new_source_code_hash
session.merge(latest_dagcode)
|
Check if the source code of the DAG has changed and update it if needed.
:param dag_id: Dag ID
:param fileloc: The path of code file to read the code from
:param session: The database session.
:return: None
|
python
|
airflow-core/src/airflow/models/dagcode.py
| 175
|
[
"cls",
"dag_id",
"fileloc",
"session"
] |
None
| true
| 3
| 8.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
insert
|
public StrBuilder insert(final int index, final char[] chars, final int offset, final int length) {
validateIndex(index);
if (chars == null) {
return insert(index, nullText);
}
if (offset < 0 || offset > chars.length) {
throw new StringIndexOutOfBoundsException("Invalid offset: " + offset);
}
if (length < 0 || offset + length > chars.length) {
throw new StringIndexOutOfBoundsException("Invalid length: " + length);
}
if (length > 0) {
ensureCapacity(size + length);
System.arraycopy(buffer, index, buffer, index + length, size - index);
System.arraycopy(chars, offset, buffer, index, length);
size += length;
}
return this;
}
|
Inserts part of the character array into this builder.
Inserting null will use the stored null text value.
@param index the index to add at, must be valid
@param chars the char array to insert
@param offset the offset into the character array to start at, must be valid
@param length the length of the character array part to copy, must be positive
@return {@code this} instance.
@throws IndexOutOfBoundsException if any index is invalid
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 2,168
|
[
"index",
"chars",
"offset",
"length"
] |
StrBuilder
| true
| 7
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
binaryToHexDigit
|
public static char binaryToHexDigit(final boolean[] src, final int srcPos) {
if (src.length == 0) {
throw new IllegalArgumentException("Cannot convert an empty array.");
}
if (src.length > srcPos + 3 && src[srcPos + 3]) {
if (src[srcPos + 2]) {
if (src[srcPos + 1]) {
return src[srcPos] ? 'f' : 'e';
}
return src[srcPos] ? 'd' : 'c';
}
if (src[srcPos + 1]) {
return src[srcPos] ? 'b' : 'a';
}
return src[srcPos] ? '9' : '8';
}
if (src.length > srcPos + 2 && src[srcPos + 2]) {
if (src[srcPos + 1]) {
return src[srcPos] ? '7' : '6';
}
return src[srcPos] ? '5' : '4';
}
if (src.length > srcPos + 1 && src[srcPos + 1]) {
return src[srcPos] ? '3' : '2';
}
return src[srcPos] ? '1' : '0';
}
|
Converts binary (represented as boolean array) to a hexadecimal digit using the default (LSB0) bit ordering.
<p>
(1, 0, 0, 0) is converted as follow: '1'.
</p>
@param src the binary to convert.
@param srcPos the position of the LSB to start the conversion.
@return a hexadecimal digit representing the selected bits.
@throws IllegalArgumentException if {@code src} is empty.
@throws NullPointerException if {@code src} is {@code null}.
|
java
|
src/main/java/org/apache/commons/lang3/Conversion.java
| 201
|
[
"src",
"srcPos"
] | true
| 20
| 6.72
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
manhattan_distances
|
def manhattan_distances(X, Y=None):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
An array where each row is a sample and each column is a feature.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None
An array where each row is a sample and each column is a feature.
If `None`, method uses `Y=X`.
Returns
-------
distances : ndarray of shape (n_samples_X, n_samples_Y)
Pairwise L1 distances.
Notes
-----
When X and/or Y are CSR sparse matrices and they are not already
in canonical format, this function modifies them in-place to
make them canonical.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])
array([[0.]])
>>> manhattan_distances([[3]], [[2]])
array([[1.]])
>>> manhattan_distances([[2]], [[3]])
array([[1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])
array([[0., 2.],
[4., 4.]])
"""
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
if issparse(X) or issparse(Y):
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
X.sum_duplicates() # this also sorts indices in-place
Y.sum_duplicates()
D = np.zeros((n_x, n_y))
_sparse_manhattan(X.data, X.indices, X.indptr, Y.data, Y.indices, Y.indptr, D)
return D
xp, _, device_ = get_namespace_and_device(X, Y)
if _is_numpy_namespace(xp):
return distance.cdist(X, Y, "cityblock")
# array API support
float_dtype = _find_matching_floating_dtype(X, Y, xp=xp)
out = xp.empty((n_x, n_y), dtype=float_dtype, device=device_)
batch_size = 1024
for i in range(0, n_x, batch_size):
i_end = min(i + batch_size, n_x)
batch_X = X[i:i_end, ...]
for j in range(0, n_y, batch_size):
j_end = min(j + batch_size, n_y)
batch_Y = Y[j:j_end, ...]
block_dist = xp.sum(
xp.abs(batch_X[:, None, :] - batch_Y[None, :, :]), axis=2
)
out[i:i_end, j:j_end] = block_dist
return out
|
Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
An array where each row is a sample and each column is a feature.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None
An array where each row is a sample and each column is a feature.
If `None`, method uses `Y=X`.
Returns
-------
distances : ndarray of shape (n_samples_X, n_samples_Y)
Pairwise L1 distances.
Notes
-----
When X and/or Y are CSR sparse matrices and they are not already
in canonical format, this function modifies them in-place to
make them canonical.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])
array([[0.]])
>>> manhattan_distances([[3]], [[2]])
array([[1.]])
>>> manhattan_distances([[2]], [[3]])
array([[1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])
array([[0., 2.],
[4., 4.]])
|
python
|
sklearn/metrics/pairwise.py
| 1,052
|
[
"X",
"Y"
] | false
| 6
| 7.28
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
whenNot
|
public Member<T> whenNot(Predicate<@Nullable T> predicate) {
Assert.notNull(predicate, "'predicate' must not be null");
return when(predicate.negate());
}
|
Only include this member when the given predicate does not match.
@param predicate the predicate to test
@return a {@link Member} which may be configured further
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/json/JsonWriter.java
| 430
|
[
"predicate"
] | true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
lookupGeneratedClass
|
private static GeneratedClass lookupGeneratedClass(GenerationContext generationContext, ClassName target) {
ClassName topLevelClassName = target.topLevelClassName();
GeneratedClass generatedClass = generationContext.getGeneratedClasses()
.getOrAddForFeatureComponent("BeanDefinitions", topLevelClassName, type -> {
type.addJavadoc("Bean definitions for {@link $T}.", topLevelClassName);
type.addModifiers(Modifier.PUBLIC);
});
List<String> names = target.simpleNames();
if (names.size() == 1) {
return generatedClass;
}
List<String> namesToProcess = names.subList(1, names.size());
ClassName currentTargetClassName = topLevelClassName;
GeneratedClass tmp = generatedClass;
for (String nameToProcess : namesToProcess) {
currentTargetClassName = currentTargetClassName.nestedClass(nameToProcess);
tmp = createInnerClass(tmp, nameToProcess, currentTargetClassName);
}
return tmp;
}
|
Return the {@link GeneratedClass} to use for the specified {@code target}.
<p>If the target class is an inner class, a corresponding inner class in
the original structure is created.
@param generationContext the generation context to use
@param target the chosen target class name for the bean definition
@return the generated class to use
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/BeanDefinitionMethodGenerator.java
| 117
|
[
"generationContext",
"target"
] |
GeneratedClass
| true
| 2
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
_rsplit
|
def _rsplit(a, sep=None, maxsplit=None):
"""
For each element in `a`, return a list of the words in the
string, using `sep` as the delimiter string.
Calls :meth:`str.rsplit` element-wise.
Except for splitting from the right, `rsplit`
behaves like `split`.
Parameters
----------
a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
sep : str or unicode, optional
If `sep` is not specified or None, any whitespace string
is a separator.
maxsplit : int, optional
If `maxsplit` is given, at most `maxsplit` splits are done,
the rightmost ones.
Returns
-------
out : ndarray
Array of list objects
See Also
--------
str.rsplit, split
Examples
--------
>>> import numpy as np
>>> a = np.array(['aAaAaA', 'abBABba'])
>>> np.strings.rsplit(a, 'A') # doctest: +SKIP
array([list(['a', 'a', 'a', '']), # doctest: +SKIP
list(['abB', 'Bba'])], dtype=object) # doctest: +SKIP
"""
# This will return an array of lists of different sizes, so we
# leave it as an object array
return _vec_string(
a, np.object_, 'rsplit', [sep] + _clean_args(maxsplit))
|
For each element in `a`, return a list of the words in the
string, using `sep` as the delimiter string.
Calls :meth:`str.rsplit` element-wise.
Except for splitting from the right, `rsplit`
behaves like `split`.
Parameters
----------
a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
sep : str or unicode, optional
If `sep` is not specified or None, any whitespace string
is a separator.
maxsplit : int, optional
If `maxsplit` is given, at most `maxsplit` splits are done,
the rightmost ones.
Returns
-------
out : ndarray
Array of list objects
See Also
--------
str.rsplit, split
Examples
--------
>>> import numpy as np
>>> a = np.array(['aAaAaA', 'abBABba'])
>>> np.strings.rsplit(a, 'A') # doctest: +SKIP
array([list(['a', 'a', 'a', '']), # doctest: +SKIP
list(['abB', 'Bba'])], dtype=object) # doctest: +SKIP
|
python
|
numpy/_core/strings.py
| 1,445
|
[
"a",
"sep",
"maxsplit"
] | false
| 1
| 6.64
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
isBeforeRange
|
public boolean isBeforeRange(final Range<T> otherRange) {
if (otherRange == null) {
return false;
}
return isBefore(otherRange.minimum);
}
|
Checks whether this range is completely before the specified range.
<p>This method may fail if the ranges have two different comparators or element types.</p>
@param otherRange the range to check, null returns false.
@return true if this range is completely before the specified range.
@throws RuntimeException if ranges cannot be compared.
|
java
|
src/main/java/org/apache/commons/lang3/Range.java
| 464
|
[
"otherRange"
] | true
| 2
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
findInterruptibleMethods
|
private static Set<Method> findInterruptibleMethods(Class<?> interfaceType) {
Set<Method> set = new HashSet<>();
for (Method m : interfaceType.getMethods()) {
if (declaresInterruptedEx(m)) {
set.add(m);
}
}
return set;
}
|
Creates a TimeLimiter instance using the given executor service to execute method calls.
<p><b>Warning:</b> using a bounded executor may be counterproductive! If the thread pool fills
up, any time callers spend waiting for a thread may count toward their time limit, and in this
case the call may even time out before the target method is ever invoked.
@param executor the ExecutorService that will execute the method calls on the target objects;
for example, a {@link Executors#newCachedThreadPool()}.
@since 22.0
|
java
|
android/guava/src/com/google/common/util/concurrent/SimpleTimeLimiter.java
| 241
|
[
"interfaceType"
] | true
| 2
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
countMatches
|
public static int countMatches(final CharSequence str, final char ch) {
if (isEmpty(str)) {
return 0;
}
int count = 0;
// We could also call str.toCharArray() for faster lookups but that would generate more garbage.
for (int i = 0; i < str.length(); i++) {
if (ch == str.charAt(i)) {
count++;
}
}
return count;
}
|
Counts how many times the char appears in the given string.
<p>
A {@code null} or empty ("") String input returns {@code 0}.
</p>
<pre>
StringUtils.countMatches(null, *) = 0
StringUtils.countMatches("", *) = 0
StringUtils.countMatches("abba", 0) = 0
StringUtils.countMatches("abba", 'a') = 2
StringUtils.countMatches("abba", 'b') = 2
StringUtils.countMatches("abba", 'x') = 0
</pre>
@param str the CharSequence to check, may be null.
@param ch the char to count.
@return the number of occurrences, 0 if the CharSequence is {@code null}.
@since 3.4
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 1,457
|
[
"str",
"ch"
] | true
| 4
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
splitByWholeSeparator
|
public static String[] splitByWholeSeparator(final String str, final String separator, final int max) {
return splitByWholeSeparatorWorker(str, separator, max, false);
}
|
Splits the provided text into an array, separator string specified. Returns a maximum of {@code max} substrings.
<p>
The separator(s) will not be included in the returned String array. Adjacent separators are treated as one separator.
</p>
<p>
A {@code null} input String returns {@code null}. A {@code null} separator splits on whitespace.
</p>
<pre>
StringUtils.splitByWholeSeparator(null, *, *) = null
StringUtils.splitByWholeSeparator("", *, *) = []
StringUtils.splitByWholeSeparator("ab de fg", null, 0) = ["ab", "de", "fg"]
StringUtils.splitByWholeSeparator("ab de fg", null, 0) = ["ab", "de", "fg"]
StringUtils.splitByWholeSeparator("ab:cd:ef", ":", 2) = ["ab", "cd:ef"]
StringUtils.splitByWholeSeparator("ab-!-cd-!-ef", "-!-", 5) = ["ab", "cd", "ef"]
StringUtils.splitByWholeSeparator("ab-!-cd-!-ef", "-!-", 2) = ["ab", "cd-!-ef"]
</pre>
@param str the String to parse, may be null.
@param separator String containing the String to be used as a delimiter, {@code null} splits on whitespace.
@param max the maximum number of elements to include in the returned array. A zero or negative value implies no limit.
@return an array of parsed Strings, {@code null} if null String was input.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 7,277
|
[
"str",
"separator",
"max"
] | true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
build
|
@Override
public SpringProfileArbiter build() {
Environment environment = Log4J2LoggingSystem.getEnvironment(this.loggerContext);
if (environment == null) {
statusLogger.debug("Creating Arbiter without a Spring Environment");
}
String name = this.configuration.getStrSubstitutor().replace(this.name);
String[] profiles = trimArrayElements(StringUtils.commaDelimitedListToStringArray(name));
return new SpringProfileArbiter(environment, profiles);
}
|
Sets the profile name or expression.
@param name the profile name or expression
@return this
@see Profiles#of(String...)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/log4j2/SpringProfileArbiter.java
| 98
|
[] |
SpringProfileArbiter
| true
| 2
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
flush
|
protected boolean flush(ByteBuffer buf) throws IOException {
int remaining = buf.remaining();
if (remaining > 0) {
int written = socketChannel.write(buf);
return written >= remaining;
}
return true;
}
|
Flushes the buffer to the network, non blocking.
Visible for testing.
@param buf ByteBuffer
@return boolean true if the buffer has been emptied out, false otherwise
@throws IOException
|
java
|
clients/src/main/java/org/apache/kafka/common/network/SslTransportLayer.java
| 247
|
[
"buf"
] | true
| 2
| 7.92
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
isCurrentThreadAllowedToHoldSingletonLock
|
@Override
protected @Nullable Boolean isCurrentThreadAllowedToHoldSingletonLock() {
String mainThreadPrefix = this.mainThreadPrefix;
if (mainThreadPrefix != null) {
// We only differentiate in the preInstantiateSingletons phase, using
// the volatile mainThreadPrefix field as an indicator for that phase.
PreInstantiation preInstantiation = this.preInstantiationThread.get();
if (preInstantiation != null) {
// A Spring-managed bootstrap thread:
// MAIN is allowed to lock (true) or even forced to lock (null),
// BACKGROUND is never allowed to lock (false).
return switch (preInstantiation) {
case MAIN -> (Boolean.TRUE.equals(this.strictLocking) ? null : true);
case BACKGROUND -> false;
};
}
// Not a Spring-managed bootstrap thread...
if (Boolean.FALSE.equals(this.strictLocking)) {
// Explicitly configured to use lenient locking wherever possible.
return true;
}
else if (this.strictLocking == null) {
// No explicit locking configuration -> infer appropriate locking.
if (!getThreadNamePrefix().equals(mainThreadPrefix)) {
// An unmanaged thread (assumed to be application-internal) with lenient locking,
// and not part of the same thread pool that provided the main bootstrap thread
// (excluding scenarios where we are hit by multiple external bootstrap threads).
return true;
}
}
}
// Traditional behavior: forced to always hold a full lock.
return null;
}
|
Considers all beans as eligible for metadata caching
if the factory's configuration has been marked as frozen.
@see #freezeConfiguration()
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/DefaultListableBeanFactory.java
| 1,058
|
[] |
Boolean
| true
| 7
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
instance
|
public Struct instance(BoundField field) {
validateField(field);
if (field.def.type instanceof Schema) {
return new Struct((Schema) field.def.type);
} else if (field.def.type.isArray()) {
return new Struct((Schema) field.def.type.arrayElementType().get());
} else {
throw new SchemaException("Field '" + field.def.name + "' is not a container type, it is of type " + field.def.type);
}
}
|
Create a struct for the schema of a container type (struct or array). Note that for array type, this method
assumes that the type is an array of schema and creates a struct of that schema. Arrays of other types can't be
instantiated with this method.
@param field The field to create an instance of
@return The struct
@throws SchemaException If the given field is not a container type
|
java
|
clients/src/main/java/org/apache/kafka/common/protocol/types/Struct.java
| 164
|
[
"field"
] |
Struct
| true
| 3
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
calculateFirst
|
function calculateFirst(field: Field, ignoreNulls: boolean, nullAsZero: boolean): FieldCalcs {
return { first: field.values[0] };
}
|
@returns an object with a key for each selected stat
NOTE: This will also modify the 'field.state' object,
leaving values in a cache until cleared.
|
typescript
|
packages/grafana-data/src/transformations/fieldReducer.ts
| 607
|
[
"field",
"ignoreNulls",
"nullAsZero"
] | true
| 1
| 6.96
|
grafana/grafana
| 71,362
|
jsdoc
| false
|
|
format
|
String format(long millis);
|
Formats a millisecond {@code long} value.
@param millis the millisecond value to format.
@return the formatted string.
@since 2.1
|
java
|
src/main/java/org/apache/commons/lang3/time/DatePrinter.java
| 116
|
[
"millis"
] |
String
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
loadJars
|
private static ClassLoader loadJars(List<Path> dirs) {
final List<URL> urls = new ArrayList<>();
for (var dir : dirs) {
try (Stream<Path> jarFiles = Files.list(dir)) {
jarFiles.filter(p -> p.getFileName().toString().endsWith(".jar")).map(p -> {
try {
return p.toUri().toURL();
} catch (MalformedURLException e) {
throw new AssertionError(e);
}
}).forEach(urls::add);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
return URLClassLoader.newInstance(urls.toArray(URL[]::new));
}
|
Loads a tool provider from the Elasticsearch distribution.
@param sysprops the system properties of the CLI process
@param toolname the name of the tool to load
@param libs the library directories to load, relative to the Elasticsearch homedir
@return the instance of the loaded tool
@throws AssertionError if the given toolname cannot be found or there are more than one tools found with the same name
|
java
|
libs/cli/src/main/java/org/elasticsearch/cli/CliToolProvider.java
| 76
|
[
"dirs"
] |
ClassLoader
| true
| 3
| 7.76
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
is_bool_indexer
|
def is_bool_indexer(key: Any) -> bool:
"""
Check whether `key` is a valid boolean indexer.
Parameters
----------
key : Any
Only list-likes may be considered boolean indexers.
All other types are not considered a boolean indexer.
For array-like input, boolean ndarrays or ExtensionArrays
with ``_is_boolean`` set are considered boolean indexers.
Returns
-------
bool
Whether `key` is a valid boolean indexer.
Raises
------
ValueError
When the array is an object-dtype ndarray or ExtensionArray
and contains missing values.
See Also
--------
check_array_indexer : Check that `key` is a valid array to index,
and convert to an ndarray.
"""
if isinstance(
key,
(ABCSeries, np.ndarray, ABCIndex, ABCExtensionArray, ABCNumpyExtensionArray),
) and not isinstance(key, ABCMultiIndex):
if key.dtype == np.object_:
key_array = np.asarray(key)
if not lib.is_bool_array(key_array):
na_msg = "Cannot mask with non-boolean array containing NA / NaN values"
if lib.is_bool_array(key_array, skipna=True):
# Don't raise on e.g. ["A", "B", np.nan], see
# test_loc_getitem_list_of_labels_categoricalindex_with_na
raise ValueError(na_msg)
return False
return True
elif is_bool_dtype(key.dtype):
return True
elif isinstance(key, list):
# check if np.array(key).dtype would be bool
if len(key) > 0:
if type(key) is not list:
# GH#42461 cython will raise TypeError if we pass a subclass
key = list(key)
return lib.is_bool_list(key)
return False
|
Check whether `key` is a valid boolean indexer.
Parameters
----------
key : Any
Only list-likes may be considered boolean indexers.
All other types are not considered a boolean indexer.
For array-like input, boolean ndarrays or ExtensionArrays
with ``_is_boolean`` set are considered boolean indexers.
Returns
-------
bool
Whether `key` is a valid boolean indexer.
Raises
------
ValueError
When the array is an object-dtype ndarray or ExtensionArray
and contains missing values.
See Also
--------
check_array_indexer : Check that `key` is a valid array to index,
and convert to an ndarray.
|
python
|
pandas/core/common.py
| 103
|
[
"key"
] |
bool
| true
| 10
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
transformThen
|
function transformThen(node: PromiseReturningCallExpression<"then">, onFulfilled: Expression | undefined, onRejected: Expression | undefined, transformer: Transformer, hasContinuation: boolean, continuationArgName?: SynthBindingName): readonly Statement[] {
if (!onFulfilled || isNullOrUndefined(transformer, onFulfilled)) {
// If we don't have an `onfulfilled` callback, try treating this as a `.catch`.
return transformCatch(node, onRejected, transformer, hasContinuation, continuationArgName);
}
// We don't currently support transforming a `.then` with both onfulfilled and onrejected handlers, per GH#38152.
if (onRejected && !isNullOrUndefined(transformer, onRejected)) {
return silentFail();
}
const inputArgName = getArgBindingName(onFulfilled, transformer);
// Transform the left-hand-side of `.then` into an array of inlined statements. We pass `true` for hasContinuation as `node` is the outer continuation.
const inlinedLeftHandSide = transformExpression(node.expression.expression, node.expression.expression, transformer, /*hasContinuation*/ true, inputArgName);
if (hasFailed()) return silentFail(); // shortcut out of more work
// Transform the callback argument into an array of inlined statements. We pass whether we have an outer continuation here
// as that indicates whether `return` is valid.
const inlinedCallback = transformCallbackArgument(onFulfilled, hasContinuation, continuationArgName, inputArgName, node, transformer);
if (hasFailed()) return silentFail(); // shortcut out of more work
return concatenate(inlinedLeftHandSide, inlinedCallback);
}
|
@param hasContinuation Whether another `then`, `catch`, or `finally` continuation follows this continuation.
@param continuationArgName The argument name for the continuation that follows this call.
|
typescript
|
src/services/codefixes/convertToAsyncFunction.ts
| 549
|
[
"node",
"onFulfilled",
"onRejected",
"transformer",
"hasContinuation",
"continuationArgName?"
] | true
| 7
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
toFloatVersion
|
private static float toFloatVersion(final String value) {
final int defaultReturnValue = -1;
if (!value.contains(".")) {
return NumberUtils.toFloat(value, defaultReturnValue);
}
final String[] toParse = split(value);
if (toParse.length >= 2) {
return NumberUtils.toFloat(toParse[0] + '.' + toParse[1], defaultReturnValue);
}
return defaultReturnValue;
}
|
Parses a float value from a String.
@param value the String to parse.
@return the float value represented by the string or -1 if the given String cannot be parsed.
|
java
|
src/main/java/org/apache/commons/lang3/JavaVersion.java
| 332
|
[
"value"
] | true
| 3
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
add_srs_entry
|
def add_srs_entry(
srs, auth_name="EPSG", auth_srid=None, ref_sys_name=None, database=None
):
"""
Take a GDAL SpatialReference system and add its information to the
`spatial_ref_sys` table of the spatial backend. Doing this enables
database-level spatial transformations for the backend. Thus, this utility
is useful for adding spatial reference systems not included by default with
the backend:
>>> from django.contrib.gis.utils import add_srs_entry
>>> add_srs_entry(3857)
Keyword Arguments:
auth_name:
This keyword may be customized with the value of the `auth_name` field.
Defaults to 'EPSG'.
auth_srid:
This keyword may be customized with the value of the `auth_srid` field.
Defaults to the SRID determined by GDAL.
ref_sys_name:
For SpatiaLite users only, sets the value of the `ref_sys_name` field.
Defaults to the name determined by GDAL.
database:
The name of the database connection to use; the default is the value
of `django.db.DEFAULT_DB_ALIAS` (at the time of this writing, its value
is 'default').
"""
database = database or DEFAULT_DB_ALIAS
connection = connections[database]
if not hasattr(connection.ops, "spatial_version"):
raise Exception("The `add_srs_entry` utility only works with spatial backends.")
if not connection.features.supports_add_srs_entry:
raise Exception("This utility does not support your database backend.")
SpatialRefSys = connection.ops.spatial_ref_sys()
# If argument is not a `SpatialReference` instance, use it as parameter
# to construct a `SpatialReference` instance.
if not isinstance(srs, SpatialReference):
srs = SpatialReference(srs)
if srs.srid is None:
raise Exception(
"Spatial reference requires an SRID to be "
"compatible with the spatial backend."
)
# Initializing the keyword arguments dictionary for both PostGIS
# and SpatiaLite.
kwargs = {
"srid": srs.srid,
"auth_name": auth_name,
"auth_srid": auth_srid or srs.srid,
"proj4text": srs.proj4,
}
# Backend-specific fields for the SpatialRefSys model.
srs_field_names = {f.name for f in SpatialRefSys._meta.get_fields()}
if "srtext" in srs_field_names:
kwargs["srtext"] = srs.wkt
if "ref_sys_name" in srs_field_names:
# SpatiaLite specific
kwargs["ref_sys_name"] = ref_sys_name or srs.name
# Creating the spatial_ref_sys model.
try:
# Try getting via SRID only, because using all kwargs may
# differ from exact wkt/proj in database.
SpatialRefSys.objects.using(database).get(srid=srs.srid)
except SpatialRefSys.DoesNotExist:
SpatialRefSys.objects.using(database).create(**kwargs)
|
Take a GDAL SpatialReference system and add its information to the
`spatial_ref_sys` table of the spatial backend. Doing this enables
database-level spatial transformations for the backend. Thus, this utility
is useful for adding spatial reference systems not included by default with
the backend:
>>> from django.contrib.gis.utils import add_srs_entry
>>> add_srs_entry(3857)
Keyword Arguments:
auth_name:
This keyword may be customized with the value of the `auth_name` field.
Defaults to 'EPSG'.
auth_srid:
This keyword may be customized with the value of the `auth_srid` field.
Defaults to the SRID determined by GDAL.
ref_sys_name:
For SpatiaLite users only, sets the value of the `ref_sys_name` field.
Defaults to the name determined by GDAL.
database:
The name of the database connection to use; the default is the value
of `django.db.DEFAULT_DB_ALIAS` (at the time of this writing, its value
is 'default').
|
python
|
django/contrib/gis/utils/srs.py
| 5
|
[
"srs",
"auth_name",
"auth_srid",
"ref_sys_name",
"database"
] | false
| 10
| 7.6
|
django/django
| 86,204
|
unknown
| false
|
|
listConsumerGroupOffsets
|
ListConsumerGroupOffsetsResult listConsumerGroupOffsets(Map<String, ListConsumerGroupOffsetsSpec> groupSpecs, ListConsumerGroupOffsetsOptions options);
|
List the consumer group offsets available in the cluster for the specified consumer groups.
@param groupSpecs Map of consumer group ids to a spec that specifies the topic partitions of the group to list offsets for.
@param options The options to use when listing the consumer group offsets.
@return The ListConsumerGroupOffsetsResult
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 938
|
[
"groupSpecs",
"options"
] |
ListConsumerGroupOffsetsResult
| true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
hashCode
|
@Override
public int hashCode() {
int hashCode = this.hashCode;
Elements elements = this.elements;
if (hashCode == 0 && elements.getSize() != 0) {
for (int elementIndex = 0; elementIndex < elements.getSize(); elementIndex++) {
hashCode = 31 * hashCode + elements.hashCode(elementIndex);
}
this.hashCode = hashCode;
}
return hashCode;
}
|
Returns {@code true} if this element is an ancestor (immediate or nested parent) of
the specified name.
@param name the name to check
@return {@code true} if this name is an ancestor
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyName.java
| 527
|
[] | true
| 4
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
flip
|
public FluentBitSet flip(final int fromIndex, final int toIndex) {
bitSet.flip(fromIndex, toIndex);
return this;
}
|
Sets each bit from the specified {@code fromIndex} (inclusive) to the specified {@code toIndex} (exclusive) to the
complement of its current value.
@param fromIndex index of the first bit to flip.
@param toIndex index after the last bit to flip.
@throws IndexOutOfBoundsException if {@code fromIndex} is negative, or {@code toIndex} is negative, or
{@code fromIndex} is larger than {@code toIndex}.
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/util/FluentBitSet.java
| 230
|
[
"fromIndex",
"toIndex"
] |
FluentBitSet
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
occupied_slots
|
def occupied_slots(self, session: Session = NEW_SESSION) -> int:
"""
Get the number of slots used by running/queued tasks at the moment.
:param session: SQLAlchemy ORM Session
:return: the used number of slots
"""
from airflow.models.taskinstance import TaskInstance # Avoid circular import
occupied_states = self.get_occupied_states()
return int(
session.scalar(
select(func.sum(TaskInstance.pool_slots))
.filter(TaskInstance.pool == self.pool)
.filter(TaskInstance.state.in_(occupied_states))
)
or 0
)
|
Get the number of slots used by running/queued tasks at the moment.
:param session: SQLAlchemy ORM Session
:return: the used number of slots
|
python
|
airflow-core/src/airflow/models/pool.py
| 244
|
[
"self",
"session"
] |
int
| true
| 2
| 8.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
fetchablePartitions
|
private List<TopicPartition> fetchablePartitions(Set<TopicPartition> buffered) {
// This is the test that returns true if the partition is *not* buffered
Predicate<TopicPartition> isNotBuffered = tp -> !buffered.contains(tp);
// Return all partitions that are in an otherwise fetchable state *and* for which we don't already have some
// messages sitting in our buffer.
return subscriptions.fetchablePartitions(isNotBuffered);
}
|
Return the list of <em>fetchable</em> partitions, which are the list of partitions to which we are subscribed,
but <em>excluding</em> any partitions for which we still have buffered data. The idea is that since the user
has yet to process the data for the partition that has already been fetched, we should not go send for more data
until the previously-fetched data has been processed.
@param buffered The set of partitions we have in our buffer
@return {@link List} of {@link TopicPartition topic partitions} for which we should fetch data
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java
| 346
|
[
"buffered"
] | true
| 1
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
getValue
|
@Deprecated
@Override
public Double getValue() {
return Double.valueOf(this.value);
}
|
Gets the value as a Double instance.
@return the value as a Double, never null.
@deprecated Use {@link #get()}.
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableDouble.java
| 276
|
[] |
Double
| true
| 1
| 7.04
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
atan2
|
public static double atan2(double y, double x) {
if (x > 0.0) {
if (y == 0.0) {
return (1 / y == Double.NEGATIVE_INFINITY) ? -0.0 : 0.0;
}
if (x == Double.POSITIVE_INFINITY) {
if (y == Double.POSITIVE_INFINITY) {
return M_QUARTER_PI;
} else if (y == Double.NEGATIVE_INFINITY) {
return -M_QUARTER_PI;
} else if (y > 0.0) {
return 0.0;
} else if (y < 0.0) {
return -0.0;
} else {
return Double.NaN;
}
} else {
return FastMath.atan(y / x);
}
} else if (x < 0.0) {
if (y == 0.0) {
return (1 / y == Double.NEGATIVE_INFINITY) ? -Math.PI : Math.PI;
}
if (x == Double.NEGATIVE_INFINITY) {
if (y == Double.POSITIVE_INFINITY) {
return 3 * M_QUARTER_PI;
} else if (y == Double.NEGATIVE_INFINITY) {
return -3 * M_QUARTER_PI;
} else if (y > 0.0) {
return Math.PI;
} else if (y < 0.0) {
return -Math.PI;
} else {
return Double.NaN;
}
} else if (y > 0.0) {
return M_HALF_PI + FastMath.atan(-x / y);
} else if (y < 0.0) {
return -M_HALF_PI - FastMath.atan(x / y);
} else {
return Double.NaN;
}
} else if (x == 0.0) {
if (y == 0.0) {
if (1 / x == Double.NEGATIVE_INFINITY) {
return (1 / y == Double.NEGATIVE_INFINITY) ? -Math.PI : Math.PI;
} else {
return (1 / y == Double.NEGATIVE_INFINITY) ? -0.0 : 0.0;
}
}
if (y > 0.0) {
return M_HALF_PI;
} else if (y < 0.0) {
return -M_HALF_PI;
} else {
return Double.NaN;
}
} else {
return Double.NaN;
}
}
|
For special values for which multiple conventions could be adopted, behaves like Math.atan2(double,double).
@param y Coordinate on y axis.
@param x Coordinate on x axis.
@return Angle from x axis positive side to (x,y) position, in radians, in [-PI,PI].
Angle measure is positive when going from x axis to y axis (positive sides).
|
java
|
libs/h3/src/main/java/org/elasticsearch/h3/FastMath.java
| 537
|
[
"y",
"x"
] | true
| 26
| 6.64
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
withTimeout
|
@J2ktIncompatible
@GwtIncompatible // java.util.concurrent.ScheduledExecutorService
@SuppressWarnings("GoodTime") // should accept a java.time.Duration
public static <V extends @Nullable Object> ListenableFuture<V> withTimeout(
ListenableFuture<V> delegate,
long time,
TimeUnit unit,
ScheduledExecutorService scheduledExecutor) {
if (delegate.isDone()) {
return delegate;
}
return TimeoutFuture.create(delegate, time, unit, scheduledExecutor);
}
|
Returns a future that delegates to another but will finish early (via a {@link
TimeoutException} wrapped in an {@link ExecutionException}) if the specified duration expires.
<p>The delegate future is interrupted and cancelled if it times out.
@param delegate The future to delegate to.
@param time when to time out the future
@param unit the time unit of the time parameter
@param scheduledExecutor The executor service to enforce the timeout.
@since 19.0
|
java
|
android/guava/src/com/google/common/util/concurrent/Futures.java
| 405
|
[
"delegate",
"time",
"unit",
"scheduledExecutor"
] | true
| 2
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
list_fargate_profiles
|
def list_fargate_profiles(
self,
clusterName: str,
verbose: bool = False,
) -> list:
"""
List all AWS Fargate profiles associated with the specified cluster.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.list_fargate_profiles`
:param clusterName: The name of the Amazon EKS Cluster containing Fargate profiles to list.
:param verbose: Provides additional logging if set to True. Defaults to False.
:return: A list of Fargate profile names within a given cluster.
"""
eks_client = self.conn
list_fargate_profiles_call = partial(eks_client.list_fargate_profiles, clusterName=clusterName)
return self._list_all(
api_call=list_fargate_profiles_call, response_key="fargateProfileNames", verbose=verbose
)
|
List all AWS Fargate profiles associated with the specified cluster.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.list_fargate_profiles`
:param clusterName: The name of the Amazon EKS Cluster containing Fargate profiles to list.
:param verbose: Provides additional logging if set to True. Defaults to False.
:return: A list of Fargate profile names within a given cluster.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/eks.py
| 500
|
[
"self",
"clusterName",
"verbose"
] |
list
| true
| 1
| 6.4
|
apache/airflow
| 43,597
|
sphinx
| false
|
get_event_subscription_state
|
def get_event_subscription_state(self, subscription_name: str) -> str:
"""
Get the current state of an RDS snapshot export to Amazon S3.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_event_subscriptions`
:param subscription_name: The name of the target RDS event notification subscription.
:return: Returns the status of the event subscription as a string (eg. "active")
:raises AirflowNotFoundException: If the event subscription does not exist.
"""
try:
response = self.conn.describe_event_subscriptions(SubscriptionName=subscription_name)
except self.conn.exceptions.ClientError as e:
if e.response["Error"]["Code"] in ("SubscriptionNotFoundFault", "SubscriptionNotFound"):
raise AirflowNotFoundException(e)
raise e
return response["EventSubscriptionsList"][0]["Status"].lower()
|
Get the current state of an RDS snapshot export to Amazon S3.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_event_subscriptions`
:param subscription_name: The name of the target RDS event notification subscription.
:return: Returns the status of the event subscription as a string (eg. "active")
:raises AirflowNotFoundException: If the event subscription does not exist.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/rds.py
| 186
|
[
"self",
"subscription_name"
] |
str
| true
| 2
| 7.44
|
apache/airflow
| 43,597
|
sphinx
| false
|
set_uuid
|
def set_uuid(self, uuid: str) -> Styler:
"""
Set the uuid applied to ``id`` attributes of HTML elements.
Parameters
----------
uuid : str
The uuid to be applied to ``id`` attributes of HTML elements.
Returns
-------
Styler
Instance of class with specified uuid for `id` attributes set.
See Also
--------
Styler.set_caption : Set the text added to a ``<caption>`` HTML element.
Styler.set_td_classes : Set the ``class`` attribute of ``<td>`` HTML elements.
Styler.set_tooltips : Set the DataFrame of strings on ``Styler`` generating
``:hover`` tooltips.
Notes
-----
Almost all HTML elements within the table, and including the ``<table>`` element
are assigned ``id`` attributes. The format is ``T_uuid_<extra>`` where
``<extra>`` is typically a more specific identifier, such as ``row1_col2``.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], index=["A", "B"], columns=["c1", "c2"])
You can get the `id` attributes with the following:
>>> print((df).style.to_html()) # doctest: +SKIP
To add a title to column `c1`, its `id` is T_20a7d_level0_col0:
>>> df.style.set_uuid("T_20a7d_level0_col0").set_caption("Test")
... # doctest: +SKIP
Please see:
`Table visualization <../../user_guide/style.ipynb>`_ for more examples.
"""
self.uuid = uuid
return self
|
Set the uuid applied to ``id`` attributes of HTML elements.
Parameters
----------
uuid : str
The uuid to be applied to ``id`` attributes of HTML elements.
Returns
-------
Styler
Instance of class with specified uuid for `id` attributes set.
See Also
--------
Styler.set_caption : Set the text added to a ``<caption>`` HTML element.
Styler.set_td_classes : Set the ``class`` attribute of ``<td>`` HTML elements.
Styler.set_tooltips : Set the DataFrame of strings on ``Styler`` generating
``:hover`` tooltips.
Notes
-----
Almost all HTML elements within the table, and including the ``<table>`` element
are assigned ``id`` attributes. The format is ``T_uuid_<extra>`` where
``<extra>`` is typically a more specific identifier, such as ``row1_col2``.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], index=["A", "B"], columns=["c1", "c2"])
You can get the `id` attributes with the following:
>>> print((df).style.to_html()) # doctest: +SKIP
To add a title to column `c1`, its `id` is T_20a7d_level0_col0:
>>> df.style.set_uuid("T_20a7d_level0_col0").set_caption("Test")
... # doctest: +SKIP
Please see:
`Table visualization <../../user_guide/style.ipynb>`_ for more examples.
|
python
|
pandas/io/formats/style.py
| 2,356
|
[
"self",
"uuid"
] |
Styler
| true
| 1
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
containsEqualValue
|
bool containsEqualValue(value_type const& value) const {
auto it = table_.findMatching(value.first, [&](auto& key) {
return value.first == key;
});
return !it.atEnd() && value.second == table_.valueAtItem(it.citem()).second;
}
|
Checks for a value using operator==
@methodset Lookup
containsEqualValue returns true iff there is an element in the map
that compares equal to value using operator==. It is undefined
behavior to call this function if operator== on key_type can ever
return true when the same keys passed to key_eq() would return false
(the opposite is allowed).
|
cpp
|
folly/container/F14Map.h
| 999
|
[] | true
| 2
| 6.4
|
facebook/folly
| 30,157
|
doxygen
| false
|
|
getBigThreadConstructor
|
private static @Nullable Constructor<Thread> getBigThreadConstructor() {
try {
return Thread.class.getConstructor(
ThreadGroup.class, Runnable.class, String.class, long.class, boolean.class);
} catch (Throwable t) {
// Probably pre Java 9. We'll fall back to Thread.inheritableThreadLocals.
return null;
}
}
|
Looks up FinalizableReference.finalizeReferent() method.
|
java
|
android/guava/src/com/google/common/base/internal/Finalizer.java
| 246
|
[] | true
| 2
| 6.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
containsOption
|
boolean containsOption(String name);
|
Return whether the set of option arguments parsed from the arguments contains an
option with the given name.
@param name the name to check
@return {@code true} if the arguments contain an option with the given name
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ApplicationArguments.java
| 51
|
[
"name"
] | true
| 1
| 6.8
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
getApplicationEventMulticaster
|
ApplicationEventMulticaster getApplicationEventMulticaster() throws IllegalStateException {
if (this.applicationEventMulticaster == null) {
throw new IllegalStateException("ApplicationEventMulticaster not initialized - " +
"call 'refresh' before multicasting events via the context: " + this);
}
return this.applicationEventMulticaster;
}
|
Return the internal ApplicationEventMulticaster used by the context.
@return the internal ApplicationEventMulticaster (never {@code null})
@throws IllegalStateException if the context has not been initialized yet
|
java
|
spring-context/src/main/java/org/springframework/context/support/AbstractApplicationContext.java
| 466
|
[] |
ApplicationEventMulticaster
| true
| 2
| 7.12
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
_get_value
|
def _get_value(self, index, col, takeable: bool = False) -> Scalar:
"""
Quickly retrieve single value at passed column and index.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
scalar
Notes
-----
Assumes that both `self.index._index_as_unique` and
`self.columns._index_as_unique`; Caller is responsible for checking.
"""
if takeable:
series = self._ixs(col, axis=1)
return series._values[index]
series = self._get_item(col)
if not isinstance(self.index, MultiIndex):
# CategoricalIndex: Trying to use the engine fastpath may give incorrect
# results if our categories are integers that dont match our codes
# IntervalIndex: IntervalTree has no get_loc
row = self.index.get_loc(index)
return series._values[row]
# For MultiIndex going through engine effectively restricts us to
# same-length tuples; see test_get_set_value_no_partial_indexing
loc = self.index._engine.get_loc(index)
return series._values[loc]
|
Quickly retrieve single value at passed column and index.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
scalar
Notes
-----
Assumes that both `self.index._index_as_unique` and
`self.columns._index_as_unique`; Caller is responsible for checking.
|
python
|
pandas/core/frame.py
| 4,274
|
[
"self",
"index",
"col",
"takeable"
] |
Scalar
| true
| 3
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
endsWithAny
|
public boolean endsWithAny(final CharSequence sequence, final CharSequence... searchStrings) {
if (StringUtils.isEmpty(sequence) || ArrayUtils.isEmpty(searchStrings)) {
return false;
}
for (final CharSequence searchString : searchStrings) {
if (endsWith(sequence, searchString)) {
return true;
}
}
return false;
}
|
Tests if a CharSequence ends with any of the provided suffixes.
<p>
Case-sensitive examples
</p>
<pre>
Strings.CS.endsWithAny(null, null) = false
Strings.CS.endsWithAny(null, new String[] {"abc"}) = false
Strings.CS.endsWithAny("abcxyz", null) = false
Strings.CS.endsWithAny("abcxyz", new String[] {""}) = true
Strings.CS.endsWithAny("abcxyz", new String[] {"xyz"}) = true
Strings.CS.endsWithAny("abcxyz", new String[] {null, "xyz", "abc"}) = true
Strings.CS.endsWithAny("abcXYZ", "def", "XYZ") = true
Strings.CS.endsWithAny("abcXYZ", "def", "xyz") = false
</pre>
@param sequence the CharSequence to check, may be null
@param searchStrings the CharSequence suffixes to find, may be empty or contain {@code null}
@see Strings#endsWith(CharSequence, CharSequence)
@return {@code true} if the input {@code sequence} is {@code null} AND no {@code searchStrings} are provided, or the input {@code sequence} ends in any
of the provided {@code searchStrings}.
|
java
|
src/main/java/org/apache/commons/lang3/Strings.java
| 633
|
[
"sequence"
] | true
| 4
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
make_union
|
def make_union(
*transformers, n_jobs=None, verbose=False, verbose_feature_names_out=True
):
"""Construct a :class:`FeatureUnion` from the given transformers.
This is a shorthand for the :class:`FeatureUnion` constructor; it does not
require, and does not permit, naming the transformers. Instead, they will
be given names automatically based on their types. It also does not allow
weighting.
Parameters
----------
*transformers : list of estimators
One or more estimators.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionchanged:: v0.20
`n_jobs` default changed from 1 to None.
verbose : bool, default=False
If True, the time elapsed while fitting each transformer will be
printed as it is completed.
verbose_feature_names_out : bool, default=True
If True, the feature names generated by `get_feature_names_out` will
include prefixes derived from the transformer names.
Returns
-------
f : FeatureUnion
A :class:`FeatureUnion` object for concatenating the results of multiple
transformer objects.
See Also
--------
FeatureUnion : Class for concatenating the results of multiple transformer
objects.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> from sklearn.pipeline import make_union
>>> make_union(PCA(), TruncatedSVD())
FeatureUnion(transformer_list=[('pca', PCA()),
('truncatedsvd', TruncatedSVD())])
"""
return FeatureUnion(
_name_estimators(transformers),
n_jobs=n_jobs,
verbose=verbose,
verbose_feature_names_out=verbose_feature_names_out,
)
|
Construct a :class:`FeatureUnion` from the given transformers.
This is a shorthand for the :class:`FeatureUnion` constructor; it does not
require, and does not permit, naming the transformers. Instead, they will
be given names automatically based on their types. It also does not allow
weighting.
Parameters
----------
*transformers : list of estimators
One or more estimators.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionchanged:: v0.20
`n_jobs` default changed from 1 to None.
verbose : bool, default=False
If True, the time elapsed while fitting each transformer will be
printed as it is completed.
verbose_feature_names_out : bool, default=True
If True, the feature names generated by `get_feature_names_out` will
include prefixes derived from the transformer names.
Returns
-------
f : FeatureUnion
A :class:`FeatureUnion` object for concatenating the results of multiple
transformer objects.
See Also
--------
FeatureUnion : Class for concatenating the results of multiple transformer
objects.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> from sklearn.pipeline import make_union
>>> make_union(PCA(), TruncatedSVD())
FeatureUnion(transformer_list=[('pca', PCA()),
('truncatedsvd', TruncatedSVD())])
|
python
|
sklearn/pipeline.py
| 2,086
|
[
"n_jobs",
"verbose",
"verbose_feature_names_out"
] | false
| 1
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
topicsAwaitingReconciliation
|
Set<Uuid> topicsAwaitingReconciliation() {
return topicPartitionsAwaitingReconciliation().keySet();
}
|
@return Set of topic IDs received in a target assignment that have not been reconciled yet
because topic names are not in metadata or reconciliation hasn't finished. Reconciliation
hasn't finished for a topic if the currently active assignment has a different set of partitions
for the topic than the target assignment.
Visible for testing.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java
| 1,338
|
[] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
reinstall_if_setup_changed
|
def reinstall_if_setup_changed() -> bool:
"""
Prints warning if detected airflow sources are not the ones that Breeze was installed with.
:return: True if warning was printed.
"""
res = subprocess.run(
["uv", "tool", "upgrade", "apache-airflow-breeze"],
cwd=MY_BREEZE_ROOT_PATH,
check=True,
text=True,
capture_output=True,
)
if "Modified" in res.stderr:
inform_about_self_upgrade()
return True
return False
|
Prints warning if detected airflow sources are not the ones that Breeze was installed with.
:return: True if warning was printed.
|
python
|
dev/breeze/src/airflow_breeze/utils/path_utils.py
| 129
|
[] |
bool
| true
| 2
| 8.24
|
apache/airflow
| 43,597
|
unknown
| false
|
invoke
|
@Override
public @Nullable Object invoke(final MethodInvocation invocation) throws Throwable {
Method method = invocation.getMethod();
CacheOperationInvoker aopAllianceInvoker = () -> {
try {
return invocation.proceed();
}
catch (Throwable ex) {
throw new CacheOperationInvoker.ThrowableWrapper(ex);
}
};
Object target = invocation.getThis();
Assert.state(target != null, "Target must not be null");
try {
return execute(aopAllianceInvoker, target, method, invocation.getArguments());
}
catch (CacheOperationInvoker.ThrowableWrapper th) {
throw th.getOriginal();
}
}
|
Construct a new {@code JCacheInterceptor} with the given error handler.
@param errorHandler a supplier for the error handler to use,
applying the default error handler if the supplier is not resolvable
@since 5.1
|
java
|
spring-context-support/src/main/java/org/springframework/cache/jcache/interceptor/JCacheInterceptor.java
| 68
|
[
"invocation"
] |
Object
| true
| 3
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
heartbeat
|
def heartbeat(
self, heartbeat_callback: Callable[[Session], None], session: Session = NEW_SESSION
) -> None:
"""
Update the job's entry in the database with the latest_heartbeat timestamp.
This allows for the job to be killed externally and allows the system
to monitor what is actually active. For instance, an old heartbeat
for SchedulerJob would mean something is wrong. This also allows for
any job to be killed externally, regardless of who is running it or on
which machine it is running.
Note that if your heart rate is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
:param heartbeat_callback: Callback that will be run when the heartbeat is recorded in the Job
:param session to use for saving the job
"""
previous_heartbeat = self.latest_heartbeat
with DebugTrace.start_span(span_name="heartbeat", component="Job") as span:
try:
span.set_attribute("heartbeat", str(self.latest_heartbeat))
# This will cause it to load from the db
session.merge(self)
previous_heartbeat = self.latest_heartbeat
if self.state == JobState.RESTARTING:
self.kill()
# Figure out how long to sleep for
sleep_for: float = 0
if self.latest_heartbeat:
seconds_remaining = (
self.heartrate - (timezone.utcnow() - self.latest_heartbeat).total_seconds()
)
sleep_for = max(0, seconds_remaining)
if span.is_recording():
span.add_event(name="sleep", attributes={"sleep_for": sleep_for})
sleep(sleep_for)
# Update last heartbeat time
with create_session() as session:
# Make the session aware of this object
session.merge(self)
self.latest_heartbeat = timezone.utcnow()
session.commit()
time_since_last_heartbeat: float = (
0
if previous_heartbeat is None
else (timezone.utcnow() - previous_heartbeat).total_seconds()
)
health_check_threshold_value = health_check_threshold(self.job_type, self.heartrate)
if time_since_last_heartbeat > health_check_threshold_value:
self.log.info("Heartbeat recovered after %.2f seconds", time_since_last_heartbeat)
# At this point, the DB has updated.
previous_heartbeat = self.latest_heartbeat
heartbeat_callback(session)
self.log.debug("[heartbeat]")
self.heartbeat_failed = False
except OperationalError:
Stats.incr(convert_camel_to_snake(self.__class__.__name__) + "_heartbeat_failure", 1, 1)
if not self.heartbeat_failed:
self.log.exception("%s heartbeat failed with error", self.__class__.__name__)
self.heartbeat_failed = True
msg = f"{self.__class__.__name__} heartbeat got an exception"
if span.is_recording():
span.add_event(name="error", attributes={"message": msg})
if self.is_alive():
self.log.error(
"%s heartbeat failed with error. Scheduler may go into unhealthy state",
self.__class__.__name__,
)
msg = f"{self.__class__.__name__} heartbeat failed with error. Scheduler may go into unhealthy state"
if span.is_recording():
span.add_event(name="error", attributes={"message": msg})
else:
msg = f"{self.__class__.__name__} heartbeat failed with error. Scheduler is in unhealthy state"
self.log.error(msg)
if span.is_recording():
span.add_event(name="error", attributes={"message": msg})
# We didn't manage to heartbeat, so make sure that the timestamp isn't updated
self.latest_heartbeat = previous_heartbeat
|
Update the job's entry in the database with the latest_heartbeat timestamp.
This allows for the job to be killed externally and allows the system
to monitor what is actually active. For instance, an old heartbeat
for SchedulerJob would mean something is wrong. This also allows for
any job to be killed externally, regardless of who is running it or on
which machine it is running.
Note that if your heart rate is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
:param heartbeat_callback: Callback that will be run when the heartbeat is recorded in the Job
:param session to use for saving the job
|
python
|
airflow-core/src/airflow/jobs/job.py
| 204
|
[
"self",
"heartbeat_callback",
"session"
] |
None
| true
| 12
| 6.96
|
apache/airflow
| 43,597
|
sphinx
| false
|
get
|
public static ConditionEvaluationReport get(ConfigurableListableBeanFactory beanFactory) {
synchronized (beanFactory) {
ConditionEvaluationReport report;
if (beanFactory.containsSingleton(BEAN_NAME)) {
report = beanFactory.getBean(BEAN_NAME, ConditionEvaluationReport.class);
}
else {
report = new ConditionEvaluationReport();
beanFactory.registerSingleton(BEAN_NAME, report);
}
locateParent(beanFactory.getParentBeanFactory(), report);
return report;
}
}
|
Obtain a {@link ConditionEvaluationReport} for the specified bean factory.
@param beanFactory the bean factory
@return an existing or new {@link ConditionEvaluationReport}
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/ConditionEvaluationReport.java
| 181
|
[
"beanFactory"
] |
ConditionEvaluationReport
| true
| 2
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
toFloat
|
public Float toFloat() {
return Float.valueOf(floatValue());
}
|
Gets this mutable as an instance of Float.
@return a Float instance containing the value from this mutable, never null.
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableFloat.java
| 402
|
[] |
Float
| true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getEnvironmentPostProcessors
|
List<EnvironmentPostProcessor> getEnvironmentPostProcessors(@Nullable ResourceLoader resourceLoader,
ConfigurableBootstrapContext bootstrapContext) {
ClassLoader classLoader = (resourceLoader != null) ? resourceLoader.getClassLoader() : null;
EnvironmentPostProcessorsFactory postProcessorsFactory = this.postProcessorsFactory.apply(classLoader);
return postProcessorsFactory.getEnvironmentPostProcessors(this.deferredLogs, bootstrapContext);
}
|
Factory method that creates an {@link EnvironmentPostProcessorApplicationListener}
with a specific {@link EnvironmentPostProcessorsFactory}.
@param postProcessorsFactory the environment post processor factory
@return an {@link EnvironmentPostProcessorApplicationListener} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/support/EnvironmentPostProcessorApplicationListener.java
| 153
|
[
"resourceLoader",
"bootstrapContext"
] | true
| 2
| 7.12
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
exactly_one
|
def exactly_one(*args) -> bool:
"""
Return True if exactly one of args is "truthy", and False otherwise.
If user supplies an iterable, we raise ValueError and force them to unpack.
"""
if is_container(args[0]):
raise ValueError(
"Not supported for iterable args. Use `*` to unpack your iterable in the function call."
)
return sum(map(bool, args)) == 1
|
Return True if exactly one of args is "truthy", and False otherwise.
If user supplies an iterable, we raise ValueError and force them to unpack.
|
python
|
airflow-core/src/airflow/utils/helpers.py
| 265
|
[] |
bool
| true
| 2
| 6.72
|
apache/airflow
| 43,597
|
unknown
| false
|
name
|
default String name() {
NamedComponent[] annotationsByType = this.getClass().getAnnotationsByType(NamedComponent.class);
if (annotationsByType.length == 1) {
return annotationsByType[0].value();
}
return null;
}
|
Returns a name from NamedComponent annotation.
@return a name used on NamedComponent annotation or null when a class implementing this interface is not annotated
|
java
|
libs/plugin-api/src/main/java/org/elasticsearch/plugin/Nameable.java
| 23
|
[] |
String
| true
| 2
| 7.6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
loadPatternCompiler
|
private static PatternCompiler loadPatternCompiler() {
// We want the JDK Pattern compiler:
// - under Android (where it hurts startup performance)
// - even for the JVM in our open-source release (https://github.com/google/guava/issues/3147)
// If anyone in our monorepo uses the Android copy of Guava on a JVM, that would be unfortunate.
// But that is only likely to happen in Robolectric tests, where the risks of JDK regex are low.
return new JdkPatternCompiler();
}
|
Returns the string if it is not empty, or a null string otherwise.
@param string the string to test and possibly return
@return {@code string} if it is not empty; {@code null} otherwise
|
java
|
android/guava/src/com/google/common/base/Platform.java
| 98
|
[] |
PatternCompiler
| true
| 1
| 7.2
|
google/guava
| 51,352
|
javadoc
| false
|
negate
|
default FailableLongPredicate<E> negate() {
return t -> !test(t);
}
|
Returns a predicate that negates this predicate.
@return a predicate that negates this predicate.
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableLongPredicate.java
| 79
|
[] | true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
writeTo
|
@Override
public int writeTo(TransferableChannel destChannel, int offset, int length) throws IOException {
long newSize = Math.min(channel.size(), end) - start;
int oldSize = sizeInBytes();
if (newSize < oldSize)
throw new KafkaException(String.format(
"Size of FileRecords %s has been truncated during write: old size %d, new size %d",
file.getAbsolutePath(), oldSize, newSize));
long position = start + offset;
int count = Math.min(length, oldSize - offset);
// safe to cast to int since `count` is an int
return (int) destChannel.transferFrom(channel, position, count);
}
|
Truncate this file message set to the given size in bytes. Note that this API does no checking that the
given size falls on a valid message boundary.
In some versions of the JDK truncating to the same size as the file message set will cause an
update of the files mtime, so truncate is only performed if the targetSize is smaller than the
size of the underlying FileChannel.
It is expected that no other threads will do writes to the log when this function is called.
@param targetSize The size to truncate to. Must be between 0 and sizeInBytes.
@return The number of bytes truncated off
|
java
|
clients/src/main/java/org/apache/kafka/common/record/FileRecords.java
| 290
|
[
"destChannel",
"offset",
"length"
] | true
| 2
| 8.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
close
|
@Override
public void close() {
close(Duration.ZERO);
}
|
Add a {@link CompletableApplicationEvent} to the handler. The method blocks waiting for the result, and will
return the result value upon successful completion; otherwise throws an error.
<p/>
See {@link ConsumerUtils#getResult(Future)} for more details.
@param event A {@link CompletableApplicationEvent} created by the polling thread
@return Value that is the result of the event
@param <T> Type of return value of the event
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventHandler.java
| 147
|
[] |
void
| true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
serialize
|
def serialize(cls, operation: "GemmOperation") -> str: # type: ignore[name-defined] # noqa: F821
"""Serialize a GEMM operation to JSON string.
Args:
operation: GemmOperation object
Returns:
str: JSON string representation of the operation
"""
assert operation.__class__.__qualname__ == "GemmOperation", (
"Only GemmOperation objects are supported via the main API"
)
return json.dumps(cls._gemm_operation_to_json(operation))
|
Serialize a GEMM operation to JSON string.
Args:
operation: GemmOperation object
Returns:
str: JSON string representation of the operation
|
python
|
torch/_inductor/codegen/cuda/serialization.py
| 32
|
[
"cls",
"operation"
] |
str
| true
| 1
| 6.08
|
pytorch/pytorch
| 96,034
|
google
| false
|
maybe_cast_to_integer_array
|
def maybe_cast_to_integer_array(arr: list | np.ndarray, dtype: np.dtype) -> np.ndarray:
"""
Takes any dtype and returns the casted version, raising for when data is
incompatible with integer/unsigned integer dtypes.
Parameters
----------
arr : np.ndarray or list
The array to cast.
dtype : np.dtype
The integer dtype to cast the array to.
Returns
-------
ndarray
Array of integer or unsigned integer dtype.
Raises
------
OverflowError : the dtype is incompatible with the data
ValueError : loss of precision has occurred during casting
Examples
--------
If you try to coerce negative values to unsigned integers, it raises:
>>> pd.Series([-1], dtype="uint64")
Traceback (most recent call last):
...
OverflowError: Trying to coerce negative values to unsigned integers
Also, if you try to coerce float values to integers, it raises:
>>> maybe_cast_to_integer_array([1, 2, 3.5], dtype=np.dtype("int64"))
Traceback (most recent call last):
...
ValueError: Trying to coerce float values to integers
"""
assert dtype.kind in "iu"
try:
if not isinstance(arr, np.ndarray):
with warnings.catch_warnings():
# We already disallow dtype=uint w/ negative numbers
# (test_constructor_coercion_signed_to_unsigned) so safe to ignore.
warnings.filterwarnings(
"ignore",
"NumPy will stop allowing conversion of out-of-bound Python int",
DeprecationWarning,
)
casted = np.asarray(arr, dtype=dtype)
else:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning)
casted = arr.astype(dtype, copy=False)
except OverflowError as err:
raise OverflowError(
"The elements provided in the data cannot all be "
f"casted to the dtype {dtype}"
) from err
if isinstance(arr, np.ndarray) and arr.dtype == dtype:
# avoid expensive array_equal check
return casted
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings(
"ignore", "elementwise comparison failed", FutureWarning
)
if np.array_equal(arr, casted):
return casted
# We do this casting to allow for proper
# data and dtype checking.
#
# We didn't do this earlier because NumPy
# doesn't handle `uint64` correctly.
arr = np.asarray(arr)
if np.issubdtype(arr.dtype, str):
# TODO(numpy-2.0 min): This case will raise an OverflowError above
if (casted.astype(str) == arr).all():
return casted
raise ValueError(f"string values cannot be losslessly cast to {dtype}")
if dtype.kind == "u" and (arr < 0).any():
# TODO: can this be hit anymore after numpy 2.0?
raise OverflowError("Trying to coerce negative values to unsigned integers")
if arr.dtype.kind == "f":
if not np.isfinite(arr).all():
raise IntCastingNaNError(
"Cannot convert non-finite values (NA or inf) to integer"
)
raise ValueError("Trying to coerce float values to integers")
if arr.dtype == object:
raise ValueError("Trying to coerce object values to integers")
if casted.dtype < arr.dtype:
# TODO: Can this path be hit anymore with numpy > 2
# GH#41734 e.g. [1, 200, 923442] and dtype="int8" -> overflows
raise ValueError(
f"Values are too large to be losslessly converted to {dtype}. "
f"To cast anyway, use pd.Series(values).astype({dtype})"
)
if arr.dtype.kind in "mM":
# test_constructor_maskedarray_nonfloat
raise TypeError(
f"Constructing a Series or DataFrame from {arr.dtype} values and "
f"dtype={dtype} is not supported. Use values.view({dtype}) instead."
)
# No known cases that get here, but raising explicitly to cover our bases.
raise ValueError(f"values cannot be losslessly cast to {dtype}")
|
Takes any dtype and returns the casted version, raising for when data is
incompatible with integer/unsigned integer dtypes.
Parameters
----------
arr : np.ndarray or list
The array to cast.
dtype : np.dtype
The integer dtype to cast the array to.
Returns
-------
ndarray
Array of integer or unsigned integer dtype.
Raises
------
OverflowError : the dtype is incompatible with the data
ValueError : loss of precision has occurred during casting
Examples
--------
If you try to coerce negative values to unsigned integers, it raises:
>>> pd.Series([-1], dtype="uint64")
Traceback (most recent call last):
...
OverflowError: Trying to coerce negative values to unsigned integers
Also, if you try to coerce float values to integers, it raises:
>>> maybe_cast_to_integer_array([1, 2, 3.5], dtype=np.dtype("int64"))
Traceback (most recent call last):
...
ValueError: Trying to coerce float values to integers
|
python
|
pandas/core/dtypes/cast.py
| 1,492
|
[
"arr",
"dtype"
] |
np.ndarray
| true
| 15
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
getUnsafe
|
private static @Nullable Unsafe getUnsafe() {
try {
return Unsafe.getUnsafe();
} catch (SecurityException e) {
// that's okay; try reflection instead
}
try {
return doPrivileged(
(PrivilegedExceptionAction<Unsafe>)
() -> {
Class<Unsafe> k = Unsafe.class;
for (Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x)) {
return k.cast(x);
}
}
return null;
});
} catch (PrivilegedActionException e) {
return null;
}
}
|
The offset to the first element in a byte array, or {@link
#OFFSET_UNSAFE_APPROACH_IS_UNAVAILABLE}.
|
java
|
android/guava/src/com/google/common/primitives/UnsignedBytes.java
| 372
|
[] |
Unsafe
| true
| 4
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
replay
|
def replay(
self,
custom_params_encoder: Callable[_P, object] | None = None,
custom_result_decoder: Callable[_P, Callable[[_EncodedR], _R]] | None = None,
) -> Callable[[Callable[_P, _R]], Callable[_P, _R]]:
"""Replay a cached function result without executing the function.
This is a decorator that retrieves cached results using a two-level
cache strategy. It checks the in-memory cache first (fast), then
falls back to the on-disk cache. If found on disk, the result is
cached in memory for future access.
Args:
custom_params_encoder: Optional encoder for function parameters.
If None, parameters are pickled directly.
custom_result_decoder: Optional decoder factory for cached results.
Takes function parameters and returns a decoder
function that converts _EncodedR -> R.
Returns:
A decorator function that can be applied to functions.
Example:
@persistent_memoizer.replay(
custom_params_encoder=my_param_encoder,
custom_result_decoder=my_result_decoder_factory,
)
def expensive_function(x, y):
return x + y
"""
def wrapper(fn: Callable[_P, _R]) -> Callable[_P, _R]:
"""Wrap the function to retrieve from cache.
Args:
fn: The function to wrap (not actually called).
Returns:
A wrapped version of the function.
"""
# If caching is disabled, always raise KeyError (cache miss)
if not config.IS_CACHING_MODULE_ENABLED():
def always_miss(*args: _P.args, **kwargs: _P.kwargs) -> _R:
raise KeyError("Caching is disabled")
return always_miss
# Get the memory replay function
memory_replay_fn = self._memoizer.replay(
custom_params_encoder, custom_result_decoder
)(fn)
def inner(*args: _P.args, **kwargs: _P.kwargs) -> _R:
"""Retrieve the cached result without calling the function.
Checks memory cache first, then disk cache. Populates memory
cache from disk on a disk hit.
Args:
*args: Positional arguments to generate the cache key.
**kwargs: Keyword arguments to generate the cache key.
Returns:
The cached result (decoded if decoder is provided).
Raises:
KeyError: If no cached result exists for the given parameters.
"""
# Try memory cache first via memoizer
try:
return memory_replay_fn(*args, **kwargs)
except KeyError:
pass # Memory miss, check disk
# Memory miss - check disk cache
cache_key = self._make_key(custom_params_encoder, *args, **kwargs)
disk_hit = self._disk_cache.get(cache_key)
if disk_hit is not None:
# Disk cache hit - unpickle the CacheEntry
pickled_value = disk_hit.value
cache_entry = cast(CacheEntry, pickle.loads(pickled_value))
# Populate memory cache for future access
self._memoizer._cache.insert(cache_key, cache_entry)
# Decode and return
if custom_result_decoder is not None:
decoder_fn = custom_result_decoder(*args, **kwargs)
return decoder_fn(cast(_EncodedR, cache_entry.encoded_result))
return cast(_R, cache_entry.encoded_result)
# Complete miss
raise KeyError(f"No cached result found for key: {cache_key}")
return inner
return wrapper
|
Replay a cached function result without executing the function.
This is a decorator that retrieves cached results using a two-level
cache strategy. It checks the in-memory cache first (fast), then
falls back to the on-disk cache. If found on disk, the result is
cached in memory for future access.
Args:
custom_params_encoder: Optional encoder for function parameters.
If None, parameters are pickled directly.
custom_result_decoder: Optional decoder factory for cached results.
Takes function parameters and returns a decoder
function that converts _EncodedR -> R.
Returns:
A decorator function that can be applied to functions.
Example:
@persistent_memoizer.replay(
custom_params_encoder=my_param_encoder,
custom_result_decoder=my_result_decoder_factory,
)
def expensive_function(x, y):
return x + y
|
python
|
torch/_inductor/runtime/caching/interfaces.py
| 712
|
[
"self",
"custom_params_encoder",
"custom_result_decoder"
] |
Callable[[Callable[_P, _R]], Callable[_P, _R]]
| true
| 4
| 9.12
|
pytorch/pytorch
| 96,034
|
google
| false
|
_build_metrics
|
def _build_metrics(func_name, namespace):
"""
Build metrics dict from function args.
It assumes that function arguments is from airflow.bin.cli module's function
and has Namespace instance where it optionally contains "dag_id", "task_id",
and "logical_date".
:param func_name: name of function
:param namespace: Namespace instance from argparse
:return: dict with metrics
"""
from airflow._shared.secrets_masker import _secrets_masker
sub_commands_to_check_for_sensitive_fields = {"users", "connections"}
sub_commands_to_check_for_sensitive_key = {"variables"}
sensitive_fields = {"-p", "--password", "--conn-password"}
full_command = list(sys.argv)
sub_command = full_command[1] if len(full_command) > 1 else None
# For cases when value under sub_commands_to_check_for_sensitive_key have sensitive info
if sub_command in sub_commands_to_check_for_sensitive_key:
key = full_command[-2] if len(full_command) > 3 else None
if key and _secrets_masker().should_hide_value_for_key(key):
# Mask the sensitive value since key contain sensitive keyword
full_command[-1] = "*" * 8
elif sub_command in sub_commands_to_check_for_sensitive_fields:
for idx, command in enumerate(full_command):
if command in sensitive_fields:
# For cases when password is passed as "--password xyz" (with space between key and value)
full_command[idx + 1] = "*" * 8
else:
# For cases when password is passed as "--password=xyz" (with '=' between key and value)
for sensitive_field in sensitive_fields:
if command.startswith(f"{sensitive_field}="):
full_command[idx] = f"{sensitive_field}={'*' * 8}"
# handle conn-json and conn-uri separately as it requires different handling
if "--conn-json" in full_command:
import json
json_index = full_command.index("--conn-json") + 1
conn_json = json.loads(full_command[json_index])
for k in conn_json:
if k and _secrets_masker().should_hide_value_for_key(k):
conn_json[k] = "*" * 8
full_command[json_index] = json.dumps(conn_json)
if "--conn-uri" in full_command:
from urllib.parse import urlparse, urlunparse
uri_index = full_command.index("--conn-uri") + 1
conn_uri = full_command[uri_index]
parsed_uri = urlparse(conn_uri)
netloc = parsed_uri.netloc
if parsed_uri.password:
password = "*" * 8
netloc = f"{parsed_uri.username}:{password}@{parsed_uri.hostname}"
if parsed_uri.port:
netloc += f":{parsed_uri.port}"
full_command[uri_index] = urlunparse(
(
parsed_uri.scheme,
netloc,
parsed_uri.path,
parsed_uri.params,
parsed_uri.query,
parsed_uri.fragment,
)
)
metrics = {
"sub_command": func_name,
"start_datetime": timezone.utcnow(),
"full_command": f"{full_command}",
"user": getuser(),
}
if not isinstance(namespace, Namespace):
raise ValueError(
f"namespace argument should be argparse.Namespace instance, but is {type(namespace)}"
)
tmp_dic = vars(namespace)
metrics["dag_id"] = tmp_dic.get("dag_id")
metrics["task_id"] = tmp_dic.get("task_id")
metrics["logical_date"] = tmp_dic.get("logical_date")
metrics["host_name"] = socket.gethostname()
return metrics
|
Build metrics dict from function args.
It assumes that function arguments is from airflow.bin.cli module's function
and has Namespace instance where it optionally contains "dag_id", "task_id",
and "logical_date".
:param func_name: name of function
:param namespace: Namespace instance from argparse
:return: dict with metrics
|
python
|
airflow-core/src/airflow/utils/cli.py
| 129
|
[
"func_name",
"namespace"
] | false
| 20
| 6
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
intersect
|
public static Optional<ApiVersion> intersect(ApiVersion thisVersion,
ApiVersion other) {
if (thisVersion == null || other == null) return Optional.empty();
if (thisVersion.apiKey() != other.apiKey())
throw new IllegalArgumentException("thisVersion.apiKey: " + thisVersion.apiKey()
+ " must be equal to other.apiKey: " + other.apiKey());
short minVersion = (short) Math.max(thisVersion.minVersion(), other.minVersion());
short maxVersion = (short) Math.min(thisVersion.maxVersion(), other.maxVersion());
return minVersion > maxVersion
? Optional.empty()
: Optional.of(new ApiVersion()
.setApiKey(thisVersion.apiKey())
.setMinVersion(minVersion)
.setMaxVersion(maxVersion));
}
|
Find the common range of supported API versions between the locally
known range and that of another set.
@param listenerType the listener type which constrains the set of exposed APIs
@param activeControllerApiVersions controller ApiVersions
@param enableUnstableLastVersion whether unstable versions should be advertised or not
@param clientTelemetryEnabled whether client telemetry is enabled or not
@return commonly agreed ApiVersion collection
|
java
|
clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsResponse.java
| 311
|
[
"thisVersion",
"other"
] | true
| 5
| 7.28
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
_decode_attribute
|
def _decode_attribute(self, s):
'''(INTERNAL) Decodes an attribute line.
The attribute is the most complex declaration in an arff file. All
attributes must follow the template::
@attribute <attribute-name> <datatype>
where ``attribute-name`` is a string, quoted if the name contains any
whitespace, and ``datatype`` can be:
- Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
- Strings as ``STRING``.
- Dates (NOT IMPLEMENTED).
- Nominal attributes with format:
{<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}
The nominal names follow the rules for the attribute names, i.e., they
must be quoted if the name contains whitespaces.
This method must receive a normalized string, i.e., a string without
padding, including the "\r\n" characters.
:param s: a normalized string.
:return: a tuple (ATTRIBUTE_NAME, TYPE_OR_VALUES).
'''
_, v = s.split(' ', 1)
v = v.strip()
# Verify the general structure of declaration
m = _RE_ATTRIBUTE.match(v)
if not m:
raise BadAttributeFormat()
# Extracts the raw name and type
name, type_ = m.groups()
# Extracts the final name
name = str(name.strip('"\''))
# Extracts the final type
if type_[:1] == "{" and type_[-1:] == "}":
try:
type_ = _parse_values(type_.strip('{} '))
except Exception:
raise BadAttributeType()
if isinstance(type_, dict):
raise BadAttributeType()
else:
# If not nominal, verify the type name
type_ = str(type_).upper()
if type_ not in ['NUMERIC', 'REAL', 'INTEGER', 'STRING']:
raise BadAttributeType()
return (name, type_)
|
(INTERNAL) Decodes an attribute line.
The attribute is the most complex declaration in an arff file. All
attributes must follow the template::
@attribute <attribute-name> <datatype>
where ``attribute-name`` is a string, quoted if the name contains any
whitespace, and ``datatype`` can be:
- Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
- Strings as ``STRING``.
- Dates (NOT IMPLEMENTED).
- Nominal attributes with format:
{<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}
The nominal names follow the rules for the attribute names, i.e., they
must be quoted if the name contains whitespaces.
This method must receive a normalized string, i.e., a string without
padding, including the "\r\n" characters.
:param s: a normalized string.
:return: a tuple (ATTRIBUTE_NAME, TYPE_OR_VALUES).
|
python
|
sklearn/externals/_arff.py
| 713
|
[
"self",
"s"
] | false
| 7
| 6.96
|
scikit-learn/scikit-learn
| 64,340
|
sphinx
| false
|
|
chebroots
|
def chebroots(c):
"""
Compute the roots of a Chebyshev series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * T_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
numpy.polynomial.polynomial.polyroots
numpy.polynomial.legendre.legroots
numpy.polynomial.laguerre.lagroots
numpy.polynomial.hermite.hermroots
numpy.polynomial.hermite_e.hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
matrix, Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
The Chebyshev series basis polynomials aren't powers of `x` so the
results of this function may seem unintuitive.
Examples
--------
>>> import numpy.polynomial.chebyshev as cheb
>>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots
array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00]) # may vary
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0] / c[1]])
# rotated companion matrix reduces error
m = chebcompanion(c)[::-1, ::-1]
r = np.linalg.eigvals(m)
r.sort()
return r
|
Compute the roots of a Chebyshev series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * T_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
numpy.polynomial.polynomial.polyroots
numpy.polynomial.legendre.legroots
numpy.polynomial.laguerre.lagroots
numpy.polynomial.hermite.hermroots
numpy.polynomial.hermite_e.hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
matrix, Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
The Chebyshev series basis polynomials aren't powers of `x` so the
results of this function may seem unintuitive.
Examples
--------
>>> import numpy.polynomial.chebyshev as cheb
>>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots
array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00]) # may vary
|
python
|
numpy/polynomial/chebyshev.py
| 1,666
|
[
"c"
] | false
| 3
| 7.68
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
withPrefix
|
@Override
default IterableConfigurationPropertySource withPrefix(@Nullable String prefix) {
return (StringUtils.hasText(prefix)) ? new PrefixedIterableConfigurationPropertySource(this, prefix) : this;
}
|
Returns a sequential {@code Stream} for the {@link ConfigurationPropertyName names}
managed by this source.
@return a stream of names (never {@code null})
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/IterableConfigurationPropertySource.java
| 79
|
[
"prefix"
] |
IterableConfigurationPropertySource
| true
| 2
| 6.32
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
substituteThisExpression
|
function substituteThisExpression(node: ThisExpression) {
if (
enabledSubstitutions & ClassPropertySubstitutionFlags.ClassStaticThisOrSuperReference &&
lexicalEnvironment?.data &&
!noSubstitution.has(node)
) {
const { facts, classConstructor, classThis } = lexicalEnvironment.data;
const substituteThis = shouldSubstituteThisWithClassThis ? classThis ?? classConstructor : classConstructor;
if (substituteThis) {
return setTextRange(
setOriginalNode(
factory.cloneNode(substituteThis),
node,
),
node,
);
}
if (facts & ClassFacts.ClassWasDecorated && legacyDecorators) {
return factory.createParenthesizedExpression(factory.createVoidZero());
}
}
return node;
}
|
Hooks node substitutions.
@param hint The context for the emitter.
@param node The node to substitute.
|
typescript
|
src/compiler/transformers/classFields.ts
| 3,265
|
[
"node"
] | false
| 8
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
_get_data_info_by_name
|
def _get_data_info_by_name(
name: str,
version: Union[int, str],
data_home: Optional[str],
n_retries: int = 3,
delay: float = 1.0,
):
"""
Utilizes the openml dataset listing api to find a dataset by
name/version
OpenML api function:
https://www.openml.org/api_docs#!/data/get_data_list_data_name_data_name
Parameters
----------
name : str
name of the dataset
version : int or str
If version is an integer, the exact name/version will be obtained from
OpenML. If version is a string (value: "active") it will take the first
version from OpenML that is annotated as active. Any other string
values except "active" are treated as integer.
data_home : str or None
Location to cache the response. None if no cache is required.
n_retries : int, default=3
Number of retries when HTTP errors are encountered. Error with status
code 412 won't be retried as they represent OpenML generic errors.
delay : float, default=1.0
Number of seconds between retries.
Returns
-------
first_dataset : json
json representation of the first dataset object that adhired to the
search criteria
"""
if version == "active":
# situation in which we return the oldest active version
url = _SEARCH_NAME.format(name) + "/status/active/"
error_msg = "No active dataset {} found.".format(name)
json_data = _get_json_content_from_openml_api(
url,
error_msg,
data_home=data_home,
n_retries=n_retries,
delay=delay,
)
res = json_data["data"]["dataset"]
if len(res) > 1:
first_version = version = res[0]["version"]
warning_msg = (
"Multiple active versions of the dataset matching the name"
f" {name} exist. Versions may be fundamentally different, "
f"returning version {first_version}. "
"Available versions:\n"
)
for r in res:
warning_msg += f"- version {r['version']}, status: {r['status']}\n"
warning_msg += (
f" url: https://www.openml.org/search?type=data&id={r['did']}\n"
)
warn(warning_msg)
return res[0]
# an integer version has been provided
url = (_SEARCH_NAME + "/data_version/{}").format(name, version)
try:
json_data = _get_json_content_from_openml_api(
url,
error_message=None,
data_home=data_home,
n_retries=n_retries,
delay=delay,
)
except OpenMLError:
# we can do this in 1 function call if OpenML does not require the
# specification of the dataset status (i.e., return datasets with a
# given name / version regardless of active, deactivated, etc. )
# TODO: feature request OpenML.
url += "/status/deactivated"
error_msg = "Dataset {} with version {} not found.".format(name, version)
json_data = _get_json_content_from_openml_api(
url,
error_msg,
data_home=data_home,
n_retries=n_retries,
delay=delay,
)
return json_data["data"]["dataset"][0]
|
Utilizes the openml dataset listing api to find a dataset by
name/version
OpenML api function:
https://www.openml.org/api_docs#!/data/get_data_list_data_name_data_name
Parameters
----------
name : str
name of the dataset
version : int or str
If version is an integer, the exact name/version will be obtained from
OpenML. If version is a string (value: "active") it will take the first
version from OpenML that is annotated as active. Any other string
values except "active" are treated as integer.
data_home : str or None
Location to cache the response. None if no cache is required.
n_retries : int, default=3
Number of retries when HTTP errors are encountered. Error with status
code 412 won't be retried as they represent OpenML generic errors.
delay : float, default=1.0
Number of seconds between retries.
Returns
-------
first_dataset : json
json representation of the first dataset object that adhired to the
search criteria
|
python
|
sklearn/datasets/_openml.py
| 262
|
[
"name",
"version",
"data_home",
"n_retries",
"delay"
] | true
| 4
| 6.8
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
scanWordCharacters
|
// Consume a maximal run of word characters starting at the enclosing
// scanner's `pos` and return it as a string. Advances `pos` past the run,
// stopping at EOF or the first non-word character.
function scanWordCharacters(): string {
    const chars: string[] = [];
    for (let ch = charCodeChecked(pos); ch !== CharacterCodes.EOF && isWordCharacter(ch); ch = charCodeChecked(pos)) {
        chars.push(String.fromCharCode(ch));
        pos++;
    }
    return chars.join("");
}
|
A stack of scopes for named capturing groups. @see {scanGroupName}
|
typescript
|
src/compiler/scanner.ts
| 3,555
|
[] | true
| 4
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
reorder_communication_preserving_peak_memory
|
def reorder_communication_preserving_peak_memory(
    snodes: list[BaseSchedulerNode],
) -> list[BaseSchedulerNode]:
    """
    Reorders communication ops relative to computation ops to improve communication-compute overlapping and hide comm
    latency. Stops moving a particular op if it reaches a point that would have increased the peak memory footprint.

    Currently, follows these heuristics (subject to change or tune):
    - never reorders collectives relative to one another, for SPMD safety
    - has an option for per-collective prefetch limit, but does not enable it by default
    - limits the total number of reorder steps to some factor of the graph size to prevent worst-case quadratic
      performance

    Prerequisite: sink_comms_and_waits - ensure comm and wait nodes are scheduled as late as possible, respecting data
    dependencies. That allows reorder_communication_preserving_peak_memory to take a best case peak-memory snapshot,
    and then monotonically improve latency by moving collectives backward in time.

    Peak memory impact is computed in an iterative fashion: memory use at each timestep is computed once, and swapping
    two adjacent nodes only changes the curr-memory of the earlier node after the swap, so each swap can be checked
    for peak-memory safety step by step.
    """
    # The internal helper also returns per-node reorder statistics; only the
    # reordered schedule is exposed by this public entry point.
    reordered, _node_stats = _reorder_communication_preserving_peak_memory_internal(snodes)
    return reordered
|
Reorders communication ops relative to computation ops to improve communication-compute overlapping and hide comm
latency. Stops moving a particular op if it reaches a point that would have increased the peak memory footprint.
Currently, follows these heuristics (subject to change or tune):
- never reorders collectives relative to one another, for SPMD safety
- has an option for per-collective prefetch limit, but does not enable it by default
- limits the total number of reorder steps to some factor of the graph size to prevent worst-case quadratic
performance
Prerequisite: sink_comms_and_waits - ensure comm and wait nodes are scheduled as late as possible, respecting data
dependencies. That allows reorder_communication_preserving_peak_memory to take a best case peak-memory snapshot,
and then monotonically improve latency by moving collectives backward in time.
Peak memory impact is computed in an iterative fashion. First, memory use at each timestep is computed, and global
peak memory is computed as a max over timesteps. Then, when swapping any two adjacent nodes, only the curr-memory
for the earlier of the nodes after the swap is affected. This enables checking step by step whether a swap is
peak-memory-safe, and bailing out if not. Example:
0 n0 C0
1 n1 C0 + Allocs(n1) - Frees(n1)
2 n2 C0 + Allocs(n1) - Frees(n1) + Allocs(n2) - Frees(n2)
0 n0 C0
1 n2 C0 + Allocs(n2) - Frees(n2) <-- After moving n2 to Time 1, only time1 memory changes
2 n1 C0 + Allocs(n2) - Frees(n2) + Allocs(n1) - Frees(n1)
|
python
|
torch/_inductor/comms.py
| 113
|
[
"snodes"
] |
list[BaseSchedulerNode]
| true
| 1
| 6.96
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
hashCode
|
@Override
public int hashCode() {
    // Standard 31-based hash over the record's fields in declaration order,
    // treating a null field as 0 — same accumulation as the original chain.
    final Object[] fields = {topic, partition, headers, key, value, timestamp};
    int result = fields[0] != null ? fields[0].hashCode() : 0;
    for (int i = 1; i < fields.length; i++) {
        result = 31 * result + (fields[i] != null ? fields[i].hashCode() : 0);
    }
    return result;
}
|
@return The partition to which the record will be sent (or null if no partition was specified)
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/ProducerRecord.java
| 215
|
[] | true
| 7
| 7.76
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
tryToComputeNext
|
private boolean tryToComputeNext() {
    state = State.FAILED; // temporary pessimism: remains FAILED if computeNext() throws
    next = computeNext();
    // computeNext() signals exhaustion by moving state to DONE (via endOfData()).
    if (state == State.DONE) {
        return false;
    }
    state = State.READY;
    return true;
}
|
Implementations of {@link #computeNext} <b>must</b> invoke this method when there are no
elements left in the iteration.
@return {@code null}; a convenience so your {@code computeNext} implementation can use the
simple statement {@code return endOfData();}
|
java
|
android/guava/src/com/google/common/collect/AbstractIterator.java
| 139
|
[] | true
| 2
| 7.44
|
google/guava
| 51,352
|
javadoc
| false
|
|
handleBindResult
|
/**
 * Post-processes the raw result of a bind operation.
 * <p>A non-null result is passed through the handler's {@code onSuccess}
 * callback and converted to the target type. When the result is {@code null}
 * and {@code create} is set, a fresh instance is requested from the registered
 * {@code DataObjectBinder}s and passed through {@code onCreate}. In all cases
 * {@code onFinish} is notified and the final value is converted once more.
 * @param name the configuration property name being bound
 * @param target the target bindable
 * @param handler the bind handler to notify
 * @param context the current bind context
 * @param result the raw bind result (may be {@code null})
 * @param create whether to create a new instance when no value was bound
 * @return the converted result (may be {@code null})
 * @throws Exception if post-processing fails
 */
@Contract("_, _, _, _, _, true -> null")
private <T> @Nullable T handleBindResult(ConfigurationPropertyName name, Bindable<T> target, BindHandler handler,
        Context context, @Nullable Object result, boolean create) throws Exception {
    if (result != null) {
        result = handler.onSuccess(name, target, context, result);
        result = context.getConverter().convert(result, target);
    }
    if (result == null && create) {
        // Nothing was bound: ask each DataObjectBinder for a fresh instance.
        result = fromDataObjectBinders(target.getBindMethod(),
                (dataObjectBinder) -> dataObjectBinder.create(target, context));
        if (result == null) {
            IllegalStateException ex = new IllegalStateException(
                    "Unable to create instance for " + target.getType());
            List<DataObjectBinder> dataObjectBinders = this.dataObjectBinders.get(target.getBindMethod());
            Assert.state(dataObjectBinders != null, "'dataObjectBinders' must not be null");
            // Let every binder report why creation failed before propagating.
            dataObjectBinders
                .forEach((dataObjectBinder) -> dataObjectBinder.onUnableToCreateInstance(target, context, ex));
            throw ex;
        }
        result = handler.onCreate(name, target, context, result);
        result = context.getConverter().convert(result, target);
    }
    handler.onFinish(name, target, context, result);
    return context.getConverter().convert(result, target);
}
|
Bind the specified target {@link Bindable} using this binder's
{@link ConfigurationPropertySource property sources} or create a new instance using
the type of the {@link Bindable} if the result of the binding is {@code null}.
@param name the configuration property name to bind
@param target the target bindable
@param handler the bind handler (may be {@code null})
@param <T> the bound or created type
@return the bound or created object
@since 2.2.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Binder.java
| 382
|
[
"name",
"target",
"handler",
"context",
"result",
"create"
] |
T
| true
| 5
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
setAsText
|
/**
 * Converts the given text into a {@link Resource} via the underlying
 * {@code ResourceEditor} and exposes that resource's {@link java.io.InputStream}
 * as this editor's value ({@code null} if the text resolves to no resource).
 * @param text the resource location to resolve
 * @throws IllegalArgumentException if the resource's stream cannot be opened
 */
@Override
public void setAsText(String text) throws IllegalArgumentException {
    this.resourceEditor.setAsText(text);
    Resource resource = (Resource) this.resourceEditor.getValue();
    if (resource == null) {
        setValue(null);
        return;
    }
    try {
        setValue(resource.getInputStream());
    }
    catch (IOException ex) {
        throw new IllegalArgumentException("Failed to retrieve InputStream for " + resource, ex);
    }
}
|
Create a new InputStreamEditor, using the given ResourceEditor underneath.
@param resourceEditor the ResourceEditor to use
|
java
|
spring-beans/src/main/java/org/springframework/beans/propertyeditors/InputStreamEditor.java
| 68
|
[
"text"
] |
void
| true
| 3
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
_create_tasks
|
def _create_tasks(
    self,
    tasks: Iterable[Operator],
    task_creator: Callable[[Operator, Iterable[int]], CreatedTasks],
    *,
    session: Session,
) -> CreatedTasks:
    """
    Create missing tasks -- and expand any MappedOperator that _only_ have literals as input.

    :param tasks: Tasks to create jobs for in the DAG run
    :param task_creator: Function to create task instances
    """
    from airflow.models.expandinput import NotFullyPopulated
    from airflow.models.mappedoperator import get_mapped_ti_count

    for task in tasks:
        try:
            count = get_mapped_ti_count(task, self.run_id, session=session)
        except (NotMapped, NotFullyPopulated):
            count = 0
        # A zero count still yields one placeholder ti (map_index -1); it will
        # be marked as REMOVED later at runtime.
        map_indexes: Iterable[int] = range(count) if count else (-1,)
        yield from task_creator(task, map_indexes)
|
Create missing tasks -- and expand any MappedOperator that _only_ have literals as input.
:param tasks: Tasks to create jobs for in the DAG run
:param task_creator: Function to create task instances
|
python
|
airflow-core/src/airflow/models/dagrun.py
| 1,879
|
[
"self",
"tasks",
"task_creator",
"session"
] |
CreatedTasks
| true
| 5
| 6.72
|
apache/airflow
| 43,597
|
sphinx
| false
|
predecessors
|
/**
 * Returns a live, unmodifiable view of this node's predecessors.
 * <p>Iteration order follows edge insertion order when
 * {@code orderedNodeConnections} is maintained; otherwise it follows the
 * adjacency map's iteration order.
 */
@Override
public Set<N> predecessors() {
  return new AbstractSet<N>() {
    @Override
    public UnmodifiableIterator<N> iterator() {
      if (orderedNodeConnections == null) {
        // Unordered case: scan the adjacency map, keeping entries whose
        // value marks a predecessor edge.
        Iterator<Entry<N, Object>> entries = adjacentNodeValues.entrySet().iterator();
        return new AbstractIterator<N>() {
          @Override
          protected @Nullable N computeNext() {
            while (entries.hasNext()) {
              Entry<N, Object> entry = entries.next();
              if (isPredecessor(entry.getValue())) {
                return entry.getKey();
              }
            }
            return endOfData();
          }
        };
      } else {
        // Ordered case: walk the insertion-ordered connection list, keeping
        // only predecessor connections.
        Iterator<NodeConnection<N>> nodeConnections = orderedNodeConnections.iterator();
        return new AbstractIterator<N>() {
          @Override
          protected @Nullable N computeNext() {
            while (nodeConnections.hasNext()) {
              NodeConnection<N> nodeConnection = nodeConnections.next();
              if (nodeConnection instanceof NodeConnection.Pred) {
                return nodeConnection.node;
              }
            }
            return endOfData();
          }
        };
      }
    }

    @Override
    public int size() {
      // Count is maintained incrementally elsewhere; no iteration needed.
      return predecessorCount;
    }

    @Override
    public boolean contains(@Nullable Object obj) {
      return isPredecessor(adjacentNodeValues.get(obj));
    }
  };
}
|
All node connections in this graph, in edge insertion order.
<p>Note: This field and {@link #adjacentNodeValues} cannot be combined into a single
LinkedHashMap because one target node may be mapped to both a predecessor and a successor. A
LinkedHashMap combines two such edges into a single node-value pair, even though the edges may
not have been inserted consecutively.
|
java
|
android/guava/src/com/google/common/graph/DirectedGraphConnections.java
| 271
|
[] | true
| 6
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
resolveShortcut
|
/**
 * Resolve a shortcut for this dependency against the given factory.
 * <p>This base implementation resolves nothing; subclasses may override it to
 * short-circuit the regular type-matching algorithm with pre-cached information.
 * @param beanFactory the associated factory
 * @return the shortcut result, or {@code null} if none
 * @throws BeansException if the shortcut could not be obtained
 */
public @Nullable Object resolveShortcut(BeanFactory beanFactory) throws BeansException {
    return null;
}
|
Resolve a shortcut for this dependency against the given factory, for example
taking some pre-resolved information into account.
<p>The resolution algorithm will first attempt to resolve a shortcut through this
method before going into the regular type matching algorithm across all beans.
Subclasses may override this method to improve resolution performance based on
pre-cached information while still receiving {@link InjectionPoint} exposure etc.
@param beanFactory the associated factory
@return the shortcut result if any, or {@code null} if none
@throws BeansException if the shortcut could not be obtained
@since 4.3.1
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/DependencyDescriptor.java
| 207
|
[
"beanFactory"
] |
Object
| true
| 1
| 6.16
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
put
|
/**
 * Sets the value at {@code index} to {@code value}, null-padding this array to
 * the required length if necessary; any existing value at {@code index} is
 * replaced.
 * @param index the index to set the value at
 * @param value the value to store
 * @return this array
 * @throws JSONException if processing of json failed
 */
public JSONArray put(int index, long value) throws JSONException {
    // Box explicitly and delegate to the Long overload.
    return put(index, Long.valueOf(value));
}
|
Sets the value at {@code index} to {@code value}, null padding this array to the
required length if necessary. If a value already exists at {@code
index}, it will be replaced.
@param index the index to set the value to
@param value the value
@return this array.
@throws JSONException if processing of json failed
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONArray.java
| 232
|
[
"index",
"value"
] |
JSONArray
| true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
joining
|
/**
 * Returns a {@code Collector} that concatenates the input elements in
 * encounter order, with no delimiter.
 * <p>Unlike {@link java.util.stream.Collectors#joining()}, this works with any
 * element type, not just {@code CharSequence}: each element is appended via
 * {@code StringBuilder.append(Object)}.
 * @return a {@code Collector} which concatenates Object elements in encounter order
 */
public static Collector<Object, ?, String> joining() {
    return new SimpleCollector<>(StringBuilder::new, StringBuilder::append, StringBuilder::append, StringBuilder::toString, CH_NOID);
}
|
Returns a {@code Collector} that concatenates the input elements, separated by the specified delimiter, in encounter order.
<p>
This is a variation of {@link Collectors#joining()} that works with any element class, not just {@code CharSequence}.
</p>
<p>
For example:
</p>
<pre>
Stream.of(Long.valueOf(1), Long.valueOf(2), Long.valueOf(3))
.collect(LangCollectors.joining())
returns "123"
</pre>
@return A {@code Collector} which concatenates Object elements, separated by the specified delimiter, in encounter order.
|
java
|
src/main/java/org/apache/commons/lang3/stream/LangCollectors.java
| 132
|
[] | true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
asBiConsumer
|
/**
 * Converts the given {@link FailableBiConsumer} into a standard {@link BiConsumer},
 * routing invocations through {@code accept} (which handles any checked exception).
 * @param <O1> the type of the first argument of the consumers
 * @param <O2> the type of the second argument of the consumers
 * @param consumer a failable {@code BiConsumer}
 * @return a standard {@code BiConsumer}
 */
public static <O1, O2> BiConsumer<O1, O2> asBiConsumer(final FailableBiConsumer<O1, O2, ?> consumer) {
    return (first, second) -> accept(consumer, first, second);
}
|
Converts the given {@link FailableBiConsumer} into a standard {@link BiConsumer}.
@param <O1> the type of the first argument of the consumers
@param <O2> the type of the second argument of the consumers
@param consumer a failable {@link BiConsumer}
@return a standard {@link BiConsumer}
@since 3.10
|
java
|
src/main/java/org/apache/commons/lang3/Functions.java
| 352
|
[
"consumer"
] | true
| 1
| 6.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
getFraction
|
/**
 * Creates a {@code Fraction} instance from a {@code double} value using the
 * continued-fraction algorithm, computing at most 25 convergents and bounding
 * the denominator by 10,000.
 * @param value the double value to convert
 * @return a new fraction instance that is close to the value
 * @throws ArithmeticException if {@code |value| > Integer.MAX_VALUE} or {@code value} is NaN
 * @throws ArithmeticException if the algorithm does not converge within 25 iterations
 */
public static Fraction getFraction(double value) {
    final int sign = value < 0 ? -1 : 1;
    value = Math.abs(value);
    if (value > Integer.MAX_VALUE || Double.isNaN(value)) {
        throw new ArithmeticException("The value must not be greater than Integer.MAX_VALUE or NaN");
    }
    // Split into whole part plus fractional remainder; the continued-fraction
    // expansion below approximates only the fractional part.
    final int wholeNumber = (int) value;
    value -= wholeNumber;
    int numer0 = 0; // the pre-previous
    int denom0 = 1; // the pre-previous
    int numer1 = 1; // the previous
    int denom1 = 0; // the previous
    int numer2; // the current, setup in calculation
    int denom2; // the current, setup in calculation
    int a1 = (int) value;
    int a2;
    double x1 = 1;
    double x2;
    double y1 = value - a1;
    double y2;
    double delta1;
    double delta2 = Double.MAX_VALUE;
    double fraction;
    int i = 1;
    // Iterate convergents until the approximation stops improving, the
    // denominator bound is exceeded, or the iteration cap is reached.
    do {
        delta1 = delta2;
        a2 = (int) (x1 / y1);
        x2 = y1;
        y2 = x1 - a2 * y1;
        numer2 = a1 * numer1 + numer0;
        denom2 = a1 * denom1 + denom0;
        fraction = (double) numer2 / (double) denom2;
        delta2 = Math.abs(value - fraction);
        a1 = a2;
        x1 = x2;
        y1 = y2;
        numer0 = numer1;
        denom0 = denom1;
        numer1 = numer2;
        denom1 = denom2;
        i++;
    } while (delta1 > delta2 && denom2 <= 10000 && denom2 > 0 && i < 25);
    if (i == 25) {
        throw new ArithmeticException("Unable to convert double to fraction");
    }
    // Re-attach the whole part and the original sign, then reduce.
    return getReducedFraction((numer0 + wholeNumber * denom0) * sign, denom0);
}
|
Creates a {@link Fraction} instance from a {@code double} value.
<p>
This method uses the <a href="https://web.archive.org/web/20210516065058/http%3A//archives.math.utk.edu/articles/atuyl/confrac/"> continued fraction
algorithm</a>, computing a maximum of 25 convergents and bounding the denominator by 10,000.
</p>
@param value the double value to convert
@return a new fraction instance that is close to the value
@throws ArithmeticException if {@code |value| > Integer.MAX_VALUE} or {@code value = NaN}
@throws ArithmeticException if the calculated denominator is {@code zero}
@throws ArithmeticException if the algorithm does not converge
|
java
|
src/main/java/org/apache/commons/lang3/math/Fraction.java
| 134
|
[
"value"
] |
Fraction
| true
| 8
| 7.44
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
nullToEmpty
|
/**
 * Defensive programming technique to change a {@code null} reference to an
 * empty one.
 * <p>Delegates to {@code nullTo} with the shared
 * {@code EMPTY_BOOLEAN_OBJECT_ARRAY} constant, so callers never receive
 * {@code null} from this method. The exact treatment of empty (non-null)
 * input is defined by {@code nullTo}.
 * @param array the array to check for {@code null} or empty
 * @return the input array, or the shared empty array
 */
public static Boolean[] nullToEmpty(final Boolean[] array) {
    return nullTo(array, EMPTY_BOOLEAN_OBJECT_ARRAY);
}
|
Defensive programming technique to change a {@code null}
reference to an empty one.
<p>
This method returns an empty array for a {@code null} input array.
</p>
<p>
As a memory optimizing technique an empty array passed in will be overridden with
the empty {@code public static} references in this class.
</p>
@param array the array to check for {@code null} or empty.
@return the same array, {@code public static} empty array if {@code null} or empty input.
@since 2.5
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 4,299
|
[
"array"
] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
read
|
/**
 * Reads up to {@code count} bytes from {@code input} into {@code buffer},
 * advancing the buffer's position by the number of bytes read.
 * @param input stream to read from
 * @param buffer buffer to read into
 * @param count maximum number of bytes to read
 * @return number of bytes read from the stream
 * @throws IOException in case of I/O errors
 */
public static int read(InputStream input, ByteBuffer buffer, int count) throws IOException {
    // Heap-backed buffers can be filled through their backing array; direct
    // buffers go through the copying path.
    return buffer.hasArray()
            ? readToHeapBuffer(input, buffer, count)
            : readToDirectBuffer(input, buffer, count);
}
|
Read up to {code count} bytes from {@code input} and store them into {@code buffer}.
The buffers position will be incremented by the number of bytes read from the stream.
@param input stream to read from
@param buffer buffer to read into
@param count maximum number of bytes to read
@return number of bytes read from the stream
@throws IOException in case of I/O errors
|
java
|
libs/core/src/main/java/org/elasticsearch/core/Streams.java
| 92
|
[
"input",
"buffer",
"count"
] | true
| 2
| 7.92
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
getFilteredHeaders
|
/**
 * Projects `headers` onto the subset of header names listed in
 * `includeHeaders`, keeping every value of each present header.
 * Returns an empty record when no include-list is provided.
 */
function getFilteredHeaders(
  headers: HttpHeaders,
  includeHeaders: string[] | undefined,
): Record<string, string[]> {
  const result: Record<string, string[]> = {};
  for (const name of includeHeaders ?? []) {
    const headerValues = headers.getAll(name);
    if (headerValues !== null) {
      result[name] = headerValues;
    }
  }
  return result;
}
|
@returns true when the request contains authorization-related headers.
|
typescript
|
packages/common/http/src/transfer_cache.ts
| 249
|
[
"headers",
"includeHeaders"
] | true
| 3
| 6.4
|
angular/angular
| 99,544
|
jsdoc
| false
|
|
badElementIndex
|
/**
 * Builds the failure message for an invalid element index.
 * <p>A negative index takes precedence; a negative size is a caller bug and is
 * reported via {@code IllegalArgumentException} instead of a message.
 * @param index the user-supplied index
 * @param size the size of the array, list or string
 * @param desc the text describing the index in the message
 * @return the formatted failure message
 */
private static String badElementIndex(int index, int size, String desc) {
    if (index < 0) {
        return lenientFormat("%s (%s) must not be negative", desc, index);
    }
    if (size < 0) {
        throw new IllegalArgumentException("negative size: " + size);
    }
    // Reaching here means index >= size.
    return lenientFormat("%s (%s) must be less than size (%s)", desc, index, size);
}
|
Ensures that {@code index} specifies a valid <i>element</i> in an array, list or string of size
{@code size}. An element index may range from zero, inclusive, to {@code size}, exclusive.
@param index a user-supplied index identifying an element of an array, list or string
@param size the size of that array, list or string
@param desc the text to use to describe this index in an error message
@return the value of {@code index}
@throws IndexOutOfBoundsException if {@code index} is negative or is not less than {@code size}
@throws IllegalArgumentException if {@code size} is negative
|
java
|
android/guava/src/com/google/common/base/Preconditions.java
| 1,374
|
[
"index",
"size",
"desc"
] |
String
| true
| 3
| 7.76
|
google/guava
| 51,352
|
javadoc
| false
|
toString
|
/**
 * Gets the {@code toString} of an {@link Object}, returning the specified text
 * if the input is {@code null}.
 * @param obj the Object to {@code toString}, may be null
 * @param nullStr the String to return on {@code null} input, may be null
 * @return {@code obj}'s toString, or {@code nullStr} if {@code obj} is null
 * @see java.util.Objects#toString(Object, String)
 * @deprecated replaced by {@code java.util.Objects.toString(Object, String)}
 */
@Deprecated
public static String toString(final Object obj, final String nullStr) {
    return Objects.toString(obj, nullStr);
}
|
Gets the {@code toString} of an {@link Object} returning
a specified text if {@code null} input.
<pre>
ObjectUtils.toString(null, null) = null
ObjectUtils.toString(null, "null") = "null"
ObjectUtils.toString("", "null") = ""
ObjectUtils.toString("bat", "null") = "bat"
ObjectUtils.toString(Boolean.TRUE, "null") = "true"
</pre>
@param obj the Object to {@code toString}, may be null.
@param nullStr the String to return if {@code null} input, may be null.
@return the passed in Object's toString, or {@code nullStr} if {@code null} input.
@see Objects#toString(Object)
@see Objects#toString(Object, String)
@see StringUtils#defaultString(String,String)
@see String#valueOf(Object)
@since 2.0
@deprecated this method has been replaced by {@code java.util.Objects.toString(Object, String)} in Java 7 and
will be removed in future releases.
|
java
|
src/main/java/org/apache/commons/lang3/ObjectUtils.java
| 1,263
|
[
"obj",
"nullStr"
] |
String
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
isEmpty
|
/**
 * Returns {@code true} only when no acknowledgements remain in any of the
 * three tracking collections (pending, incomplete, or in flight).
 */
boolean isEmpty() {
    if (!acknowledgementsToSend.isEmpty()) {
        return false;
    }
    if (!incompleteAcknowledgements.isEmpty()) {
        return false;
    }
    return inFlightAcknowledgements.isEmpty();
}
|
Timeout in milliseconds indicating how long the request would be retried if it fails with a retriable exception.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java
| 1,300
|
[] | true
| 3
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
merge
|
/**
 * Immutably merges the enumerable properties of each argument object into a
 * new object. Later arguments win on key conflicts; plain objects are merged
 * recursively, arrays are shallow-copied.
 *
 * Honors an optionally bound context (`this`) with flags:
 * - `caseless`: resolve destination keys case-insensitively (via `findKey`)
 * - `skipUndefined`: do not overwrite with `undefined` values
 *
 * @returns {Object} the merged result
 */
function merge(/* obj1, obj2, obj3, ... */) {
  const {caseless, skipUndefined} = isContextDefined(this) && this || {};
  const result = {};
  const assignValue = (val, key) => {
    // Pick an existing case-insensitive match as the destination key when
    // `caseless` is set; otherwise use the key as given.
    const targetKey = caseless && findKey(result, key) || key;
    if (isPlainObject(result[targetKey]) && isPlainObject(val)) {
      // Both sides are plain objects: deep-merge them.
      result[targetKey] = merge(result[targetKey], val);
    } else if (isPlainObject(val)) {
      // Clone so the source object is never shared with the result.
      result[targetKey] = merge({}, val);
    } else if (isArray(val)) {
      result[targetKey] = val.slice();
    } else if (!skipUndefined || !isUndefined(val)) {
      result[targetKey] = val;
    }
  }
  for (let i = 0, l = arguments.length; i < l; i++) {
    arguments[i] && forEach(arguments[i], assignValue);
  }
  return result;
}
|
Accepts varargs expecting each argument to be an object, then
immutably merges the properties of each object and returns result.
When multiple objects contain the same key the later object in
the arguments list will take precedence.
Example:
```js
var result = merge({foo: 123}, {foo: 456});
console.log(result.foo); // outputs 456
```
@param {Object} obj1 Object to merge
@returns {Object} Result of all merge properties
|
javascript
|
lib/utils.js
| 344
|
[] | false
| 16
| 7.84
|
axios/axios
| 108,381
|
jsdoc
| false
|
|
add
|
/**
 * Adds a single occurrence of the specified element to this multiset.
 * <p>Refines {@link java.util.Collection#add}: a successful call must always
 * increment the count of the element, and the overall size, by one.
 * @param element the element to add one occurrence of
 * @return {@code true} always, since this call is required to modify the multiset
 */
@CanIgnoreReturnValue
@Override
boolean add(@ParametricNullness E element);
|
Adds a single occurrence of the specified element to this multiset.
<p>This method refines {@link Collection#add}, which only <i>ensures</i> the presence of the
element, to further specify that a successful call must always increment the count of the
element, and the overall size of the collection, by one.
<p>To both add the element and obtain the previous count of that element, use {@link
#add(Object, int) add}{@code (element, 1)} instead.
@param element the element to add one occurrence of; may be null only if explicitly allowed by
the implementation
@return {@code true} always, since this call is required to modify the multiset, unlike other
{@link Collection} types
@throws NullPointerException if {@code element} is null and this implementation does not permit
null elements
@throws IllegalArgumentException if {@link Integer#MAX_VALUE} occurrences of {@code element}
are already contained in this multiset
|
java
|
android/guava/src/com/google/common/collect/Multiset.java
| 159
|
[
"element"
] | true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
|
close
|
/**
 * Releases the resources held by this instance and adjusts the circuit
 * breaker accordingly. Idempotent: subsequent calls are no-ops.
 */
@Override
public void close() {
    if (closed) {
        return;
    }
    closed = true;
    arrays.adjustBreaker(-SHALLOW_SIZE);
    Releasables.close(weight, mean, tempWeight, tempMean, order);
}
|
Merges any pending inputs and compresses the data down to the public setting.
Note that this typically loses a bit of precision and thus isn't a thing to
be doing all the time. It is best done only when we want to show results to
the outside world.
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/MergingDigest.java
| 624
|
[] |
void
| true
| 2
| 7.04
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
resolveConfigurationMetadata
|
/**
 * Resolves previously generated configuration metadata for the given type from
 * the compilation classpath.
 * <p>Returns {@code null} when the resource is missing or unreadable: the
 * broad catch is a deliberate best-effort lookup, treating any failure as
 * "no metadata available".
 * @param type the type to resolve metadata for
 * @return the metadata, or {@code null} if none could be read
 */
private ConfigurationMetadata resolveConfigurationMetadata(TypeElement type) {
    try {
        String sourceLocation = MetadataStore.SOURCE_METADATA_PATH.apply(type, this.typeUtils);
        FileObject resource = this.processingEnvironment.getFiler()
            .getResource(StandardLocation.CLASS_PATH, "", sourceLocation);
        return (resource != null) ? new JsonMarshaller().read(resource.openInputStream()) : null;
    }
    catch (Exception ex) {
        // Best effort: any failure simply resolves to "no metadata".
        return null;
    }
}
|
Resolve the {@link SourceMetadata} for the specified type. If the type has no
source metadata, return an {@link SourceMetadata#EMPTY} source.
@param typeElement the type to discover source metadata from
@return the source metadata for the specified type
|
java
|
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/ConfigurationPropertiesSourceResolver.java
| 66
|
[
"type"
] |
ConfigurationMetadata
| true
| 3
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
valueOrThrow
|
/**
 * Looks up {@code key} in the given version-range map, failing loudly when absent.
 * @param key the label to look up
 * @param versionRangeMap the map of labels to version values
 * @return the value stored under {@code key}
 * @throws IllegalArgumentException if {@code key} is absent from the map
 */
public static short valueOrThrow(String key, Map<String, Short> versionRangeMap) {
    final Short value = versionRangeMap.get(key);
    if (value != null) {
        return value;
    }
    throw new IllegalArgumentException(String.format("%s absent in [%s]", key, mapToString(versionRangeMap)));
}
|
Raises an exception unless the following condition is met:
minValue >= 0 and maxValue >= 0 and maxValue >= minValue.
@param minKeyLabel Label for the min version key, that's used only to convert to/from a map.
@param minValue The minimum version value.
@param maxKeyLabel Label for the max version key, that's used only to convert to/from a map.
@param maxValue The maximum version value.
@throws IllegalArgumentException If any of the following conditions are true:
- (minValue < 0) OR (maxValue < 0) OR (maxValue < minValue).
- minKeyLabel is empty, OR, minKeyLabel is empty.
|
java
|
clients/src/main/java/org/apache/kafka/common/feature/BaseVersionRange.java
| 131
|
[
"key",
"versionRangeMap"
] | true
| 2
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
removeAllOccurrences
|
/**
 * Removes all occurrences of the specified element from the given char array.
 * <p>Delegates to {@code indexesOf} to locate the positions and to
 * {@code removeAt} to build the result; the input array is not modified.
 * All subsequent elements are shifted left accordingly.
 * @param array the input array, will not be modified, may be {@code null}
 * @param element the element to remove
 * @return a new array containing the existing elements except the occurrences
 *         of the specified element
 */
public static char[] removeAllOccurrences(final char[] array, final char element) {
    return (char[]) removeAt(array, indexesOf(array, element));
}
|
Removes the occurrences of the specified element from the specified char array.
<p>
All subsequent elements are shifted to the left (subtracts one from their indices).
If the array doesn't contain such an element, no elements are removed from the array.
{@code null} will be returned if the input array is {@code null}.
</p>
@param array the input array, will not be modified, and may be {@code null}.
@param element the element to remove.
@return A new array containing the existing elements except the occurrences of the specified element.
@since 3.10
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 5,470
|
[
"array",
"element"
] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
_recursive_set_fill_value
|
def _recursive_set_fill_value(fillvalue, dt):
"""
Create a fill value for a structured dtype.
Parameters
----------
fillvalue : scalar or array_like
Scalar or array representing the fill value. If it is of shorter
length than the number of fields in dt, it will be resized.
dt : dtype
The structured dtype for which to create the fill value.
Returns
-------
val : tuple
A tuple of values corresponding to the structured fill value.
"""
fillvalue = np.resize(fillvalue, len(dt.names))
output_value = []
for (fval, name) in zip(fillvalue, dt.names):
cdtype = dt[name]
if cdtype.subdtype:
cdtype = cdtype.subdtype[0]
if cdtype.names is not None:
output_value.append(tuple(_recursive_set_fill_value(fval, cdtype)))
else:
output_value.append(np.array(fval, dtype=cdtype).item())
return tuple(output_value)
|
Create a fill value for a structured dtype.
Parameters
----------
fillvalue : scalar or array_like
Scalar or array representing the fill value. If it is of shorter
length than the number of fields in dt, it will be resized.
dt : dtype
The structured dtype for which to create the fill value.
Returns
-------
val : tuple
A tuple of values corresponding to the structured fill value.
|
python
|
numpy/ma/core.py
| 423
|
[
"fillvalue",
"dt"
] | false
| 5
| 6.08
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
isUnderneathClassLoader
|
/**
 * Checks whether the given ClassLoader is underneath the given parent, that
 * is, whether the parent appears somewhere in the candidate's parent chain
 * (including the implicit {@code null} root).
 * @param candidate the candidate ClassLoader to check
 * @param parent the parent ClassLoader to check for
 */
private static boolean isUnderneathClassLoader(@Nullable ClassLoader candidate, @Nullable ClassLoader parent) {
    if (candidate == parent) {
        return true;
    }
    // Walk the candidate's ancestry, comparing each ancestor to the parent.
    for (ClassLoader current = candidate; current != null; current = current.getParent()) {
        if (current.getParent() == parent) {
            return true;
        }
    }
    return false;
}
|
Check whether the given ClassLoader is underneath the given parent,
that is, whether the parent is within the candidate's hierarchy.
@param candidate the candidate ClassLoader to check
@param parent the parent ClassLoader to check for
|
java
|
spring-beans/src/main/java/org/springframework/beans/CachedIntrospectionResults.java
| 195
|
[
"candidate",
"parent"
] | true
| 5
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
job_completion
|
def job_completion(
    self, job_name: str, run_id: str, verbose: bool = False, sleep_before_return: int = 0
) -> dict[str, str]:
    """
    Wait until Glue job with job_name finishes; return final state if finished or raises AirflowException.

    Polls the job state every ``self.job_poll_interval`` seconds until
    ``_handle_state`` reports a final result.

    :param job_name: unique job name per AWS account
    :param run_id: The job-run ID of the predecessor job run
    :param verbose: If True, more Glue Job Run logs show in the Airflow Task Logs. (default: False)
    :param sleep_before_return: time in seconds to wait before returning final status.
    :return: Dict of JobRunState and JobRunId
    """
    log_tokens = self.LogContinuationTokens()
    while True:
        current_state = self.get_job_state(job_name, run_id)
        final = self._handle_state(current_state, job_name, run_id, verbose, log_tokens)
        if final:
            time.sleep(sleep_before_return)
            return final
        time.sleep(self.job_poll_interval)
|
Wait until Glue job with job_name finishes; return final state if finished or raises AirflowException.
:param job_name: unique job name per AWS account
:param run_id: The job-run ID of the predecessor job run
:param verbose: If True, more Glue Job Run logs show in the Airflow Task Logs. (default: False)
:param sleep_before_return: time in seconds to wait before returning final status.
:return: Dict of JobRunState and JobRunId
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/glue.py
| 374
|
[
"self",
"job_name",
"run_id",
"verbose",
"sleep_before_return"
] |
dict[str, str]
| true
| 3
| 8.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
insert
|
def insert(self, loc: int, item) -> Index:
"""
Make new Index inserting new item at location.
Follows Python numpy.insert semantics for negative values.
Parameters
----------
loc : int
The integer location where the new item will be inserted.
item : object
The new item to be inserted into the Index.
Returns
-------
Index
Returns a new Index object resulting from inserting the specified item at
the specified location within the original Index.
See Also
--------
Index.append : Append a collection of Indexes together.
Examples
--------
>>> idx = pd.Index(["a", "b", "c"])
>>> idx.insert(1, "x")
Index(['a', 'x', 'b', 'c'], dtype='str')
"""
item = lib.item_from_zerodim(item)
if is_valid_na_for_dtype(item, self.dtype) and self.dtype != object:
item = self._na_value
arr = self._values
if using_string_dtype() and len(self) == 0 and self.dtype == np.object_:
# special case: if we are an empty object-dtype Index, also
# take into account the inserted item for the resulting dtype
# (https://github.com/pandas-dev/pandas/pull/60797)
dtype = self._find_common_type_compat(item)
if dtype != self.dtype:
return self.astype(dtype).insert(loc, item)
try:
if isinstance(arr, ExtensionArray):
res_values = arr.insert(loc, item)
return type(self)._simple_new(res_values, name=self.name)
else:
item = self._validate_fill_value(item)
except (TypeError, ValueError, LossySetitemError):
# e.g. trying to insert an integer into a DatetimeIndex
# We cannot keep the same dtype, so cast to the (often object)
# minimal shared dtype before doing the insert.
dtype = self._find_common_type_compat(item)
if dtype == self.dtype:
# EA's might run into recursion errors if loc is invalid
raise
return self.astype(dtype).insert(loc, item)
if arr.dtype != object or not isinstance(
item, (tuple, np.datetime64, np.timedelta64)
):
# with object-dtype we need to worry about numpy incorrectly casting
# dt64/td64 to integer, also about treating tuples as sequences
# special-casing dt64/td64 https://github.com/numpy/numpy/issues/12550
casted = arr.dtype.type(item)
new_values = np.insert(arr, loc, casted)
else:
# error: No overload variant of "insert" matches argument types
# "ndarray[Any, Any]", "int", "None"
new_values = np.insert(arr, loc, None) # type: ignore[call-overload]
loc = loc if loc >= 0 else loc - 1
new_values[loc] = item
# GH#51363 stopped doing dtype inference here
out = Index(new_values, dtype=new_values.dtype, name=self.name)
return out
|
Make new Index inserting new item at location.
Follows Python numpy.insert semantics for negative values.
Parameters
----------
loc : int
The integer location where the new item will be inserted.
item : object
The new item to be inserted into the Index.
Returns
-------
Index
Returns a new Index object resulting from inserting the specified item at
the specified location within the original Index.
See Also
--------
Index.append : Append a collection of Indexes together.
Examples
--------
>>> idx = pd.Index(["a", "b", "c"])
>>> idx.insert(1, "x")
Index(['a', 'x', 'b', 'c'], dtype='str')
|
python
|
pandas/core/indexes/base.py
| 7,051
|
[
"self",
"loc",
"item"
] |
Index
| true
| 14
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
getDatabaseType
|
@Override
public final String getDatabaseType() throws IOException {
if (databaseType.get() == null) {
synchronized (databaseType) {
if (databaseType.get() == null) {
databaseType.set(MMDBUtil.getDatabaseType(databasePath));
}
}
}
return databaseType.get();
}
|
Read the database type from the database and cache it for future calls.
@return the database type
@throws IOException if an I/O exception occurs reading the database type
|
java
|
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseReaderLazyLoader.java
| 82
|
[] |
String
| true
| 3
| 8.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
resolveBeanClass
|
protected @Nullable Class<?> resolveBeanClass(RootBeanDefinition mbd, String beanName, Class<?>... typesToMatch)
throws CannotLoadBeanClassException {
try {
if (mbd.hasBeanClass()) {
return mbd.getBeanClass();
}
Class<?> beanClass = doResolveBeanClass(mbd, typesToMatch);
if (mbd.hasBeanClass()) {
mbd.prepareMethodOverrides();
}
return beanClass;
}
catch (ClassNotFoundException ex) {
throw new CannotLoadBeanClassException(mbd.getResourceDescription(), beanName, mbd.getBeanClassName(), ex);
}
catch (LinkageError err) {
throw new CannotLoadBeanClassException(mbd.getResourceDescription(), beanName, mbd.getBeanClassName(), err);
}
catch (BeanDefinitionValidationException ex) {
throw new BeanDefinitionStoreException(mbd.getResourceDescription(),
beanName, "Validation of method overrides failed", ex);
}
}
|
Resolve the bean class for the specified bean definition,
resolving a bean class name into a Class reference (if necessary)
and storing the resolved Class in the bean definition for further use.
@param mbd the merged bean definition to determine the class for
@param beanName the name of the bean (for error handling purposes)
@param typesToMatch the types to match in case of internal type matching purposes
(also signals that the returned {@code Class} will never be exposed to application code)
@return the resolved bean class (or {@code null} if none)
@throws CannotLoadBeanClassException if we failed to load the class
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
| 1,557
|
[
"mbd",
"beanName"
] | true
| 6
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
withAlias
|
public PemSslStoreDetails withAlias(@Nullable String alias) {
return new PemSslStoreDetails(this.type, alias, this.password, this.certificates, this.privateKey,
this.privateKeyPassword);
}
|
Return a new {@link PemSslStoreDetails} instance with a new alias.
@param alias the new alias
@return a new {@link PemSslStoreDetails} instance
@since 3.2.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/pem/PemSslStoreDetails.java
| 101
|
[
"alias"
] |
PemSslStoreDetails
| true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.