function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
createNetlifyUrl
|
/**
 * Builds the final Netlify Image CDN URL for the given loader configuration.
 *
 * @param config Image loader configuration (source, width, loader params, ...).
 * @param path Optional absolute URL of the Netlify site. When omitted, a fake
 *     "a" host is used so that a relative URL can be produced at the end.
 * @returns The image URL; relative when no site path was provided.
 */
function createNetlifyUrl(config: ImageLoaderConfig, path?: string) {
  // Fall back to a fake origin so that `new URL()` always succeeds; the fake
  // "a" host is stripped off again before returning.
  const netlifyUrl = new URL(path ?? 'https://a/');
  netlifyUrl.pathname = '/.netlify/images';

  // The CDN expects a site-relative source to start with a slash, so
  // normalize relative sources accordingly (mutates `config.src`, as before).
  if (!isAbsoluteUrl(config.src) && !config.src.startsWith('/')) {
    config.src = '/' + config.src;
  }
  netlifyUrl.searchParams.set('url', config.src);

  if (config.width) {
    netlifyUrl.searchParams.set('w', config.width.toString());
  }

  // Placeholder images are requested at a low quality to reduce load time,
  // unless the loader params explicitly pin a quality value.
  const explicitQuality = config.loaderParams?.['quality'] ?? config.loaderParams?.['q'];
  if (config.isPlaceholder && !explicitQuality) {
    netlifyUrl.searchParams.set('q', PLACEHOLDER_QUALITY);
  }

  // Copy over every supported loader param; warn (dev mode only) on
  // unsupported ones.
  for (const [param, value] of Object.entries(config.loaderParams ?? {})) {
    if (validParams.has(param)) {
      netlifyUrl.searchParams.set(validParams.get(param)!, value.toString());
    } else if (ngDevMode) {
      console.warn(
        formatRuntimeError(
          RuntimeErrorCode.INVALID_LOADER_ARGUMENTS,
          `The Netlify image loader has detected an \`<img>\` tag with the unsupported attribute "\`${param}\`".`,
        ),
      );
    }
  }

  // The "a" hostname marks a relative URL, so remove the fake origin again.
  return netlifyUrl.hostname === 'a' ? netlifyUrl.href.replace(netlifyUrl.origin, '') : netlifyUrl.href;
}
|
Function that generates an ImageLoader for Netlify and turns it into an Angular provider.
@param path optional URL of the desired Netlify site. Defaults to the current site.
@returns Set of providers to configure the Netlify loader.
@publicApi
|
typescript
|
packages/common/src/directives/ng_optimized_image/image_loaders/netlify_loader.ts
| 79
|
[
"config",
"path?"
] | false
| 10
| 7.28
|
angular/angular
| 99,544
|
jsdoc
| false
|
|
add_suffix
|
def add_suffix(self, suffix: str, axis: Axis | None = None) -> Self:
    """
    Append ``suffix`` to each axis label.

    For a Series the row labels are suffixed; for a DataFrame the column
    labels are suffixed (unless ``axis`` selects a different axis).

    Parameters
    ----------
    suffix : str
        The string appended after each label.
    axis : {0 or 'index', 1 or 'columns', None}, default None
        Axis whose labels receive the suffix. ``None`` uses the object's
        info axis (rows for Series, columns for DataFrame).

        .. versionadded:: 2.0.0

    Returns
    -------
    Series or DataFrame
        A new object with updated labels.

    See Also
    --------
    Series.add_prefix : Prefix row labels with string `prefix`.
    DataFrame.add_prefix : Prefix column labels with string `prefix`.

    Examples
    --------
    >>> s = pd.Series([1, 2, 3, 4])
    >>> s.add_suffix("_item")
    0_item    1
    1_item    2
    2_item    3
    3_item    4
    dtype: int64

    >>> df = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]})
    >>> df.add_suffix("_col")
         A_col  B_col
    0       1       3
    1       2       4
    2       3       5
    3       4       6
    """
    # Default to the info axis (index for Series, columns for DataFrame)
    # unless the caller picked an explicit axis.
    if axis is None:
        target_axis = self._info_axis_name
    else:
        target_axis = self._get_axis_name(axis)

    def _append_suffix(label) -> str:
        return f"{label}{suffix}"

    # error: Keywords must be strings
    # error: No overload variant of "_rename" of "NDFrame" matches argument
    # type "dict[Literal['index', 'columns'], Callable[[Any], str]]"
    return self._rename(**{target_axis: _append_suffix})  # type: ignore[call-overload, misc]
|
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
axis : {0 or 'index', 1 or 'columns', None}, default None
Axis to add suffix on
.. versionadded:: 2.0.0
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_suffix("_item")
0_item 1
1_item 2
2_item 3
3_item 4
dtype: int64
>>> df = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix("_col")
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
|
python
|
pandas/core/generic.py
| 4,774
|
[
"self",
"suffix",
"axis"
] |
Self
| true
| 2
| 8.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
transformedBeanName
|
/**
 * Return the bean name, stripping out the factory dereference prefix
 * if necessary, and resolving aliases to canonical names.
 * @param name the user-specified name
 * @return the transformed bean name
 */
protected String transformedBeanName(String name) {
	String nameWithoutFactoryPrefix = BeanFactoryUtils.transformedBeanName(name);
	return canonicalName(nameWithoutFactoryPrefix);
}
|
Return the bean name, stripping out the factory dereference prefix if necessary,
and resolving aliases to canonical names.
@param name the user-specified name
@return the transformed bean name
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
| 1,271
|
[
"name"
] |
String
| true
| 1
| 6.64
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
idxmin
|
def idxmin(
    self, axis: Axis = 0, skipna: bool = True, numeric_only: bool = False
) -> Series:
    """
    Return index of first occurrence of minimum over requested axis.

    NA/null values are excluded.

    Parameters
    ----------
    axis : {0 or 'index', 1 or 'columns'}, default 0
        The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
    skipna : bool, default True
        Exclude NA/null values. If the entire DataFrame is NA,
        or if ``skipna=False`` and there is an NA value, this method
        will raise a ``ValueError``.
    numeric_only : bool, default False
        Include only `float`, `int` or `boolean` data.

    Returns
    -------
    Series
        Indexes of minima along the specified axis.

    Raises
    ------
    ValueError
        * If the row/column is empty

    See Also
    --------
    Series.idxmin : Return index of the minimum element.

    Notes
    -----
    This method is the DataFrame version of ``ndarray.argmin``.

    Examples
    --------
    Consider a dataset containing food consumption in Argentina.

    >>> df = pd.DataFrame(
    ...     {
    ...         "consumption": [10.51, 103.11, 55.48],
    ...         "co2_emissions": [37.2, 19.66, 1712],
    ...     },
    ...     index=["Pork", "Wheat Products", "Beef"],
    ... )
    >>> df
                    consumption  co2_emissions
    Pork                  10.51          37.20
    Wheat Products       103.11          19.66
    Beef                  55.48        1712.00

    By default, it returns the index for the minimum value in each column.

    >>> df.idxmin()
    consumption                Pork
    co2_emissions    Wheat Products
    dtype: object

    To return the index for the minimum value in each row, use ``axis="columns"``.

    >>> df.idxmin(axis="columns")
    Pork              consumption
    Wheat Products    co2_emissions
    Beef              consumption
    dtype: object
    """
    axis = self._get_axis_number(axis)

    # Empty frame with a non-empty axis: return an empty Series that keeps
    # the dtype of that axis's labels.
    if self.empty and len(self.axes[axis]):
        axis_dtype = self.axes[axis].dtype
        return self._constructor_sliced(dtype=axis_dtype)

    if numeric_only:
        data = self._get_numeric_data()
    else:
        data = self

    res = data._reduce(
        nanops.nanargmin, "argmin", axis=axis, skipna=skipna, numeric_only=False
    )
    indices = res._values
    # indices will always be an np.ndarray here since axis is not None.
    # nanargmin signals "no valid value" with -1; translate that into the
    # documented ValueError.
    if (indices == -1).any():
        if skipna:
            msg = "Encountered all NA values"
        else:
            msg = "Encountered an NA values with skipna=False"
        raise ValueError(msg)

    # Map positional argmin results back to the axis labels.
    index = data._get_axis(axis)
    result = algorithms.take(
        index._values, indices, allow_fill=True, fill_value=index._na_value
    )
    final_result = data._constructor_sliced(result, index=data._get_agg_axis(axis))
    return final_result.__finalize__(self, method="idxmin")
|
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {{0 or 'index', 1 or 'columns'}}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
skipna : bool, default True
Exclude NA/null values. If the entire DataFrame is NA,
or if ``skipna=False`` and there is an NA value, this method
will raise a ``ValueError``.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
Returns
-------
Series
Indexes of minima along the specified axis.
Raises
------
ValueError
* If the row/column is empty
See Also
--------
Series.idxmin : Return index of the minimum element.
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
Examples
--------
Consider a dataset containing food consumption in Argentina.
>>> df = pd.DataFrame(
...     {
...         "consumption": [10.51, 103.11, 55.48],
...         "co2_emissions": [37.2, 19.66, 1712],
...     },
... index=["Pork", "Wheat Products", "Beef"],
... )
>>> df
consumption co2_emissions
Pork 10.51 37.20
Wheat Products 103.11 19.66
Beef 55.48 1712.00
By default, it returns the index for the minimum value in each column.
>>> df.idxmin()
consumption Pork
co2_emissions Wheat Products
dtype: object
To return the index for the minimum value in each row, use ``axis="columns"``.
>>> df.idxmin(axis="columns")
Pork consumption
Wheat Products co2_emissions
Beef consumption
dtype: object
|
python
|
pandas/core/frame.py
| 14,096
|
[
"self",
"axis",
"skipna",
"numeric_only"
] |
Series
| true
| 8
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
append
|
def append(self, other):
    """
    Append a collection of Index options together.

    The `append` method is used to combine multiple `Index` objects into a single
    `Index`. This is particularly useful when dealing with multi-level indexing
    (MultiIndex) where you might need to concatenate different levels of indices.
    The method handles the alignment of the levels and codes of the indices being
    appended to ensure consistency in the resulting `MultiIndex`.

    Parameters
    ----------
    other : Index or list/tuple of indices
        Index or list/tuple of Index objects to be appended.

    Returns
    -------
    Index
        The combined index. A MultiIndex when every appended index is a
        MultiIndex with at least as many levels as this one; otherwise a
        flat Index of tuples.

    See Also
    --------
    MultiIndex: A multi-level, or hierarchical, index object for pandas objects.
    Index.append : Append a collection of Index options together.
    concat : Concatenate pandas objects along a particular axis.

    Examples
    --------
    >>> mi = pd.MultiIndex.from_arrays([["a"], ["b"]])
    >>> mi
    MultiIndex([('a', 'b')],
               )
    >>> mi.append(mi)
    MultiIndex([('a', 'b'), ('a', 'b')],
               )
    """
    # Normalize the argument to a list of indexes.
    if not isinstance(other, (list, tuple)):
        other = [other]

    # Fast path: every appended index is a MultiIndex that is at least as
    # deep as self, so the result can stay a MultiIndex.
    if all(
        (isinstance(o, MultiIndex) and o.nlevels >= self.nlevels) for o in other
    ):
        codes = []
        levels = []
        names = []
        for i in range(self.nlevels):
            # Union of this level's values across all appended indexes.
            level_values = self.levels[i]
            for mi in other:
                level_values = level_values.union(mi.levels[i])
            # Re-map every index's codes onto the combined level values so
            # the concatenated codes stay consistent.
            level_codes = [
                recode_for_categories(
                    mi.codes[i], mi.levels[i], level_values, copy=False
                )
                for mi in ([self, *other])
            ]
            # Keep the level name only when all indexes agree on it.
            level_name = self.names[i]
            if any(mi.names[i] != level_name for mi in other):
                level_name = None
            codes.append(np.concatenate(level_codes))
            levels.append(level_values)
            names.append(level_name)
        return MultiIndex(
            codes=codes, levels=levels, names=names, verify_integrity=False
        )

    # Fallback: flatten everything to tuples and try to rebuild a MultiIndex.
    to_concat = (self._values,) + tuple(k._values for k in other)
    new_tuples = np.concatenate(to_concat)
    try:
        # We only get here if other contains at least one index with tuples,
        # setting names to None automatically
        return MultiIndex.from_tuples(new_tuples)
    except (TypeError, IndexError):
        return Index(new_tuples)
|
Append a collection of Index options together.
The `append` method is used to combine multiple `Index` objects into a single
`Index`. This is particularly useful when dealing with multi-level indexing
(MultiIndex) where you might need to concatenate different levels of indices.
The method handles the alignment of the levels and codes of the indices being
appended to ensure consistency in the resulting `MultiIndex`.
Parameters
----------
other : Index or list/tuple of indices
Index or list/tuple of Index objects to be appended.
Returns
-------
Index
The combined index.
See Also
--------
MultiIndex: A multi-level, or hierarchical, index object for pandas objects.
Index.append : Append a collection of Index options together.
concat : Concatenate pandas objects along a particular axis.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([["a"], ["b"]])
>>> mi
MultiIndex([('a', 'b')],
)
>>> mi.append(mi)
MultiIndex([('a', 'b'), ('a', 'b')],
)
|
python
|
pandas/core/indexes/multi.py
| 2,377
|
[
"self",
"other"
] | false
| 7
| 7.44
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
label_ranking_average_precision_score
|
def label_ranking_average_precision_score(y_true, y_score, *, sample_weight=None):
    """Compute ranking-based average precision.

    Label ranking average precision (LRAP) averages, over each ground-truth
    label assigned to each sample, the ratio of true vs. total labels with a
    lower score. It is used in multilabel ranking problems, where the goal is
    to rank the labels associated with each sample higher. The score is
    always strictly greater than 0, and the best value is 1.

    Read more in the :ref:`User Guide <label_ranking_average_precision>`.

    Parameters
    ----------
    y_true : {array-like, sparse matrix} of shape (n_samples, n_labels)
        True binary labels in binary indicator format.

    y_score : array-like of shape (n_samples, n_labels)
        Target scores, can either be probability estimates of the positive
        class, confidence values, or non-thresholded measure of decisions
        (as returned by "decision_function" on some classifiers).
        For :term:`decision_function` scores, values greater than or equal to
        zero should indicate the positive class.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

        .. versionadded:: 0.20

    Returns
    -------
    score : float
        Ranking-based average precision score.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import label_ranking_average_precision_score
    >>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
    >>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
    >>> label_ranking_average_precision_score(y_true, y_score)
    0.416
    """
    check_consistent_length(y_true, y_score, sample_weight)
    y_true = check_array(y_true, ensure_2d=False, accept_sparse="csr")
    y_score = check_array(y_score, ensure_2d=False)

    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")

    # Handle badly formatted arrays and the degenerate one-label case.
    y_type = type_of_target(y_true, input_name="y_true")
    is_binary_2d = y_type == "binary" and y_true.ndim == 2
    if y_type != "multilabel-indicator" and not is_binary_2d:
        raise ValueError("{0} format is not supported".format(y_type))

    # Work on a CSR matrix so relevant labels per sample are cheap to find.
    if not issparse(y_true):
        y_true = csr_matrix(y_true)
    # Negate so that higher original scores get lower (better) ranks.
    y_score = -y_score

    n_samples, n_labels = y_true.shape
    total = 0.0
    for sample_idx in range(n_samples):
        start = y_true.indptr[sample_idx]
        stop = y_true.indptr[sample_idx + 1]
        relevant = y_true.indices[start:stop]

        if relevant.size in (0, n_labels):
            # All labels relevant or none relevant: ranking is meaningless,
            # so this sample contributes a perfect score of 1.
            sample_score = 1.0
        else:
            scores_i = y_score[sample_idx]
            rank = rankdata(scores_i, "max")[relevant]
            L = rankdata(scores_i[relevant], "max")
            sample_score = (L / rank).mean()

        if sample_weight is not None:
            sample_score = sample_score * sample_weight[sample_idx]
        total += sample_score

    denominator = n_samples if sample_weight is None else np.sum(sample_weight)
    return float(total / denominator)
|
Compute ranking-based average precision.
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : {array-like, sparse matrix} of shape (n_samples, n_labels)
True binary labels in binary indicator format.
y_score : array-like of shape (n_samples, n_labels)
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
For :term:`decision_function` scores, values greater than or equal to
zero should indicate the positive class.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
.. versionadded:: 0.20
Returns
-------
score : float
Ranking-based average precision score.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score)
0.416
|
python
|
sklearn/metrics/_ranking.py
| 1,320
|
[
"y_true",
"y_score",
"sample_weight"
] | false
| 13
| 7.12
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
setupWebStorage
|
/**
 * Exposes the Web Storage API (`Storage`, `localStorage`, `sessionStorage`)
 * lazily on `globalThis`, unless browser globals are disabled by the
 * embedder or `--experimental-webstorage` is not enabled.
 */
function setupWebStorage() {
  const webStorageEnabled =
    !getEmbedderOptions().noBrowserGlobals &&
    getOptionValue('--experimental-webstorage');
  if (!webStorageEnabled) {
    return;
  }
  // https://html.spec.whatwg.org/multipage/webstorage.html#webstorage
  exposeLazyInterfaces(globalThis, 'internal/webstorage', ['Storage']);
  defineReplaceableLazyAttribute(globalThis, 'internal/webstorage', [
    'localStorage', 'sessionStorage',
  ]);
}
|
Expose the Web Storage API on `globalThis` by lazily installing the `Storage`
interface and the `localStorage`/`sessionStorage` attributes, unless browser
globals are disabled by the embedder or the `--experimental-webstorage` flag
is not enabled.
@returns {void}
|
javascript
|
lib/internal/process/pre_execution.js
| 399
|
[] | false
| 3
| 6.96
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
min
|
def min(
    self,
    axis: Axis | None = 0,
    skipna: bool = True,
    numeric_only: bool = False,
    **kwargs,
):
    """
    Return the minimum of the values over the requested axis.

    If you want the *index* of the minimum, use ``idxmin``. This is the
    equivalent of the ``numpy.ndarray`` method ``argmin``.

    Parameters
    ----------
    axis : {index (0)}
        Axis for the function to be applied on. For `Series` this parameter
        is unused and defaults to 0. For DataFrames, specifying
        ``axis=None`` will apply the aggregation across both axes.

        .. versionadded:: 2.0.0
    skipna : bool, default True
        Exclude NA/null values when computing the result.
    numeric_only : bool, default False
        Include only float, int, boolean columns.
    **kwargs
        Additional keyword arguments to be passed to the function.

    Returns
    -------
    scalar or Series (if level specified)
        The minimum of the values in the Series.

    See Also
    --------
    numpy.min : Equivalent numpy function for arrays.
    Series.max : Return the maximum.
    Series.idxmin : Return the index of the minimum.
    Series.idxmax : Return the index of the maximum.
    DataFrame.min : Return the minimum over the requested axis.
    DataFrame.max : Return the maximum over the requested axis.

    Examples
    --------
    >>> s = pd.Series([4, 2, 0, 8], name="legs")
    >>> s.min()
    0
    """
    # Delegate to the shared NDFrame reduction implementation.
    return NDFrame.min(
        self, axis=axis, skipna=skipna, numeric_only=numeric_only, **kwargs
    )
|
Return the minimum of the values over the requested axis.
If you want the *index* of the minimum, use ``idxmin``.
This is the equivalent of the ``numpy.ndarray`` method ``argmin``.
Parameters
----------
axis : {index (0)}
Axis for the function to be applied on.
For `Series` this parameter is unused and defaults to 0.
For DataFrames, specifying ``axis=None`` will apply the aggregation
across both axes.
.. versionadded:: 2.0.0
skipna : bool, default True
Exclude NA/null values when computing the result.
numeric_only : bool, default False
Include only float, int, boolean columns.
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
scalar or Series (if level specified)
The minimum of the values in the Series.
See Also
--------
numpy.min : Equivalent numpy function for arrays.
Series.min : Return the minimum.
Series.max : Return the maximum.
Series.idxmin : Return the index of the minimum.
Series.idxmax : Return the index of the maximum.
DataFrame.min : Return the minimum over the requested axis.
DataFrame.max : Return the maximum over the requested axis.
DataFrame.idxmin : Return the index of the minimum over the requested axis.
DataFrame.idxmax : Return the index of the maximum over the requested axis.
Examples
--------
>>> idx = pd.MultiIndex.from_arrays(
... [["warm", "warm", "cold", "cold"], ["dog", "falcon", "fish", "spider"]],
... names=["blooded", "animal"],
... )
>>> s = pd.Series([4, 2, 0, 8], name="legs", index=idx)
>>> s
blooded animal
warm dog 4
falcon 2
cold fish 0
spider 8
Name: legs, dtype: int64
>>> s.min()
0
|
python
|
pandas/core/series.py
| 7,467
|
[
"self",
"axis",
"skipna",
"numeric_only"
] | true
| 1
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
reset
|
/**
 * Resets this tokenizer, giving it a new input string to parse, so the
 * same settings can be reused across multiple input lines.
 *
 * @param input the new string to tokenize, null sets no text to parse.
 * @return {@code this} instance.
 */
public StrTokenizer reset(final String input) {
    reset();
    this.chars = input != null ? input.toCharArray() : null;
    return this;
}
|
Reset this tokenizer, giving it a new input string to parse.
In this manner you can re-use a tokenizer with the same settings
on multiple input lines.
@param input the new string to tokenize, null sets no text to parse.
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrTokenizer.java
| 884
|
[
"input"
] |
StrTokenizer
| true
| 2
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
min
|
/**
 * Returns the minimum of the two values, according to the given comparator. If the values compare
 * as equal, the first is returned.
 *
 * @param a first value to compare, returned if less than or equal to b
 * @param b second value to compare.
 * @throws ClassCastException if the parameters are not <i>mutually comparable</i> using the given
 *     comparator.
 * @since 30.0
 */
@ParametricNullness
public static <T extends @Nullable Object> T min(
    @ParametricNullness T a, @ParametricNullness T b, Comparator<? super T> comparator) {
  if (comparator.compare(a, b) <= 0) {
    return a;
  }
  return b;
}
|
Returns the minimum of the two values, according to the given comparator. If the values compare
as equal, the first is returned.
<p>The recommended solution for finding the {@code minimum} of some values depends on the type
of your data and the number of elements you have. Read more in the Guava User Guide article on
<a href="https://github.com/google/guava/wiki/CollectionUtilitiesExplained#comparators">{@code
Comparators}</a>.
@param a first value to compare, returned if less than or equal to b
@param b second value to compare.
@throws ClassCastException if the parameters are not <i>mutually comparable</i> using the given
comparator.
@since 30.0
|
java
|
android/guava/src/com/google/common/collect/Comparators.java
| 241
|
[
"a",
"b",
"comparator"
] |
T
| true
| 2
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
synchronizedSortedSetMultimap
|
/**
 * Returns a synchronized (thread-safe) {@code SortedSetMultimap} backed by the specified
 * multimap.
 *
 * <p>You must follow the warnings described in {@link #synchronizedMultimap}.
 *
 * @param multimap the multimap to be wrapped
 * @return a synchronized view of the specified multimap
 */
@J2ktIncompatible // Synchronized
public static <K extends @Nullable Object, V extends @Nullable Object>
    SortedSetMultimap<K, V> synchronizedSortedSetMultimap(SortedSetMultimap<K, V> multimap) {
  SortedSetMultimap<K, V> synchronizedView = Synchronized.sortedSetMultimap(multimap, null);
  return synchronizedView;
}
|
Returns a synchronized (thread-safe) {@code SortedSetMultimap} backed by the specified
multimap.
<p>You must follow the warnings described in {@link #synchronizedMultimap}.
<p>The returned multimap will be serializable if the specified multimap is serializable.
@param multimap the multimap to be wrapped
@return a synchronized view of the specified multimap
|
java
|
android/guava/src/com/google/common/collect/Multimaps.java
| 950
|
[
"multimap"
] | true
| 1
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
|
_find_insertion_index_for_version
|
def _find_insertion_index_for_version(content: list[str], version: str) -> tuple[int, bool]:
"""Finds insertion index for the specified version from the .rst changelog content.
:param content: changelog split into separate lines
:param version: version to look for
:return: A 2-tuple. The first item indicates the insertion index, while the
second is a boolean indicating whether to append (False) or insert (True)
to the changelog.
"""
changelog_found = False
skip_next_line = False
index = 0
for index, line in enumerate(content):
if not changelog_found and line.strip() == version:
changelog_found = True
skip_next_line = True
elif not skip_next_line and line and all(char == "." for char in line):
return index - 2, changelog_found
else:
skip_next_line = False
return index, changelog_found
|
Finds insertion index for the specified version from the .rst changelog content.
:param content: changelog split into separate lines
:param version: version to look for
:return: A 2-tuple. The first item indicates the insertion index, while the
second is a boolean indicating whether to append (False) or insert (True)
to the changelog.
|
python
|
dev/breeze/src/airflow_breeze/prepare_providers/provider_documentation.py
| 987
|
[
"content",
"version"
] |
tuple[int, bool]
| true
| 8
| 7.92
|
apache/airflow
| 43,597
|
sphinx
| false
|
nullToEmpty
|
/**
 * Defensive programming technique to change a {@code null}
 * reference to an empty one.
 * <p>
 * This method returns an empty array for a {@code null} input array.
 * </p>
 * <p>
 * As a memory optimizing technique an empty array passed in will be overridden with
 * the empty {@code public static} references in this class.
 * </p>
 * @param array the array to check for {@code null} or empty.
 * @return the same array, {@code public static} empty array if {@code null} or empty input.
 * @since 2.5
 */
public static short[] nullToEmpty(final short[] array) {
    if (isEmpty(array)) {
        return EMPTY_SHORT_ARRAY;
    }
    return array;
}
|
Defensive programming technique to change a {@code null}
reference to an empty one.
<p>
This method returns an empty array for a {@code null} input array.
</p>
<p>
As a memory optimizing technique an empty array passed in will be overridden with
the empty {@code public static} references in this class.
</p>
@param array the array to check for {@code null} or empty.
@return the same array, {@code public static} empty array if {@code null} or empty input.
@since 2.5
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 4,584
|
[
"array"
] | true
| 2
| 8.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
getThreadNamePrefix
|
/**
 * Returns the current thread's name with any trailing {@code -<suffix>}
 * removed, i.e. the substring before the last '-' character
 * (or the full name if no '-' is present).
 */
private static String getThreadNamePrefix() {
	String currentName = Thread.currentThread().getName();
	int separatorIndex = currentName.lastIndexOf('-');
	if (separatorIndex < 0) {
		return currentName;
	}
	return currentName.substring(0, separatorIndex);
}
|
Returns the current thread's name with any trailing "-<n>" counter removed,
i.e. the substring before the last '-' character (or the full name if no
'-' is present).
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/DefaultListableBeanFactory.java
| 1,233
|
[] |
String
| true
| 2
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
cov
|
def cov(
    self,
    other: Series,
    min_periods: int | None = None,
    ddof: int | None = 1,
) -> float:
    """
    Compute covariance with Series, excluding missing values.

    The two `Series` objects are not required to be the same length and
    will be aligned internally before the covariance is calculated.

    Parameters
    ----------
    other : Series
        Series with which to compute the covariance.
    min_periods : int, optional
        Minimum number of observations needed to have a valid result.
    ddof : int, default 1
        Delta degrees of freedom. The divisor used in calculations
        is ``N - ddof``, where ``N`` represents the number of elements.

    Returns
    -------
    float
        Covariance between Series and other normalized by N-1
        (unbiased estimator).

    See Also
    --------
    DataFrame.cov : Compute pairwise covariance of columns.

    Examples
    --------
    >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035])
    >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198])
    >>> s1.cov(s2)
    -0.01685762652715874
    """
    # Covariance is only defined over positions present in both Series,
    # so align on the shared index first.
    aligned_self, aligned_other = self.align(other, join="inner")
    if len(aligned_self) == 0:
        return np.nan

    left_values = aligned_self.to_numpy(dtype=float, na_value=np.nan, copy=False)
    right_values = aligned_other.to_numpy(dtype=float, na_value=np.nan, copy=False)
    return nanops.nancov(
        left_values, right_values, min_periods=min_periods, ddof=ddof
    )
|
Compute covariance with Series, excluding missing values.
The two `Series` objects are not required to be the same length and
will be aligned internally before the covariance is calculated.
Parameters
----------
other : Series
Series with which to compute the covariance.
min_periods : int, optional
Minimum number of observations needed to have a valid result.
ddof : int, default 1
Delta degrees of freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
Returns
-------
float
Covariance between Series and other normalized by N-1
(unbiased estimator).
See Also
--------
DataFrame.cov : Compute pairwise covariance of columns.
Examples
--------
>>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035])
>>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198])
>>> s1.cov(s2)
-0.01685762652715874
|
python
|
pandas/core/series.py
| 2,767
|
[
"self",
"other",
"min_periods",
"ddof"
] |
float
| true
| 2
| 8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
isBefore
|
/**
 * Checks whether this range is before the specified element.
 * @param element the element to check for, null returns false.
 * @return true if this range is entirely before the specified element.
 */
public boolean isBefore(final T element) {
    return element != null && comparator.compare(element, maximum) > 0;
}
|
Checks whether this range is before the specified element.
@param element the element to check for, null returns false.
@return true if this range is entirely before the specified element.
|
java
|
src/main/java/org/apache/commons/lang3/Range.java
| 448
|
[
"element"
] | true
| 2
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
popPrompt
|
/**
 * Pop a previously pushed prompt, returning to the previous value.
 * Does nothing when no prompt has been pushed.
 * @see #pushPrompt(String)
 */
public void popPrompt() {
	if (this.prompts.isEmpty()) {
		return;
	}
	this.prompts.pop();
}
|
Pop a previously pushed prompt, returning to the previous value.
@see #pushPrompt(String)
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/shell/ShellPrompts.java
| 47
|
[] |
void
| true
| 2
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getParameterNames
|
/**
 * Deduce the parameter names for an advice method by running a fixed
 * sequence of binding steps until every argument is bound or the steps
 * are exhausted.
 * @param method the target advice method
 * @return the discovered parameter names, or {@code null} when they could
 * not all be bound and this discoverer is configured not to raise exceptions
 */
@Override
public @Nullable String @Nullable [] getParameterNames(Method method) {
	this.argumentTypes = method.getParameterTypes();
	this.numberOfRemainingUnboundArguments = this.argumentTypes.length;
	this.parameterNameBindings = new String[this.numberOfRemainingUnboundArguments];

	// A configured returning/throwing variable each requires one argument slot.
	int minimumNumberUnboundArgs = 0;
	if (this.returningName != null) {
		minimumNumberUnboundArgs++;
	}
	if (this.throwingName != null) {
		minimumNumberUnboundArgs++;
	}
	if (this.numberOfRemainingUnboundArguments < minimumNumberUnboundArgs) {
		throw new IllegalStateException(
				"Not enough arguments in method to satisfy binding of returning and throwing variables");
	}

	try {
		// Run each binding step in order; stop early once everything is bound.
		int algorithmicStep = STEP_JOIN_POINT_BINDING;
		while (this.numberOfRemainingUnboundArguments > 0 && algorithmicStep < STEP_FINISHED) {
			switch (algorithmicStep++) {
				case STEP_JOIN_POINT_BINDING -> {
					if (!maybeBindThisJoinPoint()) {
						maybeBindThisJoinPointStaticPart();
					}
				}
				case STEP_THROWING_BINDING -> maybeBindThrowingVariable();
				case STEP_ANNOTATION_BINDING -> maybeBindAnnotationsFromPointcutExpression();
				case STEP_RETURNING_BINDING -> maybeBindReturningVariable();
				case STEP_PRIMITIVE_ARGS_BINDING -> maybeBindPrimitiveArgsFromPointcutExpression();
				case STEP_THIS_TARGET_ARGS_BINDING -> maybeBindThisOrTargetOrArgsFromPointcutExpression();
				case STEP_REFERENCE_PCUT_BINDING -> maybeBindReferencePointcutParameter();
				default -> throw new IllegalStateException("Unknown algorithmic step: " + (algorithmicStep - 1));
			}
		}
	}
	catch (AmbiguousBindingException | IllegalArgumentException ex) {
		// Binding failures are either propagated or converted to a null result,
		// depending on configuration.
		if (this.raiseExceptions) {
			throw ex;
		}
		else {
			return null;
		}
	}

	if (this.numberOfRemainingUnboundArguments == 0) {
		return this.parameterNameBindings;
	}
	else {
		if (this.raiseExceptions) {
			throw new IllegalStateException("Failed to bind all argument names: " +
					this.numberOfRemainingUnboundArguments + " argument(s) could not be bound");
		}
		else {
			// convention for failing is to return null, allowing participation in a chain of responsibility
			return null;
		}
	}
}
|
Deduce the parameter names for an advice method.
<p>See the {@link AspectJAdviceParameterNameDiscoverer class-level javadoc}
for this class for details on the algorithm used.
@param method the target {@link Method}
@return the parameter names
|
java
|
spring-aop/src/main/java/org/springframework/aop/aspectj/AspectJAdviceParameterNameDiscoverer.java
| 220
|
[
"method"
] | true
| 11
| 7.36
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
shouldExtract
|
private boolean shouldExtract(ProjectGenerationRequest request, ProjectGenerationResponse response) {
if (request.isExtract()) {
return true;
}
// explicit name hasn't been provided for an archive and there is no extension
return isZipArchive(response) && request.getOutput() != null && !request.getOutput().contains(".");
}
|
Detect if the project should be extracted.
@param request the generation request
@param response the generation response
@return if the project should be extracted
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/init/ProjectGenerator.java
| 75
|
[
"request",
"response"
] | true
| 4
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
transposeHalfByte
|
public static void transposeHalfByte(int[] q, byte[] quantQueryByte) {
if (quantQueryByte.length * Byte.SIZE < 4 * q.length) {
throw new IllegalArgumentException("packed array is too small: " + quantQueryByte.length * Byte.SIZE + " < " + 4 * q.length);
}
IMPL.transposeHalfByte(q, quantQueryByte);
}
|
The idea here is to organize the query vector bits such that the first bit
of every dimension is in the first set dimensions bits, or (dimensions/8) bytes. The second,
third, and fourth bits are in the second, third, and fourth set of dimensions bits,
respectively. This allows for direct bitwise comparisons with the stored index vectors through
summing the bitwise results with the relative required bit shifts.
@param q the query vector, assumed to be half-byte quantized with values between 0 and 15
@param quantQueryByte the byte array to store the transposed query vector.
|
java
|
libs/simdvec/src/main/java/org/elasticsearch/simdvec/ESVectorUtil.java
| 414
|
[
"q",
"quantQueryByte"
] |
void
| true
| 2
| 6.72
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
getActive
|
public static @Nullable CloudPlatform getActive(@Nullable Environment environment) {
if (environment != null) {
for (CloudPlatform cloudPlatform : values()) {
if (cloudPlatform.isActive(environment)) {
return cloudPlatform;
}
}
}
return null;
}
|
Returns the active {@link CloudPlatform} or {@code null} if one is not active.
@param environment the environment
@return the {@link CloudPlatform} or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/cloud/CloudPlatform.java
| 244
|
[
"environment"
] |
CloudPlatform
| true
| 3
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
hasAtLeastOneGeoipProcessor
|
@SuppressWarnings("unchecked")
private static boolean hasAtLeastOneGeoipProcessor(
Map<String, Object> processor,
boolean downloadDatabaseOnPipelineCreation,
Map<String, PipelineConfiguration> pipelineConfigById,
Map<String, Boolean> pipelineHasGeoProcessorById
) {
if (processor == null) {
return false;
}
{
final Map<String, Object> processorConfig = (Map<String, Object>) processor.get(GEOIP_TYPE);
if (processorConfig != null) {
return downloadDatabaseOnPipelineCreation(processorConfig) == downloadDatabaseOnPipelineCreation;
}
}
{
final Map<String, Object> processorConfig = (Map<String, Object>) processor.get(IP_LOCATION_TYPE);
if (processorConfig != null) {
return downloadDatabaseOnPipelineCreation(processorConfig) == downloadDatabaseOnPipelineCreation;
}
}
return isProcessorWithOnFailureGeoIpProcessor(
processor,
downloadDatabaseOnPipelineCreation,
pipelineConfigById,
pipelineHasGeoProcessorById
)
|| isForeachProcessorWithGeoipProcessor(
processor,
downloadDatabaseOnPipelineCreation,
pipelineConfigById,
pipelineHasGeoProcessorById
)
|| isPipelineProcessorWithGeoIpProcessor(
processor,
downloadDatabaseOnPipelineCreation,
pipelineConfigById,
pipelineHasGeoProcessorById
);
}
|
Check if a processor config is a geoip processor or contains at least a geoip processor.
@param processor Processor config.
@param downloadDatabaseOnPipelineCreation Should the download_database_on_pipeline_creation of the geoip processor be true or false.
@param pipelineConfigById A Map of pipeline id to PipelineConfiguration
@param pipelineHasGeoProcessorById A Map of pipeline id to Boolean, indicating whether the pipeline references a geoip processor
(true), does not reference a geoip processor (false), or we are currently trying to figure that
out (null).
@return true if a geoip processor is found in the processor list.
|
java
|
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java
| 372
|
[
"processor",
"downloadDatabaseOnPipelineCreation",
"pipelineConfigById",
"pipelineHasGeoProcessorById"
] | true
| 6
| 7.6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
checkNotNull
|
@CanIgnoreReturnValue
public static <T> T checkNotNull(@Nullable T reference) {
if (reference == null) {
throw new NullPointerException();
}
return reference;
}
|
Ensures that an object reference passed as a parameter to the calling method is not null.
@param reference an object reference
@return the non-null reference that was validated
@throws NullPointerException if {@code reference} is null
@see Verify#verifyNotNull Verify.verifyNotNull()
|
java
|
android/guava/src/com/google/common/base/Preconditions.java
| 899
|
[
"reference"
] |
T
| true
| 2
| 7.28
|
google/guava
| 51,352
|
javadoc
| false
|
nonInternalValues
|
public Map<String, ?> nonInternalValues() {
Map<String, Object> nonInternalConfigs = new RecordingMap<>();
values.forEach((key, value) -> {
ConfigDef.ConfigKey configKey = definition.configKeys().get(key);
if (configKey == null || !configKey.internalConfig) {
nonInternalConfigs.put(key, value);
}
});
return nonInternalConfigs;
}
|
If at least one key with {@code prefix} exists, all prefixed values will be parsed and put into map.
If no value with {@code prefix} exists all unprefixed values will be returned.
<p>
This is useful if one wants to allow prefixed configs to override default ones, but wants to use either
only prefixed configs or only regular configs, but not mix them.
|
java
|
clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
| 360
|
[] | true
| 3
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
__hash__
|
def __hash__(self) -> int:
"""
Computes the hash value for the wrapped VariableTracker.
For unrealized LazyVariableTrackers, uses the hash of the original value
to avoid realizing the tracker and inserting unnecessary guards.
For all other cases, delegates to the VariableTracker's get_python_hash method.
Returns:
The hash value of the underlying variable tracker
"""
if (
isinstance(self.vt, variables.LazyVariableTracker)
and not self.vt.is_realized()
and self.vt.is_hashable()
):
return hash(self.vt.original_value())
return self.vt.get_python_hash()
|
Computes the hash value for the wrapped VariableTracker.
For unrealized LazyVariableTrackers, uses the hash of the original value
to avoid realizing the tracker and inserting unnecessary guards.
For all other cases, delegates to the VariableTracker's get_python_hash method.
Returns:
The hash value of the underlying variable tracker
|
python
|
torch/_dynamo/variables/dicts.py
| 126
|
[
"self"
] |
int
| true
| 4
| 7.44
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
cutlass_key
|
def cutlass_key() -> bytes:
"""
Compute a key representing the state of the CUTLASS library.
Note: OSS and fbcode will have different keys.
"""
if config.is_fbcode():
with (
importlib.resources.path(
"cutlass_library", "src_hash.txt"
) as resource_path,
open(resource_path) as resource_file,
):
return resource_file.read().encode()
combined_hash = hashlib.sha256()
build_code_hash([config.cuda.cutlass_dir], "", combined_hash)
return combined_hash.digest()
|
Compute a key representing the state of the CUTLASS library.
Note: OSS and fbcode will have different keys.
|
python
|
torch/_inductor/codecache.py
| 3,801
|
[] |
bytes
| true
| 2
| 7.04
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
recordsTime
|
boolean recordsTime() {
return recordsWrite() || recordsAccess();
}
|
Creates a new, empty map with the specified strategy, initial capacity and concurrency level.
|
java
|
android/guava/src/com/google/common/cache/LocalCache.java
| 360
|
[] | true
| 2
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
get
|
@SuppressWarnings("unchecked")
@Override
public @Nullable V get(@Nullable Object key) {
Object result = get(hashTable, alternatingKeysAndValues, size, 0, key);
/*
* We can't simply cast the result of `RegularImmutableMap.get` to V because of a bug in our
* nullness checker (resulting from https://github.com/jspecify/checker-framework/issues/8).
*/
if (result == null) {
return null;
} else {
return (V) result;
}
}
|
Returns a hash table for the specified keys and values, and ensures that neither keys nor
values are null. This method may update {@code alternatingKeysAndValues} if there are duplicate
keys. If so, the return value will indicate how many entries are still valid, and will also
include a {@link Builder.DuplicateKey} in case duplicate keys are not allowed now or will not
be allowed on a later {@link Builder#buildOrThrow()} call.
@param keyOffset 1 if this is the reverse direction of a BiMap, 0 otherwise.
@return an {@code Object} that is a {@code byte[]}, {@code short[]}, or {@code int[]}, the
smallest possible to fit {@code tableSize}; or an {@code Object[]} where [0] is one of
these; [1] indicates how many element pairs in {@code alternatingKeysAndValues} are valid;
and [2] is a {@link Builder.DuplicateKey} for the first duplicate key encountered.
|
java
|
android/guava/src/com/google/common/collect/RegularImmutableMap.java
| 305
|
[
"key"
] |
V
| true
| 2
| 8.24
|
google/guava
| 51,352
|
javadoc
| false
|
store
|
@Override
public void store(Writer writer, String comments) throws IOException {
StringWriter stringWriter = new StringWriter();
super.store(stringWriter, (this.omitComments ? null : comments));
String contents = stringWriter.toString();
for (String line : contents.split(EOL)) {
if (!(this.omitComments && line.startsWith("#"))) {
writer.write(line + EOL);
}
}
}
|
Construct a new {@code SortedProperties} instance with properties populated
from the supplied {@link Properties} object and honoring the supplied
{@code omitComments} flag.
<p>Default properties from the supplied {@code Properties} object will
not be copied.
@param properties the {@code Properties} object from which to copy the
initial properties
@param omitComments {@code true} if comments should be omitted when
storing properties in a file
|
java
|
spring-context-indexer/src/main/java/org/springframework/context/index/processor/SortedProperties.java
| 99
|
[
"writer",
"comments"
] |
void
| true
| 4
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
safe_sqr
|
def safe_sqr(X, *, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : {array-like, ndarray, sparse matrix}
copy : bool, default=True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
Return the element-wise square of the input.
Examples
--------
>>> from sklearn.utils import safe_sqr
>>> safe_sqr([1, 2, 3])
array([1, 4, 9])
"""
X = check_array(X, accept_sparse=["csr", "csc", "coo"], ensure_2d=False)
if sparse.issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X**2
else:
X **= 2
return X
|
Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : {array-like, ndarray, sparse matrix}
copy : bool, default=True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
Return the element-wise square of the input.
Examples
--------
>>> from sklearn.utils import safe_sqr
>>> safe_sqr([1, 2, 3])
array([1, 4, 9])
|
python
|
sklearn/utils/extmath.py
| 1,377
|
[
"X",
"copy"
] | false
| 6
| 7.52
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
infer_dtype_from_object
|
def infer_dtype_from_object(dtype) -> type:
"""
Get a numpy dtype.type-style object for a dtype object.
This methods also includes handling of the datetime64[ns] and
datetime64[ns, TZ] objects.
If no dtype can be found, we return ``object``.
Parameters
----------
dtype : dtype, type
The dtype object whose numpy dtype.type-style
object we want to extract.
Returns
-------
type
"""
if isinstance(dtype, type) and issubclass(dtype, np.generic):
# Type object from a dtype
return dtype
elif isinstance(dtype, (np.dtype, ExtensionDtype)):
# dtype object
try:
_validate_date_like_dtype(dtype)
except TypeError:
# Should still pass if we don't have a date-like
pass
if hasattr(dtype, "numpy_dtype"):
# TODO: Implement this properly
# https://github.com/pandas-dev/pandas/issues/52576
return dtype.numpy_dtype.type
return dtype.type
try:
dtype = pandas_dtype(dtype)
except TypeError:
pass
if isinstance(dtype, ExtensionDtype):
return dtype.type
elif isinstance(dtype, str):
# TODO(jreback)
# should deprecate these
if dtype in ["datetimetz", "datetime64tz"]:
return DatetimeTZDtype.type
elif dtype in ["period"]:
raise NotImplementedError
if dtype in ["datetime", "timedelta"]:
dtype += "64"
try:
return infer_dtype_from_object(getattr(np, dtype))
except (AttributeError, TypeError):
# Handles cases like _get_dtype(int) i.e.,
# Python objects that are valid dtypes
# (unlike user-defined types, in general)
#
# TypeError handles the float16 type code of 'e'
# further handle internal types
pass
return infer_dtype_from_object(np.dtype(dtype))
|
Get a numpy dtype.type-style object for a dtype object.
This methods also includes handling of the datetime64[ns] and
datetime64[ns, TZ] objects.
If no dtype can be found, we return ``object``.
Parameters
----------
dtype : dtype, type
The dtype object whose numpy dtype.type-style
object we want to extract.
Returns
-------
type
|
python
|
pandas/core/dtypes/common.py
| 1,703
|
[
"dtype"
] |
type
| true
| 10
| 6.96
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
end
|
void end(@Nullable Series series) {
if (series != null) {
this.activeSeries.pop();
append(series.closeChar);
}
}
|
End an active {@link Series} (JSON object or array).
@param series the series type being ended (must match {@link #start(Series)})
@see #start(Series)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/json/JsonValueWriter.java
| 179
|
[
"series"
] |
void
| true
| 2
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
wrap
|
public static String wrap(final String str, final int wrapLength) {
return wrap(str, wrapLength, null, false);
}
|
Wraps a single line of text, identifying words by {@code ' '}.
<p>New lines will be separated by the system property line separator.
Very long words, such as URLs will <em>not</em> be wrapped.</p>
<p>Leading spaces on a new line are stripped.
Trailing spaces are not stripped.</p>
<table border="1">
<caption>Examples</caption>
<tr>
<th>input</th>
<th>wrapLength</th>
<th>result</th>
</tr>
<tr>
<td>null</td>
<td>*</td>
<td>null</td>
</tr>
<tr>
<td>""</td>
<td>*</td>
<td>""</td>
</tr>
<tr>
<td>"Here is one line of text that is going to be wrapped after 20 columns."</td>
<td>20</td>
<td>"Here is one line of\ntext that is going\nto be wrapped after\n20 columns."</td>
</tr>
<tr>
<td>"Click here to jump to the commons website - https://commons.apache.org"</td>
<td>20</td>
<td>"Click here to jump\nto the commons\nwebsite -\nhttps://commons.apache.org"</td>
</tr>
<tr>
<td>"Click here, https://commons.apache.org, to jump to the commons website"</td>
<td>20</td>
<td>"Click here,\nhttps://commons.apache.org,\nto jump to the\ncommons website"</td>
</tr>
</table>
(assuming that '\n' is the systems line separator)
@param str the String to be word wrapped, may be null.
@param wrapLength the column to wrap the words at, less than 1 is treated as 1.
@return a line with newlines inserted, {@code null} if null input.
|
java
|
src/main/java/org/apache/commons/lang3/text/WordUtils.java
| 459
|
[
"str",
"wrapLength"
] |
String
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
field
|
public XContentBuilder field(String name, Long value) throws IOException {
return (value == null) ? nullField(name) : field(name, value.longValue());
}
|
@return the value of the "human readable" flag. When the value is equal to true,
some types of values are written in a format easier to read for a human.
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java
| 585
|
[
"name",
"value"
] |
XContentBuilder
| true
| 2
| 6.96
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
doXContentBody
|
@Override
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
super.doXContentBody(builder, params);
if (normalizationFactor > 0) {
boolean hasValue = (Double.isInfinite(normalizedValue()) || Double.isNaN(normalizedValue())) == false;
builder.field("normalized_value", hasValue ? normalizedValue() : null);
if (hasValue && format != DocValueFormat.RAW) {
builder.field("normalized_value_as_string", format.format(normalizedValue()));
}
}
return builder;
}
|
Returns the normalized value. If no normalised factor has been specified
this method will return {@link #value()}
@return the normalized value
|
java
|
modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/Derivative.java
| 81
|
[
"builder",
"params"
] |
XContentBuilder
| true
| 6
| 6.56
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
lazyLoadCp
|
function lazyLoadCp() {
if (cpFn === undefined) {
({ cpFn } = require('internal/fs/cp/cp'));
cpFn = require('util').callbackify(cpFn);
({ cpSyncFn } = require('internal/fs/cp/cp-sync'));
}
}
|
Synchronously truncates the file descriptor.
@param {number} fd
@param {number} [len]
@returns {void}
|
javascript
|
lib/fs.js
| 1,102
|
[] | false
| 2
| 6.88
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
_flop_count
|
def _flop_count(idx_contraction, inner, num_terms, size_dictionary):
"""
Computes the number of FLOPS in the contraction.
Parameters
----------
idx_contraction : iterable
The indices involved in the contraction
inner : bool
Does this contraction require an inner product?
num_terms : int
The number of terms in a contraction
size_dictionary : dict
The size of each of the indices in idx_contraction
Returns
-------
flop_count : int
The total number of FLOPS required for the contraction.
Examples
--------
>>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})
30
>>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})
60
"""
overall_size = _compute_size_by_dict(idx_contraction, size_dictionary)
op_factor = max(1, num_terms - 1)
if inner:
op_factor += 1
return overall_size * op_factor
|
Computes the number of FLOPS in the contraction.
Parameters
----------
idx_contraction : iterable
The indices involved in the contraction
inner : bool
Does this contraction require an inner product?
num_terms : int
The number of terms in a contraction
size_dictionary : dict
The size of each of the indices in idx_contraction
Returns
-------
flop_count : int
The total number of FLOPS required for the contraction.
Examples
--------
>>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})
30
>>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})
60
|
python
|
numpy/_core/einsumfunc.py
| 23
|
[
"idx_contraction",
"inner",
"num_terms",
"size_dictionary"
] | false
| 2
| 7.52
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
leaveFromCurrentSegment
|
function leaveFromCurrentSegment(analyzer, node) {
const state = CodePath.getState(analyzer.codePath);
const currentSegments = state.currentSegments;
for (let i = 0; i < currentSegments.length; ++i) {
const currentSegment = currentSegments[i];
if (currentSegment.reachable) {
analyzer.emitter.emit('onCodePathSegmentEnd', currentSegment, node);
}
}
state.currentSegments = [];
}
|
Updates the current segment with empty.
This is called at the last of functions or the program.
@param {CodePathAnalyzer} analyzer The instance.
@param {ASTNode} node The current AST node.
@returns {void}
|
javascript
|
packages/eslint-plugin-react-hooks/src/code-path-analysis/code-path-analyzer.js
| 227
|
[
"analyzer",
"node"
] | false
| 3
| 6.08
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
mergeInvalidArgumentTypeErrors
|
function mergeInvalidArgumentTypeErrors(errorList: NonUnionError[]) {
const invalidArgsError = new Map<string, InvalidArgumentTypeError>()
const result: NonUnionError[] = []
for (const error of errorList) {
if (error.kind !== 'InvalidArgumentType') {
result.push(error)
continue
}
const key = `${error.selectionPath.join('.')}:${error.argumentPath.join('.')}`
const prevError = invalidArgsError.get(key)
if (!prevError) {
invalidArgsError.set(key, error)
} else {
invalidArgsError.set(key, {
...error,
argument: {
...error.argument,
typeNames: uniqueConcat(prevError.argument.typeNames, error.argument.typeNames),
},
})
}
}
result.push(...invalidArgsError.values())
return result
}
|
Iterates over provided error list and merges all InvalidArgumentType
with matching selectionPath and argumentPath into one. For example,
if the list has an error, saying that `where.arg` does not match `Int`
and another, saying that `where.arg` does not match IntFilter, resulting
list will contain a single error for `where.arg` saying it does not
match `Int | IntFilter`
@param errorList
@returns
|
typescript
|
packages/client/src/runtime/core/errorRendering/applyUnionError.ts
| 54
|
[
"errorList"
] | false
| 4
| 6.8
|
prisma/prisma
| 44,834
|
jsdoc
| false
|
|
lazyLoadStreams
|
function lazyLoadStreams() {
if (!ReadStream) {
({ ReadStream, WriteStream } = require('internal/fs/streams'));
FileReadStream = ReadStream;
FileWriteStream = WriteStream;
}
}
|
Synchronously copies `src` to `dest`. `src` can be a file, directory, or
symlink. The contents of directories will be copied recursively.
@param {string | URL} src
@param {string | URL} dest
@param {object} [options]
@returns {void}
|
javascript
|
lib/fs.js
| 3,127
|
[] | false
| 2
| 7.44
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
divergingEpoch
|
public static Optional<FetchResponseData.EpochEndOffset> divergingEpoch(FetchResponseData.PartitionData partitionResponse) {
return partitionResponse.divergingEpoch().epoch() < 0 ? Optional.empty()
: Optional.of(partitionResponse.divergingEpoch());
}
|
Convenience method to find the size of a response.
@param version The version of the response to use.
@param partIterator The partition iterator.
@return The response size in bytes.
|
java
|
clients/src/main/java/org/apache/kafka/common/requests/FetchResponse.java
| 179
|
[
"partitionResponse"
] | true
| 2
| 8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
registerAnnotationConfigProcessors
|
public static Set<BeanDefinitionHolder> registerAnnotationConfigProcessors(
BeanDefinitionRegistry registry, @Nullable Object source) {
DefaultListableBeanFactory beanFactory = unwrapDefaultListableBeanFactory(registry);
if (beanFactory != null) {
if (!(beanFactory.getDependencyComparator() instanceof AnnotationAwareOrderComparator)) {
beanFactory.setDependencyComparator(AnnotationAwareOrderComparator.INSTANCE);
}
if (!(beanFactory.getAutowireCandidateResolver() instanceof ContextAnnotationAutowireCandidateResolver)) {
beanFactory.setAutowireCandidateResolver(new ContextAnnotationAutowireCandidateResolver());
}
}
Set<BeanDefinitionHolder> beanDefs = CollectionUtils.newLinkedHashSet(6);
if (!registry.containsBeanDefinition(CONFIGURATION_ANNOTATION_PROCESSOR_BEAN_NAME)) {
RootBeanDefinition def = new RootBeanDefinition(ConfigurationClassPostProcessor.class);
def.setSource(source);
beanDefs.add(registerPostProcessor(registry, def, CONFIGURATION_ANNOTATION_PROCESSOR_BEAN_NAME));
}
if (!registry.containsBeanDefinition(AUTOWIRED_ANNOTATION_PROCESSOR_BEAN_NAME)) {
RootBeanDefinition def = new RootBeanDefinition(AutowiredAnnotationBeanPostProcessor.class);
def.setSource(source);
beanDefs.add(registerPostProcessor(registry, def, AUTOWIRED_ANNOTATION_PROCESSOR_BEAN_NAME));
}
// Check for Jakarta Annotations support, and if present add the CommonAnnotationBeanPostProcessor.
if (JAKARTA_ANNOTATIONS_PRESENT && !registry.containsBeanDefinition(COMMON_ANNOTATION_PROCESSOR_BEAN_NAME)) {
RootBeanDefinition def = new RootBeanDefinition(CommonAnnotationBeanPostProcessor.class);
def.setSource(source);
beanDefs.add(registerPostProcessor(registry, def, COMMON_ANNOTATION_PROCESSOR_BEAN_NAME));
}
// Check for JPA support, and if present add the PersistenceAnnotationBeanPostProcessor.
if (JPA_PRESENT && !registry.containsBeanDefinition(PERSISTENCE_ANNOTATION_PROCESSOR_BEAN_NAME)) {
RootBeanDefinition def = new RootBeanDefinition();
try {
def.setBeanClass(ClassUtils.forName(PERSISTENCE_ANNOTATION_PROCESSOR_CLASS_NAME,
AnnotationConfigUtils.class.getClassLoader()));
}
catch (ClassNotFoundException ex) {
throw new IllegalStateException(
"Cannot load optional framework class: " + PERSISTENCE_ANNOTATION_PROCESSOR_CLASS_NAME, ex);
}
def.setSource(source);
beanDefs.add(registerPostProcessor(registry, def, PERSISTENCE_ANNOTATION_PROCESSOR_BEAN_NAME));
}
if (!registry.containsBeanDefinition(EVENT_LISTENER_PROCESSOR_BEAN_NAME)) {
RootBeanDefinition def = new RootBeanDefinition(EventListenerMethodProcessor.class);
def.setSource(source);
beanDefs.add(registerPostProcessor(registry, def, EVENT_LISTENER_PROCESSOR_BEAN_NAME));
}
if (!registry.containsBeanDefinition(EVENT_LISTENER_FACTORY_BEAN_NAME)) {
RootBeanDefinition def = new RootBeanDefinition(DefaultEventListenerFactory.class);
def.setSource(source);
beanDefs.add(registerPostProcessor(registry, def, EVENT_LISTENER_FACTORY_BEAN_NAME));
}
return beanDefs;
}
|
Register all relevant annotation post processors in the given registry.
@param registry the registry to operate on
@param source the configuration source element (already extracted)
that this registration was triggered from. May be {@code null}.
@return a Set of BeanDefinitionHolders, containing all bean definitions
that have actually been registered by this call
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/AnnotationConfigUtils.java
| 143
|
[
"registry",
"source"
] | true
| 13
| 7.52
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
toByteArray
|
public static byte[] toByteArray(InputStream in) throws IOException {
checkNotNull(in);
return toByteArrayInternal(in, new ArrayDeque<byte[]>(TO_BYTE_ARRAY_DEQUE_SIZE), 0);
}
|
Reads all bytes from an input stream into a byte array. Does not close the stream.
<p><b>Java 9+ users:</b> use {@code in#readAllBytes()} instead.
@param in the input stream to read from
@return a byte array containing all the bytes from the stream
@throws IOException if an I/O error occurs
|
java
|
android/guava/src/com/google/common/io/ByteStreams.java
| 240
|
[
"in"
] | true
| 1
| 6.8
|
google/guava
| 51,352
|
javadoc
| false
|
|
min
|
public static <T extends Comparable<? super T>> T min(T a, T b) {
return (a.compareTo(b) <= 0) ? a : b;
}
|
Returns the minimum of the two values. If the values compare as 0, the first is returned.
<p>The recommended solution for finding the {@code minimum} of some values depends on the type
of your data and the number of elements you have. Read more in the Guava User Guide article on
<a href="https://github.com/google/guava/wiki/CollectionUtilitiesExplained#comparators">{@code
Comparators}</a>.
@param a first value to compare, returned if less than or equal to b.
@param b second value to compare.
@throws ClassCastException if the parameters are not <i>mutually comparable</i>.
@since 30.0
|
java
|
android/guava/src/com/google/common/collect/Comparators.java
| 222
|
[
"a",
"b"
] |
T
| true
| 2
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
record
|
def record(
self,
custom_params_encoder: Callable[_P, object] | None = None,
custom_result_encoder: Callable[_P, Callable[[_R], _EncodedR]] | None = None,
) -> Callable[[Callable[_P, _R]], Callable[_P, _R]]:
"""Record a function call result with custom encoding.
This is a decorator that wraps a function to enable memoization
with custom encoding/decoding logic.
Args:
custom_params_encoder: Optional encoder for function parameters.
If None, parameters are pickled directly.
custom_result_encoder: Optional encoder factory for function results.
Takes function parameters and returns an encoder
function that converts R -> _EncodedR.
Returns:
A decorator function that can be applied to functions.
Example:
@memoizer.record(
custom_params_encoder=my_param_encoder,
custom_result_encoder=my_result_encoder_factory,
)
def expensive_function(x, y):
return x + y
"""
def wrapper(fn: Callable[_P, _R]) -> Callable[_P, _R]:
"""Wrap the function to enable memoization.
Args:
fn: The function to wrap.
Returns:
A wrapped version of the function.
"""
# If caching is disabled, return the original function unchanged
if not config.IS_CACHING_MODULE_ENABLED():
return fn
def inner(*args: _P.args, **kwargs: _P.kwargs) -> _R:
"""Call the original function and cache the result.
Args:
*args: Positional arguments to pass to the function.
**kwargs: Keyword arguments to pass to the function.
Returns:
The result of calling the original function.
"""
# Call the function to compute the result
result = fn(*args, **kwargs)
# Generate cache key from parameters
cache_key = self._make_key(custom_params_encoder, *args, **kwargs)
# Encode params for human-readable dump
if custom_params_encoder is not None:
encoded_params = custom_params_encoder(*args, **kwargs)
else:
encoded_params = {
"args": args,
"kwargs": kwargs,
}
# Encode the result if encoder is provided
if custom_result_encoder is not None:
# Get the encoder function by calling the factory with params
encoder_fn = custom_result_encoder(*args, **kwargs)
encoded_result = encoder_fn(result)
else:
encoded_result = result
# Store CacheEntry in cache
cache_entry = CacheEntry(
encoded_params=encoded_params,
encoded_result=encoded_result,
)
self._cache.insert(cache_key, cache_entry)
# Return the original result (not the encoded version)
return result
return inner
return wrapper
|
Record a function call result with custom encoding.
This is a decorator that wraps a function to enable memoization
with custom encoding/decoding logic.
Args:
custom_params_encoder: Optional encoder for function parameters.
If None, parameters are pickled directly.
custom_result_encoder: Optional encoder factory for function results.
Takes function parameters and returns an encoder
function that converts R -> _EncodedR.
Returns:
A decorator function that can be applied to functions.
Example:
@memoizer.record(
custom_params_encoder=my_param_encoder,
custom_result_encoder=my_result_encoder_factory,
)
def expensive_function(x, y):
return x + y
|
python
|
torch/_inductor/runtime/caching/interfaces.py
| 425
|
[
"self",
"custom_params_encoder",
"custom_result_encoder"
] |
Callable[[Callable[_P, _R]], Callable[_P, _R]]
| true
| 6
| 9.12
|
pytorch/pytorch
| 96,034
|
google
| false
|
get_dag_prefix
|
def get_dag_prefix(performance_dag_conf: dict[str, str]) -> str:
"""
Return DAG prefix.
Returns prefix that will be assigned to DAGs created with given performance DAG configuration.
:param performance_dag_conf: dict with environment variables as keys and their values as values
:return: final form of prefix after substituting inappropriate characters
:rtype: str
"""
dag_prefix = get_performance_dag_environment_variable(performance_dag_conf, "PERF_DAG_PREFIX")
safe_dag_prefix = safe_dag_id(dag_prefix)
return safe_dag_prefix
|
Return DAG prefix.
Returns prefix that will be assigned to DAGs created with given performance DAG configuration.
:param performance_dag_conf: dict with environment variables as keys and their values as values
:return: final form of prefix after substituting inappropriate characters
:rtype: str
|
python
|
performance/src/performance_dags/performance_dag/performance_dag_utils.py
| 449
|
[
"performance_dag_conf"
] |
str
| true
| 1
| 6.4
|
apache/airflow
| 43,597
|
sphinx
| false
|
det
|
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way to represent the determinant, more suitable
for large matrices where underflow/overflow may occur.
scipy.linalg.det : Similar function in SciPy.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine ``z/dgetrf``.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> import numpy as np
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0 # may vary
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
r = r.astype(result_t, copy=False)
return r
|
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way to represent the determinant, more suitable
for large matrices where underflow/overflow may occur.
scipy.linalg.det : Similar function in SciPy.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine ``z/dgetrf``.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> import numpy as np
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0 # may vary
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
|
python
|
numpy/linalg/_linalg.py
| 2,357
|
[
"a"
] | false
| 2
| 7.52
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
applyNonNull
|
public static <T, U, R, E1 extends Throwable, E2 extends Throwable> R applyNonNull(final T value1,
final FailableFunction<? super T, ? extends U, E1> mapper1, final FailableFunction<? super U, ? extends R, E2> mapper2) throws E1, E2 {
return applyNonNull(applyNonNull(value1, mapper1), mapper2);
}
|
Applies values to a chain of functions, where a {@code null} can short-circuit each step. A function is only applied if the previous value is not
{@code null}, otherwise this method returns {@code null}.
<pre>{@code
Failable.applyNonNull(" a ", String::toUpperCase, String::trim) = "A"
Failable.applyNonNull(null, String::toUpperCase, String::trim) = null
Failable.applyNonNull(" a ", s -> null, String::trim) = null
Failable.applyNonNull(" a ", String::toUpperCase, s -> null) = null
}</pre>
<p>
Useful when working with expressions that may return {@code null} as it allows a single-line expression without using temporary local variables or
evaluating expressions twice. Provides an alternative to using {@link Optional} that is shorter and has less allocation.
</p>
@param <T> The type of the input of this method and the first function.
@param <U> The type of the result of the first function and the input to the second function.
@param <R> The type of the result of the second function and this method.
@param <E1> The type of thrown exception or error by the first function.
@param <E2> The type of thrown exception or error by the second function.
@param value1 The value to apply the functions to, may be {@code null}.
@param mapper1 The first function to apply, must not be {@code null}.
@param mapper2 The second function to apply, must not be {@code null}.
@return The result of the final function (which may be {@code null}) or {@code null} if the input value or any intermediate value is {@code null}.
@throws E1 Thrown by the first function.
@throws E2 Thrown by the second function.
@see #applyNonNull(Object, FailableFunction)
@see #applyNonNull(Object, FailableFunction, FailableFunction, FailableFunction)
@since 3.19.0
|
java
|
src/main/java/org/apache/commons/lang3/function/Failable.java
| 238
|
[
"value1",
"mapper1",
"mapper2"
] |
R
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
makeHeartbeatRequestAndHandleResponse
|
private NetworkClientDelegate.UnsentRequest makeHeartbeatRequestAndHandleResponse(final long currentTimeMs) {
NetworkClientDelegate.UnsentRequest request = makeHeartbeatRequest(currentTimeMs);
return request.whenComplete((response, exception) -> {
long completionTimeMs = request.handler().completionTimeMs();
if (response != null) {
metricsManager.recordRequestLatency(response.requestLatencyMs());
onResponse((StreamsGroupHeartbeatResponse) response.responseBody(), completionTimeMs);
} else {
onFailure(exception, completionTimeMs);
}
});
}
|
A heartbeat should be sent without waiting for the heartbeat interval to expire if:
- the member is leaving the group
or
- the member is joining the group or acknowledging the assignment and for both cases there is no heartbeat request
in flight.
@return true if a heartbeat should be sent before the interval expires, false otherwise
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsGroupHeartbeatRequestManager.java
| 494
|
[
"currentTimeMs"
] | true
| 2
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
_use_interchange_protocol
|
def _use_interchange_protocol(X):
"""Use interchange protocol for non-pandas dataframes that follow the protocol.
Note: at this point we chose not to use the interchange API on pandas dataframe
to ensure strict behavioral backward compatibility with older versions of
scikit-learn.
"""
return not is_pandas_df(X) and hasattr(X, "__dataframe__")
|
Use interchange protocol for non-pandas dataframes that follow the protocol.
Note: at this point we chose not to use the interchange API on pandas dataframe
to ensure strict behavioral backward compatibility with older versions of
scikit-learn.
|
python
|
sklearn/utils/validation.py
| 308
|
[
"X"
] | false
| 2
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
unknown
| false
|
|
markCoordinatorUnknown
|
public void markCoordinatorUnknown(final String cause, final long currentTimeMs) {
if (coordinator != null || timeMarkedUnknownMs == -1) {
timeMarkedUnknownMs = currentTimeMs;
totalDisconnectedMin = 0;
}
if (coordinator != null) {
log.info(
"Group coordinator {} is unavailable or invalid due to cause: {}. Rediscovery will be attempted.",
coordinator,
cause
);
coordinator = null;
} else {
long durationOfOngoingDisconnectMs = Math.max(0, currentTimeMs - timeMarkedUnknownMs);
long currDisconnectMin = durationOfOngoingDisconnectMs / COORDINATOR_DISCONNECT_LOGGING_INTERVAL_MS;
if (currDisconnectMin > totalDisconnectedMin) {
log.warn("Consumer has been disconnected from the group coordinator for {}ms", durationOfOngoingDisconnectMs);
totalDisconnectedMin = currDisconnectMin;
}
}
}
|
Mark the coordinator as "unknown" (i.e. {@code null}) when a disconnect is detected. This detection can occur
in one of two paths:
<ol>
<li>The coordinator was discovered, but then later disconnected</li>
<li>The coordinator has not yet been discovered and/or connected</li>
</ol>
@param cause String explanation of why the coordinator is marked unknown
@param currentTimeMs Current time in milliseconds
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManager.java
| 161
|
[
"cause",
"currentTimeMs"
] |
void
| true
| 5
| 6.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
throttle
|
function throttle(func, wait, options) {
var leading = true,
trailing = true;
if (typeof func != 'function') {
throw new TypeError(FUNC_ERROR_TEXT);
}
if (isObject(options)) {
leading = 'leading' in options ? !!options.leading : leading;
trailing = 'trailing' in options ? !!options.trailing : trailing;
}
return debounce(func, wait, {
'leading': leading,
'maxWait': wait,
'trailing': trailing
});
}
|
Creates a throttled function that only invokes `func` at most once per
every `wait` milliseconds. The throttled function comes with a `cancel`
method to cancel delayed `func` invocations and a `flush` method to
immediately invoke them. Provide `options` to indicate whether `func`
should be invoked on the leading and/or trailing edge of the `wait`
timeout. The `func` is invoked with the last arguments provided to the
throttled function. Subsequent calls to the throttled function return the
result of the last `func` invocation.
**Note:** If `leading` and `trailing` options are `true`, `func` is
invoked on the trailing edge of the timeout only if the throttled function
is invoked more than once during the `wait` timeout.
If `wait` is `0` and `leading` is `false`, `func` invocation is deferred
until to the next tick, similar to `setTimeout` with a timeout of `0`.
See [David Corbacho's article](https://css-tricks.com/debouncing-throttling-explained-examples/)
for details over the differences between `_.throttle` and `_.debounce`.
@static
@memberOf _
@since 0.1.0
@category Function
@param {Function} func The function to throttle.
@param {number} [wait=0] The number of milliseconds to throttle invocations to.
@param {Object} [options={}] The options object.
@param {boolean} [options.leading=true]
Specify invoking on the leading edge of the timeout.
@param {boolean} [options.trailing=true]
Specify invoking on the trailing edge of the timeout.
@returns {Function} Returns the new throttled function.
@example
// Avoid excessively updating the position while scrolling.
jQuery(window).on('scroll', _.throttle(updatePosition, 100));
// Invoke `renewToken` when the click event is fired, but not more than once every 5 minutes.
var throttled = _.throttle(renewToken, 300000, { 'trailing': false });
jQuery(element).on('click', throttled);
// Cancel the trailing throttled invocation.
jQuery(window).on('popstate', throttled.cancel);
|
javascript
|
lodash.js
| 11,004
|
[
"func",
"wait",
"options"
] | false
| 5
| 7.2
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
withDefaultPort
|
public HostAndPort withDefaultPort(int defaultPort) {
checkArgument(isValidPort(defaultPort));
if (hasPort()) {
return this;
}
return new HostAndPort(host, defaultPort, hasBracketlessColons);
}
|
Provide a default port if the parsed string contained only a host.
<p>You can chain this after {@link #fromString(String)} to include a port in case the port was
omitted from the input string. If a port was already provided, then this method is a no-op.
@param defaultPort a port number, from [0..65535]
@return a HostAndPort instance, guaranteed to have a defined port.
|
java
|
android/guava/src/com/google/common/net/HostAndPort.java
| 249
|
[
"defaultPort"
] |
HostAndPort
| true
| 2
| 8.08
|
google/guava
| 51,352
|
javadoc
| false
|
toStringTrueFalse
|
public static String toStringTrueFalse(final Boolean bool) {
return toString(bool, TRUE, FALSE, null);
}
|
Converts a Boolean to a String returning {@code 'true'},
{@code 'false'}, or {@code null}.
<pre>
BooleanUtils.toStringTrueFalse(Boolean.TRUE) = "true"
BooleanUtils.toStringTrueFalse(Boolean.FALSE) = "false"
BooleanUtils.toStringTrueFalse(null) = null;
</pre>
@param bool the Boolean to check
@return {@code 'true'}, {@code 'false'}, or {@code null}
|
java
|
src/main/java/org/apache/commons/lang3/BooleanUtils.java
| 1,106
|
[
"bool"
] |
String
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getClass
|
public static Class<?> getClass(final ClassLoader classLoader, final String className, final boolean initialize) throws ClassNotFoundException {
// This method was re-written to avoid recursion and stack overflows found by fuzz testing.
String next = className;
int lastDotIndex = -1;
do {
try {
final Class<?> clazz = getPrimitiveClass(next);
return clazz != null ? clazz : Class.forName(toCleanName(next), initialize, classLoader);
} catch (final ClassNotFoundException ex) {
lastDotIndex = next.lastIndexOf(PACKAGE_SEPARATOR_CHAR);
if (lastDotIndex != -1) {
next = next.substring(0, lastDotIndex) + INNER_CLASS_SEPARATOR_CHAR + next.substring(lastDotIndex + 1);
}
}
} while (lastDotIndex != -1);
throw new ClassNotFoundException(next);
}
|
Gets the class represented by {@code className} using the {@code classLoader}. This implementation supports the
syntaxes "{@code java.util.Map.Entry[]}", "{@code java.util.Map$Entry[]}", "{@code [Ljava.util.Map.Entry;}", and
"{@code [Ljava.util.Map$Entry;}".
<p>
The provided class name is normalized by removing all whitespace. This is especially helpful when handling XML element values in which whitespace has not
been collapsed.
</p>
@param classLoader the class loader to use to load the class.
@param className the class name.
@param initialize whether the class must be initialized.
@return the class represented by {@code className} using the {@code classLoader}.
@throws NullPointerException if the className is null.
@throws ClassNotFoundException if the class is not found.
@throws IllegalArgumentException Thrown if the class name represents an array with more dimensions than the JVM supports, 255.
@throws IllegalArgumentException Thrown if the class name length is greater than 65,535.
@see Class#forName(String, boolean, ClassLoader)
@see <a href="https://docs.oracle.com/javase/specs/jvms/se25/html/jvms-4.html#jvms-4.4.1">JVM: Array dimension limits in JVM Specification CONSTANT_Class_info</a>
@see <a href="https://docs.oracle.com/javase/specs/jls/se25/html/jls-6.html#jls-6.7">JLS: Fully Qualified Names and Canonical Names</a>
@see <a href="https://docs.oracle.com/javase/specs/jls/se25/html/jls-13.html#jls-13.1">JLS: The Form of a Binary</a>
|
java
|
src/main/java/org/apache/commons/lang3/ClassUtils.java
| 585
|
[
"classLoader",
"className",
"initialize"
] | true
| 4
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
isCompatible
|
public boolean isCompatible(BloomFilter<T> that) {
checkNotNull(that);
return this != that
&& this.numHashFunctions == that.numHashFunctions
&& this.bitSize() == that.bitSize()
&& this.strategy.equals(that.strategy)
&& this.funnel.equals(that.funnel);
}
|
Determines whether a given Bloom filter is compatible with this Bloom filter. For two Bloom
filters to be compatible, they must:
<ul>
<li>not be the same instance
<li>have the same number of hash functions
<li>have the same bit size
<li>have the same strategy
<li>have equal funnels
</ul>
@param that The Bloom filter to check for compatibility.
@since 15.0
|
java
|
android/guava/src/com/google/common/hash/BloomFilter.java
| 244
|
[
"that"
] | true
| 5
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
fill
|
function fill(array, value, start, end) {
var length = array == null ? 0 : array.length;
if (!length) {
return [];
}
if (start && typeof start != 'number' && isIterateeCall(array, value, start)) {
start = 0;
end = length;
}
return baseFill(array, value, start, end);
}
|
Fills elements of `array` with `value` from `start` up to, but not
including, `end`.
**Note:** This method mutates `array`.
@static
@memberOf _
@since 3.2.0
@category Array
@param {Array} array The array to fill.
@param {*} value The value to fill `array` with.
@param {number} [start=0] The start position.
@param {number} [end=array.length] The end position.
@returns {Array} Returns `array`.
@example
var array = [1, 2, 3];
_.fill(array, 'a');
console.log(array);
// => ['a', 'a', 'a']
_.fill(Array(3), 2);
// => [2, 2, 2]
_.fill([4, 6, 8, 10], '*', 1, 3);
// => [4, '*', '*', 10]
|
javascript
|
lodash.js
| 7,305
|
[
"array",
"value",
"start",
"end"
] | false
| 6
| 7.68
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
visitNamedExportBindings
|
function visitNamedExportBindings(node: NamedExportBindings, allowEmpty: boolean): VisitResult<NamedExportBindings> | undefined {
return isNamespaceExport(node) ? visitNamespaceExports(node) : visitNamedExports(node, allowEmpty);
}
|
Visits named exports, eliding it if it does not contain an export specifier that
resolves to a value.
@param node The named exports node.
|
typescript
|
src/compiler/transformers/ts.ts
| 2,392
|
[
"node",
"allowEmpty"
] | true
| 2
| 6.48
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
baseKeysIn
|
function baseKeysIn(object) {
if (!isObject(object)) {
return nativeKeysIn(object);
}
var isProto = isPrototype(object),
result = [];
for (var key in object) {
if (!(key == 'constructor' && (isProto || !hasOwnProperty.call(object, key)))) {
result.push(key);
}
}
return result;
}
|
The base implementation of `_.keysIn` which doesn't treat sparse arrays as dense.
@private
@param {Object} object The object to query.
@returns {Array} Returns the array of property names.
|
javascript
|
lodash.js
| 3,544
|
[
"object"
] | false
| 5
| 6.08
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
of
|
static SslOptions of(@Nullable Set<String> ciphers, @Nullable Set<String> enabledProtocols) {
return of(toArray(ciphers), toArray(enabledProtocols));
}
|
Factory method to create a new {@link SslOptions} instance.
@param ciphers the ciphers
@param enabledProtocols the enabled protocols
@return a new {@link SslOptions} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/SslOptions.java
| 105
|
[
"ciphers",
"enabledProtocols"
] |
SslOptions
| true
| 1
| 6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getPatternForStyle
|
static String getPatternForStyle(final Integer dateStyle, final Integer timeStyle, final Locale locale) {
final Locale safeLocale = LocaleUtils.toLocale(locale);
final ArrayKey key = new ArrayKey(dateStyle, timeStyle, safeLocale);
return dateTimeInstanceCache.computeIfAbsent(key, k -> {
try {
final DateFormat formatter;
if (dateStyle == null) {
formatter = DateFormat.getTimeInstance(timeStyle.intValue(), safeLocale);
} else if (timeStyle == null) {
formatter = DateFormat.getDateInstance(dateStyle.intValue(), safeLocale);
} else {
formatter = DateFormat.getDateTimeInstance(dateStyle.intValue(), timeStyle.intValue(), safeLocale);
}
return ((SimpleDateFormat) formatter).toPattern();
} catch (final ClassCastException ex) {
throw new IllegalArgumentException("No date time pattern for locale: " + safeLocale);
}
});
}
|
Gets a date/time format for the specified styles and locale.
@param dateStyle date style: FULL, LONG, MEDIUM, or SHORT, null indicates no date in format.
@param timeStyle time style: FULL, LONG, MEDIUM, or SHORT, null indicates no time in format.
@param locale The non-null locale of the desired format.
@return a localized standard date/time format.
@throws IllegalArgumentException if the Locale has no date/time pattern defined.
|
java
|
src/main/java/org/apache/commons/lang3/time/AbstractFormatCache.java
| 104
|
[
"dateStyle",
"timeStyle",
"locale"
] |
String
| true
| 4
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
ifftshift
|
def ifftshift(x, axes=None):
"""
The inverse of `fftshift`. Although identical for even-length `x`, the
functions differ by one sample for odd-length `x`.
Parameters
----------
x : array_like
Input array.
axes : int or shape tuple, optional
Axes over which to calculate. Defaults to None, which shifts all axes.
Returns
-------
y : ndarray
The shifted array.
See Also
--------
fftshift : Shift zero-frequency component to the center of the spectrum.
Examples
--------
>>> import numpy as np
>>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3)
>>> freqs
array([[ 0., 1., 2.],
[ 3., 4., -4.],
[-3., -2., -1.]])
>>> np.fft.ifftshift(np.fft.fftshift(freqs))
array([[ 0., 1., 2.],
[ 3., 4., -4.],
[-3., -2., -1.]])
"""
x = asarray(x)
if axes is None:
axes = tuple(range(x.ndim))
shift = [-(dim // 2) for dim in x.shape]
elif isinstance(axes, integer_types):
shift = -(x.shape[axes] // 2)
else:
shift = [-(x.shape[ax] // 2) for ax in axes]
return roll(x, shift, axes)
|
The inverse of `fftshift`. Although identical for even-length `x`, the
functions differ by one sample for odd-length `x`.
Parameters
----------
x : array_like
Input array.
axes : int or shape tuple, optional
Axes over which to calculate. Defaults to None, which shifts all axes.
Returns
-------
y : ndarray
The shifted array.
See Also
--------
fftshift : Shift zero-frequency component to the center of the spectrum.
Examples
--------
>>> import numpy as np
>>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3)
>>> freqs
array([[ 0., 1., 2.],
[ 3., 4., -4.],
[-3., -2., -1.]])
>>> np.fft.ifftshift(np.fft.fftshift(freqs))
array([[ 0., 1., 2.],
[ 3., 4., -4.],
[-3., -2., -1.]])
|
python
|
numpy/fft/_helper.py
| 78
|
[
"x",
"axes"
] | false
| 4
| 7.68
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
commitSync
|
@Override
public Map<TopicIdPartition, Optional<KafkaException>> commitSync() {
return delegate.commitSync();
}
|
Commit the acknowledgements for the records returned. If the consumer is using explicit acknowledgement,
the acknowledgements to commit have been indicated using {@link #acknowledge(ConsumerRecord)} or
{@link #acknowledge(ConsumerRecord, AcknowledgeType)}. If the consumer is using implicit acknowledgement,
all the records returned by the latest call to {@link #poll(Duration)} are acknowledged.
<p>
This is a synchronous commit and will block until either the commit succeeds, an unrecoverable error is
encountered (in which case it is thrown to the caller), or the timeout specified by {@code default.api.timeout.ms}
expires.
@return A map of the results for each topic-partition for which delivery was acknowledged.
If the acknowledgement failed for a topic-partition, an exception is present.
@throws WakeupException if {@link #wakeup()} is called before or while this method is called
@throws InterruptException if the thread is interrupted while blocked
@throws KafkaException for any other unrecoverable errors
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/KafkaShareConsumer.java
| 632
|
[] | true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
sortedLastIndexOf
|
function sortedLastIndexOf(array, value) {
var length = array == null ? 0 : array.length;
if (length) {
var index = baseSortedIndex(array, value, true) - 1;
if (eq(array[index], value)) {
return index;
}
}
return -1;
}
|
This method is like `_.lastIndexOf` except that it performs a binary
search on a sorted `array`.
@static
@memberOf _
@since 4.0.0
@category Array
@param {Array} array The array to inspect.
@param {*} value The value to search for.
@returns {number} Returns the index of the matched value, else `-1`.
@example
_.sortedLastIndexOf([4, 5, 5, 5, 6], 5);
// => 3
|
javascript
|
lodash.js
| 8,174
|
[
"array",
"value"
] | false
| 4
| 7.68
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
unmodifiableMap
|
private static <K extends @Nullable Object, V extends @Nullable Object> Map<K, V> unmodifiableMap(
Map<K, ? extends V> map) {
if (map instanceof SortedMap) {
return Collections.unmodifiableSortedMap((SortedMap<K, ? extends V>) map);
} else {
return Collections.unmodifiableMap(map);
}
}
|
Computes the difference between two sorted maps, using the comparator of the left map, or
{@code Ordering.natural()} if the left map uses the natural ordering of its elements. This
difference is an immutable snapshot of the state of the maps at the time this method is called.
It will never change, even if the maps change at a later time.
<p>Since this method uses {@code TreeMap} instances internally, the keys of the right map must
all compare as distinct according to the comparator of the left map.
<p><b>Note:</b>If you only need to know whether two sorted maps have the same mappings, call
{@code left.equals(right)} instead of this method.
@param left the map to treat as the "left" map for purposes of comparison
@param right the map to treat as the "right" map for purposes of comparison
@return the difference between the two maps
@since 11.0
|
java
|
android/guava/src/com/google/common/collect/Maps.java
| 581
|
[
"map"
] | true
| 2
| 8.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
predict_proba
|
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by label of classes.
Note that in the multilabel case, each sample can have any number of
labels. This returns the marginal probability that the given sample has
the label in question. For example, it is entirely consistent that two
labels both have a 90% probability of applying to a given sample.
In the single label multiclass case, the rows of the returned matrix
sum to 1.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
Returns
-------
T : array-like of shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
"""
check_is_fitted(self)
# Y[i, j] gives the probability that sample i has the label j.
# In the multi-label case, these are not disjoint.
Y = np.array([e.predict_proba(X)[:, 1] for e in self.estimators_]).T
if len(self.estimators_) == 1:
# Only one estimator, but we still want to return probabilities
# for two classes.
Y = np.concatenate(((1 - Y), Y), axis=1)
if not self.multilabel_:
# Then, (nonzero) sample probability distributions should be normalized.
row_sums = np.sum(Y, axis=1)[:, np.newaxis]
np.divide(Y, row_sums, out=Y, where=row_sums != 0)
return Y
|
Probability estimates.
The returned estimates for all classes are ordered by label of classes.
Note that in the multilabel case, each sample can have any number of
labels. This returns the marginal probability that the given sample has
the label in question. For example, it is entirely consistent that two
labels both have a 90% probability of applying to a given sample.
In the single label multiclass case, the rows of the returned matrix
sum to 1.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
Returns
-------
T : array-like of shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
|
python
|
sklearn/multiclass.py
| 523
|
[
"self",
"X"
] | false
| 3
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
socket
|
int socket(int domain, int type, int protocol);
|
Open a file descriptor to connect to a socket.
@param domain The socket protocol family, eg AF_UNIX
@param type The socket type, eg SOCK_DGRAM
@param protocol The protocol for the given protocl family, normally 0
@return an open file descriptor, or -1 on failure with errno set
@see <a href="https://man7.org/linux/man-pages/man2/socket.2.html">socket manpage</a>
|
java
|
libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java
| 109
|
[
"domain",
"type",
"protocol"
] | true
| 1
| 6.32
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
truePredicate
|
@SuppressWarnings("unchecked")
static <T, U, E extends Throwable> FailableBiPredicate<T, U, E> truePredicate() {
return TRUE;
}
|
Gets the TRUE singleton.
@param <T> Consumed type 1.
@param <U> Consumed type 2.
@param <E> The kind of thrown exception or error.
@return The NOP singleton.
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableBiPredicate.java
| 63
|
[] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
isAssignable
|
public static boolean isAssignable(Class<?> cls, final Class<?> toClass, final boolean autoboxing) {
if (toClass == null) {
return false;
}
// have to check for null, as isAssignableFrom doesn't
if (cls == null) {
return !toClass.isPrimitive();
}
// autoboxing:
if (autoboxing) {
if (cls.isPrimitive() && !toClass.isPrimitive()) {
cls = primitiveToWrapper(cls);
if (cls == null) {
return false;
}
}
if (toClass.isPrimitive() && !cls.isPrimitive()) {
cls = wrapperToPrimitive(cls);
if (cls == null) {
return false;
}
}
}
if (cls.equals(toClass)) {
return true;
}
if (cls.isPrimitive()) {
if (!toClass.isPrimitive()) {
return false;
}
if (Integer.TYPE.equals(cls)) {
return Long.TYPE.equals(toClass) || Float.TYPE.equals(toClass) || Double.TYPE.equals(toClass);
}
if (Long.TYPE.equals(cls)) {
return Float.TYPE.equals(toClass) || Double.TYPE.equals(toClass);
}
if (Boolean.TYPE.equals(cls)) {
return false;
}
if (Double.TYPE.equals(cls)) {
return false;
}
if (Float.TYPE.equals(cls)) {
return Double.TYPE.equals(toClass);
}
if (Character.TYPE.equals(cls) || Short.TYPE.equals(cls)) {
return Integer.TYPE.equals(toClass) || Long.TYPE.equals(toClass) || Float.TYPE.equals(toClass) || Double.TYPE.equals(toClass);
}
if (Byte.TYPE.equals(cls)) {
return Short.TYPE.equals(toClass) || Integer.TYPE.equals(toClass) || Long.TYPE.equals(toClass) || Float.TYPE.equals(toClass)
|| Double.TYPE.equals(toClass);
}
// should never get here
return false;
}
return toClass.isAssignableFrom(cls);
}
|
Tests whether one {@link Class} can be assigned to a variable of another {@link Class}.
<p>
Unlike the {@link Class#isAssignableFrom(java.lang.Class)} method, this method takes into account widenings of
primitive classes and {@code null}s.
</p>
<p>
Primitive widenings allow an int to be assigned to a long, float or double. This method returns the correct result
for these cases.
</p>
<p>
{@code null} may be assigned to any reference type. This method will return {@code true} if {@code null} is passed in
and the toClass is non-primitive.
</p>
<p>
Specifically, this method tests whether the type represented by the specified {@link Class} parameter can be
converted to the type represented by this {@link Class} object via an identity conversion widening primitive or
widening reference conversion. See <em><a href="https://docs.oracle.com/javase/specs/">The Java Language
Specification</a></em>, sections 5.1.1, 5.1.2 and 5.1.4 for details.
</p>
@param cls the Class to check, may be null.
@param toClass the Class to try to assign into, returns false if null.
@param autoboxing whether to use implicit autoboxing/unboxing between primitives and wrappers.
@return {@code true} if assignment possible.
|
java
|
src/main/java/org/apache/commons/lang3/ClassUtils.java
| 1,321
|
[
"cls",
"toClass",
"autoboxing"
] | true
| 31
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
lastIndexOf
|
public static int lastIndexOf(final short[] array, final short valueToFind) {
return lastIndexOf(array, valueToFind, Integer.MAX_VALUE);
}
|
Finds the last index of the given value within the array.
<p>
This method returns {@link #INDEX_NOT_FOUND} ({@code -1}) for a {@code null} input array.
</p>
@param array the array to traverse backwards looking for the object, may be {@code null}.
@param valueToFind the object to find.
@return the last index of the value within the array, {@link #INDEX_NOT_FOUND} ({@code -1}) if not found or {@code null} array input.
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 4,175
|
[
"array",
"valueToFind"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
to_dict
|
def to_dict(self, *, prune_empty: bool = False, validate: bool = True) -> dict[str, Any]:
"""
Convert Connection to json-serializable dictionary.
:param prune_empty: Whether or not remove empty values.
:param validate: Validate dictionary is JSON-serializable
:meta private:
"""
conn = {
"conn_id": self.conn_id,
"conn_type": self.conn_type,
"description": self.description,
"host": self.host,
"login": self.login,
"password": self.password,
"schema": self.schema,
"port": self.port,
}
if prune_empty:
conn = prune_dict(val=conn, mode="strict")
if (extra := self.extra_dejson) or not prune_empty:
conn["extra"] = extra
if validate:
json.dumps(conn)
return conn
|
Convert Connection to json-serializable dictionary.
:param prune_empty: Whether or not remove empty values.
:param validate: Validate dictionary is JSON-serializable
:meta private:
|
python
|
airflow-core/src/airflow/models/connection.py
| 544
|
[
"self",
"prune_empty",
"validate"
] |
dict[str, Any]
| true
| 5
| 6.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
is_dtype
|
def is_dtype(cls, dtype: object) -> bool:
"""
Check if we match 'dtype'.
Parameters
----------
dtype : object
The object to check.
Returns
-------
bool
Notes
-----
The default implementation is True if
1. ``cls.construct_from_string(dtype)`` is an instance
of ``cls``.
2. ``dtype`` is an object and is an instance of ``cls``
3. ``dtype`` has a ``dtype`` attribute, and any of the above
conditions is true for ``dtype.dtype``.
"""
dtype = getattr(dtype, "dtype", dtype)
if isinstance(dtype, (ABCSeries, ABCIndex, ABCDataFrame, np.dtype)):
# https://github.com/pandas-dev/pandas/issues/22960
# avoid passing data to `construct_from_string`. This could
# cause a FutureWarning from numpy about failing elementwise
# comparison from, e.g., comparing DataFrame == 'category'.
return False
elif dtype is None:
return False
elif isinstance(dtype, cls):
return True
if isinstance(dtype, str):
try:
return cls.construct_from_string(dtype) is not None
except TypeError:
return False
return False
|
Check if we match 'dtype'.
Parameters
----------
dtype : object
The object to check.
Returns
-------
bool
Notes
-----
The default implementation is True if
1. ``cls.construct_from_string(dtype)`` is an instance
of ``cls``.
2. ``dtype`` is an object and is an instance of ``cls``
3. ``dtype`` has a ``dtype`` attribute, and any of the above
conditions is true for ``dtype.dtype``.
|
python
|
pandas/core/dtypes/base.py
| 300
|
[
"cls",
"dtype"
] |
bool
| true
| 5
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
__init__
|
def __init__(
self,
values: list[T],
update_op: Callable[[T, T], T],
summary_op: Callable[[T, T], T],
identity_element: T,
):
"""
Initialize a segment tree with the given values and operations.
Args:
values: list of initial values
update_op: Function to apply when updating a value (e.g., addition)
summary_op: Function to summarize two values (e.g., min, max, sum)
identity_element: Identity element for the summary_op (e.g., 0 for sum, float('inf') for min)
Raises:
ValueError: If the input values list is empty
"""
if not values:
raise ValueError("Cannot create a segment tree with empty values list")
self.n = len(values)
self.update_op = update_op
self.summary_op = summary_op
self.identity = identity_element
# Size of segment tree array (next power of 2 * 2)
# The tree follows a standard heap layout where
# node `n`'s children are at `2*n` and `2*n+1`.
# Index 0 is unused.
self.size = 1
while self.size < self.n:
self.size *= 2
self.size *= 2
# Initialize tree and lazy arrays
self.tree = [identity_element] * self.size
# The lazy array contains updates to the given node
# Upon update, we only push updates to the top-most
# nodes that fully receive the update. We then
# propagate the update down as required (i.e., when
# we receive an interval query that neither fully
# contains the node nor fully doesn't contain the
# node
self.lazy: list[Optional[T]] = [None] * self.size
# Build the tree
self._build(values, 1, 0, self.n - 1)
|
Initialize a segment tree with the given values and operations.
Args:
values: list of initial values
update_op: Function to apply when updating a value (e.g., addition)
summary_op: Function to summarize two values (e.g., min, max, sum)
identity_element: Identity element for the summary_op (e.g., 0 for sum, float('inf') for min)
Raises:
ValueError: If the input values list is empty
|
python
|
torch/_inductor/codegen/segmented_tree.py
| 13
|
[
"self",
"values",
"update_op",
"summary_op",
"identity_element"
] | true
| 3
| 6.88
|
pytorch/pytorch
| 96,034
|
google
| false
|
|
findSystemLayerModules
|
private static Set<Module> findSystemLayerModules() {
var systemModulesDescriptors = ModuleFinder.ofSystem()
.findAll()
.stream()
.map(ModuleReference::descriptor)
.collect(Collectors.toUnmodifiableSet());
return Stream.concat(
// entitlements is a "system" module, we can do anything from it
Stream.of(PolicyManager.class.getModule()),
// anything in the boot layer is also part of the system
ModuleLayer.boot()
.modules()
.stream()
.filter(
m -> systemModulesDescriptors.contains(m.getDescriptor())
&& MODULES_EXCLUDED_FROM_SYSTEM_MODULES.contains(m.getName()) == false
)
).collect(Collectors.toUnmodifiableSet());
}
|
This class contains all the entitlements by type, plus the {@link FileAccessTree} for the special case of filesystem entitlements.
<p>
We use layers when computing {@link ModuleEntitlements}; first, we check whether the module we are building it for is in the
server layer ({@link PolicyManager#SERVER_LAYER_MODULES}) (*).
If it is, we use the server policy, using the same caller class module name as the scope, and read the entitlements for that scope.
Otherwise, we use the {@code PluginResolver} to identify the correct plugin layer and find the policy for it (if any).
If the plugin is modular, we again use the same caller class module name as the scope, and read the entitlements for that scope.
If it's not, we use the single {@code ALL-UNNAMED} scope – in this case there is one scope and all entitlements apply
to all the plugin code.
</p>
<p>
(*) implementation detail: this is currently done in an indirect way: we know the module is not in the system layer
(otherwise the check would have been already trivially allowed), so we just check that the module is named, and it belongs to the
boot {@link ModuleLayer}. We might want to change this in the future to make it more consistent/easier to maintain.
</p>
@param componentName the plugin name or else one of the special component names like "(server)".
|
java
|
libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java
| 193
|
[] | true
| 2
| 6.88
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
doDifference
|
private static <K extends @Nullable Object, V extends @Nullable Object> void doDifference(
Map<? extends K, ? extends V> left,
Map<? extends K, ? extends V> right,
Equivalence<? super @NonNull V> valueEquivalence,
Map<K, V> onlyOnLeft,
Map<K, V> onlyOnRight,
Map<K, V> onBoth,
Map<K, ValueDifference<V>> differences) {
for (Entry<? extends K, ? extends V> entry : left.entrySet()) {
K leftKey = entry.getKey();
V leftValue = entry.getValue();
if (right.containsKey(leftKey)) {
/*
* The cast is safe because onlyOnRight contains all the keys of right.
*
* TODO(cpovirk): Consider checking onlyOnRight.containsKey instead of right.containsKey.
* That could change behavior if the input maps use different equivalence relations (and so
* a key that appears once in `right` might appear multiple times in `left`). We don't
* guarantee behavior in that case, anyway, and the current behavior is likely undesirable.
* So that's either a reason to feel free to change it or a reason to not bother thinking
* further about this.
*/
V rightValue = uncheckedCastNullableTToT(onlyOnRight.remove(leftKey));
if (valueEquivalence.equivalent(leftValue, rightValue)) {
onBoth.put(leftKey, leftValue);
} else {
differences.put(leftKey, ValueDifferenceImpl.create(leftValue, rightValue));
}
} else {
onlyOnLeft.put(leftKey, leftValue);
}
}
}
|
Computes the difference between two sorted maps, using the comparator of the left map, or
{@code Ordering.natural()} if the left map uses the natural ordering of its elements. This
difference is an immutable snapshot of the state of the maps at the time this method is called.
It will never change, even if the maps change at a later time.
<p>Since this method uses {@code TreeMap} instances internally, the keys of the right map must
all compare as distinct according to the comparator of the left map.
<p><b>Note:</b>If you only need to know whether two sorted maps have the same mappings, call
{@code left.equals(right)} instead of this method.
@param left the map to treat as the "left" map for purposes of comparison
@param right the map to treat as the "right" map for purposes of comparison
@return the difference between the two maps
@since 11.0
|
java
|
android/guava/src/com/google/common/collect/Maps.java
| 547
|
[
"left",
"right",
"valueEquivalence",
"onlyOnLeft",
"onlyOnRight",
"onBoth",
"differences"
] |
void
| true
| 3
| 8.24
|
google/guava
| 51,352
|
javadoc
| false
|
isNamedBeanAnAdvisorOrAdvice
|
private boolean isNamedBeanAnAdvisorOrAdvice(String beanName) {
Assert.state(this.beanFactory != null, "No BeanFactory set");
Class<?> namedBeanClass = this.beanFactory.getType(beanName);
if (namedBeanClass != null) {
return (Advisor.class.isAssignableFrom(namedBeanClass) || Advice.class.isAssignableFrom(namedBeanClass));
}
// Treat it as a target bean if we can't tell.
if (logger.isDebugEnabled()) {
logger.debug("Could not determine type of bean with name '" + beanName +
"' - assuming it is neither an Advisor nor an Advice");
}
return false;
}
|
Look at bean factory metadata to work out whether this bean name,
which concludes the interceptorNames list, is an Advisor or Advice,
or may be a target.
@param beanName bean name to check
@return {@code true} if it's an Advisor or Advice
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/ProxyFactoryBean.java
| 388
|
[
"beanName"
] | true
| 4
| 8.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
is_default_pool
|
def is_default_pool(id: int, session: Session = NEW_SESSION) -> bool:
"""
Check id if is the default_pool.
:param id: pool id
:param session: SQLAlchemy ORM Session
:return: True if id is default_pool, otherwise False
"""
return exists_query(
Pool.id == id,
Pool.pool == Pool.DEFAULT_POOL_NAME,
session=session,
)
|
Check id if is the default_pool.
:param id: pool id
:param session: SQLAlchemy ORM Session
:return: True if id is default_pool, otherwise False
|
python
|
airflow-core/src/airflow/models/pool.py
| 101
|
[
"id",
"session"
] |
bool
| true
| 1
| 7.04
|
apache/airflow
| 43,597
|
sphinx
| false
|
predictBeanType
|
default @Nullable Class<?> predictBeanType(Class<?> beanClass, String beanName) throws BeansException {
return null;
}
|
Predict the type of the bean to be eventually returned from this
processor's {@link #postProcessBeforeInstantiation} callback.
<p>The default implementation returns {@code null}.
Specific implementations should try to predict the bean type as
far as known/cached already, without extra processing steps.
@param beanClass the raw class of the bean
@param beanName the name of the bean
@return the type of the bean, or {@code null} if not predictable
@throws org.springframework.beans.BeansException in case of errors
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/SmartInstantiationAwareBeanPostProcessor.java
| 50
|
[
"beanClass",
"beanName"
] | true
| 1
| 6.32
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
withLowerBounds
|
public WildcardTypeBuilder withLowerBounds(final Type... bounds) {
this.lowerBounds = bounds;
return this;
}
|
Specify lower bounds of the wildcard type to build.
@param bounds to set.
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
| 207
|
[] |
WildcardTypeBuilder
| true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
throwIfGroupIdNotDefined
|
private void throwIfGroupIdNotDefined() {
if (groupId.isEmpty())
throw new InvalidGroupIdException("To use the group management or offset commit APIs, you must " +
"provide a valid " + ConsumerConfig.GROUP_ID_CONFIG + " in the consumer configuration.");
}
|
Release the light lock protecting the consumer from multi-threaded access.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ClassicKafkaConsumer.java
| 1,274
|
[] |
void
| true
| 2
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
reverse
|
public static void reverse(TDigestDoubleArray order, int offset, int length) {
for (int i = 0; i < length / 2; i++) {
double t = order.get(offset + i);
order.set(offset + i, order.get(offset + length - i - 1));
order.set(offset + length - i - 1, t);
}
}
|
Reverses part of an array.
@param order The array containing the data to reverse.
@param offset Where to start reversing.
@param length How many elements to reverse
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/Sort.java
| 205
|
[
"order",
"offset",
"length"
] |
void
| true
| 2
| 7.04
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
get_memory_traffic_bytes
|
def get_memory_traffic_bytes(self):
"""Return the number of bytes read/written by this operator.
Override this method in subclasses for operations with non-standard memory patterns
(e.g., matmul which is compute-bound rather than memory-bound).
The framework will use this value along with execution time to compute
and report memory bandwidth in GB/s.
Default implementation assumes a pointwise-like operation:
- Reads: all input tensors
- Writes: output tensor (estimated as size of largest input)
This default works correctly for:
- Element-wise operations (add, mul, relu, etc.)
- Activations (gelu, sigmoid, etc.)
- Optimizers (SGD, Adam, etc.)
- Reductions (sum, mean, etc. - may underestimate writes)
Returns:
int or None: Total bytes transferred (reads + writes), or None if not applicable
"""
if not hasattr(self, "inputs") or not self.inputs:
return None
input_tensors = [v for v in self.inputs.values() if isinstance(v, torch.Tensor)]
if not input_tensors:
return None
# Calculate total bytes read from all inputs
bytes_read = sum(t.numel() * t.element_size() for t in input_tensors)
# Estimate output size as the largest input (common for pointwise ops)
largest_input = max(input_tensors, key=lambda t: t.numel())
bytes_written = largest_input.numel() * largest_input.element_size()
return bytes_read + bytes_written
|
Return the number of bytes read/written by this operator.
Override this method in subclasses for operations with non-standard memory patterns
(e.g., matmul which is compute-bound rather than memory-bound).
The framework will use this value along with execution time to compute
and report memory bandwidth in GB/s.
Default implementation assumes a pointwise-like operation:
- Reads: all input tensors
- Writes: output tensor (estimated as size of largest input)
This default works correctly for:
- Element-wise operations (add, mul, relu, etc.)
- Activations (gelu, sigmoid, etc.)
- Optimizers (SGD, Adam, etc.)
- Reductions (sum, mean, etc. - may underestimate writes)
Returns:
int or None: Total bytes transferred (reads + writes), or None if not applicable
|
python
|
benchmarks/operator_benchmark/benchmark_pytorch.py
| 121
|
[
"self"
] | false
| 4
| 7.12
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
|
add
|
@CanIgnoreReturnValue
@Override
public Builder<E> add(E... elements) {
super.add(elements);
return this;
}
|
Adds each element of {@code elements} to the {@code ImmutableList}.
@param elements the {@code Iterable} to add to the {@code ImmutableList}
@return this {@code Builder} object
@throws NullPointerException if {@code elements} is null or contains a null element
|
java
|
android/guava/src/com/google/common/collect/ImmutableList.java
| 800
|
[] | true
| 1
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
tokenize
|
protected List<String> tokenize(final char[] srcChars, final int offset, final int count) {
if (ArrayUtils.isEmpty(srcChars)) {
return Collections.emptyList();
}
final StrBuilder buf = new StrBuilder();
final List<String> tokenList = new ArrayList<>();
int pos = offset;
// loop around the entire buffer
while (pos >= 0 && pos < count) {
// find next token
pos = readNextToken(srcChars, pos, count, buf, tokenList);
// handle case where end of string is a delimiter
if (pos >= count) {
addToken(tokenList, StringUtils.EMPTY);
}
}
return tokenList;
}
|
Internal method to performs the tokenization.
<p>
Most users of this class do not need to call this method. This method
will be called automatically by other (public) methods when required.
</p>
<p>
This method exists to allow subclasses to add code before or after the
tokenization. For example, a subclass could alter the character array,
offset or count to be parsed, or call the tokenizer multiple times on
multiple strings. It is also be possible to filter the results.
</p>
<p>
{@link StrTokenizer} will always pass a zero offset and a count
equal to the length of the array to this method, however a subclass
may pass other values, or even an entirely different array.
</p>
@param srcChars the character array being tokenized, may be null.
@param offset the start position within the character array, must be valid.
@param count the number of characters to tokenize, must be valid.
@return the modifiable list of String tokens, unmodifiable if null array or zero count.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrTokenizer.java
| 1,079
|
[
"srcChars",
"offset",
"count"
] | true
| 5
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
getTokenList
|
public List<String> getTokenList() {
checkTokenized();
final List<String> list = new ArrayList<>(tokens.length);
list.addAll(Arrays.asList(tokens));
return list;
}
|
Gets a copy of the full token list as an independent modifiable list.
@return the tokens as a String array.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrTokenizer.java
| 541
|
[] | true
| 1
| 7.04
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
getPointcut
|
@Override
public Pointcut getPointcut() {
synchronized (this.pointcutMonitor) {
if (this.pointcut == null) {
this.pointcut = createPointcut();
if (this.patterns != null) {
this.pointcut.setPatterns(this.patterns);
}
}
return this.pointcut;
}
}
|
Initialize the singleton Pointcut held within this Advisor.
|
java
|
spring-aop/src/main/java/org/springframework/aop/support/RegexpMethodPointcutAdvisor.java
| 120
|
[] |
Pointcut
| true
| 3
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
resolveObject
|
public @Nullable Object resolveObject(RegisteredBean registeredBean) {
Assert.notNull(registeredBean, "'registeredBean' must not be null");
return resolveValue(registeredBean, getField(registeredBean));
}
|
Resolve the field value for the specified registered bean.
@param registeredBean the registered bean
@return the resolved field value
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/AutowiredFieldValueResolver.java
| 147
|
[
"registeredBean"
] |
Object
| true
| 1
| 6.16
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
install_handler
|
static void __attribute__((constructor)) install_handler(void) {
static const std::unordered_map<std::string_view, int> signalMap = {
{"segv", SIGSEGV},
{"ill", SIGILL},
#ifdef SIGBUS
{"bus", SIGBUS},
#endif
#ifdef SIGSTKFLT
{"stkflt", SIGSTKFLT},
#endif
{"abrt", SIGABRT},
{"fpe", SIGFPE},
};
std::bitset<64> sigMask;
// The string description of signals to install the handler for.
folly::StringPiece sigEnv = std::getenv("FOLLY_SEGFAULT_SIGNALS") ?: "all";
while (!sigEnv.empty()) {
std::string_view sigName = sigEnv.split_step(" ");
// If signame of all was given, add in all signals.
if (sigName == "all") {
for (const auto& ent : signalMap) {
sigMask.set(ent.second);
}
} else {
auto sig = signalMap.find(sigName);
if (sig == signalMap.end()) {
fprintf(
stderr,
"unknown signal: \"%.*s\"\n",
static_cast<int>(sigName.length()),
sigName.data());
std::abort();
}
sigMask.set(sig->second);
}
}
folly::symbolizer::installFatalSignalHandler(sigMask);
}
|
A standalone shared library that can be `LD_PRELOAD`d to print symbolized
native stack traces when the process dies due to a signal. By default, this
is enabled for SIGSEGV, SIGILL, SIGBUS, SIGSTKFLT, SIGABRT, and SIGFPE.
Based on glibc's `libSegFault.so`:
https://github.com/lattera/glibc/blob/master/debug/segfault.c
Usage:
$ LD_PRELOAD=libFollySegFault.so <my_prog> ...
|
cpp
|
folly/debugging/symbolizer/tool/LibSegFault.cpp
| 41
|
[] | true
| 6
| 6.56
|
facebook/folly
| 30,157
|
doxygen
| false
|
|
setFuture
|
@CanIgnoreReturnValue
@SuppressWarnings("Interruption") // We are propagating an interrupt from a caller.
protected boolean setFuture(ListenableFuture<? extends V> future) {
checkNotNull(future);
@RetainedLocalRef Object localValue = value();
if (localValue == null) {
if (future.isDone()) {
Object value = getFutureValue(future);
if (casValue(this, null, value)) {
complete(
this,
/*
* Interruption doesn't propagate through a DelegatingToFuture chain (see
* getFutureValue), so don't invoke interruptTask.
*/
false);
return true;
}
return false;
}
DelegatingToFuture<V> valueToSet = new DelegatingToFuture<>(this, future);
if (casValue(this, null, valueToSet)) {
// the listener is responsible for calling completeWithFuture, directExecutor is appropriate
// since all we are doing is unpacking a completed future which should be fast.
try {
future.addListener(valueToSet, DirectExecutor.INSTANCE);
} catch (Throwable t) {
// Any Exception is either a RuntimeException or sneaky checked exception.
//
// addListener has thrown an exception! DelegatingToFuture.run can't throw any exceptions
// so this must have been caused by addListener itself. The most likely explanation is a
// misconfigured mock. Try to switch to Failure.
Failure failure;
try {
failure = new Failure(t);
} catch (Exception | Error oomMostLikely) { // sneaky checked exception
failure = Failure.FALLBACK_INSTANCE;
}
// Note: The only way this CAS could fail is if cancel() has raced with us. That is ok.
boolean unused = casValue(this, valueToSet, failure);
}
return true;
}
localValue = value(); // we lost the cas, fall through and maybe cancel
}
// The future has already been set to something. If it is cancellation we should cancel the
// incoming future.
if (localValue instanceof Cancellation) {
// we don't care if it fails, this is best-effort.
future.cancel(((Cancellation) localValue).wasInterrupted);
}
return false;
}
|
Sets the result of this {@code Future} to match the supplied input {@code Future} once the
supplied {@code Future} is done, unless this {@code Future} has already been cancelled or set
(including "set asynchronously," defined below).
<p>If the supplied future is {@linkplain #isDone done} when this method is called and the call
is accepted, then this future is guaranteed to have been completed with the supplied future by
the time this method returns. If the supplied future is not done and the call is accepted, then
the future will be <i>set asynchronously</i>. Note that such a result, though not yet known,
cannot be overridden by a call to a {@code set*} method, only by a call to {@link #cancel}.
<p>If the call {@code setFuture(delegate)} is accepted and this {@code Future} is later
cancelled, cancellation will be propagated to {@code delegate}. Additionally, any call to
{@code setFuture} after any cancellation will propagate cancellation to the supplied {@code
Future}.
<p>Note that, even if the supplied future is cancelled and it causes this future to complete,
it will never trigger interruption behavior. In particular, it will not cause this future to
invoke the {@link #interruptTask} method, and the {@link #wasInterrupted} method will not
return {@code true}.
<p>Beware of completing a future while holding a lock. Its listeners may do slow work or
acquire other locks, risking deadlocks.
@param future the future to delegate to
@return true if the attempt was accepted, indicating that the {@code Future} was not previously
cancelled or set.
@since 19.0
|
java
|
android/guava/src/com/google/common/util/concurrent/AbstractFuture.java
| 551
|
[
"future"
] | true
| 8
| 8
|
google/guava
| 51,352
|
javadoc
| false
|
|
isFixPossiblyReExportingImportingFile
|
function isFixPossiblyReExportingImportingFile(fix: ImportFixWithModuleSpecifier, importingFilePath: Path, toPath: (fileName: string) => Path): boolean {
if (
fix.isReExport &&
fix.exportInfo?.moduleFileName &&
isIndexFileName(fix.exportInfo.moduleFileName)
) {
const reExportDir = toPath(getDirectoryPath(fix.exportInfo.moduleFileName));
return startsWith(importingFilePath, reExportDir);
}
return false;
}
|
@returns `Comparison.LessThan` if `a` is better than `b`.
|
typescript
|
src/services/codefixes/importFixes.ts
| 1,453
|
[
"fix",
"importingFilePath",
"toPath"
] | true
| 4
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
build
|
public ConfigurationMetadataRepository build() {
SimpleConfigurationMetadataRepository result = new SimpleConfigurationMetadataRepository();
for (SimpleConfigurationMetadataRepository repository : this.repositories) {
result.include(repository);
}
return result;
}
|
Build a {@link ConfigurationMetadataRepository} with the current state of this
builder.
@return this builder
|
java
|
configuration-metadata/spring-boot-configuration-metadata/src/main/java/org/springframework/boot/configurationmetadata/ConfigurationMetadataRepositoryJsonBuilder.java
| 86
|
[] |
ConfigurationMetadataRepository
| true
| 1
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
findCommand
|
public @Nullable Command findCommand(String name) {
for (Command candidate : this.commands) {
String candidateName = candidate.getName();
if (candidateName.equals(name) || (isOptionCommand(candidate) && ("--" + candidateName).equals(name))) {
return candidate;
}
}
return null;
}
|
Find a command by name.
@param name the name of the command
@return the command or {@code null} if not found
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/CommandRunner.java
| 151
|
[
"name"
] |
Command
| true
| 4
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
_kmeans_plusplus
|
def _kmeans_plusplus(
X, n_clusters, x_squared_norms, sample_weight, random_state, n_local_trials=None
):
"""Computational component for initialization of n_clusters by
k-means++. Prior validation of data is assumed.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The data to pick seeds for.
n_clusters : int
The number of seeds to choose.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in `X`.
x_squared_norms : ndarray of shape (n_samples,)
Squared Euclidean norm of each data point.
random_state : RandomState instance
The generator used to initialize the centers.
See :term:`Glossary <random_state>`.
n_local_trials : int, default=None
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Returns
-------
centers : ndarray of shape (n_clusters, n_features)
The initial centers for k-means.
indices : ndarray of shape (n_clusters,)
The index location of the chosen centers in the data array X. For a
given index and center, X[index] = center.
"""
n_samples, n_features = X.shape
centers = np.empty((n_clusters, n_features), dtype=X.dtype)
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick first center randomly and track index of point
center_id = random_state.choice(n_samples, p=sample_weight / sample_weight.sum())
indices = np.full(n_clusters, -1, dtype=int)
if sp.issparse(X):
centers[0] = X[[center_id]].toarray()
else:
centers[0] = X[center_id]
indices[0] = center_id
# Initialize list of closest distances and calculate current potential
closest_dist_sq = _euclidean_distances(
centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms, squared=True
)
current_pot = closest_dist_sq @ sample_weight
# Pick the remaining n_clusters-1 points
for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random_state.uniform(size=n_local_trials) * current_pot
candidate_ids = np.searchsorted(
np.cumsum(sample_weight * closest_dist_sq), rand_vals
)
# XXX: numerical imprecision can result in a candidate_id out of range
np.clip(candidate_ids, None, closest_dist_sq.size - 1, out=candidate_ids)
# Compute distances to center candidates
distance_to_candidates = _euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True
)
# update closest distances squared and potential for each candidate
np.minimum(closest_dist_sq, distance_to_candidates, out=distance_to_candidates)
candidates_pot = distance_to_candidates @ sample_weight.reshape(-1, 1)
# Decide which candidate is the best
best_candidate = np.argmin(candidates_pot)
current_pot = candidates_pot[best_candidate]
closest_dist_sq = distance_to_candidates[best_candidate]
best_candidate = candidate_ids[best_candidate]
# Permanently add best center candidate found in local tries
if sp.issparse(X):
centers[c] = X[[best_candidate]].toarray()
else:
centers[c] = X[best_candidate]
indices[c] = best_candidate
return centers, indices
|
Computational component for initialization of n_clusters by
k-means++. Prior validation of data is assumed.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The data to pick seeds for.
n_clusters : int
The number of seeds to choose.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in `X`.
x_squared_norms : ndarray of shape (n_samples,)
Squared Euclidean norm of each data point.
random_state : RandomState instance
The generator used to initialize the centers.
See :term:`Glossary <random_state>`.
n_local_trials : int, default=None
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Returns
-------
centers : ndarray of shape (n_clusters, n_features)
The initial centers for k-means.
indices : ndarray of shape (n_clusters,)
The index location of the chosen centers in the data array X. For a
given index and center, X[index] = center.
|
python
|
sklearn/cluster/_kmeans.py
| 180
|
[
"X",
"n_clusters",
"x_squared_norms",
"sample_weight",
"random_state",
"n_local_trials"
] | false
| 7
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
to_iceberg
|
def to_iceberg(
self,
table_identifier: str,
catalog_name: str | None = None,
*,
catalog_properties: dict[str, Any] | None = None,
location: str | None = None,
append: bool = False,
snapshot_properties: dict[str, str] | None = None,
) -> None:
"""
Write a DataFrame to an Apache Iceberg table.
.. versionadded:: 3.0.0
.. warning::
to_iceberg is experimental and may change without warning.
Parameters
----------
table_identifier : str
Table identifier.
catalog_name : str, optional
The name of the catalog.
catalog_properties : dict of {str: str}, optional
The properties that are used next to the catalog configuration.
location : str, optional
Location for the table.
append : bool, default False
If ``True``, append data to the table, instead of replacing the content.
snapshot_properties : dict of {str: str}, optional
Custom properties to be added to the snapshot summary
See Also
--------
read_iceberg : Read an Apache Iceberg table.
DataFrame.to_parquet : Write a DataFrame in Parquet format.
Examples
--------
>>> df = pd.DataFrame(data={"col1": [1, 2], "col2": [4, 3]})
>>> df.to_iceberg("my_table", catalog_name="my_catalog") # doctest: +SKIP
"""
from pandas.io.iceberg import to_iceberg
to_iceberg(
self,
table_identifier,
catalog_name,
catalog_properties=catalog_properties,
location=location,
append=append,
snapshot_properties=snapshot_properties,
)
|
Write a DataFrame to an Apache Iceberg table.
.. versionadded:: 3.0.0
.. warning::
to_iceberg is experimental and may change without warning.
Parameters
----------
table_identifier : str
Table identifier.
catalog_name : str, optional
The name of the catalog.
catalog_properties : dict of {str: str}, optional
The properties that are used next to the catalog configuration.
location : str, optional
Location for the table.
append : bool, default False
If ``True``, append data to the table, instead of replacing the content.
snapshot_properties : dict of {str: str}, optional
Custom properties to be added to the snapshot summary
See Also
--------
read_iceberg : Read an Apache Iceberg table.
DataFrame.to_parquet : Write a DataFrame in Parquet format.
Examples
--------
>>> df = pd.DataFrame(data={"col1": [1, 2], "col2": [4, 3]})
>>> df.to_iceberg("my_table", catalog_name="my_catalog") # doctest: +SKIP
|
python
|
pandas/core/frame.py
| 3,706
|
[
"self",
"table_identifier",
"catalog_name",
"catalog_properties",
"location",
"append",
"snapshot_properties"
] |
None
| true
| 1
| 6.96
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
isInstanceOf
|
public static void isInstanceOf(final Class<?> type, final Object obj, final String message, final Object... values) {
// TODO when breaking BC, consider returning obj
if (!type.isInstance(obj)) {
throw new IllegalArgumentException(getMessage(message, values));
}
}
|
Validate that the argument is an instance of the specified class; otherwise
throwing an exception with the specified message. This method is useful when
validating according to an arbitrary class
<pre>Validate.isInstanceOf(OkClass.class, object, "Wrong class, object is of class %s",
object.getClass().getName());</pre>
@param type the class the object must be validated against, not null.
@param obj the object to check, null throws an exception.
@param message the {@link String#format(String, Object...)} exception message if invalid, not null.
@param values the optional values for the formatted exception message, null array not recommended.
@throws IllegalArgumentException if argument is not of specified class.
@see #isInstanceOf(Class, Object)
@since 3.0
|
java
|
src/main/java/org/apache/commons/lang3/Validate.java
| 469
|
[
"type",
"obj",
"message"
] |
void
| true
| 2
| 6.56
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
escapeXml11
|
public static String escapeXml11(final String input) {
return ESCAPE_XML11.translate(input);
}
|
Escapes the characters in a {@link String} using XML entities.
<p>For example: {@code "bread" & "butter"} =>
{@code "bread" & "butter"}.
</p>
<p>XML 1.1 can represent certain control characters, but it cannot represent
the null byte or unpaired Unicode surrogate code points, even after escaping.
{@code escapeXml11} will remove characters that do not fit in the following
ranges:</p>
<p>{@code [#x1-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]}</p>
<p>{@code escapeXml11} will escape characters in the following ranges:</p>
<p>{@code [#x1-#x8] | [#xB-#xC] | [#xE-#x1F] | [#x7F-#x84] | [#x86-#x9F]}</p>
<p>The returned string can be inserted into a valid XML 1.1 document. Do not
use it for XML 1.0 documents.</p>
@param input the {@link String} to escape, may be null
@return a new escaped {@link String}, {@code null} if null string input
@see #unescapeXml(String)
@since 3.3
|
java
|
src/main/java/org/apache/commons/lang3/StringEscapeUtils.java
| 655
|
[
"input"
] |
String
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
unmodifiableMultimap
|
public static <K extends @Nullable Object, V extends @Nullable Object>
Multimap<K, V> unmodifiableMultimap(Multimap<K, V> delegate) {
if (delegate instanceof UnmodifiableMultimap || delegate instanceof ImmutableMultimap) {
return delegate;
}
return new UnmodifiableMultimap<>(delegate);
}
|
Returns an unmodifiable view of the specified multimap. Query operations on the returned
multimap "read through" to the specified multimap, and attempts to modify the returned
multimap, either directly or through the multimap's views, result in an {@code
UnsupportedOperationException}.
<p>The returned multimap will be serializable if the specified multimap is serializable.
@param delegate the multimap for which an unmodifiable view is to be returned
@return an unmodifiable view of the specified multimap
|
java
|
android/guava/src/com/google/common/collect/Multimaps.java
| 654
|
[
"delegate"
] | true
| 3
| 7.44
|
google/guava
| 51,352
|
javadoc
| false
|
|
create_model
|
def create_model(self, config: dict):
"""
Create a model in Amazon SageMaker.
In the request, you name the model and describe a primary container. For
the primary container, you specify the Docker image that contains
inference code, artifacts (from prior training), and a custom
environment map that the inference code uses when you deploy the model
for predictions.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.create_model`
:param config: the config for model
:return: A response to model creation
"""
return self.get_conn().create_model(**config)
|
Create a model in Amazon SageMaker.
In the request, you name the model and describe a primary container. For
the primary container, you specify the Docker image that contains
inference code, artifacts (from prior training), and a custom
environment map that the inference code uses when you deploy the model
for predictions.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.create_model`
:param config: the config for model
:return: A response to model creation
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/sagemaker.py
| 457
|
[
"self",
"config"
] | true
| 1
| 6.56
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
readFully
|
default void readFully(ByteBuffer dst, long pos) throws IOException {
do {
int count = read(dst, pos);
if (count <= 0) {
throw new EOFException();
}
pos += count;
}
while (dst.hasRemaining());
}
|
Fully read a sequence of bytes from this channel into the given buffer, starting at
the given block position and filling {@link ByteBuffer#remaining() remaining} bytes
in the buffer.
@param dst the buffer into which bytes are to be transferred
@param pos the position within the block at which the transfer is to begin
@throws EOFException if an attempt is made to read past the end of the block
@throws IOException on I/O error
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/DataBlock.java
| 62
|
[
"dst",
"pos"
] |
void
| true
| 2
| 6.56
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
canOptimiseForSingleAcknowledgeType
|
private boolean canOptimiseForSingleAcknowledgeType(AcknowledgementBatch acknowledgementBatch) {
if (acknowledgementBatch == null || acknowledgementBatch.acknowledgeTypes().size() == 1) return false;
int firstAcknowledgeType = acknowledgementBatch.acknowledgeTypes().get(0);
for (int i = 1; i < acknowledgementBatch.acknowledgeTypes().size(); i++) {
if (acknowledgementBatch.acknowledgeTypes().get(i) != firstAcknowledgeType) return false;
}
return true;
}
|
@return Returns true if the array of acknowledge types in the share fetch batch contains a single acknowledge type
and the array size can be reduced to 1.
Returns false when the array has more than one acknowledge type or is already optimised.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/Acknowledgements.java
| 307
|
[
"acknowledgementBatch"
] | true
| 5
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.