function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
firstIndex
|
@Override
int firstIndex() {
return (firstEntry == ENDPOINT) ? -1 : firstEntry;
}
|
Pointer to the last node in the linked list, or {@code ENDPOINT} if there are no entries.
|
java
|
android/guava/src/com/google/common/collect/ObjectCountLinkedHashMap.java
| 105
|
[] | true
| 2
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
saturatedCast
|
public static int saturatedCast(long value) {
if (value <= 0) {
return 0;
} else if (value >= (1L << 32)) {
return -1;
} else {
return (int) value;
}
}
|
Returns the {@code int} value that, when treated as unsigned, is nearest in value to {@code
value}.
@param value any {@code long} value
@return {@code 2^32 - 1} if {@code value >= 2^32}, {@code 0} if {@code value <= 0}, and {@code
value} cast to {@code int} otherwise
@since 21.0
|
java
|
android/guava/src/com/google/common/primitives/UnsignedInts.java
| 107
|
[
"value"
] | true
| 3
| 7.92
|
google/guava
| 51,352
|
javadoc
| false
|
|
droplevel
|
def droplevel(self, level: IndexLabel = 0):
"""
Return index with requested level(s) removed.
If resulting index has only 1 level left, the result will be
of Index type, not MultiIndex. The original index is not modified inplace.
Parameters
----------
level : int, str, or list-like, default 0
If a string is given, must be the name of a level
If list-like, elements must be names or indexes of levels.
Returns
-------
Index or MultiIndex
Returns an Index or MultiIndex object, depending on the resulting index
after removing the requested level(s).
See Also
--------
Index.dropna : Return Index without NA/NaN values.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays(
... [[1, 2], [3, 4], [5, 6]], names=["x", "y", "z"]
... )
>>> mi
MultiIndex([(1, 3, 5),
(2, 4, 6)],
names=['x', 'y', 'z'])
>>> mi.droplevel()
MultiIndex([(3, 5),
(4, 6)],
names=['y', 'z'])
>>> mi.droplevel(2)
MultiIndex([(1, 3),
(2, 4)],
names=['x', 'y'])
>>> mi.droplevel("z")
MultiIndex([(1, 3),
(2, 4)],
names=['x', 'y'])
>>> mi.droplevel(["x", "y"])
Index([5, 6], dtype='int64', name='z')
"""
if not isinstance(level, (tuple, list)):
level = [level]
levnums = sorted((self._get_level_number(lev) for lev in level), reverse=True)
return self._drop_level_numbers(levnums)
|
Return index with requested level(s) removed.
If resulting index has only 1 level left, the result will be
of Index type, not MultiIndex. The original index is not modified inplace.
Parameters
----------
level : int, str, or list-like, default 0
If a string is given, must be the name of a level
If list-like, elements must be names or indexes of levels.
Returns
-------
Index or MultiIndex
Returns an Index or MultiIndex object, depending on the resulting index
after removing the requested level(s).
See Also
--------
Index.dropna : Return Index without NA/NaN values.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays(
... [[1, 2], [3, 4], [5, 6]], names=["x", "y", "z"]
... )
>>> mi
MultiIndex([(1, 3, 5),
(2, 4, 6)],
names=['x', 'y', 'z'])
>>> mi.droplevel()
MultiIndex([(3, 5),
(4, 6)],
names=['y', 'z'])
>>> mi.droplevel(2)
MultiIndex([(1, 3),
(2, 4)],
names=['x', 'y'])
>>> mi.droplevel("z")
MultiIndex([(1, 3),
(2, 4)],
names=['x', 'y'])
>>> mi.droplevel(["x", "y"])
Index([5, 6], dtype='int64', name='z')
|
python
|
pandas/core/indexes/base.py
| 2,246
|
[
"self",
"level"
] | true
| 2
| 8.32
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
astype
|
def astype(self, dtype: Dtype, copy: bool = True):
"""
Create an Index with values cast to dtypes.
The class of a new Index is determined by dtype. When conversion is
impossible, a TypeError exception is raised.
Parameters
----------
dtype : numpy dtype or pandas type
Note that any signed integer `dtype` is treated as ``'int64'``,
and any unsigned integer `dtype` is treated as ``'uint64'``,
regardless of the size.
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and internal requirements on dtype are
satisfied, the original data is used to create a new Index
or the original Index is returned.
Returns
-------
Index
Index with values cast to specified dtype.
See Also
--------
Index.dtype: Return the dtype object of the underlying data.
Index.dtypes: Return the dtype object of the underlying data.
Index.convert_dtypes: Convert columns to the best possible dtypes.
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx
Index([1, 2, 3], dtype='int64')
>>> idx.astype("float")
Index([1.0, 2.0, 3.0], dtype='float64')
"""
if dtype is not None:
dtype = pandas_dtype(dtype)
if self.dtype == dtype:
# Ensure that self.astype(self.dtype) is self
return self.copy() if copy else self
values = self._data
if isinstance(values, ExtensionArray):
with rewrite_exception(type(values).__name__, type(self).__name__):
new_values = values.astype(dtype, copy=copy)
elif isinstance(dtype, ExtensionDtype):
cls = dtype.construct_array_type()
# Note: for RangeIndex and CategoricalDtype self vs self._values
# behaves differently here.
new_values = cls._from_sequence(self, dtype=dtype, copy=copy)
else:
# GH#13149 specifically use astype_array instead of astype
new_values = astype_array(values, dtype=dtype, copy=copy)
# pass copy=False because any copying will be done in the astype above
result = Index(new_values, name=self.name, dtype=new_values.dtype, copy=False)
if (
not copy
and self._references is not None
and astype_is_view(self.dtype, dtype)
):
result._references = self._references
result._references.add_index_reference(result)
return result
|
Create an Index with values cast to dtypes.
The class of a new Index is determined by dtype. When conversion is
impossible, a TypeError exception is raised.
Parameters
----------
dtype : numpy dtype or pandas type
Note that any signed integer `dtype` is treated as ``'int64'``,
and any unsigned integer `dtype` is treated as ``'uint64'``,
regardless of the size.
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and internal requirements on dtype are
satisfied, the original data is used to create a new Index
or the original Index is returned.
Returns
-------
Index
Index with values cast to specified dtype.
See Also
--------
Index.dtype: Return the dtype object of the underlying data.
Index.dtypes: Return the dtype object of the underlying data.
Index.convert_dtypes: Convert columns to the best possible dtypes.
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx
Index([1, 2, 3], dtype='int64')
>>> idx.astype("float")
Index([1.0, 2.0, 3.0], dtype='float64')
|
python
|
pandas/core/indexes/base.py
| 1,112
|
[
"self",
"dtype",
"copy"
] | true
| 10
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
predict
|
def predict(self, X):
"""Perform classification on an array of test vectors `X`.
The predicted class `C` for each sample in `X` is returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
Returns
-------
y_pred : ndarray of shape (n_samples,)
The predicted classes.
"""
check_is_fitted(self)
if np.isclose(self.class_prior_, 1 / len(self.classes_)).all():
# `validate_data` is called here since we are not calling `super()`
ensure_all_finite = (
"allow-nan" if get_tags(self).input_tags.allow_nan else True
)
X = validate_data(
self,
X,
ensure_all_finite=ensure_all_finite,
accept_sparse="csr",
reset=False,
)
return self.classes_[
pairwise_distances_argmin(X, self.centroids_, metric=self.metric)
]
else:
return super().predict(X)
|
Perform classification on an array of test vectors `X`.
The predicted class `C` for each sample in `X` is returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
Returns
-------
y_pred : ndarray of shape (n_samples,)
The predicted classes.
|
python
|
sklearn/neighbors/_nearest_centroid.py
| 275
|
[
"self",
"X"
] | false
| 4
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
parse_time_delta
|
def parse_time_delta(time_str: str):
"""
Parse a time string e.g. (2h13m) into a timedelta object.
:param time_str: A string identifying a duration. (eg. 2h13m)
:return datetime.timedelta: A datetime.timedelta object or "@once"
"""
if (parts := RE_TIME_DELTA.match(time_str)) is None:
msg = (
f"Could not parse any time information from '{time_str}'. "
f"Examples of valid strings: '8h', '2d8h5m20s', '2m4s'"
)
raise ValueError(msg)
time_params = {name: float(param) for name, param in parts.groupdict().items() if param}
return timedelta(**time_params)
|
Parse a time string e.g. (2h13m) into a timedelta object.
:param time_str: A string identifying a duration. (eg. 2h13m)
:return datetime.timedelta: A datetime.timedelta object or "@once"
|
python
|
dev/airflow_perf/dags/elastic_dag.py
| 34
|
[
"time_str"
] | true
| 2
| 6.88
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
getEntityTransformer
|
private EntryTransformer getEntityTransformer() {
if (getLayout() instanceof RepackagingLayout repackagingLayout) {
return new RepackagingEntryTransformer(repackagingLayout);
}
return EntryTransformer.NONE;
}
|
Writes a signature file if necessary for the given {@code writtenLibraries}.
@param writtenLibraries the libraries
@param writer the writer to use to write the signature file if necessary
@throws IOException if a failure occurs when writing the signature file
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/Packager.java
| 275
|
[] |
EntryTransformer
| true
| 2
| 6.56
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
mean
|
def mean(self, *, skipna: bool = True, axis: AxisInt | None = 0):
"""
Return the mean value of the Array.
Parameters
----------
skipna : bool, default True
Whether to ignore any NaT elements.
axis : int, optional, default 0
Axis for the function to be applied on.
Returns
-------
scalar
Timestamp or Timedelta.
See Also
--------
numpy.ndarray.mean : Returns the average of array elements along a given axis.
Series.mean : Return the mean value in a Series.
Notes
-----
mean is only defined for Datetime and Timedelta dtypes, not for Period.
Examples
--------
For :class:`pandas.DatetimeIndex`:
>>> idx = pd.date_range("2001-01-01 00:00", periods=3)
>>> idx
DatetimeIndex(['2001-01-01', '2001-01-02', '2001-01-03'],
dtype='datetime64[us]', freq='D')
>>> idx.mean()
Timestamp('2001-01-02 00:00:00')
For :class:`pandas.TimedeltaIndex`:
>>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit="D")
>>> tdelta_idx
TimedeltaIndex(['1 days', '2 days', '3 days'],
dtype='timedelta64[ns]', freq=None)
>>> tdelta_idx.mean()
Timedelta('2 days 00:00:00')
"""
if isinstance(self.dtype, PeriodDtype):
# See discussion in GH#24757
raise TypeError(
f"mean is not implemented for {type(self).__name__} since the "
"meaning is ambiguous. An alternative is "
"obj.to_timestamp(how='start').mean()"
)
result = nanops.nanmean(
self._ndarray, axis=axis, skipna=skipna, mask=self.isna()
)
return self._wrap_reduction_result(axis, result)
|
Return the mean value of the Array.
Parameters
----------
skipna : bool, default True
Whether to ignore any NaT elements.
axis : int, optional, default 0
Axis for the function to be applied on.
Returns
-------
scalar
Timestamp or Timedelta.
See Also
--------
numpy.ndarray.mean : Returns the average of array elements along a given axis.
Series.mean : Return the mean value in a Series.
Notes
-----
mean is only defined for Datetime and Timedelta dtypes, not for Period.
Examples
--------
For :class:`pandas.DatetimeIndex`:
>>> idx = pd.date_range("2001-01-01 00:00", periods=3)
>>> idx
DatetimeIndex(['2001-01-01', '2001-01-02', '2001-01-03'],
dtype='datetime64[us]', freq='D')
>>> idx.mean()
Timestamp('2001-01-02 00:00:00')
For :class:`pandas.TimedeltaIndex`:
>>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit="D")
>>> tdelta_idx
TimedeltaIndex(['1 days', '2 days', '3 days'],
dtype='timedelta64[ns]', freq=None)
>>> tdelta_idx.mean()
Timedelta('2 days 00:00:00')
|
python
|
pandas/core/arrays/datetimelike.py
| 1,618
|
[
"self",
"skipna",
"axis"
] | true
| 2
| 8.16
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
secondary_training_status_message
|
def secondary_training_status_message(
job_description: dict[str, list[Any]], prev_description: dict | None
) -> str:
"""
Format string containing start time and the secondary training job status message.
:param job_description: Returned response from DescribeTrainingJob call
:param prev_description: Previous job description from DescribeTrainingJob call
:return: Job status string to be printed.
"""
current_transitions = job_description.get("SecondaryStatusTransitions")
if not current_transitions:
return ""
prev_transitions_num = 0
if prev_description is not None:
if prev_description.get("SecondaryStatusTransitions") is not None:
prev_transitions_num = len(prev_description["SecondaryStatusTransitions"])
transitions_to_print = (
current_transitions[-1:]
if len(current_transitions) == prev_transitions_num
else current_transitions[prev_transitions_num - len(current_transitions) :]
)
status_strs = []
for transition in transitions_to_print:
message = transition["StatusMessage"]
time_utc = timezone.convert_to_utc(cast("datetime", job_description["LastModifiedTime"]))
status_strs.append(f"{time_utc:%Y-%m-%d %H:%M:%S} {transition['Status']} - {message}")
return "\n".join(status_strs)
|
Format string containing start time and the secondary training job status message.
:param job_description: Returned response from DescribeTrainingJob call
:param prev_description: Previous job description from DescribeTrainingJob call
:return: Job status string to be printed.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/sagemaker.py
| 109
|
[
"job_description",
"prev_description"
] |
str
| true
| 6
| 7.28
|
apache/airflow
| 43,597
|
sphinx
| false
|
nextWord
|
function nextWord(word: string, start: number): number {
for (let i = start; i < word.length; i++) {
if (isWordSeparator(word.charCodeAt(i)) ||
(i > 0 && isWordSeparator(word.charCodeAt(i - 1)))) {
return i;
}
}
return word.length;
}
|
Gets alternative codes to the character code passed in. This comes in the
form of an array of character codes, all of which must match _in order_ to
successfully match.
@param code The character code to check.
|
typescript
|
src/vs/base/common/filters.ts
| 412
|
[
"word",
"start"
] | true
| 5
| 7.04
|
microsoft/vscode
| 179,840
|
jsdoc
| false
|
|
reverse
|
private static CharSequence reverse(CharSequence s) {
return new StringBuilder(s).reverse();
}
|
Parses a trie node and returns the number of characters consumed.
@param stack The prefixes that precede the characters represented by this node. Each entry of
the stack is in reverse order.
@param encoded The serialized trie.
@param start An index in the encoded serialized trie to begin reading characters from.
@param builder A map builder to which all entries will be added.
@return The number of characters consumed from {@code encoded}.
|
java
|
android/guava/src/com/google/thirdparty/publicsuffix/TrieParser.java
| 116
|
[
"s"
] |
CharSequence
| true
| 1
| 6.8
|
google/guava
| 51,352
|
javadoc
| false
|
pollResponseReceivedDuringReauthentication
|
default Optional<NetworkReceive> pollResponseReceivedDuringReauthentication() {
return Optional.empty();
}
|
Return the next (always non-null but possibly empty) client-side
{@link NetworkReceive} response that arrived during re-authentication that
is unrelated to re-authentication, if any. These correspond to requests sent
prior to the beginning of re-authentication; the requests were made when the
channel was successfully authenticated, and the responses arrived during the
re-authentication process. The response returned is removed from the authenticator's
queue. Responses of requests sent after completion of re-authentication are
processed only when the authenticator response queue is empty.
@return the (always non-null but possibly empty) client-side
{@link NetworkReceive} response that arrived during
re-authentication that is unrelated to re-authentication, if any
|
java
|
clients/src/main/java/org/apache/kafka/common/network/Authenticator.java
| 152
|
[] | true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
getGeoIpTaskState
|
@Nullable
static GeoIpTaskState getGeoIpTaskState(ProjectMetadata projectMetadata, String taskId) {
PersistentTasksCustomMetadata.PersistentTask<?> task = getTaskWithId(projectMetadata, taskId);
return (task == null) ? null : (GeoIpTaskState) task.getState();
}
|
Retrieves the geoip downloader's task state from the project metadata. This may return null in some circumstances,
for example if the geoip downloader task hasn't been created yet (which it wouldn't be if it's disabled).
@param projectMetadata the project metatdata to read the task state from.
@param taskId the task ID of the geoip downloader task to read the state for.
@return the geoip downloader's task state or null if there is not a state to read
|
java
|
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java
| 251
|
[
"projectMetadata",
"taskId"
] |
GeoIpTaskState
| true
| 2
| 8.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
pickBy
|
function pickBy(object, predicate) {
if (object == null) {
return {};
}
var props = arrayMap(getAllKeysIn(object), function(prop) {
return [prop];
});
predicate = getIteratee(predicate);
return basePickBy(object, props, function(value, path) {
return predicate(value, path[0]);
});
}
|
Creates an object composed of the `object` properties `predicate` returns
truthy for. The predicate is invoked with two arguments: (value, key).
@static
@memberOf _
@since 4.0.0
@category Object
@param {Object} object The source object.
@param {Function} [predicate=_.identity] The function invoked per property.
@returns {Object} Returns the new object.
@example
var object = { 'a': 1, 'b': '2', 'c': 3 };
_.pickBy(object, _.isNumber);
// => { 'a': 1, 'c': 3 }
|
javascript
|
lodash.js
| 13,688
|
[
"object",
"predicate"
] | false
| 2
| 7.52
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
_check_label_or_level_ambiguity
|
def _check_label_or_level_ambiguity(self, key: Level, axis: Axis = 0) -> None:
"""
Check whether `key` is ambiguous.
By ambiguous, we mean that it matches both a level of the input
`axis` and a label of the other axis.
Parameters
----------
key : Hashable
Label or level name.
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns).
Raises
------
ValueError: `key` is ambiguous
"""
axis_int = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int)
if (
key is not None
and is_hashable(key)
and key in self.axes[axis_int].names
and any(key in self.axes[ax] for ax in other_axes)
):
# Build an informative and grammatical warning
level_article, level_type = (
("an", "index") if axis_int == 0 else ("a", "column")
)
label_article, label_type = (
("a", "column") if axis_int == 0 else ("an", "index")
)
msg = (
f"'{key}' is both {level_article} {level_type} level and "
f"{label_article} {label_type} label, which is ambiguous."
)
raise ValueError(msg)
|
Check whether `key` is ambiguous.
By ambiguous, we mean that it matches both a level of the input
`axis` and a label of the other axis.
Parameters
----------
key : Hashable
Label or level name.
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns).
Raises
------
ValueError: `key` is ambiguous
|
python
|
pandas/core/generic.py
| 1,691
|
[
"self",
"key",
"axis"
] |
None
| true
| 7
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
stack
|
def stack(
frame: DataFrame, level=-1, dropna: bool = True, sort: bool = True
) -> Series | DataFrame:
"""
Convert DataFrame to Series with multi-level Index. Columns become the
second level of the resulting hierarchical index
Returns
-------
stacked : Series or DataFrame
"""
def stack_factorize(index):
if index.is_unique:
return index, np.arange(len(index))
codes, categories = factorize_from_iterable(index)
return categories, codes
N, K = frame.shape
# Will also convert negative level numbers and check if out of bounds.
level_num = frame.columns._get_level_number(level)
if isinstance(frame.columns, MultiIndex):
return _stack_multi_columns(
frame, level_num=level_num, dropna=dropna, sort=sort
)
elif isinstance(frame.index, MultiIndex):
new_levels = list(frame.index.levels)
new_codes = [lab.repeat(K) for lab in frame.index.codes]
clev, clab = stack_factorize(frame.columns)
new_levels.append(clev)
new_codes.append(np.tile(clab, N).ravel())
new_names = list(frame.index.names)
new_names.append(frame.columns.name)
new_index = MultiIndex(
levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
)
else:
levels, (ilab, clab) = zip(*map(stack_factorize, (frame.index, frame.columns)))
codes = ilab.repeat(K), np.tile(clab, N).ravel()
new_index = MultiIndex(
levels=levels,
codes=codes,
names=[frame.index.name, frame.columns.name],
verify_integrity=False,
)
new_values: ArrayLike
if not frame.empty and frame._is_homogeneous_type:
# For homogeneous EAs, frame._values will coerce to object. So
# we concatenate instead.
dtypes = list(frame.dtypes._values)
dtype = dtypes[0]
if isinstance(dtype, ExtensionDtype):
arr = dtype.construct_array_type()
new_values = arr._concat_same_type(
[col._values for _, col in frame.items()]
)
new_values = _reorder_for_extension_array_stack(new_values, N, K)
else:
# homogeneous, non-EA
new_values = frame._values.ravel()
else:
# non-homogeneous
new_values = frame._values.ravel()
if dropna:
mask = notna(new_values)
new_values = new_values[mask]
new_index = new_index[mask]
return frame._constructor_sliced(new_values, index=new_index)
|
Convert DataFrame to Series with multi-level Index. Columns become the
second level of the resulting hierarchical index
Returns
-------
stacked : Series or DataFrame
|
python
|
pandas/core/reshape/reshape.py
| 658
|
[
"frame",
"level",
"dropna",
"sort"
] |
Series | DataFrame
| true
| 11
| 6.32
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
poly2leg
|
def poly2leg(pol):
"""
Convert a polynomial to a Legendre series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Legendre series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Legendre
series.
See Also
--------
leg2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> import numpy as np
>>> from numpy import polynomial as P
>>> p = P.Polynomial(np.arange(4))
>>> p
Polynomial([0., 1., 2., 3.], domain=[-1., 1.], window=[-1., 1.], ...
>>> c = P.Legendre(P.legendre.poly2leg(p.coef))
>>> c
Legendre([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1]) # may vary
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = legadd(legmulx(res), pol[i])
return res
|
Convert a polynomial to a Legendre series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Legendre series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Legendre
series.
See Also
--------
leg2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> import numpy as np
>>> from numpy import polynomial as P
>>> p = P.Polynomial(np.arange(4))
>>> p
Polynomial([0., 1., 2., 3.], domain=[-1., 1.], window=[-1., 1.], ...
>>> c = P.Legendre(P.legendre.poly2leg(p.coef))
>>> c
Legendre([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1]) # may vary
|
python
|
numpy/polynomial/legendre.py
| 98
|
[
"pol"
] | false
| 2
| 7.36
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
acquire
|
@CanIgnoreReturnValue
public double acquire() {
return acquire(1);
}
|
Acquires a single permit from this {@code RateLimiter}, blocking until the request can be
granted. Tells the amount of time slept, if any.
<p>This method is equivalent to {@code acquire(1)}.
@return time spent sleeping to enforce rate, in seconds; 0.0 if not rate-limited
@since 16.0 (present in 13.0 with {@code void} return type})
|
java
|
android/guava/src/com/google/common/util/concurrent/RateLimiter.java
| 289
|
[] | true
| 1
| 6.8
|
google/guava
| 51,352
|
javadoc
| false
|
|
isTrue
|
public static void isTrue(final boolean expression, final String message, final Object... values) {
if (!expression) {
throw new IllegalArgumentException(getMessage(message, values));
}
}
|
Validate that the argument condition is {@code true}; otherwise
throwing an exception with the specified message. This method is useful when
validating according to an arbitrary boolean expression, such as validating a
primitive number or using your own custom validation expression.
<pre>{@code
Validate.isTrue(i >= min && i <= max, "The value must be between %d and %d", min, max);}</pre>
@param expression the boolean expression to check.
@param message the {@link String#format(String, Object...)} exception message if invalid, not null.
@param values the optional values for the formatted exception message, null array not recommended.
@throws IllegalArgumentException if expression is {@code false}.
@see #isTrue(boolean)
@see #isTrue(boolean, String, long)
@see #isTrue(boolean, String, double)
@see #isTrue(boolean, Supplier)
|
java
|
src/main/java/org/apache/commons/lang3/Validate.java
| 572
|
[
"expression",
"message"
] |
void
| true
| 2
| 6.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
main
|
public static void main(String[] args) throws Exception {
int requiredArgs = 6;
Assert.state(args.length >= requiredArgs, () -> "Usage: " + SpringApplicationAotProcessor.class.getName()
+ " <applicationMainClass> <sourceOutput> <resourceOutput> <classOutput> <groupId> <artifactId> <originalArgs...>");
Class<?> application = Class.forName(args[0]);
Settings settings = Settings.builder()
.sourceOutput(Paths.get(args[1]))
.resourceOutput(Paths.get(args[2]))
.classOutput(Paths.get(args[3]))
.groupId((StringUtils.hasText(args[4])) ? args[4] : "unspecified")
.artifactId(args[5])
.build();
String[] applicationArgs = (args.length > requiredArgs) ? Arrays.copyOfRange(args, requiredArgs, args.length)
: new String[0];
new SpringApplicationAotProcessor(application, settings, applicationArgs).process();
}
|
Create a new processor for the specified application and settings.
@param application the application main class
@param settings the general AOT processor settings
@param applicationArgs the arguments to provide to the main method
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringApplicationAotProcessor.java
| 82
|
[
"args"
] |
void
| true
| 3
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
translate
|
@SuppressWarnings("resource") // Caller closes writer
public final void translate(final CharSequence input, final Writer writer) throws IOException {
Objects.requireNonNull(writer, "writer");
if (input == null) {
return;
}
int pos = 0;
final int len = input.length();
while (pos < len) {
final int consumed = translate(input, pos, writer);
if (consumed == 0) {
// inlined implementation of Character.toChars(Character.codePointAt(input, pos))
// avoids allocating temp char arrays and duplicate checks
final char c1 = input.charAt(pos);
writer.write(c1);
pos++;
if (Character.isHighSurrogate(c1) && pos < len) {
final char c2 = input.charAt(pos);
if (Character.isLowSurrogate(c2)) {
writer.write(c2);
pos++;
}
}
continue;
}
// contract with translators is that they have to understand code points
// and they just took care of a surrogate pair
for (int pt = 0; pt < consumed; pt++) {
pos += Character.charCount(Character.codePointAt(input, pos));
}
}
}
|
Translate an input onto a Writer. This is intentionally final as its algorithm is
tightly coupled with the abstract method of this class.
@param input CharSequence that is being translated.
@param writer Writer to translate the text to.
@throws IOException if and only if the Writer produces an IOException.
|
java
|
src/main/java/org/apache/commons/lang3/text/translate/CharSequenceTranslator.java
| 103
|
[
"input",
"writer"
] |
void
| true
| 8
| 6.88
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
printBanner
|
private void printBanner() {
String version = getClass().getPackage().getImplementationVersion();
version = (version != null) ? " (v" + version + ")" : "";
System.out.println(ansi("Spring Boot", Code.BOLD).append(version, Code.FAINT));
System.out.println(ansi("Hit TAB to complete. Type 'help' and hit RETURN for help, and 'exit' to quit."));
}
|
Run the shell until the user exists.
@throws Exception on error
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/shell/Shell.java
| 142
|
[] |
void
| true
| 2
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
slice
|
Records slice(int position, int size);
|
Return a slice of records from this instance, which is a view into this set starting from the given position
and with the given size limit.
If the size is beyond the end of the records, the end will be based on the size of the records at the time of the read.
If this records set is already sliced, the position will be taken relative to that slicing.
@param position The start position to begin the read from. The position should be aligned to
the batch boundary, else the returned records can't be iterated.
@param size The number of bytes after the start position to include
@return A sliced wrapper on this message set limited based on the given position and size
|
java
|
clients/src/main/java/org/apache/kafka/common/record/Records.java
| 107
|
[
"position",
"size"
] |
Records
| true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
equals
|
private static boolean equals(final WildcardType wildcardType, final Type type) {
if (type instanceof WildcardType) {
final WildcardType other = (WildcardType) type;
return equals(getImplicitLowerBounds(wildcardType), getImplicitLowerBounds(other))
&& equals(getImplicitUpperBounds(wildcardType), getImplicitUpperBounds(other));
}
return false;
}
|
Tests whether {@code wildcardType} equals {@code type}.
@param wildcardType LHS.
@param type RHS.
@return Whether {@code wildcardType} equals {@code type}.
|
java
|
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
| 528
|
[
"wildcardType",
"type"
] | true
| 3
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
to_period
|
def to_period(
self,
freq: Frequency | None = None,
axis: Axis = 0,
copy: bool | lib.NoDefault = lib.no_default,
) -> DataFrame:
"""
Convert DataFrame from DatetimeIndex to PeriodIndex.
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed). Either index of columns can be
converted, depending on `axis` argument.
Parameters
----------
freq : str, default
Frequency of the PeriodIndex.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default).
copy : bool, default False
This keyword is now ignored; changing its value will have no
impact on the method.
.. deprecated:: 3.0.0
This keyword is ignored and will be removed in pandas 4.0. Since
pandas 3.0, this method always returns a new object using a lazy
copy mechanism that defers copies until necessary
(Copy-on-Write). See the `user guide on Copy-on-Write
<https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
for more details.
Returns
-------
DataFrame
The DataFrame with the converted PeriodIndex.
See Also
--------
Series.to_period: Equivalent method for Series.
Series.dt.to_period: Convert DateTime column values.
Examples
--------
>>> idx = pd.to_datetime(
... [
... "2001-03-31 00:00:00",
... "2002-05-31 00:00:00",
... "2003-08-31 00:00:00",
... ]
... )
>>> idx
DatetimeIndex(['2001-03-31', '2002-05-31', '2003-08-31'],
dtype='datetime64[s]', freq=None)
>>> idx.to_period("M")
PeriodIndex(['2001-03', '2002-05', '2003-08'], dtype='period[M]')
For the yearly frequency
>>> idx.to_period("Y")
PeriodIndex(['2001', '2002', '2003'], dtype='period[Y-DEC]')
"""
self._check_copy_deprecation(copy)
new_obj = self.copy(deep=False)
axis_name = self._get_axis_name(axis)
old_ax = getattr(self, axis_name)
if not isinstance(old_ax, DatetimeIndex):
raise TypeError(f"unsupported Type {type(old_ax).__name__}")
new_ax = old_ax.to_period(freq=freq)
setattr(new_obj, axis_name, new_ax)
return new_obj
|
Convert DataFrame from DatetimeIndex to PeriodIndex.
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed). Either index of columns can be
converted, depending on `axis` argument.
Parameters
----------
freq : str, default
Frequency of the PeriodIndex.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default).
copy : bool, default False
This keyword is now ignored; changing its value will have no
impact on the method.
.. deprecated:: 3.0.0
This keyword is ignored and will be removed in pandas 4.0. Since
pandas 3.0, this method always returns a new object using a lazy
copy mechanism that defers copies until necessary
(Copy-on-Write). See the `user guide on Copy-on-Write
<https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
for more details.
Returns
-------
DataFrame
The DataFrame with the converted PeriodIndex.
See Also
--------
Series.to_period: Equivalent method for Series.
Series.dt.to_period: Convert DateTime column values.
Examples
--------
>>> idx = pd.to_datetime(
... [
... "2001-03-31 00:00:00",
... "2002-05-31 00:00:00",
... "2003-08-31 00:00:00",
... ]
... )
>>> idx
DatetimeIndex(['2001-03-31', '2002-05-31', '2003-08-31'],
dtype='datetime64[s]', freq=None)
>>> idx.to_period("M")
PeriodIndex(['2001-03', '2002-05', '2003-08'], dtype='period[M]')
For the yearly frequency
>>> idx.to_period("Y")
PeriodIndex(['2001', '2002', '2003'], dtype='period[Y-DEC]')
|
python
|
pandas/core/frame.py
| 14,705
|
[
"self",
"freq",
"axis",
"copy"
] |
DataFrame
| true
| 2
| 7.76
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
span
|
@Override
public Range<K> span() {
Entry<Cut<K>, RangeMapEntry<K, V>> firstEntry = entriesByLowerBound.firstEntry();
Entry<Cut<K>, RangeMapEntry<K, V>> lastEntry = entriesByLowerBound.lastEntry();
// Either both are null or neither is, but we check both to satisfy the nullness checker.
if (firstEntry == null || lastEntry == null) {
throw new NoSuchElementException();
}
return Range.create(
firstEntry.getValue().getKey().lowerBound, lastEntry.getValue().getKey().upperBound);
}
|
Returns the range that spans the given range and entry, if the entry can be coalesced.
|
java
|
android/guava/src/com/google/common/collect/TreeRangeMap.java
| 194
|
[] | true
| 3
| 7.2
|
google/guava
| 51,352
|
javadoc
| false
|
|
_infer_tz_from_endpoints
|
def _infer_tz_from_endpoints(
start: Timestamp, end: Timestamp, tz: tzinfo | None
) -> tzinfo | None:
"""
If a timezone is not explicitly given via `tz`, see if one can
be inferred from the `start` and `end` endpoints. If more than one
of these inputs provides a timezone, require that they all agree.
Parameters
----------
start : Timestamp
end : Timestamp
tz : tzinfo or None
Returns
-------
tz : tzinfo or None
Raises
------
TypeError : if start and end timezones do not agree
"""
try:
inferred_tz = timezones.infer_tzinfo(start, end)
except AssertionError as err:
# infer_tzinfo raises AssertionError if passed mismatched timezones
raise TypeError(
"Start and end cannot both be tz-aware with different timezones"
) from err
inferred_tz = timezones.maybe_get_tz(inferred_tz)
tz = timezones.maybe_get_tz(tz)
if tz is not None and inferred_tz is not None:
if not timezones.tz_compare(inferred_tz, tz):
raise AssertionError("Inferred time zone not equal to passed time zone")
elif inferred_tz is not None:
tz = inferred_tz
return tz
|
If a timezone is not explicitly given via `tz`, see if one can
be inferred from the `start` and `end` endpoints. If more than one
of these inputs provides a timezone, require that they all agree.
Parameters
----------
start : Timestamp
end : Timestamp
tz : tzinfo or None
Returns
-------
tz : tzinfo or None
Raises
------
TypeError : if start and end timezones do not agree
|
python
|
pandas/core/arrays/datetimes.py
| 2,847
|
[
"start",
"end",
"tz"
] |
tzinfo | None
| true
| 5
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
isUnbindableBean
|
private boolean isUnbindableBean(ConfigurationPropertyName name, Bindable<?> target, Context context) {
for (ConfigurationPropertySource source : context.getSources()) {
if (source.containsDescendantOf(name) == ConfigurationPropertyState.PRESENT) {
// We know there are properties to bind so we can't bypass anything
return false;
}
}
Class<?> resolved = target.getType().resolve(Object.class);
if (resolved.isPrimitive() || NON_BEAN_CLASSES.contains(resolved)) {
return true;
}
return resolved.getName().startsWith("java.");
}
|
Bind the specified target {@link Bindable} using this binder's
{@link ConfigurationPropertySource property sources} or create a new instance using
the type of the {@link Bindable} if the result of the binding is {@code null}.
@param name the configuration property name to bind
@param target the target bindable
@param handler the bind handler (may be {@code null})
@param <T> the bound or created type
@return the bound or created object
@since 2.2.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Binder.java
| 525
|
[
"name",
"target",
"context"
] | true
| 4
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
writeFileSync
|
function writeFileSync(path, data, options) {
options = getOptions(options, {
encoding: 'utf8',
mode: 0o666,
flag: 'w',
flush: false,
});
const flush = options.flush ?? false;
validateBoolean(flush, 'options.flush');
const flag = options.flag || 'w';
// C++ fast path for string data and UTF8 encoding
if (typeof data === 'string' && (options.encoding === 'utf8' || options.encoding === 'utf-8')) {
if (!isInt32(path)) {
path = getValidatedPath(path);
}
return binding.writeFileUtf8(
path,
data,
stringToFlags(flag),
parseFileMode(options.mode, 'mode', 0o666),
);
}
if (!isArrayBufferView(data)) {
validateStringAfterArrayBufferView(data, 'data');
data = Buffer.from(data, options.encoding || 'utf8');
}
const isUserFd = isFd(path); // File descriptor ownership
const fd = isUserFd ? path : fs.openSync(path, flag, options.mode);
let offset = 0;
let length = data.byteLength;
try {
while (length > 0) {
const written = fs.writeSync(fd, data, offset, length);
offset += written;
length -= written;
}
if (flush) {
fs.fsyncSync(fd);
}
} finally {
if (!isUserFd) fs.closeSync(fd);
}
}
|
Synchronously writes data to the file.
@param {string | Buffer | URL | number} path
@param {string | Buffer | TypedArray | DataView} data
@param {{
encoding?: string | null;
mode?: number;
flag?: string;
flush?: boolean;
} | string} [options]
@returns {void}
|
javascript
|
lib/fs.js
| 2,372
|
[
"path",
"data",
"options"
] | false
| 12
| 6.24
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
findCustomEditor
|
@Override
public @Nullable PropertyEditor findCustomEditor(@Nullable Class<?> requiredType, @Nullable String propertyPath) {
Class<?> requiredTypeToUse = requiredType;
if (propertyPath != null) {
if (this.customEditorsForPath != null) {
// Check property-specific editor first.
PropertyEditor editor = getCustomEditor(propertyPath, requiredType);
if (editor == null) {
List<String> strippedPaths = new ArrayList<>();
addStrippedPropertyPaths(strippedPaths, "", propertyPath);
for (Iterator<String> it = strippedPaths.iterator(); it.hasNext() && editor == null;) {
String strippedPath = it.next();
editor = getCustomEditor(strippedPath, requiredType);
}
}
if (editor != null) {
return editor;
}
}
if (requiredType == null) {
requiredTypeToUse = getPropertyType(propertyPath);
}
}
// No property-specific editor -> check type-specific editor.
return getCustomEditor(requiredTypeToUse);
}
|
Copy the default editors registered in this instance to the given target registry.
@param target the target registry to copy to
|
java
|
spring-beans/src/main/java/org/springframework/beans/PropertyEditorRegistrySupport.java
| 324
|
[
"requiredType",
"propertyPath"
] |
PropertyEditor
| true
| 8
| 7.04
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
uuidToByteArray
|
public static byte[] uuidToByteArray(final UUID src, final byte[] dst, final int dstPos, final int nBytes) {
if (0 == nBytes) {
return dst;
}
if (nBytes > 16) {
throw new IllegalArgumentException("nBytes > 16");
}
longToByteArray(src.getMostSignificantBits(), 0, dst, dstPos, Math.min(nBytes, 8));
if (nBytes >= 8) {
longToByteArray(src.getLeastSignificantBits(), 0, dst, dstPos + 8, nBytes - 8);
}
return dst;
}
|
Converts UUID into an array of byte using the default (little-endian, LSB0) byte and bit ordering.
@param src the UUID to convert.
@param dst the destination array.
@param dstPos the position in {@code dst} where to copy the result.
@param nBytes the number of bytes to copy to {@code dst}, must be smaller or equal to the width of the input (from srcPos to MSB).
@return {@code dst}.
@throws NullPointerException if {@code dst} is {@code null}.
@throws IllegalArgumentException if {@code nBytes > 16}.
@throws ArrayIndexOutOfBoundsException if {@code dstPos + nBytes > dst.length}.
|
java
|
src/main/java/org/apache/commons/lang3/Conversion.java
| 1,374
|
[
"src",
"dst",
"dstPos",
"nBytes"
] | true
| 4
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
writeTokenText
|
function writeTokenText(token: SyntaxKind, writer: (s: string) => void, pos?: number): number {
const tokenString = tokenToString(token)!;
writer(tokenString);
return pos! < 0 ? pos! : pos! + tokenString.length;
}
|
Emits a list without brackets or raising events.
NOTE: You probably don't want to call this directly and should be using `emitList` or `emitExpressionList` instead.
|
typescript
|
src/compiler/emitter.ts
| 4,934
|
[
"token",
"writer",
"pos?"
] | true
| 2
| 6.56
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
createJarFileForLocalFile
|
private JarFile createJarFileForLocalFile(URL url, Runtime.Version version, Consumer<JarFile> closeAction)
throws IOException {
String path = UrlDecoder.decode(url.getPath());
return new UrlJarFile(new File(path), version, closeAction);
}
|
Create a new {@link UrlJarFile} or {@link UrlNestedJarFile} instance.
@param jarFileUrl the jar file URL
@param closeAction the action to call when the file is closed
@return a new {@link JarFile} instance
@throws IOException on I/O error
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/jar/UrlJarFileFactory.java
| 77
|
[
"url",
"version",
"closeAction"
] |
JarFile
| true
| 1
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
stream
|
public Stream<T> stream() {
return stream;
}
|
Converts the FailableStream into an equivalent stream.
@return A stream, which will return the same elements, which this FailableStream would return.
|
java
|
src/main/java/org/apache/commons/lang3/stream/Streams.java
| 476
|
[] | true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
mid
|
public static String mid(final String str, int pos, final int len) {
if (str == null) {
return null;
}
if (len < 0 || pos > str.length()) {
return EMPTY;
}
if (pos < 0) {
pos = 0;
}
if (str.length() <= pos + len) {
return str.substring(pos);
}
return str.substring(pos, pos + len);
}
|
Gets {@code len} characters from the middle of a String.
<p>
If {@code len} characters are not available, the remainder of the String will be returned without an exception. If the String is {@code null},
{@code null} will be returned. An empty String is returned if len is negative or exceeds the length of {@code str}.
</p>
<pre>
StringUtils.mid(null, *, *) = null
StringUtils.mid(*, *, -ve) = ""
StringUtils.mid("", 0, *) = ""
StringUtils.mid("abc", 0, 2) = "ab"
StringUtils.mid("abc", 0, 4) = "abc"
StringUtils.mid("abc", 2, 4) = "c"
StringUtils.mid("abc", 4, 2) = ""
StringUtils.mid("abc", -2, 2) = "ab"
</pre>
@param str the String to get the characters from, may be null.
@param pos the position to start from, negative treated as zero.
@param len the length of the required String.
@return the middle characters, {@code null} if null String input.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 5,330
|
[
"str",
"pos",
"len"
] |
String
| true
| 6
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
copyOrNull
|
private static String @Nullable [] copyOrNull(String @Nullable [] state) {
if (state == null) {
return null;
}
return copy(state);
}
|
Copy the contents of this message to the given target message.
@param target the {@code MailMessage} to copy to
|
java
|
spring-context-support/src/main/java/org/springframework/mail/SimpleMailMessage.java
| 243
|
[
"state"
] | true
| 2
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
compareTo
|
@Override
public int compareTo(final MutableFloat other) {
return Float.compare(this.value, other.value);
}
|
Compares this mutable to another in ascending order.
@param other the other mutable to compare to, not null.
@return negative if this is less, zero if equal, positive if greater.
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableFloat.java
| 138
|
[
"other"
] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
maybeCompleteLeaveInProgress
|
private boolean maybeCompleteLeaveInProgress() {
if (leaveGroupInProgress.isPresent()) {
leaveGroupInProgress.get().complete(null);
leaveGroupInProgress = Optional.empty();
return true;
}
return false;
}
|
Complete the leave in progress (if any). This is expected to be used to complete the leave
in progress when a member receives the response to the leave heartbeat.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsMembershipManager.java
| 851
|
[] | true
| 2
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
davies_bouldin_score
|
def davies_bouldin_score(X, labels):
"""Compute the Davies-Bouldin score.
The score is defined as the average similarity measure of each cluster with
its most similar cluster, where similarity is the ratio of within-cluster
distances to between-cluster distances. Thus, clusters which are farther
apart and less dispersed will result in a better score.
The minimum score is zero, with lower values indicating better clustering.
Read more in the :ref:`User Guide <davies-bouldin_index>`.
.. versionadded:: 0.20
Parameters
----------
X : array-like of shape (n_samples, n_features)
A list of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like of shape (n_samples,)
Predicted labels for each sample.
Returns
-------
score: float
The resulting Davies-Bouldin score.
References
----------
.. [1] Davies, David L.; Bouldin, Donald W. (1979).
`"A Cluster Separation Measure"
<https://ieeexplore.ieee.org/document/4766909>`__.
IEEE Transactions on Pattern Analysis and Machine Intelligence.
PAMI-1 (2): 224-227
Examples
--------
>>> from sklearn.metrics import davies_bouldin_score
>>> X = [[0, 1], [1, 1], [3, 4]]
>>> labels = [0, 0, 1]
>>> davies_bouldin_score(X, labels)
0.12...
"""
xp, _, device_ = get_namespace_and_device(X, labels)
X, labels = check_X_y(X, labels)
le = LabelEncoder()
labels = le.fit_transform(labels)
n_samples, _ = X.shape
n_labels = le.classes_.shape[0]
check_number_of_labels(n_labels, n_samples)
dtype = _max_precision_float_dtype(xp, device_)
intra_dists = xp.zeros(n_labels, dtype=dtype, device=device_)
centroids = xp.zeros((n_labels, X.shape[1]), dtype=dtype, device=device_)
for k in range(n_labels):
cluster_k = _safe_indexing(X, xp.nonzero(labels == k)[0])
centroid = _average(cluster_k, axis=0, xp=xp)
centroids[k, ...] = centroid
intra_dists[k] = _average(
pairwise_distances(cluster_k, xp.stack([centroid])), xp=xp
)
centroid_distances = pairwise_distances(centroids)
zero = xp.asarray(0.0, device=device_, dtype=dtype)
if xp.all(xpx.isclose(intra_dists, zero)) or xp.all(
xpx.isclose(centroid_distances, zero)
):
return 0.0
centroid_distances[centroid_distances == 0] = xp.inf
combined_intra_dists = intra_dists[:, None] + intra_dists
scores = xp.max(combined_intra_dists / centroid_distances, axis=1)
return float(_average(scores, xp=xp))
|
Compute the Davies-Bouldin score.
The score is defined as the average similarity measure of each cluster with
its most similar cluster, where similarity is the ratio of within-cluster
distances to between-cluster distances. Thus, clusters which are farther
apart and less dispersed will result in a better score.
The minimum score is zero, with lower values indicating better clustering.
Read more in the :ref:`User Guide <davies-bouldin_index>`.
.. versionadded:: 0.20
Parameters
----------
X : array-like of shape (n_samples, n_features)
A list of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like of shape (n_samples,)
Predicted labels for each sample.
Returns
-------
score: float
The resulting Davies-Bouldin score.
References
----------
.. [1] Davies, David L.; Bouldin, Donald W. (1979).
`"A Cluster Separation Measure"
<https://ieeexplore.ieee.org/document/4766909>`__.
IEEE Transactions on Pattern Analysis and Machine Intelligence.
PAMI-1 (2): 224-227
Examples
--------
>>> from sklearn.metrics import davies_bouldin_score
>>> X = [[0, 1], [1, 1], [3, 4]]
>>> labels = [0, 0, 1]
>>> davies_bouldin_score(X, labels)
0.12...
|
python
|
sklearn/metrics/cluster/_unsupervised.py
| 413
|
[
"X",
"labels"
] | false
| 4
| 7.12
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
mode
|
def mode(self, dropna: bool = True) -> Series:
"""
Return the mode(s) of the Series.
The mode is the value that appears most often. There can be multiple modes.
Always returns Series even if only one value is returned.
Parameters
----------
dropna : bool, default True
Don't consider counts of NaN/NaT.
Returns
-------
Series
Modes of the Series in sorted order.
See Also
--------
numpy.mode : Equivalent numpy function for computing median.
Series.sum : Sum of the values.
Series.median : Median of the values.
Series.std : Standard deviation of the values.
Series.var : Variance of the values.
Series.min : Minimum value.
Series.max : Maximum value.
Examples
--------
>>> s = pd.Series([2, 4, 2, 2, 4, None])
>>> s.mode()
0 2.0
dtype: float64
More than one mode:
>>> s = pd.Series([2, 4, 8, 2, 4, None])
>>> s.mode()
0 2.0
1 4.0
dtype: float64
With and without considering null value:
>>> s = pd.Series([2, 4, None, None, 4, None])
>>> s.mode(dropna=False)
0 NaN
dtype: float64
>>> s = pd.Series([2, 4, None, None, 4, None])
>>> s.mode()
0 4.0
dtype: float64
"""
# TODO: Add option for bins like value_counts()
values = self._values
if isinstance(values, np.ndarray):
res_values, _ = algorithms.mode(values, dropna=dropna)
else:
res_values = values._mode(dropna=dropna)
# Ensure index is type stable (should always use int index)
return self._constructor(
res_values,
index=range(len(res_values)),
name=self.name,
copy=False,
dtype=self.dtype,
).__finalize__(self, method="mode")
|
Return the mode(s) of the Series.
The mode is the value that appears most often. There can be multiple modes.
Always returns Series even if only one value is returned.
Parameters
----------
dropna : bool, default True
Don't consider counts of NaN/NaT.
Returns
-------
Series
Modes of the Series in sorted order.
See Also
--------
numpy.mode : Equivalent numpy function for computing median.
Series.sum : Sum of the values.
Series.median : Median of the values.
Series.std : Standard deviation of the values.
Series.var : Variance of the values.
Series.min : Minimum value.
Series.max : Maximum value.
Examples
--------
>>> s = pd.Series([2, 4, 2, 2, 4, None])
>>> s.mode()
0 2.0
dtype: float64
More than one mode:
>>> s = pd.Series([2, 4, 8, 2, 4, None])
>>> s.mode()
0 2.0
1 4.0
dtype: float64
With and without considering null value:
>>> s = pd.Series([2, 4, None, None, 4, None])
>>> s.mode(dropna=False)
0 NaN
dtype: float64
>>> s = pd.Series([2, 4, None, None, 4, None])
>>> s.mode()
0 4.0
dtype: float64
|
python
|
pandas/core/series.py
| 2,085
|
[
"self",
"dropna"
] |
Series
| true
| 3
| 8.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
instantiate
|
private Object instantiate(String beanName, RootBeanDefinition mbd,
@Nullable Object factoryBean, Method factoryMethod, @Nullable Object[] args) {
try {
return this.beanFactory.getInstantiationStrategy().instantiate(
mbd, beanName, this.beanFactory, factoryBean, factoryMethod, args);
}
catch (Throwable ex) {
throw new BeanCreationException(mbd.getResourceDescription(), beanName, ex.getMessage(), ex);
}
}
|
Instantiate the bean using a named factory method. The method may be static, if the
bean definition parameter specifies a class, rather than a "factory-bean", or
an instance variable on a factory object itself configured using Dependency Injection.
<p>Implementation requires iterating over the static or instance methods with the
name specified in the RootBeanDefinition (the method may be overloaded) and trying
to match with the parameters. We don't have the types attached to constructor args,
so trial and error is the only way to go here. The explicitArgs array may contain
argument values passed in programmatically via the corresponding getBean method.
@param beanName the name of the bean
@param mbd the merged bean definition for the bean
@param explicitArgs argument values passed in programmatically via the getBean
method, or {@code null} if none (-> use constructor argument values from bean definition)
@return a BeanWrapper for the new instance
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/ConstructorResolver.java
| 649
|
[
"beanName",
"mbd",
"factoryBean",
"factoryMethod",
"args"
] |
Object
| true
| 2
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
is_python_scalar
|
def is_python_scalar(x: object) -> TypeIs[complex]: # numpydoc ignore=PR01,RT01
"""Return True if `x` is a Python scalar, False otherwise."""
# isinstance(x, float) returns True for np.float64
# isinstance(x, complex) returns True for np.complex128
# bool is a subclass of int
return isinstance(x, int | float | complex) and not is_numpy_array(x)
|
Return True if `x` is a Python scalar, False otherwise.
|
python
|
sklearn/externals/array_api_extra/_lib/_utils/_helpers.py
| 148
|
[
"x"
] |
TypeIs[complex]
| true
| 2
| 6
|
scikit-learn/scikit-learn
| 64,340
|
unknown
| false
|
describe_cluster_snapshots
|
def describe_cluster_snapshots(self, cluster_identifier: str) -> list[str] | None:
"""
List snapshots for a cluster.
.. seealso::
- :external+boto3:py:meth:`Redshift.Client.describe_cluster_snapshots`
:param cluster_identifier: unique identifier of a cluster
"""
response = self.conn.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier)
if "Snapshots" not in response:
return None
snapshots = response["Snapshots"]
snapshots = [snapshot for snapshot in snapshots if snapshot["Status"]]
snapshots.sort(key=lambda x: x["SnapshotCreateTime"], reverse=True)
return snapshots
|
List snapshots for a cluster.
.. seealso::
- :external+boto3:py:meth:`Redshift.Client.describe_cluster_snapshots`
:param cluster_identifier: unique identifier of a cluster
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/redshift_cluster.py
| 124
|
[
"self",
"cluster_identifier"
] |
list[str] | None
| true
| 2
| 6.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
equals
|
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
SharePartitionOffsetInfo that = (SharePartitionOffsetInfo) o;
return startOffset == that.startOffset &&
Objects.equals(leaderEpoch, that.leaderEpoch) &&
Objects.equals(lag, that.lag);
}
|
Get the lag for the partition.
@return The lag of the partition.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/SharePartitionOffsetInfo.java
| 75
|
[
"o"
] | true
| 6
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
getTypeOnlyPromotionFix
|
function getTypeOnlyPromotionFix(sourceFile: SourceFile, symbolToken: Identifier, symbolName: string, program: Program): FixPromoteTypeOnlyImport | undefined {
const checker = program.getTypeChecker();
const symbol = checker.resolveName(symbolName, symbolToken, SymbolFlags.Value, /*excludeGlobals*/ true);
if (!symbol) return undefined;
const typeOnlyAliasDeclaration = checker.getTypeOnlyAliasDeclaration(symbol);
if (!typeOnlyAliasDeclaration || getSourceFileOfNode(typeOnlyAliasDeclaration) !== sourceFile) return undefined;
return { kind: ImportFixKind.PromoteTypeOnly, typeOnlyAliasDeclaration };
}
|
@param forceImportKeyword Indicates that the user has already typed `import`, so the result must start with `import`.
(In other words, do not allow `const x = require("...")` for JS files.)
@internal
|
typescript
|
src/services/codefixes/importFixes.ts
| 1,588
|
[
"sourceFile",
"symbolToken",
"symbolName",
"program"
] | true
| 4
| 6.56
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
insertInSequenceOrder
|
private void insertInSequenceOrder(Deque<ProducerBatch> deque, ProducerBatch batch) {
// When we are re-enqueueing and have enabled idempotence, the re-enqueued batch must always have a sequence.
if (batch.baseSequence() == RecordBatch.NO_SEQUENCE)
throw new IllegalStateException("Trying to re-enqueue a batch which doesn't have a sequence even " +
"though idempotency is enabled.");
if (!transactionManager.hasInflightBatches(batch.topicPartition))
throw new IllegalStateException("We are re-enqueueing a batch which is not tracked as part of the in flight " +
"requests. batch.topicPartition: " + batch.topicPartition + "; batch.baseSequence: " + batch.baseSequence());
ProducerBatch firstBatchInQueue = deque.peekFirst();
if (firstBatchInQueue != null && firstBatchInQueue.hasSequence() && firstBatchInQueue.baseSequence() < batch.baseSequence()) {
// The incoming batch can't be inserted at the front of the queue without violating the sequence ordering.
// This means that the incoming batch should be placed somewhere further back.
// We need to find the right place for the incoming batch and insert it there.
// We will only enter this branch if we have multiple inflights sent to different brokers and we need to retry
// the inflight batches.
//
// Since we reenqueue exactly one batch a time and ensure that the queue is ordered by sequence always, it
// is a simple linear scan of a subset of the in flight batches to find the right place in the queue each time.
List<ProducerBatch> orderedBatches = new ArrayList<>();
while (deque.peekFirst() != null && deque.peekFirst().hasSequence() && deque.peekFirst().baseSequence() < batch.baseSequence())
orderedBatches.add(deque.pollFirst());
log.debug("Reordered incoming batch with sequence {} for partition {}. It was placed in the queue at " +
"position {}", batch.baseSequence(), batch.topicPartition, orderedBatches.size());
// Either we have reached a point where there are batches without a sequence (ie. never been drained
// and are hence in order by default), or the batch at the front of the queue has a sequence greater
// than the incoming batch. This is the right place to add the incoming batch.
deque.addFirst(batch);
// Now we have to re insert the previously queued batches in the right order.
for (int i = orderedBatches.size() - 1; i >= 0; --i) {
deque.addFirst(orderedBatches.get(i));
}
// At this point, the incoming batch has been queued in the correct place according to its sequence.
} else {
deque.addFirst(batch);
}
}
|
Split the big batch that has been rejected and reenqueue the split batches in to the accumulator.
@return the number of split batches.
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java
| 552
|
[
"deque",
"batch"
] |
void
| true
| 10
| 7.2
|
apache/kafka
| 31,560
|
javadoc
| false
|
uncompletedEvents
|
public List<CompletableEvent<?>> uncompletedEvents() {
// The following code does not use the Java Collections Streams API to reduce overhead in the critical
// path of the ConsumerNetworkThread loop.
List<CompletableEvent<?>> events = new ArrayList<>();
for (CompletableEvent<?> event : tracked) {
if (!event.future().isDone())
events.add(event);
}
return events;
}
|
It is possible for the {@link AsyncKafkaConsumer#close() consumer to close} before completing the processing of
all the events in the queue. In this case, we need to
{@link CompletableFuture#completeExceptionally(Throwable) expire} any remaining events.
<p/>
Check each of the {@link #add(CompletableEvent) previously-added} {@link CompletableEvent completable events},
and for any that are incomplete, expire them. Also check the core event queue for any incomplete events and
likewise expire them.
<p/>
<em>Note</em>: because this is called in the context of {@link AsyncKafkaConsumer#close() closing consumer},
don't take the deadline into consideration, just close it regardless.
@param events Events from a queue that have not yet been tracked that also need to be reviewed
@return The number of events that were expired
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEventReaper.java
| 163
|
[] | true
| 2
| 7.76
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
_from_sequence
|
def _from_sequence(
    cls, scalars, *, dtype: Dtype | None = None, copy: bool = False
) -> Self:
    """
    Construct a new ExtensionArray from a sequence of scalars.

    Parameters
    ----------
    scalars : Sequence
        Each element will be an instance of the scalar type for this
        array, ``cls.dtype.type`` or be converted into this type in this method.
    dtype : dtype, optional
        Construct for this particular dtype. This should be a Dtype
        compatible with the ExtensionArray.
    copy : bool, default False
        If True, copy the underlying data.

    Returns
    -------
    ExtensionArray

    See Also
    --------
    api.extensions.ExtensionArray._from_sequence_of_strings : Construct a new
        ExtensionArray from a sequence of strings.
    api.extensions.ExtensionArray._hash_pandas_object : Hook for
        hash_pandas_object.

    Examples
    --------
    >>> pd.arrays.IntegerArray._from_sequence([4, 5])
    <IntegerArray>
    [4, 5]
    Length: 2, dtype: Int64
    """
    # Abstract hook: each concrete ExtensionArray subclass must supply its own
    # construction logic; the base class has no canonical storage format.
    raise AbstractMethodError(cls)
|
Construct a new ExtensionArray from a sequence of scalars.
Parameters
----------
scalars : Sequence
Each element will be an instance of the scalar type for this
array, ``cls.dtype.type`` or be converted into this type in this method.
dtype : dtype, optional
Construct for this particular dtype. This should be a Dtype
compatible with the ExtensionArray.
copy : bool, default False
If True, copy the underlying data.
Returns
-------
ExtensionArray
See Also
--------
api.extensions.ExtensionArray._from_sequence_of_strings : Construct a new
ExtensionArray from a sequence of strings.
api.extensions.ExtensionArray._hash_pandas_object : Hook for
hash_pandas_object.
Examples
--------
>>> pd.arrays.IntegerArray._from_sequence([4, 5])
<IntegerArray>
[4, 5]
Length: 2, dtype: Int64
|
python
|
pandas/core/arrays/base.py
| 282
|
[
"cls",
"scalars",
"dtype",
"copy"
] |
Self
| true
| 1
| 6.64
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
getMergedBeanDefinition
|
/**
 * Return a 'merged' BeanDefinition for the given bean name, consulting the parent
 * factory when the definition is not declared locally.
 *
 * @param name the name of the bean (may be an alias)
 * @return a (potentially merged) bean definition for the given bean
 * @throws BeansException if no such bean exists or the definition is invalid
 */
@Override
public BeanDefinition getMergedBeanDefinition(String name) throws BeansException {
    String beanName = transformedBeanName(name);
    var parentFactory = getParentBeanFactory();
    // Not defined here but a configurable parent exists -> delegate upward.
    if (parentFactory instanceof ConfigurableBeanFactory parent && !containsBeanDefinition(beanName)) {
        return parent.getMergedBeanDefinition(beanName);
    }
    // Otherwise resolve the merged bean definition locally.
    return getMergedLocalBeanDefinition(beanName);
}
|
Return a 'merged' BeanDefinition for the given bean name,
merging a child bean definition with its parent if necessary.
<p>This {@code getMergedBeanDefinition} considers bean definition
in ancestors as well.
@param name the name of the bean to retrieve the merged definition for
(may be an alias)
@return a (potentially merged) RootBeanDefinition for the given bean
@throws NoSuchBeanDefinitionException if there is no bean with the given name
@throws BeanDefinitionStoreException in case of an invalid bean definition
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
| 1,139
|
[
"name"
] |
BeanDefinition
| true
| 3
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
initializeBucketTreeMapsIfNeeded
|
/**
 * Lazily creates the negative/positive bucket {@link TreeMap}s on first use.
 *
 * <p>Both maps are always created together, so checking {@code negativeBuckets == null}
 * is sufficient. If a previously built {@code result} histogram exists, its buckets are
 * copied into the maps so later modifications see the already-accumulated counts.
 */
private void initializeBucketTreeMapsIfNeeded() {
    if (negativeBuckets == null) {
        negativeBuckets = new TreeMap<>();
        positiveBuckets = new TreeMap<>();
        // copy existing buckets to the maps
        if (result != null) {
            BucketIterator it = result.negativeBuckets().iterator();
            while (it.hasNext()) {
                negativeBuckets.put(it.peekIndex(), it.peekCount());
                it.advance();
            }
            // reuse the same iterator variable for the positive side
            it = result.positiveBuckets().iterator();
            while (it.hasNext()) {
                positiveBuckets.put(it.peekIndex(), it.peekCount());
                it.advance();
            }
        }
    }
}
|
Sets the given bucket of the negative buckets. If the bucket already exists, it will be replaced.
Buckets may be set in arbitrary order. However, for best performance and minimal allocations,
buckets should be set in order of increasing index and all negative buckets should be set before positive buckets.
@param index the index of the bucket
@param count the count of the bucket, must be at least 1
@return the builder
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogramBuilder.java
| 186
|
[] |
void
| true
| 5
| 8.4
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
checkDependencies
|
/**
 * Perform a dependency check that all properties exposed have been set, if desired.
 *
 * <p>Dependency checks can be objects (collaborating beans), simple (primitives and
 * String), or all (both). A property participates in the check only when it is writable
 * and no value for it is present in the supplied property values.
 *
 * @param beanName the name of the bean
 * @param mbd the merged bean definition the bean was created with
 * @param pds the relevant property descriptors for the target bean
 * @param pvs the property values to be applied to the bean (may be {@code null})
 * @throws UnsatisfiedDependencyException if a required property has not been set
 */
protected void checkDependencies(
        String beanName, AbstractBeanDefinition mbd, PropertyDescriptor[] pds, @Nullable PropertyValues pvs)
        throws UnsatisfiedDependencyException {
    int dependencyCheck = mbd.getDependencyCheck();
    for (PropertyDescriptor pd : pds) {
        if (pd.getWriteMethod() != null && (pvs == null || !pvs.contains(pd.getName()))) {
            // "Simple" means a primitive/String-like type per BeanUtils.isSimpleProperty.
            boolean isSimple = BeanUtils.isSimpleProperty(pd.getPropertyType());
            // Unsatisfied when the configured check mode covers this property's category.
            boolean unsatisfied = (dependencyCheck == AbstractBeanDefinition.DEPENDENCY_CHECK_ALL) ||
                    (isSimple && dependencyCheck == AbstractBeanDefinition.DEPENDENCY_CHECK_SIMPLE) ||
                    (!isSimple && dependencyCheck == AbstractBeanDefinition.DEPENDENCY_CHECK_OBJECTS);
            if (unsatisfied) {
                throw new UnsatisfiedDependencyException(mbd.getResourceDescription(), beanName, pd.getName(),
                        "Set this property value or disable dependency checking for this bean.");
            }
        }
    }
}
|
Perform a dependency check that all properties exposed have been set,
if desired. Dependency checks can be objects (collaborating beans),
simple (primitives and String), or all (both).
@param beanName the name of the bean
@param mbd the merged bean definition the bean was created with
@param pds the relevant property descriptors for the target bean
@param pvs the property values to be applied to the bean
@see #isExcludedFromDependencyCheck(java.beans.PropertyDescriptor)
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractAutowireCapableBeanFactory.java
| 1,634
|
[
"beanName",
"mbd",
"pds",
"pvs"
] |
void
| true
| 9
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
originals
|
/**
 * Returns a copy of the original user-supplied configuration values.
 *
 * <p>The copy is a {@code RecordingMap}, which presumably tracks key accesses —
 * confirm against its declaration before relying on that behavior.
 *
 * @return a fresh RecordingMap containing all original entries
 */
public Map<String, Object> originals() {
    final Map<String, Object> snapshot = new RecordingMap<>();
    snapshot.putAll(originals);
    return snapshot;
}
|
Called directly after user configs got parsed (and thus default values got set).
This allows to change default values for "secondary defaults" if required.
@param parsedValues unmodifiable map of current configuration
@return a map of updates that should be applied to the configuration (will be validated to prevent bad updates)
|
java
|
clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
| 243
|
[] | true
| 1
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
replace
|
/**
 * Replaces all occurrences of variables with their matching values from the resolver,
 * using the given source array as a template. The array itself is not altered.
 *
 * @param source the character array to replace in; {@code null} returns {@code null}
 * @return the result of the replace operation
 */
public String replace(final char[] source) {
    if (source == null) {
        return null;
    }
    final int length = source.length;
    final StrBuilder working = new StrBuilder(length);
    working.append(source);
    substitute(working, 0, length);
    return working.toString();
}
|
Replaces all the occurrences of variables with their matching values
from the resolver using the given source array as a template.
The array is not altered by this method.
@param source the character array to replace in, not altered, null returns null.
@return the result of the replace operation.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrSubstitutor.java
| 511
|
[
"source"
] |
String
| true
| 2
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
notEmpty
|
/**
 * Validates that the given map is neither {@code null} nor empty; throws otherwise.
 *
 * @param <T> the map type
 * @param map the map to check
 * @return the validated map (never {@code null}), for chaining
 */
public static <T extends Map<?, ?>> T notEmpty(final T map) {
    // Delegate to the message-accepting overload with the default "map is empty" message.
    return notEmpty(map, DEFAULT_NOT_EMPTY_MAP_EX_MESSAGE);
}
|
<p>Validates that the specified argument map is neither {@code null}
nor a size of zero (no elements); otherwise throwing an exception.
<pre>Validate.notEmpty(myMap);</pre>
<p>The message in the exception is "The validated map is
empty".
@param <T> the map type.
@param map the map to check, validated not null by this method.
@return the validated map (never {@code null} method for chaining).
@throws NullPointerException if the map is {@code null}.
@throws IllegalArgumentException if the map is empty.
@see #notEmpty(Map, String, Object...)
|
java
|
src/main/java/org/apache/commons/lang3/Validate.java
| 847
|
[
"map"
] |
T
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
get_git_version
|
def get_git_version(self) -> str:
    """
    Return a version string identifying the state of the underlying git repo.

    The string encodes the sha of the current HEAD commit and whether the
    working tree has uncommitted changes:

    * dirty working tree: ``.dev0+{sha}.dirty``
    * clean working tree: ``.release:{sha}``
    * gitpython missing or no usable repo: ``""`` (empty string)
    * repo handle unexpectedly falsy: ``"no_git_version"``

    Example dirty version: ".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65.dirty".
    Example release version: ".release:2f635dc265e78db6708f59f68e8009abb92c1e65".

    :return: Found Airflow version in Git repo.
    """
    try:
        import git  # optional dependency; missing gitpython degrades to ""
        try:
            git_path = Path(self.root).parent.resolve() / ".git"
            log.warning("Getting git version from: %s", git_path)
            # Get git version from the git of the airflow root repo
            repo = git.Repo(str(git_path))
        except git.NoSuchPathError:
            log.warning(".git directory not found: Cannot compute the git version")
            return ""
        except git.InvalidGitRepositoryError:
            log.warning("Invalid .git directory not found: Cannot compute the git version")
            return ""
    except ImportError:
        log.warning("gitpython not found: Cannot compute the git version.")
        return ""
    if repo:
        sha = repo.head.commit.hexsha
        if repo.is_dirty():
            return f".dev0+{sha}.dirty"
        # commit is clean
        return f".release:{sha}"
    return "no_git_version"
|
Return a version to identify the state of the underlying git repo.
The version will indicate whether the head of the current git-backed working directory
is tied to a release tag or not. It will indicate the former with a 'release:{version}'
prefix and the latter with a '.dev0' suffix. Following the prefix will be a sha of the
current branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted
changes are present.
Example pre-release version: ".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65".
Example release version: ".release:2f635dc265e78db6708f59f68e8009abb92c1e65".
Example modified (dirty) version: ".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65.dirty".
:return: Found Airflow version in Git repo.
|
python
|
airflow-core/hatch_build.py
| 74
|
[
"self"
] |
str
| true
| 3
| 6.88
|
apache/airflow
| 43,597
|
unknown
| false
|
lastIndexOf
|
/**
 * Finds the last index of {@code searchChar} within {@code cs}, searching backwards
 * from {@code start}. Used by the lastIndexOf(CharSequence) methods.
 *
 * <p>Fast paths delegate to {@code String}/{@code StringBuilder}/{@code StringBuffer}
 * lastIndexOf where possible; short needles (length &le; TO_STRING_LIMIT) are converted
 * to String for the same delegation. Otherwise a manual backwards scan is performed.
 *
 * @param cs the {@link CharSequence} to be processed
 * @param searchChar the {@link CharSequence} to find
 * @param start the start index (searching backwards from here)
 * @return the index where the search sequence was found, or NOT_FOUND
 */
static int lastIndexOf(final CharSequence cs, final CharSequence searchChar, int start) {
    if (searchChar == null || cs == null) {
        return NOT_FOUND;
    }
    // Fast path 1: both operands have a native lastIndexOf(String, int).
    if (searchChar instanceof String) {
        if (cs instanceof String) {
            return ((String) cs).lastIndexOf((String) searchChar, start);
        }
        if (cs instanceof StringBuilder) {
            return ((StringBuilder) cs).lastIndexOf((String) searchChar, start);
        }
        if (cs instanceof StringBuffer) {
            return ((StringBuffer) cs).lastIndexOf((String) searchChar, start);
        }
    }
    final int len1 = cs.length();
    final int len2 = searchChar.length();
    // Clamp start into [0, len1]; a needle longer than the haystack can never match.
    if (start > len1) {
        start = len1;
    }
    if (start < 0 || len2 > len1) {
        return NOT_FOUND;
    }
    // Empty needle matches at the (clamped) start position, mirroring String semantics.
    if (len2 == 0) {
        return start;
    }
    // Fast path 2: short needle — cheap to toString() and delegate.
    if (len2 <= TO_STRING_LIMIT) {
        if (cs instanceof String) {
            return ((String) cs).lastIndexOf(searchChar.toString(), start);
        }
        if (cs instanceof StringBuilder) {
            return ((StringBuilder) cs).lastIndexOf(searchChar.toString(), start);
        }
        if (cs instanceof StringBuffer) {
            return ((StringBuffer) cs).lastIndexOf(searchChar.toString(), start);
        }
    }
    // Ensure the needle fits within the haystack when anchored at 'start'.
    if (start + len2 > len1) {
        start = len1 - len2;
    }
    final char char0 = searchChar.charAt(0);
    int i = start;
    // Backwards scan: find a first-char match, then verify the remainder.
    while (true) {
        while (cs.charAt(i) != char0) {
            i--;
            if (i < 0) {
                return NOT_FOUND;
            }
        }
        if (checkLaterThan1(cs, searchChar, len2, i)) {
            return i;
        }
        i--;
        if (i < 0) {
            return NOT_FOUND;
        }
    }
}
|
Used by the lastIndexOf(CharSequence methods) as a green implementation of lastIndexOf
@param cs the {@link CharSequence} to be processed.
@param searchChar the {@link CharSequence} to find.
@param start the start index.
@return the index where the search sequence was found.
|
java
|
src/main/java/org/apache/commons/lang3/CharSequenceUtils.java
| 148
|
[
"cs",
"searchChar",
"start"
] | true
| 21
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
getObject
|
/**
 * Resolves the configured property path and returns its current value.
 *
 * <p>If a target {@code BeanWrapper} was cached previously, it is reused — with a
 * warning when the target bean is still in creation due to a circular reference,
 * since the value read may be outdated. Otherwise the target bean is fetched freshly
 * from the BeanFactory (the prototype case) and wrapped on each call.
 *
 * @return the value of the property path on the target bean
 * @throws BeansException if the target bean cannot be obtained or the property read fails
 */
@Override
public @Nullable Object getObject() throws BeansException {
    BeanWrapper target = this.targetBeanWrapper;
    if (target != null) {
        // Cached singleton target: warn if it is mid-creation (circular reference).
        if (logger.isWarnEnabled() && this.targetBeanName != null &&
                this.beanFactory instanceof ConfigurableBeanFactory cbf &&
                cbf.isCurrentlyInCreation(this.targetBeanName)) {
            logger.warn("Target bean '" + this.targetBeanName + "' is still in creation due to a circular " +
                    "reference - obtained value for property '" + this.propertyPath + "' may be outdated!");
        }
    }
    else {
        // Fetch prototype target bean...
        Assert.state(this.beanFactory != null, "No BeanFactory available");
        Assert.state(this.targetBeanName != null, "No target bean name specified");
        Object bean = this.beanFactory.getBean(this.targetBeanName);
        target = PropertyAccessorFactory.forBeanPropertyAccess(bean);
    }
    Assert.state(this.propertyPath != null, "No property path specified");
    return target.getPropertyValue(this.propertyPath);
}
|
The bean name of this PropertyPathFactoryBean will be interpreted
as "beanName.property" pattern, if neither "targetObject" nor
"targetBeanName" nor "propertyPath" have been specified.
This allows for concise bean definitions with just an id/name.
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/PropertyPathFactoryBean.java
| 199
|
[] |
Object
| true
| 6
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
compression
|
/**
 * Returns the compression factor this TDigest was originally configured with.
 *
 * @return the compression factor
 */
public abstract double compression();
|
Returns the current compression factor.
@return The compression factor originally used to set up the TDigest.
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/TDigest.java
| 168
|
[] | true
| 1
| 6.64
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
inclusiveBetween
|
/**
 * Validate that the specified primitive value falls between the two inclusive values
 * specified; otherwise, throws an exception with the specified message.
 *
 * <p>Written as two explicit comparisons (rather than a negated range check) so that
 * NaN values do not trigger the exception, preserving the original semantics.
 *
 * @param start the inclusive start value
 * @param end the inclusive end value
 * @param value the value to validate
 * @param message the exception message if invalid, not null
 * @throws IllegalArgumentException if the value falls outside the boundaries
 */
public static void inclusiveBetween(final double start, final double end, final double value, final String message) {
    // TODO when breaking BC, consider returning value
    final boolean belowRange = value < start;
    final boolean aboveRange = value > end;
    if (belowRange || aboveRange) {
        throw new IllegalArgumentException(message);
    }
}
|
Validate that the specified primitive value falls between the two
inclusive values specified; otherwise, throws an exception with the
specified message.
<pre>Validate.inclusiveBetween(0.1, 2.1, 1.1, "Not in range");</pre>
@param start the inclusive start value.
@param end the inclusive end value.
@param value the value to validate.
@param message the exception message if invalid, not null.
@throws IllegalArgumentException if the value falls outside the boundaries.
@since 3.3
|
java
|
src/main/java/org/apache/commons/lang3/Validate.java
| 291
|
[
"start",
"end",
"value",
"message"
] |
void
| true
| 3
| 6.56
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
createDelegationToken
|
/**
 * Create a Delegation Token.
 *
 * <p>Convenience overload of {@link #createDelegationToken(CreateDelegationTokenOptions)}
 * using default options; see that overload for details.
 *
 * @return the CreateDelegationTokenResult
 */
default CreateDelegationTokenResult createDelegationToken() {
    return createDelegationToken(new CreateDelegationTokenOptions());
}
|
Create a Delegation Token.
<p>
This is a convenience method for {@link #createDelegationToken(CreateDelegationTokenOptions)} with default options.
See the overload for more details.
@return The CreateDelegationTokenResult.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 713
|
[] |
CreateDelegationTokenResult
| true
| 1
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
toUnsentRequest
|
/**
 * Builds an OffsetFetch request for the requested partitions, wrapped as an unsent request.
 *
 * <p>Partitions are grouped by topic and annotated with topic IDs from the current metadata
 * when known; if any topic lacks an ID, the request falls back to topic names only. The member
 * ID and epoch are attached when a member epoch is present, and stable offsets are required.
 *
 * @return the unsent request, with response handling attached
 */
public NetworkClientDelegate.UnsentRequest toUnsentRequest() {
    Map<String, Uuid> topicIds = metadata.topicIds();
    boolean canUseTopicIds = true;
    List<OffsetFetchRequestData.OffsetFetchRequestTopics> topics = new ArrayList<>();
    // Group requested partitions per topic so each topic appears once in the request.
    Map<String, List<TopicPartition>> tps = requestedPartitions.stream().collect(Collectors.groupingBy(TopicPartition::topic));
    for (Map.Entry<String, List<TopicPartition>> entry : tps.entrySet()) {
        String topic = entry.getKey();
        Uuid topicId = topicIds.getOrDefault(topic, Uuid.ZERO_UUID);
        // A single unknown topic ID forces the whole request onto the names-only path.
        if (Uuid.ZERO_UUID.equals(topicId)) {
            canUseTopicIds = false;
        }
        topics.add(new OffsetFetchRequestData.OffsetFetchRequestTopics()
            .setName(entry.getKey())
            .setTopicId(topicId)
            .setPartitionIndexes(entry.getValue().stream()
                .map(TopicPartition::partition)
                .collect(Collectors.toList())));
    }
    OffsetFetchRequestData.OffsetFetchRequestGroup groupData = new OffsetFetchRequestData.OffsetFetchRequestGroup()
        .setGroupId(groupId)
        .setTopics(topics);
    // Only include member identity when an epoch is known.
    if (memberInfo.memberEpoch.isPresent()) {
        groupData = groupData.setMemberId(memberInfo.memberId)
            .setMemberEpoch(memberInfo.memberEpoch.get());
    }
    OffsetFetchRequestData data = new OffsetFetchRequestData()
        .setRequireStable(true)
        .setGroups(List.of(groupData));
    OffsetFetchRequest.Builder builder = canUseTopicIds
        ? OffsetFetchRequest.Builder.forTopicIdsOrNames(data, throwOnFetchStableOffsetUnsupported)
        : OffsetFetchRequest.Builder.forTopicNames(data, throwOnFetchStableOffsetUnsupported);
    return buildRequestWithResponseHandling(builder);
}
|
Future with the result of the request. This can be reset using {@link #resetFuture()}
to get a new result when the request is retried.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java
| 1,006
|
[] | true
| 4
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
all
|
/**
 * Return a future which succeeds only if offsets for all specified partitions have been
 * successfully retrieved.
 *
 * <p>The combined future completes with a map from partition to its offset result; the
 * per-entry {@code get()} calls below run only after allOf has completed successfully,
 * so they cannot block.
 */
public KafkaFuture<Map<TopicPartition, ListOffsetsResultInfo>> all() {
    return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture<?>[0]))
        .thenApply(v -> {
            Map<TopicPartition, ListOffsetsResultInfo> offsets = new HashMap<>(futures.size());
            for (Map.Entry<TopicPartition, KafkaFuture<ListOffsetsResultInfo>> entry : futures.entrySet()) {
                try {
                    offsets.put(entry.getKey(), entry.getValue().get());
                } catch (InterruptedException | ExecutionException e) {
                    // This should be unreachable, because allOf ensured that all the futures completed successfully.
                    throw new RuntimeException(e);
                }
            }
            return offsets;
        });
}
|
Return a future which succeeds only if offsets for all specified partitions have been successfully
retrieved.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/ListOffsetsResult.java
| 54
|
[] | true
| 2
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
trimToSize
|
/**
 * Ensures that this {@code CompactHashMap} has the smallest representation in memory,
 * given its current size.
 *
 * <p>No-op when the backing arrays have not been allocated yet. If the map has been
 * promoted to a hash-flooding-resistant delegate, a right-sized delegate is rebuilt.
 * Otherwise the entries array is shrunk to the current size and the hash table is
 * resized down when a smaller table suffices.
 */
public void trimToSize() {
    if (needsAllocArrays()) {
        return;
    }
    Map<K, V> delegate = delegateOrNull();
    if (delegate != null) {
        // Delegated mode: rebuild a delegate sized exactly for the current contents.
        Map<K, V> newDelegate = createHashFloodingResistantDelegate(size());
        newDelegate.putAll(delegate);
        this.table = newDelegate;
        return;
    }
    int size = this.size;
    // Shrink the entries array to exactly 'size' slots if it is over-allocated.
    if (size < requireEntries().length) {
        resizeEntries(size);
    }
    int minimumTableSize = CompactHashing.tableSize(size);
    int mask = hashTableMask();
    if (minimumTableSize < mask) { // smaller table size will always be less than current mask
        resizeTable(mask, minimumTableSize, UNSET, UNSET);
    }
}
|
Ensures that this {@code CompactHashMap} has the smallest representation in memory, given its
current size.
|
java
|
android/guava/src/com/google/common/collect/CompactHashMap.java
| 950
|
[] |
void
| true
| 5
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
list_options
|
def list_options() -> list[str]:
r"""Returns a dictionary describing the optimizations and debug configurations
that are available to `torch.compile()`.
The options are documented in `torch._inductor.config`.
Example::
>>> torch._inductor.list_options()
"""
from torch._inductor import config
current_config: dict[str, Any] = config.get_config_copy()
return list(current_config.keys())
|
r"""Returns a dictionary describing the optimizations and debug configurations
that are available to `torch.compile()`.
The options are documented in `torch._inductor.config`.
Example::
>>> torch._inductor.list_options()
|
python
|
torch/_inductor/__init__.py
| 381
|
[] |
list[str]
| true
| 1
| 6.48
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
_decode_relation
|
def _decode_relation(self, s):
    '''(INTERNAL) Decodes a relation line.

    A relation declaration has the form ``@RELATION <relation-name>``, where
    the name must start with an alphabetic character and must be quoted when
    it contains spaces; otherwise `BadRelationFormat` is raised.

    The input must already be normalized, i.e. without padding and without
    the "\r\n" characters.

    :param s: a normalized string.
    :return: a string with the decoded relation name.
    '''
    _, remainder = s.split(' ', 1)
    remainder = remainder.strip()
    if not _RE_RELATION.match(remainder):
        raise BadRelationFormat()
    return str(remainder.strip('"\''))
|
(INTERNAL) Decodes a relation line.
The relation declaration is a line with the format ``@RELATION
<relation-name>``, where ``relation-name`` is a string. The string must
start with alphabetic character and must be quoted if the name includes
spaces, otherwise this method will raise a `BadRelationFormat` exception.
This method must receive a normalized string, i.e., a string without
padding, including the "\r\n" characters.
:param s: a normalized string.
:return: a string with the decoded relation name.
|
python
|
sklearn/externals/_arff.py
| 690
|
[
"self",
"s"
] | false
| 2
| 7.12
|
scikit-learn/scikit-learn
| 64,340
|
sphinx
| false
|
|
supportsSourceType
|
/**
 * Only events whose source is an instance of this listener's configured source
 * are supported; a {@code null} source type never matches.
 */
@Override
public boolean supportsSourceType(@Nullable Class<?> sourceType) {
    if (sourceType == null) {
        return false;
    }
    return sourceType.isInstance(this.source);
}
|
Create a SourceFilteringListener for the given event source,
expecting subclasses to override the {@link #onApplicationEventInternal}
method (instead of specifying a delegate listener).
@param source the event source that this listener filters for,
only processing events from this source
|
java
|
spring-context/src/main/java/org/springframework/context/event/SourceFilteringListener.java
| 82
|
[
"sourceType"
] | true
| 2
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
resolveAutowireCandidates
|
/**
 * Resolve a map of all beans of the given type, also picking up beans defined in ancestor
 * bean factories, restricted to beans that actually have autowire candidate status.
 *
 * @param lbf the bean factory
 * @param type the type of bean to match
 * @param includeNonSingletons whether to include prototype or scoped beans too
 * @param allowEagerInit whether to initialize lazy-init singletons and FactoryBean-produced
 * objects for the type check
 * @return the Map of matching bean instances, or an empty Map if none
 * @throws BeansException if a bean could not be created
 */
@SuppressWarnings("unchecked")
public static <T> Map<String, T> resolveAutowireCandidates(ConfigurableListableBeanFactory lbf, Class<T> type,
        boolean includeNonSingletons, boolean allowEagerInit) {

    Map<String, T> candidates = new LinkedHashMap<>();
    for (String beanName : BeanFactoryUtils.beanNamesForTypeIncludingAncestors(lbf, type,
            includeNonSingletons, allowEagerInit)) {
        // Only include beans that are marked as autowire candidates.
        if (AutowireUtils.isAutowireCandidate(lbf, beanName)) {
            Object beanInstance = lbf.getBean(beanName);
            // NullBean placeholders (factory methods returning null) are skipped.
            if (!(beanInstance instanceof NullBean)) {
                candidates.put(beanName, (T) beanInstance);
            }
        }
    }
    return candidates;
}
|
Resolve a map of all beans of the given type, also picking up beans defined in
ancestor bean factories, with the specific condition that each bean actually
has autowire candidate status. This matches simple injection point resolution
as implemented by this {@link AutowireCandidateResolver} strategy, including
beans which are not marked as default candidates but excluding beans which
are not even marked as autowire candidates.
@param lbf the bean factory
@param type the type of bean to match
@param includeNonSingletons whether to include prototype or scoped beans too
or just singletons (also applies to FactoryBeans)
@param allowEagerInit whether to initialize <i>lazy-init singletons</i> and
<i>objects created by FactoryBeans</i> (or by factory methods with a
"factory-bean" reference) for the type check. Note that FactoryBeans need to be
eagerly initialized to determine their type: So be aware that passing in "true"
for this flag will initialize FactoryBeans and "factory-bean" references.
@return the Map of matching bean instances, or an empty Map if none
@throws BeansException if a bean could not be created
@since 6.2.5
@see BeanFactoryUtils#beansOfTypeIncludingAncestors(ListableBeanFactory, Class, boolean, boolean)
@see org.springframework.beans.factory.config.BeanDefinition#isAutowireCandidate()
@see AbstractBeanDefinition#isDefaultCandidate()
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/SimpleAutowireCandidateResolver.java
| 96
|
[
"lbf",
"type",
"includeNonSingletons",
"allowEagerInit"
] | true
| 3
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
_decode8Bits
|
/**
 * Reads the next input byte as an unsigned value (0-255), refilling the input
 * buffer first if it has been exhausted. Advances {@code _inputPtr} by one.
 */
private int _decode8Bits() throws IOException {
    if (_inputPtr >= _inputEnd) {
        loadMoreGuaranteed();
    }
    return _inputBuffer[_inputPtr++] & 0xFF;
}
|
Method used to decode explicit length of a variable-length value
(or, for indefinite/chunked, indicate that one is not known).
Note that long (64-bit) length is only allowed if it fits in
32-bit signed int, for now; expectation being that longer values
are always encoded as chunks.
|
java
|
libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/cbor/ESCborParser.java
| 138
|
[] | true
| 2
| 6.88
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
_parse_thead_tbody_tfoot
|
def _parse_thead_tbody_tfoot(self, table_html):
    """
    Given a table, return parsed header, body, and foot.

    Parameters
    ----------
    table_html : node-like

    Returns
    -------
    tuple of (header, body, footer), each a list of list-of-text rows.

    Notes
    -----
    Header and body are lists-of-lists. Top level list is a list of
    rows. Each row is a list of str text.

    Logic: Use <thead>, <tbody>, <tfoot> elements to identify
    header, body, and footer, otherwise:
      - Put all rows into body
      - Move rows from top of body to header only if
        all elements inside row are <th>
      - Move rows from bottom of body to footer only if
        all elements inside row are <th>
    """
    header_rows = self._parse_thead_tr(table_html)
    body_rows = self._parse_tbody_tr(table_html)
    footer_rows = self._parse_tfoot_tr(table_html)

    # A row belongs in the header when every cell in it is a <th>.
    def row_is_all_th(row):
        return all(self._equals_tag(t, "th") for t in self._parse_td(row))

    if not header_rows:
        # The table has no <thead>. Move the top all-<th> rows from
        # body_rows to header_rows. (This is a common case because many
        # tables in the wild have no <thead> or <tfoot>
        while body_rows and row_is_all_th(body_rows[0]):
            header_rows.append(body_rows.pop(0))
    # NOTE(review): unlike the docstring's footer rule, no bottom-of-body rows are
    # moved here; the footer comes only from <tfoot> plus rowspan overflow — confirm.
    header, rem = self._expand_colspan_rowspan(header_rows, section="header")
    body, rem = self._expand_colspan_rowspan(
        body_rows,
        section="body",
        remainder=rem,
        overflow=len(footer_rows) > 0,
    )
    footer, _ = self._expand_colspan_rowspan(
        footer_rows, section="footer", remainder=rem, overflow=False
    )
    return header, body, footer
|
Given a table, return parsed header, body, and foot.
Parameters
----------
table_html : node-like
Returns
-------
tuple of (header, body, footer), each a list of list-of-text rows.
Notes
-----
Header and body are lists-of-lists. Top level list is a list of
rows. Each row is a list of str text.
Logic: Use <thead>, <tbody>, <tfoot> elements to identify
header, body, and footer, otherwise:
- Put all rows into body
- Move rows from top of body to header only if
all elements inside row are <th>
- Move rows from bottom of body to footer only if
all elements inside row are <th>
|
python
|
pandas/io/html.py
| 413
|
[
"self",
"table_html"
] | false
| 4
| 6.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
callExpressionVisitor
|
/**
 * Visits a node appearing in call position, special-casing the `super` keyword so
 * that it is transformed via `visitSuperKeyword` with the call flag set; every other
 * node is handled by the general visitor.
 */
function callExpressionVisitor(node: Node): VisitResult<Node | undefined> {
    return node.kind === SyntaxKind.SuperKeyword
        ? visitSuperKeyword(node as SuperExpression, /*isExpressionOfCall*/ true)
        : visitor(node);
}
|
Restores the `HierarchyFacts` for this node's ancestor after visiting this node's
subtree, propagating specific facts from the subtree.
@param ancestorFacts The `HierarchyFacts` of the ancestor to restore after visiting the subtree.
@param excludeFacts The existing `HierarchyFacts` of the subtree that should not be propagated.
@param includeFacts The new `HierarchyFacts` of the subtree that should be propagated.
|
typescript
|
src/compiler/transformers/es2015.ts
| 626
|
[
"node"
] | true
| 2
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
appendFixedWidthPadLeft
|
/**
 * Appends an object to the builder, padding on the left to a fixed width.
 *
 * <p>The object's {@code toString} is used; {@code null} uses the null text (or empty
 * string if that is also null). If the rendered string is longer than the width, only
 * its rightmost {@code width} characters are kept (the left-hand side is lost). A
 * non-positive width has no effect.
 *
 * @param obj the object to append, null uses null text
 * @param width the fixed field width, zero or negative has no effect
 * @param padChar the pad character to use
 * @return {@code this} instance
 */
public StrBuilder appendFixedWidthPadLeft(final Object obj, final int width, final char padChar) {
    if (width > 0) {
        ensureCapacity(size + width);
        String str = ObjectUtils.toString(obj, this::getNullText);
        if (str == null) {
            str = StringUtils.EMPTY;
        }
        final int strLen = str.length();
        if (strLen >= width) {
            // Too long: keep only the rightmost 'width' characters.
            str.getChars(strLen - width, strLen, buffer, size);
        } else {
            // Pad on the left, then copy the whole string after the padding.
            final int padLen = width - strLen;
            final int toIndex = size + padLen;
            Arrays.fill(buffer, size, toIndex, padChar);
            str.getChars(0, strLen, buffer, toIndex);
        }
        size += width;
    }
    return this;
}
|
Appends an object to the builder padding on the left to a fixed width.
The {@code toString} of the object is used.
If the object is larger than the length, the left-hand side is lost.
If the object is null, the null text value is used.
@param obj the object to append, null uses null text
@param width the fixed field width, zero or negative has no effect
@param padChar the pad character to use
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 861
|
[
"obj",
"width",
"padChar"
] |
StrBuilder
| true
| 4
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
process
|
/**
 * Walks the declared-type hierarchy of {@code type}, registering on the descriptor the
 * mapping from each formal type variable (taken from a fresh, unsubstituted view of the
 * type element) to the actual type argument used by {@code type}. Recurses into the
 * superclass chain and stops at non-declared types.
 *
 * @param descriptor the descriptor collecting generic-variable bindings
 * @param type the (possibly parameterized) declared type to process
 */
private void process(TypeDescriptor descriptor, TypeMirror type) {
    if (type.getKind() == TypeKind.DECLARED) {
        DeclaredType declaredType = (DeclaredType) type;
        // Fresh view of the same element: its type arguments are the formal variables.
        DeclaredType freshType = (DeclaredType) this.env.getElementUtils()
            .getTypeElement(this.types.asElement(type).toString())
            .asType();
        List<? extends TypeMirror> arguments = declaredType.getTypeArguments();
        for (int i = 0; i < arguments.size(); i++) {
            TypeMirror specificType = arguments.get(i);
            TypeMirror signatureType = freshType.getTypeArguments().get(i);
            descriptor.registerIfNecessary(signatureType, specificType);
        }
        TypeElement element = (TypeElement) this.types.asElement(type);
        // Continue up the hierarchy so inherited type variables are also resolved.
        process(descriptor, element.getSuperclass());
    }
}
|
Return the {@link PrimitiveType} of the specified type or {@code null} if the type
does not represent a valid wrapper type.
@param typeMirror a type
@return the primitive type or {@code null} if the type is not a wrapper type
|
java
|
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/TypeUtils.java
| 235
|
[
"descriptor",
"type"
] |
void
| true
| 3
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
_open_openml_url
|
def _open_openml_url(
    url: str, data_home: Optional[str], n_retries: int = 3, delay: float = 1.0
):
    """
    Returns a resource from OpenML.org. Caches it to data_home if required.

    Parameters
    ----------
    url : str
        OpenML URL that will be downloaded and cached locally. The path component
        of the URL is used to replicate the tree structure as sub-folders of the local
        cache folder.
    data_home : str
        Directory to which the files will be cached. If None, no caching will
        be applied.
    n_retries : int, default=3
        Number of retries when HTTP errors are encountered. Error with status
        code 412 won't be retried as they represent OpenML generic errors.
    delay : float, default=1.0
        Number of seconds between retries.

    Returns
    -------
    result : stream
        A stream to the OpenML resource.
    """
    def is_gzip_encoded(_fsrc):
        return _fsrc.info().get("Content-Encoding", "") == "gzip"

    req = Request(url)
    req.add_header("Accept-encoding", "gzip")
    if data_home is None:
        # No caching: return the response stream directly, unwrapping gzip if needed.
        fsrc = _retry_on_network_error(n_retries, delay, req.full_url)(urlopen)(req)
        if is_gzip_encoded(fsrc):
            return gzip.GzipFile(fileobj=fsrc, mode="rb")
        return fsrc
    openml_path = urlparse(url).path.lstrip("/")
    local_path = _get_local_path(openml_path, data_home)
    dir_name, file_name = os.path.split(local_path)
    if not os.path.exists(local_path):
        os.makedirs(dir_name, exist_ok=True)
        try:
            # Create a tmpdir as a subfolder of dir_name where the final file will
            # be moved to if the download is successful. This guarantees that the
            # renaming operation to the final location is atomic to ensure the
            # concurrence safety of the dataset caching mechanism.
            with TemporaryDirectory(dir=dir_name) as tmpdir:
                with closing(
                    _retry_on_network_error(n_retries, delay, req.full_url)(urlopen)(
                        req
                    )
                ) as fsrc:
                    opener: Callable
                    # Cache is always stored gzip-compressed: copy an already-gzipped
                    # stream verbatim, otherwise compress while writing.
                    if is_gzip_encoded(fsrc):
                        opener = open
                    else:
                        opener = gzip.GzipFile
                    with opener(os.path.join(tmpdir, file_name), "wb") as fdst:
                        shutil.copyfileobj(fsrc, fdst)
                    shutil.move(fdst.name, local_path)
        except Exception:
            # Remove any partially written cache file before propagating the error.
            if os.path.exists(local_path):
                os.unlink(local_path)
            raise
    # XXX: First time, decompression will not be necessary (by using fsrc), but
    # it will happen nonetheless
    return gzip.GzipFile(local_path, "rb")
|
Returns a resource from OpenML.org. Caches it to data_home if required.
Parameters
----------
url : str
OpenML URL that will be downloaded and cached locally. The path component
of the URL is used to replicate the tree structure as sub-folders of the local
cache folder.
data_home : str
Directory to which the files will be cached. If None, no caching will
be applied.
n_retries : int, default=3
Number of retries when HTTP errors are encountered. Error with status
code 412 won't be retried as they represent OpenML generic errors.
delay : float, default=1.0
Number of seconds between retries.
Returns
-------
result : stream
A stream to the OpenML resource.
|
python
|
sklearn/datasets/_openml.py
| 124
|
[
"url",
"data_home",
"n_retries",
"delay"
] | true
| 7
| 6.96
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
addAndGet
|
public long addAndGet(final Number operand) {
this.value += operand.longValue();
return value;
}
|
Increments this instance's value by {@code operand}; this method returns the value associated with the instance
immediately after the addition operation. This method is not thread safe.
@param operand the quantity to add, not null.
@throws NullPointerException if {@code operand} is null.
@return the value associated with this instance after adding the operand.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableLong.java
| 125
|
[
"operand"
] | true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
__bolt_hugify_self
|
void __bolt_hugify_self() {
// clang-format off
#if defined(__x86_64__)
__asm__ __volatile__(SAVE_ALL "call __bolt_hugify_self_impl\n" RESTORE_ALL
"jmp __bolt_hugify_start_program\n"
:::);
#elif defined(__aarch64__) || defined(__arm64__)
__asm__ __volatile__(SAVE_ALL "bl __bolt_hugify_self_impl\n" RESTORE_ALL
"adrp x16, __bolt_hugify_start_program\n"
"add x16, x16, #:lo12:__bolt_hugify_start_program\n"
"br x16\n"
:::);
#else
__exit(1);
#endif
// clang-format on
}
|
This is hooking ELF's entry, it needs to save all machine state.
|
cpp
|
bolt/runtime/hugify.cpp
| 172
|
[] | true
| 2
| 7.04
|
llvm/llvm-project
| 36,021
|
doxygen
| false
|
|
tryAcquire
|
@SuppressWarnings("GoodTime") // should accept a java.time.Duration
public boolean tryAcquire(int permits, long timeout, TimeUnit unit) {
long timeoutMicros = max(unit.toMicros(timeout), 0);
checkPermits(permits);
long microsToWait;
synchronized (mutex()) {
long nowMicros = stopwatch.readMicros();
if (!canAcquire(nowMicros, timeoutMicros)) {
return false;
} else {
microsToWait = reserveAndGetWaitLength(permits, nowMicros);
}
}
stopwatch.sleepMicrosUninterruptibly(microsToWait);
return true;
}
|
Acquires the given number of permits from this {@code RateLimiter} if it can be obtained
without exceeding the specified {@code timeout}, or returns {@code false} immediately (without
waiting) if the permits would not have been granted before the timeout expired.
@param permits the number of permits to acquire
@param timeout the maximum time to wait for the permits. Negative values are treated as zero.
@param unit the time unit of the timeout argument
@return {@code true} if the permits were acquired, {@code false} otherwise
@throws IllegalArgumentException if the requested number of permits is negative or zero
|
java
|
android/guava/src/com/google/common/util/concurrent/RateLimiter.java
| 411
|
[
"permits",
"timeout",
"unit"
] | true
| 2
| 7.92
|
google/guava
| 51,352
|
javadoc
| false
|
|
capacity
|
public int capacity() {
return buffer.length;
}
|
Gets the current size of the internal character array buffer.
@return the capacity
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,574
|
[] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
startswith
|
def startswith(a, prefix, start=0, end=None):
"""
Returns a boolean array which is `True` where the string element
in ``a`` starts with ``prefix``, otherwise `False`.
Parameters
----------
a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
prefix : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
start, end : array_like, with any integer dtype
With ``start``, test beginning at that position. With ``end``,
stop comparing at that position.
Returns
-------
out : ndarray
Output array of bools
See Also
--------
str.startswith
Examples
--------
>>> import numpy as np
>>> s = np.array(['foo', 'bar'])
>>> s
array(['foo', 'bar'], dtype='<U3')
>>> np.strings.startswith(s, 'fo')
array([True, False])
>>> np.strings.startswith(s, 'o', start=1, end=2)
array([True, False])
"""
end = end if end is not None else MAX
return _startswith_ufunc(a, prefix, start, end)
|
Returns a boolean array which is `True` where the string element
in ``a`` starts with ``prefix``, otherwise `False`.
Parameters
----------
a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
prefix : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
start, end : array_like, with any integer dtype
With ``start``, test beginning at that position. With ``end``,
stop comparing at that position.
Returns
-------
out : ndarray
Output array of bools
See Also
--------
str.startswith
Examples
--------
>>> import numpy as np
>>> s = np.array(['foo', 'bar'])
>>> s
array(['foo', 'bar'], dtype='<U3')
>>> np.strings.startswith(s, 'fo')
array([True, False])
>>> np.strings.startswith(s, 'o', start=1, end=2)
array([True, False])
|
python
|
numpy/_core/strings.py
| 450
|
[
"a",
"prefix",
"start",
"end"
] | false
| 2
| 7.36
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
is_wsl2
|
def is_wsl2() -> bool:
"""
Check if the current platform is WSL2. This method will exit with error printing appropriate
message if WSL1 is detected as WSL1 is not supported.
:return: True if the current platform is WSL2, False otherwise (unless it's WSL1 then it exits).
"""
if not sys.platform.startswith("linux"):
return False
release_name = platform.uname().release
has_wsl_interop = _exists_no_permission_error("/proc/sys/fs/binfmt_misc/WSLInterop")
microsoft_in_release = "microsoft" in release_name.lower()
wsl_conf = _exists_no_permission_error("/etc/wsl.conf")
if not has_wsl_interop and not microsoft_in_release and not wsl_conf:
return False
if microsoft_in_release:
# Release name WSL1 detection
if "Microsoft" in release_name:
message_on_wsl1_detected(release_name=release_name, kernel_version=None)
sys.exit(1)
return True
# Kernel WSL1 detection
kernel_version: tuple[int, ...] = (0, 0)
if len(parts := release_name.split(".", 2)[:2]) == 2:
with contextlib.suppress(TypeError, ValueError):
kernel_version = tuple(map(int, parts))
if kernel_version < (4, 19):
message_on_wsl1_detected(release_name=None, kernel_version=kernel_version)
sys.exit(1)
return True
|
Check if the current platform is WSL2. This method will exit with error printing appropriate
message if WSL1 is detected as WSL1 is not supported.
:return: True if the current platform is WSL2, False otherwise (unless it's WSL1 then it exits).
|
python
|
dev/breeze/src/airflow_breeze/utils/platforms.py
| 57
|
[] |
bool
| true
| 9
| 8.4
|
apache/airflow
| 43,597
|
unknown
| false
|
ABSL_LOCKS_EXCLUDED
|
ABSL_LOCKS_EXCLUDED(reader_mu_) {
grpc::internal::MutexLock l(&reader_mu_);
if (GPR_UNLIKELY(backlog_.send_initial_metadata_wanted)) {
reader->SendInitialMetadata();
}
if (GPR_UNLIKELY(backlog_.read_wanted != nullptr)) {
reader->Read(backlog_.read_wanted);
}
if (GPR_UNLIKELY(backlog_.finish_wanted)) {
reader->Finish(std::move(backlog_.status_wanted));
}
// Set reader_ last so that other functions can use it lock-free
reader_.store(reader, std::memory_order_release);
}
|
The following notifications are exactly like ServerBidiReactor.
|
cpp
|
include/grpcpp/support/server_callback.h
| 558
|
[] | true
| 4
| 6.08
|
grpc/grpc
| 44,113
|
doxygen
| false
|
|
getSiblingNode
|
function getSiblingNode(node: ?(Node | Element)) {
while (node) {
if (node.nextSibling) {
return node.nextSibling;
}
node = node.parentNode;
}
}
|
Get the next sibling within a container. This will walk up the
DOM if a node's siblings have been exhausted.
@param {DOMElement|DOMTextNode} node
@return {?DOMElement|DOMTextNode}
|
javascript
|
packages/react-dom-bindings/src/client/getNodeForCharacterOffset.js
| 32
|
[] | false
| 3
| 6.8
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
_get_exc_class_and_code
|
def _get_exc_class_and_code(
exc_class_or_code: type[Exception] | int,
) -> tuple[type[Exception], int | None]:
"""Get the exception class being handled. For HTTP status codes
or ``HTTPException`` subclasses, return both the exception and
status code.
:param exc_class_or_code: Any exception class, or an HTTP status
code as an integer.
"""
exc_class: type[Exception]
if isinstance(exc_class_or_code, int):
try:
exc_class = default_exceptions[exc_class_or_code]
except KeyError:
raise ValueError(
f"'{exc_class_or_code}' is not a recognized HTTP"
" error code. Use a subclass of HTTPException with"
" that code instead."
) from None
else:
exc_class = exc_class_or_code
if isinstance(exc_class, Exception):
raise TypeError(
f"{exc_class!r} is an instance, not a class. Handlers"
" can only be registered for Exception classes or HTTP"
" error codes."
)
if not issubclass(exc_class, Exception):
raise ValueError(
f"'{exc_class.__name__}' is not a subclass of Exception."
" Handlers can only be registered for Exception classes"
" or HTTP error codes."
)
if issubclass(exc_class, HTTPException):
return exc_class, exc_class.code
else:
return exc_class, None
|
Get the exception class being handled. For HTTP status codes
or ``HTTPException`` subclasses, return both the exception and
status code.
:param exc_class_or_code: Any exception class, or an HTTP status
code as an integer.
|
python
|
src/flask/sansio/scaffold.py
| 657
|
[
"exc_class_or_code"
] |
tuple[type[Exception], int | None]
| true
| 7
| 6.72
|
pallets/flask
| 70,946
|
sphinx
| false
|
map
|
def map(
f: Callable[[pytree.PyTree, tuple[pytree.PyTree, ...]], pytree.PyTree],
xs: Union[pytree.PyTree, torch.Tensor],
*args: TypeVarTuple,
):
r"""
Performs a map of f with xs. Intuitively, you can think of the semantic being:
out = []
for idx in len(xs.size(0)):
xs_sliced = xs.select(0, idx)
out.append(f(xs_sliced, *args))
torch.stack(out)
.. warning::
`torch._higher_order_ops.map` is a prototype feature in PyTorch. It currently
does not support autograd and you may run into miscompiles.
Read more about feature classification at:
https://pytorch.org/blog/pytorch-feature-classification-changes/#prototype
Args:
f (Callable): a callable that takes an input x, that could either be a single Tensor
or a nested dict, list of tensors and some additional inputs
xs: the inputs that're to be mapped over. We'll iterate over the first dim of each x
and perform f on each slice.
*args: additional arguments provided to each step of f. They could also be omitted and
map is able to automatically figure out the read dependency.
Return:
the stacked output for each step of f
Example:
def f(xs):
return xs[0] + xs[1] + const1 + const2
xs = [torch.randn(2, 3), torch.randn(2, 3)]
const1 = torch.randn(2, 3)
const2 = torch.randn(2, 3)
# returns a tensor of shape [2, 2, 3]
torch._higher_order_ops.map(f, xs)
"""
flat_xs, xs_spec = pytree.tree_flatten(xs)
flat_args, args_spec = pytree.tree_flatten(args)
if not all(isinstance(t, torch.Tensor) for t in flat_xs):
raise RuntimeError(f"Mapped xs can only consist of tensors. Got xs {flat_xs}.")
shapes = [xs.shape for xs in flat_xs]
leading_dim_size = shapes[0][0]
if leading_dim_size == 0:
raise RuntimeError("Leading dimensions of mapped xs cannot be 0.")
if any(cur_shape[0] != leading_dim_size for cur_shape in shapes):
raise RuntimeError(
f"Leading dimensions of mapped xs must be consistent. Got shapes {shapes}."
)
def run_flattened_map(f, flat_xs, flat_args):
def wrapped_fn(*flat_args, f, xs_tree_spec, args_tree_spec, num_xs):
xs = pytree.tree_unflatten(flat_args[:num_xs], xs_tree_spec)
args = pytree.tree_unflatten(flat_args[num_xs:], args_tree_spec)
return f(xs, *args)
inner_f = functools.partial(
wrapped_fn,
f=f,
xs_tree_spec=xs_spec,
args_tree_spec=args_spec,
num_xs=len(flat_xs),
)
return map_impl(inner_f, flat_xs, flat_args)
from torch._higher_order_ops.utils import _maybe_compile_and_run_fn
return _maybe_compile_and_run_fn(run_flattened_map, f, flat_xs, flat_args)
|
r"""
Performs a map of f with xs. Intuitively, you can think of the semantic being:
out = []
for idx in len(xs.size(0)):
xs_sliced = xs.select(0, idx)
out.append(f(xs_sliced, *args))
torch.stack(out)
.. warning::
`torch._higher_order_ops.map` is a prototype feature in PyTorch. It currently
does not support autograd and you may run into miscompiles.
Read more about feature classification at:
https://pytorch.org/blog/pytorch-feature-classification-changes/#prototype
Args:
f (Callable): a callable that takes an input x, that could either be a single Tensor
or a nested dict, list of tensors and some additional inputs
xs: the inputs that're to be mapped over. We'll iterate over the first dim of each x
and perform f on each slice.
*args: additional arguments provided to each step of f. They could also be omitted and
map is able to automatically figure out the read dependency.
Return:
the stacked output for each step of f
Example:
def f(xs):
return xs[0] + xs[1] + const1 + const2
xs = [torch.randn(2, 3), torch.randn(2, 3)]
const1 = torch.randn(2, 3)
const2 = torch.randn(2, 3)
# returns a tensor of shape [2, 2, 3]
torch._higher_order_ops.map(f, xs)
|
python
|
torch/_higher_order_ops/map.py
| 47
|
[
"f",
"xs"
] | true
| 4
| 9.6
|
pytorch/pytorch
| 96,034
|
google
| false
|
|
leaveGroup
|
public CompletableFuture<Void> leaveGroup() {
return leaveGroup(false);
}
|
Leaves the group.
<p>
This method does the following:
<ol>
<li>Transitions member state to {@link MemberState#PREPARE_LEAVING}.</li>
<li>Requests the invocation of the revocation callback or lost callback.</li>
<li>Once the callback completes, it clears the current and target assignment, unsubscribes from
all topics and transitions the member state to {@link MemberState#LEAVING}.</li>
</ol>
States {@link MemberState#PREPARE_LEAVING} and {@link MemberState#LEAVING} cause the heartbeat request manager
to send a leave group heartbeat.
</p>
@return future that will complete when the revocation callback execution completes and the heartbeat
to leave the group has been sent out.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsMembershipManager.java
| 916
|
[] | true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
from
|
public static SpringApplication.Augmented from(ThrowingConsumer<String[]> main) {
Assert.notNull(main, "'main' must not be null");
return new Augmented(main, Collections.emptySet(), Collections.emptySet());
}
|
Create an application from an existing {@code main} method that can run with
additional {@code @Configuration} or bean classes. This method can be helpful when
writing a test harness that needs to start an application with additional
configuration.
@param main the main method entry point that runs the {@link SpringApplication}
@return a {@link SpringApplication.Augmented} instance that can be used to add
configuration and run the application
@since 3.1.0
@see #withHook(SpringApplicationHook, Runnable)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringApplication.java
| 1,432
|
[
"main"
] | true
| 1
| 6.32
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
shouldVisitNode
|
function shouldVisitNode(node: Node): boolean {
return (node.transformFlags & TransformFlags.ContainsES2015) !== 0
|| convertedLoopState !== undefined
|| (hierarchyFacts & HierarchyFacts.ConstructorWithSuperCall && isOrMayContainReturnCompletion(node))
|| (isIterationStatement(node, /*lookInLabeledStatements*/ false) && shouldConvertIterationStatement(node))
|| (getInternalEmitFlags(node) & InternalEmitFlags.TypeScriptClassWrapper) !== 0;
}
|
Restores the `HierarchyFacts` for this node's ancestor after visiting this node's
subtree, propagating specific facts from the subtree.
@param ancestorFacts The `HierarchyFacts` of the ancestor to restore after visiting the subtree.
@param excludeFacts The existing `HierarchyFacts` of the subtree that should not be propagated.
@param includeFacts The new `HierarchyFacts` of the subtree that should be propagated.
|
typescript
|
src/compiler/transformers/es2015.ts
| 593
|
[
"node"
] | true
| 7
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
nodes
|
public Map<Integer, Node> nodes() {
return nodes;
}
|
@return The voter nodes in the Raft cluster, or an empty map if KIP-853 is not enabled.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/QuorumInfo.java
| 76
|
[] | true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
split_and_set_ranges
|
def split_and_set_ranges(
self, lengths: Sequence[Sequence[sympy.Expr]]
) -> list[list[sympy.Expr]]:
"""
Split and set iteration ranges for the kernel based on the provided lengths.
This method maps the kernel's tiling structure to the node's iteration space,
handling both pointwise and reduction dimensions appropriately.
Args:
lengths: A sequence of sequences of symbolic expressions representing
the sizes of different dimensions for each node.
Returns:
A list of lists of symbolic expressions representing the mapped
iteration variables for each dimension.
"""
# Create a dictionary mapping each range tree prefix to its total number of elements
tiling = {rt.prefix: rt.numel for rt in self.range_trees}
# If we're not inside a reduction loop, set all reduction dimensions to 1
# This effectively disables reduction dimensions when not needed
if not self.inside_reduction:
for prefix in tiling:
if prefix_is_reduction(prefix):
tiling[prefix] = sympy.S.One
# Extract the values from the tiling dictionary to create groups
groups = [*tiling.values()]
# Map the kernel's group structure to the node's sizes and set the ranges
# using the set_ranges method, returning the resulting iteration variables
return self.map_kernel_groups_to_node_sizes(groups, lengths, self.set_ranges)
|
Split and set iteration ranges for the kernel based on the provided lengths.
This method maps the kernel's tiling structure to the node's iteration space,
handling both pointwise and reduction dimensions appropriately.
Args:
lengths: A sequence of sequences of symbolic expressions representing
the sizes of different dimensions for each node.
Returns:
A list of lists of symbolic expressions representing the mapped
iteration variables for each dimension.
|
python
|
torch/_inductor/codegen/simd.py
| 868
|
[
"self",
"lengths"
] |
list[list[sympy.Expr]]
| true
| 4
| 7.92
|
pytorch/pytorch
| 96,034
|
google
| false
|
trimmedEndIndex
|
function trimmedEndIndex(string) {
var index = string.length;
while (index-- && reWhitespace.test(string.charAt(index))) {}
return index;
}
|
Used by `_.trim` and `_.trimEnd` to get the index of the last non-whitespace
character of `string`.
@private
@param {string} string The string to inspect.
@returns {number} Returns the index of the last non-whitespace character.
|
javascript
|
lodash.js
| 1,364
|
[
"string"
] | false
| 3
| 6.08
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
getResult
|
public static <T> T getResult(Future<T> future) {
try {
return future.get();
} catch (ExecutionException e) {
if (e.getCause() instanceof IllegalStateException)
throw (IllegalStateException) e.getCause();
throw maybeWrapAsKafkaException(e.getCause());
} catch (InterruptedException e) {
throw new InterruptException(e);
}
}
|
Update subscription state and metadata using the provided committed offsets:
<li>Update partition offsets with the committed offsets</li>
<li>Update the metadata with any newer leader epoch discovered in the committed offsets
metadata</li>
</p>
This will ignore any partition included in the <code>offsetsAndMetadata</code> parameter that
may no longer be assigned.
@param offsetsAndMetadata Committed offsets and metadata to be used for updating the
subscription state and metadata object.
@param metadata Metadata object to update with a new leader epoch if discovered in the
committed offsets' metadata.
@param subscriptions Subscription state to update, setting partitions' offsets to the
committed offsets.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java
| 237
|
[
"future"
] |
T
| true
| 4
| 6.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
check_for_prefix_async
|
async def check_for_prefix_async(
self,
client: AioBaseClient,
prefix: str,
delimiter: str,
bucket_name: str | None = None,
) -> bool:
"""
Check that a prefix exists in a bucket.
:param bucket_name: the name of the bucket
:param prefix: a key prefix
:param delimiter: the delimiter marks key hierarchy.
:return: False if the prefix does not exist in the bucket and True if it does.
"""
if not prefix.endswith(delimiter):
prefix += delimiter
prefix_split = re.split(rf"(\w+[{delimiter}])$", prefix, 1)
previous_level = prefix_split[0]
plist = await self.list_prefixes_async(client, bucket_name, previous_level, delimiter)
return prefix in plist
|
Check that a prefix exists in a bucket.
:param bucket_name: the name of the bucket
:param prefix: a key prefix
:param delimiter: the delimiter marks key hierarchy.
:return: False if the prefix does not exist in the bucket and True if it does.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/s3.py
| 596
|
[
"self",
"client",
"prefix",
"delimiter",
"bucket_name"
] |
bool
| true
| 2
| 8.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
values
|
@Override
public ImmutableCollection<V> values() {
ImmutableCollection<V> result = values;
return (result == null) ? values = createValues() : result;
}
|
Returns an immutable collection of the values in this map, in the same order that they appear
in {@link #entrySet}.
|
java
|
android/guava/src/com/google/common/collect/ImmutableMap.java
| 984
|
[] | true
| 2
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
initials
|
public static String initials(final String str, final char... delimiters) {
if (StringUtils.isEmpty(str)) {
return str;
}
if (delimiters != null && delimiters.length == 0) {
return StringUtils.EMPTY;
}
final int strLen = str.length();
final char[] buf = new char[strLen / 2 + 1];
int count = 0;
boolean lastWasGap = true;
for (int i = 0; i < strLen; i++) {
final char ch = str.charAt(i);
if (isDelimiter(ch, delimiters)) {
lastWasGap = true;
continue; // ignore ch
}
if (lastWasGap) {
buf[count++] = ch;
lastWasGap = false;
}
}
return new String(buf, 0, count);
}
|
Extracts the initial characters from each word in the String.
<p>All first characters after the defined delimiters are returned as a new string.
Their case is not changed.</p>
<p>If the delimiters array is null, then Whitespace is used.
Whitespace is defined by {@link Character#isWhitespace(char)}.
A {@code null} input String returns {@code null}.
An empty delimiter array returns an empty String.</p>
<pre>
WordUtils.initials(null, *) = null
WordUtils.initials("", *) = ""
WordUtils.initials("Ben John Lee", null) = "BJL"
WordUtils.initials("Ben J.Lee", null) = "BJ"
WordUtils.initials("Ben J.Lee", [' ','.']) = "BJL"
WordUtils.initials(*, new char[0]) = ""
</pre>
@param str the String to get initials from, may be null.
@param delimiters set of characters to determine words, null means whitespace.
@return String of initial characters, {@code null} if null String input.
@see #initials(String)
@since 2.2
|
java
|
src/main/java/org/apache/commons/lang3/text/WordUtils.java
| 260
|
[
"str"
] |
String
| true
| 7
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
escape
|
protected abstract char @Nullable [] escape(char c);
|
Returns the escaped form of the given character, or {@code null} if this character does not
need to be escaped. If an empty array is returned, this effectively strips the input character
from the resulting text.
<p>If the character does not need to be escaped, this method should return {@code null}, rather
than a one-character array containing the character itself. This enables the escaping algorithm
to perform more efficiently.
<p>An escaper is expected to be able to deal with any {@code char} value, so this method should
not throw any exceptions.
@param c the character to escape if necessary
@return the replacement characters, or {@code null} if no escaping was needed
|
java
|
android/guava/src/com/google/common/escape/CharEscaper.java
| 83
|
[
"c"
] | true
| 1
| 6.8
|
google/guava
| 51,352
|
javadoc
| false
|
|
assert_all_finite
|
def assert_all_finite(
X,
*,
allow_nan=False,
estimator_name=None,
input_name="",
):
"""Throw a ValueError if X contains NaN or infinity.
Parameters
----------
X : {ndarray, sparse matrix}
The input data.
allow_nan : bool, default=False
If True, do not throw error when `X` contains NaN.
estimator_name : str, default=None
The estimator name, used to construct the error message.
input_name : str, default=""
The data name used to construct the error message. In particular
if `input_name` is "X" and the data has NaN values and
allow_nan is False, the error message will link to the imputer
documentation.
Examples
--------
>>> from sklearn.utils import assert_all_finite
>>> import numpy as np
>>> array = np.array([1, np.inf, np.nan, 4])
>>> try:
... assert_all_finite(array)
... print("Test passed: Array contains only finite values.")
... except ValueError:
... print("Test failed: Array contains non-finite values.")
Test failed: Array contains non-finite values.
"""
_assert_all_finite(
X.data if sp.issparse(X) else X,
allow_nan=allow_nan,
estimator_name=estimator_name,
input_name=input_name,
)
|
Throw a ValueError if X contains NaN or infinity.
Parameters
----------
X : {ndarray, sparse matrix}
The input data.
allow_nan : bool, default=False
If True, do not throw error when `X` contains NaN.
estimator_name : str, default=None
The estimator name, used to construct the error message.
input_name : str, default=""
The data name used to construct the error message. In particular
if `input_name` is "X" and the data has NaN values and
allow_nan is False, the error message will link to the imputer
documentation.
Examples
--------
>>> from sklearn.utils import assert_all_finite
>>> import numpy as np
>>> array = np.array([1, np.inf, np.nan, 4])
>>> try:
... assert_all_finite(array)
... print("Test passed: Array contains only finite values.")
... except ValueError:
... print("Test failed: Array contains non-finite values.")
Test failed: Array contains non-finite values.
|
python
|
sklearn/utils/validation.py
| 185
|
[
"X",
"allow_nan",
"estimator_name",
"input_name"
] | false
| 2
| 7.52
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
generate
|
def generate( # type: ignore[override]
self,
**kwargs,
) -> ROCmTemplateCaller:
"""
Generates the ROCm template caller object for the given GEMM template and operation. This ROCmTemplateCaller
may be used to call and benchmark the generated ROCm kernel in a standalone manner to enable Autotuning.
Args:
kwargs: Additional keyword arguments.
Returns:
A ROCmTemplateCaller object representing the generated ROCm template caller.
"""
kernel_name = f"rocm_{self.name}"
kernel_hash_name = f"rocm_{self.name}_{next(self.index_counter)}"
with (
patch.object(V.graph, "get_dtype", self._fake_get_dtype(self.output_node)),
ROCmTemplateKernel(
kernel_name=kernel_name,
runtime_arg_info=self.get_runtime_arg_info(),
runtime_arg_values=self.get_runtime_arg_values(**kwargs),
) as kernel,
):
code = self.render(kernel=kernel, **kwargs)
_, call_args, _, _ = kernel.args.python_argdefs()
log.debug("Autotune key: %s, Generated Code:\n%s", kernel_hash_name, code)
log.debug(
"Args: cpp_argdefs: %s, python_argdefs: %s",
kernel.args.cpp_argdefs(DTYPE_TO_ROCM_TYPE),
kernel.args.python_argdefs(),
)
input_reorder = (
self.input_reorder
if self.input_reorder is not None
else list(range(len(self.input_nodes)))
)
expected_args = list(
unique(self.input_nodes[idx].get_name() for idx in input_reorder)
)
expected_args.extend([self.output_node.get_name()])
assert list(call_args)[: len(expected_args)] == expected_args, (
call_args,
expected_args,
)
size_args = (
self.size_args() if hasattr(self, "size_args") else ()
) # subclass should define def size_args()
size_args_ints = [
V.graph.sizevars.size_hint(arg) for arg in size_args
] # resolve to ints for benchmarking
# The runtime args come right after the size args
runtime_args = self.get_runtime_arg_values(**kwargs)
extra_args = size_args_ints + runtime_args
bmreq = ROCmBenchmarkRequest(
kernel_name=kernel_name,
input_tensor_meta=TensorMeta.from_irnodes(self.input_nodes),
output_tensor_meta=TensorMeta.from_irnodes(self.output_node),
extra_args=extra_args,
source_code=code,
)
def make_kernel_render(
template_node: ROCmTemplateBuffer,
epilogue_nodes: Optional[Sequence[IRNode]] = None,
):
kernel = ROCmTemplateKernel(
kernel_name="KERNEL_NAME",
runtime_arg_info=self.get_runtime_arg_info(),
runtime_arg_values=self.get_runtime_arg_values(**kwargs),
)
render = functools.partial(
self.render,
kernel=kernel,
template_buffer_node=template_node,
epilogue_nodes=epilogue_nodes,
**kwargs, # includes "op" argument in case of CUTLASSGemmTemplate
)
return kernel, render
return ROCmTemplateCaller(
kernel_hash_name,
self.name,
self.input_nodes,
self.output_node.get_layout(),
make_kernel_render,
bmreq,
self,
kwargs,
)
|
Generates the ROCm template caller object for the given GEMM template and operation. This ROCmTemplateCaller
may be used to call and benchmark the generated ROCm kernel in a standalone manner to enable Autotuning.
Args:
kwargs: Additional keyword arguments.
Returns:
A ROCmTemplateCaller object representing the generated ROCm template caller.
|
python
|
torch/_inductor/codegen/rocm/rocm_template.py
| 59
|
[
"self"
] |
ROCmTemplateCaller
| true
| 3
| 7.52
|
pytorch/pytorch
| 96,034
|
google
| false
|
loadBeanDefinitions
|
public int loadBeanDefinitions(Resource resource, @Nullable String prefix) throws BeanDefinitionStoreException {
return loadBeanDefinitions(new EncodedResource(resource), prefix);
}
|
Load bean definitions from the specified properties file.
@param resource the resource descriptor for the properties file
@param prefix a filter within the keys in the map: for example, 'beans.'
(can be empty or {@code null})
@return the number of bean definitions found
@throws BeanDefinitionStoreException in case of loading or parsing errors
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/PropertiesBeanDefinitionReader.java
| 226
|
[
"resource",
"prefix"
] | true
| 1
| 6.48
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
setupStacktracePrinterOnSigint
|
function setupStacktracePrinterOnSigint() {
if (!getOptionValue('--trace-sigint')) {
return;
}
require('internal/util/trace_sigint').setTraceSigInt(true);
}
|
Patch the process object with legacy properties and normalizations.
Replace `process.argv[0]` with `process.execPath`, preserving the original `argv[0]` value as `process.argv0`.
Replace `process.argv[1]` with the resolved absolute file path of the entry point, if found.
@param {boolean} expandArgv1 - Whether to replace `process.argv[1]` with the resolved absolute file path of
the main entry point.
@returns {string}
|
javascript
|
lib/internal/process/pre_execution.js
| 425
|
[] | false
| 2
| 6.8
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
doStart
|
@Override
protected void doStart() {
if (enabled) {
this.services = createApmServices();
}
}
|
This class is used to make all OpenTelemetry services visible at once
|
java
|
modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracer.java
| 126
|
[] |
void
| true
| 2
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
pop
|
def pop(self, item: Hashable) -> Series:
"""
Return item and drop it from DataFrame. Raise KeyError if not found.
Parameters
----------
item : label
Label of column to be popped.
Returns
-------
Series
Series representing the item that is dropped.
See Also
--------
DataFrame.drop: Drop specified labels from rows or columns.
DataFrame.drop_duplicates: Return DataFrame with duplicate rows removed.
Examples
--------
>>> df = pd.DataFrame(
... [
... ("falcon", "bird", 389.0),
... ("parrot", "bird", 24.0),
... ("lion", "mammal", 80.5),
... ("monkey", "mammal", np.nan),
... ],
... columns=("name", "class", "max_speed"),
... )
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop("class")
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
return super().pop(item=item)
|
Return item and drop it from DataFrame. Raise KeyError if not found.
Parameters
----------
item : label
Label of column to be popped.
Returns
-------
Series
Series representing the item that is dropped.
See Also
--------
DataFrame.drop: Drop specified labels from rows or columns.
DataFrame.drop_duplicates: Return DataFrame with duplicate rows removed.
Examples
--------
>>> df = pd.DataFrame(
... [
... ("falcon", "bird", 389.0),
... ("parrot", "bird", 24.0),
... ("lion", "mammal", 80.5),
... ("monkey", "mammal", np.nan),
... ],
... columns=("name", "class", "max_speed"),
... )
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop("class")
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
|
python
|
pandas/core/frame.py
| 6,289
|
[
"self",
"item"
] |
Series
| true
| 1
| 7.28
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.