function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
wrap
|
public final TypeToken<T> wrap() {
if (isPrimitive()) {
@SuppressWarnings("unchecked") // this is a primitive class
Class<T> type = (Class<T>) runtimeType;
return of(Primitives.wrap(type));
}
return this;
}
|
Returns the corresponding wrapper type if this is a primitive type; otherwise returns {@code
this} itself. Idempotent.
@since 15.0
|
java
|
android/guava/src/com/google/common/reflect/TypeToken.java
| 553
|
[] | true
| 2
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
|
getmask
|
def getmask(a):
"""
Return the mask of a masked array, or nomask.
Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the
mask is not `nomask`, else return `nomask`. To guarantee a full array
of booleans of the same shape as a, use `getmaskarray`.
Parameters
----------
a : array_like
Input `MaskedArray` for which the mask is required.
See Also
--------
getdata : Return the data of a masked array as an ndarray.
getmaskarray : Return the mask of a masked array, or full array of False.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(
data=[[1, --],
[3, 4]],
mask=[[False, True],
[False, False]],
fill_value=2)
>>> ma.getmask(a)
array([[False, True],
[False, False]])
Equivalently use the `MaskedArray` `mask` attribute.
>>> a.mask
array([[False, True],
[False, False]])
Result when mask == `nomask`
>>> b = ma.masked_array([[1,2],[3,4]])
>>> b
masked_array(
data=[[1, 2],
[3, 4]],
mask=False,
fill_value=999999)
>>> ma.nomask
False
>>> ma.getmask(b) == ma.nomask
True
>>> b.mask == ma.nomask
True
"""
return getattr(a, '_mask', nomask)
|
Return the mask of a masked array, or nomask.
Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the
mask is not `nomask`, else return `nomask`. To guarantee a full array
of booleans of the same shape as a, use `getmaskarray`.
Parameters
----------
a : array_like
Input `MaskedArray` for which the mask is required.
See Also
--------
getdata : Return the data of a masked array as an ndarray.
getmaskarray : Return the mask of a masked array, or full array of False.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(
data=[[1, --],
[3, 4]],
mask=[[False, True],
[False, False]],
fill_value=2)
>>> ma.getmask(a)
array([[False, True],
[False, False]])
Equivalently use the `MaskedArray` `mask` attribute.
>>> a.mask
array([[False, True],
[False, False]])
Result when mask == `nomask`
>>> b = ma.masked_array([[1,2],[3,4]])
>>> b
masked_array(
data=[[1, 2],
[3, 4]],
mask=False,
fill_value=999999)
>>> ma.nomask
False
>>> ma.getmask(b) == ma.nomask
True
>>> b.mask == ma.nomask
True
|
python
|
numpy/ma/core.py
| 1,400
|
[
"a"
] | false
| 1
| 6.48
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
value
|
public JSONStringer value(Object value) throws JSONException {
if (this.stack.isEmpty()) {
throw new JSONException("Nesting problem");
}
if (value instanceof JSONArray) {
((JSONArray) value).writeTo(this);
return this;
}
else if (value instanceof JSONObject) {
((JSONObject) value).writeTo(this);
return this;
}
beforeValue();
if (value == null || value instanceof Boolean || value == JSONObject.NULL) {
this.out.append(value);
}
else if (value instanceof Number) {
this.out.append(JSONObject.numberToString((Number) value));
}
else {
string(value.toString());
}
return this;
}
|
Encodes {@code value}.
@param value a {@link JSONObject}, {@link JSONArray}, String, Boolean, Integer,
Long, Double or null. May not be {@link Double#isNaN() NaNs} or
{@link Double#isInfinite() infinities}.
@return this stringer.
@throws JSONException if processing of json failed
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONStringer.java
| 236
|
[
"value"
] |
JSONStringer
| true
| 8
| 7.6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
expandtabs
|
def expandtabs(a, tabsize=8):
"""
Return a copy of each string element where all tab characters are
replaced by one or more spaces.
Calls :meth:`str.expandtabs` element-wise.
Return a copy of each string element where all tab characters are
replaced by one or more spaces, depending on the current column
and the given `tabsize`. The column number is reset to zero after
each newline occurring in the string. This doesn't understand other
non-printing characters or escape sequences.
Parameters
----------
a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
Input array
tabsize : int, optional
Replace tabs with `tabsize` number of spaces. If not given defaults
to 8 spaces.
Returns
-------
out : ndarray
Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
depending on input type
See Also
--------
str.expandtabs
Examples
--------
>>> import numpy as np
>>> a = np.array(['\t\tHello\tworld'])
>>> np.strings.expandtabs(a, tabsize=4) # doctest: +SKIP
array([' Hello world'], dtype='<U21') # doctest: +SKIP
"""
a = np.asanyarray(a)
tabsize = np.asanyarray(tabsize)
if a.dtype.char == "T":
return _expandtabs(a, tabsize)
buffersizes = _expandtabs_length(a, tabsize)
out_dtype = f"{a.dtype.char}{buffersizes.max()}"
out = np.empty_like(a, shape=buffersizes.shape, dtype=out_dtype)
return _expandtabs(a, tabsize, out=out)
|
Return a copy of each string element where all tab characters are
replaced by one or more spaces.
Calls :meth:`str.expandtabs` element-wise.
Return a copy of each string element where all tab characters are
replaced by one or more spaces, depending on the current column
and the given `tabsize`. The column number is reset to zero after
each newline occurring in the string. This doesn't understand other
non-printing characters or escape sequences.
Parameters
----------
a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
Input array
tabsize : int, optional
Replace tabs with `tabsize` number of spaces. If not given defaults
to 8 spaces.
Returns
-------
out : ndarray
Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
depending on input type
See Also
--------
str.expandtabs
Examples
--------
>>> import numpy as np
>>> a = np.array(['\t\tHello\tworld'])
>>> np.strings.expandtabs(a, tabsize=4) # doctest: +SKIP
array([' Hello world'], dtype='<U21') # doctest: +SKIP
|
python
|
numpy/_core/strings.py
| 636
|
[
"a",
"tabsize"
] | false
| 2
| 7.52
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
get
|
public static List<String> get(BeanFactory beanFactory) {
try {
return beanFactory.getBean(BEAN, BasePackages.class).get();
}
catch (NoSuchBeanDefinitionException ex) {
throw new IllegalStateException("Unable to retrieve @EnableAutoConfiguration base packages");
}
}
|
Return the auto-configuration base packages for the given bean factory.
@param beanFactory the source bean factory
@return a list of auto-configuration packages
@throws IllegalStateException if auto-configuration is not enabled
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/AutoConfigurationPackages.java
| 75
|
[
"beanFactory"
] | true
| 2
| 7.12
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
_is_level_reference
|
def _is_level_reference(self, key: Level, axis: Axis = 0) -> bool:
"""
Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key : Hashable
Potential level name for the given axis
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level : bool
"""
axis_int = self._get_axis_number(axis)
return (
key is not None
and is_hashable(key)
and key in self.axes[axis_int].names
and not self._is_label_reference(key, axis=axis_int)
)
|
Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key : Hashable
Potential level name for the given axis
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level : bool
|
python
|
pandas/core/generic.py
| 1,609
|
[
"self",
"key",
"axis"
] |
bool
| true
| 4
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
full
|
def full(shape, fill_value, dtype=None, order='C', *, device=None, like=None):
"""
Return a new array of given shape and type, filled with `fill_value`.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
fill_value : scalar or array_like
Fill value.
dtype : data-type, optional
The desired data-type for the array The default, None, means
``np.array(fill_value).dtype``.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
device : str, optional
The device on which to place the created array. Default: None.
For Array-API interoperability only, so must be ``"cpu"`` if passed.
.. versionadded:: 2.0.0
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
Array of `fill_value` with the given shape, dtype, and order.
See Also
--------
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Examples
--------
>>> import numpy as np
>>> np.full((2, 2), np.inf)
array([[inf, inf],
[inf, inf]])
>>> np.full((2, 2), 10)
array([[10, 10],
[10, 10]])
>>> np.full((2, 2), [1, 2])
array([[1, 2],
[1, 2]])
"""
if like is not None:
return _full_with_like(
like, shape, fill_value, dtype=dtype, order=order, device=device
)
if dtype is None:
fill_value = asarray(fill_value)
dtype = fill_value.dtype
a = empty(shape, dtype, order, device=device)
multiarray.copyto(a, fill_value, casting='unsafe')
return a
|
Return a new array of given shape and type, filled with `fill_value`.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
fill_value : scalar or array_like
Fill value.
dtype : data-type, optional
The desired data-type for the array The default, None, means
``np.array(fill_value).dtype``.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
device : str, optional
The device on which to place the created array. Default: None.
For Array-API interoperability only, so must be ``"cpu"`` if passed.
.. versionadded:: 2.0.0
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
Array of `fill_value` with the given shape, dtype, and order.
See Also
--------
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Examples
--------
>>> import numpy as np
>>> np.full((2, 2), np.inf)
array([[inf, inf],
[inf, inf]])
>>> np.full((2, 2), 10)
array([[10, 10],
[10, 10]])
>>> np.full((2, 2), [1, 2])
array([[1, 2],
[1, 2]])
|
python
|
numpy/_core/numeric.py
| 325
|
[
"shape",
"fill_value",
"dtype",
"order",
"device",
"like"
] | false
| 3
| 7.44
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
fit
|
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape (n_samples,)
The target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Fitted estimator.
"""
_raise_for_unsupported_routing(self, "fit", sample_weight=sample_weight)
X, y = validate_data(
self,
X,
y,
accept_sparse=["csr", "csc"],
ensure_2d=True,
allow_nd=True,
dtype=None,
y_numeric=is_regressor(self),
)
sample_weight = _check_sample_weight(
sample_weight, X, dtype=np.float64, copy=True, ensure_non_negative=True
)
sample_weight /= sample_weight.sum()
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64)
# Initialization of the random number instance that will be used to
# generate a seed at each iteration
random_state = check_random_state(self.random_state)
epsilon = np.finfo(sample_weight.dtype).eps
zero_weight_mask = sample_weight == 0.0
for iboost in range(self.n_estimators):
# avoid extremely small sample weight, for details see issue #20320
sample_weight = np.clip(sample_weight, a_min=epsilon, a_max=None)
# do not clip sample weights that were exactly zero originally
sample_weight[zero_weight_mask] = 0.0
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost, X, y, sample_weight, random_state
)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
if not np.isfinite(sample_weight_sum):
warnings.warn(
(
"Sample weights have reached infinite values,"
f" at iteration {iboost}, causing overflow. "
"Iterations stopped. Try lowering the learning rate."
),
stacklevel=2,
)
break
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
|
Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape (n_samples,)
The target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Fitted estimator.
|
python
|
sklearn/ensemble/_weight_boosting.py
| 108
|
[
"self",
"X",
"y",
"sample_weight"
] | false
| 7
| 6.16
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
from_key_val_list
|
def from_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. Unless it can not be represented as such, return an
OrderedDict, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
Traceback (most recent call last):
...
ValueError: cannot encode objects that are not 2-tuples
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')])
:rtype: OrderedDict
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError("cannot encode objects that are not 2-tuples")
return OrderedDict(value)
|
Take an object and test to see if it can be represented as a
dictionary. Unless it can not be represented as such, return an
OrderedDict, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
Traceback (most recent call last):
...
ValueError: cannot encode objects that are not 2-tuples
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')])
:rtype: OrderedDict
|
python
|
src/requests/utils.py
| 308
|
[
"value"
] | false
| 3
| 7.2
|
psf/requests
| 53,586
|
unknown
| false
|
|
poll
|
public void poll(RequestFuture<?> future) {
while (!future.isDone())
poll(time.timer(Long.MAX_VALUE), future);
}
|
Block indefinitely until the given request future has finished.
@param future The request future to await.
@throws WakeupException if {@link #wakeup()} is called from another thread
@throws InterruptException if the calling thread is interrupted
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java
| 202
|
[
"future"
] |
void
| true
| 2
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
lookup
|
protected abstract @Nullable Object lookup(Object key);
|
Perform an actual lookup in the underlying store.
@param key the key whose associated value is to be returned
@return the raw store value for the key, or {@code null} if none
|
java
|
spring-context/src/main/java/org/springframework/cache/support/AbstractValueAdaptingCache.java
| 77
|
[
"key"
] |
Object
| true
| 1
| 6.8
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getValueAsText
|
@Override
public Text getValueAsText() throws IOException {
// _tokenIncomplete is true when UTF8StreamJsonParser has already processed this value.
if (_currToken == JsonToken.VALUE_STRING && _tokenIncomplete) {
if (lastOptimisedValue != null) {
return new Text(new XContentString.UTF8Bytes(lastOptimisedValue), stringLength);
}
if (stringEnd > 0) {
final int len = stringEnd - 1 - _inputPtr;
return new Text(new XContentString.UTF8Bytes(_inputBuffer, _inputPtr, len), stringLength);
}
return _finishAndReturnText();
}
return null;
}
|
Method that will try to get underlying UTF-8 encoded bytes of the current string token.
This is only a best-effort attempt; if there is some reason the bytes cannot be retrieved, this method will return null.
|
java
|
libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/ESUTF8StreamJsonParser.java
| 60
|
[] |
Text
| true
| 5
| 7.2
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
parse
|
@Override
public Duration parse(String text, Locale locale) throws ParseException {
if (this.defaultUnit == null) {
//delegate to the style
return DurationFormatterUtils.parse(text, this.style);
}
return DurationFormatterUtils.parse(text, this.style, this.defaultUnit);
}
|
Create a {@code DurationFormatter} in a specific {@link DurationFormat.Style} with an
optional {@code DurationFormat.Unit}.
<p>If a {@code defaultUnit} is specified, it may be used in parsing cases when no
unit is present in the string (provided the style allows for such a case). It will
also be used as the representation's resolution when printing in the
{@link DurationFormat.Style#SIMPLE} style. Otherwise, the style defines its default
unit.
@param style the {@code DurationStyle} to use
@param defaultUnit the {@code DurationFormat.Unit} to fall back to when parsing and printing
|
java
|
spring-context/src/main/java/org/springframework/format/datetime/standard/DurationFormatter.java
| 78
|
[
"text",
"locale"
] |
Duration
| true
| 2
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
build_all
|
def build_all() -> None:
"""
Build mkdocs site for en, and then build each language inside, end result is located
at directory ./site/ with each language inside.
"""
update_languages()
shutil.rmtree(site_path, ignore_errors=True)
langs = [
lang.name
for lang in get_lang_paths()
if (lang.is_dir() and lang.name in SUPPORTED_LANGS)
]
cpu_count = os.cpu_count() or 1
process_pool_size = cpu_count * 4
typer.echo(f"Using process pool size: {process_pool_size}")
with Pool(process_pool_size) as p:
p.map(build_lang, langs)
|
Build mkdocs site for en, and then build each language inside, end result is located
at directory ./site/ with each language inside.
|
python
|
scripts/docs.py
| 259
|
[] |
None
| true
| 3
| 7.04
|
tiangolo/fastapi
| 93,264
|
unknown
| false
|
registerShutdownHook
|
@Override
public void registerShutdownHook() {
if (this.shutdownHook == null) {
// No shutdown hook registered yet.
this.shutdownHook = new Thread(SHUTDOWN_HOOK_THREAD_NAME) {
@Override
public void run() {
if (isStartupShutdownThreadStuck()) {
active.set(false);
return;
}
startupShutdownLock.lock();
try {
doClose();
}
finally {
startupShutdownLock.unlock();
}
}
};
Runtime.getRuntime().addShutdownHook(this.shutdownHook);
}
}
|
Register a shutdown hook {@linkplain Thread#getName() named}
{@code SpringContextShutdownHook} with the JVM runtime, closing this
context on JVM shutdown unless it has already been closed at that time.
<p>Delegates to {@code doClose()} for the actual closing procedure.
@see Runtime#addShutdownHook
@see ConfigurableApplicationContext#SHUTDOWN_HOOK_THREAD_NAME
@see #close()
@see #doClose()
|
java
|
spring-context/src/main/java/org/springframework/context/support/AbstractApplicationContext.java
| 1,066
|
[] |
void
| true
| 3
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
applyNonNull
|
public static <T, R> R applyNonNull(final T value, final Function<? super T, ? extends R> mapper) {
return value != null ? Objects.requireNonNull(mapper, "mapper").apply(value) : null;
}
|
Applies a value to a function if the value isn't {@code null}, otherwise the method returns {@code null}. If the value isn't {@code null} then return the
result of the applying function.
<pre>{@code
Functions.applyNonNull("a", String::toUpperCase) = "A"
Functions.applyNonNull(null, String::toUpperCase) = null
Functions.applyNonNull("a", s -> null) = null
}</pre>
<p>
Useful when working with expressions that may return {@code null} as it allows a single-line expression without using temporary local variables or
evaluating expressions twice. Provides an alternative to using {@link Optional} that is shorter and has less allocation.
</p>
@param <T> The type of the input of this method and the function.
@param <R> The type of the result of the function and this method.
@param value The value to apply the function to, may be {@code null}.
@param mapper The function to apply, must not be {@code null}.
@return The result of the function (which may be {@code null}) or {@code null} if the input value is {@code null}.
@see #applyNonNull(Object, Function, Function)
@see #applyNonNull(Object, Function, Function, Function)
@since 3.19.0
|
java
|
src/main/java/org/apache/commons/lang3/function/Functions.java
| 68
|
[
"value",
"mapper"
] |
R
| true
| 2
| 7.84
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
removeMetric
|
public synchronized KafkaMetric removeMetric(MetricName metricName) {
KafkaMetric metric = this.metrics.remove(metricName);
if (metric != null) {
for (MetricsReporter reporter : reporters) {
try {
reporter.metricRemoval(metric);
} catch (Exception e) {
log.error("Error when removing metric from {}", reporter.getClass().getName(), e);
}
}
log.trace("Removed metric named {}", metricName);
}
return metric;
}
|
Remove a metric if it exists and return it. Return null otherwise. If a metric is removed, `metricRemoval`
will be invoked for each reporter.
@param metricName The name of the metric
@return the removed `KafkaMetric` or null if no such metric exists
|
java
|
clients/src/main/java/org/apache/kafka/common/metrics/Metrics.java
| 549
|
[
"metricName"
] |
KafkaMetric
| true
| 3
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
sort
|
public static int[] sort(final int[] array) {
if (array != null) {
Arrays.sort(array);
}
return array;
}
|
Sorts the given array into ascending order and returns it.
@param array the array to sort (may be null).
@return the given array.
@see Arrays#sort(int[])
|
java
|
src/main/java/org/apache/commons/lang3/ArraySorter.java
| 93
|
[
"array"
] | true
| 2
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
rename_fields
|
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> import numpy as np
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))],
dtype=[('A', '<i8'), ('b', [('ba', '<f8'), ('BB', '<f8', (2,))])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names is not None:
newdtype.append(
(newname, _recursive_rename_fields(current, namemapper))
)
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
|
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> import numpy as np
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))],
dtype=[('A', '<i8'), ('b', [('ba', '<f8'), ('BB', '<f8', (2,))])])
|
python
|
numpy/lib/recfunctions.py
| 608
|
[
"base",
"namemapper"
] | false
| 4
| 7.68
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
create_training_job
|
def create_training_job(
self,
config: dict,
wait_for_completion: bool = True,
print_log: bool = True,
check_interval: int = 30,
max_ingestion_time: int | None = None,
):
"""
Start a model training job.
After training completes, Amazon SageMaker saves the resulting model
artifacts to an Amazon S3 location that you specify.
:param config: the config for training
:param wait_for_completion: if the program should keep running until job finishes
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:return: A response to training job creation
"""
self.check_training_config(config)
response = self.get_conn().create_training_job(**config)
if print_log:
self.check_training_status_with_log(
config["TrainingJobName"],
self.non_terminal_states,
self.training_failed_states,
wait_for_completion,
check_interval,
max_ingestion_time,
)
elif wait_for_completion:
describe_response = self.check_status(
config["TrainingJobName"],
"TrainingJobStatus",
self.describe_training_job,
check_interval,
max_ingestion_time,
)
billable_seconds = SageMakerHook.count_billable_seconds(
training_start_time=describe_response["TrainingStartTime"],
training_end_time=describe_response["TrainingEndTime"],
instance_count=describe_response["ResourceConfig"]["InstanceCount"],
)
self.log.info("Billable seconds: %d", billable_seconds)
return response
|
Start a model training job.
After training completes, Amazon SageMaker saves the resulting model
artifacts to an Amazon S3 location that you specify.
:param config: the config for training
:param wait_for_completion: if the program should keep running until job finishes
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:return: A response to training job creation
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/sagemaker.py
| 289
|
[
"self",
"config",
"wait_for_completion",
"print_log",
"check_interval",
"max_ingestion_time"
] | true
| 3
| 7.92
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
length
|
@Override
public int length() {
return size;
}
|
Gets the length of the string builder.
@return the length
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 2,435
|
[] | true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
pollResponseReceivedDuringReauthentication
|
public Optional<NetworkReceive> pollResponseReceivedDuringReauthentication() {
return authenticator.pollResponseReceivedDuringReauthentication();
}
|
Return the (always non-null but possibly empty) client-side
{@link NetworkReceive} response that arrived during re-authentication but
is unrelated to re-authentication. This corresponds to a request sent
prior to the beginning of re-authentication; the request was made when the
channel was successfully authenticated, and the response arrived during the
re-authentication process.
@return client-side {@link NetworkReceive} response that arrived during
re-authentication that is unrelated to re-authentication. This may
be empty.
|
java
|
clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java
| 658
|
[] | true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
getKeyAlgorithmIdentifier
|
private static String getKeyAlgorithmIdentifier(byte[] keyBytes) throws IOException, GeneralSecurityException {
DerParser parser = new DerParser(keyBytes);
DerParser.Asn1Object sequence = parser.readAsn1Object();
parser = sequence.getParser();
parser.readAsn1Object().getInteger(); // version
DerParser.Asn1Object algSequence = parser.readAsn1Object();
parser = algSequence.getParser();
String oidString = parser.readAsn1Object().getOid();
return switch (oidString) {
case "1.2.840.10040.4.1" -> "DSA";
case "1.2.840.113549.1.1.1" -> "RSA";
case "1.2.840.10045.2.1" -> "EC";
default -> throw new GeneralSecurityException(
"Error parsing key algorithm identifier. Algorithm with OID [" + oidString + "] is not supported"
);
};
}
|
Parses a DER encoded private key and reads its algorithm identifier Object OID.
@param keyBytes the private key raw bytes
@return A string identifier for the key algorithm (RSA, DSA, or EC)
@throws GeneralSecurityException if the algorithm oid that is parsed from ASN.1 is unknown
@throws IOException if the DER encoded key can't be parsed
|
java
|
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemUtils.java
| 667
|
[
"keyBytes"
] |
String
| true
| 1
| 6.56
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
chunks
|
def chunks(it, n):
"""Split an iterator into chunks with `n` elements each.
Warning:
``it`` must be an actual iterator, if you pass this a
concrete sequence will get you repeating elements.
So ``chunks(iter(range(1000)), 10)`` is fine, but
``chunks(range(1000), 10)`` is not.
Example:
# n == 2
>>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 2)
>>> list(x)
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10]]
# n == 3
>>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 3)
>>> list(x)
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]]
"""
for item in it:
yield [item] + list(islice(it, n - 1))
|
Split an iterator into chunks with `n` elements each.
Warning:
``it`` must be an actual iterator, if you pass this a
concrete sequence will get you repeating elements.
So ``chunks(iter(range(1000)), 10)`` is fine, but
``chunks(range(1000), 10)`` is not.
Example:
# n == 2
>>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 2)
>>> list(x)
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10]]
# n == 3
>>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 3)
>>> list(x)
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]]
|
python
|
celery/utils/functional.py
| 112
|
[
"it",
"n"
] | false
| 2
| 6.64
|
celery/celery
| 27,741
|
unknown
| false
|
|
toString
|
@Override
public String toString() {
final StringBuilder sb = new StringBuilder(getClass().getSimpleName());
sb.append('{');
String path = keystorePath;
if (path != null) {
sb.append("path=").append(path).append(", ");
}
sb.append("type=").append(type);
sb.append(", storePassword=").append(storePassword.length == 0 ? "<empty>" : "<non-empty>");
sb.append(", keyPassword=");
if (keyPassword.length == 0) {
sb.append("<empty>");
} else if (Arrays.equals(storePassword, keyPassword)) {
sb.append("<same-as-store-password>");
} else {
sb.append("<set>");
}
sb.append(", algorithm=").append(algorithm);
sb.append('}');
return sb.toString();
}
|
Verifies that the keystore contains at least 1 private key entry.
|
java
|
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/StoreKeyConfig.java
| 209
|
[] |
String
| true
| 5
| 7.04
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
topicIdValues
|
public Map<Uuid, KafkaFuture<TopicDescription>> topicIdValues() {
return topicIdFutures;
}
|
Use when {@link Admin#describeTopics(TopicCollection, DescribeTopicsOptions)} used a TopicIdCollection
@return a map from topic IDs to futures which can be used to check the status of
individual topics if the request used topic IDs, otherwise return null.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/DescribeTopicsResult.java
| 60
|
[] | true
| 1
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
should_skip_the_package
|
def should_skip_the_package(provider_id: str, version_suffix: str) -> tuple[bool, str]:
"""Return True, version if the package should be skipped and False, good version suffix if not.
For RC and official releases we check if the "officially released" version exists
and skip the released if it was. This allows to skip packages that have not been
marked for release in this wave. For "dev" suffixes, we always build all packages.
A local version of an RC release will always be built.
"""
if version_suffix != "" and (
not version_suffix.startswith("rc") or is_local_package_version(version_suffix)
):
return False, version_suffix
if version_suffix == "":
current_tag = get_latest_provider_tag(provider_id, "")
if tag_exists_for_provider(provider_id, current_tag):
get_console().print(f"[warning]The 'final' tag {current_tag} exists. Skipping the package.[/]")
return True, version_suffix
return False, version_suffix
# version_suffix starts with "rc"
current_version = int(version_suffix[2:])
release_tag = get_latest_provider_tag(provider_id, "")
if tag_exists_for_provider(provider_id, release_tag):
get_console().print(f"[warning]The tag {release_tag} exists. Provider is released. Skipping it.[/]")
return True, version_suffix
while True:
current_tag = get_latest_provider_tag(provider_id, f"rc{current_version}")
if tag_exists_for_provider(provider_id, current_tag):
current_version += 1
get_console().print(f"[warning]The tag {current_tag} exists. Checking rc{current_version}.[/]")
else:
return False, f"rc{current_version}"
|
Return True, version if the package should be skipped and False, good version suffix if not.
For RC and official releases we check if the "officially released" version exists
and skip the released if it was. This allows to skip packages that have not been
marked for release in this wave. For "dev" suffixes, we always build all packages.
A local version of an RC release will always be built.
|
python
|
dev/breeze/src/airflow_breeze/prepare_providers/provider_distributions.py
| 51
|
[
"provider_id",
"version_suffix"
] |
tuple[bool, str]
| true
| 10
| 6
|
apache/airflow
| 43,597
|
unknown
| false
|
messageWithFallback
|
public String messageWithFallback() {
if (message == null)
return error.message();
return message;
}
|
If `message` is defined, return it. Otherwise fallback to the default error message associated with the error
code.
|
java
|
clients/src/main/java/org/apache/kafka/common/requests/ApiError.java
| 89
|
[] |
String
| true
| 2
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
field
|
public XContentBuilder field(String name, Double value) throws IOException {
return (value == null) ? nullField(name) : field(name, value.doubleValue());
}
|
@return the value of the "human readable" flag. When the value is equal to true,
some types of values are written in a format easier to read for a human.
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java
| 468
|
[
"name",
"value"
] |
XContentBuilder
| true
| 2
| 6.96
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
cardinality
|
public int cardinality() {
return bitSet.cardinality();
}
|
Returns the number of bits set to {@code true} in this {@link BitSet}.
@return the number of bits set to {@code true} in this {@link BitSet}.
|
java
|
src/main/java/org/apache/commons/lang3/util/FluentBitSet.java
| 129
|
[] | true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
repeat
|
def repeat(self, repeats: int | Sequence[int], axis: None = None) -> Series:
"""
Repeat elements of a Series.
Returns a new Series where each element of the current Series
is repeated consecutively a given number of times.
Parameters
----------
repeats : int or array of ints
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
Series.
axis : None
Unused. Parameter needed for compatibility with DataFrame.
Returns
-------
Series
Newly created Series with repeated elements.
See Also
--------
Index.repeat : Equivalent function for Index.
numpy.repeat : Similar method for :class:`numpy.ndarray`.
Examples
--------
>>> s = pd.Series(["a", "b", "c"])
>>> s
0 a
1 b
2 c
dtype: object
>>> s.repeat(2)
0 a
0 a
1 b
1 b
2 c
2 c
dtype: object
>>> s.repeat([1, 2, 3])
0 a
1 b
1 b
2 c
2 c
2 c
dtype: object
"""
nv.validate_repeat((), {"axis": axis})
new_index = self.index.repeat(repeats)
new_values = self._values.repeat(repeats)
return self._constructor(new_values, index=new_index, copy=False).__finalize__(
self, method="repeat"
)
|
Repeat elements of a Series.
Returns a new Series where each element of the current Series
is repeated consecutively a given number of times.
Parameters
----------
repeats : int or array of ints
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
Series.
axis : None
Unused. Parameter needed for compatibility with DataFrame.
Returns
-------
Series
Newly created Series with repeated elements.
See Also
--------
Index.repeat : Equivalent function for Index.
numpy.repeat : Similar method for :class:`numpy.ndarray`.
Examples
--------
>>> s = pd.Series(["a", "b", "c"])
>>> s
0 a
1 b
2 c
dtype: object
>>> s.repeat(2)
0 a
0 a
1 b
1 b
2 c
2 c
dtype: object
>>> s.repeat([1, 2, 3])
0 a
1 b
1 b
2 c
2 c
2 c
dtype: object
|
python
|
pandas/core/series.py
| 1,204
|
[
"self",
"repeats",
"axis"
] |
Series
| true
| 1
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
createHttpClient
|
private CloseableHttpAsyncClient createHttpClient() {
// default timeouts are all infinite
RequestConfig.Builder requestConfigBuilder = RequestConfig.custom()
.setConnectTimeout(DEFAULT_CONNECT_TIMEOUT_MILLIS)
.setSocketTimeout(DEFAULT_SOCKET_TIMEOUT_MILLIS);
if (requestConfigCallback != null) {
requestConfigBuilder = requestConfigCallback.customizeRequestConfig(requestConfigBuilder);
}
try {
HttpAsyncClientBuilder httpClientBuilder = HttpAsyncClientBuilder.create()
.setDefaultRequestConfig(requestConfigBuilder.build())
// default settings for connection pooling may be too constraining
.setMaxConnPerRoute(DEFAULT_MAX_CONN_PER_ROUTE)
.setMaxConnTotal(DEFAULT_MAX_CONN_TOTAL)
.setSSLContext(SSLContext.getDefault())
.setUserAgent(USER_AGENT_HEADER_VALUE)
.setTargetAuthenticationStrategy(new PersistentCredentialsAuthenticationStrategy())
.setThreadFactory(new RestClientThreadFactory());
if (httpClientConfigCallback != null) {
httpClientBuilder = httpClientConfigCallback.customizeHttpClient(httpClientBuilder);
}
final HttpAsyncClientBuilder finalBuilder = httpClientBuilder;
return AccessController.doPrivileged((PrivilegedAction<CloseableHttpAsyncClient>) finalBuilder::build);
} catch (NoSuchAlgorithmException e) {
throw new IllegalStateException("could not create the default ssl context", e);
}
}
|
Similar to {@code org.apache.http.impl.nio.reactor.AbstractMultiworkerIOReactor.DefaultThreadFactory} but with better thread names.
|
java
|
client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java
| 324
|
[] |
CloseableHttpAsyncClient
| true
| 4
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
setup
|
def setup(app):
"""
Sets the plugin up and returns configuration of the plugin.
:param app: application.
:return json description of the configuration that is needed by the plugin.
"""
directives.register_directive("exampleinclude", ExampleInclude)
app.connect("doctree-read", doctree_read)
app.add_config_value("exampleinclude_sourceroot", None, "env")
if not airflow_theme_is_available:
# Sphinx airflow theme has its own styles.
app.add_css_file("exampleinclude.css")
return {"version": "builtin", "parallel_read_safe": True, "parallel_write_safe": True}
|
Sets the plugin up and returns configuration of the plugin.
:param app: application.
:return json description of the configuration that is needed by the plugin.
|
python
|
devel-common/src/sphinx_exts/exampleinclude.py
| 263
|
[
"app"
] | false
| 2
| 6.4
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
comparator
|
public static Comparator<Class<?>> comparator() {
return COMPARATOR;
}
|
Gets the class comparator, comparing by class name.
@return the class comparator.
@since 3.13.0
|
java
|
src/main/java/org/apache/commons/lang3/ClassUtils.java
| 190
|
[] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
abortTransaction
|
default AbortTransactionResult abortTransaction(AbortTransactionSpec spec) {
return abortTransaction(spec, new AbortTransactionOptions());
}
|
Forcefully abort a transaction which is open on a topic partition. See
{@link #abortTransaction(AbortTransactionSpec, AbortTransactionOptions)} for more details.
@param spec The transaction specification including topic partition and producer details
@return The result
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 1,718
|
[
"spec"
] |
AbortTransactionResult
| true
| 1
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
markAsProcessed
|
private void markAsProcessed(Element element) {
if (element instanceof TypeElement) {
this.processedSourceTypes.add(this.typeHelper.getType(element));
}
}
|
Create a new {@code MetadataProcessor} instance.
@param processingEnvironment the processing environment of the build
@param previousMetadata any previous metadata or {@code null}
|
java
|
spring-context-indexer/src/main/java/org/springframework/context/index/processor/MetadataCollector.java
| 68
|
[
"element"
] |
void
| true
| 2
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
castIfNecessary
|
private CodeBlock castIfNecessary(boolean castNecessary, Class<?> castType, CodeBlock valueCode) {
return (castNecessary ? CodeBlock.of("($T) $L", castType, valueCode) : valueCode);
}
|
Cast the specified {@code valueCode} to the specified {@code castType} if the
{@code castNecessary} is {@code true}. Otherwise, return the valueCode as-is.
@param castNecessary whether a cast is necessary
@param castType the type to cast to
@param valueCode the code for the value
@return the existing value or a form of {@code (castType) valueCode} if a
cast is necessary
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/BeanDefinitionPropertiesCodeGenerator.java
| 414
|
[
"castNecessary",
"castType",
"valueCode"
] |
CodeBlock
| true
| 2
| 7.68
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
writeLineSeparatorsAndIndentBefore
|
function writeLineSeparatorsAndIndentBefore(node: Node, parent: Node): boolean {
const leadingNewlines = preserveSourceNewlines && getLeadingLineTerminatorCount(parent, node, ListFormat.None);
if (leadingNewlines) {
writeLinesAndIndent(leadingNewlines, /*writeSpaceIfNotIndenting*/ false);
}
return !!leadingNewlines;
}
|
Emits a list without brackets or raising events.
NOTE: You probably don't want to call this directly and should be using `emitList` or `emitExpressionList` instead.
|
typescript
|
src/compiler/emitter.ts
| 5,151
|
[
"node",
"parent"
] | true
| 3
| 6.56
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
applyNonNull
|
public static <T, U, R> R applyNonNull(final T value1, final Function<? super T, ? extends U> mapper1, final Function<? super U, ? extends R> mapper2) {
return applyNonNull(applyNonNull(value1, mapper1), mapper2);
}
|
Applies values to a chain of functions, where a {@code null} can short-circuit each step. A function is only applied if the previous value is not
{@code null}, otherwise this method returns {@code null}.
<pre>{@code
Functions.applyNonNull(" a ", String::toUpperCase, String::trim) = "A"
Functions.applyNonNull(null, String::toUpperCase, String::trim) = null
Functions.applyNonNull(" a ", s -> null, String::trim) = null
Functions.applyNonNull(" a ", String::toUpperCase, s -> null) = null
}</pre>
<p>
Useful when working with expressions that may return {@code null} as it allows a single-line expression without using temporary local variables or
evaluating expressions twice. Provides an alternative to using {@link Optional} that is shorter and has less allocation.
</p>
@param <T> The type of the input of this method and the first function.
@param <U> The type of the result of the first function and the input to the second function.
@param <R> The type of the result of the second function and this method.
@param value1 The value to apply the functions to, may be {@code null}.
@param mapper1 The first function to apply, must not be {@code null}.
@param mapper2 The second function to apply, must not be {@code null}.
@return The result of the final function (which may be {@code null}) or {@code null} if the input value or any intermediate value is {@code null}.
@see #applyNonNull(Object, Function)
@see #applyNonNull(Object, Function, Function, Function)
@since 3.19.0
|
java
|
src/main/java/org/apache/commons/lang3/function/Functions.java
| 98
|
[
"value1",
"mapper1",
"mapper2"
] |
R
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getExportsForCircularRequire
|
function getExportsForCircularRequire(module) {
const requiredESM = module[kRequiredModuleSymbol];
if (requiredESM && requiredESM.getStatus() !== kEvaluated) {
let message = `Cannot require() ES Module ${module.id} in a cycle.`;
const parent = module[kLastModuleParent];
if (parent) {
message += ` (from ${parent.filename})`;
}
throw new ERR_REQUIRE_CYCLE_MODULE(message);
}
if (module.exports &&
!isProxy(module.exports) &&
ObjectGetPrototypeOf(module.exports) === ObjectPrototype &&
// Exclude transpiled ES6 modules / TypeScript code because those may
// employ unusual patterns for accessing 'module.exports'. That should
// be okay because ES6 modules have a different approach to circular
// dependencies anyway.
!module.exports.__esModule) {
// This is later unset once the module is done loading.
ObjectSetPrototypeOf(
module.exports, CircularRequirePrototypeWarningProxy);
}
return module.exports;
}
|
Returns the exports object for a module that has a circular `require`.
If the exports object is a plain object, it is wrapped in a proxy that warns
about circular dependencies.
@param {Module} module The module instance
@returns {object}
|
javascript
|
lib/internal/modules/cjs/loader.js
| 1,004
|
[
"module"
] | false
| 8
| 6.24
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
runUninterruptiblyWithTimeout
|
@Override
public void runUninterruptiblyWithTimeout(
Runnable runnable, long timeoutDuration, TimeUnit timeoutUnit) throws TimeoutException {
checkNotNull(runnable);
checkNotNull(timeoutUnit);
checkPositiveTimeout(timeoutDuration);
Future<?> future = executor.submit(runnable);
try {
getUninterruptibly(future, timeoutDuration, timeoutUnit);
} catch (TimeoutException e) {
future.cancel(true /* mayInterruptIfRunning */);
throw e;
} catch (ExecutionException e) {
wrapAndThrowRuntimeExecutionExceptionOrError(e.getCause());
throw new AssertionError();
}
}
|
Creates a TimeLimiter instance using the given executor service to execute method calls.
<p><b>Warning:</b> using a bounded executor may be counterproductive! If the thread pool fills
up, any time callers spend waiting for a thread may count toward their time limit, and in this
case the call may even time out before the target method is ever invoked.
@param executor the ExecutorService that will execute the method calls on the target objects;
for example, a {@link Executors#newCachedThreadPool()}.
@since 22.0
|
java
|
android/guava/src/com/google/common/util/concurrent/SimpleTimeLimiter.java
| 201
|
[
"runnable",
"timeoutDuration",
"timeoutUnit"
] |
void
| true
| 3
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
i0
|
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`.
Parameters
----------
x : array_like of float
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = float
The modified Bessel function evaluated at each of the elements of `x`.
See Also
--------
scipy.special.i0, scipy.special.iv, scipy.special.ive
Notes
-----
The scipy implementation is recommended over this function: it is a
proper ufunc written in C, and more than an order of magnitude faster.
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is
partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
polynomial expansions are employed in each interval. Relative error on
the domain [0,30] using IEEE arithmetic is documented [3]_ as having a
peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
https://personal.math.ubc.ca/~cbm/aands/page_379.htm
.. [3] https://metacpan.org/pod/distribution/Math-Cephes/lib/Math/Cephes.pod#i0:-Modified-Bessel-function-of-order-zero
Examples
--------
>>> import numpy as np
>>> np.i0(0.)
array(1.0)
>>> np.i0([0, 1, 2, 3])
array([1. , 1.26606588, 2.2795853 , 4.88079259])
"""
x = np.asanyarray(x)
if x.dtype.kind == 'c':
raise TypeError("i0 not supported for complex values")
if x.dtype.kind != 'f':
x = x.astype(float)
x = np.abs(x)
return piecewise(x, [x <= 8.0], [_i0_1, _i0_2])
|
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`.
Parameters
----------
x : array_like of float
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = float
The modified Bessel function evaluated at each of the elements of `x`.
See Also
--------
scipy.special.i0, scipy.special.iv, scipy.special.ive
Notes
-----
The scipy implementation is recommended over this function: it is a
proper ufunc written in C, and more than an order of magnitude faster.
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is
partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
polynomial expansions are employed in each interval. Relative error on
the domain [0,30] using IEEE arithmetic is documented [3]_ as having a
peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
https://personal.math.ubc.ca/~cbm/aands/page_379.htm
.. [3] https://metacpan.org/pod/distribution/Math-Cephes/lib/Math/Cephes.pod#i0:-Modified-Bessel-function-of-order-zero
Examples
--------
>>> import numpy as np
>>> np.i0(0.)
array(1.0)
>>> np.i0([0, 1, 2, 3])
array([1. , 1.26606588, 2.2795853 , 4.88079259])
|
python
|
numpy/lib/_function_base_impl.py
| 3,533
|
[
"x"
] | false
| 3
| 7.04
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
toCharArray
|
public char[] toCharArray() {
if (size == 0) {
return ArrayUtils.EMPTY_CHAR_ARRAY;
}
return ArrayUtils.arraycopy(buffer, 0, 0, size, char[]::new);
}
|
Copies the builder's character array into a new character array.
@return a new array that represents the contents of the builder
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 2,936
|
[] | true
| 2
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
setAgentSetting
|
@SuppressForbidden(reason = "Need to be able to manipulate APM agent-related properties to set them dynamically")
public void setAgentSetting(String key, String value) {
if (key.startsWith("global_labels.")) {
// Invalid agent setting, leftover from flattening global labels in APMJVMOptions
// https://github.com/elastic/elasticsearch/issues/120791
return;
}
final String completeKey = "elastic.apm." + Objects.requireNonNull(key);
if (value == null || value.isEmpty()) {
LOGGER.trace("Clearing system property [{}]", completeKey);
System.clearProperty(completeKey);
} else {
LOGGER.trace("Setting setting property [{}] to [{}]", completeKey, value);
System.setProperty(completeKey, value);
}
}
|
Copies a setting to the APM agent's system properties under <code>elastic.apm</code>, either
by setting the property if {@code value} has a value, or by deleting the property if it doesn't.
All permitted agent properties must be covered by the <code>write_system_properties</code> entitlement,
see the entitlement policy of this module!
@param key the config key to set, without any prefix
@param value the value to set, or <code>null</code>
|
java
|
modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java
| 87
|
[
"key",
"value"
] |
void
| true
| 4
| 7.2
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
toCall
|
public void toCall(Runnable runnable) {
Assert.notNull(runnable, "'runnable' must not be null");
T value = getValue();
if (value != null && test(value)) {
runnable.run();
}
}
|
Complete the mapping by calling the specified method when the value has not
been filtered.
@param runnable the method to call if the value has not been filtered
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/PropertyMapper.java
| 338
|
[
"runnable"
] |
void
| true
| 3
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
addMessage
|
public void addMessage(String code, Locale locale, String msg) {
Assert.notNull(code, "Code must not be null");
Assert.notNull(locale, "Locale must not be null");
Assert.notNull(msg, "Message must not be null");
this.messageMap.computeIfAbsent(code, key -> new HashMap<>(4)).put(locale, new MessageHolder(msg, locale));
if (logger.isDebugEnabled()) {
logger.debug("Added message [" + msg + "] for code [" + code + "] and Locale [" + locale + "]");
}
}
|
Associate the given message with the given code.
@param code the lookup code
@param locale the locale that the message should be found within
@param msg the message associated with this lookup code
|
java
|
spring-context/src/main/java/org/springframework/context/support/StaticMessageSource.java
| 75
|
[
"code",
"locale",
"msg"
] |
void
| true
| 2
| 7.04
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
createSslContext
|
default SSLContext createSslContext(String protocol) {
try {
SSLContext sslContext = SSLContext.getInstance(protocol);
sslContext.init(getKeyManagers(), getTrustManagers(), null);
return sslContext;
}
catch (Exception ex) {
throw new IllegalStateException("Could not load SSL context: " + ex.getMessage(), ex);
}
}
|
Factory method to create a new {@link SSLContext} for the {@link #getKeyManagers()
key managers} and {@link #getTrustManagers() trust managers} managed by this
instance.
@param protocol the standard name of the SSL protocol. See
{@link SSLContext#getInstance(String)}
@return a new {@link SSLContext} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/SslManagerBundle.java
| 83
|
[
"protocol"
] |
SSLContext
| true
| 2
| 7.44
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
representative_combos
|
def representative_combos(list_1: list[str], list_2: list[str]) -> list[tuple[str, str]]:
"""
Include only representative combos from the matrix of the two lists - making sure that each of the
elements contributing is present at least once.
:param list_1: first list
:param list_2: second list
:return: list of combinations with guaranteed at least one element from each of the list
"""
all_selected_combinations: list[tuple[str, str]] = []
for i in range(max(len(list_1), len(list_2))):
all_selected_combinations.append((list_1[i % len(list_1)], list_2[i % len(list_2)]))
return all_selected_combinations
|
Include only representative combos from the matrix of the two lists - making sure that each of the
elements contributing is present at least once.
:param list_1: first list
:param list_2: second list
:return: list of combinations with guaranteed at least one element from each of the list
|
python
|
dev/breeze/src/airflow_breeze/utils/exclude_from_matrix.py
| 22
|
[
"list_1",
"list_2"
] |
list[tuple[str, str]]
| true
| 2
| 8.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
maybeSendShareAcknowledgementEvent
|
private void maybeSendShareAcknowledgementEvent(Map<TopicIdPartition, Acknowledgements> acknowledgementsMap,
boolean checkForRenewAcknowledgements,
Optional<Integer> acquisitionLockTimeoutMs) {
if (isAcknowledgementCommitCallbackRegistered || checkForRenewAcknowledgements) {
ShareAcknowledgementEvent event = new ShareAcknowledgementEvent(acknowledgementsMap, checkForRenewAcknowledgements, acquisitionLockTimeoutMs);
acknowledgeEventHandler.add(event);
}
}
|
Process acknowledgeRequestStates and prepares a list of acknowledgements to be sent in the poll().
@param currentTimeMs the current time in ms.
@return the PollResult containing zero or more acknowledgements.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java
| 411
|
[
"acknowledgementsMap",
"checkForRenewAcknowledgements",
"acquisitionLockTimeoutMs"
] |
void
| true
| 3
| 7.6
|
apache/kafka
| 31,560
|
javadoc
| false
|
from_product
|
def from_product(
cls,
iterables: Sequence[Iterable[Hashable]],
sortorder: int | None = None,
names: Sequence[Hashable] | Hashable | lib.NoDefault = lib.no_default,
) -> MultiIndex:
"""
Make a MultiIndex from the cartesian product of multiple iterables.
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
If not explicitly provided, names will be inferred from the
elements of iterables if an element has a name attribute.
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = ["green", "purple"]
>>> pd.MultiIndex.from_product([numbers, colors], names=["number", "color"])
MultiIndex([(0, 'green'),
(0, 'purple'),
(1, 'green'),
(1, 'purple'),
(2, 'green'),
(2, 'purple')],
names=['number', 'color'])
"""
if not is_list_like(iterables):
raise TypeError("Input must be a list / sequence of iterables.")
if is_iterator(iterables):
iterables = list(iterables)
codes, levels = factorize_from_iterables(iterables)
if names is lib.no_default:
names = [getattr(it, "name", None) for it in iterables]
# codes are all ndarrays, so cartesian_product is lossless
codes = cartesian_product(codes)
return cls(levels, codes, sortorder=sortorder, names=names)
|
Make a MultiIndex from the cartesian product of multiple iterables.
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
If not explicitly provided, names will be inferred from the
elements of iterables if an element has a name attribute.
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = ["green", "purple"]
>>> pd.MultiIndex.from_product([numbers, colors], names=["number", "color"])
MultiIndex([(0, 'green'),
(0, 'purple'),
(1, 'green'),
(1, 'purple'),
(2, 'green'),
(2, 'purple')],
names=['number', 'color'])
|
python
|
pandas/core/indexes/multi.py
| 597
|
[
"cls",
"iterables",
"sortorder",
"names"
] |
MultiIndex
| true
| 4
| 8.32
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
_justify
|
def _justify(
head: list[Sequence[str]], tail: list[Sequence[str]]
) -> tuple[list[tuple[str, ...]], list[tuple[str, ...]]]:
"""
Justify items in head and tail, so they are right-aligned when stacked.
Parameters
----------
head : list-like of list-likes of strings
tail : list-like of list-likes of strings
Returns
-------
tuple of list of tuples of strings
Same as head and tail, but items are right aligned when stacked
vertically.
Examples
--------
>>> _justify([["a", "b"]], [["abc", "abcd"]])
([(' a', ' b')], [('abc', 'abcd')])
"""
combined = head + tail
# For each position for the sequences in ``combined``,
# find the length of the largest string.
max_length = [0] * len(combined[0])
for inner_seq in combined:
length = [len(item) for item in inner_seq]
max_length = [max(x, y) for x, y in zip(max_length, length, strict=True)]
# justify each item in each list-like in head and tail using max_length
head_tuples = [
tuple(x.rjust(max_len) for x, max_len in zip(seq, max_length, strict=True))
for seq in head
]
tail_tuples = [
tuple(x.rjust(max_len) for x, max_len in zip(seq, max_length, strict=True))
for seq in tail
]
return head_tuples, tail_tuples
|
Justify items in head and tail, so they are right-aligned when stacked.
Parameters
----------
head : list-like of list-likes of strings
tail : list-like of list-likes of strings
Returns
-------
tuple of list of tuples of strings
Same as head and tail, but items are right aligned when stacked
vertically.
Examples
--------
>>> _justify([["a", "b"]], [["abc", "abcd"]])
([(' a', ' b')], [('abc', 'abcd')])
|
python
|
pandas/io/formats/printing.py
| 471
|
[
"head",
"tail"
] |
tuple[list[tuple[str, ...]], list[tuple[str, ...]]]
| true
| 2
| 8.48
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
closeAll
|
@SuppressWarnings("CatchingUnchecked") // sneaky checked exception
private static void closeAll(BaseStream<?, ?>[] toClose) {
// If one of the streams throws an exception, continue closing the others, then throw the
// exception later. If more than one stream throws an exception, the later ones are added to the
// first as suppressed exceptions. We don't catch Error on the grounds that it should be allowed
// to propagate immediately.
Exception exception = null;
for (BaseStream<?, ?> stream : toClose) {
try {
stream.close();
} catch (Exception e) { // sneaky checked exception
if (exception == null) {
exception = e;
} else {
exception.addSuppressed(e);
}
}
}
if (exception != null) {
// Normally this is a RuntimeException that doesn't need sneakyThrow.
// But theoretically we could see sneaky checked exception
sneakyThrow(exception);
}
}
|
If a value is present in {@code optional}, returns a stream containing only that element,
otherwise returns an empty stream.
<p><b>Java 9 users:</b> use {@code optional.stream()} instead.
|
java
|
android/guava/src/com/google/common/collect/Streams.java
| 157
|
[
"toClose"
] |
void
| true
| 4
| 6
|
google/guava
| 51,352
|
javadoc
| false
|
isLibraryFileOrClassesDirectory
|
protected boolean isLibraryFileOrClassesDirectory(Archive.Entry entry) {
String name = entry.name();
if (entry.isDirectory()) {
return name.equals("BOOT-INF/classes/");
}
return name.startsWith("BOOT-INF/lib/");
}
|
Determine if the specified entry is a nested item that should be added to the
classpath.
@param entry the entry to check
@return {@code true} if the entry is a nested item (jar or directory)
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/launch/Launcher.java
| 183
|
[
"entry"
] | true
| 2
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
T
|
def T(self):
"""
Returns the transpose of the matrix.
Does *not* conjugate! For the complex conjugate transpose, use ``.H``.
Parameters
----------
None
Returns
-------
ret : matrix object
The (non-conjugated) transpose of the matrix.
See Also
--------
transpose, getH
Examples
--------
>>> m = np.matrix('[1, 2; 3, 4]')
>>> m
matrix([[1, 2],
[3, 4]])
>>> m.getT()
matrix([[1, 3],
[2, 4]])
"""
return self.transpose()
|
Returns the transpose of the matrix.
Does *not* conjugate! For the complex conjugate transpose, use ``.H``.
Parameters
----------
None
Returns
-------
ret : matrix object
The (non-conjugated) transpose of the matrix.
See Also
--------
transpose, getH
Examples
--------
>>> m = np.matrix('[1, 2; 3, 4]')
>>> m
matrix([[1, 2],
[3, 4]])
>>> m.getT()
matrix([[1, 3],
[2, 4]])
|
python
|
numpy/matrixlib/defmatrix.py
| 943
|
[
"self"
] | false
| 1
| 6.48
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
write
|
@Override
public int write(ByteBuffer src) throws IOException {
return socketChannel.write(src);
}
|
Writes a sequence of bytes to this channel from the given buffer.
@param src The buffer from which bytes are to be retrieved
@return The number of bytes written, possibly zero
@throws IOException If some other I/O error occurs
|
java
|
clients/src/main/java/org/apache/kafka/common/network/PlaintextTransportLayer.java
| 137
|
[
"src"
] | true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
lookup
|
@Override
public String lookup(final String key) {
if (map == null) {
return null;
}
return Objects.toString(map.get(key), null);
}
|
Looks up a String key to a String value using the map.
<p>
If the map is null, then null is returned.
The map result object is converted to a string using toString().
</p>
@param key the key to be looked up, may be null.
@return the matching value, null if no match.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrLookup.java
| 78
|
[
"key"
] |
String
| true
| 2
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
wrapperCommit
|
function wrapperCommit() {
return new LodashWrapper(this.value(), this.__chain__);
}
|
Executes the chain sequence and returns the wrapped result.
@name commit
@memberOf _
@since 3.2.0
@category Seq
@returns {Object} Returns the new `lodash` wrapper instance.
@example
var array = [1, 2];
var wrapped = _(array).push(3);
console.log(array);
// => [1, 2]
wrapped = wrapped.commit();
console.log(array);
// => [1, 2, 3]
wrapped.last();
// => 3
console.log(array);
// => [1, 2, 3]
|
javascript
|
lodash.js
| 8,998
|
[] | false
| 1
| 7.44
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
_find_path_from_directory
|
def _find_path_from_directory(
base_dir_path: str | os.PathLike[str],
ignore_file_name: str,
ignore_rule_type: type[_IgnoreRule],
) -> Generator[str, None, None]:
"""
Recursively search the base path and return the list of file paths that should not be ignored.
:param base_dir_path: the base path to be searched
:param ignore_file_name: the file name containing regular expressions for files that should be ignored.
:param ignore_rule_type: the concrete class for ignore rules, which implements the _IgnoreRule interface.
:return: a generator of file paths which should not be ignored.
"""
# A Dict of patterns, keyed using resolved, absolute paths
patterns_by_dir: dict[Path, list[_IgnoreRule]] = {}
for root, dirs, files in os.walk(base_dir_path, followlinks=True):
patterns: list[_IgnoreRule] = patterns_by_dir.get(Path(root).resolve(), [])
ignore_file_path = Path(root) / ignore_file_name
if ignore_file_path.is_file():
with open(ignore_file_path) as ifile:
patterns_to_match_excluding_comments = [
re.sub(r"\s*#.*", "", line) for line in ifile.read().split("\n")
]
# append new patterns and filter out "None" objects, which are invalid patterns
patterns += [
p
for p in [
ignore_rule_type.compile(pattern, Path(base_dir_path), ignore_file_path)
for pattern in patterns_to_match_excluding_comments
if pattern
]
if p is not None
]
# evaluation order of patterns is important with negation
# so that later patterns can override earlier patterns
dirs[:] = [subdir for subdir in dirs if not ignore_rule_type.match(Path(root) / subdir, patterns)]
# explicit loop for infinite recursion detection since we are following symlinks in this walk
for sd in dirs:
dirpath = (Path(root) / sd).resolve()
if dirpath in patterns_by_dir:
raise RuntimeError(
"Detected recursive loop when walking DAG directory "
f"{base_dir_path}: {dirpath} has appeared more than once."
)
patterns_by_dir.update({dirpath: patterns.copy()})
for file in files:
if file != ignore_file_name:
abs_file_path = Path(root) / file
if not ignore_rule_type.match(abs_file_path, patterns):
yield str(abs_file_path)
|
Recursively search the base path and return the list of file paths that should not be ignored.
:param base_dir_path: the base path to be searched
:param ignore_file_name: the file name containing regular expressions for files that should be ignored.
:param ignore_rule_type: the concrete class for ignore rules, which implements the _IgnoreRule interface.
:return: a generator of file paths which should not be ignored.
|
python
|
airflow-core/src/airflow/utils/file.py
| 167
|
[
"base_dir_path",
"ignore_file_name",
"ignore_rule_type"
] |
Generator[str, None, None]
| true
| 8
| 8.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
lazyStackTraceIsLazy
|
@Deprecated
@J2ktIncompatible
@GwtIncompatible // getStackTraceElementMethod
public static boolean lazyStackTraceIsLazy() {
return getStackTraceElementMethod != null && getStackTraceDepthMethod != null;
}
|
Returns whether {@link #lazyStackTrace} will use the special implementation described in its
documentation.
@since 19.0
@deprecated This method always returns false on JDK versions past JDK 8 and on all Android
versions.
|
java
|
android/guava/src/com/google/common/base/Throwables.java
| 394
|
[] | true
| 2
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
indexOf
|
public static int indexOf(boolean[] array, boolean target) {
return indexOf(array, target, 0, array.length);
}
|
Returns the index of the first appearance of the value {@code target} in {@code array}.
<p><b>Note:</b> consider representing the array as a {@link java.util.BitSet} instead, and
using {@link java.util.BitSet#nextSetBit(int)} or {@link java.util.BitSet#nextClearBit(int)}.
@param array an array of {@code boolean} values, possibly empty
@param target a primitive {@code boolean} value
@return the least index {@code i} for which {@code array[i] == target}, or {@code -1} if no
such index exists.
|
java
|
android/guava/src/com/google/common/primitives/Booleans.java
| 161
|
[
"array",
"target"
] | true
| 1
| 6.32
|
google/guava
| 51,352
|
javadoc
| false
|
|
removeElements
|
@SafeVarargs
public static <T> T[] removeElements(final T[] array, final T... values) {
if (isEmpty(array) || isEmpty(values)) {
return clone(array);
}
final HashMap<T, MutableInt> occurrences = new HashMap<>(values.length);
for (final T v : values) {
increment(occurrences, v);
}
final BitSet toRemove = new BitSet();
for (int i = 0; i < array.length; i++) {
final T key = array[i];
final MutableInt count = occurrences.get(key);
if (count != null) {
if (count.decrementAndGet() == 0) {
occurrences.remove(key);
}
toRemove.set(i);
}
}
@SuppressWarnings("unchecked") // removeAll() always creates an array of the same type as its input
final T[] result = (T[]) removeAt(array, toRemove);
return result;
}
|
Removes occurrences of specified elements, in specified quantities,
from the specified array. All subsequent elements are shifted left.
For any element-to-be-removed specified in greater quantities than
contained in the original array, no change occurs beyond the
removal of the existing matching items.
<p>
This method returns a new array with the same elements of the input
array except for the earliest-encountered occurrences of the specified
elements. The component type of the returned array is always the same
as that of the input array.
</p>
<pre>
ArrayUtils.removeElements(null, "a", "b") = null
ArrayUtils.removeElements([], "a", "b") = []
ArrayUtils.removeElements(["a"], "b", "c") = ["a"]
ArrayUtils.removeElements(["a", "b"], "a", "c") = ["b"]
ArrayUtils.removeElements(["a", "b", "a"], "a") = ["b", "a"]
ArrayUtils.removeElements(["a", "b", "a"], "a", "a") = ["b"]
</pre>
@param <T> the component type of the array
@param array the input array, will not be modified, and may be {@code null}.
@param values the values to be removed.
@return A new array containing the existing elements except the
earliest-encountered occurrences of the specified elements.
@since 3.0.1
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 6,308
|
[
"array"
] | true
| 6
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
appendln
|
public StrBuilder appendln(final StrBuilder str) {
return append(str).appendNewLine();
}
|
Appends another string builder followed by a new line to this string builder.
Appending null will call {@link #appendNull()}.
@param str the string builder to append
@return {@code this} instance.
@since 2.3
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,039
|
[
"str"
] |
StrBuilder
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
alterReplicaLogDirs
|
default AlterReplicaLogDirsResult alterReplicaLogDirs(Map<TopicPartitionReplica, String> replicaAssignment) {
return alterReplicaLogDirs(replicaAssignment, new AlterReplicaLogDirsOptions());
}
|
Change the log directory for the specified replicas. If the replica does not exist on the broker, the result
shows REPLICA_NOT_AVAILABLE for the given replica and the replica will be created in the given log directory on the
broker when it is created later. If the replica already exists on the broker, the replica will be moved to the given
log directory if it is not already there. For detailed result, inspect the returned {@link AlterReplicaLogDirsResult} instance.
<p>
This operation is not transactional so it may succeed for some replicas while failing for others.
<p>
This is a convenience method for {@link #alterReplicaLogDirs(Map, AlterReplicaLogDirsOptions)} with default options.
See the overload for more details.
<p>
This operation is supported by brokers with version 1.1.0 or higher.
@param replicaAssignment The replicas with their log directory absolute path
@return The AlterReplicaLogDirsResult
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 549
|
[
"replicaAssignment"
] |
AlterReplicaLogDirsResult
| true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
manageMapIfNecessary
|
private Object manageMapIfNecessary(Map<?, ?> map) {
boolean containsRuntimeRefs = false;
for (Object element : map.values()) {
if (element instanceof RuntimeBeanReference) {
containsRuntimeRefs = true;
break;
}
}
if (containsRuntimeRefs) {
Map<Object, Object> managedMap = new ManagedMap<>();
managedMap.putAll(map);
return managedMap;
}
return map;
}
|
Checks whether there are any {@link RuntimeBeanReference RuntimeBeanReferences}
inside the {@link Map} and converts it to a {@link ManagedMap} if necessary.
@param map the original Map
@return either the original map or a managed copy of it
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/groovy/GroovyBeanDefinitionReader.java
| 567
|
[
"map"
] |
Object
| true
| 3
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
registerBeanDefinitions
|
public int registerBeanDefinitions(ResourceBundle rb, @Nullable String prefix) throws BeanDefinitionStoreException {
// Simply create a map and call overloaded method.
Map<String, Object> map = new HashMap<>();
Enumeration<String> keys = rb.getKeys();
while (keys.hasMoreElements()) {
String key = keys.nextElement();
map.put(key, rb.getObject(key));
}
return registerBeanDefinitions(map, prefix);
}
|
Register bean definitions contained in a ResourceBundle.
<p>Similar syntax as for a Map. This method is useful to enable
standard Java internationalization support.
@param rb the ResourceBundle to load from
@param prefix a filter within the keys in the map: for example, 'beans.'
(can be empty or {@code null})
@return the number of bean definitions found
@throws BeanDefinitionStoreException in case of loading or parsing errors
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/PropertiesBeanDefinitionReader.java
| 301
|
[
"rb",
"prefix"
] | true
| 2
| 8.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
toInteger
|
public static int toInteger(final boolean bool, final int trueValue, final int falseValue) {
return bool ? trueValue : falseValue;
}
|
Converts a boolean to an int specifying the conversion values.
<pre>
BooleanUtils.toInteger(true, 1, 0) = 1
BooleanUtils.toInteger(false, 1, 0) = 0
</pre>
@param bool the boolean to convert
@param trueValue the value to return if {@code true}
@param falseValue the value to return if {@code false}
@return the appropriate value
|
java
|
src/main/java/org/apache/commons/lang3/BooleanUtils.java
| 903
|
[
"bool",
"trueValue",
"falseValue"
] | true
| 2
| 7.84
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
make_gaussian_quantiles
|
def make_gaussian_quantiles(
*,
mean=None,
cov=1.0,
n_samples=100,
n_features=2,
n_classes=3,
shuffle=True,
random_state=None,
):
r"""Generate isotropic Gaussian and label samples by quantile.
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array-like of shape (n_features,), default=None
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, default=1.0
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, default=100
The total number of points equally divided among classes.
n_features : int, default=2
The number of features for each sample.
n_classes : int, default=3
The number of classes.
shuffle : bool, default=True
Shuffle the samples.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The generated samples.
y : ndarray of shape (n_samples,)
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1]_.
References
----------
.. [1] :doi:`J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost."
Statistics and its Interface 2.3 (2009): 349-360.
<10.4310/SII.2009.v2.n3.a8>`
Examples
--------
>>> from sklearn.datasets import make_gaussian_quantiles
>>> X, y = make_gaussian_quantiles(random_state=42)
>>> X.shape
(100, 2)
>>> y.shape
(100,)
>>> list(y[:5])
[np.int64(2), np.int64(0), np.int64(1), np.int64(0), np.int64(2)]
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features), (n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack(
[
np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes),
]
)
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
|
r"""Generate isotropic Gaussian and label samples by quantile.
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array-like of shape (n_features,), default=None
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, default=1.0
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, default=100
The total number of points equally divided among classes.
n_features : int, default=2
The number of features for each sample.
n_classes : int, default=3
The number of classes.
shuffle : bool, default=True
Shuffle the samples.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The generated samples.
y : ndarray of shape (n_samples,)
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1]_.
References
----------
.. [1] :doi:`J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost."
Statistics and its Interface 2.3 (2009): 349-360.
<10.4310/SII.2009.v2.n3.a8>`
Examples
--------
>>> from sklearn.datasets import make_gaussian_quantiles
>>> X, y = make_gaussian_quantiles(random_state=42)
>>> X.shape
(100, 2)
>>> y.shape
(100,)
>>> list(y[:5])
[np.int64(2), np.int64(0), np.int64(1), np.int64(0), np.int64(2)]
|
python
|
sklearn/datasets/_samples_generator.py
| 2,005
|
[
"mean",
"cov",
"n_samples",
"n_features",
"n_classes",
"shuffle",
"random_state"
] | false
| 5
| 7.28
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
_find_valid_index
|
def _find_valid_index(self, *, how: str) -> Hashable:
"""
Retrieves the index of the first valid value.
Parameters
----------
how : {'first', 'last'}
Use this parameter to change between the first or last valid index.
Returns
-------
idx_first_valid : type of index
"""
is_valid = self.notna().values
idxpos = find_valid_index(how=how, is_valid=is_valid)
if idxpos is None:
return None
return self.index[idxpos]
|
Retrieves the index of the first valid value.
Parameters
----------
how : {'first', 'last'}
Use this parameter to change between the first or last valid index.
Returns
-------
idx_first_valid : type of index
|
python
|
pandas/core/generic.py
| 11,581
|
[
"self",
"how"
] |
Hashable
| true
| 2
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
stubTrue
|
function stubTrue() {
return true;
}
|
This method returns `true`.
@static
@memberOf _
@since 4.13.0
@category Util
@returns {boolean} Returns `true`.
@example
_.times(2, _.stubTrue);
// => [true, true]
|
javascript
|
lodash.js
| 16,224
|
[] | false
| 1
| 7.12
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
max
|
public static double max(final double... array) {
// Validates input
validateArray(array);
// Finds and returns max
double max = array[0];
for (int j = 1; j < array.length; j++) {
if (Double.isNaN(array[j])) {
return Double.NaN;
}
if (array[j] > max) {
max = array[j];
}
}
return max;
}
|
Returns the maximum value in an array.
@param array an array, must not be null or empty.
@return the maximum value in the array.
@throws NullPointerException if {@code array} is {@code null}.
@throws IllegalArgumentException if {@code array} is empty.
@see IEEE754rUtils#max(double[]) IEEE754rUtils for a version of this method that handles NaN differently.
@since 3.4 Changed signature from max(double[]) to max(double...)
|
java
|
src/main/java/org/apache/commons/lang3/math/NumberUtils.java
| 863
|
[] | true
| 4
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
toFullyQualifiedName
|
public static String toFullyQualifiedName(final Class<?> context, final String resourceName) {
Objects.requireNonNull(context, "context");
Objects.requireNonNull(resourceName, "resourceName");
return toFullyQualifiedName(context.getPackage(), resourceName);
}
|
Returns the fully qualified name for the resource with name {@code resourceName} relative to the given context.
<p>
Note that this method does not check whether the resource actually exists. It only constructs the name. Null inputs are not allowed.
</p>
<pre>
ClassPathUtils.toFullyQualifiedName(StringUtils.class, "StringUtils.properties") = "org.apache.commons.lang3.StringUtils.properties"
</pre>
@param context The context for constructing the name.
@param resourceName the resource name to construct the fully qualified name for.
@return the fully qualified name of the resource with name {@code resourceName}.
@throws NullPointerException if either {@code context} or {@code resourceName} is null.
|
java
|
src/main/java/org/apache/commons/lang3/ClassPathUtils.java
| 73
|
[
"context",
"resourceName"
] |
String
| true
| 1
| 6.4
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
get_instance
|
def get_instance(
self,
*,
instance_id: str,
project: str | None = None,
endpoint: str | None = None,
) -> Instance:
"""
Get a MaxCompute task instance.
.. seealso:: https://pyodps.readthedocs.io/en/latest/base-instances.html#instances
:param instance_id: The ID of the instance to get.
:param project: The project ID to use.
:param endpoint: The endpoint to use.
:return: The MaxCompute task instance.
:raises ValueError: If the instance does not exist.
"""
client = self.get_client(project=project, endpoint=endpoint)
return client.get_instance(id_=instance_id, project=project)
|
Get a MaxCompute task instance.
.. seealso:: https://pyodps.readthedocs.io/en/latest/base-instances.html#instances
:param instance_id: The ID of the instance to get.
:param project: The project ID to use.
:param endpoint: The endpoint to use.
:return: The MaxCompute task instance.
:raises ValueError: If the instance does not exist.
|
python
|
providers/alibaba/src/airflow/providers/alibaba/cloud/hooks/maxcompute.py
| 201
|
[
"self",
"instance_id",
"project",
"endpoint"
] |
Instance
| true
| 1
| 6.4
|
apache/airflow
| 43,597
|
sphinx
| false
|
resolveAutowiringValue
|
public static Object resolveAutowiringValue(Object autowiringValue, Class<?> requiredType) {
if (autowiringValue instanceof ObjectFactory<?> factory && !requiredType.isInstance(autowiringValue)) {
if (autowiringValue instanceof Serializable && requiredType.isInterface()) {
autowiringValue = Proxy.newProxyInstance(requiredType.getClassLoader(),
new Class<?>[] {requiredType}, new ObjectFactoryDelegatingInvocationHandler(factory));
}
else {
return factory.getObject();
}
}
return autowiringValue;
}
|
Resolve the given autowiring value against the given required type,
for example, an {@link ObjectFactory} value to its actual object result.
@param autowiringValue the value to resolve
@param requiredType the type to assign the result to
@return the resolved value
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AutowireUtils.java
| 134
|
[
"autowiringValue",
"requiredType"
] |
Object
| true
| 5
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
analyze
|
protected abstract @Nullable FailureAnalysis analyze(Throwable rootFailure, T cause);
|
Returns an analysis of the given {@code rootFailure}, or {@code null} if no
analysis was possible.
@param rootFailure the root failure passed to the analyzer
@param cause the actual found cause
@return the analysis or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/diagnostics/AbstractFailureAnalyzer.java
| 47
|
[
"rootFailure",
"cause"
] |
FailureAnalysis
| true
| 1
| 6.16
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
handle_url_build_error
|
def handle_url_build_error(
self, error: BuildError, endpoint: str, values: dict[str, t.Any]
) -> str:
"""Called by :meth:`.url_for` if a
:exc:`~werkzeug.routing.BuildError` was raised. If this returns
a value, it will be returned by ``url_for``, otherwise the error
will be re-raised.
Each function in :attr:`url_build_error_handlers` is called with
``error``, ``endpoint`` and ``values``. If a function returns
``None`` or raises a ``BuildError``, it is skipped. Otherwise,
its return value is returned by ``url_for``.
:param error: The active ``BuildError`` being handled.
:param endpoint: The endpoint being built.
:param values: The keyword arguments passed to ``url_for``.
"""
for handler in self.url_build_error_handlers:
try:
rv = handler(error, endpoint, values)
except BuildError as e:
# make error available outside except block
error = e
else:
if rv is not None:
return rv
# Re-raise if called with an active exception, otherwise raise
# the passed in exception.
if error is sys.exc_info()[1]:
raise
raise error
|
Called by :meth:`.url_for` if a
:exc:`~werkzeug.routing.BuildError` was raised. If this returns
a value, it will be returned by ``url_for``, otherwise the error
will be re-raised.
Each function in :attr:`url_build_error_handlers` is called with
``error``, ``endpoint`` and ``values``. If a function returns
``None`` or raises a ``BuildError``, it is skipped. Otherwise,
its return value is returned by ``url_for``.
:param error: The active ``BuildError`` being handled.
:param endpoint: The endpoint being built.
:param values: The keyword arguments passed to ``url_for``.
|
python
|
src/flask/sansio/app.py
| 974
|
[
"self",
"error",
"endpoint",
"values"
] |
str
| true
| 5
| 6.72
|
pallets/flask
| 70,946
|
sphinx
| false
|
set_dag_run_state_to_success
|
def set_dag_run_state_to_success(
*,
dag: SerializedDAG,
run_id: str | None = None,
commit: bool = False,
session: SASession = NEW_SESSION,
) -> list[TaskInstance]:
"""
Set the dag run's state to success.
Set for a specific logical date and its task instances to success.
:param dag: the DAG of which to alter state
:param run_id: the run_id to start looking from
:param commit: commit DAG and tasks to be altered to the database
:param session: database session
:return: If commit is true, list of tasks that have been updated,
otherwise list of tasks that will be updated
:raises: ValueError if dag or logical_date is invalid
"""
if not dag:
return []
if not run_id:
raise ValueError(f"Invalid dag_run_id: {run_id}")
tasks = dag.tasks
# Mark all task instances of the dag run to success - except for unfinished teardown as they need to complete work.
teardown_tasks = [task for task in tasks if task.is_teardown]
unfinished_teardown_task_ids = set(
session.scalars(
select(TaskInstance.task_id).where(
TaskInstance.dag_id == dag.dag_id,
TaskInstance.run_id == run_id,
TaskInstance.task_id.in_(task.task_id for task in teardown_tasks),
or_(TaskInstance.state.is_(None), TaskInstance.state.in_(State.unfinished)),
)
)
)
# Mark the dag run to success if there are no unfinished teardown tasks.
if commit and len(unfinished_teardown_task_ids) == 0:
_set_dag_run_state(dag.dag_id, run_id, DagRunState.SUCCESS, session)
tasks_to_mark_success = [task for task in tasks if not task.is_teardown] + [
task for task in teardown_tasks if task.task_id not in unfinished_teardown_task_ids
]
for task in tasks_to_mark_success:
task.dag = dag
return set_state(
tasks=tasks_to_mark_success,
run_id=run_id,
state=TaskInstanceState.SUCCESS,
commit=commit,
session=session,
)
|
Set the dag run's state to success.
Set for a specific logical date and its task instances to success.
:param dag: the DAG of which to alter state
:param run_id: the run_id to start looking from
:param commit: commit DAG and tasks to be altered to the database
:param session: database session
:return: If commit is true, list of tasks that have been updated,
otherwise list of tasks that will be updated
:raises: ValueError if dag or logical_date is invalid
|
python
|
airflow-core/src/airflow/api/common/mark_tasks.py
| 208
|
[
"dag",
"run_id",
"commit",
"session"
] |
list[TaskInstance]
| true
| 6
| 7.92
|
apache/airflow
| 43,597
|
sphinx
| false
|
async_get_partitions
|
async def async_get_partitions(
self,
client: Any,
database_name: str,
table_name: str,
expression: str = "",
page_size: int | None = None,
max_items: int | None = 1,
) -> set[tuple]:
"""
Asynchronously retrieves the partition values for a table.
:param database_name: The name of the catalog database where the partitions reside.
:param table_name: The name of the partitions' table.
:param expression: An expression filtering the partitions to be returned.
Please see official AWS documentation for further information.
https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-partitions.html#aws-glue-api-catalog-partitions-GetPartitions
:param page_size: pagination size
:param max_items: maximum items to return
:return: set of partition values where each value is a tuple since
a partition may be composed of multiple columns. For example:
``{('2018-01-01','1'), ('2018-01-01','2')}``
"""
config = {
"PageSize": page_size,
"MaxItems": max_items,
}
paginator = client.get_paginator("get_partitions")
partitions = set()
async for page in paginator.paginate(
DatabaseName=database_name, TableName=table_name, Expression=expression, PaginationConfig=config
):
for partition in page["Partitions"]:
partitions.add(tuple(partition["Values"]))
return partitions
|
Asynchronously retrieves the partition values for a table.
:param database_name: The name of the catalog database where the partitions reside.
:param table_name: The name of the partitions' table.
:param expression: An expression filtering the partitions to be returned.
Please see official AWS documentation for further information.
https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-partitions.html#aws-glue-api-catalog-partitions-GetPartitions
:param page_size: pagination size
:param max_items: maximum items to return
:return: set of partition values where each value is a tuple since
a partition may be composed of multiple columns. For example:
``{('2018-01-01','1'), ('2018-01-01','2')}``
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/glue_catalog.py
| 48
|
[
"self",
"client",
"database_name",
"table_name",
"expression",
"page_size",
"max_items"
] |
set[tuple]
| true
| 3
| 7.44
|
apache/airflow
| 43,597
|
sphinx
| false
|
toString
|
@Override
public String toString() {
return "ClientQuotaFilter(components=" + components + ", strict=" + strict + ")";
}
|
@return whether the filter is strict, i.e. only includes specified components
|
java
|
clients/src/main/java/org/apache/kafka/common/quota/ClientQuotaFilter.java
| 97
|
[] |
String
| true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
getUsingClassName
|
@SuppressWarnings("unchecked")
private @Nullable StructuredLogFormatter<E> getUsingClassName(String className) {
Object formatter = this.instantiator.instantiate(className);
if (formatter != null) {
Assert.state(formatter instanceof StructuredLogFormatter,
() -> "'%s' is not a StructuredLogFormatter".formatted(className));
checkTypeArgument(formatter);
}
return (StructuredLogFormatter<E>) formatter;
}
|
Get a new {@link StructuredLogFormatter} instance for the specified format.
@param format the format requested (either a {@link CommonStructuredLogFormat} ID
or a fully-qualified class name)
@return a new {@link StructuredLogFormatter} instance
@throws IllegalArgumentException if the format is unknown
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/structured/StructuredLogFormatterFactory.java
| 134
|
[
"className"
] | true
| 2
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
withReplacement
|
ConfigDataEnvironmentContributor withReplacement(ConfigDataEnvironmentContributor existing,
ConfigDataEnvironmentContributor replacement) {
if (this == existing) {
return replacement;
}
Map<ImportPhase, List<ConfigDataEnvironmentContributor>> updatedChildren = new LinkedHashMap<>(
this.children.size());
this.children.forEach((importPhase, contributors) -> {
List<ConfigDataEnvironmentContributor> updatedContributors = new ArrayList<>(contributors.size());
for (ConfigDataEnvironmentContributor contributor : contributors) {
updatedContributors.add(contributor.withReplacement(existing, replacement));
}
updatedChildren.put(importPhase, Collections.unmodifiableList(updatedContributors));
});
return new ConfigDataEnvironmentContributor(this.kind, this.location, this.resource,
this.fromProfileSpecificImport, this.propertySource, this.configurationPropertySource, this.properties,
this.configDataOptions, updatedChildren, this.conversionService);
}
|
Create a new {@link ConfigDataEnvironmentContributor} instance where an existing
child is replaced.
@param existing the existing node that should be replaced
@param replacement the replacement node that should be used instead
@return a new {@link ConfigDataEnvironmentContributor} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataEnvironmentContributor.java
| 340
|
[
"existing",
"replacement"
] |
ConfigDataEnvironmentContributor
| true
| 2
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
toString
|
@Override
public String toString() {
if (StringUtils.hasLength(this.file)) {
return this.file;
}
return new File(this.path, "spring.log").getPath();
}
|
Apply log file details to {@code LOG_PATH} and {@code LOG_FILE} map entries.
@param properties the properties to apply to
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/LogFile.java
| 101
|
[] |
String
| true
| 2
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
createEntrySet
|
private ImmutableSet<Entry<E>> createEntrySet() {
return isEmpty() ? ImmutableSet.of() : new EntrySet();
}
|
@since 21.0 (present with return type {@code Set} since 2.0)
|
java
|
android/guava/src/com/google/common/collect/ImmutableMultiset.java
| 358
|
[] | true
| 2
| 6.32
|
google/guava
| 51,352
|
javadoc
| false
|
|
isInterfaceProxied
|
@Override
public boolean isInterfaceProxied(Class<?> ifc) {
for (Class<?> proxyIntf : this.interfaces) {
if (ifc.isAssignableFrom(proxyIntf)) {
return true;
}
}
return false;
}
|
Remove a proxied interface.
<p>Does nothing if the given interface isn't proxied.
@param ifc the interface to remove from the proxy
@return {@code true} if the interface was removed; {@code false}
if the interface was not found and hence could not be removed
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/AdvisedSupport.java
| 258
|
[
"ifc"
] | true
| 2
| 8.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
cumsum
|
def cumsum(self, numeric_only: bool = False, *args, **kwargs) -> NDFrameT:
"""
Cumulative sum for each group.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
*args : tuple
Positional arguments to be passed to `func`.
**kwargs : dict
Additional/specific keyword arguments to be passed to the function,
such as `numeric_only` and `skipna`.
Returns
-------
Series or DataFrame
Cumulative sum for each group. Same object type as the caller.
%(see_also)s
Examples
--------
For SeriesGroupBy:
>>> lst = ["a", "a", "b"]
>>> ser = pd.Series([6, 2, 0], index=lst)
>>> ser
a 6
a 2
b 0
dtype: int64
>>> ser.groupby(level=0).cumsum()
a 6
a 8
b 0
dtype: int64
For DataFrameGroupBy:
>>> data = [[1, 8, 2], [1, 2, 5], [2, 6, 9]]
>>> df = pd.DataFrame(
... data, columns=["a", "b", "c"], index=["fox", "gorilla", "lion"]
... )
>>> df
a b c
fox 1 8 2
gorilla 1 2 5
lion 2 6 9
>>> df.groupby("a").groups
{1: ['fox', 'gorilla'], 2: ['lion']}
>>> df.groupby("a").cumsum()
b c
fox 8 2
gorilla 10 7
lion 6 9
"""
nv.validate_groupby_func("cumsum", args, kwargs, ["skipna"])
return self._cython_transform("cumsum", numeric_only, **kwargs)
|
Cumulative sum for each group.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
*args : tuple
Positional arguments to be passed to `func`.
**kwargs : dict
Additional/specific keyword arguments to be passed to the function,
such as `numeric_only` and `skipna`.
Returns
-------
Series or DataFrame
Cumulative sum for each group. Same object type as the caller.
%(see_also)s
Examples
--------
For SeriesGroupBy:
>>> lst = ["a", "a", "b"]
>>> ser = pd.Series([6, 2, 0], index=lst)
>>> ser
a 6
a 2
b 0
dtype: int64
>>> ser.groupby(level=0).cumsum()
a 6
a 8
b 0
dtype: int64
For DataFrameGroupBy:
>>> data = [[1, 8, 2], [1, 2, 5], [2, 6, 9]]
>>> df = pd.DataFrame(
... data, columns=["a", "b", "c"], index=["fox", "gorilla", "lion"]
... )
>>> df
a b c
fox 1 8 2
gorilla 1 2 5
lion 2 6 9
>>> df.groupby("a").groups
{1: ['fox', 'gorilla'], 2: ['lion']}
>>> df.groupby("a").cumsum()
b c
fox 8 2
gorilla 10 7
lion 6 9
|
python
|
pandas/core/groupby/groupby.py
| 4,914
|
[
"self",
"numeric_only"
] |
NDFrameT
| true
| 1
| 7.2
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
_values_for_factorize
|
def _values_for_factorize(self) -> tuple[np.ndarray, Any]:
"""
Return an array and missing value suitable for factorization.
Returns
-------
values : ndarray
An array suitable for factorization. This should maintain order
and be a supported dtype (Float64, Int64, UInt64, String, Object).
By default, the extension array is cast to object dtype.
na_value : object
The value in `values` to consider missing. This will be treated
as NA in the factorization routines, so it will be coded as
`-1` and not included in `uniques`. By default,
``np.nan`` is used.
See Also
--------
util.hash_pandas_object : Hash the pandas object.
Notes
-----
The values returned by this method are also used in
:func:`pandas.util.hash_pandas_object`. If needed, this can be
overridden in the ``self._hash_pandas_object()`` method.
Examples
--------
>>> pd.array([1, 2, 3])._values_for_factorize()
(array([1, 2, 3], dtype=object), nan)
"""
return self.astype(object), np.nan
|
Return an array and missing value suitable for factorization.
Returns
-------
values : ndarray
An array suitable for factorization. This should maintain order
and be a supported dtype (Float64, Int64, UInt64, String, Object).
By default, the extension array is cast to object dtype.
na_value : object
The value in `values` to consider missing. This will be treated
as NA in the factorization routines, so it will be coded as
`-1` and not included in `uniques`. By default,
``np.nan`` is used.
See Also
--------
util.hash_pandas_object : Hash the pandas object.
Notes
-----
The values returned by this method are also used in
:func:`pandas.util.hash_pandas_object`. If needed, this can be
overridden in the ``self._hash_pandas_object()`` method.
Examples
--------
>>> pd.array([1, 2, 3])._values_for_factorize()
(array([1, 2, 3], dtype=object), nan)
|
python
|
pandas/core/arrays/base.py
| 1,605
|
[
"self"
] |
tuple[np.ndarray, Any]
| true
| 1
| 7.12
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
on_post_execution
|
def on_post_execution(**kwargs):
"""
Call callbacks after execution.
As it's being called after execution, it can capture status of execution,
duration, etc. Note that any exception from callback will be logged but
won't be propagated.
:param kwargs:
:return: None
"""
logger.debug("Calling callbacks: %s", __post_exec_callbacks)
for callback in __post_exec_callbacks:
try:
callback(**kwargs)
except Exception:
logger.exception("Failed on post-execution callback using %s", callback)
|
Call callbacks after execution.
As it's being called after execution, it can capture status of execution,
duration, etc. Note that any exception from callback will be logged but
won't be propagated.
:param kwargs:
:return: None
|
python
|
airflow-core/src/airflow/utils/cli_action_loggers.py
| 87
|
[] | false
| 2
| 7.44
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
getLoggerConfigurations
|
@Override
public List<LoggerConfiguration> getLoggerConfigurations() {
List<LoggerConfiguration> result = new ArrayList<>();
getAllLoggers().forEach((name, loggerConfig) -> result.add(convertLoggerConfig(name, loggerConfig)));
result.sort(CONFIGURATION_COMPARATOR);
return result;
}
|
Return the configuration location. The result may be:
<ul>
<li>{@code null}: if DefaultConfiguration is used (no explicit config loaded)</li>
<li>A file path: if provided explicitly by the user</li>
<li>A URI: if loaded from the classpath default or a custom location</li>
</ul>
@param configuration the source configuration
@return the config location or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/log4j2/Log4J2LoggingSystem.java
| 390
|
[] | true
| 1
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
query_task
|
def query_task(self, *ids):
"""Return detail of tasks currently executed by workers.
Arguments:
*ids (str): IDs of tasks to be queried.
Returns:
Dict: Dictionary ``{HOSTNAME: {TASK_ID: [STATE, TASK_INFO]}}``.
Here is the list of ``TASK_INFO`` fields:
* ``id`` - ID of the task
* ``name`` - Name of the task
* ``args`` - Positinal arguments passed to the task
* ``kwargs`` - Keyword arguments passed to the task
* ``type`` - Type of the task
* ``hostname`` - Hostname of the worker processing the task
* ``time_start`` - Time of processing start
* ``acknowledged`` - True when task was acknowledged to broker
* ``delivery_info`` - Dictionary containing delivery information
* ``exchange`` - Name of exchange where task was published
* ``routing_key`` - Routing key used when task was published
* ``priority`` - Priority used when task was published
* ``redelivered`` - True if the task was redelivered
* ``worker_pid`` - PID of worker processing the task
"""
# signature used be unary: query_task(ids=[id1, id2])
# we need this to preserve backward compatibility.
if len(ids) == 1 and isinstance(ids[0], (list, tuple)):
ids = ids[0]
return self._request('query_task', ids=ids)
|
Return detail of tasks currently executed by workers.
Arguments:
*ids (str): IDs of tasks to be queried.
Returns:
Dict: Dictionary ``{HOSTNAME: {TASK_ID: [STATE, TASK_INFO]}}``.
Here is the list of ``TASK_INFO`` fields:
* ``id`` - ID of the task
* ``name`` - Name of the task
* ``args`` - Positinal arguments passed to the task
* ``kwargs`` - Keyword arguments passed to the task
* ``type`` - Type of the task
* ``hostname`` - Hostname of the worker processing the task
* ``time_start`` - Time of processing start
* ``acknowledged`` - True when task was acknowledged to broker
* ``delivery_info`` - Dictionary containing delivery information
* ``exchange`` - Name of exchange where task was published
* ``routing_key`` - Routing key used when task was published
* ``priority`` - Priority used when task was published
* ``redelivered`` - True if the task was redelivered
* ``worker_pid`` - PID of worker processing the task
|
python
|
celery/app/control.py
| 340
|
[
"self"
] | false
| 3
| 7.28
|
celery/celery
| 27,741
|
google
| false
|
|
whenNot
|
public Source<T> whenNot(Predicate<T> predicate) {
Assert.notNull(predicate, "'predicate' must not be null");
return when(predicate.negate());
}
|
Return a filtered version of the source that won't map values that match the
given predicate.
@param predicate the predicate used to filter values
@return a new filtered source instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/PropertyMapper.java
| 266
|
[
"predicate"
] | true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
putmask
|
def putmask(self, mask, value: MultiIndex) -> MultiIndex:
"""
Return a new MultiIndex of the values set with the mask.
Parameters
----------
mask : array like
value : MultiIndex
Must either be the same length as self or length one
Returns
-------
MultiIndex
"""
mask, noop = validate_putmask(self, mask)
if noop:
return self.copy()
if len(mask) == len(value):
subset = value[mask].remove_unused_levels()
else:
subset = value.remove_unused_levels()
new_levels = []
new_codes = []
for i, (value_level, level, level_codes) in enumerate(
zip(subset.levels, self.levels, self.codes, strict=True)
):
new_level = level.union(value_level, sort=False)
value_codes = new_level.get_indexer_for(subset.get_level_values(i))
new_code = ensure_int64(level_codes)
new_code[mask] = value_codes
new_levels.append(new_level)
new_codes.append(new_code)
return MultiIndex(
levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False
)
|
Return a new MultiIndex of the values set with the mask.
Parameters
----------
mask : array like
value : MultiIndex
Must either be the same length as self or length one
Returns
-------
MultiIndex
|
python
|
pandas/core/indexes/multi.py
| 4,188
|
[
"self",
"mask",
"value"
] |
MultiIndex
| true
| 5
| 6.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
asof_locs
|
def asof_locs(
self, where: Index, mask: npt.NDArray[np.bool_]
) -> npt.NDArray[np.intp]:
"""
Return the locations (indices) of labels in the index.
As in the :meth:`pandas.Index.asof`, if the label (a particular entry in
``where``) is not in the index, the latest index label up to the
passed label is chosen and its index returned.
If all of the labels in the index are later than a label in ``where``,
-1 is returned.
``mask`` is used to ignore ``NA`` values in the index during calculation.
Parameters
----------
where : Index
An Index consisting of an array of timestamps.
mask : np.ndarray[bool]
Array of booleans denoting where values in the original
data are not ``NA``.
Returns
-------
np.ndarray[np.intp]
An array of locations (indices) of the labels from the index
which correspond to the return values of :meth:`pandas.Index.asof`
for every element in ``where``.
See Also
--------
Index.asof : Return the label from the index, or, if not present, the
previous one.
Examples
--------
>>> idx = pd.date_range("2023-06-01", periods=3, freq="D")
>>> where = pd.DatetimeIndex(
... ["2023-05-30 00:12:00", "2023-06-01 00:00:00", "2023-06-02 23:59:59"]
... )
>>> mask = np.ones(3, dtype=bool)
>>> idx.asof_locs(where, mask)
array([-1, 0, 1])
We can use ``mask`` to ignore certain values in the index during calculation.
>>> mask[1] = False
>>> idx.asof_locs(where, mask)
array([-1, 0, 0])
"""
# error: No overload variant of "searchsorted" of "ndarray" matches argument
# types "Union[ExtensionArray, ndarray[Any, Any]]", "str"
# TODO: will be fixed when ExtensionArray.searchsorted() is fixed
locs = self._values[mask].searchsorted(
where._values,
side="right", # type: ignore[call-overload]
)
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self), dtype=np.intp)[mask].take(locs)
first_value = self._values[mask.argmax()]
result[(locs == 0) & (where._values < first_value)] = -1
return result
|
Return the locations (indices) of labels in the index.
As in the :meth:`pandas.Index.asof`, if the label (a particular entry in
``where``) is not in the index, the latest index label up to the
passed label is chosen and its index returned.
If all of the labels in the index are later than a label in ``where``,
-1 is returned.
``mask`` is used to ignore ``NA`` values in the index during calculation.
Parameters
----------
where : Index
An Index consisting of an array of timestamps.
mask : np.ndarray[bool]
Array of booleans denoting where values in the original
data are not ``NA``.
Returns
-------
np.ndarray[np.intp]
An array of locations (indices) of the labels from the index
which correspond to the return values of :meth:`pandas.Index.asof`
for every element in ``where``.
See Also
--------
Index.asof : Return the label from the index, or, if not present, the
previous one.
Examples
--------
>>> idx = pd.date_range("2023-06-01", periods=3, freq="D")
>>> where = pd.DatetimeIndex(
... ["2023-05-30 00:12:00", "2023-06-01 00:00:00", "2023-06-02 23:59:59"]
... )
>>> mask = np.ones(3, dtype=bool)
>>> idx.asof_locs(where, mask)
array([-1, 0, 1])
We can use ``mask`` to ignore certain values in the index during calculation.
>>> mask[1] = False
>>> idx.asof_locs(where, mask)
array([-1, 0, 0])
|
python
|
pandas/core/indexes/base.py
| 5,714
|
[
"self",
"where",
"mask"
] |
npt.NDArray[np.intp]
| true
| 1
| 7.2
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
ordered
|
def ordered(self) -> Ordered:
"""
Whether the categories have an ordered relationship.
See Also
--------
set_ordered : Set the ordered attribute.
as_ordered : Set the Categorical to be ordered.
as_unordered : Set the Categorical to be unordered.
Examples
--------
For :class:`pandas.Series`:
>>> ser = pd.Series(["a", "b", "c", "a"], dtype="category")
>>> ser.cat.ordered
False
>>> raw_cat = pd.Categorical(["a", "b", "c", "a"], ordered=True)
>>> ser = pd.Series(raw_cat)
>>> ser.cat.ordered
True
For :class:`pandas.Categorical`:
>>> cat = pd.Categorical(["a", "b"], ordered=True)
>>> cat.ordered
True
>>> cat = pd.Categorical(["a", "b"], ordered=False)
>>> cat.ordered
False
For :class:`pandas.CategoricalIndex`:
>>> ci = pd.CategoricalIndex(["a", "b"], ordered=True)
>>> ci.ordered
True
>>> ci = pd.CategoricalIndex(["a", "b"], ordered=False)
>>> ci.ordered
False
"""
return self.dtype.ordered
|
Whether the categories have an ordered relationship.
See Also
--------
set_ordered : Set the ordered attribute.
as_ordered : Set the Categorical to be ordered.
as_unordered : Set the Categorical to be unordered.
Examples
--------
For :class:`pandas.Series`:
>>> ser = pd.Series(["a", "b", "c", "a"], dtype="category")
>>> ser.cat.ordered
False
>>> raw_cat = pd.Categorical(["a", "b", "c", "a"], ordered=True)
>>> ser = pd.Series(raw_cat)
>>> ser.cat.ordered
True
For :class:`pandas.Categorical`:
>>> cat = pd.Categorical(["a", "b"], ordered=True)
>>> cat.ordered
True
>>> cat = pd.Categorical(["a", "b"], ordered=False)
>>> cat.ordered
False
For :class:`pandas.CategoricalIndex`:
>>> ci = pd.CategoricalIndex(["a", "b"], ordered=True)
>>> ci.ordered
True
>>> ci = pd.CategoricalIndex(["a", "b"], ordered=False)
>>> ci.ordered
False
|
python
|
pandas/core/arrays/categorical.py
| 851
|
[
"self"
] |
Ordered
| true
| 1
| 6.64
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
appendSeparator
|
public StrBuilder appendSeparator(final String separator) {
return appendSeparator(separator, null);
}
|
Appends a separator if the builder is currently non-empty.
Appending a null separator will have no effect.
The separator is appended using {@link #append(String)}.
<p>
This method is useful for adding a separator each time around the
loop except the first.
</p>
<pre>
for (Iterator it = list.iterator(); it.hasNext(); ) {
appendSeparator(",");
append(it.next());
}
</pre>
<p>
Note that for this simple example, you should use
{@link #appendWithSeparators(Iterable, String)}.
</p>
@param separator the separator to use, null means no separator
@return {@code this} instance.
@since 2.3
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,298
|
[
"separator"
] |
StrBuilder
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
newProxy
|
@Override
public <T> T newProxy(
T target, Class<T> interfaceType, long timeoutDuration, TimeUnit timeoutUnit) {
checkNotNull(target);
checkNotNull(interfaceType);
checkNotNull(timeoutUnit);
checkPositiveTimeout(timeoutDuration);
checkArgument(interfaceType.isInterface(), "interfaceType must be an interface type");
Set<Method> interruptibleMethods = findInterruptibleMethods(interfaceType);
InvocationHandler handler =
(obj, method, args) -> {
Callable<@Nullable Object> callable =
() -> {
try {
return method.invoke(target, args);
} catch (InvocationTargetException e) {
throw throwCause(e, /* combineStackTraces= */ false);
}
};
return callWithTimeout(
callable, timeoutDuration, timeoutUnit, interruptibleMethods.contains(method));
};
return newProxy(interfaceType, handler);
}
|
Creates a TimeLimiter instance using the given executor service to execute method calls.
<p><b>Warning:</b> using a bounded executor may be counterproductive! If the thread pool fills
up, any time callers spend waiting for a thread may count toward their time limit, and in this
case the call may even time out before the target method is ever invoked.
@param executor the ExecutorService that will execute the method calls on the target objects;
for example, a {@link Executors#newCachedThreadPool()}.
@since 22.0
|
java
|
android/guava/src/com/google/common/util/concurrent/SimpleTimeLimiter.java
| 75
|
[
"target",
"interfaceType",
"timeoutDuration",
"timeoutUnit"
] |
T
| true
| 2
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
defaultProperties
|
public Set<Property> defaultProperties() {
return defaultProperties;
}
|
@return a set representing the default properties for this database
|
java
|
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/Database.java
| 221
|
[] | true
| 1
| 6.16
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
fetchOffsets
|
public CompletableFuture<Map<TopicPartition, OffsetAndMetadata>> fetchOffsets(
final Set<TopicPartition> partitions,
final long deadlineMs) {
if (partitions.isEmpty()) {
return CompletableFuture.completedFuture(Collections.emptyMap());
}
CompletableFuture<Map<TopicPartition, OffsetAndMetadata>> result = new CompletableFuture<>();
OffsetFetchRequestState request = createOffsetFetchRequest(partitions, deadlineMs);
fetchOffsetsWithRetries(request, result);
return result;
}
|
Enqueue a request to fetch committed offsets, that will be sent on the next call to {@link #poll(long)}.
@param partitions Partitions to fetch offsets for.
@param deadlineMs Time until which the request should be retried if it fails
with expected retriable errors.
@return Future that will complete when a successful response is received, or the request
fails and cannot be retried. Note that the request is retried whenever it fails with
retriable expected error and the retry time hasn't expired.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java
| 513
|
[
"partitions",
"deadlineMs"
] | true
| 2
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
getEnterpriseGeoIpTaskState
|
@Nullable
static EnterpriseGeoIpTaskState getEnterpriseGeoIpTaskState(ProjectMetadata projectMetadata) {
PersistentTasksCustomMetadata.PersistentTask<?> task = getTaskWithId(
projectMetadata,
EnterpriseGeoIpTask.ENTERPRISE_GEOIP_DOWNLOADER
);
return (task == null) ? null : (EnterpriseGeoIpTaskState) task.getState();
}
|
Retrieves the geoip downloader's task state from the cluster state. This may return null in some circumstances,
for example if the geoip downloader task hasn't been created yet (which it wouldn't be if it's disabled).
@param projectMetadata the project metatdata to read the task state from
@return the geoip downloader's task state or null if there is not a state to read
|
java
|
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskState.java
| 147
|
[
"projectMetadata"
] |
EnterpriseGeoIpTaskState
| true
| 2
| 7.92
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
getObject
|
@Override
public final @Nullable Properties getObject() throws IOException {
if (this.singleton) {
return this.singletonInstance;
}
else {
return createProperties();
}
}
|
Set whether a shared 'singleton' Properties instance should be
created, or rather a new Properties instance on each request.
<p>Default is "true" (a shared singleton).
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/PropertiesFactoryBean.java
| 77
|
[] |
Properties
| true
| 2
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
_safe_assign
|
def _safe_assign(X, values, *, row_indexer=None, column_indexer=None):
"""Safe assignment to a numpy array, sparse matrix, or pandas dataframe.
Parameters
----------
X : {ndarray, sparse-matrix, dataframe}
Array to be modified. It is expected to be 2-dimensional.
values : ndarray
The values to be assigned to `X`.
row_indexer : array-like, dtype={int, bool}, default=None
A 1-dimensional array to select the rows of interest. If `None`, all
rows are selected.
column_indexer : array-like, dtype={int, bool}, default=None
A 1-dimensional array to select the columns of interest. If `None`, all
columns are selected.
"""
row_indexer = slice(None, None, None) if row_indexer is None else row_indexer
column_indexer = (
slice(None, None, None) if column_indexer is None else column_indexer
)
if hasattr(X, "iloc"): # pandas dataframe
with warnings.catch_warnings():
# pandas >= 1.5 raises a warning when using iloc to set values in a column
# that does not have the same type as the column being set. It happens
# for instance when setting a categorical column with a string.
# In the future the behavior won't change and the warning should disappear.
# TODO(1.3): check if the warning is still raised or remove the filter.
warnings.simplefilter("ignore", FutureWarning)
X.iloc[row_indexer, column_indexer] = values
else: # numpy array or sparse matrix
X[row_indexer, column_indexer] = values
|
Safe assignment to a numpy array, sparse matrix, or pandas dataframe.
Parameters
----------
X : {ndarray, sparse-matrix, dataframe}
Array to be modified. It is expected to be 2-dimensional.
values : ndarray
The values to be assigned to `X`.
row_indexer : array-like, dtype={int, bool}, default=None
A 1-dimensional array to select the rows of interest. If `None`, all
rows are selected.
column_indexer : array-like, dtype={int, bool}, default=None
A 1-dimensional array to select the columns of interest. If `None`, all
columns are selected.
|
python
|
sklearn/utils/_indexing.py
| 380
|
[
"X",
"values",
"row_indexer",
"column_indexer"
] | false
| 5
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
create_location
|
def create_location(self, location_uri: str, **create_location_kwargs) -> str:
"""
Create a new location.
.. seealso::
- :external+boto3:py:meth:`DataSync.Client.create_location_s3`
- :external+boto3:py:meth:`DataSync.Client.create_location_smb`
- :external+boto3:py:meth:`DataSync.Client.create_location_nfs`
- :external+boto3:py:meth:`DataSync.Client.create_location_efs`
:param location_uri: Location URI used to determine the location type (S3, SMB, NFS, EFS).
:param create_location_kwargs: Passed to ``DataSync.Client.create_location_*`` methods.
:return: LocationArn of the created Location.
:raises AirflowException: If location type (prefix from ``location_uri``) is invalid.
"""
schema = urlsplit(location_uri).scheme
if schema == "smb":
location = self.get_conn().create_location_smb(**create_location_kwargs)
elif schema == "s3":
location = self.get_conn().create_location_s3(**create_location_kwargs)
elif schema == "nfs":
location = self.get_conn().create_location_nfs(**create_location_kwargs)
elif schema == "efs":
location = self.get_conn().create_location_efs(**create_location_kwargs)
else:
raise AirflowException(f"Invalid/Unsupported location type: {schema}")
self._refresh_locations()
return location["LocationArn"]
|
Create a new location.
.. seealso::
- :external+boto3:py:meth:`DataSync.Client.create_location_s3`
- :external+boto3:py:meth:`DataSync.Client.create_location_smb`
- :external+boto3:py:meth:`DataSync.Client.create_location_nfs`
- :external+boto3:py:meth:`DataSync.Client.create_location_efs`
:param location_uri: Location URI used to determine the location type (S3, SMB, NFS, EFS).
:param create_location_kwargs: Passed to ``DataSync.Client.create_location_*`` methods.
:return: LocationArn of the created Location.
:raises AirflowException: If location type (prefix from ``location_uri``) is invalid.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/datasync.py
| 67
|
[
"self",
"location_uri"
] |
str
| true
| 6
| 7.28
|
apache/airflow
| 43,597
|
sphinx
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.