function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cartesian_product
|
def cartesian_product(X: list[np.ndarray]) -> list[np.ndarray]:
    """
    Numpy version of itertools.product.

    Sometimes faster (for large inputs)...

    Parameters
    ----------
    X : list-like of list-likes
        Factors of the cartesian product.

    Returns
    -------
    product : list of ndarrays
        One array per input factor; zipping them yields the product tuples.

    Examples
    --------
    >>> cartesian_product([list("ABC"), [1, 2]])
    [array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='<U1'), array([1, 2, 1, 2, 1, 2])]

    See Also
    --------
    itertools.product : Cartesian product of input iterables. Equivalent to
        nested for-loops.
    """
    msg = "Input must be a list-like of list-likes"
    # Both the container and each factor must be list-like.
    if not is_list_like(X) or any(not is_list_like(factor) for factor in X):
        raise TypeError(msg)
    if len(X) == 0:
        return []

    sizes = np.fromiter((len(factor) for factor in X), dtype=np.intp)
    running_product = np.cumprod(sizes)
    # intp overflow shows up as a negative cumulative product.
    if np.any(running_product < 0):
        raise ValueError("Product space too large to allocate arrays!")

    tile_counts = np.roll(running_product, 1)
    tile_counts[0] = 1

    if running_product[-1] != 0:
        repeat_counts = running_product[-1] / running_product
    else:
        # if any factor is empty, the cartesian product is empty
        repeat_counts = np.zeros_like(running_product)

    out = []
    for i, factor in enumerate(X):
        out.append(
            np.tile(
                np.repeat(factor, repeat_counts[i]),
                np.prod(tile_counts[i]),
            )
        )
    return out
|
Numpy version of itertools.product.
Sometimes faster (for large inputs)...
Parameters
----------
X : list-like of list-likes
Returns
-------
product : list of ndarrays
Examples
--------
>>> cartesian_product([list("ABC"), [1, 2]])
[array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='<U1'), array([1, 2, 1, 2, 1, 2])]
See Also
--------
itertools.product : Cartesian product of input iterables. Equivalent to
nested for-loops.
|
python
|
pandas/core/indexes/multi.py
| 4,451
|
[
"X"
] |
list[np.ndarray]
| true
| 8
| 8.16
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
forField
|
/**
 * Create a new {@link ResourceFieldResolver} for the specified field.
 * @param fieldName the field name
 * @return a new {@link ResourceFieldResolver} instance
 */
public static ResourceElementResolver forField(String fieldName) {
    // The field name doubles as the default resource name.
    final ResourceElementResolver resolver = new ResourceFieldResolver(fieldName, true, fieldName);
    return resolver;
}
|
Create a new {@link ResourceFieldResolver} for the specified field.
@param fieldName the field name
@return a new {@link ResourceFieldResolver} instance
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/ResourceElementResolver.java
| 68
|
[
"fieldName"
] |
ResourceElementResolver
| true
| 1
| 6.16
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
show_versions
|
def show_versions(as_json: str | bool = False) -> None:
    """
    Provide useful information, important for bug reports.

    It comprises info about hosting operation system, pandas version,
    and versions of other installed relative packages.

    Parameters
    ----------
    as_json : str or bool, default False
        * If False, outputs info in a human readable form to the console.
        * If str, it will be considered as a path to a file.
          Info will be written to that file in JSON format.
        * If True, outputs info in JSON format to the console.

    See Also
    --------
    get_option : Retrieve the value of the specified option.
    set_option : Set the value of the specified option or options.

    Examples
    --------
    >>> pd.show_versions()  # doctest: +SKIP
    """
    sys_info = _get_sys_info()
    deps = _get_dependency_info()

    if as_json:
        payload = {"system": sys_info, "dependencies": deps}
        if as_json is True:
            sys.stdout.writelines(json.dumps(payload, indent=2))
        else:
            assert isinstance(as_json, str)  # needed for mypy
            with open(as_json, "w", encoding="utf-8") as fh:
                json.dump(payload, fh, indent=2)
        return

    # Human-readable path: flatten LOCALE to "language.encoding" before printing.
    locale_info = sys_info["LOCALE"]
    assert isinstance(locale_info, dict)  # needed for mypy
    sys_info["LOCALE"] = f"{locale_info['language-code']}.{locale_info['encoding']}"

    width = max(len(name) for name in deps)
    print("\nINSTALLED VERSIONS")
    print("------------------")
    for name, value in sys_info.items():
        print(f"{name:<{width}}: {value}")
    print("")
    for name, value in deps.items():
        print(f"{name:<{width}}: {value}")
|
Provide useful information, important for bug reports.
It comprises info about hosting operation system, pandas version,
and versions of other installed relative packages.
Parameters
----------
as_json : str or bool, default False
* If False, outputs info in a human readable form to the console.
* If str, it will be considered as a path to a file.
Info will be written to that file in JSON format.
* If True, outputs info in JSON format to the console.
See Also
--------
get_option : Retrieve the value of the specified option.
set_option : Set the value of the specified option or options.
Examples
--------
>>> pd.show_versions() # doctest: +SKIP
Your output may look something like this:
INSTALLED VERSIONS
------------------
commit : 37ea63d540fd27274cad6585082c91b1283f963d
python : 3.10.6.final.0
python-bits : 64
OS : Linux
OS-release : 5.10.102.1-microsoft-standard-WSL2
Version : #1 SMP Wed Mar 2 00:30:59 UTC 2022
machine : x86_64
processor : x86_64
byteorder : little
LC_ALL : None
LANG : en_GB.UTF-8
LOCALE : en_GB.UTF-8
pandas : 2.0.1
numpy : 1.24.3
...
|
python
|
pandas/util/_print_versions.py
| 96
|
[
"as_json"
] |
None
| true
| 7
| 8.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
newTreeSet
|
/**
 * Creates a <i>mutable</i>, empty {@code TreeSet} instance with the given comparator.
 *
 * <p>Prefer the {@code TreeSet} constructor with diamond syntax in new code; note that the
 * constructor treats a null comparator as "natural ordering," whereas this factory rejects null.
 *
 * @param comparator the comparator to use to sort the set
 * @return a new, empty {@code TreeSet}
 * @throws NullPointerException if {@code comparator} is null
 */
@SuppressWarnings("NonApiType") // acts as a direct substitute for a constructor call
public static <E extends @Nullable Object> TreeSet<E> newTreeSet(
    Comparator<? super E> comparator) {
  // Reject null eagerly; TreeSet itself would silently fall back to natural ordering.
  final Comparator<? super E> checked = checkNotNull(comparator);
  return new TreeSet<>(checked);
}
|
Creates a <i>mutable</i>, empty {@code TreeSet} instance with the given comparator.
<p><b>Note:</b> if mutability is not required, use {@code
ImmutableSortedSet.orderedBy(comparator).build()} instead.
<p><b>Note:</b> this method is now unnecessary and should be treated as deprecated. Instead,
use the {@code TreeSet} constructor directly, taking advantage of <a
href="https://docs.oracle.com/javase/tutorial/java/generics/genTypeInference.html#type-inference-instantiation">"diamond"
syntax</a>. One caveat to this is that the {@code TreeSet} constructor uses a null {@code
Comparator} to mean "natural ordering," whereas this factory rejects null. Clean your code
accordingly.
@param comparator the comparator to use to sort the set
@return a new, empty {@code TreeSet}
@throws NullPointerException if {@code comparator} is null
|
java
|
android/guava/src/com/google/common/collect/Sets.java
| 438
|
[
"comparator"
] | true
| 1
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
|
addConsoleCtrlHandler
|
/**
 * Adds a Console Ctrl Handler for Windows. On non-windows this is a noop.
 *
 * @param handler callback invoked with the console control event code
 * @return true if the handler is correctly set
 */
public boolean addConsoleCtrlHandler(ConsoleCtrlHandler handler) {
    return kernel.SetConsoleCtrlHandler(eventCode -> {
        // Guard keeps the debug call free when debug logging is off.
        if (logger.isDebugEnabled()) {
            logger.debug("console control handler received event [{}]", eventCode);
        }
        return handler.handle(eventCode);
    }, true);
}
|
Adds a Console Ctrl Handler for Windows. On non-windows this is a noop.
@return true if the handler is correctly set
|
java
|
libs/native/src/main/java/org/elasticsearch/nativeaccess/WindowsFunctions.java
| 58
|
[
"handler"
] | true
| 2
| 7.04
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
brokersById
|
/**
 * Get all brokers returned in the metadata response.
 * @return mapping from broker id to its {@link Node}
 */
public Map<Integer, Node> brokersById() {
    final Map<Integer, Node> brokers = holder().brokers;
    return brokers;
}
|
Get all brokers returned in metadata response
@return the brokers
|
java
|
clients/src/main/java/org/apache/kafka/common/requests/MetadataResponse.java
| 233
|
[] | true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
trigger_dag
|
def trigger_dag(
    dag_id: str,
    *,
    triggered_by: DagRunTriggeredByType,
    triggering_user_name: str | None = None,
    run_after: datetime | None = None,
    run_id: str | None = None,
    conf: dict | str | None = None,
    logical_date: datetime | None = None,
    replace_microseconds: bool = True,
    session: Session = NEW_SESSION,
) -> DagRun | None:
    """
    Triggers execution of DAG specified by dag_id.

    :param dag_id: DAG ID
    :param triggered_by: the entity which triggers the dag_run
    :param triggering_user_name: the user name who triggers the dag_run
    :param run_after: the datetime before which dag won't run
    :param run_id: ID of the dag_run
    :param conf: configuration
    :param logical_date: date of execution
    :param replace_microseconds: whether microseconds should be zeroed
    :param session: Unused. Only added in compatibility with database isolation mode
    :return: first dag run triggered - even if more than one Dag Runs were triggered or None
    """
    # Fail fast when the DAG is not registered in the metadata database.
    if DagModel.get_current(dag_id, session=session) is None:
        raise DagNotFound(f"Dag id {dag_id} not found in DagModel")

    dag_run = _trigger_dag(
        dag_id=dag_id,
        dag_bag=DBDagBag(),
        run_id=run_id,
        # Default run_after to "now" when the caller did not supply one.
        run_after=run_after or timezone.utcnow(),
        conf=conf,
        logical_date=logical_date,
        replace_microseconds=replace_microseconds,
        triggered_by=triggered_by,
        triggering_user_name=triggering_user_name,
        session=session,
    )
    return dag_run or None
|
Triggers execution of DAG specified by dag_id.
:param dag_id: DAG ID
:param triggered_by: the entity which triggers the dag_run
:param triggering_user_name: the user name who triggers the dag_run
:param run_after: the datetime before which dag won't run
:param run_id: ID of the dag_run
:param conf: configuration
:param logical_date: date of execution
:param replace_microseconds: whether microseconds should be zeroed
:param session: Unused. Only added in compatibility with database isolation mode
:return: first dag run triggered - even if more than one Dag Runs were triggered or None
|
python
|
airflow-core/src/airflow/api/common/trigger_dag.py
| 128
|
[
"dag_id",
"triggered_by",
"triggering_user_name",
"run_after",
"run_id",
"conf",
"logical_date",
"replace_microseconds",
"session"
] |
DagRun | None
| true
| 4
| 8.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
var
|
def var(
    self,
    ddof: int = 1,
    engine: Literal["cython", "numba"] | None = None,
    engine_kwargs: dict[str, bool] | None = None,
    numeric_only: bool = False,
    skipna: bool = True,
):
    """
    Compute variance of groups, excluding missing values.

    For multiple groupings, the result index will be a MultiIndex.

    Parameters
    ----------
    ddof : int, default 1
        Degrees of freedom.
    engine : str, default None
        * ``'cython'`` : Runs the operation through C-extensions from cython.
        * ``'numba'`` : Runs the operation through JIT compiled code from numba.
        * ``None`` : Defaults to ``'cython'`` or globally setting
          ``compute.use_numba``
    engine_kwargs : dict, default None
        * For ``'cython'`` engine, there are no accepted ``engine_kwargs``
        * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
          and ``parallel`` dictionary keys. The values must either be ``True`` or
          ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
          ``{{'nopython': True, 'nogil': False, 'parallel': False}}``
    numeric_only : bool, default False
        Include only `float`, `int` or `boolean` data.

        .. versionchanged:: 2.0.0
            numeric_only now defaults to ``False``.
    skipna : bool, default True
        Exclude NA/null values. If an entire group is NA, the result will be NA.

        .. versionadded:: 3.0.0

    Returns
    -------
    Series or DataFrame
        Variance of values within each group.
    %(see_also)s
    Examples
    --------
    For SeriesGroupBy:

    >>> lst = ["a", "a", "a", "b", "b", "b"]
    >>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst)
    >>> ser.groupby(level=0).var()
    a    10.333333
    b     0.333333
    dtype: float64

    For DataFrameGroupBy:

    >>> data = {"a": [1, 3, 5, 7, 7, 8, 3], "b": [1, 4, 8, 4, 4, 2, 1]}
    >>> df = pd.DataFrame(
    ...     data, index=["dog", "dog", "dog", "mouse", "mouse", "mouse", "mouse"]
    ... )
    >>> df.groupby(level=0).var()
                  a          b
    dog    4.000000  12.333333
    mouse  4.916667   2.250000
    """
    # Default path: cython aggregation with a Series-based fallback.
    if not maybe_use_numba(engine):
        return self._cython_agg_general(
            "var",
            alt=lambda x: Series(x, copy=False).var(ddof=ddof, skipna=skipna),
            numeric_only=numeric_only,
            ddof=ddof,
            skipna=skipna,
        )

    # Numba path: import lazily so numba stays an optional dependency.
    from pandas.core._numba.kernels import grouped_var

    return self._numba_agg_general(
        grouped_var,
        executor.float_dtype_mapping,
        engine_kwargs,
        min_periods=0,
        ddof=ddof,
        skipna=skipna,
    )
|
Compute variance of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
engine : str, default None
* ``'cython'`` : Runs the operation through C-extensions from cython.
* ``'numba'`` : Runs the operation through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or globally setting
``compute.use_numba``
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{{'nopython': True, 'nogil': False, 'parallel': False}}``
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
.. versionchanged:: 2.0.0
numeric_only now defaults to ``False``.
skipna : bool, default True
Exclude NA/null values. If an entire group is NA, the result will be NA.
.. versionadded:: 3.0.0
Returns
-------
Series or DataFrame
Variance of values within each group.
%(see_also)s
Examples
--------
For SeriesGroupBy:
>>> lst = ["a", "a", "a", "b", "b", "b"]
>>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst)
>>> ser
a 7
a 2
a 8
b 4
b 3
b 3
dtype: int64
>>> ser.groupby(level=0).var()
a 10.333333
b 0.333333
dtype: float64
For DataFrameGroupBy:
>>> data = {"a": [1, 3, 5, 7, 7, 8, 3], "b": [1, 4, 8, 4, 4, 2, 1]}
>>> df = pd.DataFrame(
... data, index=["dog", "dog", "dog", "mouse", "mouse", "mouse", "mouse"]
... )
>>> df
a b
dog 1 1
dog 3 4
dog 5 8
mouse 7 4
mouse 7 4
mouse 8 2
mouse 3 1
>>> df.groupby(level=0).var()
a b
dog 4.000000 12.333333
mouse 4.916667 2.250000
|
python
|
pandas/core/groupby/groupby.py
| 2,520
|
[
"self",
"ddof",
"engine",
"engine_kwargs",
"numeric_only",
"skipna"
] | true
| 3
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
update
|
/**
 * Records that `updated` was derived from `original`: when the two nodes
 * differ, copies the original-node pointer and the text range onto `updated`.
 */
function update<T extends Node>(updated: Mutable<T>, original: T): T {
    if (updated === original) return updated;
    setOriginal(updated, original);
    setTextRange(updated, original);
    return updated;
}
|
Marks an updated node as derived from its original: when the two nodes differ, copies the original-node pointer and text range onto the updated node.
@param updated The updated node.
@param original The original node the update was derived from.
|
typescript
|
src/compiler/factory/nodeFactory.ts
| 7,179
|
[
"updated",
"original"
] | true
| 2
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
negate
|
/**
 * Returns a predicate that negates this predicate.
 *
 * @return a predicate that negates this predicate.
 */
default FailableDoublePredicate<E> negate() {
    // Delegate to this predicate and invert its result.
    return value -> {
        return !test(value);
    };
}
|
Returns a predicate that negates this predicate.
@return a predicate that negates this predicate.
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableDoublePredicate.java
| 79
|
[] | true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
create
|
/**
 * Wraps the given Reader with a CharFilter.
 *
 * @param reader reader to be wrapped
 * @return a reader wrapped with CharFilter
 */
Reader create(Reader reader);
|
Wraps the given Reader with a CharFilter.
@param reader reader to be wrapped
@return a reader wrapped with CharFilter
|
java
|
libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/CharFilterFactory.java
| 28
|
[
"reader"
] |
Reader
| true
| 1
| 6.48
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
set_head_dim_values
|
def set_head_dim_values(
    kernel_options: dict[str, Any], qk_head_dim, v_head_dim, graph_sizevars
):
    """
    Mutates kernel options, adding head dimension calculations.

    Args:
        kernel_options: Dictionary to populate with options
        qk_head_dim: Query/Key head dimension
        v_head_dim: Value head dimension
        graph_sizevars: Graph size variables object with guard_int method
    """
    qk_static = graph_sizevars.guard_int(qk_head_dim)
    v_static = graph_sizevars.guard_int(v_head_dim)

    # Set the exact and power-of-two-rounded dims for both QK and V.
    for prefix, dim in (("QK", qk_static), ("V", v_static)):
        kernel_options.setdefault(f"{prefix}_HEAD_DIM", dim)
        kernel_options.setdefault(f"{prefix}_HEAD_DIM_ROUNDED", next_power_of_two(dim))

    # Safety flag: both dims already powers of two means no padding is needed.
    kernel_options.setdefault(
        "SAFE_HEAD_DIM",
        is_power_of_2(qk_static) and is_power_of_2(v_static),
    )
|
Mutates kernel options, adding head dimension calculations.
Args:
kernel_options: Dictionary to populate with options
qk_head_dim: Query/Key head dimension
v_head_dim: Value head dimension
graph_sizevars: Graph size variables object with guard_int method
|
python
|
torch/_inductor/kernel/flex/common.py
| 305
|
[
"kernel_options",
"qk_head_dim",
"v_head_dim",
"graph_sizevars"
] | true
| 2
| 6.24
|
pytorch/pytorch
| 96,034
|
google
| false
|
|
filteredProducerIds
|
/**
 * Returns the set of producerIds that are being filtered or empty if none have been specified.
 *
 * @return the current set of filtered producer ids (empty means that no producerIds are
 *     filtered and all transactions will be returned)
 */
public Set<Long> filteredProducerIds() {
    final Set<Long> current = filteredProducerIds;
    return current;
}
|
Returns the set of producerIds that are being filtered or empty if none have been specified.
@return the current set of filtered states (empty means that no producerIds are filtered and
all transactions will be returned)
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/ListTransactionsOptions.java
| 103
|
[] | true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
trim
|
/**
 * Quickselects the top k elements from the 2k elements in the buffer. O(k) expected time,
 * O(k log k) worst case. On exit the first k buffer slots hold the retained elements and
 * {@code threshold} holds the greatest of them.
 */
private void trim() {
    int left = 0;
    int right = 2 * k - 1;
    int minThresholdPosition = 0;
    // The leftmost position at which the greatest of the k lower elements
    // -- the new value of threshold -- might be found.
    int iterations = 0;
    // Bound on quickselect rounds; past this we assume pathological pivots and fall back
    // to a full sort so the worst case stays O(k log k).
    int maxIterations = IntMath.log2(right - left, RoundingMode.CEILING) * 3;
    while (left < right) {
        // Midpoint pivot guess for the remaining [left, right] window.
        int pivotIndex = (left + right + 1) >>> 1;
        int pivotNewIndex = partition(left, right, pivotIndex);
        if (pivotNewIndex > k) {
            // Pivot settled past position k: discard everything at and beyond it.
            right = pivotNewIndex - 1;
        } else if (pivotNewIndex < k) {
            // Pivot settled before position k: it (and everything left of it) is kept;
            // the threshold may sit at this position.
            left = max(pivotNewIndex, left + 1);
            minThresholdPosition = pivotNewIndex;
        } else {
            break;
        }
        iterations++;
        if (iterations >= maxIterations) {
            @SuppressWarnings("nullness") // safe because we pass sort() a range that contains real Ts
            T[] castBuffer = (T[]) buffer;
            // We've already taken O(k log k), let's make sure we don't take longer than O(k log k).
            sort(castBuffer, left, right + 1, comparator);
            break;
        }
    }
    bufferSize = k;
    // The threshold is the maximum of the first k elements; scanning can start at the last
    // pivot position known to be < k rather than at index 0.
    threshold = uncheckedCastNullableTToT(buffer[minThresholdPosition]);
    for (int i = minThresholdPosition + 1; i < k; i++) {
        if (comparator.compare(
                uncheckedCastNullableTToT(buffer[i]), uncheckedCastNullableTToT(threshold))
            > 0) {
            threshold = buffer[i];
        }
    }
}
|
Quickselects the top k elements from the 2k elements in the buffer. O(k) expected time, O(k log
k) worst case.
|
java
|
android/guava/src/com/google/common/collect/TopKSelector.java
| 164
|
[] |
void
| true
| 7
| 7.2
|
google/guava
| 51,352
|
javadoc
| false
|
hermgauss
|
def hermgauss(deg):
"""
Gauss-Hermite quadrature.
Computes the sample points and weights for Gauss-Hermite quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]`
with the weight function :math:`f(x) = \\exp(-x^2)`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (H'_n(x_k) * H_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`H_n`, and then scaling the results to get
the right value when integrating 1.
Examples
--------
>>> from numpy.polynomial.hermite import hermgauss
>>> hermgauss(2)
(array([-0.70710678, 0.70710678]), array([0.88622693, 0.88622693]))
"""
ideg = pu._as_int(deg, "deg")
if ideg <= 0:
raise ValueError("deg must be a positive integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0] * deg + [1], dtype=np.float64)
m = hermcompanion(c)
x = np.linalg.eigvalsh(m)
# improve roots by one application of Newton
dy = _normed_hermite_n(x, ideg)
df = _normed_hermite_n(x, ideg - 1) * np.sqrt(2 * ideg)
x -= dy / df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = _normed_hermite_n(x, ideg - 1)
fm /= np.abs(fm).max()
w = 1 / (fm * fm)
# for Hermite we can also symmetrize
w = (w + w[::-1]) / 2
x = (x - x[::-1]) / 2
# scale w to get the right value
w *= np.sqrt(np.pi) / w.sum()
return x, w
|
Gauss-Hermite quadrature.
Computes the sample points and weights for Gauss-Hermite quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]`
with the weight function :math:`f(x) = \\exp(-x^2)`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (H'_n(x_k) * H_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`H_n`, and then scaling the results to get
the right value when integrating 1.
Examples
--------
>>> from numpy.polynomial.hermite import hermgauss
>>> hermgauss(2)
(array([-0.70710678, 0.70710678]), array([0.88622693, 0.88622693]))
|
python
|
numpy/polynomial/hermite.py
| 1,590
|
[
"deg"
] | false
| 2
| 7.76
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
_disable_dynamo
|
def _disable_dynamo(
    fn: Callable[_P, _T] | None = None, recursive: bool = True
) -> Callable[_P, _T] | Callable[[Callable[_P, _T]], Callable[_P, _T]]:
    """
    This API should be only used inside torch, external users should still use
    torch._dynamo.disable. The main goal of this API is to avoid circular
    imports issues that is common while using _dynamo.disable inside torch
    itself.

    This API avoids it by lazily importing torch._dynamo from the import time to
    the invocation of the decorated function.
    """
    if fn is None:
        # Parenthesized decorator usage like @_disable_dynamo(recursive=False);
        # the returned partial expects the decorated function as its argument.
        return functools.partial(_disable_dynamo, recursive=recursive)

    @functools.wraps(fn)
    def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> _T:
        # Cache the disabled variant on the function on first call so later
        # invocations skip both the import and the disable() wrapping.
        cached = getattr(fn, "__dynamo_disable", None)
        if cached is None:
            import torch._dynamo

            # functools.wraps is unnecessary here: `wrapper` already wraps fn.
            cached = torch._dynamo.disable(fn, recursive, wrapping=False)
            fn.__dynamo_disable = cached  # type: ignore[attr-defined]
        return cached(*args, **kwargs)

    return wrapper
|
This API should be only used inside torch, external users should still use
torch._dynamo.disable. The main goal of this API is to avoid circular
imports issues that is common while using _dynamo.disable inside torch
itself.
This API avoids it by lazily importing torch._dynamo from the import time to
the invocation of the decorated function.
|
python
|
torch/_compile.py
| 28
|
[
"fn",
"recursive"
] |
Callable[_P, _T] | Callable[[Callable[_P, _T]], Callable[_P, _T]]
| true
| 4
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
getFirstBucketMidpoint
|
/**
 * Returns a representative midpoint for the first bucket of the given bucket collection,
 * or {@code NaN} when the collection has no buckets.
 *
 * @param buckets the histogram buckets to inspect
 * @return the point of least relative error of the first bucket, or {@code Double.NaN} if empty
 */
private static double getFirstBucketMidpoint(ExponentialHistogram.Buckets buckets) {
    CopyableBucketIterator it = buckets.iterator();
    return it.hasNext()
        ? ExponentialScaleUtils.getPointOfLeastRelativeError(it.peekIndex(), it.scale())
        : Double.NaN;
}
|
Returns a representative midpoint for the first bucket of the given bucket collection.
@param buckets the histogram buckets to inspect
@return the point of least relative error of the first bucket, or {@code Double.NaN} if the collection has no buckets
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogramQuantile.java
| 160
|
[
"buckets"
] | true
| 2
| 8.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
calculateFilenamesForLocale
|
/**
 * Calculate the filenames for the given bundle basename and Locale,
 * appending language code, country code, and variant code.
 * <p>For example, basename "messages", Locale "de_AT_oo" &rarr; "messages_de_AT_OO",
 * "messages_de_AT", "messages_de".
 * <p>Follows the rules defined by {@link java.util.Locale#toString()}.
 * @param basename the basename of the bundle
 * @param locale the locale
 * @return the List of filenames to check, most specific first
 */
protected List<String> calculateFilenamesForLocale(String basename, Locale locale) {
    String language = locale.getLanguage();
    String country = locale.getCountry();
    String variant = locale.getVariant();

    List<String> result = new ArrayList<>(3);
    StringBuilder candidate = new StringBuilder(basename);

    // Candidates are prepended so the most specific filename ends up first.
    candidate.append('_');
    if (!language.isEmpty()) {
        candidate.append(language);
        result.add(0, candidate.toString());
    }

    candidate.append('_');
    if (!country.isEmpty()) {
        candidate.append(country);
        result.add(0, candidate.toString());
    }

    // A variant only makes sense when a language and/or country is present.
    if (!variant.isEmpty() && (!language.isEmpty() || !country.isEmpty())) {
        candidate.append('_').append(variant);
        result.add(0, candidate.toString());
    }

    return result;
}
|
Calculate the filenames for the given bundle basename and Locale,
appending language code, country code, and variant code.
<p>For example, basename "messages", Locale "de_AT_oo" → "messages_de_AT_OO",
"messages_de_AT", "messages_de".
<p>Follows the rules defined by {@link java.util.Locale#toString()}.
@param basename the basename of the bundle
@param locale the locale
@return the List of filenames to check
|
java
|
spring-context/src/main/java/org/springframework/context/support/ReloadableResourceBundleMessageSource.java
| 373
|
[
"basename",
"locale"
] | true
| 6
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
emptyArray
|
/**
 * Returns the empty array singleton that can be assigned without compiler warning.
 *
 * @param <L> the left element type.
 * @param <M> the middle element type.
 * @param <R> the right element type.
 * @return the empty array singleton that can be assigned without compiler warning.
 * @since 3.10
 */
@SuppressWarnings("unchecked")
public static <L, M, R> MutableTriple<L, M, R>[] emptyArray() {
    // An empty array is covariantly safe to share across all type parameterizations.
    final MutableTriple<L, M, R>[] empty = (MutableTriple<L, M, R>[]) EMPTY_ARRAY;
    return empty;
}
|
Returns the empty array singleton that can be assigned without compiler warning.
@param <L> the left element type.
@param <M> the middle element type.
@param <R> the right element type.
@return the empty array singleton that can be assigned without compiler warning.
@since 3.10
|
java
|
src/main/java/org/apache/commons/lang3/tuple/MutableTriple.java
| 55
|
[] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
max
|
/**
 * Returns the maximum value in an array.
 *
 * @param array an array, must not be null or empty.
 * @return the maximum value in the array.
 * @throws NullPointerException if {@code array} is {@code null}.
 * @throws IllegalArgumentException if {@code array} is empty.
 * @since 3.4 Changed signature from max(short[]) to max(short...).
 */
public static short max(final short... array) {
    validateArray(array);
    // Seed with the first element; comparing it against itself is harmless.
    short result = array[0];
    for (final short value : array) {
        if (value > result) {
            result = value;
        }
    }
    return result;
}
|
Returns the maximum value in an array.
@param array an array, must not be null or empty.
@return the maximum value in the array.
@throws NullPointerException if {@code array} is {@code null}.
@throws IllegalArgumentException if {@code array} is empty.
@since 3.4 Changed signature from max(short[]) to max(short...).
|
java
|
src/main/java/org/apache/commons/lang3/math/NumberUtils.java
| 1,063
|
[] | true
| 3
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
is_datetime64_dtype
|
def is_datetime64_dtype(arr_or_dtype) -> bool:
    """
    Check whether an array-like or dtype is of the datetime64 dtype.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array-like or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array-like or dtype is of the datetime64 dtype.

    See Also
    --------
    api.types.is_datetime64_ns_dtype: Check whether the provided array or
        dtype is of the datetime64[ns] dtype.
    api.types.is_datetime64_any_dtype: Check whether the provided array or
        dtype is of the datetime64 dtype.

    Examples
    --------
    >>> from pandas.api.types import is_datetime64_dtype
    >>> is_datetime64_dtype(object)
    False
    >>> is_datetime64_dtype(np.datetime64)
    True
    >>> is_datetime64_dtype(np.array([], dtype=int))
    False
    >>> is_datetime64_dtype(np.array([], dtype=np.datetime64))
    True
    >>> is_datetime64_dtype([1, 2, 3])
    False
    """
    dtype = arr_or_dtype
    if not isinstance(dtype, np.dtype):
        # General path: resolve the dtype from the array-like / dtype-like.
        return _is_dtype_type(arr_or_dtype, classes(np.datetime64))
    # GH#33400 fastpath for dtype object
    return dtype.kind == "M"
|
Check whether an array-like or dtype is of the datetime64 dtype.
Parameters
----------
arr_or_dtype : array-like or dtype
The array-like or dtype to check.
Returns
-------
boolean
Whether or not the array-like or dtype is of the datetime64 dtype.
See Also
--------
api.types.is_datetime64_ns_dtype: Check whether the provided array or
dtype is of the datetime64[ns] dtype.
api.types.is_datetime64_any_dtype: Check whether the provided array or
dtype is of the datetime64 dtype.
Examples
--------
>>> from pandas.api.types import is_datetime64_dtype
>>> is_datetime64_dtype(object)
False
>>> is_datetime64_dtype(np.datetime64)
True
>>> is_datetime64_dtype(np.array([], dtype=int))
False
>>> is_datetime64_dtype(np.array([], dtype=np.datetime64))
True
>>> is_datetime64_dtype([1, 2, 3])
False
|
python
|
pandas/core/dtypes/common.py
| 289
|
[
"arr_or_dtype"
] |
bool
| true
| 2
| 8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
indexIn
|
/**
 * Returns the index of the first matching BMP character in a character sequence, or {@code -1}
 * if no matching character is present.
 *
 * @param sequence the character sequence to examine from the beginning
 * @return an index, or {@code -1} if no character matches
 */
public int indexIn(CharSequence sequence) {
    // Delegate to the offset-based overload, scanning from the start.
    final int fromStart = 0;
    return indexIn(sequence, fromStart);
}
|
Returns the index of the first matching BMP character in a character sequence, or {@code -1} if
no matching character is present.
<p>The default implementation iterates over the sequence in forward order calling {@link
#matches} for each character.
@param sequence the character sequence to examine from the beginning
@return an index, or {@code -1} if no character matches
|
java
|
android/guava/src/com/google/common/base/CharMatcher.java
| 544
|
[
"sequence"
] | true
| 1
| 6.8
|
google/guava
| 51,352
|
javadoc
| false
|
|
create_queue
|
def create_queue(self, queue_name: str, attributes: dict | None = None) -> dict:
"""
Create queue using connection object.
.. seealso::
- :external+boto3:py:meth:`SQS.Client.create_queue`
:param queue_name: name of the queue.
:param attributes: additional attributes for the queue (default: None)
:return: dict with the information about the queue.
"""
return self.get_conn().create_queue(QueueName=queue_name, Attributes=attributes or {})
|
Create queue using connection object.
.. seealso::
- :external+boto3:py:meth:`SQS.Client.create_queue`
:param queue_name: name of the queue.
:param attributes: additional attributes for the queue (default: None)
:return: dict with the information about the queue.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/sqs.py
| 43
|
[
"self",
"queue_name",
"attributes"
] |
dict
| true
| 2
| 7.6
|
apache/airflow
| 43,597
|
sphinx
| false
|
greatCircleMinLatitude
|
/**
 * Determines the minimum latitude of the great circle defined by this LatLng to the provided LatLng.
 *
 * @param latLng the other endpoint of the great circle
 * @return the minimum latitude of the great circle in radians
 */
public double greatCircleMinLatitude(LatLng latLng) {
    if (isNumericallyIdentical(latLng)) {
        return latLng.lat;
    }
    // Order the endpoints so the two-argument helper receives the southern point first.
    if (latLng.lat < this.lat) {
        return greatCircleMinLatitude(latLng, this);
    }
    return greatCircleMinLatitude(this, latLng);
}
|
Determines the minimum latitude of the great circle defined by this LatLng to the provided LatLng.
@param latLng The LatLng.
@return The minimum latitude of the great circle in radians.
|
java
|
libs/h3/src/main/java/org/elasticsearch/h3/LatLng.java
| 158
|
[
"latLng"
] | true
| 3
| 7.92
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
newProperties
|
/**
 * Template method for creating a plain new {@link Properties} instance.
 * The default implementation simply calls {@link Properties#Properties()}.
 * <p>Allows for returning a custom {@link Properties} extension in subclasses.
 * Overriding methods should just instantiate a custom {@link Properties} subclass,
 * with no further initialization or population to be performed at that point.
 * @return a plain Properties instance
 * @since 4.2
 */
protected Properties newProperties() {
    final Properties properties = new Properties();
    return properties;
}
|
Template method for creating a plain new {@link Properties} instance.
The default implementation simply calls {@link Properties#Properties()}.
<p>Allows for returning a custom {@link Properties} extension in subclasses.
Overriding methods should just instantiate a custom {@link Properties} subclass,
with no further initialization or population to be performed at that point.
@return a plain Properties instance
@since 4.2
|
java
|
spring-context/src/main/java/org/springframework/context/support/ReloadableResourceBundleMessageSource.java
| 603
|
[] |
Properties
| true
| 1
| 6.48
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
nodeById
|
/**
 * Looks up a node by its id.
 *
 * @param id the id of the node
 * @return the node, or null if the node is not online or does not exist
 */
public Node nodeById(int id) {
    // Plain map lookup; unknown ids yield null.
    return nodesById.get(id);
}
|
Get the node by the node id (or null if the node is not online or does not exist)
@param id The id of the node
@return The node, or null if the node is not online or does not exist
|
java
|
clients/src/main/java/org/apache/kafka/common/Cluster.java
| 243
|
[
"id"
] |
Node
| true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
resolveType
|
/**
 * Resolves the declared Java type of this property using the given
 * metadata generation environment.
 *
 * @param environment the metadata generation environment
 * @return the resolved type as a string
 */
private String resolveType(MetadataGenerationEnvironment environment) {
    return environment.getTypeUtils().getType(getDeclaringElement(), this.getType());
}
|
Resolve the declared type of this property using the given metadata
generation environment.
@param environment the metadata generation environment
@return the resolved type
|
java
|
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/PropertyDescriptor.java
| 205
|
[
"environment"
] |
String
| true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
concat
|
/**
 * Combines two lists into a new one (more efficient than native concat
 * for short inputs).
 * @param list0
 * @param list1
 * @returns
 */
function concat<I0, I1>(list0: L.List<I0>, list1: L.List<I1>) {
  const total = list0.length + list1.length
  // tipping point where this implementation becomes slower than native
  if (total > 200) return list0.concat(list1 as any[])
  const merged: (I0 | I1)[] = new Array(total)
  // Fill from the back: second list first, then the first list.
  let cursor = total
  for (let j = list1.length - 1; j >= 0; --j) {
    merged[--cursor] = list1[j]
  }
  for (let j = list0.length - 1; j >= 0; --j) {
    merged[--cursor] = list0[j]
  }
  return merged
}
|
Combines two lists into a new one.
(more efficient than native concat)
@param list0
@param list1
@returns
|
typescript
|
helpers/blaze/concat.ts
| 13
|
[
"list0",
"list1"
] | false
| 4
| 7.6
|
prisma/prisma
| 44,834
|
jsdoc
| false
|
|
build_subgraph_buffer
|
def build_subgraph_buffer(
    args: list[TensorBox],
    subgraph: Subgraph,
):
    """
    This function is adapted from ../kernel/flex_attention.py.

    The goal is to take in the required args and produce the subgraph buffer.
    The subgraph buffer is a ComputedBuffer that will be inlined into the
    triton template.

    Args:
        args: The args that are passed into the subgraph
        subgraph: The Subgraph ir for which to produce the output node

    Raises:
        ValueError: if the traced graph contains no output node.
    """
    cnt = 0
    env = {}
    for node in subgraph.graph_module.graph.nodes:
        if node.op == "placeholder":
            # Each placeholder consumes the next positional arg, in graph order.
            env[node] = args[cnt]
            cnt += 1
        elif node.op == "call_function":
            # For call_function we use the default lowerings and pass in the
            # already created TensorBoxes as args.
            # BUGFIX: the original rebound `args`/`kwargs` here, shadowing the
            # function parameter `args`; a placeholder node visited after any
            # call_function node would then index the wrong sequence. Use
            # distinct local names instead.
            node_args, node_kwargs = tree_map(
                lambda x: env.get(x, x), (node.args, node.kwargs)
            )
            env[node] = lowerings[node.target](*node_args, **node_kwargs)
        elif node.op == "output":

            def convert_output_node_to_buffer(output):
                if output is None:
                    return None
                output_node = output
                output_buffer = env[output_node]
                assert isinstance(output_buffer, TensorBox), (
                    "The output node for B2B-GEMM's subgraph must be a TensorBox, but got: ",
                    type(output_buffer),
                )
                assert isinstance(output_buffer.data, StorageBox), (
                    "The output node for B2B-GEMM's subgraph must be a StorageBox, but got: ",
                    type(output_buffer),
                )
                device = output_buffer.data.get_device()
                assert device is not None
                # Wrap the subgraph result in a ComputedBuffer with a flexible
                # layout so it can be inlined into the triton template.
                subgraph_buffer = ComputedBuffer(
                    name=None,
                    layout=FlexibleLayout(
                        device=device,
                        dtype=output_buffer.data.get_dtype(),
                        size=output_buffer.data.get_size(),
                    ),
                    data=output_buffer.data.data,  # type: ignore[arg-type]
                )
                return subgraph_buffer

            # node.args[0] should be a single element representing the output of the subgraph
            return tree_map(convert_output_node_to_buffer, node.args[0])

    raise ValueError("B2B-GEMM was passed a subgraph with no output node!")
|
This function is adapted from ../kernel/flex_attention.py.
The goal is to take in the required args and produce the subgraph buffer
The subgraph buffer is a ComputedBuffer that will be inlined into the triton template
Args:
args: The args that are passed into the subgraph
subgraph: The Subgraph ir for which to produce the output node
|
python
|
torch/_inductor/fx_passes/b2b_gemm.py
| 455
|
[
"args",
"subgraph"
] | true
| 6
| 6.8
|
pytorch/pytorch
| 96,034
|
google
| false
|
|
complementOf
|
/**
 * Creates an {@code EnumSet} of all enum values not present in {@code collection}.
 * For an {@link EnumSet} input this behaves like {@link EnumSet#complementOf};
 * any other collection must be non-empty so the element type can be inferred.
 *
 * @param collection the collection whose complement should be computed
 * @return a new, modifiable {@code EnumSet} of the missing values
 * @throws IllegalArgumentException if {@code collection} is not an
 *     {@code EnumSet} and contains no elements
 */
@J2ktIncompatible
@GwtIncompatible // EnumSet.complementOf
public static <E extends Enum<E>> EnumSet<E> complementOf(Collection<E> collection) {
    // Fast path: an EnumSet already carries its element type.
    if (collection instanceof EnumSet) {
        return EnumSet.complementOf((EnumSet<E>) collection);
    }
    checkArgument(
        !collection.isEmpty(), "collection is empty; use the other version of this method");
    E first = collection.iterator().next();
    Class<E> type = first.getDeclaringClass();
    return makeComplementByHand(collection, type);
}
|
Creates an {@code EnumSet} consisting of all enum values that are not in the specified
collection. If the collection is an {@link EnumSet}, this method has the same behavior as
{@link EnumSet#complementOf}. Otherwise, the specified collection must contain at least one
element, in order to determine the element type. If the collection could be empty, use {@link
#complementOf(Collection, Class)} instead of this method.
@param collection the collection whose complement should be stored in the enum set
@return a new, modifiable {@code EnumSet} containing all values of the enum that aren't present
in the given collection
@throws IllegalArgumentException if {@code collection} is not an {@code EnumSet} instance and
contains no elements
|
java
|
android/guava/src/com/google/common/collect/Sets.java
| 505
|
[
"collection"
] | true
| 2
| 7.92
|
google/guava
| 51,352
|
javadoc
| false
|
|
is_sparse
|
def is_sparse(arr) -> bool:
    """
    Check whether an array-like is a 1-D pandas sparse array.

    .. deprecated:: 2.1.0
        Use isinstance(dtype, pd.SparseDtype) instead.

    Check that the one-dimensional array-like is a pandas sparse array.
    Returns True if it is a pandas sparse array, not another type of
    sparse array.

    Parameters
    ----------
    arr : array-like
        Array-like to check.

    Returns
    -------
    bool
        Whether or not the array-like is a pandas sparse array.

    See Also
    --------
    api.types.SparseDtype : The dtype object for pandas sparse arrays.

    Examples
    --------
    Returns `True` if the parameter is a 1-D pandas sparse array.

    >>> from pandas.api.types import is_sparse
    >>> is_sparse(pd.arrays.SparseArray([0, 0, 1, 0]))
    True
    >>> is_sparse(pd.Series(pd.arrays.SparseArray([0, 0, 1, 0])))
    True

    Returns `False` if the parameter is not sparse.

    >>> is_sparse(np.array([0, 0, 1, 0]))
    False
    >>> is_sparse(pd.Series([0, 1, 0, 0]))
    False

    Returns `False` if the parameter is not a pandas sparse array.

    >>> from scipy.sparse import bsr_matrix
    >>> is_sparse(bsr_matrix([0, 1, 0, 0]))
    False

    Returns `False` if the parameter has more than one dimension.
    """
    warnings.warn(
        "is_sparse is deprecated and will be removed in a future "
        "version. Check `isinstance(dtype, pd.SparseDtype)` instead.",
        Pandas4Warning,
        stacklevel=2,
    )
    # Accept either an array (inspect its .dtype) or a dtype object directly.
    candidate_dtype = getattr(arr, "dtype", arr)
    return isinstance(candidate_dtype, SparseDtype)
|
Check whether an array-like is a 1-D pandas sparse array.
.. deprecated:: 2.1.0
Use isinstance(dtype, pd.SparseDtype) instead.
Check that the one-dimensional array-like is a pandas sparse array.
Returns True if it is a pandas sparse array, not another type of
sparse array.
Parameters
----------
arr : array-like
Array-like to check.
Returns
-------
bool
Whether or not the array-like is a pandas sparse array.
See Also
--------
api.types.SparseDtype : The dtype object for pandas sparse arrays.
Examples
--------
Returns `True` if the parameter is a 1-D pandas sparse array.
>>> from pandas.api.types import is_sparse
>>> is_sparse(pd.arrays.SparseArray([0, 0, 1, 0]))
True
>>> is_sparse(pd.Series(pd.arrays.SparseArray([0, 0, 1, 0])))
True
Returns `False` if the parameter is not sparse.
>>> is_sparse(np.array([0, 0, 1, 0]))
False
>>> is_sparse(pd.Series([0, 1, 0, 0]))
False
Returns `False` if the parameter is not a pandas sparse array.
>>> from scipy.sparse import bsr_matrix
>>> is_sparse(bsr_matrix([0, 1, 0, 0]))
False
Returns `False` if the parameter has more than one dimension.
|
python
|
pandas/core/dtypes/common.py
| 189
|
[
"arr"
] |
bool
| true
| 1
| 7.12
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
state_from_response
|
def state_from_response(response: dict[str, Any]) -> str:
    """
    Get state from boto3 response.

    :param response: response from AWS API
    :return: state
    :raises NotImplementedError: always; subclasses must override this hook.
    """
    msg = "Please implement state_from_response() in subclass"
    raise NotImplementedError(msg)
|
Get state from boto3 response.
:param response: response from AWS API
:return: state
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/sensors/emr.py
| 97
|
[
"response"
] |
str
| true
| 1
| 6.88
|
apache/airflow
| 43,597
|
sphinx
| false
|
mixin
|
/**
 * Adds all own enumerable string keyed function properties of `source`
 * to `object`. If `object` is a function, the methods are also installed
 * on its prototype so wrapped instances can call them, honoring the
 * `chain` option.
 *
 * @param {Function|Object} [object=lodash] The destination object.
 * @param {Object} source The object of functions to add.
 * @param {Object} [options={}] The options object.
 * @param {boolean} [options.chain=true] Specify whether mixins are chainable.
 * @returns {Function|Object} Returns `object`.
 */
function mixin(object, source, options) {
  var props = keys(source),
      methodNames = baseFunctions(source, props);

  // Called as mixin(source) or mixin(source, options): shift arguments so
  // `object` becomes the source and `this` (lodash) becomes the target.
  if (options == null &&
      !(isObject(source) && (methodNames.length || !props.length))) {
    options = source;
    source = object;
    object = this;
    methodNames = baseFunctions(source, keys(source));
  }
  // Chaining defaults to true unless options explicitly provides `chain`.
  var chain = !(isObject(options) && 'chain' in options) || !!options.chain,
      isFunc = isFunction(object);

  arrayEach(methodNames, function(methodName) {
    var func = source[methodName];
    object[methodName] = func;
    if (isFunc) {
      // Prototype wrapper: either queue the call as a lazy action on a new
      // wrapper (chaining) or invoke immediately on the unwrapped value.
      object.prototype[methodName] = function() {
        var chainAll = this.__chain__;
        if (chain || chainAll) {
          var result = object(this.__wrapped__),
              actions = result.__actions__ = copyArray(this.__actions__);
          actions.push({ 'func': func, 'args': arguments, 'thisArg': object });
          result.__chain__ = chainAll;
          return result;
        }
        return func.apply(object, arrayPush([this.value()], arguments));
      };
    }
  });

  return object;
}
|
Adds all own enumerable string keyed function properties of a source
object to the destination object. If `object` is a function, then methods
are added to its prototype as well.
**Note:** Use `_.runInContext` to create a pristine `lodash` function to
avoid conflicts caused by modifying the original.
@static
@since 0.1.0
@memberOf _
@category Util
@param {Function|Object} [object=lodash] The destination object.
@param {Object} source The object of functions to add.
@param {Object} [options={}] The options object.
@param {boolean} [options.chain=true] Specify whether mixins are chainable.
@returns {Function|Object} Returns `object`.
@example
function vowels(string) {
return _.filter(string, function(v) {
return /[aeiou]/i.test(v);
});
}
_.mixin({ 'vowels': vowels });
_.vowels('fred');
// => ['e']
_('fred').vowels().value();
// => ['e']
_.mixin({ 'vowels': vowels }, { 'chain': false });
_('fred').vowels();
// => ['e']
|
javascript
|
lodash.js
| 15,817
|
[
"object",
"source",
"options"
] | false
| 10
| 7.2
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
isSingleton
|
/**
 * Determine whether the bean with the given name is a shared singleton,
 * checking (in order) an already-created singleton instance, the parent
 * factory when no local definition exists, and finally the merged local
 * bean definition, with special handling for FactoryBeans and factory
 * dereferences ("&amp;name").
 * @param name the name of the bean to query (possibly a factory dereference)
 * @return whether the bean corresponds to a singleton instance
 * @throws NoSuchBeanDefinitionException if there is no bean with the given name
 */
@Override
public boolean isSingleton(String name) throws NoSuchBeanDefinitionException {
    String beanName = transformedBeanName(name);
    Object beanInstance = getSingleton(beanName, false);
    if (beanInstance != null) {
        if (beanInstance instanceof FactoryBean<?> factoryBean) {
            return (BeanFactoryUtils.isFactoryDereference(name) || factoryBean.isSingleton());
        }
        else {
            return !BeanFactoryUtils.isFactoryDereference(name);
        }
    }
    // No singleton instance found -> check bean definition.
    BeanFactory parentBeanFactory = getParentBeanFactory();
    if (parentBeanFactory != null && !containsBeanDefinition(beanName)) {
        // No bean definition found in this factory -> delegate to parent.
        return parentBeanFactory.isSingleton(originalBeanName(name));
    }
    RootBeanDefinition mbd = getMergedLocalBeanDefinition(beanName);
    // In case of FactoryBean, return singleton status of created object if not a dereference.
    if (mbd.isSingleton()) {
        if (isFactoryBean(beanName, mbd)) {
            if (BeanFactoryUtils.isFactoryDereference(name)) {
                return true;
            }
            FactoryBean<?> factoryBean = (FactoryBean<?>) getBean(FACTORY_BEAN_PREFIX + beanName);
            return factoryBean.isSingleton();
        }
        else {
            return !BeanFactoryUtils.isFactoryDereference(name);
        }
    }
    else {
        return false;
    }
}
|
Determine whether the bean with the given name is a shared singleton,
checking any already-created singleton instance, delegating to the parent
factory when no local definition exists, and otherwise consulting the
merged local bean definition (with FactoryBean dereference handling).
@param name the name of the bean to query
@return whether this bean corresponds to a singleton instance
@throws NoSuchBeanDefinitionException if there is no bean with the given name
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
| 435
|
[
"name"
] | true
| 9
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
roundtrip
|
/**
 * Performs a serialization roundtrip: serializes and then deserializes the
 * given object. Useful for testing {@link Serializable} implementations.
 *
 * @param <T> the type of the object involved.
 * @param obj the object to roundtrip.
 * @return the serialized and deserialized object.
 */
@SuppressWarnings("unchecked") // safe: we serialized a T, so we deserialize a T
public static <T extends Serializable> T roundtrip(final T obj) {
    final byte[] bytes = serialize(obj);
    return (T) deserialize(bytes);
}
|
Performs a serialization roundtrip. Serializes and deserializes the given object, great for testing objects that
implement {@link Serializable}.
@param <T>
the type of the object involved.
@param obj
the object to roundtrip.
@return the serialized and deserialized object.
@since 3.3
|
java
|
src/main/java/org/apache/commons/lang3/SerializationUtils.java
| 222
|
[
"obj"
] |
T
| true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
asVariableDeclaration
|
/**
 * Normalizes a name or declaration into a VariableDeclaration: names
 * (string or BindingName) are wrapped in a bare declaration, existing
 * declarations and `undefined` pass through unchanged.
 * @param variableDeclaration The name or declaration to normalize.
 */
function asVariableDeclaration(variableDeclaration: string | BindingName | VariableDeclaration | undefined) {
    const needsWrapping = typeof variableDeclaration === "string"
        || (!!variableDeclaration && !isVariableDeclaration(variableDeclaration));
    if (!needsWrapping) {
        return variableDeclaration;
    }
    return createVariableDeclaration(
        variableDeclaration,
        /*exclamationToken*/ undefined,
        /*type*/ undefined,
        /*initializer*/ undefined,
    );
}
|
Normalizes a name or declaration into a VariableDeclaration: a string or
BindingName is wrapped in a bare declaration, while an existing
VariableDeclaration or `undefined` passes through unchanged.
@param variableDeclaration The name or declaration to normalize.
|
typescript
|
src/compiler/factory/nodeFactory.ts
| 7,167
|
[
"variableDeclaration"
] | false
| 4
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
clusterId
|
/**
 * Returns the cluster identifier carried by this metadata response.
 *
 * @return the cluster identifier, or null if the response omitted it.
 */
public String clusterId() {
    return data.clusterId();
}
|
The cluster identifier returned in the metadata response.
@return cluster identifier if it is present in the response, null otherwise.
|
java
|
clients/src/main/java/org/apache/kafka/common/requests/MetadataResponse.java
| 257
|
[] |
String
| true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
removeAll
|
/**
 * Removes the elements at the given positions, shifting the remainder left.
 * Delegates to the generic {@code Object}-based overload.
 *
 * @param array the array to remove elements from; must not be {@code null}.
 * @param indices the positions of the elements to be removed.
 * @return a new array without the elements at the specified positions.
 * @throws IndexOutOfBoundsException if any index is out of range, or if the
 *     array is {@code null}.
 */
public static char[] removeAll(final char[] array, final int... indices) {
    final Object result = removeAll((Object) array, indices);
    return (char[]) result;
}
|
Removes the elements at the specified positions from the specified array. All remaining elements are shifted to the left.
<p>
This method returns a new array with the same elements of the input array except those at the specified positions. The component type of the returned
array is always the same as that of the input array.
</p>
<p>
If the input array is {@code null}, an IndexOutOfBoundsException will be thrown, because in that case no valid index can be specified.
</p>
<pre>
ArrayUtils.removeAll([1], 0) = []
ArrayUtils.removeAll([2, 6], 0) = [6]
ArrayUtils.removeAll([2, 6], 0, 1) = []
ArrayUtils.removeAll([2, 6, 3], 1, 2) = [2]
ArrayUtils.removeAll([2, 6, 3], 0, 2) = [6]
ArrayUtils.removeAll([2, 6, 3], 0, 1, 2) = []
</pre>
@param array the array to remove the element from, may not be {@code null}.
@param indices the positions of the elements to be removed.
@return A new array containing the existing elements except those at the specified positions.
@throws IndexOutOfBoundsException if any index is out of range (index < 0 || index >= array.length), or if the array is {@code null}.
@since 3.0.1
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 5,022
|
[
"array"
] | true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
toShort
|
/**
 * Converts a String to a {@code short}, returning {@code defaultValue} when
 * the string is {@code null}, empty, or not a parsable number.
 *
 * @param str the string to convert, may be null.
 * @param defaultValue the value returned when parsing fails.
 * @return the parsed short, or {@code defaultValue} on failure.
 */
public static short toShort(final String str, final short defaultValue) {
    try {
        return Short.parseShort(str);
    } catch (final RuntimeException ignored) {
        // NumberFormatException covers both malformed and null input.
        return defaultValue;
    }
}
|
Converts a {@link String} to an {@code short}, returning a default value if the conversion fails.
<p>
If the string is {@code null}, the default value is returned.
</p>
<pre>
NumberUtils.toShort(null, 1) = 1
NumberUtils.toShort("", 1) = 1
NumberUtils.toShort("1", 0) = 1
</pre>
@param str the string to convert, may be null.
@param defaultValue the default value.
@return the short represented by the string, or the default if conversion fails.
@since 2.5
|
java
|
src/main/java/org/apache/commons/lang3/math/NumberUtils.java
| 1,788
|
[
"str",
"defaultValue"
] | true
| 2
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
_get_team_executor_configs
|
def _get_team_executor_configs(cls, validate_teams: bool = True) -> list[tuple[str | None, list[str]]]:
    """
    Return a list of executor configs to be loaded.

    Each tuple contains the team id as the first element and the second element is the executor config
    for that team (a list of executor names/modules/aliases). A team id of ``None`` denotes the
    global (team-less) executor config.

    :param validate_teams: Whether to validate that team names exist in database
    :raises AirflowConfigException: if the executor config is empty, a team is repeated,
        or no global executor is configured.
    """
    # Imported locally to avoid a circular import with airflow.configuration.
    from airflow.configuration import conf

    executor_config = conf.get_mandatory_value("core", "executor")
    if not executor_config:
        raise AirflowConfigException(
            "The 'executor' key in the 'core' section of the configuration is mandatory and cannot be empty"
        )
    configs: list[tuple[str | None, list[str]]] = []
    seen_teams: set[str | None] = set()
    # The executor_config can look like a few things. One is just a single executor name, such as
    # "CeleryExecutor". Or a list of executors, such as "CeleryExecutor,KubernetesExecutor,module.path.to.executor".
    # In these cases these are all executors that are available to all teams, with the first one being the
    # default executor, as usual. The config can also look like a list of executors, per team, with the team name
    # prefixing each list of executors separated by a equal sign and then each team list separated by a
    # semi-colon.
    # "LocalExecutor;team1=CeleryExecutor;team2=KubernetesExecutor,module.path.to.executor".
    for team_executor_config in executor_config.split(";"):
        # The first item in the list may not have a team id (either empty string before the equal
        # sign or no equal sign at all), which means it is a global executor config.
        if "=" not in team_executor_config or team_executor_config.startswith("="):
            team_name = None
            # NOTE(review): strip("=") removes "=" from BOTH ends of the segment;
            # assumes executor names themselves never contain "=" — confirm.
            executor_names = team_executor_config.strip("=")
        else:
            cls.block_use_of_multi_team()
            if conf.getboolean("core", "multi_team", fallback=False):
                team_name, executor_names = team_executor_config.split("=")
            else:
                # Team-scoped entries are silently dropped (with a warning)
                # when multi_team is disabled.
                log.warning(
                    "The 'multi_team' config is not enabled, but team executors were configured. "
                    "The following team executor config will be ignored: %s",
                    team_executor_config,
                )
                continue
        # Check for duplicate team names
        if team_name in seen_teams:
            raise AirflowConfigException(
                f"Team '{team_name}' appears more than once in executor configuration. "
                f"Each team can only be specified once in the executor config."
            )
        seen_teams.add(team_name)
        # Split by comma to get the individual executor names and strip spaces off of them
        configs.append((team_name, [name.strip() for name in executor_names.split(",")]))
    # Validate that at least one global executor exists
    has_global_executor = any(team_name is None for team_name, _ in configs)
    if not has_global_executor:
        raise AirflowConfigException(
            "At least one global executor must be configured. Current configuration only contains "
            "team-based executors. Please add a global executor configuration (e.g., "
            "'CeleryExecutor;team1=LocalExecutor' instead of 'team1=CeleryExecutor;team2=LocalExecutor')."
        )
    # Validate that all team names exist in the database (excluding None for global configs)
    team_names_to_validate = {team_name for team_name in seen_teams if team_name is not None}
    if team_names_to_validate and validate_teams:
        cls._validate_teams_exist_in_database(team_names_to_validate)
    return configs
|
Return a list of executor configs to be loaded.
Each tuple contains the team id as the first element and the second element is the executor config
for that team (a list of executor names/modules/aliases).
:param validate_teams: Whether to validate that team names exist in database
|
python
|
airflow-core/src/airflow/executors/executor_loader.py
| 188
|
[
"cls",
"validate_teams"
] |
list[tuple[str | None, list[str]]]
| true
| 12
| 6.96
|
apache/airflow
| 43,597
|
sphinx
| false
|
connect
|
/**
 * Connect a socket to an address.
 *
 * @param sockfd an open socket file descriptor
 * @param addr the address to connect to
 * @return 0 on success, -1 on failure with errno set
 */
int connect(int sockfd, SockAddr addr);
|
Connect a socket to an address.
@param sockfd An open socket file descriptor
@param addr The address to connect to
@return 0 on success, -1 on failure with errno set
|
java
|
libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/PosixCLibrary.java
| 128
|
[
"sockfd",
"addr"
] | true
| 1
| 6.8
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
clone
|
/**
 * Clones an array, returning {@code null} for a {@code null} input.
 *
 * @param array the array to clone, may be {@code null}.
 * @return the cloned array, or {@code null} if the input was {@code null}.
 */
public static boolean[] clone(final boolean[] array) {
    if (array == null) {
        return null;
    }
    return array.clone();
}
|
Clones an array or returns {@code null}.
<p>
This method returns {@code null} for a {@code null} input array.
</p>
@param array the array to clone, may be {@code null}.
@return the cloned array, {@code null} if {@code null} input.
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 1,453
|
[
"array"
] | true
| 2
| 8.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
asConfigurationPropertySource
|
/**
 * Adapts a Spring {@link PropertySource} to a {@link ConfigurationPropertySource},
 * applying the source's prefix when it exposes one via {@code PropertySourceInfo}.
 *
 * @param propertySource the property source to adapt
 * @return the adapted source, or {@code null} when it cannot be adapted
 */
private static @Nullable ConfigurationPropertySource asConfigurationPropertySource(
        PropertySource<?> propertySource) {
    ConfigurationPropertySource adapted = ConfigurationPropertySource.from(propertySource);
    if (adapted == null) {
        return null;
    }
    if (propertySource instanceof PropertySourceInfo propertySourceInfo) {
        adapted = adapted.withPrefix(propertySourceInfo.getPrefix());
    }
    return adapted;
}
|
Adapt a Spring {@link PropertySource} to a {@link ConfigurationPropertySource},
applying the source's prefix when it exposes one.
@param propertySource the property source to adapt
@return the adapted source, or {@code null} when it cannot be adapted
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataEnvironmentContributor.java
| 454
|
[
"propertySource"
] |
ConfigurationPropertySource
| true
| 3
| 7.12
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
create
|
/**
 * Create a new instance for the given nullable value.
 * @param value the value used to create the instance (may be {@code null})
 * @return the resulting instance
 */
R create(@Nullable T value);
|
Create a new instance for the given nullable value.
@param value the value used to create the instance (may be
{@code null})
@return the resulting instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/PropertyMapper.java
| 533
|
[
"value"
] |
R
| true
| 1
| 6.8
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
reverse
|
/**
 * Reverses this builder's characters in place.
 *
 * @return {@code this} instance.
 */
public StrBuilder reverse() {
    if (size == 0) {
        return this;
    }
    final char[] chars = buffer;
    // Classic two-pointer in-place reversal.
    int lo = 0;
    int hi = size - 1;
    while (lo < hi) {
        final char tmp = chars[lo];
        chars[lo] = chars[hi];
        chars[hi] = tmp;
        lo++;
        hi--;
    }
    return this;
}
|
Reverses the string builder placing each character in the opposite index.
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 2,739
|
[] |
StrBuilder
| true
| 3
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
matchesAtMostOne
|
/**
 * Returns true when the pattern and entry filters together can match at
 * most one ACE — that is, when neither contains ANY or UNKNOWN fields.
 */
public boolean matchesAtMostOne() {
    if (!patternFilter.matchesAtMostOne()) {
        return false;
    }
    return entryFilter.matchesAtMostOne();
}
|
Return true if the resource and entry filters can only match one ACE. In other words, if
there are no ANY or UNKNOWN fields.
|
java
|
clients/src/main/java/org/apache/kafka/common/acl/AclBindingFilter.java
| 86
|
[] | true
| 2
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
resolveInnerBean
|
/**
 * Resolve an inner bean definition and invoke the given {@code resolver}
 * on its merged bean definition.
 *
 * @param innerBeanName the inner bean name (or {@code null} to generate one)
 * @param innerBd the inner raw bean definition
 * @param resolver the function to invoke to resolve
 * @param <T> the type of the resolution
 * @return a resolved inner bean, as a result of applying the {@code resolver}
 */
public <T> T resolveInnerBean(@Nullable String innerBeanName, BeanDefinition innerBd,
        BiFunction<String, RootBeanDefinition, T> resolver) {

    // Generate a unique synthetic name when the caller did not supply one.
    String nameToUse = innerBeanName;
    if (nameToUse == null) {
        nameToUse = "(inner bean)" + BeanFactoryUtils.GENERATED_BEAN_NAME_SEPARATOR +
                ObjectUtils.getIdentityHexString(innerBd);
    }
    RootBeanDefinition mergedDefinition =
            this.beanFactory.getMergedBeanDefinition(nameToUse, innerBd, this.beanDefinition);
    return resolver.apply(nameToUse, mergedDefinition);
}
|
Resolve an inner bean definition and invoke the specified {@code resolver}
on its merged bean definition.
@param innerBeanName the inner bean name (or {@code null} to assign one)
@param innerBd the inner raw bean definition
@param resolver the function to invoke to resolve
@param <T> the type of the resolution
@return a resolved inner bean, as a result of applying the {@code resolver}
@since 6.0
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/BeanDefinitionValueResolver.java
| 257
|
[
"innerBeanName",
"innerBd",
"resolver"
] |
T
| true
| 2
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
toStringYesNo
|
/**
 * Converts a Boolean to {@code "yes"}, {@code "no"}, or {@code null}.
 *
 * @param bool the Boolean to convert, may be null.
 * @return {@code "yes"} for TRUE, {@code "no"} for FALSE, {@code null} for null.
 */
public static String toStringYesNo(final Boolean bool) {
    // Delegate to the general three-value formatter.
    return toString(bool, YES, NO, null);
}
|
Converts a Boolean to a String returning {@code 'yes'},
{@code 'no'}, or {@code null}.
<pre>
BooleanUtils.toStringYesNo(Boolean.TRUE) = "yes"
BooleanUtils.toStringYesNo(Boolean.FALSE) = "no"
BooleanUtils.toStringYesNo(null) = null;
</pre>
@param bool the Boolean to check
@return {@code 'yes'}, {@code 'no'}, or {@code null}
|
java
|
src/main/java/org/apache/commons/lang3/BooleanUtils.java
| 1,139
|
[
"bool"
] |
String
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
contains
|
/**
 * Returns whether {@code target} occurs anywhere in {@code array}.
 *
 * @param array an array of {@code char} values, possibly empty
 * @param target a primitive {@code char} value
 * @return {@code true} if some element equals {@code target}
 */
public static boolean contains(char[] array, char target) {
    for (int i = 0; i < array.length; i++) {
        if (array[i] == target) {
            return true;
        }
    }
    return false;
}
|
Returns {@code true} if {@code target} is present as an element anywhere in {@code array}.
@param array an array of {@code char} values, possibly empty
@param target a primitive {@code char} value
@return {@code true} if {@code array[i] == target} for some value of {@code i}
|
java
|
android/guava/src/com/google/common/primitives/Chars.java
| 133
|
[
"array",
"target"
] | true
| 2
| 8.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
toBoolean
|
/**
 * Converts an int to a boolean using explicit sentinel values.
 * When both sentinels are equal, {@code trueValue} takes precedence.
 *
 * @param value the int to convert
 * @param trueValue the value matched for {@code true}
 * @param falseValue the value matched for {@code false}
 * @return {@code true} or {@code false}
 * @throws IllegalArgumentException if {@code value} matches neither sentinel
 */
public static boolean toBoolean(final int value, final int trueValue, final int falseValue) {
    // trueValue is checked first, so it wins when both sentinels match.
    if (value == trueValue) {
        return true;
    }
    if (value != falseValue) {
        throw new IllegalArgumentException("The Integer did not match either specified value");
    }
    return false;
}
|
Converts an int to a boolean specifying the conversion values.
<p>If the {@code trueValue} and {@code falseValue} are the same number then
the return value will be {@code true} in case {@code value} matches it.</p>
<pre>
BooleanUtils.toBoolean(0, 1, 0) = false
BooleanUtils.toBoolean(1, 1, 0) = true
BooleanUtils.toBoolean(1, 1, 1) = true
BooleanUtils.toBoolean(2, 1, 2) = false
BooleanUtils.toBoolean(2, 2, 0) = true
</pre>
@param value the {@link Integer} to convert
@param trueValue the value to match for {@code true}
@param falseValue the value to match for {@code false}
@return {@code true} or {@code false}
@throws IllegalArgumentException if {@code value} does not match neither
{@code trueValue} no {@code falseValue}
|
java
|
src/main/java/org/apache/commons/lang3/BooleanUtils.java
| 438
|
[
"value",
"trueValue",
"falseValue"
] | true
| 3
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
toStringBuilder
|
/**
 * Reads all characters from {@code r} into a new {@link StringBuilder}.
 * Does not close the {@code Readable}.
 *
 * @param r the object to read from
 * @return a {@link StringBuilder} containing all the characters
 * @throws IOException if an I/O error occurs
 */
private static StringBuilder toStringBuilder(Readable r) throws IOException {
    StringBuilder out = new StringBuilder();
    // Reader instances take a separate copy path from generic Readables.
    if (r instanceof Reader) {
        copyReaderToBuilder((Reader) r, out);
        return out;
    }
    copy(r, out);
    return out;
}
|
Reads all characters from a {@link Readable} object into a new {@link StringBuilder} instance.
Does not close the {@code Readable}.
@param r the object to read from
@return a {@link StringBuilder} containing all the characters
@throws IOException if an I/O error occurs
|
java
|
android/guava/src/com/google/common/io/CharStreams.java
| 174
|
[
"r"
] |
StringBuilder
| true
| 2
| 7.92
|
google/guava
| 51,352
|
javadoc
| false
|
get_system_encoding
|
def get_system_encoding():
    """
    Return the encoding used by the character type functions.

    Falls back to 'ascii' if the locale encoding is unsupported by Python or
    could not be determined. See tickets #10335 and #5846.
    """
    try:
        detected = locale.getlocale()[1] or "ascii"
        # Reject names that Python's codec machinery does not recognize.
        codecs.lookup(detected)
    except Exception:
        return "ascii"
    return detected
|
The encoding for the character type functions. Fallback to 'ascii' if the
#encoding is unsupported by Python or could not be determined. See tickets
#10335 and #5846.
|
python
|
django/utils/encoding.py
| 248
|
[] | false
| 2
| 6.4
|
django/django
| 86,204
|
unknown
| false
|
|
txnOffsetCommitHandler
|
/**
 * Builds a TxnOffsetCommitHandler for the given consumed offsets, first
 * recording each offset in {@code pendingTxnOffsetCommits}.
 * @param result pending request result; {@code null} indicates transaction V2 is in use
 * @param offsets the offsets (with metadata) to commit, keyed by partition
 * @param groupMetadata consumer group identity used to build the request
 * @return the handler wrapping the TxnOffsetCommitRequest builder
 */
private TxnOffsetCommitHandler txnOffsetCommitHandler(TransactionalRequestResult result,
                                                      Map<TopicPartition, OffsetAndMetadata> offsets,
                                                      ConsumerGroupMetadata groupMetadata) {
    // Stage every offset as pending before constructing the request.
    for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
        OffsetAndMetadata offsetAndMetadata = entry.getValue();
        CommittedOffset committedOffset = new CommittedOffset(offsetAndMetadata.offset(),
                offsetAndMetadata.metadata(), offsetAndMetadata.leaderEpoch());
        pendingTxnOffsetCommits.put(entry.getKey(), committedOffset);
    }
    final TxnOffsetCommitRequest.Builder builder =
        new TxnOffsetCommitRequest.Builder(transactionalId,
            groupMetadata.groupId(),
            producerIdAndEpoch.producerId,
            producerIdAndEpoch.epoch,
            pendingTxnOffsetCommits,
            groupMetadata.memberId(),
            groupMetadata.generationId(),
            groupMetadata.groupInstanceId(),
            isTransactionV2Enabled()
        );
    if (result == null) {
        // In this case, transaction V2 is in use.
        return new TxnOffsetCommitHandler(builder);
    }
    return new TxnOffsetCommitHandler(result, builder);
}
|
Build a TxnOffsetCommitHandler for the given consumed offsets and group
metadata, recording each offset as pending before constructing the request.
@return the handler wrapping the TxnOffsetCommitRequest builder
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java
| 1,223
|
[
"result",
"offsets",
"groupMetadata"
] |
TxnOffsetCommitHandler
| true
| 2
| 7.2
|
apache/kafka
| 31,560
|
javadoc
| false
|
run
|
/**
 * Drains this listener's event queue, invoking each queued event on the
 * listener serially and in order. When the queue is empty the thread
 * un-schedules itself ({@code isThreadScheduled = false}) so a later
 * enqueue can schedule a new run; the same flag is reset in the
 * {@code finally} block if an Error escapes, so the queue is not left
 * permanently "scheduled" in a corrupted state.
 */
@Override
@SuppressWarnings("CatchingUnchecked") // sneaky checked exception
public void run() {
    boolean stillRunning = true;
    try {
        while (true) {
            ListenerCallQueue.Event<L> nextToRun;
            Object nextLabel;
            synchronized (PerListenerQueue.this) {
                Preconditions.checkState(isThreadScheduled);
                nextToRun = waitQueue.poll();
                nextLabel = labelQueue.poll();
                if (nextToRun == null) {
                    isThreadScheduled = false;
                    stillRunning = false;
                    break;
                }
            }
            // Always run while _not_ holding the lock, to avoid deadlocks.
            try {
                nextToRun.call(listener);
            } catch (Exception e) { // sneaky checked exception
                // Log it and keep going.
                logger
                    .get()
                    .log(
                        Level.SEVERE,
                        "Exception while executing callback: " + listener + " " + nextLabel,
                        e);
            }
        }
    } finally {
        if (stillRunning) {
            // An Error is bubbling up. We should mark ourselves as no longer running. That way, if
            // anyone tries to keep using us, we won't be corrupted.
            synchronized (PerListenerQueue.this) {
                isThreadScheduled = false;
            }
        }
    }
}
|
Dispatches all listeners {@linkplain #enqueue enqueued} prior to this call, serially and in
order.
|
java
|
android/guava/src/com/google/common/util/concurrent/ListenerCallQueue.java
| 189
|
[] |
void
| true
| 5
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
invoke
|
/**
 * Propagates the method call to every registered listener, in the order the
 * listeners were added, routing any listener failure to {@code handle}.
 *
 * @param unusedProxy the proxy the invocation was made on; not used
 * @param method the listener method to invoke on each listener
 * @param args the event arguments to propagate
 * @return always {@code null}
 * @throws IllegalAccessException if an error occurs
 * @throws IllegalArgumentException if an error occurs
 * @throws InvocationTargetException if an error occurs
 */
@Override
public Object invoke(final Object unusedProxy, final Method method, final Object[] args)
        throws IllegalAccessException, IllegalArgumentException, InvocationTargetException {
    for (final L registered : listeners) {
        try {
            method.invoke(registered, args);
        } catch (final Throwable t) {
            handle(t);
        }
    }
    return null;
}
|
Propagates the method call to all registered listeners in place of the proxy listener object.
<p>
Calls listeners in the order added to the underlying {@link List}.
</p>
@param unusedProxy the proxy object representing a listener on which the invocation was called; not used
@param method the listener method that will be called on all of the listeners.
@param args event arguments to propagate to the listeners.
@return the result of the method call
@throws InvocationTargetException if an error occurs
@throws IllegalArgumentException if an error occurs
@throws IllegalAccessException if an error occurs
|
java
|
src/main/java/org/apache/commons/lang3/event/EventListenerSupport.java
| 120
|
[
"unusedProxy",
"method",
"args"
] |
Object
| true
| 2
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
std
|
def std(
self,
axis=None,
dtype=None,
out=None,
ddof: int = 1,
keepdims: bool = False,
skipna: bool = True,
) -> Timedelta:
"""
Return sample standard deviation over requested axis.
Normalized by `N-1` by default. This can be changed using ``ddof``.
Parameters
----------
axis : int, optional
Axis for the function to be applied on. For :class:`pandas.Series`
this parameter is unused and defaults to ``None``.
dtype : dtype, optional, default None
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types
it is the same as the array type.
out : ndarray, optional, default None
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the
calculated values) will be cast if necessary.
ddof : int, default 1
Degrees of Freedom. The divisor used in calculations is `N - ddof`,
where `N` represents the number of elements.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the input array. If the default
value is passed, then keepdims will not be passed through to the
std method of sub-classes of ndarray, however any non-default value
will be. If the sub-class method does not implement keepdims any
exceptions will be raised.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is ``NA``, the result
will be ``NA``.
Returns
-------
Timedelta
Standard deviation over requested axis.
See Also
--------
numpy.ndarray.std : Returns the standard deviation of the array elements
along given axis.
Series.std : Return sample standard deviation over requested axis.
Examples
--------
For :class:`pandas.DatetimeIndex`:
>>> idx = pd.date_range("2001-01-01 00:00", periods=3)
>>> idx
DatetimeIndex(['2001-01-01', '2001-01-02', '2001-01-03'],
dtype='datetime64[us]', freq='D')
>>> idx.std()
Timedelta('1 days 00:00:00')
"""
# Because std is translation-invariant, we can get self.std
# by calculating (self - Timestamp(0)).std, and we can do it
# without creating a copy by using a view on self._ndarray
from pandas.core.arrays import TimedeltaArray
# Find the td64 dtype with the same resolution as our dt64 dtype
dtype_str = self._ndarray.dtype.name.replace("datetime64", "timedelta64")
dtype = np.dtype(dtype_str)
tda = TimedeltaArray._simple_new(self._ndarray.view(dtype), dtype=dtype)
return tda.std(axis=axis, out=out, ddof=ddof, keepdims=keepdims, skipna=skipna)
|
Return sample standard deviation over requested axis.
Normalized by `N-1` by default. This can be changed using ``ddof``.
Parameters
----------
axis : int, optional
Axis for the function to be applied on. For :class:`pandas.Series`
this parameter is unused and defaults to ``None``.
dtype : dtype, optional, default None
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types
it is the same as the array type.
out : ndarray, optional, default None
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the
calculated values) will be cast if necessary.
ddof : int, default 1
Degrees of Freedom. The divisor used in calculations is `N - ddof`,
where `N` represents the number of elements.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the input array. If the default
value is passed, then keepdims will not be passed through to the
std method of sub-classes of ndarray, however any non-default value
will be. If the sub-class method does not implement keepdims any
exceptions will be raised.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is ``NA``, the result
will be ``NA``.
Returns
-------
Timedelta
Standard deviation over requested axis.
See Also
--------
numpy.ndarray.std : Returns the standard deviation of the array elements
along given axis.
Series.std : Return sample standard deviation over requested axis.
Examples
--------
For :class:`pandas.DatetimeIndex`:
>>> idx = pd.date_range("2001-01-01 00:00", periods=3)
>>> idx
DatetimeIndex(['2001-01-01', '2001-01-02', '2001-01-03'],
dtype='datetime64[us]', freq='D')
>>> idx.std()
Timedelta('1 days 00:00:00')
|
python
|
pandas/core/arrays/datetimes.py
| 2,326
|
[
"self",
"axis",
"dtype",
"out",
"ddof",
"keepdims",
"skipna"
] |
Timedelta
| true
| 1
| 7.2
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
run
|
public void run() throws Exception {
printBanner();
try {
runInputLoop();
}
catch (Exception ex) {
if (!(ex instanceof ShellExitException)) {
throw ex;
}
}
}
|
Run the shell until the user exists.
@throws Exception on error
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/shell/Shell.java
| 130
|
[] |
void
| true
| 3
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
maybeCloseRecordStream
|
private void maybeCloseRecordStream() {
if (records != null) {
records.close();
records = null;
}
}
|
Draining a {@link CompletedFetch} will signal that the data has been consumed and the underlying resources
are closed. This is somewhat analogous to {@link Closeable#close() closing}, though no error will result if a
caller invokes {@link #fetchRecords(FetchConfig, Deserializers, int)}; an empty {@link List list} will be
returned instead.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/CompletedFetch.java
| 175
|
[] |
void
| true
| 2
| 6.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
highlight_quantile
|
def highlight_quantile(
self,
subset: Subset | None = None,
color: str = "yellow",
axis: Axis | None = 0,
q_left: float = 0.0,
q_right: float = 1.0,
interpolation: QuantileInterpolation = "linear",
inclusive: IntervalClosedType = "both",
props: str | None = None,
) -> Styler:
"""
Highlight values defined by a quantile with a style.
Parameters
----------
%(subset)s
%(color)s
axis : {0 or 'index', 1 or 'columns', None}, default 0
Axis along which to determine and highlight quantiles. If ``None`` quantiles
are measured over the entire DataFrame. See examples.
q_left : float, default 0
Left bound, in [0, q_right), for the target quantile range.
q_right : float, default 1
Right bound, in (q_left, 1], for the target quantile range.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
Argument passed to ``Series.quantile`` or ``DataFrame.quantile`` for
quantile estimation.
inclusive : {'both', 'neither', 'left', 'right'}
Identify whether quantile bounds are closed or open.
%(props)s
Returns
-------
Styler
Instance of class where values in quantile highlighted with given style.
See Also
--------
Styler.highlight_null: Highlight missing values with a style.
Styler.highlight_max: Highlight the maximum with a style.
Styler.highlight_min: Highlight the minimum with a style.
Styler.highlight_between: Highlight a defined range with a style.
Notes
-----
This function does not work with ``str`` dtypes.
Examples
--------
Using ``axis=None`` and apply a quantile to all collective data
>>> df = pd.DataFrame(np.arange(10).reshape(2, 5) + 1)
>>> df.style.highlight_quantile(axis=None, q_left=0.8, color="#fffd75")
... # doctest: +SKIP
.. figure:: ../../_static/style/hq_axNone.png
Or highlight quantiles row-wise or column-wise, in this case by row-wise
>>> df.style.highlight_quantile(axis=1, q_left=0.8, color="#fffd75")
... # doctest: +SKIP
.. figure:: ../../_static/style/hq_ax1.png
Use ``props`` instead of default background coloring
>>> df.style.highlight_quantile(
... axis=None,
... q_left=0.2,
... q_right=0.8,
... props="font-weight:bold;color:#e83e8c",
... ) # doctest: +SKIP
.. figure:: ../../_static/style/hq_props.png
"""
subset_ = slice(None) if subset is None else subset
subset_ = non_reducing_slice(subset_)
data = self.data.loc[subset_]
# after quantile is found along axis, e.g. along rows,
# applying the calculated quantile to alternate axis, e.g. to each column
quantiles = [q_left, q_right]
if axis is None:
q = Series(data.to_numpy().ravel()).quantile(
q=quantiles, interpolation=interpolation
)
axis_apply: int | None = None
else:
axis = self.data._get_axis_number(axis)
q = data.quantile(
axis=axis, numeric_only=False, q=quantiles, interpolation=interpolation
)
axis_apply = 1 - axis
if props is None:
props = f"background-color: {color};"
return self.apply(
_highlight_between,
axis=axis_apply,
subset=subset,
props=props,
left=q.iloc[0],
right=q.iloc[1],
inclusive=inclusive,
)
|
Highlight values defined by a quantile with a style.
Parameters
----------
%(subset)s
%(color)s
axis : {0 or 'index', 1 or 'columns', None}, default 0
Axis along which to determine and highlight quantiles. If ``None`` quantiles
are measured over the entire DataFrame. See examples.
q_left : float, default 0
Left bound, in [0, q_right), for the target quantile range.
q_right : float, default 1
Right bound, in (q_left, 1], for the target quantile range.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
Argument passed to ``Series.quantile`` or ``DataFrame.quantile`` for
quantile estimation.
inclusive : {'both', 'neither', 'left', 'right'}
Identify whether quantile bounds are closed or open.
%(props)s
Returns
-------
Styler
Instance of class where values in quantile highlighted with given style.
See Also
--------
Styler.highlight_null: Highlight missing values with a style.
Styler.highlight_max: Highlight the maximum with a style.
Styler.highlight_min: Highlight the minimum with a style.
Styler.highlight_between: Highlight a defined range with a style.
Notes
-----
This function does not work with ``str`` dtypes.
Examples
--------
Using ``axis=None`` and apply a quantile to all collective data
>>> df = pd.DataFrame(np.arange(10).reshape(2, 5) + 1)
>>> df.style.highlight_quantile(axis=None, q_left=0.8, color="#fffd75")
... # doctest: +SKIP
.. figure:: ../../_static/style/hq_axNone.png
Or highlight quantiles row-wise or column-wise, in this case by row-wise
>>> df.style.highlight_quantile(axis=1, q_left=0.8, color="#fffd75")
... # doctest: +SKIP
.. figure:: ../../_static/style/hq_ax1.png
Use ``props`` instead of default background coloring
>>> df.style.highlight_quantile(
... axis=None,
... q_left=0.2,
... q_right=0.8,
... props="font-weight:bold;color:#e83e8c",
... ) # doctest: +SKIP
.. figure:: ../../_static/style/hq_props.png
|
python
|
pandas/io/formats/style.py
| 3,576
|
[
"self",
"subset",
"color",
"axis",
"q_left",
"q_right",
"interpolation",
"inclusive",
"props"
] |
Styler
| true
| 5
| 8.08
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
get_freeable_input_buf
|
def get_freeable_input_buf(
nodes: list[BaseSchedulerNode],
graph_inputs: OrderedSet[str],
) -> dict[str, FreeableInputBuffer]:
"""
Create and keep track of all input buffers that can be freed during the program
Returns:
A dictionary containing all freeable input buffers, keyed by their names.
"""
def _dep_size_hint(dep: Dep) -> int:
return V.graph.get_dep_size_hint(dep)
# get freeable input buffers' successor nodes for memory lifetime (excludes is_fake WeakDeps)
# and for ordering (includes all deps)
dep_name_to_succ_nodes: dict[str, OrderedSet[BaseSchedulerNode]] = (
collections.defaultdict(OrderedSet)
)
dep_name_to_succ_nodes_for_ordering: dict[str, OrderedSet[BaseSchedulerNode]] = (
collections.defaultdict(OrderedSet)
)
dep_name_to_size: dict[str, int] = dict()
for node in nodes:
for dep in node.read_writes.reads:
if dep.name in graph_inputs:
if not is_nonfreeable_buffers(dep):
# All deps contribute to ordering, but fake weak deps do not contribute to
# memory liveness
dep_name_to_succ_nodes_for_ordering[dep.name].add(node)
dep_name_to_size[dep.name] = _dep_size_hint(dep)
if not (isinstance(dep, WeakDep) and dep.is_fake):
dep_name_to_succ_nodes[dep.name].add(node)
# create FreeableInputBuffer objects and add them to the returned dictionary
name_to_freeable_input_buf: dict[str, FreeableInputBuffer] = dict()
for dep_name in dep_name_to_succ_nodes_for_ordering:
name_to_freeable_input_buf[dep_name] = FreeableInputBuffer(
dep_name,
MemoryPlanningInfoForBuffer(
size_free=dep_name_to_size[dep_name],
succ_nodes=dep_name_to_succ_nodes[dep_name],
succ_nodes_for_ordering=dep_name_to_succ_nodes_for_ordering[dep_name],
),
)
return name_to_freeable_input_buf
|
Create and keep track of all input buffers that can be freed during the program
Returns:
A dictionary containing all freeable input buffers, keyed by their names.
|
python
|
torch/_inductor/memory.py
| 89
|
[
"nodes",
"graph_inputs"
] |
dict[str, FreeableInputBuffer]
| true
| 8
| 7.92
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
coordinatorUnknown
|
public boolean coordinatorUnknown() {
return checkAndGetCoordinator() == null;
}
|
Check if we know who the coordinator is and we have an active connection
@return true if the coordinator is unknown
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
| 973
|
[] | true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
getBeanPostProcessorCache
|
BeanPostProcessorCache getBeanPostProcessorCache() {
synchronized (this.beanPostProcessors) {
BeanPostProcessorCache bppCache = this.beanPostProcessorCache;
if (bppCache == null) {
bppCache = new BeanPostProcessorCache();
for (BeanPostProcessor bpp : this.beanPostProcessors) {
if (bpp instanceof InstantiationAwareBeanPostProcessor instantiationAwareBpp) {
bppCache.instantiationAware.add(instantiationAwareBpp);
if (bpp instanceof SmartInstantiationAwareBeanPostProcessor smartInstantiationAwareBpp) {
bppCache.smartInstantiationAware.add(smartInstantiationAwareBpp);
}
}
if (bpp instanceof DestructionAwareBeanPostProcessor destructionAwareBpp) {
bppCache.destructionAware.add(destructionAwareBpp);
}
if (bpp instanceof MergedBeanDefinitionPostProcessor mergedBeanDefBpp) {
bppCache.mergedDefinition.add(mergedBeanDefBpp);
}
}
this.beanPostProcessorCache = bppCache;
}
return bppCache;
}
}
|
Return the internal cache of pre-filtered post-processors,
freshly (re-)building it if necessary.
@since 5.3
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
| 1,011
|
[] |
BeanPostProcessorCache
| true
| 6
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
hexDigitToInt
|
public static int hexDigitToInt(final char hexChar) {
final int digit = Character.digit(hexChar, 16);
if (digit < 0) {
throw new IllegalArgumentException("Cannot convert '" + hexChar + "' to a hexadecimal digit");
}
return digit;
}
|
Converts a hexadecimal digit into an int using the default (LSB0) bit ordering.
<p>
'1' is converted to 1
</p>
@param hexChar the hexadecimal digit to convert.
@return an int equals to {@code hexDigit}.
@throws IllegalArgumentException if {@code hexDigit} is not a hexadecimal digit.
|
java
|
src/main/java/org/apache/commons/lang3/Conversion.java
| 725
|
[
"hexChar"
] | true
| 2
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
permutations
|
public static <E> Collection<List<E>> permutations(Collection<E> elements) {
return new PermutationCollection<E>(ImmutableList.copyOf(elements));
}
|
Returns a {@link Collection} of all the permutations of the specified {@link Collection}.
<p><i>Notes:</i> This is an implementation of the Plain Changes algorithm for permutations
generation, described in Knuth's "The Art of Computer Programming", Volume 4, Chapter 7,
Section 7.2.1.2.
<p>If the input list contains equal elements, some of the generated permutations will be equal.
<p>An empty collection has only one permutation, which is an empty list.
@param elements the original collection whose elements have to be permuted.
@return an immutable {@link Collection} containing all the different permutations of the
original collection.
@throws NullPointerException if the specified collection is null or has any null elements.
@since 12.0
|
java
|
android/guava/src/com/google/common/collect/Collections2.java
| 567
|
[
"elements"
] | true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
|
supportsEventType
|
@Override
@SuppressWarnings("unchecked")
public boolean supportsEventType(ResolvableType eventType) {
if (this.delegate instanceof GenericApplicationListener gal) {
return gal.supportsEventType(eventType);
}
else if (this.delegate instanceof SmartApplicationListener sal) {
Class<? extends ApplicationEvent> eventClass = (Class<? extends ApplicationEvent>) eventType.resolve();
return (eventClass != null && sal.supportsEventType(eventClass));
}
else {
return (this.declaredEventType == null || this.declaredEventType.isAssignableFrom(eventType));
}
}
|
Create a new GenericApplicationListener for the given delegate.
@param delegate the delegate listener to be invoked
|
java
|
spring-context/src/main/java/org/springframework/context/event/GenericApplicationListenerAdapter.java
| 67
|
[
"eventType"
] | true
| 5
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
first
|
def first(self, numeric_only: bool = False):
"""
Calculate the expanding First (left-most) element of the window.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
GroupBy.first : Similar method for GroupBy objects.
Expanding.last : Method to get the last element in each window.
Examples
--------
The example below will show an expanding calculation with a window size of
three.
>>> s = pd.Series(range(5))
>>> s.expanding(3).first()
0 NaN
1 NaN
2 0.0
3 0.0
4 0.0
dtype: float64
"""
return super().first(numeric_only=numeric_only)
|
Calculate the expanding First (left-most) element of the window.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
GroupBy.first : Similar method for GroupBy objects.
Expanding.last : Method to get the last element in each window.
Examples
--------
The example below will show an expanding calculation with a window size of
three.
>>> s = pd.Series(range(5))
>>> s.expanding(3).first()
0 NaN
1 NaN
2 0.0
3 0.0
4 0.0
dtype: float64
|
python
|
pandas/core/window/expanding.py
| 997
|
[
"self",
"numeric_only"
] | true
| 1
| 7.12
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
readFromSocketChannel
|
protected int readFromSocketChannel() throws IOException {
return socketChannel.read(netReadBuffer);
}
|
Reads available bytes from socket channel to `netReadBuffer`.
Visible for testing.
@return number of bytes read
|
java
|
clients/src/main/java/org/apache/kafka/common/network/SslTransportLayer.java
| 236
|
[] | true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
convertClassesToClassNames
|
public static List<String> convertClassesToClassNames(final List<Class<?>> classes) {
return classes == null ? null : classes.stream().map(e -> getName(e, null)).collect(Collectors.toList());
}
|
Given a {@link List} of {@link Class} objects, this method converts them into class names.
<p>
A new {@link List} is returned. {@code null} objects will be copied into the returned list as {@code null}.
</p>
@param classes the classes to change.
@return a {@link List} of class names corresponding to the Class objects, {@code null} if null input.
@throws ClassCastException if {@code classes} contains a non-{@link Class} entry.
|
java
|
src/main/java/org/apache/commons/lang3/ClassUtils.java
| 205
|
[
"classes"
] | true
| 2
| 8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
emptyToNull
|
public static @Nullable String emptyToNull(@Nullable String string) {
return Platform.emptyToNull(string);
}
|
Returns the given string if it is nonempty; {@code null} otherwise.
@param string the string to test and possibly return
@return {@code string} itself if it is nonempty; {@code null} if it is empty or null
|
java
|
android/guava/src/com/google/common/base/Strings.java
| 53
|
[
"string"
] |
String
| true
| 1
| 6.96
|
google/guava
| 51,352
|
javadoc
| false
|
filterProducerIds
|
public ListTransactionsOptions filterProducerIds(Collection<Long> producerIdFilters) {
this.filteredProducerIds = new HashSet<>(producerIdFilters);
return this;
}
|
Filter only the transactions from producers in a specific set of producerIds.
If no filter is specified or if the passed collection of producerIds is empty,
then the transactions of all producerIds will be returned.
@param producerIdFilters the set of producerIds to filter by
@return this object
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/ListTransactionsOptions.java
| 56
|
[
"producerIdFilters"
] |
ListTransactionsOptions
| true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
rehashIfNecessary
|
private void rehashIfNecessary() {
@Nullable Node<K, V>[] oldKToV = hashTableKToV;
if (Hashing.needsResizing(size, oldKToV.length, LOAD_FACTOR)) {
int newTableSize = oldKToV.length * 2;
this.hashTableKToV = createTable(newTableSize);
this.hashTableVToK = createTable(newTableSize);
this.mask = newTableSize - 1;
this.size = 0;
for (Node<K, V> node = firstInKeyInsertionOrder;
node != null;
node = node.nextInKeyInsertionOrder) {
insertSplicingIntoIterationOrder(
node, node.prevInKeyInsertionOrder, node.nextInKeyInsertionOrder);
}
this.modCount++;
}
}
|
Returns {@code true} if this BiMap contains an entry whose value is equal to {@code value} (or,
equivalently, if this inverse view contains a key that is equal to {@code value}).
<p>Due to the property that values in a BiMap are unique, this will tend to execute in
faster-than-linear time.
@param value the object to search for in the values of this BiMap
@return true if a mapping exists from a key to the specified value
|
java
|
guava/src/com/google/common/collect/HashBiMap.java
| 390
|
[] |
void
| true
| 3
| 7.92
|
google/guava
| 51,352
|
javadoc
| false
|
setAttribute
|
@Override
public void setAttribute(Traceable traceable, String key, boolean value) {
final var span = Span.fromContextOrNull(spans.get(traceable.getSpanId()));
if (span != null) {
span.setAttribute(key, value);
}
}
|
Most of the examples of how to use the OTel API look something like this, where the span context
is automatically propagated:
<pre>{@code
Span span = tracer.spanBuilder("parent").startSpan();
try (Scope scope = parentSpan.makeCurrent()) {
// ...do some stuff, possibly creating further spans
} finally {
span.end();
}
}</pre>
This typically isn't useful in Elasticsearch, because a {@link Scope} can't be used across threads.
However, if a scope is active, then the APM agent can capture additional information, so this method
exists to make it possible to use scopes in the few situation where it makes sense.
@param traceable provides the ID of a currently-open span for which to open a scope.
@return a method to close the scope when you are finished with it.
|
java
|
modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracer.java
| 356
|
[
"traceable",
"key",
"value"
] |
void
| true
| 2
| 7.92
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
equals
|
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
LoggerConfiguration other = (LoggerConfiguration) obj;
return ObjectUtils.nullSafeEquals(this.name, other.name)
&& ObjectUtils.nullSafeEquals(this.levelConfiguration, other.levelConfiguration)
&& ObjectUtils.nullSafeEquals(this.inheritedLevelConfiguration, other.inheritedLevelConfiguration);
}
|
Return the level configuration for the given scope.
@param scope the configuration scope
@return the level configuration or {@code null} for
{@link ConfigurationScope#DIRECT direct scope} results without applied
configuration
@since 2.7.13
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/LoggerConfiguration.java
| 121
|
[
"obj"
] | true
| 6
| 7.44
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
join
|
public static String join(final Iterator<?> iterator, final char separator) {
// handle null, zero and one elements before building a buffer
if (iterator == null) {
return null;
}
if (!iterator.hasNext()) {
return EMPTY;
}
return Streams.of(iterator).collect(LangCollectors.joining(ObjectUtils.toString(String.valueOf(separator)), EMPTY, EMPTY, ObjectUtils::toString));
}
|
Joins the elements of the provided {@link Iterator} into a single String containing the provided elements.
<p>
No delimiter is added before or after the list. Null objects or empty strings within the iteration are represented by empty strings.
</p>
<p>
See the examples here: {@link #join(Object[],char)}.
</p>
@param iterator the {@link Iterator} of values to join together, may be null.
@param separator the separator character to use.
@return the joined String, {@code null} if null iterator input.
@since 2.0
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 4,303
|
[
"iterator",
"separator"
] |
String
| true
| 3
| 8.4
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
typesSatisfyVariables
|
public static boolean typesSatisfyVariables(final Map<TypeVariable<?>, Type> typeVariableMap) {
Objects.requireNonNull(typeVariableMap, "typeVariableMap");
// all types must be assignable to all the bounds of their mapped
// type variable.
for (final Map.Entry<TypeVariable<?>, Type> entry : typeVariableMap.entrySet()) {
final TypeVariable<?> typeVar = entry.getKey();
final Type type = entry.getValue();
for (final Type bound : getImplicitBounds(typeVar)) {
if (!isAssignable(type, substituteTypeVariables(bound, typeVariableMap), typeVariableMap)) {
return false;
}
}
}
return true;
}
|
Determines whether or not specified types satisfy the bounds of their mapped type variables. When a type parameter extends another (such as
{@code <T, S extends T>}), uses another as a type parameter (such as {@code <T, S extends Comparable>>}), or otherwise depends on another type variable
to be specified, the dependencies must be included in {@code typeVarAssigns}.
@param typeVariableMap specifies the potential types to be assigned to the type variables, not {@code null}.
@return whether or not the types can be assigned to their respective type variables.
@throws NullPointerException if {@code typeVariableMap} is {@code null}.
|
java
|
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
| 1,566
|
[
"typeVariableMap"
] | true
| 2
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
getAspectInstance
|
@Override
public final Object getAspectInstance() {
try {
return ReflectionUtils.accessibleConstructor(this.aspectClass).newInstance();
}
catch (NoSuchMethodException ex) {
throw new AopConfigException(
"No default constructor on aspect class: " + this.aspectClass.getName(), ex);
}
catch (InstantiationException ex) {
throw new AopConfigException(
"Unable to instantiate aspect class: " + this.aspectClass.getName(), ex);
}
catch (IllegalAccessException | InaccessibleObjectException ex) {
throw new AopConfigException(
"Could not access aspect constructor: " + this.aspectClass.getName(), ex);
}
catch (InvocationTargetException ex) {
throw new AopConfigException(
"Failed to invoke aspect constructor: " + this.aspectClass.getName(), ex.getTargetException());
}
}
|
Return the specified aspect class (never {@code null}).
|
java
|
spring-aop/src/main/java/org/springframework/aop/aspectj/SimpleAspectInstanceFactory.java
| 58
|
[] |
Object
| true
| 5
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
trim
|
public static String trim(final String str) {
return str == null ? null : str.trim();
}
|
Removes control characters (char <= 32) from both ends of this String, handling {@code null} by returning {@code null}.
<p>
The String is trimmed using {@link String#trim()}. Trim removes start and end characters <= 32. To strip whitespace use {@link #strip(String)}.
</p>
<p>
To trim your choice of characters, use the {@link #strip(String, String)} methods.
</p>
<pre>
StringUtils.trim(null) = null
StringUtils.trim("") = ""
StringUtils.trim(" ") = ""
StringUtils.trim("abc") = "abc"
StringUtils.trim(" abc ") = "abc"
</pre>
@param str the String to be trimmed, may be null.
@return the trimmed string, {@code null} if null String input.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 8,724
|
[
"str"
] |
String
| true
| 2
| 7.84
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
might_contain_dag_via_default_heuristic
|
def might_contain_dag_via_default_heuristic(file_path: str, zip_file: zipfile.ZipFile | None = None) -> bool:
"""
Heuristic that guesses whether a Python file contains an Airflow DAG definition.
:param file_path: Path to the file to be checked.
:param zip_file: if passed, checks the archive. Otherwise, check local filesystem.
:return: True, if file might contain DAGs.
"""
if zip_file:
with zip_file.open(file_path) as current_file:
content = current_file.read()
else:
if zipfile.is_zipfile(file_path):
return True
with open(file_path, "rb") as dag_file:
content = dag_file.read()
content = content.lower()
if b"airflow" not in content:
return False
return any(s in content for s in (b"dag", b"asset"))
|
Heuristic that guesses whether a Python file contains an Airflow DAG definition.
:param file_path: Path to the file to be checked.
:param zip_file: if passed, checks the archive. Otherwise, check local filesystem.
:return: True, if file might contain DAGs.
|
python
|
airflow-core/src/airflow/utils/file.py
| 307
|
[
"file_path",
"zip_file"
] |
bool
| true
| 5
| 8.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
getFuture
|
public synchronized Future<T> getFuture() {
if (future == null) {
throw new IllegalStateException("start() must be called first!");
}
return future;
}
|
Gets the {@link Future} object that was created when {@link #start()}
was called. Therefore this method can only be called after {@code
start()}.
@return the {@link Future} object wrapped by this initializer.
@throws IllegalStateException if {@link #start()} has not been called.
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/BackgroundInitializer.java
| 300
|
[] | true
| 2
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
getCache
|
private static ConcurrentMap<Locale, Strategy> getCache(final int field) {
synchronized (CACHES) {
if (CACHES[field] == null) {
CACHES[field] = new ConcurrentHashMap<>(3);
}
return CACHES[field];
}
}
|
Gets a cache of Strategies for a particular field
@param field The Calendar field
@return a cache of Locale to Strategy
|
java
|
src/main/java/org/apache/commons/lang3/time/FastDateParser.java
| 753
|
[
"field"
] | true
| 2
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
deduceEnvironmentClass
|
private Class<? extends ConfigurableEnvironment> deduceEnvironmentClass() {
WebApplicationType webApplicationType = this.properties.getWebApplicationType();
Class<? extends ConfigurableEnvironment> environmentType = this.applicationContextFactory
.getEnvironmentType(webApplicationType);
if (environmentType == null && this.applicationContextFactory != ApplicationContextFactory.DEFAULT) {
environmentType = ApplicationContextFactory.DEFAULT.getEnvironmentType(webApplicationType);
}
return (environmentType != null) ? environmentType : ApplicationEnvironment.class;
}
|
Run the Spring application, creating and refreshing a new
{@link ApplicationContext}.
@param args the application arguments (usually passed from a Java main method)
@return a running {@link ApplicationContext}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringApplication.java
| 370
|
[] | true
| 4
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
load_connections_dict
|
def load_connections_dict(file_path: str) -> dict[str, Any]:
"""
Load connection from text file.
``JSON``, `YAML` and ``.env`` files are supported.
:return: A dictionary where the key contains a connection ID and the value contains the connection.
"""
log.debug("Loading connection")
secrets: dict[str, Any] = _parse_secret_file(file_path)
connection_by_conn_id = {}
for key, secret_values in list(secrets.items()):
if isinstance(secret_values, list):
if len(secret_values) > 1:
raise ConnectionNotUnique(f"Found multiple values for {key} in {file_path}.")
for secret_value in secret_values:
connection_by_conn_id[key] = _create_connection(key, secret_value)
else:
connection_by_conn_id[key] = _create_connection(key, secret_values)
num_conn = len(connection_by_conn_id)
log.debug("Loaded %d connections", num_conn)
return connection_by_conn_id
|
Load connection from text file.
``JSON``, `YAML` and ``.env`` files are supported.
:return: A dictionary where the key contains a connection ID and the value contains the connection.
|
python
|
airflow-core/src/airflow/secrets/local_filesystem.py
| 253
|
[
"file_path"
] |
dict[str, Any]
| true
| 6
| 7.04
|
apache/airflow
| 43,597
|
unknown
| false
|
identity
|
function identity(value) {
return value;
}
|
This method returns the first argument it receives.
@static
@since 0.1.0
@memberOf _
@category Util
@param {*} value Any value.
@returns {*} Returns `value`.
@example
var object = { 'a': 1 };
console.log(_.identity(object) === object);
// => true
|
javascript
|
lodash.js
| 15,596
|
[
"value"
] | false
| 1
| 6.24
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
snap
|
def snap(self, freq: Frequency = "S") -> DatetimeIndex:
"""
Snap time stamps to nearest occurring frequency.
Parameters
----------
freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'S'
Frequency strings can have multiples, e.g. '5h'. See
:ref:`here <timeseries.offset_aliases>` for a list of
frequency aliases.
Returns
-------
DatetimeIndex
Time stamps to nearest occurring `freq`.
See Also
--------
DatetimeIndex.round : Perform round operation on the data to the
specified `freq`.
DatetimeIndex.floor : Perform floor operation on the data to the
specified `freq`.
Examples
--------
>>> idx = pd.DatetimeIndex(
... ["2023-01-01", "2023-01-02", "2023-02-01", "2023-02-02"],
... dtype="M8[ns]",
... )
>>> idx
DatetimeIndex(['2023-01-01', '2023-01-02', '2023-02-01', '2023-02-02'],
dtype='datetime64[ns]', freq=None)
>>> idx.snap("MS")
DatetimeIndex(['2023-01-01', '2023-01-01', '2023-02-01', '2023-02-01'],
dtype='datetime64[ns]', freq=None)
"""
# Superdumb, punting on any optimizing
freq = to_offset(freq)
dta = self._data.copy()
for i, v in enumerate(self):
s = v
if not freq.is_on_offset(s):
t0 = freq.rollback(s)
t1 = freq.rollforward(s)
if abs(s - t0) < abs(t1 - s):
s = t0
else:
s = t1
dta[i] = s
return DatetimeIndex._simple_new(dta, name=self.name)
|
Snap time stamps to nearest occurring frequency.
Parameters
----------
freq : str, Timedelta, datetime.timedelta, or DateOffset, default 'S'
Frequency strings can have multiples, e.g. '5h'. See
:ref:`here <timeseries.offset_aliases>` for a list of
frequency aliases.
Returns
-------
DatetimeIndex
Time stamps to nearest occurring `freq`.
See Also
--------
DatetimeIndex.round : Perform round operation on the data to the
specified `freq`.
DatetimeIndex.floor : Perform floor operation on the data to the
specified `freq`.
Examples
--------
>>> idx = pd.DatetimeIndex(
... ["2023-01-01", "2023-01-02", "2023-02-01", "2023-02-02"],
... dtype="M8[ns]",
... )
>>> idx
DatetimeIndex(['2023-01-01', '2023-01-02', '2023-02-01', '2023-02-02'],
dtype='datetime64[ns]', freq=None)
>>> idx.snap("MS")
DatetimeIndex(['2023-01-01', '2023-01-01', '2023-02-01', '2023-02-01'],
dtype='datetime64[ns]', freq=None)
|
python
|
pandas/core/indexes/datetimes.py
| 823
|
[
"self",
"freq"
] |
DatetimeIndex
| true
| 5
| 8.16
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
addGenericArgumentValue
|
public void addGenericArgumentValue(ValueHolder newValue) {
Assert.notNull(newValue, "ValueHolder must not be null");
if (!this.genericArgumentValues.contains(newValue)) {
addOrMergeGenericArgumentValue(newValue);
}
}
|
Add a generic argument value to be matched by type or name (if available).
<p>Note: A single generic argument value will just be used once,
rather than matched multiple times.
@param newValue the argument value in the form of a ValueHolder
<p>Note: Identical ValueHolder instances will only be registered once,
to allow for merging and re-merging of argument value definitions. Distinct
ValueHolder instances carrying the same content are of course allowed.
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/ConstructorArgumentValues.java
| 214
|
[
"newValue"
] |
void
| true
| 2
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
setBeanFactory
|
@Override
public void setBeanFactory(BeanFactory beanFactory) {
if (!StringUtils.hasText(this.targetBeanName)) {
throw new IllegalArgumentException("Property 'targetBeanName' is required");
}
if (!StringUtils.hasText(this.methodName)) {
throw new IllegalArgumentException("Property 'methodName' is required");
}
Class<?> beanClass = beanFactory.getType(this.targetBeanName);
if (beanClass == null) {
throw new IllegalArgumentException("Can't determine type of bean with name '" + this.targetBeanName + "'");
}
this.method = BeanUtils.resolveSignature(this.methodName, beanClass);
if (this.method == null) {
throw new IllegalArgumentException("Unable to locate method [" + this.methodName +
"] on bean [" + this.targetBeanName + "]");
}
}
|
Set the name of the {@link Method} to locate.
<p>This property is required.
@param methodName the name of the {@link Method} to locate
|
java
|
spring-aop/src/main/java/org/springframework/aop/config/MethodLocatingFactoryBean.java
| 62
|
[
"beanFactory"
] |
void
| true
| 5
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
memoize
|
def memoize(
self,
custom_params_encoder: Callable[_P, object] | None = None,
custom_result_encoder: Callable[_P, Callable[[_R], _EncodedR]] | None = None,
custom_result_decoder: Callable[_P, Callable[[_EncodedR], _R]] | None = None,
) -> Callable[[Callable[_P, _R]], Callable[_P, _R]]:
"""Memoize a function with record and replay functionality.
This is a decorator that attempts to replay cached results first.
If a cache miss occurs, it records the result by executing the wrapped function.
Args:
custom_params_encoder: Optional encoder for function parameters.
If None, parameters are pickled directly.
custom_result_encoder: Optional encoder factory for function results.
Takes function parameters and returns an encoder
function that converts R -> _EncodedR.
custom_result_decoder: Optional decoder factory for cached results.
Takes function parameters and returns a decoder
function that converts _EncodedR -> R.
Returns:
A decorator function that can be applied to functions.
Example:
@memoizer.memoize(
custom_params_encoder=my_param_encoder,
custom_result_encoder=my_result_encoder_factory,
custom_result_decoder=my_result_decoder_factory,
)
def expensive_function(x, y):
return x + y
"""
def wrapper(fn: Callable[_P, _R]) -> Callable[_P, _R]:
"""Wrap the function to enable memoization with replay and record.
Args:
fn: The function to wrap.
Returns:
A wrapped version of the function.
"""
# If caching is disabled, return the original function unchanged
if not config.IS_CACHING_MODULE_ENABLED():
return fn
# Create decorated versions using record and replay
replay_fn = self.replay(
custom_params_encoder,
custom_result_decoder,
)(fn)
record_fn = self.record(
custom_params_encoder,
custom_result_encoder,
)(fn)
@functools.wraps(fn)
def inner(*args: _P.args, **kwargs: _P.kwargs) -> _R:
"""Attempt to replay from cache, or record on cache miss.
Args:
*args: Positional arguments to pass to the function.
**kwargs: Keyword arguments to pass to the function.
Returns:
The result from cache (if hit) or from executing the function (if miss).
"""
# Try to replay first
try:
return replay_fn(*args, **kwargs)
except KeyError:
# Cache miss - record the result
return record_fn(*args, **kwargs)
return inner
return wrapper
|
Memoize a function with record and replay functionality.
This is a decorator that attempts to replay cached results first.
If a cache miss occurs, it records the result by executing the wrapped function.
Args:
custom_params_encoder: Optional encoder for function parameters.
If None, parameters are pickled directly.
custom_result_encoder: Optional encoder factory for function results.
Takes function parameters and returns an encoder
function that converts R -> _EncodedR.
custom_result_decoder: Optional decoder factory for cached results.
Takes function parameters and returns a decoder
function that converts _EncodedR -> R.
Returns:
A decorator function that can be applied to functions.
Example:
@memoizer.memoize(
custom_params_encoder=my_param_encoder,
custom_result_encoder=my_result_encoder_factory,
custom_result_decoder=my_result_decoder_factory,
)
def expensive_function(x, y):
return x + y
|
python
|
torch/_inductor/runtime/caching/interfaces.py
| 154
|
[
"self",
"custom_params_encoder",
"custom_result_encoder",
"custom_result_decoder"
] |
Callable[[Callable[_P, _R]], Callable[_P, _R]]
| true
| 2
| 9.12
|
pytorch/pytorch
| 96,034
|
google
| false
|
fromBigInteger
|
private static InetAddress fromBigInteger(BigInteger address, boolean isIpv6) {
checkArgument(address.signum() >= 0, "BigInteger must be greater than or equal to 0");
int numBytes = isIpv6 ? 16 : 4;
byte[] addressBytes = address.toByteArray();
byte[] targetCopyArray = new byte[numBytes];
int srcPos = max(0, addressBytes.length - numBytes);
int copyLength = addressBytes.length - srcPos;
int destPos = numBytes - copyLength;
// Check the extra bytes in the BigInteger are all zero.
for (int i = 0; i < srcPos; i++) {
if (addressBytes[i] != 0x00) {
throw formatIllegalArgumentException(
"BigInteger cannot be converted to InetAddress because it has more than %d"
+ " bytes: %s",
numBytes, address);
}
}
// Copy the bytes into the least significant positions.
System.arraycopy(addressBytes, srcPos, targetCopyArray, destPos, copyLength);
try {
return InetAddress.getByAddress(targetCopyArray);
} catch (UnknownHostException impossible) {
throw new AssertionError(impossible);
}
}
|
Converts a BigInteger to either an IPv4 or IPv6 address. If the IP is IPv4, it must be
constrained to 32 bits, otherwise it is constrained to 128 bits.
@param address the address represented as a big integer
@param isIpv6 whether the created address should be IPv4 or IPv6
@return the BigInteger converted to an address
@throws IllegalArgumentException if the BigInteger is not between 0 and maximum value for IPv4
or IPv6 respectively
|
java
|
android/guava/src/com/google/common/net/InetAddresses.java
| 1,120
|
[
"address",
"isIpv6"
] |
InetAddress
| true
| 5
| 7.92
|
google/guava
| 51,352
|
javadoc
| false
|
randomInt
|
public int randomInt() {
return randomInt(0, Integer.MAX_VALUE);
}
|
Generates a random int between 0 (inclusive) and Integer.MAX_VALUE (exclusive).
@return the random integer.
@see #randomInt(int, int)
@since 3.16.0
|
java
|
src/main/java/org/apache/commons/lang3/RandomUtils.java
| 386
|
[] | true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
add_job_flow_steps
|
def add_job_flow_steps(
self,
job_flow_id: str,
steps: list[dict] | str | None = None,
wait_for_completion: bool = False,
waiter_delay: int | None = None,
waiter_max_attempts: int | None = None,
execution_role_arn: str | None = None,
) -> list[str]:
"""
Add new steps to a running cluster.
.. seealso::
- :external+boto3:py:meth:`EMR.Client.add_job_flow_steps`
:param job_flow_id: The id of the job flow to which the steps are being added
:param steps: A list of the steps to be executed by the job flow
:param wait_for_completion: If True, wait for the steps to be completed. Default is False
:param waiter_delay: The amount of time in seconds to wait between attempts. Default is 5
:param waiter_max_attempts: The maximum number of attempts to be made. Default is 100
:param execution_role_arn: The ARN of the runtime role for a step on the cluster.
"""
config = {}
waiter_delay = waiter_delay or 30
waiter_max_attempts = waiter_max_attempts or 60
if execution_role_arn:
config["ExecutionRoleArn"] = execution_role_arn
response = self.get_conn().add_job_flow_steps(JobFlowId=job_flow_id, Steps=steps, **config)
if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise AirflowException(f"Adding steps failed: {response}")
self.log.info("Steps %s added to JobFlow", response["StepIds"])
if wait_for_completion:
waiter = self.get_conn().get_waiter("step_complete")
for step_id in response["StepIds"]:
try:
wait(
waiter=waiter,
waiter_max_attempts=waiter_max_attempts,
waiter_delay=waiter_delay,
args={"ClusterId": job_flow_id, "StepId": step_id},
failure_message=f"EMR Steps failed: {step_id}",
status_message="EMR Step status is",
status_args=["Step.Status.State", "Step.Status.StateChangeReason"],
)
except AirflowException as ex:
if "EMR Steps failed" in str(ex):
resp = self.get_conn().describe_step(ClusterId=job_flow_id, StepId=step_id)
failure_details = resp["Step"]["Status"].get("FailureDetails", None)
if failure_details:
self.log.error("EMR Steps failed: %s", failure_details)
raise
return response["StepIds"]
|
Add new steps to a running cluster.
.. seealso::
- :external+boto3:py:meth:`EMR.Client.add_job_flow_steps`
:param job_flow_id: The id of the job flow to which the steps are being added
:param steps: A list of the steps to be executed by the job flow
:param wait_for_completion: If True, wait for the steps to be completed. Default is False
:param waiter_delay: The amount of time in seconds to wait between attempts. Default is 5
:param waiter_max_attempts: The maximum number of attempts to be made. Default is 100
:param execution_role_arn: The ARN of the runtime role for a step on the cluster.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/emr.py
| 139
|
[
"self",
"job_flow_id",
"steps",
"wait_for_completion",
"waiter_delay",
"waiter_max_attempts",
"execution_role_arn"
] |
list[str]
| true
| 9
| 6.88
|
apache/airflow
| 43,597
|
sphinx
| false
|
print
|
String print(T object, Locale locale);
|
Print the object of type T for display.
@param object the instance to print
@param locale the current user locale
@return the printed text string
|
java
|
spring-context/src/main/java/org/springframework/format/Printer.java
| 37
|
[
"object",
"locale"
] |
String
| true
| 1
| 6.8
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
fstat
|
function fstat(fd, options = { bigint: false }, callback) {
if (typeof options === 'function') {
callback = options;
options = kEmptyObject;
}
callback = makeStatsCallback(callback);
const req = new FSReqCallback(options.bigint);
req.oncomplete = callback;
binding.fstat(fd, options.bigint, req);
}
|
Invokes the callback with the `fs.Stats`
for the file descriptor.
@param {number} fd
@param {{ bigint?: boolean; }} [options]
@param {(
err?: Error,
stats?: Stats
) => any} [callback]
@returns {void}
|
javascript
|
lib/fs.js
| 1,566
|
[
"fd",
"callback"
] | false
| 2
| 6.08
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
unsubscribe
|
@Override
public void unsubscribe() {
acquireAndEnsureOpen();
try {
fetchBuffer.retainAll(Collections.emptySet());
Timer timer = time.timer(defaultApiTimeoutMs);
UnsubscribeEvent unsubscribeEvent = new UnsubscribeEvent(calculateDeadlineMs(timer));
applicationEventHandler.add(unsubscribeEvent);
log.info("Unsubscribing all topics or patterns and assigned partitions {}",
subscriptions.assignedPartitions());
try {
// If users have fatal error, they will get some exceptions in the background queue.
// When running unsubscribe, these exceptions should be ignored, or users can't unsubscribe successfully.
processBackgroundEvents(unsubscribeEvent.future(), timer, e -> (e instanceof GroupAuthorizationException || e instanceof TopicAuthorizationException));
log.info("Unsubscribed all topics or patterns and assigned partitions");
} catch (TimeoutException e) {
log.error("Failed while waiting for the unsubscribe event to complete");
}
resetGroupMetadata();
} catch (Exception e) {
log.error("Unsubscribe failed", e);
throw e;
} finally {
release();
}
}
|
Get the current subscription. or an empty set if no such call has
been made.
@return The set of topics currently subscribed to
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java
| 1,829
|
[] |
void
| true
| 4
| 8.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
poll
|
public void poll(Timer timer, PollCondition pollCondition, boolean disableWakeup) {
// there may be handlers which need to be invoked if we woke up the previous call to poll
firePendingCompletedRequests();
lock.lock();
try {
// Handle async disconnects prior to attempting any sends
handlePendingDisconnects();
// send all the requests we can send now
long pollDelayMs = trySend(timer.currentTimeMs());
// check whether the poll is still needed by the caller. Note that if the expected completion
// condition becomes satisfied after the call to shouldBlock() (because of a fired completion
// handler), the client will be woken up.
if (pendingCompletion.isEmpty() && (pollCondition == null || pollCondition.shouldBlock())) {
// if there are no requests in flight, do not block longer than the retry backoff
long pollTimeout = Math.min(timer.remainingMs(), pollDelayMs);
if (client.inFlightRequestCount() == 0)
pollTimeout = Math.min(pollTimeout, retryBackoffMs);
client.poll(pollTimeout, timer.currentTimeMs());
} else {
client.poll(0, timer.currentTimeMs());
}
timer.update();
// handle any disconnects by failing the active requests. note that disconnects must
// be checked immediately following poll since any subsequent call to client.ready()
// will reset the disconnect status
checkDisconnects(timer.currentTimeMs());
if (!disableWakeup) {
// trigger wakeups after checking for disconnects so that the callbacks will be ready
// to be fired on the next call to poll()
maybeTriggerWakeup();
}
// throw InterruptException if this thread is interrupted
maybeThrowInterruptException();
// try again to send requests since buffer space may have been
// cleared or a connect finished in the poll
trySend(timer.currentTimeMs());
// fail requests that couldn't be sent if they have expired
failExpiredRequests(timer.currentTimeMs());
// clean unsent requests collection to keep the map from growing indefinitely
unsent.clean();
} finally {
lock.unlock();
}
// called without the lock to avoid deadlock potential if handlers need to acquire locks
firePendingCompletedRequests();
metadata.maybeThrowAnyException();
}
|
Poll for any network IO.
@param timer Timer bounding how long this method can block
@param pollCondition Nullable blocking condition
@param disableWakeup If TRUE disable triggering wake-ups
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java
| 262
|
[
"timer",
"pollCondition",
"disableWakeup"
] |
void
| true
| 6
| 6.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
chebpow
|
def chebpow(c, pow, maxpower=16):
"""Raise a Chebyshev series to a power.
Returns the Chebyshev series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2.``
Parameters
----------
c : array_like
1-D array of Chebyshev series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Chebyshev series of power.
See Also
--------
chebadd, chebsub, chebmulx, chebmul, chebdiv
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> C.chebpow([1, 2, 3, 4], 2)
array([15.5, 22. , 16. , ..., 12.5, 12. , 8. ])
"""
# note: this is more efficient than `pu._pow(chebmul, c1, c2)`, as it
# avoids converting between z and c series repeatedly
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
zs = _cseries_to_zseries(c)
prd = zs
for i in range(2, power + 1):
prd = np.convolve(prd, zs)
return _zseries_to_cseries(prd)
|
Raise a Chebyshev series to a power.
Returns the Chebyshev series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2.``
Parameters
----------
c : array_like
1-D array of Chebyshev series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Chebyshev series of power.
See Also
--------
chebadd, chebsub, chebmulx, chebmul, chebdiv
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> C.chebpow([1, 2, 3, 4], 2)
array([15.5, 22. , 16. , ..., 12.5, 12. , 8. ])
|
python
|
numpy/polynomial/chebyshev.py
| 814
|
[
"c",
"pow",
"maxpower"
] | false
| 9
| 7.84
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
_set_axis_name
|
def _set_axis_name(
self, name, axis: Axis = 0, *, inplace: bool = False
) -> Self | None:
"""
Set the name(s) of the axis.
Parameters
----------
name : str or list of str
Name(s) to set.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to set the label. The value 0 or 'index' specifies index,
and the value 1 or 'columns' specifies columns.
inplace : bool, default False
If `True`, do operation inplace and return None.
Returns
-------
Series, DataFrame, or None
The same type as the caller or `None` if `inplace` is `True`.
See Also
--------
DataFrame.rename : Alter the axis labels of :class:`DataFrame`.
Series.rename : Alter the index labels or set the index name
of :class:`Series`.
Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.
Examples
--------
>>> df = pd.DataFrame({"num_legs": [4, 4, 2]}, ["dog", "cat", "monkey"])
>>> df
num_legs
dog 4
cat 4
monkey 2
>>> df._set_axis_name("animal")
num_legs
animal
dog 4
cat 4
monkey 2
>>> df.index = pd.MultiIndex.from_product(
... [["mammal"], ["dog", "cat", "monkey"]]
... )
>>> df._set_axis_name(["type", "name"])
num_legs
type name
mammal dog 4
cat 4
monkey 2
"""
axis = self._get_axis_number(axis)
idx = self._get_axis(axis).set_names(name)
inplace = validate_bool_kwarg(inplace, "inplace")
renamed = self if inplace else self.copy(deep=False)
if axis == 0:
renamed.index = idx
else:
renamed.columns = idx
if not inplace:
return renamed
return None
|
Set the name(s) of the axis.
Parameters
----------
name : str or list of str
Name(s) to set.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to set the label. The value 0 or 'index' specifies index,
and the value 1 or 'columns' specifies columns.
inplace : bool, default False
If `True`, do operation inplace and return None.
Returns
-------
Series, DataFrame, or None
The same type as the caller or `None` if `inplace` is `True`.
See Also
--------
DataFrame.rename : Alter the axis labels of :class:`DataFrame`.
Series.rename : Alter the index labels or set the index name
of :class:`Series`.
Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.
Examples
--------
>>> df = pd.DataFrame({"num_legs": [4, 4, 2]}, ["dog", "cat", "monkey"])
>>> df
num_legs
dog 4
cat 4
monkey 2
>>> df._set_axis_name("animal")
num_legs
animal
dog 4
cat 4
monkey 2
>>> df.index = pd.MultiIndex.from_product(
... [["mammal"], ["dog", "cat", "monkey"]]
... )
>>> df._set_axis_name(["type", "name"])
num_legs
type name
mammal dog 4
cat 4
monkey 2
|
python
|
pandas/core/generic.py
| 1,296
|
[
"self",
"name",
"axis",
"inplace"
] |
Self | None
| true
| 5
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
close
|
synchronized void close() {
KafkaException shutdownException = new KafkaException("The producer closed forcefully");
pendingRequests.forEach(handler ->
handler.fatalError(shutdownException));
if (pendingTransition != null) {
pendingTransition.result.fail(shutdownException);
}
}
|
Returns the first inflight sequence for a given partition. This is the base sequence of an inflight batch with
the lowest sequence number.
@return the lowest inflight sequence if the transaction manager is tracking inflight requests for this partition.
If there are no inflight requests being tracked for this partition, this method will return
RecordBatch.NO_SEQUENCE.
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java
| 951
|
[] |
void
| true
| 2
| 7.92
|
apache/kafka
| 31,560
|
javadoc
| false
|
createRequest
|
abstract AbstractRequest.Builder<?> createRequest(int timeoutMs);
|
Create an AbstractRequest.Builder for this Call.
@param timeoutMs The timeout in milliseconds.
@return The AbstractRequest builder.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
| 972
|
[
"timeoutMs"
] | true
| 1
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
estimateSum
|
public static double estimateSum(BucketIterator negativeBuckets, BucketIterator positiveBuckets) {
assert negativeBuckets.scale() == positiveBuckets.scale();
// for each bucket index, sum up the counts, but account for the positive/negative sign
BucketIterator it = new MergingBucketIterator(
positiveBuckets,
negativeBuckets,
positiveBuckets.scale(),
(positiveCount, negativeCount) -> positiveCount - negativeCount
);
double sum = 0.0;
while (it.hasNext()) {
long countWithSign = it.peekCount();
double bucketMidPoint = ExponentialScaleUtils.getPointOfLeastRelativeError(it.peekIndex(), it.scale());
if (countWithSign != 0) { // avoid 0 * INFINITY = NaN
double toAdd = bucketMidPoint * countWithSign;
if (Double.isFinite(toAdd)) {
sum += toAdd;
} else {
// Avoid NaN in case we end up with e.g. -Infinity+Infinity
// we consider the bucket with the bigger index the winner for the sign
sum = toAdd;
}
}
it.advance();
}
return sum;
}
|
Estimates the sum of all values of a histogram just based on the populated buckets.
Will never return NaN, but might return +/-Infinity if the histogram is too big.
@param negativeBuckets the negative buckets of the histogram
@param positiveBuckets the positive buckets of the histogram
@return the estimated sum of all values in the histogram, guaranteed to be zero if there are no buckets.
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogramUtils.java
| 37
|
[
"negativeBuckets",
"positiveBuckets"
] | true
| 4
| 7.92
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
nullToEmpty
|
public static long[] nullToEmpty(final long[] array) {
return isEmpty(array) ? EMPTY_LONG_ARRAY : array;
}
|
Defensive programming technique to change a {@code null}
reference to an empty one.
<p>
This method returns an empty array for a {@code null} input array.
</p>
<p>
As a memory optimizing technique an empty array passed in will be overridden with
the empty {@code public static} references in this class.
</p>
@param array the array to check for {@code null} or empty.
@return the same array, {@code public static} empty array if {@code null} or empty input.
@since 2.5
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 4,527
|
[
"array"
] | true
| 2
| 8.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.