| function_name (string, 1-57 chars) | function_code (string, 20-4.99k chars) | documentation (string, 50-2k chars) | language (string, 5 classes) | file_path (string, 8-166 chars) | line_number (int32, 4-16.7k) | parameters (list, 0-20 items) | return_type (string, 0-131 chars) | has_type_hints (bool, 2 classes) | complexity (int32, 1-51) | quality_score (float32, 6-9.68) | repo_name (string, 34 classes) | repo_stars (int32, 2.9k-242k) | docstring_style (string, 7 classes) | is_async (bool, 2 classes) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
make_sparse_spd_matrix
|
def make_sparse_spd_matrix(
n_dim=1,
*,
alpha=0.95,
norm_diag=False,
smallest_coef=0.1,
largest_coef=0.9,
sparse_format=None,
random_state=None,
):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int, default=1
The size of the random matrix to generate.
.. versionchanged:: 1.4
Renamed from ``dim`` to ``n_dim``.
alpha : float, default=0.95
The probability that a coefficient is zero (see notes). Larger values
enforce more sparsity. The value should be in the range [0, 1].
norm_diag : bool, default=False
Whether to normalize the output matrix to make the leading diagonal
elements all 1.
smallest_coef : float, default=0.1
The value of the smallest coefficient between 0 and 1.
largest_coef : float, default=0.9
The value of the largest coefficient between 0 and 1.
sparse_format : str, default=None
String representing the output sparse format, such as 'csc', 'csr', etc.
If ``None``, return a dense numpy ndarray.
.. versionadded:: 1.4
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
prec : ndarray or sparse matrix of shape (n_dim, n_dim)
The generated matrix. If ``sparse_format=None``, this will be an ndarray.
Otherwise, this will be a sparse matrix of the specified format.
See Also
--------
make_spd_matrix : Generate a random symmetric, positive-definite matrix.
Notes
-----
The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
Examples
--------
>>> from sklearn.datasets import make_sparse_spd_matrix
>>> make_sparse_spd_matrix(n_dim=4, norm_diag=False, random_state=42)
array([[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.]])
"""
random_state = check_random_state(random_state)
chol = -sp.eye(n_dim)
aux = sp.random(
m=n_dim,
n=n_dim,
density=1 - alpha,
data_rvs=lambda x: random_state.uniform(
low=smallest_coef, high=largest_coef, size=x
),
random_state=random_state,
)
# We need to avoid "coo" format because it does not support slicing
aux = sp.tril(aux, k=-1, format="csc")
# Permute the rows: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(n_dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = chol.T @ chol
if norm_diag:
# Form the diagonal vector into a row matrix
d = sp.diags(1.0 / np.sqrt(prec.diagonal()))
prec = d @ prec @ d
if sparse_format is None:
return prec.toarray()
else:
return prec.asformat(sparse_format)
|
Generate a sparse symmetric positive definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int, default=1
The size of the random matrix to generate.
.. versionchanged:: 1.4
Renamed from ``dim`` to ``n_dim``.
alpha : float, default=0.95
The probability that a coefficient is zero (see notes). Larger values
enforce more sparsity. The value should be in the range [0, 1].
norm_diag : bool, default=False
Whether to normalize the output matrix to make the leading diagonal
elements all 1.
smallest_coef : float, default=0.1
The value of the smallest coefficient between 0 and 1.
largest_coef : float, default=0.9
The value of the largest coefficient between 0 and 1.
sparse_format : str, default=None
String representing the output sparse format, such as 'csc', 'csr', etc.
If ``None``, return a dense numpy ndarray.
.. versionadded:: 1.4
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
prec : ndarray or sparse matrix of shape (n_dim, n_dim)
The generated matrix. If ``sparse_format=None``, this will be an ndarray.
Otherwise, this will be a sparse matrix of the specified format.
See Also
--------
make_spd_matrix : Generate a random symmetric, positive-definite matrix.
Notes
-----
The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
Examples
--------
>>> from sklearn.datasets import make_sparse_spd_matrix
>>> make_sparse_spd_matrix(n_dim=4, norm_diag=False, random_state=42)
array([[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.]])
|
python
|
sklearn/datasets/_samples_generator.py
| 1,746
|
[
"n_dim",
"alpha",
"norm_diag",
"smallest_coef",
"largest_coef",
"sparse_format",
"random_state"
] | false
| 4
| 7.6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
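A quick sanity check of the generator's contract, assuming scikit-learn >= 1.4 (when ``dim`` was renamed to ``n_dim``): the output should be symmetric with strictly positive eigenvalues, and a lower ``alpha`` yields a denser matrix.

```python
import numpy as np
from sklearn.datasets import make_sparse_spd_matrix

# Dense ndarray output by default; lower alpha -> denser Cholesky
# factor, hence a denser generated matrix.
prec = make_sparse_spd_matrix(n_dim=6, alpha=0.5, random_state=0)

assert np.allclose(prec, prec.T)           # symmetric
assert np.linalg.eigvalsh(prec).min() > 0  # positive definite
print(np.count_nonzero(prec), "nonzero entries")
```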
available
|
@Override
public int available() {
return decompressedBuffer == null ? 0 : decompressedBuffer.remaining();
}
|
Decompresses (if necessary) buffered data, optionally computes and validates an XXHash32 checksum, and writes the
result to a buffer.
@throws IOException
|
java
|
clients/src/main/java/org/apache/kafka/common/compress/Lz4BlockInputStream.java
| 259
|
[] | true
| 2
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
clear
|
def clear(
cls,
*,
dag_id: str,
task_id: str,
run_id: str,
map_index: int | None = None,
session: Session = NEW_SESSION,
) -> None:
"""
Clear all XCom data from the database for the given task instance.
.. note:: This **will not** purge any data from a custom XCom backend.
:param dag_id: ID of DAG to clear the XCom for.
:param task_id: ID of task to clear the XCom for.
:param run_id: ID of DAG run to clear the XCom for.
:param map_index: If given, only clear XCom from this particular mapped
task. The default ``None`` clears *all* XComs from the task.
:param session: Database session. If not given, a new session will be
created for this function.
"""
# Given the historic order of this function (logical_date was first argument) to add a new optional
# param we need to add default values for everything :(
if dag_id is None:
raise TypeError("clear() missing required argument: dag_id")
if task_id is None:
raise TypeError("clear() missing required argument: task_id")
if not run_id:
raise ValueError(f"run_id must be passed. Passed run_id={run_id}")
query = select(cls).where(cls.dag_id == dag_id, cls.task_id == task_id, cls.run_id == run_id)
if map_index is not None:
query = query.where(cls.map_index == map_index)
for xcom in session.scalars(query):
# print(f"Clearing XCOM {xcom} with value {xcom.value}")
session.delete(xcom)
session.commit()
|
Clear all XCom data from the database for the given task instance.
.. note:: This **will not** purge any data from a custom XCom backend.
:param dag_id: ID of DAG to clear the XCom for.
:param task_id: ID of task to clear the XCom for.
:param run_id: ID of DAG run to clear the XCom for.
:param map_index: If given, only clear XCom from this particular mapped
task. The default ``None`` clears *all* XComs from the task.
:param session: Database session. If not given, a new session will be
created for this function.
|
python
|
airflow-core/src/airflow/models/xcom.py
| 117
|
[
"cls",
"dag_id",
"task_id",
"run_id",
"map_index",
"session"
] |
None
| true
| 6
| 6.88
|
apache/airflow
| 43,597
|
sphinx
| false
|
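A hedged usage sketch for the classmethod above; the identifiers are hypothetical, and in a real deployment the session argument is normally injected by Airflow's ``provide_session`` machinery rather than passed explicitly.

```python
from airflow.models.xcom import XCom

# Hypothetical IDs; deletes every XCom row for this task instance.
# Leaving map_index=None clears XComs from all mapped instances too.
XCom.clear(
    dag_id="example_dag",
    task_id="extract",
    run_id="manual__2024-01-01T00:00:00+00:00",
)
```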
unescape
|
function unescape(string) {
string = toString(string);
return (string && reHasEscapedHtml.test(string))
? string.replace(reEscapedHtml, unescapeHtmlChar)
: string;
}
|
The inverse of `_.escape`; this method converts the HTML entities
`&amp;`, `&lt;`, `&gt;`, `&quot;`, and `&#39;` in `string` to
their corresponding characters.
**Note:** No other HTML entities are unescaped. To unescape additional
HTML entities use a third-party library like [_he_](https://mths.be/he).
@static
@memberOf _
@since 0.6.0
@category String
@param {string} [string=''] The string to unescape.
@returns {string} Returns the unescaped string.
@example
_.unescape('fred, barney, &amp; pebbles');
// => 'fred, barney, & pebbles'
|
javascript
|
lodash.js
| 15,260
|
[
"string"
] | false
| 3
| 7.36
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
describe_ndframe
|
def describe_ndframe(
*,
obj: NDFrameT,
include: str | Sequence[str] | None,
exclude: str | Sequence[str] | None,
percentiles: Sequence[float] | np.ndarray | None,
) -> NDFrameT:
"""Describe series or dataframe.
Called from pandas.core.generic.NDFrame.describe()
Parameters
----------
obj: DataFrame or Series
Either dataframe or series to be described.
include : 'all', list-like of dtypes or None (default), optional
A white list of data types to include in the result. Ignored for ``Series``.
exclude : list-like of dtypes or None (default), optional
A black list of data types to omit from the result. Ignored for ``Series``.
percentiles : list-like of numbers, optional
The percentiles to include in the output. All should fall between 0 and 1.
The default is ``[.25, .5, .75]``, which returns the 25th, 50th, and
75th percentiles.
Returns
-------
Dataframe or series description.
"""
percentiles = _refine_percentiles(percentiles)
describer: NDFrameDescriberAbstract
if obj.ndim == 1:
describer = SeriesDescriber(
obj=cast("Series", obj),
)
else:
describer = DataFrameDescriber(
obj=cast("DataFrame", obj),
include=include,
exclude=exclude,
)
result = describer.describe(percentiles=percentiles)
return cast(NDFrameT, result)
|
Describe series or dataframe.
Called from pandas.core.generic.NDFrame.describe()
Parameters
----------
obj: DataFrame or Series
Either dataframe or series to be described.
include : 'all', list-like of dtypes or None (default), optional
A white list of data types to include in the result. Ignored for ``Series``.
exclude : list-like of dtypes or None (default), optional
A black list of data types to omit from the result. Ignored for ``Series``.
percentiles : list-like of numbers, optional
The percentiles to include in the output. All should fall between 0 and 1.
The default is ``[.25, .5, .75]``, which returns the 25th, 50th, and
75th percentiles.
Returns
-------
Dataframe or series description.
|
python
|
pandas/core/methods/describe.py
| 55
|
[
"obj",
"include",
"exclude",
"percentiles"
] |
NDFrameT
| true
| 3
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
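``describe_ndframe`` is internal; the supported entry point is ``DataFrame.describe`` / ``Series.describe``, which forwards these same arguments. A minimal sketch through the public API:

```python
import pandas as pd

df = pd.DataFrame({"num": [1.0, 2.0, 3.0], "cat": ["a", "b", "a"]})

# include="all" covers numeric and non-numeric columns alike;
# every percentile must fall between 0 and 1.
print(df.describe(include="all", percentiles=[0.1, 0.5, 0.9]))
```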
getMatchOutcome
|
@Override
public ConditionOutcome getMatchOutcome(ConditionContext context, AnnotatedTypeMetadata metadata) {
BindResult<?> property = Binder.get(context.getEnvironment()).bind(this.propertyName, STRING_LIST);
ConditionMessage.Builder messageBuilder = this.messageBuilder.get();
if (property.isBound()) {
return ConditionOutcome.match(messageBuilder.found("property").items(this.propertyName));
}
return ConditionOutcome.noMatch(messageBuilder.didNotFind("property").items(this.propertyName));
}
|
Create a new instance with the property to check and the message builder to use.
@param propertyName the name of the property
@param messageBuilder a message builder supplier that should provide a fresh
instance on each call
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/OnPropertyListCondition.java
| 56
|
[
"context",
"metadata"
] |
ConditionOutcome
| true
| 2
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
findColumnForFirstNonWhitespaceCharacterInLine
|
function findColumnForFirstNonWhitespaceCharacterInLine(lineAndCharacter: LineAndCharacter, sourceFile: SourceFile, options: EditorSettings): number {
const lineStart = sourceFile.getPositionOfLineAndCharacter(lineAndCharacter.line, 0);
return findFirstNonWhitespaceColumn(lineStart, lineStart + lineAndCharacter.character, sourceFile, options);
}
|
@param assumeNewLineBeforeCloseBrace
`false` when called on text from a real source file.
`true` when we need to assume `position` is on a newline.
This is useful for codefixes. Consider
```
function f() {
|}
```
with `position` at `|`.
When inserting some text after an open brace, we would like to get indentation as if a newline was already there.
By default indentation at `position` will be 0 so 'assumeNewLineBeforeCloseBrace' overrides this behavior.
|
typescript
|
src/services/formatting/smartIndenter.ts
| 593
|
[
"lineAndCharacter",
"sourceFile",
"options"
] | true
| 1
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
_opt_info
|
def _opt_info():
"""
Returns a string containing the CPU features supported
by the current build.
The format of the string can be explained as follows:
- Dispatched features supported by the running machine end with `*`.
- Dispatched features not supported by the running machine
end with `?`.
- Remaining features represent the baseline.
Returns:
str: A formatted string indicating the supported CPU features.
"""
from numpy._core._multiarray_umath import (
__cpu_baseline__,
__cpu_dispatch__,
__cpu_features__,
)
if len(__cpu_baseline__) == 0 and len(__cpu_dispatch__) == 0:
return ''
enabled_features = ' '.join(__cpu_baseline__)
for feature in __cpu_dispatch__:
if __cpu_features__[feature]:
enabled_features += f" {feature}*"
else:
enabled_features += f" {feature}?"
return enabled_features
|
Returns a string containing the CPU features supported
by the current build.
The format of the string can be explained as follows:
- Dispatched features supported by the running machine end with `*`.
- Dispatched features not supported by the running machine
end with `?`.
- Remaining features represent the baseline.
Returns:
str: A formatted string indicating the supported CPU features.
|
python
|
numpy/lib/_utils_impl.py
| 449
|
[] | false
| 6
| 7.12
|
numpy/numpy
| 31,054
|
unknown
| false
|
|
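A sketch of calling the helper directly. It is private, so the import path below (taken from this record's ``file_path``) may change between NumPy releases.

```python
from numpy.lib._utils_impl import _opt_info  # private; path may change

features = _opt_info()
# Baseline features carry no suffix; '*' marks dispatched features the
# running CPU supports, '?' marks dispatched features it does not.
for feature in features.split():
    print(feature)
```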
emptyArray
|
@SuppressWarnings("unchecked")
public static <L, R> Pair<L, R>[] emptyArray() {
return (Pair<L, R>[]) EMPTY_ARRAY;
}
|
Returns the empty array singleton that can be assigned without compiler warning.
@param <L> the left element type.
@param <R> the right element type.
@return the empty array singleton that can be assigned without compiler warning.
@since 3.10
|
java
|
src/main/java/org/apache/commons/lang3/tuple/Pair.java
| 65
|
[] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
propertyOf
|
function propertyOf(object) {
return function(path) {
return object == null ? undefined : baseGet(object, path);
};
}
|
The opposite of `_.property`; this method creates a function that returns
the value at a given path of `object`.
@static
@memberOf _
@since 3.0.0
@category Util
@param {Object} object The object to query.
@returns {Function} Returns the new accessor function.
@example
var array = [0, 1, 2],
object = { 'a': array, 'b': array, 'c': array };
_.map(['a[2]', 'c[0]'], _.propertyOf(object));
// => [2, 0]
_.map([['a', '2'], ['c', '0']], _.propertyOf(object));
// => [2, 0]
|
javascript
|
lodash.js
| 16,046
|
[
"object"
] | false
| 2
| 7.52
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
nested
|
default Builder<T> nested() {
return nested(true);
}
|
Use nested fields when adding JSON from user defined properties.
@return this builder
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/structured/StructuredLoggingJsonMembersCustomizer.java
| 69
|
[] | true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
isAspectJAdvice
|
private static boolean isAspectJAdvice(Advisor advisor) {
return (advisor instanceof InstantiationModelAwarePointcutAdvisor ||
advisor.getAdvice() instanceof AbstractAspectJAdvice ||
(advisor instanceof PointcutAdvisor pointcutAdvisor &&
pointcutAdvisor.getPointcut() instanceof AspectJExpressionPointcut));
}
|
Determine whether the given Advisor contains an AspectJ advice.
@param advisor the Advisor to check
|
java
|
spring-aop/src/main/java/org/springframework/aop/aspectj/AspectJProxyUtils.java
| 73
|
[
"advisor"
] | true
| 4
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
iterator
|
@Override
public CopyableBucketIterator iterator() {
int start = startSlot();
return new BucketArrayIterator(bucketScale, bucketCounts, bucketIndices, start, start + numBuckets);
}
|
@return the position of the first bucket of this set of buckets within {@link #bucketCounts} and {@link #bucketIndices}.
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/FixedCapacityExponentialHistogram.java
| 288
|
[] |
CopyableBucketIterator
| true
| 1
| 6.72
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
take
|
def take(
self,
indices,
axis: Axis = 0,
allow_fill: bool = True,
fill_value=None,
**kwargs,
) -> Self:
"""
Return a new Index of the values selected by the indices.
For internal compatibility with numpy arrays.
Parameters
----------
indices : array-like
Indices to be taken.
axis : {0 or 'index'}, optional
The axis over which to select values, always 0 or 'index'.
allow_fill : bool, default True
How to handle negative values in `indices`.
* False: negative values in `indices` indicate positional indices
from the right (the default). This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate
missing values. These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
fill_value : scalar, default None
If allow_fill=True and fill_value is not None, indices specified by
-1 are regarded as NA. If Index doesn't hold NA, raise ValueError.
**kwargs
Required for compatibility with numpy.
Returns
-------
Index
An index formed of elements at the given indices. Will be the same
type as self, except for RangeIndex.
See Also
--------
numpy.ndarray.take: Return an array formed from the
elements of a at the given indices.
Examples
--------
>>> idx = pd.Index(["a", "b", "c"])
>>> idx.take([2, 2, 1, 2])
Index(['c', 'c', 'b', 'c'], dtype='str')
"""
nv.validate_take((), kwargs)
indices = np.asarray(indices, dtype=np.intp)
result = NDArrayBackedExtensionIndex.take(
self, indices, axis, allow_fill, fill_value, **kwargs
)
maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
if isinstance(maybe_slice, slice):
freq = self._data._get_getitem_freq(maybe_slice)
result._data._freq = freq
return result
|
Return a new Index of the values selected by the indices.
For internal compatibility with numpy arrays.
Parameters
----------
indices : array-like
Indices to be taken.
axis : {0 or 'index'}, optional
The axis over which to select values, always 0 or 'index'.
allow_fill : bool, default True
How to handle negative values in `indices`.
* False: negative values in `indices` indicate positional indices
from the right (the default). This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate
missing values. These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
fill_value : scalar, default None
If allow_fill=True and fill_value is not None, indices specified by
-1 are regarded as NA. If Index doesn't hold NA, raise ValueError.
**kwargs
Required for compatibility with numpy.
Returns
-------
Index
An index formed of elements at the given indices. Will be the same
type as self, except for RangeIndex.
See Also
--------
numpy.ndarray.take: Return an array formed from the
elements of a at the given indices.
Examples
--------
>>> idx = pd.Index(["a", "b", "c"])
>>> idx.take([2, 2, 1, 2])
Index(['c', 'c', 'b', 'c'], dtype='str')
|
python
|
pandas/core/indexes/datetimelike.py
| 1,076
|
[
"self",
"indices",
"axis",
"allow_fill",
"fill_value"
] |
Self
| true
| 2
| 8.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
iterator
|
@Override
Iterator<Layer> iterator();
|
Return the jar layers in the order that they should be added (starting with the
least frequently changed layer).
@return the layers iterator
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/Layers.java
| 42
|
[] | true
| 1
| 6.8
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
castFunction
|
function castFunction(value) {
return typeof value == 'function' ? value : identity;
}
|
Casts `value` to `identity` if it's not a function.
@private
@param {*} value The value to inspect.
@returns {Function} Returns cast function.
|
javascript
|
lodash.js
| 4,544
|
[
"value"
] | false
| 2
| 6.16
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
opj_uint_max
|
static INLINE OPJ_UINT32 opj_uint_max(OPJ_UINT32 a, OPJ_UINT32 b)
{
return (a > b) ? a : b;
}
|
Get the maximum of two integers
@return Returns a if a > b else b
|
cpp
|
3rdparty/openjpeg/openjp2/opj_intmath.h
| 83
|
[
"a",
"b"
] | true
| 2
| 6.48
|
opencv/opencv
| 85,374
|
doxygen
| false
|
|
_get_dep_statuses
|
def _get_dep_statuses(self, ti, session, dep_context=None):
"""
Determine whether the pool for this task instance has available slots.
:param ti: the task instance to get the dependency status for
:param session: database session
:param dep_context: the context for which this dependency should be evaluated for
:return: True if there are available slots in the pool.
"""
from airflow.models.pool import Pool # To avoid a circular dependency
pool_name = ti.pool
# Controlled by UNIQUE key in slot_pool table, only (at most) one result can be returned.
pool: Pool | None = session.scalar(select(Pool).where(Pool.pool == pool_name))
if pool is None:
yield self._failing_status(
reason=f"Tasks using non-existent pool '{pool_name}' will not be scheduled"
)
return
open_slots = pool.open_slots(session=session)
if ti.state in pool.get_occupied_states():
open_slots += ti.pool_slots
if open_slots <= (ti.pool_slots - 1):
yield self._failing_status(
reason=f"Not scheduling since there are {open_slots} open slots in pool {pool_name} "
f"and require {ti.pool_slots} pool slots"
)
else:
yield self._passing_status(
reason=f"There are enough open slots in {pool_name} to execute the task",
)
|
Determine whether the pool for this task instance has available slots.
:param ti: the task instance to get the dependency status for
:param session: database session
:param dep_context: the context for which this dependency should be evaluated for
:return: True if there are available slots in the pool.
|
python
|
airflow-core/src/airflow/ti_deps/deps/pool_slots_available_dep.py
| 35
|
[
"self",
"ti",
"session",
"dep_context"
] | false
| 5
| 7.44
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
all
|
public KafkaFuture<Void> all() {
return (topicIdFutures == null) ? KafkaFuture.allOf(nameFutures.values().toArray(new KafkaFuture<?>[0])) :
KafkaFuture.allOf(topicIdFutures.values().toArray(new KafkaFuture<?>[0]));
}
|
@return a future which succeeds only if all the topic deletions succeed.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/DeleteTopicsResult.java
| 72
|
[] | true
| 2
| 8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
compile_llvm_ir_to_code_object
|
def compile_llvm_ir_to_code_object(
llvm_ir_path: str, output_path: str, target_arch: str
) -> bool:
"""
Compile unbundled LLVM IR to a single-arch code object.
Args:
llvm_ir_path: Path to .ll file
output_path: Where to write .hsaco file
target_arch: Target architecture (e.g., 'gfx90a')
Returns:
True if successful
"""
if not os.path.exists(llvm_ir_path):
return False
os.makedirs(os.path.dirname(output_path), exist_ok=True)
try:
clang = get_rocm_compiler()
except RuntimeError:
return False
# Using clang and not hipcc since we are not compiling source code
# Instead we use the LLVM IR (.ll) provided by triton
cmd = [
clang,
"-target",
"amdgcn-amd-amdhsa",
f"-mcpu={target_arch}",
llvm_ir_path,
"-o",
output_path,
]
try:
subprocess.run(cmd, capture_output=True, text=True, check=True)
if not os.path.exists(output_path):
return False
return True
except subprocess.CalledProcessError:
return False
|
Compile unbundled LLVM IR to a single-arch code object.
Args:
llvm_ir_path: Path to .ll file
output_path: Where to write .hsaco file
target_arch: Target architecture (e.g., 'gfx90a')
Returns:
True if successful
|
python
|
torch/_inductor/rocm_multiarch_utils.py
| 99
|
[
"llvm_ir_path",
"output_path",
"target_arch"
] |
bool
| true
| 3
| 7.92
|
pytorch/pytorch
| 96,034
|
google
| false
|
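A hedged call-site sketch; the paths and the architecture below are hypothetical, and the helper needs a working ROCm clang (located via ``get_rocm_compiler``) to return ``True``.

```python
from torch._inductor.rocm_multiarch_utils import compile_llvm_ir_to_code_object

# Hypothetical paths; the helper returns False instead of raising when
# the input .ll file is missing or compilation fails.
ok = compile_llvm_ir_to_code_object(
    llvm_ir_path="/tmp/kernel.ll",
    output_path="/tmp/out/kernel.hsaco",
    target_arch="gfx90a",
)
print("compiled" if ok else "skipped or failed")
```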
opj_int_clamp
|
static INLINE OPJ_INT32 opj_int_clamp(OPJ_INT32 a, OPJ_INT32 min,
OPJ_INT32 max)
{
if (a < min) {
return min;
}
if (a > max) {
return max;
}
return a;
}
|
Clamp an integer inside an interval
@return
<ul>
<li>Returns a if (min < a < max)
<li>Returns max if (a > max)
<li>Returns min if (a < min)
</ul>
|
cpp
|
3rdparty/openjpeg/openjp2/opj_intmath.h
| 116
|
[
"a",
"min",
"max"
] | true
| 3
| 6.56
|
opencv/opencv
| 85,374
|
doxygen
| false
|
|
hashCode
|
public static int hashCode(final Annotation a) {
int result = 0;
final Class<? extends Annotation> type = a.annotationType();
for (final Method m : type.getDeclaredMethods()) {
try {
final Object value = m.invoke(a);
if (value == null) {
throw new IllegalStateException(String.format("Annotation method %s returned null", m));
}
result += hashMember(m.getName(), value);
} catch (final ReflectiveOperationException ex) {
throw new UncheckedException(ex);
}
}
return result;
}
|
Generate a hash code for the given annotation using the algorithm
presented in the {@link Annotation#hashCode()} API docs.
@param a the Annotation for a hash code calculation is desired, not
{@code null}
@return the calculated hash code
@throws RuntimeException if an {@link Exception} is encountered during
annotation member access
@throws IllegalStateException if an annotation method invocation returns
{@code null}
|
java
|
src/main/java/org/apache/commons/lang3/AnnotationUtils.java
| 239
|
[
"a"
] | true
| 3
| 7.28
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
prependIfMissingIgnoreCase
|
@Deprecated
public static String prependIfMissingIgnoreCase(final String str, final CharSequence prefix, final CharSequence... prefixes) {
return Strings.CI.prependIfMissing(str, prefix, prefixes);
}
|
Prepends the prefix to the start of the string if the string does not already start, case-insensitive, with any of the prefixes.
<pre>
StringUtils.prependIfMissingIgnoreCase(null, null) = null
StringUtils.prependIfMissingIgnoreCase("abc", null) = "abc"
StringUtils.prependIfMissingIgnoreCase("", "xyz") = "xyz"
StringUtils.prependIfMissingIgnoreCase("abc", "xyz") = "xyzabc"
StringUtils.prependIfMissingIgnoreCase("xyzabc", "xyz") = "xyzabc"
StringUtils.prependIfMissingIgnoreCase("XYZabc", "xyz") = "XYZabc"
</pre>
<p>
With additional prefixes,
</p>
<pre>
StringUtils.prependIfMissingIgnoreCase(null, null, null) = null
StringUtils.prependIfMissingIgnoreCase("abc", null, null) = "abc"
StringUtils.prependIfMissingIgnoreCase("", "xyz", null) = "xyz"
StringUtils.prependIfMissingIgnoreCase("abc", "xyz", new CharSequence[]{null}) = "xyzabc"
StringUtils.prependIfMissingIgnoreCase("abc", "xyz", "") = "abc"
StringUtils.prependIfMissingIgnoreCase("abc", "xyz", "mno") = "xyzabc"
StringUtils.prependIfMissingIgnoreCase("xyzabc", "xyz", "mno") = "xyzabc"
StringUtils.prependIfMissingIgnoreCase("mnoabc", "xyz", "mno") = "mnoabc"
StringUtils.prependIfMissingIgnoreCase("XYZabc", "xyz", "mno") = "XYZabc"
StringUtils.prependIfMissingIgnoreCase("MNOabc", "xyz", "mno") = "MNOabc"
</pre>
@param str The string.
@param prefix The prefix to prepend to the start of the string.
@param prefixes Additional prefixes that are valid (optional).
@return A new String if prefix was prepended, the same string otherwise.
@since 3.2
@deprecated Use {@link Strings#prependIfMissing(String, CharSequence, CharSequence...) Strings.CI.prependIfMissing(String, CharSequence,
CharSequence...)}.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 5,648
|
[
"str",
"prefix"
] |
String
| true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
_set_noconvert_dtype_columns
|
def _set_noconvert_dtype_columns(
self, col_indices: list[int], names: Sequence[Hashable]
) -> set[int]:
"""
Set the columns that should not undergo dtype conversions.
Currently, any column that is involved with date parsing will not
undergo such conversions. If usecols is specified, the positions of the columns
not to cast is relative to the usecols not to all columns.
Parameters
----------
col_indices: The indices specifying order and positions of the columns
names: The column names, whose order corresponds to the order
of col_indices
Returns
-------
A set of integers containing the positions of the columns not to convert.
"""
usecols: list[int] | list[str] | None
noconvert_columns = set()
if self.usecols_dtype == "integer":
# A set of integers will be converted to a list in
# the correct order every single time.
usecols = sorted(self.usecols)
elif callable(self.usecols) or self.usecols_dtype not in ("empty", None):
# The names attribute should have the correct columns
# in the proper order for indexing with parse_dates.
usecols = col_indices
else:
# Usecols is empty.
usecols = None
def _set(x) -> int:
if usecols is not None and is_integer(x):
x = usecols[x]
if not is_integer(x):
x = col_indices[names.index(x)]
return x
if isinstance(self.parse_dates, list):
validate_parse_dates_presence(self.parse_dates, names)
for val in self.parse_dates:
noconvert_columns.add(_set(val))
elif self.parse_dates:
if isinstance(self.index_col, list):
for k in self.index_col:
noconvert_columns.add(_set(k))
elif self.index_col is not None:
noconvert_columns.add(_set(self.index_col))
return noconvert_columns
|
Set the columns that should not undergo dtype conversions.
Currently, any column that is involved with date parsing will not
undergo such conversions. If usecols is specified, the positions of the columns
not to cast is relative to the usecols not to all columns.
Parameters
----------
col_indices: The indices specifying order and positions of the columns
names: The column names, whose order corresponds to the order
of col_indices
Returns
-------
A set of integers containing the positions of the columns not to convert.
|
python
|
pandas/io/parsers/base_parser.py
| 391
|
[
"self",
"col_indices",
"names"
] |
set[int]
| true
| 14
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
parse
|
@Contract("!null -> !null")
static @Nullable List<X509Certificate> parse(@Nullable String text) {
if (text == null) {
return null;
}
CertificateFactory factory = getCertificateFactory();
List<X509Certificate> certs = new ArrayList<>();
readCertificates(text, factory, certs::add);
Assert.state(!CollectionUtils.isEmpty(certs), "Missing certificates or unrecognized format");
return List.copyOf(certs);
}
|
Parse certificates from the specified string.
@param text the text to parse
@return the parsed certificates
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/pem/PemCertificateParser.java
| 60
|
[
"text"
] | true
| 2
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
calculate_number_of_dag_runs
|
def calculate_number_of_dag_runs(performance_dag_conf: dict[str, str]) -> int:
"""
Calculate how many Dag Runs will be created with the given performance DAG configuration.
:param performance_dag_conf: dict with environment variables as keys and their values as values
:return: total number of Dag Runs
:rtype: int
"""
max_runs = get_performance_dag_environment_variable(performance_dag_conf, "PERF_MAX_RUNS")
total_dags_count = get_dags_count(performance_dag_conf)
# if PERF_MAX_RUNS is missing from the configuration,
# it means that PERF_SCHEDULE_INTERVAL must be '@once'
if max_runs is None:
return total_dags_count
return int(max_runs) * total_dags_count
|
Calculate how many Dag Runs will be created with the given performance DAG configuration.
:param performance_dag_conf: dict with environment variables as keys and their values as values
:return: total number of Dag Runs
:rtype: int
|
python
|
performance/src/performance_dags/performance_dag/performance_dag_utils.py
| 485
|
[
"performance_dag_conf"
] |
int
| true
| 2
| 8.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
copyTo
|
public void copyTo(MailMessage target) {
Assert.notNull(target, "'target' MailMessage must not be null");
if (getFrom() != null) {
target.setFrom(getFrom());
}
if (getReplyTo() != null) {
target.setReplyTo(getReplyTo());
}
if (getTo() != null) {
target.setTo(copy(getTo()));
}
if (getCc() != null) {
target.setCc(copy(getCc()));
}
if (getBcc() != null) {
target.setBcc(copy(getBcc()));
}
if (getSentDate() != null) {
target.setSentDate(getSentDate());
}
if (getSubject() != null) {
target.setSubject(getSubject());
}
if (getText() != null) {
target.setText(getText());
}
}
|
Copy the contents of this message to the given target message.
@param target the {@code MailMessage} to copy to
|
java
|
spring-context-support/src/main/java/org/springframework/mail/SimpleMailMessage.java
| 180
|
[
"target"
] |
void
| true
| 9
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
checkStrictModeBinaryExpression
|
function checkStrictModeBinaryExpression(node: BinaryExpression) {
if (inStrictMode && isLeftHandSideExpression(node.left) && isAssignmentOperator(node.operatorToken.kind)) {
// ECMA 262 (Annex C) The identifier eval or arguments may not appear as the LeftHandSideExpression of an
// Assignment operator(11.13) or of a PostfixExpression(11.3)
checkStrictModeEvalOrArguments(node, node.left as Identifier);
}
}
|
Declares a Symbol for the node and adds it to symbols. Reports errors for conflicting identifier names.
@param symbolTable - The symbol table which node will be added to.
@param parent - node's parent declaration.
@param node - The declaration to be added to the symbol table
@param includes - The SymbolFlags that node has in addition to its declaration type (eg: export, ambient, etc.)
@param excludes - The flags which node cannot be declared alongside in a symbol table. Used to report forbidden declarations.
|
typescript
|
src/compiler/binder.ts
| 2,617
|
[
"node"
] | false
| 4
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
createInstance
|
@Override
@SuppressWarnings("unchecked")
protected Set<Object> createInstance() {
if (this.sourceSet == null) {
throw new IllegalArgumentException("'sourceSet' is required");
}
Set<Object> result = null;
if (this.targetSetClass != null) {
result = BeanUtils.instantiateClass(this.targetSetClass);
}
else {
result = CollectionUtils.newLinkedHashSet(this.sourceSet.size());
}
Class<?> valueType = null;
if (this.targetSetClass != null) {
valueType = ResolvableType.forClass(this.targetSetClass).asCollection().resolveGeneric();
}
if (valueType != null) {
TypeConverter converter = getBeanTypeConverter();
for (Object elem : this.sourceSet) {
result.add(converter.convertIfNecessary(elem, valueType));
}
}
else {
result.addAll(this.sourceSet);
}
return result;
}
|
Set the class to use for the target Set. Can be populated with a fully
qualified class name when defined in a Spring application context.
<p>Default is a linked HashSet, keeping the registration order.
@see java.util.LinkedHashSet
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/SetFactoryBean.java
| 76
|
[] | true
| 5
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
parseBeanDefinitions
|
protected void parseBeanDefinitions(Element root, BeanDefinitionParserDelegate delegate) {
if (delegate.isDefaultNamespace(root)) {
NodeList nl = root.getChildNodes();
for (int i = 0; i < nl.getLength(); i++) {
Node node = nl.item(i);
if (node instanceof Element ele) {
if (delegate.isDefaultNamespace(ele)) {
parseDefaultElement(ele, delegate);
}
else {
delegate.parseCustomElement(ele);
}
}
}
}
else {
delegate.parseCustomElement(root);
}
}
|
Parse the elements at the root level in the document:
"import", "alias", "bean".
@param root the DOM root element of the document
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/xml/DefaultBeanDefinitionDocumentReader.java
| 166
|
[
"root",
"delegate"
] |
void
| true
| 5
| 7.04
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
ary
|
function ary(func, n, guard) {
n = guard ? undefined : n;
n = (func && n == null) ? func.length : n;
return createWrap(func, WRAP_ARY_FLAG, undefined, undefined, undefined, undefined, n);
}
|
Creates a function that invokes `func`, with up to `n` arguments,
ignoring any additional arguments.
@static
@memberOf _
@since 3.0.0
@category Function
@param {Function} func The function to cap arguments for.
@param {number} [n=func.length] The arity cap.
@param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
@returns {Function} Returns the new capped function.
@example
_.map(['6', '8', '10'], _.ary(parseInt, 1));
// => [6, 8, 10]
|
javascript
|
lodash.js
| 10,126
|
[
"func",
"n",
"guard"
] | false
| 4
| 7.68
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
validPosition
|
private FetchPosition validPosition() {
if (hasValidPosition()) {
return position;
} else {
return null;
}
}
|
Clear the awaiting validation state and enter fetching.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
| 1,198
|
[] |
FetchPosition
| true
| 2
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
__enter__
|
def __enter__(self) -> Self:
"""
Context manager entry point that initiates log capture.
Returns:
Self instance for use in context manager.
"""
self._capture()
return self
|
Context manager entry point that initiates log capture.
Returns:
Self instance for use in context manager.
|
python
|
airflow-core/src/airflow/utils/log/log_stream_accumulator.py
| 138
|
[
"self"
] |
Self
| true
| 1
| 6.56
|
apache/airflow
| 43,597
|
unknown
| false
|
applyAsDouble
|
double applyAsDouble(long value) throws E;
|
Applies this function to the given argument.
@param value the function argument
@return the function result
@throws E Thrown when the function fails.
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableLongToDoubleFunction.java
| 53
|
[
"value"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
getArgs
|
private Object @Nullable [] getArgs(Class<?>[] parameterTypes) {
Object[] args = new Object[parameterTypes.length];
for (int i = 0; i < parameterTypes.length; i++) {
Function<Class<?>, Object> parameter = getAvailableParameter(parameterTypes[i]);
if (parameter == null) {
return null;
}
args[i] = parameter.apply(this.type);
}
return args;
}
|
Get an injectable argument instance for the given type. This method can be used
when manually instantiating an object without reflection.
@param <A> the argument type
@param type the argument type
@return the argument to inject or {@code null}
@since 3.4.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/util/Instantiator.java
| 221
|
[
"parameterTypes"
] | true
| 3
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
nanargmax
|
def nanargmax(
values: np.ndarray,
*,
axis: AxisInt | None = None,
skipna: bool = True,
mask: npt.NDArray[np.bool_] | None = None,
) -> int | np.ndarray:
"""
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : int or ndarray[int]
The index/indices of max value in specified axis or -1 in the NA case
Examples
--------
>>> from pandas.core import nanops
>>> arr = np.array([1, 2, 3, np.nan, 4])
>>> nanops.nanargmax(arr)
np.int64(4)
>>> arr = np.array(range(12), dtype=np.float64).reshape(4, 3)
>>> arr[2:, 2] = np.nan
>>> arr
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., nan],
[ 9., 10., nan]])
>>> nanops.nanargmax(arr, axis=1)
array([2, 2, 1, 1])
"""
values, mask = _get_values(values, True, fill_value_typ="-inf", mask=mask)
result = values.argmax(axis)
# error: Argument 1 to "_maybe_arg_null_out" has incompatible type "Any |
# signedinteger[Any]"; expected "ndarray[Any, Any]"
result = _maybe_arg_null_out(result, axis, mask, skipna) # type: ignore[arg-type]
return result
|
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : int or ndarray[int]
The index/indices of max value in specified axis or -1 in the NA case
Examples
--------
>>> from pandas.core import nanops
>>> arr = np.array([1, 2, 3, np.nan, 4])
>>> nanops.nanargmax(arr)
np.int64(4)
>>> arr = np.array(range(12), dtype=np.float64).reshape(4, 3)
>>> arr[2:, 2] = np.nan
>>> arr
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., nan],
[ 9., 10., nan]])
>>> nanops.nanargmax(arr, axis=1)
array([2, 2, 1, 1])
|
python
|
pandas/core/nanops.py
| 1,120
|
[
"values",
"axis",
"skipna",
"mask"
] |
int | np.ndarray
| true
| 1
| 7.12
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
validate_endpoints
|
def validate_endpoints(closed: str | None) -> tuple[bool, bool]:
"""
Check that the `closed` argument is among [None, "left", "right"]
Parameters
----------
closed : {None, "left", "right"}
Returns
-------
left_closed : bool
right_closed : bool
Raises
------
ValueError : if argument is not among valid values
"""
left_closed = False
right_closed = False
if closed is None:
left_closed = True
right_closed = True
elif closed == "left":
left_closed = True
elif closed == "right":
right_closed = True
else:
raise ValueError("Closed has to be either 'left', 'right' or None")
return left_closed, right_closed
|
Check that the `closed` argument is among [None, "left", "right"]
Parameters
----------
closed : {None, "left", "right"}
Returns
-------
left_closed : bool
right_closed : bool
Raises
------
ValueError : if argument is not among valid values
|
python
|
pandas/util/_validators.py
| 391
|
[
"closed"
] |
tuple[bool, bool]
| true
| 5
| 6.08
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
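A quick check of the mapping; note the import uses pandas' private ``pandas.util._validators`` module, so the path is version-dependent.

```python
from pandas.util._validators import validate_endpoints  # private module

print(validate_endpoints(None))     # (True, True)  -- closed on both ends
print(validate_endpoints("left"))   # (True, False)
print(validate_endpoints("right"))  # (False, True)
# Any other value, e.g. validate_endpoints("both"), raises ValueError.
```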
getBalanceScore
|
private int getBalanceScore(Map<String, List<TopicPartition>> assignment) {
int score = 0;
Map<String, Integer> consumer2AssignmentSize = new HashMap<>();
for (Entry<String, List<TopicPartition>> entry: assignment.entrySet())
consumer2AssignmentSize.put(entry.getKey(), entry.getValue().size());
Iterator<Entry<String, Integer>> it = consumer2AssignmentSize.entrySet().iterator();
while (it.hasNext()) {
Entry<String, Integer> entry = it.next();
int consumerAssignmentSize = entry.getValue();
it.remove();
for (Entry<String, Integer> otherEntry: consumer2AssignmentSize.entrySet())
score += Math.abs(consumerAssignmentSize - otherEntry.getValue());
}
return score;
}
|
@return the balance score of the given assignment, computed as the sum of pairwise differences in assigned partition counts over all consumer pairs.
A perfectly balanced assignment (with all consumers getting the same number of partitions) has a balance score of 0.
Lower balance score indicates a more balanced assignment.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java
| 1,243
|
[
"assignment"
] | true
| 2
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
Security
|
def Security( # noqa: N802
dependency: Annotated[
Optional[Callable[..., Any]],
Doc(
"""
A "dependable" callable (like a function).
Don't call it directly, FastAPI will call it for you, just pass the object
directly.
"""
),
] = None,
*,
scopes: Annotated[
Optional[Sequence[str]],
Doc(
"""
OAuth2 scopes required for the *path operation* that uses this Security
dependency.
The term "scope" comes from the OAuth2 specification, it seems to be
intentionally vague and interpretable. It normally refers to permissions,
in cases to roles.
These scopes are integrated with OpenAPI (and the API docs at `/docs`).
So they are visible in the OpenAPI specification.
"""
),
] = None,
use_cache: Annotated[
bool,
Doc(
"""
By default, after a dependency is called the first time in a request, if
the dependency is declared again for the rest of the request (for example
if the dependency is needed by several dependencies), the value will be
re-used for the rest of the request.
Set `use_cache` to `False` to disable this behavior and ensure the
dependency is called again (if declared more than once) in the same request.
"""
),
] = True,
) -> Any:
"""
Declare a FastAPI Security dependency.
The only difference with a regular dependency is that it can declare OAuth2
scopes that will be integrated with OpenAPI and the automatic UI docs (by default
at `/docs`).
It takes a single "dependable" callable (like a function).
Don't call it directly, FastAPI will call it for you.
Read more about it in the
[FastAPI docs for Security](https://fastapi.tiangolo.com/tutorial/security/) and
in the
[FastAPI docs for OAuth2 scopes](https://fastapi.tiangolo.com/advanced/security/oauth2-scopes/).
**Example**
```python
from typing import Annotated
from fastapi import Security, FastAPI
from .db import User
from .security import get_current_active_user
app = FastAPI()
@app.get("/users/me/items/")
async def read_own_items(
current_user: Annotated[User, Security(get_current_active_user, scopes=["items"])]
):
return [{"item_id": "Foo", "owner": current_user.username}]
```
"""
return params.Security(dependency=dependency, scopes=scopes, use_cache=use_cache)
|
Declare a FastAPI Security dependency.
The only difference with a regular dependency is that it can declare OAuth2
scopes that will be integrated with OpenAPI and the automatic UI docs (by default
at `/docs`).
It takes a single "dependable" callable (like a function).
Don't call it directly, FastAPI will call it for you.
Read more about it in the
[FastAPI docs for Security](https://fastapi.tiangolo.com/tutorial/security/) and
in the
[FastAPI docs for OAuth2 scopes](https://fastapi.tiangolo.com/advanced/security/oauth2-scopes/).
**Example**
```python
from typing import Annotated
from fastapi import Security, FastAPI
from .db import User
from .security import get_current_active_user
app = FastAPI()
@app.get("/users/me/items/")
async def read_own_items(
current_user: Annotated[User, Security(get_current_active_user, scopes=["items"])]
):
return [{"item_id": "Foo", "owner": current_user.username}]
```
|
python
|
fastapi/param_functions.py
| 2,302
|
[
"dependency",
"scopes",
"use_cache"
] |
Any
| true
| 1
| 6.56
|
tiangolo/fastapi
| 93,264
|
unknown
| false
|
maybeAutoCommitOffsetsSync
|
private void maybeAutoCommitOffsetsSync(Timer timer) {
if (autoCommitEnabled) {
Map<TopicPartition, OffsetAndMetadata> allConsumedOffsets = subscriptions.allConsumed();
try {
log.debug("Sending synchronous auto-commit of offsets {}", allConsumedOffsets);
if (!commitOffsetsSync(allConsumedOffsets, timer))
log.debug("Auto-commit of offsets {} timed out before completion", allConsumedOffsets);
} catch (WakeupException | InterruptException e) {
log.debug("Auto-commit of offsets {} was interrupted before completion", allConsumedOffsets);
// rethrow wakeups since they are triggered by the user
throw e;
} catch (Exception e) {
// consistent with async auto-commit failures, we do not propagate the exception
log.warn("Synchronous auto-commit of offsets {} failed: {}", allConsumedOffsets, e.getMessage());
}
}
}
|
Commit offsets synchronously. This method will retry until the commit completes successfully
or an unrecoverable error is encountered.
@param offsets The offsets to be committed
@throws org.apache.kafka.common.errors.AuthorizationException if the consumer is not authorized to the group
or to any of the specified partitions. See the exception for more details
@throws CommitFailedException if an unrecoverable error occurs before the commit can be completed
@throws FencedInstanceIdException if a static member gets fenced
@return If the offset commit was successfully sent and a successful response was received from
the coordinator
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java
| 1,180
|
[
"timer"
] |
void
| true
| 5
| 7.6
|
apache/kafka
| 31,560
|
javadoc
| false
|
_stata_elapsed_date_to_datetime_vec
|
def _stata_elapsed_date_to_datetime_vec(dates: Series, fmt: str) -> Series:
"""
Convert from SIF to datetime. https://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
Returns
-------
converted : Series
The converted dates
Examples
--------
>>> dates = pd.Series([52])
>>> _stata_elapsed_date_to_datetime_vec(dates, "%tw")
0 1961-01-01
dtype: datetime64[s]
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
half-yearly date - th
half-years since 1960h1
yearly date - ty
years since 0000
"""
if fmt.startswith(("%tc", "tc")):
# Delta ms relative to base
td = np.timedelta64(stata_epoch - unix_epoch, "ms")
res = np.array(dates._values, dtype="M8[ms]") + td
return Series(res, index=dates.index)
elif fmt.startswith(("%td", "td", "%d", "d")):
# Delta days relative to base
td = np.timedelta64(stata_epoch - unix_epoch, "D")
res = np.array(dates._values, dtype="M8[D]") + td
return Series(res, index=dates.index)
elif fmt.startswith(("%tm", "tm")):
# Delta months relative to base
ordinals = dates + (stata_epoch.year - unix_epoch.year) * 12
res = np.array(ordinals, dtype="M8[M]").astype("M8[s]")
return Series(res, index=dates.index)
elif fmt.startswith(("%tq", "tq")):
# Delta quarters relative to base
ordinals = dates + (stata_epoch.year - unix_epoch.year) * 4
res = np.array(ordinals, dtype="M8[3M]").astype("M8[s]")
return Series(res, index=dates.index)
elif fmt.startswith(("%th", "th")):
# Delta half-years relative to base
ordinals = dates + (stata_epoch.year - unix_epoch.year) * 2
res = np.array(ordinals, dtype="M8[6M]").astype("M8[s]")
return Series(res, index=dates.index)
elif fmt.startswith(("%ty", "ty")):
# Years -- not delta
ordinals = dates - 1970
res = np.array(ordinals, dtype="M8[Y]").astype("M8[s]")
return Series(res, index=dates.index)
bad_locs = np.isnan(dates)
has_bad_values = False
if bad_locs.any():
has_bad_values = True
dates._values[bad_locs] = 1.0 # Replace with NaT
dates = dates.astype(np.int64)
if fmt.startswith(("%tC", "tC")):
warnings.warn(
"Encountered %tC format. Leaving in Stata Internal Format.",
stacklevel=find_stack_level(),
)
conv_dates = Series(dates, dtype=object)
if has_bad_values:
conv_dates[bad_locs] = NaT
return conv_dates
# does not count leap days - 7 days is a week.
# 52nd week may have more than 7 days
elif fmt.startswith(("%tw", "tw")):
year = stata_epoch.year + dates // 52
days = (dates % 52) * 7
per_y = (year - 1970).array.view("Period[Y]")
per_d = per_y.asfreq("D", how="S")
per_d_shifted = per_d + days._values
per_s = per_d_shifted.asfreq("s", how="S")
conv_dates_arr = per_s.view("M8[s]")
conv_dates = Series(conv_dates_arr, index=dates.index)
else:
raise ValueError(f"Date fmt {fmt} not understood")
if has_bad_values: # Restore NaT for bad values
conv_dates[bad_locs] = NaT
return conv_dates
|
Convert from SIF to datetime. https://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
Returns
-------
converted : Series
The converted dates
Examples
--------
>>> dates = pd.Series([52])
>>> _stata_elapsed_date_to_datetime_vec(dates, "%tw")
0 1961-01-01
dtype: datetime64[s]
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
half-yearly date - th
half-years since 1960h1
yearly date - ty
years since 0000
|
python
|
pandas/io/stata.py
| 161
|
[
"dates",
"fmt"
] |
Series
| true
| 13
| 8.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
onHeartbeatSuccess
|
public void onHeartbeatSuccess(StreamsGroupHeartbeatResponse response) {
StreamsGroupHeartbeatResponseData responseData = response.data();
throwIfUnexpectedError(responseData);
if (state == MemberState.LEAVING) {
log.debug("Ignoring heartbeat response received from broker. Member {} with epoch {} is " +
"already leaving the group.", memberId, memberEpoch);
return;
}
if (state == MemberState.UNSUBSCRIBED && responseData.memberEpoch() < 0 && maybeCompleteLeaveInProgress()) {
log.debug("Member {} with epoch {} received a successful response to the heartbeat " +
"to leave the group and completed the leave operation. ", memberId, memberEpoch);
return;
}
if (isNotInGroup()) {
log.debug("Ignoring heartbeat response received from broker. Member {} is in {} state" +
" so it's not a member of the group. ", memberId, state);
return;
}
if (responseData.memberEpoch() < 0) {
log.debug("Ignoring heartbeat response received from broker. Member {} with epoch {} " +
"is in {} state and the member epoch is invalid: {}. ", memberId, memberEpoch, state,
responseData.memberEpoch());
maybeCompleteLeaveInProgress();
return;
}
updateMemberEpoch(responseData.memberEpoch());
final List<StreamsGroupHeartbeatResponseData.TaskIds> activeTasks = responseData.activeTasks();
final List<StreamsGroupHeartbeatResponseData.TaskIds> standbyTasks = responseData.standbyTasks();
final List<StreamsGroupHeartbeatResponseData.TaskIds> warmupTasks = responseData.warmupTasks();
final boolean isGroupReady = isGroupReady(responseData.status());
if (activeTasks != null && standbyTasks != null && warmupTasks != null) {
if (!state.canHandleNewAssignment()) {
log.debug("Ignoring new assignment: active tasks {}, standby tasks {}, and warm-up tasks {} received " +
"from server because member is in {} state.",
activeTasks, standbyTasks, warmupTasks, state);
return;
}
processAssignmentReceived(
toTasksAssignment(activeTasks),
toTasksAssignment(standbyTasks),
toTasksAssignment(warmupTasks),
isGroupReady
);
} else if (responseData.activeTasks() != null || responseData.standbyTasks() != null || responseData.warmupTasks() != null) {
throw new IllegalStateException("Invalid response data, task collections must be all null or all non-null: "
+ responseData);
} else if (isGroupReady != targetAssignment.isGroupReady) {
// The broker did not provide a new assignment, but the group's readiness
// changed, so update the target assignment and reconcile it.
processAssignmentReceived(
targetAssignment.activeTasks,
targetAssignment.standbyTasks,
targetAssignment.warmupTasks,
isGroupReady
);
}
}
|
Notify about a successful heartbeat response.
@param response Heartbeat response to extract member info and errors from.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsMembershipManager.java
| 667
|
[
"response"
] |
void
| true
| 15
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
get_pool
|
def get_pool(pool_name: str, session: Session = NEW_SESSION) -> Pool | None:
"""
Get the Pool with specific pool name from the Pools.
:param pool_name: The pool name of the Pool to get.
:param session: SQLAlchemy ORM Session
:return: the pool object
"""
return session.scalar(select(Pool).where(Pool.pool == pool_name))
|
Get the Pool with specific pool name from the Pools.
:param pool_name: The pool name of the Pool to get.
:param session: SQLAlchemy ORM Session
:return: the pool object
|
python
|
airflow-core/src/airflow/models/pool.py
| 78
|
[
"pool_name",
"session"
] |
Pool | None
| true
| 1
| 6.88
|
apache/airflow
| 43,597
|
sphinx
| false
|
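A hedged sketch; this requires an initialized Airflow metadata database, and the module-level import below is assumed from this record's ``file_path`` (in some Airflow versions the helper is exposed on the ``Pool`` model instead).

```python
from airflow.models.pool import get_pool  # assumed import path

# "default_pool" exists in every standard Airflow installation.
pool = get_pool("default_pool")
if pool is not None:
    print(pool.pool, pool.slots)
```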
send
|
public RequestFuture<ClientResponse> send(Node node,
AbstractRequest.Builder<?> requestBuilder,
int requestTimeoutMs) {
long now = time.milliseconds();
RequestFutureCompletionHandler completionHandler = new RequestFutureCompletionHandler();
ClientRequest clientRequest = client.newClientRequest(node.idString(), requestBuilder, now, true,
requestTimeoutMs, completionHandler);
unsent.put(node, clientRequest);
// wakeup the client in case it is blocking in poll so that we can send the queued request
client.wakeup();
return completionHandler.future;
}
|
Send a new request. Note that the request is not actually transmitted on the
network until one of the {@link #poll(Timer)} variants is invoked. At this
point the request will either be transmitted successfully or will fail.
Use the returned future to obtain the result of the send. Note that there is no
need to check for disconnects explicitly on the {@link ClientResponse} object;
instead, the future will be failed with a {@link DisconnectException}.
@param node The destination of the request
@param requestBuilder A builder for the request payload
@param requestTimeoutMs Maximum time in milliseconds to await a response before disconnecting the socket and
cancelling the request. The request may be cancelled sooner if the socket disconnects
for any reason.
@return A future which indicates the result of the send.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java
| 126
|
[
"node",
"requestBuilder",
"requestTimeoutMs"
] | true
| 1
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
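A minimal usage sketch for the record above, assuming a ConsumerNetworkClient instance (networkClient), a target node, and a requestBuilder are already in scope; the blocking poll(RequestFuture) variant drives the queued request to completion:

// Hedged sketch: send() only queues the request; poll() performs the I/O.
RequestFuture<ClientResponse> future = networkClient.send(node, requestBuilder, 30_000);
networkClient.poll(future); // block until the future completes
if (future.succeeded()) {
    ClientResponse response = future.value();
    // handle response.responseBody() ...
} else {
    // disconnects surface here as a DisconnectException, not on the ClientResponse
    throw future.exception();
}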
onEmitNode
|
function onEmitNode(hint: EmitHint, node: Node, emitCallback: (hint: EmitHint, node: Node) => void): void {
if (node.kind === SyntaxKind.SourceFile) {
currentSourceFile = node as SourceFile;
currentModuleInfo = moduleInfoMap[getOriginalNodeId(currentSourceFile)];
previousOnEmitNode(hint, node, emitCallback);
currentSourceFile = undefined!;
currentModuleInfo = undefined!;
}
else {
previousOnEmitNode(hint, node, emitCallback);
}
}
|
Hook for node emit notifications.
@param hint A hint as to the intended usage of the node.
@param node The node to emit.
@param emitCallback A callback used to emit the node in the printer.
|
typescript
|
src/compiler/transformers/module/module.ts
| 2,259
|
[
"hint",
"node",
"emitCallback"
] | true
| 3
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
visitFunctionDeclaration
|
function visitFunctionDeclaration(node: FunctionDeclaration): VisitResult<Statement | undefined> {
let statements: Statement[] | undefined;
if (hasSyntacticModifier(node, ModifierFlags.Export)) {
statements = append(
statements,
setOriginalNode(
setTextRange(
factory.createFunctionDeclaration(
visitNodes(node.modifiers, modifierVisitor, isModifier),
node.asteriskToken,
factory.getDeclarationName(node, /*allowComments*/ true, /*allowSourceMaps*/ true),
/*typeParameters*/ undefined,
visitNodes(node.parameters, visitor, isParameter),
/*type*/ undefined,
visitEachChild(node.body, visitor, context),
),
/*location*/ node,
),
/*original*/ node,
),
);
}
else {
statements = append(statements, visitEachChild(node, visitor, context));
}
// NOTE: CommonJS/AMD/UMD exports are hoisted to the top of the module body and do not need to be added here.
return singleOrMany(statements);
}
|
Visits a FunctionDeclaration node.
@param node The node to visit.
|
typescript
|
src/compiler/transformers/module/module.ts
| 1,749
|
[
"node"
] | true
| 3
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
closeIfNotCached
|
void closeIfNotCached(URL jarFileUrl, JarFile jarFile) throws IOException {
JarFile cached = getCached(jarFileUrl);
if (cached != jarFile) {
jarFile.close();
}
}
|
Close the given {@link JarFile} only if it is not contained in the cache.
@param jarFileUrl the jar file URL
@param jarFile the jar file
@throws IOException on I/O error
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/jar/UrlJarFiles.java
| 105
|
[
"jarFileUrl",
"jarFile"
] |
void
| true
| 2
| 6.56
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
validate
|
private Map<String, ConfigValue> validate(Map<String, Object> parsed, Map<String, ConfigValue> configValues) {
Set<String> configsWithNoParent = getConfigsWithNoParent();
for (String name: configsWithNoParent) {
validate(name, parsed, configValues);
}
return configValues;
}
|
Validate the current configuration values with the configuration definition.
@param parsed the parsed and validated configuration values
@param configValues the map of configuration names to {@link ConfigValue} entries to update
@return the map of configuration names to updated {@link ConfigValue} entries, each containing
the updated configuration information given the current configuration values.
|
java
|
clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
| 595
|
[
"parsed",
"configValues"
] | true
| 1
| 6.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
isAlphaSpace
|
public static boolean isAlphaSpace(final CharSequence cs) {
if (cs == null) {
return false;
}
final int sz = cs.length();
for (int i = 0; i < sz; i++) {
final char nowChar = cs.charAt(i);
if (nowChar != ' ' && !Character.isLetter(nowChar)) {
return false;
}
}
return true;
}
|
Tests if the CharSequence contains only Unicode letters and space (' ').
<p>
{@code null} will return {@code false}. An empty CharSequence (length()=0) will return {@code true}.
</p>
<pre>
StringUtils.isAlphaSpace(null) = false
StringUtils.isAlphaSpace("") = true
StringUtils.isAlphaSpace(" ") = true
StringUtils.isAlphaSpace("abc") = true
StringUtils.isAlphaSpace("ab c") = true
StringUtils.isAlphaSpace("ab2c") = false
StringUtils.isAlphaSpace("ab-c") = false
</pre>
@param cs the CharSequence to check, may be null.
@return {@code true} if only contains letters and space, and is non-null.
@since 3.0 Changed signature from isAlphaSpace(String) to isAlphaSpace(CharSequence)
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 3,368
|
[
"cs"
] | true
| 5
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
onRefresh
|
@Override
public void onRefresh() {
if (checkpointOnRefresh) {
checkpointOnRefresh = false;
new CracDelegate().checkpointRestore();
}
if (exitOnRefresh) {
Runtime.getRuntime().halt(0);
}
this.stoppedBeans = null;
try {
startBeans(true);
}
catch (ApplicationContextException ex) {
// Some bean failed to auto-start within context refresh:
// stop already started beans on context refresh failure.
stopBeans(false);
throw ex;
}
this.running = true;
}
|
Start all registered beans that implement {@link Lifecycle} and are configured
for auto-startup, optionally performing a CRaC checkpoint/restore or halting the
JVM first when the corresponding flags are set. If any bean fails to auto-start
during the refresh, the beans already started are stopped before the
{@link ApplicationContextException} is rethrown.
|
java
|
spring-context/src/main/java/org/springframework/context/support/DefaultLifecycleProcessor.java
| 294
|
[] |
void
| true
| 4
| 7.04
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
parseInt
|
private static int parseInt(final String group) {
return group != null ? Integer.parseInt(group) : 0;
}
|
Parses the given regex group as an int, defaulting to 0 when the group is absent.
@param group the matched group, possibly null
@return the parsed int value, or 0 if the group is null
|
java
|
src/main/java/org/apache/commons/lang3/time/FastTimeZone.java
| 83
|
[
"group"
] | true
| 2
| 8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
leaveGroup
|
private CompletableFuture<Void> leaveGroup(final boolean isOnClose) {
if (isNotInGroup()) {
if (state == MemberState.FENCED) {
clearTaskAndPartitionAssignment();
transitionTo(MemberState.UNSUBSCRIBED);
}
subscriptionState.unsubscribe();
notifyAssignmentChange(Collections.emptySet());
return CompletableFuture.completedFuture(null);
}
if (state == MemberState.PREPARE_LEAVING || state == MemberState.LEAVING) {
log.debug("Leave group operation already in progress for member {}", memberId);
return leaveGroupInProgress.get();
}
transitionTo(MemberState.PREPARE_LEAVING);
CompletableFuture<Void> onGroupLeft = new CompletableFuture<>();
leaveGroupInProgress = Optional.of(onGroupLeft);
if (isOnClose) {
leaving();
} else {
CompletableFuture<Void> onAllActiveTasksReleasedCallbackExecuted = releaseActiveTasks();
onAllActiveTasksReleasedCallbackExecuted
.whenComplete((__, callbackError) -> leavingAfterReleasingActiveTasks(callbackError));
}
return onGroupLeft;
}
|
Leaves the group.
<p>
This method does the following:
<ol>
<li>Transitions member state to {@link MemberState#PREPARE_LEAVING}.</li>
<li>Requests the invocation of the revocation callback or lost callback.</li>
<li>Once the callback completes, it clears the current and target assignment, unsubscribes from
all topics and transitions the member state to {@link MemberState#LEAVING}.</li>
</ol>
States {@link MemberState#PREPARE_LEAVING} and {@link MemberState#LEAVING} cause the heartbeat request manager
to send a leave group heartbeat.
</p>
@return future that will complete when the revocation callback execution completes and the heartbeat
to leave the group has been sent out.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsMembershipManager.java
| 920
|
[
"isOnClose"
] | true
| 6
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
intersection
|
public ComposablePointcut intersection(ClassFilter other) {
this.classFilter = ClassFilters.intersection(this.classFilter, other);
return this;
}
|
Apply an intersection with the given ClassFilter.
@param other the ClassFilter to apply an intersection with
@return this composable pointcut (for call chaining)
|
java
|
spring-aop/src/main/java/org/springframework/aop/support/ComposablePointcut.java
| 126
|
[
"other"
] |
ComposablePointcut
| true
| 1
| 6.16
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
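Because intersection() mutates the pointcut and returns it, filters can be chained; a short sketch (the package prefix is illustrative):

import org.springframework.aop.ClassFilter;
import org.springframework.aop.support.ComposablePointcut;

// Each intersection() narrows the pointcut and returns it for further chaining.
ComposablePointcut pc = new ComposablePointcut()
        .intersection((ClassFilter) c -> c.getName().startsWith("com.example."))
        .intersection((ClassFilter) c -> !c.isInterface());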
shutdownAndAwaitTermination
|
@CanIgnoreReturnValue
@J2ktIncompatible
@GwtIncompatible // java.time.Duration
@IgnoreJRERequirement // Users will use this only if they're already using Duration.
public static boolean shutdownAndAwaitTermination(ExecutorService service, Duration timeout) {
return shutdownAndAwaitTermination(service, toNanosSaturated(timeout), TimeUnit.NANOSECONDS);
}
|
Shuts down the given executor service gradually, first disabling new submissions and later, if
necessary, cancelling remaining tasks.
<p>The method takes the following steps:
<ol>
<li>calls {@link ExecutorService#shutdown()}, disabling acceptance of new submitted tasks.
<li>awaits executor service termination for half of the specified timeout.
<li>if the timeout expires, it calls {@link ExecutorService#shutdownNow()}, cancelling
pending tasks and interrupting running tasks.
<li>awaits executor service termination for the other half of the specified timeout.
</ol>
<p>If, at any step of the process, the calling thread is interrupted, the method calls {@link
ExecutorService#shutdownNow()} and returns.
<p>For a version of this method that waits <i>indefinitely</i>, use {@link
ExecutorService#close}.
@param service the {@code ExecutorService} to shut down
@param timeout the maximum time to wait for the {@code ExecutorService} to terminate
@return {@code true} if the {@code ExecutorService} was terminated successfully, {@code false}
if the call timed out or was interrupted
@since 33.4.0 (but since 28.0 in the JRE flavor)
|
java
|
android/guava/src/com/google/common/util/concurrent/MoreExecutors.java
| 992
|
[
"service",
"timeout"
] | true
| 1
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
|
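A usage sketch; the submitted task is illustrative:

import java.time.Duration;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import com.google.common.util.concurrent.MoreExecutors;

ExecutorService pool = Executors.newFixedThreadPool(4);
pool.submit(() -> System.out.println("working")); // illustrative task
// Half the timeout is spent waiting politely, the other half after shutdownNow().
boolean terminated = MoreExecutors.shutdownAndAwaitTermination(pool, Duration.ofSeconds(10));
if (!terminated) {
    // timed out or interrupted: remaining tasks were cancelled
}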
baseRepeat
|
function baseRepeat(string, n) {
var result = '';
if (!string || n < 1 || n > MAX_SAFE_INTEGER) {
return result;
}
// Leverage the exponentiation by squaring algorithm for a faster repeat.
// See https://en.wikipedia.org/wiki/Exponentiation_by_squaring for more details.
do {
if (n % 2) {
result += string;
}
n = nativeFloor(n / 2);
if (n) {
string += string;
}
} while (n);
return result;
}
|
The base implementation of `_.repeat` which doesn't coerce arguments.
@private
@param {string} string The string to repeat.
@param {number} n The number of times to repeat the string.
@returns {string} Returns the repeated string.
|
javascript
|
lodash.js
| 3,954
|
[
"string",
"n"
] | false
| 6
| 6.4
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
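The squaring trick is easier to follow outside lodash's helper conventions; a hedged Java transcription of the same loop (names are mine):

// Repeat s n times in O(log n) concatenations via exponentiation by squaring.
static String repeat(String s, int n) {
    StringBuilder result = new StringBuilder();
    while (n > 0) {
        if ((n & 1) == 1) {
            result.append(s); // take the current power of s when this bit of n is set
        }
        n >>= 1;
        if (n > 0) {
            s = s + s; // square: s now holds twice as many copies
        }
    }
    return result.toString();
}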
computeIndex
|
public static long computeIndex(double value, int scale) {
checkScaleBounds(scale);
return Base2ExponentialHistogramIndexer.computeIndex(value, scale);
}
|
Provides the index of the bucket of the exponential histogram with the given scale that contains the provided value.
@param value the value to find the bucket for
@param scale the scale of the histogram
@return the index of the bucket
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialScaleUtils.java
| 269
|
[
"value",
"scale"
] | true
| 1
| 6.8
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
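A hedged usage sketch, assuming the usual base-2 exponential histogram convention in which index i at scale 0 covers the interval (2^i, 2^(i+1)]:

// At scale 0 the bucket base is 2: 10 lies in (8, 16], so the index is 3.
long i0 = ExponentialScaleUtils.computeIndex(10.0, 0); // 3
// Each +1 in scale halves every bucket: 10 lies in (2^3, 2^3.5], so the index is 6.
long i1 = ExponentialScaleUtils.computeIndex(10.0, 1); // 6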
createCollection
|
@SuppressWarnings({"rawtypes", "unchecked"})
protected Collection<Object> createCollection(Class<? extends Collection> collectionType, int initialCapacity) {
if (!collectionType.isInterface()) {
try {
return ReflectionUtils.accessibleConstructor(collectionType).newInstance();
}
catch (Throwable ex) {
throw new IllegalArgumentException(
"Could not instantiate collection class: " + collectionType.getName(), ex);
}
}
else if (List.class == collectionType) {
return new ArrayList<>(initialCapacity);
}
else if (SortedSet.class == collectionType) {
return new TreeSet<>();
}
else {
return new LinkedHashSet<>(initialCapacity);
}
}
|
Create a Collection of the given type, with the given
initial capacity (if supported by the Collection type).
@param collectionType a sub-interface of Collection
@param initialCapacity the initial capacity
@return the new Collection instance
|
java
|
spring-beans/src/main/java/org/springframework/beans/propertyeditors/CustomCollectionEditor.java
| 153
|
[
"collectionType",
"initialCapacity"
] | true
| 5
| 7.28
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
splitToStream
|
@IgnoreJRERequirement
public Stream<String> splitToStream(CharSequence sequence) {
// Can't use Streams.stream() from base
return StreamSupport.stream(split(sequence).spliterator(), false);
}
|
Splits {@code sequence} into string components and makes them available through a {@link
Stream}, which may be lazily evaluated. If you want an eagerly computed {@link List}, use
{@link #splitToList(CharSequence)}.
@param sequence the sequence of characters to split
@return a stream over the segments split from the parameter
@since 33.4.0 (but since 28.2 in the JRE flavor)
|
java
|
android/guava/src/com/google/common/base/Splitter.java
| 422
|
[
"sequence"
] | true
| 1
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
|
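A usage sketch combining the stream with the usual Splitter configuration:

import java.util.List;
import java.util.stream.Collectors;
import com.google.common.base.Splitter;

Splitter splitter = Splitter.on(',').trimResults().omitEmptyStrings();
List<String> parts = splitter.splitToStream("a, b,, c")
        .map(String::toUpperCase)
        .collect(Collectors.toList()); // [A, B, C]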
solvePnPRefine
|
static void solvePnPRefine(InputArray _objectPoints, InputArray _imagePoints,
InputArray _cameraMatrix, InputArray _distCoeffs,
InputOutputArray _rvec, InputOutputArray _tvec,
SolvePnPRefineMethod _flags,
TermCriteria _criteria=TermCriteria(TermCriteria::EPS+TermCriteria::COUNT, 20, FLT_EPSILON),
double _vvslambda=1)
{
CV_INSTRUMENT_REGION();
Mat opoints_ = _objectPoints.getMat(), ipoints_ = _imagePoints.getMat();
Mat opoints, ipoints;
opoints_.convertTo(opoints, CV_64F);
ipoints_.convertTo(ipoints, CV_64F);
int npoints = opoints.checkVector(3, CV_64F);
CV_Assert( npoints >= 3 && npoints == ipoints.checkVector(2, CV_64F) );
CV_Assert( !_rvec.empty() && !_tvec.empty() );
int rtype = _rvec.type(), ttype = _tvec.type();
Size rsize = _rvec.size(), tsize = _tvec.size();
CV_Assert( (rtype == CV_32FC1 || rtype == CV_64FC1) &&
(ttype == CV_32FC1 || ttype == CV_64FC1) );
CV_Assert( (rsize == Size(1, 3) || rsize == Size(3, 1)) &&
(tsize == Size(1, 3) || tsize == Size(3, 1)) );
Mat cameraMatrix0 = _cameraMatrix.getMat();
Mat distCoeffs0 = _distCoeffs.getMat();
Mat cameraMatrix = Mat_<double>(cameraMatrix0);
Mat distCoeffs = Mat_<double>(distCoeffs0);
if (_flags == SOLVEPNP_REFINE_LM)
{
Mat rvec0 = _rvec.getMat(), tvec0 = _tvec.getMat();
Mat rvec, tvec;
rvec0.convertTo(rvec, CV_64F);
tvec0.convertTo(tvec, CV_64F);
Mat params(6, 1, CV_64FC1);
for (int i = 0; i < 3; i++)
{
params.at<double>(i,0) = rvec.at<double>(i,0);
params.at<double>(i+3,0) = tvec.at<double>(i,0);
}
LMSolver::create(makePtr<SolvePnPRefineLMCallback>(opoints, ipoints, cameraMatrix, distCoeffs), _criteria.maxCount, _criteria.epsilon)->run(params);
params.rowRange(0, 3).convertTo(rvec0, rvec0.depth());
params.rowRange(3, 6).convertTo(tvec0, tvec0.depth());
}
else if (_flags == SOLVEPNP_REFINE_VVS)
{
Mat rvec0 = _rvec.getMat(), tvec0 = _tvec.getMat();
Mat rvec, tvec;
rvec0.convertTo(rvec, CV_64F);
tvec0.convertTo(tvec, CV_64F);
std::vector<Point2d> ipoints_normalized;
undistortPoints(ipoints, ipoints_normalized, cameraMatrix, distCoeffs);
Mat sd = Mat(ipoints_normalized).reshape(1, npoints*2);
Mat objectPoints0 = opoints.reshape(1, npoints);
Mat imagePoints0 = ipoints.reshape(1, npoints*2);
Mat L(npoints*2, 6, CV_64FC1), s(npoints*2, 1, CV_64FC1);
double residuals_1 = std::numeric_limits<double>::max(), residuals = 0;
Mat err;
Mat R;
Rodrigues(rvec, R);
for (int iter = 0; iter < _criteria.maxCount; iter++)
{
computeInteractionMatrixAndResiduals(objectPoints0, R, tvec, L, s);
err = s - sd;
Mat Lp = L.inv(cv::DECOMP_SVD);
Mat dq = -_vvslambda * Lp * err;
Mat R1, t1;
exponentialMapToSE3Inv(dq, R1, t1);
R = R1 * R;
tvec = R1 * tvec + t1;
residuals_1 = residuals;
Mat res = err.t()*err;
residuals = res.at<double>(0,0);
if (std::fabs(residuals - residuals_1) < _criteria.epsilon)
break;
}
Rodrigues(R, rvec);
rvec.convertTo(rvec0, rvec0.depth());
tvec.convertTo(tvec0, tvec0.depth());
}
}
|
@brief Refines an initial camera pose from 3D-2D point correspondences.
@param _rvec Input/output rotation vector, refined in place.
@param _tvec Input/output translation vector, refined in place.
@param _flags Refinement method: SOLVEPNP_REFINE_LM (Levenberg-Marquardt on the
reprojection error) or SOLVEPNP_REFINE_VVS (virtual visual servoing).
|
cpp
|
modules/calib3d/src/solvepnp.cpp
| 714
|
[
"_objectPoints",
"_imagePoints",
"_cameraMatrix",
"_distCoeffs",
"_rvec",
"_tvec",
"_flags"
] | true
| 15
| 6.96
|
opencv/opencv
| 85,374
|
doxygen
| false
|
|
peek
|
CompletedFetch peek() {
try {
lock.lock();
return completedFetches.peek();
} finally {
lock.unlock();
}
}
|
Return, without removing, the first completed fetch pending return to the user, if any.
This method is thread-safe. Has visibility for testing.
@return the head of the completed fetches queue, or {@code null} if the queue is empty
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchBuffer.java
| 134
|
[] |
CompletedFetch
| true
| 1
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
canFollowExportModifier
|
function canFollowExportModifier(): boolean {
return token() === SyntaxKind.AtToken
|| token() !== SyntaxKind.AsteriskToken
&& token() !== SyntaxKind.AsKeyword
&& token() !== SyntaxKind.OpenBraceToken
&& canFollowModifier();
}
|
Returns true if the current token may follow an `export` modifier. An `@` token always may;
`*`, `as`, and `{` may not, since they begin export declarations rather than modifier lists;
any other token defers to canFollowModifier().
|
typescript
|
src/compiler/parser.ts
| 2,794
|
[] | true
| 5
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
declareNamedObject
|
@Override
public <T> void declareNamedObject(BiConsumer<Value, T> consumer, NamedObjectParser<T, Context> namedObjectParser, ParseField field) {
BiFunction<XContentParser, Context, T> objectParser = (XContentParser p, Context c) -> {
try {
XContentParser.Token token = p.nextToken();
assert token == XContentParser.Token.FIELD_NAME;
String currentName = p.currentName();
try {
T namedObject = namedObjectParser.parse(p, c, currentName);
// consume the end object token
token = p.nextToken();
assert token == XContentParser.Token.END_OBJECT;
return namedObject;
} catch (Exception e) {
throw rethrowFieldParseFailure(field, p, currentName, e);
}
} catch (IOException e) {
throw wrapParseError(field, p, e, "error while parsing named object");
}
};
declareField((XContentParser p, Value v, Context c) -> consumer.accept(v, objectParser.apply(p, c)), field, ValueType.OBJECT);
}
|
Declares a field containing a single named object. The parser reads the object's field name
and hands it, together with the parser and context, to {@code namedObjectParser}; the parsed
object is then passed to {@code consumer} along with the target value.
@param consumer consumer invoked with the target value and the parsed named object
@param namedObjectParser parser used to build the named object from its name and contents
@param field the field to declare
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/ObjectParser.java
| 455
|
[
"consumer",
"namedObjectParser",
"field"
] |
void
| true
| 3
| 7.76
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
putByte
|
@CanIgnoreReturnValue
PrimitiveSink putByte(byte b);
|
Puts a byte into this sink.
@param b a byte
@return this instance
|
java
|
android/guava/src/com/google/common/hash/PrimitiveSink.java
| 36
|
[
"b"
] |
PrimitiveSink
| true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
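Hasher is the PrimitiveSink most callers meet; a short sketch of the chaining enabled by @CanIgnoreReturnValue:

import com.google.common.hash.HashCode;
import com.google.common.hash.Hasher;
import com.google.common.hash.Hashing;

Hasher hasher = Hashing.murmur3_128().newHasher();
HashCode hash = hasher
        .putByte((byte) 0x2A) // returns the sink, so puts can be chained
        .putInt(42)
        .hash();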
createBody
|
function createBody(block: Block | undefined, quotePreference: QuotePreference, ambient?: boolean) {
return ambient ? undefined :
getSynthesizedDeepClone(block, /*includeTrivia*/ false) || createStubbedMethodBody(quotePreference);
}
|
Creates a body for a synthesized member: {@code undefined} in ambient contexts, otherwise a
deep clone of the given block (without trivia), falling back to a stubbed method body.
|
typescript
|
src/services/codefixes/helpers.ts
| 356
|
[
"block",
"quotePreference",
"ambient?"
] | false
| 3
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
run_beam_command
|
def run_beam_command(
cmd: list[str],
log: Logger,
process_line_callback: Callable[[str], None] | None = None,
working_directory: str | None = None,
is_dataflow_job_id_exist_callback: Callable[[], bool] | None = None,
) -> None:
"""
Run pipeline command in subprocess.
:param cmd: Parts of the command to be run in subprocess
:param process_line_callback: Optional callback which can be used to process
stdout and stderr to detect job id
:param working_directory: Working directory
:param log: logger.
:param is_dataflow_job_id_exist_callback: Optional callback which returns True
    once the Dataflow job id has been detected
"""
log.info("Running command: %s", " ".join(shlex.quote(c) for c in cmd))
proc = subprocess.Popen(
cmd,
cwd=working_directory,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
)
# Waits for Apache Beam pipeline to complete.
log.info("Start waiting for Apache Beam process to complete.")
reads = [proc.stderr, proc.stdout]
while True:
# Wait for at least one available fd.
readable_fds, _, _ = select.select(reads, [], [], 5)
# select() returns empty lists on timeout
if not readable_fds:
log.info("Waiting for Apache Beam process to complete.")
continue
for readable_fd in readable_fds:
process_fd(proc, readable_fd, log, process_line_callback, is_dataflow_job_id_exist_callback)
if is_dataflow_job_id_exist_callback and is_dataflow_job_id_exist_callback():
return
if proc.poll() is not None:
break
# Corner case: check if more output was created between the last read and the process termination
for readable_fd in reads:
process_fd(proc, readable_fd, log, process_line_callback, is_dataflow_job_id_exist_callback)
log.info("Process exited with return code: %s", proc.returncode)
if proc.returncode != 0:
raise AirflowException(f"Apache Beam process failed with return code {proc.returncode}")
|
Run pipeline command in subprocess.
:param cmd: Parts of the command to be run in subprocess
:param process_line_callback: Optional callback which can be used to process
stdout and stderr to detect job id
:param working_directory: Working directory
:param log: logger.
:param is_dataflow_job_id_exist_callback: Optional callback which returns True
    once the Dataflow job id has been detected
|
python
|
providers/apache/beam/src/airflow/providers/apache/beam/hooks/beam.py
| 153
|
[
"cmd",
"log",
"process_line_callback",
"working_directory",
"is_dataflow_job_id_exist_callback"
] |
None
| true
| 9
| 6.72
|
apache/airflow
| 43,597
|
sphinx
| false
|
canBeModified
|
static bool canBeModified(ASTContext *Context, const Expr *E) {
if (E->getType().isConstQualified())
return false;
auto Parents = Context->getParents(*E);
if (Parents.size() != 1)
return true;
if (const auto *Cast = Parents[0].get<ImplicitCastExpr>()) {
if ((Cast->getCastKind() == CK_NoOp &&
ASTContext::hasSameType(Cast->getType(), E->getType().withConst())) ||
(Cast->getCastKind() == CK_LValueToRValue &&
!Cast->getType().isNull() && Cast->getType()->isFundamentalType()))
return false;
}
// FIXME: Make this function more generic.
return true;
}
|
Returns true unless it can be guaranteed that this element is not modified as a result of this usage.
|
cpp
|
clang-tools-extra/clang-tidy/modernize/LoopConvertCheck.cpp
| 497
|
[] | true
| 9
| 7.04
|
llvm/llvm-project
| 36,021
|
doxygen
| false
|
|
init_await
|
inline void init_await(Async<void>&& async) {
await_async(std::move(async));
}
|
A utility to start annotating at the top of the stack (e.g. the task which is added to
the fiber manager). A function that uses `init_await` instead of `await` must not
return an Async wrapper (again, enforced via static analysis).
|
cpp
|
folly/fibers/async/Async.h
| 154
|
[] | true
| 2
| 6.48
|
facebook/folly
| 30,157
|
doxygen
| false
|
|
concat
|
public static ByteSource concat(ByteSource... sources) {
return concat(ImmutableList.copyOf(sources));
}
|
Concatenates multiple {@link ByteSource} instances into a single source. Streams returned from
the source will contain the concatenated data from the streams of the underlying sources.
<p>Only one underlying stream will be open at a time. Closing the concatenated stream will
close the open underlying stream.
@param sources the sources to concatenate
@return a {@code ByteSource} containing the concatenated data
@throws NullPointerException if any of {@code sources} is {@code null}
@since 15.0
|
java
|
android/guava/src/com/google/common/io/ByteSource.java
| 412
|
[] |
ByteSource
| true
| 1
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
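A usage sketch; read() may throw IOException, so a real caller would handle or declare it:

import java.nio.charset.StandardCharsets;
import com.google.common.io.ByteSource;

ByteSource header = ByteSource.wrap("HDR:".getBytes(StandardCharsets.UTF_8));
ByteSource body = ByteSource.wrap("payload".getBytes(StandardCharsets.UTF_8));
// Streams from the result open only one underlying source at a time.
byte[] all = ByteSource.concat(header, body).read(); // "HDR:payload" as bytes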
capabilities
|
def capabilities(self) -> Capabilities:
"""
Return a dictionary of array API library capabilities.
The resulting dictionary has the following keys:
- **"boolean indexing"**: boolean indicating whether an array library
supports boolean indexing.
Dask support boolean indexing as long as both the index
and the indexed arrays have known shapes.
Note however that the output .shape and .size properties
will contain a non-compliant math.nan instead of None.
- **"data-dependent shapes"**: boolean indicating whether an array
library supports data-dependent output shapes.
Dask implements unique_values et al.
Note however that the output .shape and .size properties
will contain a non-compliant math.nan instead of None.
- **"max dimensions"**: integer indicating the maximum number of
dimensions supported by the array library.
See
https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html
for more details.
See Also
--------
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Returns
-------
capabilities : dict
A dictionary of array API library capabilities.
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.capabilities()
{'boolean indexing': True,
'data-dependent shapes': True,
'max dimensions': 64}
"""
return {
"boolean indexing": True,
"data-dependent shapes": True,
"max dimensions": 64,
}
|
Return a dictionary of array API library capabilities.
The resulting dictionary has the following keys:
- **"boolean indexing"**: boolean indicating whether an array library
supports boolean indexing.
Dask supports boolean indexing as long as both the index
and the indexed arrays have known shapes.
Note however that the output .shape and .size properties
will contain a non-compliant math.nan instead of None.
- **"data-dependent shapes"**: boolean indicating whether an array
library supports data-dependent output shapes.
Dask implements unique_values et al.
Note however that the output .shape and .size properties
will contain a non-compliant math.nan instead of None.
- **"max dimensions"**: integer indicating the maximum number of
dimensions supported by the array library.
See
https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html
for more details.
See Also
--------
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Returns
-------
capabilities : dict
A dictionary of array API library capabilities.
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.capabilities()
{'boolean indexing': True,
'data-dependent shapes': True,
'max dimensions': 64}
|
python
|
sklearn/externals/array_api_compat/dask/array/_info.py
| 90
|
[
"self"
] |
Capabilities
| true
| 1
| 6.64
|
scikit-learn/scikit-learn
| 64,340
|
unknown
| false
|
partitionsFor
|
@Override
public List<PartitionInfo> partitionsFor(String topic, Duration timeout) {
return delegate.partitionsFor(topic, timeout);
}
|
Get metadata about the partitions for a given topic. This method will issue a remote call to the server if it
does not already have any metadata about the given topic.
@param topic The topic to get partition metadata for
@param timeout The maximum of time to await topic metadata
@return The list of partitions, which will be empty when the given topic is not found
@throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this
function is called
@throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while
this function is called
@throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details
@throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the specified topic. See
the exception for more details
@throws org.apache.kafka.common.errors.TimeoutException if topic metadata cannot be fetched before expiration
of the passed timeout
@throws org.apache.kafka.common.KafkaException for any other unrecoverable errors
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
| 1,444
|
[
"topic",
"timeout"
] | true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
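A usage sketch; the topic name and the props configuration are illustrative:

import java.time.Duration;
import java.util.List;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;

try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
    List<PartitionInfo> partitions = consumer.partitionsFor("orders", Duration.ofSeconds(5));
    for (PartitionInfo p : partitions) {
        System.out.println(p.partition() + " -> leader " + p.leader());
    }
}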
generateSetBeanInstanceSupplierCode
|
CodeBlock generateSetBeanInstanceSupplierCode(
GenerationContext generationContext, BeanRegistrationCode beanRegistrationCode,
CodeBlock instanceSupplierCode, List<MethodReference> postProcessors);
|
Generate the code that sets the instance supplier on the bean definition.
<p>The {@code postProcessors} represent methods to be exposed once the
instance has been created to further configure it. Each method should
accept two parameters, the {@link RegisteredBean} and the bean
instance, and should return the modified bean instance.
@param generationContext the generation context
@param beanRegistrationCode the bean registration code
@param instanceSupplierCode the instance supplier code
@param postProcessors any instance post processors that should be applied
@return the generated code
@see #generateInstanceSupplierCode
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/BeanRegistrationCodeFragments.java
| 108
|
[
"generationContext",
"beanRegistrationCode",
"instanceSupplierCode",
"postProcessors"
] |
CodeBlock
| true
| 1
| 6.16
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
substringsBetween
|
public static String[] substringsBetween(final String str, final String open, final String close) {
if (str == null || isEmpty(open) || isEmpty(close)) {
return null;
}
final int strLen = str.length();
if (strLen == 0) {
return ArrayUtils.EMPTY_STRING_ARRAY;
}
final int closeLen = close.length();
final int openLen = open.length();
final List<String> list = new ArrayList<>();
int pos = 0;
while (pos < strLen - closeLen) {
int start = str.indexOf(open, pos);
if (start < 0) {
break;
}
start += openLen;
final int end = str.indexOf(close, start);
if (end < 0) {
break;
}
list.add(str.substring(start, end));
pos = end + closeLen;
}
if (list.isEmpty()) {
return null;
}
return list.toArray(ArrayUtils.EMPTY_STRING_ARRAY);
}
|
Searches a String for substrings delimited by a start and end tag, returning all matching substrings in an array.
<p>
A {@code null} input String returns {@code null}. A {@code null} open/close returns {@code null} (no match). An empty ("") open/close returns
{@code null} (no match).
</p>
<pre>
StringUtils.substringsBetween("[a][b][c]", "[", "]") = ["a","b","c"]
StringUtils.substringsBetween(null, *, *) = null
StringUtils.substringsBetween(*, null, *) = null
StringUtils.substringsBetween(*, *, null) = null
StringUtils.substringsBetween("", "[", "]") = []
</pre>
@param str the String containing the substrings, null returns null, empty returns empty.
@param open the String identifying the start of the substring, empty returns null.
@param close the String identifying the end of the substring, empty returns null.
@return a String Array of substrings, or {@code null} if no match.
@since 2.3
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 8,543
|
[
"str",
"open",
"close"
] | true
| 9
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
validIndex
|
public static <T> T[] validIndex(final T[] array, final int index, final String message, final Object... values) {
Objects.requireNonNull(array, "array");
if (index < 0 || index >= array.length) {
throw new IndexOutOfBoundsException(getMessage(message, values));
}
return array;
}
|
Validates that the index is within the bounds of the argument
array; otherwise throwing an exception with the specified message.
<pre>Validate.validIndex(myArray, 2, "The array index is invalid: ");</pre>
<p>If the array is {@code null}, then the message of the exception
is "The validated object is null".</p>
@param <T> the array type.
@param array the array to check, validated not null by this method.
@param index the index to check.
@param message the {@link String#format(String, Object...)} exception message if invalid, not null.
@param values the optional values for the formatted exception message, null array not recommended.
@return the validated array (never {@code null} for method chaining).
@throws NullPointerException if the array is {@code null}.
@throws IndexOutOfBoundsException if the index is invalid.
@see #validIndex(Object[], int)
@since 3.0
|
java
|
src/main/java/org/apache/commons/lang3/Validate.java
| 1,221
|
[
"array",
"index",
"message"
] | true
| 3
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
appendQuotedString
|
private StringBuilder appendQuotedString(final String pattern, final ParsePosition pos,
final StringBuilder appendTo) {
assert pattern.toCharArray()[pos.getIndex()] == QUOTE :
"Quoted string must start with quote character";
// handle quote character at the beginning of the string
if (appendTo != null) {
appendTo.append(QUOTE);
}
next(pos);
final int start = pos.getIndex();
final char[] c = pattern.toCharArray();
for (int i = pos.getIndex(); i < pattern.length(); i++) {
if (c[pos.getIndex()] == QUOTE) {
next(pos);
return appendTo == null ? null : appendTo.append(c, start,
pos.getIndex() - start);
}
next(pos);
}
throw new IllegalArgumentException(
"Unterminated quoted string at position " + start);
}
|
Consume a quoted string, adding it to {@code appendTo} if
specified.
@param pattern pattern to parse
@param pos current parse position
@param appendTo optional StringBuilder to append
@return {@code appendTo}
|
java
|
src/main/java/org/apache/commons/lang3/text/ExtendedMessageFormat.java
| 148
|
[
"pattern",
"pos",
"appendTo"
] |
StringBuilder
| true
| 5
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
parse
|
public static Duration parse(String value, DurationFormat.Style style) {
return parse(value, style, null);
}
|
Parse the given value to a duration.
@param value the value to parse
@param style the style in which to parse
@return a duration
|
java
|
spring-context/src/main/java/org/springframework/format/datetime/standard/DurationFormatterUtils.java
| 84
|
[
"value",
"style"
] |
Duration
| true
| 1
| 6.96
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
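A hedged usage sketch; the Style constants are assumed to come from Spring's DurationFormat annotation:

import java.time.Duration;
import org.springframework.format.annotation.DurationFormat;
import org.springframework.format.datetime.standard.DurationFormatterUtils;

Duration d1 = DurationFormatterUtils.parse("200ms", DurationFormat.Style.SIMPLE);   // PT0.2S
Duration d2 = DurationFormatterUtils.parse("PT0.2S", DurationFormat.Style.ISO8601); // PT0.2S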
applyRules
|
@Deprecated
protected StringBuffer applyRules(final Calendar calendar, final StringBuffer buf) {
return (StringBuffer) applyRules(calendar, (Appendable) buf);
}
|
Performs the formatting by applying the rules to the
specified calendar.
@param calendar the calendar to format.
@param buf the buffer to format into.
@return the specified string buffer.
@deprecated Use {@link #format(Calendar)} or {@link #format(Calendar, Appendable)}
|
java
|
src/main/java/org/apache/commons/lang3/time/FastDatePrinter.java
| 1,086
|
[
"calendar",
"buf"
] |
StringBuffer
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
close
|
@SuppressWarnings("IdentifierName")
public static void close(@Nullable Closeable closeable, boolean swallowIOException)
throws IOException {
if (closeable == null) {
return;
}
try {
closeable.close();
} catch (IOException e) {
if (swallowIOException) {
logger.log(Level.WARNING, "IOException thrown while closing Closeable.", e);
} else {
throw e;
}
}
}
|
Closes a {@link Closeable}, with control over whether an {@code IOException} may be thrown.
This is primarily useful in a finally block, where a thrown exception needs to be logged but
not propagated (otherwise the original exception will be lost).
<p>If {@code swallowIOException} is true then we never throw {@code IOException} but merely log
it.
<p>Example:
{@snippet :
public void useStreamNicely() throws IOException {
SomeStream stream = new SomeStream("foo");
boolean threw = true;
try {
// ... code which does something with the stream ...
threw = false;
} finally {
// If an exception occurs, rethrow it only if threw==false:
Closeables.close(stream, threw);
}
}
}
@param closeable the {@code Closeable} object to be closed, or null, in which case this method
does nothing
@param swallowIOException if true, don't propagate IO exceptions thrown by the {@code close}
methods
@throws IOException if {@code swallowIOException} is false and {@code close} throws an {@code
IOException}.
|
java
|
android/guava/src/com/google/common/io/Closeables.java
| 81
|
[
"closeable",
"swallowIOException"
] |
void
| true
| 4
| 8.32
|
google/guava
| 51,352
|
javadoc
| false
|
afterPropertiesSet
|
@Override
public final void afterPropertiesSet() throws IOException {
if (this.singleton) {
this.singletonInstance = createProperties();
}
}
|
Eagerly creates the shared singleton Properties instance on initialization,
if running in singleton mode (the default).
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/PropertiesFactoryBean.java
| 70
|
[] |
void
| true
| 2
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
count
|
def count(self) -> int:
"""
Return number of non-NA/null observations in the Series.
Returns
-------
int
Number of non-null values in the Series.
See Also
--------
DataFrame.count : Count non-NA cells for each column or row.
Examples
--------
>>> s = pd.Series([0.0, 1.0, np.nan])
>>> s.count()
2
"""
return maybe_unbox_numpy_scalar(notna(self._values).sum().astype("int64"))
|
Return number of non-NA/null observations in the Series.
Returns
-------
int
Number of non-null values in the Series.
See Also
--------
DataFrame.count : Count non-NA cells for each column or row.
Examples
--------
>>> s = pd.Series([0.0, 1.0, np.nan])
>>> s.count()
2
|
python
|
pandas/core/series.py
| 2,064
|
[
"self"
] |
int
| true
| 1
| 6.08
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
first
|
def first(
self,
numeric_only: bool = False,
min_count: int = 0,
skipna: bool = True,
):
"""
Compute the first non-null entry of each column.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer
than ``min_count`` non-NA values are present the result will be NA.
skipna : bool, default True
Exclude NA/null values. If an entire group is NA, the result will be NA.
Returns
-------
Series or DataFrame
First values within each group.
See Also
--------
core.resample.Resampler.last : Compute the last non-null value in each group.
core.resample.Resampler.mean : Compute mean of groups, excluding missing values.
Examples
--------
>>> s = pd.Series(
... [1, 2, 3, 4],
... index=pd.DatetimeIndex(
... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
... ),
... )
>>> s
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 4
dtype: int64
>>> s.resample("MS").first()
2023-01-01 1
2023-02-01 3
Freq: MS, dtype: int64
"""
return self._downsample(
"first", numeric_only=numeric_only, min_count=min_count, skipna=skipna
)
|
Compute the first non-null entry of each column.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer
than ``min_count`` non-NA values are present the result will be NA.
skipna : bool, default True
Exclude NA/null values. If an entire group is NA, the result will be NA.
Returns
-------
Series or DataFrame
First values within each group.
See Also
--------
core.resample.Resampler.last : Compute the last non-null value in each group.
core.resample.Resampler.mean : Compute mean of groups, excluding missing values.
Examples
--------
>>> s = pd.Series(
... [1, 2, 3, 4],
... index=pd.DatetimeIndex(
... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
... ),
... )
>>> s
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 4
dtype: int64
>>> s.resample("MS").first()
2023-01-01 1
2023-02-01 3
Freq: MS, dtype: int64
|
python
|
pandas/core/resample.py
| 1,323
|
[
"self",
"numeric_only",
"min_count",
"skipna"
] | true
| 1
| 7.28
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
complete
|
@Override
public boolean complete(T value) {
throw erroneousCompletionException();
}
|
Attempts to complete this future with the given value. The Kafka clients complete futures
internally through a separate path, so this override always throws to prevent erroneous
completion by user code.
@param value the value to complete with (never applied)
@return never returns normally; always throws
|
java
|
clients/src/main/java/org/apache/kafka/common/internals/KafkaCompletableFuture.java
| 52
|
[
"value"
] | true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
generateKey
|
protected Object generateKey(CacheOperationInvocationContext<O> context) {
KeyGenerator keyGenerator = context.getOperation().getKeyGenerator();
Object key = keyGenerator.generate(context.getTarget(), context.getMethod(), context.getArgs());
if (logger.isTraceEnabled()) {
logger.trace("Computed cache key " + key + " for operation " + context.getOperation());
}
return key;
}
|
Generate a key for the specified invocation.
@param context the context of the invocation
@return the key to use
|
java
|
spring-context-support/src/main/java/org/springframework/cache/jcache/interceptor/AbstractKeyCacheInterceptor.java
| 49
|
[
"context"
] |
Object
| true
| 2
| 8.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
start
|
def start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
This property returns the starting value of the `RangeIndex`. If the `start`
value is not explicitly provided during the creation of the `RangeIndex`,
it defaults to 0.
See Also
--------
RangeIndex : Immutable index implementing a range-based index.
RangeIndex.stop : Returns the stop value of the `RangeIndex`.
RangeIndex.step : Returns the step value of the `RangeIndex`.
Examples
--------
>>> idx = pd.RangeIndex(5)
>>> idx.start
0
>>> idx = pd.RangeIndex(2, -10, -3)
>>> idx.start
2
"""
# GH 25710
return self._range.start
|
The value of the `start` parameter (``0`` if this was not supplied).
This property returns the starting value of the `RangeIndex`. If the `start`
value is not explicitly provided during the creation of the `RangeIndex`,
it defaults to 0.
See Also
--------
RangeIndex : Immutable index implementing a range-based index.
RangeIndex.stop : Returns the stop value of the `RangeIndex`.
RangeIndex.step : Returns the step value of the `RangeIndex`.
Examples
--------
>>> idx = pd.RangeIndex(5)
>>> idx.start
0
>>> idx = pd.RangeIndex(2, -10, -3)
>>> idx.start
2
|
python
|
pandas/core/indexes/range.py
| 316
|
[
"self"
] |
int
| true
| 1
| 7.44
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
splitPreserveAllTokens
|
public static String[] splitPreserveAllTokens(final String str, final char separatorChar) {
return splitWorker(str, separatorChar, true);
}
|
Splits the provided text into an array, separator specified, preserving all tokens, including empty tokens created by adjacent separators. This is an
alternative to using StringTokenizer.
<p>
The separator is not included in the returned String array. Adjacent separators are treated as separators for empty tokens. For more control over the
split use the StrTokenizer class.
</p>
<p>
A {@code null} input String returns {@code null}.
</p>
<pre>
StringUtils.splitPreserveAllTokens(null, *) = null
StringUtils.splitPreserveAllTokens("", *) = []
StringUtils.splitPreserveAllTokens("a.b.c", '.') = ["a", "b", "c"]
StringUtils.splitPreserveAllTokens("a..b.c", '.') = ["a", "", "b", "c"]
StringUtils.splitPreserveAllTokens("a:b:c", '.') = ["a:b:c"]
StringUtils.splitPreserveAllTokens("a\tb\nc", null) = ["a", "b", "c"]
StringUtils.splitPreserveAllTokens("a b c", ' ') = ["a", "b", "c"]
StringUtils.splitPreserveAllTokens("a b c ", ' ') = ["a", "b", "c", ""]
StringUtils.splitPreserveAllTokens("a b c ", ' ') = ["a", "b", "c", "", ""]
StringUtils.splitPreserveAllTokens(" a b c", ' ') = ["", "a", "b", "c"]
StringUtils.splitPreserveAllTokens(" a b c", ' ') = ["", "", "a", "b", "c"]
StringUtils.splitPreserveAllTokens(" a b c ", ' ') = ["", "a", "b", "c", ""]
</pre>
@param str the String to parse, may be {@code null}.
@param separatorChar the character used as the delimiter.
@return an array of parsed Strings, {@code null} if null String input.
@since 2.1
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 7,472
|
[
"str",
"separatorChar"
] | true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
retainsBufferReference
|
private static boolean retainsBufferReference(Schema schema) {
final AtomicBoolean hasBuffer = new AtomicBoolean(false);
Schema.Visitor detector = new Schema.Visitor() {
@Override
public void visit(Type field) {
// avoid BooleanExpressionComplexity checkstyle warning
boolean isBytesType = field == BYTES || field == NULLABLE_BYTES ||
field == COMPACT_BYTES || field == COMPACT_NULLABLE_BYTES;
boolean isRecordsType = field == RECORDS || field == NULLABLE_RECORDS ||
field == COMPACT_RECORDS || field == COMPACT_NULLABLE_RECORDS;
if (isBytesType || isRecordsType) {
hasBuffer.set(true);
}
}
};
schema.walk(detector);
return hasBuffer.get();
}
|
Returns true if the schema contains a field whose type is a bytes or records variant
(including the compact and nullable forms), meaning a parsed instance may retain a
reference to the underlying input buffer.
|
java
|
clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java
| 342
|
[
"schema"
] | true
| 9
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
replaceFrom
|
public String replaceFrom(CharSequence sequence, CharSequence replacement) {
int replacementLen = replacement.length();
if (replacementLen == 0) {
return removeFrom(sequence);
}
if (replacementLen == 1) {
return replaceFrom(sequence, replacement.charAt(0));
}
String string = sequence.toString();
int pos = indexIn(string);
if (pos == -1) {
return string;
}
int len = string.length();
StringBuilder buf = new StringBuilder((len * 3 / 2) + 16);
int oldpos = 0;
do {
buf.append(string, oldpos, pos);
buf.append(replacement);
oldpos = pos + 1;
pos = indexIn(string, oldpos);
} while (pos != -1);
buf.append(string, oldpos, len);
return buf.toString();
}
|
Returns a string copy of the input character sequence, with each matching BMP character
replaced by a given replacement sequence. For example:
{@snippet :
CharMatcher.is('a').replaceFrom("yaha", "oo")
}
... returns {@code "yoohoo"}.
<p><b>Note:</b> If the replacement is a fixed string with only one character, you are better
off calling {@link #replaceFrom(CharSequence, char)} directly.
@param sequence the character sequence to replace matching characters in
@param replacement the characters to append to the result string in place of each matching
character in {@code sequence}
@return the new string
|
java
|
android/guava/src/com/google/common/base/CharMatcher.java
| 714
|
[
"sequence",
"replacement"
] |
String
| true
| 4
| 7.6
|
google/guava
| 51,352
|
javadoc
| false
|
endsWith
|
function endsWith(string, target, position) {
string = toString(string);
target = baseToString(target);
var length = string.length;
position = position === undefined
? length
: baseClamp(toInteger(position), 0, length);
var end = position;
position -= target.length;
return position >= 0 && string.slice(position, end) == target;
}
|
Checks if `string` ends with the given target string.
@static
@memberOf _
@since 3.0.0
@category String
@param {string} [string=''] The string to inspect.
@param {string} [target] The string to search for.
@param {number} [position=string.length] The position to search up to.
@returns {boolean} Returns `true` if `string` ends with `target`,
else `false`.
@example
_.endsWith('abc', 'c');
// => true
_.endsWith('abc', 'b');
// => false
_.endsWith('abc', 'b', 2);
// => true
|
javascript
|
lodash.js
| 14,316
|
[
"string",
"target",
"position"
] | false
| 3
| 7.52
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
requireNonEmpty
|
public static <T> T requireNonEmpty(final T obj, final String message) {
// check for null first to give the most precise exception.
Objects.requireNonNull(obj, message);
if (isEmpty(obj)) {
throw new IllegalArgumentException(message);
}
return obj;
}
|
Checks that the specified object reference is not {@code null} or empty per {@link #isEmpty(Object)}. Use this
method for validation, for example:
<pre>
public Foo(Bar bar) {
this.bar = Objects.requireNonEmpty(bar, "bar");
}
</pre>
@param <T> the type of the reference.
@param obj the object reference to check for nullity.
@param message the exception message.
@return {@code obj} if not {@code null}.
@throws NullPointerException if {@code obj} is {@code null}.
@throws IllegalArgumentException if {@code obj} is empty per {@link #isEmpty(Object)}.
@see #isEmpty(Object)
@since 3.12.0
|
java
|
src/main/java/org/apache/commons/lang3/ObjectUtils.java
| 1,209
|
[
"obj",
"message"
] |
T
| true
| 2
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
maybeTriggerWakeup
|
public void maybeTriggerWakeup() {
final AtomicBoolean throwWakeupException = new AtomicBoolean(false);
pendingTask.getAndUpdate(task -> {
if (task == null) {
return null;
} else if (task instanceof WakeupFuture) {
throwWakeupException.set(true);
return null;
} else {
return task;
}
});
if (throwWakeupException.get()) {
throw new WakeupException();
}
}
|
Check whether a wakeup was triggered while no task was active. If the pending task is a
WakeupFuture, clear it and throw a WakeupException; otherwise leave any active task in place.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/WakeupTrigger.java
| 145
|
[] |
void
| true
| 4
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
map
|
def map(self, mapper, na_action: Literal["ignore"] | None = None) -> Self:
"""
Map categories using an input mapping or function.
Parameters
----------
mapper : dict, Series, callable
The correspondence from old values to new.
na_action : {None, 'ignore'}, default None
If 'ignore', propagate NA values, without passing them to the
mapping correspondence.
Returns
-------
SparseArray
The output array will have the same density as the input.
The output fill value will be the result of applying the
mapping to ``self.fill_value``
Examples
--------
>>> arr = pd.arrays.SparseArray([0, 1, 2])
>>> arr.map(lambda x: x + 10)
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
>>> arr.map({0: 10, 1: 11, 2: 12})
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
>>> arr.map(pd.Series([10, 11, 12], index=[0, 1, 2]))
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
"""
is_map = isinstance(mapper, (abc.Mapping, ABCSeries))
fill_val = self.fill_value
if na_action is None or notna(fill_val):
fill_val = mapper.get(fill_val, fill_val) if is_map else mapper(fill_val)
def func(sp_val):
new_sp_val = mapper.get(sp_val, None) if is_map else mapper(sp_val)
# check identity and equality because nans are not equal to each other
if new_sp_val is fill_val or new_sp_val == fill_val:
msg = "fill value in the sparse values not supported"
raise ValueError(msg)
return new_sp_val
sp_values = [func(x) for x in self.sp_values]
return type(self)(sp_values, sparse_index=self.sp_index, fill_value=fill_val)
|
Map categories using an input mapping or function.
Parameters
----------
mapper : dict, Series, callable
The correspondence from old values to new.
na_action : {None, 'ignore'}, default None
If 'ignore', propagate NA values, without passing them to the
mapping correspondence.
Returns
-------
SparseArray
The output array will have the same density as the input.
The output fill value will be the result of applying the
mapping to ``self.fill_value``
Examples
--------
>>> arr = pd.arrays.SparseArray([0, 1, 2])
>>> arr.map(lambda x: x + 10)
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
>>> arr.map({0: 10, 1: 11, 2: 12})
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
>>> arr.map(pd.Series([10, 11, 12], index=[0, 1, 2]))
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
|
python
|
pandas/core/arrays/sparse/array.py
| 1,339
|
[
"self",
"mapper",
"na_action"
] |
Self
| true
| 7
| 8.48
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
listTransactions
|
@Override
public ListTransactionsResult listTransactions(ListTransactionsOptions options) {
AllBrokersStrategy.AllBrokersFuture<Collection<TransactionListing>> future =
ListTransactionsHandler.newFuture();
ListTransactionsHandler handler = new ListTransactionsHandler(options, logContext);
invokeDriver(handler, future, options.timeoutMs);
return new ListTransactionsResult(future.all());
}
|
Lists the transactions known to the brokers in the cluster, subject to the filters
in the given options. The request is fanned out to all brokers.
@param options the options to use when listing transactions, including filters and the timeout.
@return a {@link ListTransactionsResult} that can be used to await the operation result.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
| 4,865
|
[
"options"
] |
ListTransactionsResult
| true
| 1
| 6.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
isNested
|
boolean isNested(MetadataGenerationEnvironment environment) {
Element typeElement = environment.getTypeUtils().asElement(getType());
if (!(typeElement instanceof TypeElement) || typeElement.getKind() == ElementKind.ENUM
|| environment.getConfigurationPropertiesAnnotation(getGetter()) != null) {
return false;
}
if (isMarkedAsNested(environment)) {
return true;
}
return !isCyclePresent(typeElement, getDeclaringElement())
&& isParentTheSame(environment, typeElement, getDeclaringElement());
}
|
Return whether this is a nested property.
@param environment the metadata generation environment
@return whether the property is nested
@see #isMarkedAsNested(MetadataGenerationEnvironment)
|
java
|
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/PropertyDescriptor.java
| 126
|
[
"environment"
] | true
| 6
| 7.12
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
getIfElseOccurrences
|
function getIfElseOccurrences(ifStatement: IfStatement, sourceFile: SourceFile): HighlightSpan[] {
const keywords = getIfElseKeywords(ifStatement, sourceFile);
const result: HighlightSpan[] = [];
// We'd like to highlight else/ifs together if they are only separated by whitespace
// (i.e. the keywords are separated by no comments, no newlines).
for (let i = 0; i < keywords.length; i++) {
if (keywords[i].kind === SyntaxKind.ElseKeyword && i < keywords.length - 1) {
const elseKeyword = keywords[i];
const ifKeyword = keywords[i + 1]; // this *should* always be an 'if' keyword.
let shouldCombineElseAndIf = true;
// Avoid recalculating getStart() by iterating backwards.
for (let j = ifKeyword.getStart(sourceFile) - 1; j >= elseKeyword.end; j--) {
if (!isWhiteSpaceSingleLine(sourceFile.text.charCodeAt(j))) {
shouldCombineElseAndIf = false;
break;
}
}
if (shouldCombineElseAndIf) {
result.push({
fileName: sourceFile.fileName,
textSpan: createTextSpanFromBounds(elseKeyword.getStart(), ifKeyword.end),
kind: HighlightSpanKind.reference,
});
i++; // skip the next keyword
continue;
}
}
// Ordinary case: just highlight the keyword.
result.push(getHighlightSpanForNode(keywords[i], sourceFile));
}
return result;
}
|
Returns document-highlight spans for the keywords of an if/else chain. An 'else' keyword and
an immediately following 'if' keyword are merged into a single span when only single-line
whitespace separates them; otherwise each keyword gets its own span.
|
typescript
|
src/services/documentHighlights.ts
| 523
|
[
"ifStatement",
"sourceFile"
] | true
| 7
| 6
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
contextLabels
|
Map<String, String> contextLabels();
|
Returns the labels for this metrics context.
@return the map of label keys and values; never null but possibly empty
|
java
|
clients/src/main/java/org/apache/kafka/common/metrics/MetricsContext.java
| 52
|
[] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
deprecate_nonkeyword_arguments
|
def deprecate_nonkeyword_arguments(
klass: type[PandasChangeWarning],
allowed_args: list[str] | None = None,
name: str | None = None,
) -> Callable[[F], F]:
"""
Decorator to deprecate use of non-keyword arguments of a function.
Parameters
----------
klass : Warning
The warning class to use.
allowed_args : list, optional
If a list, the names of the leading arguments of the decorated
function that may still be passed positionally. If None, defaults
to the names of all arguments that do not have a default value.
name : str, optional
The specific name of the function to show in the warning
message. If None, the qualified name of the function is used.
"""
def decorate(func):
old_sig = inspect.signature(func)
if allowed_args is not None:
allow_args = allowed_args
else:
allow_args = [
p.name
for p in old_sig.parameters.values()
if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)
and p.default is p.empty
]
new_params = [
p.replace(kind=p.KEYWORD_ONLY)
if (
p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)
and p.name not in allow_args
)
else p
for p in old_sig.parameters.values()
]
new_params.sort(key=lambda p: p.kind)
new_sig = old_sig.replace(parameters=new_params)
num_allow_args = len(allow_args)
msg = (
f"{future_version_msg(klass.version())} all arguments of "
f"{name or func.__qualname__}{{arguments}} will be keyword-only."
)
@wraps(func)
def wrapper(*args, **kwargs):
if len(args) > num_allow_args:
warnings.warn(
msg.format(arguments=_format_argument_list(allow_args)),
klass,
stacklevel=find_stack_level(),
)
return func(*args, **kwargs)
# error: "Callable[[VarArg(Any), KwArg(Any)], Any]" has no
# attribute "__signature__"
wrapper.__signature__ = new_sig # type: ignore[attr-defined]
return wrapper
return decorate
|
Decorator to deprecate use of non-keyword arguments of a function.
Parameters
----------
klass : Warning
The warning class to use.
allowed_args : list, optional
If a list, the names of the leading arguments of the decorated
function that may still be passed positionally. If None, defaults
to the names of all arguments that do not have a default value.
name : str, optional
The specific name of the function to show in the warning
message. If None, the qualified name of the function is used.
|
python
|
pandas/util/_decorators.py
| 273
|
[
"klass",
"allowed_args",
"name"
] |
Callable[[F], F]
| true
| 8
| 6.96
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
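The decorator's core technique is rewriting the wrapped function's signature so positional-or-keyword parameters become keyword-only, then warning when too many positional arguments arrive. A minimal standalone sketch of that idea; keyword_only_after and resample are hypothetical names, not the pandas implementation:

import warnings
from functools import wraps

def keyword_only_after(n):
    # Warn when more than `n` arguments are passed positionally.
    def decorate(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            if len(args) > n:
                warnings.warn(
                    f"Arguments of {func.__qualname__} beyond the first {n} "
                    "will become keyword-only in a future version.",
                    FutureWarning,
                    stacklevel=2,
                )
            return func(*args, **kwargs)
        return wrapper
    return decorate

@keyword_only_after(1)
def resample(data, rule="D"):
    return data, rule

resample([1, 2], "D")       # warns: pass `rule` by keyword in the future
resample([1, 2], rule="D")  # no warning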
get_config
|
def get_config():
"""Retrieve the current scikit-learn configuration.
This reflects the effective global configurations as established by default upon
library import, or modified via :func:`set_config` or :func:`config_context`.
Returns
-------
config : dict
Keys are parameter names that can be passed to :func:`set_config`.
See Also
--------
config_context : Context manager for global scikit-learn configuration.
set_config : Set global scikit-learn configuration.
Examples
--------
>>> import sklearn
>>> config = sklearn.get_config()
>>> config.keys()
dict_keys([...])
"""
# Return a copy of the threadlocal configuration so that users will
# not be able to modify the configuration with the returned dict.
return _get_threadlocal_config().copy()
|
Retrieve the current scikit-learn configuration.
This reflects the effective global configurations as established by default upon
library import, or modified via :func:`set_config` or :func:`config_context`.
Returns
-------
config : dict
Keys are parameter names that can be passed to :func:`set_config`.
See Also
--------
config_context : Context manager for global scikit-learn configuration.
set_config : Set global scikit-learn configuration.
Examples
--------
>>> import sklearn
>>> config = sklearn.get_config()
>>> config.keys()
dict_keys([...])
|
python
|
sklearn/_config.py
| 35
|
[] | false
| 1
| 6
|
scikit-learn/scikit-learn
| 64,340
|
unknown
| false
|
|
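Because get_config returns a copy of the thread-local state, mutating the returned dict cannot change the effective configuration; only set_config (or config_context) can. A short illustration using the real assume_finite option:

import sklearn

sklearn.set_config(assume_finite=True)
assert sklearn.get_config()["assume_finite"] is True

# The returned dict is a copy: mutating it leaves the real config untouched.
cfg = sklearn.get_config()
cfg["assume_finite"] = False
assert sklearn.get_config()["assume_finite"] is True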
maybeExpire
|
boolean maybeExpire() {
return numAttempts > 0 && isExpired();
}
|
Returns true if this request has been attempted at least once and has since expired,
in which case it should not be retried further.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java
| 1,403
|
[] | true
| 2
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
mean
|
def mean(
self,
axis: Axis | None = 0,
skipna: bool = True,
numeric_only: bool = False,
**kwargs,
) -> Any:
"""
Return the mean of the values over the requested axis.
Parameters
----------
axis : {index (0)}
Axis for the function to be applied on.
For `Series` this parameter is unused and defaults to 0.
For DataFrames, specifying ``axis=None`` will apply the aggregation
across both axes.
.. versionadded:: 2.0.0
skipna : bool, default True
Exclude NA/null values when computing the result.
numeric_only : bool, default False
Include only float, int, boolean columns.
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
scalar or Series (if level specified)
Mean of the values for the requested axis.
See Also
--------
numpy.mean : Equivalent numpy function for computing the mean.
Series.sum : Sum of the values.
Series.median : Median of the values.
Series.std : Standard deviation of the values.
Series.var : Variance of the values.
Series.min : Minimum value.
Series.max : Maximum value.
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s.mean()
2.0
"""
return NDFrame.mean(
self, axis=axis, skipna=skipna, numeric_only=numeric_only, **kwargs
)
|
Return the mean of the values over the requested axis.
Parameters
----------
axis : {index (0)}
Axis for the function to be applied on.
For `Series` this parameter is unused and defaults to 0.
For DataFrames, specifying ``axis=None`` will apply the aggregation
across both axes.
.. versionadded:: 2.0.0
skipna : bool, default True
Exclude NA/null values when computing the result.
numeric_only : bool, default False
Include only float, int, boolean columns.
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
scalar or Series (if level specified)
Mean of the values for the requested axis.
See Also
--------
numpy.mean : Equivalent numpy function for computing the mean.
Series.sum : Sum of the values.
Series.median : Median of the values.
Series.std : Standard deviation of the values.
Series.var : Variance of the values.
Series.min : Minimum value.
Series.max : Maximum value.
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s.mean()
2.0
|
python
|
pandas/core/series.py
| 7,729
|
[
"self",
"axis",
"skipna",
"numeric_only"
] |
Any
| true
| 1
| 7.12
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
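The docstring example covers the all-valid case; skipna is where behavior branches. A small sketch of both paths:

import numpy as np
import pandas as pd

s = pd.Series([1.0, np.nan, 3.0])
print(s.mean())              # 2.0: NaN values are skipped by default
print(s.mean(skipna=False))  # nan: NaN propagates when skipna=False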
generate_run_id
|
def generate_run_id(
self,
*,
run_type: DagRunType,
data_interval: DataInterval | None,
run_after: DateTime,
**extra,
) -> str:
"""
Generate a run ID based on the run type, ``run_after``, and the logical date.
:param run_type: the type of DagRun
:param data_interval: the data interval
:param run_after: the date before which the dag run won't start
"""
from airflow.models.dagrun import DagRun
logical_date = data_interval.start if data_interval is not None else run_after
return DagRun.generate_run_id(run_type=run_type, logical_date=logical_date, run_after=run_after)
|
Generate a run ID based on the run type, ``run_after``, and the logical date.
:param run_type: the type of DagRun
:param data_interval: the data interval
:param run_after: the date before which the dag run won't start
|
python
|
airflow-core/src/airflow/timetables/simple.py
| 208
|
[
"self",
"run_type",
"data_interval",
"run_after"
] |
str
| true
| 2
| 6.72
|
apache/airflow
| 43,597
|
sphinx
| false
|
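The timetable only picks a logical date (the interval start when one exists, otherwise run_after) and delegates to DagRun.generate_run_id. A hedged sketch of the conventional ID shape; the "<run_type>__<timestamp>" format here is an assumption for illustration, as the real format is owned by DagRun:

from datetime import datetime, timezone

def sketch_run_id(run_type: str, logical_date: datetime) -> str:
    # Assumed "<run_type>__<ISO timestamp>" shape, for illustration only.
    return f"{run_type}__{logical_date.isoformat()}"

print(sketch_run_id("scheduled", datetime(2024, 1, 1, tzinfo=timezone.utc)))
# scheduled__2024-01-01T00:00:00+00:00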
isIsatapAddress
|
public static boolean isIsatapAddress(Inet6Address ip) {
// If it's a Teredo address with the right port (41217, or 0xa101)
// which would be encoded as 0x5efe then it can't be an ISATAP address.
if (isTeredoAddress(ip)) {
return false;
}
byte[] bytes = ip.getAddress();
if ((bytes[8] | (byte) 0x03) != (byte) 0x03) {
// Verify that high byte of the 64 bit identifier is zero, modulo
// the U/L and G bits, with which we are not concerned.
return false;
}
return (bytes[9] == (byte) 0x00) && (bytes[10] == (byte) 0x5e) && (bytes[11] == (byte) 0xfe);
}
|
Evaluates whether the argument is an ISATAP address.
<p>From RFC 5214: "ISATAP interface identifiers are constructed in Modified EUI-64 format [...]
by concatenating the 24-bit IANA OUI (00-00-5E), the 8-bit hexadecimal value 0xFE, and a 32-bit
IPv4 address in network byte order [...]"
<p>For more on ISATAP addresses see section 6.1 of <a target="_parent"
href="http://tools.ietf.org/html/rfc5214#section-6.1">RFC 5214</a>.
@param ip {@link Inet6Address} to be examined for ISATAP address format
@return {@code true} if the argument is an ISATAP address
|
java
|
android/guava/src/com/google/common/net/InetAddresses.java
| 857
|
[
"ip"
] | true
| 5
| 7.6
|
google/guava
| 51,352
|
javadoc
| false
|
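The byte layout the Java code checks (a zero high identifier byte apart from the U/L and G bits, then the 00-5e-fe marker) translates directly to other languages. A hedged Python re-expression of just those byte checks; note it omits the Teredo exclusion the original performs first:

import ipaddress

def looks_like_isatap(addr: str) -> bool:
    b = ipaddress.IPv6Address(addr).packed
    # High byte of the interface identifier must be zero apart from
    # the U/L (0x02) and G (0x01) bits.
    if b[8] & 0xFC:
        return False
    # The next three bytes must be 00-5e-fe: the IANA OUI plus 0xFE.
    return b[9] == 0x00 and b[10] == 0x5E and b[11] == 0xFE

print(looks_like_isatap("2001:db8::5efe:c000:280"))  # True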