function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
visitContinueStatement
|
function visitContinueStatement(node: ContinueStatement): Statement {
if (inStatementContainingYield) {
const label = findContinueTarget(node.label && idText(node.label));
if (label > 0) {
return createInlineBreak(label, /*location*/ node);
}
}
return visitEachChild(node, visitor, context);
}
|
Visits an ElementAccessExpression that contains a YieldExpression.
@param node The node to visit.
|
typescript
|
src/compiler/transformers/generators.ts
| 1,753
|
[
"node"
] | true
| 4
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
_base_dir
|
def _base_dir(self) -> Path:
"""Get the base directory for cache storage.
Returns:
Path to the cache directory based on the default cache dir
and the specified subdirectory.
"""
from torch._inductor.runtime.runtime_utils import default_cache_dir
return Path(default_cache_dir(), "cache")
|
Get the base directory for cache storage.
Returns:
Path to the cache directory based on the default cache dir
and the specified subdirectory.
|
python
|
torch/_inductor/runtime/caching/implementations.py
| 200
|
[
"self"
] |
Path
| true
| 1
| 6.56
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
random
|
function random(lower, upper, floating) {
if (floating && typeof floating != 'boolean' && isIterateeCall(lower, upper, floating)) {
upper = floating = undefined;
}
if (floating === undefined) {
if (typeof upper == 'boolean') {
floating = upper;
upper = undefined;
}
else if (typeof lower == 'boolean') {
floating = lower;
lower = undefined;
}
}
if (lower === undefined && upper === undefined) {
lower = 0;
upper = 1;
}
else {
lower = toFinite(lower);
if (upper === undefined) {
upper = lower;
lower = 0;
} else {
upper = toFinite(upper);
}
}
if (lower > upper) {
var temp = lower;
lower = upper;
upper = temp;
}
if (floating || lower % 1 || upper % 1) {
var rand = nativeRandom();
return nativeMin(lower + (rand * (upper - lower + freeParseFloat('1e-' + ((rand + '').length - 1)))), upper);
}
return baseRandom(lower, upper);
}
|
Produces a random number between the inclusive `lower` and `upper` bounds.
If only one argument is provided a number between `0` and the given number
is returned. If `floating` is `true`, or either `lower` or `upper` are
floats, a floating-point number is returned instead of an integer.
**Note:** JavaScript follows the IEEE-754 standard for resolving
floating-point values which can produce unexpected results.
@static
@memberOf _
@since 0.7.0
@category Number
@param {number} [lower=0] The lower bound.
@param {number} [upper=1] The upper bound.
@param {boolean} [floating] Specify returning a floating-point number.
@returns {number} Returns the random number.
@example
_.random(0, 5);
// => an integer between 0 and 5
_.random(5);
// => also an integer between 0 and 5
_.random(5, true);
// => a floating-point number between 0 and 5
_.random(1.2, 5.2);
// => a floating-point number between 1.2 and 5.2
|
javascript
|
lodash.js
| 14,185
|
[
"lower",
"upper",
"floating"
] | false
| 17
| 6.32
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
voidSuccess
|
public static RequestFuture<Void> voidSuccess() {
RequestFuture<Void> future = new RequestFuture<>();
future.complete(null);
return future;
}
|
Convert from a request future of one type to another type
@param adapter The adapter which does the conversion
@param <S> The type of the future adapted to
@return The new future
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestFuture.java
| 237
|
[] | true
| 1
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
printSubCommandList
|
private void printSubCommandList(Consumer<String> println) {
if (subcommands.isEmpty()) {
throw new IllegalStateException("No subcommands configured");
}
println.accept("Commands");
println.accept("--------");
for (Map.Entry<String, Command> subcommand : subcommands.entrySet()) {
println.accept(subcommand.getKey() + " - " + subcommand.getValue().description);
}
println.accept("");
}
|
Construct the multi-command with the specified command description and runnable to execute before main is invoked.
@param description the multi-command description
|
java
|
libs/cli/src/main/java/org/elasticsearch/cli/MultiCommand.java
| 61
|
[
"println"
] |
void
| true
| 2
| 6.4
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
checkByteSize
|
private static void checkByteSize(MemorySegment a, MemorySegment b) {
if (a.byteSize() != b.byteSize()) {
throw new IllegalArgumentException("dimensions differ: " + a.byteSize() + "!=" + b.byteSize());
}
}
|
Computes the square distance of given float32 vectors.
@param a address of the first vector
@param b address of the second vector
@param elementCount the vector dimensions, number of float32 elements in the segment
|
java
|
libs/native/src/main/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java
| 285
|
[
"a",
"b"
] |
void
| true
| 2
| 6.56
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
terminate_job
|
def terminate_job(self, jobId: str, reason: str) -> dict:
"""
Terminate a Batch job.
:param jobId: a job ID to terminate
:param reason: a reason to terminate job ID
:return: an API response
"""
...
|
Terminate a Batch job.
:param jobId: a job ID to terminate
:param reason: a reason to terminate job ID
:return: an API response
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/batch_client.py
| 134
|
[
"self",
"jobId",
"reason"
] |
dict
| true
| 1
| 6.72
|
apache/airflow
| 43,597
|
sphinx
| false
|
convert_json_field_to_pandas_type
|
def convert_json_field_to_pandas_type(field) -> str | CategoricalDtype:
"""
Converts a JSON field descriptor into its corresponding NumPy / pandas type
Parameters
----------
field
A JSON field descriptor
Returns
-------
dtype
Raises
------
ValueError
If the type of the provided field is unknown or currently unsupported
Examples
--------
>>> convert_json_field_to_pandas_type({"name": "an_int", "type": "integer"})
'int64'
>>> convert_json_field_to_pandas_type(
... {
... "name": "a_categorical",
... "type": "any",
... "constraints": {"enum": ["a", "b", "c"]},
... "ordered": True,
... }
... )
CategoricalDtype(categories=['a', 'b', 'c'], ordered=True, categories_dtype=str)
>>> convert_json_field_to_pandas_type({"name": "a_datetime", "type": "datetime"})
'datetime64[ns]'
>>> convert_json_field_to_pandas_type(
... {"name": "a_datetime_with_tz", "type": "datetime", "tz": "US/Central"}
... )
'datetime64[ns, US/Central]'
"""
typ = field["type"]
if typ == "string":
return field.get("extDtype", None)
elif typ == "integer":
return field.get("extDtype", "int64")
elif typ == "number":
return field.get("extDtype", "float64")
elif typ == "boolean":
return field.get("extDtype", "bool")
elif typ == "duration":
return "timedelta64"
elif typ == "datetime":
if field.get("tz"):
return f"datetime64[ns, {field['tz']}]"
elif field.get("freq"):
# GH#9586 rename frequency M to ME for offsets
offset = to_offset(field["freq"])
freq = PeriodDtype(offset)._freqstr
# GH#47747 using datetime over period to minimize the change surface
return f"period[{freq}]"
else:
return "datetime64[ns]"
elif typ == "any":
if "constraints" in field and "ordered" in field:
return CategoricalDtype(
categories=field["constraints"]["enum"], ordered=field["ordered"]
)
elif "extDtype" in field:
return registry.find(field["extDtype"])
else:
return "object"
raise ValueError(f"Unsupported or invalid field type: {typ}")
|
Converts a JSON field descriptor into its corresponding NumPy / pandas type
Parameters
----------
field
A JSON field descriptor
Returns
-------
dtype
Raises
------
ValueError
If the type of the provided field is unknown or currently unsupported
Examples
--------
>>> convert_json_field_to_pandas_type({"name": "an_int", "type": "integer"})
'int64'
>>> convert_json_field_to_pandas_type(
... {
... "name": "a_categorical",
... "type": "any",
... "constraints": {"enum": ["a", "b", "c"]},
... "ordered": True,
... }
... )
CategoricalDtype(categories=['a', 'b', 'c'], ordered=True, categories_dtype=str)
>>> convert_json_field_to_pandas_type({"name": "a_datetime", "type": "datetime"})
'datetime64[ns]'
>>> convert_json_field_to_pandas_type(
... {"name": "a_datetime_with_tz", "type": "datetime", "tz": "US/Central"}
... )
'datetime64[ns, US/Central]'
|
python
|
pandas/io/json/_table_schema.py
| 157
|
[
"field"
] |
str | CategoricalDtype
| true
| 15
| 6.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
indexOf
|
public static int indexOf(final float[] array, final float valueToFind, final int startIndex) {
if (isEmpty(array)) {
return INDEX_NOT_FOUND;
}
final boolean searchNaN = Float.isNaN(valueToFind);
for (int i = max0(startIndex); i < array.length; i++) {
final float element = array[i];
if (valueToFind == element || searchNaN && Float.isNaN(element)) {
return i;
}
}
return INDEX_NOT_FOUND;
}
|
Finds the index of the given value in the array starting at the given index.
<p>
This method returns {@link #INDEX_NOT_FOUND} ({@code -1}) for a {@code null} input array.
</p>
<p>
A negative startIndex is treated as zero. A startIndex larger than the array length will return {@link #INDEX_NOT_FOUND} ({@code -1}).
</p>
@param array the array to search for the object, may be {@code null}.
@param valueToFind the value to find.
@param startIndex the index to start searching.
@return the index of the value within the array, {@link #INDEX_NOT_FOUND} ({@code -1}) if not found or {@code null} array input.
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 2,597
|
[
"array",
"valueToFind",
"startIndex"
] | true
| 6
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
headerNameToString
|
function headerNameToString (value) {
return typeof value === 'string'
? headerNameLowerCasedRecord[value] ?? value.toLowerCase()
: tree.lookup(value) ?? value.toString('latin1').toLowerCase()
}
|
Retrieves a header name and returns its lowercase value.
@param {string | Buffer} value Header name
@returns {string}
|
javascript
|
deps/undici/src/lib/core/util.js
| 400
|
[
"value"
] | false
| 2
| 6.24
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
visitFunctionDeclaration
|
function visitFunctionDeclaration(node: FunctionDeclaration): VisitResult<Statement> {
let parameters: NodeArray<ParameterDeclaration>;
const savedLexicalArgumentsBinding = lexicalArgumentsBinding;
lexicalArgumentsBinding = undefined;
const functionFlags = getFunctionFlags(node);
const updated = factory.updateFunctionDeclaration(
node,
visitNodes(node.modifiers, visitor, isModifierLike),
node.asteriskToken,
node.name,
/*typeParameters*/ undefined,
parameters = functionFlags & FunctionFlags.Async ?
transformAsyncFunctionParameterList(node) :
visitParameterList(node.parameters, visitor, context),
/*type*/ undefined,
functionFlags & FunctionFlags.Async ?
transformAsyncFunctionBody(node, parameters) :
visitFunctionBody(node.body, visitor, context),
);
lexicalArgumentsBinding = savedLexicalArgumentsBinding;
return updated;
}
|
Visits a FunctionDeclaration node.
This function will be called when one of the following conditions are met:
- The node is marked async
@param node The node to visit.
|
typescript
|
src/compiler/transformers/es2017.ts
| 493
|
[
"node"
] | true
| 3
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
reconstructErrorStack
|
function reconstructErrorStack(err, parentPath, parentSource) {
const errLine = StringPrototypeSplit(
StringPrototypeSlice(err.stack, StringPrototypeIndexOf(
err.stack, ' at ')), '\n', 1)[0];
const { 1: line, 2: col } =
RegExpPrototypeExec(/(\d+):(\d+)\)/, errLine) || [];
if (line && col) {
const srcLine = StringPrototypeSplit(parentSource, '\n', line)[line - 1];
const frame = `${parentPath}:${line}\n${srcLine}\n${StringPrototypeRepeat(' ', col - 1)}^\n`;
setArrowMessage(err, frame);
}
}
|
Get the source code of a module, using cached ones if it's cached. This is used
for TypeScript, JavaScript and JSON loading.
After this returns, mod[kFormat], mod[kModuleSource] and mod[kURL] will be set.
@param {Module} mod Module instance whose source is potentially already cached.
@param {string} filename Absolute path to the file of the module.
@returns {{source: string, format?: string}}
|
javascript
|
lib/internal/modules/cjs/loader.js
| 1,804
|
[
"err",
"parentPath",
"parentSource"
] | false
| 4
| 6.08
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
createSegment
|
Segment<K, V> createSegment(
int initialCapacity, long maxSegmentWeight, StatsCounter statsCounter) {
return new Segment<>(this, initialCapacity, maxSegmentWeight, statsCounter);
}
|
Returns the segment that should be used for a key with the given hash.
@param hash the hash code for the key
@return the segment
|
java
|
android/guava/src/com/google/common/cache/LocalCache.java
| 1,760
|
[
"initialCapacity",
"maxSegmentWeight",
"statsCounter"
] | true
| 1
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
vector_norm
|
def vector_norm(x, /, *, axis=None, keepdims=False, ord=2):
"""
Computes the vector norm of a vector (or batch of vectors) ``x``.
This function is Array API compatible.
Parameters
----------
x : array_like
Input array.
axis : {None, int, 2-tuple of ints}, optional
If an integer, ``axis`` specifies the axis (dimension) along which
to compute vector norms. If an n-tuple, ``axis`` specifies the axes
(dimensions) along which to compute batched vector norms. If ``None``,
the vector norm must be computed over all array values (i.e.,
equivalent to computing the vector norm of a flattened array).
Default: ``None``.
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in
the result as dimensions with size one. Default: False.
ord : {int, float, inf, -inf}, optional
The order of the norm. For details see the table under ``Notes``
in `numpy.linalg.norm`.
See Also
--------
numpy.linalg.norm : Generic norm function
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) + 1
>>> a
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> b = a.reshape((3, 3))
>>> b
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> LA.vector_norm(b)
16.881943016134134
>>> LA.vector_norm(b, ord=np.inf)
9.0
>>> LA.vector_norm(b, ord=-np.inf)
1.0
>>> LA.vector_norm(b, ord=0)
9.0
>>> LA.vector_norm(b, ord=1)
45.0
>>> LA.vector_norm(b, ord=-1)
0.3534857623790153
>>> LA.vector_norm(b, ord=2)
16.881943016134134
>>> LA.vector_norm(b, ord=-2)
0.8058837395885292
"""
x = asanyarray(x)
shape = list(x.shape)
if axis is None:
# Note: np.linalg.norm() doesn't handle 0-D arrays
x = x.ravel()
_axis = 0
elif isinstance(axis, tuple):
# Note: The axis argument supports any number of axes, whereas
# np.linalg.norm() only supports a single axis for vector norm.
normalized_axis = normalize_axis_tuple(axis, x.ndim)
rest = tuple(i for i in range(x.ndim) if i not in normalized_axis)
newshape = axis + rest
x = _core_transpose(x, newshape).reshape(
(
prod([x.shape[i] for i in axis], dtype=int),
*[x.shape[i] for i in rest]
)
)
_axis = 0
else:
_axis = axis
res = norm(x, axis=_axis, ord=ord)
if keepdims:
# We can't reuse np.linalg.norm(keepdims) because of the reshape hacks
# above to avoid matrix norm logic.
_axis = normalize_axis_tuple(
range(len(shape)) if axis is None else axis, len(shape)
)
for i in _axis:
shape[i] = 1
res = res.reshape(tuple(shape))
return res
|
Computes the vector norm of a vector (or batch of vectors) ``x``.
This function is Array API compatible.
Parameters
----------
x : array_like
Input array.
axis : {None, int, 2-tuple of ints}, optional
If an integer, ``axis`` specifies the axis (dimension) along which
to compute vector norms. If an n-tuple, ``axis`` specifies the axes
(dimensions) along which to compute batched vector norms. If ``None``,
the vector norm must be computed over all array values (i.e.,
equivalent to computing the vector norm of a flattened array).
Default: ``None``.
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in
the result as dimensions with size one. Default: False.
ord : {int, float, inf, -inf}, optional
The order of the norm. For details see the table under ``Notes``
in `numpy.linalg.norm`.
See Also
--------
numpy.linalg.norm : Generic norm function
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) + 1
>>> a
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> b = a.reshape((3, 3))
>>> b
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> LA.vector_norm(b)
16.881943016134134
>>> LA.vector_norm(b, ord=np.inf)
9.0
>>> LA.vector_norm(b, ord=-np.inf)
1.0
>>> LA.vector_norm(b, ord=0)
9.0
>>> LA.vector_norm(b, ord=1)
45.0
>>> LA.vector_norm(b, ord=-1)
0.3534857623790153
>>> LA.vector_norm(b, ord=2)
16.881943016134134
>>> LA.vector_norm(b, ord=-2)
0.8058837395885292
|
python
|
numpy/linalg/_linalg.py
| 3,503
|
[
"x",
"axis",
"keepdims",
"ord"
] | false
| 7
| 7.76
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
read
|
@Override
public int read(ByteBuffer dst, long pos) throws IOException {
if (pos < 0 || pos >= this.size) {
return -1;
}
int lastReadPart = this.lastReadPart;
int partIndex = 0;
long offset = 0;
int result = 0;
if (pos >= this.offsets[lastReadPart]) {
partIndex = lastReadPart;
offset = this.offsets[lastReadPart];
}
while (partIndex < this.parts.length) {
DataBlock part = this.parts[partIndex];
while (pos >= offset && pos < offset + part.size()) {
int count = part.read(dst, pos - offset);
result += Math.max(count, 0);
if (count <= 0 || !dst.hasRemaining()) {
this.lastReadPart = partIndex;
return result;
}
pos += count;
}
offset += part.size();
partIndex++;
}
return result;
}
|
Set the parts that make up the virtual data block.
@param parts the data block parts
@throws IOException on I/O error
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/VirtualDataBlock.java
| 78
|
[
"dst",
"pos"
] | true
| 9
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
writeToString
|
default String writeToString(@Nullable T instance) {
return write(instance).toJsonString();
}
|
Write the given instance to a JSON string.
@param instance the instance to write (may be {@code null})
@return the JSON string
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/json/JsonWriter.java
| 95
|
[
"instance"
] |
String
| true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getSignature
|
@Override
public Signature getSignature() {
if (this.signature == null) {
this.signature = new MethodSignatureImpl();
}
return this.signature;
}
|
Returns the Spring AOP target. May be {@code null} if there is no target.
|
java
|
spring-aop/src/main/java/org/springframework/aop/aspectj/MethodInvocationProceedingJoinPoint.java
| 122
|
[] |
Signature
| true
| 2
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
detectIfAnsiCapable
|
private static boolean detectIfAnsiCapable() {
try {
if (Boolean.FALSE.equals(consoleAvailable)) {
return false;
}
if (consoleAvailable == null) {
Console console = System.console();
if (console == null) {
return false;
}
Method isTerminalMethod = ClassUtils.getMethodIfAvailable(Console.class, "isTerminal");
if (isTerminalMethod != null) {
Boolean isTerminal = (Boolean) isTerminalMethod.invoke(console);
if (Boolean.FALSE.equals(isTerminal)) {
return false;
}
}
}
return !(OPERATING_SYSTEM_NAME.contains("win"));
}
catch (Throwable ex) {
return false;
}
}
|
Create a new ANSI string from the specified elements. Any {@link AnsiElement}s will
be encoded as required.
@param elements the elements to encode
@return a string of the encoded elements
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ansi/AnsiOutput.java
| 156
|
[] | true
| 7
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
freshTarget
|
protected abstract Object freshTarget();
|
Obtain a fresh target object.
<p>Only invoked if a refresh check has found that a refresh is required
(that is, {@link #requiresRefresh()} has returned {@code true}).
@return the fresh target object
|
java
|
spring-aop/src/main/java/org/springframework/aop/target/dynamic/AbstractRefreshableTargetSource.java
| 143
|
[] |
Object
| true
| 1
| 6.48
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
close
|
@Override
public void close() {
if (resultAlreadyReturned == false) {
Releasables.close(result);
}
}
|
Sets the given bucket of the negative buckets. If the bucket already exists, it will be replaced.
Buckets may be set in arbitrary order. However, for best performance and minimal allocations,
buckets should be set in order of increasing index and all negative buckets should be set before positive buckets.
@param index the index of the bucket
@param count the count of the bucket, must be at least 1
@return the builder
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogramBuilder.java
| 283
|
[] |
void
| true
| 2
| 8.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
empty
|
def empty(self) -> bool:
"""
Indicator whether Series/DataFrame is empty.
True if Series/DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.
Returns
-------
bool
If Series/DataFrame is empty, return True, if not return False.
See Also
--------
Series.dropna : Return series without null values.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
Notes
-----
If Series/DataFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({"A": []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({"A": [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
>>> ser_empty = pd.Series({"A": []})
>>> ser_empty
A []
dtype: object
>>> ser_empty.empty
False
>>> ser_empty = pd.Series()
>>> ser_empty.empty
True
"""
return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)
|
Indicator whether Series/DataFrame is empty.
True if Series/DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.
Returns
-------
bool
If Series/DataFrame is empty, return True, if not return False.
See Also
--------
Series.dropna : Return series without null values.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
Notes
-----
If Series/DataFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({"A": []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({"A": [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
>>> ser_empty = pd.Series({"A": []})
>>> ser_empty
A []
dtype: object
>>> ser_empty.empty
False
>>> ser_empty = pd.Series()
>>> ser_empty.empty
True
|
python
|
pandas/core/generic.py
| 1,964
|
[
"self"
] |
bool
| true
| 1
| 7.28
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
throttleDelayMs
|
public long throttleDelayMs(Node node, long now) {
return connectionStates.throttleDelayMs(node.idString(), now);
}
|
Returns the number of milliseconds to wait, based on the connection state, before attempting to send data. When
disconnected, this respects the reconnect backoff time. When connecting or connected, this handles slow/stalled
connections.
@param node The node to check
@param now The current timestamp
@return The number of milliseconds to wait.
|
java
|
clients/src/main/java/org/apache/kafka/clients/NetworkClient.java
| 471
|
[
"node",
"now"
] | true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
items
|
public ConditionMessage items(@Nullable Collection<?> items) {
return items(Style.NORMAL, items);
}
|
Indicate the items. For example
{@code didNotFind("bean", "beans").items(Collections.singleton("x")} results in
the message "did not find bean x".
@param items the source of the items (may be {@code null})
@return a built {@link ConditionMessage}
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/ConditionMessage.java
| 371
|
[
"items"
] |
ConditionMessage
| true
| 1
| 6.32
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
scheduleCleanUp
|
function scheduleCleanUp() {
if (cleanUpIsScheduled === false && size > LIMIT) {
// The cache size exceeds the limit. Schedule a callback to delete the
// least recently used entries.
cleanUpIsScheduled = true;
scheduleCallback(IdlePriority, cleanUp);
}
}
|
Copyright (c) Meta Platforms, Inc. and affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
@flow
|
javascript
|
packages/react-cache/src/LRU.js
| 42
|
[] | false
| 3
| 6.4
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
_critical_section_enqueue_task_instances
|
def _critical_section_enqueue_task_instances(self, session: Session) -> int:
"""
Enqueues TaskInstances for execution.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
HA note: This function is a "critical section" meaning that only a single scheduler process can
execute this function at the same time. This is achieved by doing
``SELECT ... from pool FOR UPDATE``. For DBs that support NOWAIT, a "blocked" scheduler will skip
this and continue on with other tasks (creating new DAG runs, progressing TIs from None to SCHEDULED
etc.); DBs that don't support this (such as MariaDB or MySQL 5.x) the other schedulers will wait for
the lock before continuing.
:param session:
:return: Number of task instance with state changed.
"""
# The user can either request a certain number of tis to schedule per main scheduler loop (default
# is non-zero). If that value has been set to zero, that means use the value of core.parallelism (or
# however many free slots are left). core.parallelism represents the max number of running TIs per
# scheduler. Historically this value was stored in the executor, who's job it was to control/enforce
# it. However, with multiple executors, any of which can run up to core.parallelism TIs individually,
# we need to make sure in the scheduler now that we don't schedule more than core.parallelism totally
# across all executors.
num_occupied_slots = sum([executor.slots_occupied for executor in self.job.executors])
parallelism = conf.getint("core", "parallelism")
if self.job.max_tis_per_query == 0:
max_tis = parallelism - num_occupied_slots
else:
max_tis = min(self.job.max_tis_per_query, parallelism - num_occupied_slots)
if max_tis <= 0:
self.log.debug("max_tis query size is less than or equal to zero. No query will be performed!")
return 0
queued_tis = self._executable_task_instances_to_queued(max_tis, session=session)
# Sort queued TIs to their respective executor
executor_to_queued_tis = self._executor_to_tis(queued_tis, session)
for executor, queued_tis_per_executor in executor_to_queued_tis.items():
self.log.info(
"Trying to enqueue tasks: %s for executor: %s",
queued_tis_per_executor,
executor,
)
self._enqueue_task_instances_with_queued_state(queued_tis_per_executor, executor, session=session)
return len(queued_tis)
|
Enqueues TaskInstances for execution.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
HA note: This function is a "critical section" meaning that only a single scheduler process can
execute this function at the same time. This is achieved by doing
``SELECT ... from pool FOR UPDATE``. For DBs that support NOWAIT, a "blocked" scheduler will skip
this and continue on with other tasks (creating new DAG runs, progressing TIs from None to SCHEDULED
etc.); DBs that don't support this (such as MariaDB or MySQL 5.x) the other schedulers will wait for
the lock before continuing.
:param session:
:return: Number of task instance with state changed.
|
python
|
airflow-core/src/airflow/jobs/scheduler_job_runner.py
| 826
|
[
"self",
"session"
] |
int
| true
| 5
| 8.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
getArrayComponentType
|
public static Type getArrayComponentType(final Type type) {
if (type instanceof Class<?>) {
final Class<?> cls = (Class<?>) type;
return cls.isArray() ? cls.getComponentType() : null;
}
if (type instanceof GenericArrayType) {
return ((GenericArrayType) type).getGenericComponentType();
}
return null;
}
|
Gets the array component type of {@code type}.
@param type the type to be checked.
@return component type or null if type is not an array type.
|
java
|
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
| 593
|
[
"type"
] |
Type
| true
| 4
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
endsWith
|
public boolean endsWith(final String str) {
if (str == null) {
return false;
}
final int len = str.length();
if (len == 0) {
return true;
}
if (len > size) {
return false;
}
int pos = size - len;
for (int i = 0; i < len; i++, pos++) {
if (buffer[pos] != str.charAt(i)) {
return false;
}
}
return true;
}
|
Checks whether this builder ends with the specified string.
<p>
Note that this method handles null input quietly, unlike String.
</p>
@param str the string to search for, null returns false
@return true if the builder ends with the string
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,815
|
[
"str"
] | true
| 6
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
arraycopy
|
public static <T> T arraycopy(final T source, final int sourcePos, final int destPos, final int length, final Function<Integer, T> allocator) {
return arraycopy(source, sourcePos, allocator.apply(length), destPos, length);
}
|
A fluent version of {@link System#arraycopy(Object, int, Object, int, int)} that returns the destination array.
@param <T> the type.
@param source the source array.
@param sourcePos starting position in the source array.
@param destPos starting position in the destination data.
@param length the number of array elements to be copied.
@param allocator allocates the array to populate and return.
@return dest
@throws IndexOutOfBoundsException if copying would cause access of data outside array bounds.
@throws ArrayStoreException if an element in the {@code src} array could not be stored into the {@code dest} array because of a type
mismatch.
@throws NullPointerException if either {@code src} or {@code dest} is {@code null}.
@since 3.15.0
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 1,399
|
[
"source",
"sourcePos",
"destPos",
"length",
"allocator"
] |
T
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
maximumSize
|
@CanIgnoreReturnValue
public CacheBuilder<K, V> maximumSize(long maximumSize) {
checkState(
this.maximumSize == UNSET_INT, "maximum size was already set to %s", this.maximumSize);
checkState(
this.maximumWeight == UNSET_INT,
"maximum weight was already set to %s",
this.maximumWeight);
checkState(this.weigher == null, "maximum size can not be combined with weigher");
checkArgument(maximumSize >= 0, "maximum size must not be negative");
this.maximumSize = maximumSize;
return this;
}
|
Specifies the maximum number of entries the cache may contain.
<p>Note that the cache <b>may evict an entry before this limit is exceeded</b>. For example, in
the current implementation, when {@code concurrencyLevel} is greater than {@code 1}, each
resulting segment inside the cache <i>independently</i> limits its own size to approximately
{@code maximumSize / concurrencyLevel}.
<p>When eviction is necessary, the cache evicts entries that are less likely to be used again.
For example, the cache may evict an entry because it hasn't been used recently or very often.
<p>If {@code maximumSize} is zero, elements will be evicted immediately after being loaded into
cache. This can be useful in testing, or to disable caching temporarily.
<p>This feature cannot be used in conjunction with {@link #maximumWeight}.
@param maximumSize the maximum size of the cache
@return this {@code CacheBuilder} instance (for chaining)
@throws IllegalArgumentException if {@code maximumSize} is negative
@throws IllegalStateException if a maximum size or weight was already set
|
java
|
android/guava/src/com/google/common/cache/CacheBuilder.java
| 494
|
[
"maximumSize"
] | true
| 1
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
skip_if_no
|
def skip_if_no(package: str, min_version: str | None = None) -> pytest.MarkDecorator:
"""
Generic function to help skip tests when required packages are not
present on the testing system.
This function returns a pytest mark with a skip condition that will be
evaluated during test collection. An attempt will be made to import the
specified ``package`` and optionally ensure it meets the ``min_version``
The mark can be used as either a decorator for a test class or to be
applied to parameters in pytest.mark.parametrize calls or parametrized
fixtures. Use pytest.importorskip if an imported moduled is later needed
or for test functions.
If the import and version check are unsuccessful, then the test function
(or test case when used in conjunction with parametrization) will be
skipped.
Parameters
----------
package: str
The name of the required package.
min_version: str or None, default None
Optional minimum version of the package.
Returns
-------
pytest.MarkDecorator
a pytest.mark.skipif to use as either a test decorator or a
parametrization mark.
"""
msg = f"Could not import '{package}'"
if min_version:
msg += f" satisfying a min_version of {min_version}"
return pytest.mark.skipif(
not bool(
import_optional_dependency(
package, errors="ignore", min_version=min_version
)
),
reason=msg,
)
|
Generic function to help skip tests when required packages are not
present on the testing system.
This function returns a pytest mark with a skip condition that will be
evaluated during test collection. An attempt will be made to import the
specified ``package`` and optionally ensure it meets the ``min_version``
The mark can be used as either a decorator for a test class or to be
applied to parameters in pytest.mark.parametrize calls or parametrized
fixtures. Use pytest.importorskip if an imported moduled is later needed
or for test functions.
If the import and version check are unsuccessful, then the test function
(or test case when used in conjunction with parametrization) will be
skipped.
Parameters
----------
package: str
The name of the required package.
min_version: str or None, default None
Optional minimum version of the package.
Returns
-------
pytest.MarkDecorator
a pytest.mark.skipif to use as either a test decorator or a
parametrization mark.
|
python
|
pandas/util/_test_decorators.py
| 67
|
[
"package",
"min_version"
] |
pytest.MarkDecorator
| true
| 2
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
toInt
|
public static int toInt(final String str, final int defaultValue) {
try {
return Integer.parseInt(str);
} catch (final RuntimeException e) {
return defaultValue;
}
}
|
Converts a {@link String} to an {@code int}, returning a default value if the conversion fails.
<p>
If the string is {@code null}, the default value is returned.
</p>
<pre>
NumberUtils.toInt(null, 1) = 1
NumberUtils.toInt("", 1) = 1
NumberUtils.toInt("1", 0) = 1
</pre>
@param str the string to convert, may be null.
@param defaultValue the default value.
@return the int represented by the string, or the default if conversion fails.
@since 2.1
|
java
|
src/main/java/org/apache/commons/lang3/math/NumberUtils.java
| 1,562
|
[
"str",
"defaultValue"
] | true
| 2
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
asMapOfRanges
|
@Override
public ImmutableMap<Range<K>, V> asMapOfRanges() {
if (ranges.isEmpty()) {
return ImmutableMap.of();
}
RegularImmutableSortedSet<Range<K>> rangeSet =
new RegularImmutableSortedSet<>(ranges, rangeLexOrdering());
return new ImmutableSortedMap<>(rangeSet, values);
}
|
Guaranteed to throw an exception and leave the {@code RangeMap} unmodified.
@throws UnsupportedOperationException always
@deprecated Unsupported operation.
@since 28.1
|
java
|
guava/src/com/google/common/collect/ImmutableRangeMap.java
| 308
|
[] | true
| 2
| 6.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
sensor
|
public synchronized Sensor sensor(String name, MetricConfig config, long inactiveSensorExpirationTimeSeconds, Sensor... parents) {
return this.sensor(name, config, inactiveSensorExpirationTimeSeconds, Sensor.RecordingLevel.INFO, parents);
}
|
Get or create a sensor with the given unique name and zero or more parent sensors. All parent sensors will
receive every value recorded with this sensor. This uses a default recording level of INFO.
@param name The name of the sensor
@param config A default configuration to use for this sensor for metrics that don't have their own config
@param inactiveSensorExpirationTimeSeconds If no value is recorded on the Sensor for this duration of time,
it is eligible for removal
@param parents The parent sensors
@return The sensor that is created
|
java
|
clients/src/main/java/org/apache/kafka/common/metrics/Metrics.java
| 427
|
[
"name",
"config",
"inactiveSensorExpirationTimeSeconds"
] |
Sensor
| true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
permission_denied
|
def permission_denied(request, exception, template_name=ERROR_403_TEMPLATE_NAME):
"""
Permission denied (403) handler.
Templates: :template:`403.html`
Context:
exception
The message from the exception which triggered the 403 (if one was
supplied).
If the template does not exist, an Http403 response containing the text
"403 Forbidden" (as per RFC 9110 Section 15.5.4) will be returned.
"""
try:
template = loader.get_template(template_name)
except TemplateDoesNotExist:
if template_name != ERROR_403_TEMPLATE_NAME:
# Reraise if it's a missing custom template.
raise
return HttpResponseForbidden(
ERROR_PAGE_TEMPLATE % {"title": "403 Forbidden", "details": ""},
)
return HttpResponseForbidden(
template.render(request=request, context={"exception": str(exception)})
)
|
Permission denied (403) handler.
Templates: :template:`403.html`
Context:
exception
The message from the exception which triggered the 403 (if one was
supplied).
If the template does not exist, an Http403 response containing the text
"403 Forbidden" (as per RFC 9110 Section 15.5.4) will be returned.
|
python
|
django/views/defaults.py
| 126
|
[
"request",
"exception",
"template_name"
] | false
| 2
| 6.24
|
django/django
| 86,204
|
unknown
| false
|
|
is_busday
|
def is_busday(dates, weekmask="1111100", holidays=None, busdaycal=None, out=None):
"""
is_busday(
dates,
weekmask='1111100',
holidays=None,
busdaycal=None,
out=None,
)
Calculates which of the given dates are valid days, and which are not.
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of bool, optional
If provided, this array is filled with the result.
Returns
-------
out : array of bool
An array with the same shape as ``dates``, containing True for
each valid day, and False for each invalid day.
See Also
--------
busdaycalendar : An object that specifies a custom set of valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> import numpy as np
>>> # The weekdays are Friday, Saturday, and Monday
... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
array([False, False, True])
"""
return (dates, weekmask, holidays, out)
|
is_busday(
dates,
weekmask='1111100',
holidays=None,
busdaycal=None,
out=None,
)
Calculates which of the given dates are valid days, and which are not.
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of bool, optional
If provided, this array is filled with the result.
Returns
-------
out : array of bool
An array with the same shape as ``dates``, containing True for
each valid day, and False for each invalid day.
See Also
--------
busdaycalendar : An object that specifies a custom set of valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> import numpy as np
>>> # The weekdays are Friday, Saturday, and Monday
... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
array([False, False, True])
|
python
|
numpy/_core/multiarray.py
| 1,440
|
[
"dates",
"weekmask",
"holidays",
"busdaycal",
"out"
] | false
| 1
| 6.24
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
create
|
@Contract("_, _, !null -> !null")
private static @Nullable ConfigurationPropertiesBean create(String name, @Nullable Object instance,
@Nullable Bindable<Object> bindTarget) {
return (bindTarget != null) ? new ConfigurationPropertiesBean(name, instance, bindTarget) : null;
}
|
Return a {@link ConfigurationPropertiesBean @ConfigurationPropertiesBean} instance
for the given bean details or {@code null} if the bean is not a
{@link ConfigurationProperties @ConfigurationProperties} object. Annotations are
considered both on the bean itself, as well as any factory method (for example a
{@link Bean @Bean} method).
@param applicationContext the source application context
@param bean the bean to consider
@param beanName the bean name
@return a configuration properties bean or {@code null} if the neither the bean nor
factory method are annotated with
{@link ConfigurationProperties @ConfigurationProperties}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/ConfigurationPropertiesBean.java
| 289
|
[
"name",
"instance",
"bindTarget"
] |
ConfigurationPropertiesBean
| true
| 2
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
add_template_filter
|
def add_template_filter(
self, f: ft.TemplateFilterCallable, name: str | None = None
) -> None:
"""Register a function to use as a custom Jinja filter.
The :meth:`template_filter` decorator can be used to register a function
by decorating instead.
:param f: The function to register.
:param name: The name to register the filter as. If not given, uses the
function's name.
"""
self.jinja_env.filters[name or f.__name__] = f
|
Register a function to use as a custom Jinja filter.
The :meth:`template_filter` decorator can be used to register a function
by decorating instead.
:param f: The function to register.
:param name: The name to register the filter as. If not given, uses the
function's name.
|
python
|
src/flask/sansio/app.py
| 696
|
[
"self",
"f",
"name"
] |
None
| true
| 2
| 6.88
|
pallets/flask
| 70,946
|
sphinx
| false
|
clear_data_home
|
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache.
Parameters
----------
data_home : str or path-like, default=None
The path to scikit-learn data directory. If `None`, the default path
is `~/scikit_learn_data`.
Examples
--------
>>> from sklearn.datasets import clear_data_home
>>> clear_data_home() # doctest: +SKIP
"""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
|
Delete all the content of the data home cache.
Parameters
----------
data_home : str or path-like, default=None
The path to scikit-learn data directory. If `None`, the default path
is `~/scikit_learn_data`.
Examples
--------
>>> from sklearn.datasets import clear_data_home
>>> clear_data_home() # doctest: +SKIP
|
python
|
sklearn/datasets/_base.py
| 95
|
[
"data_home"
] | false
| 1
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
parse_oss_url
|
def parse_oss_url(ossurl: str) -> tuple:
"""
Parse the OSS Url into a bucket name and key.
:param ossurl: The OSS Url to parse.
:return: the parsed bucket name and key
"""
parsed_url = urlsplit(ossurl)
if not parsed_url.netloc:
raise AirflowException(f'Please provide a bucket_name instead of "{ossurl}"')
bucket_name = parsed_url.netloc
key = parsed_url.path.lstrip("/")
return bucket_name, key
|
Parse the OSS Url into a bucket name and key.
:param ossurl: The OSS Url to parse.
:return: the parsed bucket name and key
|
python
|
providers/alibaba/src/airflow/providers/alibaba/cloud/hooks/oss.py
| 98
|
[
"ossurl"
] |
tuple
| true
| 2
| 7.92
|
apache/airflow
| 43,597
|
sphinx
| false
|
concat
|
public static boolean[] concat(boolean[]... arrays) {
long length = 0;
for (boolean[] array : arrays) {
length += array.length;
}
boolean[] result = new boolean[checkNoOverflow(length)];
int pos = 0;
for (boolean[] array : arrays) {
System.arraycopy(array, 0, result, pos, array.length);
pos += array.length;
}
return result;
}
|
Returns the values from each provided array combined into a single array. For example, {@code
concat(new boolean[] {a, b}, new boolean[] {}, new boolean[] {c}} returns the array {@code {a,
b, c}}.
@param arrays zero or more {@code boolean} arrays
@return a single array containing all the values from the source arrays, in order
@throws IllegalArgumentException if the total number of elements in {@code arrays} does not fit
in an {@code int}
|
java
|
android/guava/src/com/google/common/primitives/Booleans.java
| 236
|
[] | true
| 1
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
create_url_adapter
|
def create_url_adapter(self, request: Request | None) -> MapAdapter | None:
"""Creates a URL adapter for the given request. The URL adapter
is created at a point where the request context is not yet set
up so the request is passed explicitly.
.. versionchanged:: 3.1
If :data:`SERVER_NAME` is set, it does not restrict requests to
only that domain, for both ``subdomain_matching`` and
``host_matching``.
.. versionchanged:: 1.0
:data:`SERVER_NAME` no longer implicitly enables subdomain
matching. Use :attr:`subdomain_matching` instead.
.. versionchanged:: 0.9
This can be called outside a request when the URL adapter is created
for an application context.
.. versionadded:: 0.6
"""
if request is not None:
if (trusted_hosts := self.config["TRUSTED_HOSTS"]) is not None:
request.trusted_hosts = trusted_hosts
# Check trusted_hosts here until bind_to_environ does.
request.host = get_host(request.environ, request.trusted_hosts) # pyright: ignore
subdomain = None
server_name = self.config["SERVER_NAME"]
if self.url_map.host_matching:
# Don't pass SERVER_NAME, otherwise it's used and the actual
# host is ignored, which breaks host matching.
server_name = None
elif not self.subdomain_matching:
# Werkzeug doesn't implement subdomain matching yet. Until then,
# disable it by forcing the current subdomain to the default, or
# the empty string.
subdomain = self.url_map.default_subdomain or ""
return self.url_map.bind_to_environ(
request.environ, server_name=server_name, subdomain=subdomain
)
# Need at least SERVER_NAME to match/build outside a request.
if self.config["SERVER_NAME"] is not None:
return self.url_map.bind(
self.config["SERVER_NAME"],
script_name=self.config["APPLICATION_ROOT"],
url_scheme=self.config["PREFERRED_URL_SCHEME"],
)
return None
|
Creates a URL adapter for the given request. The URL adapter
is created at a point where the request context is not yet set
up so the request is passed explicitly.
.. versionchanged:: 3.1
If :data:`SERVER_NAME` is set, it does not restrict requests to
only that domain, for both ``subdomain_matching`` and
``host_matching``.
.. versionchanged:: 1.0
:data:`SERVER_NAME` no longer implicitly enables subdomain
matching. Use :attr:`subdomain_matching` instead.
.. versionchanged:: 0.9
This can be called outside a request when the URL adapter is created
for an application context.
.. versionadded:: 0.6
|
python
|
src/flask/app.py
| 508
|
[
"self",
"request"
] |
MapAdapter | None
| true
| 7
| 6.88
|
pallets/flask
| 70,946
|
unknown
| false
|
_update_ctx
|
def _update_ctx(self, attrs: DataFrame) -> None:
"""
Update the state of the ``Styler`` for data cells.
Collects a mapping of {index_label: [('<property>', '<value>'), ..]}.
Parameters
----------
attrs : DataFrame
should contain strings of '<property>: <value>;<prop2>: <val2>'
Whitespace shouldn't matter and the final trailing ';' shouldn't
matter.
"""
if not self.index.is_unique or not self.columns.is_unique:
raise KeyError(
"`Styler.apply` and `.map` are not compatible "
"with non-unique index or columns."
)
for cn in attrs.columns:
j = self.columns.get_loc(cn)
ser = attrs[cn]
for rn, c in ser.items():
if not c or pd.isna(c):
continue
css_list = maybe_convert_css_to_tuples(c)
i = self.index.get_loc(rn)
self.ctx[(i, j)].extend(css_list)
|
Update the state of the ``Styler`` for data cells.
Collects a mapping of {index_label: [('<property>', '<value>'), ..]}.
Parameters
----------
attrs : DataFrame
should contain strings of '<property>: <value>;<prop2>: <val2>'
Whitespace shouldn't matter and the final trailing ';' shouldn't
matter.
|
python
|
pandas/io/formats/style.py
| 1,672
|
[
"self",
"attrs"
] |
None
| true
| 7
| 6.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
union
|
public static ClassFilter union(ClassFilter[] classFilters) {
Assert.notEmpty(classFilters, "ClassFilter array must not be empty");
return new UnionClassFilter(classFilters);
}
|
Match all classes that <i>either</i> (or all) of the given ClassFilters matches.
@param classFilters the ClassFilters to match
@return a distinct ClassFilter that matches all classes that either
of the given ClassFilter matches
|
java
|
spring-aop/src/main/java/org/springframework/aop/support/ClassFilters.java
| 61
|
[
"classFilters"
] |
ClassFilter
| true
| 1
| 6.48
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
_as_pairs
|
def _as_pairs(x, ndim, as_index=False):
"""
Broadcast `x` to an array with the shape (`ndim`, 2).
A helper function for `pad` that prepares and validates arguments like
`pad_width` for iteration in pairs.
Parameters
----------
x : {None, scalar, array-like}
The object to broadcast to the shape (`ndim`, 2).
ndim : int
Number of pairs the broadcasted `x` will have.
as_index : bool, optional
If `x` is not None, try to round each element of `x` to an integer
(dtype `np.intp`) and ensure every element is positive.
Returns
-------
pairs : nested iterables, shape (`ndim`, 2)
The broadcasted version of `x`.
Raises
------
ValueError
If `as_index` is True and `x` contains negative elements.
Or if `x` is not broadcastable to the shape (`ndim`, 2).
"""
if x is None:
# Pass through None as a special case, otherwise np.round(x) fails
# with an AttributeError
return ((None, None),) * ndim
x = np.array(x)
if as_index:
x = np.round(x).astype(np.intp, copy=False)
if x.ndim < 3:
# Optimization: Possibly use faster paths for cases where `x` has
# only 1 or 2 elements. `np.broadcast_to` could handle these as well
# but is currently slower
if x.size == 1:
# x was supplied as a single value
x = x.ravel() # Ensure x[0] works for x.ndim == 0, 1, 2
if as_index and x < 0:
raise ValueError("index can't contain negative values")
return ((x[0], x[0]),) * ndim
if x.size == 2 and x.shape != (2, 1):
# x was supplied with a single value for each side
# but except case when each dimension has a single value
# which should be broadcasted to a pair,
# e.g. [[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]]
x = x.ravel() # Ensure x[0], x[1] works
if as_index and (x[0] < 0 or x[1] < 0):
raise ValueError("index can't contain negative values")
return ((x[0], x[1]),) * ndim
if as_index and x.min() < 0:
raise ValueError("index can't contain negative values")
# Converting the array with `tolist` seems to improve performance
# when iterating and indexing the result (see usage in `pad`)
return np.broadcast_to(x, (ndim, 2)).tolist()
|
Broadcast `x` to an array with the shape (`ndim`, 2).
A helper function for `pad` that prepares and validates arguments like
`pad_width` for iteration in pairs.
Parameters
----------
x : {None, scalar, array-like}
The object to broadcast to the shape (`ndim`, 2).
ndim : int
Number of pairs the broadcasted `x` will have.
as_index : bool, optional
If `x` is not None, try to round each element of `x` to an integer
(dtype `np.intp`) and ensure every element is positive.
Returns
-------
pairs : nested iterables, shape (`ndim`, 2)
The broadcasted version of `x`.
Raises
------
ValueError
If `as_index` is True and `x` contains negative elements.
Or if `x` is not broadcastable to the shape (`ndim`, 2).
|
python
|
numpy/lib/_arraypad_impl.py
| 471
|
[
"x",
"ndim",
"as_index"
] | false
| 14
| 6.16
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
ofNonNull
|
public static <L, R> ImmutablePair<L, R> ofNonNull(final L left, final R right) {
return of(Objects.requireNonNull(left, "left"), Objects.requireNonNull(right, "right"));
}
|
Creates an immutable pair of two non-null objects inferring the generic types.
@param <L> the left element type.
@param <R> the right element type.
@param left the left element, may not be null.
@param right the right element, may not be null.
@return an immutable formed from the two parameters, not null.
@throws NullPointerException if any input is null.
@since 3.13.0
|
java
|
src/main/java/org/apache/commons/lang3/tuple/ImmutablePair.java
| 133
|
[
"left",
"right"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
toBooleanObject
|
public static Boolean toBooleanObject(final Integer value) {
if (value == null) {
return null;
}
return value.intValue() == 0 ? Boolean.FALSE : Boolean.TRUE;
}
|
Converts an Integer to a Boolean using the convention that {@code zero}
is {@code false}, every other numeric value is {@code true}.
<p>{@code null} will be converted to {@code null}.</p>
<p>NOTE: This method may return {@code null} and may throw a {@link NullPointerException}
if unboxed to a {@code boolean}.</p>
<pre>
BooleanUtils.toBooleanObject(Integer.valueOf(0)) = Boolean.FALSE
BooleanUtils.toBooleanObject(Integer.valueOf(1)) = Boolean.TRUE
BooleanUtils.toBooleanObject(Integer.valueOf(null)) = null
</pre>
@param value the Integer to convert
@return Boolean.TRUE if non-zero, Boolean.FALSE if zero,
{@code null} if {@code null} input
|
java
|
src/main/java/org/apache/commons/lang3/BooleanUtils.java
| 644
|
[
"value"
] |
Boolean
| true
| 3
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
enqueue
|
void enqueue(Call call, long now) {
if (call.tries > maxRetries) {
log.debug("Max retries {} for {} reached", maxRetries, call);
call.handleTimeoutFailure(time.milliseconds(), new TimeoutException(
"Exceeded maxRetries after " + call.tries + " tries."));
return;
}
if (log.isDebugEnabled()) {
log.debug("Queueing {} with a timeout {} ms from now.", call,
Math.min(requestTimeoutMs, call.deadlineMs - now));
}
boolean accepted = false;
synchronized (this) {
if (!closing) {
newCalls.add(call);
accepted = true;
}
}
if (accepted) {
client.wakeup(); // wake the thread if it is in poll()
} else {
log.debug("The AdminClient thread has exited. Timing out {}.", call);
call.handleTimeoutFailure(time.milliseconds(),
new TimeoutException("The AdminClient thread has exited."));
}
}
|
Queue a call for sending.
<p>
If the AdminClient thread has exited, this will fail. Otherwise, it will succeed (even
if the AdminClient is shutting down). This function should called when retrying an
existing call.
@param call The new call object.
@param now The current time in milliseconds.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
| 1,550
|
[
"call",
"now"
] |
void
| true
| 5
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
get
|
static ImportPhase get(@Nullable ConfigDataActivationContext activationContext) {
if (activationContext != null && activationContext.getProfiles() != null) {
return AFTER_PROFILE_ACTIVATION;
}
return BEFORE_PROFILE_ACTIVATION;
}
|
Return the {@link ImportPhase} based on the given activation context.
@param activationContext the activation context
@return the import phase
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataEnvironmentContributor.java
| 535
|
[
"activationContext"
] |
ImportPhase
| true
| 3
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
values
|
Collection<V> values();
|
Returns a collection of all values, which may contain duplicates. Changes to the returned
collection will update the underlying table, and vice versa.
@return collection of values
|
java
|
android/guava/src/com/google/common/collect/Table.java
| 232
|
[] | true
| 1
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
parseUnsignedLong
|
@CanIgnoreReturnValue
public static long parseUnsignedLong(String string, int radix) {
checkNotNull(string);
if (string.length() == 0) {
throw new NumberFormatException("empty string");
}
if (radix < Character.MIN_RADIX || radix > Character.MAX_RADIX) {
throw new NumberFormatException("illegal radix: " + radix);
}
int maxSafePos = ParseOverflowDetection.maxSafeDigits[radix] - 1;
long value = 0;
for (int pos = 0; pos < string.length(); pos++) {
int digit = Character.digit(string.charAt(pos), radix);
if (digit == -1) {
throw new NumberFormatException(string);
}
if (pos > maxSafePos && ParseOverflowDetection.overflowInParse(value, digit, radix)) {
throw new NumberFormatException("Too large for unsigned long: " + string);
}
value = (value * radix) + digit;
}
return value;
}
|
Returns the unsigned {@code long} value represented by a string with the given radix.
<p><b>Java 8+ users:</b> use {@link Long#parseUnsignedLong(String, int)} instead.
@param string the string containing the unsigned {@code long} representation to be parsed.
@param radix the radix to use while parsing {@code string}
@throws NumberFormatException if the string does not contain a valid unsigned {@code long} with
the given radix, or if {@code radix} is not between {@link Character#MIN_RADIX} and {@link
Character#MAX_RADIX}.
@throws NullPointerException if {@code string} is null (in contrast to {@link
Long#parseLong(String)})
|
java
|
android/guava/src/com/google/common/primitives/UnsignedLongs.java
| 339
|
[
"string",
"radix"
] | true
| 8
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
|
getNameWithoutExtension
|
public static String getNameWithoutExtension(String file) {
checkNotNull(file);
String fileName = new File(file).getName();
int dotIndex = fileName.lastIndexOf('.');
return (dotIndex == -1) ? fileName : fileName.substring(0, dotIndex);
}
|
Returns the file name without its <a
href="http://en.wikipedia.org/wiki/Filename_extension">file extension</a> or path. This is
similar to the {@code basename} unix command. The result does not include the '{@code .}'.
@param file The name of the file to trim the extension from. This can be either a fully
qualified file name (including a path) or just a file name.
@return The file name without its path or extension.
@since 14.0
|
java
|
android/guava/src/com/google/common/io/Files.java
| 812
|
[
"file"
] |
String
| true
| 2
| 8.08
|
google/guava
| 51,352
|
javadoc
| false
|
collapseOverlappingBucketsForAll
|
public ZeroBucket collapseOverlappingBucketsForAll(BucketIterator... bucketIterators) {
ZeroBucket current = this;
ZeroBucket previous;
do {
previous = current;
for (BucketIterator buckets : bucketIterators) {
current = current.collapseOverlappingBuckets(buckets);
}
} while (previous.compareZeroThreshold(current) != 0);
return current;
}
|
Collapses all buckets from the given iterators whose lower boundaries are smaller than the zero threshold.
The iterators are advanced to point at the first, non-collapsed bucket.
@param bucketIterators The iterators whose buckets may be collapsed.
@return A potentially updated {@link ZeroBucket} with the collapsed buckets' counts and an adjusted threshold.
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ZeroBucket.java
| 215
|
[] |
ZeroBucket
| true
| 1
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
rfind
|
def rfind(a, sub, start=0, end=None):
"""
For each element, return the highest index in the string where
substring ``sub`` is found, such that ``sub`` is contained in the
range [``start``, ``end``).
Parameters
----------
a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
The substring to search for.
start, end : array_like, with any integer dtype
The range to look in, interpreted as in slice notation.
Returns
-------
y : ndarray
Output array of ints
See Also
--------
str.rfind
Examples
--------
>>> import numpy as np
>>> a = np.array(["Computer Science"])
>>> np.strings.rfind(a, "Science", start=0, end=None)
array([9])
>>> np.strings.rfind(a, "Science", start=0, end=8)
array([-1])
>>> b = np.array(["Computer Science", "Science"])
>>> np.strings.rfind(b, "Science", start=0, end=None)
array([9, 0])
"""
end = end if end is not None else MAX
return _rfind_ufunc(a, sub, start, end)
|
For each element, return the highest index in the string where
substring ``sub`` is found, such that ``sub`` is contained in the
range [``start``, ``end``).
Parameters
----------
a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
The substring to search for.
start, end : array_like, with any integer dtype
The range to look in, interpreted as in slice notation.
Returns
-------
y : ndarray
Output array of ints
See Also
--------
str.rfind
Examples
--------
>>> import numpy as np
>>> a = np.array(["Computer Science"])
>>> np.strings.rfind(a, "Science", start=0, end=None)
array([9])
>>> np.strings.rfind(a, "Science", start=0, end=8)
array([-1])
>>> b = np.array(["Computer Science", "Science"])
>>> np.strings.rfind(b, "Science", start=0, end=None)
array([9, 0])
|
python
|
numpy/_core/strings.py
| 294
|
[
"a",
"sub",
"start",
"end"
] | false
| 2
| 7.36
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
get_orm_mapper
|
def get_orm_mapper():
"""Get the correct ORM mapper for the installed SQLAlchemy version."""
import sqlalchemy.orm.mapper
return sqlalchemy.orm.mapper if is_sqlalchemy_v1() else sqlalchemy.orm.Mapper
|
Get the correct ORM mapper for the installed SQLAlchemy version.
|
python
|
airflow-core/src/airflow/utils/sqlalchemy.py
| 471
|
[] | false
| 2
| 6.16
|
apache/airflow
| 43,597
|
unknown
| false
|
|
translateCellErrorOutput
|
function translateCellErrorOutput(output: NotebookCellOutput): nbformat.IError {
// it should have at least one output item
const firstItem = output.items[0];
// Bug in VS Code.
if (!firstItem.data) {
return {
output_type: 'error',
ename: '',
evalue: '',
traceback: []
};
}
const originalError: undefined | nbformat.IError = output.metadata?.originalError;
const value: Error = JSON.parse(textDecoder.decode(firstItem.data));
return {
output_type: 'error',
ename: value.name,
evalue: value.message,
// VS Code needs an `Error` object which requires a `stack` property as a string.
// Its possible the format could change when converting from `traceback` to `string` and back again to `string`
// When .NET stores errors in output (with their .NET kernel),
// stack is empty, hence store the message instead of stack (so that somethign gets displayed in ipynb).
traceback: originalError?.traceback || splitMultilineString(value.stack || value.message || '')
};
}
|
Splits the source of a cell into an array of strings, each representing a line.
Also normalizes line endings to use LF (`\n`) instead of CRLF (`\r\n`).
Same is done in deserializer as well.
|
typescript
|
extensions/ipynb/src/serializers.ts
| 266
|
[
"output"
] | true
| 5
| 6
|
microsoft/vscode
| 179,840
|
jsdoc
| false
|
|
toString
|
@Override
public String toString() {
StringBuilder sb = new StringBuilder("Acknowledgements(");
sb.append(acknowledgements);
sb.append(", acknowledgeException=");
sb.append(acknowledgeException != null ? Errors.forException(acknowledgeException) : "null");
sb.append(", completed=");
sb.append(completed);
sb.append(")");
return sb.toString();
}
|
@return Returns true if the array of acknowledge types in the share fetch batch contains a single acknowledge type
and the array size can be reduced to 1.
Returns false when the array has more than one acknowledge type or is already optimised.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/Acknowledgements.java
| 316
|
[] |
String
| true
| 2
| 8.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
toUnsentRequest
|
public NetworkClientDelegate.UnsentRequest toUnsentRequest() {
Map<String, Uuid> topicIds = metadata.topicIds();
boolean canUseTopicIds = true;
Map<String, OffsetCommitRequestData.OffsetCommitRequestTopic> requestTopicDataMap = new HashMap<>();
for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
TopicPartition topicPartition = entry.getKey();
OffsetAndMetadata offsetAndMetadata = entry.getValue();
Uuid topicId = topicIds.getOrDefault(topicPartition.topic(), Uuid.ZERO_UUID);
if (topicId.equals(Uuid.ZERO_UUID)) {
canUseTopicIds = false;
}
OffsetCommitRequestData.OffsetCommitRequestTopic topic = requestTopicDataMap
.getOrDefault(topicPartition.topic(),
new OffsetCommitRequestData.OffsetCommitRequestTopic()
.setName(topicPartition.topic())
.setTopicId(topicId)
);
topic.partitions().add(new OffsetCommitRequestData.OffsetCommitRequestPartition()
.setPartitionIndex(topicPartition.partition())
.setCommittedOffset(offsetAndMetadata.offset())
.setCommittedLeaderEpoch(offsetAndMetadata.leaderEpoch().orElse(RecordBatch.NO_PARTITION_LEADER_EPOCH))
.setCommittedMetadata(offsetAndMetadata.metadata())
);
requestTopicDataMap.put(topicPartition.topic(), topic);
}
OffsetCommitRequestData data = new OffsetCommitRequestData()
.setGroupId(this.groupId)
.setGroupInstanceId(groupInstanceId.orElse(null))
.setTopics(new ArrayList<>(requestTopicDataMap.values()));
data = data.setMemberId(memberInfo.memberId);
if (memberInfo.memberEpoch.isPresent()) {
data = data.setGenerationIdOrMemberEpoch(memberInfo.memberEpoch.get());
lastEpochSentOnCommit = memberInfo.memberEpoch;
} else {
lastEpochSentOnCommit = Optional.empty();
}
OffsetCommitRequest.Builder builder = canUseTopicIds
? OffsetCommitRequest.Builder.forTopicIdsOrNames(data)
: OffsetCommitRequest.Builder.forTopicNames(data);
return buildRequestWithResponseHandling(builder);
}
|
Future containing the offsets that were committed. It completes when a response is
received for the commit request.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java
| 711
|
[] | true
| 4
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
isInfoEnabled
|
@Override
public boolean isInfoEnabled() {
synchronized (this.lines) {
return (this.destination == null) || this.destination.isInfoEnabled();
}
}
|
Create a new {@link DeferredLog} instance managed by a {@link DeferredLogFactory}.
@param destination the switch-over destination
@param lines the lines backing all related deferred logs
@since 2.4.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/DeferredLog.java
| 79
|
[] | true
| 2
| 6.4
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
withJsonResource
|
public ConfigurationMetadataRepositoryJsonBuilder withJsonResource(InputStream inputStream) throws IOException {
return withJsonResource(inputStream, this.defaultCharset);
}
|
Add the content of a {@link ConfigurationMetadataRepository} defined by the
specified {@link InputStream} JSON document using the default charset. If this
metadata repository holds items that were loaded previously, these are ignored.
<p>
Leaves the stream open when done.
@param inputStream the source input stream
@return this builder
@throws IOException in case of I/O errors
|
java
|
configuration-metadata/spring-boot-configuration-metadata/src/main/java/org/springframework/boot/configurationmetadata/ConfigurationMetadataRepositoryJsonBuilder.java
| 56
|
[
"inputStream"
] |
ConfigurationMetadataRepositoryJsonBuilder
| true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
update
|
public synchronized void update(int requestVersion, MetadataResponse response, boolean isPartialUpdate, long nowMs) {
Objects.requireNonNull(response, "Metadata response cannot be null");
if (isClosed())
throw new IllegalStateException("Update requested after metadata close");
this.needPartialUpdate = requestVersion < this.requestVersion;
this.lastRefreshMs = nowMs;
this.attempts = 0;
this.updateVersion += 1;
if (!isPartialUpdate) {
this.needFullUpdate = false;
this.lastSuccessfulRefreshMs = nowMs;
}
// If we subsequently find that the metadata response is not equivalent to the metadata already known,
// this count is reset to 0 in updateLatestMetadata()
this.equivalentResponseCount++;
String previousClusterId = metadataSnapshot.clusterResource().clusterId();
this.metadataSnapshot = handleMetadataResponse(response, isPartialUpdate, nowMs);
Cluster cluster = metadataSnapshot.cluster();
maybeSetMetadataError(cluster);
this.lastSeenLeaderEpochs.keySet().removeIf(tp -> !retainTopic(tp.topic(), false, nowMs));
String newClusterId = metadataSnapshot.clusterResource().clusterId();
if (!Objects.equals(previousClusterId, newClusterId)) {
log.info("Cluster ID: {}", newClusterId);
}
clusterResourceListeners.onUpdate(metadataSnapshot.clusterResource());
log.debug("Updated cluster metadata updateVersion {} to {}", this.updateVersion, this.metadataSnapshot);
}
|
Updates the cluster metadata. If topic expiry is enabled, expiry time
is set for topics if required and expired topics are removed from the metadata.
@param requestVersion The request version corresponding to the update response, as provided by
{@link #newMetadataRequestAndVersion(long)}.
@param response metadata response received from the broker
@param isPartialUpdate whether the metadata request was for a subset of the active topics
@param nowMs current time in milliseconds
|
java
|
clients/src/main/java/org/apache/kafka/clients/Metadata.java
| 337
|
[
"requestVersion",
"response",
"isPartialUpdate",
"nowMs"
] |
void
| true
| 4
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
isConfigurationPropertiesBean
|
private static boolean isConfigurationPropertiesBean(ConfigurableListableBeanFactory beanFactory, String beanName) {
try {
if (beanFactory.getBeanDefinition(beanName).isAbstract()) {
return false;
}
if (beanFactory.findAnnotationOnBean(beanName, ConfigurationProperties.class) != null) {
return true;
}
Method factoryMethod = findFactoryMethod(beanFactory, beanName);
return findMergedAnnotation(factoryMethod, ConfigurationProperties.class).isPresent();
}
catch (NoSuchBeanDefinitionException ex) {
return false;
}
}
|
Return all {@link ConfigurationProperties @ConfigurationProperties} beans contained
in the given application context. Both directly annotated beans, as well as beans
that have {@link ConfigurationProperties @ConfigurationProperties} annotated
factory methods are included.
@param applicationContext the source application context
@return a map of all configuration properties beans keyed by the bean name
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/ConfigurationPropertiesBean.java
| 174
|
[
"beanFactory",
"beanName"
] | true
| 4
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
in_interactive_session
|
def in_interactive_session() -> bool:
"""
Check if we're running in an interactive shell.
Returns
-------
bool
True if running under python/ipython interactive shell.
"""
from pandas import get_option
def check_main() -> bool:
try:
import __main__ as main
except ModuleNotFoundError:
return get_option("mode.sim_interactive")
return not hasattr(main, "__file__") or get_option("mode.sim_interactive")
try:
# error: Name '__IPYTHON__' is not defined
return __IPYTHON__ or check_main() # type: ignore[name-defined]
except NameError:
return check_main()
|
Check if we're running in an interactive shell.
Returns
-------
bool
True if running under python/ipython interactive shell.
|
python
|
pandas/io/formats/console.py
| 55
|
[] |
bool
| true
| 3
| 6.88
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
get_scorer
|
def get_scorer(scoring):
"""Get a scorer from string.
Read more in the :ref:`User Guide <scoring_parameter>`.
:func:`~sklearn.metrics.get_scorer_names` can be used to retrieve the names
of all available scorers.
Parameters
----------
scoring : str, callable or None
Scoring method as string. If callable it is returned as is.
If None, returns None.
Returns
-------
scorer : callable
The scorer.
Notes
-----
When passed a string, this function always returns a copy of the scorer
object. Calling `get_scorer` twice for the same scorer results in two
separate scorer objects.
Examples
--------
>>> import numpy as np
>>> from sklearn.dummy import DummyClassifier
>>> from sklearn.metrics import get_scorer
>>> X = np.reshape([0, 1, -1, -0.5, 2], (-1, 1))
>>> y = np.array([0, 1, 1, 0, 1])
>>> classifier = DummyClassifier(strategy="constant", constant=0).fit(X, y)
>>> accuracy = get_scorer("accuracy")
>>> accuracy(classifier, X, y)
0.4
"""
if isinstance(scoring, str):
try:
scorer = copy.deepcopy(_SCORERS[scoring])
except KeyError:
raise ValueError(
"%r is not a valid scoring value. "
"Use sklearn.metrics.get_scorer_names() "
"to get valid options." % scoring
)
else:
scorer = scoring
return scorer
|
Get a scorer from string.
Read more in the :ref:`User Guide <scoring_parameter>`.
:func:`~sklearn.metrics.get_scorer_names` can be used to retrieve the names
of all available scorers.
Parameters
----------
scoring : str, callable or None
Scoring method as string. If callable it is returned as is.
If None, returns None.
Returns
-------
scorer : callable
The scorer.
Notes
-----
When passed a string, this function always returns a copy of the scorer
object. Calling `get_scorer` twice for the same scorer results in two
separate scorer objects.
Examples
--------
>>> import numpy as np
>>> from sklearn.dummy import DummyClassifier
>>> from sklearn.metrics import get_scorer
>>> X = np.reshape([0, 1, -1, -0.5, 2], (-1, 1))
>>> y = np.array([0, 1, 1, 0, 1])
>>> classifier = DummyClassifier(strategy="constant", constant=0).fit(X, y)
>>> accuracy = get_scorer("accuracy")
>>> accuracy(classifier, X, y)
0.4
|
python
|
sklearn/metrics/_scorer.py
| 426
|
[
"scoring"
] | false
| 3
| 7.52
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
ofNonNull
|
public static <L, R> MutablePair<L, R> ofNonNull(final L left, final R right) {
return of(Objects.requireNonNull(left, "left"), Objects.requireNonNull(right, "right"));
}
|
Creates a mutable pair of two non-null objects inferring the generic types.
@param <L> the left element type.
@param <R> the right element type.
@param left the left element, may not be null.
@param right the right element, may not be null.
@return a mutable pair formed from the two parameters, not null.
@throws NullPointerException if any input is null.
@since 3.13.0
|
java
|
src/main/java/org/apache/commons/lang3/tuple/MutablePair.java
| 104
|
[
"left",
"right"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
of
|
static <T> ValueProcessor<T> of(UnaryOperator<@Nullable T> action) {
Assert.notNull(action, "'action' must not be null");
return (name, value) -> action.apply(value);
}
|
Factory method to crate a new {@link ValueProcessor} that applies the given
action.
@param <T> the value type
@param action the action to apply
@return a new {@link ValueProcessor} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/json/JsonWriter.java
| 1,057
|
[
"action"
] | true
| 1
| 6.48
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
_fetch_remote
|
def _fetch_remote(remote, dirname=None, n_retries=3, delay=1):
"""Helper function to download a remote dataset.
Fetch a dataset pointed by remote's url, save into path using remote's
filename and ensure its integrity based on the SHA256 checksum of the
downloaded file.
.. versionchanged:: 1.6
If the file already exists locally and the SHA256 checksums match, the
path to the local file is returned without re-downloading.
Parameters
----------
remote : RemoteFileMetadata
Named tuple containing remote dataset meta information: url, filename
and checksum.
dirname : str or Path, default=None
Directory to save the file to. If None, the current working directory
is used.
n_retries : int, default=3
Number of retries when HTTP errors are encountered.
.. versionadded:: 1.5
delay : int, default=1
Number of seconds between retries.
.. versionadded:: 1.5
Returns
-------
file_path: Path
Full path of the created file.
"""
if dirname is None:
folder_path = Path(".")
else:
folder_path = Path(dirname)
file_path = folder_path / remote.filename
if file_path.exists():
if remote.checksum is None:
return file_path
checksum = _sha256(file_path)
if checksum == remote.checksum:
return file_path
else:
warnings.warn(
f"SHA256 checksum of existing local file {file_path.name} "
f"({checksum}) differs from expected ({remote.checksum}): "
f"re-downloading from {remote.url} ."
)
# We create a temporary file dedicated to this particular download to avoid
# conflicts with parallel downloads. If the download is successful, the
# temporary file is atomically renamed to the final file path (with
# `shutil.move`). We therefore pass `delete=False` to `NamedTemporaryFile`.
# Otherwise, garbage collecting temp_file would raise an error when
# attempting to delete a file that was already renamed. If the download
# fails or the result does not match the expected SHA256 digest, the
# temporary file is removed manually in the except block.
temp_file = NamedTemporaryFile(
prefix=remote.filename + ".part_", dir=folder_path, delete=False
)
# Note that Python 3.12's `delete_on_close=True` is ignored as we set
# `delete=False` explicitly. So after this line the empty temporary file still
# exists on disk to make sure that it's uniquely reserved for this specific call of
# `_fetch_remote` and therefore it protects against any corruption by parallel
# calls.
temp_file.close()
try:
temp_file_path = Path(temp_file.name)
while True:
try:
urlretrieve(remote.url, temp_file_path)
break
except (URLError, TimeoutError):
if n_retries == 0:
# If no more retries are left, re-raise the caught exception.
raise
warnings.warn(f"Retry downloading from url: {remote.url}")
n_retries -= 1
time.sleep(delay)
checksum = _sha256(temp_file_path)
if remote.checksum is not None and remote.checksum != checksum:
raise OSError(
f"The SHA256 checksum of {remote.filename} ({checksum}) "
f"differs from expected ({remote.checksum})."
)
except (Exception, KeyboardInterrupt):
os.unlink(temp_file.name)
raise
# The following renaming is atomic whenever temp_file_path and
# file_path are on the same filesystem. This should be the case most of
# the time, but we still use shutil.move instead of os.rename in case
# they are not.
shutil.move(temp_file_path, file_path)
return file_path
|
Helper function to download a remote dataset.
Fetch a dataset pointed by remote's url, save into path using remote's
filename and ensure its integrity based on the SHA256 checksum of the
downloaded file.
.. versionchanged:: 1.6
If the file already exists locally and the SHA256 checksums match, the
path to the local file is returned without re-downloading.
Parameters
----------
remote : RemoteFileMetadata
Named tuple containing remote dataset meta information: url, filename
and checksum.
dirname : str or Path, default=None
Directory to save the file to. If None, the current working directory
is used.
n_retries : int, default=3
Number of retries when HTTP errors are encountered.
.. versionadded:: 1.5
delay : int, default=1
Number of seconds between retries.
.. versionadded:: 1.5
Returns
-------
file_path: Path
Full path of the created file.
|
python
|
sklearn/datasets/_base.py
| 1,434
|
[
"remote",
"dirname",
"n_retries",
"delay"
] | false
| 11
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
adaptJob
|
protected Job adaptJob(Object jobObject) throws Exception {
if (jobObject instanceof Job job) {
return job;
}
else if (jobObject instanceof Runnable runnable) {
return new DelegatingJob(runnable);
}
else {
throw new IllegalArgumentException(
"Unable to execute job class [" + jobObject.getClass().getName() +
"]: only [org.quartz.Job] and [java.lang.Runnable] supported.");
}
}
|
Adapt the given job object to the Quartz Job interface.
<p>The default implementation supports straight Quartz Jobs
as well as Runnables, which get wrapped in a DelegatingJob.
@param jobObject the original instance of the specified job class
@return the adapted Quartz Job instance
@throws Exception if the given job could not be adapted
@see DelegatingJob
|
java
|
spring-context-support/src/main/java/org/springframework/scheduling/quartz/AdaptableJobFactory.java
| 73
|
[
"jobObject"
] |
Job
| true
| 3
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
hexRingPosToH3
|
public static long hexRingPosToH3(long h3, int ringPos) {
// for pentagons, we skip direction at position 2
final int pos = H3Index.H3_is_pentagon(h3) && ringPos >= 2 ? ringPos + 1 : ringPos;
if (pos < 0 || pos > 5) {
throw new IllegalArgumentException("invalid ring position");
}
return HexRing.h3NeighborInDirection(h3, HexRing.DIRECTIONS[pos].digit());
}
|
Returns the neighbor index at the given position.
@param h3 Origin index
@param ringPos position of the neighbour index
@return the actual neighbour at the given position
|
java
|
libs/h3/src/main/java/org/elasticsearch/h3/H3.java
| 400
|
[
"h3",
"ringPos"
] | true
| 5
| 7.92
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
contains
|
public abstract boolean contains(CharSequence seq, CharSequence searchSeq);
|
Tests if CharSequence contains a search CharSequence, handling {@code null}. This method uses {@link String#indexOf(String)} if possible.
<p>
A {@code null} CharSequence will return {@code false}.
</p>
<p>
Case-sensitive examples
</p>
<pre>
Strings.CS.contains(null, *) = false
Strings.CS.contains(*, null) = false
Strings.CS.contains("", "") = true
Strings.CS.contains("abc", "") = true
Strings.CS.contains("abc", "a") = true
Strings.CS.contains("abc", "z") = false
</pre>
<p>
Case-insensitive examples
</p>
<pre>
Strings.CI.contains(null, *) = false
Strings.CI.contains(*, null) = false
Strings.CI.contains("", "") = true
Strings.CI.contains("abc", "") = true
Strings.CI.contains("abc", "a") = true
Strings.CI.contains("abc", "z") = false
Strings.CI.contains("abc", "A") = true
Strings.CI.contains("abc", "Z") = false
</pre>
@param seq the CharSequence to check, may be null
@param searchSeq the CharSequence to find, may be null
@return true if the CharSequence contains the search CharSequence, false if not or {@code null} string input
|
java
|
src/main/java/org/apache/commons/lang3/Strings.java
| 517
|
[
"seq",
"searchSeq"
] | true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
count
|
@Override
public int count(@Nullable Object element) {
AtomicInteger existingCounter = safeGet(countMap, element);
return (existingCounter == null) ? 0 : existingCounter.get();
}
|
Returns the number of occurrences of {@code element} in this multiset.
@param element the element to look for
@return the nonnegative number of occurrences of the element
|
java
|
android/guava/src/com/google/common/collect/ConcurrentHashMultiset.java
| 152
|
[
"element"
] | true
| 2
| 7.92
|
google/guava
| 51,352
|
javadoc
| false
|
|
chebfromroots
|
def chebfromroots(roots):
"""
Generate a Chebyshev series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in Chebyshev form, where the :math:`r_n` are the roots specified in
`roots`. If a zero has multiplicity n, then it must appear in `roots`
n times. For instance, if 2 is a root of multiplicity three and 3 is a
root of multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3].
The roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in Chebyshev form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
numpy.polynomial.polynomial.polyfromroots
numpy.polynomial.legendre.legfromroots
numpy.polynomial.laguerre.lagfromroots
numpy.polynomial.hermite.hermfromroots
numpy.polynomial.hermite_e.hermefromroots
Examples
--------
>>> import numpy.polynomial.chebyshev as C
>>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis
array([ 0. , -0.25, 0. , 0.25])
>>> j = complex(0,1)
>>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis
array([1.5+0.j, 0. +0.j, 0.5+0.j])
"""
return pu._fromroots(chebline, chebmul, roots)
|
Generate a Chebyshev series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in Chebyshev form, where the :math:`r_n` are the roots specified in
`roots`. If a zero has multiplicity n, then it must appear in `roots`
n times. For instance, if 2 is a root of multiplicity three and 3 is a
root of multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3].
The roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in Chebyshev form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
numpy.polynomial.polynomial.polyfromroots
numpy.polynomial.legendre.legfromroots
numpy.polynomial.laguerre.lagfromroots
numpy.polynomial.hermite.hermfromroots
numpy.polynomial.hermite_e.hermefromroots
Examples
--------
>>> import numpy.polynomial.chebyshev as C
>>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis
array([ 0. , -0.25, 0. , 0.25])
>>> j = complex(0,1)
>>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis
array([1.5+0.j, 0. +0.j, 0.5+0.j])
|
python
|
numpy/polynomial/chebyshev.py
| 512
|
[
"roots"
] | false
| 1
| 6.32
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
acknowledge
|
public void acknowledge(final ConsumerRecord<K, V> record, final AcknowledgeType type) {
for (Map.Entry<TopicIdPartition, ShareInFlightBatch<K, V>> tipBatch : batches.entrySet()) {
TopicIdPartition tip = tipBatch.getKey();
if (tip.topic().equals(record.topic()) && (tip.partition() == record.partition())) {
tipBatch.getValue().acknowledge(record, type);
return;
}
}
throw new IllegalStateException("The record cannot be acknowledged.");
}
|
Acknowledge a single record in the current batch.
@param record The record to acknowledge
@param type The acknowledge type which indicates whether it was processed successfully
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetch.java
| 159
|
[
"record",
"type"
] |
void
| true
| 3
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
updateEstimation
|
public static float updateEstimation(String topic, CompressionType type, float observedRatio) {
float[] compressionRatioForTopic = getAndCreateEstimationIfAbsent(topic);
float currentEstimation = compressionRatioForTopic[type.id];
synchronized (compressionRatioForTopic) {
if (observedRatio > currentEstimation)
compressionRatioForTopic[type.id] = Math.max(currentEstimation + COMPRESSION_RATIO_DETERIORATE_STEP, observedRatio);
else if (observedRatio < currentEstimation) {
compressionRatioForTopic[type.id] = Math.max(currentEstimation - COMPRESSION_RATIO_IMPROVING_STEP, observedRatio);
}
}
return compressionRatioForTopic[type.id];
}
|
Update the compression ratio estimation for a topic and compression type.
@param topic the topic to update compression ratio estimation.
@param type the compression type.
@param observedRatio the observed compression ratio.
@return the compression ratio estimation after the update.
|
java
|
clients/src/main/java/org/apache/kafka/common/record/CompressionRatioEstimator.java
| 42
|
[
"topic",
"type",
"observedRatio"
] | true
| 3
| 7.44
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
toBytes
|
byte[] toBytes() {
Transport.Type protoType = protocol.getType();
boolean hasPort = TRANSPORTS_WITH_PORTS.contains(protoType);
int len = source.getAddress().length + destination.getAddress().length + 2 + (hasPort ? 4 : 0);
ByteBuffer bb = ByteBuffer.allocate(len);
boolean isOneWay = false;
if (protoType == Transport.Type.Icmp || protoType == Transport.Type.IcmpIpV6) {
// ICMP protocols populate port fields with ICMP data
Integer equivalent = IcmpType.codeEquivalent(icmpType, protoType == Transport.Type.IcmpIpV6);
isOneWay = equivalent == null;
sourcePort = icmpType;
destinationPort = equivalent == null ? icmpCode : equivalent;
}
boolean keepOrder = isOrdered() || ((protoType == Transport.Type.Icmp || protoType == Transport.Type.IcmpIpV6) && isOneWay);
bb.put(keepOrder ? source.getAddress() : destination.getAddress());
bb.put(keepOrder ? destination.getAddress() : source.getAddress());
bb.put(toUint16(protocol.getTransportNumber() << 8));
if (hasPort) {
bb.put(keepOrder ? toUint16(sourcePort) : toUint16(destinationPort));
bb.put(keepOrder ? toUint16(destinationPort) : toUint16(sourcePort));
}
return bb.array();
}
|
@return true iff the source address/port is numerically less than the destination address/port as described
in the <a href="https://github.com/corelight/community-id-spec">Community ID</a> spec.
|
java
|
modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/CommunityIdProcessor.java
| 371
|
[] | true
| 13
| 7.76
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
read_and_validate_value_from_cache
|
def read_and_validate_value_from_cache(param_name: str, default_param_value: str) -> tuple[bool, str | None]:
"""
Reads and validates value from cache is present and whether its value is valid according to current rules.
It could happen that the allowed values have been modified since the last time cached value was set,
so this check is crucial to check outdated values.
If the value is not set or in case the cached value stored is not currently allowed,
the default value is stored in the cache and returned instead.
:param param_name: name of the parameter
:param default_param_value: default value of the parameter
:return: Tuple informing whether the value was read from cache and the parameter value that is
set in the cache after this method returns.
"""
is_from_cache = False
cached_value = read_from_cache_file(param_name)
if cached_value is None:
write_to_cache_file(param_name, default_param_value)
cached_value = default_param_value
else:
allowed, allowed_values = check_if_values_allowed(param_name, cached_value)
if allowed:
is_from_cache = True
else:
write_to_cache_file(param_name, default_param_value)
cached_value = default_param_value
return is_from_cache, cached_value
|
Reads and validates value from cache is present and whether its value is valid according to current rules.
It could happen that the allowed values have been modified since the last time cached value was set,
so this check is crucial to check outdated values.
If the value is not set or in case the cached value stored is not currently allowed,
the default value is stored in the cache and returned instead.
:param param_name: name of the parameter
:param default_param_value: default value of the parameter
:return: Tuple informing whether the value was read from cache and the parameter value that is
set in the cache after this method returns.
|
python
|
dev/breeze/src/airflow_breeze/utils/cache.py
| 74
|
[
"param_name",
"default_param_value"
] |
tuple[bool, str | None]
| true
| 5
| 7.92
|
apache/airflow
| 43,597
|
sphinx
| false
|
getField
|
/**
 * Gets an accessible {@link Field} by name, breaking scope if requested.
 * The superclass hierarchy is searched first, then all implemented interfaces.
 *
 * @param cls the {@link Class} to reflect, must not be {@code null}
 * @param fieldName the field name to obtain; must not be blank
 * @param forceAccess whether to break scope restrictions via
 *        {@code setAccessible(true)}; {@code false} matches only public fields
 * @return the matching {@link Field}, or {@code null} if no match was found
 * @throws NullPointerException if {@code cls} is {@code null}
 * @throws IllegalArgumentException if the field name is blank, or if a matching
 *         field exists on two or more implemented interfaces (ambiguous match)
 */
public static Field getField(final Class<?> cls, final String fieldName, final boolean forceAccess) {
    Objects.requireNonNull(cls, "cls");
    Validate.isTrue(StringUtils.isNotBlank(fieldName), "The field name must not be blank/empty");
    // FIXME is this workaround still needed? lang requires Java 6
    // Sun Java 1.3 has a bugged implementation of getField hence we write the
    // code ourselves
    // getField() will return the Field object with the declaring class
    // set correctly to the class that declares the field. Thus requesting the
    // field on a subclass will return the field from the superclass.
    //
    // priority order for lookup:
    // searchclass private/protected/package/public
    // superclass protected/package/public
    // private/different package blocks access to further superclasses
    // implementedinterface public
    // check up the superclass hierarchy
    for (Class<?> acls = cls; acls != null; acls = acls.getSuperclass()) {
        try {
            final Field field = acls.getDeclaredField(fieldName);
            // getDeclaredField checks for non-public scopes as well
            // and it returns accurate results
            if (!MemberUtils.isPublic(field)) {
                if (!forceAccess) {
                    continue;
                }
                field.setAccessible(true);
            }
            return field;
        } catch (final NoSuchFieldException ignored) {
            // ignore
        }
    }
    // check the public interface case. This must be manually searched for
    // incase there is a public supersuperclass field hidden by a private/package
    // superclass field.
    Field match = null;
    for (final Class<?> class1 : ClassUtils.getAllInterfaces(cls)) {
        try {
            final Field test = class1.getField(fieldName);
            Validate.isTrue(match == null,
                "Reference to field %s is ambiguous relative to %s; a matching field exists on two or more implemented interfaces.", fieldName, cls);
            match = test;
        } catch (final NoSuchFieldException ignored) {
            // ignore
        }
    }
    return match;
}
|
Gets an accessible {@link Field} by name, breaking scope if requested. Superclasses/interfaces will be
considered.
@param cls
the {@link Class} to reflect, must not be {@code null}.
@param fieldName
the field name to obtain.
@param forceAccess
whether to break scope restrictions using the
{@link AccessibleObject#setAccessible(boolean)} method. {@code false} will only
match {@code public} fields.
@return the Field object.
@throws NullPointerException if the class is {@code null}.
@throws IllegalArgumentException if the field name is blank or empty or is matched at multiple places
in the inheritance hierarchy.
@throws SecurityException if an underlying accessible object's method denies the request.
@see SecurityManager#checkPermission
|
java
|
src/main/java/org/apache/commons/lang3/reflect/FieldUtils.java
| 178
|
[
"cls",
"fieldName",
"forceAccess"
] |
Field
| true
| 6
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
postValidateSaslMechanismConfig
|
/**
 * Ensures that a SASL mechanism is configured whenever the selected security
 * protocol requires SASL authentication.
 *
 * @param config the configuration to validate
 * @throws ConfigException if the security protocol is SASL_PLAINTEXT or SASL_SSL
 *         and {@code sasl.mechanism} is null or empty
 */
public static void postValidateSaslMechanismConfig(AbstractConfig config) {
    String protocolName = config.getString(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG);
    SecurityProtocol securityProtocol = SecurityProtocol.forName(protocolName);
    boolean saslEnabled = securityProtocol == SecurityProtocol.SASL_PLAINTEXT
            || securityProtocol == SecurityProtocol.SASL_SSL;
    if (!saslEnabled) {
        return;
    }
    String clientSaslMechanism = config.getString(SaslConfigs.SASL_MECHANISM);
    if (clientSaslMechanism == null || clientSaslMechanism.isEmpty()) {
        throw new ConfigException(SaslConfigs.SASL_MECHANISM, null, "When the " + CommonClientConfigs.SECURITY_PROTOCOL_CONFIG +
                " configuration enables SASL, mechanism must be non-null and non-empty string.");
    }
}
|
Validates that a non-empty SASL mechanism is configured when the selected security protocol enables SASL authentication.
@param config The config object.
|
java
|
clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java
| 297
|
[
"config"
] |
void
| true
| 5
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
trimArrayElements
|
/**
 * Returns a copy of the given array with leading and trailing whitespace
 * trimmed from each element, delegating to {@code StringUtils.trimArrayElements}.
 *
 * @param array the array whose elements should be trimmed
 * @return the array with trimmed elements
 */
@SuppressWarnings("NullAway")
private String[] trimArrayElements(String[] array) {
    return StringUtils.trimArrayElements(array);
}
|
Trims whitespace from the elements of the given string array.
@param array the array whose elements should be trimmed
@return the array with trimmed elements
@see StringUtils#trimArrayElements(String[])
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/log4j2/SpringProfileArbiter.java
| 111
|
[
"array"
] | true
| 1
| 6.8
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
describeFeatures
|
/**
 * Describes finalized as well as supported features, using default options.
 * <p>
 * Convenience overload of {@link #describeFeatures(DescribeFeaturesOptions)};
 * see that method for details.
 *
 * @return the {@link DescribeFeaturesResult} containing the result
 */
default DescribeFeaturesResult describeFeatures() {
    return describeFeatures(new DescribeFeaturesOptions());
}
|
Describes finalized as well as supported features.
<p>
This is a convenience method for {@link #describeFeatures(DescribeFeaturesOptions)} with default options.
See the overload for more details.
@return the {@link DescribeFeaturesResult} containing the result
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 1,523
|
[] |
DescribeFeaturesResult
| true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
findInBuffer
|
/**
 * Scans the buffer backwards for the four-byte record signature.
 *
 * @param buffer the buffer to search; its position is modified during the scan
 * @return the position of the last occurrence of the signature, or -1 if absent
 */
private static int findInBuffer(ByteBuffer buffer) {
    int candidate = buffer.limit() - 4;
    while (candidate >= 0) {
        buffer.position(candidate);
        if (buffer.getInt() == SIGNATURE) {
            return candidate;
        }
        candidate--;
    }
    return -1;
}
|
Create a new {@link ZipEndOfCentralDirectoryRecord} instance from the specified
{@link DataBlock} by searching backwards from the end until a valid record is
located.
@param dataBlock the source data block
@return the {@link Located located} {@link ZipEndOfCentralDirectoryRecord}
@throws IOException if the {@link ZipEndOfCentralDirectoryRecord} cannot be read
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipEndOfCentralDirectoryRecord.java
| 140
|
[
"buffer"
] | true
| 3
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
getVectorSelectorPositions
|
/**
 * Parse the query and collect every VectorSelector node found in it, together
 * with its position and a parsed visual-query representation of the selector.
 *
 * @param query the PromQL query string to scan
 */
function getVectorSelectorPositions(query: string): VectorSelectorPosition[] {
  const found: VectorSelectorPosition[] = [];
  parser.parse(query).iterate({
    enter: ({ to, from, type }): false | void => {
      if (type.id !== VectorSelector) {
        return;
      }
      const parsed = buildVisualQueryFromString(query.substring(from, to));
      found.push({ query: parsed.query, from, to });
      // Returning false prunes iteration below this selector node.
      return false;
    },
  });
  return found;
}
|
Parse the string and get all VectorSelector positions in the query together with parsed representation of the vector
selector.
@param query
|
typescript
|
packages/grafana-prometheus/src/add_label_to_query.ts
| 44
|
[
"query"
] | true
| 2
| 6.4
|
grafana/grafana
| 71,362
|
jsdoc
| false
|
|
replaceAdvisor
|
/**
 * Replace the given advisor with a new one at the same position.
 * <p>Does nothing and returns {@code false} if the advisor to replace
 * is not currently registered.
 *
 * @param a the advisor to replace
 * @param b the replacement advisor
 * @return {@code true} if the advisor was replaced; {@code false}
 * if the advisor was not found and hence could not be replaced
 * @throws AopConfigException in case of invalid advice
 */
@Override
public boolean replaceAdvisor(Advisor a, Advisor b) throws AopConfigException {
    Assert.notNull(a, "Advisor a must not be null");
    Assert.notNull(b, "Advisor b must not be null");
    int index = indexOf(a);
    if (index == -1) {
        return false;
    }
    removeAdvisor(index);
    addAdvisor(index, b);
    return true;
}
|
Replace the given advisor with a new one at the same position.
<p>Does nothing if the advisor to replace isn't currently registered.
@param a the advisor to replace
@param b the replacement advisor
@return {@code true} if the advisor was replaced; {@code false}
if the advisor was not found and hence could not be replaced
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/AdvisedSupport.java
| 354
|
[
"a",
"b"
] | true
| 2
| 8.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
getPainlessScriptEngine
|
/**
 * Builds a painless {@link ScriptEngine} for the {@code IngestScript} and
 * {@code IngestConditionalScript} contexts, loading the painless extensions
 * from the constant-keyword, ingest-common, spatial, and wildcard modules.
 *
 * @param settings the Elasticsearch settings object
 * @return the configured {@link ScriptEngine}
 * @throws IOException when the underlying script engine cannot be created
 */
private static ScriptEngine getPainlessScriptEngine(final Settings settings) throws IOException {
    try (PainlessPlugin painlessPlugin = new PainlessPlugin()) {
        painlessPlugin.loadExtensions(new ExtensiblePlugin.ExtensionLoader() {
            @Override
            @SuppressWarnings("unchecked")
            public <T> List<T> loadExtensions(Class<T> extensionPointType) {
                if (extensionPointType.isAssignableFrom(PainlessExtension.class)) {
                    final List<PainlessExtension> extensions = new ArrayList<>();
                    extensions.add(new ConstantKeywordPainlessExtension()); // module: constant-keyword
                    extensions.add(new ProcessorsWhitelistExtension()); // module: ingest-common
                    extensions.add(new SpatialPainlessExtension()); // module: spatial
                    extensions.add(new WildcardPainlessExtension()); // module: wildcard
                    return (List<T>) extensions;
                } else {
                    return List.of();
                }
            }
        });
        return painlessPlugin.getScriptEngine(settings, Set.of(IngestScript.CONTEXT, IngestConditionalScript.CONTEXT));
    }
}
|
@param settings the Elasticsearch settings object
@return a {@link ScriptEngine} for painless scripts for use in {@link IngestScript} and
{@link IngestConditionalScript} contexts, including all available {@link PainlessExtension}s.
@throws IOException when the underlying script engine cannot be created
|
java
|
libs/logstash-bridge/src/main/java/org/elasticsearch/logstashbridge/script/ScriptServiceBridge.java
| 102
|
[
"settings"
] |
ScriptEngine
| true
| 2
| 7.28
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
load
|
/**
 * Load {@link PemContent} from the given {@link Path}.
 *
 * @param path a path to load the content from; must not be {@code null}
 * @return the loaded PEM content
 * @throws IOException on IO error
 */
public static PemContent load(Path path) throws IOException {
    Assert.notNull(path, "'path' must not be null");
    try (InputStream source = Files.newInputStream(path, StandardOpenOption.READ)) {
        return load(source);
    }
}
|
Load {@link PemContent} from the given {@link Path}.
@param path a path to load the content from
@return the loaded PEM content
@throws IOException on IO error
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/pem/PemContent.java
| 140
|
[
"path"
] |
PemContent
| true
| 1
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
reregister
|
/**
 * Re-registers the given MBean with the platform MBean server by first
 * unregistering any existing registration under the same name.
 *
 * @param mbean the MBean to (re-)register
 * @throws KafkaException if registration with the MBean server fails
 */
private void reregister(KafkaMbean mbean) {
    unregister(mbean);
    try {
        ManagementFactory.getPlatformMBeanServer().registerMBean(mbean, mbean.name());
    } catch (JMException e) {
        throw new KafkaException("Error registering mbean " + mbean.name(), e);
    }
}
|
Re-registers the given MBean with the platform MBean server, unregistering any existing registration first.
@param mbean the MBean to re-register
|
java
|
clients/src/main/java/org/apache/kafka/common/metrics/JmxReporter.java
| 209
|
[
"mbean"
] |
void
| true
| 2
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
forCertificate
|
/**
 * Factory method to create a new {@link PemSslStoreDetails} instance for the given
 * certificate. <b>Note:</b> the content is not checked to contain only a single
 * certificate; this is functionally equivalent to {@link #forCertificates(String)}.
 *
 * @param certificate the certificate content (either the PEM content itself or a
 * reference to the resource to load)
 * @return a new {@link PemSslStoreDetails} instance
 */
public static PemSslStoreDetails forCertificate(@Nullable String certificate) {
    return forCertificates(certificate);
}
|
Factory method to create a new {@link PemSslStoreDetails} instance for the given
certificate. <b>Note:</b> This method doesn't actually check if the provided value
only contains a single certificate. It is functionally equivalent to
{@link #forCertificates(String)}.
@param certificate the certificate content (either the PEM content itself or a
reference to the resource to load)
@return a new {@link PemSslStoreDetails} instance.
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/pem/PemSslStoreDetails.java
| 154
|
[
"certificate"
] |
PemSslStoreDetails
| true
| 1
| 6.16
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
write
|
/**
 * Writes a legacy record into the given buffer by wrapping the buffer in a
 * stream and delegating to the stream-based overload.
 *
 * @param buffer the destination buffer
 * @param magic the magic value of the record
 * @param timestamp the record timestamp
 * @param key the record key buffer (may be null)
 * @param value the record value buffer (may be null)
 * @param compressionType the compression type used
 * @param timestampType the timestamp type of the record
 * @throws KafkaException if an I/O error occurs while writing
 */
private static void write(ByteBuffer buffer,
                          byte magic,
                          long timestamp,
                          ByteBuffer key,
                          ByteBuffer value,
                          CompressionType compressionType,
                          TimestampType timestampType) {
    try (DataOutputStream out = new DataOutputStream(new ByteBufferOutputStream(buffer))) {
        write(out, magic, timestamp, key, value, compressionType, timestampType);
    } catch (IOException e) {
        throw new KafkaException(e);
    }
}
|
Write the header for a compressed record set in-place (i.e. assuming the compressed record data has already
been written at the value offset in a wrapped record). This lets you dynamically create a compressed message
set, and then go back later and fill in its size and CRC, which saves the need for copying to another buffer.
@param buffer The buffer containing the compressed record data positioned at the first offset of the
@param magic The magic value of the record set
@param recordSize The size of the record (including record overhead)
@param timestamp The timestamp of the wrapper record
@param compressionType The compression type used
@param timestampType The timestamp type of the wrapper record
|
java
|
clients/src/main/java/org/apache/kafka/common/record/LegacyRecord.java
| 373
|
[
"buffer",
"magic",
"timestamp",
"key",
"value",
"compressionType",
"timestampType"
] |
void
| true
| 2
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
equals
|
def equals(self, other) -> bool:
    """
    Return whether another array is equivalent to this array.

    Equivalence requires identical type and dtype, an identical mask layout,
    and element-wise equal unmasked values. Missing values in the same
    location compare equal (in contrast with normal equality).

    Parameters
    ----------
    other : ExtensionArray
        Array to compare to this Array.

    Returns
    -------
    boolean
        Whether the arrays are equivalent.

    See Also
    --------
    numpy.array_equal : Equivalent method for numpy array.
    Series.equals : Equivalent method for Series.
    DataFrame.equals : Equivalent method for DataFrame.

    Examples
    --------
    >>> arr1 = pd.array([1, 2, np.nan])
    >>> arr2 = pd.array([1, 2, np.nan])
    >>> arr1.equals(arr2)
    True

    >>> arr1 = pd.array([1, 3, np.nan])
    >>> arr2 = pd.array([1, 2, np.nan])
    >>> arr1.equals(arr2)
    False
    """
    if type(self) != type(other) or other.dtype != self.dtype:
        return False

    # GH#44382 if e.g. self[1] is np.nan and other[1] is pd.NA, we are NOT
    # equal.
    if not np.array_equal(self._mask, other._mask):
        return False

    self_valid = self._data[~self._mask]
    other_valid = other._data[~other._mask]
    return array_equivalent(self_valid, other_valid, strict_nan=True, dtype_equal=True)
|
Return if another array is equivalent to this array.
Equivalent means that both arrays have the same shape and dtype, and
all values compare equal. Missing values in the same location are
considered equal (in contrast with normal equality).
Parameters
----------
other : ExtensionArray
Array to compare to this Array.
Returns
-------
boolean
Whether the arrays are equivalent.
See Also
--------
numpy.array_equal : Equivalent method for numpy array.
Series.equals : Equivalent method for Series.
DataFrame.equals : Equivalent method for DataFrame.
Examples
--------
>>> arr1 = pd.array([1, 2, np.nan])
>>> arr2 = pd.array([1, 2, np.nan])
>>> arr1.equals(arr2)
True
>>> arr1 = pd.array([1, 3, np.nan])
>>> arr2 = pd.array([1, 2, np.nan])
>>> arr1.equals(arr2)
False
|
python
|
pandas/core/arrays/masked.py
| 1,434
|
[
"self",
"other"
] |
bool
| true
| 4
| 8.48
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
run_autotune_in_subprocess
|
def run_autotune_in_subprocess(
    benchmark_request: BenchmarkRequest,
) -> float:
    """
    Run an autotuning benchmark in a subprocess.

    This function is submitted to AutotuneProcessPool and runs in isolation
    to prevent GPU contention with the main compilation process.

    Args:
        benchmark_request: the benchmark request to execute.

    Returns:
        The measured timing, or ``float("inf")`` when the benchmark raised,
        so that failed choices are never selected.
    """
    try:
        return benchmark_request.benchmark()
    except Exception:
        autotuning_log.error(
            "Failed to benchmark choice %s",
            benchmark_request,
        )
        return float("inf")
|
Run autotuning benchmarks in a subprocess.
This function is submitted to AutotuneProcessPool and runs in isolation
to prevent GPU contention with the main compilation process.
Args:
benchmark_request: the benchmark request to execute
Returns:
timing
|
python
|
torch/_inductor/autotune_process.py
| 1,171
|
[
"benchmark_request"
] |
float
| true
| 1
| 6.24
|
pytorch/pytorch
| 96,034
|
google
| false
|
of
|
/**
 * Factory method to create a new {@link SslBundle} instance with no key
 * reference and no options.
 *
 * @param stores the stores or {@code null}
 * @return a new {@link SslBundle} instance
 */
static SslBundle of(@Nullable SslStoreBundle stores) {
    return of(stores, null, null);
}
|
Factory method to create a new {@link SslBundle} instance.
@param stores the stores or {@code null}
@return a new {@link SslBundle} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/SslBundle.java
| 98
|
[
"stores"
] |
SslBundle
| true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
readMetadata
|
/**
 * Read the existing {@link ConfigurationMetadata} of the current module from
 * the default metadata path.
 *
 * @return the metadata, or {@code null} if none is present yet
 */
ConfigurationMetadata readMetadata() {
    return readMetadata(METADATA_PATH);
}
|
Read the existing {@link ConfigurationMetadata} of the current module or
{@code null} if it is not available yet.
@return the metadata or {@code null} if none is present
|
java
|
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/MetadataStore.java
| 74
|
[] |
ConfigurationMetadata
| true
| 1
| 6.48
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
_smallest_admissible_index_dtype
|
def _smallest_admissible_index_dtype(arrays=(), maxval=None, check_contents=False):
"""Based on input (integer) arrays `a`, determine a suitable index data
type that can hold the data in the arrays.
This function returns `np.int64` if it either required by `maxval` or based on the
largest precision of the dtype of the arrays passed as argument, or by their
contents (when `check_contents is True`). If none of the condition requires
`np.int64` then this function returns `np.int32`.
Parameters
----------
arrays : ndarray or tuple of ndarrays, default=()
Input arrays whose types/contents to check.
maxval : float, default=None
Maximum value needed.
check_contents : bool, default=False
Whether to check the values in the arrays and not just their types.
By default, check only the types.
Returns
-------
dtype : {np.int32, np.int64}
Suitable index data type (int32 or int64).
"""
int32min = np.int32(np.iinfo(np.int32).min)
int32max = np.int32(np.iinfo(np.int32).max)
if maxval is not None:
if maxval > np.iinfo(np.int64).max:
raise ValueError(
f"maxval={maxval} is to large to be represented as np.int64."
)
if maxval > int32max:
return np.int64
if isinstance(arrays, np.ndarray):
arrays = (arrays,)
for arr in arrays:
if not isinstance(arr, np.ndarray):
raise TypeError(
f"Arrays should be of type np.ndarray, got {type(arr)} instead."
)
if not np.issubdtype(arr.dtype, np.integer):
raise ValueError(
f"Array dtype {arr.dtype} is not supported for index dtype. We expect "
"integral values."
)
if not np.can_cast(arr.dtype, np.int32):
if not check_contents:
# when `check_contents` is False, we stay on the safe side and return
# np.int64.
return np.int64
if arr.size == 0:
# a bigger type not needed yet, let's look at the next array
continue
else:
maxval = arr.max()
minval = arr.min()
if minval < int32min or maxval > int32max:
# a big index type is actually needed
return np.int64
return np.int32
|
Based on input (integer) arrays `a`, determine a suitable index data
type that can hold the data in the arrays.
This function returns `np.int64` if it either required by `maxval` or based on the
largest precision of the dtype of the arrays passed as argument, or by their
contents (when `check_contents is True`). If none of the condition requires
`np.int64` then this function returns `np.int32`.
Parameters
----------
arrays : ndarray or tuple of ndarrays, default=()
Input arrays whose types/contents to check.
maxval : float, default=None
Maximum value needed.
check_contents : bool, default=False
Whether to check the values in the arrays and not just their types.
By default, check only the types.
Returns
-------
dtype : {np.int32, np.int64}
Suitable index data type (int32 or int64).
|
python
|
sklearn/utils/fixes.py
| 254
|
[
"arrays",
"maxval",
"check_contents"
] | false
| 14
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
_get_range
|
def _get_range(self) -> torch.Tensor:
"""
Get a tensor representing the range [0, size) for this dimension.
Returns:
A 1D tensor with values [0, 1, 2, ..., size-1]
"""
if self._range is None:
self._range = torch.arange(self.size)
return self._range
|
Get a tensor representing the range [0, size) for this dimension.
Returns:
A 1D tensor with values [0, 1, 2, ..., size-1]
|
python
|
functorch/dim/__init__.py
| 929
|
[
"self"
] |
torch.Tensor
| true
| 2
| 8.08
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
get_variable_from_secrets
|
def get_variable_from_secrets(key: str, team_name: str | None = None) -> str | None:
    """
    Get an Airflow Variable by iterating over all configured secret backends.

    :param key: Variable key
    :param team_name: Team name associated to the task trying to access the variable (if any)
    :return: Variable value, or None if no backend provides it
    """
    if not team_name:
        # Cache lookup is only attempted for non-team variables, and only works
        # if SecretCache.init() has been called first.
        try:
            return SecretCache.get_variable(key)
        except SecretCache.NotPresentException:
            pass  # not cached (or expired) -- fall through to the backends
    value = None
    for backend in ensure_secrets_loaded():
        try:
            value = backend.get_variable(key=key, team_name=team_name)
        except Exception:
            log.exception(
                "Unable to retrieve variable from secrets backend (%s). "
                "Checking subsequent secrets backend.",
                type(backend).__name__,
            )
            continue
        if value is not None:
            break
    # NOTE(review): the result is cached even for team-scoped lookups, although
    # the cache read above is skipped for them -- confirm this is intended.
    SecretCache.save_variable(key, value)  # None is saved as well
    return value
|
Get Airflow Variable by iterating over all Secret Backends.
:param key: Variable Key
:param team_name: Team name associated to the task trying to access the variable (if any)
:return: Variable Value
|
python
|
airflow-core/src/airflow/models/variable.py
| 489
|
[
"key",
"team_name"
] |
str | None
| true
| 4
| 8.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
throwMissingRequiredFields
|
/**
 * Builds and throws an {@link IllegalArgumentException} describing every group
 * of mutually-required fields of which none were specified.
 *
 * @param requiredFields one entry per unsatisfied group of alternative field names
 */
private static void throwMissingRequiredFields(List<String[]> requiredFields) {
    final StringBuilder message = new StringBuilder();
    boolean first = true;
    for (String[] group : requiredFields) {
        if (!first) {
            message.append(" ");
        }
        first = false;
        message.append("Required one of fields ").append(Arrays.toString(group)).append(", but none were specified.");
    }
    throw new IllegalArgumentException(message.toString());
}
|
Builds and throws an {@link IllegalArgumentException} listing every group of
mutually-required fields of which none were specified.
@param requiredFields one entry per unsatisfied group of alternative field names
@throws IllegalArgumentException always
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/ObjectParser.java
| 334
|
[
"requiredFields"
] |
void
| true
| 3
| 7.6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
set
|
/**
 * Associates {@code value} with the specified row and column indices, running
 * faster than the equivalent key-based {@code put}.
 *
 * @param rowIndex position of the row key in {@code rowKeyList()}
 * @param columnIndex position of the column key in {@code columnKeyList()}
 * @param value value to store in the table
 * @return the previous value at the specified row and column
 * @throws IndexOutOfBoundsException if either index is out of range
 */
@CanIgnoreReturnValue
public @Nullable V set(int rowIndex, int columnIndex, @Nullable V value) {
    // In GWT array access never throws IndexOutOfBoundsException.
    checkElementIndex(rowIndex, rowList.size());
    checkElementIndex(columnIndex, columnList.size());
    V oldValue = array[rowIndex][columnIndex];
    array[rowIndex][columnIndex] = value;
    return oldValue;
}
|
Associates {@code value} with the specified row and column indices. The logic {@code
put(rowKeyList().get(rowIndex), columnKeyList().get(columnIndex), value)} has the same
behavior, but this method runs more quickly.
@param rowIndex position of the row key in {@link #rowKeyList()}
@param columnIndex position of the row key in {@link #columnKeyList()}
@param value value to store in the table
@return the previous value with the specified row and column
@throws IndexOutOfBoundsException if either index is negative, {@code rowIndex} is greater than
or equal to the number of allowed row keys, or {@code columnIndex} is greater than or equal
to the number of allowed column keys
|
java
|
android/guava/src/com/google/common/collect/ArrayTable.java
| 343
|
[
"rowIndex",
"columnIndex",
"value"
] |
V
| true
| 1
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
negate
|
/**
 * Gets a fraction that is the negative (-fraction) of this one.
 * <p>The returned fraction is not reduced.</p>
 *
 * @return a new fraction instance with the opposite signed numerator
 * @throws ArithmeticException if the numerator is {@code Integer.MIN_VALUE},
 *         which cannot be negated without overflow
 */
public Fraction negate() {
    // the positive range is one smaller than the negative range of an int.
    if (numerator == Integer.MIN_VALUE) {
        throw new ArithmeticException("overflow: too large to negate");
    }
    return new Fraction(-numerator, denominator);
}
|
Gets a fraction that is the negative (-fraction) of this one.
<p>
The returned fraction is not reduced.
</p>
@return a new fraction instance with the opposite signed numerator
|
java
|
src/main/java/org/apache/commons/lang3/math/Fraction.java
| 801
|
[] |
Fraction
| true
| 2
| 8.4
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
beans
|
/**
 * Defines a set of beans for the given block or closure.
 *
 * @param closure the block or closure
 * @return this {@code GroovyBeanDefinitionReader} instance
 */
public GroovyBeanDefinitionReader beans(Closure<?> closure) {
    return invokeBeanDefiningClosure(closure);
}
|
Defines a set of beans for the given block or closure.
@param closure the block or closure
@return this {@code GroovyBeanDefinitionReader} instance
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/groovy/GroovyBeanDefinitionReader.java
| 294
|
[
"closure"
] |
GroovyBeanDefinitionReader
| true
| 1
| 6.48
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
get_class_if_classified_error
|
def get_class_if_classified_error(e: Exception) -> Optional[str]:
"""
Returns a string case name if the export error e is classified.
Returns None otherwise.
"""
from torch._dynamo.exc import TorchRuntimeError, Unsupported, UserError
ALWAYS_CLASSIFIED = "always_classified"
DEFAULT_CLASS_SIGIL = "case_name"
# add error types that should be classified, along with any attribute name
# whose presence acts like a sigil to further distinguish which errors of
# that type should be classified. If the attribute name is None, then the
# error type is always classified.
_ALLOW_LIST = {
Unsupported: DEFAULT_CLASS_SIGIL,
UserError: DEFAULT_CLASS_SIGIL,
TorchRuntimeError: None,
}
if type(e) in _ALLOW_LIST:
# pyrefly: ignore [bad-index, index-error]
attr_name = _ALLOW_LIST[type(e)]
if attr_name is None:
return ALWAYS_CLASSIFIED
return getattr(e, attr_name, None)
return None
|
Returns a string case name if the export error e is classified.
Returns None otherwise.
|
python
|
torch/_export/db/logging.py
| 21
|
[
"e"
] |
Optional[str]
| true
| 3
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
findConfig
|
/**
 * Returns the first candidate location that exists on the classpath, prefixed
 * with {@code classpath:}, or {@code null} if none of the locations exist.
 *
 * @param locations the candidate classpath locations to probe, in order
 * @return the matching {@code classpath:} location or {@code null}
 */
private @Nullable String findConfig(String[] locations) {
    for (String candidate : locations) {
        if (new ClassPathResource(candidate, this.classLoader).exists()) {
            return "classpath:" + candidate;
        }
    }
    return null;
}
|
Return any spring specific initialization config that should be applied. By default
this method checks {@link #getSpringConfigLocations()}.
@return the spring initialization config or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/AbstractLoggingSystem.java
| 111
|
[
"locations"
] |
String
| true
| 2
| 6.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.