function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
isocalendar
|
def isocalendar(self) -> DataFrame:
    """
    Calculate year, week, and day according to the ISO 8601 standard.
    Returns
    -------
    DataFrame
    With columns year, week and day.
    See Also
    --------
    Timestamp.isocalendar : Function return a 3-tuple containing ISO year,
    week number, and weekday for the given Timestamp object.
    datetime.date.isocalendar : Return a named tuple object with
    three components: year, week and weekday.
    Examples
    --------
    >>> ser = pd.to_datetime(pd.Series(["2010-01-01", pd.NaT]))
    >>> ser.dt.isocalendar()
    year week day
    0 2009 53 5
    1 <NA> <NA> <NA>
    >>> ser.dt.isocalendar().week
    0 53
    1 <NA>
    Name: week, dtype: UInt32
    """
    # Compute the ISO calendar frame from the underlying datetime values,
    # then re-align it to the parent Series' index.
    iso_frame = self._get_values().isocalendar()
    return iso_frame.set_index(self._parent.index)
|
Calculate year, week, and day according to the ISO 8601 standard.
Returns
-------
DataFrame
With columns year, week and day.
See Also
--------
Timestamp.isocalendar : Function return a 3-tuple containing ISO year,
week number, and weekday for the given Timestamp object.
datetime.date.isocalendar : Return a named tuple object with
three components: year, week and weekday.
Examples
--------
>>> ser = pd.to_datetime(pd.Series(["2010-01-01", pd.NaT]))
>>> ser.dt.isocalendar()
year week day
0 2009 53 5
1 <NA> <NA> <NA>
>>> ser.dt.isocalendar().week
0 53
1 <NA>
Name: week, dtype: UInt32
|
python
|
pandas/core/indexes/accessors.py
| 401
|
[
"self"
] |
DataFrame
| true
| 1
| 7.12
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
empty
|
/**
 * Checks whether this object contains any features.
 *
 * @return {@code true} if the underlying feature map is empty, {@code false} otherwise
 */
public boolean empty() {
    return features.isEmpty();
}
|
@return Returns true if this Features object contains no features (i.e. the underlying feature map is empty), false otherwise.
|
java
|
clients/src/main/java/org/apache/kafka/common/feature/Features.java
| 67
|
[] | true
| 1
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
buildGetInstanceMethodForFactoryMethod
|
/**
 * Populate the given method builder with a method that returns the
 * {@link BeanInstanceSupplier} for a bean created via a factory method.
 *
 * @param method the method builder to populate
 * @param beanName the name of the bean
 * @param factoryMethod the factory method that creates the bean instance
 * @param targetClass the class on which the factory method is resolved
 * @param factoryBeanName the name of the factory bean (may be {@code null})
 * @param modifiers the modifiers to apply to the generated method
 */
private void buildGetInstanceMethodForFactoryMethod(MethodSpec.Builder method,
        String beanName, Method factoryMethod, Class<?> targetClass,
        @Nullable String factoryBeanName, javax.lang.model.element.Modifier... modifiers) {
    String factoryMethodName = factoryMethod.getName();
    // The supplied type is the factory method's return type, with primitives boxed.
    Class<?> suppliedType = ClassUtils.resolvePrimitiveIfNecessary(factoryMethod.getReturnType());
    // Collect deprecation warnings for the target class, the factory method, its
    // return type, and its parameter types so they can be suppressed below.
    CodeWarnings codeWarnings = new CodeWarnings();
    codeWarnings.detectDeprecation(ClassUtils.getUserClass(targetClass), factoryMethod, suppliedType)
            .detectDeprecation(Arrays.stream(factoryMethod.getParameters()).map(Parameter::getType));
    method.addJavadoc("Get the bean instance supplier for '$L'.", beanName);
    method.addModifiers(modifiers);
    codeWarnings.suppress(method);
    method.returns(ParameterizedTypeName.get(BeanInstanceSupplier.class, suppliedType));
    CodeBlock.Builder code = CodeBlock.builder();
    code.add(generateInstanceSupplierForFactoryMethod(
            factoryMethod, suppliedType, targetClass, factoryMethodName));
    boolean hasArguments = factoryMethod.getParameterCount() > 0;
    // Only generate autowired-argument resolution code when the factory method
    // actually declares parameters.
    CodeBlock arguments = hasArguments ?
            new AutowiredArgumentsCodeGenerator(ClassUtils.getUserClass(targetClass), factoryMethod)
                    .generateCode(factoryMethod.getParameterTypes()) : NO_ARGS;
    CodeBlock newInstance = generateNewInstanceCodeForMethod(
            factoryBeanName, ClassUtils.getUserClass(targetClass), factoryMethodName, arguments);
    code.add(generateWithGeneratorCode(hasArguments, newInstance));
    method.addStatement(code.build());
}
|
Build the method that returns the bean instance supplier for a bean created via a factory method.
@param method the method builder to populate
@param beanName the name of the bean
@param factoryMethod the factory method used to create the bean
@param targetClass the class on which the factory method is resolved
@param factoryBeanName the name of the factory bean, or null
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/InstanceSupplierCodeGenerator.java
| 311
|
[
"method",
"beanName",
"factoryMethod",
"targetClass",
"factoryBeanName"
] |
void
| true
| 2
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
appendLegacyRecord
|
/**
 * Write a single record in the legacy message format at the given offset.
 *
 * @param offset the absolute offset of the record
 * @param timestamp the record timestamp; replaced with the builder's log append time
 *     when the timestamp type is {@code LOG_APPEND_TIME}
 * @param key the record key
 * @param value the record value
 * @param magic the legacy magic byte (message format version)
 * @return the CRC computed for the written record
 * @throws IOException if writing to the append stream fails
 */
private long appendLegacyRecord(long offset, long timestamp, ByteBuffer key, ByteBuffer value, byte magic) throws IOException {
    ensureOpenForRecordAppend();
    int size = LegacyRecord.recordSize(magic, key, value);
    // Write the (inner offset, size) header before the record payload.
    AbstractLegacyRecordBatch.writeHeader(appendStream, toInnerOffset(offset), size);
    if (timestampType == TimestampType.LOG_APPEND_TIME)
        timestamp = logAppendTime;
    long crc = LegacyRecord.write(appendStream, magic, timestamp, key, value, CompressionType.NONE, timestampType);
    // Track written bytes including the per-record log overhead.
    recordWritten(offset, timestamp, size + Records.LOG_OVERHEAD);
    return crc;
}
|
Append the record at the next consecutive offset. If no records have been appended yet, use the base
offset of this builder.
@param record The record to add
|
java
|
clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java
| 767
|
[
"offset",
"timestamp",
"key",
"value",
"magic"
] | true
| 2
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
setPatterns
|
/**
 * Set the regular expressions defining methods to match.
 * <p>The pointcut matches if any one of the given patterns matches
 * (i.e. matching is the union of all patterns).
 * @param patterns the patterns to use; must not be empty
 * @see #setPattern
 */
public void setPatterns(String... patterns) {
    Assert.notEmpty(patterns, "'patterns' must not be empty");
    String[] stripped = new String[patterns.length];
    int index = 0;
    for (String pattern : patterns) {
        stripped[index++] = pattern.strip();
    }
    this.patterns = stripped;
    initPatternRepresentation(this.patterns);
}
|
Set the regular expressions defining methods to match.
Matching will be the union of all these; if any match, the pointcut matches.
@see #setPattern
|
java
|
spring-aop/src/main/java/org/springframework/aop/support/AbstractRegexpMethodPointcut.java
| 80
|
[] |
void
| true
| 2
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
visitModuleDeclaration
|
/**
 * Visits a TypeScript namespace (ModuleDeclaration) and lowers it into an IIFE of
 * the form `(function (x_1) { ... })(x || (x = {}))`, optionally preceded by a
 * `var` declaration for the namespace object.
 *
 * @param node The module declaration node.
 */
function visitModuleDeclaration(node: ModuleDeclaration): VisitResult<Statement> {
    if (!shouldEmitModuleDeclaration(node)) {
        // Nothing to emit for this namespace (per shouldEmitModuleDeclaration);
        // keep a placeholder statement so attached trivia is preserved.
        return factory.createNotEmittedStatement(node);
    }
    Debug.assertNode(node.name, isIdentifier, "A TypeScript namespace should have an Identifier name.");
    enableSubstitutionForNamespaceExports();
    const statements: Statement[] = [];
    // We request to be advised when the printer is about to print this node. This allows
    // us to set up the correct state for later substitutions.
    let emitFlags = EmitFlags.AdviseOnEmitNode;
    // If needed, we should emit a variable declaration for the module. If we emit
    // a leading variable declaration, we should not emit leading comments for the
    // module body.
    const varAdded = addVarForEnumOrModuleDeclaration(statements, node);
    if (varAdded) {
        // We should still emit the comments if we are emitting a system module.
        if (moduleKind !== ModuleKind.System || currentLexicalScope !== currentSourceFile) {
            emitFlags |= EmitFlags.NoLeadingComments;
        }
    }
    // `parameterName` is the declaration name used inside of the namespace.
    const parameterName = getNamespaceParameterName(node);
    // `containerName` is the expression used inside of the namespace for exports.
    const containerName = getNamespaceContainerName(node);
    // `exportName` is the expression used within this node's container for any exported references.
    const exportName = isExportOfNamespace(node)
        ? factory.getExternalModuleOrNamespaceExportName(currentNamespaceContainerName, node, /*allowComments*/ false, /*allowSourceMaps*/ true)
        : factory.getDeclarationName(node, /*allowComments*/ false, /*allowSourceMaps*/ true);
    // x || (x = {})
    // exports.x || (exports.x = {})
    let moduleArg = factory.createLogicalOr(
        exportName,
        factory.createAssignment(
            exportName,
            factory.createObjectLiteralExpression(),
        ),
    );
    if (isExportOfNamespace(node)) {
        // `localName` is the expression used within this node's containing scope for any local references.
        const localName = factory.getLocalName(node, /*allowComments*/ false, /*allowSourceMaps*/ true);
        // x = (exports.x || (exports.x = {}))
        moduleArg = factory.createAssignment(localName, moduleArg);
    }
    // (function (x_1) {
    //     x_1.y = ...;
    // })(x || (x = {}));
    const moduleStatement = factory.createExpressionStatement(
        factory.createCallExpression(
            factory.createFunctionExpression(
                /*modifiers*/ undefined,
                /*asteriskToken*/ undefined,
                /*name*/ undefined,
                /*typeParameters*/ undefined,
                [factory.createParameterDeclaration(/*modifiers*/ undefined, /*dotDotDotToken*/ undefined, parameterName)],
                /*type*/ undefined,
                transformModuleBody(node, containerName),
            ),
            /*typeArguments*/ undefined,
            [moduleArg],
        ),
    );
    setOriginalNode(moduleStatement, node);
    if (varAdded) {
        // If a variable was added, synthetic comments are emitted on it, not on the moduleStatement.
        setSyntheticLeadingComments(moduleStatement, undefined);
        setSyntheticTrailingComments(moduleStatement, undefined);
    }
    setTextRange(moduleStatement, node);
    addEmitFlags(moduleStatement, emitFlags);
    statements.push(moduleStatement);
    return statements;
}
|
Visits a module declaration node.
This function will be called any time a TypeScript namespace (ModuleDeclaration) is encountered.
@param node The module declaration node.
|
typescript
|
src/compiler/transformers/ts.ts
| 2,077
|
[
"node"
] | true
| 8
| 6.48
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
handleResponse
|
/**
 * Handle the ShareFetch response, updating the session metadata epoch accordingly.
 *
 * @param response The response.
 * @param version The version of the request.
 * @return true if the share session can continue; false if the session must be
 *     re-established or the error prevents processing
 */
public boolean handleResponse(ShareFetchResponse response, short version) {
    Errors error = response.error();
    boolean sessionReset = error == Errors.SHARE_SESSION_NOT_FOUND
            || error == Errors.INVALID_SHARE_SESSION_EPOCH
            || error == Errors.SHARE_SESSION_LIMIT_REACHED;
    if (sessionReset) {
        // The session is no longer usable; close it and attempt a new one.
        log.info("Node {} was unable to process the ShareFetch request with {}: {}.",
            node, nextMetadata, error);
        nextMetadata = nextMetadata.nextCloseExistingAttemptNew();
        return false;
    }
    if (error != Errors.NONE) {
        log.info("Node {} was unable to process the ShareFetch request with {}: {}.",
            node, nextMetadata, error);
        nextMetadata = nextMetadata.nextEpoch();
        return false;
    }
    // The share session was continued by the server
    if (log.isDebugEnabled())
        log.debug("Node {} sent a ShareFetch response with throttleTimeMs = {} " +
            "for session {}", node, response.throttleTimeMs(), memberId);
    nextMetadata = nextMetadata.nextEpoch();
    return true;
}
|
Handle the ShareFetch response.
@param response The response.
@param version The version of the request.
@return True if the response is well-formed; false if it can't be processed
because of missing or unexpected partitions.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandler.java
| 263
|
[
"response",
"version"
] | true
| 6
| 8.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
visitReturnStatement
|
/**
 * Visits a return statement. When inside a converted loop body, the return is
 * rewritten into `return { value: ... }` and the non-local jump is recorded on
 * `convertedLoopState` so the enclosing function can propagate it.
 *
 * @param node The return statement node.
 */
function visitReturnStatement(node: ReturnStatement): Statement {
    if (convertedLoopState) {
        // Record that this loop body performs a non-local `return`.
        convertedLoopState.nonLocalJumps! |= Jump.Return;
        if (isReturnVoidStatementInConstructorWithCapturedSuper(node)) {
            node = returnCapturedThis(node);
        }
        // Wrap the returned value in an object literal `{ value: <expr> }` so the
        // converted loop's caller can distinguish it from a normal completion.
        return factory.createReturnStatement(
            factory.createObjectLiteralExpression(
                [
                    factory.createPropertyAssignment(
                        factory.createIdentifier("value"),
                        node.expression
                            ? Debug.checkDefined(visitNode(node.expression, visitor, isExpression))
                            : factory.createVoidZero(),
                    ),
                ],
            ),
        );
    }
    else if (isReturnVoidStatementInConstructorWithCapturedSuper(node)) {
        // A bare `return;` in a constructor with a captured `super` must return
        // the captured `this`.
        return returnCapturedThis(node);
    }
    return visitEachChild(node, visitor, context);
}
|
Visits a return statement. Inside a converted loop body, the return is rewritten into
`return { value: ... }` and the non-local jump is recorded on `convertedLoopState`;
otherwise the node's children are visited normally.
@param node The return statement node.
|
typescript
|
src/compiler/transformers/es2015.ts
| 829
|
[
"node"
] | true
| 6
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
values
|
def values(self) -> np.ndarray:
    """
    Return a Numpy representation of the DataFrame.
    .. warning::
    We recommend using :meth:`DataFrame.to_numpy` instead.
    Only the values in the DataFrame will be returned, the axes labels
    will be removed.
    Returns
    -------
    numpy.ndarray
    The values of the DataFrame.
    See Also
    --------
    DataFrame.to_numpy : Recommended alternative to this method.
    DataFrame.index : Retrieve the index labels.
    DataFrame.columns : Retrieving the column names.
    Notes
    -----
    The dtype will be a lower-common-denominator dtype (implicit
    upcasting); that is to say if the dtypes (even of numeric types)
    are mixed, the one that accommodates all will be chosen. Use this
    with care if you are not dealing with the blocks.
    e.g. If the dtypes are float16 and float32, dtype will be upcast to
    float32. If dtypes are int32 and uint8, dtype will be upcast to
    int32. By :func:`numpy.find_common_type` convention, mixing int64
    and uint64 will result in a float64 dtype.
    Examples
    --------
    A DataFrame where all columns are the same type (e.g., int64) results
    in an array of the same type.
    >>> df = pd.DataFrame(
    ...     {"age": [3, 29], "height": [94, 170], "weight": [31, 115]}
    ... )
    >>> df
    age height weight
    0 3 94 31
    1 29 170 115
    >>> df.dtypes
    age int64
    height int64
    weight int64
    dtype: object
    >>> df.values
    array([[ 3, 94, 31],
    [ 29, 170, 115]])
    A DataFrame with mixed type columns(e.g., str/object, int64, float32)
    results in an ndarray of the broadest type that accommodates these
    mixed types (e.g., object).
    >>> df2 = pd.DataFrame(
    ...     [
    ...         ("parrot", 24.0, "second"),
    ...         ("lion", 80.5, 1),
    ...         ("monkey", np.nan, None),
    ...     ],
    ...     columns=("name", "max_speed", "rank"),
    ... )
    >>> df2.dtypes
    name str
    max_speed float64
    rank object
    dtype: object
    >>> df2.values
    array([['parrot', 24.0, 'second'],
    ['lion', 80.5, 1],
    ['monkey', nan, None]], dtype=object)
    """
    # Materialize the values as a single ndarray via the internal manager.
    return self._mgr.as_array()
|
Return a Numpy representation of the DataFrame.
.. warning::
We recommend using :meth:`DataFrame.to_numpy` instead.
Only the values in the DataFrame will be returned, the axes labels
will be removed.
Returns
-------
numpy.ndarray
The values of the DataFrame.
See Also
--------
DataFrame.to_numpy : Recommended alternative to this method.
DataFrame.index : Retrieve the index labels.
DataFrame.columns : Retrieving the column names.
Notes
-----
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. If dtypes are int32 and uint8, dtype will be upcast to
int32. By :func:`numpy.find_common_type` convention, mixing int64
and uint64 will result in a float64 dtype.
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results
in an array of the same type.
>>> df = pd.DataFrame(
... {"age": [3, 29], "height": [94, 170], "weight": [31, 115]}
... )
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed type columns(e.g., str/object, int64, float32)
results in an ndarray of the broadest type that accommodates these
mixed types (e.g., object).
>>> df2 = pd.DataFrame(
... [
... ("parrot", 24.0, "second"),
... ("lion", 80.5, 1),
... ("monkey", np.nan, None),
... ],
... columns=("name", "max_speed", "rank"),
... )
>>> df2.dtypes
name str
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 1],
['monkey', nan, None]], dtype=object)
|
python
|
pandas/core/frame.py
| 15,016
|
[
"self"
] |
np.ndarray
| true
| 1
| 7.2
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
newTreeSet
|
/**
 * Creates a <i>mutable</i>, empty {@code TreeSet} instance sorted by the natural
 * ordering of its elements.
 *
 * @return a new, empty {@code TreeSet}
 */
@SuppressWarnings({
    "rawtypes", // https://github.com/google/guava/issues/989
    "NonApiType", // acts as a direct substitute for a constructor call
})
public static <E extends Comparable> TreeSet<E> newTreeSet() {
    TreeSet<E> set = new TreeSet<>();
    return set;
}
|
Creates a <i>mutable</i>, empty {@code TreeSet} instance sorted by the natural sort ordering of
its elements.
<p><b>Note:</b> if mutability is not required, use {@link ImmutableSortedSet#of()} instead.
<p><b>Note:</b> this method is now unnecessary and should be treated as deprecated. Instead,
use the {@code TreeSet} constructor directly, taking advantage of <a
href="https://docs.oracle.com/javase/tutorial/java/generics/genTypeInference.html#type-inference-instantiation">"diamond"
syntax</a>.
@return a new, empty {@code TreeSet}
|
java
|
android/guava/src/com/google/common/collect/Sets.java
| 381
|
[] | true
| 1
| 6.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
setNextAllowedRetry
|
/**
 * Set the next allowed retry time for each of the given partitions.
 *
 * @param partitions the partitions to update
 * @param nextAllowResetTimeMs the earliest time (in ms) at which a retry is allowed
 */
synchronized void setNextAllowedRetry(Set<TopicPartition> partitions, long nextAllowResetTimeMs) {
    partitions.forEach(partition -> assignedState(partition).setNextAllowedRetry(nextAllowResetTimeMs));
}
|
Set the next allowed retry time for each of the given partitions.
@param partitions The partitions to update
@param nextAllowResetTimeMs The earliest time (in ms) at which a retry is allowed.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
| 808
|
[
"partitions",
"nextAllowResetTimeMs"
] |
void
| true
| 1
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
nextInt
|
/**
 * Generates a random int between 0 (inclusive) and Integer.MAX_VALUE (exclusive).
 *
 * @return the random integer.
 * @see #nextInt(int, int)
 * @deprecated Use {@link #secure()}, {@link #secureStrong()}, or {@link #insecure()}.
 */
@Deprecated
public static int nextInt() {
    // Delegates to the secure random instance.
    return secure().randomInt();
}
|
Generates a random int between 0 (inclusive) and Integer.MAX_VALUE (exclusive).
@return the random integer.
@see #nextInt(int, int)
@since 3.5
@deprecated Use {@link #secure()}, {@link #secureStrong()}, or {@link #insecure()}.
|
java
|
src/main/java/org/apache/commons/lang3/RandomUtils.java
| 193
|
[] | true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
canConnect
|
/**
 * Return true iff we can currently initiate a new connection to the given node.
 * This is the case when we have no state for the node, or when it is disconnected
 * and the reconnect backoff period has elapsed since the last connect attempt.
 *
 * @param id the connection id to check
 * @param now the current time in ms
 * @return true if we can initiate a new connection
 */
public boolean canConnect(String id, long now) {
    NodeConnectionState state = nodeState.get(id);
    // No recorded state means we have never tried to connect: always allowed.
    if (state == null) {
        return true;
    }
    boolean backoffElapsed = now - state.lastConnectAttemptMs >= state.reconnectBackoffMs;
    return state.state.isDisconnected() && backoffElapsed;
}
|
Return true iff we can currently initiate a new connection. This will be the case if we are not
connected and haven't been connected for at least the minimum reconnection backoff period.
@param id the connection id to check
@param now the current time in ms
@return true if we can initiate a new connection
|
java
|
clients/src/main/java/org/apache/kafka/clients/ClusterConnectionStates.java
| 77
|
[
"id",
"now"
] | true
| 3
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
fuse_seed_creation_pass
|
def fuse_seed_creation_pass(graph: torch.fx.Graph):
    """
    Horizontally fuse all the seed generation on each device
    a = inductor_seed(dev)
    b = inductor_seed(dev)
    Becomes:
    seeds = inductor_seeds(2, dev)
    a = inductor_lookup_seed(seeds, 0)
    b = inductor_lookup_seed(seeds, 1)
    We do this because seed creation is entirely launch overhead bound.

    Returns the number of devices whose seed nodes were fused (0 if there
    were no seed nodes at all).
    """
    # Bucket every inductor_prims.seed call node by its device argument.
    device_seeds = collections.defaultdict(list)
    for node in graph.nodes:
        if CallFunctionVarArgs(inductor_prims.seed).match(node):
            device_seeds[node.args[0]].append(node)
    if not device_seeds:
        return 0
    for device, seeds in device_seeds.items():
        # Create one combined seeds node per device, inserted before the first
        # seed node it will replace.
        with graph.inserting_before(seeds[0]):
            combined = graph.call_function(inductor_prims.seeds, (len(seeds), device))
            with V.fake_mode:
                # Attach fake-tensor metadata so downstream passes see the
                # combined node as an int64 tensor of shape [len(seeds)].
                combined.meta["val"] = torch.empty(
                    [len(seeds)], device=device, dtype=torch.int64
                )
                combined.meta["tensor_meta"] = _extract_tensor_metadata(
                    combined.meta["val"]
                )
        # Replace each original seed node with a lookup into the combined node,
        # then remove the original node from the graph.
        for idx, seed in enumerate(seeds):
            with graph.inserting_before(seed):
                new_seed = graph.call_function(
                    inductor_prims.lookup_seed, (combined, idx)
                )
            seed.replace_all_uses_with(new_seed)
            new_seed.meta.update(seed.meta)
            graph.erase_node(seed)
    return len(device_seeds)
|
Horizontally fuse all the seed generation on each device
a = inductor_seed(dev)
b = inductor_seed(dev)
Becomes:
seeds = inductor_seeds(2, dev)
a = inductor_lookup_seed(seeds, 0)
b = inductor_lookup_seed(seeds, 1)
We do this because seed creation is entirely launch overhead bound.
|
python
|
torch/_inductor/fx_passes/replace_random.py
| 36
|
[
"graph"
] | true
| 6
| 6.08
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
|
lookupCache
|
/**
 * Check for a registered cache of the given name, without triggering
 * lazy creation of missing caches.
 *
 * @param name the cache identifier
 * @return the associated Cache instance, or {@code null} if none found
 */
protected final @Nullable Cache lookupCache(String name) {
    return this.cacheMap.get(name);
}
|
Check for a registered cache of the given name.
In contrast to {@link #getCache(String)}, this method does not trigger
the lazy creation of missing caches via {@link #getMissingCache(String)}.
@param name the cache identifier (must not be {@code null})
@return the associated Cache instance, or {@code null} if none found
@since 4.1
@see #getCache(String)
@see #getMissingCache(String)
|
java
|
spring-context/src/main/java/org/springframework/cache/support/AbstractCacheManager.java
| 138
|
[
"name"
] |
Cache
| true
| 1
| 6.48
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getGmtTimeZone
|
/**
 * Gets a TimeZone with GMT offsets. A GMT offset must be either 'Z', or 'UTC', or match
 * <em>(GMT)? hh?(:?mm?)?</em>, where h and m are digits representing hours and minutes.
 *
 * @param pattern The GMT offset
 * @return A TimeZone with offset from GMT or null, if pattern does not match.
 */
public static TimeZone getGmtTimeZone(final String pattern) {
    // 'Z' and 'UTC' are both aliases for GMT itself.
    if ("Z".equals(pattern) || "UTC".equals(pattern)) {
        return GREENWICH;
    }
    final Matcher matcher = GMT_PATTERN.matcher(pattern);
    if (!matcher.matches()) {
        return null;
    }
    final int hours = parseInt(matcher.group(2));
    final int minutes = parseInt(matcher.group(4));
    // A zero offset collapses to the shared GMT instance.
    return hours == 0 && minutes == 0
            ? GREENWICH
            : new GmtTimeZone(parseSign(matcher.group(1)), hours, minutes);
}
|
Gets a TimeZone with GMT offsets. A GMT offset must be either 'Z', or 'UTC', or match
<em>(GMT)? hh?(:?mm?)?</em>, where h and m are digits representing hours and minutes.
@param pattern The GMT offset
@return A TimeZone with offset from GMT or null, if pattern does not match.
|
java
|
src/main/java/org/apache/commons/lang3/time/FastTimeZone.java
| 49
|
[
"pattern"
] |
TimeZone
| true
| 6
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
indexesOf
|
/**
 * Finds the indices of the given object in the array starting at the given index.
 *
 * <p>This method returns an empty BitSet for a {@code null} input array.</p>
 *
 * @param array the array to search for the object, may be {@code null}.
 * @param objectToFind the object to find, may be {@code null}.
 * @param startIndex the index to start searching.
 * @return a BitSet of all the indices of the object within the array starting at the index,
 *     an empty BitSet if not found or {@code null} array input.
 */
public static BitSet indexesOf(final Object[] array, final Object objectToFind, int startIndex) {
    final BitSet indices = new BitSet();
    if (array == null) {
        return indices;
    }
    // Repeatedly delegate to indexOf, recording each hit until exhausted.
    for (int i = startIndex; i < array.length; i++) {
        i = indexOf(array, objectToFind, i);
        if (i == INDEX_NOT_FOUND) {
            break;
        }
        indices.set(i);
    }
    return indices;
}
|
Finds the indices of the given object in the array starting at the given index.
<p>This method returns an empty BitSet for a {@code null} input array.</p>
<p>A negative startIndex is treated as zero. A startIndex larger than the array
length will return an empty BitSet.</p>
@param array the array to search for the object, may be {@code null}.
@param objectToFind the object to find, may be {@code null}.
@param startIndex the index to start searching.
@return a BitSet of all the indices of the object within the array starting at the index,
an empty BitSet if not found or {@code null} array input.
@since 3.10
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 2,297
|
[
"array",
"objectToFind",
"startIndex"
] |
BitSet
| true
| 4
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
optJSONObject
|
/**
 * Returns the value at {@code index} if it exists and is a {@code JSONObject};
 * returns {@code null} otherwise.
 *
 * @param index the index to get the value from
 * @return the object at {@code index} or {@code null}
 */
public JSONObject optJSONObject(int index) {
    Object value = opt(index);
    if (value instanceof JSONObject) {
        return (JSONObject) value;
    }
    return null;
}
|
Returns the value at {@code index} if it exists and is a {@code
JSONObject}. Returns null otherwise.
@param index the index to get the value from
@return the object at {@code index} or {@code null}
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONArray.java
| 569
|
[
"index"
] |
JSONObject
| true
| 2
| 8.16
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
from_pyfile
|
def from_pyfile(
    self, filename: str | os.PathLike[str], silent: bool = False
) -> bool:
    """Updates the values in the config from a Python file. This function
    behaves as if the file was imported as module with the
    :meth:`from_object` function.
    :param filename: the filename of the config. This can either be an
    absolute filename or a filename relative to the
    root path.
    :param silent: set to ``True`` if you want silent failure for missing
    files.
    :return: ``True`` if the file was loaded successfully.
    .. versionadded:: 0.7
    `silent` parameter.
    """
    path = os.path.join(self.root_path, filename)
    # Execute the file's contents inside a throwaway module namespace,
    # then load that namespace like any other config object.
    module = types.ModuleType("config")
    module.__file__ = path
    try:
        with open(path, mode="rb") as config_file:
            code = compile(config_file.read(), path, "exec")
            exec(code, module.__dict__)
    except OSError as e:
        missing = e.errno in (errno.ENOENT, errno.EISDIR, errno.ENOTDIR)
        if not (silent and missing):
            e.strerror = f"Unable to load configuration file ({e.strerror})"
            raise
        return False
    self.from_object(module)
    return True
|
Updates the values in the config from a Python file. This function
behaves as if the file was imported as module with the
:meth:`from_object` function.
:param filename: the filename of the config. This can either be an
absolute filename or a filename relative to the
root path.
:param silent: set to ``True`` if you want silent failure for missing
files.
:return: ``True`` if the file was loaded successfully.
.. versionadded:: 0.7
`silent` parameter.
|
python
|
src/flask/config.py
| 187
|
[
"self",
"filename",
"silent"
] |
bool
| true
| 3
| 8.08
|
pallets/flask
| 70,946
|
sphinx
| false
|
union1d
|
def union1d(ar1, ar2):
    """
    Find the union of two arrays.
    Return the unique, sorted array of values that are in either of the two
    input arrays.
    Parameters
    ----------
    ar1, ar2 : array_like
    Input arrays. They are flattened if they are not already 1D.
    Returns
    -------
    union1d : ndarray
    Unique, sorted union of the input arrays.
    Examples
    --------
    >>> import numpy as np
    >>> np.union1d([-1, 0, 1], [-2, 0, 2])
    array([-2, -1, 0, 1, 2])
    To find the union of more than two arrays, use functools.reduce:
    >>> from functools import reduce
    >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
    array([1, 2, 3, 4, 6])
    """
    # Flatten and join both inputs, then deduplicate (unique also sorts).
    merged = np.concatenate((ar1, ar2), axis=None)
    return unique(merged)
|
Find the union of two arrays.
Return the unique, sorted array of values that are in either of the two
input arrays.
Parameters
----------
ar1, ar2 : array_like
Input arrays. They are flattened if they are not already 1D.
Returns
-------
union1d : ndarray
Unique, sorted union of the input arrays.
Examples
--------
>>> import numpy as np
>>> np.union1d([-1, 0, 1], [-2, 0, 2])
array([-2, -1, 0, 1, 2])
To find the union of more than two arrays, use functools.reduce:
>>> from functools import reduce
>>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
array([1, 2, 3, 4, 6])
|
python
|
numpy/lib/_arraysetops_impl.py
| 1,084
|
[
"ar1",
"ar2"
] | false
| 1
| 6.48
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
to_tvm_tensor
|
def to_tvm_tensor(torch_tensor: torch.Tensor) -> tvm.nd.array:
    """A helper function to transfer a torch.tensor to NDArray."""
    if torch_tensor.dtype != torch.bool:
        # Non-bool tensors can be handed over via DLPack without a copy.
        return tvm.nd.from_dlpack(torch_tensor)
    # Bool tensors fall back to a numpy conversion, which could introduce
    # data copy overhead.
    return tvm.nd.array(torch_tensor.cpu().numpy())
|
A helper function to transfer a torch.tensor to NDArray.
|
python
|
torch/_dynamo/backends/tvm.py
| 144
|
[
"torch_tensor"
] |
tvm.nd.array
| true
| 2
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
elementSet
|
/**
 * {@inheritDoc}
 *
 * <p>Lazily creates the element set on first access and caches it in the
 * {@code elementSet} field for subsequent calls.
 */
@Override
public Set<E> elementSet() {
    // Read the field once into a local so the null check and the return use
    // the same observed value.
    Set<E> result = elementSet;
    if (result == null) {
        elementSet = result = createElementSet();
    }
    return result;
}
|
{@inheritDoc}
<p>This implementation lazily creates the element set on first access and
caches it for subsequent calls.
|
java
|
android/guava/src/com/google/common/collect/AbstractMultiset.java
| 130
|
[] | true
| 2
| 6.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
from
|
/**
 * Return an {@link Iterable} containing {@link ConfigurationPropertySource}
 * instances adapted from the given Spring {@link PropertySource PropertySources}.
 *
 * @param sources the Spring property sources to adapt
 * @return an {@link Iterable} of adapted configuration property sources
 */
public static Iterable<ConfigurationPropertySource> from(Iterable<PropertySource<?>> sources) {
    return new SpringConfigurationPropertySources(sources);
}
|
Return {@link Iterable} containing new {@link ConfigurationPropertySource}
instances adapted from the given Spring {@link PropertySource PropertySources}.
<p>
This method will flatten any nested property sources and will filter all
{@link StubPropertySource stub property sources}. Updates to the underlying source,
identified by changes in the sources returned by its iterator, will be
automatically tracked. The underlying source should be thread safe, for example a
{@link MutablePropertySources}
@param sources the Spring property sources to adapt
@return an {@link Iterable} containing newly adapted
{@link SpringConfigurationPropertySource} instances
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertySources.java
| 156
|
[
"sources"
] | true
| 1
| 6.16
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
convertKey
|
/**
 * Hook to convert each encountered Map key.
 * <p>The default implementation simply returns the passed-in key as-is.
 * Can be overridden to perform conversion of certain keys, for example
 * from String to Integer.
 *
 * @param key the source key
 * @return the key to be used in the target Map
 * @see #alwaysCreateNewMap
 */
protected Object convertKey(Object key) {
    return key;
}
|
Hook to convert each encountered Map key.
The default implementation simply returns the passed-in key as-is.
<p>Can be overridden to perform conversion of certain keys,
for example from String to Integer.
<p>Only called if actually creating a new Map!
This is by default not the case if the type of the passed-in Map
already matches. Override {@link #alwaysCreateNewMap()} to
enforce creating a new Map in every case.
@param key the source key
@return the key to be used in the target Map
@see #alwaysCreateNewMap
|
java
|
spring-beans/src/main/java/org/springframework/beans/propertyeditors/CustomMapEditor.java
| 173
|
[
"key"
] |
Object
| true
| 1
| 6.8
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
get_all_providers_in_dist
|
def get_all_providers_in_dist(distribution_format: str, install_selected_providers: str) -> list[str]:
    """
    Returns all providers in dist, optionally filtered by install_selected_providers.
    :param distribution_format: package format to look for
    :param install_selected_providers: list of providers to filter by
    """
    # Pick the filename prefix/pattern pair matching the requested format.
    if distribution_format == "sdist":
        prefix, pattern = SDIST_FILENAME_PREFIX, SDIST_FILENAME_PATTERN
    elif distribution_format == "wheel":
        prefix, pattern = WHEEL_FILENAME_PREFIX, WHEEL_FILENAME_PATTERN
    else:
        raise SystemExit(f"Unknown package format {distribution_format}")
    found_providers = list(
        _get_all_providers_in_dist(filename_prefix=prefix, filename_pattern=pattern)
    )
    if not install_selected_providers:
        return found_providers
    selected = install_selected_providers.split(",")
    return [provider for provider in found_providers if provider in selected]
|
Returns all providers in dist, optionally filtered by install_selected_providers.
:param distribution_format: package format to look for
:param install_selected_providers: list of providers to filter by
|
python
|
dev/breeze/src/airflow_breeze/commands/release_management_commands.py
| 1,470
|
[
"distribution_format",
"install_selected_providers"
] |
list[str]
| true
| 5
| 6.4
|
apache/airflow
| 43,597
|
sphinx
| false
|
threadNamePrefix
|
/**
 * Set the prefix to use for the names of newly created threads.
 *
 * @param threadNamePrefix the thread name prefix to set (may be {@code null})
 * @return a new builder instance with the updated prefix; all other settings
 *     are carried over unchanged
 */
public SimpleAsyncTaskExecutorBuilder threadNamePrefix(@Nullable String threadNamePrefix) {
    return new SimpleAsyncTaskExecutorBuilder(this.virtualThreads, threadNamePrefix,
            this.cancelRemainingTasksOnClose, this.rejectTasksWhenLimitReached, this.concurrencyLimit,
            this.taskDecorator, this.customizers, this.taskTerminationTimeout);
}
|
Set the prefix to use for the names of newly created threads.
@param threadNamePrefix the thread name prefix to set
@return a new builder instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/task/SimpleAsyncTaskExecutorBuilder.java
| 90
|
[
"threadNamePrefix"
] |
SimpleAsyncTaskExecutorBuilder
| true
| 1
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getVersion
|
/**
 * Determine the {@link Runtime.Version} to use for the given jar URL.
 *
 * @param url the jar URL
 * @return the base version when the URL ref is {@code base}, otherwise the
 *     runtime version
 */
private Runtime.Version getVersion(URL url) {
    // The standard JDK handler uses #runtime to indicate that the runtime version
    // should be used. This unfortunately doesn't work for us as
    // jdk.internal.loader.URLClassPath only adds the runtime fragment when the URL
    // is using the internal JDK handler. We need to flip the default to use
    // the runtime version. See gh-38050
    if ("base".equals(url.getRef())) {
        return JarFile.baseVersion();
    }
    return JarFile.runtimeVersion();
}
|
Determine the JarFile version to use for the given jar URL.
@param url the jar URL
@return the base version when the URL ref is {@code base}, otherwise the runtime version
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/jar/UrlJarFileFactory.java
| 60
|
[
"url"
] | true
| 2
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
get_waiter
|
def get_waiter(
self,
waiter_name: str,
parameters: dict[str, str] | None = None,
config_overrides: dict[str, Any] | None = None,
deferrable: bool = False,
client=None,
) -> botocore.waiter.Waiter:
"""
Get an AWS Batch service waiter, using the configured ``.waiter_model``.
The ``.waiter_model`` is combined with the ``.client`` to get a specific waiter and
the properties of that waiter can be modified without any accidental impact on the
generation of new waiters from the ``.waiter_model``, e.g.
.. code-block:: python
waiters.get_waiter("JobExists").config.delay # -> 5
waiter = waiters.get_waiter("JobExists") # a new waiter object
waiter.config.delay = 10
waiters.get_waiter("JobExists").config.delay # -> 5 as defined by waiter_model
To use a specific waiter, update the config and call the `wait()` method for jobId, e.g.
.. code-block:: python
import random
waiter = waiters.get_waiter("JobExists") # a new waiter object
waiter.config.delay = random.uniform(1, 10) # seconds
waiter.config.max_attempts = 10
waiter.wait(jobs=[jobId])
:param waiter_name: The name of the waiter. The name should match
the name (including the casing) of the key name in the waiter
model file (typically this is CamelCasing); see ``.list_waiters``.
:param parameters: unused, just here to match the method signature in base_aws
:param config_overrides: unused, just here to match the method signature in base_aws
:param deferrable: unused, just here to match the method signature in base_aws
:param client: unused, just here to match the method signature in base_aws
:return: a waiter object for the named AWS Batch service
"""
return botocore.waiter.create_waiter_with_client(waiter_name, self.waiter_model, self.client)
|
Get an AWS Batch service waiter, using the configured ``.waiter_model``.
The ``.waiter_model`` is combined with the ``.client`` to get a specific waiter and
the properties of that waiter can be modified without any accidental impact on the
generation of new waiters from the ``.waiter_model``, e.g.
.. code-block:: python
waiters.get_waiter("JobExists").config.delay # -> 5
waiter = waiters.get_waiter("JobExists") # a new waiter object
waiter.config.delay = 10
waiters.get_waiter("JobExists").config.delay # -> 5 as defined by waiter_model
To use a specific waiter, update the config and call the `wait()` method for jobId, e.g.
.. code-block:: python
import random
waiter = waiters.get_waiter("JobExists") # a new waiter object
waiter.config.delay = random.uniform(1, 10) # seconds
waiter.config.max_attempts = 10
waiter.wait(jobs=[jobId])
:param waiter_name: The name of the waiter. The name should match
the name (including the casing) of the key name in the waiter
model file (typically this is CamelCasing); see ``.list_waiters``.
:param parameters: unused, just here to match the method signature in base_aws
:param config_overrides: unused, just here to match the method signature in base_aws
:param deferrable: unused, just here to match the method signature in base_aws
:param client: unused, just here to match the method signature in base_aws
:return: a waiter object for the named AWS Batch service
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/batch_waiters.py
| 146
|
[
"self",
"waiter_name",
"parameters",
"config_overrides",
"deferrable",
"client"
] |
botocore.waiter.Waiter
| true
| 1
| 6.4
|
apache/airflow
| 43,597
|
sphinx
| false
|
filter
|
function filter(collection, predicate) {
var func = isArray(collection) ? arrayFilter : baseFilter;
return func(collection, getIteratee(predicate, 3));
}
|
Iterates over elements of `collection`, returning an array of all elements
`predicate` returns truthy for. The predicate is invoked with three
arguments: (value, index|key, collection).
**Note:** Unlike `_.remove`, this method returns a new array.
@static
@memberOf _
@since 0.1.0
@category Collection
@param {Array|Object} collection The collection to iterate over.
@param {Function} [predicate=_.identity] The function invoked per iteration.
@returns {Array} Returns the new filtered array.
@see _.reject
@example
var users = [
{ 'user': 'barney', 'age': 36, 'active': true },
{ 'user': 'fred', 'age': 40, 'active': false }
];
_.filter(users, function(o) { return !o.active; });
// => objects for ['fred']
// The `_.matches` iteratee shorthand.
_.filter(users, { 'age': 36, 'active': true });
// => objects for ['barney']
// The `_.matchesProperty` iteratee shorthand.
_.filter(users, ['active', false]);
// => objects for ['fred']
// The `_.property` iteratee shorthand.
_.filter(users, 'active');
// => objects for ['barney']
// Combining several predicates using `_.overEvery` or `_.overSome`.
_.filter(users, _.overSome([{ 'age': 36 }, ['age', 40]]));
// => objects for ['fred', 'barney']
|
javascript
|
lodash.js
| 9,278
|
[
"collection",
"predicate"
] | false
| 2
| 7.12
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
_gotitem
|
def _gotitem(self, key, ndim, subset=None):
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : {1, 2}
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
if key is not None:
subset = subset[key]
else:
# reached via Apply.agg_dict_like with selection=None, ndim=1
assert subset.ndim == 1
# Try to select from a DataFrame, falling back to a Series
try:
if isinstance(key, list) and self.key not in key and self.key is not None:
key.append(self.key)
groupby = self._groupby[key]
except IndexError:
groupby = self._groupby
selection = self._infer_selection(key, subset)
new_rs = type(self)(
groupby=groupby,
parent=cast(Resampler, self),
selection=selection,
)
return new_rs
|
Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : {1, 2}
requested ndim of result
subset : object, default None
subset to act on
|
python
|
pandas/core/resample.py
| 2,022
|
[
"self",
"key",
"ndim",
"subset"
] | false
| 7
| 6.08
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
getInt
|
public int getInt(String name) throws JSONException {
Object object = get(name);
Integer result = JSON.toInteger(object);
if (result == null) {
throw JSON.typeMismatch(name, object, "int");
}
return result;
}
|
Returns the value mapped by {@code name} if it exists and is an int or can be
coerced to an int.
@param name the name of the property
@return the value
@throws JSONException if the mapping doesn't exist or cannot be coerced to an int.
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONObject.java
| 474
|
[
"name"
] | true
| 2
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
exclusiveBetween
|
public static <T> void exclusiveBetween(final T start, final T end, final Comparable<T> value) {
// TODO when breaking BC, consider returning value
if (value.compareTo(start) <= 0 || value.compareTo(end) >= 0) {
throw new IllegalArgumentException(String.format(DEFAULT_EXCLUSIVE_BETWEEN_EX_MESSAGE, value, start, end));
}
}
|
Validate that the specified argument object fall between the two
exclusive values specified; otherwise, throws an exception.
<pre>Validate.exclusiveBetween(0, 2, 1);</pre>
@param <T> the type of the argument object.
@param start the exclusive start value, not null.
@param end the exclusive end value, not null.
@param value the object to validate, not null.
@throws IllegalArgumentException if the value falls outside the boundaries.
@see #exclusiveBetween(Object, Object, Comparable, String, Object...)
@since 3.0
|
java
|
src/main/java/org/apache/commons/lang3/Validate.java
| 176
|
[
"start",
"end",
"value"
] |
void
| true
| 3
| 6.56
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
sum
|
def sum(
self,
numeric_only: bool = False,
engine: Literal["cython", "numba"] | None = None,
engine_kwargs: dict[str, bool] | None = None,
):
"""
Calculate the expanding sum.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
engine : str, default None
* ``'cython'`` : Runs the operation through C-extensions from cython.
* ``'numba'`` : Runs the operation through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or globally setting
``compute.use_numba``
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}``
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.sum : Aggregating sum for Series.
DataFrame.sum : Aggregating sum for DataFrame.
Notes
-----
See :ref:`window.numba_engine` and :ref:`enhancingperf.numba` for extended
documentation and performance considerations for the Numba engine.
Examples
--------
>>> ser = pd.Series([1, 2, 3, 4], index=["a", "b", "c", "d"])
>>> ser.expanding().sum()
a 1.0
b 3.0
c 6.0
d 10.0
dtype: float64
"""
return super().sum(
numeric_only=numeric_only,
engine=engine,
engine_kwargs=engine_kwargs,
)
|
Calculate the expanding sum.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
engine : str, default None
* ``'cython'`` : Runs the operation through C-extensions from cython.
* ``'numba'`` : Runs the operation through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or globally setting
``compute.use_numba``
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}``
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.sum : Aggregating sum for Series.
DataFrame.sum : Aggregating sum for DataFrame.
Notes
-----
See :ref:`window.numba_engine` and :ref:`enhancingperf.numba` for extended
documentation and performance considerations for the Numba engine.
Examples
--------
>>> ser = pd.Series([1, 2, 3, 4], index=["a", "b", "c", "d"])
>>> ser.expanding().sum()
a 1.0
b 3.0
c 6.0
d 10.0
dtype: float64
|
python
|
pandas/core/window/expanding.py
| 422
|
[
"self",
"numeric_only",
"engine",
"engine_kwargs"
] | true
| 1
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
invalidate
|
default boolean invalidate() {
clear();
return false;
}
|
Invalidate the cache through removing all mappings, expecting all
entries to be immediately invisible for subsequent lookups.
@return {@code true} if the cache was known to have mappings before,
{@code false} if it did not (or if prior presence of entries could
not be determined)
@since 5.2
@see #clear()
|
java
|
spring-context/src/main/java/org/springframework/cache/Cache.java
| 282
|
[] | true
| 1
| 6.8
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
resolveValue
|
private Object resolveValue(RegisteredBean registeredBean) {
ConfigurableListableBeanFactory factory = registeredBean.getBeanFactory();
Object resource;
Set<String> autowiredBeanNames;
DependencyDescriptor descriptor = createDependencyDescriptor(registeredBean);
if (this.defaultName && !factory.containsBean(this.name)) {
autowiredBeanNames = new LinkedHashSet<>();
resource = factory.resolveDependency(descriptor, registeredBean.getBeanName(), autowiredBeanNames, null);
if (resource == null) {
throw new NoSuchBeanDefinitionException(descriptor.getDependencyType(), "No resolvable resource object");
}
}
else {
resource = factory.resolveBeanByName(this.name, descriptor);
autowiredBeanNames = Collections.singleton(this.name);
}
for (String autowiredBeanName : autowiredBeanNames) {
if (factory.containsBean(autowiredBeanName)) {
factory.registerDependentBean(autowiredBeanName, registeredBean.getBeanName());
}
}
return resource;
}
|
Resolve the value to inject for this instance.
@param registeredBean the bean registration
@return the value to inject
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/ResourceElementResolver.java
| 177
|
[
"registeredBean"
] |
Object
| true
| 5
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
resolveInnerBeanValue
|
private @Nullable Object resolveInnerBeanValue(Object argName, String innerBeanName, RootBeanDefinition mbd) {
try {
// Check given bean name whether it is unique. If not already unique,
// add counter - increasing the counter until the name is unique.
String actualInnerBeanName = innerBeanName;
if (mbd.isSingleton()) {
actualInnerBeanName = adaptInnerBeanName(innerBeanName);
}
this.beanFactory.registerContainedBean(actualInnerBeanName, this.beanName);
// Guarantee initialization of beans that the inner bean depends on.
String[] dependsOn = mbd.getDependsOn();
if (dependsOn != null) {
for (String dependsOnBean : dependsOn) {
this.beanFactory.registerDependentBean(dependsOnBean, actualInnerBeanName);
this.beanFactory.getBean(dependsOnBean);
}
}
// Actually create the inner bean instance now...
Object innerBean = this.beanFactory.createBean(actualInnerBeanName, mbd, null);
if (innerBean instanceof FactoryBean<?> factoryBean) {
boolean synthetic = mbd.isSynthetic();
innerBean = this.beanFactory.getObjectFromFactoryBean(
factoryBean, null, actualInnerBeanName, !synthetic);
}
if (innerBean instanceof NullBean) {
innerBean = null;
}
return innerBean;
}
catch (BeansException ex) {
throw new BeanCreationException(
this.beanDefinition.getResourceDescription(), this.beanName,
"Cannot create inner bean '" + innerBeanName + "' " +
(mbd.getBeanClassName() != null ? "of type [" + mbd.getBeanClassName() + "] " : "") +
"while setting " + argName, ex);
}
}
|
Resolve an inner bean definition.
@param argName the name of the argument that the inner bean is defined for
@param innerBeanName the name of the inner bean
@param mbd the merged bean definition for the inner bean
@return the resolved inner bean instance
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/BeanDefinitionValueResolver.java
| 388
|
[
"argName",
"innerBeanName",
"mbd"
] |
Object
| true
| 7
| 7.92
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
appendExportsOfHoistedDeclaration
|
function appendExportsOfHoistedDeclaration(statements: Statement[] | undefined, decl: ClassDeclaration | FunctionDeclaration): Statement[] | undefined {
if (moduleInfo.exportEquals) {
return statements;
}
let excludeName: string | undefined;
if (hasSyntacticModifier(decl, ModifierFlags.Export)) {
const exportName = hasSyntacticModifier(decl, ModifierFlags.Default) ? factory.createStringLiteral("default") : decl.name!;
statements = appendExportStatement(statements, exportName, factory.getLocalName(decl));
excludeName = getTextOfIdentifierOrLiteral(exportName);
}
if (decl.name) {
statements = appendExportsOfDeclaration(statements, decl, excludeName);
}
return statements;
}
|
Appends the exports of a ClassDeclaration or FunctionDeclaration to a statement list,
returning the statement list.
@param statements A statement list to which the down-level export statements are to be
appended. If `statements` is `undefined`, a new array is allocated if statements are
appended.
@param decl The declaration whose exports are to be recorded.
|
typescript
|
src/compiler/transformers/module/system.ts
| 1,132
|
[
"statements",
"decl"
] | true
| 5
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
maybeCompleteWithPreviousException
|
private boolean maybeCompleteWithPreviousException(CompletableFuture<Void> result) {
Throwable cachedException = cachedUpdatePositionsException.getAndSet(null);
if (cachedException != null) {
result.completeExceptionally(cachedException);
return true;
}
return false;
}
|
Update fetch positions for assigned partitions that do not have a position. This will:
<ul>
<li>check if all assigned partitions already have fetch positions and return right away if that's the case</li>
<li>trigger an async request to validate positions (detect log truncation)</li>
<li>fetch committed offsets if enabled, and use the response to update the positions</li>
<li>fetch partition offsets for partitions that may still require a position, and use the response to
update the positions</li>
</ul>
@param deadlineMs Time in milliseconds when the triggering application event expires. Any error received after
this will be saved, and used to complete the result exceptionally on the next call to this
function.
@return Future that will complete with a boolean indicating if all assigned partitions have positions (based
on {@link SubscriptionState#hasAllFetchPositions()}). It will complete immediately, with true, if all positions
are already available. If some positions are missing, the future will complete once the offsets are retrieved and positions are updated.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManager.java
| 266
|
[
"result"
] | true
| 2
| 7.76
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
isrealobj
|
def isrealobj(x):
"""
Return True if x is a not complex type or an array of complex numbers.
The type of the input is checked, not the value. So even if the input
has an imaginary part equal to zero, `isrealobj` evaluates to False
if the data type is complex.
Parameters
----------
x : any
The input can be of any type and shape.
Returns
-------
y : bool
The return value, False if `x` is of a complex type.
See Also
--------
iscomplexobj, isreal
Notes
-----
The function is only meant for arrays with numerical values but it
accepts all other objects. Since it assumes array input, the return
value of other objects may be True.
>>> np.isrealobj('A string')
True
>>> np.isrealobj(False)
True
>>> np.isrealobj(None)
True
Examples
--------
>>> import numpy as np
>>> np.isrealobj(1)
True
>>> np.isrealobj(1+0j)
False
>>> np.isrealobj([3, 1+0j, True])
False
"""
return not iscomplexobj(x)
|
Return True if x is a not complex type or an array of complex numbers.
The type of the input is checked, not the value. So even if the input
has an imaginary part equal to zero, `isrealobj` evaluates to False
if the data type is complex.
Parameters
----------
x : any
The input can be of any type and shape.
Returns
-------
y : bool
The return value, False if `x` is of a complex type.
See Also
--------
iscomplexobj, isreal
Notes
-----
The function is only meant for arrays with numerical values but it
accepts all other objects. Since it assumes array input, the return
value of other objects may be True.
>>> np.isrealobj('A string')
True
>>> np.isrealobj(False)
True
>>> np.isrealobj(None)
True
Examples
--------
>>> import numpy as np
>>> np.isrealobj(1)
True
>>> np.isrealobj(1+0j)
False
>>> np.isrealobj([3, 1+0j, True])
False
|
python
|
numpy/lib/_type_check_impl.py
| 313
|
[
"x"
] | false
| 1
| 6.48
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
addAll
|
@CanIgnoreReturnValue
public Builder<E> addAll(Iterable<? extends E> elements) {
for (E element : elements) {
add(element);
}
return this;
}
|
Adds each element of {@code elements} to the {@code ImmutableCollection} being built.
<p>Note that each builder class overrides this method in order to covariantly return its own
type.
@param elements the elements to add
@return this {@code Builder} instance
@throws NullPointerException if {@code elements} is null or contains a null element
|
java
|
android/guava/src/com/google/common/collect/ImmutableCollection.java
| 459
|
[
"elements"
] | true
| 1
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
lastIndexOf
|
public static int lastIndexOf(final CharSequence seq, final int searchChar) {
if (isEmpty(seq)) {
return INDEX_NOT_FOUND;
}
return CharSequenceUtils.lastIndexOf(seq, searchChar, seq.length());
}
|
Returns the index within {@code seq} of the last occurrence of the specified character. For values of {@code searchChar} in the range from 0 to 0xFFFF
(inclusive), the index (in Unicode code units) returned is the largest value <em>k</em> such that:
<pre>
this.charAt(<em>k</em>) == searchChar
</pre>
<p>
is true. For other values of {@code searchChar}, it is the largest value <em>k</em> such that:
</p>
<pre>
this.codePointAt(<em>k</em>) == searchChar
</pre>
<p>
is true. In either case, if no such character occurs in this string, then {@code -1} is returned. Furthermore, a {@code null} or empty ("")
{@link CharSequence} will return {@code -1}. The {@code seq} {@link CharSequence} object is searched backwards starting at the last character.
</p>
<pre>
StringUtils.lastIndexOf(null, *) = -1
StringUtils.lastIndexOf("", *) = -1
StringUtils.lastIndexOf("aabaabaa", 'a') = 7
StringUtils.lastIndexOf("aabaabaa", 'b') = 5
</pre>
@param seq the {@link CharSequence} to check, may be null.
@param searchChar the character to find.
@return the last index of the search character, -1 if no match or {@code null} string input.
@since 2.0
@since 3.0 Changed signature from lastIndexOf(String, int) to lastIndexOf(CharSequence, int)
@since 3.6 Updated {@link CharSequenceUtils} call to behave more like {@link String}
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 4,833
|
[
"seq",
"searchChar"
] | true
| 2
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
setupChildProcessIpcChannel
|
function setupChildProcessIpcChannel() {
if (process.env.NODE_CHANNEL_FD) {
const fd = NumberParseInt(process.env.NODE_CHANNEL_FD, 10);
assert(fd >= 0);
// Make sure it's not accidentally inherited by child processes.
delete process.env.NODE_CHANNEL_FD;
const serializationMode =
process.env.NODE_CHANNEL_SERIALIZATION_MODE || 'json';
delete process.env.NODE_CHANNEL_SERIALIZATION_MODE;
require('child_process')._forkChild(fd, serializationMode);
assert(process.send);
}
}
|
Patch the process object with legacy properties and normalizations.
Replace `process.argv[0]` with `process.execPath`, preserving the original `argv[0]` value as `process.argv0`.
Replace `process.argv[1]` with the resolved absolute file path of the entry point, if found.
@param {boolean} expandArgv1 - Whether to replace `process.argv[1]` with the resolved absolute file path of
the main entry point.
@returns {string}
|
javascript
|
lib/internal/process/pre_execution.js
| 578
|
[] | false
| 3
| 6.96
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
calculateNextTransformValue
|
function calculateNextTransformValue(
srcValue: TransformValue,
targetValue: TransformValue,
changeRate: number,
): TransformValue {
const nextValue: TransformValue = {
type: 'transform',
values: new Map(),
};
for (const [func, numData] of targetValue.values) {
const srcNumData = srcValue.values.get(func)!;
const newNumData: [number, string][] = [];
for (let i = 0; i < numData.length; i++) {
const target = numData[i];
const src = srcNumData[i];
const numDelta = calculateValueDelta(src[0], target[0], changeRate);
// We should check both source and target for the unit
// since we might have zero-based value without a unit
// (e.g. rotate(0) <-> rotate(180deg))
const unit = target[1] || src[1];
newNumData.push([src[0] + numDelta, unit]);
}
nextValue.values.set(func, newNumData);
}
return nextValue;
}
|
Calculate the next `CssPropertyValue` based on the source and a target one.
@param srcValue The source value
@param targetValue The target values (it's either the final or the initial value)
@param changeRate The change rate relative to the target (i.e. 1 = target value; 0 = source value)
@returns The newly generated value
|
typescript
|
adev/src/app/features/home/animation/calculations/calc-css-value.ts
| 67
|
[
"srcValue",
"targetValue",
"changeRate"
] | true
| 3
| 8.08
|
angular/angular
| 99,544
|
jsdoc
| false
|
|
forms_of_context
|
def forms_of_context() -> Sequence[str]:
"""Return a sequence of context form names provided by this context class.
Returns:
A sequence of strings representing the available context forms.
"""
|
Return a sequence of context form names provided by this context class.
Returns:
A sequence of strings representing the available context forms.
|
python
|
torch/_inductor/runtime/caching/context.py
| 28
|
[] |
Sequence[str]
| true
| 1
| 6.56
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
buildOrThrow
|
@Override
public ImmutableBiMap<K, V> buildOrThrow() {
if (size == 0) {
return of();
}
if (valueComparator != null) {
if (entriesUsed) {
alternatingKeysAndValues = Arrays.copyOf(alternatingKeysAndValues, 2 * size);
}
sortEntries(alternatingKeysAndValues, size, valueComparator);
}
entriesUsed = true;
return new RegularImmutableBiMap<K, V>(alternatingKeysAndValues, size);
}
|
Returns a newly-created immutable bimap, or throws an exception if any key or value was added
more than once. The iteration order of the returned bimap is the order in which entries were
inserted into the builder, unless {@link #orderEntriesByValue} was called, in which case
entries are sorted by value.
@throws IllegalArgumentException if duplicate keys or values were added
@since 31.0
|
java
|
android/guava/src/com/google/common/collect/ImmutableBiMap.java
| 473
|
[] | true
| 4
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
toSearchResult
|
function toSearchResult<T>(value: T | undefined): SearchResult<T> {
return value !== undefined ? { value } : undefined;
}
|
Wraps value to SearchResult.
@returns undefined if value is undefined or { value } otherwise
|
typescript
|
src/compiler/moduleNameResolver.ts
| 3,409
|
[
"value"
] | true
| 2
| 6.16
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
lstrip
|
def lstrip(a, chars=None):
"""
For each element in `a`, return a copy with the leading characters
removed.
Parameters
----------
a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
chars : scalar with the same dtype as ``a``, optional
The ``chars`` argument is a string specifying the set of
characters to be removed. If ``None``, the ``chars``
argument defaults to removing whitespace. The ``chars`` argument
is not a prefix or suffix; rather, all combinations of its
values are stripped.
Returns
-------
out : ndarray
Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
depending on input types
See Also
--------
str.lstrip
Examples
--------
>>> import numpy as np
>>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
>>> c
array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')
# The 'a' variable is unstripped from c[1] because of leading whitespace.
>>> np.strings.lstrip(c, 'a')
array(['AaAaA', ' aA ', 'bBABba'], dtype='<U7')
>>> np.strings.lstrip(c, 'A') # leaves c unchanged
array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')
>>> (np.strings.lstrip(c, ' ') == np.strings.lstrip(c, '')).all()
np.False_
>>> (np.strings.lstrip(c, ' ') == np.strings.lstrip(c)).all()
np.True_
"""
if chars is None:
return _lstrip_whitespace(a)
return _lstrip_chars(a, chars)
|
For each element in `a`, return a copy with the leading characters
removed.
Parameters
----------
a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
chars : scalar with the same dtype as ``a``, optional
The ``chars`` argument is a string specifying the set of
characters to be removed. If ``None``, the ``chars``
argument defaults to removing whitespace. The ``chars`` argument
is not a prefix or suffix; rather, all combinations of its
values are stripped.
Returns
-------
out : ndarray
Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
depending on input types
See Also
--------
str.lstrip
Examples
--------
>>> import numpy as np
>>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
>>> c
array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')
# The 'a' variable is unstripped from c[1] because of leading whitespace.
>>> np.strings.lstrip(c, 'a')
array(['AaAaA', ' aA ', 'bBABba'], dtype='<U7')
>>> np.strings.lstrip(c, 'A') # leaves c unchanged
array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')
>>> (np.strings.lstrip(c, ' ') == np.strings.lstrip(c, '')).all()
np.False_
>>> (np.strings.lstrip(c, ' ') == np.strings.lstrip(c)).all()
np.True_
|
python
|
numpy/_core/strings.py
| 943
|
[
"a",
"chars"
] | false
| 2
| 7.84
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
getRemovalListener
|
@SuppressWarnings("unchecked")
<K1 extends K, V1 extends V> RemovalListener<K1, V1> getRemovalListener() {
return (RemovalListener<K1, V1>)
MoreObjects.firstNonNull(removalListener, NullListener.INSTANCE);
}
|
Specifies a listener instance that caches should notify each time an entry is removed for any
{@linkplain RemovalCause reason}. Each cache created by this builder will invoke this listener
as part of the routine maintenance described in the class documentation above.
<p><b>Warning:</b> after invoking this method, do not continue to use <i>this</i> cache builder
reference; instead use the reference this method <i>returns</i>. At runtime, these point to the
same instance, but only the returned reference has the correct generic type information to
ensure type safety. For best results, use the standard method-chaining idiom illustrated in the
class documentation above, configuring a builder and building your cache in a single statement.
Failure to heed this advice can result in a {@link ClassCastException} being thrown by a cache
operation at some <i>undefined</i> point in the future.
<p><b>Warning:</b> any exception thrown by {@code listener} will <i>not</i> be propagated to
the {@code Cache} user, only logged via a {@link Logger}.
@return the cache builder reference that should be used instead of {@code this} for any
remaining configuration and cache building
@throws IllegalStateException if a removal listener was already set
|
java
|
android/guava/src/com/google/common/cache/CacheBuilder.java
| 995
|
[] | true
| 1
| 6.32
|
google/guava
| 51,352
|
javadoc
| false
|
|
getTypeForFactoryMethod
|
/**
 * Determine the bean type for a factory-method based bean definition by inspecting
 * the candidate factory methods on the factory class.
 * <p>The result is cached on the {@code RootBeanDefinition}. When several candidate
 * methods exist, a type is only returned if all of them share a common return-type
 * ancestor; otherwise {@code null} signals "not determinable".
 * @param beanName the name of the bean (for error reporting)
 * @param mbd the merged bean definition to introspect
 * @param typesToMatch types to match for internal type-matching purposes
 * @return the common factory-method return type, or {@code null} if not determinable
 */
protected @Nullable Class<?> getTypeForFactoryMethod(String beanName, RootBeanDefinition mbd, Class<?>... typesToMatch) {
	// Fast path: a previous call already resolved and cached the return type.
	ResolvableType cachedReturnType = mbd.factoryMethodReturnType;
	if (cachedReturnType != null) {
		return cachedReturnType.resolve();
	}

	Class<?> commonType = null;
	Method uniqueCandidate = mbd.factoryMethodToIntrospect;

	if (uniqueCandidate == null) {
		Class<?> factoryClass;
		boolean isStatic = true;

		String factoryBeanName = mbd.getFactoryBeanName();
		if (factoryBeanName != null) {
			if (factoryBeanName.equals(beanName)) {
				throw new BeanDefinitionStoreException(mbd.getResourceDescription(), beanName,
						"factory-bean reference points back to the same bean definition");
			}
			// Check declared factory method return type on factory class.
			factoryClass = getType(factoryBeanName);
			isStatic = false;
		}
		else {
			// Check declared factory method return type on bean class.
			factoryClass = resolveBeanClass(mbd, beanName, typesToMatch);
		}

		if (factoryClass == null) {
			return null;
		}
		// Unwrap CGLIB or other proxy subclasses to the user-declared class.
		factoryClass = ClassUtils.getUserClass(factoryClass);

		// If all factory methods have the same return type, return that type.
		// Can't clearly figure out exact method due to type converting / autowiring!
		int minNrOfArgs =
				(mbd.hasConstructorArgumentValues() ? mbd.getConstructorArgumentValues().getArgumentCount() : 0);
		Method[] candidates = this.factoryMethodCandidateCache.computeIfAbsent(factoryClass,
				clazz -> ReflectionUtils.getUniqueDeclaredMethods(clazz, ReflectionUtils.USER_DECLARED_METHODS));

		for (Method candidate : candidates) {
			// A candidate must match staticness, be a factory method for this definition,
			// and accept at least as many arguments as are configured.
			if (Modifier.isStatic(candidate.getModifiers()) == isStatic && mbd.isFactoryMethod(candidate) &&
					candidate.getParameterCount() >= minNrOfArgs) {
				// Declared type variables to inspect?
				if (candidate.getTypeParameters().length > 0) {
					try {
						// Fully resolve parameter names and argument values.
						ConstructorArgumentValues cav = mbd.getConstructorArgumentValues();
						Class<?>[] paramTypes = candidate.getParameterTypes();
						@Nullable String[] paramNames = null;
						if (cav.containsNamedArgument()) {
							ParameterNameDiscoverer pnd = getParameterNameDiscoverer();
							if (pnd != null) {
								paramNames = pnd.getParameterNames(candidate);
							}
						}
						// Match configured argument values to parameter positions so the
						// generic return type can be resolved against concrete argument types.
						Set<ConstructorArgumentValues.ValueHolder> usedValueHolders = CollectionUtils.newHashSet(paramTypes.length);
						@Nullable Object[] args = new Object[paramTypes.length];
						for (int i = 0; i < args.length; i++) {
							ConstructorArgumentValues.ValueHolder valueHolder = cav.getArgumentValue(
									i, paramTypes[i], (paramNames != null ? paramNames[i] : null), usedValueHolders);
							if (valueHolder == null) {
								valueHolder = cav.getGenericArgumentValue(null, null, usedValueHolders);
							}
							if (valueHolder != null) {
								args[i] = valueHolder.getValue();
								usedValueHolders.add(valueHolder);
							}
						}
						Class<?> returnType = AutowireUtils.resolveReturnTypeForFactoryMethod(
								candidate, args, getBeanClassLoader());
						// Only keep a unique candidate if it is the first match and generic
						// resolution did not narrow its declared return type.
						uniqueCandidate = (commonType == null && returnType == candidate.getReturnType() ?
								candidate : null);
						commonType = ClassUtils.determineCommonAncestor(returnType, commonType);
						if (commonType == null) {
							// Ambiguous return types found: return null to indicate "not determinable".
							return null;
						}
					}
					catch (Throwable ex) {
						if (logger.isDebugEnabled()) {
							logger.debug("Failed to resolve generic return type for factory method: " + ex);
						}
					}
				}
				else {
					uniqueCandidate = (commonType == null ? candidate : null);
					commonType = ClassUtils.determineCommonAncestor(candidate.getReturnType(), commonType);
					if (commonType == null) {
						// Ambiguous return types found: return null to indicate "not determinable".
						return null;
					}
				}
			}
		}

		// Cache the single introspected method (or null) for subsequent calls.
		mbd.factoryMethodToIntrospect = uniqueCandidate;
		if (commonType == null) {
			return null;
		}
	}

	// Common return type found: all factory methods return same type. For a non-parameterized
	// unique candidate, cache the full type declaration context of the target factory method.
	try {
		cachedReturnType = (uniqueCandidate != null ?
				ResolvableType.forMethodReturnType(uniqueCandidate) : ResolvableType.forClass(commonType));
		mbd.factoryMethodReturnType = cachedReturnType;
		return cachedReturnType.resolve();
	}
	catch (LinkageError err) {
		// For example, a NoClassDefFoundError for a generic method return type
		if (logger.isDebugEnabled()) {
			logger.debug("Failed to resolve type for factory method of bean '" + beanName + "': " +
					(uniqueCandidate != null ? uniqueCandidate : commonType), err);
		}
		return null;
	}
}
|
Determine the target type for the given bean definition which is based on
a factory method. Only called if there is no singleton instance registered
for the target bean already.
<p>This implementation determines the type matching {@link #createBean}'s
different creation strategies. As far as possible, we'll perform static
type checking to avoid creation of the target bean.
@param beanName the name of the bean (for error handling purposes)
@param mbd the merged bean definition for the bean
@param typesToMatch the types to match in case of internal type matching purposes
(also signals that the returned {@code Class} will never be exposed to application code)
@return the type for the bean if determinable, or {@code null} otherwise
@see #createBean
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractAutowireCapableBeanFactory.java
| 712
|
[
"beanName",
"mbd"
] | true
| 29
| 6.64
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
complementOf
|
@J2ktIncompatible
@GwtIncompatible // EnumSet.complementOf
public static <E extends Enum<E>> EnumSet<E> complementOf(
    Collection<E> collection, Class<E> type) {
  checkNotNull(collection);
  // Fast path: EnumSet inputs can use the JDK's own complement operation.
  if (collection instanceof EnumSet) {
    return EnumSet.complementOf((EnumSet<E>) collection);
  }
  // Otherwise compute the complement element-by-element from the enum class.
  return makeComplementByHand(collection, type);
}
|
Creates an {@code EnumSet} consisting of all enum values that are not in the specified
collection. This is equivalent to {@link EnumSet#complementOf}, but can act on any input
collection, as long as the elements are of enum type.
@param collection the collection whose complement should be stored in the {@code EnumSet}
@param type the type of the elements in the set
@return a new, modifiable {@code EnumSet} initially containing all the values of the enum not
present in the given collection
|
java
|
android/guava/src/com/google/common/collect/Sets.java
| 527
|
[
"collection",
"type"
] | true
| 2
| 7.76
|
google/guava
| 51,352
|
javadoc
| false
|
|
declareLocal
|
/**
 * Creates a fresh identifier (unique-named when a hint is given, otherwise a
 * temp variable) and hoists it as a variable declaration in the enclosing scope.
 */
function declareLocal(name?: string): Identifier {
    let local: Identifier;
    if (name) {
        local = factory.createUniqueName(name);
    }
    else {
        local = factory.createTempVariable(/*recordTempVariable*/ undefined);
    }
    hoistVariableDeclaration(local);
    return local;
}
|
Visits an ElementAccessExpression that contains a YieldExpression.
@param node The node to visit.
|
typescript
|
src/compiler/transformers/generators.ts
| 2,098
|
[
"name?"
] | true
| 2
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
getAsText
|
@Override
public String getAsText() {
	// Format the current Date value via the configured DateFormat;
	// a missing value is rendered as the empty string.
	Date value = (Date) getValue();
	if (value == null) {
		return "";
	}
	return this.dateFormat.format(value);
}
|
Format the Date as String, using the specified DateFormat.
|
java
|
spring-beans/src/main/java/org/springframework/beans/propertyeditors/CustomDateEditor.java
| 121
|
[] |
String
| true
| 2
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
nextInLineFetch
|
/**
 * Returns the fetch that is currently next in line, reading the field under the lock.
 * This method is thread-safe.
 *
 * @return the next-in-line {@link CompletedFetch}, or {@code null} if there is none
 */
CompletedFetch nextInLineFetch() {
    // Acquire the lock BEFORE entering the try block: if lock() itself threw inside
    // the try, the finally clause would call unlock() on a lock we never acquired,
    // raising IllegalMonitorStateException and masking the original failure.
    lock.lock();
    try {
        return nextInLineFetch;
    } finally {
        lock.unlock();
    }
}
|
Return the fetch that is currently next in line to be drained. This method is thread-safe:
the field is read while holding the buffer's lock.
@return the next-in-line {@code CompletedFetch}, or {@code null} if there is none
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchBuffer.java
| 116
|
[] |
CompletedFetch
| true
| 1
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
getFlattenedMap
|
protected final Map<String, Object> getFlattenedMap(Map<String, Object> source) {
	// LinkedHashMap preserves the source ordering; the recursive helper starts
	// with no key prefix (null path).
	Map<String, Object> flattened = new LinkedHashMap<>();
	buildFlattenedMap(flattened, source, null);
	return flattened;
}
|
Return a flattened version of the given map, recursively following any nested Map
or Collection values. Entries from the resulting map retain the same order as the
source. When called with the Map from a {@link MatchCallback} the result will
contain the same values as the {@link MatchCallback} Properties.
@param source the source map
@return a flattened map
@since 4.1.3
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/YamlProcessor.java
| 306
|
[
"source"
] | true
| 1
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
enhance
|
/**
 * Loads the specified configuration class and generates a CGLIB subclass of it.
 * <p>Returns the class untouched if it has already been enhanced (it implements
 * {@code EnhancedConfiguration}). Falls back to the config class's own ClassLoader
 * when the supplied one differs and the class relies on package visibility.
 * @param configClass the configuration class to enhance
 * @param classLoader the ClassLoader to define the enhanced class in, or {@code null}
 * @return the enhanced subclass (or the original class if already enhanced)
 * @throws BeanDefinitionStoreException if CGLIB code generation fails
 */
public Class<?> enhance(Class<?> configClass, @Nullable ClassLoader classLoader) {
	// Idempotence guard: a class enhanced by another post-processor is returned as-is.
	if (EnhancedConfiguration.class.isAssignableFrom(configClass)) {
		if (logger.isDebugEnabled()) {
			logger.debug(String.format("Ignoring request to enhance %s as it has " +
					"already been enhanced. This usually indicates that more than one " +
					"ConfigurationClassPostProcessor has been registered (for example, via " +
					"<context:annotation-config>). This is harmless, but you may " +
					"want check your configuration and remove one CCPP if possible",
					configClass.getName()));
		}
		return configClass;
	}
	try {
		// Use original ClassLoader if config class not locally loaded in overriding class loader
		boolean classLoaderMismatch = (classLoader != null && classLoader != configClass.getClassLoader());
		if (classLoaderMismatch && classLoader instanceof SmartClassLoader smartClassLoader) {
			classLoader = smartClassLoader.getOriginalClassLoader();
			classLoaderMismatch = (classLoader != configClass.getClassLoader());
		}
		// Use original ClassLoader if config class relies on package visibility
		if (classLoaderMismatch && reliesOnPackageVisibility(configClass)) {
			classLoader = configClass.getClassLoader();
			classLoaderMismatch = false;
		}
		Enhancer enhancer = newEnhancer(configClass, classLoader);
		// The mismatch flag is forwarded so class creation can adapt accordingly.
		Class<?> enhancedClass = createClass(enhancer, classLoaderMismatch);
		if (logger.isTraceEnabled()) {
			logger.trace(String.format("Successfully enhanced %s; enhanced class name is: %s",
					configClass.getName(), enhancedClass.getName()));
		}
		return enhancedClass;
	}
	catch (CodeGenerationException ex) {
		// Surface CGLIB failures with actionable advice for the user.
		throw new BeanDefinitionStoreException("Could not enhance configuration class [" + configClass.getName() +
				"]. Consider declaring @Configuration(proxyBeanMethods=false) without inter-bean references " +
				"between @Bean methods on the configuration class, avoiding the need for CGLIB enhancement.", ex);
	}
}
|
Loads the specified class and generates a CGLIB subclass of it equipped with
container-aware callbacks capable of respecting scoping and other bean semantics.
@return the enhanced subclass
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/ConfigurationClassEnhancer.java
| 102
|
[
"configClass",
"classLoader"
] | true
| 10
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
populateSearchSymbolSet
|
/**
 * Builds the set of symbols to search for when finding references to `symbol`
 * at `location`, by walking related symbols (roots/bases) via forEachRelatedSymbol.
 * For each related symbol the most specific of base/root/sym is kept.
 * NOTE(review): exact root/base semantics live in forEachRelatedSymbol — confirm there.
 */
function populateSearchSymbolSet(symbol: Symbol, location: Node, checker: TypeChecker, isForRename: boolean, providePrefixAndSuffixText: boolean, implementations: boolean): Symbol[] {
    const result: Symbol[] = [];
    forEachRelatedSymbol<void>(
        symbol,
        location,
        checker,
        isForRename,
        // Only allow object-binding-element searches when not doing a prefix/suffix rename.
        !(isForRename && providePrefixAndSuffixText),
        (sym, root, base) => {
            // static method/property and instance method/property might have the same name. Only include static or only include instance.
            if (base) {
                if (isStaticSymbol(symbol) !== isStaticSymbol(base)) {
                    base = undefined;
                }
            }
            // Prefer the base symbol, then the root, then the symbol itself.
            result.push(base || root || sym);
        }, // when try to find implementation, implementations is true, and not allowed to find base class
        /*allowBaseTypes*/ () => !implementations,
    );
    return result;
}
|
Determines if the parent symbol occurs somewhere in the child's ancestry. If the parent symbol
is an interface, determines if some ancestor of the child symbol extends or inherits from it.
Also takes in a cache of previous results which makes this slightly more efficient and is
necessary to avoid potential loops like so:
class A extends B { }
class B extends A { }
We traverse the AST rather than using the type checker because users are typically only interested
in explicit implementations of an interface/class when calling "Go to Implementation". Sibling
implementations of types that share a common ancestor with the type whose implementation we are
searching for need to be filtered out of the results. The type checker doesn't let us make the
distinction between structurally compatible implementations and explicit implementations, so we
must use the AST.
@param symbol A class or interface Symbol
@param parent Another class or interface Symbol
@param cachedResults A map of symbol id pairs (i.e. "child,parent") to booleans indicating previous results
|
typescript
|
src/services/findAllReferences.ts
| 2,511
|
[
"symbol",
"location",
"checker",
"isForRename",
"providePrefixAndSuffixText",
"implementations"
] | true
| 6
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
wait_for_availability
|
def wait_for_availability(
self,
replication_group_id: str,
initial_sleep_time: float | None = None,
exponential_back_off_factor: float | None = None,
max_retries: int | None = None,
) -> bool:
"""
Check if replication group is available or not by performing a describe over it.
:param replication_group_id: ID of replication group to check for availability
:param initial_sleep_time: Initial sleep time in seconds
If this is not supplied then this is defaulted to class level value
:param exponential_back_off_factor: Multiplication factor for deciding next sleep time
If this is not supplied then this is defaulted to class level value
:param max_retries: Max retries for checking availability of replication group
If this is not supplied then this is defaulted to class level value
:return: True if replication is available else False
"""
sleep_time = initial_sleep_time or self.initial_poke_interval
exponential_back_off_factor = exponential_back_off_factor or self.exponential_back_off_factor
max_retries = max_retries or self.max_retries
num_tries = 0
status = "not-found"
stop_poking = False
while not stop_poking and num_tries <= max_retries:
status = self.get_replication_group_status(replication_group_id=replication_group_id)
stop_poking = status in self.TERMINAL_STATES
self.log.info(
"Current status of replication group with ID %s is %s", replication_group_id, status
)
if not stop_poking:
num_tries += 1
# No point in sleeping if all tries have exhausted
if num_tries > max_retries:
break
self.log.info("Poke retry %s. Sleep time %s seconds. Sleeping...", num_tries, sleep_time)
time.sleep(sleep_time)
sleep_time *= exponential_back_off_factor
if status != "available":
self.log.warning('Replication group is not available. Current status is "%s"', status)
return False
return True
|
Check if replication group is available or not by performing a describe over it.
:param replication_group_id: ID of replication group to check for availability
:param initial_sleep_time: Initial sleep time in seconds
If this is not supplied then this is defaulted to class level value
:param exponential_back_off_factor: Multiplication factor for deciding next sleep time
If this is not supplied then this is defaulted to class level value
:param max_retries: Max retries for checking availability of replication group
If this is not supplied then this is defaulted to class level value
:return: True if replication is available else False
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/elasticache_replication_group.py
| 120
|
[
"self",
"replication_group_id",
"initial_sleep_time",
"exponential_back_off_factor",
"max_retries"
] |
bool
| true
| 9
| 7.76
|
apache/airflow
| 43,597
|
sphinx
| false
|
nextBatch
|
/**
 * Reads the next record batch header from the underlying file channel and returns
 * a lazily-loaded batch view over it, or {@code null} when fewer bytes than a
 * header (or the declared batch size) remain before {@code end}.
 * @throws CorruptRecordException if the declared record size is below the v0 minimum
 * @throws IOException if the header bytes cannot be read from the channel
 */
@Override
public FileChannelRecordBatch nextBatch() throws IOException {
    FileChannel channel = fileRecords.channel();
    // Not even a partial header left before the end position: no more batches.
    if (position >= end - HEADER_SIZE_UP_TO_MAGIC)
        return null;

    logHeaderBuffer.rewind();
    Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header");

    logHeaderBuffer.rewind();
    long offset = logHeaderBuffer.getLong(OFFSET_OFFSET);
    int size = logHeaderBuffer.getInt(SIZE_OFFSET);

    // V0 has the smallest overhead, stricter checking is done later
    if (size < LegacyRecord.RECORD_OVERHEAD_V0)
        throw new CorruptRecordException(String.format("Found record size %d smaller than minimum record " +
                "overhead (%d) in file %s.", size, LegacyRecord.RECORD_OVERHEAD_V0, fileRecords.file()));

    // The full batch does not fit before the end position: treat as exhausted.
    if (position > end - LOG_OVERHEAD - size)
        return null;

    // The magic byte selects the batch format: legacy (< v2) vs. default (v2+).
    byte magic = logHeaderBuffer.get(MAGIC_OFFSET);
    final FileChannelRecordBatch batch;

    if (magic < RecordBatch.MAGIC_VALUE_V2)
        batch = new LegacyFileChannelRecordBatch(offset, magic, fileRecords, position, size);
    else
        batch = new DefaultFileChannelRecordBatch(offset, magic, fileRecords, position, size);

    // Advance past this batch for the next call.
    position += batch.sizeInBytes();
    return batch;
}
|
Create a new log input stream over the FileChannel
@param records Underlying FileRecords instance
@param start Position in the file channel to start from
@param end Position in the file channel not to read past
|
java
|
clients/src/main/java/org/apache/kafka/common/record/FileLogInputStream.java
| 62
|
[] |
FileChannelRecordBatch
| true
| 5
| 6.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
collectImports
|
/**
 * Recursively collects all {@code @Import} values declared on the given class,
 * its interfaces, and its (non-{@code @Import}) meta-annotations.
 * The {@code visited} set guards against revisiting classes (and cycles).
 */
private void collectImports(SourceClass sourceClass, Set<SourceClass> imports, Set<SourceClass> visited)
		throws IOException {

	// Skip anything we have already processed.
	if (!visited.add(sourceClass)) {
		return;
	}
	for (SourceClass iface : sourceClass.getInterfaces()) {
		collectImports(iface, imports, visited);
	}
	for (SourceClass annotation : sourceClass.getAnnotations()) {
		String annotationName = annotation.getMetadata().getClassName();
		// Do not recurse into @Import itself; its values are gathered below.
		if (!annotationName.equals(Import.class.getName())) {
			collectImports(annotation, imports, visited);
		}
	}
	imports.addAll(sourceClass.getAnnotationAttributes(Import.class.getName(), "value"));
}
|
Recursively collect all declared {@code @Import} values. Unlike most
meta-annotations it is valid to have several {@code @Import}s declared with
different values; the usual process of returning values from the first
meta-annotation on a class is not sufficient.
<p>For example, it is common for a {@code @Configuration} class to declare direct
{@code @Import}s in addition to meta-imports originating from an {@code @Enable}
annotation.
<p>As of Spring Framework 7.0, {@code @Import} annotations declared on interfaces
implemented by the configuration class are also considered. This allows imports to
be triggered indirectly via marker interfaces or shared base interfaces.
@param sourceClass the class to search
@param imports the imports collected so far
@param visited used to track visited classes and interfaces to prevent infinite
recursion
@throws IOException if there is any problem reading metadata from the named class
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/ConfigurationClassParser.java
| 562
|
[
"sourceClass",
"imports",
"visited"
] |
void
| true
| 3
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
synchronizedNavigableMap
|
@GwtIncompatible // NavigableMap
@J2ktIncompatible // Synchronized
public static <K extends @Nullable Object, V extends @Nullable Object>
    NavigableMap<K, V> synchronizedNavigableMap(NavigableMap<K, V> navigableMap) {
  // Thin delegate: all synchronization wrapping lives in the Synchronized helper class.
  return Synchronized.navigableMap(navigableMap);
}
|
Returns a synchronized (thread-safe) navigable map backed by the specified navigable map. In
order to guarantee serial access, it is critical that <b>all</b> access to the backing
navigable map is accomplished through the returned navigable map (or its views).
<p>It is imperative that the user manually synchronize on the returned navigable map when
iterating over any of its collection views, or the collections views of any of its {@code
descendingMap}, {@code subMap}, {@code headMap} or {@code tailMap} views.
{@snippet :
NavigableMap<K, V> map = synchronizedNavigableMap(new TreeMap<K, V>());
// Needn't be in synchronized block
NavigableSet<K> set = map.navigableKeySet();
synchronized (map) { // Synchronizing on map, not set!
Iterator<K> it = set.iterator(); // Must be in synchronized block
while (it.hasNext()) {
foo(it.next());
}
}
}
<p>or:
{@snippet :
NavigableMap<K, V> map = synchronizedNavigableMap(new TreeMap<K, V>());
NavigableMap<K, V> map2 = map.subMap(foo, false, bar, true);
// Needn't be in synchronized block
NavigableSet<K> set2 = map2.descendingKeySet();
synchronized (map) { // Synchronizing on map, not map2 or set2!
Iterator<K> it = set2.iterator(); // Must be in synchronized block
while (it.hasNext()) {
foo(it.next());
}
}
}
<p>Failure to follow this advice may result in non-deterministic behavior.
<p>The returned navigable map will be serializable if the specified navigable map is
serializable.
@param navigableMap the navigable map to be "wrapped" in a synchronized navigable map.
@return a synchronized view of the specified navigable map.
@since 13.0
|
java
|
android/guava/src/com/google/common/collect/Maps.java
| 3,501
|
[
"navigableMap"
] | true
| 1
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
|
equals
|
@Override
public boolean equals(Object o) {
    // Identity check first, then strict class match; two instances are
    // considered equal exactly when their compression levels match.
    if (this == o) {
        return true;
    }
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    return this.level == ((ZstdCompression) o).level;
}
|
Indicates whether some other object is "equal to" this one. Two {@code ZstdCompression}
instances are equal exactly when they are of the same class and share the same
compression level.
|
java
|
clients/src/main/java/org/apache/kafka/common/compress/ZstdCompression.java
| 111
|
[
"o"
] | true
| 4
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
forcePut
|
// Declaration only: implementations must first remove any existing entry with
// {@code value}, then perform the put; returns the previous value for {@code key}.
@CanIgnoreReturnValue
@Nullable V forcePut(@ParametricNullness K key, @ParametricNullness V value);
|
An alternate form of {@code put} that silently removes any existing entry with the value {@code
value} before proceeding with the {@link #put} operation. If the bimap previously contained the
provided key-value mapping, this method has no effect.
<p>Note that a successful call to this method could cause the size of the bimap to increase by
one, stay the same, or even decrease by one.
<p><b>Warning:</b> If an existing entry with this value is removed, the key for that entry is
discarded and not returned.
@param key the key with which the specified value is to be associated
@param value the value to be associated with the specified key
@return the value that was previously associated with the key, or {@code null} if there was no
previous entry. (If the bimap contains null values, then {@code forcePut}, like {@code
put}, returns {@code null} both if the key is absent and if it is present with a null
value.)
|
java
|
android/guava/src/com/google/common/collect/BiMap.java
| 78
|
[
"key",
"value"
] |
V
| true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
freshAdvisorChain
|
/**
 * Returns an independent advisor chain: shared advisors are reused as-is, while
 * prototype placeholders are replaced by fresh instances looked up via getBean,
 * so each proxy instance receives distinct prototype Advisors/Advices.
 */
private List<Advisor> freshAdvisorChain() {
	Advisor[] currentAdvisors = getAdvisors();
	List<Advisor> result = new ArrayList<>(currentAdvisors.length);
	for (Advisor candidate : currentAdvisors) {
		if (!(candidate instanceof PrototypePlaceholderAdvisor placeholder)) {
			// Shared (non-prototype) advisor: add the same instance.
			result.add(candidate);
			continue;
		}
		if (logger.isDebugEnabled()) {
			logger.debug("Refreshing bean named '" + placeholder.getBeanName() + "'");
		}
		// Replace the placeholder with a fresh prototype instance resulting from a getBean lookup
		if (this.beanFactory == null) {
			throw new IllegalStateException("No BeanFactory available anymore (probably due to " +
					"serialization) - cannot resolve prototype advisor '" + placeholder.getBeanName() + "'");
		}
		Object bean = this.beanFactory.getBean(placeholder.getBeanName());
		result.add(namedBeanToAdvisor(bean));
	}
	return result;
}
|
Return an independent advisor chain.
We need to do this every time a new prototype instance is returned,
to return distinct instances of prototype Advisors and Advices.
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/ProxyFactoryBean.java
| 458
|
[] | true
| 4
| 7.04
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
getFinalizeReferentMethod
|
/** Looks up the {@code FinalizableReference.finalizeReferent()} method via reflection. */
private @Nullable Method getFinalizeReferentMethod() {
  Class<?> referenceClass = finalizableReferenceClassReference.get();
  if (referenceClass == null) {
    /*
     * FinalizableReference's class loader was reclaimed. While there's a chance that other
     * finalizable references could be enqueued subsequently (at which point the class loader
     * would be resurrected by virtue of us having a strong reference to it), we should pretty
     * much just shut down and make sure we don't keep it alive any longer than necessary.
     */
    return null;
  }
  try {
    return referenceClass.getMethod("finalizeReferent");
  } catch (NoSuchMethodException e) {
    // The method is part of FinalizableReference's contract, so this cannot happen.
    throw new AssertionError(e);
  }
}
|
Looks up FinalizableReference.finalizeReferent() method.
|
java
|
android/guava/src/com/google/common/base/internal/Finalizer.java
| 214
|
[] |
Method
| true
| 3
| 6.08
|
google/guava
| 51,352
|
javadoc
| false
|
parseImportType
|
/**
 * Parses an `import(...)` type node: an optional `typeof` prefix, the argument
 * type inside parentheses, optional import attributes (`{ with: ... }` or
 * `{ assert: ... }`), an optional dotted qualifier, and type arguments.
 */
function parseImportType(): ImportTypeNode {
    // Mark the file as possibly containing dynamic import for downstream passes.
    sourceFlags |= NodeFlags.PossiblyContainsDynamicImport;
    const pos = getNodePos();
    const isTypeOf = parseOptional(SyntaxKind.TypeOfKeyword);
    parseExpected(SyntaxKind.ImportKeyword);
    parseExpected(SyntaxKind.OpenParenToken);
    const type = parseType();
    let attributes: ImportAttributes | undefined;
    // A comma after the type introduces an import-attributes clause.
    if (parseOptional(SyntaxKind.CommaToken)) {
        const openBracePosition = scanner.getTokenStart();
        parseExpected(SyntaxKind.OpenBraceToken);
        const currentToken = token();
        // Either keyword form is accepted; `assert` is the legacy spelling.
        if (currentToken === SyntaxKind.WithKeyword || currentToken === SyntaxKind.AssertKeyword) {
            nextToken();
        }
        else {
            parseErrorAtCurrentToken(Diagnostics._0_expected, tokenToString(SyntaxKind.WithKeyword));
        }
        parseExpected(SyntaxKind.ColonToken);
        attributes = parseImportAttributes(currentToken as SyntaxKind.WithKeyword | SyntaxKind.AssertKeyword, /*skipKeyword*/ true);
        parseOptional(SyntaxKind.CommaToken);
        if (!parseExpected(SyntaxKind.CloseBraceToken)) {
            // Attach a related-info note pointing at the unmatched opening brace.
            const lastError = lastOrUndefined(parseDiagnostics);
            if (lastError && lastError.code === Diagnostics._0_expected.code) {
                addRelatedInfo(
                    lastError,
                    createDetachedDiagnostic(fileName, sourceText, openBracePosition, 1, Diagnostics.The_parser_expected_to_find_a_1_to_match_the_0_token_here, "{", "}"),
                );
            }
        }
    }
    parseExpected(SyntaxKind.CloseParenToken);
    // Optional `.Qualified.Name` after the closing paren, then type arguments.
    const qualifier = parseOptional(SyntaxKind.DotToken) ? parseEntityNameOfTypeReference() : undefined;
    const typeArguments = parseTypeArgumentsOfTypeReference();
    return finishNode(factory.createImportTypeNode(type, attributes, qualifier, typeArguments, isTypeOf), pos);
}
|
Parses an `import(...)` type node: an optional `typeof` prefix, the argument type inside
parentheses, optional import attributes (`{ with: ... }` or the legacy `{ assert: ... }`),
an optional dotted qualifier, and trailing type arguments.
|
typescript
|
src/compiler/parser.ts
| 4,547
|
[] | true
| 9
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
writeReplace
|
@Override
@J2ktIncompatible // serialization
Object writeReplace() {
  // Serialization proxy pattern: a SerializedForm is written in place of this instance.
  return new SerializedForm<E>(this);
}
|
A builder for creating immutable multiset instances, especially {@code public static final}
multisets ("constant multisets"). Example:
{@snippet :
public static final ImmutableSortedMultiset<Bean> BEANS =
new ImmutableSortedMultiset.Builder<Bean>(colorComparator())
.addCopies(Bean.COCOA, 4)
.addCopies(Bean.GARDEN, 6)
.addCopies(Bean.RED, 8)
.addCopies(Bean.BLACK_EYED, 10)
.build();
}
<p>Builder instances can be reused; it is safe to call {@link #build} multiple times to build
multiple multisets in series.
@since 12.0
|
java
|
android/guava/src/com/google/common/collect/ImmutableSortedMultiset.java
| 736
|
[] |
Object
| true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
uniqueBeanName
|
/**
 * Turns the given bean name into a unique bean name for the given registry by
 * appending the generated-name separator and the lowest non-clashing counter.
 * Note: the returned name always carries a counter suffix (starting at 0),
 * even when the plain name itself is free.
 */
public static String uniqueBeanName(String beanName, BeanDefinitionRegistry registry) {
	String prefix = beanName + GENERATED_BEAN_NAME_SEPARATOR;
	int suffix = -1;
	String candidate;
	do {
		suffix++;
		candidate = prefix + suffix;
	}
	while (registry.containsBeanDefinition(candidate));
	return candidate;
}
|
Turn the given bean name into a unique bean name for the given bean factory,
appending a unique counter as suffix if necessary.
@param beanName the original bean name
@param registry the bean factory that the definition is going to be
registered with (to check for existing bean names)
@return the unique bean name to use
@since 5.1
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/BeanDefinitionReaderUtils.java
| 139
|
[
"beanName",
"registry"
] |
String
| true
| 3
| 7.92
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
generate_uuid
|
def generate_uuid(*values: str | None, namespace: UUID = NAMESPACE_OID) -> str:
    """
    Convert input values to a deterministic UUID string representation.

    This function is only intended to generate a hash that is used as an
    identifier, not for any security use.

    A UUID v5 (SHA-1 + namespace) is generated for each value in turn, with
    each result serving as the namespace for the next value. With a single
    non-None value the result equals ``uuid.uuid5(namespace, value)``.
    ``None`` values are substituted with the NIL UUID; a lone ``None`` value
    yields the NIL UUID itself.

    :param namespace: Initial namespace value to pass into the ``uuid.uuid5`` function.
    """
    if not values:
        raise ValueError("Expected at least 1 argument")

    if len(values) == 1 and values[0] is None:
        return str(NIL_UUID)

    # Chain uuid5 over the values, feeding each result back in as the namespace.
    current = namespace
    for value in values:
        current = uuid5(current, str(NIL_UUID) if value is None else value)
    return str(current)
|
Convert input values to deterministic UUID string representation.
This function is only intended to generate a hash that is used as an identifier, not for any security use.
Generates a UUID v5 (SHA-1 + Namespace) for each value provided,
and this UUID is used as the Namespace for the next element.
If only one non-None value is provided to the function, then the result of the function
would be the same as the result of ``uuid.uuid5``.
All ``None`` values are replaced by the NIL UUID; if the single provided value is ``None``, the NIL UUID is returned.
:param namespace: Initial namespace value to pass into the ``uuid.uuid5`` function.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/utils/identifiers.py
| 25
|
[
"namespace"
] |
str
| true
| 6
| 7.04
|
apache/airflow
| 43,597
|
sphinx
| false
|
pad
|
/**
 * Pads `string` on both sides if it's shorter than `length`, splitting the
 * shortfall evenly (extra character goes to the right side). Padding
 * characters are truncated if they can't be evenly divided by `length`.
 */
function pad(string, length, chars) {
  string = toString(string);
  length = toInteger(length);

  var size = length ? stringSize(string) : 0;
  if (!length || size >= length) {
    return string;
  }
  var shortfall = length - size;
  var left = createPadding(nativeFloor(shortfall / 2), chars);
  var right = createPadding(nativeCeil(shortfall / 2), chars);
  return left + string + right;
}
|
Pads `string` on the left and right sides if it's shorter than `length`.
Padding characters are truncated if they can't be evenly divided by `length`.
@static
@memberOf _
@since 3.0.0
@category String
@param {string} [string=''] The string to pad.
@param {number} [length=0] The padding length.
@param {string} [chars=' '] The string used as padding.
@returns {string} Returns the padded string.
@example
_.pad('abc', 8);
// => ' abc '
_.pad('abc', 8, '_-');
// => '_-abc_-_'
_.pad('abc', 3);
// => 'abc'
|
javascript
|
lodash.js
| 14,478
|
[
"string",
"length",
"chars"
] | false
| 4
| 7.52
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
submit_event
|
def submit_event(cls, trigger_id, event: TriggerEvent, session: Session = NEW_SESSION) -> None:
    """
    Fire an event.

    Resume all tasks that were in deferred state for this trigger, register an
    asset change for every asset associated with the trigger, and invoke the
    trigger's callback (if any) with the event.

    :param trigger_id: primary key of the trigger that produced the event
    :param event: the TriggerEvent payload to deliver
    :param session: database session (injected when not supplied)
    """
    # Resume deferred tasks: every TaskInstance parked on this trigger in
    # DEFERRED state is handed the event.
    for task_instance in session.scalars(
        select(TaskInstance).where(
            TaskInstance.trigger_id == trigger_id, TaskInstance.state == TaskInstanceState.DEFERRED
        )
    ):
        handle_event_submit(event, task_instance=task_instance, session=session)

    # Send an event to assets
    trigger = session.scalars(select(cls).where(cls.id == trigger_id)).one_or_none()
    if trigger is None:
        # Already deleted for some reason
        return
    for asset in trigger.assets:
        AssetManager.register_asset_change(
            asset=asset.to_serialized(),
            extra={"from_trigger": True, "payload": event.payload},
            session=session,
        )
    # Finally, let the trigger's optional callback react to the event.
    if trigger.callback:
        trigger.callback.handle_event(event, session)
|
Fire an event.
Resume all tasks that were in deferred state.
Send an event to all assets associated to the trigger.
|
python
|
airflow-core/src/airflow/models/trigger.py
| 248
|
[
"cls",
"trigger_id",
"event",
"session"
] |
None
| true
| 5
| 6
|
apache/airflow
| 43,597
|
unknown
| false
|
addInitialImportContributors
|
/**
 * Adds an initial-import contributor for every non-empty location.
 * Locations are walked last-to-first (NOTE(review): presumably so that
 * earlier locations end up with higher effective precedence — confirm with callers).
 */
private void addInitialImportContributors(List<ConfigDataEnvironmentContributor> initialContributors,
		ConfigDataLocation[] locations) {
	for (int index = locations.length - 1; index >= 0; index--) {
		ConfigDataLocation location = locations[index];
		if (ConfigDataLocation.isNotEmpty(location)) {
			initialContributors.add(createInitialImportContributor(location));
		}
	}
}
|
Create a new {@link ConfigDataEnvironment} instance.
@param logFactory the deferred log factory
@param bootstrapContext the bootstrap context
@param environment the Spring {@link Environment}.
@param resourceLoader {@link ResourceLoader} to load resource locations
@param additionalProfiles any additional profiles to activate
@param environmentUpdateListener optional
{@link ConfigDataEnvironmentUpdateListener} that can be used to track
{@link Environment} updates.
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataEnvironment.java
| 214
|
[
"initialContributors",
"locations"
] |
void
| true
| 3
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
forEach
|
@Override
public void forEach(BiConsumer<? super K, ? super V> action) {
checkNotNull(action);
ImmutableList<K> keyList = keySet.asList();
for (int i = 0; i < size(); i++) {
action.accept(keyList.get(i), valueList.get(i));
}
}
|
A builder for creating immutable sorted map instances, especially {@code public static final}
maps ("constant maps"). Example:
{@snippet :
static final ImmutableSortedMap<Integer, String> INT_TO_WORD =
new ImmutableSortedMap.Builder<Integer, String>(Ordering.natural())
.put(1, "one")
.put(2, "two")
.put(3, "three")
.buildOrThrow();
}
<p>For <i>small</i> immutable sorted maps, the {@code ImmutableSortedMap.of()} methods are even
more convenient.
<p>Builder instances can be reused - it is safe to call {@link #buildOrThrow} multiple times to
build multiple maps in series. Each map is a superset of the maps created before it.
@since 2.0
|
java
|
guava/src/com/google/common/collect/ImmutableSortedMap.java
| 780
|
[
"action"
] |
void
| true
| 2
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
start
|
@Override
public StartupStep start(String name) {
int id = this.idSeq.getAndIncrement();
Instant start = this.clock.instant();
while (true) {
BufferedStartupStep current = this.current.get();
BufferedStartupStep parent = getLatestActive(current);
BufferedStartupStep next = new BufferedStartupStep(parent, name, id, start, this::record);
if (this.current.compareAndSet(current, next)) {
return next;
}
}
}
|
Add a predicate filter to the list of existing ones.
<p>
A {@link StartupStep step} that doesn't match all filters will not be recorded.
@param filter the predicate filter to add.
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/metrics/buffering/BufferingApplicationStartup.java
| 110
|
[
"name"
] |
StartupStep
| true
| 3
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
minimalWithCount
|
public static ZeroBucket minimalWithCount(long count) {
if (count == 0) {
return MINIMAL_EMPTY;
} else {
return new ZeroBucket(MINIMAL_EMPTY, count);
}
}
|
Creates a zero bucket with the smallest possible threshold and a given count.
@param count The number of values in the bucket.
@return A new {@link ZeroBucket}.
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ZeroBucket.java
| 112
|
[
"count"
] |
ZeroBucket
| true
| 2
| 8.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
mulAndCheck
|
private static int mulAndCheck(final int x, final int y) {
final long m = (long) x * (long) y;
if (m < Integer.MIN_VALUE || m > Integer.MAX_VALUE) {
throw new ArithmeticException("overflow: mul");
}
return (int) m;
}
|
Multiplies two integers, checking for overflow.
@param x a factor
@param y a factor
@return the product {@code x*y}
@throws ArithmeticException if the result cannot be represented as
an int
|
java
|
src/main/java/org/apache/commons/lang3/math/Fraction.java
| 414
|
[
"x",
"y"
] | true
| 3
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
_fit_transform
|
def _fit_transform(self, X, W=None, H=None, update_H=True):
"""Learn a NMF model for the data X and returns the transformed data.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Data matrix to be decomposed.
W : array-like of shape (n_samples, n_components), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `update_H=False`, it is initialised as an array of zeros, unless
`solver='mu'`, then it is filled with values calculated by
`np.sqrt(X.mean() / self._n_components)`.
If `None`, uses the initialisation method specified in `init`.
H : array-like of shape (n_components, n_features), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `update_H=False`, it is used as a constant, to solve for W only.
If `None`, uses the initialisation method specified in `init`.
update_H : bool, default=True
If True, both W and H will be estimated from initial guesses,
this corresponds to a call to the `fit_transform` method.
If False, only W will be estimated, this corresponds to a call
to the `transform` method.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Transformed data.
H : ndarray of shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter : int
Actual number of started iterations over the whole dataset.
n_steps : int
Number of mini-batches processed.
"""
check_non_negative(X, "MiniBatchNMF (input X)")
self._check_params(X)
if X.min() == 0 and self._beta_loss <= 0:
raise ValueError(
"When beta_loss <= 0 and X contains zeros, "
"the solver may diverge. Please add small values "
"to X, or use a positive beta_loss."
)
n_samples = X.shape[0]
# initialize or check W and H
W, H = self._check_w_h(X, W, H, update_H)
H_buffer = H.copy()
# Initialize auxiliary matrices
self._components_numerator = H.copy()
self._components_denominator = np.ones(H.shape, dtype=H.dtype)
# Attributes to monitor the convergence
self._ewa_cost = None
self._ewa_cost_min = None
self._no_improvement = 0
batches = gen_batches(n_samples, self._batch_size)
batches = itertools.cycle(batches)
n_steps_per_iter = int(np.ceil(n_samples / self._batch_size))
n_steps = self.max_iter * n_steps_per_iter
for i, batch in zip(range(n_steps), batches):
batch_cost = self._minibatch_step(X[batch], W[batch], H, update_H)
if update_H and self._minibatch_convergence(
X[batch], batch_cost, H, H_buffer, n_samples, i, n_steps
):
break
H_buffer[:] = H
if self.fresh_restarts:
W = self._solve_W(X, H, self._transform_max_iter)
n_steps = i + 1
n_iter = int(np.ceil(n_steps / n_steps_per_iter))
if n_iter == self.max_iter and self.tol > 0:
warnings.warn(
(
f"Maximum number of iterations {self.max_iter} reached. "
"Increase it to improve convergence."
),
ConvergenceWarning,
)
return W, H, n_iter, n_steps
|
Learn a NMF model for the data X and returns the transformed data.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Data matrix to be decomposed.
W : array-like of shape (n_samples, n_components), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `update_H=False`, it is initialised as an array of zeros, unless
`solver='mu'`, then it is filled with values calculated by
`np.sqrt(X.mean() / self._n_components)`.
If `None`, uses the initialisation method specified in `init`.
H : array-like of shape (n_components, n_features), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `update_H=False`, it is used as a constant, to solve for W only.
If `None`, uses the initialisation method specified in `init`.
update_H : bool, default=True
If True, both W and H will be estimated from initial guesses,
this corresponds to a call to the `fit_transform` method.
If False, only W will be estimated, this corresponds to a call
to the `transform` method.
Returns
-------
W : ndarray of shape (n_samples, n_components)
Transformed data.
H : ndarray of shape (n_components, n_features)
Factorization matrix, sometimes called 'dictionary'.
n_iter : int
Actual number of started iterations over the whole dataset.
n_steps : int
Number of mini-batches processed.
|
python
|
sklearn/decomposition/_nmf.py
| 2,219
|
[
"self",
"X",
"W",
"H",
"update_H"
] | false
| 9
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
checkValue
|
static void checkValue(double x) {
if (Double.isNaN(x) || Double.isInfinite(x)) {
throw new IllegalArgumentException("Invalid value: " + x);
}
}
|
Add a single sample to this TDigest.
@param x The data value to add
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/TDigest.java
| 112
|
[
"x"
] |
void
| true
| 3
| 6.88
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
as_ctypes
|
def as_ctypes(obj):
"""
Create and return a ctypes object from a numpy array. Actually
anything that exposes the __array_interface__ is accepted.
Examples
--------
Create ctypes object from inferred int ``np.array``:
>>> inferred_int_array = np.array([1, 2, 3])
>>> c_int_array = np.ctypeslib.as_ctypes(inferred_int_array)
>>> type(c_int_array)
<class 'c_long_Array_3'>
>>> c_int_array[:]
[1, 2, 3]
Create ctypes object from explicit 8 bit unsigned int ``np.array`` :
>>> exp_int_array = np.array([1, 2, 3], dtype=np.uint8)
>>> c_int_array = np.ctypeslib.as_ctypes(exp_int_array)
>>> type(c_int_array)
<class 'c_ubyte_Array_3'>
>>> c_int_array[:]
[1, 2, 3]
"""
ai = obj.__array_interface__
if ai["strides"]:
raise TypeError("strided arrays not supported")
if ai["version"] != 3:
raise TypeError("only __array_interface__ version 3 supported")
addr, readonly = ai["data"]
if readonly:
raise TypeError("readonly arrays unsupported")
# can't use `_dtype((ai["typestr"], ai["shape"]))` here, as it overflows
# dtype.itemsize (gh-14214)
ctype_scalar = as_ctypes_type(ai["typestr"])
result_type = _ctype_ndarray(ctype_scalar, ai["shape"])
result = result_type.from_address(addr)
result.__keep = obj
return result
|
Create and return a ctypes object from a numpy array. Actually
anything that exposes the __array_interface__ is accepted.
Examples
--------
Create ctypes object from inferred int ``np.array``:
>>> inferred_int_array = np.array([1, 2, 3])
>>> c_int_array = np.ctypeslib.as_ctypes(inferred_int_array)
>>> type(c_int_array)
<class 'c_long_Array_3'>
>>> c_int_array[:]
[1, 2, 3]
Create ctypes object from explicit 8 bit unsigned int ``np.array`` :
>>> exp_int_array = np.array([1, 2, 3], dtype=np.uint8)
>>> c_int_array = np.ctypeslib.as_ctypes(exp_int_array)
>>> type(c_int_array)
<class 'c_ubyte_Array_3'>
>>> c_int_array[:]
[1, 2, 3]
|
python
|
numpy/ctypeslib/_ctypeslib.py
| 562
|
[
"obj"
] | false
| 4
| 6.32
|
numpy/numpy
| 31,054
|
unknown
| false
|
|
back
|
public void back() {
if (--this.pos == -1) {
this.pos = 0;
}
}
|
Returns the current position and the entire input string.
@return the current position and the entire input string.
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONTokener.java
| 534
|
[] |
void
| true
| 2
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
applyScopedProxyMode
|
static BeanDefinitionHolder applyScopedProxyMode(
ScopeMetadata metadata, BeanDefinitionHolder definition, BeanDefinitionRegistry registry) {
ScopedProxyMode scopedProxyMode = metadata.getScopedProxyMode();
if (scopedProxyMode.equals(ScopedProxyMode.NO)) {
return definition;
}
boolean proxyTargetClass = scopedProxyMode.equals(ScopedProxyMode.TARGET_CLASS);
return ScopedProxyCreator.createScopedProxy(definition, registry, proxyTargetClass);
}
|
Register all relevant annotation post processors in the given registry.
@param registry the registry to operate on
@param source the configuration source element (already extracted)
that this registration was triggered from. May be {@code null}.
@return a Set of BeanDefinitionHolders, containing all bean definitions
that have actually been registered by this call
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/AnnotationConfigUtils.java
| 278
|
[
"metadata",
"definition",
"registry"
] |
BeanDefinitionHolder
| true
| 2
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
parentContextContainsSameBean
|
private boolean parentContextContainsSameBean(ApplicationContext context, @Nullable String beanKey) {
if (context.getParent() == null) {
return false;
}
try {
ApplicationContext parent = this.applicationContext.getParent();
Assert.state(parent != null, "'parent' must not be null");
Assert.state(beanKey != null, "'beanKey' must not be null");
parent.getBean(beanKey);
return true;
}
catch (BeansException ex) {
return parentContextContainsSameBean(context.getParent(), beanKey);
}
}
|
Set if unique runtime object names should be ensured.
@param ensureUniqueRuntimeObjectNames {@code true} if unique names should be
ensured.
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/jmx/ParentAwareNamingStrategy.java
| 80
|
[
"context",
"beanKey"
] | true
| 3
| 6.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
to_clipboard
|
def to_clipboard(
self, *, excel: bool = True, sep: str | None = None, **kwargs
) -> None:
r"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
Parameters
----------
excel : bool, default True
Produce output in a csv format for easy pasting into excel.
- True, use the provided separator for csv pasting.
- False, write a string representation of the object to the clipboard.
sep : str, default ``'\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
See Also
--------
DataFrame.to_csv : Write a DataFrame to a comma-separated values
(csv) file.
read_clipboard : Read text from clipboard and pass to read_csv.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `PyQt4` modules)
- Windows : none
- macOS : none
This method uses the processes developed for the package `pyperclip`. A
solution to render any output string format is given in the examples.
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
>>> df.to_clipboard(sep=",") # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=",", index=False) # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
Using the original `pyperclip` package for any string output format.
.. code-block:: python
import pyperclip
html = df.style.to_html()
pyperclip.copy(html)
"""
from pandas.io import clipboards
clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
|
r"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
Parameters
----------
excel : bool, default True
Produce output in a csv format for easy pasting into excel.
- True, use the provided separator for csv pasting.
- False, write a string representation of the object to the clipboard.
sep : str, default ``'\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
See Also
--------
DataFrame.to_csv : Write a DataFrame to a comma-separated values
(csv) file.
read_clipboard : Read text from clipboard and pass to read_csv.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `PyQt4` modules)
- Windows : none
- macOS : none
This method uses the processes developed for the package `pyperclip`. A
solution to render any output string format is given in the examples.
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
>>> df.to_clipboard(sep=",") # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=",", index=False) # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
Using the original `pyperclip` package for any string output format.
.. code-block:: python
import pyperclip
html = df.style.to_html()
pyperclip.copy(html)
|
python
|
pandas/core/generic.py
| 3,130
|
[
"self",
"excel",
"sep"
] |
None
| true
| 1
| 7.2
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
run_query
|
def run_query(
self,
query: str,
query_context: dict[str, str],
result_configuration: dict[str, Any],
client_request_token: str | None = None,
workgroup: str = "primary",
) -> str:
"""
Run a Trino/Presto query on Athena with provided config.
.. seealso::
- :external+boto3:py:meth:`Athena.Client.start_query_execution`
:param query: Trino/Presto query to run.
:param query_context: Context in which query need to be run.
:param result_configuration: Dict with path to store results in and
config related to encryption.
:param client_request_token: Unique token created by user to avoid
multiple executions of same query.
:param workgroup: Athena workgroup name, when not specified, will be ``'primary'``.
:return: Submitted query execution ID.
"""
params = {
"QueryString": query,
"QueryExecutionContext": query_context,
"ResultConfiguration": result_configuration,
"WorkGroup": workgroup,
}
if client_request_token:
params["ClientRequestToken"] = client_request_token
if self.log_query:
self.log.info("Running Query with params:\n%s", query_params_to_string(params))
response = self.get_conn().start_query_execution(**params)
query_execution_id = response["QueryExecutionId"]
self.log.info("Query execution id: %s", query_execution_id)
return query_execution_id
|
Run a Trino/Presto query on Athena with provided config.
.. seealso::
- :external+boto3:py:meth:`Athena.Client.start_query_execution`
:param query: Trino/Presto query to run.
:param query_context: Context in which query need to be run.
:param result_configuration: Dict with path to store results in and
config related to encryption.
:param client_request_token: Unique token created by user to avoid
multiple executions of same query.
:param workgroup: Athena workgroup name, when not specified, will be ``'primary'``.
:return: Submitted query execution ID.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/athena.py
| 93
|
[
"self",
"query",
"query_context",
"result_configuration",
"client_request_token",
"workgroup"
] |
str
| true
| 3
| 7.44
|
apache/airflow
| 43,597
|
sphinx
| false
|
replaceSystemProperties
|
public static String replaceSystemProperties(final Object source) {
return new StrSubstitutor(StrLookup.systemPropertiesLookup()).replace(source);
}
|
Replaces all the occurrences of variables in the given source object with
their matching values from the system properties.
@param source the source text containing the variables to substitute, null returns null.
@return the result of the replace operation.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrSubstitutor.java
| 225
|
[
"source"
] |
String
| true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
mapInternal
|
private static MappedByteBuffer mapInternal(File file, MapMode mode, long size)
throws IOException {
checkNotNull(file);
checkNotNull(mode);
Closer closer = Closer.create();
try {
RandomAccessFile raf =
closer.register(new RandomAccessFile(file, mode == MapMode.READ_ONLY ? "r" : "rw"));
FileChannel channel = closer.register(raf.getChannel());
return channel.map(mode, 0, size == -1 ? channel.size() : size);
} catch (Throwable e) {
throw closer.rethrow(e);
} finally {
closer.close();
}
}
|
Maps a file in to memory as per {@link FileChannel#map(java.nio.channels.FileChannel.MapMode,
long, long)} using the requested {@link MapMode}.
<p>Files are mapped from offset 0 to {@code size}.
<p>If the mode is {@link MapMode#READ_WRITE} and the file does not exist, it will be created
with the requested {@code size}. Thus this method is useful for creating memory mapped files
which do not yet exist.
<p>This only works for files ≤ {@link Integer#MAX_VALUE} bytes.
@param file the file to map
@param mode the mode to use when mapping {@code file}
@return a buffer reflecting {@code file}
@throws IOException if an I/O error occurs
@see FileChannel#map(MapMode, long, long)
@since 2.0
|
java
|
android/guava/src/com/google/common/io/Files.java
| 695
|
[
"file",
"mode",
"size"
] |
MappedByteBuffer
| true
| 4
| 8.08
|
google/guava
| 51,352
|
javadoc
| false
|
replace
|
public String replace(final String text, final String searchString, final String replacement) {
return replace(text, searchString, replacement, -1);
}
|
Case insensitively replaces all occurrences of a String within another String.
<p>
A {@code null} reference passed to this method is a no-op.
</p>
<p>
Case-sensitive examples
</p>
<pre>
Strings.CS.replace(null, *, *) = null
Strings.CS.replace("", *, *) = ""
Strings.CS.replace("any", null, *) = "any"
Strings.CS.replace("any", *, null) = "any"
Strings.CS.replace("any", "", *) = "any"
Strings.CS.replace("aba", "a", null) = "aba"
Strings.CS.replace("aba", "a", "") = "b"
Strings.CS.replace("aba", "a", "z") = "zbz"
</pre>
<p>
Case-insensitive examples
</p>
<pre>
Strings.CI.replace(null, *, *) = null
Strings.CI.replace("", *, *) = ""
Strings.CI.replace("any", null, *) = "any"
Strings.CI.replace("any", *, null) = "any"
Strings.CI.replace("any", "", *) = "any"
Strings.CI.replace("aba", "a", null) = "aba"
Strings.CI.replace("abA", "A", "") = "b"
Strings.CI.replace("aba", "A", "z") = "zbz"
</pre>
@see #replace(String text, String searchString, String replacement, int max)
@param text text to search and replace in, may be null
@param searchString the String to search for (case-insensitive), may be null
@param replacement the String to replace it with, may be null
@return the text with any replacements processed, {@code null} if null String input
|
java
|
src/main/java/org/apache/commons/lang3/Strings.java
| 1,237
|
[
"text",
"searchString",
"replacement"
] |
String
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
fetchablePartitions
|
public synchronized List<TopicPartition> fetchablePartitions(Predicate<TopicPartition> isAvailable) {
// Since this is in the hot-path for fetching, we do this instead of using java.util.stream API
List<TopicPartition> result = new ArrayList<>();
assignment.forEach((topicPartition, topicPartitionState) -> {
// Cheap check is first to avoid evaluating the predicate if possible
if ((subscriptionType.equals(SubscriptionType.AUTO_TOPICS_SHARE) || isFetchableAndSubscribed(topicPartition, topicPartitionState))
&& isAvailable.test(topicPartition)) {
result.add(topicPartition);
}
});
return result;
}
|
Provides the number of assigned partitions in a thread safe manner.
@return the number of assigned partitions.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
| 486
|
[
"isAvailable"
] | true
| 4
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
addOrMergeSource
|
private void addOrMergeSource(Map<String, ConfigurationMetadataSource> sources, String name,
ConfigurationMetadataSource source) {
ConfigurationMetadataSource existingSource = sources.get(name);
if (existingSource == null) {
sources.put(name, source);
}
else {
source.getProperties().forEach((k, v) -> existingSource.getProperties().putIfAbsent(k, v));
}
}
|
Merge the content of the specified repository to this repository.
@param repository the repository to include
|
java
|
configuration-metadata/spring-boot-configuration-metadata/src/main/java/org/springframework/boot/configurationmetadata/SimpleConfigurationMetadataRepository.java
| 106
|
[
"sources",
"name",
"source"
] |
void
| true
| 2
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
asByteArray
|
byte[] asByteArray() {
ByteBuffer buffer = ByteBuffer.allocate((int) size());
buffer.order(ByteOrder.LITTLE_ENDIAN);
if (this.includeSignature) {
buffer.putInt(SIGNATURE);
}
buffer.putInt(this.crc32);
buffer.putInt(this.compressedSize);
buffer.putInt(this.uncompressedSize);
return buffer.array();
}
|
Return the contents of this record as a byte array suitable for writing to a zip.
@return the record as a byte array
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipDataDescriptorRecord.java
| 54
|
[] | true
| 2
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
anyNotNull
|
public static boolean anyNotNull(final Object... values) {
return firstNonNull(values) != null;
}
|
Tests if any value in the given array is not {@code null}.
<p>
If all the values are {@code null} or the array is {@code null} or empty then {@code false} is returned. Otherwise {@code true} is returned.
</p>
<pre>
ObjectUtils.anyNotNull(*) = true
ObjectUtils.anyNotNull(*, null) = true
ObjectUtils.anyNotNull(null, *) = true
ObjectUtils.anyNotNull(null, null, *, *) = true
ObjectUtils.anyNotNull(null) = false
ObjectUtils.anyNotNull(null, null) = false
</pre>
@param values the values to test, may be {@code null} or empty.
@return {@code true} if there is at least one non-null value in the array, {@code false} if all values in the array are {@code null}s. If the array is
{@code null} or empty {@code false} is also returned.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/ObjectUtils.java
| 193
|
[] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
getFormatter
|
protected DateTimeFormatter getFormatter(DateTimeFormat annotation, Class<?> fieldType) {
DateTimeFormatterFactory factory = new DateTimeFormatterFactory();
String style = resolveEmbeddedValue(annotation.style());
if (StringUtils.hasLength(style)) {
factory.setStylePattern(style);
}
factory.setIso(annotation.iso());
String pattern = resolveEmbeddedValue(annotation.pattern());
if (StringUtils.hasLength(pattern)) {
factory.setPattern(pattern);
}
return factory.createDateTimeFormatter();
}
|
Factory method used to create a {@link DateTimeFormatter}.
@param annotation the format annotation for the field
@param fieldType the declared type of the field
@return a {@link DateTimeFormatter} instance
|
java
|
spring-context/src/main/java/org/springframework/format/datetime/standard/Jsr310DateTimeFormatAnnotationFormatterFactory.java
| 118
|
[
"annotation",
"fieldType"
] |
DateTimeFormatter
| true
| 3
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
loadBeans
|
private <T> Map<String, T> loadBeans(Class<T> type) {
return (this.beanFactory != null ?
BeanFactoryUtils.beansOfTypeIncludingAncestors(this.beanFactory, type, true, false) :
Collections.emptyMap());
}
|
Load all AOT services of the given type.
@param <T> the service type
@param type the service type
@return a new {@link AotServices} instance
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/AotServices.java
| 213
|
[
"type"
] | true
| 2
| 7.92
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
appendRecursiveTypes
|
private static void appendRecursiveTypes(final StringBuilder builder, final int[] recursiveTypeIndexes, final Type[] argumentTypes) {
for (int i = 0; i < recursiveTypeIndexes.length; i++) {
// toString() or SO
GT_JOINER.join(builder, argumentTypes[i].toString());
}
final Type[] argumentsFiltered = ArrayUtils.removeAll(argumentTypes, recursiveTypeIndexes);
if (argumentsFiltered.length > 0) {
GT_JOINER.join(builder, (Object[]) argumentsFiltered);
}
}
|
A wildcard instance matching {@code ?}.
@since 3.2
|
java
|
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
| 330
|
[
"builder",
"recursiveTypeIndexes",
"argumentTypes"
] |
void
| true
| 3
| 6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
baseMatches
|
function baseMatches(source) {
var matchData = getMatchData(source);
if (matchData.length == 1 && matchData[0][2]) {
return matchesStrictComparable(matchData[0][0], matchData[0][1]);
}
return function(object) {
return object === source || baseIsMatch(object, source, matchData);
};
}
|
The base implementation of `_.matches` which doesn't clone `source`.
@private
@param {Object} source The object of property values to match.
@returns {Function} Returns the new spec function.
|
javascript
|
lodash.js
| 3,597
|
[
"source"
] | false
| 4
| 6.08
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
generateCode
|
@Deprecated(since = "6.1.7")
public CodeBlock generateCode(RegisteredBean registeredBean, Executable constructorOrFactoryMethod) {
return generateCode(registeredBean, new InstantiationDescriptor(
constructorOrFactoryMethod, constructorOrFactoryMethod.getDeclaringClass()));
}
|
Generate the instance supplier code.
@param registeredBean the bean to handle
@param constructorOrFactoryMethod the executable to use to create the bean
@return the generated code
@deprecated in favor of {@link #generateCode(RegisteredBean, InstantiationDescriptor)}
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/InstanceSupplierCodeGenerator.java
| 128
|
[
"registeredBean",
"constructorOrFactoryMethod"
] |
CodeBlock
| true
| 1
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
maybeBindReturningVariable
|
private void maybeBindReturningVariable() {
if (this.numberOfRemainingUnboundArguments == 0) {
throw new IllegalStateException(
"Algorithm assumes that there must be at least one unbound parameter on entry to this method");
}
if (this.returningName != null) {
if (this.numberOfRemainingUnboundArguments > 1) {
throw new AmbiguousBindingException("Binding of returning parameter '" + this.returningName +
"' is ambiguous: there are " + this.numberOfRemainingUnboundArguments + " candidates. " +
"Consider compiling with -parameters in order to make declared parameter names available.");
}
// We're all set... find the unbound parameter, and bind it.
for (int i = 0; i < this.parameterNameBindings.length; i++) {
if (this.parameterNameBindings[i] == null) {
bindParameterName(i, this.returningName);
break;
}
}
}
}
|
If a returning variable was specified and there is only one choice remaining, bind it.
|
java
|
spring-aop/src/main/java/org/springframework/aop/aspectj/AspectJAdviceParameterNameDiscoverer.java
| 362
|
[] |
void
| true
| 6
| 7.04
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
baseArity
|
function baseArity(func, n) {
return n == 2
? function(a, b) { return func.apply(undefined, arguments); }
: function(a) { return func.apply(undefined, arguments); };
}
|
Creates a function, with an arity of `n`, that invokes `func` with the
arguments it receives.
@private
@param {Function} func The function to wrap.
@param {number} n The arity of the new function.
@returns {Function} Returns the new function.
|
javascript
|
fp/_baseConvert.js
| 16
|
[
"func",
"n"
] | false
| 2
| 6.24
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
writeEntries
|
final void writeEntries(JarFile jarFile, EntryTransformer entryTransformer, UnpackHandler unpackHandler,
Function<JarEntry, @Nullable Library> libraryLookup) throws IOException {
Enumeration<JarEntry> entries = jarFile.entries();
while (entries.hasMoreElements()) {
JarEntry entry = entries.nextElement();
Library library = libraryLookup.apply(entry);
if (library == null || library.isIncluded()) {
writeEntry(jarFile, entryTransformer, unpackHandler, new JarArchiveEntry(entry), library);
}
}
}
|
Write the specified manifest.
@param manifest the manifest to write
@throws IOException of the manifest cannot be written
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/AbstractJarWriter.java
| 88
|
[
"jarFile",
"entryTransformer",
"unpackHandler",
"libraryLookup"
] |
void
| true
| 4
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
andThen
|
default FailableLongUnaryOperator<E> andThen(final FailableLongUnaryOperator<E> after) {
Objects.requireNonNull(after);
return (final long t) -> after.applyAsLong(applyAsLong(t));
}
|
Returns a composed {@link FailableDoubleUnaryOperator} like {@link LongUnaryOperator#andThen(LongUnaryOperator)}.
@param after the operator to apply after this one.
@return a composed {@link FailableLongUnaryOperator} like {@link LongUnaryOperator#andThen(LongUnaryOperator)}.
@throws NullPointerException if after is null.
@see #compose(FailableLongUnaryOperator)
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableLongUnaryOperator.java
| 64
|
[
"after"
] | true
| 1
| 6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
kafkaShareConsumerMetrics
|
@Override
public KafkaShareConsumerMetrics kafkaShareConsumerMetrics() {
return kafkaShareConsumerMetrics;
}
|
This method can be used by cases where the caller has an event that needs to both block for completion but
also process background events. For some events, in order to fully process the associated logic, the
{@link ConsumerNetworkThread background thread} needs assistance from the application thread to complete.
If the application thread simply blocked on the event after submitting it, the processing would deadlock.
The logic herein is basically a loop that performs two tasks in each iteration:
<ol>
<li>Process background events, if any</li>
<li><em>Briefly</em> wait for {@link CompletableApplicationEvent an event} to complete</li>
</ol>
<p/>
Each iteration gives the application thread an opportunity to process background events, which may be
necessary to complete the overall processing.
@param future Event that contains a {@link CompletableFuture}; it is on this future that the
application thread will wait for completion
@param timer Overall timer that bounds how long to wait for the event to complete
@param ignoreErrorEventException Predicate to ignore background errors.
Any exceptions found while processing background events that match the predicate won't be propagated.
@return {@code true} if the event completed within the timeout, {@code false} otherwise
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java
| 1,355
|
[] |
KafkaShareConsumerMetrics
| true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.