function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
of
|
/**
 * Factory method to create a new {@link ConditionMessage} comprised of the
 * specified messages.
 *
 * @param messages the source messages (may be {@code null})
 * @return a new {@link ConditionMessage} instance
 */
public static ConditionMessage of(@Nullable Collection<? extends ConditionMessage> messages) {
    ConditionMessage combined = new ConditionMessage();
    if (messages == null) {
        return combined;
    }
    for (ConditionMessage candidate : messages) {
        combined = new ConditionMessage(combined, candidate.toString());
    }
    return combined;
}
|
Factory method to create a new {@link ConditionMessage} comprised of the specified
messages.
@param messages the source messages (may be {@code null})
@return a new {@link ConditionMessage} instance
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/ConditionMessage.java
| 163
|
[
"messages"
] |
ConditionMessage
| true
| 2
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
bindReturnOrThrow
|
function bindReturnOrThrow(node: ReturnStatement | ThrowStatement): void {
const savedInReturnPosition = inReturnPosition;
inReturnPosition = true;
bind(node.expression);
inReturnPosition = savedInReturnPosition;
if (node.kind === SyntaxKind.ReturnStatement) {
hasExplicitReturn = true;
if (currentReturnTarget) {
addAntecedent(currentReturnTarget, currentFlow);
}
}
currentFlow = unreachableFlow;
hasFlowEffects = true;
}
|
Binds the expression of a return or throw statement while tracking return position,
then marks the current flow as unreachable. For return statements, records that an
explicit return exists and adds the current flow as an antecedent of the return target.
@param node - The return or throw statement to bind.
|
typescript
|
src/compiler/binder.ts
| 1,591
|
[
"node"
] | true
| 3
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
truncatedCompareTo
|
/**
 * Determines how two calendars compare up to no more than the specified
 * most significant field.
 *
 * @param cal1 the first calendar, not {@code null}.
 * @param cal2 the second calendar, not {@code null}.
 * @param field the field from {@link Calendar}.
 * @return a negative integer, zero, or a positive integer as the first
 *         calendar is less than, equal to, or greater than the second.
 * @throws NullPointerException if any argument is {@code null}.
 */
public static int truncatedCompareTo(final Calendar cal1, final Calendar cal2, final int field) {
    return truncate(cal1, field).compareTo(truncate(cal2, field));
}
|
Determines how two calendars compare up to no more than the specified
most significant field.
@param cal1 the first calendar, not {@code null}.
@param cal2 the second calendar, not {@code null}.
@param field the field from {@link Calendar}.
@return a negative integer, zero, or a positive integer as the first
calendar is less than, equal to, or greater than the second.
@throws NullPointerException if any argument is {@code null}.
@see #truncate(Calendar, int)
@see #truncatedCompareTo(Date, Date, int)
@since 3.0
|
java
|
src/main/java/org/apache/commons/lang3/time/DateUtils.java
| 1,785
|
[
"cal1",
"cal2",
"field"
] | true
| 1
| 6.72
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
autoCommitSyncBeforeRebalanceWithRetries
|
/**
 * Attempts an auto-commit of the latest consumed offsets before a rebalance,
 * retrying on retriable errors until the request succeeds, fails with a
 * non-retriable error, or the request's deadline expires. Completes the given
 * {@code result} future accordingly.
 *
 * <p>Note the ordering of checks in the retriable branch: expiration is
 * checked first, so an expired attempt completes with a (wrapped) timeout
 * even if the last error was {@code UnknownTopicOrPartitionException}.
 * Unknown topic/partition is otherwise treated as fatal here (no retry),
 * because a deleted topic would keep the commit retrying and stall the
 * rebalance.
 *
 * @param requestAttempt the commit request state (offsets, deadline, future)
 * @param result future completed when the commit finally succeeds or fails
 */
private void autoCommitSyncBeforeRebalanceWithRetries(OffsetCommitRequestState requestAttempt,
                                                      CompletableFuture<Void> result) {
    CompletableFuture<Map<TopicPartition, OffsetAndMetadata>> commitAttempt = requestAutoCommit(requestAttempt);
    commitAttempt.whenComplete((committedOffsets, error) -> {
        if (error == null) {
            result.complete(null);
        } else {
            if (error instanceof RetriableException || isStaleEpochErrorAndValidEpochAvailable(error)) {
                if (requestAttempt.isExpired()) {
                    log.debug("Auto-commit sync before rebalance timed out and won't be retried anymore");
                    result.completeExceptionally(maybeWrapAsTimeoutException(error));
                } else if (error instanceof UnknownTopicOrPartitionException) {
                    log.debug("Auto-commit sync before rebalance failed because topic or partition were deleted");
                    result.completeExceptionally(error);
                } else {
                    // Make sure the auto-commit is retried with the latest offsets
                    log.debug("Member {} will retry auto-commit of latest offsets after receiving retriable error {}",
                        memberInfo.memberId,
                        error.getMessage());
                    requestAttempt.offsets = subscriptions.allConsumed();
                    requestAttempt.resetFuture();
                    // Recursive retry: each attempt schedules the next via this callback.
                    autoCommitSyncBeforeRebalanceWithRetries(requestAttempt, result);
                }
            } else {
                log.debug("Auto-commit sync before rebalance failed with non-retriable error", error);
                result.completeExceptionally(error);
            }
        }
    });
}
|
Commit consumed offsets if auto-commit is enabled, regardless of the auto-commit interval.
This is used for committing offsets before rebalance. This will retry committing
the latest offsets until the request succeeds, fails with a fatal error, or the timeout
expires. Note that:
<ul>
<li>Considers {@link Errors#STALE_MEMBER_EPOCH} as a retriable error, and will retry it
including the member ID and latest member epoch received from the broker.</li>
<li>Considers {@link Errors#UNKNOWN_TOPIC_OR_PARTITION} as a fatal error, and will not
retry it although the error extends RetriableException. The reason is that if a topic
or partition is deleted, rebalance would not finish in time since the auto commit would keep retrying.</li>
</ul>
Also note that this will generate a commit request even if there is another one in-flight,
generated by the auto-commit on the interval logic, to ensure that the latest offsets are
committed before rebalance.
@return Future that will complete when the offsets are successfully committed. It will
complete exceptionally if the commit fails with a non-retriable error, or if the retry
timeout expires.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java
| 342
|
[
"requestAttempt",
"result"
] |
void
| true
| 6
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
apiVersionToText
|
/**
 * Renders a single {@link ApiVersion} as human-readable text, for example
 * {@code "FETCH(1): 0 to 11 [usable: 11]"}. When the API key is known
 * locally, a usability note is appended comparing the node's advertised
 * range against the locally supported range.
 *
 * @param apiVersion the broker-advertised version range to render
 * @return a textual description of the API and its usable version range
 */
private String apiVersionToText(ApiVersion apiVersion) {
    StringBuilder text = new StringBuilder();
    ApiKeys knownKey = null;
    if (ApiKeys.hasId(apiVersion.apiKey())) {
        knownKey = ApiKeys.forId(apiVersion.apiKey());
        text.append(knownKey.name).append("(").append(knownKey.id).append("): ");
    } else {
        text.append("UNKNOWN(").append(apiVersion.apiKey()).append("): ");
    }
    if (apiVersion.minVersion() == apiVersion.maxVersion()) {
        text.append(apiVersion.minVersion());
    } else {
        text.append(apiVersion.minVersion()).append(" to ").append(apiVersion.maxVersion());
    }
    if (knownKey != null) {
        ApiVersion supported = supportedVersions.get(knownKey);
        if (knownKey.latestVersion() < supported.minVersion()) {
            text.append(" [unusable: node too new]");
        } else if (supported.maxVersion() < knownKey.oldestVersion()) {
            text.append(" [unusable: node too old]");
        } else {
            short usableVersion = Utils.min(knownKey.latestVersion(), supported.maxVersion());
            text.append(" [usable: ").append(usableVersion).append("]");
        }
    }
    return text.toString();
}
|
Render a single API version as human-readable text, e.g. {@code "FETCH(1): 0 to 11 [usable: 11]"},
including a usability annotation when the API key is known locally.
@param apiVersion the broker-advertised API version range to render
@return the textual description of the API and its version range
|
java
|
clients/src/main/java/org/apache/kafka/clients/NodeApiVersions.java
| 213
|
[
"apiVersion"
] |
String
| true
| 6
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
appendExportsOfBindingElement
|
/**
 * Appends the exports of a VariableDeclaration or BindingElement to a
 * statement list, returning the (possibly newly allocated) statement list.
 *
 * @param statements A statement list to which export statements are appended;
 * may be `undefined`, in which case a list is allocated on demand.
 * @param decl The declaration whose exports are to be recorded.
 * @param exportSelf Whether to also export the declaration itself.
 */
function appendExportsOfBindingElement(statements: Statement[] | undefined, decl: VariableDeclaration | BindingElement, exportSelf: boolean): Statement[] | undefined {
    if (moduleInfo.exportEquals) {
        // `export =` replaces named exports entirely; nothing to append.
        return statements;
    }
    const name = decl.name;
    if (isBindingPattern(name)) {
        // Destructuring pattern: recurse into each non-omitted element.
        for (const element of name.elements) {
            if (!isOmittedExpression(element)) {
                statements = appendExportsOfBindingElement(statements, element, exportSelf);
            }
        }
        return statements;
    }
    if (isGeneratedIdentifier(name)) {
        // Synthesized names are never exported.
        return statements;
    }
    let excludeName: string | undefined;
    if (exportSelf) {
        statements = appendExportStatement(statements, name, factory.getLocalName(decl));
        excludeName = idText(name);
    }
    return appendExportsOfDeclaration(statements, decl, excludeName);
}
|
Appends the exports of a VariableDeclaration or BindingElement to a statement list,
returning the statement list.
@param statements A statement list to which the down-level export statements are to be
appended. If `statements` is `undefined`, a new array is allocated if statements are
appended.
@param decl The declaration whose exports are to be recorded.
@param exportSelf A value indicating whether to also export the declaration itself.
|
typescript
|
src/compiler/transformers/module/system.ts
| 1,098
|
[
"statements",
"decl",
"exportSelf"
] | true
| 7
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
parseJSDocParameter
|
/**
 * Parses one JSDoc parameter declaration. Only `this` and `new` parameters
 * carry a name (followed by a colon); every other JSDoc parameter is just a
 * type, so `name` may remain undefined.
 */
function parseJSDocParameter(): ParameterDeclaration {
    const pos = getNodePos();
    let name: Identifier | undefined;
    const current = token();
    if (current === SyntaxKind.ThisKeyword || current === SyntaxKind.NewKeyword) {
        name = parseIdentifierName();
        parseExpected(SyntaxKind.ColonToken);
    }
    const parameter = factory.createParameterDeclaration(
        /*modifiers*/ undefined,
        /*dotDotDotToken*/ undefined,
        // TODO(rbuckton): JSDoc parameters don't have names (except `this`/`new`), should we manufacture an empty identifier?
        name!,
        /*questionToken*/ undefined,
        parseJSDocType(),
        /*initializer*/ undefined,
    );
    return finishNode(parameter, pos);
}
|
Parses a single JSDoc parameter declaration. Only `this` and `new` parameters
carry a name (followed by a colon); all other JSDoc parameters are nameless,
so the name may be absent from the resulting node.
@returns The parsed ParameterDeclaration.
|
typescript
|
src/compiler/parser.ts
| 3,889
|
[] | true
| 3
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
include
|
/**
 * Merges the content of the specified repository into this repository.
 * Unknown groups are added as-is; for groups that already exist, properties
 * and sources are merged without overwriting existing entries.
 *
 * @param repository the repository to include
 */
public void include(ConfigurationMetadataRepository repository) {
    for (ConfigurationMetadataGroup group : repository.getAllGroups().values()) {
        ConfigurationMetadataGroup target = this.allGroups.get(group.getId());
        if (target != null) {
            // Merge properties, keeping any existing entry with the same name
            group.getProperties().forEach((name, value) -> target.getProperties().putIfAbsent(name, value));
            // Merge sources, combining entries that share a name
            group.getSources().forEach((name, value) -> addOrMergeSource(target.getSources(), name, value));
        }
        else {
            this.allGroups.put(group.getId(), group);
        }
    }
}
|
Merge the content of the specified repository to this repository.
@param repository the repository to include
|
java
|
configuration-metadata/spring-boot-configuration-metadata/src/main/java/org/springframework/boot/configurationmetadata/SimpleConfigurationMetadataRepository.java
| 83
|
[
"repository"
] |
void
| true
| 2
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
hermline
|
def hermline(off, scl):
    """
    Hermite series whose graph is a straight line.

    Parameters
    ----------
    off, scl : scalars
        The specified line is given by ``off + scl*x``.

    Returns
    -------
    y : ndarray
        This module's representation of the Hermite series for
        ``off + scl*x``.

    See Also
    --------
    numpy.polynomial.polynomial.polyline
    numpy.polynomial.chebyshev.chebline
    numpy.polynomial.legendre.legline
    numpy.polynomial.laguerre.lagline
    numpy.polynomial.hermite_e.hermeline

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermline, hermval
    >>> hermval(0, hermline(3, 2))
    3.0
    >>> hermval(1, hermline(3, 2))
    5.0
    """
    # H_1(x) = 2x, so the coefficient of x is scl/2 in the Hermite basis.
    if scl == 0:
        return np.array([off])
    return np.array([off, scl / 2])
|
Hermite series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Hermite series for
``off + scl*x``.
See Also
--------
numpy.polynomial.polynomial.polyline
numpy.polynomial.chebyshev.chebline
numpy.polynomial.legendre.legline
numpy.polynomial.laguerre.lagline
numpy.polynomial.hermite_e.hermeline
Examples
--------
>>> from numpy.polynomial.hermite import hermline, hermval
>>> hermval(0,hermline(3, 2))
3.0
>>> hermval(1,hermline(3, 2))
5.0
|
python
|
numpy/polynomial/hermite.py
| 216
|
[
"off",
"scl"
] | false
| 3
| 7.04
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
_validate_meta
|
def _validate_meta(meta: str | list[str | list[str]] | None) -> None:
"""
Validate that meta parameter contains only strings or lists of strings.
Parameters
----------
meta : str or list of str or list of list of str or None
The meta parameter to validate.
Raises
------
TypeError
If meta contains elements that are not strings or lists of strings.
"""
if meta is None:
return
if isinstance(meta, str):
return
for item in meta:
if isinstance(item, list):
for subitem in item:
if not isinstance(subitem, str):
raise TypeError(
"All elements in nested meta paths must be strings. "
f"Found {type(subitem).__name__}: {subitem!r}"
)
elif not isinstance(item, str):
raise TypeError(
"All elements in 'meta' must be strings or lists of strings. "
f"Found {type(item).__name__}: {item!r}"
)
|
Validate that meta parameter contains only strings or lists of strings.
Parameters
----------
meta : str or list of str or list of list of str or None
The meta parameter to validate.
Raises
------
TypeError
If meta contains elements that are not strings or lists of strings.
|
python
|
pandas/io/json/_normalize.py
| 270
|
[
"meta"
] |
None
| true
| 8
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
ensureCapacity
|
/**
 * Returns an array containing the same values as {@code array}, but guaranteed
 * to be of at least {@code minLength}. If {@code array} is already long enough
 * it is returned directly; otherwise a new array of size
 * {@code minLength + padding} is returned with trailing zeroes.
 *
 * @param array the source array
 * @param minLength the minimum length the returned array must guarantee
 * @param padding an extra amount to "grow" the array by if growth is necessary
 * @return an array with the values of {@code array} and length at least {@code minLength}
 * @throws IllegalArgumentException if {@code minLength} or {@code padding} is negative
 */
public static short[] ensureCapacity(short[] array, int minLength, int padding) {
    checkArgument(minLength >= 0, "Invalid minLength: %s", minLength);
    checkArgument(padding >= 0, "Invalid padding: %s", padding);
    if (array.length >= minLength) {
        return array;
    }
    return Arrays.copyOf(array, minLength + padding);
}
|
Returns an array containing the same values as {@code array}, but guaranteed to be of a
specified minimum length. If {@code array} already has a length of at least {@code minLength},
it is returned directly. Otherwise, a new array of size {@code minLength + padding} is
returned, containing the values of {@code array}, and zeroes in the remaining places.
@param array the source array
@param minLength the minimum length the returned array must guarantee
@param padding an extra amount to "grow" the array by if growth is necessary
@throws IllegalArgumentException if {@code minLength} or {@code padding} is negative
@return an array containing the values of {@code array}, with guaranteed minimum length {@code
minLength}
|
java
|
android/guava/src/com/google/common/primitives/Shorts.java
| 404
|
[
"array",
"minLength",
"padding"
] | true
| 2
| 7.92
|
google/guava
| 51,352
|
javadoc
| false
|
|
makeMap
|
/**
 * Builds a thread-safe map. Does not alter the state of this {@code MapMaker},
 * so it can be invoked again to create multiple independent maps.
 *
 * @return a concurrent map honoring the configured features; a plain
 *         {@link ConcurrentHashMap} when no custom features were requested
 */
public <K, V> ConcurrentMap<K, V> makeMap() {
    return useCustomMap
            ? MapMakerInternalMap.create(this)
            : new ConcurrentHashMap<>(getInitialCapacity(), 0.75f, getConcurrencyLevel());
}
|
Builds a thread-safe map. This method does not alter the state of this {@code MapMaker}
instance, so it can be invoked again to create multiple independent maps.
<p>The bulk operations {@code putAll}, {@code equals}, and {@code clear} are not guaranteed to
be performed atomically on the returned map. Additionally, {@code size} and {@code
containsValue} are implemented as bulk read operations, and thus may fail to observe concurrent
writes.
@return a serializable concurrent map having the requested features
|
java
|
android/guava/src/com/google/common/collect/MapMaker.java
| 283
|
[] | true
| 2
| 7.92
|
google/guava
| 51,352
|
javadoc
| false
|
|
describeTransactions
|
/**
 * Describe the state of a set of transactional IDs from the respective
 * transaction coordinators, which are dynamically discovered.
 *
 * @param transactionalIds the set of transactional IDs to query
 * @param options options to control the method behavior
 * @return the result
 */
DescribeTransactionsResult describeTransactions(Collection<String> transactionalIds, DescribeTransactionsOptions options);
|
Describe the state of a set of transactional IDs from the respective transaction coordinators,
which are dynamically discovered.
@param transactionalIds The set of transactional IDs to query
@param options Options to control the method behavior
@return The result
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 1,709
|
[
"transactionalIds",
"options"
] |
DescribeTransactionsResult
| true
| 1
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
setSortTextToMemberDeclaredBySpreadAssignment
|
/**
 * Demotes the sort priority of contextual member symbols whose names were
 * already declared by a spread assignment, so they rank lower in completions.
 *
 * @param membersDeclaredBySpreadAssignment Names declared via spread assignments.
 * @param contextualMemberSymbols Candidate member symbols to possibly demote.
 */
function setSortTextToMemberDeclaredBySpreadAssignment(membersDeclaredBySpreadAssignment: Set<string>, contextualMemberSymbols: Symbol[]): void {
    if (!membersDeclaredBySpreadAssignment.size) {
        return;
    }
    for (const symbol of contextualMemberSymbols) {
        if (!membersDeclaredBySpreadAssignment.has(symbol.name)) {
            continue;
        }
        symbolToSortTextMap[getSymbolId(symbol)] = SortText.MemberDeclaredBySpreadAssignment;
    }
}
|
Lowers the sort priority of contextual member symbols whose names were already
declared by a spread assignment, so they appear later in the completion list.
@param membersDeclaredBySpreadAssignment Names declared via spread assignments.
@param contextualMemberSymbols Candidate member symbols whose sort text may be demoted.
|
typescript
|
src/services/completions.ts
| 5,204
|
[
"membersDeclaredBySpreadAssignment",
"contextualMemberSymbols"
] | true
| 3
| 6.56
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
createInternal
|
/**
 * Creates a new {@code KafkaAdminClient} from the given configuration,
 * delegating to the three-argument overload with a {@code null} host resolver.
 *
 * @param config the admin client configuration
 * @param timeoutProcessorFactory the factory used to create timeout processors
 * @return a new {@code KafkaAdminClient}
 */
static KafkaAdminClient createInternal(AdminClientConfig config, TimeoutProcessorFactory timeoutProcessorFactory) {
    return createInternal(config, timeoutProcessorFactory, null);
}
|
Create a new KafkaAdminClient instance from the given configuration,
delegating with a {@code null} host resolver.
@param config the admin client configuration
@param timeoutProcessorFactory the factory used to create timeout processors
@return a new KafkaAdminClient
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
| 515
|
[
"config",
"timeoutProcessorFactory"
] |
KafkaAdminClient
| true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
transpose
|
def transpose(self, *axes: int) -> Self:
"""
Return a transposed view on this array.
Because ExtensionArrays are always 1D, this is a no-op. It is included
for compatibility with np.ndarray.
Returns
-------
ExtensionArray
Examples
--------
>>> pd.array([1, 2, 3]).transpose()
<IntegerArray>
[1, 2, 3]
Length: 3, dtype: Int64
"""
return self[:]
|
Return a transposed view on this array.
Because ExtensionArrays are always 1D, this is a no-op. It is included
for compatibility with np.ndarray.
Returns
-------
ExtensionArray
Examples
--------
>>> pd.array([1, 2, 3]).transpose()
<IntegerArray>
[1, 2, 3]
Length: 3, dtype: Int64
|
python
|
pandas/core/arrays/base.py
| 2,085
|
[
"self"
] |
Self
| true
| 1
| 6.8
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
containsAny
|
/**
 * Tests if the CharSequence contains any character in the given set of characters.
 * <p>
 * A {@code null} CharSequence will return {@code false}. A {@code null} or zero
 * length search array will return {@code false}.
 * </p>
 *
 * @param cs the CharSequence to check, may be null.
 * @param searchChars the chars to search for, may be null.
 * @return {@code true} if any of the chars are found, {@code false} if no match
 *         or null input.
 */
public static boolean containsAny(final CharSequence cs, final char... searchChars) {
    if (isEmpty(cs) || ArrayUtils.isEmpty(searchChars)) {
        return false;
    }
    final int textLength = cs.length();
    final int lastTextIndex = textLength - 1;
    final int lastSearchIndex = searchChars.length - 1;
    for (int i = 0; i < textLength; i++) {
        final char current = cs.charAt(i);
        for (int j = 0; j <= lastSearchIndex; j++) {
            if (searchChars[j] != current) {
                continue;
            }
            // A BMP char matches outright. A high surrogate only matches when
            // it is the final search char, or its low-surrogate partner also
            // matches the next char of both sequences.
            if (!Character.isHighSurrogate(current)
                    || j == lastSearchIndex
                    || i < lastTextIndex && searchChars[j + 1] == cs.charAt(i + 1)) {
                return true;
            }
        }
    }
    return false;
}
|
Tests if the CharSequence contains any character in the given set of characters.
<p>
A {@code null} CharSequence will return {@code false}. A {@code null} or zero length search array will return {@code false}.
</p>
<pre>
StringUtils.containsAny(null, *) = false
StringUtils.containsAny("", *) = false
StringUtils.containsAny(*, null) = false
StringUtils.containsAny(*, []) = false
StringUtils.containsAny("zzabyycdxx", ['z', 'a']) = true
StringUtils.containsAny("zzabyycdxx", ['b', 'y']) = true
StringUtils.containsAny("zzabyycdxx", ['z', 'y']) = true
StringUtils.containsAny("aba", ['z']) = false
</pre>
@param cs the CharSequence to check, may be null.
@param searchChars the chars to search for, may be null.
@return the {@code true} if any of the chars are found, {@code false} if no match or null input.
@since 2.4
@since 3.0 Changed signature from containsAny(String, char[]) to containsAny(CharSequence, char...)
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 1,046
|
[
"cs"
] | true
| 10
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
std
|
def std(
    self,
    ddof: int = 1,
    numeric_only: bool = False,
    engine: Literal["cython", "numba"] | None = None,
    engine_kwargs: dict[str, bool] | None = None,
):
    """
    Calculate the expanding standard deviation.

    Parameters
    ----------
    ddof : int, default 1
        Delta Degrees of Freedom. The divisor used in calculations
        is ``N - ddof``, where ``N`` represents the number of elements.
    numeric_only : bool, default False
        Include only float, int, boolean columns.
    engine : str, default None
        * ``'cython'`` : Runs the operation through C-extensions from cython.
        * ``'numba'`` : Runs the operation through JIT compiled code from numba.
        * ``None`` : Defaults to ``'cython'`` or globally setting
          ``compute.use_numba``
    engine_kwargs : dict, default None
        * For ``'cython'`` engine, there are no accepted ``engine_kwargs``
        * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
          and ``parallel`` dictionary keys. The values must either be ``True`` or
          ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
          ``{'nopython': True, 'nogil': False, 'parallel': False}``

    Returns
    -------
    Series or DataFrame
        Return type is the same as the original object with ``np.float64`` dtype.

    See Also
    --------
    numpy.std : Equivalent method for NumPy array.
    Series.expanding : Calling expanding with Series data.
    DataFrame.expanding : Calling expanding with DataFrames.
    Series.std : Aggregating std for Series.
    DataFrame.std : Aggregating std for DataFrame.

    Notes
    -----
    The default ``ddof`` of 1 used in :meth:`Series.std` is different
    than the default ``ddof`` of 0 in :func:`numpy.std`.
    A minimum of one period is required for the rolling calculation.

    Examples
    --------
    >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
    >>> s.expanding(3).std()
    0         NaN
    1         NaN
    2    0.577350
    3    0.957427
    4    0.894427
    5    0.836660
    6    0.786796
    dtype: float64
    """
    # Pure delegation: the shared rolling/expanding base class implements std.
    delegated_kwargs = {
        "ddof": ddof,
        "numeric_only": numeric_only,
        "engine": engine,
        "engine_kwargs": engine_kwargs,
    }
    return super().std(**delegated_kwargs)
|
Calculate the expanding standard deviation.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
numeric_only : bool, default False
Include only float, int, boolean columns.
engine : str, default None
* ``'cython'`` : Runs the operation through C-extensions from cython.
* ``'numba'`` : Runs the operation through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or globally setting
``compute.use_numba``
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}``
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
numpy.std : Equivalent method for NumPy array.
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.std : Aggregating std for Series.
DataFrame.std : Aggregating std for DataFrame.
Notes
-----
The default ``ddof`` of 1 used in :meth:`Series.std` is different
than the default ``ddof`` of 0 in :func:`numpy.std`.
A minimum of one period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.expanding(3).std()
0 NaN
1 NaN
2 0.577350
3 0.957427
4 0.894427
5 0.836660
6 0.786796
dtype: float64
|
python
|
pandas/core/window/expanding.py
| 722
|
[
"self",
"ddof",
"numeric_only",
"engine",
"engine_kwargs"
] | true
| 1
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
is_terminal
|
def is_terminal() -> bool:
    """
    Detect if Python is running in a terminal.

    Returns True if Python is running in a terminal or False if not.
    """
    try:
        # error: Name 'get_ipython' is not defined
        shell = get_ipython()  # type: ignore[name-defined]
    except NameError:
        # Plain Python interpreter — assume a terminal.
        return True
    # Jupyter kernels expose a `kernel` attribute; terminal IPython does not.
    return not hasattr(shell, "kernel")
|
Detect if Python is running in a terminal.
Returns True if Python is running in a terminal or False if not.
|
python
|
pandas/core/config_init.py
| 296
|
[] |
bool
| true
| 4
| 7.2
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
values
|
/**
 * Writes the given boolean values as an array, or a null value when the
 * input array is {@code null}.
 *
 * @param values the booleans to write, may be {@code null}
 * @return this builder, for chaining
 * @throws IOException on I/O error from the underlying generator
 */
private XContentBuilder values(boolean[] values) throws IOException {
    if (values == null) {
        return nullValue();
    }
    startArray();
    for (int i = 0; i < values.length; i++) {
        value(values[i]);
    }
    endArray();
    return this;
}
|
Writes the given boolean values as a JSON-style array, or as a null value when
the input array is {@code null}.
@param values the booleans to write, may be {@code null}
@return this builder, for chaining
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java
| 406
|
[
"values"
] |
XContentBuilder
| true
| 2
| 7.04
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
s3_read
|
def s3_read(self, remote_log_location: str, return_error: bool = False) -> str:
    """
    Return the log found at the remote_log_location or '' if no logs are found or there is an error.

    :param remote_log_location: the log's location in remote storage
    :param return_error: if True, returns a string error message if an
        error occurs. Otherwise returns '' when an error occurs.
    :return: the log found at the remote_log_location
    """
    try:
        return self.hook.read_key(remote_log_location)
    except Exception as error:
        msg = f"Could not read logs from {remote_log_location} with error: {error}"
        self.log.exception(msg)
        # Surface the error text only when the caller asked for it.
        return msg if return_error else ""
|
Return the log found at the remote_log_location or '' if no logs are found or there is an error.
:param remote_log_location: the log's location in remote storage
:param return_error: if True, returns a string error message if an
error occurs. Otherwise returns '' when an error occurs.
:return: the log found at the remote_log_location
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/log/s3_task_handler.py
| 83
|
[
"self",
"remote_log_location",
"return_error"
] |
str
| true
| 2
| 8.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
get_default_device
|
def get_default_device() -> "torch.device":
    r"""Gets the default ``torch.device`` on which new tensors are allocated.

    Resolution order: the most recently pushed active ``DeviceContext`` mode,
    then the module-level device context, then CPU.
    """
    global _GLOBAL_DEVICE_CONTEXT
    from torch.overrides import _get_current_function_mode_stack
    from torch.utils._device import DeviceContext

    def _with_index(device):
        if device.index is not None:
            return device
        # TODO: Call like get_device_index() method corresponding to
        # each device type
        # Resolve the implicit index by allocating an empty tensor there.
        return torch.tensor([]).device

    # The innermost (most recently pushed) DeviceContext mode wins.
    for mode in reversed(_get_current_function_mode_stack()):
        if isinstance(mode, DeviceContext):
            return _with_index(mode.device)

    device_context = getattr(_GLOBAL_DEVICE_CONTEXT, "device_context", None)
    if device_context is not None:
        return _with_index(device_context.device)
    return torch.device("cpu")
|
r"""Gets the default ``torch.Tensor`` to be allocated on ``device``
|
python
|
torch/__init__.py
| 1,191
|
[] |
"torch.device"
| true
| 5
| 6.4
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
findByBeanName
|
/**
 * Find the AOT service that was loaded for the given bean name.
 *
 * @param beanName the bean name
 * @return the AOT service or {@code null} if none was registered
 */
public @Nullable T findByBeanName(String beanName) {
    T service = this.beans.get(beanName);
    return service;
}
|
Find the AOT service that was loaded for the given bean name.
@param beanName the bean name
@return the AOT service or {@code null}
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/AotServices.java
| 170
|
[
"beanName"
] |
T
| true
| 1
| 6.48
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
checkArgument
|
/**
 * Ensures the truth of an expression involving one or more parameters to the
 * calling method.
 *
 * @param expression a boolean expression
 * @param errorMessage the exception message to use if the check fails; will be
 *        converted to a string using {@link String#valueOf(Object)}
 * @throws IllegalArgumentException if {@code expression} is false
 */
public static void checkArgument(boolean expression, @Nullable Object errorMessage) {
    if (expression) {
        return;
    }
    throw new IllegalArgumentException(Platform.stringValueOf(errorMessage));
}
|
Ensures the truth of an expression involving one or more parameters to the calling method.
@param expression a boolean expression
@param errorMessage the exception message to use if the check fails; will be converted to a
string using {@link String#valueOf(Object)}
@throws IllegalArgumentException if {@code expression} is false
|
java
|
android/guava/src/com/google/common/base/Preconditions.java
| 139
|
[
"expression",
"errorMessage"
] |
void
| true
| 2
| 6.08
|
google/guava
| 51,352
|
javadoc
| false
|
inclusiveBetween
|
/**
 * Validate that the specified primitive value falls between the two inclusive
 * values specified; otherwise, throws an exception with the specified message.
 *
 * <pre>Validate.inclusiveBetween(0, 2, 1, "Not in range");</pre>
 *
 * @param start the inclusive start value.
 * @param end the inclusive end value.
 * @param value the value to validate.
 * @param message the exception message if invalid, not null.
 * @throws IllegalArgumentException if the value falls outside the boundaries.
 */
public static void inclusiveBetween(final long start, final long end, final long value, final String message) {
    // TODO when breaking BC, consider returning value
    final boolean inRange = start <= value && value <= end;
    if (!inRange) {
        throw new IllegalArgumentException(message);
    }
}
|
Validate that the specified primitive value falls between the two
inclusive values specified; otherwise, throws an exception with the
specified message.
<pre>Validate.inclusiveBetween(0, 2, 1, "Not in range");</pre>
@param start the inclusive start value.
@param end the inclusive end value.
@param value the value to validate.
@param message the exception message if invalid, not null.
@throws IllegalArgumentException if the value falls outside the boundaries.
@since 3.3
|
java
|
src/main/java/org/apache/commons/lang3/Validate.java
| 332
|
[
"start",
"end",
"value",
"message"
] |
void
| true
| 3
| 6.56
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
get
|
/**
 * Gets the result of the background initialization, blocking until it is
 * complete. Runtime exceptions from the background task propagate directly;
 * checked exceptions (including {@link InterruptedException}) are wrapped in
 * a {@link ConcurrentException}.
 *
 * @return the object produced by this initializer
 * @throws ConcurrentException if a checked exception occurred during
 *         background processing
 */
@Override
public T get() throws ConcurrentException {
    try {
        return getFuture().get();
    } catch (final ExecutionException execex) {
        // Rethrows the cause as an unchecked or ConcurrentException; the
        // return below is therefore never actually reached.
        ConcurrentUtils.handleCause(execex);
        return null; // should not be reached
    } catch (final InterruptedException iex) {
        // reset interrupted state
        Thread.currentThread().interrupt();
        throw new ConcurrentException(iex);
    }
}
|
Gets the result of the background initialization. This method blocks
until initialization is complete. If the background processing caused a
runtime exception, it is directly thrown by this method. Checked
exceptions, including {@link InterruptedException} are wrapped in a
{@link ConcurrentException}. Calling this method before {@link #start()}
was called causes an {@link IllegalStateException} exception to be
thrown.
@return the object produced by this initializer.
@throws ConcurrentException if a checked exception occurred during
background processing.
@throws IllegalStateException if {@link #start()} has not been called.
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/BackgroundInitializer.java
| 256
|
[] |
T
| true
| 3
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
clearCache
|
/**
 * Clears all cached jar file entries held by this instance.
 */
void clearCache() {
    this.cache.clear();
}
|
Clears all cached jar file entries held by this instance.
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/jar/UrlJarFiles.java
| 138
|
[] |
void
| true
| 1
| 6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
paddedPartition
|
/**
 * Divides an iterator into unmodifiable sublists of the given size, padding
 * the final list with {@code null} values if necessary.
 *
 * @param iterator the iterator to return a partitioned view of
 * @param size the desired size of each partition
 * @return an iterator of immutable lists of {@code size} elements each (the
 *         final list may have trailing null elements)
 * @throws IllegalArgumentException if {@code size} is nonpositive
 */
public static <T extends @Nullable Object>
UnmodifiableIterator<List<@Nullable T>> paddedPartition(Iterator<T> iterator, int size) {
    return partitionImpl(iterator, size, true);
}
|
Divides an iterator into unmodifiable sublists of the given size, padding the final iterator
with null values if necessary. For example, partitioning an iterator containing {@code [a, b,
c, d, e]} with a partition size of 3 yields {@code [[a, b, c], [d, e, null]]} -- an outer
iterator containing two inner lists of three elements each, all in the original order.
<p>The returned lists implement {@link java.util.RandomAccess}.
@param iterator the iterator to return a partitioned view of
@param size the desired size of each partition
@return an iterator of immutable lists containing the elements of {@code iterator} divided into
partitions (the final iterable may have trailing null elements)
@throws IllegalArgumentException if {@code size} is nonpositive
|
java
|
android/guava/src/com/google/common/collect/Iterators.java
| 621
|
[
"iterator",
"size"
] | true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
|
writeTo
|
public void writeTo(OutputStream out) throws IOException {
MultiValueMap<Layer, String> index = new LinkedMultiValueMap<>();
this.root.buildIndex("", index);
index.values().forEach(Collections::sort);
BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out, StandardCharsets.UTF_8));
for (Layer layer : this.layers) {
List<String> names = index.get(layer);
writer.write("- \"" + layer + "\":\n");
if (names != null) {
for (String name : names) {
writer.write(" - \"" + name + "\"\n");
}
}
}
writer.flush();
}
|
Write the layer index to an output stream.
@param out the destination stream
@throws IOException on IO error
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/LayersIndex.java
| 93
|
[
"out"
] |
void
| true
| 2
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
needsRefresh
|
protected boolean needsRefresh(Class<?> clazz) {
return (this.targetClass != clazz);
}
|
Determine whether this metadata instance needs to be refreshed.
@param clazz the current target class
@return {@code true} indicating a refresh, {@code false} otherwise
@since 5.2.4
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/annotation/InjectionMetadata.java
| 119
|
[
"clazz"
] | true
| 1
| 6.64
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
transitionToSendingLeaveGroup
|
public void transitionToSendingLeaveGroup(boolean dueToExpiredPollTimer) {
if (state == MemberState.FATAL) {
log.warn("Member {} with epoch {} won't send leave group request because it is in " +
"FATAL state", memberId, memberEpoch);
return;
}
if (state == MemberState.UNSUBSCRIBED) {
log.warn("Member {} won't send leave group request because it is already out of the group.",
memberId);
return;
}
if (dueToExpiredPollTimer) {
this.isPollTimerExpired = true;
// Briefly transition through prepare leaving. The member does not have to release
// any assignment before sending the leave group given that is stale. It will invoke
// onPartitionsLost after sending the leave group on the STALE state.
transitionTo(MemberState.PREPARE_LEAVING);
}
updateMemberEpoch(leaveGroupEpoch());
currentAssignment = LocalAssignment.NONE;
transitionTo(MemberState.LEAVING);
}
|
Reset member epoch to the value required for the leave the group heartbeat request, and
transition to the {@link MemberState#LEAVING} state so that a heartbeat request is sent
out with it.
@param dueToExpiredPollTimer True if the leave group is due to an expired poll timer. This
will indicate that the member must remain STALE after leaving,
until it releases its assignment and the timer is reset.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java
| 646
|
[
"dueToExpiredPollTimer"
] |
void
| true
| 4
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
ready
|
boolean ready(Node node, long now);
|
Initiate a connection to the given node (if necessary), and return true if already connected. The readiness of a
node will change only when poll is invoked.
@param node The node to connect to.
@param now The current time
@return true iff we are ready to immediately initiate the sending of another request to the given node.
|
java
|
clients/src/main/java/org/apache/kafka/clients/KafkaClient.java
| 48
|
[
"node",
"now"
] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
mean_squared_error
|
def mean_squared_error(
y_true,
y_pred,
*,
sample_weight=None,
multioutput="uniform_average",
):
"""Mean squared error regression loss.
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
(n_outputs,), default='uniform_average'
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or array of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred)
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
array([0.41666667, 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
0.825...
"""
xp, _ = get_namespace(y_true, y_pred, sample_weight, multioutput)
_, y_true, y_pred, sample_weight, multioutput = (
_check_reg_targets_with_floating_dtype(
y_true, y_pred, sample_weight, multioutput, xp=xp
)
)
output_errors = _average(
(y_true - y_pred) ** 2, axis=0, weights=sample_weight, xp=xp
)
if isinstance(multioutput, str):
if multioutput == "raw_values":
return output_errors
elif multioutput == "uniform_average":
# pass None as weights to _average: uniform mean
multioutput = None
# Average across the outputs (if needed).
# The second call to `_average` should always return
# a scalar array that we convert to a Python float to
# consistently return the same eager evaluated value.
# Therefore, `axis=None`.
mean_squared_error = _average(output_errors, weights=multioutput, xp=xp)
return float(mean_squared_error)
|
Mean squared error regression loss.
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
(n_outputs,), default='uniform_average'
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or array of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred)
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
array([0.41666667, 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
0.825...
|
python
|
sklearn/metrics/_regression.py
| 522
|
[
"y_true",
"y_pred",
"sample_weight",
"multioutput"
] | false
| 4
| 7.28
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
object_exists
|
def object_exists(self, key: str, bucket_name: str | None = None) -> bool:
"""
Check if object exists.
:param key: the path of the object
:param bucket_name: the name of the bucket
:return: True if it exists and False if not.
"""
try:
return self.get_bucket(bucket_name).object_exists(key)
except ClientError as e:
self.log.error(e.message)
return False
|
Check if object exists.
:param key: the path of the object
:param bucket_name: the name of the bucket
:return: True if it exists and False if not.
|
python
|
providers/alibaba/src/airflow/providers/alibaba/cloud/hooks/oss.py
| 117
|
[
"self",
"key",
"bucket_name"
] |
bool
| true
| 1
| 6.88
|
apache/airflow
| 43,597
|
sphinx
| false
|
load
|
static Zip64EndOfCentralDirectoryRecord load(DataBlock dataBlock, Zip64EndOfCentralDirectoryLocator locator)
throws IOException {
if (locator == null) {
return null;
}
ByteBuffer buffer = ByteBuffer.allocate(MINIMUM_SIZE);
buffer.order(ByteOrder.LITTLE_ENDIAN);
long size = locator.pos() - locator.offsetToZip64EndOfCentralDirectoryRecord();
long pos = locator.pos() - size;
debug.log("Loading Zip64EndOfCentralDirectoryRecord from position %s size %s", pos, size);
dataBlock.readFully(buffer, pos);
buffer.rewind();
int signature = buffer.getInt();
if (signature != SIGNATURE) {
debug.log("Found incorrect Zip64EndOfCentralDirectoryRecord signature %s at position %s", signature, pos);
throw new IOException("Zip64 'End Of Central Directory Record' not found at position " + pos
+ ". Zip file is corrupt or includes prefixed bytes which are not supported with Zip64 files");
}
return new Zip64EndOfCentralDirectoryRecord(size, buffer.getLong(), buffer.getShort(), buffer.getShort(),
buffer.getInt(), buffer.getInt(), buffer.getLong(), buffer.getLong(), buffer.getLong(),
buffer.getLong());
}
|
Load the {@link Zip64EndOfCentralDirectoryRecord} from the given data block based
on the offset given in the locator.
@param dataBlock the source data block
@param locator the {@link Zip64EndOfCentralDirectoryLocator} or {@code null}
@return a new {@link ZipCentralDirectoryFileHeaderRecord} instance or {@code null}
if the locator is {@code null}
@throws IOException on I/O error
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/Zip64EndOfCentralDirectoryRecord.java
| 66
|
[
"dataBlock",
"locator"
] |
Zip64EndOfCentralDirectoryRecord
| true
| 3
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
repeat
|
function repeat<P extends L.Update<P, 0, R>, R>(f: (...p: P) => R, again: (...p: F.NoInfer<P>) => boolean) {
return (...p: P) => {
// ts does not understand
const pClone: any = [...p]
while (again(...pClone)) {
pClone[0] = f(...pClone)
}
return pClone[0] as R
}
}
|
Repeat an {@link Accumulable} function.
@param f to be repeated until...
@param again return false to exit
@returns
@example
```ts
// concats `[2]` 10 times on `[1]`
repeat(concat, times(10))([1], [2])
```
|
typescript
|
helpers/blaze/repeat.ts
| 22
|
[
"f",
"again"
] | false
| 2
| 9.2
|
prisma/prisma
| 44,834
|
jsdoc
| false
|
|
writeLegacyCompressedWrapperHeader
|
private int writeLegacyCompressedWrapperHeader() {
ensureOpenForRecordBatchWrite();
ByteBuffer buffer = bufferStream.buffer();
int pos = buffer.position();
buffer.position(initialPosition);
int wrapperSize = pos - initialPosition - Records.LOG_OVERHEAD;
int writtenCompressed = wrapperSize - LegacyRecord.recordOverhead(magic);
AbstractLegacyRecordBatch.writeHeader(buffer, lastOffset, wrapperSize);
long timestamp = timestampType == TimestampType.LOG_APPEND_TIME ? logAppendTime : maxTimestamp;
LegacyRecord.writeCompressedRecordHeader(buffer, magic, wrapperSize, timestamp, compression.type(), timestampType);
buffer.position(pos);
return writtenCompressed;
}
|
Write the header to the legacy batch.
@return the written compressed bytes.
|
java
|
clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java
| 435
|
[] | true
| 2
| 8.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
_setitem_single_block
|
def _setitem_single_block(self, indexer, value, name: str) -> None:
"""
_setitem_with_indexer for the case when we have a single Block.
"""
from pandas import Series
if (isinstance(value, ABCSeries) and name != "iloc") or isinstance(value, dict):
# TODO(EA): ExtensionBlock.setitem this causes issues with
# setting for extensionarrays that store dicts. Need to decide
# if it's worth supporting that.
value = self._align_series(indexer, Series(value))
info_axis = self.obj._info_axis_number
item_labels = self.obj._get_axis(info_axis)
if isinstance(indexer, tuple):
# if we are setting on the info axis ONLY
# set using those methods to avoid block-splitting
# logic here
if (
self.ndim == len(indexer) == 2
and is_integer(indexer[1])
and com.is_null_slice(indexer[0])
):
col = item_labels[indexer[info_axis]]
if len(item_labels.get_indexer_for([col])) == 1:
# e.g. test_loc_setitem_empty_append_expands_rows
loc = item_labels.get_loc(col)
self._setitem_single_column(loc, value, indexer[0])
return
indexer = maybe_convert_ix(*indexer) # e.g. test_setitem_frame_align
if isinstance(value, ABCDataFrame) and name != "iloc":
value = self._align_frame(indexer, value)._values
# actually do the set
self.obj._mgr = self.obj._mgr.setitem(indexer=indexer, value=value)
|
_setitem_with_indexer for the case when we have a single Block.
|
python
|
pandas/core/indexing.py
| 2,183
|
[
"self",
"indexer",
"value",
"name"
] |
None
| true
| 11
| 6
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
doWithSlice
|
static void doWithSlice(List<Registration> registrations, int sliceSize,
BiConsumer<Integer, Integer> action) {
int index = 0;
int end = 0;
while (end < registrations.size()) {
int start = index * sliceSize;
end = Math.min(start + sliceSize, registrations.size());
action.accept(start, end);
index++;
}
}
|
Invoke an action for each slice of the given {@code registrations}. The
{@code action} is invoked for each slice with the start and end index of the
given list of registrations. Elements to process can be retrieved using
{@link List#subList(int, int)}.
@param registrations the registrations to process
@param sliceSize the size of a slice
@param action the action to invoke for each slice
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/BeanRegistrationsAotContribution.java
| 172
|
[
"registrations",
"sliceSize",
"action"
] |
void
| true
| 2
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
update
|
@GuardedBy("this")
private void update(int len) throws IOException {
if (memory != null && (memory.getCount() + len > fileThreshold)) {
File temp = TempFileCreator.INSTANCE.createTempFile("FileBackedOutputStream");
if (resetOnFinalize) {
// Finalizers are not guaranteed to be called on system shutdown;
// this is insurance.
temp.deleteOnExit();
}
FileOutputStream transfer = null;
try {
transfer = new FileOutputStream(temp);
transfer.write(memory.getBuffer(), 0, memory.getCount());
transfer.flush();
// We've successfully transferred the data; switch to writing to file
out = transfer;
} catch (IOException e) {
if (transfer != null) {
try {
transfer.close();
} catch (IOException closeException) {
e.addSuppressed(closeException);
}
}
temp.delete();
throw e;
}
file = temp;
memory = null;
}
}
|
Checks if writing {@code len} bytes would go over threshold, and switches to file buffering if
so.
|
java
|
android/guava/src/com/google/common/io/FileBackedOutputStream.java
| 232
|
[
"len"
] |
void
| true
| 7
| 6
|
google/guava
| 51,352
|
javadoc
| false
|
finalizedFeaturesEpoch
|
public long finalizedFeaturesEpoch() {
return finalizedFeaturesEpoch;
}
|
Get the version information for a given API.
@param apiKey The api key to lookup
@return The api version information from the broker or null if it is unsupported
|
java
|
clients/src/main/java/org/apache/kafka/clients/NodeApiVersions.java
| 265
|
[] | true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
saturatedMultiply
|
@SuppressWarnings("ShortCircuitBoolean")
public static long saturatedMultiply(long a, long b) {
// see checkedMultiply for explanation
int leadingZeros =
Long.numberOfLeadingZeros(a)
+ Long.numberOfLeadingZeros(~a)
+ Long.numberOfLeadingZeros(b)
+ Long.numberOfLeadingZeros(~b);
if (leadingZeros > Long.SIZE + 1) {
return a * b;
}
// the return value if we will overflow (which we calculate by overflowing a long :) )
long limit = Long.MAX_VALUE + ((a ^ b) >>> (Long.SIZE - 1));
if (leadingZeros < Long.SIZE | (a < 0 & b == Long.MIN_VALUE)) {
// overflow
return limit;
}
long result = a * b;
if (a == 0 || result / a == b) {
return result;
}
return limit;
}
|
Returns the product of {@code a} and {@code b} unless it would overflow or underflow in which
case {@code Long.MAX_VALUE} or {@code Long.MIN_VALUE} is returned, respectively.
@since 20.0
|
java
|
android/guava/src/com/google/common/math/LongMath.java
| 678
|
[
"a",
"b"
] | true
| 5
| 6
|
google/guava
| 51,352
|
javadoc
| false
|
|
get_namespace
|
def get_namespace(
self, namespace: str, lowercase: bool = True, trim_namespace: bool = True
) -> dict[str, t.Any]:
"""Returns a dictionary containing a subset of configuration options
that match the specified namespace/prefix. Example usage::
app.config['IMAGE_STORE_TYPE'] = 'fs'
app.config['IMAGE_STORE_PATH'] = '/var/app/images'
app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com'
image_store_config = app.config.get_namespace('IMAGE_STORE_')
The resulting dictionary `image_store_config` would look like::
{
'type': 'fs',
'path': '/var/app/images',
'base_url': 'http://img.website.com'
}
This is often useful when configuration options map directly to
keyword arguments in functions or class constructors.
:param namespace: a configuration namespace
:param lowercase: a flag indicating if the keys of the resulting
dictionary should be lowercase
:param trim_namespace: a flag indicating if the keys of the resulting
dictionary should not include the namespace
.. versionadded:: 0.11
"""
rv = {}
for k, v in self.items():
if not k.startswith(namespace):
continue
if trim_namespace:
key = k[len(namespace) :]
else:
key = k
if lowercase:
key = key.lower()
rv[key] = v
return rv
|
Returns a dictionary containing a subset of configuration options
that match the specified namespace/prefix. Example usage::
app.config['IMAGE_STORE_TYPE'] = 'fs'
app.config['IMAGE_STORE_PATH'] = '/var/app/images'
app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com'
image_store_config = app.config.get_namespace('IMAGE_STORE_')
The resulting dictionary `image_store_config` would look like::
{
'type': 'fs',
'path': '/var/app/images',
'base_url': 'http://img.website.com'
}
This is often useful when configuration options map directly to
keyword arguments in functions or class constructors.
:param namespace: a configuration namespace
:param lowercase: a flag indicating if the keys of the resulting
dictionary should be lowercase
:param trim_namespace: a flag indicating if the keys of the resulting
dictionary should not include the namespace
.. versionadded:: 0.11
|
python
|
src/flask/config.py
| 323
|
[
"self",
"namespace",
"lowercase",
"trim_namespace"
] |
dict[str, t.Any]
| true
| 6
| 6.24
|
pallets/flask
| 70,946
|
sphinx
| false
|
deriveBeanName
|
String deriveBeanName(MethodMetadata beanMethod, @Nullable String beanName);
|
Derive a default bean name for the given {@link Bean @Bean} method,
providing the {@link Bean#name() name} attribute specified.
@param beanMethod the method metadata for the {@link Bean @Bean} method
@param beanName the {@link Bean#name() name} attribute or {@code null} if non is specified
@return the default bean name to use
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/ConfigurationBeanNameGenerator.java
| 45
|
[
"beanMethod",
"beanName"
] |
String
| true
| 1
| 6.32
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getFirstEnum
|
public static <E extends Enum<E>> E getFirstEnum(final Class<E> enumClass, final int value, final ToIntFunction<E> toIntFunction, final E defaultEnum) {
if (!isEnum(enumClass)) {
return defaultEnum;
}
return stream(enumClass).filter(e -> value == toIntFunction.applyAsInt(e)).findFirst().orElse(defaultEnum);
}
|
Gets the enum for the class and value, returning {@code defaultEnum} if not found.
<p>
This method differs from {@link Enum#valueOf} in that it does not throw an exception for an invalid enum name and performs case insensitive matching of
the name.
</p>
@param <E> the type of the enumeration.
@param enumClass the class of the enum to query, not null.
@param value the enum name, null returns default enum.
@param toIntFunction the function that gets an int for an enum for comparison to {@code value}.
@param defaultEnum the default enum.
@return an enum, default enum if not found.
@since 3.18.0
|
java
|
src/main/java/org/apache/commons/lang3/EnumUtils.java
| 345
|
[
"enumClass",
"value",
"toIntFunction",
"defaultEnum"
] |
E
| true
| 2
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
_bind_dims_to_size
|
def _bind_dims_to_size(sz: int, sd: int, dims: list, nsz: list, nsd: list) -> None:
"""
Bind dimensions to size and calculate proper strides for dim packs.
"""
from . import DimensionBindError
rhs_prod = 1
for i, dim in enumerate(dims):
if not dim.is_bound:
# Check for multiple unbound dimensions
for j in range(i + 1, len(dims)):
if not dims[j].is_bound:
raise DimensionBindError(
f"cannot infer the sizes of two dimensions at once {dim!r} and {dims[j]!r}"
)
rhs_prod *= dims[j].size
# Calculate the size for this unbound dimension
if sz % rhs_prod != 0:
tup = tuple(dim.size if dim.is_bound else "?" for dim in dims)
raise DimensionBindError(
f"inferred dimension does not evenly fit into larger dimension: {sz} vs {tup}"
)
inferred_size = sz // rhs_prod
dim.size = inferred_size
rhs_prod = sz
break
else:
rhs_prod *= dim.size
# Final validation that dimensions match
if rhs_prod != sz:
tup = tuple(dims)
raise DimensionBindError(
f"Dimension sizes to do not match ({sz} != {rhs_prod}) when matching dimension pack {tup}"
)
# Calculate new sizes and strides for each dimension in the pack
# First calculate all strides by iterating in reverse
new_strides = [0] * len(dims)
current_stride = sd
for i in reversed(range(len(dims))):
new_strides[i] = current_stride
current_stride *= dims[i].size
# Then append sizes and strides in forward order
for i in range(len(dims)):
nsz.append(dims[i].size)
nsd.append(new_strides[i])
|
Bind dimensions to size and calculate proper strides for dim packs.
|
python
|
functorch/dim/_getsetitem.py
| 57
|
[
"sz",
"sd",
"dims",
"nsz",
"nsd"
] |
None
| true
| 11
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
builder
|
public static RestClientBuilder builder(HttpHost... hosts) {
if (hosts == null || hosts.length == 0) {
throw new IllegalArgumentException("hosts must not be null nor empty");
}
List<Node> nodes = Arrays.stream(hosts).map(Node::new).collect(Collectors.toList());
return new RestClientBuilder(nodes);
}
|
Returns a new {@link RestClientBuilder} to help with {@link RestClient} creation.
Creates a new builder instance and sets the nodes that the client will send requests to.
<p>
You can use this if you do not have metadata up front about the nodes. If you do, prefer
{@link #builder(Node...)}.
@see Node#Node(HttpHost)
|
java
|
client/rest/src/main/java/org/elasticsearch/client/RestClient.java
| 214
|
[] |
RestClientBuilder
| true
| 3
| 6.88
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
onSubstituteNode
|
function onSubstituteNode(hint: EmitHint, node: Node) {
node = previousOnSubstituteNode(hint, node);
if (hint === EmitHint.Expression) {
return substituteExpression(node as Expression);
}
return node;
}
|
Hooks node substitutions.
@param hint The context for the emitter.
@param node The node to substitute.
|
typescript
|
src/compiler/transformers/classFields.ts
| 3,246
|
[
"hint",
"node"
] | false
| 2
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
getCandidateTypes
|
public Set<String> getCandidateTypes(String basePackage, String stereotype) {
List<Entry> candidates = this.index.get(stereotype);
if (candidates != null) {
return candidates.stream()
.filter(entry -> entry.match(basePackage))
.map(entry -> entry.type)
.collect(Collectors.toSet());
}
return Collections.emptySet();
}
|
Return the candidate types that are associated with the specified stereotype.
@param basePackage the package to check for candidates
@param stereotype the stereotype to use
@return the candidate types associated with the specified {@code stereotype}
or an empty set if none has been found for the specified {@code basePackage}
|
java
|
spring-context/src/main/java/org/springframework/context/index/CandidateComponentsIndex.java
| 148
|
[
"basePackage",
"stereotype"
] | true
| 2
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
close
|
@Override
public void close() {
if (closed == false) {
closed = true;
arrays.adjustBreaker(-SHALLOW_SIZE);
Releasables.close(centroids, counts, aggregatedCounts, tree);
}
}
|
Return the total count of points that have been added to the tree.
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/AVLGroupTree.java
| 325
|
[] |
void
| true
| 2
| 6.88
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
toObject
|
public static Double[] toObject(final double[] array) {
if (array == null) {
return null;
}
if (array.length == 0) {
return EMPTY_DOUBLE_OBJECT_ARRAY;
}
return setAll(new Double[array.length], i -> Double.valueOf(array[i]));
}
|
Converts an array of primitive doubles to objects.
<p>This method returns {@code null} for a {@code null} input array.</p>
@param array a {@code double} array.
@return a {@link Double} array, {@code null} if null array input.
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 8,726
|
[
"array"
] | true
| 3
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
readAndCheckByte
|
private byte readAndCheckByte() throws IOException, EOFException {
int b1 = in.read();
if (b1 == -1) {
throw new EOFException();
}
return (byte) b1;
}
|
Reads a byte from the input stream checking that the end of file (EOF) has not been
encountered.
@return byte read from input
@throws IOException if an error is encountered while reading
@throws EOFException if the end of file (EOF) is encountered.
|
java
|
android/guava/src/com/google/common/io/LittleEndianDataInputStream.java
| 230
|
[] | true
| 2
| 8.24
|
google/guava
| 51,352
|
javadoc
| false
|
|
closeExpectNoException
|
public static void closeExpectNoException(Releasable... releasables) {
try {
close(releasables);
} catch (RuntimeException e) {
assert false : e;
throw e;
}
}
|
Release the provided {@link Releasable}s expecting no exception to by thrown by any of them.
|
java
|
libs/core/src/main/java/org/elasticsearch/core/Releasables.java
| 60
|
[] |
void
| true
| 2
| 6.88
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
lookupKeys
|
Set<K> lookupKeys();
|
The initial set of lookup keys. Although this will usually match the fulfillment
keys, it does not necessarily have to. For example, in the case of
{@link AllBrokersStrategy.AllBrokersFuture},
we use the lookup phase in order to discover the set of keys that will be searched
during the fulfillment phase.
@return non-empty set of initial lookup keys
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminApiFuture.java
| 39
|
[] | true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
typeName
|
public abstract String typeName();
|
Short name of the type to identify it in documentation;
@return the name of the type
|
java
|
clients/src/main/java/org/apache/kafka/common/protocol/types/Type.java
| 105
|
[] |
String
| true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
create_fargate_profile
|
def create_fargate_profile(
self,
clusterName: str,
fargateProfileName: str | None,
podExecutionRoleArn: str | None,
selectors: list,
**kwargs,
) -> dict:
"""
Create an AWS Fargate profile for an Amazon EKS cluster.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.create_fargate_profile`
:param clusterName: The name of the Amazon EKS cluster to apply the Fargate profile to.
:param fargateProfileName: The name of the Fargate profile.
:param podExecutionRoleArn: The Amazon Resource Name (ARN) of the pod execution role to
use for pods that match the selectors in the Fargate profile.
:param selectors: The selectors to match for pods to use this Fargate profile.
:return: Returns descriptive information about the created Fargate profile.
"""
eks_client = self.conn
response = eks_client.create_fargate_profile(
clusterName=clusterName,
fargateProfileName=fargateProfileName,
podExecutionRoleArn=podExecutionRoleArn,
selectors=selectors,
**kwargs,
)
self.log.info(
"Created AWS Fargate profile with the name %s for Amazon EKS cluster %s.",
response.get("fargateProfile").get("fargateProfileName"),
response.get("fargateProfile").get("clusterName"),
)
return response
|
Create an AWS Fargate profile for an Amazon EKS cluster.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.create_fargate_profile`
:param clusterName: The name of the Amazon EKS cluster to apply the Fargate profile to.
:param fargateProfileName: The name of the Fargate profile.
:param podExecutionRoleArn: The Amazon Resource Name (ARN) of the pod execution role to
use for pods that match the selectors in the Fargate profile.
:param selectors: The selectors to match for pods to use this Fargate profile.
:return: Returns descriptive information about the created Fargate profile.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/eks.py
| 210
|
[
"self",
"clusterName",
"fargateProfileName",
"podExecutionRoleArn",
"selectors"
] |
dict
| true
| 1
| 6.4
|
apache/airflow
| 43,597
|
sphinx
| false
|
indexOfAny
|
public static int indexOfAny(final CharSequence cs, final String searchChars) {
if (isEmpty(cs) || isEmpty(searchChars)) {
return INDEX_NOT_FOUND;
}
return indexOfAny(cs, searchChars.toCharArray());
}
|
Search a CharSequence to find the first index of any character in the given set of characters.
<p>
A {@code null} String will return {@code -1}. A {@code null} search string will return {@code -1}.
</p>
<pre>
StringUtils.indexOfAny(null, *) = -1
StringUtils.indexOfAny("", *) = -1
StringUtils.indexOfAny(*, null) = -1
StringUtils.indexOfAny(*, "") = -1
StringUtils.indexOfAny("zzabyycdxx", "za") = 0
StringUtils.indexOfAny("zzabyycdxx", "by") = 3
StringUtils.indexOfAny("aba", "z") = -1
</pre>
@param cs the CharSequence to check, may be null.
@param searchChars the chars to search for, may be null.
@return the index of any of the chars, -1 if no match or null input.
@since 2.0
@since 3.0 Changed signature from indexOfAny(String, String) to indexOfAny(CharSequence, String)
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 2,834
|
[
"cs",
"searchChars"
] | true
| 3
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
withClientSslSupport
|
public ConfigDef withClientSslSupport() {
SslConfigs.addClientSslSupport(this);
return this;
}
|
Add standard SSL client configuration options.
@return this
|
java
|
clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
| 493
|
[] |
ConfigDef
| true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
filterPreferredAddresses
|
static List<InetAddress> filterPreferredAddresses(InetAddress[] allAddresses) {
List<InetAddress> preferredAddresses = new ArrayList<>();
Class<? extends InetAddress> clazz = null;
for (InetAddress address : allAddresses) {
if (clazz == null) {
clazz = address.getClass();
}
if (clazz.isInstance(address)) {
preferredAddresses.add(address);
}
}
return preferredAddresses;
}
|
Return a list containing the first address in `allAddresses` and subsequent addresses
that are a subtype of the first address.
The outcome is that all returned addresses are either IPv4 or IPv6 (InetAddress has two
subclasses: Inet4Address and Inet6Address).
|
java
|
clients/src/main/java/org/apache/kafka/clients/ClientUtils.java
| 139
|
[
"allAddresses"
] | true
| 3
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
stream
|
public static <T> Stream<T> stream(final Class<T> clazz) {
return clazz != null ? Streams.of(clazz.getEnumConstants()) : Stream.empty();
}
|
Returns a sequential ordered stream whose elements are the given class' enum values.
@param <T> the type of stream elements.
@param clazz the class containing the enum values, may be null.
@return the new stream, empty of {@code clazz} is null.
@since 3.18.0
@see Class#getEnumConstants()
|
java
|
src/main/java/org/apache/commons/lang3/EnumUtils.java
| 466
|
[
"clazz"
] | true
| 2
| 8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
countBeansIncludingAncestors
|
public static int countBeansIncludingAncestors(ListableBeanFactory lbf) {
return beanNamesIncludingAncestors(lbf).length;
}
|
Count all beans in any hierarchy in which this factory participates.
Includes counts of ancestor bean factories.
<p>Beans that are "overridden" (specified in a descendant factory
with the same name) are only counted once.
@param lbf the bean factory
@return count of beans including those defined in ancestor factories
@see #beanNamesIncludingAncestors
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/BeanFactoryUtils.java
| 138
|
[
"lbf"
] | true
| 1
| 6.64
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
mutex
|
private Object mutex() {
Object mutex = mutexDoNotUseDirectly;
if (mutex == null) {
synchronized (this) {
mutex = mutexDoNotUseDirectly;
if (mutex == null) {
mutexDoNotUseDirectly = mutex = new Object();
}
}
}
return mutex;
}
|
The underlying timer; used both to measure elapsed time and sleep as necessary. A separate
object to facilitate testing.
|
java
|
android/guava/src/com/google/common/util/concurrent/RateLimiter.java
| 222
|
[] |
Object
| true
| 3
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
generate_presigned_url
|
def generate_presigned_url(
self,
client_method: str,
params: dict | None = None,
expires_in: int = 3600,
http_method: str | None = None,
) -> str | None:
"""
Generate a presigned url given a client, its method, and arguments.
.. seealso::
- :external+boto3:py:meth:`S3.Client.generate_presigned_url`
:param client_method: The client method to presign for.
:param params: The parameters normally passed to ClientMethod.
:param expires_in: The number of seconds the presigned url is valid for.
By default it expires in an hour (3600 seconds).
:param http_method: The http method to use on the generated url.
By default, the http method is whatever is used in the method's model.
:return: The presigned url.
"""
s3_client = self.get_conn()
try:
return s3_client.generate_presigned_url(
ClientMethod=client_method,
Params=params,
ExpiresIn=expires_in,
HttpMethod=http_method,
)
except ClientError as e:
self.log.error(e.response["Error"]["Message"])
return None
|
Generate a presigned url given a client, its method, and arguments.
.. seealso::
- :external+boto3:py:meth:`S3.Client.generate_presigned_url`
:param client_method: The client method to presign for.
:param params: The parameters normally passed to ClientMethod.
:param expires_in: The number of seconds the presigned url is valid for.
By default it expires in an hour (3600 seconds).
:param http_method: The http method to use on the generated url.
By default, the http method is whatever is used in the method's model.
:return: The presigned url.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/s3.py
| 1,610
|
[
"self",
"client_method",
"params",
"expires_in",
"http_method"
] |
str | None
| true
| 1
| 6.72
|
apache/airflow
| 43,597
|
sphinx
| false
|
log
|
public static double log(double value) {
if (value > 0.0) {
if (value == Double.POSITIVE_INFINITY) {
return Double.POSITIVE_INFINITY;
}
// For normal values not close to 1.0, we use the following formula:
// log(value)
// = log(2^exponent*1.mantissa)
// = log(2^exponent) + log(1.mantissa)
// = exponent * log(2) + log(1.mantissa)
// = exponent * log(2) + log(1.mantissaApprox) + log(1.mantissa/1.mantissaApprox)
// = exponent * log(2) + log(1.mantissaApprox) + log(1+epsilon)
// = exponent * log(2) + log(1.mantissaApprox) + epsilon-epsilon^2/2+epsilon^3/3-epsilon^4/4+...
// with:
// 1.mantissaApprox <= 1.mantissa,
// log(1.mantissaApprox) in table,
// epsilon = (1.mantissa/1.mantissaApprox)-1
//
// To avoid bad relative error for small results,
// values close to 1.0 are treated aside, with the formula:
// log(x) = z*(2+z^2*((2.0/3)+z^2*((2.0/5))+z^2*((2.0/7))+...)))
// with z=(x-1)/(x+1)
double h;
if (value > 0.95) {
if (value < 1.14) {
double z = (value - 1.0) / (value + 1.0);
double z2 = z * z;
return z * (2 + z2 * ((2.0 / 3) + z2 * ((2.0 / 5) + z2 * ((2.0 / 7) + z2 * ((2.0 / 9) + z2 * ((2.0 / 11)))))));
}
h = 0.0;
} else if (value < MIN_DOUBLE_NORMAL) {
// Ensuring value is normal.
value *= TWO_POW_52;
// log(x*2^52)
// = log(x)-ln(2^52)
// = log(x)-52*ln(2)
h = -52 * LOG_2;
} else {
h = 0.0;
}
int valueBitsHi = (int) (Double.doubleToRawLongBits(value) >> 32);
int valueExp = (valueBitsHi >> 20) - MAX_DOUBLE_EXPONENT;
// Getting the first LOG_BITS bits of the mantissa.
int xIndex = ((valueBitsHi << 12) >>> (32 - LOG_BITS));
// 1.mantissa/1.mantissaApprox - 1
double z = (value * twoPowTab[-valueExp - MIN_DOUBLE_EXPONENT]) * logXInvTab[xIndex] - 1;
z *= (1 - z * ((1.0 / 2) - z * ((1.0 / 3))));
return h + valueExp * LOG_2 + (logXLogTab[xIndex] + z);
} else if (value == 0.0) {
return Double.NEGATIVE_INFINITY;
} else { // value < 0.0, or value is NaN
return Double.NaN;
}
}
|
@param value A double value.
@return Value logarithm (base e).
|
java
|
libs/core/src/main/java/org/elasticsearch/core/FastMath.java
| 223
|
[
"value"
] | true
| 7
| 8.16
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
items
|
def items(self) -> Iterable[tuple[Hashable, Any]]:
"""
Lazily iterate over (index, value) tuples.
This method returns an iterable tuple (index, value). This is
convenient if you want to create a lazy iterator.
Returns
-------
iterable
Iterable of tuples containing the (index, value) pairs from a
Series.
See Also
--------
DataFrame.items : Iterate over (column name, Series) pairs.
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs.
Examples
--------
>>> s = pd.Series(["A", "B", "C"])
>>> for index, value in s.items():
... print(f"Index : {index}, Value : {value}")
Index : 0, Value : A
Index : 1, Value : B
Index : 2, Value : C
"""
return zip(iter(self.index), iter(self), strict=True)
|
Lazily iterate over (index, value) tuples.
This method returns an iterable tuple (index, value). This is
convenient if you want to create a lazy iterator.
Returns
-------
iterable
Iterable of tuples containing the (index, value) pairs from a
Series.
See Also
--------
DataFrame.items : Iterate over (column name, Series) pairs.
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs.
Examples
--------
>>> s = pd.Series(["A", "B", "C"])
>>> for index, value in s.items():
... print(f"Index : {index}, Value : {value}")
Index : 0, Value : A
Index : 1, Value : B
Index : 2, Value : C
|
python
|
pandas/core/series.py
| 1,698
|
[
"self"
] |
Iterable[tuple[Hashable, Any]]
| true
| 1
| 7.12
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
create
|
static Admin create(Properties props) {
return KafkaAdminClient.createInternal(new AdminClientConfig(props, true), null);
}
|
Create a new Admin with the given configuration.
@param props The configuration.
@return The new KafkaAdminClient.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 133
|
[
"props"
] |
Admin
| true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
on
|
static <T> UncheckedFuture<T> on(final Future<T> future) {
return new UncheckedFutureImpl<>(future);
}
|
Creates a new instance on the given Future.
@param <T> The result type returned by this Future's {@link #get()} and {@link #get(long, TimeUnit)} methods.
@param future The Future to uncheck.
@return a new instance.
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/UncheckedFuture.java
| 69
|
[
"future"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
readMetadata
|
private ConfigurationMetadata readMetadata(String location) {
try {
return readMetadata(location, getMetadataResource(location).openInputStream());
}
catch (IOException ex) {
return null;
}
}
|
Read the existing {@link ConfigurationMetadata} for the specified type or
{@code null} if it is not available yet.
@param typeElement the type to read metadata for
@return the metadata for the given type or {@code null}
|
java
|
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/MetadataStore.java
| 88
|
[
"location"
] |
ConfigurationMetadata
| true
| 2
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
remove
|
def remove(self, key: str, where=None, start=None, stop=None) -> int | None:
"""
Remove pandas object partially by specifying the where condition
Parameters
----------
key : str
Node to remove or delete rows from
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
Returns
-------
number of rows removed (or None if not a Table)
Raises
------
raises KeyError if key is not a valid store
"""
where = _ensure_term(where, scope_level=1)
try:
s = self.get_storer(key)
except KeyError:
# the key is not a valid store, re-raising KeyError
raise
except AssertionError:
# surface any assertion errors for e.g. debugging
raise
except Exception as err:
# In tests we get here with ClosedFileError, TypeError, and
# _table_mod.NoSuchNodeError. TODO: Catch only these?
if where is not None:
raise ValueError(
"trying to remove a node with a non-None where clause!"
) from err
# we are actually trying to remove a node (with children)
node = self.get_node(key)
if node is not None:
node._f_remove(recursive=True)
return None
# remove the node
if com.all_none(where, start, stop):
s.group._f_remove(recursive=True)
return None
# delete from the table
if not s.is_table:
raise ValueError("can only remove with where on objects written as tables")
return s.delete(where=where, start=start, stop=stop)
|
Remove pandas object partially by specifying the where condition
Parameters
----------
key : str
Node to remove or delete rows from
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
Returns
-------
number of rows removed (or None if not a Table)
Raises
------
raises KeyError if key is not a valid store
|
python
|
pandas/io/pytables.py
| 1,235
|
[
"self",
"key",
"where",
"start",
"stop"
] |
int | None
| true
| 5
| 6.08
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
visitHeritageClause
|
function visitHeritageClause(node: HeritageClause): HeritageClause | undefined {
if (node.token === SyntaxKind.ImplementsKeyword) {
// implements clauses are elided
return undefined;
}
return visitEachChild(node, visitor, context);
}
|
Transforms a HeritageClause with TypeScript syntax.
This function will only be called when one of the following conditions are met:
- The node is a non-`extends` heritage clause that should be elided.
- The node is an `extends` heritage clause that should be visited, but only allow a single type.
@param node The HeritageClause to transform.
|
typescript
|
src/compiler/transformers/ts.ts
| 1,263
|
[
"node"
] | true
| 2
| 7.04
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
hasAnyProfileSpecificChildren
|
@Contract("null -> false")
private boolean hasAnyProfileSpecificChildren(@Nullable List<ConfigDataEnvironmentContributor> contributors) {
if (CollectionUtils.isEmpty(contributors)) {
return false;
}
for (ConfigDataEnvironmentContributor contributor : contributors) {
for (ImportPhase importPhase : ImportPhase.values()) {
if (contributor.getChildren(importPhase)
.stream()
.anyMatch((child) -> child.hasConfigDataOption(ConfigData.Option.PROFILE_SPECIFIC))) {
return true;
}
}
}
return false;
}
|
Create a new {@link ConfigDataEnvironmentContributor} instance with a new set of
children for the given phase.
@param importPhase the import phase
@param children the new children
@return a new contributor instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataEnvironmentContributor.java
| 316
|
[
"contributors"
] | true
| 3
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
onEmitNode
|
function onEmitNode(hint: EmitHint, node: Node, emitCallback: (hint: EmitHint, node: Node) => void): void {
if (isSourceFile(node)) {
if ((isExternalModule(node) || getIsolatedModules(compilerOptions)) && compilerOptions.importHelpers) {
helperNameSubstitutions = new Map<string, Identifier>();
}
currentSourceFile = node;
previousOnEmitNode(hint, node, emitCallback);
currentSourceFile = undefined;
helperNameSubstitutions = undefined;
}
else {
previousOnEmitNode(hint, node, emitCallback);
}
}
|
Hook for node emit.
@param hint A hint as to the intended usage of the node.
@param node The node to emit.
@param emit A callback used to emit the node in the printer.
|
typescript
|
src/compiler/transformers/module/esnextAnd2015.ts
| 386
|
[
"hint",
"node",
"emitCallback"
] | true
| 6
| 6.56
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
createEsmNotFoundErr
|
function createEsmNotFoundErr(request, path) {
// eslint-disable-next-line no-restricted-syntax
const err = new Error(`Cannot find module '${request}'`);
err.code = 'MODULE_NOT_FOUND';
if (path) {
err.path = path;
}
// TODO(BridgeAR): Add the requireStack as well.
return err;
}
|
Creates an error object for when a requested ES module cannot be found.
@param {string} request The name of the requested module
@param {string} [path] The path to the requested module
@returns {Error}
|
javascript
|
lib/internal/modules/cjs/loader.js
| 1,455
|
[
"request",
"path"
] | false
| 2
| 6.4
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
_get_feature_names
|
def _get_feature_names(X):
"""Get feature names from X.
Support for other array containers should place its implementation here.
Parameters
----------
X : {ndarray, dataframe} of shape (n_samples, n_features)
Array container to extract feature names.
- pandas dataframe : The columns will be considered to be feature
names. If the dataframe contains non-string feature names, `None` is
returned.
- All other array containers will return `None`.
Returns
-------
names: ndarray or None
Feature names of `X`. Unrecognized array containers will return `None`.
"""
feature_names = None
# extract feature names for support array containers
if is_pandas_df(X):
# Make sure we can inspect columns names from pandas, even with
# versions too old to expose a working implementation of
# __dataframe__.column_names() and avoid introducing any
# additional copy.
# TODO: remove the pandas-specific branch once the minimum supported
# version of pandas has a working implementation of
# __dataframe__.column_names() that is guaranteed to not introduce any
# additional copy of the data without having to impose allow_copy=False
# that could fail with other libraries. Note: in the longer term, we
# could decide to instead rely on the __dataframe_namespace__ API once
# adopted by our minimally supported pandas version.
feature_names = np.asarray(X.columns, dtype=object)
elif hasattr(X, "__dataframe__"):
df_protocol = X.__dataframe__()
feature_names = np.asarray(list(df_protocol.column_names()), dtype=object)
if feature_names is None or len(feature_names) == 0:
return
types = sorted(t.__qualname__ for t in set(type(v) for v in feature_names))
# mixed type of string and non-string is not supported
if len(types) > 1 and "str" in types:
raise TypeError(
"Feature names are only supported if all input features have string names, "
f"but your input has {types} as feature name / column name types. "
"If you want feature names to be stored and validated, you must convert "
"them all to strings, by using X.columns = X.columns.astype(str) for "
"example. Otherwise you can remove feature / column names from your input "
"data, or convert them all to a non-string data type."
)
# Only feature names of all strings are supported
if len(types) == 1 and types[0] == "str":
return feature_names
|
Get feature names from X.
Support for other array containers should place its implementation here.
Parameters
----------
X : {ndarray, dataframe} of shape (n_samples, n_features)
Array container to extract feature names.
- pandas dataframe : The columns will be considered to be feature
names. If the dataframe contains non-string feature names, `None` is
returned.
- All other array containers will return `None`.
Returns
-------
names: ndarray or None
Feature names of `X`. Unrecognized array containers will return `None`.
|
python
|
sklearn/utils/validation.py
| 2,310
|
[
"X"
] | false
| 9
| 6.24
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
createFactory
|
protected abstract ArrayValuesSourceAggregationBuilder<?> createFactory(
String aggregationName,
ValuesSourceType valuesSourceType,
Map<ParseField, Object> otherOptions
);
|
Creates a {@link ValuesSourceAggregationBuilder} from the information
gathered by the subclass. Options parsed in
{@link ArrayValuesSourceParser} itself will be added to the factory
after it has been returned by this method.
@param aggregationName
the name of the aggregation
@param valuesSourceType
the type of the {@link ValuesSource}
@param otherOptions
a {@link Map} containing the extra options parsed by the
{@link #token(String, String, XContentParser.Token, XContentParser, Map)}
method
@return the created factory
|
java
|
modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceParser.java
| 218
|
[
"aggregationName",
"valuesSourceType",
"otherOptions"
] | true
| 1
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
getString
|
public String getString(int index) throws JSONException {
Object object = get(index);
String result = JSON.toString(object);
if (result == null) {
throw JSON.typeMismatch(index, object, "String");
}
return result;
}
|
Returns the value at {@code index} if it exists, coercing it if necessary.
@param index the index to get the value from
@return the {@code value}
@throws JSONException if no such value exists.
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONArray.java
| 484
|
[
"index"
] |
String
| true
| 2
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
deriveActualIndentationFromList
|
function deriveActualIndentationFromList(list: readonly Node[], index: number, sourceFile: SourceFile, options: EditorSettings): number {
Debug.assert(index >= 0 && index < list.length);
const node = list[index];
// walk toward the start of the list starting from current node and check if the line is the same for all items.
// if end line for item [i - 1] differs from the start line for item [i] - find column of the first non-whitespace character on the line of item [i]
let lineAndCharacter = getStartLineAndCharacterForNode(node, sourceFile);
for (let i = index - 1; i >= 0; i--) {
if (list[i].kind === SyntaxKind.CommaToken) {
continue;
}
// skip list items that ends on the same line with the current list element
const prevEndLine = sourceFile.getLineAndCharacterOfPosition(list[i].end).line;
if (prevEndLine !== lineAndCharacter.line) {
return findColumnForFirstNonWhitespaceCharacterInLine(lineAndCharacter, sourceFile, options);
}
lineAndCharacter = getStartLineAndCharacterForNode(list[i], sourceFile);
}
return Value.Unknown;
}
|
@param assumeNewLineBeforeCloseBrace
`false` when called on text from a real source file.
`true` when we need to assume `position` is on a newline.
This is useful for codefixes. Consider
```
function f() {
|}
```
with `position` at `|`.
When inserting some text after an open brace, we would like to get indentation as if a newline was already there.
By default indentation at `position` will be 0 so 'assumeNewLineBeforeCloseBrace' overrides this behavior.
|
typescript
|
src/services/formatting/smartIndenter.ts
| 571
|
[
"list",
"index",
"sourceFile",
"options"
] | true
| 5
| 8.48
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
reverse
|
public static void reverse(final long[] array) {
if (array != null) {
reverse(array, 0, array.length);
}
}
|
Reverses the order of the given array.
<p>
This method does nothing for a {@code null} input array.
</p>
@param array the array to reverse, may be {@code null}.
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 6,602
|
[
"array"
] |
void
| true
| 2
| 7.04
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
generateCodeForAccessibleConstructor
|
private CodeBlock generateCodeForAccessibleConstructor(ConstructorDescriptor descriptor) {
Constructor<?> constructor = descriptor.constructor();
this.generationContext.getRuntimeHints().reflection().registerType(constructor.getDeclaringClass());
if (constructor.getParameterCount() == 0) {
if (!this.allowDirectSupplierShortcut) {
return CodeBlock.of("$T.using($T::new)", InstanceSupplier.class, descriptor.actualType());
}
if (!isThrowingCheckedException(constructor)) {
return CodeBlock.of("$T::new", descriptor.actualType());
}
return CodeBlock.of("$T.of($T::new)", ThrowingSupplier.class, descriptor.actualType());
}
GeneratedMethod generatedMethod = generateGetInstanceSupplierMethod(method ->
buildGetInstanceMethodForConstructor(method, descriptor, PRIVATE_STATIC));
return generateReturnStatement(generatedMethod);
}
|
Generate the instance supplier code.
@param registeredBean the bean to handle
@param instantiationDescriptor the executable to use to create the bean
@return the generated code
@since 6.1.7
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/InstanceSupplierCodeGenerator.java
| 179
|
[
"descriptor"
] |
CodeBlock
| true
| 4
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
fnv32_FIXED
|
constexpr uint32_t fnv32_FIXED(
const char* buf, uint32_t hash = fnv32_hash_start) noexcept {
for (; *buf; ++buf) {
hash = fnv32_append_byte_FIXED(hash, static_cast<uint8_t>(*buf));
}
return hash;
}
|
FNV hash of a c-str.
Continues hashing until a null byte is reached.
@param hash The initial hash seed.
@methodset fnv
|
cpp
|
folly/hash/FnvHash.h
| 174
|
[] | true
| 2
| 7.04
|
facebook/folly
| 30,157
|
doxygen
| false
|
|
slogdet
|
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
A namedtuple with the following attributes:
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logabsdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logabsdet`
will be -inf. In all cases, the determinant is equal to
``sign * np.exp(logabsdet)``.
See Also
--------
det
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine ``z/dgetrf``.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> import numpy as np
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logabsdet) = np.linalg.slogdet(a)
>>> (sign, logabsdet)
(-1, 0.69314718055994529) # may vary
>>> sign * np.exp(logabsdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logabsdet = np.linalg.slogdet(a)
>>> (sign, logabsdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logabsdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
sign = sign.astype(result_t, copy=False)
logdet = logdet.astype(real_t, copy=False)
return SlogdetResult(sign, logdet)
|
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
A namedtuple with the following attributes:
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logabsdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logabsdet`
will be -inf. In all cases, the determinant is equal to
``sign * np.exp(logabsdet)``.
See Also
--------
det
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine ``z/dgetrf``.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> import numpy as np
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logabsdet) = np.linalg.slogdet(a)
>>> (sign, logabsdet)
(-1, 0.69314718055994529) # may vary
>>> sign * np.exp(logabsdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logabsdet = np.linalg.slogdet(a)
>>> (sign, logabsdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logabsdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
|
python
|
numpy/linalg/_linalg.py
| 2,273
|
[
"a"
] | false
| 2
| 6.24
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
scanClassSetOperand
|
function scanClassSetOperand(): string {
mayContainStrings = false;
switch (charCodeChecked(pos)) {
case CharacterCodes.EOF:
return "";
case CharacterCodes.openBracket:
pos++;
scanClassSetExpression();
scanExpectedChar(CharacterCodes.closeBracket);
return "";
case CharacterCodes.backslash:
pos++;
if (scanCharacterClassEscape()) {
return "";
}
else if (charCodeChecked(pos) === CharacterCodes.q) {
pos++;
if (charCodeChecked(pos) === CharacterCodes.openBrace) {
pos++;
scanClassStringDisjunctionContents();
scanExpectedChar(CharacterCodes.closeBrace);
return "";
}
else {
error(Diagnostics.q_must_be_followed_by_string_alternatives_enclosed_in_braces, pos - 2, 2);
return "q";
}
}
pos--;
// falls through
default:
return scanClassSetCharacter();
}
}
|
A stack of scopes for named capturing groups. @see {scanGroupName}
|
typescript
|
src/compiler/scanner.ts
| 3,290
|
[] | true
| 6
| 6.56
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
acquire
|
private void acquire() {
final Thread thread = Thread.currentThread();
final long threadId = thread.getId();
if (threadId != currentThread.get() && !currentThread.compareAndSet(NO_CURRENT_THREAD, threadId))
throw new ConcurrentModificationException("KafkaConsumer is not safe for multi-threaded access. " +
"currentThread(name: " + thread.getName() + ", id: " + threadId + ")" +
" otherThread(id: " + currentThread.get() + ")"
);
refCount.incrementAndGet();
}
|
Acquire the light lock protecting this consumer from multithreaded access. Instead of blocking
when the lock is not available, however, we just throw an exception (since multithreaded usage is not
supported).
@throws ConcurrentModificationException if another thread already has the lock
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java
| 2,088
|
[] |
void
| true
| 3
| 6.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
hfloat
|
def hfloat(f, p=5):
"""Convert float to value suitable for humans.
Arguments:
f (float): The floating point number.
p (int): Floating point precision (default is 5).
"""
i = int(f)
return i if i == f else '{0:.{p}}'.format(f, p=p)
|
Convert float to value suitable for humans.
Arguments:
f (float): The floating point number.
p (int): Floating point precision (default is 5).
|
python
|
celery/utils/debug.py
| 119
|
[
"f",
"p"
] | false
| 2
| 6.08
|
celery/celery
| 27,741
|
google
| false
|
|
keys
|
def keys(self) -> Index:
"""
Return alias for index.
Returns
-------
Index
Index of the Series.
See Also
--------
Series.index : The index (axis labels) of the Series.
Examples
--------
>>> s = pd.Series([1, 2, 3], index=[0, 1, 2])
>>> s.keys()
Index([0, 1, 2], dtype='int64')
"""
return self.index
|
Return alias for index.
Returns
-------
Index
Index of the Series.
See Also
--------
Series.index : The index (axis labels) of the Series.
Examples
--------
>>> s = pd.Series([1, 2, 3], index=[0, 1, 2])
>>> s.keys()
Index([0, 1, 2], dtype='int64')
|
python
|
pandas/core/series.py
| 1,730
|
[
"self"
] |
Index
| true
| 1
| 6.08
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
addExportsAndOpens
|
public void addExportsAndOpens(Module target) {
String targetName = target.getName();
if (targets.contains(targetName) == false) {
throw new IllegalArgumentException(
"Module " + module.getName() + " does not contain qualified exports or opens for module " + targetName
);
}
List<String> exports = qualifiedExports.getOrDefault(targetName, List.of());
for (String export : exports) {
addExports(export, target);
}
List<String> opens = qualifiedOpens.getOrDefault(targetName, List.of());
for (String open : opens) {
addOpens(open, target);
}
}
|
Add exports and opens for a target module.
@param target A module whose name exists in {@link #getTargets()}
|
java
|
libs/core/src/main/java/org/elasticsearch/jdk/ModuleQualifiedExportsService.java
| 143
|
[
"target"
] |
void
| true
| 2
| 6.72
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
set8BitColor
|
function set8BitColor(styleCodes: number[], colorType: 'foreground' | 'background' | 'underline'): void {
let colorNumber = styleCodes[2];
const color = calcANSI8bitColor(colorNumber);
if (color) {
changeColor(colorType, color);
} else if (colorNumber >= 0 && colorNumber <= 15) {
if (colorType === 'underline') {
// for underline colors we just decode the 0-15 color number to theme color, set and return
changeColor(colorType, ansiColorIdentifiers[colorNumber].colorValue);
return;
}
// Need to map to one of the four basic color ranges (30-37, 90-97, 40-47, 100-107)
colorNumber += 30;
if (colorNumber >= 38) {
// Bright colors
colorNumber += 52;
}
if (colorType === 'background') {
colorNumber += 10;
}
setBasicColor(colorNumber);
}
}
|
Calculate and set styling for advanced 8-bit ANSI color codes.
@param styleCodes Full list of integer codes that make up the ANSI
sequence, including the two defining codes and the one color code.
@param colorType If `'foreground'`, will set foreground color, if
`'background'`, will set background color and if it is `'underline'`
will set the underline color.
@see {@link https://en.wikipedia.org/wiki/ANSI_escape_code#8-bit }
|
typescript
|
extensions/notebook-renderers/src/ansi.ts
| 323
|
[
"styleCodes",
"colorType"
] | true
| 8
| 6.72
|
microsoft/vscode
| 179,840
|
jsdoc
| false
|
|
tar_and_s3_upload
|
def tar_and_s3_upload(self, path: str, key: str, bucket: str) -> None:
    """
    Create a gzipped tarball of a local file or directory and upload it to S3.

    :param path: local file or directory
    :param key: s3 key
    :param bucket: s3 bucket
    """
    # Archive either the directory's immediate entries or the single file,
    # always flattening to basenames inside the tarball.
    if os.path.isdir(path):
        sources = [os.path.join(path, entry) for entry in os.listdir(path)]
    else:
        sources = [path]
    with tempfile.TemporaryFile() as archive:
        with tarfile.open(mode="w:gz", fileobj=archive) as tar:
            for source in sources:
                tar.add(source, arcname=os.path.basename(source))
        # Rewind so the upload reads from the start of the archive.
        archive.seek(0)
        self.s3_hook.load_file_obj(archive, key, bucket, replace=True)
|
Tar the local file or directory and upload to s3.
:param path: local file or directory
:param key: s3 key
:param bucket: s3 bucket
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/sagemaker.py
| 171
|
[
"self",
"path",
"key",
"bucket"
] |
None
| true
| 4
| 7.04
|
apache/airflow
| 43,597
|
sphinx
| false
|
getSortedDependencies
|
/**
 * Return the metadata's dependencies as a new list sorted by dependency id.
 * @param metadata the service metadata to read dependencies from
 * @return a mutable list of dependencies ordered by id
 */
private List<Dependency> getSortedDependencies(InitializrServiceMetadata metadata) {
	List<Dependency> sorted = new ArrayList<>(metadata.getDependencies());
	sorted.sort((first, second) -> first.getId().compareTo(second.getId()));
	return sorted;
}
|
Generate a report for the specified service. The report contains the available
capabilities as advertised by the root endpoint.
@param url the url of the service
@return the report that describes the service
@throws IOException if the report cannot be generated
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/init/ServiceCapabilitiesReportGenerator.java
| 93
|
[
"metadata"
] | true
| 1
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
strip
|
def strip(a, chars=None):
    """
    For each element in `a`, return a copy with the leading and
    trailing characters removed.

    Parameters
    ----------
    a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
    chars : scalar with the same dtype as ``a``, optional
        A string giving the set of characters to remove. When ``None``,
        whitespace is stripped. ``chars`` is not treated as a prefix or
        suffix; every combination of its characters is removed from both
        ends.

    Returns
    -------
    out : ndarray
        Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
        depending on input types

    See Also
    --------
    str.strip

    Examples
    --------
    >>> import numpy as np
    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
    >>> c
    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
    >>> np.strings.strip(c)
    array(['aAaAaA', 'aA', 'abBABba'], dtype='<U7')
    # 'a' unstripped from c[1] because of leading whitespace.
    >>> np.strings.strip(c, 'a')
    array(['AaAaA', '  aA  ', 'bBABb'], dtype='<U7')
    # 'A' unstripped from c[1] because of trailing whitespace.
    >>> np.strings.strip(c, 'A')
    array(['aAaAa', '  aA  ', 'abBABba'], dtype='<U7')
    """
    # Dispatch to the whitespace or explicit-character helper.
    return _strip_whitespace(a) if chars is None else _strip_chars(a, chars)
|
For each element in `a`, return a copy with the leading and
trailing characters removed.
Parameters
----------
a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
chars : scalar with the same dtype as ``a``, optional
The ``chars`` argument is a string specifying the set of
characters to be removed. If ``None``, the ``chars``
argument defaults to removing whitespace. The ``chars`` argument
is not a prefix or suffix; rather, all combinations of its
values are stripped.
Returns
-------
out : ndarray
Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
depending on input types
See Also
--------
str.strip
Examples
--------
>>> import numpy as np
>>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
>>> c
array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')
>>> np.strings.strip(c)
array(['aAaAaA', 'aA', 'abBABba'], dtype='<U7')
# 'a' unstripped from c[1] because of leading whitespace.
>>> np.strings.strip(c, 'a')
array(['AaAaA', ' aA ', 'bBABb'], dtype='<U7')
# 'A' unstripped from c[1] because of trailing whitespace.
>>> np.strings.strip(c, 'A')
array(['aAaAa', ' aA ', 'abBABba'], dtype='<U7')
|
python
|
numpy/_core/strings.py
| 1,034
|
[
"a",
"chars"
] | false
| 2
| 7.84
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
isAlreadyPackaged
|
/**
 * Determine whether the given archive has already been repackaged by checking
 * for the boot version attribute in its manifest.
 * @param file the jar file to inspect
 * @return {@code true} if the manifest carries the boot version attribute
 * @throws IllegalStateException if the archive cannot be read
 */
protected final boolean isAlreadyPackaged(File file) {
	try (JarFile jarFile = new JarFile(file)) {
		Manifest manifest = jarFile.getManifest();
		if (manifest == null) {
			return false;
		}
		return manifest.getMainAttributes().getValue(BOOT_VERSION_ATTRIBUTE) != null;
	}
	catch (IOException ex) {
		throw new IllegalStateException("Error reading archive file", ex);
	}
}
|
Sets if jarmode jars relevant for the packaging should be automatically included.
@param includeRelevantJarModeJars if relevant jars are included
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/Packager.java
| 182
|
[
"file"
] | true
| 3
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
_ask_the_user_for_the_type_of_changes
|
def _ask_the_user_for_the_type_of_changes(non_interactive: bool) -> TypeOfChange:
    """Ask the user which type of change applies (answers are case-insensitive).

    :return: Type of change.
    """
    # StrEnum (Python 3.11+) would avoid this manual value listing.
    valid_answers = [change.value for change in TypeOfChange]
    if non_interactive:
        # Simulate all possible non-terminal answers - this is useful for running on CI where we want to
        # Test all possibilities.
        return TypeOfChange(random.choice(valid_answers))
    display_answers = "/".join(valid_answers) + "/q"
    while True:
        get_console().print(
            "[warning]Type of change (d)ocumentation, (b)ugfix, (f)eature, (x)breaking "
            f"change, (m)isc, (s)kip, airflow_min_(v)ersion_bump (q)uit [{display_answers}]?[/] ",
            end="",
        )
        try:
            given_answer = input("").lower()
        except KeyboardInterrupt:
            # Treat Ctrl-C the same as an explicit quit.
            raise PrepareReleaseDocsUserQuitException()
        if given_answer == "q":
            raise PrepareReleaseDocsUserQuitException()
        if given_answer in valid_answers:
            return TypeOfChange(given_answer)
        get_console().print(
            f"[warning] Wrong answer given: '{given_answer}'. Should be one of {display_answers}"
        )
|
Ask user to specify type of changes (case-insensitive).
:return: Type of change.
|
python
|
dev/breeze/src/airflow_breeze/prepare_providers/provider_documentation.py
| 503
|
[
"non_interactive"
] |
TypeOfChange
| true
| 5
| 7.04
|
apache/airflow
| 43,597
|
unknown
| false
|
setLocale
|
/**
 * Associate the given Locale with the current thread,
 * preserving any TimeZone that may have been set already.
 * <p>Will implicitly create a LocaleContext for the given Locale.
 * @param locale the current Locale, or {@code null} to reset
 * the locale part of thread-bound context
 * @param inheritable whether to expose the LocaleContext as inheritable
 * for child threads (using an {@link InheritableThreadLocal})
 * @see #setTimeZone(TimeZone, boolean)
 * @see SimpleLocaleContext#SimpleLocaleContext(Locale)
 */
public static void setLocale(@Nullable Locale locale, boolean inheritable) {
	LocaleContext existing = getLocaleContext();
	TimeZone timeZone = null;
	if (existing instanceof TimeZoneAwareLocaleContext timeZoneAware) {
		timeZone = timeZoneAware.getTimeZone();
	}
	// Carry an existing time zone forward; otherwise build a plain locale
	// context, or clear the context entirely when locale is null.
	LocaleContext newContext;
	if (timeZone != null) {
		newContext = new SimpleTimeZoneAwareLocaleContext(locale, timeZone);
	}
	else if (locale != null) {
		newContext = new SimpleLocaleContext(locale);
	}
	else {
		newContext = null;
	}
	setLocaleContext(newContext, inheritable);
}
|
Associate the given Locale with the current thread,
preserving any TimeZone that may have been set already.
<p>Will implicitly create a LocaleContext for the given Locale.
@param locale the current Locale, or {@code null} to reset
the locale part of thread-bound context
@param inheritable whether to expose the LocaleContext as inheritable
for child threads (using an {@link InheritableThreadLocal})
@see #setTimeZone(TimeZone, boolean)
@see SimpleLocaleContext#SimpleLocaleContext(Locale)
|
java
|
spring-context/src/main/java/org/springframework/context/i18n/LocaleContextHolder.java
| 151
|
[
"locale",
"inheritable"
] |
void
| true
| 4
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getAsText
|
/**
 * Render the current Class value as its fully qualified name,
 * or an empty string when no value is set.
 */
@Override
public String getAsText() {
	Class<?> clazz = (Class<?>) getValue();
	return (clazz != null ? ClassUtils.getQualifiedName(clazz) : "");
}
|
Create a default ClassEditor, using the given ClassLoader.
@param classLoader the ClassLoader to use
(or {@code null} for the thread context ClassLoader)
|
java
|
spring-beans/src/main/java/org/springframework/beans/propertyeditors/ClassEditor.java
| 72
|
[] |
String
| true
| 2
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
setDaemon
|
/**
 * Sets daemon or not for new threads created with this ThreadFactory.
 *
 * @param daemon whether or not new Threads created with this ThreadFactory will be daemon threads
 * @return this for the builder pattern
 */
@CanIgnoreReturnValue
public ThreadFactoryBuilder setDaemon(boolean daemon) {
	this.daemon = daemon;
	return this;
}
|
Sets daemon or not for new threads created with this ThreadFactory.
<p><b>Java 21+ users:</b> use {@link Thread.Builder.OfPlatform#daemon(boolean)} instead.
@param daemon whether or not new Threads created with this ThreadFactory will be daemon threads
@return this for the builder pattern
|
java
|
android/guava/src/com/google/common/util/concurrent/ThreadFactoryBuilder.java
| 103
|
[
"daemon"
] |
ThreadFactoryBuilder
| true
| 1
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
hasActiveExternalCalls
|
/**
 * Return true if any pending, queued or in-flight call list currently
 * contains an active external call.
 */
private boolean hasActiveExternalCalls() {
	if (hasActiveExternalCalls(pendingCalls)) {
		return true;
	}
	// Short-circuits over the queued call lists, same as the original loop.
	boolean anyQueued = callsToSend.values().stream()
			.anyMatch(callList -> hasActiveExternalCalls(callList));
	return anyQueued || hasActiveExternalCalls(correlationIdToCalls.values());
}
|
Return true if there are currently active external calls.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
| 1,430
|
[] | true
| 3
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
categories
|
def categories(self) -> Index:
    """
    An ``Index`` containing the unique categories allowed.

    See Also
    --------
    ordered : Whether the categories have an ordered relationship.

    Examples
    --------
    >>> cat_type = pd.CategoricalDtype(categories=["a", "b"], ordered=True)
    >>> cat_type.categories
    Index(['a', 'b'], dtype='str')
    """
    # Stored at construction time; returned as-is (no copy).
    cats = self._categories
    return cats
|
An ``Index`` containing the unique categories allowed.
See Also
--------
ordered : Whether the categories have an ordered relationship.
Examples
--------
>>> cat_type = pd.CategoricalDtype(categories=["a", "b"], ordered=True)
>>> cat_type.categories
Index(['a', 'b'], dtype='str')
|
python
|
pandas/core/dtypes/dtypes.py
| 638
|
[
"self"
] |
Index
| true
| 1
| 6.48
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
toString
|
/**
 * Render the exception message followed by one line per related cause, if any.
 */
@Override
public String toString() {
	StringBuilder result = new StringBuilder(super.toString());
	if (this.relatedCauses != null) {
		for (Throwable cause : this.relatedCauses) {
			result.append("\nRelated cause: ").append(cause);
		}
	}
	return result.toString();
}
|
Return the related causes, if any.
@return the array of related causes, or {@code null} if none
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/BeanCreationException.java
| 157
|
[] |
String
| true
| 2
| 8.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
formatter
|
/**
 * Build a smart-resolving formatter for the given pattern.
 * @param pattern the date-time pattern, possibly empty or {@code null}
 * @return the formatter, or {@code null} when no usable pattern was given
 */
private static @Nullable DateTimeFormatter formatter(@Nullable String pattern) {
	if (!StringUtils.hasText(pattern)) {
		return null;
	}
	return DateTimeFormatter.ofPattern(pattern).withResolverStyle(ResolverStyle.SMART);
}
|
Configures the date-time format using the given {@code pattern}.
@param pattern the pattern for formatting date-times
@return {@code this} for chained method invocation
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/web/format/DateTimeFormatters.java
| 102
|
[
"pattern"
] |
DateTimeFormatter
| true
| 2
| 7.68
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
start_execution
|
def start_execution(
self,
state_machine_arn: str,
name: str | None = None,
state_machine_input: dict | str | None = None,
is_redrive_execution: bool = False,
) -> str:
"""
Start Execution of the State Machine.
.. seealso::
- :external+boto3:py:meth:`SFN.Client.start_execution`
:param state_machine_arn: AWS Step Function State Machine ARN.
:param is_redrive_execution: Restarts unsuccessful executions of Standard workflows that did not
complete successfully in the last 14 days.
:param name: The name of the execution.
:param state_machine_input: JSON data input to pass to the State Machine.
:return: Execution ARN.
"""
if is_redrive_execution:
if not name:
raise AirflowFailException(
"Execution name is required to start RedriveExecution for %s.", state_machine_arn
)
elements = state_machine_arn.split(":stateMachine:")
execution_arn = f"{elements[0]}:execution:{elements[1]}:{name}"
self.conn.redrive_execution(executionArn=execution_arn)
self.log.info(
"Successfully started RedriveExecution for Step Function State Machine: %s.",
state_machine_arn,
)
return execution_arn
execution_args = {"stateMachineArn": state_machine_arn}
if name is not None:
execution_args["name"] = name
if state_machine_input is not None:
if isinstance(state_machine_input, str):
execution_args["input"] = state_machine_input
elif isinstance(state_machine_input, dict):
execution_args["input"] = json.dumps(state_machine_input)
self.log.info("Executing Step Function State Machine: %s", state_machine_arn)
response = self.conn.start_execution(**execution_args)
return response.get("executionArn")
|
Start Execution of the State Machine.
.. seealso::
- :external+boto3:py:meth:`SFN.Client.start_execution`
:param state_machine_arn: AWS Step Function State Machine ARN.
:param is_redrive_execution: Restarts unsuccessful executions of Standard workflows that did not
complete successfully in the last 14 days.
:param name: The name of the execution.
:param state_machine_input: JSON data input to pass to the State Machine.
:return: Execution ARN.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/step_function.py
| 42
|
[
"self",
"state_machine_arn",
"name",
"state_machine_input",
"is_redrive_execution"
] |
str
| true
| 7
| 7.6
|
apache/airflow
| 43,597
|
sphinx
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.