function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
is_any_real_numeric_dtype
|
def is_any_real_numeric_dtype(arr_or_dtype) -> bool:
    """
    Check whether the provided array or dtype is of a real number dtype.

    Real here means numeric but neither complex nor boolean.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array or dtype is of a real number dtype.

    See Also
    --------
    is_numeric_dtype : Check if a dtype is numeric.
    is_complex_dtype : Check if a dtype is complex.
    is_bool_dtype : Check if a dtype is boolean.

    Examples
    --------
    >>> from pandas.api.types import is_any_real_numeric_dtype
    >>> is_any_real_numeric_dtype(int)
    True
    >>> is_any_real_numeric_dtype(float)
    True
    >>> is_any_real_numeric_dtype(object)
    False
    >>> is_any_real_numeric_dtype(str)
    False
    >>> is_any_real_numeric_dtype(complex(1, 2))
    False
    >>> is_any_real_numeric_dtype(bool)
    False
    """
    # Must be numeric at all, and then exclude the two non-real numeric kinds.
    if not is_numeric_dtype(arr_or_dtype):
        return False
    if is_complex_dtype(arr_or_dtype):
        return False
    return not is_bool_dtype(arr_or_dtype)
|
Check whether the provided array or dtype is of a real number dtype.
Parameters
----------
arr_or_dtype : array-like or dtype
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of a real number dtype.
See Also
--------
is_numeric_dtype : Check if a dtype is numeric.
is_complex_dtype : Check if a dtype is complex.
is_bool_dtype : Check if a dtype is boolean.
Examples
--------
>>> from pandas.api.types import is_any_real_numeric_dtype
>>> is_any_real_numeric_dtype(int)
True
>>> is_any_real_numeric_dtype(float)
True
>>> is_any_real_numeric_dtype(object)
False
>>> is_any_real_numeric_dtype(str)
False
>>> is_any_real_numeric_dtype(complex(1, 2))
False
>>> is_any_real_numeric_dtype(bool)
False
|
python
|
pandas/core/dtypes/common.py
| 1,301
|
[
"arr_or_dtype"
] |
bool
| true
| 3
| 7.84
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
getProfiles
|
/**
 * Resolve the profiles of the given {@link Type} using both the {@link Environment}
 * and the {@link Binder}.
 * <p>
 * The raw environment property value is validated first. Profiles that were set
 * programmatically on the environment take precedence over bound property values,
 * unless the type merges with environment profiles and a bound value exists, in
 * which case the two sets are merged.
 * @param environment the source environment
 * @param binder the binder for profile properties
 * @param validator the validator applied to profile values
 * @param type the type of profiles being resolved
 * @return the resolved profiles
 */
private Collection<String> getProfiles(Environment environment, Binder binder, ProfilesValidator validator,
		Type type) {
	// Raw value of the profile property as present in the environment (may be null)
	String environmentPropertyValue = environment.getProperty(type.getName());
	// Split the comma-delimited property value into individual profile names
	Set<String> environmentPropertyProfiles = (!StringUtils.hasLength(environmentPropertyValue))
			? Collections.emptySet()
			: StringUtils.commaDelimitedListToSet(StringUtils.trimAllWhitespace(environmentPropertyValue));
	validator.validate(environmentPropertyProfiles,
			() -> "Invalid profile property value found in Environment under '%s'".formatted(type.getName()));
	// Profiles reported directly by the environment for this type
	Set<String> environmentProfiles = new LinkedHashSet<>(Arrays.asList(type.get(environment)));
	BindResult<Set<String>> boundProfiles = binder.bind(type.getName(), STRING_SET, validator);
	if (hasProgrammaticallySetProfiles(type, environmentPropertyValue, environmentPropertyProfiles,
			environmentProfiles)) {
		// Programmatically set profiles win, optionally merged with bound values
		if (!type.isMergeWithEnvironmentProfiles() || !boundProfiles.isBound()) {
			return environmentProfiles;
		}
		return boundProfiles.map((bound) -> merge(environmentProfiles, bound)).get();
	}
	return boundProfiles.orElse(type.getDefaultValue());
}
|
Resolve the profiles of the given {@link Type} from the {@link Environment} and
{@link Binder}, validating property-sourced values.
@param environment the source environment
@param binder the binder for profile properties
@param validator the validator applied to profile values
@param type the type of profiles being resolved
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/Profiles.java
| 102
|
[
"environment",
"binder",
"validator",
"type"
] | true
| 5
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
is
|
/**
 * {@code A.is(B)} is defined as {@code Foo<A>.isSubtypeOf(Foo<B>)}.
 *
 * <p>Returns true when 'this' equals {@code formalType}, when both have equal
 * canonical form, or when {@code formalType} is a wildcard whose (canonicalized)
 * upper and lower bounds contain the runtime type.
 *
 * @param formalType is {@code Foo<formalType>} a supertype of {@code Foo<T>}?
 * @param declaration the type variable in the context of a parameterized type; used
 *     to infer the type bound when {@code formalType} is a wildcard with an implicit
 *     upper bound
 */
private boolean is(Type formalType, TypeVariable<?> declaration) {
    if (runtimeType.equals(formalType)) {
        return true;
    }
    if (formalType instanceof WildcardType) {
        // Canonicalize first so the declaration's implicit bounds become explicit.
        WildcardType your = canonicalizeWildcardType(declaration, (WildcardType) formalType);
        // if "formalType" is <? extends Foo>, "this" can be:
        // Foo, SubFoo, <? extends Foo>, <? extends SubFoo>, <T extends Foo> or
        // <T extends SubFoo>.
        // if "formalType" is <? super Foo>, "this" can be:
        // Foo, SuperFoo, <? super Foo> or <? super SuperFoo>.
        return every(your.getUpperBounds()).isSupertypeOf(runtimeType)
            && every(your.getLowerBounds()).isSubtypeOf(runtimeType);
    }
    return canonicalizeWildcardsInType(runtimeType).equals(canonicalizeWildcardsInType(formalType));
}
|
{@code A.is(B)} is defined as {@code Foo<A>.isSubtypeOf(Foo<B>)}.
<p>Specifically, returns true if any of the following conditions is met:
<ol>
<li>'this' and {@code formalType} are equal.
<li>'this' and {@code formalType} have equal canonical form.
<li>{@code formalType} is {@code <? extends Foo>} and 'this' is a subtype of {@code Foo}.
<li>{@code formalType} is {@code <? super Foo>} and 'this' is a supertype of {@code Foo}.
</ol>
Note that condition 2 isn't technically accurate under the context of a recursively bounded
type variables. For example, {@code Enum<? extends Enum<E>>} canonicalizes to {@code Enum<?>}
where {@code E} is the type variable declared on the {@code Enum} class declaration. It's
technically <em>not</em> true that {@code Foo<Enum<? extends Enum<E>>>} is a subtype of {@code
Foo<Enum<?>>} according to JLS. See testRecursiveWildcardSubtypeBug() for a real example.
<p>It appears that properly handling recursive type bounds in the presence of implicit type
bounds is not easy. For now we punt, hoping that this defect should rarely cause issues in real
code.
@param formalType is {@code Foo<formalType>} a supertype of {@code Foo<T>}?
@param declaration The type variable in the context of a parameterized type. Used to infer type
bound when {@code formalType} is a wildcard with implicit upper bound.
|
java
|
android/guava/src/com/google/common/reflect/TypeToken.java
| 984
|
[
"formalType",
"declaration"
] | true
| 4
| 8.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
handleShareAcknowledgeFailure
|
/**
 * Handle an unsuccessful ShareAcknowledge response: propagate the error to the
 * session handler, record failed-acknowledgement metrics and the error code per
 * partition, and mark the request state as complete. The node's pending-request
 * slot is always cleared, even if the handling above throws.
 * @param fetchTarget the node the request was sent to
 * @param requestData the request data that failed
 * @param acknowledgeRequestState state tracking the in-flight acknowledgements
 * @param error the error the request failed with
 * @param responseCompletionTimeMs the time at which response handling completed
 */
private void handleShareAcknowledgeFailure(Node fetchTarget,
                                           ShareAcknowledgeRequestData requestData,
                                           AcknowledgeRequestState acknowledgeRequestState,
                                           Throwable error,
                                           long responseCompletionTimeMs) {
    try {
        log.debug("Completed ShareAcknowledge request from node {} unsuccessfully {}", fetchTarget.id(), Errors.forException(error));
        acknowledgeRequestState.sessionHandler().handleError(error);
        acknowledgeRequestState.onFailedAttempt(responseCompletionTimeMs);
        requestData.topics().forEach(topic -> topic.partitions().forEach(partition -> {
            // Skip partitions whose topic id can no longer be resolved.
            TopicIdPartition tip = lookupTopicId(topic.topicId(), partition.partitionIndex());
            if (tip == null) {
                return;
            }
            metricsManager.recordFailedAcknowledgements(acknowledgeRequestState.getInFlightAcknowledgementsCount(tip));
            acknowledgeRequestState.handleAcknowledgeErrorCode(tip, Errors.forException(error), requestData.isRenewAck(), Optional.empty());
        }));
        acknowledgeRequestState.processingComplete();
    } finally {
        // Always free the node for subsequent requests, even on exception above.
        log.debug("Removing pending request for node {} - failed", fetchTarget.id());
        nodesWithPendingRequests.remove(fetchTarget.id());
        if (acknowledgeRequestState.isCloseRequest()) {
            log.debug("Removing node from ShareSession {}", fetchTarget.id());
            sessionHandlers.remove(fetchTarget.id());
        }
    }
}
|
Handles a failed ShareAcknowledge request by recording failed-acknowledgement
metrics and propagating the error for each in-flight partition, then clearing the
node's pending-request state.
@param fetchTarget The node the request was sent to.
@param requestData The request data that failed.
@param acknowledgeRequestState The state of the in-flight acknowledge request.
@param error The error the request failed with.
@param responseCompletionTimeMs The time at which response handling completed.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java
| 1,017
|
[
"fetchTarget",
"requestData",
"acknowledgeRequestState",
"error",
"responseCompletionTimeMs"
] |
void
| true
| 3
| 7.92
|
apache/kafka
| 31,560
|
javadoc
| false
|
generate_online_numba_ewma_func
|
def generate_online_numba_ewma_func(
    nopython: bool,
    nogil: bool,
    parallel: bool,
):
    """
    Generate a numba jitted online ewma function specified by values
    from engine_kwargs.

    Parameters
    ----------
    nopython : bool
        nopython to be passed into numba.jit
    nogil : bool
        nogil to be passed into numba.jit
    parallel : bool
        parallel to be passed into numba.jit

    Returns
    -------
    Numba function
    """
    if TYPE_CHECKING:
        import numba
    else:
        numba = import_optional_dependency("numba")

    @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
    def online_ewma(
        values: np.ndarray,
        deltas: np.ndarray,
        minimum_periods: int,
        old_wt_factor: float,
        new_wt: float,
        old_wt: np.ndarray,
        adjust: bool,
        ignore_na: bool,
    ):
        """
        Compute online exponentially weighted mean per column over 2D values.

        Takes the first observation as is, then computes the subsequent
        exponentially weighted mean accounting minimum periods.
        """
        result = np.empty(values.shape)
        weighted_avg = values[0].copy()
        nobs = (~np.isnan(weighted_avg)).astype(np.int64)
        result[0] = np.where(nobs >= minimum_periods, weighted_avg, np.nan)
        for i in range(1, len(values)):
            cur = values[i]
            is_observations = ~np.isnan(cur)
            nobs += is_observations.astype(np.int64)
            for j in numba.prange(len(cur)):
                if not np.isnan(weighted_avg[j]):
                    if is_observations[j] or not ignore_na:
                        # note that len(deltas) = len(vals) - 1 and deltas[i] is to be
                        # used in conjunction with vals[i+1]
                        # BUG FIX: deltas holds time gaps between consecutive ROWS,
                        # so it must be indexed by the row counter i, not the
                        # column index j (which previously decayed each column by
                        # an unrelated delta).
                        old_wt[j] *= old_wt_factor ** deltas[i - 1]
                        if is_observations[j]:
                            # avoid numerical errors on constant series
                            if weighted_avg[j] != cur[j]:
                                weighted_avg[j] = (
                                    (old_wt[j] * weighted_avg[j]) + (new_wt * cur[j])
                                ) / (old_wt[j] + new_wt)
                            if adjust:
                                old_wt[j] += new_wt
                            else:
                                old_wt[j] = 1.0
                    elif is_observations[j]:
                        weighted_avg[j] = cur[j]
            result[i] = np.where(nobs >= minimum_periods, weighted_avg, np.nan)
        return result, old_wt

    return online_ewma
|
Generate a numba jitted groupby ewma function specified by values
from engine_kwargs.
Parameters
----------
nopython : bool
nopython to be passed into numba.jit
nogil : bool
nogil to be passed into numba.jit
parallel : bool
parallel to be passed into numba.jit
Returns
-------
Numba function
|
python
|
pandas/core/window/online.py
| 10
|
[
"nopython",
"nogil",
"parallel"
] | true
| 13
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
sendEligibleCalls
|
/**
 * Send the calls which are ready.
 * @param now The current time in milliseconds.
 * @return The minimum timeout we need for poll().
 */
private long sendEligibleCalls(long now) {
    long pollTimeout = Long.MAX_VALUE;
    for (Iterator<Map.Entry<Node, List<Call>>> iter = callsToSend.entrySet().iterator(); iter.hasNext(); ) {
        Map.Entry<Node, List<Call>> entry = iter.next();
        List<Call> calls = entry.getValue();
        if (calls.isEmpty()) {
            iter.remove();
            continue;
        }
        Node node = entry.getKey();
        // At most one call may be in flight per node; wait for it to finish first.
        if (callsInFlight.containsKey(node.idString())) {
            log.trace("Still waiting for other calls to finish on node {}.", node);
            nodeReadyDeadlines.remove(node);
            continue;
        }
        if (!client.ready(node, now)) {
            Long deadline = nodeReadyDeadlines.get(node);
            if (deadline != null) {
                if (now >= deadline) {
                    // The node exceeded its ready deadline: disconnect it and
                    // move its queued calls back to pending.
                    log.info("Disconnecting from {} and revoking {} node assignment(s) " +
                        "because the node is taking too long to become ready.",
                        node.idString(), calls.size());
                    transitionToPendingAndClearList(calls);
                    client.disconnect(node.idString());
                    nodeReadyDeadlines.remove(node);
                    iter.remove();
                    continue;
                }
                pollTimeout = Math.min(pollTimeout, deadline - now);
            } else {
                // First time we see this node not ready: start its ready deadline.
                nodeReadyDeadlines.put(node, now + requestTimeoutMs);
            }
            long nodeTimeout = client.pollDelayMs(node, now);
            pollTimeout = Math.min(pollTimeout, nodeTimeout);
            log.trace("Client is not ready to send to {}. Must delay {} ms", node, nodeTimeout);
            continue;
        }
        // Subtract the time we spent waiting for the node to become ready from
        // the total request time.
        int remainingRequestTime;
        Long deadlineMs = nodeReadyDeadlines.remove(node);
        if (deadlineMs == null) {
            remainingRequestTime = requestTimeoutMs;
        } else {
            remainingRequestTime = calcTimeoutMsRemainingAsInt(now, deadlineMs);
        }
        // Send the first call whose request can be created; the rest stay queued.
        while (!calls.isEmpty()) {
            Call call = calls.remove(0);
            int timeoutMs = Math.min(remainingRequestTime,
                calcTimeoutMsRemainingAsInt(now, call.deadlineMs));
            AbstractRequest.Builder<?> requestBuilder;
            try {
                requestBuilder = call.createRequest(timeoutMs);
            } catch (Throwable t) {
                // Request creation failed: fail this call and try the next one.
                call.fail(now, new KafkaException(String.format(
                    "Internal error sending %s to %s.", call.callName, node), t));
                continue;
            }
            ClientRequest clientRequest = client.newClientRequest(node.idString(),
                requestBuilder, now, true, timeoutMs, null);
            log.debug("Sending {} to {}. correlationId={}, timeoutMs={}",
                requestBuilder, node, clientRequest.correlationId(), timeoutMs);
            client.send(clientRequest, now);
            callsInFlight.put(node.idString(), call);
            correlationIdToCalls.put(clientRequest.correlationId(), call);
            break;
        }
    }
    return pollTimeout;
}
|
Send the calls which are ready.
@param now The current time in milliseconds.
@return The minimum timeout we need for poll().
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
| 1,240
|
[
"now"
] | true
| 10
| 8.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
parseTypeQuery
|
function parseTypeQuery(): TypeQueryNode {
const pos = getNodePos();
parseExpected(SyntaxKind.TypeOfKeyword);
const entityName = parseEntityName(/*allowReservedWords*/ true);
// Make sure we perform ASI to prevent parsing the next line's type arguments as part of an instantiation expression.
const typeArguments = !scanner.hasPrecedingLineBreak() ? tryParseTypeArguments() : undefined;
return finishNode(factory.createTypeQueryNode(entityName, typeArguments), pos);
}
|
Parses a type query (`typeof entityName`), optionally followed by type arguments
when no line break precedes them (ASI prevents consuming the next line's type
arguments as part of an instantiation expression).
|
typescript
|
src/compiler/parser.ts
| 3,946
|
[] | true
| 2
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
isNotPropertyAccessOnIntegerLiteral
|
function isNotPropertyAccessOnIntegerLiteral(context: FormattingContext): boolean {
    // True unless the context node is a property access on a numeric literal
    // that contains no decimal point (e.g. `1.toString`).
    const node = context.contextNode;
    if (!isPropertyAccessExpression(node)) {
        return true;
    }
    if (!isNumericLiteral(node.expression)) {
        return true;
    }
    return node.expression.getText().includes(".");
}
|
Returns true unless the context node is a property access whose target is a
numeric literal without a decimal point in its text.
@param context The formatting context whose contextNode is inspected
|
typescript
|
src/services/formatting/rules.ts
| 989
|
[
"context"
] | true
| 3
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
create
|
/**
 * Create a new builder instance using {@link StandardCharsets#UTF_8} as the default
 * charset.
 * @return a new {@link ConfigurationMetadataRepositoryJsonBuilder} instance
 */
public static ConfigurationMetadataRepositoryJsonBuilder create() {
	return create(StandardCharsets.UTF_8);
}
|
Create a new builder instance using {@link StandardCharsets#UTF_8} as the default
charset.
@return a new {@link ConfigurationMetadataRepositoryJsonBuilder} instance.
|
java
|
configuration-metadata/spring-boot-configuration-metadata/src/main/java/org/springframework/boot/configurationmetadata/ConfigurationMetadataRepositoryJsonBuilder.java
| 166
|
[] |
ConfigurationMetadataRepositoryJsonBuilder
| true
| 1
| 6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
check_random_state
|
def check_random_state(seed):
    """Turn seed into an np.random.RandomState instance.

    Parameters
    ----------
    seed : None, int or instance of RandomState
        If seed is None, return the RandomState singleton used by np.random.
        If seed is an int, return a new RandomState instance seeded with seed.
        If seed is already a RandomState instance, return it.
        Otherwise raise ValueError.

    Returns
    -------
    :class:`numpy:numpy.random.RandomState`
        The random state object based on `seed` parameter.

    Examples
    --------
    >>> from sklearn.utils.validation import check_random_state
    >>> check_random_state(42)
    RandomState(MT19937) at 0x...
    """
    # None (or the np.random module itself) means "use the global singleton".
    if seed is None or seed is np.random:
        return np.random.mtrand._rand
    # An existing RandomState is passed through untouched.
    if isinstance(seed, np.random.RandomState):
        return seed
    # An integer seeds a fresh generator.
    if isinstance(seed, numbers.Integral):
        return np.random.RandomState(seed)
    raise ValueError(
        "%r cannot be used to seed a numpy.random.RandomState instance" % seed
    )
|
Turn seed into an np.random.RandomState instance.
Parameters
----------
seed : None, int or instance of RandomState
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
Returns
-------
:class:`numpy:numpy.random.RandomState`
The random state object based on `seed` parameter.
Examples
--------
>>> from sklearn.utils.validation import check_random_state
>>> check_random_state(42)
RandomState(MT19937) at 0x...
|
python
|
sklearn/utils/validation.py
| 1,439
|
[
"seed"
] | false
| 5
| 7.04
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
drainBufferedTimeline
|
/**
 * Return the {@link StartupTimeline timeline} by pulling steps from the buffer.
 * <p>
 * This removes steps from the buffer as they are copied out and resets the
 * estimated size to zero; see {@link #getBufferedTimeline()} for its read-only
 * counterpart.
 * @return buffered steps drained from the buffer
 */
public StartupTimeline drainBufferedTimeline() {
	List<TimelineEvent> events = new ArrayList<>();
	// Copy-and-remove through the iterator so each event is taken exactly once.
	Iterator<TimelineEvent> iterator = this.events.iterator();
	while (iterator.hasNext()) {
		events.add(iterator.next());
		iterator.remove();
	}
	this.estimatedSize.set(0);
	return new StartupTimeline(this.startTime, events);
}
|
Return the {@link StartupTimeline timeline} by pulling steps from the buffer.
<p>
This removes steps from the buffer, see {@link #getBufferedTimeline()} for its
read-only counterpart.
@return buffered steps drained from the buffer.
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/metrics/buffering/BufferingApplicationStartup.java
| 164
|
[] |
StartupTimeline
| true
| 2
| 7.6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
deferred_slots
|
def deferred_slots(self, session: Session = NEW_SESSION) -> int:
    """
    Get the number of slots deferred at the moment.

    :param session: SQLAlchemy ORM Session
    :return: the number of deferred slots
    """
    from airflow.models.taskinstance import TaskInstance  # Avoid circular import

    # Sum the pool slots of all deferred task instances in this pool.
    stmt = select(func.sum(TaskInstance.pool_slots)).where(
        TaskInstance.pool == self.pool,
        TaskInstance.state == TaskInstanceState.DEFERRED,
    )
    total = session.scalar(stmt)
    # SUM over zero rows yields NULL/None, which counts as zero slots.
    return int(total or 0)
|
Get the number of slots deferred at the moment.
:param session: SQLAlchemy ORM Session
:return: the number of deferred slots
|
python
|
airflow-core/src/airflow/models/pool.py
| 329
|
[
"self",
"session"
] |
int
| true
| 2
| 8.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
_replace_booleans
|
def _replace_booleans(tok: tuple[int, str]) -> tuple[int, str]:
"""
Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise
precedence is changed to boolean precedence.
Parameters
----------
tok : tuple of int, str
ints correspond to the all caps constants in the tokenize module
Returns
-------
tuple of int, str
Either the input or token or the replacement values
"""
toknum, tokval = tok
if toknum == tokenize.OP:
if tokval == "&":
return tokenize.NAME, "and"
elif tokval == "|":
return tokenize.NAME, "or"
return toknum, tokval
return toknum, tokval
|
Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise
precedence is changed to boolean precedence.
Parameters
----------
tok : tuple of int, str
ints correspond to the all caps constants in the tokenize module
Returns
-------
tuple of int, str
Either the input or token or the replacement values
|
python
|
pandas/core/computation/expr.py
| 74
|
[
"tok"
] |
tuple[int, str]
| true
| 4
| 6.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
classToString
|
/**
 * Formats a {@link Class} as a {@link String}.
 * @param <T> the type of the class
 * @param cls {@link Class} to format
 * @return the class as a String
 */
private static <T> String classToString(final Class<T> cls) {
    if (cls.isArray()) {
        // Arrays render as "ComponentType[]"
        return toString(cls.getComponentType()) + "[]";
    }
    if (isCyclical(cls)) {
        // Guard against infinite recursion on self-referential types
        return cls.getSimpleName() + "(cycle)";
    }
    final StringBuilder buf = new StringBuilder();
    if (cls.getEnclosingClass() != null) {
        // Nested classes render as "Outer.Inner" (recursing on the enclosing class)
        buf.append(classToString(cls.getEnclosingClass())).append('.').append(cls.getSimpleName());
    } else {
        buf.append(cls.getName());
    }
    if (cls.getTypeParameters().length > 0) {
        // Append the declared type parameters
        // NOTE(review): GT_JOINER presumably renders them in angle brackets — confirm.
        GT_JOINER.join(buf, (Object[]) cls.getTypeParameters());
    }
    return buf.toString();
}
|
Formats a {@link Class} as a {@link String}.
@param cls {@link Class} to format.
@return The class as a String.
|
java
|
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
| 347
|
[
"cls"
] |
String
| true
| 5
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
putmask_inplace
|
def putmask_inplace(values: ArrayLike, mask: npt.NDArray[np.bool_], value: Any) -> None:
    """
    ExtensionArray-compatible implementation of np.putmask. The main
    difference is we do not handle repeating or truncating like numpy.

    Operates on ``values`` in place; returns None.

    Parameters
    ----------
    values : np.ndarray or ExtensionArray
        Modified in place.
    mask : np.ndarray[bool]
        We assume extract_bool_array has already been called.
    value : Any
    """
    # Fall back to __setitem__ whenever np.putmask is unavailable or unsafe:
    # non-ndarray values, object dtype with a listlike value, or a value
    # array that cannot be safely cast to values' dtype.
    if (
        not isinstance(values, np.ndarray)
        or (values.dtype == object and not lib.is_scalar(value))
        # GH#43424: np.putmask raises TypeError if we cannot cast between types with
        # rule = "safe", a stricter guarantee we may not have here
        or (
            isinstance(value, np.ndarray) and not np.can_cast(value.dtype, values.dtype)
        )
    ):
        # GH#19266 using np.putmask gives unexpected results with listlike value
        # along with object dtype
        if is_list_like(value) and len(value) == len(values):
            # Same-length listlike: align elementwise under the mask.
            values[mask] = value[mask]
        else:
            values[mask] = value
    else:
        # GH#37833 np.putmask is more performant than __setitem__
        np.putmask(values, mask, value)
|
ExtensionArray-compatible implementation of np.putmask. The main
difference is we do not handle repeating or truncating like numpy.
Parameters
----------
values: np.ndarray or ExtensionArray
mask : np.ndarray[bool]
We assume extract_bool_array has already been called.
value : Any
|
python
|
pandas/core/array_algos/putmask.py
| 30
|
[
"values",
"mask",
"value"
] |
None
| true
| 10
| 6.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
fit
|
def fit(self, X, y=None):
    """Fit data X by computing the binning thresholds.

    The last bin is reserved for missing values, whether missing values
    are present in the data or not.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The data to bin.
    y : None
        Ignored.

    Returns
    -------
    self : object
        The fitted binner, with ``bin_thresholds_``, ``n_bins_non_missing_``,
        ``is_categorical_`` and ``missing_values_bin_idx_`` set.
    """
    if not (3 <= self.n_bins <= 256):
        # min is 3: at least 2 distinct bins and a missing values bin
        raise ValueError(
            "n_bins={} should be no smaller than 3 and no larger than 256.".format(
                self.n_bins
            )
        )
    X = check_array(X, dtype=[X_DTYPE], ensure_all_finite=False)
    # One of the n_bins is reserved for missing values (see above).
    max_bins = self.n_bins - 1
    rng = check_random_state(self.random_state)
    if self.subsample is not None and X.shape[0] > self.subsample:
        # Compute thresholds on a random subset to bound the cost of fitting.
        subset = rng.choice(X.shape[0], self.subsample, replace=False)
        X = X.take(subset, axis=0)
    if self.is_categorical is None:
        # No categorical flags given: treat every feature as numerical.
        self.is_categorical_ = np.zeros(X.shape[1], dtype=np.uint8)
    else:
        self.is_categorical_ = np.asarray(self.is_categorical, dtype=np.uint8)
    n_features = X.shape[1]
    known_categories = self.known_categories
    if known_categories is None:
        known_categories = [None] * n_features
    # validate is_categorical and known_categories parameters
    for f_idx in range(n_features):
        is_categorical = self.is_categorical_[f_idx]
        known_cats = known_categories[f_idx]
        if is_categorical and known_cats is None:
            raise ValueError(
                f"Known categories for feature {f_idx} must be provided."
            )
        if not is_categorical and known_cats is not None:
            raise ValueError(
                f"Feature {f_idx} isn't marked as a categorical feature, "
                "but categories were passed."
            )
    self.missing_values_bin_idx_ = self.n_bins - 1
    self.bin_thresholds_ = [None] * n_features
    n_bins_non_missing = [None] * n_features
    # Thresholds for the non-categorical features are computed in parallel.
    # NOTE(review): the threading backend assumes _find_binning_thresholds is
    # mostly GIL-releasing (numpy-bound) work — confirm.
    non_cat_thresholds = Parallel(n_jobs=self.n_threads, backend="threading")(
        delayed(_find_binning_thresholds)(X[:, f_idx], max_bins)
        for f_idx in range(n_features)
        if not self.is_categorical_[f_idx]
    )
    # Stitch the parallel results back into per-feature order.
    non_cat_idx = 0
    for f_idx in range(n_features):
        if self.is_categorical_[f_idx]:
            # Since categories are assumed to be encoded in
            # [0, n_cats] and since n_cats <= max_bins,
            # the thresholds *are* the unique categorical values. This will
            # lead to the correct mapping in transform()
            thresholds = known_categories[f_idx]
            n_bins_non_missing[f_idx] = thresholds.shape[0]
            self.bin_thresholds_[f_idx] = thresholds
        else:
            self.bin_thresholds_[f_idx] = non_cat_thresholds[non_cat_idx]
            n_bins_non_missing[f_idx] = self.bin_thresholds_[f_idx].shape[0] + 1
            non_cat_idx += 1
    self.n_bins_non_missing_ = np.array(n_bins_non_missing, dtype=np.uint32)
    return self
|
Fit data X by computing the binning thresholds.
The last bin is reserved for missing values, whether missing values
are present in the data or not.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data to bin.
y: None
Ignored.
Returns
-------
self : object
|
python
|
sklearn/ensemble/_hist_gradient_boosting/binning.py
| 178
|
[
"self",
"X",
"y"
] | false
| 15
| 6.16
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
getRootJarFile
|
/**
 * Return the root jar file for the given jar, stripping any nested entry path
 * after the {@code "!/"} separator.
 * @param jarFile the jar file (possibly referencing a nested entry)
 * @return the root jar file
 */
private File getRootJarFile(JarFile jarFile) {
	String name = jarFile.getName();
	int separatorIndex = name.indexOf("!/");
	return new File((separatorIndex > 0) ? name.substring(0, separatorIndex) : name);
}
|
Return the root jar file for the given jar, stripping any nested entry path after
the {@code "!/"} separator.
@param jarFile the jar file (possibly referencing a nested entry)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/system/ApplicationHome.java
| 132
|
[
"jarFile"
] |
File
| true
| 2
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
nextAlphanumeric
|
/**
 * Creates a random string whose length is between the inclusive minimum and the
 * exclusive maximum.
 * <p>
 * Characters will be chosen from the set of Latin alphabetic characters (a-z, A-Z)
 * and the digits 0-9.
 * </p>
 * @param minLengthInclusive the inclusive minimum length of the string to generate
 * @param maxLengthExclusive the exclusive maximum length of the string to generate
 * @return the random string
 * @since 3.5
 */
public String nextAlphanumeric(final int minLengthInclusive, final int maxLengthExclusive) {
    // Pick a concrete length in [min, max) and delegate to the fixed-length overload.
    return nextAlphanumeric(randomUtils().randomInt(minLengthInclusive, maxLengthExclusive));
}
|
Creates a random string whose length is between the inclusive minimum and the exclusive maximum.
<p>
Characters will be chosen from the set of Latin alphabetic characters (a-z, A-Z) and the digits 0-9.
</p>
@param minLengthInclusive the inclusive minimum length of the string to generate.
@param maxLengthExclusive the exclusive maximum length of the string to generate.
@return the random string.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/RandomStringUtils.java
| 856
|
[
"minLengthInclusive",
"maxLengthExclusive"
] |
String
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
equals
|
/**
 * Checks if two annotations are equal using the criteria for equality
 * presented in the {@link Annotation#equals(Object)} API docs.
 * @param a1 the first Annotation to compare, {@code null} returns
 * {@code false} unless both are {@code null}
 * @param a2 the second Annotation to compare, {@code null} returns
 * {@code false} unless both are {@code null}
 * @return {@code true} if the two annotations are {@code equal} or both
 * {@code null}
 */
public static boolean equals(final Annotation a1, final Annotation a2) {
    if (a1 == a2) {
        return true;
    }
    if (a1 == null || a2 == null) {
        return false;
    }
    final Class<? extends Annotation> type1 = a1.annotationType();
    final Class<? extends Annotation> type2 = a2.annotationType();
    Validate.notNull(type1, "Annotation %s with null annotationType()", a1);
    Validate.notNull(type2, "Annotation %s with null annotationType()", a2);
    if (!type1.equals(type2)) {
        return false;
    }
    try {
        // Compare the value of every zero-arg member method on both annotations.
        for (final Method m : type1.getDeclaredMethods()) {
            if (m.getParameterTypes().length == 0
                    && isValidAnnotationMemberType(m.getReturnType())) {
                final Object v1 = m.invoke(a1);
                final Object v2 = m.invoke(a2);
                if (!memberEquals(m.getReturnType(), v1, v2)) {
                    return false;
                }
            }
        }
    } catch (final ReflectiveOperationException ex) {
        // Reflective failure is treated as inequality rather than propagated.
        return false;
    }
    return true;
}
|
Checks if two annotations are equal using the criteria for equality
presented in the {@link Annotation#equals(Object)} API docs.
@param a1 the first Annotation to compare, {@code null} returns
{@code false} unless both are {@code null}
@param a2 the second Annotation to compare, {@code null} returns
{@code false} unless both are {@code null}
@return {@code true} if the two annotations are {@code equal} or both
{@code null}
|
java
|
src/main/java/org/apache/commons/lang3/AnnotationUtils.java
| 196
|
[
"a1",
"a2"
] | true
| 9
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
opj_int_floorlog2
|
/**
Get logarithm of an integer and round downwards
@return Returns log2(a)
*/
static INLINE OPJ_INT32 opj_int_floorlog2(OPJ_INT32 a)
{
    /* Count how many times the value can be halved before reaching 1;
       that count is floor(log2(a)) for a >= 1 (and 0 for a <= 1). */
    OPJ_INT32 log2_floor = 0;

    while (a > 1) {
        a >>= 1;
        ++log2_floor;
    }
    return log2_floor;
}
|
Get logarithm of an integer and round downwards
@return Returns log2(a)
|
cpp
|
3rdparty/openjpeg/openjp2/opj_intmath.h
| 236
|
[
"a"
] | true
| 2
| 6.4
|
opencv/opencv
| 85,374
|
doxygen
| false
|
|
createAvlTreeDigest
|
/**
 * Creates an {@link AVLTreeDigest}. This is the most accurate implementation, delivering relative accuracy close to 0.01% for large
 * sample populations. Still, its construction takes 2x-10x longer than {@link MergingDigest}, while its memory footprint increases
 * (slowly) with the sample population size.
 * @param arrays the array implementation backing the digest's storage
 * @param compression The compression parameter. 100 is a common value for normal uses. 1000 is extremely large.
 *                    The number of centroids retained will be a smallish (usually less than 10) multiple of this number.
 * @return the AvlTreeDigest
 */
public static AVLTreeDigest createAvlTreeDigest(TDigestArrays arrays, double compression) {
    return AVLTreeDigest.create(arrays, compression);
}
|
Creates an {@link AVLTreeDigest}. This is the most accurate implementation, delivering relative accuracy close to 0.01% for large
sample populations. Still, its construction takes 2x-10x longer than {@link MergingDigest}, while its memory footprint increases
(slowly) with the sample population size.
@param compression The compression parameter. 100 is a common value for normal uses. 1000 is extremely large.
The number of centroids retained will be a smallish (usually less than 10) multiple of this number.
@return the AvlTreeDigest
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/TDigest.java
| 68
|
[
"arrays",
"compression"
] |
AVLTreeDigest
| true
| 1
| 6.64
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
resource_name_for_dag
|
def resource_name_for_dag(dag_id: str) -> str:
    """
    Return the resource name for a DAG id.

    Note: This function is kept for backwards compatibility.
    """
    # Ids that are already resource names (the DAG resource itself, or anything
    # already carrying the prefix) are returned unchanged.
    already_resource = dag_id == RESOURCE_DAG or dag_id.startswith(RESOURCE_DAG_PREFIX)
    return dag_id if already_resource else f"{RESOURCE_DAG_PREFIX}{dag_id}"
|
Return the resource name for a DAG id.
Note: This function is kept for backwards compatibility.
|
python
|
airflow-core/src/airflow/security/permissions.py
| 116
|
[
"dag_id"
] |
str
| true
| 3
| 7.04
|
apache/airflow
| 43,597
|
unknown
| false
|
parseRsaDer
|
/**
 * Parses a DER encoded RSA key to a {@link RSAPrivateCrtKeySpec} using a minimal {@link DerParser}
 * @param keyBytes the private key raw bytes
 * @return {@link RSAPrivateCrtKeySpec}
 * @throws IOException if the DER encoded key can't be parsed
 */
private static RSAPrivateCrtKeySpec parseRsaDer(byte[] keyBytes) throws IOException {
    DerParser parser = new DerParser(keyBytes);
    // The key is a DER SEQUENCE of INTEGERs in the RSAPrivateKey order
    // (version, modulus, publicExponent, ..., coefficient) per RFC 8017.
    DerParser.Asn1Object sequence = parser.readAsn1Object();
    parser = sequence.getParser();
    parser.readAsn1Object().getInteger(); // (version) We don't need it but must read to get to modulus
    BigInteger modulus = parser.readAsn1Object().getInteger();
    BigInteger publicExponent = parser.readAsn1Object().getInteger();
    BigInteger privateExponent = parser.readAsn1Object().getInteger();
    BigInteger prime1 = parser.readAsn1Object().getInteger();
    BigInteger prime2 = parser.readAsn1Object().getInteger();
    BigInteger exponent1 = parser.readAsn1Object().getInteger();
    BigInteger exponent2 = parser.readAsn1Object().getInteger();
    BigInteger coefficient = parser.readAsn1Object().getInteger();
    return new RSAPrivateCrtKeySpec(modulus, publicExponent, privateExponent, prime1, prime2, exponent1, exponent2, coefficient);
}
|
Parses a DER encoded RSA key to a {@link RSAPrivateCrtKeySpec} using a minimal {@link DerParser}
@param keyBytes the private key raw bytes
@return {@link RSAPrivateCrtKeySpec}
@throws IOException if the DER encoded key can't be parsed
|
java
|
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemUtils.java
| 623
|
[
"keyBytes"
] |
RSAPrivateCrtKeySpec
| true
| 1
| 6.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
toString
|
/**
 * Render this pointcut as "AspectJExpressionPointcut: (type name, ...) expression",
 * falling back to a placeholder when no expression has been set.
 */
@Override
public String toString() {
	StringBuilder result = new StringBuilder("AspectJExpressionPointcut: (");
	String delimiter = "";
	for (int index = 0; index < this.pointcutParameterTypes.length; index++) {
		result.append(delimiter)
				.append(this.pointcutParameterTypes[index].getName())
				.append(' ')
				.append(this.pointcutParameterNames[index]);
		delimiter = ", ";
	}
	result.append(") ");
	String expression = getExpression();
	result.append(expression != null ? expression : "<pointcut expression not set>");
	return result.toString();
}
|
Return a String representation of this pointcut listing its parameter types and
names, followed by the pointcut expression (or a placeholder if not set).
|
java
|
spring-aop/src/main/java/org/springframework/aop/aspectj/AspectJExpressionPointcut.java
| 567
|
[] |
String
| true
| 4
| 7.04
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
create_nodegroup
|
def create_nodegroup(
self,
clusterName: str,
nodegroupName: str,
subnets: list[str],
nodeRole: str | None,
*,
tags: dict | None = None,
**kwargs,
) -> dict:
"""
Create an Amazon EKS managed node group for an Amazon EKS Cluster.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.create_nodegroup`
:param clusterName: The name of the Amazon EKS cluster to create the EKS Managed Nodegroup in.
:param nodegroupName: The unique name to give your managed nodegroup.
:param subnets: The subnets to use for the Auto Scaling group that is created for your nodegroup.
:param nodeRole: The Amazon Resource Name (ARN) of the IAM role to associate with your nodegroup.
:param tags: Optional tags to apply to your nodegroup.
:return: Returns descriptive information about the created EKS Managed Nodegroup.
"""
eks_client = self.conn
# The below tag is mandatory and must have a value of either 'owned' or 'shared'
# A value of 'owned' denotes that the subnets are exclusive to the nodegroup.
# The 'shared' value allows more than one resource to use the subnet.
cluster_tag_key = f"kubernetes.io/cluster/{clusterName}"
resolved_tags = tags or {}
if cluster_tag_key not in resolved_tags:
resolved_tags[cluster_tag_key] = "owned"
response = eks_client.create_nodegroup(
clusterName=clusterName,
nodegroupName=nodegroupName,
subnets=subnets,
nodeRole=nodeRole,
tags=resolved_tags,
**kwargs,
)
self.log.info(
"Created an Amazon EKS managed node group named %s in Amazon EKS cluster %s",
response.get("nodegroup").get("nodegroupName"),
response.get("nodegroup").get("clusterName"),
)
return response
|
Create an Amazon EKS managed node group for an Amazon EKS Cluster.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.create_nodegroup`
:param clusterName: The name of the Amazon EKS cluster to create the EKS Managed Nodegroup in.
:param nodegroupName: The unique name to give your managed nodegroup.
:param subnets: The subnets to use for the Auto Scaling group that is created for your nodegroup.
:param nodeRole: The Amazon Resource Name (ARN) of the IAM role to associate with your nodegroup.
:param tags: Optional tags to apply to your nodegroup.
:return: Returns descriptive information about the created EKS Managed Nodegroup.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/eks.py
| 161
|
[
"self",
"clusterName",
"nodegroupName",
"subnets",
"nodeRole",
"tags"
] |
dict
| true
| 3
| 8.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
rootFirst
|
public static StandardStackTracePrinter rootFirst() {
return new StandardStackTracePrinter(EnumSet.of(Option.ROOT_FIRST), UNLIMITED, null, null, null, null, null,
null);
}
|
Return a {@link StandardStackTracePrinter} that prints the stack trace with the
root exception first (the opposite of {@link Throwable#printStackTrace()}).
@return a {@link StandardStackTracePrinter} that prints the stack trace root first
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/StandardStackTracePrinter.java
| 307
|
[] |
StandardStackTracePrinter
| true
| 1
| 6.16
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
dispatchListenerEvents
|
private void dispatchListenerEvents() {
if (!monitor.isOccupiedByCurrentThread()) {
listeners.dispatch();
}
}
|
Attempts to execute all the listeners in {@link #listeners} while not holding the {@link
#monitor}.
|
java
|
android/guava/src/com/google/common/util/concurrent/AbstractService.java
| 507
|
[] |
void
| true
| 2
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
nextPage
|
public void nextPage() {
if (!isLastPage()) {
this.page++;
}
}
|
Switch to next page.
Will stay on last page if already on last page.
|
java
|
spring-beans/src/main/java/org/springframework/beans/support/PagedListHolder.java
| 248
|
[] |
void
| true
| 2
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
beforeInitialize
|
@Override
public void beforeInitialize() {
LoggerContext loggerContext = getLoggerContext();
if (isAlreadyInitialized(loggerContext)) {
return;
}
if (!configureJdkLoggingBridgeHandler()) {
super.beforeInitialize();
}
loggerContext.getConfiguration().addFilter(FILTER);
}
|
Return the configuration location. The result may be:
<ul>
<li>{@code null}: if DefaultConfiguration is used (no explicit config loaded)</li>
<li>A file path: if provided explicitly by the user</li>
<li>A URI: if loaded from the classpath default or a custom location</li>
</ul>
@param configuration the source configuration
@return the config location or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/log4j2/Log4J2LoggingSystem.java
| 171
|
[] |
void
| true
| 3
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
ifBound
|
public void ifBound(Consumer<? super T> consumer) {
Assert.notNull(consumer, "'consumer' must not be null");
if (this.value != null) {
consumer.accept(this.value);
}
}
|
Invoke the specified consumer with the bound value, or do nothing if no value has
been bound.
@param consumer block to execute if a value has been bound
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/BindResult.java
| 76
|
[
"consumer"
] |
void
| true
| 2
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
bindOrCreate
|
@SuppressWarnings("NullAway") // https://github.com/uber/NullAway/issues/1232
public <T> T bindOrCreate(ConfigurationPropertyName name, Bindable<T> target, @Nullable BindHandler handler) {
return bind(name, target, handler, true);
}
|
Bind the specified target {@link Bindable} using this binder's
{@link ConfigurationPropertySource property sources} or create a new instance using
the type of the {@link Bindable} if the result of the binding is {@code null}.
@param name the configuration property name to bind
@param target the target bindable
@param handler the bind handler (may be {@code null})
@param <T> the bound or created type
@return the bound or created object
@since 2.2.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Binder.java
| 348
|
[
"name",
"target",
"handler"
] |
T
| true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
call
|
public static <V, E extends Throwable> V call(final FailableCallable<V, E> callable) {
return get(callable::call);
}
|
Calls a callable and rethrows any exception as a {@link RuntimeException}.
@param callable the callable to call
@param <V> the return type of the callable
@param <E> the type of checked exception the callable may throw
@return the value returned from the callable
|
java
|
src/main/java/org/apache/commons/lang3/function/Failable.java
| 395
|
[
"callable"
] |
V
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getAndAdd
|
public long getAndAdd(final Number operand) {
final long last = value;
this.value += operand.longValue();
return last;
}
|
Increments this instance's value by {@code operand}; this method returns the value associated with the instance
immediately prior to the addition operation. This method is not thread safe.
@param operand the quantity to add, not null.
@throws NullPointerException if {@code operand} is null.
@return the value associated with this instance immediately before the operand was added.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableLong.java
| 221
|
[
"operand"
] | true
| 1
| 6.56
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
combinations
|
public static <E> Set<Set<E>> combinations(Set<E> set, int size) {
ImmutableMap<E, Integer> index = Maps.indexMap(set);
checkNonnegative(size, "size");
checkArgument(size <= index.size(), "size (%s) must be <= set.size() (%s)", size, index.size());
if (size == 0) {
return ImmutableSet.of(ImmutableSet.of());
} else if (size == index.size()) {
return ImmutableSet.of(index.keySet());
}
return new AbstractSet<Set<E>>() {
@Override
public boolean contains(@Nullable Object o) {
if (o instanceof Set) {
Set<?> s = (Set<?>) o;
return s.size() == size && index.keySet().containsAll(s);
}
return false;
}
@Override
public Iterator<Set<E>> iterator() {
return new AbstractIterator<Set<E>>() {
final BitSet bits = new BitSet(index.size());
@Override
protected @Nullable Set<E> computeNext() {
if (bits.isEmpty()) {
bits.set(0, size);
} else {
int firstSetBit = bits.nextSetBit(0);
int bitToFlip = bits.nextClearBit(firstSetBit);
if (bitToFlip == index.size()) {
return endOfData();
}
/*
* The current set in sorted order looks like
* {firstSetBit, firstSetBit + 1, ..., bitToFlip - 1, ...}
* where it does *not* contain bitToFlip.
*
* The next combination is
*
* {0, 1, ..., bitToFlip - firstSetBit - 2, bitToFlip, ...}
*
* This is lexicographically next if you look at the combinations in descending order
* e.g. {2, 1, 0}, {3, 1, 0}, {3, 2, 0}, {3, 2, 1}, {4, 1, 0}...
*/
bits.set(0, bitToFlip - firstSetBit - 1);
bits.clear(bitToFlip - firstSetBit - 1, bitToFlip);
bits.set(bitToFlip);
}
BitSet copy = (BitSet) bits.clone();
return new AbstractSet<E>() {
@Override
public boolean contains(@Nullable Object o) {
Integer i = index.get(o);
return i != null && copy.get(i);
}
@Override
public Iterator<E> iterator() {
return new AbstractIterator<E>() {
int i = -1;
@Override
protected @Nullable E computeNext() {
i = copy.nextSetBit(i + 1);
if (i == -1) {
return endOfData();
}
return index.keySet().asList().get(i);
}
};
}
@Override
public int size() {
return size;
}
};
}
};
}
@Override
public int size() {
return IntMath.binomial(index.size(), size);
}
@Override
public String toString() {
return "Sets.combinations(" + index.keySet() + ", " + size + ")";
}
};
}
|
Returns the set of all subsets of {@code set} of size {@code size}. For example, {@code
combinations(ImmutableSet.of(1, 2, 3), 2)} returns the set {@code {{1, 2}, {1, 3}, {2, 3}}}.
<p>Elements appear in these subsets in the same iteration order as they appeared in the input
set. The order in which these subsets appear in the outer set is undefined.
<p>The returned set and its constituent sets use {@code equals} to decide whether two elements
are identical, even if the input set uses a different concept of equivalence.
<p><i>Performance notes:</i> the memory usage of the returned set is only {@code O(n)}. When
the result set is constructed, the input set is merely copied. Only as the result set is
iterated are the individual subsets created. Each of these subsets occupies an additional O(n)
memory but only for as long as the user retains a reference to it. That is, the set returned by
{@code combinations} does not retain the individual subsets.
@param set the set of elements to take combinations of
@param size the number of elements per combination
@return the set of all combinations of {@code size} elements from {@code set}
@throws IllegalArgumentException if {@code size} is not between 0 and {@code set.size()}
inclusive
@throws NullPointerException if {@code set} is or contains {@code null}
@since 23.0
|
java
|
android/guava/src/com/google/common/collect/Sets.java
| 1,764
|
[
"set",
"size"
] | true
| 9
| 8
|
google/guava
| 51,352
|
javadoc
| false
|
|
hashCode
|
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ObjectUtils.nullSafeHashCode(getExceptionName());
result = prime * result + ObjectUtils.nullSafeHashCode(this.path);
result = prime * result + getStatusCode();
return result;
}
|
Return if this error page is a global one (matches all unmatched status and
exception types).
@return if this is a global error page
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/web/error/ErrorPage.java
| 124
|
[] | true
| 1
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
copy
|
CopyableBucketIterator copy();
|
Creates a copy of this bucket iterator, pointing at the same bucket of the same range of buckets.
Calling {@link #advance()} on the copied iterator does not affect this instance and vice-versa.
@return a copy of this iterator
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/CopyableBucketIterator.java
| 35
|
[] |
CopyableBucketIterator
| true
| 1
| 6.64
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
wasProcessed
|
boolean wasProcessed(String className);
|
Return {@code true} if the specified class name was processed by the annotation
processor.
@param className the source class
@return if the class was processed
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/AutoConfigurationMetadata.java
| 39
|
[
"className"
] | true
| 1
| 6.48
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
kurt
|
def kurt(
self,
axis: Axis | None = 0,
skipna: bool = True,
numeric_only: bool = False,
**kwargs,
) -> Series | Any:
"""
Return unbiased kurtosis over requested axis.
Kurtosis obtained using Fisher's definition of
kurtosis (kurtosis of normal == 0.0). Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
For `Series` this parameter is unused and defaults to 0.
For DataFrames, specifying ``axis=None`` will apply the aggregation
across both axes.
.. versionadded:: 2.0.0
skipna : bool, default True
Exclude NA/null values when computing the result.
numeric_only : bool, default False
Include only float, int, boolean columns.
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
Series or scalar
Unbiased kurtosis over requested axis.
See Also
--------
Dataframe.kurtosis : Returns unbiased kurtosis over requested axis.
Examples
--------
>>> s = pd.Series([1, 2, 2, 3], index=["cat", "dog", "dog", "mouse"])
>>> s
cat 1
dog 2
dog 2
mouse 3
dtype: int64
>>> s.kurt()
1.5
With a DataFrame
>>> df = pd.DataFrame(
... {"a": [1, 2, 2, 3], "b": [3, 4, 4, 4]},
... index=["cat", "dog", "dog", "mouse"],
... )
>>> df
a b
cat 1 3
dog 2 4
dog 2 4
mouse 3 4
>>> df.kurt()
a 1.5
b 4.0
dtype: float64
With axis=None
>>> df.kurt(axis=None).round(6)
-0.988693
Using axis=1
>>> df = pd.DataFrame(
... {"a": [1, 2], "b": [3, 4], "c": [3, 4], "d": [1, 2]},
... index=["cat", "dog"],
... )
>>> df.kurt(axis=1)
cat -6.0
dog -6.0
dtype: float64
"""
result = super().kurt(
axis=axis, skipna=skipna, numeric_only=numeric_only, **kwargs
)
if isinstance(result, Series):
result = result.__finalize__(self, method="kurt")
return result
|
Return unbiased kurtosis over requested axis.
Kurtosis obtained using Fisher's definition of
kurtosis (kurtosis of normal == 0.0). Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
For `Series` this parameter is unused and defaults to 0.
For DataFrames, specifying ``axis=None`` will apply the aggregation
across both axes.
.. versionadded:: 2.0.0
skipna : bool, default True
Exclude NA/null values when computing the result.
numeric_only : bool, default False
Include only float, int, boolean columns.
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
Series or scalar
Unbiased kurtosis over requested axis.
See Also
--------
Dataframe.kurtosis : Returns unbiased kurtosis over requested axis.
Examples
--------
>>> s = pd.Series([1, 2, 2, 3], index=["cat", "dog", "dog", "mouse"])
>>> s
cat 1
dog 2
dog 2
mouse 3
dtype: int64
>>> s.kurt()
1.5
With a DataFrame
>>> df = pd.DataFrame(
... {"a": [1, 2, 2, 3], "b": [3, 4, 4, 4]},
... index=["cat", "dog", "dog", "mouse"],
... )
>>> df
a b
cat 1 3
dog 2 4
dog 2 4
mouse 3 4
>>> df.kurt()
a 1.5
b 4.0
dtype: float64
With axis=None
>>> df.kurt(axis=None).round(6)
-0.988693
Using axis=1
>>> df = pd.DataFrame(
... {"a": [1, 2], "b": [3, 4], "c": [3, 4], "d": [1, 2]},
... index=["cat", "dog"],
... )
>>> df.kurt(axis=1)
cat -6.0
dog -6.0
dtype: float64
|
python
|
pandas/core/frame.py
| 13,910
|
[
"self",
"axis",
"skipna",
"numeric_only"
] |
Series | Any
| true
| 2
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
resetInitializingPositions
|
public synchronized void resetInitializingPositions(Predicate<TopicPartition> initPartitionsToInclude) {
final Set<TopicPartition> partitionsWithNoOffsets = new HashSet<>();
assignment.forEach((tp, partitionState) -> {
if (partitionState.shouldInitialize() && initPartitionsToInclude.test(tp)) {
if (defaultResetStrategy == AutoOffsetResetStrategy.NONE)
partitionsWithNoOffsets.add(tp);
else
requestOffsetReset(tp);
}
});
if (!partitionsWithNoOffsets.isEmpty())
throw new NoOffsetForPartitionException(partitionsWithNoOffsets);
}
|
Request reset for partitions that require a position, using the configured reset strategy.
@param initPartitionsToInclude Initializing partitions to include in the reset. Assigned partitions that
require a positions but are not included in this set won't be reset.
@throws NoOffsetForPartitionException If there are partitions assigned that require a position but
there is no reset strategy configured.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
| 859
|
[
"initPartitionsToInclude"
] |
void
| true
| 5
| 6.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
batchIterator
|
@Override
public AbstractIterator<MutableRecordBatch> batchIterator() {
return new RecordBatchIterator<>(new ByteBufferLogInputStream(buffer.duplicate(), Integer.MAX_VALUE));
}
|
The total number of bytes in this message set not including any partial, trailing messages. This
may be smaller than what is returned by {@link #sizeInBytes()}.
@return The number of valid bytes
|
java
|
clients/src/main/java/org/apache/kafka/common/record/MemoryRecords.java
| 109
|
[] | true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
postProcessProperties
|
default @Nullable PropertyValues postProcessProperties(PropertyValues pvs, Object bean, String beanName)
throws BeansException {
return pvs;
}
|
Post-process the given property values before the factory applies them
to the given bean.
<p>The default implementation returns the given {@code pvs} as-is.
@param pvs the property values that the factory is about to apply (never {@code null})
@param bean the bean instance created, but whose properties have not yet been set
@param beanName the name of the bean
@return the actual property values to apply to the given bean (can be the passed-in
PropertyValues instance), or {@code null} to skip property population
@throws org.springframework.beans.BeansException in case of errors
@since 5.1
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/InstantiationAwareBeanPostProcessor.java
| 105
|
[
"pvs",
"bean",
"beanName"
] |
PropertyValues
| true
| 1
| 6.64
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
findSchemaRoot
|
async function findSchemaRoot(filePath: string, filesResolver: FilesResolver): Promise<string | undefined> {
let dir = path.dirname(filePath)
while (dir !== filePath) {
const parentDir = path.dirname(dir)
const contents = await filesResolver.listDirContents(parentDir)
const prismaFiles = contents.filter((file) => path.extname(file) === '.prisma')
if (prismaFiles.length === 0) {
// No prisma files in directory, found root dir
return dir
}
dir = parentDir
}
// walked all the way to the root - should probably never happen, but it case it does
// let's say we have not found anything
return undefined
}
|
Given a single file path, returns
all files composing the same schema
@param filePath
@param filesResolver
@returns
|
typescript
|
packages/schema-files-loader/src/loadRelatedSchemaFiles.ts
| 32
|
[
"filePath",
"filesResolver"
] | true
| 3
| 7.6
|
prisma/prisma
| 44,834
|
jsdoc
| true
|
|
getProperty
|
@Override
public @Nullable Object getProperty(String name) {
if (StringUtils.hasLength(name)) {
for (Mapping mapping : MAPPINGS) {
String prefix = mapping.getPrefix();
if (name.startsWith(prefix)) {
String postfix = name.substring(prefix.length());
AnsiElement element = mapping.getElement(postfix);
if (element != null) {
return (this.encode) ? AnsiOutput.encode(element) : element;
}
}
}
}
return null;
}
|
Create a new {@link AnsiPropertySource} instance.
@param name the name of the property source
@param encode if the output should be encoded
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ansi/AnsiPropertySource.java
| 77
|
[
"name"
] |
Object
| true
| 5
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
increment
|
static <K> void increment(final Map<K, MutableInt> occurrences, final K boxed) {
occurrences.computeIfAbsent(boxed, k -> new MutableInt()).increment();
}
|
Gets a hash code for an array handling multidimensional arrays correctly.
<p>
Multi-dimensional primitive arrays are also handled correctly by this method.
</p>
@param array the array to get a hash code for, {@code null} returns zero.
@return a hash code for the array.
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 1,879
|
[
"occurrences",
"boxed"
] |
void
| true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
process
|
private void process(final ResetOffsetEvent event) {
try {
Collection<TopicPartition> parts = event.topicPartitions().isEmpty() ?
subscriptions.assignedPartitions() : event.topicPartitions();
subscriptions.requestOffsetReset(parts, event.offsetResetStrategy());
event.future().complete(null);
} catch (Exception e) {
event.future().completeExceptionally(e);
}
}
|
Process event indicating that the consumer unsubscribed from all topics. This will make
the consumer release its assignment and send a request to leave the group.
@param event Unsubscribe event containing a future that will complete when the callback
execution for releasing the assignment completes, and the request to leave
the group is sent out.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java
| 429
|
[
"event"
] |
void
| true
| 3
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
convert
|
private static Iterable<?> convert(Object value) {
if (value == null) {
return null;
}
if (value instanceof Map) {
return ((Map<?, ?>) value).values();
} else if ((value instanceof Iterable) && (value instanceof Path == false)) {
return (Iterable<?>) value;
} else if (value instanceof Object[]) {
return Arrays.asList((Object[]) value);
} else {
return null;
}
}
|
Returns a version used for serialising a response.
@return a compatible version
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java
| 1,316
|
[
"value"
] | true
| 6
| 6.72
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
clone
|
def clone(self, args=None, kwargs=None, **opts):
"""Create a copy of this signature.
Arguments:
args (Tuple): Partial args to be prepended to the existing args.
kwargs (Dict): Partial kwargs to be merged with existing kwargs.
options (Dict): Partial options to be merged with
existing options.
"""
args = args if args else ()
kwargs = kwargs if kwargs else {}
# need to deepcopy options so origins links etc. is not modified.
if args or kwargs or opts:
args, kwargs, opts = self._merge(args, kwargs, opts)
else:
args, kwargs, opts = self.args, self.kwargs, self.options
signature = Signature.from_dict({'task': self.task,
'args': tuple(args),
'kwargs': kwargs,
'options': deepcopy(opts),
'subtask_type': self.subtask_type,
'immutable': self.immutable},
app=self._app)
signature._type = self._type
return signature
|
Create a copy of this signature.
Arguments:
args (Tuple): Partial args to be prepended to the existing args.
kwargs (Dict): Partial kwargs to be merged with existing kwargs.
options (Dict): Partial options to be merged with
existing options.
|
python
|
celery/canvas.py
| 444
|
[
"self",
"args",
"kwargs"
] | false
| 7
| 6.24
|
celery/celery
| 27,741
|
google
| false
|
|
createHybridDigest
|
public static HybridDigest createHybridDigest(TDigestArrays arrays, double compression) {
return HybridDigest.create(arrays, compression);
}
|
Creates a {@link HybridDigest}. HybridDigest uses a SortingDigest for small sample populations, then switches to a MergingDigest,
thus combining the best of both implementations: fastest overall, small footprint and perfect accuracy for small populations,
constant memory footprint and acceptable accuracy for larger ones.
@param compression The compression parameter. 100 is a common value for normal uses. 1000 is extremely large.
The number of centroids retained will be a smallish (usually less than 10) multiple of this number.
@return the HybridDigest
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/TDigest.java
| 91
|
[
"arrays",
"compression"
] |
HybridDigest
| true
| 1
| 6.48
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
searchForOffsetFromPosition
|
public LogOffsetPosition searchForOffsetFromPosition(long targetOffset, int startingPosition) {
FileChannelRecordBatch prevBatch = null;
// The following logic is intentionally designed to minimize memory usage by avoiding
// unnecessary calls to lastOffset() for every batch.
// Instead, we use baseOffset() comparisons when possible, and only check lastOffset() when absolutely necessary.
for (FileChannelRecordBatch batch : batchesFrom(startingPosition)) {
// If baseOffset exactly equals targetOffset, return immediately
if (batch.baseOffset() == targetOffset) {
return LogOffsetPosition.fromBatch(batch);
}
// If we find the first batch with baseOffset greater than targetOffset
if (batch.baseOffset() > targetOffset) {
// If the previous batch contains the target
if (prevBatch != null && prevBatch.lastOffset() >= targetOffset)
return LogOffsetPosition.fromBatch(prevBatch);
else {
// If there's no previous batch or the previous batch doesn't contain the
// target, return the current batch
return LogOffsetPosition.fromBatch(batch);
}
}
prevBatch = batch;
}
// Only one case would reach here: all batches have baseOffset less than targetOffset
// Check if the last batch contains the target
if (prevBatch != null && prevBatch.lastOffset() >= targetOffset)
return LogOffsetPosition.fromBatch(prevBatch);
return null;
}
|
Search forward for the file position of the message batch whose last offset that is greater
than or equal to the target offset. If no such batch is found, return null.
@param targetOffset The offset to search for.
@param startingPosition The starting position in the file to begin searching from.
@return the batch's base offset, its physical position, and its size (including log overhead)
|
java
|
clients/src/main/java/org/apache/kafka/common/record/FileRecords.java
| 313
|
[
"targetOffset",
"startingPosition"
] |
LogOffsetPosition
| true
| 7
| 8.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
_try_cast
|
def _try_cast(
arr: list | np.ndarray,
dtype: np.dtype,
copy: bool,
) -> ArrayLike:
"""
Convert input to numpy ndarray and optionally cast to a given dtype.
Parameters
----------
arr : ndarray or list
Excludes: ExtensionArray, Series, Index.
dtype : np.dtype
copy : bool
If False, don't copy the data if not needed.
Returns
-------
np.ndarray or ExtensionArray
"""
is_ndarray = isinstance(arr, np.ndarray)
if dtype == object:
if not is_ndarray:
subarr = construct_1d_object_array_from_listlike(arr)
return subarr
return ensure_wrapped_if_datetimelike(arr).astype(dtype, copy=copy)
elif dtype.kind == "U":
# TODO: test cases with arr.dtype.kind in "mM"
if is_ndarray:
arr = cast(np.ndarray, arr)
shape = arr.shape
if arr.ndim > 1:
arr = arr.ravel()
else:
shape = (len(arr),)
return lib.ensure_string_array(arr, convert_na_value=False, copy=copy).reshape(
shape
)
elif dtype.kind in "mM":
if is_ndarray:
arr = cast(np.ndarray, arr)
if arr.ndim == 2 and arr.shape[1] == 1:
# GH#60081: DataFrame Constructor converts 1D data to array of
# shape (N, 1), but maybe_cast_to_datetime assumes 1D input
return maybe_cast_to_datetime(arr[:, 0], dtype).reshape(arr.shape)
return maybe_cast_to_datetime(arr, dtype)
# GH#15832: Check if we are requesting a numeric dtype and
# that we can convert the data to the requested dtype.
elif dtype.kind in "iu":
# this will raise if we have e.g. floats
subarr = maybe_cast_to_integer_array(arr, dtype)
elif not copy:
subarr = np.asarray(arr, dtype=dtype)
else:
subarr = np.array(arr, dtype=dtype, copy=copy)
return subarr
|
Convert input to numpy ndarray and optionally cast to a given dtype.
Parameters
----------
arr : ndarray or list
Excludes: ExtensionArray, Series, Index.
dtype : np.dtype
copy : bool
If False, don't copy the data if not needed.
Returns
-------
np.ndarray or ExtensionArray
|
python
|
pandas/core/construction.py
| 791
|
[
"arr",
"dtype",
"copy"
] |
ArrayLike
| true
| 14
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
ISO8859_1_UNESCAPE
|
public static String[][] ISO8859_1_UNESCAPE() {
return ISO8859_1_UNESCAPE.clone();
}
|
Reverse of {@link #ISO8859_1_ESCAPE()} for unescaping purposes.
@return the mapping table.
|
java
|
src/main/java/org/apache/commons/lang3/text/translate/EntityArrays.java
| 445
|
[] | true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
introspectInterfaces
|
private void introspectInterfaces(Class<?> beanClass, Class<?> currClass, Set<String> readMethodNames)
throws IntrospectionException {
for (Class<?> ifc : currClass.getInterfaces()) {
if (!ClassUtils.isJavaLanguageInterface(ifc)) {
for (PropertyDescriptor pd : getBeanInfo(ifc).getPropertyDescriptors()) {
PropertyDescriptor existingPd = this.propertyDescriptors.get(pd.getName());
if (existingPd == null ||
(existingPd.getReadMethod() == null && pd.getReadMethod() != null)) {
// GenericTypeAwarePropertyDescriptor leniently resolves a set* write method
// against a declared read method, so we prefer read method descriptors here.
pd = buildGenericTypeAwarePropertyDescriptor(beanClass, pd);
if (pd.getWriteMethod() == null &&
isInvalidReadOnlyPropertyType(pd.getPropertyType(), beanClass)) {
// Ignore read-only properties such as ClassLoader - no need to bind to those
continue;
}
this.propertyDescriptors.put(pd.getName(), pd);
Method readMethod = pd.getReadMethod();
if (readMethod != null) {
readMethodNames.add(readMethod.getName());
}
}
}
introspectInterfaces(ifc, ifc, readMethodNames);
}
}
}
|
Create a new CachedIntrospectionResults instance for the given class.
@param beanClass the bean class to analyze
@throws BeansException in case of introspection failure
|
java
|
spring-beans/src/main/java/org/springframework/beans/CachedIntrospectionResults.java
| 303
|
[
"beanClass",
"currClass",
"readMethodNames"
] |
void
| true
| 8
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
all_locale_paths
|
def all_locale_paths():
"""
Return a list of paths to user-provides languages files.
"""
globalpath = os.path.join(
os.path.dirname(sys.modules[settings.__module__].__file__), "locale"
)
app_paths = []
for app_config in apps.get_app_configs():
locale_path = os.path.join(app_config.path, "locale")
if os.path.exists(locale_path):
app_paths.append(locale_path)
return [globalpath, *settings.LOCALE_PATHS, *app_paths]
|
Return a list of paths to user-provides languages files.
|
python
|
django/utils/translation/trans_real.py
| 450
|
[] | false
| 3
| 6.24
|
django/django
| 86,204
|
unknown
| false
|
|
is_mask
|
def is_mask(m):
"""
Return True if m is a valid, standard mask.
This function does not check the contents of the input, only that the
type is MaskType. In particular, this function returns False if the
mask has a flexible dtype.
Parameters
----------
m : array_like
Array to test.
Returns
-------
result : bool
True if `m.dtype.type` is MaskType, False otherwise.
See Also
--------
ma.isMaskedArray : Test whether input is an instance of MaskedArray.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> m = ma.masked_equal([0, 1, 0, 2, 3], 0)
>>> m
masked_array(data=[--, 1, --, 2, 3],
mask=[ True, False, True, False, False],
fill_value=0)
>>> ma.is_mask(m)
False
>>> ma.is_mask(m.mask)
True
Input must be an ndarray (or have similar attributes)
for it to be considered a valid mask.
>>> m = [False, True, False]
>>> ma.is_mask(m)
False
>>> m = np.array([False, True, False])
>>> m
array([False, True, False])
>>> ma.is_mask(m)
True
Arrays with complex dtypes don't return True.
>>> dtype = np.dtype({'names':['monty', 'pithon'],
... 'formats':[bool, bool]})
>>> dtype
dtype([('monty', '|b1'), ('pithon', '|b1')])
>>> m = np.array([(True, False), (False, True), (True, False)],
... dtype=dtype)
>>> m
array([( True, False), (False, True), ( True, False)],
dtype=[('monty', '?'), ('pithon', '?')])
>>> ma.is_mask(m)
False
"""
try:
return m.dtype.type is MaskType
except AttributeError:
return False
|
Return True if m is a valid, standard mask.
This function does not check the contents of the input, only that the
type is MaskType. In particular, this function returns False if the
mask has a flexible dtype.
Parameters
----------
m : array_like
Array to test.
Returns
-------
result : bool
True if `m.dtype.type` is MaskType, False otherwise.
See Also
--------
ma.isMaskedArray : Test whether input is an instance of MaskedArray.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> m = ma.masked_equal([0, 1, 0, 2, 3], 0)
>>> m
masked_array(data=[--, 1, --, 2, 3],
mask=[ True, False, True, False, False],
fill_value=0)
>>> ma.is_mask(m)
False
>>> ma.is_mask(m.mask)
True
Input must be an ndarray (or have similar attributes)
for it to be considered a valid mask.
>>> m = [False, True, False]
>>> ma.is_mask(m)
False
>>> m = np.array([False, True, False])
>>> m
array([False, True, False])
>>> ma.is_mask(m)
True
Arrays with complex dtypes don't return True.
>>> dtype = np.dtype({'names':['monty', 'pithon'],
... 'formats':[bool, bool]})
>>> dtype
dtype([('monty', '|b1'), ('pithon', '|b1')])
>>> m = np.array([(True, False), (False, True), (True, False)],
... dtype=dtype)
>>> m
array([( True, False), (False, True), ( True, False)],
dtype=[('monty', '?'), ('pithon', '?')])
>>> ma.is_mask(m)
False
|
python
|
numpy/ma/core.py
| 1,517
|
[
"m"
] | false
| 1
| 6.4
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
limit
|
@J2ktIncompatible
public static InputStream limit(InputStream in, long limit) {
return new LimitedInputStream(in, limit);
}
|
Wraps a {@link InputStream}, limiting the number of bytes which can be read.
@param in the input stream to be wrapped
@param limit the maximum number of bytes to be read
@return a length-limited {@link InputStream}
@since 14.0 (since 1.0 as com.google.common.io.LimitInputStream)
|
java
|
android/guava/src/com/google/common/io/ByteStreams.java
| 709
|
[
"in",
"limit"
] |
InputStream
| true
| 1
| 6.8
|
google/guava
| 51,352
|
javadoc
| false
|
getAnnotationElementStringValue
|
String getAnnotationElementStringValue(AnnotationMirror annotation, String name) {
return annotation.getElementValues()
.entrySet()
.stream()
.filter((element) -> element.getKey().getSimpleName().toString().equals(name))
.map((element) -> asString(getAnnotationValue(element.getValue())))
.findFirst()
.orElse(null);
}
|
Collect the annotations that are annotated or meta-annotated with the specified
{@link TypeElement annotation}.
@param element the element to inspect
@param annotationType the annotation to discover
@return the annotations that are annotated or meta-annotated with this annotation
|
java
|
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/MetadataGenerationEnvironment.java
| 319
|
[
"annotation",
"name"
] |
String
| true
| 1
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
rethrow
|
private void rethrow(Throwable ex) throws IOException, ServletException {
if (ex instanceof RuntimeException runtimeException) {
throw runtimeException;
}
if (ex instanceof Error error) {
throw error;
}
if (ex instanceof IOException ioException) {
throw ioException;
}
if (ex instanceof ServletException servletException) {
throw servletException;
}
throw new IllegalStateException(ex);
}
|
Return the description for the given request. By default this method will return a
description based on the request {@code servletPath} and {@code pathInfo}.
@param request the source request
@return the description
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/web/servlet/support/ErrorPageFilter.java
| 262
|
[
"ex"
] |
void
| true
| 5
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
pendingRequestCount
|
public int pendingRequestCount() {
lock.lock();
try {
return unsent.requestCount() + client.inFlightRequestCount();
} finally {
lock.unlock();
}
}
|
Get the total count of pending requests from all nodes. This includes both requests that
have been transmitted (i.e. in-flight requests) and those which are awaiting transmission.
@return The total count of pending requests
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java
| 397
|
[] | true
| 1
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
createDataSource
|
protected DataSource createDataSource(
final InputStreamSource inputStreamSource, final String contentType, final String name) {
return new DataSource() {
@Override
public InputStream getInputStream() throws IOException {
return inputStreamSource.getInputStream();
}
@Override
public OutputStream getOutputStream() {
throw new UnsupportedOperationException("Read-only jakarta.activation.DataSource");
}
@Override
public String getContentType() {
return contentType;
}
@Override
public String getName() {
return name;
}
};
}
|
Create an Activation Framework DataSource for the given InputStreamSource.
@param inputStreamSource the InputStreamSource (typically a Spring Resource)
@param contentType the content type
@param name the name of the DataSource
@return the Activation Framework DataSource
|
java
|
spring-context-support/src/main/java/org/springframework/mail/javamail/MimeMessageHelper.java
| 1,207
|
[
"inputStreamSource",
"contentType",
"name"
] |
DataSource
| true
| 1
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
forPayload
|
static <T> ApplicationListener<PayloadApplicationEvent<T>> forPayload(Consumer<T> consumer) {
return event -> consumer.accept(event.getPayload());
}
|
Create a new {@code ApplicationListener} for the given payload consumer.
@param consumer the event payload consumer
@param <T> the type of the event payload
@return a corresponding {@code ApplicationListener} instance
@since 5.3
@see PayloadApplicationEvent
|
java
|
spring-context/src/main/java/org/springframework/context/ApplicationListener.java
| 72
|
[
"consumer"
] | true
| 1
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
_escape_latex_math
|
def _escape_latex_math(s: str) -> str:
r"""
All characters in LaTeX math mode are preserved.
The substrings in LaTeX math mode, which either are surrounded
by two characters ``$`` or start with the character ``\(`` and end with ``\)``,
are preserved without escaping. Otherwise regular LaTeX escaping applies.
Parameters
----------
s : str
Input to be escaped
Return
------
str :
Escaped string
"""
s = s.replace(r"\$", r"rt8§=§7wz")
ps_d = re.compile(r"\$.*?\$").search(s, 0)
ps_p = re.compile(r"\(.*?\)").search(s, 0)
mode = []
if ps_d:
mode.append(ps_d.span()[0])
if ps_p:
mode.append(ps_p.span()[0])
if len(mode) == 0:
return _escape_latex(s.replace(r"rt8§=§7wz", r"\$"))
if s[mode[0]] == r"$":
return _math_mode_with_dollar(s.replace(r"rt8§=§7wz", r"\$"))
if s[mode[0] - 1 : mode[0] + 1] == r"\(":
return _math_mode_with_parentheses(s.replace(r"rt8§=§7wz", r"\$"))
else:
return _escape_latex(s.replace(r"rt8§=§7wz", r"\$"))
|
r"""
All characters in LaTeX math mode are preserved.
The substrings in LaTeX math mode, which either are surrounded
by two characters ``$`` or start with the character ``\(`` and end with ``\)``,
are preserved without escaping. Otherwise regular LaTeX escaping applies.
Parameters
----------
s : str
Input to be escaped
Return
------
str :
Escaped string
|
python
|
pandas/io/formats/style_render.py
| 2,648
|
[
"s"
] |
str
| true
| 7
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
svd_flip
|
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
If u_based_decision is False, then the same sign correction is applied to
so that the rows in v that are largest in absolute value are always
positive.
Parameters
----------
u : ndarray
Parameters u and v are the output of `linalg.svd` or
:func:`~sklearn.utils.extmath.randomized_svd`, with matching inner
dimensions so one can compute `np.dot(u * s, v)`.
u can be None if `u_based_decision` is False.
v : ndarray
Parameters u and v are the output of `linalg.svd` or
:func:`~sklearn.utils.extmath.randomized_svd`, with matching inner
dimensions so one can compute `np.dot(u * s, v)`. The input v should
really be called vt to be consistent with scipy's output.
v can be None if `u_based_decision` is True.
u_based_decision : bool, default=True
If True, use the columns of u as the basis for sign flipping.
Otherwise, use the rows of v. The choice of which variable to base the
decision on is generally algorithm dependent.
Returns
-------
u_adjusted : ndarray
Array u with adjusted columns and the same dimensions as u.
v_adjusted : ndarray
Array v with adjusted rows and the same dimensions as v.
"""
xp, _ = get_namespace(*[a for a in [u, v] if a is not None])
if u_based_decision:
# columns of u, rows of v, or equivalently rows of u.T and v
max_abs_u_cols = xp.argmax(xp.abs(u.T), axis=1)
shift = xp.arange(u.T.shape[0], device=device(u))
indices = max_abs_u_cols + shift * u.T.shape[1]
signs = xp.sign(xp.take(xp.reshape(u.T, (-1,)), indices, axis=0))
u *= signs[np.newaxis, :]
if v is not None:
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_v_rows = xp.argmax(xp.abs(v), axis=1)
shift = xp.arange(v.shape[0], device=device(v))
indices = max_abs_v_rows + shift * v.shape[1]
signs = xp.sign(xp.take(xp.reshape(v, (-1,)), indices, axis=0))
if u is not None:
u *= signs[np.newaxis, :]
v *= signs[:, np.newaxis]
return u, v
|
Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
If u_based_decision is False, then the same sign correction is applied to
so that the rows in v that are largest in absolute value are always
positive.
Parameters
----------
u : ndarray
Parameters u and v are the output of `linalg.svd` or
:func:`~sklearn.utils.extmath.randomized_svd`, with matching inner
dimensions so one can compute `np.dot(u * s, v)`.
u can be None if `u_based_decision` is False.
v : ndarray
Parameters u and v are the output of `linalg.svd` or
:func:`~sklearn.utils.extmath.randomized_svd`, with matching inner
dimensions so one can compute `np.dot(u * s, v)`. The input v should
really be called vt to be consistent with scipy's output.
v can be None if `u_based_decision` is True.
u_based_decision : bool, default=True
If True, use the columns of u as the basis for sign flipping.
Otherwise, use the rows of v. The choice of which variable to base the
decision on is generally algorithm dependent.
Returns
-------
u_adjusted : ndarray
Array u with adjusted columns and the same dimensions as u.
v_adjusted : ndarray
Array v with adjusted rows and the same dimensions as v.
|
python
|
sklearn/utils/extmath.py
| 920
|
[
"u",
"v",
"u_based_decision"
] | false
| 5
| 6.16
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
isRequestStateInProgress
|
private boolean isRequestStateInProgress(AcknowledgeRequestState acknowledgeRequestState) {
if (acknowledgeRequestState == null) {
return false;
} else if (acknowledgeRequestState.isCloseRequest()) {
return !acknowledgeRequestState.isProcessed;
} else {
return !(acknowledgeRequestState.isEmpty());
}
}
|
Prunes the empty acknowledgementRequestStates in {@link #acknowledgeRequestStates}
@return Returns true if there are still any acknowledgements left to be processed.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java
| 511
|
[
"acknowledgeRequestState"
] | true
| 3
| 6.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
finite
|
public static void finite(final double value, final String message, final Object... values) {
if (Double.isNaN(value) || Double.isInfinite(value)) {
throw new IllegalArgumentException(getMessage(message, values));
}
}
|
Validates that the specified argument is not infinite or Not-a-Number (NaN);
otherwise throwing an exception with the specified message.
<pre>Validate.finite(myDouble, "The argument must contain a numeric value");</pre>
@param value the value to validate.
@param message the {@link String#format(String, Object...)} exception message if invalid, not null.
@param values the optional values for the formatted exception message.
@throws IllegalArgumentException if the value is infinite or Not-a-Number (NaN).
@see #finite(double)
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/Validate.java
| 237
|
[
"value",
"message"
] |
void
| true
| 3
| 6.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
or
|
default FailableBiPredicate<T, U, E> or(final FailableBiPredicate<? super T, ? super U, E> other) {
Objects.requireNonNull(other);
return (final T t, final U u) -> test(t, u) || other.test(t, u);
}
|
Returns a composed {@link FailableBiPredicate} like {@link BiPredicate#and(BiPredicate)}.
@param other a predicate that will be logically-ORed with this predicate.
@return a composed {@link FailableBiPredicate} like {@link BiPredicate#and(BiPredicate)}.
@throws NullPointerException if other is null
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableBiPredicate.java
| 96
|
[
"other"
] | true
| 2
| 7.36
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
tryAcquire
|
@SuppressWarnings("GoodTime") // should accept a java.time.Duration
public boolean tryAcquire(long timeout, TimeUnit unit) {
return tryAcquire(1, timeout, unit);
}
|
Acquires a permit from this {@code RateLimiter} if it can be obtained without exceeding the
specified {@code timeout}, or returns {@code false} immediately (without waiting) if the permit
would not have been granted before the timeout expired.
<p>This method is equivalent to {@code tryAcquire(1, timeout, unit)}.
@param timeout the maximum time to wait for the permit. Negative values are treated as zero.
@param unit the time unit of the timeout argument
@return {@code true} if the permit was acquired, {@code false} otherwise
@throws IllegalArgumentException if the requested number of permits is negative or zero
|
java
|
android/guava/src/com/google/common/util/concurrent/RateLimiter.java
| 352
|
[
"timeout",
"unit"
] | true
| 1
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
runPeriodic
|
private void runPeriodic() {
if (isCancelled() || isCompleted() || threadPool.scheduler().isShutdown()) {
logger.debug("Not running periodic downloader because task is cancelled, completed, or shutting down");
return;
}
logger.trace("Running periodic downloader");
// There's a chance that an on-demand run is already in progress, in which case this periodic run is redundant.
// However, we don't try to avoid that case here, as it's harmless to run the downloader more than strictly necessary (due to
// the high default poll interval of 3d), and it simplifies the logic considerably.
requestRunOnDemand();
synchronized (this) {
scheduledPeriodicRun = threadPool.schedule(this::runPeriodic, pollIntervalSupplier.get(), threadPool.generic());
}
}
|
Runs the downloader now and schedules the next periodic run using the poll interval.
|
java
|
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/AbstractGeoIpDownloader.java
| 89
|
[] |
void
| true
| 4
| 7.04
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
assertNoUnboundChildren
|
private void assertNoUnboundChildren(Set<String> unboundIndexedChildren, IterableConfigurationPropertySource source,
ConfigurationPropertyName root) {
if (unboundIndexedChildren.isEmpty()) {
return;
}
Set<ConfigurationProperty> unboundProperties = new TreeSet<>();
for (ConfigurationPropertyName name : source.filter(root::isAncestorOf)) {
ConfigurationPropertyName choppedName = name.chop(root.getNumberOfElements() + 1);
if (choppedName.isLastElementIndexed()
&& unboundIndexedChildren.contains(choppedName.getLastElement(Form.UNIFORM))) {
unboundProperties.add(source.getConfigurationProperty(name));
}
}
if (!unboundProperties.isEmpty()) {
throw new UnboundConfigurationPropertiesException(unboundProperties);
}
}
|
Bind indexed elements to the supplied collection.
@param name the name of the property to bind
@param target the target bindable
@param elementBinder the binder to use for elements
@param aggregateType the aggregate type, may be a collection or an array
@param elementType the element type
@param result the destination for results
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/IndexedElementsBinder.java
| 141
|
[
"unboundIndexedChildren",
"source",
"root"
] |
void
| true
| 5
| 6.4
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
tryAcquire
|
@IgnoreJRERequirement // Users will use this only if they're already using Duration.
public boolean tryAcquire(Duration timeout) {
return tryAcquire(1, toNanosSaturated(timeout), TimeUnit.NANOSECONDS);
}
|
Acquires a permit from this {@code RateLimiter} if it can be obtained without exceeding the
specified {@code timeout}, or returns {@code false} immediately (without waiting) if the permit
would not have been granted before the timeout expired.
<p>This method is equivalent to {@code tryAcquire(1, timeout)}.
@param timeout the maximum time to wait for the permit. Negative values are treated as zero.
@return {@code true} if the permit was acquired, {@code false} otherwise
@throws IllegalArgumentException if the requested number of permits is negative or zero
@since 33.4.0 (but since 28.0 in the JRE flavor)
|
java
|
android/guava/src/com/google/common/util/concurrent/RateLimiter.java
| 335
|
[
"timeout"
] | true
| 1
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
add
|
void add(ItemMetadata metadata, Consumer<ItemMetadata> onConflict) {
ItemMetadata existing = find(metadata.getName());
if (existing != null) {
onConflict.accept(existing);
return;
}
add(metadata);
}
|
Creates a new {@code MetadataProcessor} instance.
@param mergeRequired specify whether an item can be merged
@param previousMetadata any previous metadata or {@code null}
|
java
|
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/MetadataCollector.java
| 62
|
[
"metadata",
"onConflict"
] |
void
| true
| 2
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
immutableCopy
|
public ImmutableSet<@NonNull E> immutableCopy() {
// Not using ImmutableSet.copyOf() to avoid iterating thrice (isEmpty, size, iterator).
int maxSize = maxSize();
if (maxSize == 0) {
return ImmutableSet.of();
}
ImmutableSet.Builder<@NonNull E> builder = ImmutableSet.builderWithExpectedSize(maxSize);
for (E element : this) {
builder.add(checkNotNull(element));
}
return builder.build();
}
|
Returns an immutable copy of the current contents of this set view. Does not support null
elements.
<p><b>Warning:</b> this may have unexpected results if a backing set of this view uses a
nonstandard notion of equivalence, for example if it is a {@link TreeSet} using a comparator
that is inconsistent with {@link Object#equals(Object)}.
|
java
|
android/guava/src/com/google/common/collect/Sets.java
| 602
|
[] | true
| 2
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
|
markBeanAsCreated
|
protected void markBeanAsCreated(String beanName) {
if (!this.alreadyCreated.contains(beanName)) {
synchronized (this.mergedBeanDefinitions) {
if (!isBeanEligibleForMetadataCaching(beanName)) {
// Let the bean definition get re-merged now that we're actually creating
// the bean... just in case some of its metadata changed in the meantime.
clearMergedBeanDefinition(beanName);
}
this.alreadyCreated.add(beanName);
}
}
}
|
Mark the specified bean as already created (or about to be created).
<p>This allows the bean factory to optimize its caching for repeated
creation of the specified bean.
@param beanName the name of the bean
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
| 1,772
|
[
"beanName"
] |
void
| true
| 3
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getEnvironment
|
@Override
public ConfigurableEnvironment getEnvironment() {
if (this.environment == null) {
this.environment = createEnvironment();
}
return this.environment;
}
|
Return the {@code Environment} for this application context in configurable
form, allowing for further customization.
<p>If none specified, a default environment will be initialized via
{@link #createEnvironment()}.
|
java
|
spring-context/src/main/java/org/springframework/context/support/AbstractApplicationContext.java
| 335
|
[] |
ConfigurableEnvironment
| true
| 2
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getdata
|
def getdata(a, subok=True):
"""
Return the data of a masked array as an ndarray.
Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``,
else return `a` as a ndarray or subclass (depending on `subok`) if not.
Parameters
----------
a : array_like
Input ``MaskedArray``, alternatively a ndarray or a subclass thereof.
subok : bool
Whether to force the output to be a `pure` ndarray (False) or to
return a subclass of ndarray if appropriate (True, default).
See Also
--------
getmask : Return the mask of a masked array, or nomask.
getmaskarray : Return the mask of a masked array, or full array of False.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(
data=[[1, --],
[3, 4]],
mask=[[False, True],
[False, False]],
fill_value=2)
>>> ma.getdata(a)
array([[1, 2],
[3, 4]])
Equivalently use the ``MaskedArray`` `data` attribute.
>>> a.data
array([[1, 2],
[3, 4]])
"""
try:
data = a._data
except AttributeError:
data = np.array(a, copy=None, subok=subok)
if not subok:
return data.view(ndarray)
return data
|
Return the data of a masked array as an ndarray.
Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``,
else return `a` as a ndarray or subclass (depending on `subok`) if not.
Parameters
----------
a : array_like
Input ``MaskedArray``, alternatively a ndarray or a subclass thereof.
subok : bool
Whether to force the output to be a `pure` ndarray (False) or to
return a subclass of ndarray if appropriate (True, default).
See Also
--------
getmask : Return the mask of a masked array, or nomask.
getmaskarray : Return the mask of a masked array, or full array of False.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(
data=[[1, --],
[3, 4]],
mask=[[False, True],
[False, False]],
fill_value=2)
>>> ma.getdata(a)
array([[1, 2],
[3, 4]])
Equivalently use the ``MaskedArray`` `data` attribute.
>>> a.data
array([[1, 2],
[3, 4]])
|
python
|
numpy/ma/core.py
| 708
|
[
"a",
"subok"
] | false
| 2
| 7.68
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
toIntegerObject
|
public static Integer toIntegerObject(final Boolean bool, final Integer trueValue, final Integer falseValue, final Integer nullValue) {
if (bool == null) {
return nullValue;
}
return bool.booleanValue() ? trueValue : falseValue;
}
|
Converts a Boolean to an Integer specifying the conversion values.
<pre>
BooleanUtils.toIntegerObject(Boolean.TRUE, Integer.valueOf(1), Integer.valueOf(0), Integer.valueOf(2)) = Integer.valueOf(1)
BooleanUtils.toIntegerObject(Boolean.FALSE, Integer.valueOf(1), Integer.valueOf(0), Integer.valueOf(2)) = Integer.valueOf(0)
BooleanUtils.toIntegerObject(null, Integer.valueOf(1), Integer.valueOf(0), Integer.valueOf(2)) = Integer.valueOf(2)
</pre>
@param bool the Boolean to convert
@param trueValue the value to return if {@code true}, may be {@code null}
@param falseValue the value to return if {@code false}, may be {@code null}
@param nullValue the value to return if {@code null}, may be {@code null}
@return the appropriate value
|
java
|
src/main/java/org/apache/commons/lang3/BooleanUtils.java
| 998
|
[
"bool",
"trueValue",
"falseValue",
"nullValue"
] |
Integer
| true
| 3
| 7.28
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
nullToEmpty
|
public static Byte[] nullToEmpty(final Byte[] array) {
return nullTo(array, EMPTY_BYTE_OBJECT_ARRAY);
}
|
Defensive programming technique to change a {@code null}
reference to an empty one.
<p>
This method returns an empty array for a {@code null} input array.
</p>
<p>
As a memory optimizing technique an empty array passed in will be overridden with
the empty {@code public static} references in this class.
</p>
@param array the array to check for {@code null} or empty.
@return the same array, {@code public static} empty array if {@code null} or empty input.
@since 2.5
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 4,337
|
[
"array"
] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
toString
|
@Override
public String toString() {
return "LoggerConfiguration [name=" + this.name + ", levelConfiguration=" + this.levelConfiguration
+ ", inheritedLevelConfiguration=" + this.inheritedLevelConfiguration + "]";
}
|
Return the level configuration for the given scope.
@param scope the configuration scope
@return the level configuration or {@code null} for
{@link ConfigurationScope#DIRECT direct scope} results without applied
configuration
@since 2.7.13
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/LoggerConfiguration.java
| 140
|
[] |
String
| true
| 1
| 6.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
markSequenceUnresolved
|
synchronized void markSequenceUnresolved(ProducerBatch batch) {
int nextSequence = batch.lastSequence() + 1;
partitionsWithUnresolvedSequences.compute(batch.topicPartition,
(k, v) -> v == null ? nextSequence : Math.max(v, nextSequence));
log.debug("Marking partition {} unresolved with next sequence number {}", batch.topicPartition,
partitionsWithUnresolvedSequences.get(batch.topicPartition));
}
|
Returns the first inflight sequence for a given partition. This is the base sequence of an inflight batch with
the lowest sequence number.
@return the lowest inflight sequence if the transaction manager is tracking inflight requests for this partition.
If there are no inflight requests being tracked for this partition, this method will return
RecordBatch.NO_SEQUENCE.
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java
| 842
|
[
"batch"
] |
void
| true
| 2
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
wrap
|
public static <T> Typed<T> wrap(final Class<T> type) {
return wrap((Type) type);
}
|
Wraps the specified {@link Class} in a {@link Typed} wrapper.
@param <T> generic type.
@param type to wrap.
@return {@code Typed<T>}.
@since 3.2
|
java
|
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
| 1,731
|
[
"type"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
urlDecode
|
public static String urlDecode(String value) {
return URLDecodeProcessor.apply(value);
}
|
Uses {@link URLDecodeProcessor} to URL-decode a string.
@param value string to decode
@return URL-decoded value
|
java
|
modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/Processors.java
| 112
|
[
"value"
] |
String
| true
| 1
| 6.16
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
wrap
|
@SuppressWarnings("rawtypes")
public static Object wrap(Object o) {
if (o == null) {
return NULL;
}
if (o instanceof JSONArray || o instanceof JSONObject) {
return o;
}
if (o.equals(NULL)) {
return o;
}
try {
if (o instanceof Collection) {
return new JSONArray((Collection) o);
}
else if (o.getClass().isArray()) {
return new JSONArray(o);
}
if (o instanceof Map) {
return new JSONObject((Map) o);
}
if (o instanceof Boolean || o instanceof Byte || o instanceof Character || o instanceof Double
|| o instanceof Float || o instanceof Integer || o instanceof Long || o instanceof Short
|| o instanceof String) {
return o;
}
if (o.getClass().getPackage().getName().startsWith("java.")) {
return o.toString();
}
}
catch (Exception ex) {
// Ignore
}
return null;
}
|
Wraps the given object if necessary.
<p>
If the object is null or, returns {@link #NULL}. If the object is a
{@code JSONArray} or {@code JSONObject}, no wrapping is necessary. If the object is
{@code NULL}, no wrapping is necessary. If the object is an array or
{@code Collection}, returns an equivalent {@code JSONArray}. If the object is a
{@code Map}, returns an equivalent {@code JSONObject}. If the object is a primitive
wrapper type or {@code String}, returns the object. Otherwise if the object is from
a {@code java} package, returns the result of {@code toString}. If wrapping fails,
returns null.
@param o the object to wrap
@return the wrapped object
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONObject.java
| 800
|
[
"o"
] |
Object
| true
| 19
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getReferencesForSuperKeyword
|
function getReferencesForSuperKeyword(superKeyword: Node): SymbolAndEntries[] | undefined {
let searchSpaceNode: SuperContainer | ClassLikeDeclaration | TypeLiteralNode | InterfaceDeclaration | ObjectLiteralExpression | undefined = getSuperContainer(superKeyword, /*stopOnFunctions*/ false);
if (!searchSpaceNode) {
return undefined;
}
// Whether 'super' occurs in a static context within a class.
let staticFlag = ModifierFlags.Static;
switch (searchSpaceNode.kind) {
case SyntaxKind.PropertyDeclaration:
case SyntaxKind.PropertySignature:
case SyntaxKind.MethodDeclaration:
case SyntaxKind.MethodSignature:
case SyntaxKind.Constructor:
case SyntaxKind.GetAccessor:
case SyntaxKind.SetAccessor:
staticFlag &= getSyntacticModifierFlags(searchSpaceNode);
searchSpaceNode = searchSpaceNode.parent; // re-assign to be the owning class
break;
default:
return undefined;
}
const sourceFile = searchSpaceNode.getSourceFile();
const references = mapDefined(getPossibleSymbolReferenceNodes(sourceFile, "super", searchSpaceNode), node => {
if (node.kind !== SyntaxKind.SuperKeyword) {
return;
}
const container = getSuperContainer(node, /*stopOnFunctions*/ false);
// If we have a 'super' container, we must have an enclosing class.
// Now make sure the owning class is the same as the search-space
// and has the same static qualifier as the original 'super's owner.
return container && isStatic(container) === !!staticFlag && container.parent.symbol === searchSpaceNode.symbol ? nodeEntry(node) : undefined;
});
return [{ definition: { type: DefinitionKind.Symbol, symbol: searchSpaceNode.symbol }, references }];
}
|
Determines if the parent symbol occurs somewhere in the child's ancestry. If the parent symbol
is an interface, determines if some ancestor of the child symbol extends or inherits from it.
Also takes in a cache of previous results which makes this slightly more efficient and is
necessary to avoid potential loops like so:
class A extends B { }
class B extends A { }
We traverse the AST rather than using the type checker because users are typically only interested
in explicit implementations of an interface/class when calling "Go to Implementation". Sibling
implementations of types that share a common ancestor with the type whose implementation we are
searching for need to be filtered out of the results. The type checker doesn't let us make the
distinction between structurally compatible implementations and explicit implementations, so we
must use the AST.
@param symbol A class or interface Symbol
@param parent Another class or interface Symbol
@param cachedResults A map of symbol id pairs (i.e. "child,parent") to booleans indicating previous results
|
typescript
|
src/services/findAllReferences.ts
| 2,362
|
[
"superKeyword"
] | true
| 6
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
dtypes
|
def dtypes(self, *, device=None, kind=None):
    """
    The array API data types supported by CuPy.

    Note that this function only returns data types that are defined by
    the array API.

    Parameters
    ----------
    device : str, optional
        The device to get the data types for.
    kind : str or tuple of str, optional
        The kind of data types to return. If ``None``, all data types are
        returned. If a string, only data types of that kind are returned.
        If a tuple, a dictionary containing the union of the given kinds
        is returned. The following kinds are supported:

        - ``'bool'``: boolean data types (i.e., ``bool``).
        - ``'signed integer'``: signed integer data types (i.e., ``int8``,
          ``int16``, ``int32``, ``int64``).
        - ``'unsigned integer'``: unsigned integer data types (i.e.,
          ``uint8``, ``uint16``, ``uint32``, ``uint64``).
        - ``'integral'``: integer data types. Shorthand for ``('signed
          integer', 'unsigned integer')``.
        - ``'real floating'``: real-valued floating-point data types
          (i.e., ``float32``, ``float64``).
        - ``'complex floating'``: complex floating-point data types (i.e.,
          ``complex64``, ``complex128``).
        - ``'numeric'``: numeric data types. Shorthand for ``('integral',
          'real floating', 'complex floating')``.

    Returns
    -------
    dtypes : dict
        A dictionary mapping the names of data types to the corresponding
        CuPy data types.

    Raises
    ------
    ValueError
        If ``kind`` is not a recognized kind name (or tuple thereof).

    See Also
    --------
    __array_namespace_info__.capabilities,
    __array_namespace_info__.default_device,
    __array_namespace_info__.default_dtypes,
    __array_namespace_info__.devices

    Examples
    --------
    >>> info = xp.__array_namespace_info__()
    >>> info.dtypes(kind='signed integer')
    {'int8': cupy.int8,
     'int16': cupy.int16,
     'int32': cupy.int32,
     'int64': cupy.int64}
    """
    # TODO: Does this depend on device?
    if kind is None:
        return {
            "bool": dtype(bool),
            "int8": dtype(int8),
            "int16": dtype(int16),
            "int32": dtype(int32),
            "int64": dtype(int64),
            "uint8": dtype(uint8),
            "uint16": dtype(uint16),
            "uint32": dtype(uint32),
            "uint64": dtype(uint64),
            "float32": dtype(float32),
            "float64": dtype(float64),
            "complex64": dtype(complex64),
            "complex128": dtype(complex128),
        }
    if kind == "bool":
        # Wrap in dtype() for consistency with every other branch: callers
        # expect dtype instances, not the raw Python scalar type.
        return {"bool": dtype(bool)}
    if kind == "signed integer":
        return {
            "int8": dtype(int8),
            "int16": dtype(int16),
            "int32": dtype(int32),
            "int64": dtype(int64),
        }
    if kind == "unsigned integer":
        return {
            "uint8": dtype(uint8),
            "uint16": dtype(uint16),
            "uint32": dtype(uint32),
            "uint64": dtype(uint64),
        }
    if kind == "integral":
        return {
            "int8": dtype(int8),
            "int16": dtype(int16),
            "int32": dtype(int32),
            "int64": dtype(int64),
            "uint8": dtype(uint8),
            "uint16": dtype(uint16),
            "uint32": dtype(uint32),
            "uint64": dtype(uint64),
        }
    if kind == "real floating":
        return {
            "float32": dtype(float32),
            "float64": dtype(float64),
        }
    if kind == "complex floating":
        return {
            "complex64": dtype(complex64),
            "complex128": dtype(complex128),
        }
    if kind == "numeric":
        return {
            "int8": dtype(int8),
            "int16": dtype(int16),
            "int32": dtype(int32),
            "int64": dtype(int64),
            "uint8": dtype(uint8),
            "uint16": dtype(uint16),
            "uint32": dtype(uint32),
            "uint64": dtype(uint64),
            "float32": dtype(float32),
            "float64": dtype(float64),
            "complex64": dtype(complex64),
            "complex128": dtype(complex128),
        }
    if isinstance(kind, tuple):
        # Union of the per-kind dictionaries.
        res = {}
        for k in kind:
            res.update(self.dtypes(kind=k))
        return res
    raise ValueError(f"unsupported kind: {kind!r}")
|
The array API data types supported by CuPy.
Note that this function only returns data types that are defined by
the array API.
Parameters
----------
device : str, optional
The device to get the data types for.
kind : str or tuple of str, optional
The kind of data types to return. If ``None``, all data types are
returned. If a string, only data types of that kind are returned.
If a tuple, a dictionary containing the union of the given kinds
is returned. The following kinds are supported:
- ``'bool'``: boolean data types (i.e., ``bool``).
- ``'signed integer'``: signed integer data types (i.e., ``int8``,
``int16``, ``int32``, ``int64``).
- ``'unsigned integer'``: unsigned integer data types (i.e.,
``uint8``, ``uint16``, ``uint32``, ``uint64``).
- ``'integral'``: integer data types. Shorthand for ``('signed
integer', 'unsigned integer')``.
- ``'real floating'``: real-valued floating-point data types
(i.e., ``float32``, ``float64``).
- ``'complex floating'``: complex floating-point data types (i.e.,
``complex64``, ``complex128``).
- ``'numeric'``: numeric data types. Shorthand for ``('integral',
'real floating', 'complex floating')``.
Returns
-------
dtypes : dict
A dictionary mapping the names of data types to the corresponding
CuPy data types.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.dtypes(kind='signed integer')
{'int8': cupy.int8,
'int16': cupy.int16,
'int32': cupy.int32,
'int64': cupy.int64}
|
python
|
sklearn/externals/array_api_compat/cupy/_info.py
| 189
|
[
"self",
"device",
"kind"
] | false
| 11
| 6.96
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
tolist
|
def tolist(self) -> list:
    """
    Convert the stored values to a plain Python list.

    Each element is a scalar type: a Python scalar (for str, int, float)
    or a pandas scalar (for Timestamp/Timedelta/Interval/Period).

    Returns
    -------
    list
        List containing the values as Python or pandas scalars.

    See Also
    --------
    numpy.ndarray.tolist : Return the array as an a.ndim-levels deep
        nested list of Python scalars.

    Examples
    --------
    For Series

    >>> s = pd.Series([1, 2, 3])
    >>> s.to_list()
    [1, 2, 3]

    For Index:

    >>> idx = pd.Index([1, 2, 3])
    >>> idx
    Index([1, 2, 3], dtype='int64')
    >>> idx.to_list()
    [1, 2, 3]
    """
    # Delegate to the backing array's own list conversion.
    values = self._values
    return values.tolist()
|
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
Returns
-------
list
List containing the values as Python or pandas scalars.
See Also
--------
numpy.ndarray.tolist : Return the array as an a.ndim-levels deep
nested list of Python scalars.
Examples
--------
For Series
>>> s = pd.Series([1, 2, 3])
>>> s.to_list()
[1, 2, 3]
For Index:
>>> idx = pd.Index([1, 2, 3])
>>> idx
Index([1, 2, 3], dtype='int64')
>>> idx.to_list()
[1, 2, 3]
|
python
|
pandas/core/base.py
| 843
|
[
"self"
] |
list
| true
| 1
| 6.08
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
objectText
|
@Override
public Object objectText() throws IOException {
    JsonToken token = parser.getCurrentToken();
    // Map scalar token types onto their natural Java representations;
    // any other token (including a null current token) falls back to text().
    if (token == JsonToken.VALUE_NULL) {
        return null;
    }
    if (token == JsonToken.VALUE_TRUE) {
        return Boolean.TRUE;
    }
    if (token == JsonToken.VALUE_FALSE) {
        return Boolean.FALSE;
    }
    if (token == JsonToken.VALUE_NUMBER_INT || token == JsonToken.VALUE_NUMBER_FLOAT) {
        return parser.getNumberValue();
    }
    return text();
}
|
Handle parser exception depending on type.
This converts known exceptions to XContentParseException and rethrows them.
|
java
|
libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java
| 177
|
[] |
Object
| true
| 7
| 6.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
initializeHeapSnapshotSignalHandlers
|
function initializeHeapSnapshotSignalHandlers() {
  const signal = getOptionValue('--heapsnapshot-signal');
  const diagnosticDir = getOptionValue('--diagnostic-dir');
  if (!signal) {
    return;
  }
  require('internal/validators').validateSignalName(signal);
  const { writeHeapSnapshot } = require('v8');
  // Named function expression so the listener is identifiable and can be
  // removed again before a snapshot is serialized.
  const onSignal = function doWriteHeapSnapshot() {
    writeHeapSnapshot(getHeapSnapshotFilename(diagnosticDir));
  };
  process.on(signal, onSignal);
  // The code above would add the listener back during deserialization,
  // if applicable.
  if (isBuildingSnapshot()) {
    addSerializeCallback(() => {
      process.removeListener(signal, onSignal);
    });
  }
}
|
Patch the process object with legacy properties and normalizations.
Replace `process.argv[0]` with `process.execPath`, preserving the original `argv[0]` value as `process.argv0`.
Replace `process.argv[1]` with the resolved absolute file path of the entry point, if found.
@param {boolean} expandArgv1 - Whether to replace `process.argv[1]` with the resolved absolute file path of
the main entry point.
@returns {string}
|
javascript
|
lib/internal/process/pre_execution.js
| 459
|
[] | false
| 3
| 6.96
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
check_training_status_with_log
|
def check_training_status_with_log(
    self,
    job_name: str,
    non_terminal_states: set,
    failed_states: set,
    wait_for_completion: bool,
    check_interval: int,
    max_ingestion_time: int | None = None,
) -> None:
    """
    Display logs for a given training job.

    Optionally tailing them until the job is complete.

    :param job_name: name of the training job to check status and display logs for
    :param non_terminal_states: the set of non_terminal states
    :param failed_states: the set of failed states
    :param wait_for_completion: Whether to keep looking for new log entries
        until the job completes
    :param check_interval: The interval in seconds between polling for new log entries and job completion
    :param max_ingestion_time: the maximum ingestion time in seconds. Any
        SageMaker jobs that run longer than this will fail. Setting this to
        None implies no timeout for any SageMaker job.
    :raises AirflowException: if the job exceeds ``max_ingestion_time`` or
        finishes in one of ``failed_states``.
    """
    sec = 0  # seconds spent polling so far, compared against max_ingestion_time
    description = self.describe_training_job(job_name)
    self.log.info(secondary_training_status_message(description, None))
    instance_count = description["ResourceConfig"]["InstanceCount"]
    status = description["TrainingJobStatus"]
    stream_names: list = []  # The list of log streams
    positions: dict = {}  # The current position in each stream, map of stream name -> position
    # A job already in a terminal state needs no tailing, regardless of
    # wait_for_completion.
    job_already_completed = status not in non_terminal_states
    state = LogState.TAILING if wait_for_completion and not job_already_completed else LogState.COMPLETE
    # The loop below implements a state machine that alternates between checking the job status and
    # reading whatever is available in the logs at this point. Note, that if we were called with
    # wait_for_completion == False, we never check the job status.
    #
    # If wait_for_completion == TRUE and job is not completed, the initial state is TAILING
    # If wait_for_completion == FALSE, the initial state is COMPLETE
    # (doesn't matter if the job really is complete).
    #
    # The state table:
    #
    # STATE               ACTIONS               CONDITION             NEW STATE
    # ----------------    ----------------      -----------------     ----------------
    # TAILING             Read logs, Pause, Get status                Job complete       JOB_COMPLETE
    #                                           Else                  TAILING
    # JOB_COMPLETE        Read logs, Pause      Any                   COMPLETE
    # COMPLETE            Read logs, Exit       N/A
    #
    # Notes:
    # - The JOB_COMPLETE state forces us to do an extra pause and read any items that
    #   got to Cloudwatch after the job was marked complete.
    last_describe_job_call = time.monotonic()
    last_description = description
    while True:
        time.sleep(check_interval)
        sec += check_interval
        # Advance the state machine: reads available log entries and, when
        # TAILING, refreshes the job status.
        state, last_description, last_describe_job_call = self.describe_training_job_with_log(
            job_name,
            positions,
            stream_names,
            instance_count,
            state,
            last_description,
            last_describe_job_call,
        )
        if state == LogState.COMPLETE:
            break
        if max_ingestion_time and sec > max_ingestion_time:
            # ensure that the job gets killed if the max ingestion time is exceeded
            raise AirflowException(f"SageMaker job took more than {max_ingestion_time} seconds")
    if wait_for_completion:
        status = last_description["TrainingJobStatus"]
        if status in failed_states:
            reason = last_description.get("FailureReason", "(No reason provided)")
            raise AirflowException(f"Error training {job_name}: {status} Reason: {reason}")
        billable_seconds = SageMakerHook.count_billable_seconds(
            training_start_time=last_description["TrainingStartTime"],
            training_end_time=last_description["TrainingEndTime"],
            instance_count=instance_count,
        )
        self.log.info("Billable seconds: %d", billable_seconds)
|
Display logs for a given training job.
Optionally tailing them until the job is complete.
:param job_name: name of the training job to check status and display logs for
:param non_terminal_states: the set of non_terminal states
:param failed_states: the set of failed states
:param wait_for_completion: Whether to keep looking for new log entries
until the job completes
:param check_interval: The interval in seconds between polling for new log entries and job completion
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/sagemaker.py
| 767
|
[
"self",
"job_name",
"non_terminal_states",
"failed_states",
"wait_for_completion",
"check_interval",
"max_ingestion_time"
] | true
| 9
| 6.8
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
getMessage
|
private static String getMessage(@Nullable ValidationErrors errors) {
    // Without validation errors, only the generic prefix is reported.
    if (errors == null) {
        return "Binding validation errors";
    }
    StringBuilder message = new StringBuilder("Binding validation errors on ").append(errors.getName());
    errors.getAllErrors().forEach((error) -> message.append(String.format("%n - %s", error)));
    return message.toString();
}
|
Return the validation errors that caused the exception.
@return the validationErrors the validation errors
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/validation/BindValidationException.java
| 50
|
[
"errors"
] |
String
| true
| 2
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
formatDateTime
|
function formatDateTime(str: string, opt_values: string[]) {
  if (!opt_values) {
    return str;
  }
  // Substitute each "{key}" placeholder with the matching entry from
  // opt_values; placeholders without a matching key are left untouched.
  return str.replace(/\{([^}]+)}/g, (match, key) =>
    opt_values != null && key in opt_values ? opt_values[key] : match,
  );
}
|
Create a new Date object with the given date value, and the time set to midnight.
We cannot use `new Date(year, month, date)` because it maps years between 0 and 99 to 1900-1999.
See: https://github.com/angular/angular/issues/40377
Note that this function returns a Date object whose time is midnight in the current locale's
timezone. In the future we might want to change this to be midnight in UTC, but this would be a
considerable breaking change.
|
typescript
|
packages/common/src/i18n/format_date.ts
| 263
|
[
"str",
"opt_values"
] | false
| 4
| 6.24
|
angular/angular
| 99,544
|
jsdoc
| false
|
|
inner
|
def inner(*args: _P.args, **kwargs: _P.kwargs) -> _R:
    """Invoke the wrapped function and persist its result in both caches.

    Args:
        *args: Positional arguments forwarded to the wrapped function.
        **kwargs: Keyword arguments forwarded to the wrapped function.

    Returns:
        Whatever the wrapped function returned.
    """
    # Go through the memory-cached wrapper first; this invokes fn and
    # populates the in-memory cache as a side effect.
    output = memory_record_fn(*args, **kwargs)
    # Mirror the entry into the on-disk cache as well.
    key = self._make_key(custom_params_encoder, *args, **kwargs)
    # The entry is guaranteed to be present: memory_record_fn stored it
    # just above.
    hit = self._memoizer._cache.get(key)
    assert hit, "Cache entry must exist in memory cache"
    entry = cast(CacheEntry, hit.value)
    # Persist the complete CacheEntry (not just the value) so on-disk
    # artifacts are easier to inspect when debugging.
    payload: bytes = pickle.dumps(entry)
    self._disk_cache.insert(key, payload)
    return output
|
Call the original function and cache the result in both caches.
Args:
*args: Positional arguments to pass to the function.
**kwargs: Keyword arguments to pass to the function.
Returns:
The result of calling the original function.
|
python
|
torch/_inductor/runtime/caching/interfaces.py
| 680
|
[] |
_R
| true
| 1
| 6.88
|
pytorch/pytorch
| 96,034
|
google
| false
|
resolveAndLoad
|
Map<ConfigDataResolutionResult, ConfigData> resolveAndLoad(@Nullable ConfigDataActivationContext activationContext,
        ConfigDataLocationResolverContext locationResolverContext, ConfigDataLoaderContext loaderContext,
        List<ConfigDataLocation> locations) {
    // Profile lookup cannot raise IOException, so it stays outside the try.
    Profiles activeProfiles = (activationContext != null) ? activationContext.getProfiles() : null;
    try {
        List<ConfigDataResolutionResult> resolutionResults = resolve(locationResolverContext, activeProfiles,
                locations);
        return load(loaderContext, resolutionResults);
    }
    catch (IOException ex) {
        throw new IllegalStateException("IO error on loading imports from " + locations, ex);
    }
}
|
Resolve and load the given list of locations, filtering any that have been
previously loaded.
@param activationContext the activation context
@param locationResolverContext the location resolver context
@param loaderContext the loader context
@param locations the locations to resolve
@return a map of the loaded locations and data
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataImporter.java
| 82
|
[
"activationContext",
"locationResolverContext",
"loaderContext",
"locations"
] | true
| 3
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
add_forward_offload_stream_ops
|
def add_forward_offload_stream_ops(graph: fx.Graph) -> None:
    """
    Add stream operations for forward pass CPU offloading.

    Pattern: record_event → fork → wait_event → record_stream → device_put →
    record_event_2 → join → wait_event_2

    This ensures that:
    1. Offloading waits for the last use to complete (record_event on default stream)
    2. Offloading happens on a separate stream (fork → wait_event → device_put)
    3. The tensor is marked as used in the offload stream (record_stream)
    4. Execution returns to the default stream after offloading and
       waits for offload to complete (record_event_2 → join → wait_event_2)

    NOTE: For stream optimization and overlapping compute with communication,
    the "wait_event_2" ops can be sinked to the end of the graph.

    Args:
        graph: The forward graph to modify
    """
    # Find all CPU offload nodes: call_function nodes whose generated name
    # carries the CPU-offload marker prefix.
    offload_nodes: list[fx.Node] = [
        node
        for node in graph.nodes
        if CPU_OFFLOAD_PREFIX in node.name and node.op == "call_function"
    ]
    if not offload_nodes:
        return
    # Get default stream id and offload stream id.
    # NOTE(review): the default stream is derived from the first offload
    # node's input device — assumes all offloaded tensors share one device;
    # confirm with callers.
    current_stream_id: int = get_current_stream(
        offload_nodes[0].args[0].meta["val"].device  # type: ignore[assignment]
    )
    offload_stream_id: int = new_stream()
    for offload_node in offload_nodes:
        # Fresh events per offload so each wait pairs with its own record.
        offload_ready_event_id: int = new_event()
        offload_completion_event_id: int = new_event()
        # Get the tensor being offloaded
        tensor_node: fx.Node = offload_node.args[0]  # type: ignore[assignment]
        with graph.inserting_before(offload_node):
            # Record event on default stream to ensure last use completes
            graph.call_function(
                torch.ops.streams.record_event.default,
                args=(offload_ready_event_id, current_stream_id),
            )
            # Fork to offload stream
            graph.call_function(
                torch.ops.streams.fork.default,
                args=(current_stream_id, offload_stream_id),
                name=f"stream_in_{offload_node.name}",
            )
            # Wait for the event on offload stream
            graph.call_function(
                torch.ops.streams.wait_event.default,
                args=(offload_ready_event_id, offload_stream_id),
            )
            # Inform the CUDA Caching Allocator that this tensor will be accessed in the
            # offload stream. Without this, the program may prematurely free its memory
            # even though the async offload operation is still in progress, and this can
            # lead to memory corruption, especially with reordering for compute and
            # communication overlaps.
            graph.call_function(
                torch.ops.streams.record_stream.default,
                args=(tensor_node, offload_stream_id),
                name=f"record_stream_{tensor_node.name}",
            )
        with graph.inserting_after(offload_node):
            # Record event on offload stream after device_put completes
            record_event_node = graph.call_function(
                torch.ops.streams.record_event.default,
                args=(offload_completion_event_id, offload_stream_id),
            )
        with graph.inserting_after(record_event_node):
            # Join back to default stream
            join_node = graph.call_function(
                torch.ops.streams.join.default,
                args=(offload_stream_id, current_stream_id),
                name=f"stream_out_{offload_node.name}",
            )
        with graph.inserting_after(join_node):
            # Wait for the offload to complete on default stream
            graph.call_function(
                torch.ops.streams.wait_event.default,
                args=(offload_completion_event_id, current_stream_id),
            )
|
Add stream operations for forward pass CPU offloading.
Pattern: record_event → fork → wait_event → record_stream → device_put → record_event_2 → join → wait_event_2
This ensures that:
1. Offloading waits for the last use to complete (record_event on default stream)
2. Offloading happens on a separate stream (fork → wait_event → device_put)
3. The tensor is marked as used in the offload stream (record_stream)
4. Execution returns to the default stream after offloading and
waits for offload to complete (record_event_2 → join → wait_event_2)
NOTE: For stream optimization and overlapping compute with communication,
the "wait_event_2" ops can be sinked to the end of the graph.
Args:
graph: The forward graph to modify
|
python
|
torch/_functorch/_activation_offloading/activation_offloading.py
| 344
|
[
"graph"
] |
None
| true
| 4
| 6.96
|
pytorch/pytorch
| 96,034
|
google
| false
|
_log_state
|
def _log_state(*, task_instance: TaskInstance, lead_msg: str = "") -> None:
    """
    Log task state.

    :param task_instance: the task instance
    :param lead_msg: lead message

    :meta private:
    """
    fmt = "%sMarking task as %s. dag_id=%s, task_id=%s, run_id=%s, "
    fields: list[str | int] = [
        lead_msg,
        str(task_instance.state).upper(),
        task_instance.dag_id,
        task_instance.task_id,
        task_instance.run_id,
    ]
    # Mapped task instances additionally report their map index.
    if task_instance.map_index >= 0:
        fmt += "map_index=%d, "
        fields.append(task_instance.map_index)
    fmt += "logical_date=%s, start_date=%s, end_date=%s"
    log.info(
        fmt,
        *fields,
        _date_or_empty(task_instance=task_instance, attr="logical_date"),
        _date_or_empty(task_instance=task_instance, attr="start_date"),
        _date_or_empty(task_instance=task_instance, attr="end_date"),
        stacklevel=2,
    )
|
Log task state.
:param task_instance: the task instance
:param lead_msg: lead message
:meta private:
|
python
|
airflow-core/src/airflow/models/taskinstance.py
| 316
|
[
"task_instance",
"lead_msg"
] |
None
| true
| 2
| 6.72
|
apache/airflow
| 43,597
|
sphinx
| false
|
_cast_types
|
def _cast_types(self, values: ArrayLike, cast_type: DtypeObj, column) -> ArrayLike:
    """
    Cast values to specified type

    Parameters
    ----------
    values : ndarray or ExtensionArray
    cast_type : np.dtype or ExtensionDtype
        dtype to cast values to
    column : string
        column name - used only for error reporting

    Returns
    -------
    converted : ndarray or ExtensionArray

    Raises
    ------
    NotImplementedError
        If an ExtensionDtype's array type does not implement
        ``_from_sequence_of_strings``.
    ValueError
        If the values cannot be converted to ``cast_type``.
    """
    if isinstance(cast_type, CategoricalDtype):
        known_cats = cast_type.categories is not None
        if not is_object_dtype(values.dtype) and not known_cats:
            # TODO: this is for consistency with
            # c-parser which parses all categories
            # as strings
            values = lib.ensure_string_array(
                values, skipna=False, convert_na_value=False
            )
        # Infer the categories from the (string) values, then map each
        # value to its category code.
        cats = Index(values).unique().dropna()
        values = Categorical._from_inferred_categories(
            cats, cats.get_indexer(values), cast_type, true_values=self.true_values
        )
    # use the EA's implementation of casting
    elif isinstance(cast_type, ExtensionDtype):
        array_type = cast_type.construct_array_type()
        try:
            if isinstance(cast_type, BooleanDtype):
                # error: Unexpected keyword argument "true_values" for
                # "_from_sequence_of_strings" of "ExtensionArray"
                # BooleanDtype needs the parser's true/false/NA vocabularies
                # to interpret the raw strings.
                values_str = [str(val) for val in values]
                return array_type._from_sequence_of_strings(  # type: ignore[call-arg]
                    values_str,
                    dtype=cast_type,
                    true_values=self.true_values,  # pyright: ignore[reportCallIssue]
                    false_values=self.false_values,  # pyright: ignore[reportCallIssue]
                    none_values=self.na_values,  # pyright: ignore[reportCallIssue]
                )
            else:
                return array_type._from_sequence_of_strings(values, dtype=cast_type)
        except NotImplementedError as err:
            raise NotImplementedError(
                f"Extension Array: {array_type} must implement "
                "_from_sequence_of_strings in order to be used in parser methods"
            ) from err
    elif isinstance(values, ExtensionArray):
        values = values.astype(cast_type, copy=False)
    elif issubclass(cast_type.type, str):
        # TODO: why skipna=True here and False above? some tests depend
        # on it here, but nothing fails if we change it above
        # (as no tests get there as of 2022-12-06)
        values = lib.ensure_string_array(
            values, skipna=True, convert_na_value=False
        )
    else:
        try:
            values = astype_array(values, cast_type, copy=True)
        except ValueError as err:
            raise ValueError(
                f"Unable to convert column {column} to type {cast_type}"
            ) from err
    return values
|
Cast values to specified type
Parameters
----------
values : ndarray or ExtensionArray
cast_type : np.dtype or ExtensionDtype
dtype to cast values to
column : string
column name - used only for error reporting
Returns
-------
converted : ndarray or ExtensionArray
|
python
|
pandas/io/parsers/python_parser.py
| 485
|
[
"self",
"values",
"cast_type",
"column"
] |
ArrayLike
| true
| 10
| 6.16
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
openChannel
|
// Open a FileChannel for the given file. Immutable channels are opened
// read-only; mutable ones are either opened directly or pre-allocated to
// initFileSize via RandomAccessFile when requested for a fresh file.
private static FileChannel openChannel(File file,
                                       boolean mutable,
                                       boolean fileAlreadyExists,
                                       int initFileSize,
                                       boolean preallocate) throws IOException {
    if (!mutable) {
        return FileChannel.open(file.toPath());
    }
    if (fileAlreadyExists || !preallocate) {
        return FileChannel.open(file.toPath(), StandardOpenOption.CREATE, StandardOpenOption.READ,
                StandardOpenOption.WRITE);
    }
    // New file with preallocation requested: extend it to initFileSize first.
    RandomAccessFile randomAccessFile = new RandomAccessFile(file, "rw");
    randomAccessFile.setLength(initFileSize);
    return randomAccessFile.getChannel();
}
|
Open a channel for the given file
For windows NTFS and some old LINUX file system, set preallocate to true and initFileSize
with one value (for example 512 * 1024 * 1024) can improve the kafka produce performance.
@param file File path
@param mutable mutable
@param fileAlreadyExists File already exists or not
@param initFileSize The size used for pre allocate file, for example 512 * 1024 * 1024
@param preallocate Pre-allocate file or not, gotten from configuration.
|
java
|
clients/src/main/java/org/apache/kafka/common/record/FileRecords.java
| 479
|
[
"file",
"mutable",
"fileAlreadyExists",
"initFileSize",
"preallocate"
] |
FileChannel
| true
| 4
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
maybeBeginServerReauthentication
|
/**
 * If this is a server-side connection that has an expiration time and at least
 * 1 second has passed since the prior re-authentication (if any) started, then
 * begin the process of re-authenticating the connection and return true,
 * otherwise return false.
 *
 * @param saslHandshakeNetworkReceive the mandatory {@link NetworkReceive} containing the
 *            {@code SaslHandshakeRequest} that has been received on the server
 *            and that initiates re-authentication
 * @param nowNanosSupplier {@code Supplier} of the current time. The value must be in
 *            nanoseconds as per {@code System.nanoTime()} and is therefore only
 *            useful when compared to such a value -- its absolute value is
 *            meaningless.
 * @return true if re-authentication has begun, otherwise false
 * @throws AuthenticationException if re-authentication fails due to invalid credentials
 *            or other security configuration errors
 * @throws IOException if read/write fails due to an I/O error
 * @throws IllegalStateException if this channel is not "ready"
 */
public boolean maybeBeginServerReauthentication(NetworkReceive saslHandshakeNetworkReceive,
        Supplier<Long> nowNanosSupplier) throws AuthenticationException, IOException {
    if (!ready())
        throw new IllegalStateException(
                "KafkaChannel should be \"ready\" when processing SASL Handshake for potential re-authentication");
    /*
     * Re-authentication is disabled if there is no session expiration time, in
     * which case the SASL handshake network receive will be processed normally,
     * which results in a failure result being sent to the client. Also, no need to
     * check if we are muted since we are processing a received packet when we invoke
     * this.
     */
    if (authenticator.serverSessionExpirationTimeNanos() == null)
        return false;
    /*
     * We've delayed getting the time as long as possible in case we don't need it,
     * but at this point we need it -- so get it now.
     */
    long nowNanos = nowNanosSupplier.get();
    /*
     * Cannot re-authenticate more than once every second; an attempt to do so will
     * result in the SASL handshake network receive being processed normally, which
     * results in a failure result being sent to the client.
     */
    if (lastReauthenticationStartNanos != 0
            && nowNanos - lastReauthenticationStartNanos < MIN_REAUTH_INTERVAL_ONE_SECOND_NANOS)
        return false;
    // Record the start time before swapping so back-to-back handshakes are
    // throttled even if the swap below takes time.
    lastReauthenticationStartNanos = nowNanos;
    swapAuthenticatorsAndBeginReauthentication(
            new ReauthenticationContext(authenticator, saslHandshakeNetworkReceive, nowNanos));
    return true;
}
|
If this is a server-side connection that has an expiration time and at least
1 second has passed since the prior re-authentication (if any) started then
begin the process of re-authenticating the connection and return true,
otherwise return false
@param saslHandshakeNetworkReceive
the mandatory {@link NetworkReceive} containing the
{@code SaslHandshakeRequest} that has been received on the server
and that initiates re-authentication.
@param nowNanosSupplier
{@code Supplier} of the current time. The value must be in
nanoseconds as per {@code System.nanoTime()} and is therefore only
useful when compared to such a value -- it's absolute value is
meaningless.
@return true if this is a server-side connection that has an expiration time
and at least 1 second has passed since the prior re-authentication
(if any) started to indicate that the re-authentication process has
begun, otherwise false
@throws AuthenticationException
if re-authentication fails due to invalid credentials or other
security configuration errors
@throws IOException
if read/write fails due to an I/O error
@throws IllegalStateException
if this channel is not "ready"
|
java
|
clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java
| 540
|
[
"saslHandshakeNetworkReceive",
"nowNanosSupplier"
] | true
| 5
| 7.92
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
_autotune_local_nodes
|
def _autotune_local_nodes(
    scheduler: torch._inductor.scheduler.Scheduler,
) -> list[_SerializedChoice]:
    """
    Walk the scheduler's nodes and autotune every kernel that this rank is
    responsible for, returning the serialized winning choices.
    """
    results: list[_SerializedChoice] = []
    for node in scheduler.nodes:
        if not isinstance(node, SchedulerNode):
            continue
        inner = node.node
        if inner is None:
            continue
        if isinstance(inner, _DistributedAutotuneBuffer):
            # Marked for remote autotuning; handled by another rank.
            continue
        if not isinstance(inner, MultiTemplateBuffer):
            continue
        origin = inner.origin_node
        if origin is None:
            continue
        meta = origin.meta
        if meta is None:
            continue
        info = meta.get(_DISTRIBUTED_AUTOTUNE_KEY)
        if info is None:
            continue
        assert info.local
        # Force autotuning now (async precompile still applies) — all the
        # configs must be known before fusion.
        best_choice, _ = inner.get_min_choice()
        results.append(_SerializedChoice(info.index, best_choice))
    state = V.distributed_autotune_state
    assert len(results) == state.autotuned_local_count, (
        f"incorrect local autotuned nodes found ({len(results)} != {state.autotuned_local_count})"
    )
    return results
|
Go through the nodes in the scheduler and autotune the kernels which
should be autotuned by this rank.
|
python
|
torch/_inductor/distributed_autotune.py
| 310
|
[
"scheduler"
] |
list[_SerializedChoice]
| true
| 9
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
newLinkedHashMapWithExpectedSize
|
/**
 * Creates a {@code LinkedHashMap} instance, with a high enough "initial capacity" that it
 * <i>should</i> hold {@code expectedSize} elements without growth. This behavior cannot be
 * broadly guaranteed, but it is observed to be true for OpenJDK 1.7. It also can't be guaranteed
 * that the method isn't inadvertently <i>oversizing</i> the returned map.
 *
 * @param expectedSize the number of entries you expect to add to the returned map
 * @return a new, empty {@code LinkedHashMap} with enough capacity to hold {@code expectedSize}
 *     entries without resizing
 * @throws IllegalArgumentException if {@code expectedSize} is negative
 * @since 19.0
 */
@SuppressWarnings("NonApiType") // acts as a direct substitute for a constructor call
public static <K extends @Nullable Object, V extends @Nullable Object>
LinkedHashMap<K, V> newLinkedHashMapWithExpectedSize(int expectedSize) {
    return new LinkedHashMap<>(capacity(expectedSize));
}
|
Creates a {@code LinkedHashMap} instance, with a high enough "initial capacity" that it
<i>should</i> hold {@code expectedSize} elements without growth. This behavior cannot be
broadly guaranteed, but it is observed to be true for OpenJDK 1.7. It also can't be guaranteed
that the method isn't inadvertently <i>oversizing</i> the returned map.
@param expectedSize the number of entries you expect to add to the returned map
@return a new, empty {@code LinkedHashMap} with enough capacity to hold {@code expectedSize}
entries without resizing
@throws IllegalArgumentException if {@code expectedSize} is negative
@since 19.0
|
java
|
android/guava/src/com/google/common/collect/Maps.java
| 331
|
[
"expectedSize"
] | true
| 1
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
|
quantile
|
def quantile(
    self,
    q: float,
    interpolation: QuantileInterpolation = "linear",
    numeric_only: bool = False,
):
    """
    Calculate the expanding quantile.

    Parameters
    ----------
    q : float
        Quantile to compute. 0 <= quantile <= 1.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        This optional parameter specifies the interpolation method to use,
        when the desired quantile lies between two data points `i` and `j`:

        * linear: `i + (j - i) * fraction`, where `fraction` is the
          fractional part of the index surrounded by `i` and `j`.
        * lower: `i`.
        * higher: `j`.
        * nearest: `i` or `j` whichever is nearest.
        * midpoint: (`i` + `j`) / 2.
    numeric_only : bool, default False
        Include only float, int, boolean columns.

    Returns
    -------
    Series or DataFrame
        Return type is the same as the original object with ``np.float64`` dtype.

    See Also
    --------
    Series.expanding : Calling expanding with Series data.
    DataFrame.expanding : Calling expanding with DataFrames.
    Series.quantile : Aggregating quantile for Series.
    DataFrame.quantile : Aggregating quantile for DataFrame.

    Examples
    --------
    >>> ser = pd.Series([1, 2, 3, 4, 5, 6], index=["a", "b", "c", "d", "e", "f"])
    >>> ser.expanding(min_periods=4).quantile(0.25)
    a     NaN
    b     NaN
    c     NaN
    d    1.75
    e    2.00
    f    2.25
    dtype: float64
    """
    # Thin wrapper: the expanding-window quantile logic lives in the shared
    # rolling/expanding base class; forward every argument unchanged.
    kwargs = {
        "q": q,
        "interpolation": interpolation,
        "numeric_only": numeric_only,
    }
    return super().quantile(**kwargs)
|
Calculate the expanding quantile.
Parameters
----------
q : float
Quantile to compute. 0 <= quantile <= 1.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
numeric_only : bool, default False
Include only float, int, boolean columns.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.quantile : Aggregating quantile for Series.
DataFrame.quantile : Aggregating quantile for DataFrame.
Examples
--------
>>> ser = pd.Series([1, 2, 3, 4, 5, 6], index=["a", "b", "c", "d", "e", "f"])
>>> ser.expanding(min_periods=4).quantile(0.25)
a NaN
b NaN
c NaN
d 1.75
e 2.00
f 2.25
dtype: float64
|
python
|
pandas/core/window/expanding.py
| 1,067
|
[
"self",
"q",
"interpolation",
"numeric_only"
] | true
| 1
| 7.12
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
toHexCharArray
|
static char[] toHexCharArray(byte[] bytes) {
    Objects.requireNonNull(bytes);
    // Two hex digits per input byte; fill the output with a running
    // write cursor instead of computing 2*i per element.
    final char[] hex = new char[bytes.length * 2];
    int pos = 0;
    for (byte value : bytes) {
        // Masking with 0xf discards the sign-extension bits of the
        // arithmetic shift, so negative bytes encode correctly.
        hex[pos++] = HEX_DIGITS[(value >> 4) & 0xf];
        hex[pos++] = HEX_DIGITS[value & 0xf];
    }
    return hex;
}
|
Encodes the byte array into a newly created hex char array, without allocating any other temporary variables.
@param bytes the input to be encoded as hex.
@return the hex encoding of the input as a char array.
|
java
|
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslUtil.java
| 56
|
[
"bytes"
] | true
| 2
| 8.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.