function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
serializeTypeReferenceNode
|
/**
 * Serializes a `TypeReferenceNode` to an expression that evaluates to the appropriate
 * runtime constructor value (`Object`, `Number`, the referenced class, etc.) for use
 * with decorator type metadata.
 *
 * @param node The type reference node to serialize.
 * @returns An expression yielding the metadata constructor value.
 */
function serializeTypeReferenceNode(node: TypeReferenceNode): SerializedTypeNode {
    // Ask the checker how this type name can be represented as a runtime value.
    const kind = resolver.getTypeReferenceSerializationKind(node.typeName, currentNameScope ?? currentLexicalScope);
    switch (kind) {
        case TypeReferenceSerializationKind.Unknown:
            // From conditional type type reference that cannot be resolved is Similar to any or unknown
            if (findAncestor(node, n => n.parent && isConditionalTypeNode(n.parent) && (n.parent.trueType === n || n.parent.falseType === n))) {
                return factory.createIdentifier("Object");
            }
            // Otherwise emit a guarded reference: use the value only if it is a
            // function at runtime, falling back to `Object`.
            const serialized = serializeEntityNameAsExpressionFallback(node.typeName);
            const temp = factory.createTempVariable(hoistVariableDeclaration);
            return factory.createConditionalExpression(
                factory.createTypeCheck(factory.createAssignment(temp, serialized), "function"),
                /*questionToken*/ undefined,
                temp,
                /*colonToken*/ undefined,
                factory.createIdentifier("Object"),
            );
        case TypeReferenceSerializationKind.TypeWithConstructSignatureAndValue:
            return serializeEntityNameAsExpression(node.typeName);
        case TypeReferenceSerializationKind.VoidNullableOrNeverType:
            return factory.createVoidZero();
        case TypeReferenceSerializationKind.BigIntLikeType:
            // BigInt only exists from ES2020 onward; helper guards older targets.
            return getGlobalConstructor("BigInt", ScriptTarget.ES2020);
        case TypeReferenceSerializationKind.BooleanType:
            return factory.createIdentifier("Boolean");
        case TypeReferenceSerializationKind.NumberLikeType:
            return factory.createIdentifier("Number");
        case TypeReferenceSerializationKind.StringLikeType:
            return factory.createIdentifier("String");
        case TypeReferenceSerializationKind.ArrayLikeType:
            return factory.createIdentifier("Array");
        case TypeReferenceSerializationKind.ESSymbolType:
            // Symbol only exists from ES2015 onward; helper guards older targets.
            return getGlobalConstructor("Symbol", ScriptTarget.ES2015);
        case TypeReferenceSerializationKind.TypeWithCallSignature:
            return factory.createIdentifier("Function");
        case TypeReferenceSerializationKind.Promise:
            return factory.createIdentifier("Promise");
        case TypeReferenceSerializationKind.ObjectType:
            return factory.createIdentifier("Object");
        default:
            // Exhaustiveness guard: fails at compile time if a new kind is added.
            return Debug.assertNever(kind);
    }
}
|
Serializes a TypeReferenceNode to an appropriate JS constructor value for use with decorator type metadata.
@param node The type reference node.
|
typescript
|
src/compiler/transformers/typeSerializer.ts
| 490
|
[
"node"
] | true
| 5
| 6.56
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
_replace_locals
|
def _replace_locals(tok: tuple[int, str]) -> tuple[int, str]:
"""
Replace local variables with a syntactically valid name.
Parameters
----------
tok : tuple of int, str
ints correspond to the all caps constants in the tokenize module
Returns
-------
tuple of int, str
Either the input or token or the replacement values
Notes
-----
This is somewhat of a hack in that we rewrite a string such as ``'@a'`` as
``'__pd_eval_local_a'`` by telling the tokenizer that ``__pd_eval_local_``
is a ``tokenize.OP`` and to replace the ``'@'`` symbol with it.
"""
toknum, tokval = tok
if toknum == tokenize.OP and tokval == "@":
return tokenize.OP, LOCAL_TAG
return toknum, tokval
|
Replace local variables with a syntactically valid name.
Parameters
----------
tok : tuple of int, str
ints correspond to the all caps constants in the tokenize module
Returns
-------
tuple of int, str
Either the input or token or the replacement values
Notes
-----
This is somewhat of a hack in that we rewrite a string such as ``'@a'`` as
``'__pd_eval_local_a'`` by telling the tokenizer that ``__pd_eval_local_``
is a ``tokenize.OP`` and to replace the ``'@'`` symbol with it.
|
python
|
pandas/core/computation/expr.py
| 99
|
[
"tok"
] |
tuple[int, str]
| true
| 3
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
from
|
/**
 * Create an immutable {@link ConfigurationProperties} instance for the specified
 * {@code beanName} and {@code beanType} using the specified {@link BeanFactory}.
 * @param beanFactory the bean factory to use
 * @param beanName the name of the bean
 * @param beanType the type of the bean
 * @return an instance from the specified bean
 */
public static Object from(BeanFactory beanFactory, String beanName, Class<?> beanType) {
    ConfigurationPropertiesBean valueObjectBean = ConfigurationPropertiesBean.forValueObject(beanType, beanName);
    try {
        // Bind (or create) the value object through the factory's shared binder.
        return ConfigurationPropertiesBinder.get(beanFactory).bindOrCreate(valueObjectBean);
    }
    catch (Exception ex) {
        throw new ConfigurationPropertiesBindException(valueObjectBean, ex);
    }
}
|
Create an immutable {@link ConfigurationProperties} instance for the specified
{@code beanName} and {@code beanType} using the specified {@link BeanFactory}.
@param beanFactory the bean factory to use
@param beanName the name of the bean
@param beanType the type of the bean
@return an instance from the specified bean
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/ConstructorBound.java
| 40
|
[
"beanFactory",
"beanName",
"beanType"
] |
Object
| true
| 2
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
visitAwaitExpression
|
/**
 * Visits an `await` expression. Inside an async generator, `await expr` is
 * lowered to `yield __await(expr)`; in every other context the node is simply
 * visited recursively without transformation.
 */
function visitAwaitExpression(node: AwaitExpression): Expression {
    const inAsyncGenerator = (enclosingFunctionFlags & FunctionFlags.Async) !== 0
        && (enclosingFunctionFlags & FunctionFlags.Generator) !== 0;
    if (!inAsyncGenerator) {
        return visitEachChild(node, visitor, context);
    }
    const awaited = emitHelpers().createAwaitHelper(visitNode(node.expression, visitor, isExpression));
    const yieldExpression = factory.createYieldExpression(/*asteriskToken*/ undefined, awaited);
    return setOriginalNode(setTextRange(yieldExpression, /*location*/ node), node);
}
|
@param expressionResultIsUnused Indicates the result of an expression is unused by the parent node (i.e., the left side of a comma or the
expression of an `ExpressionStatement`).
|
typescript
|
src/compiler/transformers/es2018.ts
| 395
|
[
"node"
] | true
| 3
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
enterIfInterruptibly
|
/**
 * Enters this monitor if the guard is satisfied. Blocks (interruptibly) while acquiring
 * the lock, but does not wait for the guard to become satisfied.
 *
 * @return whether the monitor was entered, which guarantees that the guard is now satisfied
 * @throws InterruptedException if interrupted while acquiring the lock
 */
public boolean enterIfInterruptibly(Guard guard) throws InterruptedException {
    // A guard may only be evaluated against the monitor it was created for.
    if (guard.monitor != this) {
        throw new IllegalMonitorStateException();
    }
    ReentrantLock lock = this.lock;
    lock.lockInterruptibly();
    boolean satisfied = false;
    try {
        return satisfied = guard.isSatisfied();
    } finally {
        // Keep the lock only when the guard held (the monitor was "entered");
        // release it if the guard failed or isSatisfied() threw.
        if (!satisfied) {
            lock.unlock();
        }
    }
}
|
Enters this monitor if the guard is satisfied. Blocks indefinitely acquiring the lock, but does
not wait for the guard to be satisfied, and may be interrupted.
@return whether the monitor was entered, which guarantees that the guard is now satisfied
@throws InterruptedException if interrupted while waiting
|
java
|
android/guava/src/com/google/common/util/concurrent/Monitor.java
| 742
|
[
"guard"
] | true
| 3
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
collapseOverlappingBuckets
|
/**
 * Collapses all buckets from the given iterator whose lower boundaries are smaller than
 * the zero threshold. The iterator is advanced to point at the first non-collapsed bucket.
 *
 * @param buckets the iterator whose leading buckets may be collapsed
 * @return a potentially updated {@link ZeroBucket} with the collapsed buckets' counts and
 *         an adjusted threshold; {@code this} if nothing was collapsed
 */
public ZeroBucket collapseOverlappingBuckets(BucketIterator buckets) {
    long collapsedCount = 0;
    // Index of the last collapsed bucket; only meaningful when collapsedCount > 0.
    long highestCollapsedIndex = 0;

    // Consume buckets while their lower boundary lies below the current zero threshold.
    while (buckets.hasNext() && compareExponentiallyScaledValues(buckets.peekIndex(), buckets.scale(), index(), scale()) < 0) {
        highestCollapsedIndex = buckets.peekIndex();
        collapsedCount += buckets.peekCount();
        buckets.advance();
    }
    if (collapsedCount == 0) {
        return this;
    } else {
        long newZeroCount = count + collapsedCount;
        // +1 because we need to adjust the zero threshold to the upper boundary of the collapsed bucket
        long collapsedUpperBoundIndex = highestCollapsedIndex + 1;
        if (compareExponentiallyScaledValues(index(), scale(), collapsedUpperBoundIndex, buckets.scale()) >= 0) {
            // Our current zero-threshold is larger than the upper boundary of the largest collapsed bucket, so we keep it.
            return new ZeroBucket(this, newZeroCount);
        } else {
            return new ZeroBucket(collapsedUpperBoundIndex, buckets.scale(), newZeroCount);
        }
    }
}
|
Collapses all buckets from the given iterator whose lower boundaries are smaller than the zero threshold.
The iterator is advanced to point at the first, non-collapsed bucket.
@param buckets The iterator whose buckets may be collapsed.
@return A potentially updated {@link ZeroBucket} with the collapsed buckets' counts and an adjusted threshold.
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ZeroBucket.java
| 245
|
[
"buckets"
] |
ZeroBucket
| true
| 5
| 8.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
as_json_table_type
|
def as_json_table_type(x: DtypeObj) -> str:
"""
Convert a NumPy / pandas type to its corresponding json_table.
Parameters
----------
x : np.dtype or ExtensionDtype
Returns
-------
str
the Table Schema data types
Notes
-----
This table shows the relationship between NumPy / pandas dtypes,
and Table Schema dtypes.
============== =================
Pandas type Table Schema type
============== =================
int64 integer
float64 number
bool boolean
datetime64[ns] datetime
timedelta64[ns] duration
object str
categorical any
=============== =================
"""
if is_integer_dtype(x):
return "integer"
elif is_bool_dtype(x):
return "boolean"
elif is_numeric_dtype(x):
return "number"
elif lib.is_np_dtype(x, "M") or isinstance(x, (DatetimeTZDtype, PeriodDtype)):
return "datetime"
elif lib.is_np_dtype(x, "m"):
return "duration"
elif is_string_dtype(x):
return "string"
else:
return "any"
|
Convert a NumPy / pandas type to its corresponding json_table.
Parameters
----------
x : np.dtype or ExtensionDtype
Returns
-------
str
the Table Schema data types
Notes
-----
This table shows the relationship between NumPy / pandas dtypes,
and Table Schema dtypes.
============== =================
Pandas type Table Schema type
============== =================
int64 integer
float64 number
bool boolean
datetime64[ns] datetime
timedelta64[ns] duration
object str
categorical any
=============== =================
|
python
|
pandas/io/json/_table_schema.py
| 55
|
[
"x"
] |
str
| true
| 9
| 6.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
withGenerator
|
/**
 * Return a new {@link BeanInstanceSupplier} instance that instantiates the underlying
 * bean by invoking the specified {@code generator} bi-function.
 * @param generator a {@link ThrowingBiFunction} that uses the {@link RegisteredBean}
 * and resolved {@link AutowiredArguments} to instantiate the underlying bean
 * @return a new {@link BeanInstanceSupplier} instance with the specified generator
 */
public BeanInstanceSupplier<T> withGenerator(ThrowingBiFunction<RegisteredBean, AutowiredArguments, T> generator) {
    Assert.notNull(generator, "'generator' must not be null");
    // Copy the existing lookup and shortcut configuration, swapping in the generator.
    BeanInstanceSupplier<T> replacement = new BeanInstanceSupplier<>(this.lookup, null, generator, this.shortcutBeanNames);
    return replacement;
}
|
Return a new {@link BeanInstanceSupplier} instance that uses the specified
{@code generator} bi-function to instantiate the underlying bean.
@param generator a {@link ThrowingBiFunction} that uses the
{@link RegisteredBean} and resolved {@link AutowiredArguments} to
instantiate the underlying bean
@return a new {@link BeanInstanceSupplier} instance with the specified generator
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/BeanInstanceSupplier.java
| 156
|
[
"generator"
] | true
| 1
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
onFiber
|
/**
 * @return true iff the calling thread is currently executing in a fiber's context
 */
inline bool onFiber() {
  if (auto* manager = FiberManager::getFiberManagerUnsafe()) {
    return manager->hasActiveFiber();
  }
  // No fiber manager installed for this thread: definitely not on a fiber.
  return false;
}
|
@return true iff we are running in a fiber's context
|
cpp
|
folly/fibers/FiberManagerInternal.h
| 634
|
[] | true
| 2
| 7.36
|
facebook/folly
| 30,157
|
doxygen
| false
|
|
format
|
/**
 * Formats a millisecond {@code long} value into the supplied {@link StringBuffer}
 * by delegating to the underlying printer.
 *
 * @param millis the millisecond value to format
 * @param buf the buffer to format into
 * @return the specified string buffer
 * @since 2.1
 * @deprecated Use {@link #format(long, Appendable)}.
 */
@Deprecated
@Override
public StringBuffer format(final long millis, final StringBuffer buf) {
    return printer.format(millis, buf);
}
|
Formats a millisecond {@code long} value into the supplied {@link StringBuffer}.
@param millis the millisecond value to format.
@param buf the buffer to format into.
@return the specified string buffer.
@since 2.1
@deprecated Use {{@link #format(long, Appendable)}.
|
java
|
src/main/java/org/apache/commons/lang3/time/FastDateFormat.java
| 510
|
[
"millis",
"buf"
] |
StringBuffer
| true
| 1
| 6.4
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
all
|
/**
 * Return a future which succeeds only if all the topic elections succeed.
 * The returned future fails with the first per-partition error encountered.
 */
public KafkaFuture<Void> all() {
    final KafkaFutureImpl<Void> result = new KafkaFutureImpl<>();
    partitions().whenComplete((topicPartitions, throwable) -> {
        // Propagate a top-level failure immediately.
        if (throwable != null) {
            result.completeExceptionally(throwable);
            return;
        }
        // Fail on the first partition-level error; otherwise complete successfully.
        for (Optional<Throwable> exception : topicPartitions.values()) {
            if (exception.isPresent()) {
                result.completeExceptionally(exception.get());
                return;
            }
        }
        result.complete(null);
    });
    return result;
}
|
Return a future which succeeds if all the topic elections succeed.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/ElectLeadersResult.java
| 54
|
[] | true
| 3
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
format_html_join
|
def format_html_join(sep, format_string, args_generator):
    """
    A wrapper of format_html, for the common case of a group of arguments that
    need to be formatted using the same format string, and then joined using
    'sep'. 'sep' is also passed through conditional_escape.
    'args_generator' should be an iterator that returns the sequence of 'args'
    that will be passed to format_html.
    Example:
    format_html_join('\n', "<li>{} {}</li>", ((u.first_name, u.last_name)
    for u in users))
    """
    def render(args):
        # Mappings supply keyword arguments; any other sequence supplies
        # positional arguments.
        if isinstance(args, Mapping):
            return format_html(format_string, **args)
        return format_html(format_string, *args)

    escaped_sep = conditional_escape(sep)
    return mark_safe(escaped_sep.join(render(args) for args in args_generator))
|
A wrapper of format_html, for the common case of a group of arguments that
need to be formatted using the same format string, and then joined using
'sep'. 'sep' is also passed through conditional_escape.
'args_generator' should be an iterator that returns the sequence of 'args'
that will be passed to format_html.
Example:
format_html_join('\n', "<li>{} {}</li>", ((u.first_name, u.last_name)
for u in users))
|
python
|
django/utils/html.py
| 148
|
[
"sep",
"format_string",
"args_generator"
] | false
| 2
| 7.68
|
django/django
| 86,204
|
unknown
| false
|
|
k
|
public abstract double k(double q, double normalizer);
|
Converts a quantile to the k-scale. The normalizer value depends on compression and (possibly) number of points
in the digest. #normalizer(double, double)
@param q The quantile
@param normalizer The normalizer value which depends on compression and (possibly) number of points in the
digest.
@return The corresponding value of k
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/ScaleFunction.java
| 508
|
[
"q",
"normalizer"
] | true
| 1
| 6.48
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
maybeCloseFetchSessions
|
/**
 * Send a close request for every known fetch session and wait, bounded by the given
 * timer, for the responses so brokers can release fetch-session state during shutdown.
 * Requests still outstanding when the timer expires are abandoned (logged at debug).
 *
 * @param timer the timer bounding how long to wait for the close requests to complete
 */
protected void maybeCloseFetchSessions(final Timer timer) {
    final List<RequestFuture<ClientResponse>> requestFutures = sendFetchesInternal(
        prepareCloseFetchSessionRequests(),
        this::handleCloseFetchSessionSuccess,
        this::handleCloseFetchSessionFailure
    );

    // Poll to ensure that request has been written to the socket. Wait until either the timer has expired or until
    // all requests have received a response.
    while (timer.notExpired() && !requestFutures.stream().allMatch(RequestFuture::isDone)) {
        client.poll(timer, null, true);
        timer.update();
    }

    if (!requestFutures.stream().allMatch(RequestFuture::isDone)) {
        // we ran out of time before completing all futures. It is ok since we don't want to block the shutdown
        // here.
        log.debug("All requests couldn't be sent in the specific timeout period {}ms. " +
            "This may result in unnecessary fetch sessions at the broker. Consider increasing the timeout passed for " +
            "KafkaConsumer.close(...)", timer.timeoutMs());
    }
}
|
Send close requests for all open fetch sessions and wait, bounded by the given timer, for their
responses so that brokers can release fetch-session state during consumer shutdown.
@param timer the timer bounding how long to wait for the close requests to complete
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetcher.java
| 122
|
[
"timer"
] |
void
| true
| 4
| 7.2
|
apache/kafka
| 31,560
|
javadoc
| false
|
DialogContent
|
function DialogContent({
unsupportedBridgeProtocol,
}: {
unsupportedBridgeProtocol: BridgeProtocol,
}) {
const {version, minNpmVersion, maxNpmVersion} = unsupportedBridgeProtocol;
let instructions;
if (maxNpmVersion === null) {
const upgradeInstructions = `npm i -g react-devtools@^${minNpmVersion}`;
instructions = (
<>
<p className={styles.Paragraph}>
To fix this, upgrade the DevTools NPM package:
</p>
<pre className={styles.NpmCommand}>
{upgradeInstructions}
<Button
onClick={withPermissionsCheck(
{permissions: ['clipboardWrite']},
() => copy(upgradeInstructions),
)}
title="Copy upgrade command to clipboard">
<ButtonIcon type="copy" />
</Button>
</pre>
</>
);
} else {
const downgradeInstructions = `npm i -g react-devtools@${maxNpmVersion}`;
instructions = (
<>
<p className={styles.Paragraph}>
To fix this, downgrade the DevTools NPM package:
</p>
<pre className={styles.NpmCommand}>
{downgradeInstructions}
<Button
onClick={withPermissionsCheck(
{permissions: ['clipboardWrite']},
() => copy(downgradeInstructions),
)}
title="Copy downgrade command to clipboard">
<ButtonIcon type="copy" />
</Button>
</pre>
</>
);
}
return (
<Fragment>
<div className={styles.Column}>
<div className={styles.Title}>Unsupported DevTools backend version</div>
<p className={styles.Paragraph}>
You are running <code>react-devtools</code> version{' '}
<span className={styles.Version}>{DEVTOOLS_VERSION}</span>.
</p>
<p className={styles.Paragraph}>
This requires bridge protocol{' '}
<span className={styles.Version}>
version {currentBridgeProtocol.version}
</span>
. However the current backend version uses bridge protocol{' '}
<span className={styles.Version}>version {version}</span>.
</p>
{instructions}
<p className={styles.Paragraph}>
Or{' '}
<a className={styles.Link} href={INSTRUCTIONS_FB_URL} target="_blank">
click here
</a>{' '}
for more information.
</p>
</div>
</Fragment>
);
}
|
Copyright (c) Meta Platforms, Inc. and affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
@flow
|
javascript
|
packages/react-devtools-shared/src/devtools/views/UnsupportedBridgeProtocolDialog.js
| 68
|
[] | false
| 3
| 6.16
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
saturatedPow
|
/**
 * Returns {@code b} to the {@code k}th power, unless it would overflow or underflow,
 * in which case {@code Integer.MAX_VALUE} or {@code Integer.MIN_VALUE} is returned,
 * respectively.
 *
 * @param b the base
 * @param k the exponent; must be non-negative
 * @since 20.0
 */
@SuppressWarnings("ShortCircuitBoolean")
public static int saturatedPow(int b, int k) {
    checkNonNegative("exponent", k);
    // Fast paths: powers of 0, 1, -1, 2 and -2 have simple closed forms.
    switch (b) {
        case 0:
            return (k == 0) ? 1 : 0;
        case 1:
            return 1;
        case -1:
            return ((k & 1) == 0) ? 1 : -1;
        case 2:
            if (k >= Integer.SIZE - 1) {
                return Integer.MAX_VALUE;
            }
            return 1 << k;
        case -2:
            if (k >= Integer.SIZE) {
                return Integer.MAX_VALUE + (k & 1);
            }
            return ((k & 1) == 0) ? 1 << k : -1 << k;
        default:
            // continue below to handle the general case
    }
    // General case: exponentiation by squaring with saturation.
    int accum = 1;
    // if b is negative and k is odd then the limit is MIN otherwise the limit is MAX
    int limit = Integer.MAX_VALUE + ((b >>> (Integer.SIZE - 1)) & (k & 1));
    while (true) {
        switch (k) {
            case 0:
                return accum;
            case 1:
                return saturatedMultiply(accum, b);
            default:
                if ((k & 1) != 0) {
                    accum = saturatedMultiply(accum, b);
                }
                k >>= 1;
                if (k > 0) {
                    // Squaring b would exceed int range, so the result saturates.
                    if (-FLOOR_SQRT_MAX_INT > b | b > FLOOR_SQRT_MAX_INT) {
                        return limit;
                    }
                    b *= b;
                }
        }
    }
}
|
Returns the {@code b} to the {@code k}th power, unless it would overflow or underflow in which
case {@code Integer.MAX_VALUE} or {@code Integer.MIN_VALUE} is returned, respectively.
@since 20.0
|
java
|
android/guava/src/com/google/common/math/IntMath.java
| 571
|
[
"b",
"k"
] | true
| 10
| 6
|
google/guava
| 51,352
|
javadoc
| false
|
|
coerceDate
|
/**
 * If the property named {@code key} holds an ISO-8601 instant, replace it in place
 * with the equivalent epoch-millisecond string. Absent or unparseable values are
 * left untouched.
 *
 * @param properties the properties to update
 * @param key the key of the property to coerce
 */
private static void coerceDate(Properties properties, String key) {
    String value = properties.getProperty(key);
    if (value == null) {
        return;
    }
    try {
        Instant instant = DateTimeFormatter.ISO_INSTANT.parse(value, Instant::from);
        properties.setProperty(key, String.valueOf(instant.toEpochMilli()));
    }
    catch (DateTimeException ex) {
        // Ignore and keep the original (unparseable) value
    }
}
|
Coerce the value of the given property key from an ISO-8601 instant to its
epoch-millisecond string representation.
<p>
If the original value could not be parsed properly, it is left unchanged under
the same key.
@param properties the properties to update
@param key the key of the property to coerce
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/info/BuildProperties.java
| 97
|
[
"properties",
"key"
] |
void
| true
| 3
| 7.2
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
opj_int_max
|
/**
 * Get the maximum of two integers.
 * @return a if a > b, otherwise b
 */
static INLINE OPJ_INT32 opj_int_max(OPJ_INT32 a, OPJ_INT32 b)
{
    if (a > b) {
        return a;
    }
    return b;
}
|
Get the maximum of two integers
@return Returns a if a > b else b
|
cpp
|
3rdparty/openjpeg/openjp2/opj_intmath.h
| 74
|
[
"a",
"b"
] | true
| 2
| 6.48
|
opencv/opencv
| 85,374
|
doxygen
| false
|
|
getAndIncrement
|
/**
 * Increments this instance's value by 1 and returns the value it held
 * immediately before the increment. This method is not thread safe.
 *
 * @return the value associated with the instance before it was incremented
 * @since 3.5
 */
public long getAndIncrement() {
    final long previous = value;
    value = previous + 1;
    return previous;
}
|
Increments this instance's value by 1; this method returns the value associated with the instance
immediately prior to the increment operation. This method is not thread safe.
@return the value associated with the instance before it was incremented.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableLong.java
| 247
|
[] | true
| 1
| 6.88
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
streamingIterator
|
CloseableIterator<Record> streamingIterator(BufferSupplier decompressionBufferSupplier);
|
Return a streaming iterator which basically delays decompression of the record stream until the records
are actually asked for using {@link Iterator#next()}. If the message format does not support streaming
iteration, then the normal iterator is returned. Either way, callers should ensure that the iterator is closed.
@param decompressionBufferSupplier The supplier of ByteBuffer(s) used for decompression if supported.
For small record batches, allocating a potentially large buffer (64 KB for LZ4)
will dominate the cost of decompressing and iterating over the records in the
batch. As such, a supplier that reuses buffers will have a significant
performance impact.
@return The closeable iterator
|
java
|
clients/src/main/java/org/apache/kafka/common/record/RecordBatch.java
| 240
|
[
"decompressionBufferSupplier"
] | true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
finishToValueAndCloser
|
/**
 * Marks this step as the last step in the {@code ClosingFuture} pipeline. When this step
 * is done, {@code consumer} is invoked (using {@code executor}) with an object holding
 * the result of the operation; the receiver may store the {@link ValueAndCloser} for
 * later synchronous use.
 *
 * <p>After calling this method, you may not call {@link #finishToFuture()}, this method
 * again, or any other derivation method on the original {@code ClosingFuture} instance.
 *
 * @param consumer a callback whose method will be called (using {@code executor}) when
 *        this operation is done
 * @param executor the executor on which to invoke {@code consumer}
 */
public void finishToValueAndCloser(
    ValueAndCloserConsumer<? super V> consumer, Executor executor) {
    checkNotNull(consumer);
    // Atomically claim the terminal state; if the CAS fails, report why based on
    // the state observed afterwards.
    if (!compareAndUpdateState(OPEN, WILL_CREATE_VALUE_AND_CLOSER)) {
        switch (state.get()) {
            case SUBSUMED:
                throw new IllegalStateException(
                    "Cannot call finishToValueAndCloser() after deriving another step");
            case WILL_CLOSE:
            case CLOSING:
            case CLOSED:
                throw new IllegalStateException(
                    "Cannot call finishToValueAndCloser() after calling finishToFuture()");
            case WILL_CREATE_VALUE_AND_CLOSER:
                throw new IllegalStateException("Cannot call finishToValueAndCloser() twice");
            case OPEN:
                break;
        }
        // CAS failed yet the state reads OPEN (or an unhandled value): should be impossible.
        throw new AssertionError(state);
    }
    future.addListener(() -> provideValueAndCloser(consumer, ClosingFuture.this), executor);
}
|
Marks this step as the last step in the {@code ClosingFuture} pipeline. When this step is done,
{@code receiver} will be called with an object that contains the result of the operation. The
receiver can store the {@link ValueAndCloser} outside the receiver for later synchronous use.
<p>After calling this method, you may not call {@link #finishToFuture()}, this method again, or
any other derivation method on the original {@code ClosingFuture} instance.
@param consumer a callback whose method will be called (using {@code executor}) when this
operation is done
|
java
|
android/guava/src/com/google/common/util/concurrent/ClosingFuture.java
| 1,039
|
[
"consumer",
"executor"
] |
void
| true
| 2
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
add
|
/**
 * Adds a sample {@code x} with weight {@code w}: reserves capacity first, then
 * delegates to whichever backing digest is currently active.
 * NOTE(review): reserve(w) appears to be what may switch the active backing
 * implementation — confirm against the class definition.
 *
 * @param x the sample value
 * @param w the sample weight
 */
@Override
public void add(double x, long w) {
    reserve(w);
    if (mergingDigest != null) {
        mergingDigest.add(x, w);
    } else {
        sortingDigest.add(x, w);
    }
}
|
Similar to the constructor above. The limit for switching from a {@link SortingDigest} to a {@link MergingDigest} implementation
is calculated based on the passed compression factor.
@param compression The compression factor for the MergingDigest
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/HybridDigest.java
| 99
|
[
"x",
"w"
] |
void
| true
| 2
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
validate_metadata
|
def validate_metadata(self, *, method, params):
    """Validate given metadata for a method.

    This raises a ``TypeError`` if some of the passed metadata are not
    understood by child objects.

    Parameters
    ----------
    method : str
        The name of the :term:`router`'s method through which the metadata is
        routed. For example, if called inside the :term:`fit` method of a router,
        this would be `"fit"`.

    params : dict
        A dictionary of provided metadata.
    """
    routed_names = self._get_param_names(
        method=method, return_alias=False, ignore_self_request=False
    )
    # Names consumed by this object's own request, if it has one.
    self_names = (
        self._self_request._get_param_names(method=method, return_alias=False)
        if self._self_request
        else set()
    )
    extra_keys = set(params) - routed_names - self_names
    if extra_keys:
        raise TypeError(
            f"{_routing_repr(self.owner)}.{method} got unexpected argument(s)"
            f" {extra_keys}, which are not routed to any object."
        )
|
Validate given metadata for a method.
This raises a ``TypeError`` if some of the passed metadata are not
understood by child objects.
Parameters
----------
method : str
The name of the :term:`router`'s method through which the metadata is
routed. For example, if called inside the :term:`fit` method of a router,
this would be `"fit"`.
params : dict
A dictionary of provided metadata.
|
python
|
sklearn/utils/_metadata_requests.py
| 1,112
|
[
"self",
"method",
"params"
] | false
| 4
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
get_connection_with_tls_context
|
def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None):
    """Returns a urllib3 connection for the given request and TLS settings.

    This should not be called from user code, and is only exposed for use
    when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

    :param request:
        The :class:`PreparedRequest <PreparedRequest>` object to be sent
        over the connection.
    :param verify:
        Either a boolean, in which case it controls whether we verify the
        server's TLS certificate, or a string, in which case it must be a
        path to a CA bundle to use.
    :param proxies:
        (optional) The proxies dictionary to apply to the request.
    :param cert:
        (optional) Any user-provided SSL certificate to be used for client
        authentication (a.k.a., mTLS).
    :rtype:
        urllib3.ConnectionPool
    """
    proxy = select_proxy(request.url, proxies)
    try:
        host_params, pool_kwargs = self.build_connection_pool_key_attributes(
            request, verify, cert
        )
    except ValueError as e:
        raise InvalidURL(e, request=request)

    if not proxy:
        # Only scheme should be lower case
        return self.poolmanager.connection_from_host(
            **host_params, pool_kwargs=pool_kwargs
        )

    proxy = prepend_scheme_if_needed(proxy, "http")
    proxy_url = parse_url(proxy)
    if not proxy_url.host:
        raise InvalidProxyURL(
            "Please check proxy URL. It is malformed "
            "and could be missing the host."
        )
    # Route through a proxy-specific pool manager.
    proxy_manager = self.proxy_manager_for(proxy)
    return proxy_manager.connection_from_host(**host_params, pool_kwargs=pool_kwargs)
|
Returns a urllib3 connection for the given request and TLS settings.
This should not be called from user code, and is only exposed for use
when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request:
The :class:`PreparedRequest <PreparedRequest>` object to be sent
over the connection.
:param verify:
Either a boolean, in which case it controls whether we verify the
server's TLS certificate, or a string, in which case it must be a
path to a CA bundle to use.
:param proxies:
(optional) The proxies dictionary to apply to the request.
:param cert:
(optional) Any user-provided SSL certificate to be used for client
authentication (a.k.a., mTLS).
:rtype:
urllib3.ConnectionPool
|
python
|
src/requests/adapters.py
| 423
|
[
"self",
"request",
"verify",
"proxies",
"cert"
] | false
| 4
| 6.08
|
psf/requests
| 53,586
|
sphinx
| false
|
|
setupWebsocket
|
/**
 * Remove the experimental WebSocket globals (`WebSocket`, `CloseEvent`) when
 * the `--no-experimental-websocket` CLI flag is set; otherwise do nothing.
 */
function setupWebsocket() {
  if (!getOptionValue('--no-experimental-websocket')) {
    return;
  }
  delete globalThis.WebSocket;
  delete globalThis.CloseEvent;
}
|
Delete the experimental WebSocket globals (`WebSocket`, `CloseEvent`) when the
`--no-experimental-websocket` CLI flag is set; otherwise leave them in place.
|
javascript
|
lib/internal/process/pre_execution.js
| 347
|
[] | false
| 2
| 6.8
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
on_chord_body
|
def on_chord_body(self, sig, **header) -> dict:
    """Callback invoked on chord body stamping.

    Arguments:
        sig (chord): chord that is stamped.
        headers (Dict): Partial headers that could be merged with existing headers.

    Returns:
        Dict: headers to update.
    """
    # Base visitor contributes no extra headers for chord bodies.
    return {}
|
Method that is called on chord body stamping.
Arguments:
sig (chord): chord that is stamped.
headers (Dict): Partial headers that could be merged with existing headers.
Returns:
Dict: headers to update.
|
python
|
celery/canvas.py
| 197
|
[
"self",
"sig"
] |
dict
| true
| 1
| 6.88
|
celery/celery
| 27,741
|
google
| false
|
findSource
|
/**
 * Locate the file backing the given code-source URL: jar URLs resolve to the
 * root jar file, anything else resolves via the URL's URI to a plain file.
 *
 * @param location the code-source location
 * @return the backing file
 * @throws IOException if the connection or jar cannot be opened
 * @throws URISyntaxException if the location is not a valid URI
 */
private File findSource(URL location) throws IOException, URISyntaxException {
    if (location.openConnection() instanceof JarURLConnection jarURLConnection) {
        return getRootJarFile(jarURLConnection.getJarFile());
    }
    return new File(location.toURI());
}
|
Create a new {@link ApplicationHome} instance for the specified source class.
@param sourceClass the source class or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/system/ApplicationHome.java
| 124
|
[
"location"
] |
File
| true
| 2
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
get_transform_params
|
def get_transform_params(
    self,
    split_node: torch.fx.Node,
    next_users: list[torch.fx.Node],
    user_inputs_list: list[list[torch.fx.Node | _Range]],
) -> list[list[_TransformParam]] | None:
    """
    Figure out what transforms are needed for each input to each cat node.

    We replace a split node with an unflatten followed by a movedim.

    Returns one list of per-input transform tuples for each user node (an
    empty list for users that are not ``torch.cat``/``torch.stack``), or
    ``None`` when a simplified split range has unequal sections, in which
    case the rewrite cannot be applied.
    """
    split_dim = _get_dim(split_node)
    split_sections = split_node.args[1]
    transform_params_list: list[list[_TransformParam]] = []

    for user_node, user_inputs in zip(next_users, user_inputs_list):
        # Users other than cat/stack need no transforms at all.
        if user_node.target not in (torch.cat, torch.stack):
            transform_params_list.append([])
            continue

        cat_dim = get_arg_value(user_node, 1, "dim")
        transform_params: list[_TransformParam] = []
        for user_input in user_inputs:
            if split_dim == cat_dim and user_node.target is torch.cat:
                # No transform needed
                transform_params.append((None, None, None, None))
            elif isinstance(user_input, tuple):  # Split being simplified
                # Verify equal split
                subset_split_sections = split_sections[  # type: ignore[index]
                    # pyrefly: ignore [bad-index]
                    user_input[0] : user_input[1]
                    + 1  # type: ignore[index]
                ]
                # All sections should be equal
                if len(OrderedSet(subset_split_sections)) != 1:  # type: ignore[arg-type]
                    return None

                num_splits = len(subset_split_sections)  # type: ignore[arg-type]
                # Unflatten reshapes the split dim into (num_splits, -1); a movedim
                # is only required when the split and cat dims differ.
                unflatten_params = (split_dim, (num_splits, -1))
                movedim_params = (
                    (split_dim, cat_dim) if split_dim != cat_dim else None
                )
                transform_params.append(
                    (unflatten_params, movedim_params, None, None)
                )
            elif (
                user_node.target is torch.stack or split_dim != cat_dim
            ):  # We need to unsqueeze inputs not coming through split
                transform_params.append((None, None, (cat_dim,), None))
            else:  # Non-split inputs
                transform_params.append((None, None, None, None))
        transform_params_list.append(transform_params)
    return transform_params_list
|
Figure out what transforms are needed for each input to each cat node.
We replace a split node with an unflatten followed by a movedim
|
python
|
torch/_inductor/fx_passes/split_cat.py
| 844
|
[
"self",
"split_node",
"next_users",
"user_inputs_list"
] |
list[list[_TransformParam]] | None
| true
| 12
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
insertCaptureNewTargetIfNeeded
|
/**
 * If the enclosing function references `new.target`, prepends a statement that
 * captures an equivalent runtime value into a generated `_newTarget` local so
 * the downleveled code can reference it.
 *
 * @param statements The statement list to (possibly) prepend the capture to.
 * @param node The function-like declaration being transformed.
 * @returns The same statement list, with the capture inserted when needed.
 */
function insertCaptureNewTargetIfNeeded(statements: Statement[], node: FunctionLikeDeclaration): Statement[] {
    if (hierarchyFacts & HierarchyFacts.NewTarget) {
        let newTarget: Expression;
        switch (node.kind) {
            case SyntaxKind.ArrowFunction:
                // Arrow functions inherit `new.target` from the enclosing scope.
                return statements;
            case SyntaxKind.MethodDeclaration:
            case SyntaxKind.GetAccessor:
            case SyntaxKind.SetAccessor:
                // Methods and accessors cannot be constructors, so 'new.target' will
                // always return 'undefined'.
                newTarget = factory.createVoidZero();
                break;
            case SyntaxKind.Constructor:
                // Class constructors can only be called with `new`, so `this.constructor`
                // should be relatively safe to use.
                newTarget = factory.createPropertyAccessExpression(
                    setEmitFlags(factory.createThis(), EmitFlags.NoSubstitution),
                    "constructor",
                );
                break;
            case SyntaxKind.FunctionDeclaration:
            case SyntaxKind.FunctionExpression:
                // Functions can be called or constructed, and may have a `this` due to
                // being a member or when calling an imported function via `other_1.f()`.
                newTarget = factory.createConditionalExpression(
                    factory.createLogicalAnd(
                        setEmitFlags(factory.createThis(), EmitFlags.NoSubstitution),
                        factory.createBinaryExpression(
                            setEmitFlags(factory.createThis(), EmitFlags.NoSubstitution),
                            SyntaxKind.InstanceOfKeyword,
                            factory.getLocalName(node),
                        ),
                    ),
                    /*questionToken*/ undefined,
                    factory.createPropertyAccessExpression(
                        setEmitFlags(factory.createThis(), EmitFlags.NoSubstitution),
                        "constructor",
                    ),
                    /*colonToken*/ undefined,
                    factory.createVoidZero(),
                );
                break;
            default:
                return Debug.failBadSyntaxKind(node);
        }
        // var _newTarget = <newTarget>;
        const captureNewTargetStatement = factory.createVariableStatement(
            /*modifiers*/ undefined,
            factory.createVariableDeclarationList([
                factory.createVariableDeclaration(
                    factory.createUniqueName("_newTarget", GeneratedIdentifierFlags.Optimistic | GeneratedIdentifierFlags.FileLevel),
                    /*exclamationToken*/ undefined,
                    /*type*/ undefined,
                    newTarget,
                ),
            ]),
        );
        // Insert after any custom prologue statements so directives stay first.
        setEmitFlags(captureNewTargetStatement, EmitFlags.NoComments | EmitFlags.CustomPrologue);
        insertStatementAfterCustomPrologue(statements, captureNewTargetStatement);
    }
    return statements;
}
|
Adds a statement to capture the `new.target` of a function declaration if it is needed.
NOTE: This must be executed *after* the subtree has been visited.
@param statements The statements for the new function body.
@param node A node.
|
typescript
|
src/compiler/transformers/es2015.ts
| 2,177
|
[
"statements",
"node"
] | true
| 2
| 6.96
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
hashCode
|
/**
 * Default hash code implementation to be used with
 * {@code equals(ExponentialHistogram, ExponentialHistogram)}.
 *
 * @param histogram the histogram to hash
 * @return the hash code
 */
static int hashCode(ExponentialHistogram histogram) {
    // Fold the cheap summary statistics into the hash in a fixed order.
    // We intentionally don't include the hash of the buckets here, because that is
    // likely expensive to compute; the value count and sum are assumed to be a good
    // enough approximation in most cases to minimize collisions, and the value count
    // is typically cached and doesn't involve iterating over all buckets.
    int result = histogram.scale();
    final int[] components = {
        Double.hashCode(histogram.sum()),
        Long.hashCode(histogram.valueCount()),
        Double.hashCode(histogram.min()),
        Double.hashCode(histogram.max()),
        histogram.zeroBucket().hashCode(),
    };
    for (int component : components) {
        result = 31 * result + component;
    }
    return result;
}
|
Default hash code implementation to be used with {@link #equals(ExponentialHistogram, ExponentialHistogram)}.
@param histogram the histogram to hash
@return the hash code
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogram.java
| 206
|
[
"histogram"
] | true
| 1
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
asContributors
|
/**
 * Adapt the imported {@code ConfigData} results into
 * {@code ConfigDataEnvironmentContributor} instances — one contributor per
 * property source, or a single empty-location contributor when an import
 * yielded no property sources.
 * @param imported the imported config data keyed by resolution result
 * @return an unmodifiable list of contributors
 */
private List<ConfigDataEnvironmentContributor> asContributors(
		Map<ConfigDataResolutionResult, ConfigData> imported) {
	List<ConfigDataEnvironmentContributor> result = new ArrayList<>(imported.size() * 5);
	for (Map.Entry<ConfigDataResolutionResult, ConfigData> entry : imported.entrySet()) {
		ConfigDataResolutionResult resolutionResult = entry.getKey();
		ConfigData data = entry.getValue();
		ConfigDataLocation location = resolutionResult.getLocation();
		ConfigDataResource resource = resolutionResult.getResource();
		boolean profileSpecific = resolutionResult.isProfileSpecific();
		if (data.getPropertySources().isEmpty()) {
			result.add(ConfigDataEnvironmentContributor.ofEmptyLocation(location, profileSpecific,
					this.conversionService));
		}
		else {
			// Add property sources back to front, mirroring the original ordering contract.
			for (int i = data.getPropertySources().size() - 1; i >= 0; i--) {
				result.add(ConfigDataEnvironmentContributor.ofUnboundImport(location, resource,
						profileSpecific, data, i, this.conversionService, this.environmentUpdateListener));
			}
		}
	}
	return Collections.unmodifiableList(result);
}
|
Adapts the imported {@link ConfigData} results into
{@link ConfigDataEnvironmentContributor} instances — one contributor per
property source (added in reverse order), or a single empty-location
contributor when an import yielded no property sources.
@param imported the imported config data, keyed by resolution result
@return an unmodifiable list of contributors
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataEnvironmentContributors.java
| 172
|
[
"imported"
] | true
| 3
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
clearMetadataCache
|
/**
 * Clear the metadata caches: delegates to the superclass, then drops this
 * factory's merged bean definition holders and its by-type lookup cache.
 */
@Override
public void clearMetadataCache() {
	// Reset superclass caches first, then this factory's own caches.
	super.clearMetadataCache();
	this.mergedBeanDefinitionHolders.clear();
	clearByTypeCache();
}
|
Clear the metadata caches: delegates to the superclass, then clears this
factory's merged bean definition holders and its by-type lookup cache.
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/DefaultListableBeanFactory.java
| 990
|
[] |
void
| true
| 1
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
checkState
|
boolean checkState();
|
Checks the state of this circuit breaker and changes it if necessary. The return
value indicates whether the circuit breaker is now in state <em>closed</em>; a value
of <strong>true</strong> typically means that the current operation can continue.
@return <strong>true</strong> if the circuit breaker is now closed;
<strong>false</strong> otherwise.
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/CircuitBreaker.java
| 51
|
[] | true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
_check_ns_shape_dtype
|
def _check_ns_shape_dtype(
    actual: Array,
    desired: Array,
    check_dtype: bool,
    check_shape: bool,
    check_scalar: bool,
) -> ModuleType:  # numpydoc ignore=RT03
    """
    Assert that namespace, shape and dtype of the two arrays match.

    Parameters
    ----------
    actual : Array
        The array produced by the tested function.
    desired : Array
        The expected array (typically hardcoded).
    check_dtype, check_shape : bool, default: True
        Whether to check agreement between actual and desired dtypes and shapes
    check_scalar : bool, default: False
        NumPy only: whether to check agreement between actual and desired types -
        0d array vs scalar.

    Returns
    -------
    Arrays namespace.
    """
    actual_xp = array_namespace(actual)  # Raises on scalars and lists
    desired_xp = array_namespace(desired)
    # Bug fix: the messages below previously read "... != f{...}" (a stray 'f'
    # outside the placeholder), leaking a literal 'f' into the assertion output.
    msg = f"namespaces do not match: {actual_xp} != {desired_xp}"
    assert actual_xp == desired_xp, msg

    # Dask uses nan instead of None for unknown shapes
    actual_shape = cast(tuple[float, ...], actual.shape)
    desired_shape = cast(tuple[float, ...], desired.shape)
    assert None not in actual_shape  # Requires explicit support
    assert None not in desired_shape
    if is_dask_namespace(desired_xp):
        # Materialize any lazily-computed (nan) dimensions before comparing.
        if any(math.isnan(i) for i in actual_shape):
            actual_shape = actual.compute().shape  # type: ignore[attr-defined]  # pyright: ignore[reportAttributeAccessIssue]
        if any(math.isnan(i) for i in desired_shape):
            desired_shape = desired.compute().shape  # type: ignore[attr-defined]  # pyright: ignore[reportAttributeAccessIssue]

    if check_shape:
        msg = f"shapes do not match: {actual_shape} != {desired_shape}"
        assert actual_shape == desired_shape, msg
    else:
        # Ignore shape, but check flattened size. This is normally done by
        # np.testing.assert_array_equal etc even when strict=False, but not for
        # non-materializable arrays.
        actual_size = math.prod(actual_shape)  # pyright: ignore[reportUnknownArgumentType]
        desired_size = math.prod(desired_shape)  # pyright: ignore[reportUnknownArgumentType]
        msg = f"sizes do not match: {actual_size} != {desired_size}"
        assert actual_size == desired_size, msg

    if check_dtype:
        msg = f"dtypes do not match: {actual.dtype} != {desired.dtype}"
        assert actual.dtype == desired.dtype, msg

    if is_numpy_namespace(actual_xp) and check_scalar:
        # only NumPy distinguishes between scalars and arrays; we do if check_scalar.
        _msg = (
            "array-ness does not match:\n Actual: "
            f"{type(actual)}\n Desired: {type(desired)}"
        )
        assert np.isscalar(actual) == np.isscalar(desired), _msg
    return desired_xp
|
Assert that namespace, shape and dtype of the two arrays match.
Parameters
----------
actual : Array
The array produced by the tested function.
desired : Array
The expected array (typically hardcoded).
check_dtype, check_shape : bool, default: True
Whether to check agreement between actual and desired dtypes and shapes
check_scalar : bool, default: False
NumPy only: whether to check agreement between actual and desired types -
0d array vs scalar.
Returns
-------
Arrays namespace.
|
python
|
sklearn/externals/array_api_extra/_lib/_testing.py
| 34
|
[
"actual",
"desired",
"check_dtype",
"check_shape",
"check_scalar"
] |
ModuleType
| true
| 9
| 6.8
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
cleanupLoggingSystem
|
/**
 * Clean up the logging system, if one has been initialized.
 */
void cleanupLoggingSystem() {
	if (this.loggingSystem != null) {
		this.loggingSystem.cleanUp();
	}
}
|
Clean up the logging system, if one has been initialized.
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/logging/LoggingApplicationListener.java
| 277
|
[] |
void
| true
| 2
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
slowRemoveIfForRemainingElements
|
/**
 * Finishes a removeIf pass over {@code list} once in-place compaction is no longer
 * possible. On entry: indices below {@code to} are kept, the range [to, from) must
 * be removed, index {@code from} is kept, and indices above {@code from} have not
 * been checked yet.
 */
private static <T extends @Nullable Object> void slowRemoveIfForRemainingElements(
    List<T> list, Predicate<? super T> predicate, int to, int from) {
  // Walk the unchecked tail backwards so each remove() shifts as few elements
  // as possible; stop before 'from', which is already known to be kept.
  for (int index = list.size() - 1; index > from; index--) {
    if (predicate.apply(list.get(index))) {
      list.remove(index);
    }
  }
  // Now delete the pending range [to, from), also back to front.
  for (int index = from - 1; index >= to; index--) {
    list.remove(index);
  }
}
|
Removes, from an iterable, every element that satisfies the provided predicate.
<p>Removals may or may not happen immediately as each element is tested against the predicate.
The behavior of this method is not specified if {@code predicate} is dependent on {@code
removeFrom}.
<p><b>Java 8+ users:</b> if {@code removeFrom} is a {@link Collection}, use {@code
removeFrom.removeIf(predicate)} instead.
@param removeFrom the iterable to (potentially) remove elements from
@param predicate a predicate that determines whether an element should be removed
@return {@code true} if any elements were removed from the iterable
@throws UnsupportedOperationException if the iterable does not support {@code remove()}.
@since 2.0
|
java
|
android/guava/src/com/google/common/collect/Iterables.java
| 228
|
[
"list",
"predicate",
"to",
"from"
] |
void
| true
| 4
| 7.6
|
google/guava
| 51,352
|
javadoc
| false
|
asarrays
|
def asarrays(
    a: Array | complex,
    b: Array | complex,
    xp: ModuleType,
) -> tuple[Array, Array]:
    """
    Ensure both `a` and `b` are arrays.

    If `b` is a python scalar, it is converted to the same dtype as `a`, and
    vice versa.

    Behavior is not specified when mixing a Python ``float`` and an array with an
    integer data type; this may give ``float32``, ``float64``, or raise an
    exception. Behavior is implementation-specific.

    Similarly, behavior is not specified when mixing a Python ``complex`` and an
    array with a real-valued data type; this may give ``complex64``,
    ``complex128``, or raise an exception. Behavior is implementation-specific.

    Parameters
    ----------
    a, b : Array | int | float | complex | bool
        Input arrays or scalars. At least one must be an array.
    xp : array_namespace, optional
        The standard-compatible namespace for `x`. Default: infer.

    Returns
    -------
    Array, Array
        The input arrays, possibly converted to arrays if they were scalars.

    See Also
    --------
    mixing-arrays-with-python-scalars : Array API specification for the behavior.
    """
    a_is_scalar = is_python_scalar(a)
    b_is_scalar = is_python_scalar(b)
    if not (a_is_scalar or b_is_scalar):
        # This includes misc. malformed input e.g. str
        return a, b  # type: ignore[return-value]

    # Normalize so that `array` holds the (potential) array operand and
    # `scalar` the Python scalar; remember whether the order was flipped.
    swapped = a_is_scalar
    array, scalar = (b, a) if swapped else (a, b)

    if is_array_api_obj(array):
        # `scalar` is a bool | int | float | complex.
        # https://data-apis.org/array-api/draft/API_specification/type_promotion.html#mixing-arrays-with-python-scalars
        promotable_kinds = {
            bool: "bool",
            int: ("integral", "real floating", "complex floating"),
            float: ("real floating", "complex floating"),
            complex: "complex floating",
        }
        kind = promotable_kinds[type(cast(complex, scalar))]
        if xp.isdtype(array.dtype, kind):
            scalar_as_array = xp.asarray(scalar, dtype=array.dtype)
        else:
            # Undefined behaviour. Let the function deal with it, if it can.
            scalar_as_array = xp.asarray(scalar)
        xa, xb = array, scalar_as_array
    else:
        # Neither operand is an Array API object. We can only reach this point
        # when one explicitly passes xp=xp to the calling function; otherwise
        # we fail earlier on array_namespace(a, b).
        xa, xb = xp.asarray(array), xp.asarray(scalar)

    return (xb, xa) if swapped else (xa, xb)
|
Ensure both `a` and `b` are arrays.
If `b` is a python scalar, it is converted to the same dtype as `a`, and vice versa.
Behavior is not specified when mixing a Python ``float`` and an array with an
integer data type; this may give ``float32``, ``float64``, or raise an exception.
Behavior is implementation-specific.
Similarly, behavior is not specified when mixing a Python ``complex`` and an array
with a real-valued data type; this may give ``complex64``, ``complex128``, or raise
an exception. Behavior is implementation-specific.
Parameters
----------
a, b : Array | int | float | complex | bool
Input arrays or scalars. At least one must be an array.
xp : array_namespace, optional
The standard-compatible namespace for `x`. Default: infer.
Returns
-------
Array, Array
The input arrays, possibly converted to arrays if they were scalars.
See Also
--------
mixing-arrays-with-python-scalars : Array API specification for the behavior.
|
python
|
sklearn/externals/array_api_extra/_lib/_utils/_helpers.py
| 156
|
[
"a",
"b",
"xp"
] |
tuple[Array, Array]
| true
| 9
| 6.8
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
_drop_labels_or_levels
|
def _drop_labels_or_levels(self, keys, axis: AxisInt = 0):
    """
    Drop labels and/or levels for the given `axis`.

    For each key in `keys`:

    - (axis=0): If key matches a column label then drop the column.
      Otherwise if key matches an index level then drop the level.
    - (axis=1): If key matches an index label then drop the row.
      Otherwise if key matches a column level then drop the level.

    Parameters
    ----------
    keys : str or list of str
        labels or levels to drop
    axis : int, default 0
        Axis that levels are associated with (0 for index, 1 for columns)

    Returns
    -------
    dropped: DataFrame

    Raises
    ------
    ValueError
        if any `keys` match neither a label nor a level
    """
    axis = self._get_axis_number(axis)

    # Validate keys
    keys = common.maybe_make_list(keys)
    invalid_keys = [
        k for k in keys if not self._is_label_or_level_reference(k, axis=axis)
    ]
    if invalid_keys:
        raise ValueError(
            "The following keys are not valid labels or "
            f"levels for axis {axis}: {invalid_keys}"
        )

    # Partition keys into levels vs labels, preserving their order.
    levels_to_drop = []
    labels_to_drop = []
    for key in keys:
        if self._is_level_reference(key, axis=axis):
            levels_to_drop.append(key)
        else:
            labels_to_drop.append(key)

    # Perform copy upfront and then use inplace operations below.
    # This ensures that we always perform exactly one copy.
    # ``copy`` and/or ``inplace`` options could be added in the future.
    dropped = self.copy(deep=False)

    if axis == 0:
        # Levels come off the index; labels are columns.
        if levels_to_drop:
            dropped.reset_index(levels_to_drop, drop=True, inplace=True)
        if labels_to_drop:
            dropped.drop(labels_to_drop, axis=1, inplace=True)
    else:
        # Levels come off the column index; labels are rows.
        if levels_to_drop:
            if isinstance(dropped.columns, MultiIndex):
                # Drop the specified levels from the MultiIndex
                dropped.columns = dropped.columns.droplevel(levels_to_drop)
            else:
                # Drop the last level of Index by replacing with a RangeIndex
                dropped.columns = default_index(dropped.columns.size)
        if labels_to_drop:
            dropped.drop(labels_to_drop, axis=0, inplace=True)

    return dropped
|
Drop labels and/or levels for the given `axis`.
For each key in `keys`:
- (axis=0): If key matches a column label then drop the column.
Otherwise if key matches an index level then drop the level.
- (axis=1): If key matches an index label then drop the row.
Otherwise if key matches a column level then drop the level.
Parameters
----------
keys : str or list of str
labels or levels to drop
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
dropped: DataFrame
Raises
------
ValueError
if any `keys` match neither a label nor a level
|
python
|
pandas/core/generic.py
| 1,802
|
[
"self",
"keys",
"axis"
] | true
| 10
| 6.96
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
endBlockScope
|
/**
 * Ends a block scope. The previous set of block-hoisted variables is restored.
 * Returns the hoisted declarations as a single `let` statement, or `undefined`
 * when nothing was hoisted in this scope.
 */
function endBlockScope() {
    Debug.assert(state > TransformationState.Uninitialized, "Cannot end a block scope during initialization.");
    Debug.assert(state < TransformationState.Completed, "Cannot end a block scope after transformation has completed.");

    let statements: Statement[] | undefined;
    if (some(blockScopedVariableDeclarations)) {
        const declarations = blockScopedVariableDeclarations.map(identifier => factory.createVariableDeclaration(identifier));
        statements = [
            factory.createVariableStatement(
                /*modifiers*/ undefined,
                factory.createVariableDeclarationList(declarations, NodeFlags.Let),
            ),
        ];
    }

    // Pop this scope's hoisted-variable list and restore the enclosing one.
    blockScopeStackOffset--;
    blockScopedVariableDeclarations = blockScopedVariableDeclarationsStack[blockScopeStackOffset];
    if (blockScopeStackOffset === 0) {
        blockScopedVariableDeclarationsStack = [];
    }
    return statements;
}
|
Ends a block scope. The previous set of block hoisted variables are restored. Any hoisted declarations are returned.
|
typescript
|
src/compiler/transformer.ts
| 600
|
[] | false
| 3
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
getMBeanName
|
/**
 * Build the standard JMX MBean name for a metric, in the format
 * {@code domainName:type=metricType,key1=val1,key2=val2}.
 * Tags with an empty key or empty value are skipped; tag values are
 * JMX-sanitized while tag keys are used as-is.
 *
 * @param prefix the MBean domain name
 * @param metricName the metric name supplying the group and tags
 * @return the MBean name string
 */
static String getMBeanName(String prefix, MetricName metricName) {
    StringBuilder name = new StringBuilder()
        .append(prefix)
        .append(":type=")
        .append(metricName.group());
    for (Map.Entry<String, String> tag : metricName.tags().entrySet()) {
        String key = tag.getKey();
        String value = tag.getValue();
        if (key.isEmpty() || value.isEmpty())
            continue;
        name.append(",").append(key).append("=").append(Sanitizer.jmxSanitize(value));
    }
    return name.toString();
}
|
@param prefix the MBean domain name
@param metricName the metric name supplying the group and tags
@return standard JMX MBean name in the following format domainName:type=metricType,key1=val1,key2=val2
|
java
|
clients/src/main/java/org/apache/kafka/common/metrics/JmxReporter.java
| 176
|
[
"prefix",
"metricName"
] |
String
| true
| 3
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
buildTrustConfig
|
/**
 * Resolve the trust-related settings for this configuration into an {@link SslTrustConfig}.
 * Precedence: verification disabled &rarr; trust-everything; PEM certificate
 * authorities; truststore; otherwise the default trust config.
 * NOTE(review): settings are resolved in a specific order and some resolutions may
 * throw; keep the statement order intact when modifying this method.
 *
 * @param basePath base path for settings that represent file paths
 * @param verificationMode whether certificate verification is enabled at all
 * @param keyConfig the already-resolved key configuration (used for the default trust config)
 * @param restrictedTrustFields optional restricted trust fields (may be {@code null})
 * @throws SslConfigException if both certificate authorities and a truststore are configured
 */
protected SslTrustConfig buildTrustConfig(
    Path basePath,
    SslVerificationMode verificationMode,
    SslKeyConfig keyConfig,
    @Nullable Set<X509Field> restrictedTrustFields
) {
    final List<String> certificateAuthorities = resolveListSetting(CERTIFICATE_AUTHORITIES, Function.identity(), null);
    final String trustStorePath = resolveSetting(TRUSTSTORE_PATH, Function.identity(), null);
    // The two sources of trust anchors are mutually exclusive.
    if (certificateAuthorities != null && trustStorePath != null) {
        throw new SslConfigException(
            "cannot specify both [" + settingPrefix + CERTIFICATE_AUTHORITIES + "] and [" + settingPrefix + TRUSTSTORE_PATH + "]"
        );
    }
    // With verification disabled, any configured anchors are irrelevant.
    if (verificationMode.isCertificateVerificationEnabled() == false) {
        return TrustEverythingConfig.TRUST_EVERYTHING;
    }
    if (certificateAuthorities != null) {
        return new PemTrustConfig(certificateAuthorities, basePath);
    }
    if (trustStorePath != null) {
        final char[] password = resolvePasswordSetting(TRUSTSTORE_SECURE_PASSWORD, TRUSTSTORE_LEGACY_PASSWORD);
        final String storeType = resolveSetting(TRUSTSTORE_TYPE, Function.identity(), inferKeyStoreType(trustStorePath));
        final String algorithm = resolveSetting(TRUSTSTORE_ALGORITHM, Function.identity(), TrustManagerFactory.getDefaultAlgorithm());
        return new StoreTrustConfig(trustStorePath, password, storeType, algorithm, true, basePath);
    }
    return buildDefaultTrustConfig(defaultTrustConfig, keyConfig);
}
|
Resolve all necessary configuration settings, and load a {@link SslConfiguration}.
@param basePath The base path to use for any settings that represent file paths. Typically points to the Elasticsearch
configuration directory.
@throws SslConfigException For any problems with the configuration, or with loading the required SSL classes.
|
java
|
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java
| 338
|
[
"basePath",
"verificationMode",
"keyConfig",
"restrictedTrustFields"
] |
SslTrustConfig
| true
| 6
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
incrementalAlterConfigs
|
/**
 * Incrementally update the configuration for the specified resources.
 * Updates are not transactional, so they may succeed for some resources while
 * failing for others; the configs for a single resource are updated atomically.
 *
 * @param configs the resources with their configs
 * @param options the options to use when altering configs
 * @return the AlterConfigsResult
 */
AlterConfigsResult incrementalAlterConfigs(Map<ConfigResource,
    Collection<AlterConfigOp>> configs, AlterConfigsOptions options);
|
Incrementally update the configuration for the specified resources.
<p>
Updates are not transactional so they may succeed for some resources while fail for others. The configs for
a particular resource are updated atomically.
<p>
The following exceptions can be anticipated when calling {@code get()} on the futures obtained from
the returned {@link AlterConfigsResult}:
<ul>
<li>{@link org.apache.kafka.common.errors.ClusterAuthorizationException}
if the authenticated user didn't have alter access to the cluster.</li>
<li>{@link org.apache.kafka.common.errors.TopicAuthorizationException}
if the authenticated user didn't have alter access to the Topic.</li>
<li>{@link org.apache.kafka.common.errors.UnknownTopicOrPartitionException}
if the Topic doesn't exist.</li>
<li>{@link org.apache.kafka.common.errors.InvalidRequestException}
if the request details are invalid. e.g., a configuration key was specified more than once for a resource</li>
</ul>
<p>
This operation is supported by brokers with version 2.3.0 or higher.
@param configs The resources with their configs
@param options The options to use when altering configs
@return The AlterConfigsResult
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 530
|
[
"configs",
"options"
] |
AlterConfigsResult
| true
| 1
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
convertExportsDotXEquals_replaceNode
|
/**
 * Converts a single `exports.x = ...` assignment into an `export` statement.
 * Function and class expressions become declarations when their own name (if any)
 * matches the exported name; everything else becomes `export const x = ...`.
 */
function convertExportsDotXEquals_replaceNode(name: string | undefined, exported: Expression, useSitesToUnqualify: Map<Node, Node> | undefined): Statement {
    const modifiers = [factory.createToken(SyntaxKind.ExportKeyword)];

    const exportConst = () =>
        // `exports.x = 0;` --> `export const x = 0;`
        makeConst(modifiers, factory.createIdentifier(name!), replaceImportUseSites(exported, useSitesToUnqualify)); // TODO: GH#18217

    if (exported.kind === SyntaxKind.FunctionExpression) {
        const { name: expressionName } = exported as FunctionExpression;
        if (expressionName && expressionName.text !== name) {
            // `exports.f = function g() {}` -> `export const f = function g() {}`
            return exportConst();
        }
    }
    if (exported.kind === SyntaxKind.FunctionExpression || exported.kind === SyntaxKind.ArrowFunction) {
        // `exports.f = function() {}` --> `export function f() {}`
        return functionExpressionToDeclaration(name, modifiers, exported as FunctionExpression | ArrowFunction, useSitesToUnqualify);
    }
    if (exported.kind === SyntaxKind.ClassExpression) {
        // `exports.C = class {}` --> `export class C {}`
        return classExpressionToDeclaration(name, modifiers, exported as ClassExpression, useSitesToUnqualify);
    }
    return exportConst();
}
|
Convert `module.exports = { ... }` to individual exports..
We can't always do this if the module has interesting members -- then it will be a default export instead.
|
typescript
|
src/services/codefixes/convertToEsModule.ts
| 430
|
[
"name",
"exported",
"useSitesToUnqualify"
] | true
| 3
| 6
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
format
|
/**
 * Formats an object by delegating to the underlying printer and appending the
 * result to the given buffer. Implements
 * {@link Format#format(Object, StringBuffer, FieldPosition)}.
 *
 * @param obj the object to format.
 * @param toAppendTo the buffer to append to.
 * @param pos the position, ignored.
 * @return the given buffer.
 */
@Override
public StringBuffer format(final Object obj, final StringBuffer toAppendTo, final FieldPosition pos) {
    return toAppendTo.append(printer.format(obj));
}
|
Formats a {@link Date}, {@link Calendar} or {@link Long} (milliseconds) object. This method is an implementation of
{@link Format#format(Object, StringBuffer, FieldPosition)}
@param obj the object to format.
@param toAppendTo the buffer to append to.
@param pos the position, ignored.
@return the given buffer.
|
java
|
src/main/java/org/apache/commons/lang3/time/FastDateFormat.java
| 525
|
[
"obj",
"toAppendTo",
"pos"
] |
StringBuffer
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
addClassAnnotationIfNeeded
|
/**
 * Emits the entitlement annotation on the class when a prior visit marked it as
 * needed, then clears the flag so the annotation is added at most once.
 */
private void addClassAnnotationIfNeeded() {
    if (annotationNeeded == false) {
        return;
    }
    // logger.debug("Adding {} annotation", ENTITLEMENT_ANNOTATION);
    AnnotationVisitor annotationVisitor = cv.visitAnnotation(ENTITLEMENT_ANNOTATION_DESCRIPTOR, true);
    if (annotationVisitor != null) {
        annotationVisitor.visitEnd();
    }
    annotationNeeded = false;
}
|
A class annotation can be added via visitAnnotation; we need to call visitAnnotation after all other visitAnnotation
calls (in case one of them detects our annotation is already present), but before any other subsequent visit* method is called
(up to visitMethod -- if no visitMethod is called, there is nothing to instrument).
This includes visitNestMember, visitPermittedSubclass, visitInnerClass, visitField, visitRecordComponent and, of course,
visitMethod (see {@link ClassVisitor} javadoc).
|
java
|
libs/entitlement/asm-provider/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterImpl.java
| 223
|
[] |
void
| true
| 3
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
readWriteLock
|
/**
 * Creates a {@code Striped<ReadWriteLock>} with eagerly initialized, strongly referenced
 * read-write locks. Every lock is reentrant.
 *
 * @param stripes the minimum number of stripes (locks) required
 * @return a new {@code Striped<ReadWriteLock>}
 */
public static Striped<ReadWriteLock> readWriteLock(int stripes) {
  return custom(stripes, () -> new ReentrantReadWriteLock());
}
|
Creates a {@code Striped<ReadWriteLock>} with eagerly initialized, strongly referenced
read-write locks. Every lock is reentrant.
@param stripes the minimum number of stripes (locks) required
@return a new {@code Striped<ReadWriteLock>}
|
java
|
android/guava/src/com/google/common/util/concurrent/Striped.java
| 268
|
[
"stripes"
] | true
| 1
| 6.16
|
google/guava
| 51,352
|
javadoc
| false
|
|
getUnderlyingFile
|
/**
 * Best-effort resolution of the {@link File} backing the given resource.
 * Only attempted for resource types known to be file-based; an
 * {@link IOException} is treated as "no underlying file".
 *
 * @param resource the resource to inspect
 * @return the absolute file, or {@code null} if it cannot be determined
 */
private @Nullable File getUnderlyingFile(Resource resource) {
	boolean fileBased = resource instanceof ClassPathResource
			|| resource instanceof FileSystemResource
			|| resource instanceof FileUrlResource;
	if (!fileBased) {
		return null;
	}
	try {
		return resource.getFile().getAbsoluteFile();
	}
	catch (IOException ex) {
		return null;
	}
}
|
Return the underlying {@link File} for file-based resources, or {@code null}
if it cannot be determined.
@return the absolute file or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/StandardConfigDataResource.java
| 133
|
[
"resource"
] |
File
| true
| 5
| 7.2
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
setMaxTimestamp
|
/**
 * Update the batch's timestamp type and max timestamp in place, recomputing the
 * CRC unless neither value actually changes.
 *
 * @param timestampType the timestamp type to record in the batch attributes
 * @param maxTimestamp the maximum record timestamp of the batch
 */
@Override
public void setMaxTimestamp(TimestampType timestampType, long maxTimestamp) {
    long currentMaxTimestamp = maxTimestamp();
    // We don't need to recompute crc if the timestamp is not updated.
    if (timestampType() == timestampType && currentMaxTimestamp == maxTimestamp)
        return;
    // Rewrite the header fields first ...
    byte attributes = computeAttributes(compressionType(), timestampType, isTransactional(), isControlBatch(), hasDeleteHorizonMs());
    buffer.putShort(ATTRIBUTES_OFFSET, attributes);
    buffer.putLong(MAX_TIMESTAMP_OFFSET, maxTimestamp);
    // ... then recompute the checksum over the modified buffer and store it.
    long crc = computeChecksum();
    ByteUtils.writeUnsignedInt(buffer, CRC_OFFSET, crc);
}
|
Sets the timestamp type and max timestamp of the batch, recomputing the CRC
unless neither value changes.
@param timestampType the timestamp type
@param maxTimestamp the maximum record timestamp of the batch
|
java
|
clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java
| 370
|
[
"timestampType",
"maxTimestamp"
] |
void
| true
| 3
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
toNumber
|
/**
 * Converts `value` to a number.
 *
 * @static
 * @memberOf _
 * @since 4.0.0
 * @category Lang
 * @param {*} value The value to process.
 * @returns {number} Returns the number.
 * @example
 *
 * _.toNumber(3.2);
 * // => 3.2
 */
function toNumber(value) {
  if (typeof value == 'number') {
    return value;
  }
  if (isSymbol(value)) {
    return NAN;
  }
  if (isObject(value)) {
    // Prefer the primitive from valueOf(); fall back to string coercion
    // when valueOf() itself yields an object.
    var primitive = typeof value.valueOf == 'function' ? value.valueOf() : value;
    value = isObject(primitive) ? (primitive + '') : primitive;
  }
  if (typeof value != 'string') {
    return value === 0 ? value : +value;
  }
  value = baseTrim(value);
  if (reIsBinary.test(value)) {
    return freeParseInt(value.slice(2), 2);
  }
  if (reIsOctal.test(value)) {
    return freeParseInt(value.slice(2), 8);
  }
  return reIsBadHex.test(value) ? NAN : +value;
}
|
Converts `value` to a number.
@static
@memberOf _
@since 4.0.0
@category Lang
@param {*} value The value to process.
@returns {number} Returns the number.
@example
_.toNumber(3.2);
// => 3.2
_.toNumber(Number.MIN_VALUE);
// => 5e-324
_.toNumber(Infinity);
// => Infinity
_.toNumber('3.2');
// => 3.2
|
javascript
|
lodash.js
| 12,564
|
[
"value"
] | false
| 12
| 7.04
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
removeOccurrences
|
/**
 * For each occurrence of an element {@code e} in {@code occurrencesToRemove}, removes one
 * occurrence of {@code e} in {@code multisetToModify}.
 *
 * @return {@code true} if {@code multisetToModify} was changed as a result of this operation
 */
@CanIgnoreReturnValue
public static boolean removeOccurrences(
    Multiset<?> multisetToModify, Iterable<?> occurrencesToRemove) {
  // Multisets get routed to the dedicated overload.
  if (occurrencesToRemove instanceof Multiset) {
    return removeOccurrences(multisetToModify, (Multiset<?>) occurrencesToRemove);
  }
  checkNotNull(multisetToModify);
  checkNotNull(occurrencesToRemove);
  boolean changed = false;
  for (Object element : occurrencesToRemove) {
    changed |= multisetToModify.remove(element);
  }
  return changed;
}
|
For each occurrence of an element {@code e} in {@code occurrencesToRemove}, removes one
occurrence of {@code e} in {@code multisetToModify}.
<p>Equivalently, this method modifies {@code multisetToModify} so that {@code
multisetToModify.count(e)} is set to {@code Math.max(0, multisetToModify.count(e) -
Iterables.frequency(occurrencesToRemove, e))}.
<p>This is <i>not</i> the same as {@code multisetToModify.} {@link Multiset#removeAll
removeAll}{@code (occurrencesToRemove)}, which removes all occurrences of elements that appear
in {@code occurrencesToRemove}. However, this operation <i>is</i> equivalent to, albeit
sometimes more efficient than, the following:
{@snippet :
for (E e : occurrencesToRemove) {
multisetToModify.remove(e);
}
}
@return {@code true} if {@code multisetToModify} was changed as a result of this operation
@since 18.0 (present in 10.0 with a requirement that the second parameter be a {@code
Multiset})
|
java
|
android/guava/src/com/google/common/collect/Multisets.java
| 762
|
[
"multisetToModify",
"occurrencesToRemove"
] | true
| 2
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
|
before
|
/**
 * Creates a function that invokes `func`, with the `this` binding and arguments
 * of the created function, while it's called less than `n` times. Subsequent
 * calls to the created function return the result of the last `func` invocation.
 *
 * @static
 * @memberOf _
 * @since 3.0.0
 * @category Function
 * @param {number} n The number of calls at which `func` is no longer invoked.
 * @param {Function} func The function to restrict.
 * @returns {Function} Returns the new restricted function.
 */
function before(n, func) {
  var lastResult;
  if (typeof func != 'function') {
    throw new TypeError(FUNC_ERROR_TEXT);
  }
  n = toInteger(n);
  return function() {
    n -= 1;
    if (n > 0) {
      lastResult = func.apply(this, arguments);
    }
    if (n <= 1) {
      // Release `func` for garbage collection once it can no longer run.
      func = undefined;
    }
    return lastResult;
  };
}
|
Creates a function that invokes `func`, with the `this` binding and arguments
of the created function, while it's called less than `n` times. Subsequent
calls to the created function return the result of the last `func` invocation.
@static
@memberOf _
@since 3.0.0
@category Function
@param {number} n The number of calls at which `func` is no longer invoked.
@param {Function} func The function to restrict.
@returns {Function} Returns the new restricted function.
@example
jQuery(element).on('click', _.before(5, addContactToList));
// => Allows adding up to 4 contacts to the list.
|
javascript
|
lodash.js
| 10,149
|
[
"n",
"func"
] | false
| 4
| 7.68
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
is_container
|
def is_container(obj) -> bool:
    """Return True if *obj* is iterable but is not a string.

    Proxy instances are unwrapped first: a Proxy implements ``__iter__``
    to forward to its lazily initialized object, so the check must be
    made against the wrapped object rather than the proxy itself.
    """
    if isinstance(obj, Proxy):
        obj = obj.__wrapped__
    iterable = hasattr(obj, "__iter__")
    return iterable and not isinstance(obj, str)
|
Test if an object is a container (iterable) but not a string.
|
python
|
airflow-core/src/airflow/utils/helpers.py
| 106
|
[
"obj"
] |
bool
| true
| 3
| 6
|
apache/airflow
| 43,597
|
unknown
| false
|
_forward_pass_fast
|
def _forward_pass_fast(self, X, check_input=True):
    """Predict using the trained model
    This is the same as _forward_pass but does not record the activations
    of all layers and only returns the last layer's activation.
    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The input data.
    check_input : bool, default=True
        Perform input data validation or not.
    Returns
    -------
    y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs)
        The decision function of the samples for each class in the model.
    """
    if check_input:
        X = validate_data(self, X, accept_sparse=["csr", "csc"], reset=False)
    # Initialize first layer
    activation = X
    # Forward propagate
    hidden_activation = ACTIVATIONS[self.activation]
    for i in range(self.n_layers_ - 1):
        activation = safe_sparse_dot(activation, self.coefs_[i])
        activation += self.intercepts_[i]
        # The activation function mutates `activation` in place (its return
        # value is discarded); it is applied to every layer except the last.
        if i != self.n_layers_ - 2:
            hidden_activation(activation)
    # The output layer uses its own activation, also applied in place.
    output_activation = ACTIVATIONS[self.out_activation_]
    output_activation(activation)
    return activation
|
Predict using the trained model
This is the same as _forward_pass but does not record the activations
of all layers and only returns the last layer's activation.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
check_input : bool, default=True
Perform input data validation or not.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The decision function of the samples for each class in the model.
|
python
|
sklearn/neural_network/_multilayer_perceptron.py
| 189
|
[
"self",
"X",
"check_input"
] | false
| 4
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
quantile
|
def quantile(self, q: float | list[float] | AnyArrayLike = 0.5, **kwargs):
    """
    Return the value at the given quantile within each resample group.

    Parameters
    ----------
    q : float or array-like, default 0.5 (50% quantile)
        Quantile(s) to compute.
    **kwargs
        Forwarded to the underlying groupby quantile implementation.

    Returns
    -------
    DataFrame or Series
        Quantile of values within each group.

    See Also
    --------
    Series.quantile
        Return a series, where the index is q and the values are the quantiles.
    DataFrame.quantile
        Return a DataFrame, where the columns are the columns of self,
        and the values are the quantiles.
    DataFrameGroupBy.quantile
        Return a DataFrame, where the columns are groupby columns,
        and the values are its quantiles.

    Examples
    --------
    >>> ser = pd.Series(
    ...     [1, 3, 2, 4, 3, 8],
    ...     index=pd.DatetimeIndex(
    ...         [
    ...             "2023-01-01",
    ...             "2023-01-10",
    ...             "2023-01-15",
    ...             "2023-02-01",
    ...             "2023-02-10",
    ...             "2023-02-15",
    ...         ]
    ...     ),
    ... )
    >>> ser.resample("MS").quantile()
    2023-01-01    2.0
    2023-02-01    4.0
    Freq: MS, dtype: float64
    >>> ser.resample("MS").quantile(0.25)
    2023-01-01    1.5
    2023-02-01    3.5
    Freq: MS, dtype: float64
    """
    # Delegate to the downsampling machinery, which dispatches to the
    # groupby implementation of `quantile`.
    return self._downsample("quantile", q=q, **kwargs)
|
Return value at the given quantile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
Returns
-------
DataFrame or Series
Quantile of values within each group.
See Also
--------
Series.quantile
Return a series, where the index is q and the values are the quantiles.
DataFrame.quantile
Return a DataFrame, where the columns are the columns of self,
and the values are the quantiles.
DataFrameGroupBy.quantile
Return a DataFrame, where the columns are groupby columns,
and the values are its quantiles.
Examples
--------
>>> ser = pd.Series(
... [1, 3, 2, 4, 3, 8],
... index=pd.DatetimeIndex(
... [
... "2023-01-01",
... "2023-01-10",
... "2023-01-15",
... "2023-02-01",
... "2023-02-10",
... "2023-02-15",
... ]
... ),
... )
>>> ser.resample("MS").quantile()
2023-01-01 2.0
2023-02-01 4.0
Freq: MS, dtype: float64
>>> ser.resample("MS").quantile(0.25)
2023-01-01 1.5
2023-02-01 3.5
Freq: MS, dtype: float64
|
python
|
pandas/core/resample.py
| 1,895
|
[
"self",
"q"
] | true
| 1
| 7.28
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
stubArray
|
/**
 * This method returns a new empty array on every call.
 *
 * @returns {Array} Returns the new empty array.
 */
function stubArray() {
  return new Array();
}
|
This method returns a new empty array.
@static
@memberOf _
@since 4.13.0
@category Util
@returns {Array} Returns the new empty array.
@example
var arrays = _.times(2, _.stubArray);
console.log(arrays);
// => [[], []]
console.log(arrays[0] === arrays[1]);
// => false
|
javascript
|
lodash.js
| 16,151
|
[] | false
| 1
| 6.96
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
update_providers_next_version
|
def update_providers_next_version():
    """
    Scan all provider pyproject.toml files for dependencies with "# use next version" comment
    and update them to use the current version from the referenced provider's pyproject.toml.

    Prints a per-provider summary of every dependency change made, then
    regenerates provider dependencies; prints an informational message when
    nothing needed updating.
    """
    from airflow_breeze.utils.packages import update_providers_with_next_version_comment
    # make sure dependencies are regenerated before we start
    regenerate_provider_dependencies_once()
    get_console().print("\n[info]Scanning for providers with '# use next version' comments...\n")
    updates_made = update_providers_with_next_version_comment()
    if updates_made:
        get_console().print("\n[success]Summary of updates:[/]")
        for provider_id, dependencies in updates_made.items():
            get_console().print(f"\n[info]Provider: {provider_id}[/]")
            for dep_name, dep_info in dependencies.items():
                get_console().print(f"  • {dep_name}: {dep_info['old_version']} → {dep_info['new_version']}")
        get_console().print(
            f"\n[success]Updated {len(updates_made)} provider(s) with "
            f"{sum(len(deps) for deps in updates_made.values())} dependency change(s).[/]"
        )
        # Regenerate provider dependencies after some of them changed
        # (cache_clear is required because the helper is memoized).
        regenerate_provider_dependencies_once.cache_clear()
        regenerate_provider_dependencies_once()
    else:
        get_console().print(
            "\n[info]No updates needed. All providers with '# use next version' "
            "comments are already using the latest versions.[/]"
        )
|
Scan all provider pyproject.toml files for dependencies with "# use next version" comment
and update them to use the current version from the referenced provider's pyproject.toml.
|
python
|
dev/breeze/src/airflow_breeze/commands/release_management_commands.py
| 3,032
|
[] | false
| 5
| 6.4
|
apache/airflow
| 43,597
|
unknown
| false
|
|
removeFrom
|
/**
 * Remove the default time zone offset from the given time.
 *
 * @param time the time to remove the default offset from
 * @return the time with the default offset subtracted
 */
long removeFrom(long time) {
	return time - this.defaultTimeZone.getOffset(time);
}
|
Remove the default offset from the given time.
@param time the time to remove the default offset from
@return the time with the default offset removed
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/DefaultTimeZoneOffset.java
| 54
|
[
"time"
] | true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
noResult
|
/**
 * Return the shared {@link InvocationResult} instance representing no result.
 *
 * @param <R> the result type
 * @return an {@link InvocationResult} with no value
 */
@SuppressWarnings("unchecked")
public static <R> InvocationResult<R> noResult() {
	return (InvocationResult<R>) NONE;
}
|
Return an {@link InvocationResult} instance representing no result.
@param <R> the result type
@return an {@link InvocationResult}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/util/LambdaSafe.java
| 441
|
[] | true
| 1
| 6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
ensureUseStrict
|
/**
 * Ensures a "use strict" directive is present in the statement list.
 *
 * @param statements An array of statements
 * @returns The original array when a "use strict" prologue already exists,
 * otherwise a new node array with the directive prepended (text range
 * copied from the original statements).
 */
function ensureUseStrict(statements: NodeArray<Statement>): NodeArray<Statement> {
    if (findUseStrictPrologue(statements)) {
        return statements;
    }
    const withPrologue = createNodeArray<Statement>([createUseStrictPrologue(), ...statements]);
    return setTextRange(withPrologue, statements);
}
|
Ensures "use strict" directive is added
@param statements An array of statements
|
typescript
|
src/compiler/factory/nodeFactory.ts
| 6,944
|
[
"statements"
] | true
| 2
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
transitionToFenced
|
/**
 * Transition the member to the FENCED state: release the assignment by
 * invoking the onPartitionsLost callback and, when the callback completes
 * while the member is still FENCED, transition to JOINING to rejoin the
 * group. Members that are already PREPARE_LEAVING, LEAVING, or
 * UNSUBSCRIBED short-circuit and do not rejoin.
 */
public void transitionToFenced() {
    if (state == MemberState.PREPARE_LEAVING) {
        log.info("Member {} with epoch {} got fenced but it is already preparing to leave " +
            "the group, so it will stop sending heartbeat and won't attempt to send the " +
            "leave request or rejoin.", memberId, memberEpoch);
        // Briefly transition to LEAVING to ensure all required actions are applied even
        // though there is no need to send a leave group heartbeat (ex. clear epoch and
        // notify epoch listeners). Then transition to UNSUBSCRIBED, ensuring that the member
        // (that is not part of the group anymore from the broker point of view) will stop
        // sending heartbeats while it completes the ongoing leaving operation.
        transitionToSendingLeaveGroup(false);
        transitionTo(MemberState.UNSUBSCRIBED);
        maybeCompleteLeaveInProgress();
        return;
    }
    if (state == MemberState.LEAVING) {
        log.debug("Member {} with epoch {} got fenced before sending leave group heartbeat. " +
            "It will not send the leave request and won't attempt to rejoin.", memberId, memberEpoch);
        transitionTo(MemberState.UNSUBSCRIBED);
        maybeCompleteLeaveInProgress();
        return;
    }
    if (state == MemberState.UNSUBSCRIBED) {
        log.debug("Member {} with epoch {} got fenced but it already left the group, so it " +
            "won't attempt to rejoin.", memberId, memberEpoch);
        return;
    }
    transitionTo(MemberState.FENCED);
    resetEpoch();
    log.debug("Member {} with epoch {} transitioned to {} state. It will release its " +
        "assignment and rejoin the group.", memberId, memberEpoch, MemberState.FENCED);
    // Release assignment
    CompletableFuture<Void> callbackResult = signalPartitionsLost(subscriptions.assignedPartitions());
    callbackResult.whenComplete((result, error) -> {
        if (error != null) {
            log.error("onPartitionsLost callback invocation failed while releasing assignment" +
                " after member got fenced. Member will rejoin the group anyways.", error);
        }
        clearAssignment();
        // Only rejoin if the state is still FENCED; another transition may
        // have happened while the callback was running.
        if (state == MemberState.FENCED) {
            transitionToJoining();
        } else {
            log.debug("Fenced member onPartitionsLost callback completed but the state has " +
                "already changed to {}, so the member won't rejoin the group", state);
        }
    });
}
|
Transition the member to the FENCED state, where the member will release the assignment by
calling the onPartitionsLost callback, and when the callback completes, it will transition
to {@link MemberState#JOINING} to rejoin the group. This is expected to be invoked when
the heartbeat returns a FENCED_MEMBER_EPOCH or UNKNOWN_MEMBER_ID error.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java
| 387
|
[] |
void
| true
| 6
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
preprocessFlamechart
|
/**
 * Converts raw Chrome timeline events into a Flamechart structure.
 * Parses the events with Speedscope's Chrome importer (wrapping parse
 * failures in InvalidProfileError), builds a Speedscope flamechart from
 * the first profile, and maps each call frame into plain
 * {name, timestamp, duration, scriptUrl, locationLine, locationColumn}
 * entries. Start/end values are divided by 1000 — presumably µs → ms;
 * confirm against the importer's units.
 */
function preprocessFlamechart(rawData: TimelineEvent[]): Flamechart {
  let parsedData;
  try {
    parsedData = importFromChromeTimeline(rawData, 'react-devtools');
  } catch (error) {
    // Assume any Speedscope errors are caused by bad profiles
    const errorToRethrow = new InvalidProfileError(error.message);
    errorToRethrow.stack = error.stack;
    throw errorToRethrow;
  }
  const profile = parsedData.profiles[0]; // TODO: Choose the main CPU thread only
  const speedscopeFlamechart = new SpeedscopeFlamechart({
    // $FlowFixMe[method-unbinding]
    getTotalWeight: profile.getTotalWeight.bind(profile),
    // $FlowFixMe[method-unbinding]
    forEachCall: profile.forEachCall.bind(profile),
    // $FlowFixMe[method-unbinding]
    formatValue: profile.formatValue.bind(profile),
    getColorBucketForFrame: () => 0,
  });
  const flamechart: Flamechart = speedscopeFlamechart.getLayers().map(layer =>
    layer.map(
      ({
        start,
        end,
        node: {
          frame: {name, file, line, col},
        },
      }) => ({
        name,
        timestamp: start / 1000,
        duration: (end - start) / 1000,
        scriptUrl: file,
        locationLine: line,
        locationColumn: col,
      }),
    ),
  );
  return flamechart;
}
|
Copyright (c) Meta Platforms, Inc. and affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
@flow
|
javascript
|
packages/react-devtools-timeline/src/import-worker/preprocessData.js
| 955
|
[] | false
| 2
| 6.4
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
baseTimestamp
|
/**
 * Gets the base timestamp of the batch, read from the batch buffer, which
 * is used to calculate the record timestamps from the deltas.
 *
 * @return The base timestamp
 */
public long baseTimestamp() {
    return buffer.getLong(BASE_TIMESTAMP_OFFSET);
}
|
Gets the base timestamp of the batch which is used to calculate the record timestamps from the deltas.
@return The base timestamp
|
java
|
clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java
| 165
|
[] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
getVersionedContentEntry
|
/**
 * Locate the multi-release (versioned) entry for the given name, if any.
 *
 * @param name the entry name to resolve
 * @return the versioned entry, or {@code null} when the runtime version does
 * not exceed the base version, the name is under {@code META-INF/}, the jar
 * is not multi-release, or no applicable versioned entry exists
 */
private ZipContent.Entry getVersionedContentEntry(String name) {
	// NOTE: we can't call isMultiRelease() directly because it's a final method and
	// it inspects the container jar. We use ManifestInfo instead.
	if (BASE_VERSION >= this.version || name.startsWith(META_INF) || !getManifestInfo().isMultiRelease()) {
		return null;
	}
	MetaInfVersionsInfo versionsInfo = getMetaInfVersionsInfo();
	int[] declaredVersions = versionsInfo.versions();
	String[] versionDirectories = versionsInfo.directories();
	// Search from the highest declared version downwards for the first entry
	// that is applicable at the current runtime version.
	for (int index = declaredVersions.length - 1; index >= 0; index--) {
		if (declaredVersions[index] > this.version) {
			continue;
		}
		ZipContent.Entry candidate = getContentEntry(versionDirectories[index], name);
		if (candidate != null) {
			return candidate;
		}
	}
	return null;
}
|
Return the multi-release (versioned) entry for the given name, or {@code null} when none applies.
@param name the entry name to resolve
@return the versioned entry, or {@code null} if the jar is not multi-release, the name is under {@code META-INF/}, or no versioned entry exists
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/jar/NestedJarFile.java
| 273
|
[
"name"
] | true
| 7
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
throttleTimeMs
|
/**
 * The throttle time in milliseconds carried in the response data.
 *
 * @return the throttle time in milliseconds
 */
@Override
public int throttleTimeMs() {
    return data.throttleTimeMs();
}
|
The throttle time in milliseconds carried in the response data.
@return the throttle time in milliseconds.
|
java
|
clients/src/main/java/org/apache/kafka/common/requests/AllocateProducerIdsResponse.java
| 53
|
[] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
currentLag
|
/**
 * Get the consumer's current lag on the partition, delegating to the
 * underlying consumer implementation. Returns an "empty"
 * {@link OptionalLong} if the lag is not known.
 *
 * @param topicPartition The partition to get the lag for.
 * @return This {@code Consumer} instance's current lag for the given partition.
 */
@Override
public OptionalLong currentLag(TopicPartition topicPartition) {
    return delegate.currentLag(topicPartition);
}
|
Get the consumer's current lag on the partition. Returns an "empty" {@link OptionalLong} if the lag is not known,
for example if there is no position yet, or if the end offset is not known yet.
<p>
This method uses locally cached metadata. If the log end offset is not known yet, it triggers a request to fetch
the log end offset, but returns immediately.
@param topicPartition The partition to get the lag for.
@return This {@code Consumer} instance's current lag for the given partition.
@throws IllegalStateException if the {@code topicPartition} is not assigned
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
| 1,722
|
[
"topicPartition"
] |
OptionalLong
| true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
getPropertyName
|
/**
 * Resolve the display value for a route property. Function-valued
 * properties are converted to a readable class/function name; any other
 * value is returned unchanged.
 *
 * @param child - The route whose property is read
 * @param property - The property to resolve
 * @returns The property value, or its display name when it is a function
 */
function getPropertyName(
  child: AngularRoute,
  property: 'title' | 'redirectTo' | 'matcher' | 'runGuardsAndResolvers',
) {
  const value = child[property];
  return value instanceof Function ? getClassOrFunctionName(value, property) : value;
}
|
Get the display value for a route property.
@param child - The route whose property is read
@param property - The property name to resolve
@returns The property value; function-valued properties are converted to a readable class/function name
|
typescript
|
devtools/projects/ng-devtools-backend/src/lib/router-tree.ts
| 208
|
[
"child",
"property"
] | false
| 2
| 7.12
|
angular/angular
| 99,544
|
jsdoc
| false
|
|
deallocate
|
/**
 * Return a buffer to the pool. If it is of the poolable size it is cleared
 * and added to the free list; otherwise the bytes are only accounted back
 * into non-pooled available memory. In either case one waiting thread (if
 * any) is signalled that memory became available.
 *
 * @param buffer The buffer to return
 * @param size The size of the buffer to mark as deallocated; may be smaller
 *     than {@code buffer.capacity()} since the buffer may re-allocate itself
 *     during in-place compression
 */
public void deallocate(ByteBuffer buffer, int size) {
    lock.lock();
    try {
        if (size == this.poolableSize && size == buffer.capacity()) {
            buffer.clear();
            this.free.add(buffer);
        } else {
            this.nonPooledAvailableMemory += size;
        }
        // Wake the longest-waiting thread, if any, now that memory is free.
        Condition moreMem = this.waiters.peekFirst();
        if (moreMem != null)
            moreMem.signal();
    } finally {
        lock.unlock();
    }
}
|
Return buffers to the pool. If they are of the poolable size add them to the free list, otherwise just mark the
memory as free.
@param buffer The buffer to return
@param size The size of the buffer to mark as deallocated, note that this may be smaller than buffer.capacity
since the buffer may re-allocate itself during in-place compression
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/BufferPool.java
| 260
|
[
"buffer",
"size"
] |
void
| true
| 4
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
nextToken
|
/**
 * Gets the next token from the String.
 * Equivalent to {@link #next()} except it returns null rather than
 * throwing {@link NoSuchElementException} when no tokens remain.
 *
 * @return the next sequential token, or null when no more tokens are found.
 */
public String nextToken() {
    return hasNext() ? tokens[tokenPos++] : null;
}
|
Gets the next token from the String.
Equivalent to {@link #next()} except it returns null rather than
throwing {@link NoSuchElementException} when no tokens remain.
@return the next sequential token, or null when no more tokens are found.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrTokenizer.java
| 655
|
[] |
String
| true
| 2
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getRawType
|
/**
 * Returns the raw type of {@code T}: the {@code Class} itself when
 * {@code runtimeType} is a {@code Class}, the raw type when it is a
 * {@link ParameterizedType}, and otherwise (wildcard, type variable, or
 * generic array type) the raw type of the first bound.
 */
public final Class<? super T> getRawType() {
  if (runtimeType instanceof Class) {
    @SuppressWarnings("unchecked") // raw type is T
    Class<? super T> result = (Class<? super T>) runtimeType;
    return result;
  } else if (runtimeType instanceof ParameterizedType) {
    @SuppressWarnings("unchecked") // raw type is |T|
    Class<? super T> result = (Class<? super T>) ((ParameterizedType) runtimeType).getRawType();
    return result;
  } else {
    // For a wildcard or type variable, the first bound determines the runtime type.
    // This case also covers GenericArrayType.
    return getRawTypes().iterator().next();
  }
}
|
Returns the raw type of {@code T}. Formally speaking, if {@code T} is returned by {@link
java.lang.reflect.Method#getGenericReturnType}, the raw type is what's returned by {@link
java.lang.reflect.Method#getReturnType} of the same method object. Specifically:
<ul>
<li>If {@code T} is a {@code Class} itself, {@code T} itself is returned.
<li>If {@code T} is a {@link ParameterizedType}, the raw type of the parameterized type is
returned.
<li>If {@code T} is a {@link GenericArrayType}, the returned type is the corresponding array
class. For example: {@code List<Integer>[] => List[]}.
<li>If {@code T} is a type variable or a wildcard type, the raw type of the first upper bound
is returned. For example: {@code <X extends Foo> => Foo}.
</ul>
|
java
|
android/guava/src/com/google/common/reflect/TypeToken.java
| 193
|
[] | true
| 3
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
|
_hash_pandas_object
|
def _hash_pandas_object(
    self, *, encoding: str, hash_key: str, categorize: bool
) -> npt.NDArray[np.uint64]:
    """
    Hash a Categorical by hashing its categories, and then mapping the codes
    to the hashes.
    Parameters
    ----------
    encoding : str
    hash_key : str
    categorize : bool
        Ignored for Categorical.
    Returns
    -------
    np.ndarray[uint64]
    """
    # Note we ignore categorize, as we are already Categorical.
    from pandas.core.util.hashing import hash_array
    # Convert ExtensionArrays to ndarrays
    values = np.asarray(self.categories._values)
    hashed = hash_array(values, encoding, hash_key, categorize=False)
    # we have uint64, as we don't directly support missing values
    # we don't want to use take_nd which will coerce to float
    # instead, directly construct the result with a
    # max(np.uint64) as the missing value indicator
    #
    # TODO: GH#15362
    mask = self.isna()
    # Empty categories produce no hashes; fall back to an all-zero result
    # (missing slots are still marked below).
    if len(hashed):
        result = hashed.take(self._codes)
    else:
        result = np.zeros(len(mask), dtype="uint64")
    if mask.any():
        result[mask] = lib.u8max
    return result
|
Hash a Categorical by hashing its categories, and then mapping the codes
to the hashes.
Parameters
----------
encoding : str
hash_key : str
categorize : bool
Ignored for Categorical.
Returns
-------
np.ndarray[uint64]
|
python
|
pandas/core/arrays/categorical.py
| 2,179
|
[
"self",
"encoding",
"hash_key",
"categorize"
] |
npt.NDArray[np.uint64]
| true
| 4
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
deliveryCount
|
/**
 * Get the delivery count for the record if available. Deliveries
 * are counted for records delivered by share groups.
 *
 * @return the delivery count, or empty when deliveries are not counted
 */
public Optional<Short> deliveryCount() {
    return deliveryCount;
}
|
Get the delivery count for the record if available. Deliveries
are counted for records delivered by share groups.
@return the delivery count or empty when deliveries not counted
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecord.java
| 256
|
[] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
generateCodeChallenge
|
/**
 * Generates a PKCE code challenge from a code verifier using SHA-256.
 *
 * @param codeVerifier The code verifier string
 * @returns A base64url-encoded (unpadded) SHA-256 hash of the code verifier
 */
async function generateCodeChallenge(codeVerifier: string): Promise<string> {
	const digest = await crypto.subtle.digest('SHA-256', new TextEncoder().encode(codeVerifier));
	const digestChars = Array.from(new Uint8Array(digest), byte => String.fromCharCode(byte));
	const base64 = btoa(digestChars.join(''));
	// Translate standard base64 into the base64url alphabet and drop padding.
	return base64.replace(/\+/g, '-').replace(/\//g, '_').replace(/=+$/, '');
}
|
Generates a PKCE code challenge from a code verifier using SHA-256.
@param codeVerifier The code verifier string
@returns A base64url-encoded SHA-256 hash of the code verifier
|
typescript
|
extensions/github-authentication/src/flows.ts
| 135
|
[
"codeVerifier"
] | true
| 1
| 6.56
|
microsoft/vscode
| 179,840
|
jsdoc
| true
|
|
toMap
|
/**
 * @return A map representation of the underlying features, keyed by feature
 * name with each value converted via its own {@code toMap()}. The returned
 * value can be converted back to Features using one of the
 * from*FeaturesMap() APIs of this class.
 */
public Map<String, Map<String, Short>> toMap() {
    return features.entrySet().stream().collect(
        Collectors.toMap(
            Map.Entry::getKey,
            entry -> entry.getValue().toMap()));
}
|
@return A map representation of the underlying features. The returned value can be converted
back to Features using one of the from*FeaturesMap() APIs of this class.
|
java
|
clients/src/main/java/org/apache/kafka/common/feature/Features.java
| 96
|
[] | true
| 1
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
invokeAdviceMethod
|
/**
 * Invoke the advice method with arguments bound from the given join point.
 *
 * @param jp the current JoinPoint
 * @param jpMatch the JoinPointMatch that matched this execution join point
 * @param returnValue the return value from the method execution (may be null)
 * @param t the exception thrown by the method execution (may be null)
 * @return the invocation result
 * @throws Throwable in case of invocation failure
 */
protected @Nullable Object invokeAdviceMethod(JoinPoint jp, @Nullable JoinPointMatch jpMatch,
		@Nullable Object returnValue, @Nullable Throwable t) throws Throwable {
	return invokeAdviceMethodWithGivenArgs(argBinding(jp, jpMatch, returnValue, t));
}
|
Invoke the advice method.
@param jp the current JoinPoint
@param jpMatch the JoinPointMatch that matched this execution join point
@param returnValue the return value from the method execution (may be null)
@param t the exception thrown by the method execution (may be null)
@return the invocation result
@throws Throwable in case of invocation failure
|
java
|
spring-aop/src/main/java/org/springframework/aop/aspectj/AbstractAspectJAdvice.java
| 627
|
[
"jp",
"jpMatch",
"returnValue",
"t"
] |
Object
| true
| 1
| 6.32
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
withProfiles
|
/**
 * Return a new {@link ConfigDataActivationContext} with the same cloud
 * platform but the given profiles.
 *
 * @param profiles the profiles
 * @return a new {@link ConfigDataActivationContext} with specific profiles
 */
ConfigDataActivationContext withProfiles(Profiles profiles) {
	return new ConfigDataActivationContext(this.cloudPlatform, profiles);
}
|
Return a new {@link ConfigDataActivationContext} with specific profiles.
@param profiles the profiles
@return a new {@link ConfigDataActivationContext} with specific profiles
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataActivationContext.java
| 74
|
[
"profiles"
] |
ConfigDataActivationContext
| true
| 1
| 6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
requestOffsetReset
|
/**
 * Request an offset reset for each of the given assigned partitions using
 * the provided reset strategy.
 *
 * @param partitions the partitions whose offsets should be reset
 * @param offsetResetStrategy the strategy to apply when resetting
 */
public synchronized void requestOffsetReset(Collection<TopicPartition> partitions, AutoOffsetResetStrategy offsetResetStrategy) {
    for (TopicPartition tp : partitions) {
        log.info("Seeking to {} offset of partition {}", offsetResetStrategy, tp);
        assignedState(tp).reset(offsetResetStrategy);
    }
}
|
Request an offset reset for the given assigned partitions using the provided reset strategy.
@param partitions the partitions whose offsets should be reset
@param offsetResetStrategy the strategy used to reset each partition's offset
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
| 789
|
[
"partitions",
"offsetResetStrategy"
] |
void
| true
| 1
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
create
|
/**
 * Creates a <i>mutable</i> {@code CompactHashSet} instance containing the elements of the given
 * collection in unspecified order, pre-sized to the collection's size.
 *
 * @param collection the elements that the set should contain
 * @return a new {@code CompactHashSet} containing those elements (minus duplicates)
 */
public static <E extends @Nullable Object> CompactHashSet<E> create(
    Collection<? extends E> collection) {
  CompactHashSet<E> set = createWithExpectedSize(collection.size());
  set.addAll(collection);
  return set;
}
|
Creates a <i>mutable</i> {@code CompactHashSet} instance containing the elements of the given
collection in unspecified order.
@param collection the elements that the set should contain
@return a new {@code CompactHashSet} containing those elements (minus duplicates)
|
java
|
android/guava/src/com/google/common/collect/CompactHashSet.java
| 92
|
[
"collection"
] | true
| 1
| 6.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
trace
|
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
    """
    Return the sum along diagonals of the array.

    If `a` is 2-D, the sum along its diagonal with the given offset is
    returned, i.e., the sum of elements ``a[i,i+offset]`` for all i. If `a`
    has more than two dimensions, then the axes specified by `axis1` and
    `axis2` are used to determine the 2-D sub-arrays whose traces are
    returned. The shape of the resulting array is the same as that of `a`
    with `axis1` and `axis2` removed.

    Parameters
    ----------
    a : array_like
        Input array, from which the diagonals are taken.
    offset : int, optional
        Offset of the diagonal from the main diagonal. Can be both positive
        and negative. Defaults to 0.
    axis1, axis2 : int, optional
        Axes to be used as the first and second axis of the 2-D sub-arrays
        from which the diagonals should be taken. Defaults are the first two
        axes of `a`.
    dtype : dtype, optional
        Determines the data-type of the returned array and of the accumulator
        where the elements are summed. If dtype has the value None and `a` is
        of integer type of precision less than the default integer
        precision, then the default integer precision is used. Otherwise,
        the precision is the same as that of `a`.
    out : ndarray, optional
        Array into which the output is placed. Its type is preserved and
        it must be of the right shape to hold the output.

    Returns
    -------
    sum_along_diagonals : ndarray
        If `a` is 2-D, the sum along the diagonal is returned. If `a` has
        larger dimensions, then an array of sums along diagonals is returned.

    See Also
    --------
    diag, diagonal, diagflat

    Examples
    --------
    >>> import numpy as np
    >>> np.trace(np.eye(3))
    3.0
    >>> a = np.arange(8).reshape((2,2,2))
    >>> np.trace(a)
    array([6, 8])
    >>> a = np.arange(24).reshape((2,2,2,3))
    >>> np.trace(a).shape
    (2, 3)
    """
    # np.matrix is routed through a plain ndarray view to preserve the
    # backward-compatible return type; everything else keeps its subclass.
    converter = asarray if isinstance(a, np.matrix) else asanyarray
    return converter(a).trace(
        offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out
    )
|
Return the sum along diagonals of the array.
If `a` is 2-D, the sum along its diagonal with the given offset
is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
If `a` has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-arrays whose traces are returned.
The shape of the resulting array is the same as that of `a` with `axis1`
and `axis2` removed.
Parameters
----------
a : array_like
Input array, from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to 0.
axis1, axis2 : int, optional
Axes to be used as the first and second axis of the 2-D sub-arrays
from which the diagonals should be taken. Defaults are the first two
axes of `a`.
dtype : dtype, optional
Determines the data-type of the returned array and of the accumulator
where the elements are summed. If dtype has the value None and `a` is
of integer type of precision less than the default integer
precision, then the default integer precision is used. Otherwise,
the precision is the same as that of `a`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and
it must be of the right shape to hold the output.
Returns
-------
sum_along_diagonals : ndarray
If `a` is 2-D, the sum along the diagonal is returned. If `a` has
larger dimensions, then an array of sums along diagonals is returned.
See Also
--------
diag, diagonal, diagflat
Examples
--------
>>> import numpy as np
>>> np.trace(np.eye(3))
3.0
>>> a = np.arange(8).reshape((2,2,2))
>>> np.trace(a)
array([6, 8])
>>> a = np.arange(24).reshape((2,2,2,3))
>>> np.trace(a).shape
(2, 3)
|
python
|
numpy/_core/fromnumeric.py
| 1,807
|
[
"a",
"offset",
"axis1",
"axis2",
"dtype",
"out"
] | false
| 3
| 7.76
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
addJsonPropertySource
|
/**
 * Add the JSON property source to the environment, inserting it just before
 * the highest-precedence known property source when one is present,
 * otherwise adding it first.
 *
 * @param environment the environment to modify
 * @param source the property source to add
 */
private void addJsonPropertySource(ConfigurableEnvironment environment, PropertySource<?> source) {
	MutablePropertySources sources = environment.getPropertySources();
	String name = findPropertySource(sources);
	if (sources.contains(name)) {
		sources.addBefore(name, source);
	}
	else {
		sources.addFirst(source);
	}
}
|
Add the JSON property source to the environment, inserting it before the highest-precedence known property source when present, otherwise first.
@param environment the environment to modify
@param source the property source to add
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/support/SpringApplicationJsonEnvironmentPostProcessor.java
| 155
|
[
"environment",
"source"
] |
void
| true
| 2
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
append
|
/**
 * Appends a float value to the string builder using {@code String.valueOf}.
 *
 * @param value the value to append
 * @return {@code this} instance.
 */
public StrBuilder append(final float value) {
    return append(String.valueOf(value));
}
|
Appends a float value to the string builder using {@code String.valueOf}.
@param value the value to append
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 527
|
[
"value"
] |
StrBuilder
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getMessageFormat
|
/**
 * Return a MessageFormat for the given bundle and code, fetching already
 * generated MessageFormats from the cache (bundle -> code -> locale),
 * populating the cache lazily on a miss.
 *
 * @param bundle the ResourceBundle to work on
 * @param code the message code to retrieve
 * @param locale the Locale to use to build the MessageFormat
 * @return the resulting MessageFormat, or {@code null} if no message
 * defined for the given code
 * @throws MissingResourceException if thrown by the ResourceBundle
 */
protected @Nullable MessageFormat getMessageFormat(ResourceBundle bundle, String code, Locale locale)
		throws MissingResourceException {
	// Fast path: return a cached MessageFormat when one exists.
	Map<String, Map<Locale, MessageFormat>> codeMap = this.cachedBundleMessageFormats.get(bundle);
	Map<Locale, MessageFormat> localeMap = null;
	if (codeMap != null) {
		localeMap = codeMap.get(code);
		if (localeMap != null) {
			MessageFormat result = localeMap.get(locale);
			if (result != null) {
				return result;
			}
		}
	}
	String msg = getStringOrNull(bundle, code);
	if (msg != null) {
		// ConcurrentHashMap levels are created on demand; racing threads may
		// build the same MessageFormat, which is harmless.
		if (codeMap == null) {
			codeMap = this.cachedBundleMessageFormats.computeIfAbsent(bundle, b -> new ConcurrentHashMap<>());
		}
		if (localeMap == null) {
			localeMap = codeMap.computeIfAbsent(code, c -> new ConcurrentHashMap<>());
		}
		MessageFormat result = createMessageFormat(msg, locale);
		localeMap.put(locale, result);
		return result;
	}
	return null;
}
|
Return a MessageFormat for the given bundle and code,
fetching already generated MessageFormats from the cache.
@param bundle the ResourceBundle to work on
@param code the message code to retrieve
@param locale the Locale to use to build the MessageFormat
@return the resulting MessageFormat, or {@code null} if no message
defined for the given code
@throws MissingResourceException if thrown by the ResourceBundle
|
java
|
spring-context/src/main/java/org/springframework/context/support/ResourceBundleMessageSource.java
| 308
|
[
"bundle",
"code",
"locale"
] |
MessageFormat
| true
| 7
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
dropna
|
def dropna(
self,
*,
axis: Axis = 0,
inplace: bool = False,
how: AnyAll | None = None,
ignore_index: bool = False,
) -> Series | None:
"""
Return a new Series with missing values removed.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
inplace : bool, default False
If True, do operation inplace and return None.
how : str, optional
Not in use. Kept for compatibility.
ignore_index : bool, default ``False``
If ``True``, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 2.0.0
Returns
-------
Series or None
Series with NA entries dropped from it or None if ``inplace=True``.
See Also
--------
Series.isna: Indicate missing values.
Series.notna : Indicate existing (non-missing) values.
Series.fillna : Replace missing values.
DataFrame.dropna : Drop rows or columns which contain NA values.
Index.dropna : Drop missing indices.
Examples
--------
>>> ser = pd.Series([1.0, 2.0, np.nan])
>>> ser
0 1.0
1 2.0
2 NaN
dtype: float64
Drop NA values from a Series.
>>> ser.dropna()
0 1.0
1 2.0
dtype: float64
Empty strings are not considered NA values. ``None`` is considered an
NA value.
>>> ser = pd.Series([np.nan, 2, pd.NaT, "", None, "I stay"])
>>> ser
0 NaN
1 2
2 NaT
3
4 None
5 I stay
dtype: object
>>> ser.dropna()
1 2
3
5 I stay
dtype: object
"""
inplace = validate_bool_kwarg(inplace, "inplace")
ignore_index = validate_bool_kwarg(ignore_index, "ignore_index")
# Validate the axis parameter
self._get_axis_number(axis or 0)
if self._can_hold_na:
result = remove_na_arraylike(self)
else:
if not inplace:
result = self.copy(deep=False)
else:
result = self
if ignore_index:
result.index = default_index(len(result))
if inplace:
return self._update_inplace(result)
else:
return result
|
Return a new Series with missing values removed.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
inplace : bool, default False
If True, do operation inplace and return None.
how : str, optional
Not in use. Kept for compatibility.
ignore_index : bool, default ``False``
If ``True``, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 2.0.0
Returns
-------
Series or None
Series with NA entries dropped from it or None if ``inplace=True``.
See Also
--------
Series.isna: Indicate missing values.
Series.notna : Indicate existing (non-missing) values.
Series.fillna : Replace missing values.
DataFrame.dropna : Drop rows or columns which contain NA values.
Index.dropna : Drop missing indices.
Examples
--------
>>> ser = pd.Series([1.0, 2.0, np.nan])
>>> ser
0 1.0
1 2.0
2 NaN
dtype: float64
Drop NA values from a Series.
>>> ser.dropna()
0 1.0
1 2.0
dtype: float64
Empty strings are not considered NA values. ``None`` is considered an
NA value.
>>> ser = pd.Series([np.nan, 2, pd.NaT, "", None, "I stay"])
>>> ser
0 NaN
1 2
2 NaT
3
4 None
5 I stay
dtype: object
>>> ser.dropna()
1 2
3
5 I stay
dtype: object
|
python
|
pandas/core/series.py
| 6,347
|
[
"self",
"axis",
"inplace",
"how",
"ignore_index"
] |
Series | None
| true
| 9
| 8.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
visitTopLevelExportAssignment
|
function visitTopLevelExportAssignment(node: ExportAssignment): VisitResult<Statement | undefined> {
if (node.isExportEquals) {
return undefined;
}
return createExportStatement(factory.createIdentifier("default"), visitNode(node.expression, visitor, isExpression), /*location*/ node, /*allowComments*/ true);
}
|
Visits an ExportAssignment node.
@param node The node to visit.
|
typescript
|
src/compiler/transformers/module/module.ts
| 1,736
|
[
"node"
] | true
| 2
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
concat
|
public static CharSource concat(Iterable<? extends CharSource> sources) {
return new ConcatenatedCharSource(sources);
}
|
Concatenates multiple {@link CharSource} instances into a single source. Streams returned from
the source will contain the concatenated data from the streams of the underlying sources.
<p>Only one underlying stream will be open at a time. Closing the concatenated stream will
close the open underlying stream.
@param sources the sources to concatenate
@return a {@code CharSource} containing the concatenated data
@since 15.0
|
java
|
android/guava/src/com/google/common/io/CharSource.java
| 450
|
[
"sources"
] |
CharSource
| true
| 1
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
put
|
public JSONObject put(String name, double value) throws JSONException {
this.nameValuePairs.put(checkName(name), JSON.checkDouble(value));
return this;
}
|
Maps {@code name} to {@code value}, clobbering any existing name/value mapping with
the same name.
@param name the name of the property
@param value a finite value. May not be {@link Double#isNaN() NaNs} or
{@link Double#isInfinite() infinities}.
@return this object.
@throws JSONException if an error occurs
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONObject.java
| 219
|
[
"name",
"value"
] |
JSONObject
| true
| 1
| 6.8
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
asSharedProxy
|
SharedProxy asSharedProxy() && { return SharedProxy{std::move(*this)}; }
|
Move this `Function` into a copyable callable object, of which all copies
share the state.
|
cpp
|
folly/Function.h
| 920
|
[] | true
| 2
| 6.8
|
facebook/folly
| 30,157
|
doxygen
| false
|
|
subtypeOfComponentType
|
private static @Nullable Type subtypeOfComponentType(Type[] bounds) {
for (Type bound : bounds) {
Type componentType = getComponentType(bound);
if (componentType != null) {
// Only the first bound can be a class or array.
// Bounds after the first can only be interfaces.
if (componentType instanceof Class) {
Class<?> componentClass = (Class<?>) componentType;
if (componentClass.isPrimitive()) {
return componentClass;
}
}
return subtypeOf(componentType);
}
}
return null;
}
|
Returns {@code ? extends X} if any of {@code bounds} is a subtype of {@code X[]}; or null
otherwise.
|
java
|
android/guava/src/com/google/common/reflect/Types.java
| 198
|
[
"bounds"
] |
Type
| true
| 4
| 6
|
google/guava
| 51,352
|
javadoc
| false
|
forTypes
|
public static BindableRuntimeHintsRegistrar forTypes(Class<?>... types) {
return new BindableRuntimeHintsRegistrar(types);
}
|
Create a new {@link BindableRuntimeHintsRegistrar} for the specified types.
@param types the types to process
@return a new {@link BindableRuntimeHintsRegistrar} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/BindableRuntimeHintsRegistrar.java
| 121
|
[] |
BindableRuntimeHintsRegistrar
| true
| 1
| 6.16
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
toPrimitive
|
public static int[] toPrimitive(final Integer[] array) {
if (array == null) {
return null;
}
if (array.length == 0) {
return EMPTY_INT_ARRAY;
}
final int[] result = new int[array.length];
for (int i = 0; i < array.length; i++) {
result[i] = array[i].intValue();
}
return result;
}
|
Converts an array of object Integers to primitives.
<p>
This method returns {@code null} for a {@code null} input array.
</p>
@param array a {@link Integer} array, may be {@code null}.
@return an {@code int} array, {@code null} if null array input.
@throws NullPointerException if an array element is {@code null}.
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 9,055
|
[
"array"
] | true
| 4
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
convert_sbom_entry_to_dict
|
def convert_sbom_entry_to_dict(
dependency: dict[str, Any],
dependency_depth: dict[str, int],
is_core: bool,
is_devel: bool,
include_open_psf_scorecard: bool,
include_github_stats: bool,
include_actions: bool,
github_token: str | None,
console: Console,
) -> dict[str, Any] | None:
"""
Convert SBOM to Row for CSV or spreadsheet output
:param dependency: Dependency to convert
:param is_core: Whether the dependency is core or not
:param is_devel: Whether the dependency is devel or not
:param include_open_psf_scorecard: Whether to include Open PSF Scorecard
"""
console.print(f"[bright_blue]Calculating {dependency['name']} information.")
vcs = get_vcs(dependency)
name = dependency.get("name", "")
if name.startswith("apache-airflow"):
return None
normalized_name = normalize_package_name(dependency.get("name", ""))
row = {
"Name": normalized_name,
"Author": dependency.get("author", ""),
"Version": dependency.get("version", ""),
"Description": dependency.get("description"),
"Core": is_core,
"Devel": is_devel,
"Depth": dependency_depth.get(normalized_name, "Extra"),
"Licenses": convert_licenses(dependency.get("licenses", [])),
"Purl": dependency.get("purl"),
"Pypi": get_pypi_link(dependency),
"Vcs": vcs,
"Governance": get_governance(vcs),
}
if vcs and include_open_psf_scorecard:
open_psf_scorecard = get_open_psf_scorecard(vcs, name, console)
row.update(open_psf_scorecard)
if vcs and include_github_stats:
github_stats = get_github_stats(
vcs=vcs, project_name=name, github_token=github_token, console=console
)
row.update(github_stats)
if name in get_project_metadata(MetadataFromSpreadsheet.RELATIONSHIP_PROJECTS):
row["Relationship"] = "Yes"
if include_actions:
if name in get_project_metadata(MetadataFromSpreadsheet.CONTACTED_PROJECTS):
row["Contacted"] = "Yes"
num_actions = 0
for action, (threshold, action_text) in ACTIONS.items():
opsf_action = "OPSF-" + action
if opsf_action in row and int(row[opsf_action]) < threshold:
row[action_text] = "Yes"
num_actions += 1
row["Num Actions"] = num_actions
console.print(f"[green]Calculated {dependency['name']} information.")
return row
|
Convert SBOM to Row for CSV or spreadsheet output
:param dependency: Dependency to convert
:param is_core: Whether the dependency is core or not
:param is_devel: Whether the dependency is devel or not
:param include_open_psf_scorecard: Whether to include Open PSF Scorecard
|
python
|
dev/breeze/src/airflow_breeze/commands/sbom_commands.py
| 994
|
[
"dependency",
"dependency_depth",
"is_core",
"is_devel",
"include_open_psf_scorecard",
"include_github_stats",
"include_actions",
"github_token",
"console"
] |
dict[str, Any] | None
| true
| 12
| 6.16
|
apache/airflow
| 43,597
|
sphinx
| false
|
nextToInternal
|
private String nextToInternal(String excluded) {
int start = this.pos;
for (; this.pos < this.in.length(); this.pos++) {
char c = this.in.charAt(this.pos);
if (c == '\r' || c == '\n' || excluded.indexOf(c) != -1) {
return this.in.substring(start, this.pos);
}
}
return this.in.substring(start);
}
|
Returns the string up to but not including any of the given characters or a newline
character. This does not consume the excluded character.
@return the string up to but not including any of the given characters or a newline
character
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONTokener.java
| 337
|
[
"excluded"
] |
String
| true
| 5
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
createTrustManager
|
@Override
public X509ExtendedTrustManager createTrustManager() {
final Path path = resolvePath();
try {
final KeyStore store = readKeyStore(path);
if (requireTrustAnchors) {
checkTrustStore(store, path);
}
return KeyStoreUtil.createTrustManager(store, algorithm);
} catch (GeneralSecurityException e) {
throw keystoreException(path, e);
}
}
|
@param path The path to the keystore file
@param password The password for the keystore
@param type The {@link KeyStore#getType() type} of the keystore (typically "PKCS12" or "jks").
See {@link KeyStoreUtil#inferKeyStoreType}.
@param algorithm The algorithm to use for the Trust Manager (see {@link javax.net.ssl.TrustManagerFactory#getAlgorithm()}).
@param requireTrustAnchors If true, the truststore will be checked to ensure that it contains at least one valid trust anchor.
@param configBasePath The base path for the configuration directory
|
java
|
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/StoreTrustConfig.java
| 78
|
[] |
X509ExtendedTrustManager
| true
| 3
| 6.4
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
processPair
|
function processPair(currentItem: TextRangeWithKind, currentStartLine: number, currentParent: Node, previousItem: TextRangeWithKind, previousStartLine: number, previousParent: Node, contextNode: Node, dynamicIndentation: DynamicIndentation | undefined): LineAction {
formattingContext.updateContext(previousItem, previousParent, currentItem, currentParent, contextNode);
const rules = getRules(formattingContext);
let trimTrailingWhitespaces = formattingContext.options.trimTrailingWhitespace !== false;
let lineAction = LineAction.None;
if (rules) {
// Apply rules in reverse order so that higher priority rules (which are first in the array)
// win in a conflict with lower priority rules.
forEachRight(rules, rule => {
lineAction = applyRuleEdits(rule, previousItem, previousStartLine, currentItem, currentStartLine);
if (dynamicIndentation) {
switch (lineAction) {
case LineAction.LineRemoved:
// Handle the case where the next line is moved to be the end of this line.
// In this case we don't indent the next line in the next pass.
if (currentParent.getStart(sourceFile) === currentItem.pos) {
dynamicIndentation.recomputeIndentation(/*lineAddedByFormatting*/ false, contextNode);
}
break;
case LineAction.LineAdded:
// Handle the case where token2 is moved to the new line.
// In this case we indent token2 in the next pass but we set
// sameLineIndent flag to notify the indenter that the indentation is within the line.
if (currentParent.getStart(sourceFile) === currentItem.pos) {
dynamicIndentation.recomputeIndentation(/*lineAddedByFormatting*/ true, contextNode);
}
break;
default:
Debug.assert(lineAction === LineAction.None);
}
}
// We need to trim trailing whitespace between the tokens if they were on different lines, and no rule was applied to put them on the same line
trimTrailingWhitespaces = trimTrailingWhitespaces && !(rule.action & RuleAction.DeleteSpace) && rule.flags !== RuleFlags.CanDeleteNewLines;
});
}
else {
trimTrailingWhitespaces = trimTrailingWhitespaces && currentItem.kind !== SyntaxKind.EndOfFileToken;
}
if (currentStartLine !== previousStartLine && trimTrailingWhitespaces) {
// We need to trim trailing whitespace between the tokens if they were on different lines, and no rule was applied to put them on the same line
trimTrailingWhitespacesForLines(previousStartLine, currentStartLine, previousItem);
}
return lineAction;
}
|
Tries to compute the indentation for a list element.
If list element is not in range then
function will pick its actual indentation
so it can be pushed downstream as inherited indentation.
If list element is in the range - its indentation will be equal
to inherited indentation from its predecessors.
|
typescript
|
src/services/formatting/formatting.ts
| 1,101
|
[
"currentItem",
"currentStartLine",
"currentParent",
"previousItem",
"previousStartLine",
"previousParent",
"contextNode",
"dynamicIndentation"
] | true
| 11
| 6
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
getBestComparator
|
static Comparator<byte[]> getBestComparator() {
try {
Class<? extends LexicographicalComparator> unsafeImpl =
Class.forName(UNSAFE_COMPARATOR_NAME).asSubclass(LexicographicalComparator.class);
// requireNonNull is safe because the class is an enum.
LexicographicalComparator unsafeComparator =
requireNonNull(unsafeImpl.getEnumConstants())[0];
return unsafeComparator.isFunctional()
? unsafeComparator
: lexicographicalComparatorJavaImpl();
} catch (Throwable t) { // ensure we really catch *everything*
/*
* Now that UnsafeComparator is implemented to initialize successfully even when we know we
* can't use it, this `catch` block might now be necessary only:
*
* - in the Android flavor or anywhere else that users might be applying an optimizer that
* might strip UnsafeComparator entirely. (TODO(cpovirk): Are we confident that optimizers
* aren't stripping UnsafeComparator today? Should we have Proguard configuration for it?)
*
* - if Unsafe is removed entirely from JDKs (or already absent in some unusual environment
* today). TODO: b/392974826 - Check for the existence of Unsafe and its methods
* reflectively before attempting to access UnsafeComparator. Or, better yet, allow
* UnsafeComparator to still initialize correctly even if Unsafe is unavailable. This would
* protect against users that automatically preinitialize internal classes that they've seen
* initialized in their apps in the past. To do that, we may need to move the references to
* Unsafe to another class and then ensure that the preinitialization logic doesn't start
* picking up the new class as part of loading UnsafeComparator!
*/
return lexicographicalComparatorJavaImpl();
}
}
|
Returns the best comparator supported by the current runtime.
|
java
|
android/guava/src/com/google/common/primitives/UnsignedBytes.java
| 474
|
[] | true
| 3
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
|
shrink_mask
|
def shrink_mask(self):
"""
Reduce a mask to nomask when possible.
Parameters
----------
None
Returns
-------
result : MaskedArray
A :class:`~ma.MaskedArray` object.
Examples
--------
>>> import numpy as np
>>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4)
>>> x.mask
array([[False, False],
[False, False]])
>>> x.shrink_mask()
masked_array(
data=[[1, 2],
[3, 4]],
mask=False,
fill_value=999999)
>>> x.mask
False
"""
self._mask = _shrink_mask(self._mask)
return self
|
Reduce a mask to nomask when possible.
Parameters
----------
None
Returns
-------
result : MaskedArray
A :class:`~ma.MaskedArray` object.
Examples
--------
>>> import numpy as np
>>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4)
>>> x.mask
array([[False, False],
[False, False]])
>>> x.shrink_mask()
masked_array(
data=[[1, 2],
[3, 4]],
mask=False,
fill_value=999999)
>>> x.mask
False
|
python
|
numpy/ma/core.py
| 3,723
|
[
"self"
] | false
| 1
| 6.16
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
isErrorEnabled
|
@Override
public boolean isErrorEnabled() {
synchronized (this.lines) {
return (this.destination == null) || this.destination.isErrorEnabled();
}
}
|
Create a new {@link DeferredLog} instance managed by a {@link DeferredLogFactory}.
@param destination the switch-over destination
@param lines the lines backing all related deferred logs
@since 2.4.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/DeferredLog.java
| 93
|
[] | true
| 2
| 6.4
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
values
|
@Override
public Set<V> values() {
/*
* We can almost reuse the inverse's keySet, except we have to fix the
* iteration order so that it is consistent with the forward map.
*/
Set<V> result = valueSet;
return (result == null) ? valueSet = new ValueSet() : result;
}
|
Specifies the delegate maps going in each direction. Called by subclasses during
deserialization.
|
java
|
android/guava/src/com/google/common/collect/AbstractBiMap.java
| 253
|
[] | true
| 2
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
nanany
|
def nanany(
values: np.ndarray,
*,
axis: AxisInt | None = None,
skipna: bool = True,
mask: npt.NDArray[np.bool_] | None = None,
) -> bool:
"""
Check if any elements along an axis evaluate to True.
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : bool
Examples
--------
>>> from pandas.core import nanops
>>> s = pd.Series([1, 2])
>>> nanops.nanany(s.values)
np.True_
>>> from pandas.core import nanops
>>> s = pd.Series([np.nan])
>>> nanops.nanany(s.values)
np.False_
"""
if values.dtype.kind in "iub" and mask is None:
# GH#26032 fastpath
# error: Incompatible return value type (got "Union[bool_, ndarray]",
# expected "bool")
return values.any(axis) # type: ignore[return-value]
if values.dtype.kind == "M":
# GH#34479
raise TypeError("datetime64 type does not support operation 'any'")
values, _ = _get_values(values, skipna, fill_value=False, mask=mask)
# For object type, any won't necessarily return
# boolean values (numpy/numpy#4352)
if values.dtype == object:
values = values.astype(bool)
# error: Incompatible return value type (got "Union[bool_, ndarray]", expected
# "bool")
return values.any(axis) # type: ignore[return-value]
|
Check if any elements along an axis evaluate to True.
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : bool
Examples
--------
>>> from pandas.core import nanops
>>> s = pd.Series([1, 2])
>>> nanops.nanany(s.values)
np.True_
>>> from pandas.core import nanops
>>> s = pd.Series([np.nan])
>>> nanops.nanany(s.values)
np.False_
|
python
|
pandas/core/nanops.py
| 484
|
[
"values",
"axis",
"skipna",
"mask"
] |
bool
| true
| 5
| 8.48
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
_maybe_infer_tz
|
def _maybe_infer_tz(tz: tzinfo | None, inferred_tz: tzinfo | None) -> tzinfo | None:
"""
If a timezone is inferred from data, check that it is compatible with
the user-provided timezone, if any.
Parameters
----------
tz : tzinfo or None
inferred_tz : tzinfo or None
Returns
-------
tz : tzinfo or None
Raises
------
TypeError : if both timezones are present but do not match
"""
if tz is None:
tz = inferred_tz
elif inferred_tz is None:
pass
elif not timezones.tz_compare(tz, inferred_tz):
raise TypeError(
f"data is already tz-aware {inferred_tz}, unable to set specified tz: {tz}"
)
return tz
|
If a timezone is inferred from data, check that it is compatible with
the user-provided timezone, if any.
Parameters
----------
tz : tzinfo or None
inferred_tz : tzinfo or None
Returns
-------
tz : tzinfo or None
Raises
------
TypeError : if both timezones are present but do not match
|
python
|
pandas/core/arrays/datetimes.py
| 2,709
|
[
"tz",
"inferred_tz"
] |
tzinfo | None
| true
| 4
| 6.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
declareNamedObjects
|
@Override
public <T> void declareNamedObjects(
BiConsumer<Value, List<T>> consumer,
NamedObjectParser<T, Context> namedObjectParser,
ParseField field
) {
Consumer<Value> orderedModeCallback = (v) -> {
throw new IllegalArgumentException("[" + field + "] doesn't support arrays. Use a single object with multiple fields.");
};
declareNamedObjects(consumer, namedObjectParser, orderedModeCallback, field);
}
|
Parses a Value from the given {@link XContentParser}
@param parser the parser to build a value from
@param value the value to fill from the parser
@param context a context that is passed along to all declared field parsers
@return the parsed value
@throws IOException if an IOException occurs.
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/ObjectParser.java
| 561
|
[
"consumer",
"namedObjectParser",
"field"
] |
void
| true
| 1
| 6.4
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.