function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
binaryToHexDigitMsb0_4bits
|
public static char binaryToHexDigitMsb0_4bits(final boolean[] src, final int srcPos) {
if (src.length > Byte.SIZE) {
throw new IllegalArgumentException("src.length > 8: src.length=" + src.length);
}
if (src.length - srcPos < 4) {
throw new IllegalArgumentException("src.length - srcPos < 4: src.length=" + src.length + ", srcPos=" + srcPos);
}
if (src[srcPos + 3]) {
if (src[srcPos + 2]) {
if (src[srcPos + 1]) {
return src[srcPos] ? 'f' : '7';
}
return src[srcPos] ? 'b' : '3';
}
if (src[srcPos + 1]) {
return src[srcPos] ? 'd' : '5';
}
return src[srcPos] ? '9' : '1';
}
if (src[srcPos + 2]) {
if (src[srcPos + 1]) {
return src[srcPos] ? 'e' : '6';
}
return src[srcPos] ? 'a' : '2';
}
if (src[srcPos + 1]) {
return src[srcPos] ? 'c' : '4';
}
return src[srcPos] ? '8' : '0';
}
|
Converts binary (represented as boolean array) to a hexadecimal digit using the MSB0 bit ordering.
<p>
(1, 0, 0, 0) is converted as follow: '8' (1, 0, 0, 1, 1, 0, 1, 0) with srcPos = 3 is converted to 'D'
</p>
@param src the binary to convert.
@param srcPos the position of the LSB to start the conversion.
@return a hexadecimal digit representing the selected bits.
@throws IllegalArgumentException if {@code src} is empty, {@code src.length > 8} or {@code src.length - srcPos < 4}.
@throws NullPointerException if {@code src} is {@code null}.
|
java
|
src/main/java/org/apache/commons/lang3/Conversion.java
| 258
|
[
"src",
"srcPos"
] | true
| 18
| 6.72
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
finish
|
@Override
public void finish() throws IOException {
if (finished) {
throw new IllegalStateException("already finished");
}
finished = true;
flatVectorWriter.finish();
if (meta != null) {
// write end of fields marker
meta.writeInt(-1);
CodecUtil.writeFooter(meta);
}
if (vectorIndex != null) {
CodecUtil.writeFooter(vectorIndex);
}
}
|
Flushes vector data and associated data to disk.
<p>
This method and the private helpers it calls only need to support FLOAT32.
For FlatFieldVectorWriter we only need to support float[] during flush: during indexing users provide floats[], and pass floats to
FlatFieldVectorWriter, even when we have a BYTE dataType (i.e. an "int8_hnsw" type).
During merging, we use quantized data, so we need to support byte[] too (see {@link ES92GpuHnswVectorsWriter#mergeOneField}),
but not here.
That's how our other current formats work: use floats during indexing, and quantized data to build graph during merging.
</p>
|
java
|
libs/gpu-codec/src/main/java/org/elasticsearch/gpu/codec/ES92GpuHnswVectorsWriter.java
| 241
|
[] |
void
| true
| 4
| 6.88
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
from_file
|
def from_file(
self,
filename: str | os.PathLike[str],
load: t.Callable[[t.IO[t.Any]], t.Mapping[str, t.Any]],
silent: bool = False,
text: bool = True,
) -> bool:
"""Update the values in the config from a file that is loaded
using the ``load`` parameter. The loaded data is passed to the
:meth:`from_mapping` method.
.. code-block:: python
import json
app.config.from_file("config.json", load=json.load)
import tomllib
app.config.from_file("config.toml", load=tomllib.load, text=False)
:param filename: The path to the data file. This can be an
absolute path or relative to the config root path.
:param load: A callable that takes a file handle and returns a
mapping of loaded data from the file.
:type load: ``Callable[[Reader], Mapping]`` where ``Reader``
implements a ``read`` method.
:param silent: Ignore the file if it doesn't exist.
:param text: Open the file in text or binary mode.
:return: ``True`` if the file was loaded successfully.
.. versionchanged:: 2.3
The ``text`` parameter was added.
.. versionadded:: 2.0
"""
filename = os.path.join(self.root_path, filename)
try:
with open(filename, "r" if text else "rb") as f:
obj = load(f)
except OSError as e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR):
return False
e.strerror = f"Unable to load configuration file ({e.strerror})"
raise
return self.from_mapping(obj)
|
Update the values in the config from a file that is loaded
using the ``load`` parameter. The loaded data is passed to the
:meth:`from_mapping` method.
.. code-block:: python
import json
app.config.from_file("config.json", load=json.load)
import tomllib
app.config.from_file("config.toml", load=tomllib.load, text=False)
:param filename: The path to the data file. This can be an
absolute path or relative to the config root path.
:param load: A callable that takes a file handle and returns a
mapping of loaded data from the file.
:type load: ``Callable[[Reader], Mapping]`` where ``Reader``
implements a ``read`` method.
:param silent: Ignore the file if it doesn't exist.
:param text: Open the file in text or binary mode.
:return: ``True`` if the file was loaded successfully.
.. versionchanged:: 2.3
The ``text`` parameter was added.
.. versionadded:: 2.0
|
python
|
src/flask/config.py
| 256
|
[
"self",
"filename",
"load",
"silent",
"text"
] |
bool
| true
| 4
| 7.92
|
pallets/flask
| 70,946
|
sphinx
| false
|
findAgentJar
|
static String findAgentJar() {
String propertyName = "es.entitlement.agentJar";
String propertyValue = System.getProperty(propertyName);
if (propertyValue != null) {
return propertyValue;
}
Path esHome = Path.of(System.getProperty("es.path.home"));
Path dir = esHome.resolve("lib/entitlement-agent");
if (Files.exists(dir) == false) {
throw new IllegalStateException("Directory for entitlement jar does not exist: " + dir);
}
try (var s = Files.list(dir)) {
var candidates = s.limit(2).toList();
if (candidates.size() != 1) {
throw new IllegalStateException("Expected one jar in " + dir + "; found " + candidates.size());
}
return candidates.get(0).toString();
} catch (IOException e) {
throw new IllegalStateException("Failed to list entitlement jars in: " + dir, e);
}
}
|
Main entry point that activates entitlement checking. Once this method returns,
calls to methods protected by entitlements from classes without a valid
policy will throw {@link org.elasticsearch.entitlement.runtime.api.NotEntitledException}.
@param serverPolicyPatch additional entitlements to patch the embedded server layer policy
@param pluginPolicies maps each plugin name to the corresponding {@link Policy}
@param scopeResolver a functor to map a Java Class to the component and module it belongs to.
@param settingResolver a functor to resolve a setting name pattern for one or more Elasticsearch settings.
@param dataDirs data directories for Elasticsearch
@param sharedDataDir shared data directory for Elasticsearch (deprecated)
@param sharedRepoDirs shared repository directories for Elasticsearch
@param configDir the config directory for Elasticsearch
@param libDir the lib directory for Elasticsearch
@param modulesDir the directory where Elasticsearch modules are
@param pluginsDir the directory where plugins are installed for Elasticsearch
@param pluginSourcePaths maps each plugin name to the location of that plugin's code
@param tempDir the temp directory for Elasticsearch
@param logsDir the log directory for Elasticsearch
@param pidFile path to a pid file for Elasticsearch, or {@code null} if one was not specified
@param suppressFailureLogPackages packages for which we do not need or want to log Entitlements failures
|
java
|
libs/entitlement/src/main/java/org/elasticsearch/entitlement/bootstrap/EntitlementBootstrap.java
| 139
|
[] |
String
| true
| 5
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
identityToString
|
public static String identityToString(final Object object) {
if (object == null) {
return null;
}
final String name = object.getClass().getName();
final String hexString = identityHashCodeHex(object);
final StringBuilder builder = new StringBuilder(name.length() + 1 + hexString.length());
// @formatter:off
builder.append(name)
.append(AT_SIGN)
.append(hexString);
// @formatter:on
return builder.toString();
}
|
Gets the toString that would be produced by {@link Object} if a class did not override toString itself. {@code null} will return {@code null}.
<pre>
ObjectUtils.identityToString(null) = null
ObjectUtils.identityToString("") = "java.lang.String@1e23"
ObjectUtils.identityToString(Boolean.TRUE) = "java.lang.Boolean@7fa"
</pre>
@param object the object to create a toString for, may be {@code null}.
@return the default toString text, or {@code null} if {@code null} passed in.
|
java
|
src/main/java/org/apache/commons/lang3/ObjectUtils.java
| 813
|
[
"object"
] |
String
| true
| 2
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
enableSubstitutionsForBlockScopedBindings
|
function enableSubstitutionsForBlockScopedBindings() {
if ((enabledSubstitutions & ES2015SubstitutionFlags.BlockScopedBindings) === 0) {
enabledSubstitutions |= ES2015SubstitutionFlags.BlockScopedBindings;
context.enableSubstitution(SyntaxKind.Identifier);
}
}
|
Enables a more costly code path for substitutions when we determine a source file
contains block-scoped bindings (e.g. `let` or `const`).
|
typescript
|
src/compiler/transformers/es2015.ts
| 4,875
|
[] | false
| 2
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
decorateIfRequired
|
public BeanDefinitionHolder decorateIfRequired(
Node node, BeanDefinitionHolder originalDef, @Nullable BeanDefinition containingBd) {
String namespaceUri = getNamespaceURI(node);
if (namespaceUri != null && !isDefaultNamespace(namespaceUri)) {
NamespaceHandler handler = this.readerContext.getNamespaceHandlerResolver().resolve(namespaceUri);
if (handler != null) {
BeanDefinitionHolder decorated =
handler.decorate(node, originalDef, new ParserContext(this.readerContext, this, containingBd));
if (decorated != null) {
return decorated;
}
}
else if (namespaceUri.startsWith("http://www.springframework.org/schema/")) {
error("Unable to locate Spring NamespaceHandler for XML schema namespace [" + namespaceUri + "]", node);
}
else {
// A custom namespace, not to be handled by Spring - maybe "xml:...".
if (logger.isDebugEnabled()) {
logger.debug("No Spring NamespaceHandler found for XML schema namespace [" + namespaceUri + "]");
}
}
}
return originalDef;
}
|
Decorate the given bean definition through a namespace handler,
if applicable.
@param node the current child node
@param originalDef the current bean definition
@param containingBd the containing bean definition (if any)
@return the decorated bean definition
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/xml/BeanDefinitionParserDelegate.java
| 1,430
|
[
"node",
"originalDef",
"containingBd"
] |
BeanDefinitionHolder
| true
| 7
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
prepareRangeContainsErrorFunction
|
function prepareRangeContainsErrorFunction(errors: readonly Diagnostic[], originalRange: TextRange): (r: TextRange) => boolean {
if (!errors.length) {
return rangeHasNoErrors;
}
// pick only errors that fall in range
const sorted = errors
.filter(d => rangeOverlapsWithStartEnd(originalRange, d.start!, d.start! + d.length!)) // TODO: GH#18217
.sort((e1, e2) => e1.start! - e2.start!);
if (!sorted.length) {
return rangeHasNoErrors;
}
let index = 0;
return r => {
// in current implementation sequence of arguments [r1, r2...] is monotonically increasing.
// 'index' tracks the index of the most recent error that was checked.
while (true) {
if (index >= sorted.length) {
// all errors in the range were already checked -> no error in specified range
return false;
}
const error = sorted[index];
if (r.end <= error.start!) {
// specified range ends before the error referred by 'index' - no error in range
return false;
}
if (startEndOverlapsWithStartEnd(r.pos, r.end, error.start!, error.start! + error.length!)) {
// specified range overlaps with error range
return true;
}
index++;
}
};
function rangeHasNoErrors(): boolean {
return false;
}
}
|
formatting is not applied to ranges that contain parse errors.
This function will return a predicate that for a given text range will tell
if there are any parse errors that overlap with the range.
|
typescript
|
src/services/formatting/formatting.ts
| 330
|
[
"errors",
"originalRange"
] | true
| 7
| 6
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
takeRenewedRecords
|
public void takeRenewedRecords() {
for (Map.Entry<TopicIdPartition, ShareInFlightBatch<K, V>> entry : batches.entrySet()) {
entry.getValue().takeRenewals();
}
// Any acquisition lock timeout updated by renewal is applied as the renewed records are move back to in-flight
if (acquisitionLockTimeoutMsRenewed.isPresent()) {
acquisitionLockTimeoutMs = acquisitionLockTimeoutMsRenewed;
}
}
|
Take any renewed records and move them back into in-flight state.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetch.java
| 143
|
[] |
void
| true
| 2
| 7.2
|
apache/kafka
| 31,560
|
javadoc
| false
|
make_mask_descr
|
def make_mask_descr(ndtype):
"""
Construct a dtype description list from a given dtype.
Returns a new dtype object, with the type of all fields in `ndtype` to a
boolean type. Field names are not altered.
Parameters
----------
ndtype : dtype
The dtype to convert.
Returns
-------
result : dtype
A dtype that looks like `ndtype`, the type of all fields is boolean.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> dtype = np.dtype({'names':['foo', 'bar'],
... 'formats':[np.float32, np.int64]})
>>> dtype
dtype([('foo', '<f4'), ('bar', '<i8')])
>>> ma.make_mask_descr(dtype)
dtype([('foo', '|b1'), ('bar', '|b1')])
>>> ma.make_mask_descr(np.float32)
dtype('bool')
"""
return _replace_dtype_fields(ndtype, MaskType)
|
Construct a dtype description list from a given dtype.
Returns a new dtype object, with the type of all fields in `ndtype` to a
boolean type. Field names are not altered.
Parameters
----------
ndtype : dtype
The dtype to convert.
Returns
-------
result : dtype
A dtype that looks like `ndtype`, the type of all fields is boolean.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> dtype = np.dtype({'names':['foo', 'bar'],
... 'formats':[np.float32, np.int64]})
>>> dtype
dtype([('foo', '<f4'), ('bar', '<i8')])
>>> ma.make_mask_descr(dtype)
dtype([('foo', '|b1'), ('bar', '|b1')])
>>> ma.make_mask_descr(np.float32)
dtype('bool')
|
python
|
numpy/ma/core.py
| 1,366
|
[
"ndtype"
] | false
| 1
| 6.16
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
print
|
public abstract String print(Duration value, @Nullable ChronoUnit unit);
|
Print the specified duration using the given unit.
@param value the value to print
@param unit the value to use for printing
@return the printed result
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/convert/DurationStyle.java
| 134
|
[
"value",
"unit"
] |
String
| true
| 1
| 6.8
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
will_fusion_create_cycle
|
def will_fusion_create_cycle(
self, node1: BaseSchedulerNode, node2: BaseSchedulerNode
) -> bool:
"""
Finds whether there's a path from node1 to node2 (or vice-versa)
caused indirectly by other fusions.
"""
# since we are just returning boolean here, use slightly faster, unordered set
visited = OrderedSet[FusedSchedulerNode]()
def found_path(node: BaseSchedulerNode) -> bool:
# only fused nodes can introduce new ancestors.
if isinstance(node, FusedSchedulerNode) and node not in visited:
visited.add(node)
if node.get_operation_names().issubset(combined_ancestors):
# All fusion outputs are in ancestors of node1 and node2, thus
# cannot introduce new path:
#
# 1. if output is neither descendent of node1 or node2, the
# output cannot introduce a path
# 2. due to [can_fuse]: if WLOG output is descendent of node1, it cannot be
# on path(node1->node2), hence it cannot be ancestor of node2
# 3. due to [acyclic]: if WLOG output is descendent of node1, it cannot be
# ancestor of node1
return False
else:
# continue DFS of new ancestors introduced by the fusion
return bool(combined_names & node.ancestors) or any(
found_path(self.name_to_fused_node[n])
for n in node.ancestors - combined_ancestors
)
return False
# as above - use slightly faster, unordered set
combined_names = (
node1.get_operation_names()._dict.keys()
| node2.get_operation_names()._dict.keys()
)
combined_ancestors = (
node1.ancestors._dict.keys() | node2.ancestors._dict.keys()
) - combined_names
cycle = any(found_path(self.name_to_fused_node[n]) for n in combined_ancestors)
if cycle:
WhyNoFuse(node1, node2)("will create cycle")
return cycle
|
Finds whether there's a path from node1 to node2 (or vice-versa)
caused indirectly by other fusions.
|
python
|
torch/_inductor/scheduler.py
| 4,383
|
[
"self",
"node1",
"node2"
] |
bool
| true
| 7
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
createScheduler
|
@SuppressWarnings("NullAway") // Dataflow analysis limitation
protected Scheduler createScheduler(SchedulerFactory schedulerFactory, @Nullable String schedulerName)
throws SchedulerException {
// Override thread context ClassLoader to work around naive Quartz ClassLoadHelper loading.
Thread currentThread = Thread.currentThread();
ClassLoader threadContextClassLoader = currentThread.getContextClassLoader();
boolean overrideClassLoader = (this.resourceLoader != null &&
this.resourceLoader.getClassLoader() != threadContextClassLoader);
if (overrideClassLoader) {
currentThread.setContextClassLoader(this.resourceLoader.getClassLoader());
}
try {
SchedulerRepository repository = SchedulerRepository.getInstance();
synchronized (repository) {
Scheduler existingScheduler = (schedulerName != null ? repository.lookup(schedulerName) : null);
Scheduler newScheduler = schedulerFactory.getScheduler();
if (newScheduler == existingScheduler) {
throw new IllegalStateException("Active Scheduler of name '" + schedulerName + "' already registered " +
"in Quartz SchedulerRepository. Cannot create a new Spring-managed Scheduler of the same name!");
}
if (!this.exposeSchedulerInRepository) {
// Need to remove it in this case, since Quartz shares the Scheduler instance by default!
SchedulerRepository.getInstance().remove(newScheduler.getSchedulerName());
}
return newScheduler;
}
}
finally {
if (overrideClassLoader) {
// Reset original thread context ClassLoader.
currentThread.setContextClassLoader(threadContextClassLoader);
}
}
}
|
Create the Scheduler instance for the given factory and scheduler name.
Called by {@link #afterPropertiesSet}.
<p>The default implementation invokes SchedulerFactory's {@code getScheduler}
method. Can be overridden for custom Scheduler creation.
@param schedulerFactory the factory to create the Scheduler with
@param schedulerName the name of the scheduler to create
@return the Scheduler instance
@throws SchedulerException if thrown by Quartz methods
@see #afterPropertiesSet
@see org.quartz.SchedulerFactory#getScheduler
|
java
|
spring-context-support/src/main/java/org/springframework/scheduling/quartz/SchedulerFactoryBean.java
| 652
|
[
"schedulerFactory",
"schedulerName"
] |
Scheduler
| true
| 7
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
_mode
|
def _mode(self, dropna: bool = True) -> Self:
"""
Returns the mode(s) of the ExtensionArray.
Always returns `ExtensionArray` even if only one value.
Parameters
----------
dropna : bool, default True
Don't consider counts of NA values.
Returns
-------
same type as self
Sorted, if possible.
"""
pa_type = self._pa_array.type
if pa.types.is_temporal(pa_type):
nbits = pa_type.bit_width
if nbits == 32:
data = self._pa_array.cast(pa.int32())
elif nbits == 64:
data = self._pa_array.cast(pa.int64())
else:
raise NotImplementedError(pa_type)
else:
data = self._pa_array
if dropna:
data = data.drop_null()
res = pc.value_counts(data)
most_common = res.field("values").filter(
pc.equal(res.field("counts"), pc.max(res.field("counts")))
)
if pa.types.is_temporal(pa_type):
most_common = most_common.cast(pa_type)
most_common = most_common.take(pc.array_sort_indices(most_common))
return self._from_pyarrow_array(most_common)
|
Returns the mode(s) of the ExtensionArray.
Always returns `ExtensionArray` even if only one value.
Parameters
----------
dropna : bool, default True
Don't consider counts of NA values.
Returns
-------
same type as self
Sorted, if possible.
|
python
|
pandas/core/arrays/arrow/array.py
| 2,394
|
[
"self",
"dropna"
] |
Self
| true
| 8
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
_parser_dispatch
|
def _parser_dispatch(flavor: HTMLFlavors | None) -> type[_HtmlFrameParser]:
"""
Choose the parser based on the input flavor.
Parameters
----------
flavor : {{"lxml", "html5lib", "bs4"}} or None
The type of parser to use. This must be a valid backend.
Returns
-------
cls : _HtmlFrameParser subclass
The parser class based on the requested input flavor.
Raises
------
ValueError
* If `flavor` is not a valid backend.
ImportError
* If you do not have the requested `flavor`
"""
valid_parsers = list(_valid_parsers.keys())
if flavor not in valid_parsers:
raise ValueError(
f"{flavor!r} is not a valid flavor, valid flavors are {valid_parsers}"
)
if flavor in ("bs4", "html5lib"):
import_optional_dependency("html5lib")
import_optional_dependency("bs4")
else:
import_optional_dependency("lxml.etree")
return _valid_parsers[flavor]
|
Choose the parser based on the input flavor.
Parameters
----------
flavor : {{"lxml", "html5lib", "bs4"}} or None
The type of parser to use. This must be a valid backend.
Returns
-------
cls : _HtmlFrameParser subclass
The parser class based on the requested input flavor.
Raises
------
ValueError
* If `flavor` is not a valid backend.
ImportError
* If you do not have the requested `flavor`
|
python
|
pandas/io/html.py
| 883
|
[
"flavor"
] |
type[_HtmlFrameParser]
| true
| 4
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
asFailableSupplier
|
@SuppressWarnings("unchecked")
public static <R> FailableSupplier<R, Throwable> asFailableSupplier(final Method method) {
return asInterfaceInstance(FailableSupplier.class, method);
}
|
Produces a {@link FailableSupplier} for a given a <em>supplier</em> Method. The FailableSupplier return type must
match the method's return type.
<p>
Only works with static methods.
</p>
@param <R> The Method return type.
@param method the method to invoke.
@return a correctly-typed wrapper for the given target.
|
java
|
src/main/java/org/apache/commons/lang3/function/MethodInvokers.java
| 169
|
[
"method"
] | true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
match
|
public boolean match(String parserName, Supplier<XContentLocation> location, String fieldName, DeprecationHandler deprecationHandler) {
Objects.requireNonNull(fieldName, "fieldName cannot be null");
// if this parse field has not been completely deprecated then try to
// match the preferred name
if (fullyDeprecated == false && allReplacedWith == null && fieldName.equals(name)) {
return true;
}
boolean isCompatibleDeprecation = RestApiVersion.minimumSupported().matches(forRestApiVersion)
&& RestApiVersion.current().matches(forRestApiVersion) == false;
// Now try to match against one of the deprecated names. Note that if
// the parse field is entirely deprecated (allReplacedWith != null) all
// fields will be in the deprecatedNames array
for (String depName : deprecatedNames) {
if (fieldName.equals(depName)) {
if (fullyDeprecated) {
deprecationHandler.logRemovedField(parserName, location, fieldName, isCompatibleDeprecation);
} else if (allReplacedWith == null) {
deprecationHandler.logRenamedField(parserName, location, fieldName, name, isCompatibleDeprecation);
} else {
deprecationHandler.logReplacedField(parserName, location, fieldName, allReplacedWith, isCompatibleDeprecation);
}
return true;
}
}
return false;
}
|
Does {@code fieldName} match this field?
@param parserName
the name of the parent object holding this field
@param location
the XContentLocation of the field
@param fieldName
the field name to match against this {@link ParseField}
@param deprecationHandler called if {@code fieldName} is deprecated
@return true if <code>fieldName</code> matches any of the acceptable
names for this {@link ParseField}.
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/ParseField.java
| 157
|
[
"parserName",
"location",
"fieldName",
"deprecationHandler"
] | true
| 8
| 7.6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
device
|
def device(x: _ArrayApiObj, /) -> Device:
"""
Hardware device the array data resides on.
This is equivalent to `x.device` according to the `standard
<https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.device.html>`__.
This helper is included because some array libraries either do not have
the `device` attribute or include it with an incompatible API.
Parameters
----------
x: array
array instance from an array API compatible library.
Returns
-------
out: device
a ``device`` object (see the `Device Support <https://data-apis.org/array-api/latest/design_topics/device_support.html>`__
section of the array API specification).
Notes
-----
For NumPy the device is always `"cpu"`. For Dask, the device is always a
special `DASK_DEVICE` object.
See Also
--------
to_device : Move array data to a different device.
"""
if is_numpy_array(x):
return "cpu"
elif is_dask_array(x):
# Peek at the metadata of the Dask array to determine type
if is_numpy_array(x._meta): # pyright: ignore
# Must be on CPU since backed by numpy
return "cpu"
return _DASK_DEVICE
elif is_jax_array(x):
# FIXME Jitted JAX arrays do not have a device attribute
# https://github.com/jax-ml/jax/issues/26000
# Return None in this case. Note that this workaround breaks
# the standard and will result in new arrays being created on the
# default device instead of the same device as the input array(s).
x_device = getattr(x, "device", None)
# Older JAX releases had .device() as a method, which has been replaced
# with a property in accordance with the standard.
if inspect.ismethod(x_device):
return x_device()
else:
return x_device
elif is_pydata_sparse_array(x):
# `sparse` will gain `.device`, so check for this first.
x_device = getattr(x, "device", None)
if x_device is not None:
return x_device
# Everything but DOK has this attr.
try:
inner = x.data # pyright: ignore
except AttributeError:
return "cpu"
# Return the device of the constituent array
return device(inner) # pyright: ignore
return x.device # pyright: ignore
|
Hardware device the array data resides on.
This is equivalent to `x.device` according to the `standard
<https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.device.html>`__.
This helper is included because some array libraries either do not have
the `device` attribute or include it with an incompatible API.
Parameters
----------
x: array
array instance from an array API compatible library.
Returns
-------
out: device
a ``device`` object (see the `Device Support <https://data-apis.org/array-api/latest/design_topics/device_support.html>`__
section of the array API specification).
Notes
-----
For NumPy the device is always `"cpu"`. For Dask, the device is always a
special `DASK_DEVICE` object.
See Also
--------
to_device : Move array data to a different device.
|
python
|
sklearn/externals/array_api_compat/common/_helpers.py
| 699
|
[
"x"
] |
Device
| true
| 9
| 6.32
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
get_include
|
def get_include():
"""
Return the directory that contains the NumPy \\*.h header files.
Extension modules that need to compile against NumPy may need to use this
function to locate the appropriate include directory.
Notes
-----
When using ``setuptools``, for example in ``setup.py``::
import numpy as np
...
Extension('extension_name', ...
include_dirs=[np.get_include()])
...
Note that a CLI tool ``numpy-config`` was introduced in NumPy 2.0, using
that is likely preferred for build systems other than ``setuptools``::
$ numpy-config --cflags
-I/path/to/site-packages/numpy/_core/include
# Or rely on pkg-config:
$ export PKG_CONFIG_PATH=$(numpy-config --pkgconfigdir)
$ pkg-config --cflags
-I/path/to/site-packages/numpy/_core/include
Examples
--------
>>> np.get_include()
'.../site-packages/numpy/core/include' # may vary
"""
import numpy
if numpy.show_config is None:
# running from numpy source directory
d = os.path.join(os.path.dirname(numpy.__file__), '_core', 'include')
else:
# using installed numpy core headers
import numpy._core as _core
d = os.path.join(os.path.dirname(_core.__file__), 'include')
return d
|
Return the directory that contains the NumPy \\*.h header files.
Extension modules that need to compile against NumPy may need to use this
function to locate the appropriate include directory.
Notes
-----
When using ``setuptools``, for example in ``setup.py``::
import numpy as np
...
Extension('extension_name', ...
include_dirs=[np.get_include()])
...
Note that a CLI tool ``numpy-config`` was introduced in NumPy 2.0, using
that is likely preferred for build systems other than ``setuptools``::
$ numpy-config --cflags
-I/path/to/site-packages/numpy/_core/include
# Or rely on pkg-config:
$ export PKG_CONFIG_PATH=$(numpy-config --pkgconfigdir)
$ pkg-config --cflags
-I/path/to/site-packages/numpy/_core/include
Examples
--------
>>> np.get_include()
'.../site-packages/numpy/core/include' # may vary
|
python
|
numpy/lib/_utils_impl.py
| 78
|
[] | false
| 3
| 7.2
|
numpy/numpy
| 31,054
|
unknown
| false
|
|
process_fd
|
def process_fd(
proc,
fd,
log: Logger,
process_line_callback: Callable[[str], None] | None = None,
is_dataflow_job_id_exist_callback: Callable[[], bool] | None = None,
):
"""
Print output to logs.
:param proc: subprocess.
:param fd: File descriptor.
:param process_line_callback: Optional callback which can be used to process
stdout and stderr to detect job id.
:param log: logger.
"""
if fd not in (proc.stdout, proc.stderr):
raise AirflowException("No data in stderr or in stdout.")
fd_to_log = {proc.stderr: log.warning, proc.stdout: log.info}
func_log = fd_to_log[fd]
for line_raw in iter(fd.readline, b""):
line = line_raw.decode()
if process_line_callback:
process_line_callback(line)
func_log(line.rstrip("\n"))
if is_dataflow_job_id_exist_callback and is_dataflow_job_id_exist_callback():
return
|
Print output to logs.
:param proc: subprocess.
:param fd: File descriptor.
:param process_line_callback: Optional callback which can be used to process
stdout and stderr to detect job id.
:param log: logger.
|
python
|
providers/apache/beam/src/airflow/providers/apache/beam/hooks/beam.py
| 122
|
[
"proc",
"fd",
"log",
"process_line_callback",
"is_dataflow_job_id_exist_callback"
] | true
| 6
| 6.88
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
get_secret
|
def get_secret(self, secret_name: str) -> str | bytes:
"""
Retrieve secret value from AWS Secrets Manager as a str or bytes.
The value reflects format it stored in the AWS Secrets Manager.
.. seealso::
- :external+boto3:py:meth:`SecretsManager.Client.get_secret_value`
:param secret_name: name of the secrets.
:return: Union[str, bytes] with the information about the secrets
"""
# Depending on whether the secret is a string or binary, one of
# these fields will be populated.
get_secret_value_response = self.get_conn().get_secret_value(SecretId=secret_name)
if "SecretString" in get_secret_value_response:
secret = get_secret_value_response["SecretString"]
else:
secret = base64.b64decode(get_secret_value_response["SecretBinary"])
return secret
|
Retrieve secret value from AWS Secrets Manager as a str or bytes.
The value reflects format it stored in the AWS Secrets Manager.
.. seealso::
- :external+boto3:py:meth:`SecretsManager.Client.get_secret_value`
:param secret_name: name of the secrets.
:return: Union[str, bytes] with the information about the secrets
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/secrets_manager.py
| 43
|
[
"self",
"secret_name"
] |
str | bytes
| true
| 3
| 7.6
|
apache/airflow
| 43,597
|
sphinx
| false
|
truePredicate
|
@SuppressWarnings("unchecked")
static <E extends Throwable> FailableLongPredicate<E> truePredicate() {
return TRUE;
}
|
Gets the TRUE singleton.
@param <E> The kind of thrown exception or error.
@return The NOP singleton.
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableLongPredicate.java
| 57
|
[] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
partitionResponse
|
public static FetchResponseData.PartitionData partitionResponse(int partition, Errors error) {
return new FetchResponseData.PartitionData()
.setPartitionIndex(partition)
.setErrorCode(error.code())
.setHighWatermark(FetchResponse.INVALID_HIGH_WATERMARK)
.setRecords(MemoryRecords.EMPTY);
}
|
Convenience method to find the size of a response.
@param version The version of the response to use.
@param partIterator The partition iterator.
@return The response size in bytes.
|
java
|
clients/src/main/java/org/apache/kafka/common/requests/FetchResponse.java
| 201
|
[
"partition",
"error"
] | true
| 1
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
apply
|
@Nullable String apply(String value) {
try {
Long result = this.action.apply(value);
return (result != null) ? String.valueOf(result) : null;
}
catch (RuntimeException ex) {
if (this.ignoredExceptions.test(ex)) {
return null;
}
throw ex;
}
}
|
Attempt to convert the specified value to epoch time.
@param value the value to coerce to
@return the epoch time in milliseconds or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/info/GitProperties.java
| 140
|
[
"value"
] |
String
| true
| 4
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
Tree
|
function Tree({
badgeClassName,
actions,
className,
clearMessages,
entries,
isTransitionPending,
label,
messageClassName,
}: TreeProps) {
if (entries.length === 0) {
return null;
}
return (
<div className={className}>
<div className={`${sharedStyles.HeaderRow} ${styles.HeaderRow}`}>
<div className={sharedStyles.Header}>{label}</div>
<Button
disabled={isTransitionPending}
onClick={clearMessages}
title={`Clear all ${label} for this component`}>
<ButtonIcon type="clear" />
</Button>
</div>
{entries.map(([message, count], index) => (
<ErrorOrWarningView
key={`${label}-${index}`}
badgeClassName={badgeClassName}
className={messageClassName}
count={count}
message={message}
/>
))}
</div>
);
}
|
Copyright (c) Meta Platforms, Inc. and affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
@flow
|
javascript
|
packages/react-devtools-shared/src/devtools/views/Components/InspectedElementErrorsAndWarningsTree.js
| 125
|
[] | false
| 2
| 6.24
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
geoAzimuthRads
|
double geoAzimuthRads(double x, double y, double z) {
// from https://www.movable-type.co.uk/scripts/latlong-vectors.html
// N = {0,0,1}
// c1 = a×b
// c2 = a×N
// sinθ = |c1×c2| · sgn(c1×c2 · a)
// cosθ = c1·c2
// θ = atan2(sinθ, cosθ)
final double c1X = this.y * z - this.z * y;
final double c1Y = this.z * x - this.x * z;
final double c1Z = this.x * y - this.y * x;
final double c2X = this.y;
final double c2Y = -this.x;
final double c2Z = 0d;
final double c1c2X = c1Y * c2Z - c1Z * c2Y;
final double c1c2Y = c1Z * c2X - c1X * c2Z;
final double c1c2Z = c1X * c2Y - c1Y * c2X;
final double sign = Math.signum(dotProduct(this.x, this.y, this.z, c1c2X, c1c2Y, c1c2Z));
return FastMath.atan2(sign * magnitude(c1c2X, c1c2Y, c1c2Z), dotProduct(c1X, c1Y, c1Z, c2X, c2Y, c2Z));
}
|
Determines the azimuth to the provided 3D coordinate.
@param x The first 3D coordinate.
@param y The second 3D coordinate.
@param z The third 3D coordinate.
@return The azimuth in radians.
|
java
|
libs/h3/src/main/java/org/elasticsearch/h3/Vec3d.java
| 138
|
[
"x",
"y",
"z"
] | true
| 1
| 7.2
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
tryInstallExecSandbox
|
@Override
public void tryInstallExecSandbox() {
// create a new Job
Handle job = kernel.CreateJobObjectW();
if (job == null) {
throw new UnsupportedOperationException("CreateJobObject: " + kernel.GetLastError());
}
try {
// retrieve the current basic limits of the job
int clazz = JOBOBJECT_BASIC_LIMIT_INFORMATION_CLASS;
var info = kernel.newJobObjectBasicLimitInformation();
if (kernel.QueryInformationJobObject(job, clazz, info) == false) {
throw new UnsupportedOperationException("QueryInformationJobObject: " + kernel.GetLastError());
}
// modify the number of active processes to be 1 (exactly the one process we will add to the job).
info.setActiveProcessLimit(1);
info.setLimitFlags(JOB_OBJECT_LIMIT_ACTIVE_PROCESS);
if (kernel.SetInformationJobObject(job, clazz, info) == false) {
throw new UnsupportedOperationException("SetInformationJobObject: " + kernel.GetLastError());
}
// assign ourselves to the job
if (kernel.AssignProcessToJobObject(job, kernel.GetCurrentProcess()) == false) {
throw new UnsupportedOperationException("AssignProcessToJobObject: " + kernel.GetLastError());
}
} finally {
kernel.CloseHandle(job);
}
execSandboxState = ExecSandboxState.ALL_THREADS;
logger.debug("Windows ActiveProcessLimit initialization successful");
}
|
Install exec system call filtering on Windows.
<p>
Process creation is restricted with {@code SetInformationJobObject/ActiveProcessLimit}.
<p>
Note: This is not intended as a real sandbox. It is another level of security, mostly intended to annoy
security researchers and make their lives more difficult in achieving "remote execution" exploits.
|
java
|
libs/native/src/main/java/org/elasticsearch/nativeaccess/WindowsNativeAccess.java
| 96
|
[] |
void
| true
| 5
| 6.4
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
set
|
public Struct set(String name, Object value) {
BoundField field = this.schema.get(name);
if (field == null)
throw new SchemaException("Unknown field: " + name);
this.values[field.index] = value;
return this;
}
|
Set the field specified by the given name to the value
@param name The name of the field
@param value The value to set
@throws SchemaException If the field is not known
|
java
|
clients/src/main/java/org/apache/kafka/common/protocol/types/Struct.java
| 147
|
[
"name",
"value"
] |
Struct
| true
| 2
| 6.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
maxTimestamp
|
@Override
public long maxTimestamp() {
return buffer.getLong(MAX_TIMESTAMP_OFFSET);
}
|
Gets the base timestamp of the batch which is used to calculate the record timestamps from the deltas.
@return The base timestamp
|
java
|
clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java
| 169
|
[] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
loadBeanPostProcessors
|
static <T extends BeanPostProcessor> List<T> loadBeanPostProcessors(
ConfigurableListableBeanFactory beanFactory, Class<T> beanPostProcessorType) {
String[] postProcessorNames = beanFactory.getBeanNamesForType(beanPostProcessorType, true, false);
List<T> postProcessors = new ArrayList<>();
for (String ppName : postProcessorNames) {
postProcessors.add(beanFactory.getBean(ppName, beanPostProcessorType));
}
sortPostProcessors(postProcessors, beanFactory);
return postProcessors;
}
|
Load and sort the post-processors of the specified type.
@param beanFactory the bean factory to use
@param beanPostProcessorType the post-processor type
@param <T> the post-processor type
@return a list of sorted post-processors for the specified type
|
java
|
spring-context/src/main/java/org/springframework/context/support/PostProcessorRegistrationDelegate.java
| 301
|
[
"beanFactory",
"beanPostProcessorType"
] | true
| 1
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
deleteShareGroups
|
DeleteShareGroupsResult deleteShareGroups(Collection<String> groupIds, DeleteShareGroupsOptions options);
|
Delete share groups from the cluster.
@param groupIds Collection of share group ids which are to be deleted.
@param options The options to use when deleting a share group.
@return The DeleteShareGroupsResult.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 2,025
|
[
"groupIds",
"options"
] |
DeleteShareGroupsResult
| true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
prod
|
def prod(
self,
numeric_only: bool = False,
min_count: int = 0,
):
"""
Compute prod of group values.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
.. versionchanged:: 2.0.0
numeric_only no longer accepts ``None``.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer
than ``min_count`` non-NA values are present the result will be NA.
Returns
-------
Series or DataFrame
Computed prod of values within each group.
See Also
--------
core.resample.Resampler.sum : Compute sum of groups, excluding missing values.
core.resample.Resampler.mean : Compute mean of groups, excluding missing values.
core.resample.Resampler.median : Compute median of groups, excluding missing
values.
Examples
--------
>>> ser = pd.Series(
... [1, 2, 3, 4],
... index=pd.DatetimeIndex(
... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
... ),
... )
>>> ser
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 4
dtype: int64
>>> ser.resample("MS").prod()
2023-01-01 2
2023-02-01 12
Freq: MS, dtype: int64
"""
return self._downsample("prod", numeric_only=numeric_only, min_count=min_count)
|
Compute prod of group values.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
.. versionchanged:: 2.0.0
numeric_only no longer accepts ``None``.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer
than ``min_count`` non-NA values are present the result will be NA.
Returns
-------
Series or DataFrame
Computed prod of values within each group.
See Also
--------
core.resample.Resampler.sum : Compute sum of groups, excluding missing values.
core.resample.Resampler.mean : Compute mean of groups, excluding missing values.
core.resample.Resampler.median : Compute median of groups, excluding missing
values.
Examples
--------
>>> ser = pd.Series(
... [1, 2, 3, 4],
... index=pd.DatetimeIndex(
... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
... ),
... )
>>> ser
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 4
dtype: int64
>>> ser.resample("MS").prod()
2023-01-01 2
2023-02-01 12
Freq: MS, dtype: int64
|
python
|
pandas/core/resample.py
| 1,158
|
[
"self",
"numeric_only",
"min_count"
] | true
| 1
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
privateKey
|
@Nullable PrivateKey privateKey();
|
The private key for this store or {@code null}.
@return the private key
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/pem/PemSslStore.java
| 73
|
[] |
PrivateKey
| true
| 1
| 6.48
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
ipFloatBit
|
public static float ipFloatBit(float[] q, byte[] d) {
if (q.length != d.length * Byte.SIZE) {
throw new IllegalArgumentException("vector dimensions incompatible: " + q.length + "!= " + Byte.SIZE + " x " + d.length);
}
return IMPL.ipFloatBit(q, d);
}
|
Compute the inner product of two vectors, where the query vector is a float vector and the document vector is a bit vector.
This will return the sum of the query vector values using the document vector as a mask.
When comparing the bits with the floats, they are done in "big endian" order. For example, if the float vector
is [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0] and the bit vector is [0b10000000], the inner product will be 1.0.
@param q the query vector
@param d the document vector
@return the inner product of the two vectors
|
java
|
libs/simdvec/src/main/java/org/elasticsearch/simdvec/ESVectorUtil.java
| 101
|
[
"q",
"d"
] | true
| 2
| 8.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
calcTSPScore
|
std::pair<uint64_t, uint64_t>
calcTSPScore(const BinaryFunctionListType &BinaryFunctions,
const std::unordered_map<BinaryBasicBlock *, uint64_t> &BBAddr,
const std::unordered_map<BinaryBasicBlock *, uint64_t> &BBSize) {
uint64_t Score = 0;
uint64_t JumpCount = 0;
for (BinaryFunction *BF : BinaryFunctions) {
if (!BF->hasProfile())
continue;
for (BinaryBasicBlock *SrcBB : BF->getLayout().blocks()) {
auto BI = SrcBB->branch_info_begin();
for (BinaryBasicBlock *DstBB : SrcBB->successors()) {
if (SrcBB != DstBB && BI->Count != BinaryBasicBlock::COUNT_NO_PROFILE) {
JumpCount += BI->Count;
auto BBAddrIt = BBAddr.find(SrcBB);
assert(BBAddrIt != BBAddr.end());
uint64_t SrcBBAddr = BBAddrIt->second;
auto BBSizeIt = BBSize.find(SrcBB);
assert(BBSizeIt != BBSize.end());
uint64_t SrcBBSize = BBSizeIt->second;
BBAddrIt = BBAddr.find(DstBB);
assert(BBAddrIt != BBAddr.end());
uint64_t DstBBAddr = BBAddrIt->second;
if (SrcBBAddr + SrcBBSize == DstBBAddr)
Score += BI->Count;
}
++BI;
}
}
}
return std::make_pair(Score, JumpCount);
}
|
(the number of fallthrough branches, the total number of branches)
|
cpp
|
bolt/lib/Passes/CacheMetrics.cpp
| 56
|
[] | true
| 5
| 6.56
|
llvm/llvm-project
| 36,021
|
doxygen
| false
|
|
toEncodedString
|
public static String toEncodedString(final byte[] bytes, final Charset charset) {
return new String(bytes, Charsets.toCharset(charset));
}
|
Converts a {@code byte[]} to a String using the specified character encoding.
@param bytes the byte array to read from.
@param charset the encoding to use, if null then use the platform default.
@return a new String.
@throws NullPointerException if {@code bytes} is null
@since 3.2
@since 3.3 No longer throws {@link UnsupportedEncodingException}.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 8,661
|
[
"bytes",
"charset"
] |
String
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
_finishAndReturnText
|
protected Text _finishAndReturnText() throws IOException {
int ptr = _inputPtr;
if (ptr >= _inputEnd) {
_loadMoreGuaranteed();
ptr = _inputPtr;
}
int startPtr = ptr;
final int[] codes = INPUT_CODES_UTF8;
final int max = _inputEnd;
final byte[] inputBuffer = _inputBuffer;
stringLength = 0;
backslashes.clear();
loop: while (true) {
if (ptr >= max) {
return null;
}
int c = inputBuffer[ptr] & 0xFF;
switch (codes[c]) {
case 0 -> {
++ptr;
++stringLength;
}
case 1 -> {
if (c == INT_QUOTE) {
// End of the string
break loop;
}
assert c == INT_BACKSLASH;
backslashes.add(ptr);
++ptr;
if (ptr >= max) {
// Backslash at end of file
return null;
}
c = inputBuffer[ptr] & 0xFF;
if (c == '"' || c == '/' || c == '\\') {
ptr += 1;
stringLength += 1;
} else {
// Any other escaped sequence requires replacing the sequence with
// a new character, which we don't support in the optimized path
return null;
}
}
case 2, 3, 4 -> {
int bytesToSkip = codes[c];
if (ptr + bytesToSkip > max) {
return null;
}
ptr += bytesToSkip;
// Code points that require 4 bytes in UTF-8 will use 2 chars in UTF-16.
stringLength += (bytesToSkip == 4 ? 2 : 1);
}
default -> {
return null;
}
}
}
stringEnd = ptr + 1;
if (backslashes.isEmpty()) {
return new Text(new XContentString.UTF8Bytes(inputBuffer, startPtr, ptr - startPtr), stringLength);
} else {
byte[] buff = new byte[ptr - startPtr - backslashes.size()];
int copyPtr = startPtr;
int destPtr = 0;
for (Integer backslash : backslashes) {
int length = backslash - copyPtr;
System.arraycopy(inputBuffer, copyPtr, buff, destPtr, length);
destPtr += length;
copyPtr = backslash + 1;
}
System.arraycopy(inputBuffer, copyPtr, buff, destPtr, ptr - copyPtr);
lastOptimisedValue = buff;
return new Text(new XContentString.UTF8Bytes(buff), stringLength);
}
}
|
Method that will try to get underlying UTF-8 encoded bytes of the current string token.
This is only a best-effort attempt; if there is some reason the bytes cannot be retrieved, this method will return null.
|
java
|
libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/ESUTF8StreamJsonParser.java
| 76
|
[] |
Text
| true
| 12
| 7.12
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
format
|
public static String format(final Calendar calendar, final String pattern, final TimeZone timeZone) {
return format(calendar, pattern, timeZone, null);
}
|
Formats a calendar into a specific pattern in a time zone.
@param calendar the calendar to format, not null.
@param pattern the pattern to use to format the calendar, not null.
@param timeZone the time zone to use, may be {@code null}.
@return the formatted calendar.
@see FastDateFormat#format(Calendar)
@since 2.4
|
java
|
src/main/java/org/apache/commons/lang3/time/DateFormatUtils.java
| 239
|
[
"calendar",
"pattern",
"timeZone"
] |
String
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
saturatedPow
|
@SuppressWarnings("ShortCircuitBoolean")
public static long saturatedPow(long b, int k) {
checkNonNegative("exponent", k);
if (b >= -2 & b <= 2) {
switch ((int) b) {
case 0:
return (k == 0) ? 1 : 0;
case 1:
return 1;
case -1:
return ((k & 1) == 0) ? 1 : -1;
case 2:
if (k >= Long.SIZE - 1) {
return Long.MAX_VALUE;
}
return 1L << k;
case -2:
if (k >= Long.SIZE) {
return Long.MAX_VALUE + (k & 1);
}
return ((k & 1) == 0) ? (1L << k) : (-1L << k);
default:
throw new AssertionError();
}
}
long accum = 1;
// if b is negative and k is odd then the limit is MIN otherwise the limit is MAX
long limit = Long.MAX_VALUE + ((b >>> (Long.SIZE - 1)) & (k & 1));
while (true) {
switch (k) {
case 0:
return accum;
case 1:
return saturatedMultiply(accum, b);
default:
if ((k & 1) != 0) {
accum = saturatedMultiply(accum, b);
}
k >>= 1;
if (k > 0) {
if (-FLOOR_SQRT_MAX_LONG > b | b > FLOOR_SQRT_MAX_LONG) {
return limit;
}
b *= b;
}
}
}
}
|
Returns the {@code b} to the {@code k}th power, unless it would overflow or underflow in which
case {@code Long.MAX_VALUE} or {@code Long.MIN_VALUE} is returned, respectively.
@since 20.0
|
java
|
android/guava/src/com/google/common/math/LongMath.java
| 709
|
[
"b",
"k"
] | true
| 11
| 6
|
google/guava
| 51,352
|
javadoc
| false
|
|
adaptCaffeineCache
|
protected Cache adaptCaffeineCache(String name, AsyncCache<Object, Object> cache) {
return new CaffeineCache(name, cache, isAllowNullValues());
}
|
Adapt the given new Caffeine AsyncCache instance to Spring's {@link Cache}
abstraction for the specified cache name.
@param name the name of the cache
@param cache the Caffeine AsyncCache instance
@return the Spring CaffeineCache adapter (or a decorator thereof)
@since 6.1
@see CaffeineCache#CaffeineCache(String, AsyncCache, boolean)
@see #isAllowNullValues()
|
java
|
spring-context-support/src/main/java/org/springframework/cache/caffeine/CaffeineCacheManager.java
| 357
|
[
"name",
"cache"
] |
Cache
| true
| 1
| 6.16
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
execute
|
public abstract double execute(Map<String, Object> params, double[] values);
|
@param params The user-provided parameters
@param values The values in the window that we are moving a function across
@return A double representing the value from this particular window
|
java
|
modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovingFunctionScript.java
| 27
|
[
"params",
"values"
] | true
| 1
| 6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
getMin
|
@Override
public double getMin() {
if (mergingDigest != null) {
return mergingDigest.getMin();
}
return sortingDigest.getMin();
}
|
Similar to the constructor above. The limit for switching from a {@link SortingDigest} to a {@link MergingDigest} implementation
is calculated based on the passed compression factor.
@param compression The compression factor for the MergingDigest
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/HybridDigest.java
| 198
|
[] | true
| 2
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
checkNonLoadingCache
|
private void checkNonLoadingCache() {
checkState(refreshNanos == UNSET_INT, "refreshAfterWrite requires a LoadingCache");
}
|
Builds a cache which does not automatically load values when keys are requested.
<p>Consider {@link #build(CacheLoader)} instead, if it is feasible to implement a {@code
CacheLoader}.
<p>This method does not alter the state of this {@code CacheBuilder} instance, so it can be
invoked again to create multiple independent caches.
@return a cache having the requested features
@since 11.0
|
java
|
android/guava/src/com/google/common/cache/CacheBuilder.java
| 1,060
|
[] |
void
| true
| 1
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
parseNonArrayType
|
function parseNonArrayType(): TypeNode {
switch (token()) {
case SyntaxKind.AnyKeyword:
case SyntaxKind.UnknownKeyword:
case SyntaxKind.StringKeyword:
case SyntaxKind.NumberKeyword:
case SyntaxKind.BigIntKeyword:
case SyntaxKind.SymbolKeyword:
case SyntaxKind.BooleanKeyword:
case SyntaxKind.UndefinedKeyword:
case SyntaxKind.NeverKeyword:
case SyntaxKind.ObjectKeyword:
// If these are followed by a dot, then parse these out as a dotted type reference instead.
return tryParse(parseKeywordAndNoDot) || parseTypeReference();
case SyntaxKind.AsteriskEqualsToken:
// If there is '*=', treat it as * followed by postfix =
scanner.reScanAsteriskEqualsToken();
// falls through
case SyntaxKind.AsteriskToken:
return parseJSDocAllType();
case SyntaxKind.QuestionQuestionToken:
// If there is '??', treat it as prefix-'?' in JSDoc type.
scanner.reScanQuestionToken();
// falls through
case SyntaxKind.QuestionToken:
return parseJSDocUnknownOrNullableType();
case SyntaxKind.FunctionKeyword:
return parseJSDocFunctionType();
case SyntaxKind.ExclamationToken:
return parseJSDocNonNullableType();
case SyntaxKind.NoSubstitutionTemplateLiteral:
case SyntaxKind.StringLiteral:
case SyntaxKind.NumericLiteral:
case SyntaxKind.BigIntLiteral:
case SyntaxKind.TrueKeyword:
case SyntaxKind.FalseKeyword:
case SyntaxKind.NullKeyword:
return parseLiteralTypeNode();
case SyntaxKind.MinusToken:
return lookAhead(nextTokenIsNumericOrBigIntLiteral) ? parseLiteralTypeNode(/*negative*/ true) : parseTypeReference();
case SyntaxKind.VoidKeyword:
return parseTokenNode<TypeNode>();
case SyntaxKind.ThisKeyword: {
const thisKeyword = parseThisTypeNode();
if (token() === SyntaxKind.IsKeyword && !scanner.hasPrecedingLineBreak()) {
return parseThisTypePredicate(thisKeyword);
}
else {
return thisKeyword;
}
}
case SyntaxKind.TypeOfKeyword:
return lookAhead(isStartOfTypeOfImportType) ? parseImportType() : parseTypeQuery();
case SyntaxKind.OpenBraceToken:
return lookAhead(isStartOfMappedType) ? parseMappedType() : parseTypeLiteral();
case SyntaxKind.OpenBracketToken:
return parseTupleType();
case SyntaxKind.OpenParenToken:
return parseParenthesizedType();
case SyntaxKind.ImportKeyword:
return parseImportType();
case SyntaxKind.AssertsKeyword:
return lookAhead(nextTokenIsIdentifierOrKeywordOnSameLine) ? parseAssertsTypePredicate() : parseTypeReference();
case SyntaxKind.TemplateHead:
return parseTemplateType();
default:
return parseTypeReference();
}
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 4,589
|
[] | true
| 9
| 6.8
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
subarray
|
public static <T> T[] subarray(final T[] array, int startIndexInclusive, int endIndexExclusive) {
if (array == null) {
return null;
}
startIndexInclusive = max0(startIndexInclusive);
endIndexExclusive = Math.min(endIndexExclusive, array.length);
final int newSize = endIndexExclusive - startIndexInclusive;
final Class<T> type = getComponentType(array);
if (newSize <= 0) {
return newInstance(type, 0);
}
return arraycopy(array, startIndexInclusive, 0, newSize, () -> newInstance(type, newSize));
}
|
Produces a new array containing the elements between the start and end indices.
<p>
The start index is inclusive, the end index exclusive. Null array input produces null output.
</p>
<p>
The component type of the subarray is always the same as that of the input array. Thus, if the input is an array of type {@link Date}, the following
usage is envisaged:
</p>
<pre>
Date[] someDates = (Date[]) ArrayUtils.subarray(allDates, 2, 5);
</pre>
@param <T> the component type of the array.
@param array the input array.
@param startIndexInclusive the starting index. Undervalue (<0) is promoted to 0, overvalue (>array.length) results in an empty array.
@param endIndexExclusive elements up to endIndex-1 are present in the returned subarray. Undervalue (< startIndex) produces empty array, overvalue
(>array.length) is demoted to array length.
@return a new array containing the elements between the start and end indices.
@since 2.1
@see Arrays#copyOfRange(Object[], int, int)
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 7,988
|
[
"array",
"startIndexInclusive",
"endIndexExclusive"
] | true
| 3
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
toPercent
|
function toPercent(parsedNumber: ParsedNumber): ParsedNumber {
// if the number is 0, don't do anything
if (parsedNumber.digits[0] === 0) {
return parsedNumber;
}
// Getting the current number of decimals
const fractionLen = parsedNumber.digits.length - parsedNumber.integerLen;
if (parsedNumber.exponent) {
parsedNumber.exponent += 2;
} else {
if (fractionLen === 0) {
parsedNumber.digits.push(0, 0);
} else if (fractionLen === 1) {
parsedNumber.digits.push(0);
}
parsedNumber.integerLen += 2;
}
return parsedNumber;
}
|
@ngModule CommonModule
@description
Formats a number as text, with group sizing, separator, and other
parameters based on the locale.
@param value The number to format.
@param locale A locale code for the locale format rules to use.
@param digitsInfo Decimal representation options, specified by a string in the following format:
`{minIntegerDigits}.{minFractionDigits}-{maxFractionDigits}`. See `DecimalPipe` for more details.
@returns The formatted text string.
@see [Internationalization (i18n) Guide](guide/i18n)
@publicApi
|
typescript
|
packages/common/src/i18n/format_number.ts
| 357
|
[
"parsedNumber"
] | true
| 7
| 7.6
|
angular/angular
| 99,544
|
jsdoc
| false
|
|
compile_multiarch_bundle_from_llvm_ir
|
def compile_multiarch_bundle_from_llvm_ir(
llvm_ir_path: str, output_bundle_path: str, target_archs: Optional[list[str]] = None
) -> bool:
"""
Complete workflow: LLVM IR → multiple code objects → bundle.
This is the main entry point for multi-arch compilation.
Args:
llvm_ir_path: Path to .ll file
output_bundle_path: Where to write bundle
target_archs: Optional list of architectures
Returns:
True if successful
"""
if target_archs is None:
# Get architectures from environment variable or config
target_archs = get_rocm_target_archs()
# Step 1: Compile LLVM IR to code object for each architecture
code_objects = {}
temp_dir = os.path.dirname(output_bundle_path)
kernel_name = os.path.splitext(os.path.basename(llvm_ir_path))[0]
for arch in target_archs:
# Create temporary single-architecture code object
# Format: kernel_name_gfx90a.co, kernel_name_gfx942.co, etc.
co_path = os.path.join(temp_dir, f"{kernel_name}_{arch}.co")
# Compile with clang backend: LLVM IR → GPU machine code
if compile_llvm_ir_to_code_object(llvm_ir_path, co_path, arch):
code_objects[arch] = co_path
if not code_objects:
return False
# Step 2: Bundle all code objects together
# Uses clang-offload-bundler to create fat binary
success = create_multiarch_bundle(code_objects, output_bundle_path)
# Step 3: Clean up temporary single-arch code objects
# The bundle contains all the code, so intermediates are no longer needed
for co_path in code_objects.values():
try:
os.remove(co_path)
except Exception:
pass
return success
|
Complete workflow: LLVM IR → multiple code objects → bundle.
This is the main entry point for multi-arch compilation.
Args:
llvm_ir_path: Path to .ll file
output_bundle_path: Where to write bundle
target_archs: Optional list of architectures
Returns:
True if successful
|
python
|
torch/_inductor/rocm_multiarch_utils.py
| 215
|
[
"llvm_ir_path",
"output_bundle_path",
"target_archs"
] |
bool
| true
| 6
| 8.08
|
pytorch/pytorch
| 96,034
|
google
| false
|
allConsumed
|
public synchronized Map<TopicPartition, OffsetAndMetadata> allConsumed() {
Map<TopicPartition, OffsetAndMetadata> allConsumed = new HashMap<>();
assignment.forEach((topicPartition, partitionState) -> {
if (partitionState.hasValidPosition())
allConsumed.put(topicPartition, new OffsetAndMetadata(partitionState.position.offset,
partitionState.position.offsetEpoch, ""));
});
return allConsumed;
}
|
Unset the preferred read replica. This causes the fetcher to go back to the leader for fetches.
@param tp The topic partition
@return the removed preferred read replica if set, Empty otherwise.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
| 775
|
[] | true
| 2
| 8.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
fit
|
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to compute the median and quantiles
used for later scaling along the features axis.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Fitted scaler.
"""
# at fit, convert sparse matrices to csc for optimized computation of
# the quantiles
X = validate_data(
self,
X,
accept_sparse="csc",
dtype=FLOAT_DTYPES,
ensure_all_finite="allow-nan",
)
q_min, q_max = self.quantile_range
if not 0 <= q_min <= q_max <= 100:
raise ValueError("Invalid quantile range: %s" % str(self.quantile_range))
if self.with_centering:
if sparse.issparse(X):
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives."
)
self.center_ = np.nanmedian(X, axis=0)
else:
self.center_ = None
if self.with_scaling:
quantiles = []
for feature_idx in range(X.shape[1]):
if sparse.issparse(X):
column_nnz_data = X.data[
X.indptr[feature_idx] : X.indptr[feature_idx + 1]
]
column_data = np.zeros(shape=X.shape[0], dtype=X.dtype)
column_data[: len(column_nnz_data)] = column_nnz_data
else:
column_data = X[:, feature_idx]
quantiles.append(np.nanpercentile(column_data, self.quantile_range))
quantiles = np.transpose(quantiles)
self.scale_ = quantiles[1] - quantiles[0]
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
if self.unit_variance:
adjust = stats.norm.ppf(q_max / 100.0) - stats.norm.ppf(q_min / 100.0)
self.scale_ = self.scale_ / adjust
else:
self.scale_ = None
return self
|
Compute the median and quantiles to be used for scaling.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to compute the median and quantiles
used for later scaling along the features axis.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Fitted scaler.
|
python
|
sklearn/preprocessing/_data.py
| 1,668
|
[
"self",
"X",
"y"
] | false
| 11
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
alterReplicaLogDirs
|
AlterReplicaLogDirsResult alterReplicaLogDirs(Map<TopicPartitionReplica, String> replicaAssignment,
AlterReplicaLogDirsOptions options);
|
Change the log directory for the specified replicas. If the replica does not exist on the broker, the result
shows REPLICA_NOT_AVAILABLE for the given replica and the replica will be created in the given log directory on the
broker when it is created later. If the replica already exists on the broker, the replica will be moved to the given
log directory if it is not already there. For detailed result, inspect the returned {@link AlterReplicaLogDirsResult} instance.
<p>
This operation is not transactional so it may succeed for some replicas while fail for others.
<p>
This operation is supported by brokers with version 1.1.0 or higher.
@param replicaAssignment The replicas with their log directory absolute path
@param options The options to use when changing replica dir
@return The AlterReplicaLogDirsResult
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 567
|
[
"replicaAssignment",
"options"
] |
AlterReplicaLogDirsResult
| true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
findCandidateComponents
|
public Set<BeanDefinition> findCandidateComponents(String basePackage) {
if (this.componentsIndex != null && indexSupportsIncludeFilters()) {
if (this.componentsIndex.hasScannedPackage(basePackage)) {
return addCandidateComponentsFromIndex(this.componentsIndex, basePackage);
}
else {
this.componentsIndex.registerScan(basePackage);
}
}
return scanCandidateComponents(basePackage);
}
|
Scan the component index or class path for candidate components.
@param basePackage the package to check for annotated classes
@return a corresponding Set of autodetected bean definitions
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/ClassPathScanningCandidateComponentProvider.java
| 312
|
[
"basePackage"
] | true
| 4
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
_query_range_helper
|
def _query_range_helper(
self, node: int, start: int, end: int, left: int, right: int
) -> T:
"""
Helper method to query a range of values in the segment tree.
Args:
node: Current node index
start: Start index of the current segment
end: End index of the current segment
left: Start index of the range to query
right: End index of the range to query
Returns:
Summary value for the range
"""
# No overlap
if start > right or end < left:
return self.identity
# Push lazy updates before processing this node
self._push_lazy(node, start, end)
# Complete overlap
if start >= left and end <= right:
return self.tree[node]
# Partial overlap, recurse to children
mid = (start + end) // 2
left_child = 2 * node
right_child = 2 * node + 1
left_result = self._query_range_helper(left_child, start, mid, left, right)
right_result = self._query_range_helper(right_child, mid + 1, end, left, right)
# Combine results from children
return self.summary_op(left_result, right_result)
|
Helper method to query a range of values in the segment tree.
Args:
node: Current node index
start: Start index of the current segment
end: End index of the current segment
left: Start index of the range to query
right: End index of the range to query
Returns:
Summary value for the range
|
python
|
torch/_inductor/codegen/segmented_tree.py
| 158
|
[
"self",
"node",
"start",
"end",
"left",
"right"
] |
T
| true
| 5
| 8.08
|
pytorch/pytorch
| 96,034
|
google
| false
|
inline_subgraph_to_ir_nodes
|
def inline_subgraph_to_ir_nodes(
gm: torch.fx.GraphModule, inputs: list[Any], name: str
) -> Any:
"""Inline a subgraph by converting its FX operations to individual IR nodes.
This converts a subgraph to multiple ComputedBuffer nodes (fusable),
enabling epilogue fusion with subsequent operations.
Returns:
TensorBox containing the final operation result as individual IR nodes
"""
from torch._inductor.lowering import process_subgraph_nodes
# Temporarily switch V.graph.module to subgraph during processing; restore to prevent IR nodes added to wrong graph
original_module = V.graph.module
try:
V.graph.module = gm
return process_subgraph_nodes(gm, inputs)
finally:
V.graph.module = original_module
|
Inline a subgraph by converting its FX operations to individual IR nodes.
This converts a subgraph to multiple ComputedBuffer nodes (fusable),
enabling epilogue fusion with subsequent operations.
Returns:
TensorBox containing the final operation result as individual IR nodes
|
python
|
torch/_inductor/codegen/subgraph.py
| 30
|
[
"gm",
"inputs",
"name"
] |
Any
| true
| 1
| 6.24
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
flip
|
def flip(m, axis=None):
"""
Reverse the order of elements in an array along the given axis.
The shape of the array is preserved, but the elements are reordered.
Parameters
----------
m : array_like
Input array.
axis : None or int or tuple of ints, optional
Axis or axes along which to flip over. The default,
axis=None, will flip over all of the axes of the input array.
If axis is negative it counts from the last to the first axis.
If axis is a tuple of ints, flipping is performed on all of the axes
specified in the tuple.
Returns
-------
out : array_like
A view of `m` with the entries of axis reversed. Since a view is
returned, this operation is done in constant time.
See Also
--------
flipud : Flip an array vertically (axis=0).
fliplr : Flip an array horizontally (axis=1).
Notes
-----
flip(m, 0) is equivalent to flipud(m).
flip(m, 1) is equivalent to fliplr(m).
flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n.
flip(m) corresponds to ``m[::-1,::-1,...,::-1]`` with ``::-1`` at all
positions.
flip(m, (0, 1)) corresponds to ``m[::-1,::-1,...]`` with ``::-1`` at
position 0 and position 1.
Examples
--------
>>> import numpy as np
>>> A = np.arange(8).reshape((2,2,2))
>>> A
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.flip(A, 0)
array([[[4, 5],
[6, 7]],
[[0, 1],
[2, 3]]])
>>> np.flip(A, 1)
array([[[2, 3],
[0, 1]],
[[6, 7],
[4, 5]]])
>>> np.flip(A)
array([[[7, 6],
[5, 4]],
[[3, 2],
[1, 0]]])
>>> np.flip(A, (0, 2))
array([[[5, 4],
[7, 6]],
[[1, 0],
[3, 2]]])
>>> rng = np.random.default_rng()
>>> A = rng.normal(size=(3,4,5))
>>> np.all(np.flip(A,2) == A[:,:,::-1,...])
True
"""
if not hasattr(m, 'ndim'):
m = asarray(m)
if axis is None:
indexer = (np.s_[::-1],) * m.ndim
else:
axis = _nx.normalize_axis_tuple(axis, m.ndim)
indexer = [np.s_[:]] * m.ndim
for ax in axis:
indexer[ax] = np.s_[::-1]
indexer = tuple(indexer)
return m[indexer]
|
Reverse the order of elements in an array along the given axis.
The shape of the array is preserved, but the elements are reordered.
Parameters
----------
m : array_like
Input array.
axis : None or int or tuple of ints, optional
Axis or axes along which to flip over. The default,
axis=None, will flip over all of the axes of the input array.
If axis is negative it counts from the last to the first axis.
If axis is a tuple of ints, flipping is performed on all of the axes
specified in the tuple.
Returns
-------
out : array_like
A view of `m` with the entries of axis reversed. Since a view is
returned, this operation is done in constant time.
See Also
--------
flipud : Flip an array vertically (axis=0).
fliplr : Flip an array horizontally (axis=1).
Notes
-----
flip(m, 0) is equivalent to flipud(m).
flip(m, 1) is equivalent to fliplr(m).
flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n.
flip(m) corresponds to ``m[::-1,::-1,...,::-1]`` with ``::-1`` at all
positions.
flip(m, (0, 1)) corresponds to ``m[::-1,::-1,...]`` with ``::-1`` at
position 0 and position 1.
Examples
--------
>>> import numpy as np
>>> A = np.arange(8).reshape((2,2,2))
>>> A
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.flip(A, 0)
array([[[4, 5],
[6, 7]],
[[0, 1],
[2, 3]]])
>>> np.flip(A, 1)
array([[[2, 3],
[0, 1]],
[[6, 7],
[4, 5]]])
>>> np.flip(A)
array([[[7, 6],
[5, 4]],
[[3, 2],
[1, 0]]])
>>> np.flip(A, (0, 2))
array([[[5, 4],
[7, 6]],
[[1, 0],
[3, 2]]])
>>> rng = np.random.default_rng()
>>> A = rng.normal(size=(3,4,5))
>>> np.all(np.flip(A,2) == A[:,:,::-1,...])
True
|
python
|
numpy/lib/_function_base_impl.py
| 273
|
[
"m",
"axis"
] | false
| 5
| 7.6
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
_update_memory_tracking_after_swap_sink_waits
|
def _update_memory_tracking_after_swap_sink_waits(
candidate: BaseSchedulerNode,
gns: list[BaseSchedulerNode],
candidate_delta_mem: int,
candidate_allocfree: SNodeMemory,
group_n_to_bufs_after_swap_dealloc_instead_of_candidate: dict,
post_alloc_update: dict[BaseSchedulerNode, int],
size_free_delta_update: dict[BaseSchedulerNode, int],
curr_memory: dict,
snodes_allocfree: dict,
) -> None:
"""
Update memory tracking structures after swap (sink_waits version).
Updates curr_memory and snodes_allocfree dictionaries to reflect the new
memory state after swapping candidate with group.
Args:
candidate: Node that was moved
gns: Group nodes
candidate_delta_mem: Net memory change from candidate (alloc - free)
candidate_allocfree: Candidate's allocation/free info
group_n_to_bufs_after_swap_dealloc_instead_of_candidate: Buffers whose deallocation moves from candidate to group
post_alloc_update: Cached post-allocation memory values
size_free_delta_update: Cached size-free delta values
curr_memory: Current memory state dict (mutated)
snodes_allocfree: Node allocation/free info dict (mutated)
"""
group_head = gns[0]
pre_group_mem = curr_memory[group_head][0] - snodes_allocfree[group_head].size_alloc
if not group_n_to_bufs_after_swap_dealloc_instead_of_candidate:
candidate_post_alloc = pre_group_mem + candidate_allocfree.size_alloc
curr_memory[candidate] = (
candidate_post_alloc,
candidate_post_alloc - candidate_allocfree.size_free,
)
for gn in gns:
cm = curr_memory[gn]
curr_memory[gn] = (
cm[0] + candidate_delta_mem,
cm[1] + candidate_delta_mem,
)
return
for n in [candidate, *gns]:
post_alloc = post_alloc_update[n]
snodes_allocfree[n].size_free += size_free_delta_update.get(n, 0)
curr_memory[n] = (
post_alloc,
post_alloc - snodes_allocfree[n].size_free,
)
|
Update memory tracking structures after swap (sink_waits version).
Updates curr_memory and snodes_allocfree dictionaries to reflect the new
memory state after swapping candidate with group.
Args:
candidate: Node that was moved
gns: Group nodes
candidate_delta_mem: Net memory change from candidate (alloc - free)
candidate_allocfree: Candidate's allocation/free info
group_n_to_bufs_after_swap_dealloc_instead_of_candidate: Buffers whose deallocation moves from candidate to group
post_alloc_update: Cached post-allocation memory values
size_free_delta_update: Cached size-free delta values
curr_memory: Current memory state dict (mutated)
snodes_allocfree: Node allocation/free info dict (mutated)
|
python
|
torch/_inductor/comms.py
| 1,442
|
[
"candidate",
"gns",
"candidate_delta_mem",
"candidate_allocfree",
"group_n_to_bufs_after_swap_dealloc_instead_of_candidate",
"post_alloc_update",
"size_free_delta_update",
"curr_memory",
"snodes_allocfree"
] |
None
| true
| 4
| 6.08
|
pytorch/pytorch
| 96,034
|
google
| false
|
nanvar
|
def nanvar(
values: np.ndarray,
*,
axis: AxisInt | None = None,
skipna: bool = True,
ddof: int = 1,
mask=None,
):
"""
Compute the variance along given axis while ignoring NaNs
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> from pandas.core import nanops
>>> s = pd.Series([1, np.nan, 2, 3])
>>> nanops.nanvar(s.values)
1.0
"""
dtype = values.dtype
mask = _maybe_get_mask(values, skipna, mask)
if dtype.kind in "iu":
values = values.astype("f8")
if mask is not None:
values[mask] = np.nan
if values.dtype.kind == "f":
count, d = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype)
else:
count, d = _get_counts_nanvar(values.shape, mask, axis, ddof)
if skipna and mask is not None:
values = values.copy()
np.putmask(values, mask, 0)
# xref GH10242
# Compute variance via two-pass algorithm, which is stable against
# cancellation errors and relatively accurate for small numbers of
# observations.
#
# See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count
if axis is not None:
avg = np.expand_dims(avg, axis)
if values.dtype.kind == "c":
# Need to use absolute value for complex numbers.
sqr = _ensure_numeric(abs(avg - values) ** 2)
else:
sqr = _ensure_numeric((avg - values) ** 2)
if mask is not None:
np.putmask(sqr, mask, 0)
result = sqr.sum(axis=axis, dtype=np.float64) / d
# Return variance as np.float64 (the datatype used in the accumulator),
# unless we were dealing with a float array, in which case use the same
# precision as the original values array.
if dtype.kind == "f":
result = result.astype(dtype, copy=False)
return result
|
Compute the variance along given axis while ignoring NaNs
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> from pandas.core import nanops
>>> s = pd.Series([1, np.nan, 2, 3])
>>> nanops.nanvar(s.values)
1.0
|
python
|
pandas/core/nanops.py
| 960
|
[
"values",
"axis",
"skipna",
"ddof",
"mask"
] | true
| 12
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
hoistBindingElement
|
function hoistBindingElement(node: VariableDeclaration | BindingElement): void {
if (isBindingPattern(node.name)) {
for (const element of node.name.elements) {
if (!isOmittedExpression(element)) {
hoistBindingElement(element);
}
}
}
else {
hoistVariableDeclaration(factory.cloneNode(node.name));
}
}
|
Hoists the declared names of a VariableDeclaration or BindingElement.
@param node The declaration to hoist.
|
typescript
|
src/compiler/transformers/module/system.ts
| 922
|
[
"node"
] | true
| 4
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
toString
|
@Override
public String toString() {
return "ValueHint{value=" + this.value + ", description='" + this.description + '\'' + '}';
}
|
A single-line, single-sentence description of this hint, if any.
@return the short description
@see #getDescription()
|
java
|
configuration-metadata/spring-boot-configuration-metadata/src/main/java/org/springframework/boot/configurationmetadata/ValueHint.java
| 75
|
[] |
String
| true
| 1
| 6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
fromIPv6BigInteger
|
public static Inet6Address fromIPv6BigInteger(BigInteger address) {
return (Inet6Address) fromBigInteger(address, true);
}
|
Returns the {@code Inet6Address} corresponding to a given {@code BigInteger}.
@param address BigInteger representing the IPv6 address
@return Inet6Address representation of the given BigInteger
@throws IllegalArgumentException if the BigInteger is not between 0 and 2^128-1
@since 28.2
|
java
|
android/guava/src/com/google/common/net/InetAddresses.java
| 1,106
|
[
"address"
] |
Inet6Address
| true
| 1
| 6
|
google/guava
| 51,352
|
javadoc
| false
|
_set_var
|
def _set_var(env: dict[str, str], variable: str, attribute: str | bool | None, default: str | None = None):
"""Set variable in env dict.
Priorities:
1. attribute comes first if not None
2. then environment variable if set
3. then not None default value if environment variable is None
4. if default is None, then the key is not set at all in dictionary
"""
if attribute is not None:
if isinstance(attribute, bool):
env[variable] = str(attribute).lower()
else:
env[variable] = str(attribute)
else:
os_variable_value = os.environ.get(variable)
if os_variable_value is not None:
env[variable] = os_variable_value
elif default is not None:
env[variable] = default
|
Set variable in env dict.
Priorities:
1. attribute comes first if not None
2. then environment variable if set
3. then not None default value if environment variable is None
4. if default is None, then the key is not set at all in dictionary
|
python
|
dev/breeze/src/airflow_breeze/params/shell_params.py
| 131
|
[
"env",
"variable",
"attribute",
"default"
] | true
| 7
| 7.04
|
apache/airflow
| 43,597
|
unknown
| false
|
|
sum
|
def sum(
self,
numeric_only: bool = False,
min_count: int = 0,
):
"""
Compute sum of group values.
This method provides a simple way to compute the sum of values within each
resampled group, particularly useful for aggregating time-based data into
daily, monthly, or yearly sums.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
.. versionchanged:: 2.0.0
numeric_only no longer accepts ``None``.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer
than ``min_count`` non-NA values are present the result will be NA.
Returns
-------
Series or DataFrame
Computed sum of values within each group.
See Also
--------
core.resample.Resampler.mean : Compute mean of groups, excluding missing values.
core.resample.Resampler.count : Compute count of group, excluding missing
values.
DataFrame.resample : Resample time-series data.
Series.sum : Return the sum of the values over the requested axis.
Examples
--------
>>> ser = pd.Series(
... [1, 2, 3, 4],
... index=pd.DatetimeIndex(
... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
... ),
... )
>>> ser
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 4
dtype: int64
>>> ser.resample("MS").sum()
2023-01-01 3
2023-02-01 7
Freq: MS, dtype: int64
"""
return self._downsample("sum", numeric_only=numeric_only, min_count=min_count)
|
Compute sum of group values.
This method provides a simple way to compute the sum of values within each
resampled group, particularly useful for aggregating time-based data into
daily, monthly, or yearly sums.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
.. versionchanged:: 2.0.0
numeric_only no longer accepts ``None``.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer
than ``min_count`` non-NA values are present the result will be NA.
Returns
-------
Series or DataFrame
Computed sum of values within each group.
See Also
--------
core.resample.Resampler.mean : Compute mean of groups, excluding missing values.
core.resample.Resampler.count : Compute count of group, excluding missing
values.
DataFrame.resample : Resample time-series data.
Series.sum : Return the sum of the values over the requested axis.
Examples
--------
>>> ser = pd.Series(
... [1, 2, 3, 4],
... index=pd.DatetimeIndex(
... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
... ),
... )
>>> ser
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 4
dtype: int64
>>> ser.resample("MS").sum()
2023-01-01 3
2023-02-01 7
Freq: MS, dtype: int64
|
python
|
pandas/core/resample.py
| 1,098
|
[
"self",
"numeric_only",
"min_count"
] | true
| 1
| 7.12
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
convertRemainingAccentCharacters
|
private static void convertRemainingAccentCharacters(final StringBuilder decomposed) {
for (int i = 0; i < decomposed.length(); i++) {
final char charAt = decomposed.charAt(i);
switch (charAt) {
case '\u0141':
decomposed.setCharAt(i, 'L');
break;
case '\u0142':
decomposed.setCharAt(i, 'l');
break;
// D with stroke
case '\u0110':
// LATIN CAPITAL LETTER D WITH STROKE
decomposed.setCharAt(i, 'D');
break;
case '\u0111':
// LATIN SMALL LETTER D WITH STROKE
decomposed.setCharAt(i, 'd');
break;
// I with bar
case '\u0197':
decomposed.setCharAt(i, 'I');
break;
case '\u0268':
decomposed.setCharAt(i, 'i');
break;
case '\u1D7B':
decomposed.setCharAt(i, 'I');
break;
case '\u1DA4':
decomposed.setCharAt(i, 'i');
break;
case '\u1DA7':
decomposed.setCharAt(i, 'I');
break;
// U with bar
case '\u0244':
// LATIN CAPITAL LETTER U BAR
decomposed.setCharAt(i, 'U');
break;
case '\u0289':
// LATIN SMALL LETTER U BAR
decomposed.setCharAt(i, 'u');
break;
case '\u1D7E':
// LATIN SMALL CAPITAL LETTER U WITH STROKE
decomposed.setCharAt(i, 'U');
break;
case '\u1DB6':
// MODIFIER LETTER SMALL U BAR
decomposed.setCharAt(i, 'u');
break;
// T with stroke
case '\u0166':
// LATIN CAPITAL LETTER T WITH STROKE
decomposed.setCharAt(i, 'T');
break;
case '\u0167':
// LATIN SMALL LETTER T WITH STROKE
decomposed.setCharAt(i, 't');
break;
default:
break;
}
}
}
|
Tests whether the given CharSequence contains any whitespace characters.
<p>
Whitespace is defined by {@link Character#isWhitespace(char)}.
</p>
<pre>
StringUtils.containsWhitespace(null) = false
StringUtils.containsWhitespace("") = false
StringUtils.containsWhitespace("ab") = false
StringUtils.containsWhitespace(" ab") = true
StringUtils.containsWhitespace("a b") = true
StringUtils.containsWhitespace("ab ") = true
</pre>
@param seq the CharSequence to check (may be {@code null}).
@return {@code true} if the CharSequence is not empty and contains at least 1 (breaking) whitespace character.
@since 3.0
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 1,369
|
[
"decomposed"
] |
void
| true
| 2
| 7.68
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
stamp_links
|
def stamp_links(self, visitor, append_stamps=False, **headers):
"""Stamp this signature links (callbacks and errbacks).
Using a visitor will pass on responsibility for the stamping
to the visitor.
Arguments:
visitor (StampingVisitor): Visitor API object.
append_stamps (bool):
If True, duplicated stamps will be appended to a list.
If False, duplicated stamps will be replaced by the last stamp.
headers (Dict): Stamps that should be added to headers.
"""
non_visitor_headers = headers.copy()
# When we are stamping links, we want to avoid adding stamps from the linked signature itself
# so we turn off self_headers to stamp the link only with the visitor and the headers.
# If it's enabled, the link copies the stamps of the linked signature, and we don't want that.
self_headers = False
# Stamp all of the callbacks of this signature
headers = deepcopy(non_visitor_headers)
for link in maybe_list(self.options.get('link')) or []:
link = maybe_signature(link, app=self.app)
visitor_headers = None
if visitor is not None:
visitor_headers = visitor.on_callback(link, **headers) or {}
headers = self._stamp_headers(
visitor_headers=visitor_headers,
append_stamps=append_stamps,
self_headers=self_headers,
**headers
)
link.stamp(visitor, append_stamps, **headers)
# Stamp all of the errbacks of this signature
headers = deepcopy(non_visitor_headers)
for link in maybe_list(self.options.get('link_error')) or []:
link = maybe_signature(link, app=self.app)
visitor_headers = None
if visitor is not None:
visitor_headers = visitor.on_errback(link, **headers) or {}
headers = self._stamp_headers(
visitor_headers=visitor_headers,
append_stamps=append_stamps,
self_headers=self_headers,
**headers
)
link.stamp(visitor, append_stamps, **headers)
|
Stamp this signature links (callbacks and errbacks).
Using a visitor will pass on responsibility for the stamping
to the visitor.
Arguments:
visitor (StampingVisitor): Visitor API object.
append_stamps (bool):
If True, duplicated stamps will be appended to a list.
If False, duplicated stamps will be replaced by the last stamp.
headers (Dict): Stamps that should be added to headers.
|
python
|
celery/canvas.py
| 635
|
[
"self",
"visitor",
"append_stamps"
] | false
| 9
| 6.08
|
celery/celery
| 27,741
|
google
| false
|
|
checkElementIndex
|
@CanIgnoreReturnValue
public static int checkElementIndex(int index, int size) {
return checkElementIndex(index, size, "index");
}
|
Ensures that {@code index} specifies a valid <i>element</i> in an array, list or string of size
{@code size}. An element index may range from zero, inclusive, to {@code size}, exclusive.
@param index a user-supplied index identifying an element of an array, list or string
@param size the size of that array, list or string
@return the value of {@code index}
@throws IndexOutOfBoundsException if {@code index} is negative or is not less than {@code size}
@throws IllegalArgumentException if {@code size} is negative
|
java
|
android/guava/src/com/google/common/base/Preconditions.java
| 1,349
|
[
"index",
"size"
] | true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
|
doWithMainClasses
|
static <T> @Nullable T doWithMainClasses(JarFile jarFile, @Nullable String classesLocation,
MainClassCallback<T> callback) throws IOException {
List<JarEntry> classEntries = getClassEntries(jarFile, classesLocation);
classEntries.sort(new ClassEntryComparator());
for (JarEntry entry : classEntries) {
try (InputStream inputStream = new BufferedInputStream(jarFile.getInputStream(entry))) {
ClassDescriptor classDescriptor = createClassDescriptor(inputStream);
if (classDescriptor != null && classDescriptor.isMainMethodFound()) {
String className = convertToClassName(entry.getName(), classesLocation);
T result = callback.doWith(new MainClass(className, classDescriptor.getAnnotationNames()));
if (result != null) {
return result;
}
}
}
}
return null;
}
|
Perform the given callback operation on all main classes from the given jar.
@param <T> the result type
@param jarFile the jar file to search
@param classesLocation the location within the jar containing classes
@param callback the callback
@return the first callback result or {@code null}
@throws IOException in case of I/O errors
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/MainClassFinder.java
| 219
|
[
"jarFile",
"classesLocation",
"callback"
] |
T
| true
| 4
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
createBeanDefinition
|
public static AbstractBeanDefinition createBeanDefinition(
@Nullable String parentName, @Nullable String className, @Nullable ClassLoader classLoader) throws ClassNotFoundException {
GenericBeanDefinition bd = new GenericBeanDefinition();
bd.setParentName(parentName);
if (className != null) {
if (classLoader != null) {
bd.setBeanClass(ClassUtils.forName(className, classLoader));
}
else {
bd.setBeanClassName(className);
}
}
return bd;
}
|
Create a new GenericBeanDefinition for the given parent name and class name,
eagerly loading the bean class if a ClassLoader has been specified.
@param parentName the name of the parent bean, if any
@param className the name of the bean class, if any
@param classLoader the ClassLoader to use for loading bean classes
(can be {@code null} to just register bean classes by name)
@return the bean definition
@throws ClassNotFoundException if the bean class could not be loaded
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/BeanDefinitionReaderUtils.java
| 57
|
[
"parentName",
"className",
"classLoader"
] |
AbstractBeanDefinition
| true
| 3
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
isZip
|
private boolean isZip(InputStreamSupplier supplier) {
try {
try (InputStream inputStream = supplier.openStream()) {
return isZip(inputStream);
}
}
catch (IOException ex) {
return false;
}
}
|
Writes a signature file if necessary for the given {@code writtenLibraries}.
@param writtenLibraries the libraries
@param writer the writer to use to write the signature file if necessary
@throws IOException if a failure occurs when writing the signature file
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/Packager.java
| 282
|
[
"supplier"
] | true
| 2
| 6.56
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
getMainClass
|
@Override
protected String getMainClass() throws Exception {
String mainClass = getProperty(MAIN, "Start-Class");
if (mainClass == null) {
throw new IllegalStateException("No '%s' or 'Start-Class' specified".formatted(MAIN));
}
return mainClass;
}
|
Properties key for boolean flag (default false) which, if set, will cause the
external configuration properties to be copied to System properties (assuming that
is allowed by Java security).
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/launch/PropertiesLauncher.java
| 367
|
[] |
String
| true
| 2
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
writeLoaderClasses
|
private void writeLoaderClasses(AbstractJarWriter writer) throws IOException {
Layout layout = getLayout();
if (layout instanceof CustomLoaderLayout customLoaderLayout) {
customLoaderLayout.writeLoadedClasses(writer);
}
else if (layout.isExecutable()) {
writer.writeLoaderClasses();
}
}
|
Sets if jarmode jars relevant for the packaging should be automatically included.
@param includeRelevantJarModeJars if relevant jars are included
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/Packager.java
| 218
|
[
"writer"
] |
void
| true
| 3
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
_process_line_with_next_version_comment
|
def _process_line_with_next_version_comment(
line: str, pyproject_file: Path, updates_made: dict[str, dict[str, Any]]
) -> tuple[str, bool]:
"""
Process a line that contains the "# use next version" comment.
Returns:
Tuple of (processed_line, was_modified)
"""
# Extract the provider package name and current version constraint
# Format is typically: "apache-airflow-providers-xxx>=version", # use next version
match = re.search(r'"(apache-airflow-providers-[^">=<]+)>=([^",]+)"', line)
if not match:
# Comment found but couldn't parse the line
return line, False
provider_package_name = match.group(1)
current_min_version = match.group(2)
# Get the current version from the referenced provider
provider_version = _get_provider_version_from_package_name(provider_package_name)
if not provider_version:
return line, False
# Update the line with the new version
return _update_dependency_line_with_new_version(
line, provider_package_name, current_min_version, provider_version, pyproject_file, updates_made
)
|
Process a line that contains the "# use next version" comment.
Returns:
Tuple of (processed_line, was_modified)
|
python
|
dev/breeze/src/airflow_breeze/utils/packages.py
| 1,268
|
[
"line",
"pyproject_file",
"updates_made"
] |
tuple[str, bool]
| true
| 3
| 7.6
|
apache/airflow
| 43,597
|
unknown
| false
|
isIncluded
|
private boolean isIncluded(Class<?> candidate) {
for (MethodValidationExcludeFilter exclusionFilter : this.excludeFilters) {
if (exclusionFilter.isExcluded(candidate)) {
return false;
}
}
return true;
}
|
Creates a new {@code FilteredMethodValidationPostProcessor} that will apply the
given {@code excludeFilters} when identifying beans that are eligible for method
validation post-processing.
@param excludeFilters filters to apply
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/validation/beanvalidation/FilteredMethodValidationPostProcessor.java
| 71
|
[
"candidate"
] | true
| 2
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
eager_shape
|
def eager_shape(x: Array, /) -> tuple[int, ...]:
"""
Return shape of an array. Raise if shape is not fully defined.
Parameters
----------
x : Array
Input array.
Returns
-------
tuple[int, ...]
Shape of the array.
"""
shape = x.shape
# Dask arrays uses non-standard NaN instead of None
if any(s is None or math.isnan(s) for s in shape):
msg = "Unsupported lazy shape"
raise TypeError(msg)
return cast(tuple[int, ...], shape)
|
Return shape of an array. Raise if shape is not fully defined.
Parameters
----------
x : Array
Input array.
Returns
-------
tuple[int, ...]
Shape of the array.
|
python
|
sklearn/externals/array_api_extra/_lib/_utils/_helpers.py
| 253
|
[
"x"
] |
tuple[int, ...]
| true
| 3
| 7.04
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
lastSequence
|
@Override
public int lastSequence() {
int baseSequence = baseSequence();
if (baseSequence == RecordBatch.NO_SEQUENCE)
return RecordBatch.NO_SEQUENCE;
return incrementSequence(baseSequence, lastOffsetDelta());
}
|
Gets the base timestamp of the batch which is used to calculate the record timestamps from the deltas.
@return The base timestamp
|
java
|
clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java
| 208
|
[] | true
| 2
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
make_circles
|
def make_circles(
n_samples=100, *, shuffle=True, noise=None, random_state=None, factor=0.8
):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int or tuple of shape (2,), dtype=int, default=100
If int, it is the total number of points generated.
For odd numbers, the inner circle will have one point more than the
outer circle.
If two-element tuple, number of points in outer circle and inner
circle.
.. versionchanged:: 0.23
Added two-element tuple.
shuffle : bool, default=True
Whether to shuffle the samples.
noise : float, default=None
Standard deviation of Gaussian noise added to the data.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset shuffling and noise.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
factor : float, default=.8
Scale factor between inner and outer circle in the range `[0, 1)`.
Returns
-------
X : ndarray of shape (n_samples, 2)
The generated samples.
y : ndarray of shape (n_samples,)
The integer labels (0 or 1) for class membership of each sample.
Examples
--------
>>> from sklearn.datasets import make_circles
>>> X, y = make_circles(random_state=42)
>>> X.shape
(100, 2)
>>> y.shape
(100,)
>>> list(y[:5])
[np.int64(1), np.int64(1), np.int64(1), np.int64(0), np.int64(0)]
"""
if isinstance(n_samples, numbers.Integral):
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
else: # n_samples is a tuple
if len(n_samples) != 2:
raise ValueError("When a tuple, n_samples must have exactly two elements.")
n_samples_out, n_samples_in = n_samples
generator = check_random_state(random_state)
# so as not to have the first point = last point, we set endpoint=False
linspace_out = np.linspace(0, 2 * np.pi, n_samples_out, endpoint=False)
linspace_in = np.linspace(0, 2 * np.pi, n_samples_in, endpoint=False)
outer_circ_x = np.cos(linspace_out)
outer_circ_y = np.sin(linspace_out)
inner_circ_x = np.cos(linspace_in) * factor
inner_circ_y = np.sin(linspace_in) * factor
X = np.vstack(
[np.append(outer_circ_x, inner_circ_x), np.append(outer_circ_y, inner_circ_y)]
).T
y = np.hstack(
[np.zeros(n_samples_out, dtype=np.intp), np.ones(n_samples_in, dtype=np.intp)]
)
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
|
Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int or tuple of shape (2,), dtype=int, default=100
If int, it is the total number of points generated.
For odd numbers, the inner circle will have one point more than the
outer circle.
If two-element tuple, number of points in outer circle and inner
circle.
.. versionchanged:: 0.23
Added two-element tuple.
shuffle : bool, default=True
Whether to shuffle the samples.
noise : float, default=None
Standard deviation of Gaussian noise added to the data.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset shuffling and noise.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
factor : float, default=.8
Scale factor between inner and outer circle in the range `[0, 1)`.
Returns
-------
X : ndarray of shape (n_samples, 2)
The generated samples.
y : ndarray of shape (n_samples,)
The integer labels (0 or 1) for class membership of each sample.
Examples
--------
>>> from sklearn.datasets import make_circles
>>> X, y = make_circles(random_state=42)
>>> X.shape
(100, 2)
>>> y.shape
(100,)
>>> list(y[:5])
[np.int64(1), np.int64(1), np.int64(1), np.int64(0), np.int64(0)]
|
python
|
sklearn/datasets/_samples_generator.py
| 807
|
[
"n_samples",
"shuffle",
"noise",
"random_state",
"factor"
] | false
| 6
| 7.6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
to_frame
|
def to_frame(
self,
index: bool = True,
name=lib.no_default,
allow_duplicates: bool = False,
) -> DataFrame:
"""
Create a DataFrame with the levels of the MultiIndex as columns.
Column ordering is determined by the DataFrame constructor with data as
a dict.
Parameters
----------
index : bool, default True
Set the index of the returned DataFrame as the original MultiIndex.
name : list / sequence of str, optional
The passed names should substitute index level names.
allow_duplicates : bool, optional default False
Allow duplicate column labels to be created.
Returns
-------
DataFrame
DataFrame representation of the MultiIndex, with levels as columns.
See Also
--------
DataFrame : Two-dimensional, size-mutable, potentially heterogeneous
tabular data.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([["a", "b"], ["c", "d"]])
>>> mi
MultiIndex([('a', 'c'),
('b', 'd')],
)
>>> df = mi.to_frame()
>>> df
0 1
a c a c
b d b d
>>> df = mi.to_frame(index=False)
>>> df
0 1
0 a c
1 b d
>>> df = mi.to_frame(name=["x", "y"])
>>> df
x y
a c a c
b d b d
"""
from pandas import DataFrame
if name is not lib.no_default:
if not is_list_like(name):
raise TypeError("'name' must be a list / sequence of column names.")
if len(name) != len(self.levels):
raise ValueError(
"'name' should have same length as number of levels on index."
)
idx_names = name
else:
idx_names = self._get_level_names()
if not allow_duplicates and len(set(idx_names)) != len(idx_names):
raise ValueError(
"Cannot create duplicate column labels if allow_duplicates is False"
)
# Guarantee resulting column order - PY36+ dict maintains insertion order
result = DataFrame(
{level: self._get_level_values(level) for level in range(len(self.levels))},
copy=False,
)
result.columns = idx_names
if index:
result.index = self
return result
|
Create a DataFrame with the levels of the MultiIndex as columns.
Column ordering is determined by the DataFrame constructor with data as
a dict.
Parameters
----------
index : bool, default True
Set the index of the returned DataFrame as the original MultiIndex.
name : list / sequence of str, optional
The passed names should substitute index level names.
allow_duplicates : bool, optional default False
Allow duplicate column labels to be created.
Returns
-------
DataFrame
DataFrame representation of the MultiIndex, with levels as columns.
See Also
--------
DataFrame : Two-dimensional, size-mutable, potentially heterogeneous
tabular data.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([["a", "b"], ["c", "d"]])
>>> mi
MultiIndex([('a', 'c'),
('b', 'd')],
)
>>> df = mi.to_frame()
>>> df
0 1
a c a c
b d b d
>>> df = mi.to_frame(index=False)
>>> df
0 1
0 a c
1 b d
>>> df = mi.to_frame(name=["x", "y"])
>>> df
x y
a c a c
b d b d
|
python
|
pandas/core/indexes/multi.py
| 1,880
|
[
"self",
"index",
"name",
"allow_duplicates"
] |
DataFrame
| true
| 8
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
immutableEnumSet
|
public static <E extends Enum<E>> ImmutableSet<E> immutableEnumSet(Iterable<E> elements) {
if (elements instanceof ImmutableEnumSet) {
return (ImmutableEnumSet<E>) elements;
} else if (elements instanceof Collection) {
Collection<E> collection = (Collection<E>) elements;
if (collection.isEmpty()) {
return ImmutableSet.of();
} else {
return ImmutableEnumSet.asImmutable(EnumSet.copyOf(collection));
}
} else {
Iterator<E> itr = elements.iterator();
if (itr.hasNext()) {
EnumSet<E> enumSet = EnumSet.of(itr.next());
Iterators.addAll(enumSet, itr);
return ImmutableEnumSet.asImmutable(enumSet);
} else {
return ImmutableSet.of();
}
}
}
|
Returns an immutable set instance containing the given enum elements. Internally, the returned
set will be backed by an {@link EnumSet}.
<p>The iteration order of the returned set follows the enum's iteration order, not the order in
which the elements appear in the given collection.
@param elements the elements, all of the same {@code enum} type, that the set should contain
@return an immutable set containing those elements, minus duplicates
|
java
|
android/guava/src/com/google/common/collect/Sets.java
| 120
|
[
"elements"
] | true
| 5
| 7.92
|
google/guava
| 51,352
|
javadoc
| false
|
|
getElement
|
public String getElement(int elementIndex, Form form) {
CharSequence element = this.elements.get(elementIndex);
ElementType type = this.elements.getType(elementIndex);
if (type.isIndexed()) {
return element.toString();
}
if (form == Form.ORIGINAL) {
if (type != ElementType.NON_UNIFORM) {
return element.toString();
}
return convertToOriginalForm(element).toString();
}
if (form == Form.DASHED) {
if (type == ElementType.UNIFORM || type == ElementType.DASHED) {
return element.toString();
}
return convertToDashedElement(element).toString();
}
CharSequence uniformElement = this.uniformElements[elementIndex];
if (uniformElement == null) {
uniformElement = (type != ElementType.UNIFORM) ? convertToUniformElement(element) : element;
this.uniformElements[elementIndex] = uniformElement.toString();
}
return uniformElement.toString();
}
|
Return an element in the name in the given form.
@param elementIndex the element index
@param form the form to return
@return the last element
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyName.java
| 149
|
[
"elementIndex",
"form"
] |
String
| true
| 9
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
timestampType
|
TimestampType timestampType();
|
Get the timestamp type of this record batch. This will be {@link TimestampType#NO_TIMESTAMP_TYPE}
if the batch has magic 0.
@return The timestamp type
|
java
|
clients/src/main/java/org/apache/kafka/common/record/RecordBatch.java
| 101
|
[] |
TimestampType
| true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
tryAddBucket
|
boolean tryAddBucket(long index, long count, boolean isPositive) {
assert index >= MIN_INDEX && index <= MAX_INDEX : "index must be in range [" + MIN_INDEX + ".." + MAX_INDEX + "]";
assert isPositive || positiveBuckets.numBuckets == 0 : "Cannot add negative buckets after a positive bucket has been added";
assert count > 0 : "Cannot add a bucket with empty or negative count";
if (isPositive) {
return positiveBuckets.tryAddBucket(index, count);
} else {
return negativeBuckets.tryAddBucket(index, count);
}
}
|
Attempts to add a bucket to the positive or negative range of this histogram.
<br>
Callers must adhere to the following rules:
<ul>
<li>All buckets for the negative values range must be provided before the first one from the positive values range.</li>
<li>For both the negative and positive ranges, buckets must be provided with their indices in ascending order.</li>
<li>It is not allowed to provide the same bucket more than once.</li>
<li>It is not allowed to add empty buckets ({@code count <= 0}).</li>
</ul>
If any of these rules are violated, this call will fail with an exception.
If the bucket cannot be added because the maximum capacity has been reached, the call will not modify the state
of this histogram and will return {@code false}.
@param index the index of the bucket to add
@param count the count to associate with the given bucket
@param isPositive {@code true} if the bucket belongs to the positive range, {@code false} if it belongs to the negative range
@return {@code true} if the bucket was added, {@code false} if it could not be added due to insufficient capacity
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/FixedCapacityExponentialHistogram.java
| 175
|
[
"index",
"count",
"isPositive"
] | true
| 4
| 7.92
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
needs_i8_conversion
|
def needs_i8_conversion(dtype: DtypeObj | None) -> bool:
"""
Check whether the dtype should be converted to int64.
Dtype "needs" such a conversion if the dtype is of a datetime-like dtype
Parameters
----------
dtype : np.dtype, ExtensionDtype, or None
Returns
-------
boolean
Whether or not the dtype should be converted to int64.
Examples
--------
>>> needs_i8_conversion(str)
False
>>> needs_i8_conversion(np.int64)
False
>>> needs_i8_conversion(np.datetime64)
False
>>> needs_i8_conversion(np.dtype(np.datetime64))
True
>>> needs_i8_conversion(np.array(["a", "b"]))
False
>>> needs_i8_conversion(pd.Series([1, 2]))
False
>>> needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]"))
False
>>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
False
>>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern").dtype)
True
"""
if isinstance(dtype, np.dtype):
return dtype.kind in "mM"
return isinstance(dtype, (PeriodDtype, DatetimeTZDtype))
|
Check whether the dtype should be converted to int64.
Dtype "needs" such a conversion if the dtype is of a datetime-like dtype
Parameters
----------
dtype : np.dtype, ExtensionDtype, or None
Returns
-------
boolean
Whether or not the dtype should be converted to int64.
Examples
--------
>>> needs_i8_conversion(str)
False
>>> needs_i8_conversion(np.int64)
False
>>> needs_i8_conversion(np.datetime64)
False
>>> needs_i8_conversion(np.dtype(np.datetime64))
True
>>> needs_i8_conversion(np.array(["a", "b"]))
False
>>> needs_i8_conversion(pd.Series([1, 2]))
False
>>> needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]"))
False
>>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
False
>>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern").dtype)
True
|
python
|
pandas/core/dtypes/common.py
| 1,204
|
[
"dtype"
] |
bool
| true
| 2
| 7.84
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
head
|
def head(self, n: int = 5) -> Self:
"""
Return the first `n` rows.
This function exhibits the same behavior as ``df[:n]``, returning the
first ``n`` rows based on position. It is useful for quickly checking
if your object has the right type of data in it.
When ``n`` is positive, it returns the first ``n`` rows. For ``n`` equal to 0,
it returns an empty object. When ``n`` is negative, it returns
all rows except the last ``|n|`` rows, mirroring the behavior of ``df[:n]``.
If ``n`` is larger than the number of rows, this function returns all rows.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
same type as caller
The first `n` rows of the caller object.
See Also
--------
DataFrame.tail: Returns the last `n` rows.
Examples
--------
>>> df = pd.DataFrame(
... {
... "animal": [
... "alligator",
... "bee",
... "falcon",
... "lion",
... "monkey",
... "parrot",
... "shark",
... "whale",
... "zebra",
... ]
... }
... )
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
For negative values of `n`
>>> df.head(-3)
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
"""
return self.iloc[:n].copy()
|
Return the first `n` rows.
This function exhibits the same behavior as ``df[:n]``, returning the
first ``n`` rows based on position. It is useful for quickly checking
if your object has the right type of data in it.
When ``n`` is positive, it returns the first ``n`` rows. For ``n`` equal to 0,
it returns an empty object. When ``n`` is negative, it returns
all rows except the last ``|n|`` rows, mirroring the behavior of ``df[:n]``.
If ``n`` is larger than the number of rows, this function returns all rows.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
same type as caller
The first `n` rows of the caller object.
See Also
--------
DataFrame.tail: Returns the last `n` rows.
Examples
--------
>>> df = pd.DataFrame(
... {
... "animal": [
... "alligator",
... "bee",
... "falcon",
... "lion",
... "monkey",
... "parrot",
... "shark",
... "whale",
... "zebra",
... ]
... }
... )
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
For negative values of `n`
>>> df.head(-3)
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
|
python
|
pandas/core/generic.py
| 5,631
|
[
"self",
"n"
] |
Self
| true
| 1
| 7.2
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
combine
|
@CanIgnoreReturnValue
@SuppressWarnings("unchecked") // ArrayBasedBuilder stores its elements as Object.
Builder<E> combine(Builder<E> other) {
if (hashTable != null) {
for (int i = 0; i < other.size; ++i) {
// requireNonNull is safe because the first `size` elements are non-null.
add((E) requireNonNull(other.contents[i]));
}
} else {
addAll(other.contents, other.size);
}
return this;
}
|
Adds each element of {@code elements} to the {@code ImmutableSet}, ignoring duplicate
elements (only the first duplicate element is added).
@param elements the elements to add to the {@code ImmutableSet}
@return this {@code Builder} object
@throws NullPointerException if {@code elements} is null or contains a null element
|
java
|
android/guava/src/com/google/common/collect/ImmutableSet.java
| 577
|
[
"other"
] | true
| 3
| 7.76
|
google/guava
| 51,352
|
javadoc
| false
|
|
add
|
public static boolean[] add(final boolean[] array, final boolean element) {
final boolean[] newArray = (boolean[]) copyArrayGrow1(array, Boolean.TYPE);
newArray[newArray.length - 1] = element;
return newArray;
}
|
Copies the given array and adds the given element at the end of the new array.
<p>
The new array contains the same elements of the input
array plus the given element in the last position. The component type of
the new array is the same as that of the input array.
</p>
<p>
If the input array is {@code null}, a new one element array is returned
whose component type is the same as the element.
</p>
<pre>
ArrayUtils.add(null, true) = [true]
ArrayUtils.add([true], false) = [true, false]
ArrayUtils.add([true, false], true) = [true, false, true]
</pre>
@param array the array to copy and add the element to, may be {@code null}.
@param element the object to add at the last index of the new array.
@return A new array containing the existing elements plus the new element.
@since 2.1
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 221
|
[
"array",
"element"
] | true
| 1
| 6.88
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
createOffsetCommitRequest
|
private OffsetCommitRequestState createOffsetCommitRequest(final Map<TopicPartition, OffsetAndMetadata> offsets,
final long deadlineMs) {
return jitter.isPresent() ?
new OffsetCommitRequestState(
offsets,
groupId,
groupInstanceId,
deadlineMs,
retryBackoffMs,
retryBackoffMaxMs,
jitter.getAsDouble(),
memberInfo) :
new OffsetCommitRequestState(
offsets,
groupId,
groupInstanceId,
deadlineMs,
retryBackoffMs,
retryBackoffMaxMs,
memberInfo);
}
|
Commit offsets, retrying on expected retriable errors while the retry timeout hasn't expired.
@param offsets Offsets to commit
@param deadlineMs Time until which the request will be retried if it fails with
an expected retriable error.
@return Future that will complete when a successful response
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java
| 441
|
[
"offsets",
"deadlineMs"
] |
OffsetCommitRequestState
| true
| 2
| 7.76
|
apache/kafka
| 31,560
|
javadoc
| false
|
determinePointcutClassLoader
|
private @Nullable ClassLoader determinePointcutClassLoader() {
if (this.beanFactory instanceof ConfigurableBeanFactory cbf) {
return cbf.getBeanClassLoader();
}
if (this.pointcutDeclarationScope != null) {
return this.pointcutDeclarationScope.getClassLoader();
}
return ClassUtils.getDefaultClassLoader();
}
|
Determine the ClassLoader to use for pointcut evaluation.
|
java
|
spring-aop/src/main/java/org/springframework/aop/aspectj/AspectJExpressionPointcut.java
| 209
|
[] |
ClassLoader
| true
| 3
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
withScope
|
@Override
public Releasable withScope(Traceable traceable) {
final Context context = spans.get(traceable.getSpanId());
if (context != null && Span.fromContextOrNull(context).isRecording()) {
return context.makeCurrent()::close;
}
return () -> {};
}
|
Most of the examples of how to use the OTel API look something like this, where the span context
is automatically propagated:
<pre>{@code
Span span = tracer.spanBuilder("parent").startSpan();
try (Scope scope = parentSpan.makeCurrent()) {
// ...do some stuff, possibly creating further spans
} finally {
span.end();
}
}</pre>
This typically isn't useful in Elasticsearch, because a {@link Scope} can't be used across threads.
However, if a scope is active, then the APM agent can capture additional information, so this method
exists to make it possible to use scopes in the few situation where it makes sense.
@param traceable provides the ID of a currently-open span for which to open a scope.
@return a method to close the scope when you are finished with it.
|
java
|
modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracer.java
| 290
|
[
"traceable"
] |
Releasable
| true
| 3
| 7.92
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
hasNext
|
@Override
public final boolean hasNext() {
checkState(state != State.FAILED);
switch (state) {
case DONE:
return false;
case READY:
return true;
default:
}
return tryToComputeNext();
}
|
Returns {@code true} if the iteration has more elements, lazily computing the next element
if one has not already been computed. Fails with an {@code IllegalStateException} if a
previous attempt to compute an element threw an exception.
@return {@code true} if the iterator has another element to return
|
java
|
android/guava/src/com/google/common/collect/AbstractIterator.java
| 126
|
[] | true
| 1
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
|
containsSuperCall
|
function containsSuperCall(node: Node): boolean {
if (isSuperCall(node)) {
return true;
}
if (!(node.transformFlags & TransformFlags.ContainsLexicalSuper)) {
return false;
}
switch (node.kind) {
// stop at function boundaries
case SyntaxKind.ArrowFunction:
case SyntaxKind.FunctionExpression:
case SyntaxKind.FunctionDeclaration:
case SyntaxKind.Constructor:
case SyntaxKind.ClassStaticBlockDeclaration:
return false;
// only step into computed property names for class and object literal elements
case SyntaxKind.GetAccessor:
case SyntaxKind.SetAccessor:
case SyntaxKind.MethodDeclaration:
case SyntaxKind.PropertyDeclaration: {
const named = node as AccessorDeclaration | MethodDeclaration | PropertyDeclaration;
if (isComputedPropertyName(named.name)) {
return !!forEachChild(named.name, containsSuperCall);
}
return false;
}
}
return !!forEachChild(node, containsSuperCall);
}
|
Determines whether a node contains a `super` call, stopping at function boundaries
(arrow functions, function expressions/declarations, constructors, and static blocks)
and descending only into computed property names of class and object-literal members.
@param node The node to check for a lexical `super` call.
|
typescript
|
src/compiler/transformers/es2015.ts
| 1,216
|
[
"node"
] | true
| 4
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
ofNonNull
|
public static <L, R> MutablePair<L, R> ofNonNull(final Map.Entry<L, R> pair) {
return of(Objects.requireNonNull(pair, "pair"));
}
|
Creates a mutable pair from a map entry.
@param <L> the left element type
@param <R> the right element type
@param pair the existing map entry.
@return a mutable pair formed from the map entry
@throws NullPointerException if the pair is null.
@since 3.20
|
java
|
src/main/java/org/apache/commons/lang3/tuple/MutablePair.java
| 118
|
[
"pair"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
customizers
|
public ThreadPoolTaskExecutorBuilder customizers(ThreadPoolTaskExecutorCustomizer... customizers) {
Assert.notNull(customizers, "'customizers' must not be null");
return customizers(Arrays.asList(customizers));
}
|
Set the {@link ThreadPoolTaskExecutorCustomizer ThreadPoolTaskExecutorCustomizers}
that should be applied to the {@link ThreadPoolTaskExecutor}. Customizers are
applied in the order that they were added after builder configuration has been
applied. Setting this value will replace any previously configured customizers.
@param customizers the customizers to set
@return a new builder instance
@see #additionalCustomizers(ThreadPoolTaskExecutorCustomizer...)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/task/ThreadPoolTaskExecutorBuilder.java
| 243
|
[] |
ThreadPoolTaskExecutorBuilder
| true
| 1
| 6.16
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
conn_config
|
def conn_config(self) -> AwsConnectionWrapper:
"""Get the Airflow Connection object and wrap it in helper (cached)."""
connection = None
if self.aws_conn_id:
try:
connection = self.get_connection(self.aws_conn_id)
except AirflowNotFoundException:
self.log.warning(
"Unable to find AWS Connection ID '%s', switching to empty.", self.aws_conn_id
)
# In the TaskSDK's BaseHook, it only retrieves the connection via task-sdk. Since the AWS system testing infrastructure
# doesn't use task-sdk, this leads to an error which we handle below.
except ImportError as e:
if "SUPERVISOR_COMMS" in str(e):
self.log.exception(e)
else:
raise
return AwsConnectionWrapper(
conn=connection,
region_name=self._region_name,
botocore_config=self._config,
verify=self._verify,
)
|
Get the Airflow Connection object and wrap it in helper (cached).
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/base_aws.py
| 613
|
[
"self"
] |
AwsConnectionWrapper
| true
| 4
| 6
|
apache/airflow
| 43,597
|
unknown
| false
|
compute
|
public double compute(long... dataset) {
return computeInPlace(longsToDoubles(dataset));
}
|
Computes the quantile value of the given dataset.
@param dataset the dataset to do the calculation on, which must be non-empty, which will be
cast to doubles (with any associated lost of precision), and which will not be mutated by
this call (it is copied instead)
@return the quantile value
|
java
|
android/guava/src/com/google/common/math/Quantiles.java
| 265
|
[] | true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
|
createFetchRequest
|
protected FetchRequest.Builder createFetchRequest(final Node fetchTarget,
final FetchSessionHandler.FetchRequestData requestData) {
// Version 12 is the maximum version that could be used without topic IDs. See FetchRequest.json for schema
// changelog.
final short maxVersion = requestData.canUseTopicIds() ? ApiKeys.FETCH.latestVersion() : (short) 12;
final FetchRequest.Builder request = FetchRequest.Builder
.forConsumer(maxVersion, fetchConfig.maxWaitMs, fetchConfig.minBytes, requestData.toSend())
.isolationLevel(fetchConfig.isolationLevel)
.setMaxBytes(fetchConfig.maxBytes)
.metadata(requestData.metadata())
.removed(requestData.toForget())
.replaced(requestData.toReplace())
.rackId(fetchConfig.clientRackId);
log.debug("Sending {} {} to broker {}", fetchConfig.isolationLevel, requestData, fetchTarget);
// We add the node to the set of nodes with pending fetch requests before adding the
// listener because the future may have been fulfilled on another thread (e.g. during a
// disconnection being handled by the heartbeat thread) which will mean the listener
// will be invoked synchronously.
log.debug("Adding pending request for node {}", fetchTarget);
nodesWithPendingFetchRequests.add(fetchTarget.id());
return request;
}
|
Creates a new {@link FetchRequest fetch request} in preparation for sending to the Kafka cluster.
@param fetchTarget {@link Node} from which the fetch data will be requested
@param requestData {@link FetchSessionHandler.FetchRequestData} that represents the session data
@return {@link FetchRequest.Builder} that can be submitted to the broker
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java
| 310
|
[
"fetchTarget",
"requestData"
] | true
| 2
| 7.76
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
setHtmlTextToMimePart
|
private void setHtmlTextToMimePart(MimePart mimePart, String text) throws MessagingException {
if (getEncoding() != null) {
mimePart.setContent(text, CONTENT_TYPE_HTML + CONTENT_TYPE_CHARSET_SUFFIX + getEncoding());
}
else {
mimePart.setContent(text, CONTENT_TYPE_HTML);
}
}
|
Set the given plain text and HTML text as alternatives, offering
both options to the email client. Requires multipart mode.
<p><b>NOTE:</b> Invoke {@link #addInline} <i>after</i> {@code setText};
else, mail readers might not be able to resolve inline references correctly.
@param plainText the plain text for the message
@param htmlText the HTML text for the message
@throws MessagingException in case of errors
|
java
|
spring-context-support/src/main/java/org/springframework/mail/javamail/MimeMessageHelper.java
| 876
|
[
"mimePart",
"text"
] |
void
| true
| 2
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
setAll
|
public static <T> T[] setAll(final T[] array, final IntFunction<? extends T> generator) {
if (array != null && generator != null) {
Arrays.setAll(array, generator);
}
return array;
}
|
Sets all elements of the specified array, using the provided generator supplier to compute each element.
<p>
If the generator supplier throws an exception, it is relayed to the caller and the array is left in an indeterminate
state.
</p>
@param <T> type of elements of the array, may be {@code null}.
@param array array to be initialized, may be {@code null}.
@param generator a function accepting an index and producing the desired value for that position.
@return the input array
@since 3.13.0
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 6,748
|
[
"array",
"generator"
] | true
| 3
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
iterable_not_string
|
def iterable_not_string(obj: object) -> bool:
"""
Check if the object is an iterable but not a string.
Parameters
----------
obj : The object to check.
Returns
-------
is_iter_not_string : bool
Whether `obj` is a non-string iterable.
Examples
--------
>>> iterable_not_string([1, 2, 3])
True
>>> iterable_not_string("foo")
False
>>> iterable_not_string(1)
False
"""
return isinstance(obj, abc.Iterable) and not isinstance(obj, str)
|
Check if the object is an iterable but not a string.
Parameters
----------
obj : The object to check.
Returns
-------
is_iter_not_string : bool
Whether `obj` is a non-string iterable.
Examples
--------
>>> iterable_not_string([1, 2, 3])
True
>>> iterable_not_string("foo")
False
>>> iterable_not_string(1)
False
|
python
|
pandas/core/dtypes/inference.py
| 81
|
[
"obj"
] |
bool
| true
| 2
| 8.16
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
eventuallyClose
|
@CanIgnoreReturnValue
@ParametricNullness
public <C extends @Nullable Object & @Nullable AutoCloseable> C eventuallyClose(
@ParametricNullness C closeable, Executor closingExecutor) {
checkNotNull(closingExecutor);
if (closeable != null) {
list.add(closeable, closingExecutor);
}
return closeable;
}
|
Captures an object to be closed when a {@link ClosingFuture} pipeline is done.
<p>Be careful when targeting an older SDK than you are building against (most commonly when
building for Android): Ensure that any object you pass implements the interface not just in
your current SDK version but also at the oldest version you support. For example, <a
href="https://developer.android.com/sdk/api_diff/28/changes/android.media.MediaDrm#android.media.MediaDrm.close_added()">API
Level 28</a> is the first version in which {@code MediaDrm} is {@code AutoCloseable}. To
support older versions, pass a wrapper {@code AutoCloseable} with a method reference like
{@code mediaDrm::release}.
@param closeable the object to be closed
@param closingExecutor the object will be closed on this executor
@return the first argument
|
java
|
android/guava/src/com/google/common/util/concurrent/ClosingFuture.java
| 224
|
[
"closeable",
"closingExecutor"
] |
C
| true
| 2
| 7.44
|
google/guava
| 51,352
|
javadoc
| false
|
agentmain
|
public static void agentmain(String agentArgs, Instrumentation inst) {
final Class<?> initClazz;
try {
initClazz = Class.forName(agentArgs);
} catch (ClassNotFoundException e) {
throw new AssertionError("entitlement agent does could not find EntitlementInitialization", e);
}
final Method initMethod;
try {
initMethod = initClazz.getMethod("initialize", Instrumentation.class);
} catch (NoSuchMethodException e) {
throw new AssertionError("EntitlementInitialization missing initialize method", e);
}
try {
initMethod.invoke(null, inst);
} catch (IllegalAccessException | InvocationTargetException e) {
throw new AssertionError("entitlement initialization failed", e);
}
}
|
The agent main method
@param agentArgs arguments passed to the agent.For our agent, this is the class to load and use for Entitlement Initialization.
See e.g. {@code EntitlementsBootstrap#loadAgent}
@param inst The {@link Instrumentation} instance to use for injecting Entitlements checks
|
java
|
libs/entitlement/agent/src/main/java/org/elasticsearch/entitlement/agent/EntitlementAgent.java
| 34
|
[
"agentArgs",
"inst"
] |
void
| true
| 4
| 6.4
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
get_query_results_paginator
|
def get_query_results_paginator(
self,
query_execution_id: str,
max_items: int | None = None,
page_size: int | None = None,
starting_token: str | None = None,
) -> PageIterator | None:
"""
Fetch submitted Athena query results.
.. seealso::
- :external+boto3:py:class:`Athena.Paginator.GetQueryResults`
:param query_execution_id: Id of submitted athena query
:param max_items: The total number of items to return.
:param page_size: The size of each page.
:param starting_token: A token to specify where to start paginating.
:return: *None* if the query is in intermediate, failed, or cancelled
state. Otherwise a paginator to iterate through pages of results.
Call :meth`.build_full_result()` on the returned paginator to get all
results at once.
"""
query_state = self.check_query_status(query_execution_id)
if query_state is None:
self.log.error("Invalid Query state (null). Query execution id: %s", query_execution_id)
return None
if query_state in self.INTERMEDIATE_STATES or query_state in self.FAILURE_STATES:
self.log.error(
'Query is in "%s" state. Cannot fetch results, Query execution id: %s',
query_state,
query_execution_id,
)
return None
result_params = {
"QueryExecutionId": query_execution_id,
"PaginationConfig": {
"MaxItems": max_items,
"PageSize": page_size,
"StartingToken": starting_token,
},
}
paginator = self.get_conn().get_paginator("get_query_results")
return paginator.paginate(**result_params)
|
Fetch submitted Athena query results.
.. seealso::
- :external+boto3:py:class:`Athena.Paginator.GetQueryResults`
:param query_execution_id: Id of submitted athena query
:param max_items: The total number of items to return.
:param page_size: The size of each page.
:param starting_token: A token to specify where to start paginating.
:return: *None* if the query is in intermediate, failed, or cancelled
state. Otherwise a paginator to iterate through pages of results.
Call :meth`.build_full_result()` on the returned paginator to get all
results at once.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/athena.py
| 227
|
[
"self",
"query_execution_id",
"max_items",
"page_size",
"starting_token"
] |
PageIterator | None
| true
| 4
| 7.44
|
apache/airflow
| 43,597
|
sphinx
| false
|
purge
|
def purge(self, now=None):
# type: (float) -> None
"""Check oldest items and remove them if needed.
Arguments:
now (float): Time of purging -- by default right now.
This can be useful for unit testing.
"""
now = now or time.monotonic()
now = now() if isinstance(now, Callable) else now
if self.maxlen:
while len(self._data) > self.maxlen:
self.pop()
# time based expiring:
if self.expires:
while len(self._data) > self.minlen >= 0:
inserted_time, _ = self._heap[0]
if inserted_time + self.expires > now:
break # oldest item hasn't expired yet
self.pop()
|
Check oldest items and remove them if needed.
Arguments:
now (float): Time of purging -- by default right now.
This can be useful for unit testing.
|
python
|
celery/utils/collections.py
| 577
|
[
"self",
"now"
] | false
| 8
| 6.24
|
celery/celery
| 27,741
|
google
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.