function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
tensor_always_has_static_shape
|
def tensor_always_has_static_shape(
tensor: Union[torch.Tensor, Any],
is_tensor: bool,
tensor_source: Source,
) -> tuple[bool, Optional[TensorStaticReason]]:
"""
Given a tensor, source, and is_tensor flag, determine if a shape should be static.
Args:
tensor - the real tensor to evaluate, parameters force a static shape.
is_tensor - internal dynamo check, essentially "is_tensor": target_cls is TensorVariable,
tensors not in a TensorVariable for whatever reason are forced static.
Returns a tuple, where the first element is the bool of whether or not this tensor should have a static shape.
The second element is a TensorStaticReason, useful for passing to tensor_static_reason_to_message if needed.
"""
from .source import is_from_unspecialized_param_buffer_source
if (
tensor_source.guard_source.is_specialized_nn_module()
or tensor_source.guard_source.is_unspecialized_builtin_nn_module()
) and config.force_nn_module_property_static_shapes:
return True, TensorStaticReason.NN_MODULE_PROPERTY
if (
type(tensor) is torch.nn.Parameter
or is_from_unspecialized_param_buffer_source(tensor_source)
) and config.force_parameter_static_shapes:
return True, TensorStaticReason.PARAMETER
if not is_tensor:
return True, TensorStaticReason.NOT_TENSOR
return False, None
|
Given a tensor, source, and is_tensor flag, determine if a shape should be static.
Args:
tensor - the real tensor to evaluate, parameters force a static shape.
is_tensor - internal dynamo check, essentially "is_tensor": target_cls is TensorVariable,
tensors not in a TensorVariable for whatever reason are forced static.
Returns a tuple, where the first element is the bool of whether or not this tensor should have a static shape.
The second element is a TensorStaticReason, useful for passing to tensor_static_reason_to_message if needed.
|
python
|
torch/_dynamo/utils.py
| 3,902
|
[
"tensor",
"is_tensor",
"tensor_source"
] |
tuple[bool, Optional[TensorStaticReason]]
| true
| 8
| 6.72
|
pytorch/pytorch
| 96,034
|
google
| false
|
standardPollLastEntry
|
protected @Nullable Entry<E> standardPollLastEntry() {
Iterator<Entry<E>> entryIterator = descendingMultiset().entrySet().iterator();
if (!entryIterator.hasNext()) {
return null;
}
Entry<E> entry = entryIterator.next();
entry = Multisets.immutableEntry(entry.getElement(), entry.getCount());
entryIterator.remove();
return entry;
}
|
A sensible definition of {@link #pollLastEntry()} in terms of {@code
descendingMultiset().entrySet().iterator()}.
<p>If you override {@link #descendingMultiset()} or {@link #entrySet()}, you may wish to
override {@link #pollLastEntry()} to forward to this implementation.
|
java
|
android/guava/src/com/google/common/collect/ForwardingSortedMultiset.java
| 186
|
[] | true
| 2
| 6.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
unquote_header_value
|
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
:rtype: str
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != "\\\\":
return value.replace("\\\\", "\\").replace('\\"', '"')
return value
|
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
:rtype: str
|
python
|
src/requests/utils.py
| 432
|
[
"value",
"is_filename"
] | false
| 5
| 6.4
|
psf/requests
| 53,586
|
sphinx
| false
|
|
openStream
|
InputStream openStream() throws IOException;
|
Returns a new open {@link InputStream} at the beginning of the content.
@return a new {@link InputStream}
@throws IOException on IO error
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/InputStreamSupplier.java
| 37
|
[] |
InputStream
| true
| 1
| 6.32
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
toLongString
|
public static String toLongString(final TypeVariable<?> typeVariable) {
Objects.requireNonNull(typeVariable, "typeVariable");
final StringBuilder buf = new StringBuilder();
final GenericDeclaration d = typeVariable.getGenericDeclaration();
if (d instanceof Class<?>) {
Class<?> c = (Class<?>) d;
while (true) {
if (c.getEnclosingClass() == null) {
buf.insert(0, c.getName());
break;
}
buf.insert(0, c.getSimpleName()).insert(0, '.');
c = c.getEnclosingClass();
}
} else if (d instanceof Type) { // not possible as of now
buf.append(toString((Type) d));
} else {
buf.append(d);
}
return buf.append(':').append(typeVariableToString(typeVariable)).toString();
}
|
Formats a {@link TypeVariable} including its {@link GenericDeclaration}.
@param typeVariable the type variable to create a String representation for, not {@code null}.
@return String.
@throws NullPointerException if {@code typeVariable} is {@code null}.
@since 3.2
|
java
|
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
| 1,507
|
[
"typeVariable"
] |
String
| true
| 5
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
nansem
|
def nansem(
values: np.ndarray,
*,
axis: AxisInt | None = None,
skipna: bool = True,
ddof: int = 1,
mask: npt.NDArray[np.bool_] | None = None,
) -> float:
"""
Compute the standard error in the mean along given axis while ignoring NaNs
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float64
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> from pandas.core import nanops
>>> s = pd.Series([1, np.nan, 2, 3])
>>> nanops.nansem(s.values)
np.float64(0.5773502691896258)
"""
# This checks if non-numeric-like data is passed with numeric_only=False
# and raises a TypeError otherwise
nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask)
mask = _maybe_get_mask(values, skipna, mask)
if values.dtype.kind != "f":
values = values.astype("f8")
if not skipna and mask is not None and mask.any():
return np.nan
count, _ = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype)
var = nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask)
return np.sqrt(var) / np.sqrt(count)
|
Compute the standard error in the mean along given axis while ignoring NaNs
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float64
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> from pandas.core import nanops
>>> s = pd.Series([1, np.nan, 2, 3])
>>> nanops.nansem(s.values)
np.float64(0.5773502691896258)
|
python
|
pandas/core/nanops.py
| 1,038
|
[
"values",
"axis",
"skipna",
"ddof",
"mask"
] |
float
| true
| 5
| 8.48
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
determineReplacementMetadata
|
private @Nullable ConfigurationMetadataProperty determineReplacementMetadata(
ConfigurationMetadataProperty metadata) {
String replacementId = metadata.getDeprecation().getReplacement();
if (StringUtils.hasText(replacementId)) {
ConfigurationMetadataProperty replacement = this.allProperties.get(replacementId);
if (replacement != null) {
return replacement;
}
return detectMapValueReplacement(replacementId);
}
return null;
}
|
Analyse the {@link ConfigurableEnvironment environment} and attempt to rename
legacy properties if a replacement exists.
@return a report of the migration
|
java
|
core/spring-boot-properties-migrator/src/main/java/org/springframework/boot/context/properties/migrator/PropertiesMigrationReporter.java
| 157
|
[
"metadata"
] |
ConfigurationMetadataProperty
| true
| 3
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getAndDecrement
|
public int getAndDecrement() {
final int last = value;
value--;
return last;
}
|
Decrements this instance's value by 1; this method returns the value associated with the instance
immediately prior to the decrement operation. This method is not thread safe.
@return the value associated with the instance before it was decremented.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableInt.java
| 234
|
[] | true
| 1
| 7.04
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
polyvander
|
def polyvander(x, deg):
"""Vandermonde matrix of given degree.
Returns the Vandermonde matrix of degree `deg` and sample points
`x`. The Vandermonde matrix is defined by
.. math:: V[..., i] = x^i,
where ``0 <= i <= deg``. The leading indices of `V` index the elements of
`x` and the last index is the power of `x`.
If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the
matrix ``V = polyvander(x, n)``, then ``np.dot(V, c)`` and
``polyval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of polynomials of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray.
The Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where the last index is the power of `x`.
The dtype will be the same as the converted `x`.
See Also
--------
polyvander2d, polyvander3d
Examples
--------
The Vandermonde matrix of degree ``deg = 5`` and sample points
``x = [-1, 2, 3]`` contains the element-wise powers of `x`
from 0 to 5 as its columns.
>>> from numpy.polynomial import polynomial as P
>>> x, deg = [-1, 2, 3], 5
>>> P.polyvander(x=x, deg=deg)
array([[ 1., -1., 1., -1., 1., -1.],
[ 1., 2., 4., 8., 16., 32.],
[ 1., 3., 9., 27., 81., 243.]])
"""
ideg = pu._as_int(deg, "deg")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=None, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
v[0] = x * 0 + 1
if ideg > 0:
v[1] = x
for i in range(2, ideg + 1):
v[i] = v[i - 1] * x
return np.moveaxis(v, 0, -1)
|
Vandermonde matrix of given degree.
Returns the Vandermonde matrix of degree `deg` and sample points
`x`. The Vandermonde matrix is defined by
.. math:: V[..., i] = x^i,
where ``0 <= i <= deg``. The leading indices of `V` index the elements of
`x` and the last index is the power of `x`.
If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the
matrix ``V = polyvander(x, n)``, then ``np.dot(V, c)`` and
``polyval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of polynomials of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray.
The Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where the last index is the power of `x`.
The dtype will be the same as the converted `x`.
See Also
--------
polyvander2d, polyvander3d
Examples
--------
The Vandermonde matrix of degree ``deg = 5`` and sample points
``x = [-1, 2, 3]`` contains the element-wise powers of `x`
from 0 to 5 as its columns.
>>> from numpy.polynomial import polynomial as P
>>> x, deg = [-1, 2, 3], 5
>>> P.polyvander(x=x, deg=deg)
array([[ 1., -1., 1., -1., 1., -1.],
[ 1., 2., 4., 8., 16., 32.],
[ 1., 3., 9., 27., 81., 243.]])
|
python
|
numpy/polynomial/polynomial.py
| 1,074
|
[
"x",
"deg"
] | false
| 4
| 7.6
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
compile_fx
|
def compile_fx(
model_: GraphModule,
example_inputs_: Sequence[InputType],
inner_compile: Callable[..., OutputCode] = compile_fx_inner,
config_patches: Optional[dict[str, Any]] = None,
decompositions: Optional[dict[OpOverload, Callable[..., Any]]] = None,
ignore_shape_env: bool = False,
) -> CompileFxOutput:
"""
Main entry point for compiling given FX graph. Despite the fact that this
lives in :mod:`torch._inductor`, this function is responsible for calling
into AOT Autograd (and we will eventually get a callback to
``inner_compile`` to perform actual compilation. In other words, this
function orchestrates end-to-end compilation for the inductor backend when
you use :func:`torch.compile`.
NB: This function TAKES OWNERSHIP of the input ``model_`` and can potentially
mutate it! Make a copy if you need to preserve the original GraphModule.
"""
# Some arguments trigger a recursive call to compile_fx. Handle these
# short circuits first, before anything else
from torch._inductor.compiler_bisector import CompilerBisector
if CompilerBisector.disable_subsystem("inductor", "pre_grad_graph"):
# pyrefly: ignore [bad-return]
return model_
if config_patches:
with config.patch(config_patches):
return compile_fx(
model_,
example_inputs_,
# need extra layer of patching as backwards is compiled out of scope
inner_compile=config.patch(config_patches)(inner_compile),
decompositions=decompositions,
ignore_shape_env=ignore_shape_env,
)
# Wake up the AsyncCompile subproc pool as early as possible (if there's cuda).
if any(
isinstance(e, torch.Tensor) and e.device.type in ("cuda", "xpu")
for e in example_inputs_
):
torch._inductor.async_compile.AsyncCompile.wakeup()
if config.cpp_wrapper or config.fx_wrapper:
from torch._export.non_strict_utils import _fakify_script_objects
cpp_wrapper_config = config.cpp_wrapper
fx_wrapper_config = config.fx_wrapper
with (
config.patch(get_cpp_wrapper_config()),
V.set_real_inputs(example_inputs_),
):
inputs_: Sequence[InputType] = (
_extract_inputs_from_exported_gm(model_, example_inputs_)
if isinstance(model_, GraphModule)
else example_inputs_
)
fake_mode = detect_fake_mode(inputs_)
with _fakify_script_objects(model_, inputs_, {}, fake_mode) as (
patched_mod,
fake_args,
_,
_,
_,
):
return _maybe_wrap_and_compile_fx_main(
patched_mod,
fake_args,
inner_compile=functools.partial(
inner_compile,
cpp_wrapper=cpp_wrapper_config,
fx_wrapper=fx_wrapper_config,
),
decompositions=decompositions,
ignore_shape_env=ignore_shape_env,
)
return _maybe_wrap_and_compile_fx_main(
model_,
example_inputs_,
inner_compile,
decompositions,
ignore_shape_env,
)
|
Main entry point for compiling given FX graph. Despite the fact that this
lives in :mod:`torch._inductor`, this function is responsible for calling
into AOT Autograd (and we will eventually get a callback to
``inner_compile`` to perform actual compilation. In other words, this
function orchestrates end-to-end compilation for the inductor backend when
you use :func:`torch.compile`.
NB: This function TAKES OWNERSHIP of the input ``model_`` and can potentially
mutate it! Make a copy if you need to preserve the original GraphModule.
|
python
|
torch/_inductor/compile_fx.py
| 2,464
|
[
"model_",
"example_inputs_",
"inner_compile",
"config_patches",
"decompositions",
"ignore_shape_env"
] |
CompileFxOutput
| true
| 8
| 6.8
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
select_as_coordinates
|
def select_as_coordinates(
self,
key: str,
where=None,
start: int | None = None,
stop: int | None = None,
):
"""
return the selection as an Index
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
"""
where = _ensure_term(where, scope_level=1)
tbl = self.get_storer(key)
if not isinstance(tbl, Table):
raise TypeError("can only read_coordinates with a table")
return tbl.read_coordinates(where=where, start=start, stop=stop)
|
return the selection as an Index
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
|
python
|
pandas/io/pytables.py
| 938
|
[
"self",
"key",
"where",
"start",
"stop"
] | true
| 2
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
getNumPendingMessagesInQueue
|
int64_t getNumPendingMessagesInQueue() const {
if (eventBase_) {
eventBase_->dcheckIsInEventBaseThread();
}
int64_t numMsgs = 0;
for (const auto& callback : callbacks_) {
if (callback.consumer) {
numMsgs += callback.consumer->getQueue().size();
}
}
return numMsgs;
}
|
Get the current number of unprocessed messages in NotificationQueue.
This method must be invoked from the AsyncServerSocket's primary
EventBase thread. Use EventBase::runInEventBaseThread() to schedule the
operation in the correct EventBase if your code is not in the server
socket's primary EventBase.
|
cpp
|
folly/io/async/AsyncServerSocket.h
| 704
|
[] | true
| 3
| 6.4
|
facebook/folly
| 30,157
|
doxygen
| false
|
|
truncate
|
public static Calendar truncate(final Calendar date, final int field) {
Objects.requireNonNull(date, "date");
return modify((Calendar) date.clone(), field, ModifyType.TRUNCATE);
}
|
Truncates a date, leaving the field specified as the most
significant field.
<p>For example, if you had the date-time of 28 Mar 2002
13:45:01.231, if you passed with HOUR, it would return 28 Mar
2002 13:00:00.000. If this was passed with MONTH, it would
return 1 Mar 2002 0:00:00.000.</p>
@param date the date to work with, not null.
@param field the field from {@link Calendar} or {@code SEMI_MONTH}.
@return the different truncated date, not null.
@throws NullPointerException if the date is {@code null}.
@throws ArithmeticException if the year is over 280 million.
|
java
|
src/main/java/org/apache/commons/lang3/time/DateUtils.java
| 1,720
|
[
"date",
"field"
] |
Calendar
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
startFinalizer
|
public static void startFinalizer(
Class<?> finalizableReferenceClass,
ReferenceQueue<Object> queue,
PhantomReference<Object> frqReference) {
/*
* We use FinalizableReference.class for two things:
*
* 1) To invoke FinalizableReference.finalizeReferent()
*
* 2) To detect when FinalizableReference's class loader has to be garbage collected, at which
* point, Finalizer can stop running
*/
if (!finalizableReferenceClass.getName().equals(FINALIZABLE_REFERENCE)) {
throw new IllegalArgumentException("Expected " + FINALIZABLE_REFERENCE + ".");
}
Finalizer finalizer = new Finalizer(finalizableReferenceClass, queue, frqReference);
String threadName = Finalizer.class.getName();
Thread thread = null;
if (bigThreadConstructor != null) {
try {
boolean inheritThreadLocals = false;
long defaultStackSize = 0;
thread =
bigThreadConstructor.newInstance(
(ThreadGroup) null, finalizer, threadName, defaultStackSize, inheritThreadLocals);
} catch (Throwable t) {
logger.log(
Level.INFO, "Failed to create a thread without inherited thread-local values", t);
}
}
if (thread == null) {
thread = new Thread((ThreadGroup) null, finalizer, threadName);
}
thread.setDaemon(true);
try {
if (inheritableThreadLocals != null) {
inheritableThreadLocals.set(thread, null);
}
} catch (Throwable t) {
logger.log(
Level.INFO,
"Failed to clear thread local values inherited by reference finalizer thread.",
t);
}
thread.start();
}
|
Starts the Finalizer thread. FinalizableReferenceQueue calls this method reflectively.
@param finalizableReferenceClass FinalizableReference.class.
@param queue a reference queue that the thread will poll.
@param frqReference a phantom reference to the FinalizableReferenceQueue, which will be queued
either when the FinalizableReferenceQueue is no longer referenced anywhere, or when its
close() method is called.
|
java
|
android/guava/src/com/google/common/base/internal/Finalizer.java
| 62
|
[
"finalizableReferenceClass",
"queue",
"frqReference"
] |
void
| true
| 7
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
emitPos
|
function emitPos(pos: number) {
if (sourceMapsDisabled || positionIsSynthesized(pos) || isJsonSourceMapSource(sourceMapSource)) {
return;
}
const { line: sourceLine, character: sourceCharacter } = getLineAndCharacterOfPosition(sourceMapSource, pos);
sourceMapGenerator!.addMapping(
writer.getLine(),
writer.getColumn(),
sourceMapSourceIndex,
sourceLine,
sourceCharacter,
/*nameIndex*/ undefined,
);
}
|
Emits a mapping.
If the position is synthetic (undefined or a negative value), no mapping will be
created.
@param pos The position.
|
typescript
|
src/compiler/emitter.ts
| 6,211
|
[
"pos"
] | false
| 4
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
enforce_output_layout
|
def enforce_output_layout(gm: torch.fx.GraphModule):
"""
Make sure the output node's layout does not change due to compiler optimizations
by adding aten.as_strided nodes with the expected strides.
Only used for inference so we can assume all graph outputs are model outputs.
"""
*_, output_node = gm.graph.nodes
out_list = output_node.args[0]
with gm.graph.inserting_before(output_node):
for n in out_list:
if not isinstance(
n.meta["val"], torch.Tensor
) or not torch._prims_common.is_non_overlapping_and_dense(n.meta["val"]):
continue
# add a node to enforce eager layout
ft = n.meta["val"]
new_node = gm.graph.call_function(
prims.inductor_force_stride_order.default, (n, ft.stride())
)
# can not call
# n.replace_all_uses_with(new_node)
# since it will replace the usage of n in new_node itself.
output_node.replace_input_with(n, new_node)
gm.graph.lint()
gm.recompile()
|
Make sure the output node's layout does not change due to compiler optimizations
by adding aten.as_strided nodes with the expected strides.
Only used for inference so we can assume all graph outputs are model outputs.
|
python
|
torch/_inductor/freezing.py
| 210
|
[
"gm"
] | true
| 4
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
|
recordStats
|
@CanIgnoreReturnValue
public CacheBuilder<K, V> recordStats() {
statsCounterSupplier = CACHE_STATS_COUNTER;
return this;
}
|
Enable the accumulation of {@link CacheStats} during the operation of the cache. Without this
{@link Cache#stats} will return zero for all statistics. Note that recording stats requires
bookkeeping to be performed with each operation, and thus imposes a performance penalty on
cache operation.
@return this {@code CacheBuilder} instance (for chaining)
@since 12.0 (previously, stats collection was automatic)
|
java
|
android/guava/src/com/google/common/cache/CacheBuilder.java
| 1,010
|
[] | true
| 1
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
|
__init__
|
def __init__(self, accumulator_node_name: str, removed_buffers: OrderedSet[str]):
"""
Initializes a CutlassEVTEpilogueArgumentFormatter object. Do not instantiate directly.
Use the CutlassEVTCodegen.ir_to_evt_python_code static method.
Args:
accumulator_node_name: The name of the accumulator node which should contain
the Matmul result before fusion according to the IR graph.
epilogue_nodes: The list of scheduler nodes to be fused into the epilogue
"""
self.accumulator_node_name: str = accumulator_node_name #
self.body: IndentedBuffer = IndentedBuffer(1) # The body buffer for codegen
self.var_counter: Iterator[int] = itertools.count()
self.store_name_to_value: dict[str, OpsValue] = (
dict()
) # Aliases for subexpression functors
self.reads: OrderedSet[str] = OrderedSet([])
# Used for creating example tensors
self.var_name_to_buffer_name: dict[str, str] = {
_ACCUMULATOR_ARG_NAME: accumulator_node_name
}
self.removed_buffers: OrderedSet[str] = removed_buffers
self.cur_node: Optional[ComputedBuffer] = None
self.name_to_buffer = V.graph.name_to_buffer | V.graph.graph_inputs
for name in V.graph.constants:
self.name_to_buffer[name] = V.graph.add_tensor_constant(
V.graph.constants[name], name
)
self.is_D_assigned = False
self.D_var_name = None
if accumulator_node_name not in removed_buffers:
# cannot return accumulator directly, so alias it
var = self._tmp_var()
self.body.writeline(f"{var} = {_ACCUMULATOR_ARG_NAME}")
self.store(accumulator_node_name, value=OpsValue(var))
|
Initializes a CutlassEVTEpilogueArgumentFormatter object. Do not instantiate directly.
Use the CutlassEVTCodegen.ir_to_evt_python_code static method.
Args:
accumulator_node_name: The name of the accumulator node which should contain
the Matmul result before fusion according to the IR graph.
epilogue_nodes: The list of scheduler nodes to be fused into the epilogue
|
python
|
torch/_inductor/codegen/cuda/cutlass_python_evt.py
| 146
|
[
"self",
"accumulator_node_name",
"removed_buffers"
] | true
| 3
| 6.4
|
pytorch/pytorch
| 96,034
|
google
| false
|
|
get_topological_order
|
def get_topological_order(self) -> list[str]:
"""
Get nodes in topological order (dependencies before dependents).
Returns:
List of node IDs in topological order
"""
visited = set()
temp_visited = set()
result = []
def visit(node_id: str):
if node_id in temp_visited:
raise ValueError(f"Cycle detected involving node {node_id}")
if node_id in visited:
return
temp_visited.add(node_id)
node = self.nodes[node_id]
# Visit all input nodes first
for input_node_id in node.input_nodes:
if input_node_id in self.nodes: # Skip external inputs
visit(input_node_id)
temp_visited.remove(node_id)
visited.add(node_id)
result.append(node_id)
# Start from all nodes to handle disconnected components
for node_id in self.nodes:
if node_id not in visited:
visit(node_id)
return result
|
Get nodes in topological order (dependencies before dependents).
Returns:
List of node IDs in topological order
|
python
|
tools/experimental/torchfuzz/ops_fuzzer.py
| 156
|
[
"self"
] |
list[str]
| true
| 7
| 7.44
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
on_callback
|
def on_callback(self, callback, **header) -> dict:
"""Method that is called on callback stamping.
Arguments:
callback (Signature): callback that is stamped.
headers (Dict): Partial headers that could be merged with existing headers.
Returns:
Dict: headers to update.
"""
return {}
|
Method that is called on callback stamping.
Arguments:
callback (Signature): callback that is stamped.
headers (Dict): Partial headers that could be merged with existing headers.
Returns:
Dict: headers to update.
|
python
|
celery/canvas.py
| 208
|
[
"self",
"callback"
] |
dict
| true
| 1
| 6.56
|
celery/celery
| 27,741
|
google
| false
|
nodeIfOnline
|
public Optional<Node> nodeIfOnline(TopicPartition partition, int id) {
Node node = nodeById(id);
PartitionInfo partitionInfo = partition(partition);
if (node != null && partitionInfo != null &&
!Arrays.asList(partitionInfo.offlineReplicas()).contains(node) &&
Arrays.asList(partitionInfo.replicas()).contains(node)) {
return Optional.of(node);
} else {
return Optional.empty();
}
}
|
Get the node by node id if the replica for the given partition is online
@param partition The TopicPartition
@param id The node id
@return the node
|
java
|
clients/src/main/java/org/apache/kafka/common/Cluster.java
| 253
|
[
"partition",
"id"
] | true
| 5
| 7.28
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
get_provider_info_dict
|
def get_provider_info_dict(provider_id: str) -> dict[str, Any]:
"""Retrieves provider info from the provider yaml file.
:param provider_id: package id to retrieve provider.yaml from
:return: provider_info dictionary
"""
provider_yaml_dict = get_provider_distributions_metadata().get(provider_id)
if provider_yaml_dict:
provider_yaml_dict = filter_provider_info_data(provider_yaml_dict)
validate_provider_info_with_runtime_schema(provider_yaml_dict)
return provider_yaml_dict or {}
|
Retrieves provider info from the provider yaml file.
:param provider_id: package id to retrieve provider.yaml from
:return: provider_info dictionary
|
python
|
dev/breeze/src/airflow_breeze/utils/packages.py
| 239
|
[
"provider_id"
] |
dict[str, Any]
| true
| 3
| 7.76
|
apache/airflow
| 43,597
|
sphinx
| false
|
resolve
|
@SuppressWarnings("unchecked")
public <T> @Nullable T resolve(RegisteredBean registeredBean) {
Assert.notNull(registeredBean, "'registeredBean' must not be null");
return (T) (isLazyLookup(registeredBean) ? buildLazyResourceProxy(registeredBean) :
resolveValue(registeredBean));
}
|
Resolve the value for the specified registered bean.
@param registeredBean the registered bean
@return the resolved field or method parameter value
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/ResourceElementResolver.java
| 119
|
[
"registeredBean"
] |
T
| true
| 2
| 7.28
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getInt
|
public int getInt(int index) throws JSONException {
Object object = get(index);
Integer result = JSON.toInteger(object);
if (result == null) {
throw JSON.typeMismatch(index, object, "int");
}
return result;
}
|
Returns the value at {@code index} if it exists and is an int or can be coerced to
an int.
@param index the index to get the value from
@return the {@code value}
@throws JSONException if the value at {@code index} doesn't exist or cannot be
coerced to an int.
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONArray.java
| 406
|
[
"index"
] | true
| 2
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
resolveCommand
|
function resolveCommand(command: string): string {
// Commands known to require .cmd on Windows (node-based & shim-installed)
const WINDOWS_SHIM_COMMANDS = new Set([
'npm',
'npx',
'pnpm',
'yarn',
'ng',
// Anything installed via node_modules/.bin (vite, eslint, prettier, etc)
// can be added here as needed. Do NOT list native executables.
]);
if (process.platform !== 'win32') {
return command;
}
if (WINDOWS_SHIM_COMMANDS.has(command)) {
return `${command}.cmd`;
}
return command;
}
|
Resolve the actual executable name for a given command on the current platform.
Why this exists:
- Many Node-based CLIs (npm, npx, pnpm, yarn, vite, eslint, anything in node_modules/.bin) do NOT
ship as real executables on Windows.
- Instead, they install *.cmd and *.ps1 “shim” files.
- When using execa/child_process with `shell: false` (our default), Node WILL NOT resolve these
shims. -> calling execa("npx") throws ENOENT on Windows.
This helper normalizes command names so they can be spawned cross-platform without using `shell:
true`.
Rules:
- If on Windows:
- For known shim-based commands, append `.cmd` (e.g., "npx" → "npx.cmd").
- For everything else, return the name unchanged.
- On non-Windows, return command unchanged.
Open for extension:
- Add new commands to `WINDOWS_SHIM_COMMANDS` as needed.
- If Storybook adds new internal commands later, extend the list.
@param {string} command - The executable name passed into executeCommand.
@returns {string} - The normalized executable name safe for passing to execa.
|
typescript
|
code/core/src/common/utils/command.ts
| 115
|
[
"command"
] | true
| 3
| 7.04
|
storybookjs/storybook
| 88,865
|
jsdoc
| false
|
|
maybeSeekUnvalidated
|
synchronized void maybeSeekUnvalidated(TopicPartition tp, FetchPosition position, AutoOffsetResetStrategy requestedResetStrategy) {
TopicPartitionState state = assignedStateOrNull(tp);
if (state == null) {
log.debug("Skipping reset of partition {} since it is no longer assigned", tp);
} else if (!state.awaitingReset()) {
log.debug("Skipping reset of partition {} since reset is no longer needed", tp);
} else if (requestedResetStrategy != null && !requestedResetStrategy.equals(state.resetStrategy)) {
log.debug("Skipping reset of partition {} since an alternative reset has been requested", tp);
} else {
log.info("Resetting offset for partition {} to position {}.", tp, position);
state.seekUnvalidated(position);
}
}
|
Get the subscription topics for which metadata is required. For the leader, this will include
the union of the subscriptions of all group members. For followers, it is just that member's
subscription. This is used when querying topic metadata to detect the metadata changes which would
require rebalancing. The leader fetches metadata for all topics in the group so that it
can do the partition assignment (which requires at least partition counts for all topics
to be assigned).
@return The union of all subscribed topics in the group if this member is the leader
of the current generation; otherwise it returns the same set as {@link #subscription()}
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
| 449
|
[
"tp",
"position",
"requestedResetStrategy"
] |
void
| true
| 5
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
gen_attr_descriptor_import
|
def gen_attr_descriptor_import() -> str:
"""
import AttrsDescriptor if the triton version is new enough to have this
class defined.
"""
if not has_triton_package():
return ""
import triton.compiler.compiler
# Note: this works because triton.compiler.compiler imports AttrsDescriptor from triton.backends.compiler
# When support for the legacy AttrsDescriptor is removed then this import path should be changed.
if hasattr(triton.compiler.compiler, "AttrsDescriptor"):
return "from triton.compiler.compiler import AttrsDescriptor"
else:
return ""
|
import AttrsDescriptor if the triton version is new enough to have this
class defined.
|
python
|
torch/_inductor/codegen/triton.py
| 168
|
[] |
str
| true
| 4
| 6.72
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
isGzip
|
public static boolean isGzip(Path path) throws IOException {
try (InputStream is = Files.newInputStream(path); InputStream gzis = new GZIPInputStream(is)) {
gzis.read(); // nooping, the point is just whether it's a gzip or not
return true;
} catch (ZipException e) {
return false;
}
}
|
Read the database type from the database. We do this manually instead of relying on the built-in mechanism to avoid reading the
entire database into memory merely to read the type. This is especially important to maintain on master nodes where pipelines are
validated. If we read the entire database into memory, we could potentially run into low-memory constraints on such nodes where
loading this data would otherwise be wasteful if they are not also ingest nodes.
@return the database type
@throws IOException if an I/O exception occurs reading the database type
|
java
|
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/MMDBUtil.java
| 104
|
[
"path"
] | true
| 2
| 7.04
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
putBytes
|
@CanIgnoreReturnValue
PrimitiveSink putBytes(byte[] bytes, int off, int len);
|
Puts a chunk of an array of bytes into this sink. {@code bytes[off]} is the first byte written,
{@code bytes[off + len - 1]} is the last.
@param bytes a byte array
@param off the start offset in the array
@param len the number of bytes to write
@return this instance
@throws IndexOutOfBoundsException if {@code off < 0} or {@code off + len > bytes.length} or
{@code len < 0}
|
java
|
android/guava/src/com/google/common/hash/PrimitiveSink.java
| 59
|
[
"bytes",
"off",
"len"
] |
PrimitiveSink
| true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
renameSpecialKeysFlexible
|
static void renameSpecialKeysFlexible(IngestDocument document) {
RENAME_KEYS.forEach((nonOtelName, otelName) -> {
boolean fieldExists = false;
Object value = null;
if (document.hasField(nonOtelName)) {
// Dotted fields are treated the same as normalized fields in flexible mode
fieldExists = true;
value = document.getFieldValue(nonOtelName, Object.class, true);
document.removeField(nonOtelName);
// recursively remove empty parent fields
int lastDot = nonOtelName.lastIndexOf('.');
while (lastDot > 0) {
String parentName = nonOtelName.substring(0, lastDot);
// In flexible mode, dotted field names can be removed. Parent paths may not exist since they might be included
// by the dotted field removal (e.g. For the doc {a:{b.c:1}}, removing a.b.c will not leave an a.b field because
// there is no a.b field to start with.
@SuppressWarnings("unchecked")
Map<String, Object> parent = document.getFieldValue(parentName, Map.class, true);
if (parent != null) {
if (parent.isEmpty()) {
document.removeField(parentName);
} else {
break;
}
}
lastDot = parentName.lastIndexOf('.');
}
}
if (fieldExists) {
// Flexible mode creates dotted field names when parent fields are not present. We expect the rename keys to be
// normalized after processing, so we progressively build each field's parents if it's a dotted field.
Map<String, Object> source = document.getSource();
String remainingPath = otelName;
int dot = remainingPath.indexOf('.');
while (dot > 0) {
// Dotted field, emulate classic mode by building out each parent object
String fieldName = remainingPath.substring(0, dot);
remainingPath = remainingPath.substring(dot + 1);
Object existingParent = source.get(fieldName);
if (existingParent instanceof Map) {
@SuppressWarnings("unchecked")
Map<String, Object> castAssignment = (Map<String, Object>) existingParent;
source = castAssignment;
} else {
Map<String, Object> map = new HashMap<>();
source.put(fieldName, map);
source = map;
}
dot = remainingPath.indexOf('.');
}
source.put(remainingPath, value);
}
});
}
|
Renames specific ECS keys in the given document to their OpenTelemetry-compatible counterparts using logic compatible with the
{@link org.elasticsearch.ingest.IngestPipelineFieldAccessPattern#FLEXIBLE} access pattern and based on the {@code RENAME_KEYS} map.
<p>This method performs the following operations:
<ul>
<li>For each key in the {@code RENAME_KEYS} map, it checks if a corresponding field exists in the document.</li>
<li>If the field exists, it removes it from the document and adds a new field with the corresponding name from the
{@code RENAME_KEYS} map and the same value. If a field's parent objects do not exist, it will progressively build
each parent object instead of concatenating the field names together.</li>
<li>If the key is nested (contains dots), it recursively removes empty parent fields after renaming.</li>
</ul>
@param document the document to process
|
java
|
modules/ingest-otel/src/main/java/org/elasticsearch/ingest/otel/NormalizeForStreamProcessor.java
| 334
|
[
"document"
] |
void
| true
| 8
| 6.48
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
__next__
|
def __next__(self):
"""
Return the next value, or raise StopIteration.
Examples
--------
>>> import numpy as np
>>> x = np.ma.array([3, 2], mask=[0, 1])
>>> fl = x.flat
>>> next(fl)
3
>>> next(fl)
masked
>>> next(fl)
Traceback (most recent call last):
...
StopIteration
"""
d = next(self.dataiter)
if self.maskiter is not None:
m = next(self.maskiter)
if isinstance(m, np.void):
return mvoid(d, mask=m, hardmask=self.ma._hardmask)
elif m: # Just a scalar, masked
return masked
return d
|
Return the next value, or raise StopIteration.
Examples
--------
>>> import numpy as np
>>> x = np.ma.array([3, 2], mask=[0, 1])
>>> fl = x.flat
>>> next(fl)
3
>>> next(fl)
masked
>>> next(fl)
Traceback (most recent call last):
...
StopIteration
|
python
|
numpy/ma/core.py
| 2,730
|
[
"self"
] | false
| 4
| 6.48
|
numpy/numpy
| 31,054
|
unknown
| false
|
|
multi_stream_iter
|
def multi_stream_iter(self, log_group: str, streams: list, positions=None) -> Generator:
"""
Iterate over the available events.
The events coming from a set of log streams in a single log group
interleaving the events from each stream so they're yielded in timestamp order.
:param log_group: The name of the log group.
:param streams: A list of the log stream names. The position of the stream in this list is
the stream number.
:param positions: A list of pairs of (timestamp, skip) which represents the last record
read from each stream.
:return: A tuple of (stream number, cloudwatch log event).
"""
positions = positions or {s: Position(timestamp=0, skip=0) for s in streams}
event_iters = [
self.logs_hook.get_log_events(log_group, s, positions[s].timestamp, positions[s].skip)
for s in streams
]
events: list[Any | None] = []
for event_stream in event_iters:
if event_stream:
try:
events.append(next(event_stream))
except StopIteration:
events.append(None)
else:
events.append(None)
while any(events):
i = argmin(events, lambda x: x["timestamp"] if x else 9999999999) or 0
yield i, events[i]
try:
events[i] = next(event_iters[i])
except StopIteration:
events[i] = None
|
Iterate over the available events.
The events coming from a set of log streams in a single log group
interleaving the events from each stream so they're yielded in timestamp order.
:param log_group: The name of the log group.
:param streams: A list of the log stream names. The position of the stream in this list is
the stream number.
:param positions: A list of pairs of (timestamp, skip) which represents the last record
read from each stream.
:return: A tuple of (stream number, cloudwatch log event).
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/sagemaker.py
| 252
|
[
"self",
"log_group",
"streams",
"positions"
] |
Generator
| true
| 8
| 8.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
asByteArray
|
byte[] asByteArray() {
ByteBuffer buffer = ByteBuffer.allocate(MINIMUM_SIZE);
buffer.order(ByteOrder.LITTLE_ENDIAN);
buffer.putInt(SIGNATURE);
buffer.putShort(this.numberOfThisDisk);
buffer.putShort(this.diskWhereCentralDirectoryStarts);
buffer.putShort(this.numberOfCentralDirectoryEntriesOnThisDisk);
buffer.putShort(this.totalNumberOfCentralDirectoryEntries);
buffer.putInt(this.sizeOfCentralDirectory);
buffer.putInt(this.offsetToStartOfCentralDirectory);
buffer.putShort(this.commentLength);
return buffer.array();
}
|
Return the contents of this record as a byte array suitable for writing to a zip.
@return the record as a byte array
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipEndOfCentralDirectoryRecord.java
| 83
|
[] | true
| 1
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
include_if
|
def include_if(self, c: Consumer) -> bool:
"""Determine if this bootstep should be included.
Args:
c: The Celery consumer instance
Returns:
bool: True if quorum queues are detected, False otherwise
"""
return detect_quorum_queues(c.app, c.app.connection_for_write().transport.driver_type)[0]
|
Determine if this bootstep should be included.
Args:
c: The Celery consumer instance
Returns:
bool: True if quorum queues are detected, False otherwise
|
python
|
celery/worker/consumer/delayed_delivery.py
| 52
|
[
"self",
"c"
] |
bool
| true
| 1
| 6.56
|
celery/celery
| 27,741
|
google
| false
|
charMatcher
|
public static StrMatcher charMatcher(final char ch) {
return new CharMatcher(ch);
}
|
Creates a matcher from a character.
@param ch the character to match, must not be null.
@return a new Matcher for the given char.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrMatcher.java
| 245
|
[
"ch"
] |
StrMatcher
| true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
estimatedBytesWritten
|
private int estimatedBytesWritten() {
if (compression.type() == CompressionType.NONE) {
return batchHeaderSizeInBytes + uncompressedRecordsSizeInBytes;
} else {
// estimate the written bytes to the underlying byte buffer based on uncompressed written bytes
return batchHeaderSizeInBytes + (int) (uncompressedRecordsSizeInBytes * estimatedCompressionRatio * COMPRESSION_RATE_ESTIMATION_FACTOR);
}
}
|
Get an estimate of the number of bytes written (based on the estimation factor hard-coded in {@link CompressionType}).
@return The estimated number of bytes written
|
java
|
clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java
| 820
|
[] | true
| 2
| 7.92
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
threadNamePrefix
|
public SimpleAsyncTaskSchedulerBuilder threadNamePrefix(@Nullable String threadNamePrefix) {
return new SimpleAsyncTaskSchedulerBuilder(threadNamePrefix, this.concurrencyLimit, this.virtualThreads,
this.taskTerminationTimeout, this.taskDecorator, this.customizers);
}
|
Set the prefix to use for the names of newly created threads.
@param threadNamePrefix the thread name prefix to set
@return a new builder instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/task/SimpleAsyncTaskSchedulerBuilder.java
| 80
|
[
"threadNamePrefix"
] |
SimpleAsyncTaskSchedulerBuilder
| true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
init_IA64_32Bit
|
private static void init_IA64_32Bit() {
addProcessors(new Processor(Processor.Arch.BIT_32, Processor.Type.IA_64), "ia64_32", "ia64n");
}
|
Gets a {@link Processor} object the given value {@link String}. The {@link String} must be like a value returned by the {@code "os.arch"} system
property.
@param value A {@link String} like a value returned by the {@code os.arch} System Property.
@return A {@link Processor} when it exists, else {@code null}.
|
java
|
src/main/java/org/apache/commons/lang3/ArchUtils.java
| 107
|
[] |
void
| true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getPrimitiveStackCache
|
function getPrimitiveStackCache(): Map<string, Array<any>> {
// This initializes a cache of all primitive hooks so that the top
// most stack frames added by calling the primitive hook can be removed.
if (primitiveStackCache === null) {
const cache = new Map<string, Array<any>>();
let readHookLog;
try {
// Use all hooks here to add them to the hook log.
Dispatcher.useContext(({_currentValue: null}: any));
Dispatcher.useState(null);
Dispatcher.useReducer((s: mixed, a: mixed) => s, null);
Dispatcher.useRef(null);
if (typeof Dispatcher.useCacheRefresh === 'function') {
// This type check is for Flow only.
Dispatcher.useCacheRefresh();
}
Dispatcher.useLayoutEffect(() => {});
Dispatcher.useInsertionEffect(() => {});
Dispatcher.useEffect(() => {});
Dispatcher.useImperativeHandle(undefined, () => null);
Dispatcher.useDebugValue(null);
Dispatcher.useCallback(() => {});
Dispatcher.useTransition();
Dispatcher.useSyncExternalStore(
() => () => {},
() => null,
() => null,
);
Dispatcher.useDeferredValue(null);
Dispatcher.useMemo(() => null);
Dispatcher.useOptimistic(null, (s: mixed, a: mixed) => s);
Dispatcher.useFormState((s: mixed, p: mixed) => s, null);
Dispatcher.useActionState((s: mixed, p: mixed) => s, null);
Dispatcher.useHostTransitionStatus();
if (typeof Dispatcher.useMemoCache === 'function') {
// This type check is for Flow only.
Dispatcher.useMemoCache(0);
}
if (typeof Dispatcher.use === 'function') {
// This type check is for Flow only.
Dispatcher.use(
({
$$typeof: REACT_CONTEXT_TYPE,
_currentValue: null,
}: any),
);
Dispatcher.use({
then() {},
status: 'fulfilled',
value: null,
});
try {
Dispatcher.use(
({
then() {},
}: any),
);
} catch (x) {}
}
Dispatcher.useId();
if (typeof Dispatcher.useEffectEvent === 'function') {
Dispatcher.useEffectEvent((args: empty) => {});
}
} finally {
readHookLog = hookLog;
hookLog = [];
}
for (let i = 0; i < readHookLog.length; i++) {
const hook = readHookLog[i];
cache.set(hook.primitive, ErrorStackParser.parse(hook.stackError));
}
primitiveStackCache = cache;
}
return primitiveStackCache;
}
|
Copyright (c) Meta Platforms, Inc. and affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
@flow
|
javascript
|
packages/react-debug-tools/src/ReactDebugHooks.js
| 69
|
[] | false
| 8
| 6.32
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
wait_for_job
|
def wait_for_job(
self,
job_id: str,
delay: int | float | None = None,
get_batch_log_fetcher: Callable[[str], AwsTaskLogFetcher | None] | None = None,
) -> None:
"""
Wait for Batch job to complete.
This assumes that the ``.waiter_model`` is configured using some
variation of the ``.default_config`` so that it can generate waiters
with the following names: "JobExists", "JobRunning" and "JobComplete".
:param job_id: a Batch job ID
:param delay: A delay before polling for job status
:param get_batch_log_fetcher: A method that returns batch_log_fetcher of
type AwsTaskLogFetcher or None when the CloudWatch log stream hasn't been created yet.
:raises: AirflowException
.. note::
This method adds a small random jitter to the ``delay`` (+/- 2 sec, >= 1 sec).
Using a random interval helps to avoid AWS API throttle limits when many
concurrent tasks request job-descriptions.
It also modifies the ``max_attempts`` to use the ``sys.maxsize``,
which allows Airflow to manage the timeout on waiting.
"""
self.delay(delay)
try:
waiter = self.get_waiter("JobExists")
waiter.config.delay = self.add_jitter(waiter.config.delay, width=2, minima=1)
waiter.config.max_attempts = sys.maxsize # timeout is managed by Airflow
waiter.wait(jobs=[job_id])
waiter = self.get_waiter("JobRunning")
waiter.config.delay = self.add_jitter(waiter.config.delay, width=2, minima=1)
waiter.config.max_attempts = sys.maxsize # timeout is managed by Airflow
waiter.wait(jobs=[job_id])
batch_log_fetcher = None
try:
if get_batch_log_fetcher:
batch_log_fetcher = get_batch_log_fetcher(job_id)
if batch_log_fetcher:
batch_log_fetcher.start()
waiter = self.get_waiter("JobComplete")
waiter.config.delay = self.add_jitter(waiter.config.delay, width=2, minima=1)
waiter.config.max_attempts = sys.maxsize # timeout is managed by Airflow
waiter.wait(jobs=[job_id])
finally:
if batch_log_fetcher:
batch_log_fetcher.stop()
batch_log_fetcher.join()
except (botocore.exceptions.ClientError, botocore.exceptions.WaiterError) as err:
raise AirflowException(err)
|
Wait for Batch job to complete.
This assumes that the ``.waiter_model`` is configured using some
variation of the ``.default_config`` so that it can generate waiters
with the following names: "JobExists", "JobRunning" and "JobComplete".
:param job_id: a Batch job ID
:param delay: A delay before polling for job status
:param get_batch_log_fetcher: A method that returns batch_log_fetcher of
type AwsTaskLogFetcher or None when the CloudWatch log stream hasn't been created yet.
:raises: AirflowException
.. note::
This method adds a small random jitter to the ``delay`` (+/- 2 sec, >= 1 sec).
Using a random interval helps to avoid AWS API throttle limits when many
concurrent tasks request job-descriptions.
It also modifies the ``max_attempts`` to use the ``sys.maxsize``,
which allows Airflow to manage the timeout on waiting.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/batch_waiters.py
| 200
|
[
"self",
"job_id",
"delay",
"get_batch_log_fetcher"
] |
None
| true
| 4
| 6.72
|
apache/airflow
| 43,597
|
sphinx
| false
|
asPredicate
|
public static <T> Predicate<T> asPredicate(final FailablePredicate<T, ?> predicate) {
return input -> test(predicate, input);
}
|
Converts the given {@link FailablePredicate} into a standard {@link Predicate}.
@param <T> the type used by the predicates
@param predicate a {@link FailablePredicate}
@return a standard {@link Predicate}
|
java
|
src/main/java/org/apache/commons/lang3/function/Failable.java
| 362
|
[
"predicate"
] | true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
__iter__
|
def __iter__(self) -> Iterator[int]:
"""
Return an iterator of the values.
Returns
-------
iterator
An iterator yielding ints from the RangeIndex.
Examples
--------
>>> idx = pd.RangeIndex(3)
>>> for x in idx:
... print(x)
0
1
2
"""
yield from self._range
|
Return an iterator of the values.
Returns
-------
iterator
An iterator yielding ints from the RangeIndex.
Examples
--------
>>> idx = pd.RangeIndex(3)
>>> for x in idx:
... print(x)
0
1
2
|
python
|
pandas/core/indexes/range.py
| 571
|
[
"self"
] |
Iterator[int]
| true
| 1
| 6.08
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
protocol_df_chunk_to_pandas
|
def protocol_df_chunk_to_pandas(df: DataFrameXchg) -> pd.DataFrame:
"""
Convert interchange protocol chunk to ``pd.DataFrame``.
Parameters
----------
df : DataFrameXchg
Returns
-------
pd.DataFrame
"""
columns: dict[str, Any] = {}
buffers = [] # hold on to buffers, keeps memory alive
for name in df.column_names():
if not isinstance(name, str):
raise ValueError(f"Column {name} is not a string")
if name in columns:
raise ValueError(f"Column {name} is not unique")
col = df.get_column_by_name(name)
dtype = col.dtype[0]
if dtype in (
DtypeKind.INT,
DtypeKind.UINT,
DtypeKind.FLOAT,
DtypeKind.BOOL,
):
columns[name], buf = primitive_column_to_ndarray(col)
elif dtype == DtypeKind.CATEGORICAL:
columns[name], buf = categorical_column_to_series(col)
elif dtype == DtypeKind.STRING:
columns[name], buf = string_column_to_ndarray(col)
elif dtype == DtypeKind.DATETIME:
columns[name], buf = datetime_column_to_ndarray(col)
else:
raise NotImplementedError(f"Data type {dtype} not handled yet")
buffers.append(buf)
pandas_df = pd.DataFrame(columns)
pandas_df.attrs["_INTERCHANGE_PROTOCOL_BUFFERS"] = buffers
return pandas_df
|
Convert interchange protocol chunk to ``pd.DataFrame``.
Parameters
----------
df : DataFrameXchg
Returns
-------
pd.DataFrame
|
python
|
pandas/core/interchange/from_dataframe.py
| 178
|
[
"df"
] |
pd.DataFrame
| true
| 9
| 6.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
blockingGet
|
@ParametricNullness
@SuppressWarnings("nullness") // TODO(b/147136275): Remove once our checker understands & and |.
final V blockingGet() throws InterruptedException, ExecutionException {
if (Thread.interrupted()) {
throw new InterruptedException();
}
@RetainedLocalRef Object localValue = valueField;
if (localValue != null & notInstanceOfDelegatingToFuture(localValue)) {
return getDoneValue(localValue);
}
Waiter oldHead = waitersField;
if (oldHead != Waiter.TOMBSTONE) {
Waiter node = new Waiter();
do {
node.setNext(oldHead);
if (casWaiters(oldHead, node)) {
// we are on the stack, now wait for completion.
while (true) {
LockSupport.park(this);
// Check interruption first, if we woke up due to interruption we need to honor that.
if (Thread.interrupted()) {
removeWaiter(node);
throw new InterruptedException();
}
// Otherwise re-read and check doneness. If we loop then it must have been a spurious
// wakeup
localValue = valueField;
if (localValue != null & notInstanceOfDelegatingToFuture(localValue)) {
return getDoneValue(localValue);
}
}
}
oldHead = waitersField; // re-read and loop.
} while (oldHead != Waiter.TOMBSTONE);
}
// re-read valueField, if we get here then we must have observed a TOMBSTONE while trying to add
// a waiter.
// requireNonNull is safe because valueField is always set before TOMBSTONE.
return getDoneValue(requireNonNull(valueField));
}
|
Releases all threads in the {@link #waitersField} list, and clears the list.
|
java
|
android/guava/src/com/google/common/util/concurrent/AbstractFutureState.java
| 224
|
[] |
V
| true
| 8
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
symmetric_difference
|
def symmetric_difference(
self,
other,
result_name: abc.Hashable | None = None,
sort: bool | None = None,
):
"""
Compute the symmetric difference of two Index objects.
Parameters
----------
other : Index or array-like
Index or an array-like object with elements to compute the symmetric
difference with the original Index.
result_name : str
A string representing the name of the resulting Index, if desired.
sort : bool or None, default None
Whether to sort the resulting index. By default, the
values are attempted to be sorted, but any TypeError from
incomparable elements is caught by pandas.
* None : Attempt to sort the result, but catch any TypeErrors
from comparing incomparable elements.
* False : Do not sort the result.
* True : Sort the result (which may raise TypeError).
Returns
-------
Index
Returns a new Index object containing elements that appear in either the
original Index or the `other` Index, but not both.
See Also
--------
Index.difference : Return a new Index with elements of index not in other.
Index.union : Form the union of two Index objects.
Index.intersection : Form the intersection of two Index objects.
Notes
-----
``symmetric_difference`` contains elements that appear in either
``idx1`` or ``idx2`` but not both. Equivalent to the Index created by
``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates
dropped.
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([2, 3, 4, 5])
>>> idx1.symmetric_difference(idx2)
Index([1, 5], dtype='int64')
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_name_update = self._convert_can_do_setop(other)
if result_name is None:
result_name = result_name_update
if self.dtype != other.dtype:
self, other = self._dti_setop_align_tzs(other, "symmetric_difference")
if not self._should_compare(other):
return self.union(other, sort=sort).rename(result_name)
elif self.dtype != other.dtype:
dtype = self._find_common_type_compat(other)
this = self.astype(dtype, copy=False)
that = other.astype(dtype, copy=False)
return this.symmetric_difference(that, sort=sort).rename(result_name)
this = self.unique()
other = other.unique()
indexer = this.get_indexer_for(other)
# {this} minus {other}
common_indexer = indexer.take((indexer != -1).nonzero()[0])
left_indexer = np.setdiff1d(
np.arange(this.size), common_indexer, assume_unique=True
)
left_diff = this.take(left_indexer)
# {other} minus {this}
right_indexer = (indexer == -1).nonzero()[0]
right_diff = other.take(right_indexer)
res_values = left_diff.append(right_diff)
result = _maybe_try_sort(res_values, sort)
if not self._is_multi:
return Index(result, name=result_name, dtype=res_values.dtype)
else:
left_diff = cast("MultiIndex", left_diff)
if len(result) == 0:
# result might be an Index, if other was an Index
return left_diff.remove_unused_levels().set_names(result_name)
return result.set_names(result_name)
|
Compute the symmetric difference of two Index objects.
Parameters
----------
other : Index or array-like
Index or an array-like object with elements to compute the symmetric
difference with the original Index.
result_name : str
A string representing the name of the resulting Index, if desired.
sort : bool or None, default None
Whether to sort the resulting index. By default, the
values are attempted to be sorted, but any TypeError from
incomparable elements is caught by pandas.
* None : Attempt to sort the result, but catch any TypeErrors
from comparing incomparable elements.
* False : Do not sort the result.
* True : Sort the result (which may raise TypeError).
Returns
-------
Index
Returns a new Index object containing elements that appear in either the
original Index or the `other` Index, but not both.
See Also
--------
Index.difference : Return a new Index with elements of index not in other.
Index.union : Form the union of two Index objects.
Index.intersection : Form the intersection of two Index objects.
Notes
-----
``symmetric_difference`` contains elements that appear in either
``idx1`` or ``idx2`` but not both. Equivalent to the Index created by
``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates
dropped.
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([2, 3, 4, 5])
>>> idx1.symmetric_difference(idx2)
Index([1, 5], dtype='int64')
|
python
|
pandas/core/indexes/base.py
| 3,489
|
[
"self",
"other",
"result_name",
"sort"
] | true
| 8
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
lastIndexOf
|
function lastIndexOf(array, value, fromIndex) {
var length = array == null ? 0 : array.length;
if (!length) {
return -1;
}
var index = length;
if (fromIndex !== undefined) {
index = toInteger(fromIndex);
index = index < 0 ? nativeMax(length + index, 0) : nativeMin(index, length - 1);
}
return value === value
? strictLastIndexOf(array, value, index)
: baseFindIndex(array, baseIsNaN, index, true);
}
|
This method is like `_.indexOf` except that it iterates over elements of
`array` from right to left.
@static
@memberOf _
@since 0.1.0
@category Array
@param {Array} array The array to inspect.
@param {*} value The value to search for.
@param {number} [fromIndex=array.length-1] The index to search from.
@returns {number} Returns the index of the matched value, else `-1`.
@example
_.lastIndexOf([1, 2, 1, 2], 2);
// => 3
// Search from the `fromIndex`.
_.lastIndexOf([1, 2, 1, 2], 2, 2);
// => 1
|
javascript
|
lodash.js
| 7,738
|
[
"array",
"value",
"fromIndex"
] | false
| 6
| 7.68
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
english_upper
|
def english_upper(s):
""" Apply English case rules to convert ASCII strings to all upper case.
This is an internal utility function to replace calls to str.upper() such
that we can avoid changing behavior with changing locales. In particular,
Turkish has distinct dotted and dotless variants of the Latin letter "I" in
both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.
Parameters
----------
s : str
Returns
-------
uppered : str
Examples
--------
>>> from numpy._core.numerictypes import english_upper
>>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
>>> english_upper('')
''
"""
uppered = s.translate(UPPER_TABLE)
return uppered
|
Apply English case rules to convert ASCII strings to all upper case.
This is an internal utility function to replace calls to str.upper() such
that we can avoid changing behavior with changing locales. In particular,
Turkish has distinct dotted and dotless variants of the Latin letter "I" in
both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.
Parameters
----------
s : str
Returns
-------
uppered : str
Examples
--------
>>> from numpy._core.numerictypes import english_upper
>>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
>>> english_upper('')
''
|
python
|
numpy/_core/_string_helpers.py
| 44
|
[
"s"
] | false
| 1
| 6.16
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
set_default_device
|
def set_default_device(device: "Device") -> None:
"""Sets the default ``torch.Tensor`` to be allocated on ``device``. This
does not affect factory function calls which are called with an explicit
``device`` argument. Factory calls will be performed as if they
were passed ``device`` as an argument.
To only temporarily change the default device instead of setting it
globally, use ``with torch.device(device):`` instead.
The default device is initially ``cpu``. If you set the default tensor
device to another device (e.g., ``cuda``) without a device index, tensors
will be allocated on whatever the current device for the device type,
even after :func:`torch.cuda.set_device` is called.
.. warning::
This function imposes a slight performance cost on every Python
call to the torch API (not just factory functions). If this
is causing problems for you, please comment on
https://github.com/pytorch/pytorch/issues/92701
.. note::
This doesn't affect functions that create tensors that share the same memory as the input, like:
:func:`torch.from_numpy` and :func:`torch.frombuffer`
Args:
device (device or string): the device to set as default
Example::
>>> # xdoctest: +SKIP("requires cuda, changes global state")
>>> torch.get_default_device()
device(type='cpu')
>>> torch.set_default_device('cuda') # current device is 0
>>> torch.get_default_device()
device(type='cuda', index=0)
>>> torch.set_default_device('cuda')
>>> torch.cuda.set_device('cuda:1') # current device is 1
>>> torch.get_default_device()
device(type='cuda', index=1)
>>> torch.set_default_device('cuda:1')
>>> torch.get_default_device()
device(type='cuda', index=1)
"""
global _GLOBAL_DEVICE_CONTEXT
if hasattr(_GLOBAL_DEVICE_CONTEXT, "device_context"):
device_context = _GLOBAL_DEVICE_CONTEXT.device_context
if device_context is not None:
device_context.__exit__(None, None, None)
if device is None:
device_context = None
else:
from torch.utils._device import DeviceContext
device_context = DeviceContext(device)
device_context.__enter__()
_GLOBAL_DEVICE_CONTEXT.device_context = device_context
|
Sets the default ``torch.Tensor`` to be allocated on ``device``. This
does not affect factory function calls which are called with an explicit
``device`` argument. Factory calls will be performed as if they
were passed ``device`` as an argument.
To only temporarily change the default device instead of setting it
globally, use ``with torch.device(device):`` instead.
The default device is initially ``cpu``. If you set the default tensor
device to another device (e.g., ``cuda``) without a device index, tensors
will be allocated on whatever the current device for the device type,
even after :func:`torch.cuda.set_device` is called.
.. warning::
This function imposes a slight performance cost on every Python
call to the torch API (not just factory functions). If this
is causing problems for you, please comment on
https://github.com/pytorch/pytorch/issues/92701
.. note::
This doesn't affect functions that create tensors that share the same memory as the input, like:
:func:`torch.from_numpy` and :func:`torch.frombuffer`
Args:
device (device or string): the device to set as default
Example::
>>> # xdoctest: +SKIP("requires cuda, changes global state")
>>> torch.get_default_device()
device(type='cpu')
>>> torch.set_default_device('cuda') # current device is 0
>>> torch.get_default_device()
device(type='cuda', index=0)
>>> torch.set_default_device('cuda')
>>> torch.cuda.set_device('cuda:1') # current device is 1
>>> torch.get_default_device()
device(type='cuda', index=1)
>>> torch.set_default_device('cuda:1')
>>> torch.get_default_device()
device(type='cuda', index=1)
|
python
|
torch/__init__.py
| 1,224
|
[
"device"
] |
None
| true
| 5
| 7.84
|
pytorch/pytorch
| 96,034
|
google
| false
|
createCollection
|
@Override
Set<V> createCollection() {
return Platform.newHashSetWithExpectedSize(expectedValuesPerKey);
}
|
{@inheritDoc}
<p>Creates an empty {@code HashSet} for a collection of values for one key.
@return a new {@code HashSet} containing a collection of values for one key
|
java
|
android/guava/src/com/google/common/collect/HashMultimap.java
| 127
|
[] | true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
|
getValue
|
@Deprecated
@Override
public Long getValue() {
return Long.valueOf(this.value);
}
|
Gets the value as a Long instance.
@return the value as a Long, never null.
@deprecated Use {@link #get()}.
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableLong.java
| 259
|
[] |
Long
| true
| 1
| 7.04
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
maybeExpire
|
void maybeExpire() {
if (numAttempts > 0 && isExpired()) {
removeRequest();
future().completeExceptionally(new TimeoutException(requestDescription() +
" could not complete before timeout expired."));
}
}
|
Complete the request future with a TimeoutException if the request has been sent out
at least once and the timeout has been reached.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java
| 914
|
[] |
void
| true
| 3
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
createSuperElementAccessInAsyncMethod
|
function createSuperElementAccessInAsyncMethod(argumentExpression: Expression, location: TextRange): LeftHandSideExpression {
if (enclosingSuperContainerFlags & NodeCheckFlags.MethodWithSuperPropertyAssignmentInAsync) {
return setTextRange(
factory.createPropertyAccessExpression(
factory.createCallExpression(
factory.createUniqueName("_superIndex", GeneratedIdentifierFlags.Optimistic | GeneratedIdentifierFlags.FileLevel),
/*typeArguments*/ undefined,
[argumentExpression],
),
"value",
),
location,
);
}
else {
return setTextRange(
factory.createCallExpression(
factory.createUniqueName("_superIndex", GeneratedIdentifierFlags.Optimistic | GeneratedIdentifierFlags.FileLevel),
/*typeArguments*/ undefined,
[argumentExpression],
),
location,
);
}
}
|
Hooks node substitutions.
@param hint A hint as to the intended usage of the node.
@param node The node to substitute.
|
typescript
|
src/compiler/transformers/es2017.ts
| 1,027
|
[
"argumentExpression",
"location"
] | true
| 3
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
get_dataframe_repr_params
|
def get_dataframe_repr_params() -> dict[str, Any]:
"""Get the parameters used to repr(dataFrame) calls using DataFrame.to_string.
Supplying these parameters to DataFrame.to_string is equivalent to calling
``repr(DataFrame)``. This is useful if you want to adjust the repr output.
Example
-------
>>> import pandas as pd
>>>
>>> df = pd.DataFrame([[1, 2], [3, 4]])
>>> repr_params = pd.io.formats.format.get_dataframe_repr_params()
>>> repr(df) == df.to_string(**repr_params)
True
"""
from pandas.io.formats import console
if get_option("display.expand_frame_repr"):
line_width, _ = console.get_console_size()
else:
line_width = None
return {
"max_rows": get_option("display.max_rows"),
"min_rows": get_option("display.min_rows"),
"max_cols": get_option("display.max_columns"),
"max_colwidth": get_option("display.max_colwidth"),
"show_dimensions": get_option("display.show_dimensions"),
"line_width": line_width,
}
|
Get the parameters used to repr(dataFrame) calls using DataFrame.to_string.
Supplying these parameters to DataFrame.to_string is equivalent to calling
``repr(DataFrame)``. This is useful if you want to adjust the repr output.
Example
-------
>>> import pandas as pd
>>>
>>> df = pd.DataFrame([[1, 2], [3, 4]])
>>> repr_params = pd.io.formats.format.get_dataframe_repr_params()
>>> repr(df) == df.to_string(**repr_params)
True
|
python
|
pandas/io/formats/format.py
| 355
|
[] |
dict[str, Any]
| true
| 3
| 8
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
construct_1d_arraylike_from_scalar
|
def construct_1d_arraylike_from_scalar(
value: Scalar, length: int, dtype: DtypeObj | None
) -> ArrayLike:
"""
create an np.ndarray / pandas type of specified shape and dtype
filled with values
Parameters
----------
value : scalar value
length : int
dtype : pandas_dtype or np.dtype
Returns
-------
np.ndarray / pandas type of length, filled with value
"""
if dtype is None:
try:
dtype, value = infer_dtype_from_scalar(value)
except OutOfBoundsDatetime:
dtype = _dtype_obj
if isinstance(dtype, ExtensionDtype):
cls = dtype.construct_array_type()
seq = [] if length == 0 else [value]
return cls._from_sequence(seq, dtype=dtype).repeat(length)
if length and dtype.kind in "iu" and isna(value):
# coerce if we have nan for an integer dtype
dtype = np.dtype("float64")
elif lib.is_np_dtype(dtype, "US"):
# we need to coerce to object dtype to avoid
# to allow numpy to take our string as a scalar value
dtype = np.dtype("object")
if not isna(value):
value = ensure_str(value)
elif dtype.kind in "mM":
value = _maybe_box_and_unbox_datetimelike(value, dtype)
subarr = np.empty(length, dtype=dtype)
if length:
# GH 47391: numpy > 1.24 will raise filling np.nan into int dtypes
subarr.fill(value)
return subarr
|
create an np.ndarray / pandas type of specified shape and dtype
filled with values
Parameters
----------
value : scalar value
length : int
dtype : pandas_dtype or np.dtype
Returns
-------
np.ndarray / pandas type of length, filled with value
|
python
|
pandas/core/dtypes/cast.py
| 1,393
|
[
"value",
"length",
"dtype"
] |
ArrayLike
| true
| 11
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
getBinder
|
private Binder getBinder(@Nullable ConfigDataActivationContext activationContext,
Predicate<ConfigDataEnvironmentContributor> filter, Set<BinderOption> options) {
boolean failOnInactiveSource = options.contains(BinderOption.FAIL_ON_BIND_TO_INACTIVE_SOURCE);
Iterable<ConfigurationPropertySource> sources = () -> getBinderSources(
filter.and((contributor) -> failOnInactiveSource || contributor.isActive(activationContext)));
PlaceholdersResolver placeholdersResolver = new ConfigDataEnvironmentContributorPlaceholdersResolver(this.root,
activationContext, null, failOnInactiveSource, this.conversionService);
BindHandler bindHandler = !failOnInactiveSource ? null : new InactiveSourceChecker(activationContext);
return new Binder(sources, placeholdersResolver, null, null, bindHandler);
}
|
Return a {@link Binder} backed by the contributors.
@param activationContext the activation context
@param filter a filter used to limit the contributors
@param options binder options to apply
@return a binder instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataEnvironmentContributors.java
| 228
|
[
"activationContext",
"filter",
"options"
] |
Binder
| true
| 3
| 7.44
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
formatPeriod
|
public static String formatPeriod(final long startMillis, final long endMillis, final String format) {
return formatPeriod(startMillis, endMillis, format, true, TimeZone.getDefault());
}
|
Formats the time gap as a string, using the specified format.
Padding the left-hand side side of numbers with zeroes is optional.
@param startMillis the start of the duration
@param endMillis the end of the duration
@param format the way in which to format the duration, not null
@return the formatted duration, not null
@throws IllegalArgumentException if startMillis is greater than endMillis
|
java
|
src/main/java/org/apache/commons/lang3/time/DurationFormatUtils.java
| 501
|
[
"startMillis",
"endMillis",
"format"
] |
String
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
resolveItemMetadataGroup
|
private ItemMetadata resolveItemMetadataGroup(String prefix, MetadataGenerationEnvironment environment) {
Element propertyElement = environment.getTypeUtils().asElement(getType());
String nestedPrefix = ConfigurationMetadata.nestedPrefix(prefix, getName());
String dataType = environment.getTypeUtils().getQualifiedName(propertyElement);
String ownerType = environment.getTypeUtils().getQualifiedName(getDeclaringElement());
String sourceMethod = (getGetter() != null) ? getGetter().toString() : null;
return ItemMetadata.newGroup(nestedPrefix, dataType, ownerType, sourceMethod);
}
|
Return if this property has been explicitly marked as nested (for example using an
annotation}.
@param environment the metadata generation environment
@return if the property has been marked as nested
|
java
|
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/PropertyDescriptor.java
| 180
|
[
"prefix",
"environment"
] |
ItemMetadata
| true
| 2
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
_interp_limit
|
def _interp_limit(
invalid: npt.NDArray[np.bool_], fw_limit: int | None, bw_limit: int | None
) -> np.ndarray:
"""
Get indexers of values that won't be filled
because they exceed the limits.
Parameters
----------
invalid : np.ndarray[bool]
fw_limit : int or None
forward limit to index
bw_limit : int or None
backward limit to index
Returns
-------
set of indexers
Notes
-----
This is equivalent to the more readable, but slower
.. code-block:: python
def _interp_limit(invalid, fw_limit, bw_limit):
for x in np.where(invalid)[0]:
if invalid[max(0, x - fw_limit) : x + bw_limit + 1].all():
yield x
"""
# handle forward first; the backward direction is the same except
# 1. operate on the reversed array
# 2. subtract the returned indices from N - 1
N = len(invalid)
f_idx = np.array([], dtype=np.int64)
b_idx = np.array([], dtype=np.int64)
assume_unique = True
def inner(invalid, limit: int):
limit = min(limit, N)
windowed = np.lib.stride_tricks.sliding_window_view(invalid, limit + 1).all(1)
idx = np.union1d(
np.where(windowed)[0] + limit,
np.where((~invalid[: limit + 1]).cumsum() == 0)[0],
)
return idx
if fw_limit is not None:
if fw_limit == 0:
f_idx = np.where(invalid)[0]
assume_unique = False
else:
f_idx = inner(invalid, fw_limit)
if bw_limit is not None:
if bw_limit == 0:
# then we don't even need to care about backwards
# just use forwards
return f_idx
else:
b_idx = N - 1 - inner(invalid[::-1], bw_limit)
if fw_limit == 0:
return b_idx
return np.intersect1d(f_idx, b_idx, assume_unique=assume_unique)
|
Get indexers of values that won't be filled
because they exceed the limits.
Parameters
----------
invalid : np.ndarray[bool]
fw_limit : int or None
forward limit to index
bw_limit : int or None
backward limit to index
Returns
-------
set of indexers
Notes
-----
This is equivalent to the more readable, but slower
.. code-block:: python
def _interp_limit(invalid, fw_limit, bw_limit):
for x in np.where(invalid)[0]:
if invalid[max(0, x - fw_limit) : x + bw_limit + 1].all():
yield x
|
python
|
pandas/core/missing.py
| 1,039
|
[
"invalid",
"fw_limit",
"bw_limit"
] |
np.ndarray
| true
| 8
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
get_output_location
|
def get_output_location(self, query_execution_id: str) -> str:
"""
Get the output location of the query results in S3 URI format.
.. seealso::
- :external+boto3:py:meth:`Athena.Client.get_query_execution`
:param query_execution_id: Id of submitted athena query
"""
if not query_execution_id:
raise ValueError(f"Invalid Query execution id. Query execution id: {query_execution_id}")
if not (response := self.get_query_info(query_execution_id=query_execution_id, use_cache=True)):
raise ValueError(f"Unable to get query information for execution id: {query_execution_id}")
try:
return response["QueryExecution"]["ResultConfiguration"]["OutputLocation"]
except KeyError:
self.log.error("Error retrieving OutputLocation. Query execution id: %s", query_execution_id)
raise
|
Get the output location of the query results in S3 URI format.
.. seealso::
- :external+boto3:py:meth:`Athena.Client.get_query_execution`
:param query_execution_id: Id of submitted athena query
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/athena.py
| 309
|
[
"self",
"query_execution_id"
] |
str
| true
| 3
| 6.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
toByteArray
|
public byte[] toByteArray() {
return bitSet.toByteArray();
}
|
Returns a new byte array containing all the bits in this bit set.
<p>
More precisely, if:
</p>
<ol>
<li>{@code byte[] bytes = s.toByteArray();}</li>
<li>then {@code bytes.length == (s.length()+7)/8} and</li>
<li>{@code s.get(n) == ((bytes[n/8] & (1<<(n%8))) != 0)}</li>
<li>for all {@code n < 8 * bytes.length}.</li>
</ol>
@return a byte array containing a little-endian representation of all the bits in this bit set
|
java
|
src/main/java/org/apache/commons/lang3/util/FluentBitSet.java
| 545
|
[] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
once
|
public static BooleanSupplier once() {
return new OnceTrue();
}
|
@return a {@link BooleanSupplier} which supplies {@code true} the first time it is called, and {@code false} subsequently.
|
java
|
libs/core/src/main/java/org/elasticsearch/core/Predicates.java
| 110
|
[] |
BooleanSupplier
| true
| 1
| 6.64
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
parseYieldExpression
|
function parseYieldExpression(): YieldExpression {
const pos = getNodePos();
// YieldExpression[In] :
// yield
// yield [no LineTerminator here] [Lexical goal InputElementRegExp]AssignmentExpression[?In, Yield]
// yield [no LineTerminator here] * [Lexical goal InputElementRegExp]AssignmentExpression[?In, Yield]
nextToken();
if (
!scanner.hasPrecedingLineBreak() &&
(token() === SyntaxKind.AsteriskToken || isStartOfExpression())
) {
return finishNode(
factory.createYieldExpression(
parseOptionalToken(SyntaxKind.AsteriskToken),
parseAssignmentExpressionOrHigher(/*allowReturnTypeInArrowFunction*/ true),
),
pos,
);
}
else {
// if the next token is not on the same line as yield. or we don't have an '*' or
// the start of an expression, then this is just a simple "yield" expression.
return finishNode(factory.createYieldExpression(/*asteriskToken*/ undefined, /*expression*/ undefined), pos);
}
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 5,169
|
[] | true
| 5
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
toString
|
@Override
public String toString() {
return "MergingDigest"
+ "-"
+ getScaleFunction()
+ "-"
+ (useWeightLimit ? "weight" : "kSize")
+ "-"
+ (useAlternatingSort ? "alternating" : "stable")
+ "-"
+ (useTwoLevelCompression ? "twoLevel" : "oneLevel");
}
|
Merges any pending inputs and compresses the data down to the public setting.
Note that this typically loses a bit of precision and thus isn't a thing to
be doing all the time. It is best done only when we want to show results to
the outside world.
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/MergingDigest.java
| 611
|
[] |
String
| true
| 4
| 7.04
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
signature
|
def signature(varies, *args, **kwargs):
"""Create new signature.
- if the first argument is a signature already then it's cloned.
- if the first argument is a dict, then a Signature version is returned.
Returns:
Signature: The resulting signature.
"""
app = kwargs.get('app')
if isinstance(varies, dict):
if isinstance(varies, abstract.CallableSignature):
return varies.clone()
return Signature.from_dict(varies, app=app)
return Signature(varies, *args, **kwargs)
|
Create new signature.
- if the first argument is a signature already then it's cloned.
- if the first argument is a dict, then a Signature version is returned.
Returns:
Signature: The resulting signature.
|
python
|
celery/canvas.py
| 2,373
|
[
"varies"
] | false
| 3
| 7.12
|
celery/celery
| 27,741
|
unknown
| false
|
|
binaryValue
|
@Override
public byte[] binaryValue() throws IOException {
try {
return parser.getBinaryValue();
} catch (IOException e) {
throw handleParserException(e);
}
}
|
Handle parser exception depending on type.
This converts known exceptions to XContentParseException and rethrows them.
|
java
|
libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java
| 299
|
[] | true
| 2
| 6.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
VirtualLock
|
boolean VirtualLock(Address address, long size);
|
Locks the specified region of the process's virtual address space into physical
memory, ensuring that subsequent access to the region will not incur a page fault.
@param address A pointer to the base address of the region of pages to be locked.
@param size The size of the region to be locked, in bytes.
@return true if the function succeeds
@see <a href="https://msdn.microsoft.com/en-us/library/windows/desktop/aa366895%28v=vs.85%29.aspx">VirtualLock docs</a>
|
java
|
libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/Kernel32Library.java
| 61
|
[
"address",
"size"
] | true
| 1
| 6.16
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
execute
|
@Override
public IngestDocument execute(IngestDocument document) {
document.doNoSelfReferencesCheck(true);
IngestScript.Factory factory = precompiledIngestScriptFactory;
if (factory == null) {
factory = scriptService.compile(script, IngestScript.CONTEXT);
}
factory.newInstance(script.getParams(), document.getCtxMap()).execute();
return document;
}
|
Executes the script with the Ingest document in context.
@param document The Ingest document passed into the script context under the "ctx" object.
|
java
|
modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java
| 73
|
[
"document"
] |
IngestDocument
| true
| 2
| 6.88
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
strip_leading_zeros_from_version
|
def strip_leading_zeros_from_version(version: str) -> str:
"""
Strips leading zeros from version number.
This converts 1974.04.03 to 1974.4.3 as the format with leading month and day zeros is not accepted
by PIP versioning.
:param version: version number in CALVER format (potentially with leading 0s in date and month)
:return: string with leading 0s after dot replaced.
"""
return ".".join(i.lstrip("0") or "0" for i in version.split("."))
|
Strips leading zeros from version number.
This converts 1974.04.03 to 1974.4.3 as the format with leading month and day zeros is not accepted
by PIP versioning.
:param version: version number in CALVER format (potentially with leading 0s in date and month)
:return: string with leading 0s after dot replaced.
|
python
|
dev/breeze/src/airflow_breeze/utils/versions.py
| 20
|
[
"version"
] |
str
| true
| 2
| 8.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
count
|
def count(self) -> NDFrameT:
"""
Compute count of group, excluding missing values.
Returns
-------
Series or DataFrame
Count of values within each group.
%(see_also)s
Examples
--------
For SeriesGroupBy:
>>> lst = ["a", "a", "b"]
>>> ser = pd.Series([1, 2, np.nan], index=lst)
>>> ser
a 1.0
a 2.0
b NaN
dtype: float64
>>> ser.groupby(level=0).count()
a 2
b 0
dtype: int64
For DataFrameGroupBy:
>>> data = [[1, np.nan, 3], [1, np.nan, 6], [7, 8, 9]]
>>> df = pd.DataFrame(
... data, columns=["a", "b", "c"], index=["cow", "horse", "bull"]
... )
>>> df
a b c
cow 1 NaN 3
horse 1 NaN 6
bull 7 8.0 9
>>> df.groupby("a").count()
b c
a
1 0 2
7 1 1
For Resampler:
>>> ser = pd.Series(
... [1, 2, 3, 4],
... index=pd.DatetimeIndex(
... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
... ),
... )
>>> ser
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 4
dtype: int64
>>> ser.resample("MS").count()
2023-01-01 2
2023-02-01 2
Freq: MS, dtype: int64
"""
data = self._get_data_to_aggregate()
ids = self._grouper.ids
ngroups = self._grouper.ngroups
mask = ids != -1
is_series = data.ndim == 1
def hfunc(bvalues: ArrayLike) -> ArrayLike:
# TODO(EA2D): reshape would not be necessary with 2D EAs
if bvalues.ndim == 1:
# EA
masked = mask & ~isna(bvalues).reshape(1, -1)
else:
masked = mask & ~isna(bvalues)
counted = lib.count_level_2d(masked, labels=ids, max_bin=ngroups)
if isinstance(bvalues, BaseMaskedArray):
return IntegerArray(
counted[0], mask=np.zeros(counted.shape[1], dtype=np.bool_)
)
elif isinstance(bvalues, ArrowExtensionArray) and not isinstance(
bvalues.dtype, StringDtype
):
dtype = pandas_dtype("int64[pyarrow]")
return type(bvalues)._from_sequence(counted[0], dtype=dtype)
if is_series:
assert counted.ndim == 2
assert counted.shape[0] == 1
return counted[0]
return counted
new_mgr = data.grouped_reduce(hfunc)
new_obj = self._wrap_agged_manager(new_mgr)
result = self._wrap_aggregated_output(new_obj)
return result
|
Compute count of group, excluding missing values.
Returns
-------
Series or DataFrame
Count of values within each group.
%(see_also)s
Examples
--------
For SeriesGroupBy:
>>> lst = ["a", "a", "b"]
>>> ser = pd.Series([1, 2, np.nan], index=lst)
>>> ser
a 1.0
a 2.0
b NaN
dtype: float64
>>> ser.groupby(level=0).count()
a 2
b 0
dtype: int64
For DataFrameGroupBy:
>>> data = [[1, np.nan, 3], [1, np.nan, 6], [7, 8, 9]]
>>> df = pd.DataFrame(
... data, columns=["a", "b", "c"], index=["cow", "horse", "bull"]
... )
>>> df
a b c
cow 1 NaN 3
horse 1 NaN 6
bull 7 8.0 9
>>> df.groupby("a").count()
b c
a
1 0 2
7 1 1
For Resampler:
>>> ser = pd.Series(
... [1, 2, 3, 4],
... index=pd.DatetimeIndex(
... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
... ),
... )
>>> ser
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 4
dtype: int64
>>> ser.resample("MS").count()
2023-01-01 2
2023-02-01 2
Freq: MS, dtype: int64
|
python
|
pandas/core/groupby/groupby.py
| 2,105
|
[
"self"
] |
NDFrameT
| true
| 7
| 8.4
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
extracted
|
private PropertyDescriptor extracted(TypeElement declaringElement, TypeElementMembers members,
VariableElement parameter) {
String parameterName = parameter.getSimpleName().toString();
String name = getPropertyName(parameter, parameterName);
TypeMirror type = parameter.asType();
ExecutableElement getter = members.getPublicGetter(parameterName, type);
ExecutableElement setter = members.getPublicSetter(parameterName, type);
VariableElement field = members.getFields().get(parameterName);
RecordComponentElement recordComponent = members.getRecordComponents().get(parameterName);
SourceMetadata sourceMetadata = this.environment.resolveSourceMetadata(field, getter);
PropertyDescriptor propertyDescriptor = (recordComponent != null)
? new RecordParameterPropertyDescriptor(name, type, parameter, declaringElement, getter,
recordComponent)
: new ConstructorParameterPropertyDescriptor(name, type, parameter, declaringElement, getter, setter,
field);
return sourceMetadata.createPropertyDescriptor(name, propertyDescriptor);
}
|
Return the {@link PropertyDescriptor} instances that are valid candidates for the
specified {@link TypeElement type} based on the specified {@link ExecutableElement
factory method}, if any.
@param type the target type
@param factoryMethod the method that triggered the metadata for that {@code type}
or {@code null}
@return the candidate properties for metadata generation
|
java
|
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/PropertyDescriptorResolver.java
| 88
|
[
"declaringElement",
"members",
"parameter"
] |
PropertyDescriptor
| true
| 2
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
certificates
|
@Nullable List<X509Certificate> certificates();
|
The certificates for this store. When a {@link #privateKey() private key} is
present the returned value is treated as a certificate chain, otherwise it is
treated a list of certificates that should all be registered.
@return the X509 certificates
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/pem/PemSslStore.java
| 67
|
[] | true
| 1
| 6.32
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
_downsample
|
def _downsample(self, how, **kwargs):
"""
Downsample the cython defined function.
Parameters
----------
how : string / cython mapped function
**kwargs : kw args passed to how function
"""
ax = self.ax
if is_subperiod(ax.freq, self.freq):
# Downsampling
return self._groupby_and_aggregate(how, **kwargs)
elif is_superperiod(ax.freq, self.freq):
if how == "ohlc":
# GH #13083
# upsampling to subperiods is handled as an asfreq, which works
# for pure aggregating/reducing methods
# OHLC reduces along the time dimension, but creates multiple
# values for each period -> handle by _groupby_and_aggregate()
return self._groupby_and_aggregate(how)
return self.asfreq()
elif ax.freq == self.freq:
return self.asfreq()
raise IncompatibleFrequency(
f"Frequency {ax.freq} cannot be resampled to {self.freq}, "
"as they are not sub or super periods"
)
|
Downsample the cython defined function.
Parameters
----------
how : string / cython mapped function
**kwargs : kw args passed to how function
|
python
|
pandas/core/resample.py
| 2,210
|
[
"self",
"how"
] | false
| 5
| 6.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
any
|
def any(self, axis: AxisInt = 0, *args, **kwargs) -> bool:
"""
Tests whether at least one of elements evaluate True
Returns
-------
any : bool
See Also
--------
numpy.any
"""
nv.validate_any(args, kwargs)
values = self.sp_values
if len(values) != len(self) and np.any(self.fill_value):
return True
return values.any().item()
|
Tests whether at least one of elements evaluate True
Returns
-------
any : bool
See Also
--------
numpy.any
|
python
|
pandas/core/arrays/sparse/array.py
| 1,484
|
[
"self",
"axis"
] |
bool
| true
| 3
| 6.56
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
update
|
function update(object, path, updater) {
return object == null ? object : baseUpdate(object, path, castFunction(updater));
}
|
This method is like `_.set` except that accepts `updater` to produce the
value to set. Use `_.updateWith` to customize `path` creation. The `updater`
is invoked with one argument: (value).
**Note:** This method mutates `object`.
@static
@memberOf _
@since 4.6.0
@category Object
@param {Object} object The object to modify.
@param {Array|string} path The path of the property to set.
@param {Function} updater The function to produce the updated value.
@returns {Object} Returns `object`.
@example
var object = { 'a': [{ 'b': { 'c': 3 } }] };
_.update(object, 'a[0].b.c', function(n) { return n * n; });
console.log(object.a[0].b.c);
// => 9
_.update(object, 'x[0].y.z', function(n) { return n ? n + 1 : 0; });
console.log(object.x[0].y.z);
// => 0
|
javascript
|
lodash.js
| 13,976
|
[
"object",
"path",
"updater"
] | false
| 2
| 7.44
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
findEditorByConvention
|
public static @Nullable PropertyEditor findEditorByConvention(@Nullable Class<?> targetType) {
if (targetType == null || targetType.isArray() || unknownEditorTypes.contains(targetType)) {
return null;
}
ClassLoader cl = targetType.getClassLoader();
if (cl == null) {
try {
cl = ClassLoader.getSystemClassLoader();
if (cl == null) {
return null;
}
}
catch (Throwable ex) {
// for example, AccessControlException on Google App Engine
return null;
}
}
String targetTypeName = targetType.getName();
String editorName = targetTypeName + "Editor";
try {
Class<?> editorClass = cl.loadClass(editorName);
if (editorClass != null) {
if (!PropertyEditor.class.isAssignableFrom(editorClass)) {
unknownEditorTypes.add(targetType);
return null;
}
return (PropertyEditor) instantiateClass(editorClass);
}
// Misbehaving ClassLoader returned null instead of ClassNotFoundException
// - fall back to unknown editor type registration below
}
catch (ClassNotFoundException ex) {
// Ignore - fall back to unknown editor type registration below
}
unknownEditorTypes.add(targetType);
return null;
}
|
Find a JavaBeans PropertyEditor following the 'Editor' suffix convention
(for example, "mypackage.MyDomainClass" → "mypackage.MyDomainClassEditor").
<p>Compatible to the standard JavaBeans convention as implemented by
{@link java.beans.PropertyEditorManager} but isolated from the latter's
registered default editors for primitive types.
@param targetType the type to find an editor for
@return the corresponding editor, or {@code null} if none found
|
java
|
spring-beans/src/main/java/org/springframework/beans/BeanUtils.java
| 552
|
[
"targetType"
] |
PropertyEditor
| true
| 10
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
shouldHeartbeatNow
|
public boolean shouldHeartbeatNow() {
MemberState state = state();
return state == MemberState.ACKNOWLEDGING || state == MemberState.LEAVING || state == MemberState.JOINING;
}
|
@return True if the member should send heartbeat to the coordinator without waiting for
the interval.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java
| 692
|
[] | true
| 3
| 8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
dispatch
|
public void dispatch() {
// iterate by index to avoid concurrent modification exceptions
for (int i = 0; i < listeners.size(); i++) {
listeners.get(i).dispatch();
}
}
|
Dispatches all events enqueued prior to this call, serially and in order, for every listener.
<p>Note: this method is idempotent and safe to call from any thread
|
java
|
android/guava/src/com/google/common/util/concurrent/ListenerCallQueue.java
| 118
|
[] |
void
| true
| 2
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
groupMetadata
|
public ConsumerGroupMetadata groupMetadata() {
return groupMetadata;
}
|
Return the consumer group metadata.
@return the current consumer group metadata
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java
| 1,010
|
[] |
ConsumerGroupMetadata
| true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
all
|
public KafkaFuture<Map<String, UserScramCredentialsDescription>> all() {
final KafkaFutureImpl<Map<String, UserScramCredentialsDescription>> retval = new KafkaFutureImpl<>();
dataFuture.whenComplete((data, throwable) -> {
if (throwable != null) {
retval.completeExceptionally(throwable);
} else {
/* Check to make sure every individual described user succeeded. Note that a successfully described user
* is one that appears with *either* a NONE error code or a RESOURCE_NOT_FOUND error code. The
* RESOURCE_NOT_FOUND means the client explicitly requested a describe of that particular user but it could
* not be described because it does not exist; such a user will not appear as a key in the returned map.
*/
Optional<DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult> optionalFirstFailedDescribe =
data.results().stream().filter(result ->
result.errorCode() != Errors.NONE.code() && result.errorCode() != Errors.RESOURCE_NOT_FOUND.code()).findFirst();
if (optionalFirstFailedDescribe.isPresent()) {
retval.completeExceptionally(Errors.forCode(optionalFirstFailedDescribe.get().errorCode()).exception(optionalFirstFailedDescribe.get().errorMessage()));
} else {
Map<String, UserScramCredentialsDescription> retvalMap = new HashMap<>();
data.results().forEach(userResult ->
retvalMap.put(userResult.user(), new UserScramCredentialsDescription(userResult.user(),
getScramCredentialInfosFor(userResult))));
retval.complete(retvalMap);
}
}
});
return retval;
}
|
@return a future for the results of all described users with map keys (one per user) being consistent with the
contents of the list returned by {@link #users()}. The future will complete successfully only if all such user
descriptions complete successfully.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResult.java
| 54
|
[] | true
| 4
| 8.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
getNeighbor
|
public static final String getNeighbor(String geohash, int level, int dx, int dy) {
int cell = BASE_32_STRING.indexOf(geohash.charAt(level - 1));
// Decoding the Geohash bit pattern to determine grid coordinates
int x0 = cell & 1; // first bit of x
int y0 = cell & 2; // first bit of y
int x1 = cell & 4; // second bit of x
int y1 = cell & 8; // second bit of y
int x2 = cell & 16; // third bit of x
// combine the bitpattern to grid coordinates.
// note that the semantics of x and y are swapping
// on each level
int x = x0 + (x1 / 2) + (x2 / 4);
int y = (y0 / 2) + (y1 / 4);
if (level == 1) {
// Root cells at north (namely "bcfguvyz") or at
// south (namely "0145hjnp") do not have neighbors
// in north/south direction
if ((dy < 0 && y == 0) || (dy > 0 && y == 3)) {
return null;
} else {
return Character.toString(encodeBase32(x + dx, y + dy));
}
} else {
// define grid coordinates for next level
final int nx = ((level % 2) == 1) ? (x + dx) : (x + dy);
final int ny = ((level % 2) == 1) ? (y + dy) : (y + dx);
// if the defined neighbor has the same parent a the current cell
// encode the cell directly. Otherwise find the cell next to this
// cell recursively. Since encoding wraps around within a cell
// it can be encoded here.
// xLimit and YLimit must always be respectively 7 and 3
// since x and y semantics are swapping on each level.
if (nx >= 0 && nx <= 7 && ny >= 0 && ny <= 3) {
return geohash.substring(0, level - 1) + encodeBase32(nx, ny);
} else {
String neighbor = getNeighbor(geohash, level - 1, dx, dy);
return (neighbor != null) ? neighbor + encodeBase32(nx, ny) : neighbor;
}
}
}
|
Calculate the geohash of a neighbor of a geohash
@param geohash the geohash of a cell
@param level level of the geohash
@param dx delta of the first grid coordinate (must be -1, 0 or +1)
@param dy delta of the second grid coordinate (must be -1, 0 or +1)
@return geohash of the defined cell
|
java
|
libs/geo/src/main/java/org/elasticsearch/geometry/utils/Geohash.java
| 202
|
[
"geohash",
"level",
"dx",
"dy"
] |
String
| true
| 13
| 7.28
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
open_resource
|
def open_resource(
self, resource: str, mode: str = "rb", encoding: str | None = "utf-8"
) -> t.IO[t.AnyStr]:
"""Open a resource file relative to :attr:`root_path` for reading. The
blueprint-relative equivalent of the app's :meth:`~.Flask.open_resource`
method.
:param resource: Path to the resource relative to :attr:`root_path`.
:param mode: Open the file in this mode. Only reading is supported,
valid values are ``"r"`` (or ``"rt"``) and ``"rb"``.
:param encoding: Open the file with this encoding when opening in text
mode. This is ignored when opening in binary mode.
.. versionchanged:: 3.1
Added the ``encoding`` parameter.
"""
if mode not in {"r", "rt", "rb"}:
raise ValueError("Resources can only be opened for reading.")
path = os.path.join(self.root_path, resource)
if mode == "rb":
return open(path, mode) # pyright: ignore
return open(path, mode, encoding=encoding)
|
Open a resource file relative to :attr:`root_path` for reading. The
blueprint-relative equivalent of the app's :meth:`~.Flask.open_resource`
method.
:param resource: Path to the resource relative to :attr:`root_path`.
:param mode: Open the file in this mode. Only reading is supported,
valid values are ``"r"`` (or ``"rt"``) and ``"rb"``.
:param encoding: Open the file with this encoding when opening in text
mode. This is ignored when opening in binary mode.
.. versionchanged:: 3.1
Added the ``encoding`` parameter.
|
python
|
src/flask/blueprints.py
| 104
|
[
"self",
"resource",
"mode",
"encoding"
] |
t.IO[t.AnyStr]
| true
| 3
| 6.4
|
pallets/flask
| 70,946
|
sphinx
| false
|
applyEmptySelectionError
|
function applyEmptySelectionError(
error: EmptySelectionError,
argsTree: ArgumentsRenderingTree,
globalOmit?: GlobalOmitOptions,
) {
const subSelection = argsTree.arguments.getDeepSubSelectionValue(error.selectionPath)?.asObject()
if (subSelection) {
const omit = subSelection.getField('omit')?.value.asObject()
if (omit) {
applyEmptySelectionErrorOmit(error, argsTree, omit)
return
}
if (subSelection.hasField('select')) {
applyEmptySelectionErrorSelect(error, argsTree)
return
}
}
if (globalOmit?.[uncapitalize(error.outputType.name)]) {
applyEmptySelectionErrorGlobalOmit(error, argsTree)
return
}
// should never happen, but in case it does
argsTree.addErrorMessage(() => `Unknown field at "${error.selectionPath.join('.')} selection"`)
}
|
Given the validation error and arguments rendering tree, applies corresponding
formatting to an error tree and adds all relevant messages.
@param error
@param args
|
typescript
|
packages/client/src/runtime/core/errorRendering/applyValidationError.ts
| 140
|
[
"error",
"argsTree",
"globalOmit?"
] | false
| 5
| 6.08
|
prisma/prisma
| 44,834
|
jsdoc
| false
|
|
setCount
|
@CanIgnoreReturnValue
public Builder<E> setCount(E element, int count) {
contents.setCount(checkNotNull(element), count);
return this;
}
|
Adds or removes the necessary occurrences of an element such that the element attains the
desired count.
@param element the element to add or remove occurrences of
@param count the desired count of the element in this multiset
@return this {@code Builder} object
@throws NullPointerException if {@code element} is null
@throws IllegalArgumentException if {@code count} is negative
|
java
|
guava/src/com/google/common/collect/ImmutableMultiset.java
| 546
|
[
"element",
"count"
] | true
| 1
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
|
insecure
|
public static RandomUtils insecure() {
return INSECURE;
}
|
Gets the singleton instance based on {@link ThreadLocalRandom#current()}; <b>which is not cryptographically
secure</b>; use {@link #secure()} to use an algorithms/providers specified in the
{@code securerandom.strongAlgorithms} {@link Security} property.
<p>
The method {@link ThreadLocalRandom#current()} is called on-demand.
</p>
@return the singleton instance based on {@link ThreadLocalRandom#current()}.
@see ThreadLocalRandom#current()
@see #secure()
@since 3.17.0
|
java
|
src/main/java/org/apache/commons/lang3/RandomUtils.java
| 102
|
[] |
RandomUtils
| true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
make_run_fn
|
def make_run_fn(
self, *input_tensors: torch.Tensor, out: torch.Tensor
) -> Callable[[], None]:
"""
Create a function to run the CUDA kernel with the given input and output tensors.
"""
self.ensure_dll_loaded()
self.update_workspace_size()
args = [c_void_p(tensor.data_ptr()) for tensor in list(input_tensors) + [out]]
autotuning_log.debug(
"make_run_fn: self.kernel_name=%s, self.source_file=%s, self.hash_key=%s, self.DLL=%s, args=%s, self.extra_args=%s",
self.kernel_name,
self.source_file,
self.hash_key,
self.DLL,
args,
self.extra_args,
)
stream_ptr = c_void_p(torch.cuda.current_stream().cuda_stream)
run_method = getattr(self.DLL, self.kernel_name)
workspace_ptr = c_void_p(0)
if self.workspace_size > 0:
self.workspace = torch.zeros(
(self.workspace_size + 7) // 8,
dtype=torch.float64,
device=out.device,
)
workspace_ptr = c_void_p(self.workspace.data_ptr())
# Generate partial function.
ret = functools.partial(
run_method,
*args,
*self.extra_args,
None, # null workspace size ptr
workspace_ptr, # set workspace ptr,
stream_ptr,
)
# sanity check to make sure we cleanup run fn properly
try:
ret()
except RuntimeError as e:
err_msg = str(e)
def raise_runtime_error():
raise RuntimeError(err_msg)
self.cleanup_run_fn()
return raise_runtime_error
return ret
|
Create a function to run the CUDA kernel with the given input and output tensors.
|
python
|
torch/_inductor/autotune_process.py
| 827
|
[
"self",
"out"
] |
Callable[[], None]
| true
| 2
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
constant_name
|
def constant_name(self, name: str, device_override: Optional[torch.device]) -> str:
"""
We AOT copy constants to the devices they are needed on.
If device_override doesn't match the constant's device, then
copy it and return a different name.
"""
if self.constants[name].device == device_override or device_override is None:
return name
with torch.utils._python_dispatch._disable_current_modes():
# caller might have OrderedSet fake tensor mode which will create a fake tensor
# when calling .to, so unset modes here
non_dup_const_name = self.allocate_non_dup_const_name(
f"{name}_{device_override.type}{device_override.index or 0}",
self.constants[name].to(device_override),
)
assert non_dup_const_name in self.constants, (
f"{non_dup_const_name} should be in V.graph.constants already"
)
# register device-copied buffers and parameters to graph as well
# to codegen correct torch::aot_inductor::ConstantType for them rather than `Unknown`
if any(
name == normalize_name(buffer_name)
for buffer_name in self.named_buffers
):
self.named_buffers[non_dup_const_name] = self.constants[
non_dup_const_name
]
if any(
name == normalize_name(param_name)
for param_name in self.named_parameters
):
self.named_parameters[non_dup_const_name] = self.constants[
non_dup_const_name
]
return non_dup_const_name
|
We AOT copy constants to the devices they are needed on.
If device_override doesn't match the constant's device, then
copy it and return a different name.
|
python
|
torch/_inductor/graph.py
| 1,114
|
[
"self",
"name",
"device_override"
] |
str
| true
| 6
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
sendListOffsetsRequests
|
private RequestFuture<ListOffsetResult> sendListOffsetsRequests(final Map<TopicPartition, Long> timestampsToSearch,
final boolean requireTimestamps) {
final Set<TopicPartition> partitionsToRetry = new HashSet<>();
Map<Node, Map<TopicPartition, ListOffsetsPartition>> timestampsToSearchByNode =
groupListOffsetRequests(timestampsToSearch, partitionsToRetry);
if (timestampsToSearchByNode.isEmpty())
return RequestFuture.failure(new StaleMetadataException());
final RequestFuture<ListOffsetResult> listOffsetRequestsFuture = new RequestFuture<>();
final Map<TopicPartition, ListOffsetData> fetchedTimestampOffsets = new HashMap<>();
final AtomicInteger remainingResponses = new AtomicInteger(timestampsToSearchByNode.size());
for (Map.Entry<Node, Map<TopicPartition, ListOffsetsPartition>> entry : timestampsToSearchByNode.entrySet()) {
RequestFuture<ListOffsetResult> future = sendListOffsetRequest(entry.getKey(), entry.getValue(), requireTimestamps);
future.addListener(new RequestFutureListener<>() {
@Override
public void onSuccess(ListOffsetResult partialResult) {
synchronized (listOffsetRequestsFuture) {
fetchedTimestampOffsets.putAll(partialResult.fetchedOffsets);
partitionsToRetry.addAll(partialResult.partitionsToRetry);
if (remainingResponses.decrementAndGet() == 0 && !listOffsetRequestsFuture.isDone()) {
ListOffsetResult result = new ListOffsetResult(fetchedTimestampOffsets, partitionsToRetry);
listOffsetRequestsFuture.complete(result);
}
}
}
@Override
public void onFailure(RuntimeException e) {
synchronized (listOffsetRequestsFuture) {
if (!listOffsetRequestsFuture.isDone())
listOffsetRequestsFuture.raise(e);
}
}
});
}
return listOffsetRequestsFuture;
}
|
Search the offsets by target times for the specified partitions.
@param timestampsToSearch the mapping between partitions and target time
@param requireTimestamps true if we should fail with an UnsupportedVersionException if the broker does
not support fetching precise timestamps for offsets
@return A response which can be polled to obtain the corresponding timestamps and offsets.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetFetcher.java
| 300
|
[
"timestampsToSearch",
"requireTimestamps"
] | true
| 5
| 7.28
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
binaryBeMsb0ToHexDigit
|
public static char binaryBeMsb0ToHexDigit(final boolean[] src, final int srcPos) {
// JDK 9: Objects.checkIndex(int index, int length)
if (Integer.compareUnsigned(srcPos, src.length) >= 0) {
// Throw the correct exception
if (src.length == 0) {
throw new IllegalArgumentException("Cannot convert an empty array.");
}
throw new IndexOutOfBoundsException(srcPos + " is not within array length " + src.length);
}
// Little-endian bit 0 position
final int pos = src.length - 1 - srcPos;
if (3 <= pos && src[pos - 3]) {
if (src[pos - 2]) {
if (src[pos - 1]) {
return src[pos] ? 'f' : 'e';
}
return src[pos] ? 'd' : 'c';
}
if (src[pos - 1]) {
return src[pos] ? 'b' : 'a';
}
return src[pos] ? '9' : '8';
}
if (2 <= pos && src[pos - 2]) {
if (src[pos - 1]) {
return src[pos] ? '7' : '6';
}
return src[pos] ? '5' : '4';
}
if (1 <= pos && src[pos - 1]) {
return src[pos] ? '3' : '2';
}
return src[pos] ? '1' : '0';
}
|
Converts a binary (represented as boolean array) in big-endian MSB0 bit ordering to a hexadecimal digit.
<p>
(1, 0, 0, 0) with srcPos = 0 is converted as follow: '8' (1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0) with srcPos = 2 is converted to '5'.
</p>
@param src the binary to convert.
@param srcPos the position of the LSB to start the conversion.
@return a hexadecimal digit representing the selected bits.
@throws IllegalArgumentException if {@code src} is empty.
@throws NullPointerException if {@code src} is {@code null}.
@throws IndexOutOfBoundsException if {@code srcPos} is outside the array.
|
java
|
src/main/java/org/apache/commons/lang3/Conversion.java
| 107
|
[
"src",
"srcPos"
] | true
| 21
| 6.88
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
iteratee
|
function iteratee(func) {
return baseIteratee(typeof func == 'function' ? func : baseClone(func, CLONE_DEEP_FLAG));
}
|
Creates a function that invokes `func` with the arguments of the created
function. If `func` is a property name, the created function returns the
property value for a given element. If `func` is an array or object, the
created function returns `true` for elements that contain the equivalent
source properties, otherwise it returns `false`.
@static
@since 4.0.0
@memberOf _
@category Util
@param {*} [func=_.identity] The value to convert to a callback.
@returns {Function} Returns the callback.
@example
var users = [
{ 'user': 'barney', 'age': 36, 'active': true },
{ 'user': 'fred', 'age': 40, 'active': false }
];
// The `_.matches` iteratee shorthand.
_.filter(users, _.iteratee({ 'user': 'barney', 'active': true }));
// => [{ 'user': 'barney', 'age': 36, 'active': true }]
// The `_.matchesProperty` iteratee shorthand.
_.filter(users, _.iteratee(['user', 'fred']));
// => [{ 'user': 'fred', 'age': 40 }]
// The `_.property` iteratee shorthand.
_.map(users, _.iteratee('user'));
// => ['barney', 'fred']
// Create custom iteratee shorthands.
_.iteratee = _.wrap(_.iteratee, function(iteratee, func) {
return !_.isRegExp(func) ? iteratee(func) : function(string) {
return func.test(string);
};
});
_.filter(['abc', 'def'], /ef/);
// => ['def']
|
javascript
|
lodash.js
| 15,642
|
[
"func"
] | false
| 2
| 6.96
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
ljust
|
def ljust(a, width, fillchar=' '):
"""
Return an array with the elements of `a` left-justified in a
string of length `width`.
Parameters
----------
a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
width : array_like, with any integer dtype
The length of the resulting strings, unless ``width < str_len(a)``.
fillchar : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
Optional character to use for padding (default is space).
Returns
-------
out : ndarray
Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
depending on input types
See Also
--------
str.ljust
Notes
-----
While it is possible for ``a`` and ``fillchar`` to have different dtypes,
passing a non-ASCII character in ``fillchar`` when ``a`` is of dtype "S"
is not allowed, and a ``ValueError`` is raised.
Examples
--------
>>> import numpy as np
>>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
>>> np.strings.ljust(c, width=3)
array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')
>>> np.strings.ljust(c, width=9)
array(['aAaAaA ', ' aA ', 'abBABba '], dtype='<U9')
"""
width = np.asanyarray(width)
if not np.issubdtype(width.dtype, np.integer):
raise TypeError(f"unsupported type {width.dtype} for operand 'width'")
a = np.asanyarray(a)
fillchar = np.asanyarray(fillchar)
if np.any(str_len(fillchar) != 1):
raise TypeError(
"The fill character must be exactly one character long")
if np.result_type(a, fillchar).char == "T":
return _ljust(a, width, fillchar)
fillchar = fillchar.astype(a.dtype, copy=False)
width = np.maximum(str_len(a), width)
shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape)
out_dtype = f"{a.dtype.char}{width.max()}"
out = np.empty_like(a, shape=shape, dtype=out_dtype)
return _ljust(a, width, fillchar, out=out)
|
Return an array with the elements of `a` left-justified in a
string of length `width`.
Parameters
----------
a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
width : array_like, with any integer dtype
The length of the resulting strings, unless ``width < str_len(a)``.
fillchar : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
Optional character to use for padding (default is space).
Returns
-------
out : ndarray
Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
depending on input types
See Also
--------
str.ljust
Notes
-----
While it is possible for ``a`` and ``fillchar`` to have different dtypes,
passing a non-ASCII character in ``fillchar`` when ``a`` is of dtype "S"
is not allowed, and a ``ValueError`` is raised.
Examples
--------
>>> import numpy as np
>>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
>>> np.strings.ljust(c, width=3)
array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')
>>> np.strings.ljust(c, width=9)
array(['aAaAaA ', ' aA ', 'abBABba '], dtype='<U9')
|
python
|
numpy/_core/strings.py
| 762
|
[
"a",
"width",
"fillchar"
] | false
| 4
| 7.68
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
getAccessibleConstructor
|
public static <T> Constructor<T> getAccessibleConstructor(final Class<T> cls, final Class<?>... parameterTypes) {
Objects.requireNonNull(cls, "cls");
try {
return getAccessibleConstructor(cls.getConstructor(parameterTypes));
} catch (final NoSuchMethodException e) {
return null;
}
}
|
Finds a constructor given a class and signature, checking accessibility.
<p>
This finds the constructor and ensures that it is accessible. The constructor signature must match the parameter types exactly.
</p>
@param <T> the constructor type.
@param cls the class to find a constructor for, not {@code null}.
@param parameterTypes the array of parameter types, {@code null} treated as empty.
@return the constructor, {@code null} if no matching accessible constructor found.
@throws NullPointerException if {@code cls} is {@code null}
@throws SecurityException Thrown if a security manager is present and the caller's class loader is not the same as or an ancestor of the class loader
for the class and invocation of {@link SecurityManager#checkPackageAccess(String)} denies access to the package of the
class.
@see Class#getConstructor
@see #getAccessibleConstructor(java.lang.reflect.Constructor)
|
java
|
src/main/java/org/apache/commons/lang3/reflect/ConstructorUtils.java
| 65
|
[
"cls"
] | true
| 2
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
singleQuoteMatcher
|
public static StrMatcher singleQuoteMatcher() {
return SINGLE_QUOTE_MATCHER;
}
|
Gets the matcher for the single quote character.
@return the matcher for a single quote.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrMatcher.java
| 322
|
[] |
StrMatcher
| true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
union_with_duplicates
|
def union_with_duplicates(
lvals: ArrayLike | Index, rvals: ArrayLike | Index
) -> ArrayLike | Index:
"""
Extracts the union from lvals and rvals with respect to duplicates and nans in
both arrays.
Parameters
----------
lvals: np.ndarray or ExtensionArray
left values which is ordered in front.
rvals: np.ndarray or ExtensionArray
right values ordered after lvals.
Returns
-------
np.ndarray or ExtensionArray
Containing the unsorted union of both arrays.
Notes
-----
Caller is responsible for ensuring lvals.dtype == rvals.dtype.
"""
from pandas import Series
l_count = value_counts_internal(lvals, dropna=False)
r_count = value_counts_internal(rvals, dropna=False)
l_count, r_count = l_count.align(r_count, fill_value=0)
final_count = np.maximum(l_count.values, r_count.values)
final_count = Series(final_count, index=l_count.index, dtype="int", copy=False)
if isinstance(lvals, ABCMultiIndex) and isinstance(rvals, ABCMultiIndex):
unique_vals = lvals.append(rvals).unique()
else:
if isinstance(lvals, ABCIndex):
lvals = lvals._values
if isinstance(rvals, ABCIndex):
rvals = rvals._values
# error: List item 0 has incompatible type "Union[ExtensionArray,
# ndarray[Any, Any], Index]"; expected "Union[ExtensionArray,
# ndarray[Any, Any]]"
combined = concat_compat([lvals, rvals]) # type: ignore[list-item]
unique_vals = unique(combined)
unique_vals = ensure_wrapped_if_datetimelike(unique_vals)
repeats = final_count.reindex(unique_vals).values
return np.repeat(unique_vals, repeats)
|
Extracts the union from lvals and rvals with respect to duplicates and nans in
both arrays.
Parameters
----------
lvals: np.ndarray or ExtensionArray
left values which is ordered in front.
rvals: np.ndarray or ExtensionArray
right values ordered after lvals.
Returns
-------
np.ndarray or ExtensionArray
Containing the unsorted union of both arrays.
Notes
-----
Caller is responsible for ensuring lvals.dtype == rvals.dtype.
|
python
|
pandas/core/algorithms.py
| 1,604
|
[
"lvals",
"rvals"
] |
ArrayLike | Index
| true
| 6
| 6.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
getFile
|
private File getFile(String patternLocation, Resource resource) {
try {
return resource.getFile();
}
catch (Exception ex) {
throw new IllegalStateException(
"Unable to load config data resource from pattern '" + patternLocation + "'", ex);
}
}
|
Get a multiple resources from a location pattern.
@param location the location pattern
@param type the type of resource to return
@return the resources
@see #isPattern(String)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/LocationResourceLoader.java
| 137
|
[
"patternLocation",
"resource"
] |
File
| true
| 2
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
beanOfTypeIncludingAncestors
|
public static <T> T beanOfTypeIncludingAncestors(ListableBeanFactory lbf, Class<T> type)
throws BeansException {
Map<String, T> beansOfType = beansOfTypeIncludingAncestors(lbf, type);
return uniqueBean(type, beansOfType);
}
|
Return a single bean of the given type or subtypes, also picking up beans
defined in ancestor bean factories if the current bean factory is a
HierarchicalBeanFactory. Useful convenience method when we expect a
single bean and don't care about the bean name.
<p>Does consider objects created by FactoryBeans, which means that FactoryBeans
will get initialized. If the object created by the FactoryBean doesn't match,
the raw FactoryBean itself will be matched against the type.
<p>This version of {@code beanOfTypeIncludingAncestors} automatically includes
prototypes and FactoryBeans.
<p><b>Note: Beans of the same name will take precedence at the 'lowest' factory level,
i.e. such beans will be returned from the lowest factory that they are being found in,
hiding corresponding beans in ancestor factories.</b> This feature allows for
'replacing' beans by explicitly choosing the same bean name in a child factory;
the bean in the ancestor factory won't be visible then, not even for by-type lookups.
@param lbf the bean factory
@param type the type of bean to match
@return the matching bean instance
@throws NoSuchBeanDefinitionException if no bean of the given type was found
@throws NoUniqueBeanDefinitionException if more than one bean of the given type was found
@throws BeansException if the bean could not be created
@see #beansOfTypeIncludingAncestors(ListableBeanFactory, Class)
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/BeanFactoryUtils.java
| 410
|
[
"lbf",
"type"
] |
T
| true
| 1
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
init_backend_registration
|
def init_backend_registration() -> None:
"""
Register the backend for different devices, including the scheduling
for kernel code generation and the host side wrapper code generation.
"""
from .cpp import CppScheduling
from .cpp_wrapper_cpu import CppWrapperCpu
from .cpp_wrapper_cpu_array_ref import CppWrapperCpuArrayRef
from .cpp_wrapper_gpu import CppWrapperGpu
from .cpp_wrapper_mps import CppWrapperMps
from .cuda_combined_scheduling import CUDACombinedScheduling
from .halide import HalideScheduling
from .mps import MetalScheduling
from .pallas import PallasScheduling
from .python_wrapper_mtia import PythonWrapperMtia
from .triton import TritonScheduling
from .wrapper import PythonWrapperCodegen
from .wrapper_fxir import WrapperFxCodegen
if get_scheduling_for_device("cpu") is None:
cpu_backends = {
"cpp": CppScheduling,
"halide": HalideScheduling,
"triton": TritonScheduling,
"pallas": PallasScheduling,
}
register_backend_for_device(
"cpu",
lambda scheduling: cpu_backends[config.cpu_backend](scheduling),
PythonWrapperCodegen,
CppWrapperCpuArrayRef
if config.aot_inductor.allow_stack_allocation
else CppWrapperCpu,
WrapperFxCodegen,
)
if get_scheduling_for_device("cuda") is None:
# CUDACombinedScheduling combines Triton and CUDA C++ scheduling for CUDA devices via delegation
cuda_backends = {
"triton": CUDACombinedScheduling,
"halide": HalideScheduling,
"pallas": PallasScheduling,
}
register_backend_for_device(
"cuda",
lambda scheduling: cuda_backends[config.cuda_backend](scheduling),
PythonWrapperCodegen,
CppWrapperGpu,
WrapperFxCodegen,
)
if get_scheduling_for_device("xpu") is None:
register_backend_for_device(
"xpu",
TritonScheduling,
PythonWrapperCodegen,
CppWrapperGpu,
WrapperFxCodegen,
)
if get_scheduling_for_device("mps") is None:
register_backend_for_device(
"mps",
MetalScheduling,
PythonWrapperCodegen,
CppWrapperMps,
WrapperFxCodegen,
)
if get_scheduling_for_device("mtia") is None:
register_backend_for_device(
"mtia",
TritonScheduling,
PythonWrapperMtia,
CppWrapperGpu,
WrapperFxCodegen,
)
private_backend = torch._C._get_privateuse1_backend_name()
if (
private_backend != "privateuseone"
and get_scheduling_for_device(private_backend) is None
):
from torch.utils.backend_registration import _get_custom_mod_func
try:
device_scheduling = _get_custom_mod_func("Scheduling")
wrapper_codegen = _get_custom_mod_func("PythonWrapperCodegen")
cpp_wrapper_codegen = _get_custom_mod_func("CppWrapperCodegen")
fx_wrapper_codegen = _get_custom_mod_func("WrapperFxCodegen")
if device_scheduling and wrapper_codegen and cpp_wrapper_codegen:
register_backend_for_device(
private_backend,
device_scheduling,
wrapper_codegen,
cpp_wrapper_codegen,
fx_wrapper_codegen,
)
except RuntimeError:
pass
|
Register the backend for different devices, including the scheduling
for kernel code generation and the host side wrapper code generation.
|
python
|
torch/_inductor/codegen/common.py
| 500
|
[] |
None
| true
| 12
| 6.8
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
parse
|
static @Nullable PrivateKey parse(String text) {
return parse(text, null);
}
|
Parse a private key from the specified string.
@param text the text to parse
@return the parsed private key
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/pem/PemPrivateKeyParser.java
| 194
|
[
"text"
] |
PrivateKey
| true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
equals
|
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
return ObjectUtils.nullSafeEquals(this.value, ((BindResult<?>) obj).value);
}
|
Return the object that was bound, or throw an exception to be created by the
provided supplier if no value has been bound.
@param <X> the type of the exception to be thrown
@param exceptionSupplier the supplier which will return the exception to be thrown
@return the present value
@throws X if there is no value present
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/BindResult.java
| 134
|
[
"obj"
] | true
| 4
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
execute
|
@Override
protected void execute(Terminal terminal, OptionSet options, ProcessInfo processInfo) throws Exception {
if (subcommands.isEmpty()) {
throw new IllegalStateException("No subcommands configured");
}
// .values(...) returns an unmodifiable list
final List<String> args = new ArrayList<>(arguments.values(options));
if (args.isEmpty()) {
throw new MissingCommandException();
}
String subcommandName = args.remove(0);
Command subcommand = subcommands.get(subcommandName);
if (subcommand == null) {
throw new UserException(ExitCodes.USAGE, "Unknown command [" + subcommandName + "]");
}
for (final KeyValuePair pair : this.settingOption.values(options)) {
args.add("-E" + pair);
}
subcommand.mainWithoutErrorHandling(args.toArray(new String[0]), terminal, processInfo);
}
|
Construct the multi-command with the specified command description and runnable to execute before main is invoked.
@param description the multi-command description
|
java
|
libs/cli/src/main/java/org/elasticsearch/cli/MultiCommand.java
| 73
|
[
"terminal",
"options",
"processInfo"
] |
void
| true
| 4
| 6.56
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
is_local_package_version
|
def is_local_package_version(version_suffix: str) -> bool:
"""
Check if the given version suffix is a local version suffix. A local version suffix will contain a
plus sign ('+'). This function does not guarantee that the version suffix is a valid local version suffix.
Args:
version_suffix (str): The version suffix to check.
Returns:
bool: True if the version suffix contains a '+', False otherwise. Please note this does not
guarantee that the version suffix is a valid local version suffix.
"""
if version_suffix and ("+" in version_suffix):
return True
return False
|
Check if the given version suffix is a local version suffix. A local version suffix will contain a
plus sign ('+'). This function does not guarantee that the version suffix is a valid local version suffix.
Args:
version_suffix (str): The version suffix to check.
Returns:
bool: True if the version suffix contains a '+', False otherwise. Please note this does not
guarantee that the version suffix is a valid local version suffix.
|
python
|
dev/breeze/src/airflow_breeze/utils/version_utils.py
| 26
|
[
"version_suffix"
] |
bool
| true
| 3
| 8.08
|
apache/airflow
| 43,597
|
google
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.