function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
readContext
|
function readContext(Context: ReactContext<mixed>) {
const dispatcher = SharedInternals.H;
if (dispatcher === null) {
// This wasn't being minified but we're going to retire this package anyway.
// eslint-disable-next-line react-internal/prod-error-codes
throw new Error(
'react-cache: read and preload may only be called from within a ' +
"component's render. They are not supported in event handlers or " +
'lifecycle methods.',
);
}
return dispatcher.readContext(Context);
}
|
Copyright (c) Meta Platforms, Inc. and affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
@flow
|
javascript
|
packages/react-cache/src/ReactCacheOld.js
| 50
|
[] | false
| 2
| 6.4
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
CONST_SHORT
|
/**
 * Returns the provided value unchanged. This prevents javac from inlining a
 * constant field at call sites, so jars referring to the field need not be
 * recompiled when its value changes.
 *
 * @param v the short literal (as an int) value to return
 * @return the value {@code v}, unchanged, narrowed to {@code short}
 * @throws IllegalArgumentException if {@code v} does not fit in a short,
 *         that is, is smaller than -32768 or larger than 32767
 */
public static short CONST_SHORT(final int v) {
    if (v < Short.MIN_VALUE || v > Short.MAX_VALUE) {
        // Fixed copy-paste from the byte variant: this method validates a
        // short range, so the message must say "short literal", not "byte".
        throw new IllegalArgumentException("Supplied value must be a valid short literal between -32768 and 32767: [" + v + "]");
    }
    return (short) v;
}
|
Returns the provided value unchanged. This can prevent javac from inlining a constant field, e.g.,
<pre>
public final static short MAGIC_SHORT = ObjectUtils.CONST_SHORT(127);
</pre>
This way any jars that refer to this field do not have to recompile themselves if the field's value changes at some future date.
@param v the short literal (as an int) value to return.
@throws IllegalArgumentException if the value passed to v is larger than a short, that is, smaller than -32768 or larger than 32767.
@return the short v, unchanged.
@since 3.2
|
java
|
src/main/java/org/apache/commons/lang3/ObjectUtils.java
| 511
|
[
"v"
] | true
| 3
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
_check_X_y
|
def _check_X_y(self, X, y=None, should_be_fitted=True):
"""Validate X and y and make extra check.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data set.
`X` is checked only if `check_X` is not `None` (default is None).
y : array-like of shape (n_samples), default=None
The corresponding target, by default `None`.
`y` is checked only if `check_y` is not `None` (default is None).
should_be_fitted : bool, default=True
Whether or not the classifier should be already fitted.
By default True.
Returns
-------
X, y
"""
if should_be_fitted:
check_is_fitted(self)
if self.check_X is not None:
params = {} if self.check_X_params is None else self.check_X_params
checked_X = self.check_X(X, **params)
if isinstance(checked_X, (bool, np.bool_)):
assert checked_X
else:
X = checked_X
if y is not None and self.check_y is not None:
params = {} if self.check_y_params is None else self.check_y_params
checked_y = self.check_y(y, **params)
if isinstance(checked_y, (bool, np.bool_)):
assert checked_y
else:
y = checked_y
return X, y
|
Validate X and y and make extra check.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data set.
`X` is checked only if `check_X` is not `None` (default is None).
y : array-like of shape (n_samples), default=None
The corresponding target, by default `None`.
`y` is checked only if `check_y` is not `None` (default is None).
should_be_fitted : bool, default=True
Whether or not the classifier should be already fitted.
By default True.
Returns
-------
X, y
|
python
|
sklearn/utils/_mocking.py
| 157
|
[
"self",
"X",
"y",
"should_be_fitted"
] | false
| 11
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
filled
|
def filled(self, fill_value=None):
    """
    Return a copy of self, with masked values filled with a given value.
    **However**, if there are no masked values to fill, self will be
    returned instead as an ndarray.

    Parameters
    ----------
    fill_value : array_like, optional
        The value to use for invalid entries. Can be scalar or non-scalar.
        If non-scalar, the resulting ndarray must be broadcastable over
        input array. Default is None, in which case, the `fill_value`
        attribute of the array is used instead.

    Returns
    -------
    filled_array : ndarray
        A copy of ``self`` with invalid entries replaced by *fill_value*
        (be it the function argument or the attribute of ``self``), or
        ``self`` itself as an ndarray if there are no invalid entries to
        be replaced.

    Notes
    -----
    The result is **not** a MaskedArray!

    Examples
    --------
    >>> import numpy as np
    >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)
    >>> x.filled()
    array([ 1, 2, -999, 4, -999])
    >>> x.filled(fill_value=1000)
    array([ 1, 2, 1000, 4, 1000])
    >>> type(x.filled())
    <class 'numpy.ndarray'>

    Subclassing is preserved. This means that if, e.g., the data part of
    the masked array is a recarray, `filled` returns a recarray:

    >>> x = np.array([(-1, 2), (-3, 4)], dtype='i8,i8').view(np.recarray)
    >>> m = np.ma.array(x, mask=[(True, False), (False, True)])
    >>> m.filled()
    rec.array([(999999, 2), ( -3, 999999)],
    dtype=[('f0', '<i8'), ('f1', '<i8')])
    """
    m = self._mask
    # No mask at all: return the underlying data directly (no copy is made).
    if m is nomask:
        return self._data
    if fill_value is None:
        fill_value = self.fill_value
    else:
        # Normalize/validate a caller-supplied fill value against our dtype.
        fill_value = _check_fill_value(fill_value, self.dtype)
    # The masked singleton has no real data; the fill value *is* the result.
    if self is masked_singleton:
        return np.asanyarray(fill_value)
    if m.dtype.names is not None:
        # Structured dtype: fill each field of the copy recursively.
        result = self._data.copy('K')
        _recursive_filled(result, self._mask, fill_value)
    elif not m.any():
        # Mask exists but nothing is actually masked: no copy needed.
        return self._data
    else:
        result = self._data.copy('K')
        try:
            np.copyto(result, fill_value, where=m)
        except (TypeError, AttributeError):
            # copyto cannot cast fill_value into result's dtype; fall back
            # to an object-dtype merge via np.choose.
            fill_value = narray(fill_value, dtype=object)
            d = result.astype(object)
            result = np.choose(m, (d, fill_value))
        except IndexError:
            # ok, if scalar
            if self._data.shape:
                raise
            elif m:
                result = np.array(fill_value, dtype=self.dtype)
            else:
                result = self._data
    return result
|
Return a copy of self, with masked values filled with a given value.
**However**, if there are no masked values to fill, self will be
returned instead as an ndarray.
Parameters
----------
fill_value : array_like, optional
The value to use for invalid entries. Can be scalar or non-scalar.
If non-scalar, the resulting ndarray must be broadcastable over
input array. Default is None, in which case, the `fill_value`
attribute of the array is used instead.
Returns
-------
filled_array : ndarray
A copy of ``self`` with invalid entries replaced by *fill_value*
(be it the function argument or the attribute of ``self``), or
``self`` itself as an ndarray if there are no invalid entries to
be replaced.
Notes
-----
The result is **not** a MaskedArray!
Examples
--------
>>> import numpy as np
>>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)
>>> x.filled()
array([ 1, 2, -999, 4, -999])
>>> x.filled(fill_value=1000)
array([ 1, 2, 1000, 4, 1000])
>>> type(x.filled())
<class 'numpy.ndarray'>
Subclassing is preserved. This means that if, e.g., the data part of
the masked array is a recarray, `filled` returns a recarray:
>>> x = np.array([(-1, 2), (-3, 4)], dtype='i8,i8').view(np.recarray)
>>> m = np.ma.array(x, mask=[(True, False), (False, True)])
>>> m.filled()
rec.array([(999999, 2), ( -3, 999999)],
dtype=[('f0', '<i8'), ('f1', '<i8')])
|
python
|
numpy/ma/core.py
| 3,856
|
[
"self",
"fill_value"
] | false
| 11
| 7.76
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
forUriStringOrNull
|
/**
 * Parses the host portion of a URI (bracketed IPv6 or bare IPv4) and returns
 * the corresponding InetAddress, or {@code null} if the text does not parse
 * to an address of the expected length.
 */
private static @Nullable InetAddress forUriStringOrNull(String hostAddr, boolean parseScope) {
    checkNotNull(hostAddr);
    // Brackets signal IPv6 (16 bytes); anything else is expected to be IPv4 (4 bytes).
    final boolean bracketed = hostAddr.startsWith("[") && hostAddr.endsWith("]");
    final String ipString = bracketed ? hostAddr.substring(1, hostAddr.length() - 1) : hostAddr;
    final int expectBytes = bracketed ? 16 : 4;
    // Parse, then verify the byte length matches the expected IP version.
    Scope scope = parseScope ? new Scope() : null;
    byte[] addr = ipStringToBytes(ipString, scope);
    if (addr == null || addr.length != expectBytes) {
        return null;
    }
    return bytesToInetAddress(addr, (scope != null) ? scope.scope : null);
}
|
Returns an InetAddress representing the literal IPv4 or IPv6 host portion of a URL, encoded in
the format specified by RFC 3986 section 3.2.2.
<p>This method is similar to {@link InetAddresses#forString(String)}, however, it requires that
IPv6 addresses are surrounded by square brackets.
<p>This method is the inverse of {@link InetAddresses#toUriString(java.net.InetAddress)}.
<p>This method accepts non-ASCII digits, for example {@code "192.168.0.1"} (those are fullwidth
characters). That is consistent with {@link InetAddress}, but not with various RFCs. If you
want to accept ASCII digits only, you can use something like {@code
CharMatcher.ascii().matchesAllOf(ipString)}.
@param hostAddr an RFC 3986 section 3.2.2 encoded IPv4 or IPv6 address
@return an InetAddress representing the address in {@code hostAddr}
@throws IllegalArgumentException if {@code hostAddr} is not a valid IPv4 address, or IPv6
address surrounded by square brackets, or if the address has a scope ID that fails
validation against the interfaces on the machine (as required by Java's {@link
InetAddress})
|
java
|
android/guava/src/com/google/common/net/InetAddresses.java
| 617
|
[
"hostAddr",
"parseScope"
] |
InetAddress
| true
| 7
| 7.76
|
google/guava
| 51,352
|
javadoc
| false
|
findAnnotation
|
/**
 * Finds a merged annotation of the given type, looking first on the factory
 * method (if any), then on the bean type, and finally — for AOP proxies — on
 * the ultimate target class. Returns the synthesized annotation or {@code null}.
 */
private static <A extends Annotation> @Nullable A findAnnotation(@Nullable Object instance, Class<?> type,
		@Nullable Method factory, Class<A> annotationType) {
	MergedAnnotation<A> merged = (factory != null) ? findMergedAnnotation(factory, annotationType)
			: MergedAnnotation.missing();
	if (!merged.isPresent()) {
		merged = findMergedAnnotation(type, annotationType);
	}
	if (!merged.isPresent() && AopUtils.isAopProxy(instance)) {
		// Proxies may hide annotations declared on the target class.
		merged = MergedAnnotations.from(AopUtils.getTargetClass(instance), SearchStrategy.TYPE_HIERARCHY)
				.get(annotationType);
	}
	return merged.isPresent() ? merged.synthesize() : null;
}
|
Return a {@link ConfigurationPropertiesBean @ConfigurationPropertiesBean} instance
for the given bean details or {@code null} if the bean is not a
{@link ConfigurationProperties @ConfigurationProperties} object. Annotations are
considered both on the bean itself, as well as any factory method (for example a
{@link Bean @Bean} method).
@param applicationContext the source application context
@param bean the bean to consider
@param beanName the bean name
@return a configuration properties bean or {@code null} if neither the bean nor
factory method are annotated with
{@link ConfigurationProperties @ConfigurationProperties}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/ConfigurationPropertiesBean.java
| 267
|
[
"instance",
"type",
"factory",
"annotationType"
] |
A
| true
| 6
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
codegen_additional_funcs
|
def codegen_additional_funcs(self) -> None:
    """
    Emit one thread-safe lazy-singleton getter per MPS shader library.

    For every distinct MPS kernel name seen in ``self.lines``, a C++
    function ``get_<lib>_handle()`` is spliced into ``self.prefix``. The
    getter lazily creates the shader library from ``<lib>_source``, looks
    up the ``"generated_kernel"`` function, and keeps the library alive
    via a ``std::unique_ptr`` with a custom deleter (RAII cleanup), while
    returning the kernel function handle.
    """
    # Collect each distinct MPS kernel/library name exactly once.
    shader_libraries: OrderedSet[str] = OrderedSet()
    for entry in self.lines:
        if not isinstance(entry, KernelCallLine) or entry.device.type != "mps":
            continue
        if entry.kernel_name in self._used_kernel_names:
            continue
        self._used_kernel_names.add(entry.kernel_name)
        shader_libraries.add(entry.kernel_name)
    # NOTE: For shimified version, we expect the shader source constant to be
    # generated by the existing MPS shader generation process, but instead of
    # instantiating the DynamicMetalShaderLibrary directly, we'll use our shim
    # functions. The existing codegen should produce something like:
    #   const char* mps_lib_0_source = R"MTL(...shader_source...)MTL";
    # instead of:
    #   at::native::mps::DynamicMetalShaderLibrary mps_lib_0(R"MTL(...)MTL");
    # Generate thread-safe lazy singleton with RAII for each library.
    for lib_name in shader_libraries:
        self.prefix.splice(f"""
            AOTIMetalKernelFunctionHandle get_{lib_name}_handle() {{
                static auto kernel_handle = []() {{
                    AOTIMetalShaderLibraryHandle lib_handle = nullptr;
                    AOTIMetalKernelFunctionHandle kern_handle = nullptr;
                    aoti_torch_mps_create_shader_library({lib_name}_source, &lib_handle);
                    aoti_torch_mps_get_kernel_function(lib_handle, "generated_kernel", &kern_handle);
                    // RAII wrapper with custom deleter
                    auto lib_deleter = [](AOTIMetalShaderLibraryHandle h) {{
                        if (h) aoti_torch_mps_delete_shader_library(h);
                    }};
                    using LibDeleter = decltype(lib_deleter);
                    using LibPtr = std::unique_ptr<AOTIMetalShaderLibraryOpaque, LibDeleter>;
                    // Return pair of kernel handle and library smart pointer for cleanup
                    return std::make_pair(kern_handle, LibPtr(lib_handle, lib_deleter));
                }}();
                return kernel_handle.first;
            }}
        """)
|
Generate thread-safe lazy singleton pattern for MPS shader libraries with RAII cleanup.
The generated code will look like:
```
AOTIMetalKernelFunctionHandle get_mps_lib_0_handle() {
static auto kernel_handle = []() {
AOTIMetalShaderLibraryHandle lib_handle = nullptr;
AOTIMetalKernelFunctionHandle kern_handle = nullptr;
aoti_torch_mps_create_shader_library(mps_lib_0_source, &lib_handle);
aoti_torch_mps_get_kernel_function(lib_handle, "generated_kernel", &kern_handle);
// RAII wrapper with custom deleter
auto lib_deleter = [](AOTIMetalShaderLibraryHandle h) {
if (h) aoti_torch_mps_delete_shader_library(h);
};
using LibDeleter = decltype(lib_deleter);
using LibPtr = std::unique_ptr<AOTIMetalShaderLibraryOpaque, LibDeleter>;
// Return pair of kernel handle and library smart pointer for cleanup
return std::make_pair(kern_handle, LibPtr(lib_handle, lib_deleter));
}();
return kernel_handle.first;
}
```
|
python
|
torch/_inductor/codegen/cpp_wrapper_mps.py
| 226
|
[
"self"
] |
None
| true
| 6
| 6.4
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
hessian
|
def hessian(func, argnums=0):
    """
    Compute the Hessian of ``func`` with respect to the argument(s) at
    ``argnums`` using a forward-over-reverse strategy.

    Forward-over-reverse (``jacfwd(jacrev(func))``) is a good default for
    performance; other compositions such as ``jacfwd(jacfwd(func))`` or
    ``jacrev(jacrev(func))`` also compute Hessians.

    Args:
        func (function): A Python function that takes one or more arguments,
            one of which must be a Tensor, and returns one or more Tensors.
        argnums (int or tuple[int, ...]): Optional; which argument(s) to
            differentiate with respect to. Default: 0.

    Returns:
        A function with the same inputs as ``func`` that returns the Hessian
        of ``func`` with respect to the arg(s) at ``argnums``.

    .. note::
        This API may error with "forward-mode AD not implemented for
        operator X". If so, please file a bug report; ``jacrev(jacrev(func))``
        is an alternative with better operator coverage.

    Example (R^N -> R^1 gives an N x N Hessian):

        >>> from torch.func import hessian
        >>> def f(x):
        >>>     return x.sin().sum()
        >>>
        >>> x = torch.randn(5)
        >>> hess = hessian(f)(x)  # equivalent to jacfwd(jacrev(f))(x)
        >>> assert torch.allclose(hess, torch.diag(-x.sin()))
    """
    # Reverse-mode Jacobian first, then differentiate it in forward mode.
    reverse_jac = jacrev(func, argnums)
    return jacfwd(reverse_jac, argnums)
|
Computes the Hessian of ``func`` with respect to the arg(s) at index
``argnum`` via a forward-over-reverse strategy.
The forward-over-reverse strategy (composing ``jacfwd(jacrev(func))``) is
a good default for good performance. It is possible to compute Hessians
through other compositions of :func:`jacfwd` and :func:`jacrev` like
``jacfwd(jacfwd(func))`` or ``jacrev(jacrev(func))``.
Args:
func (function): A Python function that takes one or more arguments,
one of which must be a Tensor, and returns one or more Tensors
argnums (int or tuple[int, ...]): Optional, integer or tuple of integers,
saying which arguments to get the Hessian with respect to.
Default: 0.
Returns:
Returns a function that takes in the same inputs as ``func`` and
returns the Hessian of ``func`` with respect to the arg(s) at
``argnums``.
.. note::
You may see this API error out with "forward-mode AD not implemented
for operator X". If so, please file a bug report and we will prioritize it.
An alternative is to use ``jacrev(jacrev(func))``, which has better
operator coverage.
A basic usage with a R^N -> R^1 function gives a N x N Hessian:
>>> from torch.func import hessian
>>> def f(x):
>>> return x.sin().sum()
>>>
>>> x = torch.randn(5)
>>> hess = hessian(f)(x) # equivalent to jacfwd(jacrev(f))(x)
>>> assert torch.allclose(hess, torch.diag(-x.sin()))
|
python
|
torch/_functorch/eager_transforms.py
| 1,312
|
[
"func",
"argnums"
] | false
| 1
| 7.52
|
pytorch/pytorch
| 96,034
|
google
| false
|
|
of
|
/**
 * Wraps the given value (which may be {@code null}) in a new
 * {@link InvocationResult} instance.
 */
public static <R> InvocationResult<R> of(@Nullable R value) {
	InvocationResult<R> result = new InvocationResult<>(value);
	return result;
}
|
Create a new {@link InvocationResult} instance with the specified value.
@param value the value (may be {@code null})
@param <R> the result type
@return an {@link InvocationResult}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/util/LambdaSafe.java
| 432
|
[
"value"
] | true
| 1
| 6.48
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
size
|
@Override
public int size() {
    // NOTE(review): returns the cached count in totalSize; relies on that
    // field being kept up to date by mutators elsewhere in this class —
    // not visible from here.
    return totalSize;
}
|
Creates the collection of values for an explicitly provided key. By default, it simply calls
{@link #createCollection()}, which is the correct behavior for most implementations. The {@link
LinkedHashMultimap} class overrides it.
@param key key to associate with values in the collection
@return an empty collection of values
|
java
|
android/guava/src/com/google/common/collect/AbstractMapBasedMultimap.java
| 175
|
[] | true
| 1
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
isCancelled
|
@Override
public boolean isCancelled() {
    if (!isDependant) {
        return completableFuture.isCancelled();
    }
    // For a dependent future we cannot just delegate to
    // CompletableFuture.isCancelled(): that would break historical KafkaFuture
    // behaviour, because CompletableFuture#isCancelled() only checks for the
    // exception being a CancellationException, whereas here it will be a
    // CompletionException wrapping a CancellationException (compensating for
    // CompletableFuture's CompletionException unwrapping in other methods).
    try {
        completableFuture.getNow(null);
        return false;
    } catch (Exception e) {
        return e instanceof CompletionException
                && e.getCause() instanceof CancellationException;
    }
}
|
Returns true if this CompletableFuture was cancelled before it completed normally.
|
java
|
clients/src/main/java/org/apache/kafka/common/internals/KafkaFutureImpl.java
| 212
|
[] | true
| 4
| 6.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
remove
|
/**
 * Removes all occurrences of {@code remove} from {@code str} by delegating to
 * {@code Strings.CS.remove(String, String)}.
 *
 * @param str the source String to search, may be null
 * @param remove the String to search for and remove, may be null
 * @return the string with occurrences removed, {@code null} for null input
 * @deprecated use {@code Strings.CS.remove(String, String)} directly.
 */
@Deprecated
public static String remove(final String str, final String remove) {
    return Strings.CS.remove(str, remove);
}
|
Removes all occurrences of a substring from within the source string.
<p>
A {@code null} source string will return {@code null}. An empty ("") source string will return the empty string. A {@code null} remove string will return
the source string. An empty ("") remove string will return the source string.
</p>
<pre>
StringUtils.remove(null, *) = null
StringUtils.remove("", *) = ""
StringUtils.remove(*, null) = *
StringUtils.remove(*, "") = *
StringUtils.remove("queued", "ue") = "qd"
StringUtils.remove("queued", "zz") = "queued"
</pre>
@param str the source String to search, may be null.
@param remove the String to search for and remove, may be null.
@return the substring with the string removed if found, {@code null} if null String input.
@since 2.1
@deprecated Use {@link Strings#remove(String, String) Strings.CS.remove(String, String)}.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 5,709
|
[
"str",
"remove"
] |
String
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
cat_safe
|
def cat_safe(list_of_columns: list[npt.NDArray[np.object_]], sep: str):
    """
    Auxiliary function for :meth:`str.cat`.

    Same signature as cat_core, but handles TypeErrors in concatenation, which
    happen if the arrays in list_of_columns have the wrong dtypes or content.

    Parameters
    ----------
    list_of_columns : list of numpy arrays
        List of arrays to be concatenated with sep;
        these arrays may not contain NaNs!
    sep : string
        The separator string for concatenating the columns.

    Returns
    -------
    nd.array
        The concatenation of list_of_columns with sep.

    Raises
    ------
    TypeError
        When a column contains non-string values, with a clearer message than
        the underlying concatenation error.
    """
    try:
        result = cat_core(list_of_columns, sep)
    except TypeError:
        # if there are any non-string values (wrong dtype or hidden behind
        # object dtype), np.sum will fail; catch and return with better message
        for column in list_of_columns:
            dtype = lib.infer_dtype(column, skipna=True)
            if dtype not in ["string", "empty"]:
                raise TypeError(
                    "Concatenation requires list-likes containing only "
                    "strings (or missing values). Offending values found in "
                    f"column {dtype}"
                ) from None
        # Every column looks string-like, so the TypeError came from something
        # else. Re-raise it: falling through would hit `return result` with
        # `result` unbound and raise a confusing UnboundLocalError instead.
        raise
    return result
|
Auxiliary function for :meth:`str.cat`.
Same signature as cat_core, but handles TypeErrors in concatenation, which
happen if the arrays in list_of columns have the wrong dtypes or content.
Parameters
----------
list_of_columns : list of numpy arrays
List of arrays to be concatenated with sep;
these arrays may not contain NaNs!
sep : string
The separator string for concatenating the columns.
Returns
-------
nd.array
The concatenation of list_of_columns with sep.
|
python
|
pandas/core/strings/accessor.py
| 3,861
|
[
"list_of_columns",
"sep"
] | true
| 3
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
_parse_subtype
|
def _parse_subtype(dtype: str) -> tuple[str, bool]:
"""
Parse a string to get the subtype
Parameters
----------
dtype : str
A string like
* Sparse[subtype]
* Sparse[subtype, fill_value]
Returns
-------
subtype : str
Raises
------
ValueError
When the subtype cannot be extracted.
"""
xpr = re.compile(r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$")
m = xpr.match(dtype)
has_fill_value = False
if m:
subtype = m.groupdict()["subtype"]
has_fill_value = bool(m.groupdict()["fill_value"])
elif dtype == "Sparse":
subtype = "float64"
else:
raise ValueError(f"Cannot parse {dtype}")
return subtype, has_fill_value
|
Parse a string to get the subtype
Parameters
----------
dtype : str
A string like
* Sparse[subtype]
* Sparse[subtype, fill_value]
Returns
-------
subtype : str
Raises
------
ValueError
When the subtype cannot be extracted.
|
python
|
pandas/core/dtypes/dtypes.py
| 1,979
|
[
"dtype"
] |
tuple[str, bool]
| true
| 4
| 6.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
remove
|
/**
 * Manually invalidates a key, clearing its entry from the cache.
 *
 * @param key the key to remove
 * @return true if the key existed in the cache and the entry was removed,
 *         false if it was not present
 */
boolean remove(K key);
|
Manually invalidate a key, clearing its entry from the cache.
@param key the key to remove
@return true if the key existed in the cache and the entry was removed or false if it was not present
|
java
|
clients/src/main/java/org/apache/kafka/common/cache/Cache.java
| 44
|
[
"key"
] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
chop
|
/**
 * Removes the last character from a String; a trailing {@code \r\n} pair is
 * removed as a unit. Returns {@code null} for {@code null} input and the
 * empty String for inputs shorter than two characters.
 */
public static String chop(final String str) {
    if (str == null) {
        return null;
    }
    final int length = str.length();
    if (length < 2) {
        return EMPTY;
    }
    final int cut = length - 1;
    final String shortened = str.substring(0, cut);
    // A trailing CRLF counts as one line terminator: drop both characters.
    if (str.charAt(cut) == CharUtils.LF && shortened.charAt(cut - 1) == CharUtils.CR) {
        return shortened.substring(0, cut - 1);
    }
    return shortened;
}
|
Removes the last character from a String.
<p>
If the String ends in {@code \r\n}, then remove both of them.
</p>
<pre>
StringUtils.chop(null) = null
StringUtils.chop("") = ""
StringUtils.chop("abc \r") = "abc "
StringUtils.chop("abc\n") = "abc"
StringUtils.chop("abc\r\n") = "abc"
StringUtils.chop("abc") = "ab"
StringUtils.chop("abc\nabc") = "abc\nab"
StringUtils.chop("a") = ""
StringUtils.chop("\r") = ""
StringUtils.chop("\n") = ""
StringUtils.chop("\r\n") = ""
</pre>
@param str the String to chop last character from, may be null.
@return String without last character, {@code null} if null String input.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 752
|
[
"str"
] |
String
| true
| 5
| 7.44
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
_reductions
|
def _reductions(
    func: Callable,
    values: np.ndarray,
    mask: npt.NDArray[np.bool_],
    *,
    skipna: bool = True,
    min_count: int = 0,
    axis: AxisInt | None = None,
    **kwargs,
):
    """
    Sum, mean or product for 1D masked array.

    Parameters
    ----------
    func : np.sum or np.prod
    values : np.ndarray
        Numpy array with the values (can be of any dtype that support the
        operation).
    mask : np.ndarray[bool]
        Boolean numpy array (True values indicate missing values).
    skipna : bool, default True
        Whether to skip NA.
    min_count : int, default 0
        The required number of valid values to perform the operation. If fewer
        than ``min_count`` non-NA values are present the result will be NA.
    axis : int, optional, default None
    """
    if skipna:
        below_count = check_below_min_count(values.shape, mask, min_count)
        if below_count and (axis is None or values.ndim == 1):
            return libmissing.NA
        if values.dtype == np.dtype(object):
            # object dtype does not support `where` without passing an
            # initial value, so drop the masked entries instead.
            return func(values[~mask], axis=axis, **kwargs)
        return func(values, where=~mask, axis=axis, **kwargs)
    # skipna=False: any masked entry (or too few values overall) gives NA.
    if mask.any() or check_below_min_count(values.shape, None, min_count):
        return libmissing.NA
    return func(values, axis=axis, **kwargs)
|
Sum, mean or product for 1D masked array.
Parameters
----------
func : np.sum or np.prod
values : np.ndarray
Numpy array with the values (can be of any dtype that support the
operation).
mask : np.ndarray[bool]
Boolean numpy array (True values indicate missing values).
skipna : bool, default True
Whether to skip NA.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
axis : int, optional, default None
|
python
|
pandas/core/array_algos/masked_reductions.py
| 26
|
[
"func",
"values",
"mask",
"skipna",
"min_count",
"axis"
] | true
| 10
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
buildEnabled
|
/**
 * Appends the given elements to the builder, wrapping each run of
 * {@link AnsiElement}s in a single escape sequence and emitting a trailing
 * reset when any encoding was written.
 */
private static void buildEnabled(StringBuilder sb, Object[] elements) {
	boolean inAnsiRun = false;
	boolean encoded = false;
	for (Object element : elements) {
		if (element instanceof AnsiElement) {
			encoded = true;
			// Start a new escape sequence, or join onto the current run.
			sb.append(inAnsiRun ? ENCODE_JOIN : ENCODE_START);
			inAnsiRun = true;
		}
		else if (inAnsiRun) {
			// Close the escape sequence before plain content.
			sb.append(ENCODE_END);
			inAnsiRun = false;
		}
		sb.append(element);
	}
	if (encoded) {
		sb.append(inAnsiRun ? ENCODE_JOIN : ENCODE_START);
		sb.append(RESET);
		sb.append(ENCODE_END);
	}
}
|
Create a new ANSI string from the specified elements. Any {@link AnsiElement}s will
be encoded as required.
@param elements the elements to encode
@return a string of the encoded elements
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ansi/AnsiOutput.java
| 109
|
[
"sb",
"elements"
] |
void
| true
| 6
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
isNative
|
/**
 * Checks whether `value` is a pristine native function. Throws instead of
 * answering when a core-js shim is detected, because core-js deliberately
 * defeats reliable native-function detection.
 */
function isNative(value) {
  if (!isMaskable(value)) {
    return baseIsNative(value);
  }
  throw new Error(CORE_ERROR_TEXT);
}
|
Checks if `value` is a pristine native function.
**Note:** This method can't reliably detect native functions in the presence
of the core-js package because core-js circumvents this kind of detection.
Despite multiple requests, the core-js maintainer has made it clear: any
attempt to fix the detection will be obstructed. As a result, we're left
with little choice but to throw an error. Unfortunately, this also affects
packages, like [babel-polyfill](https://www.npmjs.com/package/babel-polyfill),
which rely on core-js.
@static
@memberOf _
@since 3.0.0
@category Lang
@param {*} value The value to check.
@returns {boolean} Returns `true` if `value` is a native function,
else `false`.
@example
_.isNative(Array.prototype.push);
// => true
_.isNative(_);
// => false
|
javascript
|
lodash.js
| 12,032
|
[
"value"
] | false
| 2
| 7.2
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
identity
|
def identity(cls, domain=None, window=None, symbol='x'):
    """Return the series representing the identity map ``p(x) == x``.

    Parameters
    ----------
    domain : {None, array_like}, optional
        ``[beg, end]`` endpoints of the domain. The class domain is used
        when None. Default is None.
    window : {None, array_like}, optional
        ``[beg, end]`` endpoints of the window. The class window is used
        when None. Default is None.
    symbol : str, optional
        Symbol representing the independent variable. Default is 'x'.

    Returns
    -------
    new_series : series
        Series representing the identity.
    """
    dom = cls.domain if domain is None else domain
    win = cls.window if window is None else window
    # Map the window onto the domain and build the corresponding line.
    off, scl = pu.mapparms(win, dom)
    coefficients = cls._line(off, scl)
    return cls(coefficients, dom, win, symbol)
|
Identity function.
If ``p`` is the returned series, then ``p(x) == x`` for all
values of x.
Parameters
----------
domain : {None, array_like}, optional
If given, the array must be of the form ``[beg, end]``, where
``beg`` and ``end`` are the endpoints of the domain. If None is
given then the class domain is used. The default is None.
window : {None, array_like}, optional
If given, the resulting array must be of the form
``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
the window. If None is given then the class window is used. The
default is None.
symbol : str, optional
Symbol representing the independent variable. Default is 'x'.
Returns
-------
new_series : series
Series of representing the identity.
|
python
|
numpy/polynomial/_polybase.py
| 1,080
|
[
"cls",
"domain",
"window",
"symbol"
] | false
| 3
| 6.08
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
clone
|
/**
 * Shallow-clones the given array, or returns {@code null} for {@code null}
 * input. The elements themselves are not cloned, so there is no special
 * handling for multi-dimensional arrays.
 */
public static <T> T[] clone(final T[] array) {
    if (array == null) {
        return null;
    }
    return array.clone();
}
|
Shallow clones an array or returns {@code null}.
<p>
The objects in the array are not cloned, thus there is no special handling for multi-dimensional arrays.
</p>
<p>
This method returns {@code null} for a {@code null} input array.
</p>
@param <T> the component type of the array.
@param array the array to shallow clone, may be {@code null}.
@return the cloned array, {@code null} if {@code null} input.
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 1,561
|
[
"array"
] | true
| 2
| 8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
do_teardown_appcontext
|
def do_teardown_appcontext(
self, ctx: AppContext, exc: BaseException | None = None
) -> None:
"""Called right before the application context is popped. Called by
:meth:`.AppContext.pop`.
This calls all functions decorated with :meth:`teardown_appcontext`.
Then the :data:`appcontext_tearing_down` signal is sent.
:param exc: An unhandled exception raised while the context was active.
Passed to each teardown function.
.. versionadded:: 0.9
"""
for func in reversed(self.teardown_appcontext_funcs):
self.ensure_sync(func)(exc)
appcontext_tearing_down.send(self, _async_wrapper=self.ensure_sync, exc=exc)
|
Called right before the application context is popped. Called by
:meth:`.AppContext.pop`.
This calls all functions decorated with :meth:`teardown_appcontext`.
Then the :data:`appcontext_tearing_down` signal is sent.
:param exc: An unhandled exception raised while the context was active.
Passed to each teardown function.
.. versionadded:: 0.9
|
python
|
src/flask/app.py
| 1,432
|
[
"self",
"ctx",
"exc"
] |
None
| true
| 2
| 6.4
|
pallets/flask
| 70,946
|
sphinx
| false
|
_color_brew
|
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360.0 / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.0
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [
(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0),
]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))), (int(255 * (g + m))), (int(255 * (b + m)))]
color_list.append(rgb)
return color_list
|
Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
|
python
|
sklearn/tree/_export.py
| 31
|
[
"n"
] | false
| 2
| 6.24
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
_unpack_zerodim_and_defer
|
def _unpack_zerodim_and_defer(method: F, name: str) -> F:
"""
Boilerplate for pandas conventions in arithmetic and comparison methods.
Ensure method returns NotImplemented when operating against "senior"
classes. Ensure zero-dimensional ndarrays are always unpacked.
Parameters
----------
method : binary method
name : str
Returns
-------
method
"""
is_logical = name.strip("_") in ["or", "xor", "and", "ror", "rxor", "rand"]
@wraps(method)
def new_method(self, other):
prio = getattr(other, "__pandas_priority__", None)
if prio is not None:
if prio > self.__pandas_priority__:
# e.g. other is DataFrame while self is Index/Series/EA
return NotImplemented
other = item_from_zerodim(other)
if (
isinstance(self, ABCExtensionArray)
and isinstance(other, list)
and not is_logical
):
# See GH#62423
other = sanitize_array(other, None)
other = ensure_wrapped_if_datetimelike(other)
return method(self, other)
# error: Incompatible return value type (got "Callable[[Any, Any], Any]",
# expected "F")
return new_method # type: ignore[return-value]
|
Boilerplate for pandas conventions in arithmetic and comparison methods.
Ensure method returns NotImplemented when operating against "senior"
classes. Ensure zero-dimensional ndarrays are always unpacked.
Parameters
----------
method : binary method
name : str
Returns
-------
method
|
python
|
pandas/core/ops/common.py
| 49
|
[
"method",
"name"
] |
F
| true
| 6
| 6.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
optBoolean
|
public boolean optBoolean(int index, boolean fallback) {
Object object = opt(index);
Boolean result = JSON.toBoolean(object);
return result != null ? result : fallback;
}
|
Returns the value at {@code index} if it exists and is a boolean or can be coerced
to a boolean. Returns {@code fallback} otherwise.
@param index the index to get the value from
@param fallback the fallback value
@return the value at {@code index} of {@code fallback}
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONArray.java
| 352
|
[
"index",
"fallback"
] | true
| 2
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
safe_mask
|
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask : array-like
Mask to be used on X.
Returns
-------
mask : ndarray
Array that is safe to use on X.
Examples
--------
>>> from sklearn.utils import safe_mask
>>> from scipy.sparse import csr_matrix
>>> data = csr_matrix([[1], [2], [3], [4], [5]])
>>> condition = [False, True, True, False, True]
>>> mask = safe_mask(data, condition)
>>> data[mask].toarray()
array([[2],
[3],
[5]])
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.signedinteger):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
|
Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask : array-like
Mask to be used on X.
Returns
-------
mask : ndarray
Array that is safe to use on X.
Examples
--------
>>> from sklearn.utils import safe_mask
>>> from scipy.sparse import csr_matrix
>>> data = csr_matrix([[1], [2], [3], [4], [5]])
>>> condition = [False, True, True, False, True]
>>> mask = safe_mask(data, condition)
>>> data[mask].toarray()
array([[2],
[3],
[5]])
|
python
|
sklearn/utils/_mask.py
| 77
|
[
"X",
"mask"
] | false
| 3
| 7.68
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
initialize
|
def initialize():
"""Initialize Airflow with all the settings from this file."""
configure_vars()
prepare_syspath_for_config_and_plugins()
policy_mgr = get_policy_plugin_manager()
# Load policy plugins _before_ importing airflow_local_settings, as Pluggy uses LIFO and we want anything
# in airflow_local_settings to take precendec
load_policy_plugins(policy_mgr)
import_local_settings()
configure_logging()
configure_adapters()
# The webservers import this file from models.py with the default settings.
# Configure secrets masker before masking secrets
_configure_secrets_masker()
is_worker = os.environ.get("_AIRFLOW__REEXECUTED_PROCESS") == "1"
if not os.environ.get("PYTHON_OPERATORS_VIRTUAL_ENV_MODE", None) and not is_worker:
configure_orm()
# mask the sensitive_config_values
conf.mask_secrets()
configure_action_logging()
# Run any custom runtime checks that needs to be executed for providers
run_providers_custom_runtime_checks()
# Ensure we close DB connections at scheduler and gunicorn worker terminations
atexit.register(dispose_orm)
|
Initialize Airflow with all the settings from this file.
|
python
|
airflow-core/src/airflow/settings.py
| 723
|
[] | false
| 3
| 6.4
|
apache/airflow
| 43,597
|
unknown
| false
|
|
appendLeaderChangeMessage
|
public void appendLeaderChangeMessage(long timestamp, LeaderChangeMessage leaderChangeMessage) {
if (partitionLeaderEpoch == RecordBatch.NO_PARTITION_LEADER_EPOCH) {
throw new IllegalArgumentException("Partition leader epoch must be valid, but get " + partitionLeaderEpoch);
}
appendControlRecord(
timestamp,
ControlRecordType.LEADER_CHANGE,
MessageUtil.toByteBufferAccessor(leaderChangeMessage, ControlRecordUtils.LEADER_CHANGE_CURRENT_VERSION).buffer()
);
}
|
Append a control record at the next sequential offset.
@param timestamp The record timestamp
@param type The control record type (cannot be UNKNOWN)
@param value The control record value
|
java
|
clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java
| 630
|
[
"timestamp",
"leaderChangeMessage"
] |
void
| true
| 2
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
maintenance
|
private void maintenance() {
if (length == elements.length) {
dedupAndCoalesce(true);
} else if (forceCopyElements) {
this.elements = Arrays.copyOf(elements, elements.length);
// we don't currently need to copy the counts array, because we don't use it directly
// in built ISMs
}
forceCopyElements = false;
}
|
Check if we need to do deduplication and coalescing, and if so, do it.
|
java
|
android/guava/src/com/google/common/collect/ImmutableSortedMultiset.java
| 508
|
[] |
void
| true
| 3
| 7.2
|
google/guava
| 51,352
|
javadoc
| false
|
predict
|
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you're doing.
Returns
-------
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The predicted classes, or the predict values.
"""
check_is_fitted(self)
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if is_classifier(self):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
class_type = self.classes_[0].dtype
predictions = np.zeros((n_samples, self.n_outputs_), dtype=class_type)
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1), axis=0
)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
|
Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you're doing.
Returns
-------
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The predicted classes, or the predict values.
|
python
|
sklearn/tree/_classes.py
| 509
|
[
"self",
"X",
"check_input"
] | false
| 8
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
buffer
|
public ByteBuffer buffer() {
return this.buffer;
}
|
Get the underlying buffer backing this record instance.
@return the buffer
|
java
|
clients/src/main/java/org/apache/kafka/common/record/LegacyRecord.java
| 270
|
[] |
ByteBuffer
| true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
correlate
|
def correlate(a, v, mode='valid', propagate_mask=True):
"""
Cross-correlation of two 1-dimensional sequences.
Parameters
----------
a, v : array_like
Input sequences.
mode : {'valid', 'same', 'full'}, optional
Refer to the `np.convolve` docstring. Note that the default
is 'valid', unlike `convolve`, which uses 'full'.
propagate_mask : bool
If True, then a result element is masked if any masked element contributes
towards it. If False, then a result element is only masked if no non-masked
element contribute towards it
Returns
-------
out : MaskedArray
Discrete cross-correlation of `a` and `v`.
See Also
--------
numpy.correlate : Equivalent function in the top-level NumPy module.
Examples
--------
Basic correlation:
>>> a = np.ma.array([1, 2, 3])
>>> v = np.ma.array([0, 1, 0])
>>> np.ma.correlate(a, v, mode='valid')
masked_array(data=[2],
mask=[False],
fill_value=999999)
Correlation with masked elements:
>>> a = np.ma.array([1, 2, 3], mask=[False, True, False])
>>> v = np.ma.array([0, 1, 0])
>>> np.ma.correlate(a, v, mode='valid', propagate_mask=True)
masked_array(data=[--],
mask=[ True],
fill_value=999999,
dtype=int64)
Correlation with different modes and mixed array types:
>>> a = np.ma.array([1, 2, 3])
>>> v = np.ma.array([0, 1, 0])
>>> np.ma.correlate(a, v, mode='full')
masked_array(data=[0, 1, 2, 3, 0],
mask=[False, False, False, False, False],
fill_value=999999)
"""
return _convolve_or_correlate(np.correlate, a, v, mode, propagate_mask)
|
Cross-correlation of two 1-dimensional sequences.
Parameters
----------
a, v : array_like
Input sequences.
mode : {'valid', 'same', 'full'}, optional
Refer to the `np.convolve` docstring. Note that the default
is 'valid', unlike `convolve`, which uses 'full'.
propagate_mask : bool
If True, then a result element is masked if any masked element contributes
towards it. If False, then a result element is only masked if no non-masked
element contribute towards it
Returns
-------
out : MaskedArray
Discrete cross-correlation of `a` and `v`.
See Also
--------
numpy.correlate : Equivalent function in the top-level NumPy module.
Examples
--------
Basic correlation:
>>> a = np.ma.array([1, 2, 3])
>>> v = np.ma.array([0, 1, 0])
>>> np.ma.correlate(a, v, mode='valid')
masked_array(data=[2],
mask=[False],
fill_value=999999)
Correlation with masked elements:
>>> a = np.ma.array([1, 2, 3], mask=[False, True, False])
>>> v = np.ma.array([0, 1, 0])
>>> np.ma.correlate(a, v, mode='valid', propagate_mask=True)
masked_array(data=[--],
mask=[ True],
fill_value=999999,
dtype=int64)
Correlation with different modes and mixed array types:
>>> a = np.ma.array([1, 2, 3])
>>> v = np.ma.array([0, 1, 0])
>>> np.ma.correlate(a, v, mode='full')
masked_array(data=[0, 1, 2, 3, 0],
mask=[False, False, False, False, False],
fill_value=999999)
|
python
|
numpy/ma/core.py
| 8,300
|
[
"a",
"v",
"mode",
"propagate_mask"
] | false
| 1
| 6
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
create_asset
|
def create_asset(
self,
*,
scheme: str | None = None,
uri: str | None = None,
name: str | None = None,
group: str | None = None,
asset_kwargs: dict | None = None,
asset_extra: dict[str, JsonValue] | None = None,
) -> Asset | None:
"""
Create an asset instance using the provided parameters.
This method attempts to create an asset instance using the given parameters.
It first checks if a URI or a name is provided and falls back to using the default asset factory
with the given URI or name if no other information is available.
If a scheme is provided but no URI or name, it attempts to find an asset factory that matches
the given scheme. If no such factory is found, it logs an error message and returns None.
If asset_kwargs is provided, it is used to pass additional parameters to the asset
factory. The asset_extra parameter is also passed to the factory as an ``extra`` parameter.
"""
if uri or name:
# Fallback to default factory using the provided URI
kwargs: dict[str, str | dict] = {}
if uri:
kwargs["uri"] = uri
if name:
kwargs["name"] = name
if group:
kwargs["group"] = group
if asset_extra:
kwargs["extra"] = asset_extra
return Asset(**kwargs) # type: ignore[call-overload]
if not scheme:
self.log.debug(
"Missing required parameter: either 'uri' or 'scheme' must be provided to create an asset."
)
return None
asset_factory = self._asset_factories.get(scheme)
if not asset_factory:
self.log.debug("Unsupported scheme: %s. Please provide a valid URI to create an asset.", scheme)
return None
asset_kwargs = asset_kwargs or {}
try:
return asset_factory(**asset_kwargs, extra=asset_extra)
except Exception as e:
self.log.debug("Failed to create asset. Skipping. Error: %s", e)
return None
|
Create an asset instance using the provided parameters.
This method attempts to create an asset instance using the given parameters.
It first checks if a URI or a name is provided and falls back to using the default asset factory
with the given URI or name if no other information is available.
If a scheme is provided but no URI or name, it attempts to find an asset factory that matches
the given scheme. If no such factory is found, it logs an error message and returns None.
If asset_kwargs is provided, it is used to pass additional parameters to the asset
factory. The asset_extra parameter is also passed to the factory as an ``extra`` parameter.
|
python
|
airflow-core/src/airflow/lineage/hook.py
| 148
|
[
"self",
"scheme",
"uri",
"name",
"group",
"asset_kwargs",
"asset_extra"
] |
Asset | None
| true
| 10
| 6
|
apache/airflow
| 43,597
|
unknown
| false
|
resolveFactoryMethodIfPossible
|
public void resolveFactoryMethodIfPossible(RootBeanDefinition mbd) {
Class<?> factoryClass;
boolean isStatic;
if (mbd.getFactoryBeanName() != null) {
factoryClass = this.beanFactory.getType(mbd.getFactoryBeanName());
isStatic = false;
}
else {
factoryClass = mbd.getBeanClass();
isStatic = true;
}
Assert.state(factoryClass != null, "Unresolvable factory class");
factoryClass = ClassUtils.getUserClass(factoryClass);
Method[] candidates = getCandidateMethods(factoryClass, mbd);
Method uniqueCandidate = null;
for (Method candidate : candidates) {
if ((!isStatic || isStaticCandidate(candidate, factoryClass)) && mbd.isFactoryMethod(candidate)) {
if (uniqueCandidate == null) {
uniqueCandidate = candidate;
}
else if (isParamMismatch(uniqueCandidate, candidate)) {
uniqueCandidate = null;
break;
}
}
}
mbd.factoryMethodToIntrospect = uniqueCandidate;
}
|
Resolve the factory method in the specified bean definition, if possible.
{@link RootBeanDefinition#getResolvedFactoryMethod()} can be checked for the result.
@param mbd the bean definition to check
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/ConstructorResolver.java
| 330
|
[
"mbd"
] |
void
| true
| 7
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
isParsableDecimal
|
private static boolean isParsableDecimal(final String str, final int beginIdx) {
// See https://docs.oracle.com/javase/specs/jls/se8/html/jls-3.html#jls-NonZeroDigit
int decimalPoints = 0;
boolean asciiNumeric = true;
for (int i = beginIdx; i < str.length(); i++) {
final char ch = str.charAt(i);
final boolean isDecimalPoint = ch == '.';
if (isDecimalPoint) {
decimalPoints++;
}
if (decimalPoints > 1 || !isDecimalPoint && !Character.isDigit(ch)) {
return false;
}
if (!isDecimalPoint) {
asciiNumeric &= CharUtils.isAsciiNumeric(ch);
}
if (decimalPoints > 0 && !asciiNumeric) {
return false;
}
}
return true;
}
|
Tests whether a number string is parsable as a decimal number or integer.
<ul>
<li>At most one decimal point is allowed.</li>
<li>No signs, exponents or type qualifiers are allowed.</li>
<li>Only ASCII digits are allowed if a decimal point is present.</li>
</ul>
@param str the String to test.
@param beginIdx the index to start checking from.
@return {@code true} if the string is a parsable number.
|
java
|
src/main/java/org/apache/commons/lang3/math/NumberUtils.java
| 752
|
[
"str",
"beginIdx"
] | true
| 9
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
microsFrac
|
public double microsFrac() {
return ((double) nanos()) / C1;
}
|
@return the number of {@link #timeUnit()} units this value contains
|
java
|
libs/core/src/main/java/org/elasticsearch/core/TimeValue.java
| 170
|
[] | true
| 1
| 6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
recordIfFinished
|
function recordIfFinished() {
if (state.keydown === EventPhase.Finished && state.input === EventPhase.Finished && state.render === EventPhase.Finished) {
performance.mark('inputlatency/end');
performance.measure('keydown', 'keydown/start', 'keydown/end');
performance.measure('input', 'input/start', 'input/end');
performance.measure('render', 'render/start', 'render/end');
performance.measure('inputlatency', 'inputlatency/start', 'inputlatency/end');
addMeasure('keydown', totalKeydownTime);
addMeasure('input', totalInputTime);
addMeasure('render', totalRenderTime);
addMeasure('inputlatency', totalInputLatencyTime);
// console.info(
// `input latency=${performance.getEntriesByName('inputlatency')[0].duration.toFixed(1)} [` +
// `keydown=${performance.getEntriesByName('keydown')[0].duration.toFixed(1)}, ` +
// `input=${performance.getEntriesByName('input')[0].duration.toFixed(1)}, ` +
// `render=${performance.getEntriesByName('render')[0].duration.toFixed(1)}` +
// `]`
// );
measurementsCount++;
reset();
}
}
|
Record the input latency sample if input handling and rendering are finished.
The challenge here is that we want to record the latency in such a way that it includes
also the layout and painting work the browser does during the animation frame task.
Simply scheduling a new task (via `setTimeout`) from the animation frame task would
schedule the new task at the end of the task queue (after other code that uses `setTimeout`),
so we need to use multiple strategies to make sure our task runs before others:
We schedule tasks (A and B):
- we schedule a task A (via a `setTimeout` call) when the input starts in `markInputStart`.
If the animation frame task is scheduled quickly by the browser, then task A has a very good
chance of being the very first task after the animation frame and thus will record the input latency.
- however, if the animation frame task is scheduled a bit later, then task A might execute
before the animation frame task. We therefore schedule another task B from `markRenderStart`.
We do direct checks in browser event handlers (C, D, E):
- if the browser has multiple keydown events queued up, they will be scheduled before the `setTimeout` tasks,
so we do a direct check in the keydown event handler (C).
- depending on timing, sometimes the animation frame is scheduled even before the `keyup` event, so we
do a direct check there too (E).
- the browser oftentimes emits a `selectionchange` event after an `input`, so we do a direct check there (D).
|
typescript
|
src/vs/base/browser/performance.ts
| 158
|
[] | false
| 4
| 6.08
|
microsoft/vscode
| 179,840
|
jsdoc
| false
|
|
apply
|
@Override
public Value apply(XContentParser parser, Context context) {
try {
return parse(parser, context);
} catch (IOException e) {
throw new XContentParseException(parser.getTokenLocation(), "[" + name + "] failed to parse object", e);
}
}
|
Parses a Value from the given {@link XContentParser}
@param parser the parser to build a value from
@param value the value to fill from the parser
@param context a context that is passed along to all declared field parsers
@return the parsed value
@throws IOException if an IOException occurs.
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/ObjectParser.java
| 383
|
[
"parser",
"context"
] |
Value
| true
| 2
| 7.6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
optimalNumOfBits
|
@VisibleForTesting
static long optimalNumOfBits(long n, double p) {
if (p == 0) {
p = Double.MIN_VALUE;
}
return (long) (-n * Math.log(p) / SQUARED_LOG_TWO);
}
|
Computes m (total bits of Bloom filter) which is expected to achieve, for the specified
expected insertions, the required false positive probability.
<p>See http://en.wikipedia.org/wiki/Bloom_filter#Probability_of_false_positives for the
formula.
@param n expected insertions (must be positive)
@param p false positive rate (must be 0 < p < 1)
|
java
|
android/guava/src/com/google/common/hash/BloomFilter.java
| 538
|
[
"n",
"p"
] | true
| 2
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
reset
|
public StrTokenizer reset() {
tokenPos = 0;
tokens = null;
return this;
}
|
Resets this tokenizer, forgetting all parsing and iteration already completed.
<p>
This method allows the same tokenizer to be reused for the same String.
</p>
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrTokenizer.java
| 856
|
[] |
StrTokenizer
| true
| 1
| 7.04
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
requestOffsetReset
|
public synchronized void requestOffsetReset(TopicPartition partition, AutoOffsetResetStrategy offsetResetStrategy) {
assignedState(partition).reset(offsetResetStrategy);
}
|
Unset the preferred read replica. This causes the fetcher to go back to the leader for fetches.
@param tp The topic partition
@return the removed preferred read replica if set, Empty otherwise.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
| 785
|
[
"partition",
"offsetResetStrategy"
] |
void
| true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
describe_instances
|
def describe_instances(self, filters: list | None = None, instance_ids: list | None = None):
"""
Describe EC2 instances, optionally applying filters and selective instance ids.
:param filters: List of filters to specify instances to describe
:param instance_ids: List of instance IDs to describe
:return: Response from EC2 describe_instances API
"""
filters = filters or []
instance_ids = instance_ids or []
self.log.info("Filters provided: %s", filters)
self.log.info("Instance ids provided: %s", instance_ids)
return self.conn.describe_instances(Filters=filters, InstanceIds=instance_ids)
|
Describe EC2 instances, optionally applying filters and selective instance ids.
:param filters: List of filters to specify instances to describe
:param instance_ids: List of instance IDs to describe
:return: Response from EC2 describe_instances API
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/ec2.py
| 134
|
[
"self",
"filters",
"instance_ids"
] | true
| 3
| 7.76
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
withChildren
|
ConfigDataEnvironmentContributor withChildren(ImportPhase importPhase,
List<ConfigDataEnvironmentContributor> children) {
Map<ImportPhase, List<ConfigDataEnvironmentContributor>> updatedChildren = new LinkedHashMap<>(this.children);
updatedChildren.put(importPhase, children);
if (importPhase == ImportPhase.AFTER_PROFILE_ACTIVATION) {
moveProfileSpecific(updatedChildren);
}
return new ConfigDataEnvironmentContributor(this.kind, this.location, this.resource,
this.fromProfileSpecificImport, this.propertySource, this.configurationPropertySource, this.properties,
this.configDataOptions, updatedChildren, this.conversionService);
}
|
Create a new {@link ConfigDataEnvironmentContributor} instance with a new set of
children for the given phase.
@param importPhase the import phase
@param children the new children
@return a new contributor instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataEnvironmentContributor.java
| 271
|
[
"importPhase",
"children"
] |
ConfigDataEnvironmentContributor
| true
| 2
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
available_if
|
def available_if(check):
"""An attribute that is available only if check returns a truthy value.
Parameters
----------
check : callable
When passed the object with the decorated method, this should return
a truthy value if the attribute is available, and either return False
or raise an AttributeError if not available.
Returns
-------
callable
Callable makes the decorated method available if `check` returns
a truthy value, otherwise the decorated method is unavailable.
Examples
--------
>>> from sklearn.utils.metaestimators import available_if
>>> class HelloIfEven:
... def __init__(self, x):
... self.x = x
...
... def _x_is_even(self):
... return self.x % 2 == 0
...
... @available_if(_x_is_even)
... def say_hello(self):
... print("Hello")
...
>>> obj = HelloIfEven(1)
>>> hasattr(obj, "say_hello")
False
>>> obj.x = 2
>>> hasattr(obj, "say_hello")
True
>>> obj.say_hello()
Hello
"""
return lambda fn: _AvailableIfDescriptor(fn, check, attribute_name=fn.__name__)
|
An attribute that is available only if check returns a truthy value.
Parameters
----------
check : callable
When passed the object with the decorated method, this should return
a truthy value if the attribute is available, and either return False
or raise an AttributeError if not available.
Returns
-------
callable
Callable makes the decorated method available if `check` returns
a truthy value, otherwise the decorated method is unavailable.
Examples
--------
>>> from sklearn.utils.metaestimators import available_if
>>> class HelloIfEven:
... def __init__(self, x):
... self.x = x
...
... def _x_is_even(self):
... return self.x % 2 == 0
...
... @available_if(_x_is_even)
... def say_hello(self):
... print("Hello")
...
>>> obj = HelloIfEven(1)
>>> hasattr(obj, "say_hello")
False
>>> obj.x = 2
>>> hasattr(obj, "say_hello")
True
>>> obj.say_hello()
Hello
|
python
|
sklearn/utils/_available_if.py
| 57
|
[
"check"
] | false
| 1
| 6.16
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
refreshOffsets
|
private void refreshOffsets(final Map<TopicPartition, OffsetAndMetadata> offsets,
final Throwable error,
final CompletableFuture<Void> result) {
if (error == null) {
// Ensure we only set positions for the partitions that still require one (ex. some partitions may have
// been assigned a position manually)
Map<TopicPartition, OffsetAndMetadata> offsetsToApply = offsetsForInitializingPartitions(offsets);
refreshCommittedOffsets(offsetsToApply, metadata, subscriptionState);
result.complete(null);
} else {
log.error("Error fetching committed offsets to update positions", error);
result.completeExceptionally(error);
}
}
|
Use the given committed offsets to update positions for partitions that still require it.
@param offsets Committed offsets to use to update positions for initializing partitions.
@param error Error received in response to the OffsetFetch request. Will be null if the request was successful.
@param result Future to complete once all positions have been updated with the given committed offsets
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManager.java
| 410
|
[
"offsets",
"error",
"result"
] |
void
| true
| 2
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
changeColor
|
function changeColor(colorType: 'foreground' | 'background' | 'underline', color?: RGBA | string | undefined): void {
if (colorType === 'foreground') {
customFgColor = color;
} else if (colorType === 'background') {
customBgColor = color;
} else if (colorType === 'underline') {
customUnderlineColor = color;
}
styleNames = styleNames.filter(style => style !== `code-${colorType}-colored`);
if (color !== undefined) {
styleNames.push(`code-${colorType}-colored`);
}
}
|
Change the foreground or background color by clearing the current color
and adding the new one.
@param colorType If `'foreground'`, will change the foreground color, if
`'background'`, will change the background color, and if `'underline'`
will set the underline color.
@param color Color to change to. If `undefined` or not provided,
will clear current color without adding a new one.
|
typescript
|
extensions/notebook-renderers/src/ansi.ts
| 114
|
[
"colorType",
"color?"
] | true
| 7
| 6.88
|
microsoft/vscode
| 179,840
|
jsdoc
| false
|
|
check_key_length
|
def check_key_length(columns: Index, key, value: DataFrame) -> None:
"""
Checks if a key used as indexer has the same length as the columns it is
associated with.
Parameters
----------
columns : Index The columns of the DataFrame to index.
key : A list-like of keys to index with.
value : DataFrame The value to set for the keys.
Raises
------
ValueError: If the length of key is not equal to the number of columns in value
or if the number of columns referenced by key is not equal to number
of columns.
"""
if columns.is_unique:
if len(value.columns) != len(key):
raise ValueError("Columns must be same length as key")
else:
# Missing keys in columns are represented as -1
if len(columns.get_indexer_non_unique(key)[0]) != len(value.columns):
raise ValueError("Columns must be same length as key")
|
Checks if a key used as indexer has the same length as the columns it is
associated with.
Parameters
----------
columns : Index The columns of the DataFrame to index.
key : A list-like of keys to index with.
value : DataFrame The value to set for the keys.
Raises
------
ValueError: If the length of key is not equal to the number of columns in value
or if the number of columns referenced by key is not equal to number
of columns.
|
python
|
pandas/core/indexers/utils.py
| 373
|
[
"columns",
"key",
"value"
] |
None
| true
| 5
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
addAndGet
|
public byte addAndGet(final byte operand) {
this.value += operand;
return value;
}
|
Increments this instance's value by {@code operand}; this method returns the value associated with the instance
immediately after the addition operation. This method is not thread safe.
@param operand the quantity to add, not null.
@return the value associated with this instance after adding the operand.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableByte.java
| 111
|
[
"operand"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
refreshTimeout
|
function refreshTimeout () {
// If the fastNowTimeout is already set and the Timer has the refresh()-
// method available, call it to refresh the timer.
// Some timer objects returned by setTimeout may not have a .refresh()
// method (e.g. mocked timers in tests).
if (fastNowTimeout?.refresh) {
fastNowTimeout.refresh()
// fastNowTimeout is not instantiated yet or refresh is not availabe,
// create a new Timer.
} else {
clearTimeout(fastNowTimeout)
fastNowTimeout = setTimeout(onTick, TICK_MS)
// If the Timer has an unref method, call it to allow the process to exit,
// if there are no other active handles. When using fake timers or mocked
// environments (like Jest), .unref() may not be defined,
fastNowTimeout?.unref()
}
}
|
The onTick function processes the fastTimers array.
@returns {void}
|
javascript
|
deps/undici/src/lib/util/timers.js
| 190
|
[] | false
| 3
| 6.8
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
getSession
|
public synchronized Session getSession() {
if (this.session == null) {
this.session = Session.getInstance(this.javaMailProperties);
}
return this.session;
}
|
Return the JavaMail {@code Session},
lazily initializing it if it hasn't been specified explicitly.
|
java
|
spring-context-support/src/main/java/org/springframework/mail/javamail/JavaMailSenderImpl.java
| 153
|
[] |
Session
| true
| 2
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getOriginalBeanName
|
public static String getOriginalBeanName(@Nullable String targetBeanName) {
Assert.isTrue(isScopedTarget(targetBeanName), () -> "bean name '" +
targetBeanName + "' does not refer to the target of a scoped proxy");
return targetBeanName.substring(TARGET_NAME_PREFIX_LENGTH);
}
|
Get the original bean name for the provided {@linkplain #getTargetBeanName
target bean name}.
@param targetBeanName the target bean name for the scoped proxy
@return the original bean name
@throws IllegalArgumentException if the supplied bean name does not refer
to the target of a scoped proxy
@since 5.1.10
@see #getTargetBeanName(String)
@see #isScopedTarget(String)
|
java
|
spring-aop/src/main/java/org/springframework/aop/scope/ScopedProxyUtils.java
| 128
|
[
"targetBeanName"
] |
String
| true
| 1
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
opj_uint_subs
|
static INLINE OPJ_UINT32 opj_uint_subs(OPJ_UINT32 a, OPJ_UINT32 b)
{
return (a >= b) ? a - b : 0;
}
|
Get the saturated difference of two unsigned integers
@return Returns saturated sum of a-b
|
cpp
|
3rdparty/openjpeg/openjp2/opj_intmath.h
| 102
|
[
"a",
"b"
] | true
| 2
| 6.16
|
opencv/opencv
| 85,374
|
doxygen
| false
|
|
iterable
|
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : bool
Return ``True`` if the object has an iterator method or is a
sequence and ``False`` otherwise.
Examples
--------
>>> import numpy as np
>>> np.iterable([1, 2, 3])
True
>>> np.iterable(2)
False
Notes
-----
In most cases, the results of ``np.iterable(obj)`` are consistent with
``isinstance(obj, collections.abc.Iterable)``. One notable exception is
the treatment of 0-dimensional arrays::
>>> from collections.abc import Iterable
>>> a = np.array(1.0) # 0-dimensional numpy array
>>> isinstance(a, Iterable)
True
>>> np.iterable(a)
False
"""
try:
iter(y)
except TypeError:
return False
return True
|
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : bool
Return ``True`` if the object has an iterator method or is a
sequence and ``False`` otherwise.
Examples
--------
>>> import numpy as np
>>> np.iterable([1, 2, 3])
True
>>> np.iterable(2)
False
Notes
-----
In most cases, the results of ``np.iterable(obj)`` are consistent with
``isinstance(obj, collections.abc.Iterable)``. One notable exception is
the treatment of 0-dimensional arrays::
>>> from collections.abc import Iterable
>>> a = np.array(1.0) # 0-dimensional numpy array
>>> isinstance(a, Iterable)
True
>>> np.iterable(a)
False
|
python
|
numpy/lib/_function_base_impl.py
| 364
|
[
"y"
] | false
| 1
| 6.32
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
processName
|
@Nullable String processName(MemberPath path, String existingName);
|
Return a new name for the JSON member or {@code null} if the member should be
filtered entirely.
@param path the path of the member
@param existingName the existing and possibly already processed name.
@return the new name
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/json/JsonWriter.java
| 951
|
[
"path",
"existingName"
] |
String
| true
| 1
| 6.48
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
min
|
public static float min(final float... array) {
// Validates input
validateArray(array);
// Finds and returns min
float min = array[0];
for (int i = 1; i < array.length; i++) {
if (Float.isNaN(array[i])) {
return Float.NaN;
}
if (array[i] < min) {
min = array[i];
}
}
return min;
}
|
Returns the minimum value in an array.
@param array an array, must not be null or empty.
@return the minimum value in the array.
@throws NullPointerException if {@code array} is {@code null}.
@throws IllegalArgumentException if {@code array} is empty.
@see IEEE754rUtils#min(float[]) IEEE754rUtils for a version of this method that handles NaN differently.
@since 3.4 Changed signature from min(float[]) to min(float...).
|
java
|
src/main/java/org/apache/commons/lang3/math/NumberUtils.java
| 1,187
|
[] | true
| 4
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
of
|
public static OriginTrackedWritableResource of(WritableResource resource, Origin origin) {
return (OriginTrackedWritableResource) of((Resource) resource, origin);
}
|
Return a new {@link OriginProvider origin tracked} version the given
{@link WritableResource}.
@param resource the tracked resource
@param origin the origin of the resource
@return an {@link OriginTrackedWritableResource} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/origin/OriginTrackedResource.java
| 172
|
[
"resource",
"origin"
] |
OriginTrackedWritableResource
| true
| 1
| 6.16
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
at_time
|
def at_time(self, time, asof: bool = False, axis: Axis | None = None) -> Self:
"""
Select values at particular time of day (e.g., 9:30AM).
Parameters
----------
time : datetime.time or str
The values to select.
asof : bool, default False
This parameter is currently not supported.
axis : {0 or 'index', 1 or 'columns'}, default 0
For `Series` this parameter is unused and defaults to 0.
Returns
-------
Series or DataFrame
The values with the specified time.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
between_time : Select values between particular times of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_at_time : Get just the index locations for
values at particular time of the day.
Examples
--------
>>> i = pd.date_range("2018-04-09", periods=4, freq="12h")
>>> ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-09 12:00:00 2
2018-04-10 00:00:00 3
2018-04-10 12:00:00 4
>>> ts.at_time("12:00")
A
2018-04-09 12:00:00 2
2018-04-10 12:00:00 4
"""
if axis is None:
axis = 0
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
if not isinstance(index, DatetimeIndex):
raise TypeError("Index must be DatetimeIndex")
indexer = index.indexer_at_time(time, asof=asof)
return self.take(indexer, axis=axis)
|
Select values at particular time of day (e.g., 9:30AM).
Parameters
----------
time : datetime.time or str
The values to select.
asof : bool, default False
This parameter is currently not supported.
axis : {0 or 'index', 1 or 'columns'}, default 0
For `Series` this parameter is unused and defaults to 0.
Returns
-------
Series or DataFrame
The values with the specified time.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
between_time : Select values between particular times of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_at_time : Get just the index locations for
values at particular time of the day.
Examples
--------
>>> i = pd.date_range("2018-04-09", periods=4, freq="12h")
>>> ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-09 12:00:00 2
2018-04-10 00:00:00 3
2018-04-10 12:00:00 4
>>> ts.at_time("12:00")
A
2018-04-09 12:00:00 2
2018-04-10 12:00:00 4
|
python
|
pandas/core/generic.py
| 8,557
|
[
"self",
"time",
"asof",
"axis"
] |
Self
| true
| 3
| 8.48
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
getExclusions
|
protected Set<String> getExclusions(AnnotationMetadata metadata, @Nullable AnnotationAttributes attributes) {
Set<String> excluded = new LinkedHashSet<>();
if (attributes != null) {
excluded.addAll(asList(attributes, "exclude"));
excluded.addAll(asList(attributes, "excludeName"));
}
excluded.addAll(getExcludeAutoConfigurationsProperty());
return getAutoConfigurationReplacements().replaceAll(excluded);
}
|
Return any exclusions that limit the candidate configurations.
@param metadata the source metadata
@param attributes the {@link #getAttributes(AnnotationMetadata) annotation
attributes}
@return exclusions or an empty set
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/AutoConfigurationImportSelector.java
| 247
|
[
"metadata",
"attributes"
] | true
| 2
| 7.12
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
find_pickleable_exception
|
def find_pickleable_exception(exc, loads=pickle.loads,
dumps=pickle.dumps):
"""Find first pickleable exception base class.
With an exception instance, iterate over its super classes (by MRO)
and find the first super exception that's pickleable. It does
not go below :exc:`Exception` (i.e., it skips :exc:`Exception`,
:class:`BaseException` and :class:`object`). If that happens
you should use :exc:`UnpickleableException` instead.
Arguments:
exc (BaseException): An exception instance.
loads: decoder to use.
dumps: encoder to use
Returns:
Exception: Nearest pickleable parent exception class
(except :exc:`Exception` and parents), or if the exception is
pickleable it will return :const:`None`.
"""
exc_args = getattr(exc, 'args', [])
for supercls in itermro(exc.__class__, unwanted_base_classes):
try:
superexc = supercls(*exc_args)
loads(dumps(superexc))
except Exception: # pylint: disable=broad-except
pass
else:
return superexc
|
Find first pickleable exception base class.
With an exception instance, iterate over its super classes (by MRO)
and find the first super exception that's pickleable. It does
not go below :exc:`Exception` (i.e., it skips :exc:`Exception`,
:class:`BaseException` and :class:`object`). If that happens
you should use :exc:`UnpickleableException` instead.
Arguments:
exc (BaseException): An exception instance.
loads: decoder to use.
dumps: encoder to use
Returns:
Exception: Nearest pickleable parent exception class
(except :exc:`Exception` and parents), or if the exception is
pickleable it will return :const:`None`.
|
python
|
celery/utils/serialization.py
| 38
|
[
"exc",
"loads",
"dumps"
] | false
| 3
| 6.8
|
celery/celery
| 27,741
|
google
| false
|
|
open
|
public static FileRecords open(File file, boolean mutable) throws IOException {
return open(file, mutable, false, 0, false);
}
|
Get an iterator over the record batches in the file, starting at a specific position. This is similar to
{@link #batches()} except that callers specify a particular position to start reading the batches from. This
method must be used with caution: the start position passed in must be a known start of a batch.
@param start The position to start record iteration from; must be a known position for start of a batch
@return An iterator over batches starting from {@code start}
|
java
|
clients/src/main/java/org/apache/kafka/common/record/FileRecords.java
| 461
|
[
"file",
"mutable"
] |
FileRecords
| true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
setCurrentProxy
|
static @Nullable Object setCurrentProxy(@Nullable Object proxy) {
Object old = currentProxy.get();
if (proxy != null) {
currentProxy.set(proxy);
}
else {
currentProxy.remove();
}
return old;
}
|
Make the given proxy available via the {@code currentProxy()} method.
<p>Note that the caller should be careful to keep the old value as appropriate.
@param proxy the proxy to expose (or {@code null} to reset it)
@return the old proxy, which may be {@code null} if none was bound
@see #currentProxy()
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/AopContext.java
| 84
|
[
"proxy"
] |
Object
| true
| 2
| 7.92
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
isPlainObject
|
function isPlainObject(value) {
if (!isObjectLike(value) || baseGetTag(value) != objectTag) {
return false;
}
var proto = getPrototype(value);
if (proto === null) {
return true;
}
var Ctor = hasOwnProperty.call(proto, 'constructor') && proto.constructor;
return typeof Ctor == 'function' && Ctor instanceof Ctor &&
funcToString.call(Ctor) == objectCtorString;
}
|
Checks if `value` is a plain object, that is, an object created by the
`Object` constructor or one with a `[[Prototype]]` of `null`.
@static
@memberOf _
@since 0.8.0
@category Lang
@param {*} value The value to check.
@returns {boolean} Returns `true` if `value` is a plain object, else `false`.
@example
function Foo() {
this.a = 1;
}
_.isPlainObject(new Foo);
// => false
_.isPlainObject([1, 2, 3]);
// => false
_.isPlainObject({ 'x': 0, 'y': 0 });
// => true
_.isPlainObject(Object.create(null));
// => true
|
javascript
|
lodash.js
| 12,143
|
[
"value"
] | false
| 7
| 7.52
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
getBaseName
|
private String getBaseName(ZipContent.Entry contentEntry) {
String name = contentEntry.getName();
if (!name.startsWith(META_INF_VERSIONS)) {
return name;
}
int versionNumberStartIndex = META_INF_VERSIONS.length();
int versionNumberEndIndex = (versionNumberStartIndex != -1) ? name.indexOf('/', versionNumberStartIndex) : -1;
if (versionNumberEndIndex == -1 || versionNumberEndIndex == (name.length() - 1)) {
return null;
}
try {
int versionNumber = Integer.parseInt(name, versionNumberStartIndex, versionNumberEndIndex, DECIMAL);
if (versionNumber > this.version) {
return null;
}
}
catch (NumberFormatException ex) {
return null;
}
return name.substring(versionNumberEndIndex + 1);
}
|
Creates a new {@link NestedJarFile} instance to read from the specific
{@code File}.
@param file the jar file to be opened for reading
@param nestedEntryName the nested entry name to open
@param version the release version to use when opening a multi-release jar
@param onlyNestedJars if <em>only</em> nested jars should be opened
@param cleaner the cleaner used to release resources
@throws IOException on I/O error
@throws IllegalArgumentException if {@code nestedEntryName} is {@code null} or
empty
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/jar/NestedJarFile.java
| 205
|
[
"contentEntry"
] |
String
| true
| 7
| 6.4
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getOriginalPropertyValue
|
public PropertyValue getOriginalPropertyValue() {
PropertyValue original = this;
Object source = getSource();
while (source instanceof PropertyValue pv && source != original) {
original = pv;
source = original.getSource();
}
return original;
}
|
Return the original PropertyValue instance for this value holder.
@return the original PropertyValue (either a source of this
value holder or this value holder itself).
|
java
|
spring-beans/src/main/java/org/springframework/beans/PropertyValue.java
| 131
|
[] |
PropertyValue
| true
| 3
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
attemptToConvertStringToEnum
|
private Object attemptToConvertStringToEnum(Class<?> requiredType, String trimmedValue, Object currentConvertedValue) {
Object convertedValue = currentConvertedValue;
if (Enum.class == requiredType && this.targetObject != null) {
// target type is declared as raw enum, treat the trimmed value as <enum.fqn>.FIELD_NAME
int index = trimmedValue.lastIndexOf('.');
if (index > - 1) {
String enumType = trimmedValue.substring(0, index);
String fieldName = trimmedValue.substring(index + 1);
ClassLoader cl = this.targetObject.getClass().getClassLoader();
try {
Class<?> enumValueType = ClassUtils.forName(enumType, cl);
Field enumField = enumValueType.getField(fieldName);
convertedValue = enumField.get(null);
}
catch (ClassNotFoundException ex) {
if (logger.isTraceEnabled()) {
logger.trace("Enum class [" + enumType + "] cannot be loaded", ex);
}
}
catch (Throwable ex) {
if (logger.isTraceEnabled()) {
logger.trace("Field [" + fieldName + "] isn't an enum value for type [" + enumType + "]", ex);
}
}
}
}
if (convertedValue == currentConvertedValue) {
// Try field lookup as fallback: for Java enum or custom enum
// with values defined as static fields. Resulting value still needs
// to be checked, hence we don't return it right away.
try {
Field enumField = requiredType.getField(trimmedValue);
ReflectionUtils.makeAccessible(enumField);
convertedValue = enumField.get(null);
}
catch (Throwable ex) {
if (logger.isTraceEnabled()) {
logger.trace("Field [" + convertedValue + "] isn't an enum value", ex);
}
}
}
return convertedValue;
}
|
Convert the value to the required type (if necessary from a String),
for the specified property.
@param propertyName name of the property
@param oldValue the previous value, if available (may be {@code null})
@param newValue the proposed new value
@param requiredType the type we must convert to
(or {@code null} if not known, for example in case of a collection element)
@param typeDescriptor the descriptor for the target property or field
@return the new value, possibly the result of type conversion
@throws IllegalArgumentException if type conversion failed
|
java
|
spring-beans/src/main/java/org/springframework/beans/TypeConverterDelegate.java
| 286
|
[
"requiredType",
"trimmedValue",
"currentConvertedValue"
] |
Object
| true
| 11
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
exclusiveBetween
|
public static <T> void exclusiveBetween(final T start, final T end, final Comparable<T> value, final String message, final Object... values) {
// TODO when breaking BC, consider returning value
if (value.compareTo(start) <= 0 || value.compareTo(end) >= 0) {
throw new IllegalArgumentException(getMessage(message, values));
}
}
|
Validate that the specified argument object fall between the two
exclusive values specified; otherwise, throws an exception with the
specified message.
<pre>Validate.exclusiveBetween(0, 2, 1, "Not in boundaries");</pre>
@param <T> the type of the argument object.
@param start the exclusive start value, not null.
@param end the exclusive end value, not null.
@param value the object to validate, not null.
@param message the {@link String#format(String, Object...)} exception message if invalid, not null.
@param values the optional values for the formatted exception message, null array not recommended.
@throws IllegalArgumentException if the value falls outside the boundaries.
@see #exclusiveBetween(Object, Object, Comparable)
@since 3.0
|
java
|
src/main/java/org/apache/commons/lang3/Validate.java
| 200
|
[
"start",
"end",
"value",
"message"
] |
void
| true
| 3
| 6.56
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
applyEmptySelectionErrorGlobalOmit
|
function applyEmptySelectionErrorGlobalOmit(error: EmptySelectionError, argsTree: ArgumentsRenderingTree) {
const suggestedOmitConfig = new SuggestionObjectValue()
for (const field of error.outputType.fields) {
if (!field.isRelation) {
suggestedOmitConfig.addField(field.name, 'false')
}
}
const omitSuggestion = new ObjectFieldSuggestion('omit', suggestedOmitConfig).makeRequired()
if (error.selectionPath.length === 0) {
argsTree.arguments.addSuggestion(omitSuggestion)
} else {
const [parentPath, fieldName] = splitPath(error.selectionPath)
const parent = argsTree.arguments.getDeepSelectionParent(parentPath)?.value.asObject()
const field = parent?.getField(fieldName)
if (field) {
const fieldValue = field?.value.asObject() ?? new ObjectValue()
fieldValue.addSuggestion(omitSuggestion)
field.value = fieldValue
}
}
// neither select, nor omit are used, but global omit global omit configuration for the model exists
argsTree.addErrorMessage((colors) => {
return `The global ${colors.red('omit')} configuration excludes every field of the model ${colors.bold(
error.outputType.name,
)}. At least one field must be included in the result`
})
}
|
Given the validation error and arguments rendering tree, applies corresponding
formatting to an error tree and adds all relevant messages.
@param error
@param args
|
typescript
|
packages/client/src/runtime/core/errorRendering/applyValidationError.ts
| 239
|
[
"error",
"argsTree"
] | false
| 5
| 6.08
|
prisma/prisma
| 44,834
|
jsdoc
| false
|
|
getObject
|
@SuppressWarnings("unchecked")
default <S> @Nullable S getObject(Class<S> type) throws Exception{
Class<?> objectType = getObjectType();
return (objectType != null && type.isAssignableFrom(objectType) ? (S) getObject() : null);
}
|
Return an instance of the given type, if supported by this factory.
<p>By default, this supports the primary type exposed by the factory, as
indicated by {@link #getObjectType()} and returned by {@link #getObject()}.
Specific factories may support additional types for dependency injection.
@param type the requested type
@return a corresponding instance managed by this factory,
or {@code null} if none available
@throws Exception in case of creation errors
@since 7.0
@see #getObject()
@see #supportsType(Class)
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/SmartFactoryBean.java
| 67
|
[
"type"
] |
S
| true
| 3
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
performRequestAsync
|
private void performRequestAsync(
final NodeTuple<Iterator<Node>> tuple,
final InternalRequest request,
final FailureTrackingResponseListener listener
) {
request.cancellable.runIfNotCancelled(() -> {
final RequestContext context = request.createContextForNextAttempt(tuple.nodes.next(), tuple.authCache);
client.execute(context.requestProducer, context.asyncResponseConsumer, context.context, new FutureCallback<HttpResponse>() {
@Override
public void completed(HttpResponse httpResponse) {
try {
ResponseOrResponseException responseOrResponseException = convertResponse(request, context.node, httpResponse);
if (responseOrResponseException.responseException == null) {
listener.onSuccess(responseOrResponseException.response);
} else {
if (tuple.nodes.hasNext()) {
listener.trackFailure(responseOrResponseException.responseException);
performRequestAsync(tuple, request, listener);
} else {
listener.onDefinitiveFailure(responseOrResponseException.responseException);
}
}
} catch (Exception e) {
listener.onDefinitiveFailure(e);
}
}
@Override
public void failed(Exception failure) {
try {
RequestLogger.logFailedRequest(logger, request.httpRequest, context.node, failure);
onFailure(context.node);
if (isRetryableException(failure) && tuple.nodes.hasNext()) {
listener.trackFailure(failure);
performRequestAsync(tuple, request, listener);
} else {
listener.onDefinitiveFailure(failure);
}
} catch (Exception e) {
listener.onDefinitiveFailure(e);
}
}
@Override
public void cancelled() {
listener.onDefinitiveFailure(Cancellable.newCancellationException());
}
});
});
}
|
Sends a request to the Elasticsearch cluster that the client points to.
The request is executed asynchronously and the provided
{@link ResponseListener} gets notified upon request completion or
failure. Selects a host out of the provided ones in a round-robin
fashion. Failing hosts are marked dead and retried after a certain
amount of time (minimum 1 minute, maximum 30 minutes), depending on how
many times they previously failed (the more failures, the later they
will be retried). In case of failures all of the alive nodes (or dead
nodes that deserve a retry) are retried until one responds or none of
them does, in which case an {@link IOException} will be thrown.
@param request the request to perform
@param responseListener the {@link ResponseListener} to notify when the
request is completed or fails
|
java
|
client/rest/src/main/java/org/elasticsearch/client/RestClient.java
| 390
|
[
"tuple",
"request",
"listener"
] |
void
| true
| 7
| 6.72
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
byteToBinary
|
public static boolean[] byteToBinary(final byte src, final int srcPos, final boolean[] dst, final int dstPos, final int nBools) {
if (0 == nBools) {
return dst;
}
if (nBools - 1 + srcPos >= Byte.SIZE) {
throw new IllegalArgumentException("nBools - 1 + srcPos >= 8");
}
for (int i = 0; i < nBools; i++) {
final int shift = i + srcPos;
dst[dstPos + i] = (0x1 & src >> shift) != 0;
}
return dst;
}
|
Converts a byte into an array of boolean using the default (little-endian, LSB0) byte and bit ordering.
@param src the byte to convert.
@param srcPos the position in {@code src}, in bits, from where to start the conversion.
@param dst the destination array.
@param dstPos the position in {@code dst} where to copy the result.
@param nBools the number of booleans to copy to {@code dst}, must be smaller or equal to the width of the input (from srcPos to MSB).
@return {@code dst}.
@throws NullPointerException if {@code dst} is {@code null}.
@throws IllegalArgumentException if {@code nBools - 1 + srcPos >= 8}.
@throws ArrayIndexOutOfBoundsException if {@code dstPos + nBools > dst.length}.
|
java
|
src/main/java/org/apache/commons/lang3/Conversion.java
| 498
|
[
"src",
"srcPos",
"dst",
"dstPos",
"nBools"
] | true
| 4
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
list_processing_jobs
|
def list_processing_jobs(self, **kwargs) -> list[dict]:
"""
Call boto3's `list_processing_jobs`.
All arguments should be provided via kwargs. Note that boto3 expects
these in CamelCase, for example:
.. code-block:: python
list_processing_jobs(NameContains="myjob", StatusEquals="Failed")
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.list_processing_jobs`
:param kwargs: (optional) kwargs to boto3's list_training_jobs method
:return: results of the list_processing_jobs request
"""
list_processing_jobs_request = partial(self.get_conn().list_processing_jobs, **kwargs)
results = self._list_request(
list_processing_jobs_request, "ProcessingJobSummaries", max_results=kwargs.get("MaxResults")
)
return results
|
Call boto3's `list_processing_jobs`.
All arguments should be provided via kwargs. Note that boto3 expects
these in CamelCase, for example:
.. code-block:: python
list_processing_jobs(NameContains="myjob", StatusEquals="Failed")
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.list_processing_jobs`
:param kwargs: (optional) kwargs to boto3's list_training_jobs method
:return: results of the list_processing_jobs request
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/sagemaker.py
| 918
|
[
"self"
] |
list[dict]
| true
| 1
| 6.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
clearCache
|
static void clearCache() {
locationCache.clear();
pathCache.clear();
}
|
Create a new {@link NestedLocation} from the given URI.
@param uri the nested URI
@return a new {@link NestedLocation} instance
@throws IllegalArgumentException if the URI is not valid
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/nested/NestedLocation.java
| 129
|
[] |
void
| true
| 1
| 6.32
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
visitorWithUnusedExpressionResult
|
function visitorWithUnusedExpressionResult(node: Node): VisitResult<Node | undefined> {
return shouldVisitNode(node) ? visitorWorker(node, /*expressionResultIsUnused*/ true) : node;
}
|
Restores the `HierarchyFacts` for this node's ancestor after visiting this node's
subtree, propagating specific facts from the subtree.
@param ancestorFacts The `HierarchyFacts` of the ancestor to restore after visiting the subtree.
@param excludeFacts The existing `HierarchyFacts` of the subtree that should not be propagated.
@param includeFacts The new `HierarchyFacts` of the subtree that should be propagated.
|
typescript
|
src/compiler/transformers/es2015.ts
| 605
|
[
"node"
] | true
| 2
| 6.16
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
typeHasArrowFunctionBlockingParseError
|
function typeHasArrowFunctionBlockingParseError(node: TypeNode): boolean {
switch (node.kind) {
case SyntaxKind.TypeReference:
return nodeIsMissing((node as TypeReferenceNode).typeName);
case SyntaxKind.FunctionType:
case SyntaxKind.ConstructorType: {
const { parameters, type } = node as FunctionOrConstructorTypeNode;
return isMissingList(parameters) || typeHasArrowFunctionBlockingParseError(type);
}
case SyntaxKind.ParenthesizedType:
return typeHasArrowFunctionBlockingParseError((node as ParenthesizedTypeNode).type);
default:
return false;
}
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 3,809
|
[
"node"
] | true
| 2
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
doSend
|
private void doSend(ClientRequest clientRequest, boolean isInternalRequest, long now) {
ensureActive();
String nodeId = clientRequest.destination();
if (!isInternalRequest) {
// If this request came from outside the NetworkClient, validate
// that we can send data. If the request is internal, we trust
// that internal code has done this validation. Validation
// will be slightly different for some internal requests (for
// example, ApiVersionsRequests can be sent prior to being in
// READY state.)
if (!canSendRequest(nodeId, now))
throw new IllegalStateException("Attempt to send a request to node " + nodeId + " which is not ready.");
}
AbstractRequest.Builder<?> builder = clientRequest.requestBuilder();
try {
NodeApiVersions versionInfo = apiVersions.get(nodeId);
short version;
// Note: if versionInfo is null, we have no server version information. This would be
// the case when sending the initial ApiVersionRequest which fetches the version
// information itself. It is also the case when discoverBrokerVersions is set to false.
if (versionInfo == null) {
version = builder.latestAllowedVersion();
if (discoverBrokerVersions && log.isTraceEnabled())
log.trace("No version information found when sending {} with correlation id {} to node {}. " +
"Assuming version {}.", clientRequest.apiKey(), clientRequest.correlationId(), nodeId, version);
} else {
version = versionInfo.latestUsableVersion(clientRequest.apiKey(), builder.oldestAllowedVersion(),
builder.latestAllowedVersion());
}
// The call to build may also throw UnsupportedVersionException, if there are essential
// fields that cannot be represented in the chosen version.
doSend(clientRequest, isInternalRequest, now, builder.build(version));
} catch (UnsupportedVersionException unsupportedVersionException) {
// If the version is not supported, skip sending the request over the wire.
// Instead, simply add it to the local queue of aborted requests.
log.debug("Version mismatch when attempting to send {} with correlation id {} to {}", builder,
clientRequest.correlationId(), clientRequest.destination(), unsupportedVersionException);
ClientResponse clientResponse = new ClientResponse(clientRequest.makeHeader(builder.latestAllowedVersion()),
clientRequest.callback(), clientRequest.destination(), now, now,
false, unsupportedVersionException, null, null);
if (!isInternalRequest)
abortedSends.add(clientResponse);
else if (clientRequest.apiKey() == ApiKeys.METADATA)
metadataUpdater.handleFailedRequest(now, Optional.of(unsupportedVersionException));
else if (isTelemetryApi(clientRequest.apiKey()) && telemetrySender != null)
telemetrySender.handleFailedRequest(clientRequest.apiKey(), unsupportedVersionException);
}
}
|
Queue up the given request for sending. Requests can only be sent out to ready nodes.
@param request The request
@param now The current timestamp
|
java
|
clients/src/main/java/org/apache/kafka/clients/NetworkClient.java
| 551
|
[
"clientRequest",
"isInternalRequest",
"now"
] |
void
| true
| 11
| 7.2
|
apache/kafka
| 31,560
|
javadoc
| false
|
addContextValue
|
@Override
public ContextedRuntimeException addContextValue(final String label, final Object value) {
exceptionContext.addContextValue(label, value);
return this;
}
|
Adds information helpful to a developer in diagnosing and correcting the problem.
For the information to be meaningful, the value passed should have a reasonable
toString() implementation.
Different values can be added with the same label multiple times.
<p>
Note: This exception is only serializable if the object added is serializable.
</p>
@param label a textual label associated with information, {@code null} not recommended
@param value information needed to understand exception, may be {@code null}
@return {@code this}, for method chaining, not {@code null}
|
java
|
src/main/java/org/apache/commons/lang3/exception/ContextedRuntimeException.java
| 167
|
[
"label",
"value"
] |
ContextedRuntimeException
| true
| 1
| 6.56
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
record
|
public void record(double value, long timeMs, boolean checkQuotas) {
if (shouldRecord()) {
recordInternal(value, timeMs, checkQuotas);
}
}
|
Record a value at a known time. This method is slightly faster than {@link #record(double)} since it will reuse
the time stamp.
@param value The value we are recording
@param timeMs The current POSIX time in milliseconds
@param checkQuotas Indicate if quota must be enforced or not
@throws QuotaViolationException if recording this value moves a metric beyond its configured maximum or minimum
bound
|
java
|
clients/src/main/java/org/apache/kafka/common/metrics/Sensor.java
| 224
|
[
"value",
"timeMs",
"checkQuotas"
] |
void
| true
| 2
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
_masked_arith_op
|
def _masked_arith_op(x: np.ndarray, y, op) -> np.ndarray:
"""
If the given arithmetic operation fails, attempt it again on
only the non-null elements of the input array(s).
Parameters
----------
x : np.ndarray
y : np.ndarray, Series, Index
op : binary operator
"""
# For Series `x` is 1D so ravel() is a no-op; calling it anyway makes
# the logic valid for both Series and DataFrame ops.
xrav = x.ravel()
if isinstance(y, np.ndarray):
dtype = find_common_type([x.dtype, y.dtype])
result = np.empty(x.size, dtype=dtype)
if len(x) != len(y):
raise ValueError(x.shape, y.shape)
ymask = notna(y)
# NB: ravel() is only safe since y is ndarray; for e.g. PeriodIndex
# we would get int64 dtype, see GH#19956
yrav = y.ravel()
mask = notna(xrav) & ymask.ravel()
# See GH#5284, GH#5035, GH#19448 for historical reference
if mask.any():
result[mask] = op(xrav[mask], yrav[mask])
else:
if not is_scalar(y):
raise TypeError(
f"Cannot broadcast np.ndarray with operand of type {type(y)}"
)
# mask is only meaningful for x
result = np.empty(x.size, dtype=x.dtype)
mask = notna(xrav)
# 1 ** np.nan is 1. So we have to unmask those.
if op is pow:
mask = np.where(x == 1, False, mask)
elif op is roperator.rpow:
mask = np.where(y == 1, False, mask)
if mask.any():
result[mask] = op(xrav[mask], y)
np.putmask(result, ~mask, np.nan)
result = result.reshape(x.shape) # 2D compat
return result
|
If the given arithmetic operation fails, attempt it again on
only the non-null elements of the input array(s).
Parameters
----------
x : np.ndarray
y : np.ndarray, Series, Index
op : binary operator
|
python
|
pandas/core/ops/array_ops.py
| 135
|
[
"x",
"y",
"op"
] |
np.ndarray
| true
| 9
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
getInterceptorsAndDynamicInterceptionAdvice
|
public List<Object> getInterceptorsAndDynamicInterceptionAdvice(Method method, @Nullable Class<?> targetClass) {
List<Object> cachedInterceptors;
if (this.methodCache != null) {
// Method-specific cache for method-specific pointcuts
MethodCacheKey cacheKey = new MethodCacheKey(method);
cachedInterceptors = this.methodCache.get(cacheKey);
if (cachedInterceptors == null) {
cachedInterceptors = this.advisorChainFactory.getInterceptorsAndDynamicInterceptionAdvice(
this, method, targetClass);
this.methodCache.put(cacheKey, cachedInterceptors);
}
}
else {
// Shared cache since there are no method-specific advisors (see below).
cachedInterceptors = this.cachedInterceptors;
if (cachedInterceptors == null) {
cachedInterceptors = this.advisorChainFactory.getInterceptorsAndDynamicInterceptionAdvice(
this, method, targetClass);
this.cachedInterceptors = cachedInterceptors;
}
}
return cachedInterceptors;
}
|
Determine a list of {@link org.aopalliance.intercept.MethodInterceptor} objects
for the given method, based on this configuration.
@param method the proxied method
@param targetClass the target class
@return a List of MethodInterceptors (may also include InterceptorAndDynamicMethodMatchers)
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/AdvisedSupport.java
| 516
|
[
"method",
"targetClass"
] | true
| 4
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
_isolation_key
|
def _isolation_key(ischema: IsolationSchema = _DEFAULT_ISOLATION_SCHEMA) -> str:
"""Generate a unique key for the given isolation schema.
Args:
ischema: Schema specifying which context forms to include.
Defaults to including all runtime and compile context.
Returns:
A 32-character hexadecimal string that uniquely identifies
the context specified by the isolation schema.
"""
return sha256(
json.dumps(_isolation_context(ischema), sort_keys=True).encode()
).hexdigest()[:32]
|
Generate a unique key for the given isolation schema.
Args:
ischema: Schema specifying which context forms to include.
Defaults to including all runtime and compile context.
Returns:
A 32-character hexadecimal string that uniquely identifies
the context specified by the isolation schema.
|
python
|
torch/_inductor/runtime/caching/context.py
| 279
|
[
"ischema"
] |
str
| true
| 1
| 6.24
|
pytorch/pytorch
| 96,034
|
google
| false
|
maybeRecordDeprecatedPartitionLead
|
@Deprecated // To be removed in Kafka 5.0 release.
private void maybeRecordDeprecatedPartitionLead(String name, TopicPartition tp, double lead) {
if (shouldReportDeprecatedMetric(tp.topic())) {
Sensor deprecatedRecordsLead = new SensorBuilder(metrics, deprecatedMetricName(name), () -> topicPartitionTags(tp))
.withValue(metricsRegistry.partitionRecordsLead)
.withMin(metricsRegistry.partitionRecordsLeadMin)
.withAvg(metricsRegistry.partitionRecordsLeadAvg)
.build();
deprecatedRecordsLead.record(lead);
}
}
|
This method is called by the {@link Fetch fetch} logic before it requests fetches in order to update the
internal set of metrics that are tracked.
@param subscription {@link SubscriptionState} that contains the set of assigned partitions
@see SubscriptionState#assignmentId()
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchMetricsManager.java
| 236
|
[
"name",
"tp",
"lead"
] |
void
| true
| 2
| 6.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
getLast
|
@ParametricNullness
public static <T extends @Nullable Object> T getLast(
Iterator<? extends T> iterator, @ParametricNullness T defaultValue) {
return iterator.hasNext() ? getLast(iterator) : defaultValue;
}
|
Advances {@code iterator} to the end, returning the last element or {@code defaultValue} if the
iterator is empty.
@param defaultValue the default value to return if the iterator is empty
@return the last element of {@code iterator}
@since 3.0
|
java
|
android/guava/src/com/google/common/collect/Iterators.java
| 921
|
[
"iterator",
"defaultValue"
] |
T
| true
| 2
| 7.6
|
google/guava
| 51,352
|
javadoc
| false
|
forwardCurrentToHead
|
function forwardCurrentToHead(analyzer, node) {
const codePath = analyzer.codePath;
const state = CodePath.getState(codePath);
const currentSegments = state.currentSegments;
const headSegments = state.headSegments;
const end = Math.max(currentSegments.length, headSegments.length);
let i, currentSegment, headSegment;
// Fires leaving events.
for (i = 0; i < end; ++i) {
currentSegment = currentSegments[i];
headSegment = headSegments[i];
if (currentSegment !== headSegment && currentSegment) {
if (currentSegment.reachable) {
analyzer.emitter.emit('onCodePathSegmentEnd', currentSegment, node);
}
}
}
// Update state.
state.currentSegments = headSegments;
// Fires entering events.
for (i = 0; i < end; ++i) {
currentSegment = currentSegments[i];
headSegment = headSegments[i];
if (currentSegment !== headSegment && headSegment) {
CodePathSegment.markUsed(headSegment);
if (headSegment.reachable) {
analyzer.emitter.emit('onCodePathSegmentStart', headSegment, node);
}
}
}
}
|
Updates the current segment with the head segment.
This is similar to local branches and tracking branches of git.
To separate the current and the head is in order to not make useless segments.
In this process, both "onCodePathSegmentStart" and "onCodePathSegmentEnd"
events are fired.
@param {CodePathAnalyzer} analyzer The instance.
@param {ASTNode} node The current AST node.
@returns {void}
|
javascript
|
packages/eslint-plugin-react-hooks/src/code-path-analysis/code-path-analyzer.js
| 183
|
[
"analyzer",
"node"
] | false
| 9
| 6.08
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
prepareMethodOverrides
|
public void prepareMethodOverrides() throws BeanDefinitionValidationException {
// Check that lookup methods exist and determine their overloaded status.
if (hasMethodOverrides()) {
getMethodOverrides().getOverrides().forEach(this::prepareMethodOverride);
}
}
|
Validate and prepare the method overrides defined for this bean.
Checks for existence of a method with the specified name.
@throws BeanDefinitionValidationException in case of validation failure
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanDefinition.java
| 1,251
|
[] |
void
| true
| 2
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
equals
|
@Override
public boolean equals(@Nullable Object other) {
return (this == other || (other instanceof AnnotationMethodMatcher otherMm &&
this.annotationType.equals(otherMm.annotationType) &&
this.checkInherited == otherMm.checkInherited));
}
|
Create a new AnnotationClassFilter for the given annotation type.
@param annotationType the annotation type to look for
@param checkInherited whether to also check the superclasses and
interfaces as well as meta-annotations for the annotation type
(i.e. whether to use {@link AnnotatedElementUtils#hasAnnotation}
semantics instead of standard Java {@link Method#isAnnotationPresent})
@since 5.0
|
java
|
spring-aop/src/main/java/org/springframework/aop/support/annotation/AnnotationMethodMatcher.java
| 91
|
[
"other"
] | true
| 4
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
timestampType
|
public TimestampType timestampType() {
return timestampType(magic(), wrapperRecordTimestampType, attributes());
}
|
Get the timestamp type of the record.
@return The timestamp type or {@link TimestampType#NO_TIMESTAMP_TYPE} if the magic is 0.
|
java
|
clients/src/main/java/org/apache/kafka/common/record/LegacyRecord.java
| 235
|
[] |
TimestampType
| true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
forward
|
def forward(self, x: Tensor) -> Tensor:
r"""
Args:
x (Tensor): Tensor of dimension (batch_size, num_features, input_length).
Returns:
Tensor: Predictor tensor of dimension (batch_size, number_of_classes, input_length).
"""
x = self.acoustic_model(x)
x = nn.functional.log_softmax(x, dim=1)
return x
|
r"""
Args:
x (Tensor): Tensor of dimension (batch_size, num_features, input_length).
Returns:
Tensor: Predictor tensor of dimension (batch_size, number_of_classes, input_length).
|
python
|
benchmarks/functional_autograd_benchmark/torchaudio_models.py
| 104
|
[
"self",
"x"
] |
Tensor
| true
| 1
| 6.08
|
pytorch/pytorch
| 96,034
|
google
| false
|
sourceMapCacheToObject
|
function sourceMapCacheToObject() {
const moduleSourceMapCache = getModuleSourceMapCache();
if (moduleSourceMapCache.size === 0) {
return undefined;
}
const obj = { __proto__: null };
for (const { 0: k, 1: v } of moduleSourceMapCache) {
obj[k] = {
__proto__: null,
lineLengths: v.lineLengths,
data: v.data,
url: v.sourceMapURL,
};
}
return obj;
}
|
Read source map from file.
@param {string} mapURL - file url of the source map
@returns {object} deserialized source map JSON object
|
javascript
|
lib/internal/source_map/source_map_cache.js
| 343
|
[] | false
| 2
| 7.12
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
registerRuntimeHintsIfNecessary
|
private void registerRuntimeHintsIfNecessary(RegisteredBean registeredBean, Executable constructorOrFactoryMethod) {
if (registeredBean.getBeanFactory() instanceof DefaultListableBeanFactory dlbf) {
RuntimeHints runtimeHints = this.generationContext.getRuntimeHints();
ProxyRuntimeHintsRegistrar registrar = new ProxyRuntimeHintsRegistrar(dlbf.getAutowireCandidateResolver());
registrar.registerRuntimeHints(runtimeHints, constructorOrFactoryMethod);
}
}
|
Generate the instance supplier code.
@param registeredBean the bean to handle
@param instantiationDescriptor the executable to use to create the bean
@return the generated code
@since 6.1.7
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/InstanceSupplierCodeGenerator.java
| 153
|
[
"registeredBean",
"constructorOrFactoryMethod"
] |
void
| true
| 2
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
findPrismaClientDir
|
async function findPrismaClientDir(baseDir: string) {
const resolveOpts = { basedir: baseDir, preserveSymlinks: true }
const cliDir = await resolvePkg('prisma', resolveOpts)
const clientDir = await resolvePkg('@prisma/client', resolveOpts)
const resolvedClientDir = clientDir && (await fs.realpath(clientDir))
debug('prismaCliDir', cliDir)
debug('prismaClientDir', clientDir)
// If CLI not found, we can only continue forward, likely a test
if (cliDir === undefined) return resolvedClientDir
if (clientDir === undefined) return resolvedClientDir
// for everything to work well we expect `../<client-dir>`
const relDir = path.relative(cliDir, clientDir).split(path.sep)
// if the client is not near `prisma`, in parent folder => fail
if (relDir[0] !== '..' || relDir[1] === '..') return undefined
// we return the resolved location as pnpm users will want that
return resolvedClientDir
}
|
Tries to find a `@prisma/client` that is next to the `prisma` CLI
@param baseDir from where to start looking from
@returns `@prisma/client` location
|
typescript
|
packages/client-generator-js/src/resolvePrismaClient.ts
| 36
|
[
"baseDir"
] | false
| 6
| 6.8
|
prisma/prisma
| 44,834
|
jsdoc
| true
|
|
writeTo
|
long writeTo(TransferableChannel channel) throws IOException;
|
Write some as-yet unwritten bytes from this send to the provided channel. It may take multiple calls for the send
to be completely written
@param channel The Channel to write to
@return The number of bytes written
@throws IOException If the write fails
|
java
|
clients/src/main/java/org/apache/kafka/common/network/Send.java
| 38
|
[
"channel"
] | true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
shouldBlock
|
@Override
public boolean shouldBlock() {
return !isDone();
}
|
Convert from a request future of one type to another type
@param adapter The adapter which does the conversion
@param <S> The type of the future adapted to
@return The new future
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestFuture.java
| 251
|
[] | true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
format
|
@Deprecated
@Override
public StringBuffer format(final Calendar calendar, final StringBuffer buf) {
return printer.format(calendar, buf);
}
|
Formats a {@link Calendar} object into the supplied {@link StringBuffer}.
@param calendar the calendar to format.
@param buf the buffer to format into.
@return the specified string buffer.
@deprecated Use {{@link #format(Calendar, Appendable)}.
|
java
|
src/main/java/org/apache/commons/lang3/time/FastDateFormat.java
| 432
|
[
"calendar",
"buf"
] |
StringBuffer
| true
| 1
| 6.4
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
_get_loc_single_level_index
|
def _get_loc_single_level_index(self, level_index: Index, key: Hashable) -> int:
"""
If key is NA value, location of index unify as -1.
Parameters
----------
level_index: Index
key : label
Returns
-------
loc : int
If key is NA value, loc is -1
Else, location of key in index.
See Also
--------
Index.get_loc : The get_loc method for (single-level) index.
"""
if is_scalar(key) and isna(key):
# TODO: need is_valid_na_for_dtype(key, level_index.dtype)
return -1
else:
return level_index.get_loc(key)
|
If key is NA value, location of index unify as -1.
Parameters
----------
level_index: Index
key : label
Returns
-------
loc : int
If key is NA value, loc is -1
Else, location of key in index.
See Also
--------
Index.get_loc : The get_loc method for (single-level) index.
|
python
|
pandas/core/indexes/multi.py
| 3,242
|
[
"self",
"level_index",
"key"
] |
int
| true
| 4
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
registerDisposableBeanIfNecessary
|
protected void registerDisposableBeanIfNecessary(String beanName, Object bean, RootBeanDefinition mbd) {
if (!mbd.isPrototype() && requiresDestruction(bean, mbd)) {
if (mbd.isSingleton()) {
// Register a DisposableBean implementation that performs all destruction
// work for the given bean: DestructionAwareBeanPostProcessors,
// DisposableBean interface, custom destroy method.
registerDisposableBean(beanName, new DisposableBeanAdapter(
bean, beanName, mbd, getBeanPostProcessorCache().destructionAware));
}
else {
// A bean with a custom scope...
Scope scope = this.scopes.get(mbd.getScope());
if (scope == null) {
throw new IllegalStateException("No Scope registered for scope name '" + mbd.getScope() + "'");
}
scope.registerDestructionCallback(beanName, new DisposableBeanAdapter(
bean, beanName, mbd, getBeanPostProcessorCache().destructionAware));
}
}
}
|
Add the given bean to the list of disposable beans in this factory,
registering its DisposableBean interface and/or the given destroy method
to be called on factory shutdown (if applicable). Only applies to singletons.
@param beanName the name of the bean
@param bean the bean instance
@param mbd the bean definition for the bean
@see RootBeanDefinition#isSingleton
@see RootBeanDefinition#getDependsOn
@see #registerDisposableBean
@see #registerDependentBean
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
| 1,922
|
[
"beanName",
"bean",
"mbd"
] |
void
| true
| 5
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
prettyPrint
|
public XContentBuilder prettyPrint() {
generator.usePrettyPrint();
return this;
}
|
@return the output stream to which the built object is being written. Note that is dangerous to modify the stream.
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java
| 290
|
[] |
XContentBuilder
| true
| 1
| 6.96
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
anyMatches
|
protected final boolean anyMatches(ConditionContext context, AnnotatedTypeMetadata metadata,
Condition... conditions) {
for (Condition condition : conditions) {
if (matches(context, metadata, condition)) {
return true;
}
}
return false;
}
|
Return true if any of the specified conditions match.
@param context the context
@param metadata the annotation meta-data
@param conditions conditions to test
@return {@code true} if any condition matches.
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/SpringBootCondition.java
| 124
|
[
"context",
"metadata"
] | true
| 2
| 7.6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
merge
|
@SuppressWarnings({"rawtypes", "unchecked"})
private void merge(Map<String, Object> output, Map<String, Object> map) {
map.forEach((key, value) -> {
Object existing = output.get(key);
if (value instanceof Map valueMap && existing instanceof Map existingMap) {
Map<String, Object> result = new LinkedHashMap<>(existingMap);
merge(result, valueMap);
output.put(key, result);
}
else {
output.put(key, value);
}
});
}
|
Template method that subclasses may override to construct the object
returned by this factory.
<p>Invoked lazily the first time {@link #getObject()} is invoked in
case of a shared singleton; else, on each {@link #getObject()} call.
<p>The default implementation returns the merged {@code Map} instance.
@return the object returned by this factory
@see #process(MatchCallback)
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/YamlMapFactoryBean.java
| 127
|
[
"output",
"map"
] |
void
| true
| 3
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
setDelimiterMatcher
|
public StrTokenizer setDelimiterMatcher(final StrMatcher delim) {
if (delim == null) {
this.delimMatcher = StrMatcher.noneMatcher();
} else {
this.delimMatcher = delim;
}
return this;
}
|
Sets the field delimiter matcher.
<p>
The delimiter is used to separate one token from another.
</p>
@param delim the delimiter matcher to use.
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrTokenizer.java
| 924
|
[
"delim"
] |
StrTokenizer
| true
| 2
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
defineInternal
|
public ConfigDef defineInternal(final String name, final Type type, final Object defaultValue, final Importance importance) {
return define(new ConfigKey(name, type, defaultValue, null, importance, "", "", -1, Width.NONE, name, Collections.emptyList(), null, true, null));
}
|
Define a new internal configuration. Internal configuration won't show up in the docs and aren't
intended for general use.
@param name The name of the config parameter
@param type The type of the config
@param defaultValue The default value to use if this config isn't present
@param importance The importance of this config (i.e. is this something you will likely need to change?)
@return This ConfigDef so you can chain calls
|
java
|
clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
| 454
|
[
"name",
"type",
"defaultValue",
"importance"
] |
ConfigDef
| true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.