function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
getDepth
|
function getDepth(stack: ProcessorState['measureStack']) {
if (stack.length > 0) {
const {depth, type} = stack[stack.length - 1];
return type === 'render-idle' ? depth : depth + 1;
}
return 0;
}
|
Copyright (c) Meta Platforms, Inc. and affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
@flow
|
javascript
|
packages/react-devtools-timeline/src/import-worker/preprocessData.js
| 135
|
[] | false
| 3
| 6.24
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
_update_dependency_line_with_new_version
|
def _update_dependency_line_with_new_version(
    line: str,
    provider_package_name: str,
    current_min_version: str,
    new_version: str,
    pyproject_file: Path,
    updates_made: dict[str, dict[str, Any]],
) -> tuple[str, bool]:
    """Rewrite a pyproject dependency line to require ``new_version`` and record the change.

    If the dependency is already pinned at ``new_version`` the line is returned
    untouched. Otherwise the ``>=`` constraint is bumped, any trailing
    ``# use next version`` marker comment is stripped, and the update is
    recorded in ``updates_made`` keyed by the provider directory.

    Returns:
        Tuple of (updated_line, was_modified)
    """
    relative_path = pyproject_file.relative_to(AIRFLOW_PROVIDERS_ROOT_PATH)
    if new_version == current_min_version:
        get_console().print(
            f"[dim]Skipping {provider_package_name} in {relative_path}: "
            f"already at version {new_version}"
        )
        return line, False
    # Swap the quoted ">=old" constraint for ">=new" in place.
    updated_line = line.replace(
        f'"{provider_package_name}>={current_min_version}"',
        f'"{provider_package_name}>={new_version}"',
    )
    # Drop the '# use next version' marker comment (and anything after it),
    # then trim trailing whitespace left behind.
    updated_line = re.sub(r"#\s*use next version.*$", "", updated_line).rstrip()
    # Record the change under the provider's directory relative to the providers root.
    provider_key = str(pyproject_file.parent.relative_to(AIRFLOW_PROVIDERS_ROOT_PATH))
    updates_made.setdefault(provider_key, {})[provider_package_name] = {
        "old_version": current_min_version,
        "new_version": new_version,
        "file": str(pyproject_file),
    }
    get_console().print(
        f"[info]Updating {provider_package_name} in {relative_path}: "
        f"{current_min_version} -> {new_version} (comment removed)"
    )
    return updated_line, True
|
Update a dependency line with a new version and track the change.
Returns:
Tuple of (updated_line, was_modified)
|
python
|
dev/breeze/src/airflow_breeze/utils/packages.py
| 1,218
|
[
"line",
"provider_package_name",
"current_min_version",
"new_version",
"pyproject_file",
"updates_made"
] |
tuple[str, bool]
| true
| 3
| 7.92
|
apache/airflow
| 43,597
|
unknown
| false
|
select
|
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> import numpy as np
Beginning with an array of integers from 0 to 5 (inclusive),
elements less than ``3`` are negated, elements greater than ``3``
are squared, and elements not meeting either of these conditions
(exactly ``3``) are replaced with a `default` value of ``42``.
>>> x = np.arange(6)
>>> condlist = [x<3, x>3]
>>> choicelist = [-x, x**2]
>>> np.select(condlist, choicelist, 42)
array([ 0, -1, -2, 42, 16, 25])
When multiple conditions are satisfied, the first one encountered in
`condlist` is used.
>>> condlist = [x<=4, x>3]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist, 55)
array([ 0, 1, 2, 3, 4, 25])
"""
# Check the size of condlist and choicelist are the same, or abort.
if len(condlist) != len(choicelist):
raise ValueError(
'list of cases must be same length as list of conditions')
# Now that the dtype is known, handle the deprecated select([], []) case
if len(condlist) == 0:
raise ValueError("select with an empty condition list is not possible")
# TODO: This preserves the Python int, float, complex manually to get the
# right `result_type` with NEP 50. Most likely we will grow a better
# way to spell this (and this can be replaced).
choicelist = [
choice if type(choice) in (int, float, complex) else np.asarray(choice)
for choice in choicelist]
choicelist.append(default if type(default) in (int, float, complex)
else np.asarray(default))
try:
dtype = np.result_type(*choicelist)
except TypeError as e:
msg = f'Choicelist and default value do not have a common dtype: {e}'
raise TypeError(msg) from None
# Convert conditions to arrays and broadcast conditions and choices
# as the shape is needed for the result. Doing it separately optimizes
# for example when all choices are scalars.
condlist = np.broadcast_arrays(*condlist)
choicelist = np.broadcast_arrays(*choicelist)
# If cond array is not an ndarray in boolean format or scalar bool, abort.
for i, cond in enumerate(condlist):
if cond.dtype.type is not np.bool:
raise TypeError(
f'invalid entry {i} in condlist: should be boolean ndarray')
if choicelist[0].ndim == 0:
# This may be common, so avoid the call.
result_shape = condlist[0].shape
else:
result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape
result = np.full(result_shape, choicelist[-1], dtype)
# Use np.copyto to burn each choicelist array onto result, using the
# corresponding condlist as a boolean mask. This is done in reverse
# order since the first choice should take precedence.
choicelist = choicelist[-2::-1]
condlist = condlist[::-1]
for choice, cond in zip(choicelist, condlist):
np.copyto(result, choice, where=cond)
return result
|
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> import numpy as np
Beginning with an array of integers from 0 to 5 (inclusive),
elements less than ``3`` are negated, elements greater than ``3``
are squared, and elements not meeting either of these conditions
(exactly ``3``) are replaced with a `default` value of ``42``.
>>> x = np.arange(6)
>>> condlist = [x<3, x>3]
>>> choicelist = [-x, x**2]
>>> np.select(condlist, choicelist, 42)
array([ 0, -1, -2, 42, 16, 25])
When multiple conditions are satisfied, the first one encountered in
`condlist` is used.
>>> condlist = [x<=4, x>3]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist, 55)
array([ 0, 1, 2, 3, 4, 25])
|
python
|
numpy/lib/_function_base_impl.py
| 813
|
[
"condlist",
"choicelist",
"default"
] | false
| 10
| 7.6
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
generateCodeForAccessibleFactoryMethod
|
/**
 * Generate the instance-supplier code for a bean created via an accessible
 * factory method.
 * <p>For a no-arg factory method with no factory bean, a compact inline
 * {@code BeanInstanceSupplier.forFactoryMethod(...).withGenerator(...)}
 * expression is emitted; otherwise a dedicated getInstance method is
 * generated and referenced from a return statement.
 * @param beanName the name of the bean being registered
 * @param factoryMethod the factory {@link Method} that creates the bean
 * @param targetClass the class declaring the factory method
 * @param factoryBeanName the name of the factory bean, or {@code null} when
 * no factory bean instance is involved
 * @return the generated code block
 */
private CodeBlock generateCodeForAccessibleFactoryMethod(String beanName,
        Method factoryMethod, Class<?> targetClass, @Nullable String factoryBeanName) {
    // Register the declaring class so reflective access works under AOT/native-image.
    this.generationContext.getRuntimeHints().reflection().registerType(factoryMethod.getDeclaringClass());
    if (factoryBeanName == null && factoryMethod.getParameterCount() == 0) {
        // No factory bean and no arguments: emit the supplier inline.
        // NOTE(review): this branch presumably covers static factory methods
        // (the generated lambda invokes the method on the class) — confirm.
        Class<?> suppliedType = ClassUtils.resolvePrimitiveIfNecessary(factoryMethod.getReturnType());
        CodeBlock.Builder code = CodeBlock.builder();
        code.add("$T.<$T>forFactoryMethod($T.class, $S)", BeanInstanceSupplier.class,
                suppliedType, targetClass, factoryMethod.getName());
        // getUserClass strips any proxy/CGLIB subclass so the call targets the user type.
        code.add(".withGenerator(($L) -> $T.$L())", REGISTERED_BEAN_PARAMETER_NAME,
                ClassUtils.getUserClass(targetClass), factoryMethod.getName());
        return code.build();
    }
    // General case: generate a private static getInstance method and return a
    // reference to it.
    GeneratedMethod getInstanceMethod = generateGetInstanceSupplierMethod(method ->
            buildGetInstanceMethodForFactoryMethod(method, beanName, factoryMethod,
                    targetClass, factoryBeanName, PRIVATE_STATIC));
    return generateReturnStatement(getInstanceMethod);
}
|
Generate the instance supplier code.
@param registeredBean the bean to handle
@param instantiationDescriptor the executable to use to create the bean
@return the generated code
@since 6.1.7
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/InstanceSupplierCodeGenerator.java
| 272
|
[
"beanName",
"factoryMethod",
"targetClass",
"factoryBeanName"
] |
CodeBlock
| true
| 3
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
eigvals
|
def eigvals(a):
    """
    Compute the eigenvalues of a general matrix.

    Main difference between `eigvals` and `eig`: the eigenvectors aren't
    returned.

    Parameters
    ----------
    a : (..., M, M) array_like
        A complex- or real-valued matrix whose eigenvalues will be computed.

    Returns
    -------
    w : (..., M,) ndarray
        The eigenvalues, each repeated according to its multiplicity.
        They are not necessarily ordered, nor are they necessarily
        real for real matrices.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eig : eigenvalues and right eigenvectors of general arrays
    eigvalsh : eigenvalues of real symmetric or complex Hermitian
               (conjugate symmetric) arrays.
    eigh : eigenvalues and eigenvectors of real symmetric or complex
           Hermitian (conjugate symmetric) arrays.
    scipy.linalg.eigvals : Similar function in SciPy.

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details. This is implemented using the ``_geev`` LAPACK routines which
    compute the eigenvalues and eigenvectors of general square arrays.

    Examples
    --------
    >>> import numpy as np
    >>> from numpy import linalg as LA
    >>> D = np.diag((-1, 1))
    >>> LA.eigvals(D)
    array([-1.,  1.])
    """
    arr, wrap = _makearray(a)
    _assert_stacked_square(arr)
    _assert_finite(arr)
    in_type, out_type = _commonType(arr)
    # The LAPACK _geev routine always produces complex eigenvalues ('->D').
    sig = 'D->D' if isComplexType(in_type) else 'd->D'
    with errstate(call=_raise_linalgerror_eigenvalues_nonconvergence,
                  invalid='call', over='ignore', divide='ignore',
                  under='ignore'):
        w = _umath_linalg.eigvals(arr, signature=sig)
    if not isComplexType(in_type):
        # Real input: downcast to a real result when the spectrum is purely
        # real, otherwise promote the result type to complex.
        if all(w.imag == 0):
            w = w.real
            out_type = _realType(out_type)
        else:
            out_type = _complexType(out_type)
    return w.astype(out_type, copy=False)
|
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of real symmetric or complex Hermitian
(conjugate symmetric) arrays.
eigh : eigenvalues and eigenvectors of real symmetric or complex
Hermitian (conjugate symmetric) arrays.
scipy.linalg.eigvals : Similar function in SciPy.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the ``_geev`` LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> import numpy as np
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by ``Q`` on one side and
by ``Q.T`` on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.]) # random
|
python
|
numpy/linalg/_linalg.py
| 1,171
|
[
"a"
] | false
| 5
| 7.44
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
match
|
/**
 * Does {@code fieldName} match this field?
 * @param fieldName the field name to match against this {@link ParseField}
 * @param deprecationHandler called if {@code fieldName} is deprecated
 * @return {@code true} if {@code fieldName} matches any of the acceptable
 * names for this {@link ParseField}
 */
public boolean match(String fieldName, DeprecationHandler deprecationHandler) {
    // No parser name is available at this call site, and the location of the
    // field within the content is unknown.
    return match(
        null,
        () -> XContentLocation.UNKNOWN,
        fieldName,
        deprecationHandler
    );
}
|
Does {@code fieldName} match this field?
@param fieldName
the field name to match against this {@link ParseField}
@param deprecationHandler called if {@code fieldName} is deprecated
@return true if <code>fieldName</code> matches any of the acceptable
names for this {@link ParseField}.
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/ParseField.java
| 141
|
[
"fieldName",
"deprecationHandler"
] | true
| 1
| 6.16
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
getElementBounds
|
/**
 * Compute the on-page bounds of a code line element.
 *
 * When a code line element nests another code line element inside it, the
 * reported height only covers the part of this element above that child.
 */
function getElementBounds({ element }: CodeLineElement): { top: number; height: number } {
    const bounds = element.getBoundingClientRect();
    const nestedLine = element.querySelector(`.${codeLineClass}`);
    if (!nestedLine) {
        return bounds;
    }
    // Clamp to at least 1px so the element never reports a zero/negative height.
    const nestedTop = nestedLine.getBoundingClientRect().top;
    return {
        top: bounds.top,
        height: Math.max(1, nestedTop - bounds.top),
    };
}
|
Find the html elements that are at a specific pixel offset on the page.
|
typescript
|
extensions/markdown-language-features/preview-src/scroll-sync.ts
| 125
|
[
"{ element }"
] | true
| 2
| 6
|
microsoft/vscode
| 179,840
|
jsdoc
| false
|
|
between
|
/**
 * Constructs a {@link UnicodeEscaper} that escapes code points between the
 * specified values (inclusive).
 * @param codePointLow above which to escape
 * @param codePointHigh below which to escape
 * @return the newly created {@link UnicodeEscaper} instance
 */
public static UnicodeEscaper between(final int codePointLow, final int codePointHigh) {
    // 'true' selects escaping *inside* the [low, high] range rather than outside it.
    final boolean escapeWithinRange = true;
    return new UnicodeEscaper(codePointLow, codePointHigh, escapeWithinRange);
}
|
Constructs a {@link UnicodeEscaper} between the specified values (inclusive).
@param codePointLow above which to escape.
@param codePointHigh below which to escape.
@return the newly created {@link UnicodeEscaper} instance.
|
java
|
src/main/java/org/apache/commons/lang3/text/translate/UnicodeEscaper.java
| 58
|
[
"codePointLow",
"codePointHigh"
] |
UnicodeEscaper
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
fastAsin
|
/**
 * Approximates asin to within about 1e-6. This approximation works by breaking the range from 0 to 1
 * into 5 regions; for all but the region nearest 1, rational polynomial models give a very good
 * approximation of asin, and by interpolating as we move from region to region we guarantee
 * continuity (and happen to get monotonicity as well). For values near 1, Math.asin itself is used
 * as the region "approximation".
 * @param x sin(theta)
 * @return theta
 */
static double fastAsin(double x) {
    if (x < 0) {
        // asin is odd: asin(-x) == -asin(x).
        return -fastAsin(-x);
    } else if (x > 1) {
        // Outside the domain of asin.
        return Double.NaN;
    } else {
        // Cutoffs for models. Note that the ranges overlap. In the
        // overlap we do linear interpolation to guarantee the overall
        // result is "nice"
        double c0High = 0.1;
        double c1High = 0.55;
        double c2Low = 0.5;
        double c2High = 0.8;
        double c3Low = 0.75;
        double c3High = 0.9;
        double c4Low = 0.87;
        if (x > c3High) {
            // Region nearest 1: no model — defer to the library implementation.
            return Math.asin(x);
        } else {
            // The models: one coefficient vector per region. Each pairs with
            // the `vars` basis below (presumably via a dot product in eval()
            // — TODO confirm eval's semantics).
            double[] m0 = { 0.2955302411, 1.2221903614, 0.1488583743, 0.2422015816, -0.3688700895, 0.0733398445 };
            double[] m1 = { -0.0430991920, 0.9594035750, -0.0362312299, 0.1204623351, 0.0457029620, -0.0026025285 };
            double[] m2 = { -0.034873933724, 1.054796752703, -0.194127063385, 0.283963735636, 0.023800124916, -0.000872727381 };
            double[] m3 = { -0.37588391875, 2.61991859025, -2.48835406886, 1.48605387425, 0.00857627492, -0.00015802871 };
            // The parameters for all of the models: 1, x, x^2, x^3, and two
            // rational terms that grow near x = 1.
            double[] vars = { 1, x, x * x, x * x * x, 1 / (1 - x), 1 / (1 - x) / (1 - x) };
            // Raw grist for interpolation coefficients. Each xN measures how far
            // x is below a region's upper cutoff, scaled by the overlap width
            // (bound() presumably clamps to [0, 1] — TODO confirm).
            double x0 = bound((c0High - x) / c0High);
            double x1 = bound((c1High - x) / (c1High - c2Low));
            double x2 = bound((c2High - x) / (c2High - c3Low));
            double x3 = bound((c3High - x) / (c3High - c4Low));
            // Interpolation coefficients: each region's weight fades in as the
            // previous region's weight fades out, so adjacent mixes sum to 1
            // across an overlap.
            // noinspection UnnecessaryLocalVariable
            double mix0 = x0;
            double mix1 = (1 - x0) * x1;
            double mix2 = (1 - x1) * x2;
            double mix3 = (1 - x2) * x3;
            double mix4 = 1 - x3;
            // Now mix all the results together, avoiding extra evaluations:
            // models with zero weight are never evaluated.
            double r = 0;
            if (mix0 > 0) {
                r += mix0 * eval(m0, vars);
            }
            if (mix1 > 0) {
                r += mix1 * eval(m1, vars);
            }
            if (mix2 > 0) {
                r += mix2 * eval(m2, vars);
            }
            if (mix3 > 0) {
                r += mix3 * eval(m3, vars);
            }
            if (mix4 > 0) {
                // model 4 is just the real deal
                r += mix4 * Math.asin(x);
            }
            return r;
        }
    }
}
|
Approximates asin to within about 1e-6. This approximation works by breaking the range from 0 to 1 into 5 regions
for all but the region nearest 1, rational polynomial models get us a very good approximation of asin and by
interpolating as we move from region to region, we can guarantee continuity and we happen to get monotonicity as
well. for the values near 1, we just use Math.asin as our region "approximation".
@param x sin(theta)
@return theta
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/ScaleFunction.java
| 577
|
[
"x"
] | true
| 9
| 8.32
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
pop
|
def pop(self, exc: BaseException | None = None) -> None:
    """Pop this context so that it is no longer the active context. Then
    call teardown functions and signals.

    Typically, this is not used directly. Instead, use a ``with`` block
    to manage the context.

    This context must currently be the active context, otherwise a
    :exc:`RuntimeError` is raised. In some situations, such as streaming or
    testing, the context may have been pushed multiple times. It will only
    trigger cleanup once it has been popped as many times as it was pushed.
    Until then, it will remain the active context.

    :param exc: An unhandled exception that was raised while the context was
        active. Passed to teardown functions.

    .. versionchanged:: 0.9
        Added the ``exc`` argument.
    """
    if self._cv_token is None:
        raise RuntimeError(f"Cannot pop this context ({self!r}), it is not pushed.")

    ctx = _cv_app.get(None)

    # The guard above already ensured this context was pushed (has a token),
    # so only the "no active context at all" case remains to check here.
    # (The previous `or self._cv_token is None` clause was unreachable.)
    if ctx is None:
        raise RuntimeError(
            f"Cannot pop this context ({self!r}), there is no active context."
        )

    if ctx is not self:
        raise RuntimeError(
            f"Cannot pop this context ({self!r}), it is not the active"
            f" context ({ctx!r})."
        )

    # A context pushed N times only tears down on the N-th pop.
    self._push_count -= 1

    if self._push_count > 0:
        return

    try:
        if self._request is not None:
            self.app.do_teardown_request(self, exc)
            self._request.close()
    finally:
        # Always run app-level teardown and deactivate this context, even if
        # request teardown raised.
        self.app.do_teardown_appcontext(self, exc)
        _cv_app.reset(self._cv_token)
        self._cv_token = None

    appcontext_popped.send(self.app, _async_wrapper=self.app.ensure_sync)
|
Pop this context so that it is no longer the active context. Then
call teardown functions and signals.
Typically, this is not used directly. Instead, use a ``with`` block
to manage the context.
This context must currently be the active context, otherwise a
:exc:`RuntimeError` is raised. In some situations, such as streaming or
testing, the context may have been pushed multiple times. It will only
trigger cleanup once it has been popped as many times as it was pushed.
Until then, it will remain the active context.
:param exc: An unhandled exception that was raised while the context was
active. Passed to teardown functions.
.. versionchanged:: 0.9
Added the ``exc`` argument.
|
python
|
src/flask/ctx.py
| 432
|
[
"self",
"exc"
] |
None
| true
| 7
| 6.88
|
pallets/flask
| 70,946
|
sphinx
| false
|
from
|
/**
 * Factory method that can be used to create an {@link InstanceSupplier} from a
 * {@link Supplier}.
 * @param <T> the instance type
 * @param supplier the supplier that will provide the instance
 * @return a new {@link InstanceSupplier}
 */
static <T> InstanceSupplier<T> from(@Nullable Supplier<T> supplier) {
    if (supplier == null) {
        // A missing supplier yields an InstanceSupplier that always provides null.
        return (registry) -> null;
    }
    return (registry) -> supplier.get();
}
|
Factory method that can be used to create an {@link InstanceSupplier} from a
{@link Supplier}.
@param <T> the instance type
@param supplier the supplier that will provide the instance
@return a new {@link InstanceSupplier}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/bootstrap/BootstrapRegistry.java
| 159
|
[
"supplier"
] | true
| 2
| 7.68
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
processApplicationEvents
|
/**
 * Process the events — if any — that were produced by the application thread.
 * Drains the application event queue in one batch, records queueing and
 * processing metrics, and processes each event in turn. A failure while
 * processing one event is logged and does not stop the batch.
 */
private void processApplicationEvents() {
    LinkedList<ApplicationEvent> events = new LinkedList<>();
    applicationEventQueue.drainTo(events);
    if (events.isEmpty())
        return;

    // The queue was just fully drained, so record its size as zero.
    asyncConsumerMetrics.recordApplicationEventQueueSize(0);
    long startMs = time.milliseconds();
    for (ApplicationEvent event : events) {
        // Record how long this event waited in the queue before being picked up.
        asyncConsumerMetrics.recordApplicationEventQueueTime(time.milliseconds() - event.enqueuedMs());
        try {
            if (event instanceof CompletableEvent) {
                // Hand completable events to the reaper (presumably so they can
                // be expired/completed exceptionally if never finished — confirm).
                applicationEventReaper.add((CompletableEvent<?>) event);
            }
            // Check if there are any metadata errors and fail the event if an error is present.
            // This call is meant to handle "immediately completed events" which may not enter the
            // awaiting state, so metadata errors need to be checked and handled right away.
            if (event instanceof MetadataErrorNotifiableEvent) {
                if (maybeFailOnMetadataError(List.of(event)))
                    continue;
            }
            applicationEventProcessor.process(event);
        } catch (Throwable t) {
            // Never let one bad event kill the network thread; log and move on.
            log.warn("Error processing event {}", t.getMessage(), t);
        }
    }
    asyncConsumerMetrics.recordApplicationEventQueueProcessingTime(time.milliseconds() - startMs);
}
|
Process the events-if any-that were produced by the application thread.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java
| 247
|
[] |
void
| true
| 6
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
abs
|
def abs(self) -> Self:
"""
Return a Series/DataFrame with absolute numeric value of each element.
This function only applies to elements that are all numeric.
Returns
-------
abs
Series/DataFrame containing the absolute value of each element.
See Also
--------
numpy.absolute : Calculate the absolute value element-wise.
Notes
-----
For ``complex`` inputs, ``1.2 + 1j``, the absolute value is
:math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
Absolute numeric values in a Series.
>>> s = pd.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a Series with complex numbers.
>>> s = pd.Series([1.2 + 1j])
>>> s.abs()
0 1.56205
dtype: float64
Absolute numeric values in a Series with a Timedelta element.
>>> s = pd.Series([pd.Timedelta("1 days")])
>>> s.abs()
0 1 days
dtype: timedelta64[us]
Select rows with data closest to certain value using argsort (from
`StackOverflow <https://stackoverflow.com/a/17758115>`__).
>>> df = pd.DataFrame(
... {"a": [4, 5, 6, 7], "b": [10, 20, 30, 40], "c": [100, 50, -30, -50]}
... )
>>> df
a b c
0 4 10 100
1 5 20 50
2 6 30 -30
3 7 40 -50
>>> df.loc[(df.c - 43).abs().argsort()]
a b c
1 5 20 50
0 4 10 100
2 6 30 -30
3 7 40 -50
"""
res_mgr = self._mgr.apply(np.abs)
return self._constructor_from_mgr(res_mgr, axes=res_mgr.axes).__finalize__(
self, name="abs"
)
|
Return a Series/DataFrame with absolute numeric value of each element.
This function only applies to elements that are all numeric.
Returns
-------
abs
Series/DataFrame containing the absolute value of each element.
See Also
--------
numpy.absolute : Calculate the absolute value element-wise.
Notes
-----
For ``complex`` inputs, ``1.2 + 1j``, the absolute value is
:math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
Absolute numeric values in a Series.
>>> s = pd.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a Series with complex numbers.
>>> s = pd.Series([1.2 + 1j])
>>> s.abs()
0 1.56205
dtype: float64
Absolute numeric values in a Series with a Timedelta element.
>>> s = pd.Series([pd.Timedelta("1 days")])
>>> s.abs()
0 1 days
dtype: timedelta64[us]
Select rows with data closest to certain value using argsort (from
`StackOverflow <https://stackoverflow.com/a/17758115>`__).
>>> df = pd.DataFrame(
... {"a": [4, 5, 6, 7], "b": [10, 20, 30, 40], "c": [100, 50, -30, -50]}
... )
>>> df
a b c
0 4 10 100
1 5 20 50
2 6 30 -30
3 7 40 -50
>>> df.loc[(df.c - 43).abs().argsort()]
a b c
1 5 20 50
0 4 10 100
2 6 30 -30
3 7 40 -50
|
python
|
pandas/core/generic.py
| 1,522
|
[
"self"
] |
Self
| true
| 1
| 7.2
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
add
|
/**
 * Add the key/value that was found as a result of the parsing.
 * @param key the {@link DissectKey}
 * @param value the discovered value for the key
 */
void add(DissectKey key, String value) {
    // Every parsed pair counts as a match, even ones that are then skipped.
    matches++;
    if (key.skip()) {
        return;
    }
    final String name = key.getName();
    switch (key.getModifier()) {
        case NONE -> simpleResults.put(name, value);
        // Appended values accumulate under one name; without an explicit
        // order, insertion order (implicitAppendOrder) is used.
        case APPEND -> appendResults.computeIfAbsent(name, k -> new AppendResult(appendSeparator))
            .addValue(value, implicitAppendOrder++);
        case APPEND_WITH_ORDER -> appendResults.computeIfAbsent(name, k -> new AppendResult(appendSeparator))
            .addValue(value, key.getAppendPosition());
        // FIELD_NAME/FIELD_VALUE pairs are joined later via the shared name.
        case FIELD_NAME -> referenceResults.computeIfAbsent(name, k -> new ReferenceResult()).setKey(value);
        case FIELD_VALUE -> referenceResults.computeIfAbsent(name, k -> new ReferenceResult()).setValue(value);
    }
}
|
Add the key/value that was found as result of the parsing
@param key the {@link DissectKey}
@param value the discovered value for the key
|
java
|
libs/dissect/src/main/java/org/elasticsearch/dissect/DissectMatch.java
| 58
|
[
"key",
"value"
] |
void
| true
| 2
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
getIndentationForNodeWorker
|
/**
 * Compute the indentation for `current` by walking up its parent chain,
 * accumulating `indentationDelta` for each parent that indents its children,
 * and returning early whenever an actual (source-text) indentation can be
 * read from a list item or from the node itself.
 *
 * @param current the node whose indentation is being computed
 * @param currentStart the line/character where `current` starts
 * @param ignoreActualIndentationRange when set, actual source indentation is
 *     not trusted for nodes starting inside this range
 * @param indentationDelta the indentation accumulated so far
 * @param isNextChild whether `current` is a prospective next child rather than
 *     an existing one (forwarded to `shouldIndentChildNode`)
 */
function getIndentationForNodeWorker(
    current: Node,
    currentStart: LineAndCharacter,
    ignoreActualIndentationRange: TextRange | undefined,
    indentationDelta: number,
    sourceFile: SourceFile,
    isNextChild: boolean,
    options: EditorSettings,
): number {
    let parent = current.parent;
    // Walk up the tree and collect indentation for parent-child node pairs. Indentation is not added if
    // * parent and child nodes start on the same line, or
    // * parent is an IfStatement and child starts on the same line as an 'else clause'.
    while (parent) {
        let useActualIndentation = true;
        if (ignoreActualIndentationRange) {
            const start = current.getStart(sourceFile);
            useActualIndentation = start < ignoreActualIndentationRange.pos || start > ignoreActualIndentationRange.end;
        }
        const containingListOrParentStart = getContainingListOrParentStart(parent, current, sourceFile);
        const parentAndChildShareLine = containingListOrParentStart.line === currentStart.line ||
            childStartsOnTheSameLineWithElseInIfStatement(parent, current, currentStart.line, sourceFile);
        if (useActualIndentation) {
            // check if current node is a list item - if yes, take indentation from it
            const firstListChild = getContainingList(current, sourceFile)?.[0];
            // A list indents its children if the children begin on a later line than the list itself:
            //
            // f1(               L0 - List start
            //     {             L1 - First child start: indented, along with all other children
            //         prop: 0
            //     },
            //     {
            //         prop: 1
            //     }
            // )
            //
            // f2({             L0 - List start and first child start: children are not indented.
            //     prop: 0           Object properties are indented only one level, because the list
            // }, {                  itself contributes nothing.
            //     prop: 1      L3 - The indentation of the second object literal is best understood by
            // })                    looking at the relationship between the list and *first* list item.
            const listIndentsChild = !!firstListChild && getStartLineAndCharacterForNode(firstListChild, sourceFile).line > containingListOrParentStart.line;
            let actualIndentation = getActualIndentationForListItem(current, sourceFile, options, listIndentsChild);
            if (actualIndentation !== Value.Unknown) {
                return actualIndentation + indentationDelta;
            }
            // try to fetch actual indentation for current node from source text
            actualIndentation = getActualIndentationForNode(current, parent, currentStart, parentAndChildShareLine, sourceFile, options);
            if (actualIndentation !== Value.Unknown) {
                return actualIndentation + indentationDelta;
            }
        }
        // increase indentation if parent node wants its content to be indented and parent and child nodes don't start on the same line
        if (shouldIndentChildNode(options, parent, current, sourceFile, isNextChild) && !parentAndChildShareLine) {
            indentationDelta += options.indentSize!;
        }
        // In our AST, a call argument's `parent` is the call-expression, not the argument list.
        // We would like to increase indentation based on the relationship between an argument and its argument-list,
        // so we spoof the starting position of the (parent) call-expression to match the (non-parent) argument-list.
        // But, the spoofed start-value could then cause a problem when comparing the start position of the call-expression
        // to *its* parent (in the case of an iife, an expression statement), adding an extra level of indentation.
        //
        // Instead, when at an argument, we unspoof the starting position of the enclosing call expression
        // *after* applying indentation for the argument.
        const useTrueStart = isArgumentAndStartLineOverlapsExpressionBeingCalled(parent, current, currentStart.line, sourceFile);
        current = parent;
        parent = current.parent;
        currentStart = useTrueStart ? sourceFile.getLineAndCharacterOfPosition(current.getStart(sourceFile)) : containingListOrParentStart;
    }
    // Ran out of parents: fall back to the accumulated delta plus the base indentation.
    return indentationDelta + getBaseIndentation(options);
}
|
@param assumeNewLineBeforeCloseBrace
`false` when called on text from a real source file.
`true` when we need to assume `position` is on a newline.
This is useful for codefixes. Consider
```
function f() {
|}
```
with `position` at `|`.
When inserting some text after an open brace, we would like to get indentation as if a newline was already there.
By default indentation at `position` will be 0 so 'assumeNewLineBeforeCloseBrace' overrides this behavior.
|
typescript
|
src/services/formatting/smartIndenter.ts
| 240
|
[
"current",
"currentStart",
"ignoreActualIndentationRange",
"indentationDelta",
"sourceFile",
"isNextChild",
"options"
] | true
| 12
| 8.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
withJsonResource
|
/**
 * Add the content of a {@link ConfigurationMetadataRepository} defined by the
 * specified {@link InputStream} JSON document using the specified {@link Charset}. If
 * this metadata repository holds items that were loaded previously, these are
 * ignored.
 * <p>
 * Leaves the stream open when done.
 * @param inputStream the source input stream
 * @param charset the charset of the input
 * @return this builder
 * @throws IOException in case of I/O errors
 */
public ConfigurationMetadataRepositoryJsonBuilder withJsonResource(InputStream inputStream, Charset charset)
        throws IOException {
    if (inputStream != null) {
        this.repositories.add(add(inputStream, charset));
        return this;
    }
    throw new IllegalArgumentException("InputStream must not be null.");
}
|
Add the content of a {@link ConfigurationMetadataRepository} defined by the
specified {@link InputStream} JSON document using the specified {@link Charset}. If
this metadata repository holds items that were loaded previously, these are
ignored.
<p>
Leaves the stream open when done.
@param inputStream the source input stream
@param charset the charset of the input
@return this builder
@throws IOException in case of I/O errors
|
java
|
configuration-metadata/spring-boot-configuration-metadata/src/main/java/org/springframework/boot/configurationmetadata/ConfigurationMetadataRepositoryJsonBuilder.java
| 72
|
[
"inputStream",
"charset"
] |
ConfigurationMetadataRepositoryJsonBuilder
| true
| 2
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
_concatenate_shapes
|
def _concatenate_shapes(shapes, axis):
"""Given array shapes, return the resulting shape and slices prefixes.
These help in nested concatenation.
Returns
-------
shape: tuple of int
This tuple satisfies::
shape, _ = _concatenate_shapes([arr.shape for shape in arrs], axis)
shape == concatenate(arrs, axis).shape
slice_prefixes: tuple of (slice(start, end), )
For a list of arrays being concatenated, this returns the slice
in the larger array at axis that needs to be sliced into.
For example, the following holds::
ret = concatenate([a, b, c], axis)
_, (sl_a, sl_b, sl_c) = concatenate_slices([a, b, c], axis)
ret[(slice(None),) * axis + sl_a] == a
ret[(slice(None),) * axis + sl_b] == b
ret[(slice(None),) * axis + sl_c] == c
These are called slice prefixes since they are used in the recursive
blocking algorithm to compute the left-most slices during the
recursion. Therefore, they must be prepended to rest of the slice
that was computed deeper in the recursion.
These are returned as tuples to ensure that they can quickly be added
to existing slice tuple without creating a new tuple every time.
"""
# Cache a result that will be reused.
shape_at_axis = [shape[axis] for shape in shapes]
# Take a shape, any shape
first_shape = shapes[0]
first_shape_pre = first_shape[:axis]
first_shape_post = first_shape[axis + 1:]
if any(shape[:axis] != first_shape_pre or
shape[axis + 1:] != first_shape_post for shape in shapes):
raise ValueError(
f'Mismatched array shapes in block along axis {axis}.')
shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis + 1:])
offsets_at_axis = _accumulate(shape_at_axis)
slice_prefixes = [(slice(start, end),)
for start, end in zip([0] + offsets_at_axis,
offsets_at_axis)]
return shape, slice_prefixes
|
Given array shapes, return the resulting shape and slices prefixes.
These help in nested concatenation.
Returns
-------
shape: tuple of int
This tuple satisfies::
shape, _ = _concatenate_shapes([arr.shape for shape in arrs], axis)
shape == concatenate(arrs, axis).shape
slice_prefixes: tuple of (slice(start, end), )
For a list of arrays being concatenated, this returns the slice
in the larger array at axis that needs to be sliced into.
For example, the following holds::
ret = concatenate([a, b, c], axis)
_, (sl_a, sl_b, sl_c) = concatenate_slices([a, b, c], axis)
ret[(slice(None),) * axis + sl_a] == a
ret[(slice(None),) * axis + sl_b] == b
ret[(slice(None),) * axis + sl_c] == c
These are called slice prefixes since they are used in the recursive
blocking algorithm to compute the left-most slices during the
recursion. Therefore, they must be prepended to rest of the slice
that was computed deeper in the recursion.
These are returned as tuples to ensure that they can quickly be added
to existing slice tuple without creating a new tuple every time.
|
python
|
numpy/_core/shape_base.py
| 638
|
[
"shapes",
"axis"
] | false
| 3
| 6.08
|
numpy/numpy
| 31,054
|
unknown
| false
|
|
toString
|
@Override
public String toString() {
return "ConfigurableEnvironmentPropertySource {propertySources=" + super.source.getPropertySources() + "}";
}
|
Convert the supplied value to a {@link String} using the {@link ConversionService}
from the {@link Environment}.
<p>This is a modified version of
{@link org.springframework.core.env.AbstractPropertyResolver#convertValueIfNecessary(Object, Class)}.
@param value the value to convert
@return the converted value, or the original value if no conversion is necessary
@since 6.2.8
|
java
|
spring-context/src/main/java/org/springframework/context/support/PropertySourcesPlaceholderConfigurer.java
| 271
|
[] |
String
| true
| 1
| 6.16
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
hashCode
|
@Override
public int hashCode() {
E e = getElement();
return ((e == null) ? 0 : e.hashCode()) ^ getCount();
}
|
Return this entry's hash code, following the behavior specified in {@link
Multiset.Entry#hashCode}.
|
java
|
android/guava/src/com/google/common/collect/Multisets.java
| 845
|
[] | true
| 2
| 6.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
failableStream
|
public static <T> FailableStream<T> failableStream(final T value) {
return failableStream(streamOf(value));
}
|
Shorthand for {@code Streams.failableStream(value == null ? Stream.empty() : Stream.of(value))}.
@param <T> the type of stream elements.
@param value the single element of the new stream, may be {@code null}.
@return the new FailableStream on {@code value} or an empty stream.
@since 3.15.0
|
java
|
src/main/java/org/apache/commons/lang3/stream/Streams.java
| 577
|
[
"value"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
truePredicate
|
@SuppressWarnings("unchecked")
static <T, E extends Throwable> FailablePredicate<T, E> truePredicate() {
return TRUE;
}
|
Gets the TRUE singleton.
@param <T> Predicate type.
@param <E> The kind of thrown exception or error.
@return The NOP singleton.
|
java
|
src/main/java/org/apache/commons/lang3/function/FailablePredicate.java
| 60
|
[] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
of
|
@Contract("_, false -> !null")
static @Nullable ConfigurationPropertyName of(@Nullable CharSequence name, boolean returnNullIfInvalid) {
Elements elements = elementsOf(name, returnNullIfInvalid, ElementsParser.DEFAULT_CAPACITY);
return (elements != null) ? new ConfigurationPropertyName(elements) : null;
}
|
Return a {@link ConfigurationPropertyName} for the specified string.
@param name the source name
@param returnNullIfInvalid if null should be returned if the name is not valid
@return a {@link ConfigurationPropertyName} instance
@throws InvalidConfigurationPropertyNameException if the name is not valid and
{@code returnNullIfInvalid} is {@code false}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyName.java
| 662
|
[
"name",
"returnNullIfInvalid"
] |
ConfigurationPropertyName
| true
| 2
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
right_shift
|
def right_shift(a, n):
"""
Shift the bits of an integer to the right.
This is the masked array version of `numpy.right_shift`, for details
see that function.
See Also
--------
numpy.right_shift
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> x = [11, 3, 8, 1]
>>> mask = [0, 0, 0, 1]
>>> masked_x = ma.masked_array(x, mask)
>>> masked_x
masked_array(data=[11, 3, 8, --],
mask=[False, False, False, True],
fill_value=999999)
>>> ma.right_shift(masked_x,1)
masked_array(data=[5, 1, 4, --],
mask=[False, False, False, True],
fill_value=999999)
"""
m = getmask(a)
if m is nomask:
d = umath.right_shift(filled(a), n)
return masked_array(d)
else:
d = umath.right_shift(filled(a, 0), n)
return masked_array(d, mask=m)
|
Shift the bits of an integer to the right.
This is the masked array version of `numpy.right_shift`, for details
see that function.
See Also
--------
numpy.right_shift
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> x = [11, 3, 8, 1]
>>> mask = [0, 0, 0, 1]
>>> masked_x = ma.masked_array(x, mask)
>>> masked_x
masked_array(data=[11, 3, 8, --],
mask=[False, False, False, True],
fill_value=999999)
>>> ma.right_shift(masked_x,1)
masked_array(data=[5, 1, 4, --],
mask=[False, False, False, True],
fill_value=999999)
|
python
|
numpy/ma/core.py
| 7,457
|
[
"a",
"n"
] | false
| 3
| 6.48
|
numpy/numpy
| 31,054
|
unknown
| false
|
|
findFactoryMethod
|
private static @Nullable Method findFactoryMethod(ConfigurableListableBeanFactory beanFactory, String beanName) {
if (beanFactory.containsBeanDefinition(beanName)) {
BeanDefinition beanDefinition = beanFactory.getMergedBeanDefinition(beanName);
if (beanDefinition instanceof RootBeanDefinition rootBeanDefinition) {
return rootBeanDefinition.getResolvedFactoryMethod();
}
}
return null;
}
|
Return a {@link ConfigurationPropertiesBean @ConfigurationPropertiesBean} instance
for the given bean details or {@code null} if the bean is not a
{@link ConfigurationProperties @ConfigurationProperties} object. Annotations are
considered both on the bean itself, as well as any factory method (for example a
{@link Bean @Bean} method).
@param applicationContext the source application context
@param bean the bean to consider
@param beanName the bean name
@return a configuration properties bean or {@code null} if the neither the bean nor
factory method are annotated with
{@link ConfigurationProperties @ConfigurationProperties}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/ConfigurationPropertiesBean.java
| 232
|
[
"beanFactory",
"beanName"
] |
Method
| true
| 3
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
_maybe_get_mask
|
def _maybe_get_mask(
values: np.ndarray, skipna: bool, mask: npt.NDArray[np.bool_] | None
) -> npt.NDArray[np.bool_] | None:
"""
Compute a mask if and only if necessary.
This function will compute a mask iff it is necessary. Otherwise,
return the provided mask (potentially None) when a mask does not need to be
computed.
A mask is never necessary if the values array is of boolean or integer
dtypes, as these are incapable of storing NaNs. If passing a NaN-capable
dtype that is interpretable as either boolean or integer data (eg,
timedelta64), a mask must be provided.
If the skipna parameter is False, a new mask will not be computed.
The mask is computed using isna() by default. Setting invert=True selects
notna() as the masking function.
Parameters
----------
values : ndarray
input array to potentially compute mask for
skipna : bool
boolean for whether NaNs should be skipped
mask : Optional[ndarray]
nan-mask if known
Returns
-------
Optional[np.ndarray[bool]]
"""
if mask is None:
if values.dtype.kind in "biu":
# Boolean data cannot contain nulls, so signal via mask being None
return None
if skipna or values.dtype.kind in "mM":
mask = isna(values)
return mask
|
Compute a mask if and only if necessary.
This function will compute a mask iff it is necessary. Otherwise,
return the provided mask (potentially None) when a mask does not need to be
computed.
A mask is never necessary if the values array is of boolean or integer
dtypes, as these are incapable of storing NaNs. If passing a NaN-capable
dtype that is interpretable as either boolean or integer data (eg,
timedelta64), a mask must be provided.
If the skipna parameter is False, a new mask will not be computed.
The mask is computed using isna() by default. Setting invert=True selects
notna() as the masking function.
Parameters
----------
values : ndarray
input array to potentially compute mask for
skipna : bool
boolean for whether NaNs should be skipped
mask : Optional[ndarray]
nan-mask if known
Returns
-------
Optional[np.ndarray[bool]]
|
python
|
pandas/core/nanops.py
| 211
|
[
"values",
"skipna",
"mask"
] |
npt.NDArray[np.bool_] | None
| true
| 5
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
_list_all
|
def _list_all(self, api_call: Callable, response_key: str, verbose: bool) -> list:
"""
Repeatedly call a provided boto3 API Callable and collates the responses into a List.
:param api_call: The api command to execute.
:param response_key: Which dict key to collect into the final list.
:param verbose: Provides additional logging if set to True. Defaults to False.
:return: A List of the combined results of the provided API call.
"""
name_collection: list = []
token: str | None = DEFAULT_PAGINATION_TOKEN
while token is not None:
response = api_call(nextToken=token)
# If response list is not empty, append it to the running list.
name_collection += filter(None, response.get(response_key))
token = response.get("nextToken")
self.log.info("Retrieved list of %s %s.", len(name_collection), response_key)
if verbose:
self.log.info("%s found: %s", response_key.title(), name_collection)
return name_collection
|
Repeatedly call a provided boto3 API Callable and collates the responses into a List.
:param api_call: The api command to execute.
:param response_key: Which dict key to collect into the final list.
:param verbose: Provides additional logging if set to True. Defaults to False.
:return: A List of the combined results of the provided API call.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/eks.py
| 522
|
[
"self",
"api_call",
"response_key",
"verbose"
] |
list
| true
| 3
| 8.4
|
apache/airflow
| 43,597
|
sphinx
| false
|
apply_replacements
|
def apply_replacements(replacements: dict[str, str], text: str) -> str:
"""
Applies the given replacements within the text.
Args:
replacements (dict): Mapping of str -> str replacements.
text (str): Text in which to make replacements.
Returns:
Text with replacements applied, if any.
"""
for before, after in replacements.items():
text = text.replace(before, after)
return text
|
Applies the given replacements within the text.
Args:
replacements (dict): Mapping of str -> str replacements.
text (str): Text in which to make replacements.
Returns:
Text with replacements applied, if any.
|
python
|
tools/setup_helpers/gen_version_header.py
| 36
|
[
"replacements",
"text"
] |
str
| true
| 2
| 8.08
|
pytorch/pytorch
| 96,034
|
google
| false
|
getValue
|
@Deprecated
@Override
public Short getValue() {
return Short.valueOf(this.value);
}
|
Gets the value as a Short instance.
@return the value as a Short, never null.
@deprecated Use {@link #get()}.
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableShort.java
| 259
|
[] |
Short
| true
| 1
| 7.04
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
rank
|
def rank(
self,
method: WindowingRankType = "average",
ascending: bool = True,
pct: bool = False,
numeric_only: bool = False,
):
"""
Calculate the expanding rank.
Parameters
----------
method : {'average', 'min', 'max'}, default 'average'
How to rank the group of records that have the same value (i.e. ties):
* average: average rank of the group
* min: lowest rank in the group
* max: highest rank in the group
ascending : bool, default True
Whether or not the elements should be ranked in ascending order.
pct : bool, default False
Whether or not to display the returned rankings in percentile
form.
numeric_only : bool, default False
Include only float, int, boolean columns.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.rank : Aggregating rank for Series.
DataFrame.rank : Aggregating rank for DataFrame.
Examples
--------
>>> s = pd.Series([1, 4, 2, 3, 5, 3])
>>> s.expanding().rank()
0 1.0
1 2.0
2 2.0
3 3.0
4 5.0
5 3.5
dtype: float64
>>> s.expanding().rank(method="max")
0 1.0
1 2.0
2 2.0
3 3.0
4 5.0
5 4.0
dtype: float64
>>> s.expanding().rank(method="min")
0 1.0
1 2.0
2 2.0
3 3.0
4 5.0
5 3.0
dtype: float64
"""
return super().rank(
method=method,
ascending=ascending,
pct=pct,
numeric_only=numeric_only,
)
|
Calculate the expanding rank.
Parameters
----------
method : {'average', 'min', 'max'}, default 'average'
How to rank the group of records that have the same value (i.e. ties):
* average: average rank of the group
* min: lowest rank in the group
* max: highest rank in the group
ascending : bool, default True
Whether or not the elements should be ranked in ascending order.
pct : bool, default False
Whether or not to display the returned rankings in percentile
form.
numeric_only : bool, default False
Include only float, int, boolean columns.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.rank : Aggregating rank for Series.
DataFrame.rank : Aggregating rank for DataFrame.
Examples
--------
>>> s = pd.Series([1, 4, 2, 3, 5, 3])
>>> s.expanding().rank()
0 1.0
1 2.0
2 2.0
3 3.0
4 5.0
5 3.5
dtype: float64
>>> s.expanding().rank(method="max")
0 1.0
1 2.0
2 2.0
3 3.0
4 5.0
5 4.0
dtype: float64
>>> s.expanding().rank(method="min")
0 1.0
1 2.0
2 2.0
3 3.0
4 5.0
5 3.0
dtype: float64
|
python
|
pandas/core/window/expanding.py
| 1,125
|
[
"self",
"method",
"ascending",
"pct",
"numeric_only"
] | true
| 1
| 7.2
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
createDateTimeFormatter
|
public DateTimeFormatter createDateTimeFormatter(DateTimeFormatter fallbackFormatter) {
DateTimeFormatter dateTimeFormatter = null;
if (StringUtils.hasLength(this.pattern)) {
dateTimeFormatter = DateTimeFormatterUtils.createStrictDateTimeFormatter(this.pattern);
}
else if (this.iso != null && this.iso != ISO.NONE) {
dateTimeFormatter = switch (this.iso) {
case DATE -> DateTimeFormatter.ISO_DATE;
case TIME -> DateTimeFormatter.ISO_TIME;
case DATE_TIME -> DateTimeFormatter.ISO_DATE_TIME;
default -> throw new IllegalStateException("Unsupported ISO format: " + this.iso);
};
}
else if (this.dateStyle != null && this.timeStyle != null) {
dateTimeFormatter = DateTimeFormatter.ofLocalizedDateTime(this.dateStyle, this.timeStyle);
}
else if (this.dateStyle != null) {
dateTimeFormatter = DateTimeFormatter.ofLocalizedDate(this.dateStyle);
}
else if (this.timeStyle != null) {
dateTimeFormatter = DateTimeFormatter.ofLocalizedTime(this.timeStyle);
}
if (dateTimeFormatter != null && this.timeZone != null) {
dateTimeFormatter = dateTimeFormatter.withZone(this.timeZone.toZoneId());
}
return (dateTimeFormatter != null ? dateTimeFormatter : fallbackFormatter);
}
|
Create a new {@code DateTimeFormatter} using this factory.
<p>If no specific pattern or style has been defined,
the supplied {@code fallbackFormatter} will be used.
@param fallbackFormatter the fall-back formatter to use
when no specific factory properties have been set
@return a new date time formatter
|
java
|
spring-context/src/main/java/org/springframework/format/datetime/standard/DateTimeFormatterFactory.java
| 175
|
[
"fallbackFormatter"
] |
DateTimeFormatter
| true
| 11
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
broadcast
|
def broadcast(self, command, arguments=None, destination=None,
connection=None, reply=False, timeout=1.0, limit=None,
callback=None, channel=None, pattern=None, matcher=None,
**extra_kwargs):
"""Broadcast a control command to the celery workers.
Arguments:
command (str): Name of command to send.
arguments (Dict): Keyword arguments for the command.
destination (List): If set, a list of the hosts to send the
command to, when empty broadcast to all workers.
connection (kombu.Connection): Custom broker connection to use,
if not set, a connection will be acquired from the pool.
reply (bool): Wait for and return the reply.
timeout (float): Timeout in seconds to wait for the reply.
limit (int): Limit number of replies.
callback (Callable): Callback called immediately for
each reply received.
pattern (str): Custom pattern string to match
matcher (Callable): Custom matcher to run the pattern to match
"""
with self.app.connection_or_acquire(connection) as conn:
arguments = dict(arguments or {}, **extra_kwargs)
if pattern and matcher:
# tests pass easier without requiring pattern/matcher to
# always be sent in
return self.mailbox(conn)._broadcast(
command, arguments, destination, reply, timeout,
limit, callback, channel=channel,
pattern=pattern, matcher=matcher,
)
else:
return self.mailbox(conn)._broadcast(
command, arguments, destination, reply, timeout,
limit, callback, channel=channel,
)
|
Broadcast a control command to the celery workers.
Arguments:
command (str): Name of command to send.
arguments (Dict): Keyword arguments for the command.
destination (List): If set, a list of the hosts to send the
command to, when empty broadcast to all workers.
connection (kombu.Connection): Custom broker connection to use,
if not set, a connection will be acquired from the pool.
reply (bool): Wait for and return the reply.
timeout (float): Timeout in seconds to wait for the reply.
limit (int): Limit number of replies.
callback (Callable): Callback called immediately for
each reply received.
pattern (str): Custom pattern string to match
matcher (Callable): Custom matcher to run the pattern to match
|
python
|
celery/app/control.py
| 753
|
[
"self",
"command",
"arguments",
"destination",
"connection",
"reply",
"timeout",
"limit",
"callback",
"channel",
"pattern",
"matcher"
] | false
| 5
| 6.08
|
celery/celery
| 27,741
|
google
| false
|
|
fillna
|
def fillna(self, value):
"""
Fill NA/NaN values with the specified value.
Parameters
----------
value : scalar
Scalar value to use to fill holes (e.g. 0).
This value cannot be a list-likes.
Returns
-------
Index
NA/NaN values replaced with `value`.
See Also
--------
DataFrame.fillna : Fill NaN values of a DataFrame.
Series.fillna : Fill NaN Values of a Series.
Examples
--------
>>> idx = pd.Index([np.nan, np.nan, 3])
>>> idx.fillna(0)
Index([0.0, 0.0, 3.0], dtype='float64')
"""
if not is_scalar(value):
raise TypeError(f"'value' must be a scalar, passed: {type(value).__name__}")
if self.hasnans:
result = self.putmask(self._isnan, value)
# no need to care metadata other than name
# because it can't have freq if it has NaTs
# _with_infer needed for test_fillna_categorical
return Index._with_infer(result, name=self.name)
return self._view()
|
Fill NA/NaN values with the specified value.
Parameters
----------
value : scalar
Scalar value to use to fill holes (e.g. 0).
This value cannot be a list-likes.
Returns
-------
Index
NA/NaN values replaced with `value`.
See Also
--------
DataFrame.fillna : Fill NaN values of a DataFrame.
Series.fillna : Fill NaN Values of a Series.
Examples
--------
>>> idx = pd.Index([np.nan, np.nan, 3])
>>> idx.fillna(0)
Index([0.0, 0.0, 3.0], dtype='float64')
|
python
|
pandas/core/indexes/base.py
| 2,744
|
[
"self",
"value"
] | false
| 3
| 7.84
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
andBitCount
|
public static int andBitCount(byte[] a, byte[] b) {
if (a.length != b.length) {
throw new IllegalArgumentException("vector dimensions differ: " + a.length + "!=" + b.length);
}
try {
return (int) BIT_COUNT_MH.invokeExact(a, b);
} catch (Throwable e) {
if (e instanceof Error err) {
throw err;
} else if (e instanceof RuntimeException re) {
throw re;
} else {
throw new RuntimeException(e);
}
}
}
|
AND bit count computed over signed bytes.
Copied from Lucene's XOR implementation
@param a bytes containing a vector
@param b bytes containing another vector, of the same dimension
@return the value of the AND bit count of the two vectors
|
java
|
libs/simdvec/src/main/java/org/elasticsearch/simdvec/ESVectorUtil.java
| 128
|
[
"a",
"b"
] | true
| 5
| 8.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
resolveReturnTypeForFactoryMethod
|
public static Class<?> resolveReturnTypeForFactoryMethod(
Method method, @Nullable Object[] args, @Nullable ClassLoader classLoader) {
Assert.notNull(method, "Method must not be null");
Assert.notNull(args, "Argument array must not be null");
TypeVariable<Method>[] declaredTypeVariables = method.getTypeParameters();
Type genericReturnType = method.getGenericReturnType();
Type[] methodParameterTypes = method.getGenericParameterTypes();
Assert.isTrue(args.length == methodParameterTypes.length, "Argument array does not match parameter count");
// Ensure that the type variable (for example, T) is declared directly on the method
// itself (for example, via <T>), not on the enclosing class or interface.
boolean locallyDeclaredTypeVariableMatchesReturnType = false;
for (TypeVariable<Method> currentTypeVariable : declaredTypeVariables) {
if (currentTypeVariable.equals(genericReturnType)) {
locallyDeclaredTypeVariableMatchesReturnType = true;
break;
}
}
if (locallyDeclaredTypeVariableMatchesReturnType) {
for (int i = 0; i < methodParameterTypes.length; i++) {
Type methodParameterType = methodParameterTypes[i];
Object arg = args[i];
if (methodParameterType.equals(genericReturnType)) {
if (arg instanceof TypedStringValue typedValue) {
if (typedValue.hasTargetType()) {
return typedValue.getTargetType();
}
try {
Class<?> resolvedType = typedValue.resolveTargetType(classLoader);
if (resolvedType != null) {
return resolvedType;
}
}
catch (ClassNotFoundException ex) {
throw new IllegalStateException("Failed to resolve value type [" +
typedValue.getTargetTypeName() + "] for factory method argument", ex);
}
}
else if (arg != null && !(arg instanceof BeanMetadataElement)) {
// Only consider argument type if it is a simple value...
return arg.getClass();
}
return method.getReturnType();
}
else if (methodParameterType instanceof ParameterizedType parameterizedType) {
Type[] actualTypeArguments = parameterizedType.getActualTypeArguments();
for (Type typeArg : actualTypeArguments) {
if (typeArg.equals(genericReturnType)) {
if (arg instanceof Class<?> clazz) {
return clazz;
}
else {
String className = null;
if (arg instanceof String name) {
className = name;
}
else if (arg instanceof TypedStringValue typedValue) {
String targetTypeName = typedValue.getTargetTypeName();
if (targetTypeName == null || Class.class.getName().equals(targetTypeName)) {
className = typedValue.getValue();
}
}
if (className != null) {
try {
return ClassUtils.forName(className, classLoader);
}
catch (ClassNotFoundException ex) {
throw new IllegalStateException("Could not resolve class name [" + arg +
"] for factory method argument", ex);
}
}
// Consider adding logic to determine the class of the typeArg, if possible.
// For now, just fall back...
return method.getReturnType();
}
}
}
}
}
}
// Fall back...
return method.getReturnType();
}
|
Determine the target type for the generic return type of the given
<em>generic factory method</em>, where formal type variables are declared
on the given method itself.
<p>For example, given a factory method with the following signature, if
{@code resolveReturnTypeForFactoryMethod()} is invoked with the reflected
method for {@code createProxy()} and an {@code Object[]} array containing
{@code MyService.class}, {@code resolveReturnTypeForFactoryMethod()} will
infer that the target return type is {@code MyService}.
<pre class="code">{@code public static <T> T createProxy(Class<T> clazz)}</pre>
<h4>Possible Return Values</h4>
<ul>
<li>the target return type, if it can be inferred</li>
<li>the {@linkplain Method#getReturnType() standard return type}, if
the given {@code method} does not declare any {@linkplain
Method#getTypeParameters() formal type variables}</li>
<li>the {@linkplain Method#getReturnType() standard return type}, if the
target return type cannot be inferred (for example, due to type erasure)</li>
<li>{@code null}, if the length of the given arguments array is shorter
than the length of the {@linkplain
Method#getGenericParameterTypes() formal argument list} for the given
method</li>
</ul>
@param method the method to introspect (never {@code null})
@param args the arguments that will be supplied to the method when it is
invoked (never {@code null})
@param classLoader the ClassLoader to resolve class names against,
if necessary (never {@code null})
@return the resolved target return type or the standard method return type
@since 3.2.5
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AutowireUtils.java
| 178
|
[
"method",
"args",
"classLoader"
] | true
| 20
| 6.32
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
forLocation
|
public static JksSslStoreDetails forLocation(@Nullable String location) {
return new JksSslStoreDetails(null, null, location, null);
}
|
Factory method to create a new {@link JksSslStoreDetails} instance for the given
location.
@param location the location
@return a new {@link JksSslStoreDetails} instance.
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/jks/JksSslStoreDetails.java
| 64
|
[
"location"
] |
JksSslStoreDetails
| true
| 1
| 6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getOrCreate
|
JarFile getOrCreate(boolean useCaches, URL jarFileUrl) throws IOException {
if (useCaches) {
JarFile cached = getCached(jarFileUrl);
if (cached != null) {
return cached;
}
}
return this.factory.createJarFile(jarFileUrl, this::onClose);
}
|
Get an existing {@link JarFile} instance from the cache, or create a new
{@link JarFile} instance that can be {@link #cacheIfAbsent(boolean, URL, JarFile)
cached later}.
@param useCaches if caches can be used
@param jarFileUrl the jar file URL
@return a new or existing {@link JarFile} instance
@throws IOException on I/O error
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/jar/UrlJarFiles.java
| 65
|
[
"useCaches",
"jarFileUrl"
] |
JarFile
| true
| 3
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
check_memory
|
def check_memory(memory):
"""Check that ``memory`` is joblib.Memory-like.
joblib.Memory-like means that ``memory`` can be converted into a
joblib.Memory instance (typically a str denoting the ``location``)
or has the same interface (has a ``cache`` method).
Parameters
----------
memory : None, str or object with the joblib.Memory interface
- If string, the location where to create the `joblib.Memory` interface.
- If None, no caching is done and the Memory object is completely transparent.
Returns
-------
memory : object with the joblib.Memory interface
A correct joblib.Memory object.
Raises
------
ValueError
If ``memory`` is not joblib.Memory-like.
Examples
--------
>>> from sklearn.utils.validation import check_memory
>>> check_memory("caching_dir")
Memory(location=caching_dir/joblib)
"""
if memory is None or isinstance(memory, str):
memory = joblib.Memory(location=memory, verbose=0)
elif not hasattr(memory, "cache"):
raise ValueError(
"'memory' should be None, a string or have the same"
" interface as joblib.Memory."
" Got memory='{}' instead.".format(memory)
)
return memory
|
Check that ``memory`` is joblib.Memory-like.
joblib.Memory-like means that ``memory`` can be converted into a
joblib.Memory instance (typically a str denoting the ``location``)
or has the same interface (has a ``cache`` method).
Parameters
----------
memory : None, str or object with the joblib.Memory interface
- If string, the location where to create the `joblib.Memory` interface.
- If None, no caching is done and the Memory object is completely transparent.
Returns
-------
memory : object with the joblib.Memory interface
A correct joblib.Memory object.
Raises
------
ValueError
If ``memory`` is not joblib.Memory-like.
Examples
--------
>>> from sklearn.utils.validation import check_memory
>>> check_memory("caching_dir")
Memory(location=caching_dir/joblib)
|
python
|
sklearn/utils/validation.py
| 405
|
[
"memory"
] | false
| 4
| 7.04
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
validate_periods
|
def validate_periods(periods: int | None) -> int | None:
"""
If a `periods` argument is passed to the Datetime/Timedelta Array/Index
constructor, cast it to an integer.
Parameters
----------
periods : None, int
Returns
-------
periods : None or int
Raises
------
TypeError
if periods is not None or int
"""
if periods is not None and not lib.is_integer(periods):
raise TypeError(f"periods must be an integer, got {periods}")
# error: Incompatible return value type (got "int | integer[Any] | None",
# expected "int | None")
return periods # type: ignore[return-value]
|
If a `periods` argument is passed to the Datetime/Timedelta Array/Index
constructor, cast it to an integer.
Parameters
----------
periods : None, int
Returns
-------
periods : None or int
Raises
------
TypeError
if periods is not None or int
|
python
|
pandas/core/arrays/datetimelike.py
| 2,669
|
[
"periods"
] |
int | None
| true
| 3
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
compose
|
default <V> FailableFunction<V, R, E> compose(final FailableFunction<? super V, ? extends T, E> before) {
Objects.requireNonNull(before);
return (final V v) -> apply(before.apply(v));
}
|
Returns a composed {@link FailableFunction} like {@link Function#compose(Function)}.
@param <V> the input type to the {@code before} function, and to the composed function.
@param before the operator to apply before this one.
@return a composed {@link FailableFunction} like {@link Function#compose(Function)}.
@throws NullPointerException if before is null.
@see #andThen(FailableFunction)
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableFunction.java
| 107
|
[
"before"
] | true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
error
|
public Errors error() {
return Errors.forCode(data.errorCode());
}
|
The number of each type of error in the response, including {@link Errors#NONE} and top-level errors as well as
more specifically scoped errors (such as topic or partition-level errors).
@return A count of errors.
|
java
|
clients/src/main/java/org/apache/kafka/common/requests/AllocateProducerIdsResponse.java
| 63
|
[] |
Errors
| true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
print_async_result_status
|
def print_async_result_status(completed_list: list[ApplyResult]) -> None:
"""
Print status of completed async results.
:param completed_list: list of completed async results.
"""
completed_list.sort(key=lambda x: x.get()[1])
get_console().print()
for result in completed_list:
return_code, info = result.get()
info = info.replace("[", "\\[")
if return_code != 0:
get_console().print(f"[error]NOK[/] for {info}: Return code: {return_code}.")
else:
get_console().print(f"[success]OK [/] for {info}.")
get_console().print()
|
Print status of completed async results.
:param completed_list: list of completed async results.
|
python
|
dev/breeze/src/airflow_breeze/utils/parallel.py
| 346
|
[
"completed_list"
] |
None
| true
| 4
| 6.56
|
apache/airflow
| 43,597
|
sphinx
| false
|
toString
|
@Override
public String toString() {
return getClass().getName() + ": patterns " + ObjectUtils.nullSafeToString(this.patterns) +
", excluded patterns " + ObjectUtils.nullSafeToString(this.excludedPatterns);
}
|
Does the exclusion pattern at the given index match the given String?
@param pattern the {@code String} pattern to match
@param patternIndex index of pattern (starting from 0)
@return {@code true} if there is a match, {@code false} otherwise
|
java
|
spring-aop/src/main/java/org/springframework/aop/support/AbstractRegexpMethodPointcut.java
| 217
|
[] |
String
| true
| 1
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
sort
|
public static double[] sort(final double[] array) {
if (array != null) {
Arrays.sort(array);
}
return array;
}
|
Sorts the given array into ascending order and returns it.
@param array the array to sort (may be null).
@return the given array.
@see Arrays#sort(double[])
|
java
|
src/main/java/org/apache/commons/lang3/ArraySorter.java
| 65
|
[
"array"
] | true
| 2
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
get
|
public static @Nullable LogFile get(PropertyResolver propertyResolver) {
String file = propertyResolver.getProperty(FILE_NAME_PROPERTY);
String path = propertyResolver.getProperty(FILE_PATH_PROPERTY);
if (StringUtils.hasLength(file) || StringUtils.hasLength(path)) {
return new LogFile(file, path);
}
return null;
}
|
Get a {@link LogFile} from the given Spring {@link Environment}.
@param propertyResolver the {@link PropertyResolver} used to obtain the logging
properties
@return a {@link LogFile} or {@code null} if the environment didn't contain any
suitable properties
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/LogFile.java
| 116
|
[
"propertyResolver"
] |
LogFile
| true
| 3
| 7.44
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
addNoMatchOutcomeToAncestors
|
private void addNoMatchOutcomeToAncestors(String source) {
String prefix = source + "$";
this.outcomes.forEach((candidateSource, sourceOutcomes) -> {
if (candidateSource.startsWith(prefix)) {
ConditionOutcome outcome = ConditionOutcome
.noMatch(ConditionMessage.forCondition("Ancestor " + source).because("did not match"));
sourceOutcomes.add(ANCESTOR_CONDITION, outcome);
}
});
}
|
Returns condition outcomes from this report, grouped by the source.
@return the condition outcomes
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/ConditionEvaluationReport.java
| 126
|
[
"source"
] |
void
| true
| 2
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getMainMethod
|
private static Method getMainMethod(Class<?> application) throws Exception {
try {
return application.getDeclaredMethod("main", String[].class);
}
catch (NoSuchMethodException ex) {
return application.getDeclaredMethod("main");
}
}
|
Create a new processor for the specified application and settings.
@param application the application main class
@param settings the general AOT processor settings
@param applicationArgs the arguments to provide to the main method
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringApplicationAotProcessor.java
| 73
|
[
"application"
] |
Method
| true
| 2
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
_read_axes
|
def _read_axes(
self, where, start: int | None = None, stop: int | None = None
) -> list[tuple[np.ndarray, np.ndarray] | tuple[Index, Index]]:
"""
Create the axes sniffed from the table.
Parameters
----------
where : ???
start : int or None, default None
stop : int or None, default None
Returns
-------
List[Tuple[index_values, column_values]]
"""
# create the selection
selection = Selection(self, where=where, start=start, stop=stop)
values = selection.select()
results = []
# convert the data
for a in self.axes:
a.set_info(self.info)
res = a.convert(
values,
nan_rep=self.nan_rep,
encoding=self.encoding,
errors=self.errors,
)
results.append(res)
return results
|
Create the axes sniffed from the table.
Parameters
----------
where : ???
start : int or None, default None
stop : int or None, default None
Returns
-------
List[Tuple[index_values, column_values]]
|
python
|
pandas/io/pytables.py
| 3,965
|
[
"self",
"where",
"start",
"stop"
] |
list[tuple[np.ndarray, np.ndarray] | tuple[Index, Index]]
| true
| 2
| 7.2
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
maybeAutoCommitOffsetsAsync
|
public void maybeAutoCommitOffsetsAsync(long now) {
if (autoCommitEnabled) {
nextAutoCommitTimer.update(now);
if (nextAutoCommitTimer.isExpired()) {
nextAutoCommitTimer.reset(autoCommitIntervalMs);
autoCommitOffsetsAsync();
}
}
}
|
Commit offsets synchronously. This method will retry until the commit completes successfully
or an unrecoverable error is encountered.
@param offsets The offsets to be committed
@throws org.apache.kafka.common.errors.AuthorizationException if the consumer is not authorized to the group
or to any of the specified partitions. See the exception for more details
@throws CommitFailedException if an unrecoverable error occurs before the commit can be completed
@throws FencedInstanceIdException if a static member gets fenced
@return If the offset commit was successfully sent and a successful response was received from
the coordinator
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java
| 1,198
|
[
"now"
] |
void
| true
| 3
| 7.44
|
apache/kafka
| 31,560
|
javadoc
| false
|
isSuperContainer
|
function isSuperContainer(node: Node) {
const kind = node.kind;
return kind === SyntaxKind.ClassDeclaration
|| kind === SyntaxKind.Constructor
|| kind === SyntaxKind.MethodDeclaration
|| kind === SyntaxKind.GetAccessor
|| kind === SyntaxKind.SetAccessor;
}
|
Hooks node substitutions.
@param hint The context for the emitter.
@param node The node to substitute.
|
typescript
|
src/compiler/transformers/es2018.ts
| 1,452
|
[
"node"
] | false
| 5
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
flattenDepth
|
function flattenDepth(array, depth) {
var length = array == null ? 0 : array.length;
if (!length) {
return [];
}
depth = depth === undefined ? 1 : toInteger(depth);
return baseFlatten(array, depth);
}
|
Recursively flatten `array` up to `depth` times.
@static
@memberOf _
@since 4.4.0
@category Array
@param {Array} array The array to flatten.
@param {number} [depth=1] The maximum recursion depth.
@returns {Array} Returns the new flattened array.
@example
var array = [1, [2, [3, [4]], 5]];
_.flattenDepth(array, 1);
// => [1, 2, [3, [4]], 5]
_.flattenDepth(array, 2);
// => [1, 2, 3, [4], 5]
|
javascript
|
lodash.js
| 7,472
|
[
"array",
"depth"
] | false
| 4
| 7.68
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
getActualIndentationForListItem
|
function getActualIndentationForListItem(node: Node, sourceFile: SourceFile, options: EditorSettings, listIndentsChild: boolean): number {
if (node.parent && node.parent.kind === SyntaxKind.VariableDeclarationList) {
// VariableDeclarationList has no wrapping tokens
return Value.Unknown;
}
const containingList = getContainingList(node, sourceFile);
if (containingList) {
const index = containingList.indexOf(node);
if (index !== -1) {
const result = deriveActualIndentationFromList(containingList, index, sourceFile, options);
if (result !== Value.Unknown) {
return result;
}
}
return getActualIndentationForListStartLine(containingList, sourceFile, options) + (listIndentsChild ? options.indentSize! : 0); // TODO: GH#18217
}
return Value.Unknown;
}
|
@param assumeNewLineBeforeCloseBrace
`false` when called on text from a real source file.
`true` when we need to assume `position` is on a newline.
This is useful for codefixes. Consider
```
function f() {
|}
```
with `position` at `|`.
When inserting some text after an open brace, we would like to get indentation as if a newline was already there.
By default indentation at `position` will be 0 so 'assumeNewLineBeforeCloseBrace' overrides this behavior.
|
typescript
|
src/services/formatting/smartIndenter.ts
| 552
|
[
"node",
"sourceFile",
"options",
"listIndentsChild"
] | true
| 7
| 8.48
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
delete
|
def delete(key: str, team_name: str | None = None, session: Session | None = None) -> int:
"""
Delete an Airflow Variable for a given key.
:param key: Variable Keys
:param team_name: Team name associated to the task trying to delete the variable (if any)
:param session: optional session, use if provided or create a new one
"""
# TODO: This is not the best way of having compat, but it's "better than erroring" for now. This still
# means SQLA etc is loaded, but we can't avoid that unless/until we add import shims as a big
# back-compat layer
# If this is set it means are in some kind of execution context (Task, Dag Parse or Triggerer perhaps)
# and should use the Task SDK API server path
if hasattr(sys.modules.get("airflow.sdk.execution_time.task_runner"), "SUPERVISOR_COMMS"):
warnings.warn(
"Using Variable.delete from `airflow.models` is deprecated."
"Please use `delete` on Variable from sdk(`airflow.sdk.Variable`) instead",
DeprecationWarning,
stacklevel=1,
)
from airflow.sdk import Variable as TaskSDKVariable
TaskSDKVariable.delete(
key=key,
)
return 1
if team_name and not conf.getboolean("core", "multi_team"):
raise ValueError(
"Multi-team mode is not configured in the Airflow environment but the task trying to delete the variable belongs to a team"
)
ctx: contextlib.AbstractContextManager
if session is not None:
ctx = contextlib.nullcontext(session)
else:
ctx = create_session()
with ctx as session:
result = session.execute(
delete(Variable).where(
Variable.key == key, or_(Variable.team_name == team_name, Variable.team_name.is_(None))
)
)
rows = getattr(result, "rowcount", 0) or 0
SecretCache.invalidate_variable(key)
return rows
|
Delete an Airflow Variable for a given key.
:param key: Variable Keys
:param team_name: Team name associated to the task trying to delete the variable (if any)
:param session: optional session, use if provided or create a new one
|
python
|
airflow-core/src/airflow/models/variable.py
| 398
|
[
"key",
"team_name",
"session"
] |
int
| true
| 7
| 7.04
|
apache/airflow
| 43,597
|
sphinx
| false
|
apply
|
public static <T, R> R apply(final Function<T, R> function, final T object) {
return function != null ? function.apply(object) : null;
}
|
Applies the {@link Function} on the object if the function is not {@code null}. Otherwise, does nothing and returns {@code null}.
@param function the function to apply.
@param object the object to apply the function.
@param <T> the type of the argument the function applies.
@param <R> the type of the result the function returns.
@return the value the function returns if the function is not {@code null}; {@code null} otherwise.
@since 3.15.0
|
java
|
src/main/java/org/apache/commons/lang3/function/Functions.java
| 41
|
[
"function",
"object"
] |
R
| true
| 2
| 8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
constrainToRange
|
public static double constrainToRange(double value, double min, double max) {
// avoid auto-boxing by not using Preconditions.checkArgument(); see Guava issue 3984
// Reject NaN by testing for the good case (min <= max) instead of the bad (min > max).
if (min <= max) {
return Math.min(Math.max(value, min), max);
}
throw new IllegalArgumentException(
lenientFormat("min (%s) must be less than or equal to max (%s)", min, max));
}
|
Returns the value nearest to {@code value} which is within the closed range {@code [min..max]}.
<p>If {@code value} is within the range {@code [min..max]}, {@code value} is returned
unchanged. If {@code value} is less than {@code min}, {@code min} is returned, and if {@code
value} is greater than {@code max}, {@code max} is returned.
<p><b>Java 21+ users:</b> Use {@code Math.clamp} instead.
@param value the {@code double} value to constrain
@param min the lower bound (inclusive) of the range to constrain {@code value} to
@param max the upper bound (inclusive) of the range to constrain {@code value} to
@throws IllegalArgumentException if {@code min > max}
@since 21.0
|
java
|
android/guava/src/com/google/common/primitives/Doubles.java
| 255
|
[
"value",
"min",
"max"
] | true
| 2
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
|
shapeValue
|
function shapeValue(value: unknown): unknown {
if (value === true || !isObject(value)) return value
// Check if it's a nested query (has arguments or selection keys)
if ('arguments' in value || 'selection' in value) {
const args = value.arguments as Obj | undefined
const selection = value.selection as Obj | undefined
// Can simplify to true if args empty and selection is simple
if (isEmpty(args) && isSimpleSelection(selection)) {
return true
}
return shapeQuery(value)
}
// It's a simple selection object (e.g., { $scalars: true })
return isSimpleSelection(value) ? true : shapeQuery({ selection: value })
}
|
Shapes a value that could be a nested query, a simple selection, or a boolean.
|
typescript
|
packages/sqlcommenter-query-insights/src/shape/shape.ts
| 33
|
[
"value"
] | true
| 8
| 6
|
prisma/prisma
| 44,834
|
jsdoc
| false
|
|
parseBooleanLenient
|
@SuppressForbidden(reason = "allow lenient parsing of booleans")
public static boolean parseBooleanLenient(String value, boolean defaultValue) {
if (value == null) {
return defaultValue;
}
return Boolean.parseBoolean(value);
}
|
Wrapper around Boolean.parseBoolean for lenient parsing of booleans.
Note: Lenient parsing is highly discouraged and should only be used if absolutely necessary.
|
java
|
libs/core/src/main/java/org/elasticsearch/core/Booleans.java
| 104
|
[
"value",
"defaultValue"
] | true
| 2
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
read_table
|
def read_table(
self,
table_name: str,
index_col: str | list[str] | None = None,
coerce_float: bool = True,
parse_dates=None,
columns=None,
schema: str | None = None,
chunksize: int | None = None,
dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
) -> DataFrame | Iterator[DataFrame]:
"""
Read SQL database table into a DataFrame.
Parameters
----------
table_name : str
Name of SQL table in database.
index_col : string, optional, default: None
Column to set as index.
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point. This can result in
loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg}``, where the arg corresponds
to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default: None
List of column names to select from SQL table.
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If specified, this overwrites the default
schema of the SQL database object.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
dtype_backend : {'numpy_nullable', 'pyarrow'}
Back-end data type applied to the resultant :class:`DataFrame`
(still experimental). If not specified, the default behavior
is to not use nullable data types. If specified, the behavior
is as follows:
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
* ``"pyarrow"``: returns pyarrow-backed nullable
:class:`ArrowDtype` :class:`DataFrame`
.. versionadded:: 2.0
Returns
-------
DataFrame
See Also
--------
pandas.read_sql_table
SQLDatabase.read_query
"""
self.meta.reflect(bind=self.con, only=[table_name], views=True)
table = SQLTable(table_name, self, index=index_col, schema=schema)
if chunksize is not None:
self.returns_generator = True
return table.read(
self.exit_stack,
coerce_float=coerce_float,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
dtype_backend=dtype_backend,
)
|
Read SQL database table into a DataFrame.
Parameters
----------
table_name : str
Name of SQL table in database.
index_col : string, optional, default: None
Column to set as index.
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point. This can result in
loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg}``, where the arg corresponds
to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default: None
List of column names to select from SQL table.
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If specified, this overwrites the default
schema of the SQL database object.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
dtype_backend : {'numpy_nullable', 'pyarrow'}
Back-end data type applied to the resultant :class:`DataFrame`
(still experimental). If not specified, the default behavior
is to not use nullable data types. If specified, the behavior
is as follows:
* ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
* ``"pyarrow"``: returns pyarrow-backed nullable
:class:`ArrowDtype` :class:`DataFrame`
.. versionadded:: 2.0
Returns
-------
DataFrame
See Also
--------
pandas.read_sql_table
SQLDatabase.read_query
|
python
|
pandas/io/sql.py
| 1,683
|
[
"self",
"table_name",
"index_col",
"coerce_float",
"parse_dates",
"columns",
"schema",
"chunksize",
"dtype_backend"
] |
DataFrame | Iterator[DataFrame]
| true
| 2
| 6.64
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
getResolvedFactoryMethod
|
public @Nullable Method getResolvedFactoryMethod() {
Method factoryMethod = this.factoryMethodToIntrospect;
if (factoryMethod == null &&
getInstanceSupplier() instanceof InstanceSupplier<?> instanceSupplier) {
factoryMethod = instanceSupplier.getFactoryMethod();
}
return factoryMethod;
}
|
Return the resolved factory method as a Java Method object, if available.
@return the factory method, or {@code null} if not found or not resolved yet
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/RootBeanDefinition.java
| 435
|
[] |
Method
| true
| 3
| 8.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
toString
|
@Deprecated
@InlineMe(
replacement = "Files.asCharSource(file, charset).read()",
imports = "com.google.common.io.Files")
public static String toString(File file, Charset charset) throws IOException {
return asCharSource(file, charset).read();
}
|
Reads all characters from a file into a {@link String}, using the given character set.
@param file the file to read from
@param charset the charset used to decode the input stream; see {@link StandardCharsets} for
helpful predefined constants
@return a string containing all the characters from the file
@throws IOException if an I/O error occurs
@deprecated Prefer {@code asCharSource(file, charset).read()}.
|
java
|
android/guava/src/com/google/common/io/Files.java
| 250
|
[
"file",
"charset"
] |
String
| true
| 1
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
_clean_args
|
def _clean_args(*args):
"""
Helper function for delegating arguments to Python string
functions.
Many of the Python string operations that have optional arguments
do not use 'None' to indicate a default value. In these cases,
we need to remove all None arguments, and those following them.
"""
newargs = []
for chk in args:
if chk is None:
break
newargs.append(chk)
return newargs
|
Helper function for delegating arguments to Python string
functions.
Many of the Python string operations that have optional arguments
do not use 'None' to indicate a default value. In these cases,
we need to remove all None arguments, and those following them.
|
python
|
numpy/_core/strings.py
| 127
|
[] | false
| 3
| 6.24
|
numpy/numpy
| 31,054
|
unknown
| false
|
|
open_maybe_zipped
|
def open_maybe_zipped(fileloc, mode="r"):
"""
Open the given file.
If the path contains a folder with a .zip suffix, then the folder
is treated as a zip archive, opening the file inside the archive.
:return: a file object, as in `open`, or as in `ZipFile.open`.
"""
_, archive, filename = ZIP_REGEX.search(fileloc).groups()
if archive and zipfile.is_zipfile(archive):
return TextIOWrapper(zipfile.ZipFile(archive, mode=mode).open(filename))
return open(fileloc, mode=mode)
|
Open the given file.
If the path contains a folder with a .zip suffix, then the folder
is treated as a zip archive, opening the file inside the archive.
:return: a file object, as in `open`, or as in `ZipFile.open`.
|
python
|
airflow-core/src/airflow/utils/file.py
| 152
|
[
"fileloc",
"mode"
] | false
| 3
| 6.08
|
apache/airflow
| 43,597
|
unknown
| false
|
|
iterrows
|
def iterrows(self) -> Iterable[tuple[Hashable, Series]]:
"""
Iterate over DataFrame rows as (index, Series) pairs.
Yields
------
index : label or tuple of label
The index of the row. A tuple for a `MultiIndex`.
data : Series
The data of the row as a Series.
See Also
--------
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames).
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
Examples
--------
>>> df = pd.DataFrame([[1, 1.5]], columns=["int", "float"])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row["int"].dtype)
float64
>>> print(df["int"].dtype)
int64
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values, strict=True):
s = klass(v, index=columns, name=k).__finalize__(self)
if self._mgr.is_single_block:
s._mgr.add_references(self._mgr)
yield k, s
|
Iterate over DataFrame rows as (index, Series) pairs.
Yields
------
index : label or tuple of label
The index of the row. A tuple for a `MultiIndex`.
data : Series
The data of the row as a Series.
See Also
--------
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames).
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
Examples
--------
>>> df = pd.DataFrame([[1, 1.5]], columns=["int", "float"])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row["int"].dtype)
float64
>>> print(df["int"].dtype)
int64
|
python
|
pandas/core/frame.py
| 1,545
|
[
"self"
] |
Iterable[tuple[Hashable, Series]]
| true
| 3
| 8.48
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
moveToEnd
|
static void moveToEnd(ConfigurableEnvironment environment) {
MutablePropertySources propertySources = environment.getPropertySources();
PropertySource<?> propertySource = propertySources.remove(NAME);
if (propertySource != null) {
propertySources.addLast(propertySource);
}
}
|
Moves the {@link ApplicationInfoPropertySource} to the end of the environment's
property sources.
@param environment the environment
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ApplicationInfoPropertySource.java
| 78
|
[
"environment"
] |
void
| true
| 2
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
rejectRecordBatch
|
private <K, V> Set<Long> rejectRecordBatch(final ShareInFlightBatch<K, V> inFlightBatch,
final RecordBatch currentBatch) {
// Rewind the acquiredRecordIterator to the start, so we are in a known state
acquiredRecordIterator = acquiredRecordList.listIterator();
OffsetAndDeliveryCount nextAcquired = nextAcquiredRecord();
Set<Long> offsets = new HashSet<>();
for (long offset = currentBatch.baseOffset(); offset <= currentBatch.lastOffset(); offset++) {
if (nextAcquired == null) {
// No more acquired records, so we are done
break;
} else if (offset == nextAcquired.offset) {
// It's acquired, so we reject it
inFlightBatch.addAcknowledgement(offset, AcknowledgeType.REJECT);
offsets.add(offset);
} else if (offset < nextAcquired.offset) {
// It's not acquired, so we skip it
continue;
}
nextAcquired = nextAcquiredRecord();
}
return offsets;
}
|
The {@link RecordBatch batch} of {@link Record records} is converted to a {@link List list} of
{@link ConsumerRecord consumer records} and returned. {@link BufferSupplier Decompression} and
{@link Deserializer deserialization} of the {@link Record record's} key and value are performed in
this step.
@param deserializers {@link Deserializer}s to use to convert the raw bytes to the expected key and value types
@param maxRecords The number of records to return; the number returned may be {@code 0 <= maxRecords}
@param checkCrcs Whether to check the CRC of fetched records
@return {@link ShareInFlightBatch The ShareInFlightBatch containing records and their acknowledgements}
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetch.java
| 282
|
[
"inFlightBatch",
"currentBatch"
] | true
| 5
| 7.76
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
binarize
|
def binarize(X, *, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix.
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, default=0.0
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : bool, default=True
If False, try to avoid a copy and binarize in place.
This is not guaranteed to always work in place; e.g. if the data is
a numpy array with an object dtype, a copy will be returned even with
copy=False.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
The transformed data.
See Also
--------
Binarizer : Performs binarization using the Transformer API
(e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`).
Examples
--------
>>> from sklearn.preprocessing import binarize
>>> X = [[0.4, 0.6, 0.5], [0.6, 0.1, 0.2]]
>>> binarize(X, threshold=0.5)
array([[0., 1., 0.],
[1., 0., 0.]])
"""
X = check_array(X, accept_sparse=["csr", "csc"], force_writeable=True, copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError("Cannot binarize a sparse matrix with threshold < 0")
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
xp, _, device = get_namespace_and_device(X)
float_dtype = _find_matching_floating_dtype(X, threshold, xp=xp)
cond = xp.astype(X, float_dtype, copy=False) > threshold
not_cond = xp.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
|
Boolean thresholding of array-like or scipy.sparse matrix.
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, default=0.0
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : bool, default=True
If False, try to avoid a copy and binarize in place.
This is not guaranteed to always work in place; e.g. if the data is
a numpy array with an object dtype, a copy will be returned even with
copy=False.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
The transformed data.
See Also
--------
Binarizer : Performs binarization using the Transformer API
(e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`).
Examples
--------
>>> from sklearn.preprocessing import binarize
>>> X = [[0.4, 0.6, 0.5], [0.6, 0.1, 0.2]]
>>> binarize(X, threshold=0.5)
array([[0., 1., 0.],
[1., 0., 0.]])
|
python
|
sklearn/preprocessing/_data.py
| 2,219
|
[
"X",
"threshold",
"copy"
] | false
| 4
| 7.52
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
adaptBeanInstance
|
@SuppressWarnings("unchecked")
<T> T adaptBeanInstance(String name, Object bean, @Nullable Class<?> requiredType) {
	// Fast path: no required type, or the instance already matches it.
	if (requiredType == null || requiredType.isInstance(bean)) {
		return (T) bean;
	}
	// Otherwise attempt a type conversion; any failure (or a null result)
	// surfaces as BeanNotOfRequiredTypeException for the caller.
	try {
		Object converted = getTypeConverter().convertIfNecessary(bean, requiredType);
		if (converted != null) {
			return (T) converted;
		}
	}
	catch (TypeMismatchException ex) {
		if (logger.isTraceEnabled()) {
			logger.trace("Failed to convert bean '" + name + "' to required type '" +
					ClassUtils.getQualifiedName(requiredType) + "'", ex);
		}
	}
	throw new BeanNotOfRequiredTypeException(name, requiredType, bean.getClass());
}
|
Return an instance, which may be shared or independent, of the specified bean.
@param name the name of the bean to retrieve
@param requiredType the required type of the bean to retrieve
@param args arguments to use when creating a bean instance using explicit arguments
(only applied when creating a new instance as opposed to retrieving an existing one)
@param typeCheckOnly whether the instance is obtained for a type check,
not for actual use
@return an instance of the bean
@throws BeansException if the bean could not be created
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
| 402
|
[
"name",
"bean",
"requiredType"
] |
T
| true
| 6
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
append
|
AnsiString append(String text, Code... codes) {
	// Fall back to a plain append when no codes were supplied or the
	// terminal does not support ANSI escapes.
	boolean plain = (codes.length == 0 || !isAnsiSupported());
	if (plain) {
		this.value.append(text);
	}
	else {
		Ansi sequence = Ansi.ansi();
		for (Code next : codes) {
			sequence = applyCode(sequence, next);
		}
		this.value.append(sequence.a(text).reset().toString());
	}
	return this;
}
|
Append text with the given ANSI codes.
@param text the text to append
@param codes the ANSI codes
@return this string
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/shell/AnsiString.java
| 49
|
[
"text"
] |
AnsiString
| true
| 3
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
markinnerspaces
|
def markinnerspaces(line):
    """
    Replace every space that occurs inside a quoted string in *line*
    with the marker triplet ``@_@``.

    For instance, the input ``"a 'b c'"`` yields ``"a 'b@_@c'"``.
    Backslash-escaped quotes and backslashes inside the string are
    copied through without terminating the quoted region.

    Parameters
    ----------
    line : str

    Returns
    -------
    str
    """
    out = ''
    in_string = False
    quote_char = None
    prev = ''
    for ch in line:
        # A backslash escapes the following backslash or quote character.
        if prev == '\\' and ch in ('\\', '\'', '"'):
            out += ch
            prev = ch
            continue
        if not in_string and ch in ('\'', '"'):
            quote_char = ch
        if ch == quote_char:
            in_string = not in_string
        elif ch == ' ' and in_string:
            out += '@_@'
            continue
        out += ch
        prev = ch
    return out
|
The function replace all spaces in the input variable line which are
surrounded with quotation marks, with the triplet "@_@".
For instance, for the input "a 'b c'" the function returns "a 'b@_@c'"
Parameters
----------
line : str
Returns
-------
str
|
python
|
numpy/f2py/crackfortran.py
| 1,625
|
[
"line"
] | false
| 9
| 6.08
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
delete_objects
|
def delete_objects(self, bucket: str, keys: str | list) -> None:
    """
    Delete the given key(s) from an S3 bucket.

    .. seealso::
        - :external+boto3:py:meth:`S3.Client.delete_objects`

    :param bucket: Name of the bucket to delete object(s) from
    :param keys: A single key name (``str``) or a list of key names to delete.
    """
    key_list = [keys] if isinstance(keys, str) else keys
    client = self.get_conn()
    extra_kwargs = {}
    if self._requester_pays:
        extra_kwargs["RequestPayer"] = "requester"
    # The S3 API accepts at most 1000 keys per delete_objects call; see
    # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.delete_objects
    for batch in chunks(key_list, chunk_size=1000):
        response = client.delete_objects(
            Bucket=bucket, Delete={"Objects": [{"Key": key} for key in batch]}, **extra_kwargs
        )
        removed = [entry["Key"] for entry in response.get("Deleted", [])]
        self.log.info("Deleted: %s", removed)
        if "Errors" in response:
            failed = [entry["Key"] for entry in response.get("Errors", [])]
            raise AirflowException(f"Errors when deleting: {failed}")
|
Delete keys from the bucket.
.. seealso::
- :external+boto3:py:meth:`S3.Client.delete_objects`
:param bucket: Name of the bucket in which you are going to delete object(s)
:param keys: The key(s) to delete from S3 bucket.
When ``keys`` is a string, it's supposed to be the key name of
the single object to delete.
When ``keys`` is a list, it's supposed to be the list of the
keys to delete.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/s3.py
| 1,489
|
[
"self",
"bucket",
"keys"
] |
None
| true
| 5
| 7.04
|
apache/airflow
| 43,597
|
sphinx
| false
|
getOffsetFromSpec
|
/**
 * Translate an {@link OffsetSpec} into the timestamp value carried by a
 * ListOffsets request: a concrete timestamp for {@code TimestampSpec},
 * otherwise one of the {@code ListOffsetsRequest} sentinel constants.
 * Any unrecognized spec type falls through to the latest-offset sentinel.
 * @param offsetSpec the offset specification to translate
 * @return the timestamp (or sentinel) to send in the ListOffsets request
 */
private static long getOffsetFromSpec(OffsetSpec offsetSpec) {
    if (offsetSpec instanceof TimestampSpec) {
        return ((TimestampSpec) offsetSpec).timestamp();
    } else if (offsetSpec instanceof OffsetSpec.EarliestSpec) {
        return ListOffsetsRequest.EARLIEST_TIMESTAMP;
    } else if (offsetSpec instanceof OffsetSpec.MaxTimestampSpec) {
        return ListOffsetsRequest.MAX_TIMESTAMP;
    } else if (offsetSpec instanceof OffsetSpec.EarliestLocalSpec) {
        return ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP;
    } else if (offsetSpec instanceof OffsetSpec.LatestTieredSpec) {
        return ListOffsetsRequest.LATEST_TIERED_TIMESTAMP;
    } else if (offsetSpec instanceof OffsetSpec.EarliestPendingUploadSpec) {
        return ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP;
    }
    return ListOffsetsRequest.LATEST_TIMESTAMP;
}
|
Translates an {@link OffsetSpec} into the timestamp value carried by a ListOffsets request.
<p>
Concrete timestamps are returned for {@code TimestampSpec}; the other spec types map to the
corresponding {@code ListOffsetsRequest} sentinel constants, with unrecognized specs falling
back to the latest-offset sentinel.
</p>
@param offsetSpec The offset specification to translate.
@return the timestamp (or sentinel constant) to send in the ListOffsets request.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
| 5,141
|
[
"offsetSpec"
] | true
| 7
| 7.44
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
entrySpliterator
|
// Spliterator over every (key, value) entry of the multimap: flat-maps the
// asMap() view, expanding each key's value collection into immutable entries.
@Override
@GwtIncompatible("Spliterator")
Spliterator<Entry<K, V>> entrySpliterator() {
  return CollectSpliterators.flatMap(
      asMap().entrySet().spliterator(),
      keyToValueCollectionEntry -> {
        K key = keyToValueCollectionEntry.getKey();
        Collection<V> valueCollection = keyToValueCollectionEntry.getValue();
        return CollectSpliterators.map(
            valueCollection.spliterator(),
            Spliterator.ORDERED | Spliterator.NONNULL | Spliterator.IMMUTABLE,
            (V value) -> immutableEntry(key, value));
      },
      // SetMultimap entries are additionally known to be distinct.
      Spliterator.SIZED | (this instanceof SetMultimap ? Spliterator.DISTINCT : 0),
      size());
}
|
Returns a spliterator over all key-value entries of the multimap, expanding each key's value collection.
|
java
|
guava/src/com/google/common/collect/ImmutableMultimap.java
| 680
|
[] | true
| 2
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
|
getPage
|
/**
 * Return the current page number (0-based), clamping it to the last
 * available page if it runs past the end of the list.
 */
public int getPage() {
	this.newPageSet = false;
	boolean pastEnd = (this.page >= getPageCount());
	if (pastEnd) {
		this.page = getPageCount() - 1;
	}
	return this.page;
}
|
Return the current page number.
Page numbering starts with 0.
|
java
|
spring-beans/src/main/java/org/springframework/beans/support/PagedListHolder.java
| 189
|
[] | true
| 2
| 7.04
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
updateFetchPositions
|
/**
 * Ensure every assigned partition has a valid fetch position: validate
 * current positions after possible leader changes, look up committed offsets
 * via the coordinator when positions are missing, and trigger offset resets
 * for partitions still awaiting one.
 * @param timer bounds how long the coordinator lookup may block
 * @return true if the operation completed; false if the coordinator lookup
 * did not complete within the timer (per initWithCommittedOffsetsIfNeeded)
 */
private boolean updateFetchPositions(final Timer timer) {
    // If any partitions have been truncated due to a leader change, we need to validate the offsets
    offsetFetcher.validatePositionsIfNeeded();
    cachedSubscriptionHasAllFetchPositions = subscriptions.hasAllFetchPositions();
    if (cachedSubscriptionHasAllFetchPositions) return true;
    // If there are any partitions which do not have a valid position and are not
    // awaiting reset, then we need to fetch committed offsets. We will only do a
    // coordinator lookup if there are partitions which have missing positions, so
    // a consumer with manually assigned partitions can avoid a coordinator dependence
    // by always ensuring that assigned partitions have an initial position.
    if (coordinator != null && !coordinator.initWithCommittedOffsetsIfNeeded(timer)) return false;
    // If there are partitions still needing a position and a reset policy is defined,
    // request reset using the default policy. If no reset strategy is defined and there
    // are partitions with a missing position, then we will raise an exception.
    subscriptions.resetInitializingPositions();
    // Finally send an asynchronous request to look up and update the positions of any
    // partitions which are awaiting reset.
    offsetFetcher.resetPositionsIfNeeded();
    return true;
}
|
Set the fetch position to the committed position (if there is one)
or reset it using the offset reset policy the user has configured.
@throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details
@throws NoOffsetForPartitionException If no offset is stored for a given partition and no offset reset policy is
defined
@return true iff the operation completed without timing out
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ClassicKafkaConsumer.java
| 1,205
|
[
"timer"
] | true
| 4
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
mark_compile_region
|
def mark_compile_region(fn=None, options: Optional[NestedCompileRegionOptions] = None):
    """
    Instruct torch.compile to compile the wrapped region once and reuse the
    compiled artifact, instead of aggressively inlining the function.

    Under the hood this routes the region through the InvokeSubgraph HOP in
    TorchDynamo; in PyTorch eager mode it is a no-op.

    Args:
        fn: The function to wrap.
        options: Optional config used when compiling the subgraph.

    Warning: this is an experimental feature under development and
    not ready for use yet.
    """

    def wrap(target):
        def wrapped(*args, **kwargs):
            # Unwrap to the innermost function so nested marked regions
            # collapse into a single compile region.
            base = target
            while hasattr(base, "__marked_compile_region_fn__"):
                base = base.__marked_compile_region_fn__
            return invoke_subgraph_placeholder(base, *args, **kwargs)

        wrapped.__marked_compile_region_fn__ = target  # type: ignore[attr-defined]
        target.__marked_compile_region_config__ = options  # type: ignore[attr-defined]
        return wrapped

    # Support both bare use (@mark_compile_region) and parameterized use
    # (@mark_compile_region(options=...)).
    return wrap(fn) if fn else wrap
|
This wrapper instructs torch.compile to compile the wrapped region once and
reuse the compiled artifact, instead of the usual way of aggressively
inlining the function.
Under the hood, it tells TorchDynamo to use InvokeSubgraph HOP for the
region. For PyTorch eager, this is a no-op.
Args:
fn: The function to wrap
options: Optional config to use for compiling the subgraph.
Warning: this is an experimental feature under development and
not ready for use yet.
|
python
|
torch/_higher_order_ops/invoke_subgraph.py
| 196
|
[
"fn",
"options"
] | true
| 4
| 6.88
|
pytorch/pytorch
| 96,034
|
google
| false
|
|
_maybe_cache
|
def _maybe_cache(
arg: ArrayConvertible,
format: str | None,
cache: bool,
convert_listlike: Callable,
) -> Series:
"""
Create a cache of unique dates from an array of dates
Parameters
----------
arg : listlike, tuple, 1-d array, Series
format : string
Strftime format to parse time
cache : bool
True attempts to create a cache of converted values
convert_listlike : function
Conversion function to apply on dates
Returns
-------
cache_array : Series
Cache of converted, unique dates. Can be empty
"""
from pandas import Series
cache_array = Series(dtype=object)
if cache:
# Perform a quicker unique check
if not should_cache(arg):
return cache_array
if not isinstance(arg, (np.ndarray, ExtensionArray, Index, ABCSeries)):
arg = np.array(arg)
unique_dates = unique(arg)
if len(unique_dates) < len(arg):
cache_dates = convert_listlike(unique_dates, format)
# GH#45319
try:
cache_array = Series(cache_dates, index=unique_dates, copy=False)
except OutOfBoundsDatetime:
return cache_array
# GH#39882 and GH#35888 in case of None and NaT we get duplicates
if not cache_array.index.is_unique:
cache_array = cache_array[~cache_array.index.duplicated()]
return cache_array
|
Create a cache of unique dates from an array of dates
Parameters
----------
arg : listlike, tuple, 1-d array, Series
format : string
Strftime format to parse time
cache : bool
True attempts to create a cache of converted values
convert_listlike : function
Conversion function to apply on dates
Returns
-------
cache_array : Series
Cache of converted, unique dates. Can be empty
|
python
|
pandas/core/tools/datetimes.py
| 216
|
[
"arg",
"format",
"cache",
"convert_listlike"
] |
Series
| true
| 6
| 6.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
optDouble
|
/**
 * Returns the value at {@code index} coerced to a double, or {@code fallback}
 * when the value is absent or cannot be coerced.
 */
public double optDouble(int index, double fallback) {
    // null from the coercion means "absent or not a number".
    Double coerced = JSON.toDouble(opt(index));
    if (coerced == null) {
        return fallback;
    }
    return coerced;
}
|
Returns the value at {@code index} if it exists and is a double or can be coerced
to a double. Returns {@code fallback} otherwise.
@param index the index to get the value from
@param fallback the fallback value
@return the value at {@code index} of {@code fallback}
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONArray.java
| 392
|
[
"index",
"fallback"
] | true
| 2
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
invocableClone
|
@Override
public MethodInvocation invocableClone() {
	// Shallow copy of the invocation, but with an independent copy of the
	// arguments array so the clone cannot see argument mutations.
	@Nullable Object[] argumentsCopy =
			(this.arguments.length > 0 ? this.arguments.clone() : this.arguments);
	return invocableClone(argumentsCopy);
}
|
This implementation returns a shallow copy of this invocation object,
including an independent copy of the original arguments array.
<p>We want a shallow copy in this case: We want to use the same interceptor
chain and other object references, but we want an independent value for the
current interceptor index.
@see java.lang.Object#clone()
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/ReflectiveMethodInvocation.java
| 202
|
[] |
MethodInvocation
| true
| 2
| 7.04
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
_nanquantile_1d
|
def _nanquantile_1d(
values: np.ndarray,
mask: npt.NDArray[np.bool_],
qs: npt.NDArray[np.float64],
na_value: Scalar,
interpolation: str,
) -> Scalar | np.ndarray:
"""
Wrapper for np.quantile that skips missing values, specialized to
1-dimensional case.
Parameters
----------
values : array over which to find quantiles
mask : ndarray[bool]
locations in values that should be considered missing
qs : np.ndarray[float64] of quantile indices to find
na_value : scalar
value to return for empty or all-null values
interpolation : str
Returns
-------
quantiles : scalar or array
"""
# mask is Union[ExtensionArray, ndarray]
values = values[~mask]
if len(values) == 0:
# Can't pass dtype=values.dtype here bc we might have na_value=np.nan
# with values.dtype=int64 see test_quantile_empty
# equiv: 'np.array([na_value] * len(qs))' but much faster
return np.full(len(qs), na_value)
return np.quantile(
values,
qs,
# error: No overload variant of "percentile" matches argument
# types "ndarray[Any, Any]", "ndarray[Any, dtype[floating[_64Bit]]]"
# , "Dict[str, str]" [call-overload]
method=interpolation, # type: ignore[call-overload]
)
|
Wrapper for np.quantile that skips missing values, specialized to
1-dimensional case.
Parameters
----------
values : array over which to find quantiles
mask : ndarray[bool]
locations in values that should be considered missing
qs : np.ndarray[float64] of quantile indices to find
na_value : scalar
value to return for empty or all-null values
interpolation : str
Returns
-------
quantiles : scalar or array
|
python
|
pandas/core/array_algos/quantile.py
| 111
|
[
"values",
"mask",
"qs",
"na_value",
"interpolation"
] |
Scalar | np.ndarray
| true
| 2
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
of
|
/**
 * Creates an immutable pair of two objects, inferring the generic types.
 * Two null elements collapse to the shared null-pair singleton.
 */
public static <L, R> ImmutablePair<L, R> of(final L left, final R right) {
    if (left == null && right == null) {
        return nullPair();
    }
    return new ImmutablePair<>(left, right);
}
|
Creates an immutable pair of two objects inferring the generic types.
@param <L> the left element type.
@param <R> the right element type.
@param left the left element, may be null.
@param right the right element, may be null.
@return an immutable formed from the two parameters, not null.
|
java
|
src/main/java/org/apache/commons/lang3/tuple/ImmutablePair.java
| 105
|
[
"left",
"right"
] | true
| 3
| 8.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
nextBatch
|
/**
 * Read the next record batch from the underlying input stream.
 * @return the next batch, or {@code null} when the stream is exhausted
 * @throws IOException if reading from the underlying stream fails
 */
T nextBatch() throws IOException;
|
Get the next record batch from the underlying input stream.
@return The next record batch or null if there is none
@throws IOException for any IO errors
|
java
|
clients/src/main/java/org/apache/kafka/common/record/LogInputStream.java
| 42
|
[] |
T
| true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
getResourceDescription
|
/**
 * Build a human-readable description of the given resource, unwrapping
 * origin-tracked resources and delegating class path resources to the
 * dedicated overload.
 */
private String getResourceDescription(@Nullable Resource resource) {
	if (resource == null) {
		return "unknown resource [?]";
	}
	// Describe the underlying resource rather than the tracking wrapper.
	if (resource instanceof OriginTrackedResource tracked) {
		return getResourceDescription(tracked.getResource());
	}
	if (resource instanceof ClassPathResource classPath) {
		return getResourceDescription(classPath);
	}
	return resource.getDescription();
}
|
Return a human-readable description of the given resource, unwrapping origin-tracked
resources and specializing class path resources.
@return the description, or {@code "unknown resource [?]"} for a null resource
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/origin/TextResourceOrigin.java
| 105
|
[
"resource"
] |
String
| true
| 4
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
cellSpliterator
|
// Spliterator over all table cells, materialized lazily by index. Traversal
// order follows getCell's indexing (row-major per the indexed factory).
@Override
Spliterator<Cell<R, C, @Nullable V>> cellSpliterator() {
  return CollectSpliterators.indexed(
      size(), Spliterator.ORDERED | Spliterator.NONNULL | Spliterator.DISTINCT, this::getCell);
}
|
Returns an unmodifiable set of all row key / column key / value triplets. Changes to the table
will update the returned set.
<p>The returned set's iterator traverses the mappings with the first row key, the mappings with
the second row key, and so on.
<p>The value in the returned cells may change if the table subsequently changes.
@return set of table cells consisting of row key / column key / value triplets
|
java
|
guava/src/com/google/common/collect/ArrayTable.java
| 563
|
[] | true
| 1
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
|
unescapeCsv
|
/**
 * Returns the unescaped value of a CSV column: removes enclosing double
 * quotes (when the content required quoting) and collapses doubled double
 * quotes to single ones, per RFC 4180.
 * @param input the input CSV column String, may be null
 * @return the unescaped String, {@code null} if null string input
 */
public static final String unescapeCsv(final String input) {
    return UNESCAPE_CSV.translate(input);
}
|
Returns a {@link String} value for an unescaped CSV column.
<p>If the value is enclosed in double quotes, and contains a comma, newline
or double quote, then quotes are removed.
</p>
<p>Any double quote escaped characters (a pair of double quotes) are unescaped
to just one double quote.</p>
<p>If the value is not enclosed in double quotes, or is and does not contain a
comma, newline or double quote, then the String value is returned unchanged.</p>
see <a href="https://en.wikipedia.org/wiki/Comma-separated_values">Wikipedia</a> and
<a href="https://datatracker.ietf.org/doc/html/rfc4180">RFC 4180</a>.
@param input the input CSV column String, may be null
@return the input String, with enclosing double quotes removed and embedded double
quotes unescaped, {@code null} if null string input
@since 2.4
|
java
|
src/main/java/org/apache/commons/lang3/StringEscapeUtils.java
| 680
|
[
"input"
] |
String
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
is_monitoring_in_job_override
|
def is_monitoring_in_job_override(self, config_key: str, job_override: dict | None) -> bool:
"""
Check if monitoring is enabled for the job.
Note: This is not compatible with application defaults:
https://docs.aws.amazon.com/emr/latest/EMR-Serverless-UserGuide/default-configs.html
This is used to determine what extra links should be shown.
"""
monitoring_config = (job_override or {}).get("monitoringConfiguration")
if monitoring_config is None or config_key not in monitoring_config:
return False
# CloudWatch can have an "enabled" flag set to False
if config_key == "cloudWatchLoggingConfiguration":
return monitoring_config.get(config_key).get("enabled") is True
return config_key in monitoring_config
|
Check if monitoring is enabled for the job.
Note: This is not compatible with application defaults:
https://docs.aws.amazon.com/emr/latest/EMR-Serverless-UserGuide/default-configs.html
This is used to determine what extra links should be shown.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/operators/emr.py
| 1,333
|
[
"self",
"config_key",
"job_override"
] |
bool
| true
| 5
| 6.4
|
apache/airflow
| 43,597
|
unknown
| false
|
createLookupPromise
|
/**
 * Creates a promise that resolves with the IP address(es) of `hostname`.
 * @param {0 | 4 | 6} family - The IP address family (4, 6, or 0 for both).
 * @param {string} hostname - The hostname to resolve.
 * @param {boolean} all - Whether to resolve with all addresses for the hostname.
 * @param {number} hints - getaddrinfo flags (combine via bitwise OR).
 * @param {string} dnsOrder - Result ordering: 'ipv4first', 'ipv6first' or 'verbatim'.
 * @returns {Promise} Resolves with `{ address, family }` or an array thereof.
 */
function createLookupPromise(family, hostname, all, hints, dnsOrder) {
  return new Promise((resolve, reject) => {
    // An empty hostname is a caller error.
    if (!hostname) {
      reject(new ERR_INVALID_ARG_VALUE('hostname', hostname,
                                       'must be a non-empty string'));
      return;
    }

    // Hostname is already a literal IP address: no lookup required.
    const literalFamily = isIP(hostname);
    if (literalFamily !== 0) {
      const entry = { address: hostname, family: literalFamily };
      resolve(all ? [entry] : entry);
      return;
    }

    const req = new GetAddrInfoReqWrap();
    req.family = family;
    req.hostname = hostname;
    req.oncomplete = all ? onlookupall : onlookup;
    req.resolve = resolve;
    req.reject = reject;

    let order = DNS_ORDER_VERBATIM;
    if (dnsOrder === 'ipv4first') {
      order = DNS_ORDER_IPV4_FIRST;
    } else if (dnsOrder === 'ipv6first') {
      order = DNS_ORDER_IPV6_FIRST;
    }

    const err = getaddrinfo(req, hostname, family, hints, order);
    if (err) {
      reject(new DNSException(err, 'getaddrinfo', hostname));
    } else if (hasObserver('dns')) {
      startPerf(req, kPerfHooksDnsLookupContext, {
        type: 'dns',
        name: 'lookup',
        detail: {
          hostname,
          family,
          hints,
          verbatim: order === DNS_ORDER_VERBATIM,
          order: dnsOrder,
        },
      });
    }
  });
}
|
Creates a promise that resolves with the IP address of the given hostname.
@param {0 | 4 | 6} family - The IP address family (4 or 6, or 0 for both).
@param {string} hostname - The hostname to resolve.
@param {boolean} all - Whether to resolve with all IP addresses for the hostname.
@param {number} hints - One or more supported getaddrinfo flags (supply multiple via
bitwise OR).
@param {number} dnsOrder - How to sort results. Must be `ipv4first`, `ipv6first` or `verbatim`.
@returns {Promise<DNSLookupResult | DNSLookupResult[]>} The IP address(es) of the hostname.
@typedef {object} DNSLookupResult
@property {string} address - The IP address.
@property {0 | 4 | 6} family - The IP address type. 4 for IPv4 or 6 for IPv6, or 0 (for both).
|
javascript
|
lib/internal/dns/promises.js
| 134
|
[
"family",
"hostname",
"all",
"hints",
"dnsOrder"
] | false
| 11
| 6.08
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
_parse_yaml_file
|
def _parse_yaml_file(file_path: str) -> tuple[dict[str, list[str]], list[FileSyntaxError]]:
    """
    Parse a file in the YAML format.

    :param file_path: The location of the file that will be processed.
    :return: Tuple with mapping of key and list of values and list of syntax errors
    """
    with open(file_path) as file_obj:
        raw = file_obj.read()

    if not raw:
        return {}, [FileSyntaxError(line_no=1, message="The file is empty.")]

    try:
        parsed = yaml.safe_load(raw)
    except yaml.MarkedYAMLError as exc:
        # MarkedYAMLError carries the error location when available.
        line_no = exc.problem_mark.line if exc.problem_mark else -1
        return {}, [FileSyntaxError(line_no=line_no, message=str(exc))]

    # The top-level YAML node must be a mapping.
    if not isinstance(parsed, dict):
        return {}, [FileSyntaxError(line_no=1, message="The file should contain the object.")]

    return parsed, []
|
Parse a file in the YAML format.
:param file_path: The location of the file that will be processed.
:return: Tuple with mapping of key and list of values and list of syntax errors
|
python
|
airflow-core/src/airflow/secrets/local_filesystem.py
| 112
|
[
"file_path"
] |
tuple[dict[str, list[str]], list[FileSyntaxError]]
| true
| 4
| 8.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
validate_args_and_kwargs
|
def validate_args_and_kwargs(
    fname, args, kwargs, max_fname_arg_count, compat_args
) -> None:
    """
    Validate the ``*args``/``**kwargs`` passed to compatibility shims of
    function `fname` against the allowed `compat_args` and their defaults.

    Parameters
    ----------
    fname: str
        The name of the function being passed the `**kwargs` parameter
    args: tuple
        The `*args` parameter passed into a function
    kwargs: dict
        The `**kwargs` parameter passed into `fname`
    max_fname_arg_count: int
        The minimum number of arguments that the function `fname`
        requires, excluding those in `args`. Used for displaying
        appropriate error messages. Must be non-negative.
    compat_args: dict
        A dictionary of keys that `kwargs` is allowed to
        have and their associated default values.

    Raises
    ------
    TypeError if `args` contains more values than there are
    `compat_args` OR `kwargs` contains keys not in `compat_args`
    ValueError if `args` contains values not at the default value (`None`)
    `kwargs` contains keys in `compat_args` that do not map to the default
    value as specified in `compat_args`

    See Also
    --------
    validate_args : Purely args validation.
    validate_kwargs : Purely kwargs validation.
    """
    # The combined positional + keyword count may not exceed compat_args.
    _check_arg_length(
        fname, args + tuple(kwargs.values()), max_fname_arg_count, compat_args
    )

    # Mirror Python's own rule: a parameter may not be supplied both
    # positionally and by keyword.
    positional = dict(zip(compat_args, args, strict=False))
    for key in positional:
        if key in kwargs:
            raise TypeError(
                f"{fname}() got multiple values for keyword argument '{key}'"
            )

    kwargs.update(positional)
    validate_kwargs(fname, kwargs, compat_args)
|
Checks whether parameters passed to the *args and **kwargs argument in a
function `fname` are valid parameters as specified in `*compat_args`
and whether or not they are set to their default values.
Parameters
----------
fname: str
The name of the function being passed the `**kwargs` parameter
args: tuple
The `*args` parameter passed into a function
kwargs: dict
The `**kwargs` parameter passed into `fname`
max_fname_arg_count: int
The minimum number of arguments that the function `fname`
requires, excluding those in `args`. Used for displaying
appropriate error messages. Must be non-negative.
compat_args: dict
A dictionary of keys that `kwargs` is allowed to
have and their associated default values.
Raises
------
TypeError if `args` contains more values than there are
`compat_args` OR `kwargs` contains keys not in `compat_args`
ValueError if `args` contains values not at the default value (`None`)
`kwargs` contains keys in `compat_args` that do not map to the default
value as specified in `compat_args`
See Also
--------
validate_args : Purely args validation.
validate_kwargs : Purely kwargs validation.
|
python
|
pandas/util/_validators.py
| 170
|
[
"fname",
"args",
"kwargs",
"max_fname_arg_count",
"compat_args"
] |
None
| true
| 3
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
tap
|
/**
 * Invokes `interceptor` with `value` for its side effects and returns
 * `value` unchanged, allowing a method chain to be "tapped into".
 * @param {*} value The value to provide to `interceptor`.
 * @param {Function} interceptor The function to invoke.
 * @returns {*} Returns `value`.
 */
function tap(value, interceptor) {
  interceptor(value);
  return value;
}
|
This method invokes `interceptor` and returns `value`. The interceptor
is invoked with one argument; (value). The purpose of this method is to
"tap into" a method chain sequence in order to modify intermediate results.
@static
@memberOf _
@since 0.1.0
@category Seq
@param {*} value The value to provide to `interceptor`.
@param {Function} interceptor The function to invoke.
@returns {*} Returns `value`.
@example
_([1, 2, 3])
.tap(function(array) {
// Mutate input array.
array.pop();
})
.reverse()
.value();
// => [2, 1]
|
javascript
|
lodash.js
| 8,869
|
[
"value",
"interceptor"
] | false
| 1
| 6.24
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
shift
|
/**
 * Rotates the elements of {@code array[startIndexInclusive, endIndexExclusive)}
 * by {@code offset} positions, in place. Does nothing for a {@code null} array
 * or a degenerate window. Out-of-range indices are clamped, and the offset is
 * reduced modulo the window size (negative offsets are normalized), so any
 * offset value is accepted.
 * @param array the array to shift, may be {@code null}
 * @param startIndexInclusive start of the window; negatives are clamped to 0
 * @param endIndexExclusive end of the window; clamped to the array length
 * @param offset number of positions to rotate
 */
public static void shift(final boolean[] array, int startIndexInclusive, int endIndexExclusive, int offset) {
    if (array == null || startIndexInclusive >= array.length - 1 || endIndexExclusive <= 0) {
        return;
    }
    startIndexInclusive = max0(startIndexInclusive);
    endIndexExclusive = Math.min(endIndexExclusive, array.length);
    int n = endIndexExclusive - startIndexInclusive;
    if (n <= 1) {
        return;
    }
    offset %= n;
    if (offset < 0) {
        offset += n;
    }
    // For algorithm explanations and proof of O(n) time complexity and O(1) space complexity
    // see https://beradrian.wordpress.com/2015/04/07/shift-an-array-in-on-in-place/
    while (n > 1 && offset > 0) {
        final int nOffset = n - offset;
        if (offset > nOffset) {
            swap(array, startIndexInclusive, startIndexInclusive + n - nOffset, nOffset);
            n = offset;
            offset -= nOffset;
        } else if (offset < nOffset) {
            swap(array, startIndexInclusive, startIndexInclusive + nOffset, offset);
            startIndexInclusive += offset;
            n = nOffset;
        } else {
            swap(array, startIndexInclusive, startIndexInclusive + nOffset, offset);
            break;
        }
    }
}
|
Shifts the order of a series of elements in the given boolean array.
<p>There is no special handling for multi-dimensional arrays. This method
does nothing for {@code null} or empty input arrays.</p>
@param array
the array to shift, may be {@code null}.
@param startIndexInclusive
the starting index. Undervalue (<0) is promoted to 0, overvalue (>array.length) results in no
change.
@param endIndexExclusive
elements up to endIndex-1 are shifted in the array. Undervalue (< start index) results in no
change. Overvalue (>array.length) is demoted to array length.
@param offset
The number of positions to rotate the elements. If the offset is larger than the number of elements to
rotate, than the effective offset is modulo the number of elements to rotate.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 6,814
|
[
"array",
"startIndexInclusive",
"endIndexExclusive",
"offset"
] |
void
| true
| 10
| 6.88
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
extractProject
|
/**
 * Extract the generated project archive into the output directory.
 * @param entity the generation response holding the zipped project content
 * @param output the target directory, or {@code null} to use the current working directory
 * @param overwrite whether existing files may be overwritten
 * @throws IOException if the archive cannot be read or files cannot be written
 */
private void extractProject(ProjectGenerationResponse entity, @Nullable String output, boolean overwrite)
		throws IOException {
	File outputDirectory = (output != null) ? new File(output) : new File(System.getProperty("user.dir"));
	if (!outputDirectory.exists()) {
		outputDirectory.mkdirs();
	}
	byte[] content = entity.getContent();
	Assert.state(content != null, "'content' must not be null");
	try (ZipInputStream zipStream = new ZipInputStream(new ByteArrayInputStream(content))) {
		extractFromStream(zipStream, overwrite, outputDirectory);
		// NOTE(review): presumably restores the executable bit on the build
		// wrapper scripts, which zip extraction does not preserve — confirm.
		fixExecutableFlag(outputDirectory, "mvnw");
		fixExecutableFlag(outputDirectory, "gradlew");
		Log.info("Project extracted to '" + outputDirectory.getAbsolutePath() + "'");
	}
}
|
Extract the generated project archive into the output directory.
@param entity the generation response holding the zipped project content
@param output the target directory, or {@code null} for the current working directory
@param overwrite whether existing files may be overwritten
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/init/ProjectGenerator.java
| 95
|
[
"entity",
"output",
"overwrite"
] |
void
| true
| 3
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
formatDurationHMS
|
/**
 * Formats the duration using the ISO 8601-like pattern {@code HH:mm:ss.SSS}.
 * @param durationMillis the duration in milliseconds to format
 * @return the formatted duration, not null
 */
public static String formatDurationHMS(final long durationMillis) {
    return formatDuration(durationMillis, "HH:mm:ss.SSS");
}
|
Formats the time gap as a string.
<p>The format used is ISO 8601-like: {@code HH:mm:ss.SSS}.</p>
@param durationMillis the duration to format
@return the formatted duration, not null
@throws IllegalArgumentException if durationMillis is negative
|
java
|
src/main/java/org/apache/commons/lang3/time/DurationFormatUtils.java
| 398
|
[
"durationMillis"
] |
String
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
printSimple
|
private static String printSimple(Duration duration, DurationFormat.@Nullable Unit unit) {
unit = (unit == null ? DurationFormat.Unit.MILLIS : unit);
return unit.print(duration);
}
|
Detect the style then parse the value to return a duration.
@param value the value to parse
@param unit the duration unit to use if the value doesn't specify one ({@code null}
will default to ms)
@return the parsed duration
@throws IllegalArgumentException if the value is not a known style or cannot be
parsed
|
java
|
spring-context/src/main/java/org/springframework/format/datetime/standard/DurationFormatterUtils.java
| 160
|
[
"duration",
"unit"
] |
String
| true
| 2
| 7.68
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
CONST
|
public static double CONST(final double v) {
return v;
}
|
Returns the provided value unchanged. This can prevent javac from inlining a constant field, e.g.,
<pre>
public final static double MAGIC_DOUBLE = ObjectUtils.CONST(1.0);
</pre>
This way any jars that refer to this field do not have to recompile themselves if the field's value changes at some future date.
@param v the double value to return.
@return the double v, unchanged.
@since 3.2
|
java
|
src/main/java/org/apache/commons/lang3/ObjectUtils.java
| 386
|
[
"v"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
read
|
@Override
public int read(byte[] b, int off, int len) throws IOException {
// Obey InputStream contract.
checkPositionIndexes(off, off + len, b.length);
if (len == 0) {
return 0;
}
// The rest of this method implements the process described by the CharsetEncoder javadoc.
int totalBytesRead = 0;
boolean doneEncoding = endOfInput;
DRAINING:
while (true) {
// We stay in draining mode until there are no bytes left in the output buffer. Then we go
// back to encoding/flushing.
if (draining) {
totalBytesRead += drain(b, off + totalBytesRead, len - totalBytesRead);
if (totalBytesRead == len || doneFlushing) {
return (totalBytesRead > 0) ? totalBytesRead : -1;
}
draining = false;
Java8Compatibility.clear(byteBuffer);
}
while (true) {
// We call encode until there is no more input. The last call to encode will have endOfInput
// == true. Then there is a final call to flush.
CoderResult result;
if (doneFlushing) {
result = CoderResult.UNDERFLOW;
} else if (doneEncoding) {
result = encoder.flush(byteBuffer);
} else {
result = encoder.encode(charBuffer, byteBuffer, endOfInput);
}
if (result.isOverflow()) {
// Not enough room in output buffer--drain it, creating a bigger buffer if necessary.
startDraining(true);
continue DRAINING;
} else if (result.isUnderflow()) {
// If encoder underflows, it means either:
// a) the final flush() succeeded; next drain (then done)
// b) we encoded all of the input; next flush
// c) we ran of out input to encode; next read more input
if (doneEncoding) { // (a)
doneFlushing = true;
startDraining(false);
continue DRAINING;
} else if (endOfInput) { // (b)
doneEncoding = true;
} else { // (c)
readMoreChars();
}
} else if (result.isError()) {
// Only reach here if a CharsetEncoder with non-REPLACE settings is used.
result.throwException();
return 0; // Not called.
}
}
}
}
|
Creates a new input stream that will encode the characters from {@code reader} into bytes using
the given character set encoder.
@param reader input source
@param encoder character set encoder used for encoding chars to bytes
@param bufferSize size of internal input and output buffers
@throws IllegalArgumentException if bufferSize is non-positive
|
java
|
android/guava/src/com/google/common/io/ReaderInputStream.java
| 129
|
[
"b",
"off",
"len"
] | true
| 15
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
|
isLeavingGroup
|
@Override
public boolean isLeavingGroup() {
CloseOptions.GroupMembershipOperation leaveGroupOperation = leaveGroupOperation();
if (REMAIN_IN_GROUP == leaveGroupOperation && groupInstanceId.isEmpty()) {
return false;
}
MemberState state = state();
boolean isLeavingState = state == MemberState.PREPARE_LEAVING || state == MemberState.LEAVING;
// Default operation: both static and dynamic consumers will send a leave heartbeat
boolean hasLeaveOperation = DEFAULT == leaveGroupOperation ||
// Leave operation: both static and dynamic consumers will send a leave heartbeat
LEAVE_GROUP == leaveGroupOperation ||
// Remain in group: static consumers will send a leave heartbeat with -2 epoch to reflect that a member using the given
// instance id decided to leave the group and would be back within the session timeout.
groupInstanceId().isPresent();
return isLeavingState && hasLeaveOperation;
}
|
Log partitions being revoked that were already paused, since the pause flag will be
effectively lost.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManager.java
| 421
|
[] | true
| 7
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
optimizedText
|
XContentString optimizedText() throws IOException;
|
Returns an instance of {@link Map} holding parsed map.
Serves as a replacement for the "map", "mapOrdered" and "mapStrings" methods above.
@param mapFactory factory for creating new {@link Map} objects
@param mapValueParser parser for parsing a single map value
@param <T> map value type
@return {@link Map} object
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentParser.java
| 112
|
[] |
XContentString
| true
| 1
| 6.32
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
getAllNamedFields
|
static std::set<const FieldDecl *>
getAllNamedFields(const CXXRecordDecl *Record) {
std::set<const FieldDecl *> Result;
for (const auto *Field : Record->fields()) {
// Static data members are not in this range.
if (Field->isUnnamedBitField())
continue;
Result.insert(Field);
}
return Result;
}
|
Finds all the named non-static fields of \p Record.
|
cpp
|
clang-tools-extra/clang-tidy/modernize/UseEqualsDefaultCheck.cpp
| 24
|
[] | true
| 2
| 7.04
|
llvm/llvm-project
| 36,021
|
doxygen
| false
|
|
asByteArray
|
byte[] asByteArray() {
ByteBuffer buffer = ByteBuffer.allocate(MINIMUM_SIZE);
buffer.order(ByteOrder.LITTLE_ENDIAN);
buffer.putInt(SIGNATURE);
buffer.putShort(this.versionMadeBy);
buffer.putShort(this.versionNeededToExtract);
buffer.putShort(this.generalPurposeBitFlag);
buffer.putShort(this.compressionMethod);
buffer.putShort(this.lastModFileTime);
buffer.putShort(this.lastModFileDate);
buffer.putInt(this.crc32);
buffer.putInt(this.compressedSize);
buffer.putInt(this.uncompressedSize);
buffer.putShort(this.fileNameLength);
buffer.putShort(this.extraFieldLength);
buffer.putShort(this.fileCommentLength);
buffer.putShort(this.diskNumberStart);
buffer.putShort(this.internalFileAttributes);
buffer.putInt(this.externalFileAttributes);
buffer.putInt(this.offsetToLocalHeader);
return buffer.array();
}
|
Return the contents of this record as a byte array suitable for writing to a zip.
@return the record as a byte array
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipCentralDirectoryFileHeaderRecord.java
| 164
|
[] | true
| 1
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
unicodeWords
|
function unicodeWords(string) {
return string.match(reUnicodeWord) || [];
}
|
Splits a Unicode `string` into an array of its words.
@private
@param {string} The string to inspect.
@returns {Array} Returns the words of `string`.
|
javascript
|
lodash.js
| 1,413
|
[
"string"
] | false
| 2
| 6.16
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
countTrue
|
public static int countTrue(boolean... values) {
int count = 0;
for (boolean value : values) {
if (value) {
count++;
}
}
return count;
}
|
Returns the number of {@code values} that are {@code true}.
@since 16.0
|
java
|
android/guava/src/com/google/common/primitives/Booleans.java
| 524
|
[] | true
| 2
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.