function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
search
|
/**
 * Walk {@code source} and report whether any element satisfies {@code predicate}.
 * @param <T> the element type
 * @param source the iterable to search (must not be {@code null})
 * @param predicate the match test applied to each element (must not be {@code null})
 * @return {@link #PRESENT} when a matching element exists, otherwise {@link #ABSENT}
 */
static <T> ConfigurationPropertyState search(Iterable<T> source, Predicate<T> predicate) {
	Assert.notNull(source, "'source' must not be null");
	Assert.notNull(predicate, "'predicate' must not be null");
	for (T candidate : source) {
		if (predicate.test(candidate)) {
			return PRESENT;
		}
	}
	return ABSENT;
}
|
Search the given iterable using a predicate to determine if content is
{@link #PRESENT} or {@link #ABSENT}.
@param <T> the data type
@param source the source iterable to search
@param predicate the predicate used to test for presence
@return {@link #PRESENT} if the iterable contains a matching item, otherwise
{@link #ABSENT}.
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyState.java
| 58
|
[
"source",
"predicate"
] |
ConfigurationPropertyState
| true
| 2
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
packageImage
|
/**
 * Package the source jar into an image, reading from the backup file when the
 * source has already been packaged in place.
 * @param libraries the libraries to include in the image
 * @param writer the jar writer that receives the packaged content
 * @throws IOException on IO error
 */
private void packageImage(Libraries libraries, AbstractJarWriter writer) throws IOException {
	File source;
	if (isAlreadyPackaged()) {
		source = getBackupFile();
	}
	else {
		source = getSource();
	}
	try (JarFile sourceJar = new JarFile(source)) {
		write(sourceJar, libraries, writer);
	}
}
|
Create a packaged image.
@param libraries the contained libraries
@param writer the writer used to write the image
@throws IOException on IO error
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/ImagePackager.java
| 65
|
[
"libraries",
"writer"
] |
void
| true
| 2
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
apply
|
/**
 * Applies this pair's key and value as the two arguments of the given function.
 * @param <V> the function return type
 * @param <E> the kind of throwable the function may raise
 * @param function the function to invoke with (key, value)
 * @return the function's result
 * @throws E when the function fails
 */
public <V, E extends Throwable> V apply(final FailableBiFunction<L, R, V, E> function) throws E {
    final L left = getKey();
    final R right = getValue();
    return function.apply(left, right);
}
|
Applies this key and value as arguments to the given function.
@param <V> The function return type.
@param <E> The kind of thrown exception or error.
@param function the consumer to call.
@return the function's return value.
@throws E Thrown when the consumer fails.
@since 3.13.0
|
java
|
src/main/java/org/apache/commons/lang3/tuple/Pair.java
| 140
|
[
"function"
] |
V
| true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
equals
|
/**
 * Compares this operation with another object for equality. Two instances are
 * equal only when they are of exactly the same class (subclasses are never
 * equal) and have equal {@code key} and {@code value} fields.
 */
@Override
public boolean equals(Object o) {
    if (o == this) {
        return true;
    }
    if (o == null || o.getClass() != getClass()) {
        return false;
    }
    Op other = (Op) o;
    return Objects.equals(key, other.key) && Objects.equals(value, other.value);
}
|
Compares this Op with another object for equality: two Ops are equal when
they are of the same class and have equal key and value fields.
|
java
|
clients/src/main/java/org/apache/kafka/common/quota/ClientQuotaAlteration.java
| 57
|
[
"o"
] | true
| 5
| 6.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
box
|
def box(self, by: IndexLabel | None = None, **kwargs) -> PlotAccessor:
r"""
Make a box plot of the DataFrame columns.
A box plot is a method for graphically depicting groups of numerical
data through their quartiles.
The box extends from the Q1 to Q3 quartile values of the data,
with a line at the median (Q2). The whiskers extend from the edges
of box to show the range of the data. The position of the whiskers
is set by default to 1.5*IQR (IQR = Q3 - Q1) from the edges of the
box. Outlier points are those past the end of the whiskers.
For further details see Wikipedia's
entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`__.
A consideration when using this chart is that the box and the whiskers
can overlap, which is very common when plotting small sets of data.
Parameters
----------
by : str or sequence
Column in the DataFrame to group by.
**kwargs
Additional keywords are documented in
:meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
The matplotlib axes containing the box plot.
See Also
--------
DataFrame.boxplot: Another method to draw a box plot.
Series.plot.box: Draw a box plot from a Series object.
matplotlib.pyplot.boxplot: Draw a box plot in matplotlib.
Examples
--------
Draw a box plot from a DataFrame with four columns of randomly
generated data.
.. plot::
:context: close-figs
>>> data = np.random.randn(25, 4)
>>> df = pd.DataFrame(data, columns=list("ABCD"))
>>> ax = df.plot.box()
You can also generate groupings if you specify the `by` parameter (which
can take a column name, or a list or tuple of column names):
.. plot::
:context: close-figs
>>> age_list = [8, 10, 12, 14, 72, 74, 76, 78, 20, 25, 30, 35, 60, 85]
>>> df = pd.DataFrame({"gender": list("MMMMMMMMFFFFFF"), "age": age_list})
>>> ax = df.plot.box(column="age", by="gender", figsize=(10, 8))
"""
return self(kind="box", by=by, **kwargs)
|
r"""
Make a box plot of the DataFrame columns.
A box plot is a method for graphically depicting groups of numerical
data through their quartiles.
The box extends from the Q1 to Q3 quartile values of the data,
with a line at the median (Q2). The whiskers extend from the edges
of box to show the range of the data. The position of the whiskers
is set by default to 1.5*IQR (IQR = Q3 - Q1) from the edges of the
box. Outlier points are those past the end of the whiskers.
For further details see Wikipedia's
entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`__.
A consideration when using this chart is that the box and the whiskers
can overlap, which is very common when plotting small sets of data.
Parameters
----------
by : str or sequence
Column in the DataFrame to group by.
**kwargs
Additional keywords are documented in
:meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
The matplotlib axes containing the box plot.
See Also
--------
DataFrame.boxplot: Another method to draw a box plot.
Series.plot.box: Draw a box plot from a Series object.
matplotlib.pyplot.boxplot: Draw a box plot in matplotlib.
Examples
--------
Draw a box plot from a DataFrame with four columns of randomly
generated data.
.. plot::
:context: close-figs
>>> data = np.random.randn(25, 4)
>>> df = pd.DataFrame(data, columns=list("ABCD"))
>>> ax = df.plot.box()
You can also generate groupings if you specify the `by` parameter (which
can take a column name, or a list or tuple of column names):
.. plot::
:context: close-figs
>>> age_list = [8, 10, 12, 14, 72, 74, 76, 78, 20, 25, 30, 35, 60, 85]
>>> df = pd.DataFrame({"gender": list("MMMMMMMMFFFFFF"), "age": age_list})
>>> ax = df.plot.box(column="age", by="gender", figsize=(10, 8))
|
python
|
pandas/plotting/_core.py
| 1,577
|
[
"self",
"by"
] |
PlotAccessor
| true
| 1
| 7.28
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
getAsBoolean
|
/**
 * Invokes the given boolean supplier and returns its result, rethrowing any
 * throwable the supplier raises via {@code rethrow}.
 * @param <E> the type of checked exception the supplier may throw
 * @param supplier the boolean supplier to invoke
 * @return the boolean produced by the supplier
 */
public static <E extends Throwable> boolean getAsBoolean(final FailableBooleanSupplier<E> supplier) {
    try {
        return supplier.getAsBoolean();
    } catch (final Throwable throwable) {
        throw rethrow(throwable);
    }
}
|
Invokes a boolean supplier, and returns the result.
@param supplier The boolean supplier to invoke.
@param <E> The type of checked exception, which the supplier can throw.
@return The boolean, which has been created by the supplier
|
java
|
src/main/java/org/apache/commons/lang3/function/Failable.java
| 422
|
[
"supplier"
] | true
| 2
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
get_flashed_messages
|
def get_flashed_messages(
    with_categories: bool = False, category_filter: t.Iterable[str] = ()
) -> list[str] | list[tuple[str, str]]:
    """Pulls all flashed messages from the session and returns them.

    Further calls in the same request to the function will return
    the same messages. By default just the messages are returned,
    but when `with_categories` is set to ``True``, the return value will
    be a list of tuples in the form ``(category, message)`` instead.

    Filter the flashed messages to one or more categories by providing those
    categories in `category_filter`. This allows rendering categories in
    separate html blocks. The `with_categories` and `category_filter`
    arguments are distinct:

    * `with_categories` controls whether categories are returned with message
      text (``True`` gives a tuple, where ``False`` gives just the message text).
    * `category_filter` filters the messages down to only those matching the
      provided categories.

    See :doc:`/patterns/flashing` for examples.

    .. versionchanged:: 0.3
        `with_categories` parameter added.

    .. versionchanged:: 0.9
        `category_filter` parameter added.

    :param with_categories: set to ``True`` to also receive categories.
    :param category_filter: filter of categories to limit return values. Only
                            categories in the list will be returned.
    """
    flashes = app_ctx._flashes
    if flashes is None:
        # First access in this request: move the messages out of the session
        # and cache them on the app context so repeated calls see the same list.
        # (Only pop when the key exists, so an untouched session is not marked
        # as modified.)
        if "_flashes" in session:
            flashes = session.pop("_flashes")
        else:
            flashes = []
        app_ctx._flashes = flashes
    if category_filter:
        flashes = [f for f in flashes if f[0] in category_filter]
    if with_categories:
        return flashes
    return [item[1] for item in flashes]
|
Pulls all flashed messages from the session and returns them.
Further calls in the same request to the function will return
the same messages. By default just the messages are returned,
but when `with_categories` is set to ``True``, the return value will
be a list of tuples in the form ``(category, message)`` instead.
Filter the flashed messages to one or more categories by providing those
categories in `category_filter`. This allows rendering categories in
separate html blocks. The `with_categories` and `category_filter`
arguments are distinct:
* `with_categories` controls whether categories are returned with message
text (``True`` gives a tuple, where ``False`` gives just the message text).
* `category_filter` filters the messages down to only those matching the
provided categories.
See :doc:`/patterns/flashing` for examples.
.. versionchanged:: 0.3
`with_categories` parameter added.
.. versionchanged:: 0.9
`category_filter` parameter added.
:param with_categories: set to ``True`` to also receive categories.
:param category_filter: filter of categories to limit return values. Only
categories in the list will be returned.
|
python
|
src/flask/helpers.py
| 344
|
[
"with_categories",
"category_filter"
] |
list[str] | list[tuple[str, str]]
| true
| 5
| 6.24
|
pallets/flask
| 70,946
|
sphinx
| false
|
getFinalExpressionInChain
|
/**
 * Gets a property access expression which may be nested inside of a binary expression. The final
 * expression in an && chain will occur as the right child of the parent binary expression, unless
 * it is followed by a different binary operator.
 * @param node the right child of a binary expression or a call expression.
 */
function getFinalExpressionInChain(node: Expression): CallExpression | PropertyAccessExpression | ElementAccessExpression | undefined {
    node = skipParentheses(node);
    // foo && |foo.bar === 1|; - here the right child of the && binary expression is another binary
    // expression; the rightmost member of the && chain is the leftmost child of that expression.
    if (isBinaryExpression(node)) {
        return getFinalExpressionInChain(node.left);
    }
    // foo && |foo.bar()()| - nested calls are treated like further accesses.
    const isAccessOrCall = isPropertyAccessExpression(node) || isElementAccessExpression(node) || isCallExpression(node);
    return isAccessOrCall && !isOptionalChain(node) ? node : undefined;
}
|
Gets a property access expression which may be nested inside of a binary expression. The final
expression in an && chain will occur as the right child of the parent binary expression, unless
it is followed by a different binary operator.
@param node the right child of a binary expression or a call expression.
|
typescript
|
src/services/refactors/convertToOptionalChainExpression.ts
| 292
|
[
"node"
] | true
| 7
| 7.2
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
identity
|
def identity(n, dtype=None):
    """
    Returns the square identity matrix of given size.

    Parameters
    ----------
    n : int
        Size of the returned identity matrix.
    dtype : data-type, optional
        Data-type of the output. Defaults to ``float``.

    Returns
    -------
    out : matrix
        `n` x `n` matrix with its main diagonal set to one,
        and all other elements zero.

    See Also
    --------
    numpy.identity : Equivalent array function.
    matlib.eye : More general matrix identity function.

    Examples
    --------
    >>> import numpy.matlib
    >>> np.matlib.identity(3, dtype=np.int_)
    matrix([[1, 0, 0],
            [0, 1, 0],
            [0, 0, 1]])
    """
    # A length n+1 pattern of a single one followed by n zeros; cycling it
    # row-major through an (n, n) buffer lands the ones exactly on the
    # main diagonal.
    pattern = array([1] + [0] * n, dtype=dtype)
    out = empty((n, n), dtype=dtype)
    out.flat = pattern
    return out
|
Returns the square identity matrix of given size.
Parameters
----------
n : int
Size of the returned identity matrix.
dtype : data-type, optional
Data-type of the output. Defaults to ``float``.
Returns
-------
out : matrix
`n` x `n` matrix with its main diagonal set to one,
and all other elements zero.
See Also
--------
numpy.identity : Equivalent array function.
matlib.eye : More general matrix identity function.
Examples
--------
>>> import numpy.matlib
>>> np.matlib.identity(3, dtype=np.int_)
matrix([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
|
python
|
numpy/matlib.py
| 155
|
[
"n",
"dtype"
] | false
| 1
| 6.32
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
stopTask
|
// Stops the geoip downloader persistent task for the given project, then deletes the
// backing geoip databases index and clears the per-project bookkeeping maps. A missing
// task or index (ResourceNotFoundException) is treated as already-stopped and ignored.
private void stopTask(ProjectId projectId, Runnable onFailure) {
// Listener for the task-removal request: log on success; on failure, ignore
// "task not found" (already removed) and otherwise log and invoke onFailure.
ActionListener<PersistentTasksCustomMetadata.PersistentTask<?>> listener = ActionListener.wrap(
r -> logger.debug("Stopped geoip downloader task"),
e -> {
// Unwrap transport-layer wrapping so the ResourceNotFoundException check works
// for remote failures too.
Throwable t = e instanceof RemoteTransportException ? ExceptionsHelper.unwrapCause(e) : e;
if (t instanceof ResourceNotFoundException == false) {
logger.warn("failed to remove geoip downloader task", e);
onFailure.run();
}
}
);
persistentTasksService.sendProjectRemoveRequest(
projectId,
getTaskId(projectId, projectResolver.supportsMultipleProjects()),
MasterNodeRequest.INFINITE_MASTER_NODE_TIMEOUT,
// runAfter: the index cleanup below runs once the removal attempt completes,
// whether it succeeded or failed.
ActionListener.runAfter(listener, () -> {
IndexAbstraction databasesAbstraction = clusterService.state()
.metadata()
.getProject(projectId)
.getIndicesLookup()
.get(DATABASES_INDEX);
if (databasesAbstraction != null) {
// regardless of whether DATABASES_INDEX is an alias, resolve it to a concrete index
Index databasesIndex = databasesAbstraction.getWriteIndex();
client.projectClient(projectId)
.admin()
.indices()
.prepareDelete(databasesIndex.getName())
.execute(ActionListener.wrap(rr -> {
// remove task reference in the map so it can be garbage collected
tasks.remove(projectId);
taskIsBootstrappedByProject.remove(projectId);
atLeastOneGeoipProcessorByProject.remove(projectId);
}, e -> {
// As above: a missing index means there is nothing left to delete.
Throwable t = e instanceof RemoteTransportException ? ExceptionsHelper.unwrapCause(e) : e;
if (t instanceof ResourceNotFoundException == false) {
logger.warn("failed to remove " + databasesIndex, e);
}
}));
}
})
);
}
|
Stops the geoip downloader persistent task for the given project and, after the
stop attempt completes, deletes the backing geoip databases index and clears the
per-project bookkeeping state.
@param projectId the project whose downloader task should be stopped
@param onFailure callback run if removing the persistent task fails (a task that
is already missing is treated as success)
|
java
|
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java
| 552
|
[
"projectId",
"onFailure"
] |
void
| true
| 6
| 7.76
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
set_flags
|
def set_flags(
    self,
    *,
    copy: bool | lib.NoDefault = lib.no_default,
    allows_duplicate_labels: bool | None = None,
) -> Self:
    """
    Return a new object with updated flags.

    This method creates a shallow copy of the original object, preserving its
    underlying data while modifying its global flags. In particular, it allows
    you to update properties such as whether duplicate labels are permitted. This
    behavior is especially useful in method chains, where one wishes to
    adjust DataFrame or Series characteristics without altering the original object.

    Parameters
    ----------
    copy : bool, default False
        This keyword is now ignored; changing its value will have no
        impact on the method.

        .. deprecated:: 3.0.0
            This keyword is ignored and will be removed in pandas 4.0. Since
            pandas 3.0, this method always returns a new object using a lazy
            copy mechanism that defers copies until necessary
            (Copy-on-Write). See the `user guide on Copy-on-Write
            <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
            for more details.
    allows_duplicate_labels : bool, optional
        Whether the returned object allows duplicate labels.

    Returns
    -------
    Series or DataFrame
        The same type as the caller.

    See Also
    --------
    DataFrame.attrs : Global metadata applying to this dataset.
    DataFrame.flags : Global flags applying to this object.

    Notes
    -----
    This method returns a new object that's a view on the same data
    as the input. Mutating the input or the output values will be reflected
    in the other.

    This method is intended to be used in method chains.

    "Flags" differ from "metadata". Flags reflect properties of the
    pandas object (the Series or DataFrame). Metadata refer to properties
    of the dataset, and should be stored in :attr:`DataFrame.attrs`.

    Examples
    --------
    >>> df = pd.DataFrame({"A": [1, 2]})
    >>> df.flags.allows_duplicate_labels
    True
    >>> df2 = df.set_flags(allows_duplicate_labels=False)
    >>> df2.flags.allows_duplicate_labels
    False
    """
    # Warn (or not) about the deprecated `copy` keyword; it has no effect.
    self._check_copy_deprecation(copy)
    # Shallow copy: same data, independent flags.
    result = self.copy(deep=False)
    if allows_duplicate_labels is not None:
        result.flags["allows_duplicate_labels"] = allows_duplicate_labels
    return result
|
Return a new object with updated flags.
This method creates a shallow copy of the original object, preserving its
underlying data while modifying its global flags. In particular, it allows
you to update properties such as whether duplicate labels are permitted. This
behavior is especially useful in method chains, where one wishes to
adjust DataFrame or Series characteristics without altering the original object.
Parameters
----------
copy : bool, default False
This keyword is now ignored; changing its value will have no
impact on the method.
.. deprecated:: 3.0.0
This keyword is ignored and will be removed in pandas 4.0. Since
pandas 3.0, this method always returns a new object using a lazy
copy mechanism that defers copies until necessary
(Copy-on-Write). See the `user guide on Copy-on-Write
<https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
for more details.
allows_duplicate_labels : bool, optional
Whether the returned object allows duplicate labels.
Returns
-------
Series or DataFrame
The same type as the caller.
See Also
--------
DataFrame.attrs : Global metadata applying to this dataset.
DataFrame.flags : Global flags applying to this object.
Notes
-----
This method returns a new object that's a view on the same data
as the input. Mutating the input or the output values will be reflected
in the other.
This method is intended to be used in method chains.
"Flags" differ from "metadata". Flags reflect properties of the
pandas object (the Series or DataFrame). Metadata refer to properties
of the dataset, and should be stored in :attr:`DataFrame.attrs`.
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2]})
>>> df.flags.allows_duplicate_labels
True
>>> df2 = df.set_flags(allows_duplicate_labels=False)
>>> df2.flags.allows_duplicate_labels
False
|
python
|
pandas/core/generic.py
| 403
|
[
"self",
"copy",
"allows_duplicate_labels"
] |
Self
| true
| 2
| 8.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
processAcknowledgementEvents
|
/**
 * Drains acknowledgement events produced by the {@link ConsumerNetworkThread network thread}
 * and processes each one, logging (but not propagating) any processing failure so that one
 * bad event does not prevent the remaining events from being handled.
 */
void processAcknowledgementEvents() {
    List<ShareAcknowledgementEvent> events = acknowledgementEventHandler.drainEvents();
    if (events.isEmpty()) {
        return;
    }
    for (ShareAcknowledgementEvent event : events) {
        try {
            acknowledgementEventProcessor.process(event);
        } catch (Exception e) {
            log.warn("An error occurred when processing the acknowledgement event: {}", e.getMessage(), e);
        }
    }
}
|
Process acknowledgement events, if any, that were produced by the {@link ConsumerNetworkThread network thread}.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java
| 1,191
|
[] |
void
| true
| 3
| 6.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
is_timedelta64_ns_dtype
|
def is_timedelta64_ns_dtype(arr_or_dtype) -> bool:
    """
    Check whether the provided array or dtype is of the timedelta64[ns] dtype.

    This is a very specific dtype, so generic ones like `np.timedelta64`
    will return False if passed into this function.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array or dtype is of the timedelta64[ns] dtype.

    See Also
    --------
    api.types.is_timedelta64_dtype : Check whether an array-like or dtype
        is of the timedelta64 dtype.

    Examples
    --------
    >>> from pandas.api.types import is_timedelta64_ns_dtype
    >>> is_timedelta64_ns_dtype(np.dtype("m8[ns]"))
    True
    >>> is_timedelta64_ns_dtype(np.dtype("m8[ps]"))  # Wrong frequency
    False
    >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype="m8[ns]"))
    True
    >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype=np.timedelta64))
    False
    """

    def _is_td64ns(dtype) -> bool:
        # Exact match against the canonical timedelta64[ns] dtype only.
        return dtype == TD64NS_DTYPE

    return _is_dtype(arr_or_dtype, _is_td64ns)
|
Check whether the provided array or dtype is of the timedelta64[ns] dtype.
This is a very specific dtype, so generic ones like `np.timedelta64`
will return False if passed into this function.
Parameters
----------
arr_or_dtype : array-like or dtype
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of the timedelta64[ns] dtype.
See Also
--------
api.types.is_timedelta64_dtype: Check whether an array-like or dtype
is of the timedelta64 dtype.
Examples
--------
>>> from pandas.api.types import is_timedelta64_ns_dtype
>>> is_timedelta64_ns_dtype(np.dtype("m8[ns]"))
True
>>> is_timedelta64_ns_dtype(np.dtype("m8[ps]")) # Wrong frequency
False
>>> is_timedelta64_ns_dtype(np.array([1, 2], dtype="m8[ns]"))
True
>>> is_timedelta64_ns_dtype(np.array([1, 2], dtype=np.timedelta64))
False
|
python
|
pandas/core/dtypes/common.py
| 1,118
|
[
"arr_or_dtype"
] |
bool
| true
| 1
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
mid
|
def mid(self) -> Index:
    """
    Return the midpoint of each Interval in the IntervalArray as an Index.

    The midpoint of an interval is calculated as the average of its
    ``left`` and ``right`` bounds. This property returns a ``pandas.Index``
    object containing the midpoint for each interval.

    See Also
    --------
    Interval.left : Return left bound for the interval.
    Interval.right : Return right bound for the interval.
    Interval.length : Return the length of each interval.

    Examples
    --------
    >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
    >>> interv_arr
    <IntervalArray>
    [(0, 1], (1, 5]]
    Length: 2, dtype: interval[int64, right]
    >>> interv_arr.mid
    Index([0.5, 3.0], dtype='float64')
    """
    try:
        return (self.left + self.right) * 0.5
    except TypeError:
        # Datetime-like bounds cannot be summed together, so offset the
        # left bound by half the interval length instead.
        return self.left + self.length * 0.5
|
Return the midpoint of each Interval in the IntervalArray as an Index.
The midpoint of an interval is calculated as the average of its
``left`` and ``right`` bounds. This property returns a ``pandas.Index`` object
containing the midpoint for each interval.
See Also
--------
Interval.left : Return left bound for the interval.
Interval.right : Return right bound for the interval.
Interval.length : Return the length of each interval.
Examples
--------
>>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
>>> interv_arr
<IntervalArray>
[(0, 1], (1, 5]]
Length: 2, dtype: interval[int64, right]
>>> interv_arr.mid
Index([0.5, 3.0], dtype='float64')
|
python
|
pandas/core/arrays/interval.py
| 1,440
|
[
"self"
] |
Index
| true
| 1
| 6.96
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
buildGetInstanceMethodForConstructor
|
// Populates the generated "get instance supplier" method for a bean created through the
// given constructor: Javadoc, modifiers, warning suppression, return type, and the code
// block that builds the BeanInstanceSupplier.
private void buildGetInstanceMethodForConstructor(MethodSpec.Builder method, ConstructorDescriptor descriptor,
javax.lang.model.element.Modifier... modifiers) {
Constructor<?> constructor = descriptor.constructor();
Class<?> publicType = descriptor.publicType();
Class<?> actualType = descriptor.actualType();
// Detect @Deprecated usage on the bean type, the constructor, and its parameter types
// so matching @SuppressWarnings can be emitted on the generated method.
CodeWarnings codeWarnings = new CodeWarnings();
codeWarnings.detectDeprecation(actualType, constructor)
.detectDeprecation(Arrays.stream(constructor.getParameters()).map(Parameter::getType));
method.addJavadoc("Get the bean instance supplier for '$L'.", descriptor.beanName());
method.addModifiers(modifiers);
codeWarnings.suppress(method);
// Generated method returns BeanInstanceSupplier<publicType>.
method.returns(ParameterizedTypeName.get(BeanInstanceSupplier.class, publicType));
CodeBlock.Builder code = CodeBlock.builder();
code.add(generateResolverForConstructor(descriptor));
boolean hasArguments = constructor.getParameterCount() > 0;
boolean onInnerClass = ClassUtils.isInnerClass(actualType);
// Non-static inner classes take the enclosing instance as an implicit first constructor
// argument, so the autowired argument indices are shifted by one.
CodeBlock arguments = hasArguments ?
new AutowiredArgumentsCodeGenerator(actualType, constructor)
.generateCode(constructor.getParameterTypes(), (onInnerClass ? 1 : 0)) : NO_ARGS;
CodeBlock newInstance = generateNewInstanceCodeForConstructor(actualType, arguments);
code.add(generateWithGeneratorCode(hasArguments, newInstance));
method.addStatement(code.build());
}
|
Generate the instance supplier code.
@param registeredBean the bean to handle
@param instantiationDescriptor the executable to use to create the bean
@return the generated code
@since 6.1.7
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/InstanceSupplierCodeGenerator.java
| 218
|
[
"method",
"descriptor"
] |
void
| true
| 3
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
write
|
/**
 * Writes a sequence of bytes to this channel from the given buffers by
 * delegating directly to the underlying socket channel.
 *
 * @param srcs the buffers from which bytes are to be retrieved
 * @return the number of bytes written, possibly zero
 * @throws IOException if an I/O error occurs during the write
 */
@Override
public long write(ByteBuffer[] srcs) throws IOException {
return socketChannel.write(srcs);
}
|
Writes a sequence of bytes to this channel from the given buffer.
@param srcs The buffer from which bytes are to be retrieved
@return The number of bytes written, possibly zero
@throws IOException If some other I/O error occurs
|
java
|
clients/src/main/java/org/apache/kafka/common/network/PlaintextTransportLayer.java
| 149
|
[
"srcs"
] | true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
fuzz_valid_stride
|
def fuzz_valid_stride(size: tuple[int, ...]) -> tuple[int, ...]:
    """
    Fuzzes PyTorch tensor strides by generating valid stride patterns for a given size.

    Args:
        size: Tensor shape/size as a tuple of integers

    Returns:
        Tuple[int, ...]: A tuple representing valid tensor strides
    """
    if not size:
        # Scalar tensor has no strides.
        return ()

    def _contiguous() -> tuple[int, ...]:
        # Standard contiguous strides: stride[i] = product of sizes[i+1:].
        return tuple(_compute_contiguous_strides(size))

    def _transposed() -> tuple[int, ...]:
        # Swap the strides of two randomly chosen dimensions.
        strides = list(_compute_contiguous_strides(size))
        if len(strides) >= 2:
            i, j = random.sample(range(len(strides)), 2)
            strides[i], strides[j] = strides[j], strides[i]
        return tuple(strides)

    def _custom_gaps() -> tuple[int, ...]:
        # Add random gaps to some strides (non-dense layout).
        strides = list(_compute_contiguous_strides(size))
        for i in range(len(strides)):
            if size[i] != 0 and random.random() < 0.3:  # 30% chance to add gap
                strides[i] *= random.randint(2, 5)
        return tuple(strides)

    def _minimal() -> tuple[int, ...]:
        # Minimal valid strides (all ones).
        return (1,) * len(size)

    def _dense_non_contiguous() -> tuple[int, ...]:
        # Non-overlapping and dense but not contiguous (e.g. column-major).
        return tuple(_compute_non_contiguous_dense_strides(size))

    def _overlapping() -> tuple[int, ...]:
        # Zero out some strides to create overlapping memory access.
        strides = list(_compute_contiguous_strides(size))
        for i in range(len(strides)):
            if size[i] > 1 and random.random() < 0.4:  # 40% chance to make overlapping
                strides[i] = 0
        return tuple(strides)

    def _sparse_gaps() -> tuple[int, ...]:
        # Very large gaps (definitely non-dense).
        strides = list(_compute_contiguous_strides(size))
        for i in range(len(strides)):
            if size[i] > 1:
                strides[i] *= random.randint(10, 100)  # Much larger gaps
        return tuple(strides)

    handlers = {
        "contiguous": _contiguous,
        "transposed": _transposed,
        "custom_gaps": _custom_gaps,
        "minimal": _minimal,
        "nonoverlapping_and_dense": _contiguous,
        "nonoverlapping_and_dense_non_contig": _dense_non_contiguous,
        "overlapping": _overlapping,
        "sparse_gaps": _sparse_gaps,
    }
    # Keep the candidate list (and its order) unchanged so the sampling
    # distribution of stride patterns is preserved.
    stride_types = [
        "contiguous",
        "transposed",
        "custom_gaps",
        "minimal",
        "nonoverlapping_and_dense",
        "nonoverlapping_and_dense_non_contig",
        "overlapping",
        "sparse_gaps",
    ]
    stride_type: str = random.choice(stride_types)
    # Fallback to contiguous for any unknown pattern name (defensive; all
    # names above are covered by the handler table).
    return handlers.get(stride_type, _contiguous)()
|
Fuzzes PyTorch tensor strides by generating valid stride patterns for a given size.
Args:
size: Tensor shape/size as a tuple of integers
Returns:
Tuple[int, ...]: A tuple representing valid tensor strides
|
python
|
tools/experimental/torchfuzz/tensor_fuzzer.py
| 115
|
[
"size"
] |
tuple[int, ...]
| true
| 18
| 6.8
|
pytorch/pytorch
| 96,034
|
google
| false
|
addPropertyValues
|
/**
 * Copy all given PropertyValues into this object. Guarantees PropertyValue
 * references are independent, although it can't deep copy objects currently
 * referenced by individual PropertyValue objects.
 * @param other the PropertyValues to copy (may be {@code null}, in which case
 * this call is a no-op)
 * @return this in order to allow for adding multiple property values in a chain
 */
public MutablePropertyValues addPropertyValues(@Nullable PropertyValues other) {
	if (other == null) {
		return this;
	}
	for (PropertyValue pv : other.getPropertyValues()) {
		addPropertyValue(new PropertyValue(pv));
	}
	return this;
}
|
Copy all given PropertyValues into this object. Guarantees PropertyValue
references are independent, although it can't deep copy objects currently
referenced by individual PropertyValue objects.
@param other the PropertyValues to copy
@return this in order to allow for adding multiple property values in a chain
|
java
|
spring-beans/src/main/java/org/springframework/beans/MutablePropertyValues.java
| 143
|
[
"other"
] |
MutablePropertyValues
| true
| 2
| 7.28
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
_add_delegate_accessors
|
def _add_delegate_accessors(
    cls,
    delegate,
    accessors: list[str],
    typ: str,
    overwrite: bool = False,
    accessor_mapping: Callable[[str], str] = lambda x: x,
    raise_on_missing: bool = True,
) -> None:
    """
    Add accessors to cls from the delegate class.

    Parameters
    ----------
    cls
        Class to add the methods/properties to.
    delegate
        Class to get methods/properties and docstrings.
    accessors : list of str
        List of accessors to add.
    typ : {'property', 'method'}
    overwrite : bool, default False
        Overwrite the method/property in the target class if it exists.
    accessor_mapping: Callable, default lambda x: x
        Callable to map the delegate's function to the cls' function.
    raise_on_missing: bool, default True
        Raise if an accessor does not exist on delegate.
        False skips the missing accessor.
    """

    def _create_delegator_property(name: str):
        # Build a property whose getter/setter forward to the instance's
        # _delegate_property_get / _delegate_property_set hooks.
        def _getter(self):
            return self._delegate_property_get(name)

        def _setter(self, new_values):
            return self._delegate_property_set(name, new_values)

        # Name the closures after the accessor so introspection is useful.
        _getter.__name__ = name
        _setter.__name__ = name

        return property(
            fget=_getter,
            fset=_setter,
            # Reuse the delegate's docstring so help() matches the original.
            doc=getattr(delegate, accessor_mapping(name)).__doc__,
        )

    def _create_delegator_method(name: str):
        # Forward calls through _delegate_method; functools.wraps copies the
        # delegate's name/docstring/signature metadata onto the wrapper.
        method = getattr(delegate, accessor_mapping(name))

        @functools.wraps(method)
        def f(self, *args, **kwargs):
            return self._delegate_method(name, *args, **kwargs)

        return f

    for name in accessors:
        if (
            not raise_on_missing
            and getattr(delegate, accessor_mapping(name), None) is None
        ):
            # Accessor missing on the delegate and caller opted out of raising.
            continue

        if typ == "property":
            f = _create_delegator_property(name)
        else:
            f = _create_delegator_method(name)

        # don't overwrite existing methods/properties
        if overwrite or not hasattr(cls, name):
            setattr(cls, name, f)
|
Add accessors to cls from the delegate class.
Parameters
----------
cls
Class to add the methods/properties to.
delegate
Class to get methods/properties and docstrings.
accessors : list of str
List of accessors to add.
typ : {'property', 'method'}
overwrite : bool, default False
Overwrite the method/property in the target class if it exists.
accessor_mapping: Callable, default lambda x: x
Callable to map the delegate's function to the cls' function.
raise_on_missing: bool, default True
Raise if an accessor does not exist on delegate.
False skips the missing accessor.
|
python
|
pandas/core/accessor.py
| 76
|
[
"cls",
"delegate",
"accessors",
"typ",
"overwrite",
"accessor_mapping",
"raise_on_missing"
] |
None
| true
| 8
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
estimateCompressedSizeInBytes
|
/**
 * Estimates the post-compression size for a batch of the given uncompressed size.
 * Without compression the size is returned unchanged; otherwise a heuristic of
 * half the input, clamped to the range [1024, 65536], is used.
 * @param size the uncompressed size in bytes
 * @param compressionType the compression type in use
 * @return the estimated size in bytes
 */
private static int estimateCompressedSizeInBytes(int size, CompressionType compressionType) {
    if (compressionType == CompressionType.NONE) {
        return size;
    }
    int halved = size / 2;
    return Math.min(Math.max(halved, 1024), 1 << 16);
}
|
Estimate the size in bytes of the records after compression.
@return the uncompressed size if no compression is used, otherwise a bounded heuristic estimate
|
java
|
clients/src/main/java/org/apache/kafka/common/record/AbstractRecords.java
| 120
|
[
"size",
"compressionType"
] | true
| 2
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
toCalendar
|
/**
 * Converts a {@link Date} into a {@link Calendar} using the default time zone and locale.
 * @param date the date to convert, not null.
 * @return a new Calendar set to the given date.
 * @throws NullPointerException if {@code date} is null.
 */
public static Calendar toCalendar(final Date date) {
    Objects.requireNonNull(date, "date");
    final Calendar calendar = Calendar.getInstance();
    calendar.setTime(date);
    return calendar;
}
|
Converts a {@link Date} into a {@link Calendar}.
@param date the date to convert to a Calendar.
@return the created Calendar.
@throws NullPointerException if null is passed in.
@since 3.0
|
java
|
src/main/java/org/apache/commons/lang3/time/DateUtils.java
| 1,612
|
[
"date"
] |
Calendar
| true
| 1
| 6.88
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getAsInt
|
/**
 * Invokes the given int supplier and returns its result, rethrowing any
 * throwable it raises.
 * @param supplier the int supplier to invoke.
 * @param <E> the type of checked exception the supplier can throw.
 * @return the int produced by the supplier.
 */
public static <E extends Throwable> int getAsInt(final FailableIntSupplier<E> supplier) {
    try {
        return supplier.getAsInt();
    } catch (final Throwable throwable) {
        // Rethrow checked exceptions unchanged via the sneaky-throw helper.
        throw rethrow(throwable);
    }
}
|
Invokes an int supplier, and returns the result.
@param supplier The int supplier to invoke.
@param <E> The type of checked exception, which the supplier can throw.
@return The int, which has been created by the supplier
|
java
|
src/main/java/org/apache/commons/lang3/function/Failable.java
| 452
|
[
"supplier"
] | true
| 2
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
byte_bounds
|
def byte_bounds(a):
    """
    Return the byte-address end-points of an array.

    Parameters
    ----------
    a : ndarray
        Input array. It must conform to the Python-side of the array
        interface.

    Returns
    -------
    (low, high) : tuple of 2 integers
        ``low`` is the address of the first byte of the array and ``high``
        is one past its last byte. For a non-contiguous array not every
        byte between ``low`` and ``high`` belongs to the array.

    Examples
    --------
    >>> import numpy as np
    >>> I = np.eye(2, dtype=np.float32)
    >>> low, high = np.lib.array_utils.byte_bounds(I)
    >>> high - low == I.size * I.itemsize
    True
    """
    interface = a.__array_interface__
    base_addr = interface['data'][0]
    strides = interface['strides']
    shape = interface['shape']
    itemsize = asarray(a).dtype.itemsize

    low = high = base_addr
    if strides is None:
        # C-contiguous: the span is simply size * itemsize past the base.
        high += a.size * itemsize
    else:
        # Walk each axis; negative strides extend the span below the base
        # address, positive ones above it.
        for extent, stride in zip(shape, strides):
            offset = (extent - 1) * stride
            if stride < 0:
                low += offset
            else:
                high += offset
        high += itemsize
    return low, high
|
Returns pointers to the end-points of an array.
Parameters
----------
a : ndarray
Input array. It must conform to the Python-side of the array
interface.
Returns
-------
(low, high) : tuple of 2 integers
The first integer is the first byte of the array, the second
integer is just past the last byte of the array. If `a` is not
contiguous it will not use every byte between the (`low`, `high`)
values.
Examples
--------
>>> import numpy as np
>>> I = np.eye(2, dtype=np.float32); I.dtype
dtype('float32')
>>> low, high = np.lib.array_utils.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
>>> I = np.eye(2); I.dtype
dtype('float64')
>>> low, high = np.lib.array_utils.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
|
python
|
numpy/lib/_array_utils_impl.py
| 12
|
[
"a"
] | false
| 6
| 7.84
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
random
|
/**
 * Creates a random string whose length is the number of characters specified,
 * delegating to the secure random-string generator.
 * @param count the length of random string to create.
 * @return the random string.
 * @throws IllegalArgumentException if {@code count} is negative.
 * @deprecated Use {@code next(int)} from {@code secure()}, {@code secureStrong()}, or {@code insecure()}.
 */
@Deprecated
public static String random(final int count) {
    return secure().next(count);
}
|
Creates a random string whose length is the number of characters specified.
<p>
Characters will be chosen from the set of all characters.
</p>
@param count the length of random string to create.
@return the random string.
@throws IllegalArgumentException if {@code count} < 0.
@deprecated Use {@link #next(int)} from {@link #secure()}, {@link #secureStrong()}, or {@link #insecure()}.
|
java
|
src/main/java/org/apache/commons/lang3/RandomStringUtils.java
| 134
|
[
"count"
] |
String
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
sacChol8x8Damped
|
/**
 * Cholesky decomposition of an 8x8 matrix given by its lower-triangular half,
 * with Levenberg-style damping: diagonal entries of A are scaled by
 * (1 + lambda) before decomposition. Writes the lower-triangular factor to L.
 * A and L may alias fully (in-place) or not at all, but not partially.
 *
 * @return non-zero on success, zero if a negative pivot is encountered
 *         (matrix not positive-definite under the given damping).
 */
static inline int sacChol8x8Damped(const float (*A)[8],
                                   float                 lambda,
                                   float               (*L)[8]){
    const int   n         = 8;
    const float diagScale = lambda + 1.0f;

    for(int row = 0; row < n; row++){
        /* Off-diagonal entries of this row: Lij = (Aij - sum Lik*Ljk) / Ljj */
        for(int col = 0; col < row; col++){
            float acc = A[row][col];
            for(int k = 0; k < col; k++){
                acc -= L[row][k] * L[col][k];
            }
            L[row][col] = acc / L[col][col];
        }

        /* Diagonal entry, damped: Ljj = sqrt(Ajj*(1+lambda) - sum Ljk^2) */
        float acc = A[row][row] * diagScale;
        for(int k = 0; k < row; k++){
            acc -= L[row][k] * L[row][k];
        }
        if(acc < 0){
            return 0; /* not positive-definite: abort */
        }
        L[row][row] = sqrtf(acc);
    }

    return 1;
}
|
Cholesky decomposition on 8x8 real positive-definite matrix defined by its
lower-triangular half. Outputs L, the lower triangular part of the
decomposition.
A and L can overlap fully (in-place) or not at all, but may not partially
overlap.
For damping, the diagonal elements are scaled by 1.0 + lambda.
Returns zero if decomposition unsuccessful, and non-zero otherwise.
Source: http://en.wikipedia.org/wiki/Cholesky_decomposition#
The_Cholesky.E2.80.93Banachiewicz_and_Cholesky.E2.80.93Crout_algorithms
|
cpp
|
modules/calib3d/src/rho.cpp
| 2,345
|
[
"lambda"
] | true
| 6
| 6.72
|
opencv/opencv
| 85,374
|
doxygen
| false
|
|
_num_features
|
def _num_features(X):
"""Return the number of features in an array-like X.
This helper function tries hard to avoid to materialize an array version
of X unless necessary. For instance, if X is a list of lists,
this function will return the length of the first element, assuming
that subsequent elements are all lists of the same length without
checking.
Parameters
----------
X : array-like
array-like to get the number of features.
Returns
-------
features : int
Number of features
"""
type_ = type(X)
if type_.__module__ == "builtins":
type_name = type_.__qualname__
else:
type_name = f"{type_.__module__}.{type_.__qualname__}"
message = f"Unable to find the number of features from X of type {type_name}"
if not hasattr(X, "__len__") and not hasattr(X, "shape"):
if not hasattr(X, "__array__"):
raise TypeError(message)
# Only convert X to a numpy array if there is no cheaper, heuristic
# option.
X = np.asarray(X)
if hasattr(X, "shape"):
if not hasattr(X.shape, "__len__") or len(X.shape) <= 1:
message += f" with shape {X.shape}"
raise TypeError(message)
return X.shape[1]
first_sample = X[0]
# Do not consider an array-like of strings or dicts to be a 2D array
if isinstance(first_sample, (str, bytes, dict)):
message += f" where the samples are of type {type(first_sample).__qualname__}"
raise TypeError(message)
try:
# If X is a list of lists, for instance, we assume that all nested
# lists have the same length without checking or converting to
# a numpy array to keep this function call as cheap as possible.
return len(first_sample)
except Exception as err:
raise TypeError(message) from err
|
Return the number of features in an array-like X.
This helper function tries hard to avoid to materialize an array version
of X unless necessary. For instance, if X is a list of lists,
this function will return the length of the first element, assuming
that subsequent elements are all lists of the same length without
checking.
Parameters
----------
X : array-like
array-like to get the number of features.
Returns
-------
features : int
Number of features
|
python
|
sklearn/utils/validation.py
| 318
|
[
"X"
] | false
| 10
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
make_function_with_closure
|
def make_function_with_closure(
    self,
    fn_name: str,
    code: types.CodeType,
) -> None:
    """Creates a closure with code object `code`.

    Expects the TOS to be the tuple of cells to use for this closure.
    TOS will be popped to create the closure.

    Args:
    - fn_name: name of the function
    - code: code object of the function
      (does not include the tuple of cells on the TOS)
    """
    out = self._output
    out.append(self.create_load_const(code))
    if sys.version_info < (3, 11):
        # Pre-3.11 MAKE_FUNCTION also consumes the qualified name constant.
        out.append(self.create_load_const(fn_name))
    if sys.version_info >= (3, 13):
        # 3.13 split MAKE_FUNCTION; the closure tuple (flag 0x08) is attached
        # via a separate SET_FUNCTION_ATTRIBUTE instruction.
        out.append(create_instruction("MAKE_FUNCTION"))
        out.append(create_instruction("SET_FUNCTION_ATTRIBUTE", arg=0x08))
    else:
        out.append(create_instruction("MAKE_FUNCTION", arg=0x08))
    self.clear_tos()
|
Creates a closure with code object `code`.
Expects the TOS to be the tuple of cells to use for this closure.
TOS will be popped to create the closure.
Args:
- fn_name: name of the function
- code: code object of the function
(does not include the tuple of cells on the TOS)
|
python
|
torch/_dynamo/codegen.py
| 543
|
[
"self",
"fn_name",
"code"
] |
None
| true
| 4
| 6.72
|
pytorch/pytorch
| 96,034
|
google
| false
|
maybeThrow
|
/**
 * Throws the exception corresponding to this error if there is one;
 * otherwise does nothing.
 */
public void maybeThrow() {
    if (exception != null) {
        throw this.exception;
    }
}
|
Throw the exception corresponding to this error if there is one
|
java
|
clients/src/main/java/org/apache/kafka/common/protocol/Errors.java
| 488
|
[] |
void
| true
| 2
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
entry_points_with_dist
|
def entry_points_with_dist(group: str) -> Iterator[EPnD]:
    """
    Retrieve entry points of the given group, paired with their distribution.

    Behaves like ``entry_points()`` from ``importlib.metadata``, except each
    item also carries the distribution the entry point was loaded from.
    The same package may appear more than once when it is reachable from
    multiple ``sys.path`` entries; callers should deduplicate if needed.

    :param group: Filter results to only this entrypoint group
    :return: Generator of (EntryPoint, Distribution) objects for the specified groups
    """
    grouped = _get_grouped_entry_points()
    return iter(grouped[group])
|
Retrieve entry points of the given group.
This is like the ``entry_points()`` function from ``importlib.metadata``,
except it also returns the distribution the entry point was loaded from.
Note that this may return multiple distributions to the same package if they
are loaded from different ``sys.path`` entries. The caller site should
implement appropriate deduplication logic if needed.
:param group: Filter results to only this entrypoint group
:return: Generator of (EntryPoint, Distribution) objects for the specified groups
|
python
|
airflow-core/src/airflow/utils/entry_points.py
| 47
|
[
"group"
] |
Iterator[EPnD]
| true
| 1
| 6.4
|
apache/airflow
| 43,597
|
sphinx
| false
|
using
|
/**
 * Factory method to create an {@link InstanceSupplier} from a
 * {@link ThrowingSupplier}. An existing {@code InstanceSupplier} is returned
 * as-is; any other supplier is adapted to ignore the registered bean.
 * @param <T> the type of instance supplied by this supplier
 * @param supplier the source supplier
 * @return a new {@link InstanceSupplier}
 */
static <T> InstanceSupplier<T> using(ThrowingSupplier<T> supplier) {
	Assert.notNull(supplier, "Supplier must not be null");
	return (supplier instanceof InstanceSupplier<T> instanceSupplier)
			? instanceSupplier
			: registeredBean -> supplier.getWithException();
}
|
Factory method to create an {@link InstanceSupplier} from a
{@link ThrowingSupplier}.
@param <T> the type of instance supplied by this supplier
@param supplier the source supplier
@return a new {@link InstanceSupplier}
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/InstanceSupplier.java
| 99
|
[
"supplier"
] | true
| 2
| 7.28
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
_validation_error_message
|
def _validation_error_message(self, value, allow_listlike: bool = False) -> str:
"""
Construct an exception message on validation error.
Some methods allow only scalar inputs, while others allow either scalar
or listlike.
Parameters
----------
allow_listlike: bool, default False
Returns
-------
str
"""
if hasattr(value, "dtype") and getattr(value, "ndim", 0) > 0:
msg_got = f"{value.dtype} array"
else:
msg_got = f"'{type(value).__name__}'"
if allow_listlike:
msg = (
f"value should be a '{self._scalar_type.__name__}', 'NaT', "
f"or array of those. Got {msg_got} instead."
)
else:
msg = (
f"value should be a '{self._scalar_type.__name__}' or 'NaT'. "
f"Got {msg_got} instead."
)
return msg
|
Construct an exception message on validation error.
Some methods allow only scalar inputs, while others allow either scalar
or listlike.
Parameters
----------
allow_listlike: bool, default False
Returns
-------
str
|
python
|
pandas/core/arrays/datetimelike.py
| 640
|
[
"self",
"value",
"allow_listlike"
] |
str
| true
| 6
| 6.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
pallas
|
def pallas(self, kernel_name: str, source_code: str):
    """
    Compile Pallas (JAX experimental) kernels.

    Args:
        kernel_name: Name of the kernel to be defined
        source_code: Source code of the Pallas kernel, as a string

    Note:
        Pallas kernels are Python code that uses JAX and Pallas APIs.
        We use the PyCodeCache to write the source code to a file and load it.
    """
    from torch._inductor.codegen.pallas import MAIN_SUFFIX, PallasKernelWrapper

    kernel_code_log.info("Pallas Kernel:\n%s", source_code)

    def build_wrapper():
        # Persist the source through PyCodeCache and import it as a module.
        key, path = torch._inductor.codecache.PyCodeCache.write(source_code)
        mod = torch._inductor.codecache.PyCodeCache.load_by_key_path(key, path)
        # The generated module must expose a specially-named entry point.
        main_func_name = f"{kernel_name}_{MAIN_SUFFIX}"
        entry = getattr(mod, main_func_name, None)
        if entry is None:
            available = [name for name in dir(mod) if callable(getattr(mod, name))]
            raise RuntimeError(
                f"Could not find Pallas main kernel function '{main_func_name}'. Available callables: {available}"
            )
        return PallasKernelWrapper(entry, kernel_path=path)

    if get_compile_threads() <= 1:
        # Single-threaded compile: do the work inline.
        return build_wrapper()
    future = self.submit(build_wrapper)
    return LambdaFuture(lambda: future.result())
|
Compile Pallas (JAX experimental) kernels.
Args:
kernel_name: Name of the kernel to be defined
source_code: Source code of the Pallas kernel, as a string
Note:
Pallas kernels are Python code that uses JAX and Pallas APIs.
We use the PyCodeCache to write the source code to a file and load it.
|
python
|
torch/_inductor/async_compile.py
| 604
|
[
"self",
"kernel_name",
"source_code"
] | true
| 4
| 7.04
|
pytorch/pytorch
| 96,034
|
google
| false
|
|
arraycopy
|
/**
 * A fluent version of {@link System#arraycopy(Object, int, Object, int, int)} that returns
 * the destination array.
 * @param <T> the array type.
 * @param source the source array.
 * @param sourcePos starting position in the source array.
 * @param dest the destination array.
 * @param destPos starting position in the destination data.
 * @param length the number of array elements to be copied.
 * @return dest
 * @throws IndexOutOfBoundsException if copying would cause access of data outside array bounds.
 * @throws ArrayStoreException if an element could not be stored into {@code dest} because of a type mismatch.
 * @throws NullPointerException if either {@code source} or {@code dest} is {@code null}.
 */
public static <T> T arraycopy(final T source, final int sourcePos, final T dest, final int destPos, final int length) {
    System.arraycopy(source, sourcePos, dest, destPos, length);
    return dest;
}
|
A fluent version of {@link System#arraycopy(Object, int, Object, int, int)} that returns the destination array.
@param <T> the type.
@param source the source array.
@param sourcePos starting position in the source array.
@param dest the destination array.
@param destPos starting position in the destination data.
@param length the number of array elements to be copied.
@return dest
@throws IndexOutOfBoundsException if copying would cause access of data outside array bounds.
@throws ArrayStoreException if an element in the {@code src} array could not be stored into the {@code dest} array because of a type
mismatch.
@throws NullPointerException if either {@code src} or {@code dest} is {@code null}.
@since 3.15.0
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 1,439
|
[
"source",
"sourcePos",
"dest",
"destPos",
"length"
] |
T
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
nearest
|
def nearest(self, limit: int | None = None):
    """
    Resample by using the nearest value.

    When resampling data, missing values may appear (e.g., when the
    resampling frequency is higher than the original frequency).
    The `nearest` method will replace ``NaN`` values that appeared in
    the resampled data with the value from the nearest member of the
    sequence, based on the index value.
    Missing values that existed in the original data will not be modified.
    If `limit` is given, fill only this many values in each direction for
    each of the original values.

    Parameters
    ----------
    limit : int, optional
        Limit of how many values to fill.

    Returns
    -------
    Series or DataFrame
        An upsampled Series or DataFrame with ``NaN`` values filled with
        their nearest value.

    See Also
    --------
    bfill : Backward fill the new missing values in the resampled data.
    ffill : Forward fill ``NaN`` values.

    Examples
    --------
    >>> s = pd.Series([1, 2], index=pd.date_range("20180101", periods=2, freq="1h"))
    >>> s
    2018-01-01 00:00:00    1
    2018-01-01 01:00:00    2
    Freq: h, dtype: int64

    >>> s.resample("15min").nearest()
    2018-01-01 00:00:00    1
    2018-01-01 00:15:00    1
    2018-01-01 00:30:00    2
    2018-01-01 00:45:00    2
    2018-01-01 01:00:00    2
    Freq: 15min, dtype: int64

    Limit the number of upsampled values imputed by the nearest:

    >>> s.resample("15min").nearest(limit=1)
    2018-01-01 00:00:00    1.0
    2018-01-01 00:15:00    1.0
    2018-01-01 00:30:00    NaN
    2018-01-01 00:45:00    2.0
    2018-01-01 01:00:00    2.0
    Freq: 15min, dtype: float64
    """
    # Delegate to the shared upsampling machinery with the 'nearest' fill method.
    return self._upsample("nearest", limit=limit)
|
Resample by using the nearest value.
When resampling data, missing values may appear (e.g., when the
resampling frequency is higher than the original frequency).
The `nearest` method will replace ``NaN`` values that appeared in
the resampled data with the value from the nearest member of the
sequence, based on the index value.
Missing values that existed in the original data will not be modified.
If `limit` is given, fill only this many values in each direction for
each of the original values.
Parameters
----------
limit : int, optional
Limit of how many values to fill.
Returns
-------
Series or DataFrame
An upsampled Series or DataFrame with ``NaN`` values filled with
their nearest value.
See Also
--------
bfill : Backward fill the new missing values in the resampled data.
ffill : Forward fill ``NaN`` values.
Examples
--------
>>> s = pd.Series([1, 2], index=pd.date_range("20180101", periods=2, freq="1h"))
>>> s
2018-01-01 00:00:00 1
2018-01-01 01:00:00 2
Freq: h, dtype: int64
>>> s.resample("15min").nearest()
2018-01-01 00:00:00 1
2018-01-01 00:15:00 1
2018-01-01 00:30:00 2
2018-01-01 00:45:00 2
2018-01-01 01:00:00 2
Freq: 15min, dtype: int64
Limit the number of upsampled values imputed by the nearest:
>>> s.resample("15min").nearest(limit=1)
2018-01-01 00:00:00 1.0
2018-01-01 00:15:00 1.0
2018-01-01 00:30:00 NaN
2018-01-01 00:45:00 2.0
2018-01-01 01:00:00 2.0
Freq: 15min, dtype: float64
|
python
|
pandas/core/resample.py
| 687
|
[
"self",
"limit"
] | true
| 1
| 7.28
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
readLines
|
/**
 * Streams lines from a URL, stopping when the callback returns false or all
 * lines have been read. Delegates to {@code asCharSource(url, charset)}.
 * @param url the URL to read from
 * @param charset the charset used to decode the input stream
 * @param callback the LineProcessor to use to handle the lines
 * @return the output of processing the lines
 * @throws IOException if an I/O error occurs
 */
@CanIgnoreReturnValue // some processors won't return a useful result
@ParametricNullness
public static <T extends @Nullable Object> T readLines(
    URL url, Charset charset, LineProcessor<T> callback) throws IOException {
  return asCharSource(url, charset).readLines(callback);
}
|
Streams lines from a URL, stopping when our callback returns false, or we have read all of the
lines.
@param url the URL to read from
@param charset the charset used to decode the input stream; see {@link StandardCharsets} for
helpful predefined constants
@param callback the LineProcessor to use to handle the lines
@return the output of processing the lines
@throws IOException if an I/O error occurs
|
java
|
android/guava/src/com/google/common/io/Resources.java
| 122
|
[
"url",
"charset",
"callback"
] |
T
| true
| 1
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
createAsMap
|
/**
 * Creates the map view returned by {@code asMap()}, backed by the
 * multimap's underlying map.
 */
@Override
Map<K, Collection<V>> createAsMap() {
  return new AsMap(map);
}
|
Creates the map view returned by {@code asMap()}, backed by the underlying map
of this multimap.
@return a map view from each key to its collection of values
|
java
|
android/guava/src/com/google/common/collect/AbstractMapBasedMultimap.java
| 1,274
|
[] | true
| 1
| 6.16
|
google/guava
| 51,352
|
javadoc
| false
|
|
centroids
|
/**
 * Returns an unmodifiable view of the centroids in this digest.
 */
@Override
public Collection<Centroid> centroids() {
    return Collections.unmodifiableCollection(summary);
}
|
@return An unmodifiable view of the centroids in this digest.
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/AVLTreeDigest.java
| 362
|
[] | true
| 1
| 6.96
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
asList
|
/**
 * Returns a fixed-size list backed by the specified array, similar to
 * {@link Arrays#asList(Object[])}. The list supports {@link List#set(int, Object)},
 * but setting a value to {@code null} results in a {@link NullPointerException}.
 * An empty array yields an immutable empty list.
 * @param backingArray the array to back the list
 * @return a list view of the array
 */
public static List<Character> asList(char... backingArray) {
  return (backingArray.length == 0)
      ? Collections.emptyList()
      : new CharArrayAsList(backingArray);
}
|
Returns a fixed-size list backed by the specified array, similar to {@link
Arrays#asList(Object[])}. The list supports {@link List#set(int, Object)}, but any attempt to
set a value to {@code null} will result in a {@link NullPointerException}.
<p>The returned list maintains the values, but not the identities, of {@code Character} objects
written to or read from it. For example, whether {@code list.get(0) == list.get(0)} is true for
the returned list is unspecified.
<p>The returned list is serializable.
@param backingArray the array to back the list
@return a list view of the array
|
java
|
android/guava/src/com/google/common/primitives/Chars.java
| 569
|
[] | true
| 2
| 8.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
alterShareGroupOffsets
|
/**
 * Alters offsets for the specified group with default options. This is a
 * convenience overload of
 * {@code alterShareGroupOffsets(String, Map, AlterShareGroupOffsetsOptions)}.
 * @param groupId The group for which to alter offsets.
 * @param offsets A map of offsets by partition.
 * @return The AlterShareGroupOffsetsResult.
 */
default AlterShareGroupOffsetsResult alterShareGroupOffsets(String groupId, Map<TopicPartition, Long> offsets) {
    return alterShareGroupOffsets(groupId, offsets, new AlterShareGroupOffsetsOptions());
}
|
Alters offsets for the specified group. In order to succeed, the group must be empty.
<p>This is a convenience method for {@link #alterShareGroupOffsets(String, Map, AlterShareGroupOffsetsOptions)} with default options.
See the overload for more details.
@param groupId The group for which to alter offsets.
@param offsets A map of offsets by partition.
@return The AlterShareGroupOffsetsResult.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 1,967
|
[
"groupId",
"offsets"
] |
AlterShareGroupOffsetsResult
| true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
randomDouble
|
/**
 * Generates a random double between 0 (inclusive) and Double.MAX_VALUE (exclusive).
 * @return the random double.
 * @see #randomDouble(double, double)
 */
public double randomDouble() {
    return randomDouble(0, Double.MAX_VALUE);
}
|
Generates a random double between 0 (inclusive) and Double.MAX_VALUE (exclusive).
@return the random double.
@see #randomDouble(double, double)
@since 3.16.0
|
java
|
src/main/java/org/apache/commons/lang3/RandomUtils.java
| 329
|
[] | true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
indexSupportsIncludeFilter
|
/**
 * Determines whether the given include {@link TypeFilter} can be answered by the
 * component index: annotation filters whose annotation is a stereotype for the
 * index, or assignable-type filters whose target is locally annotated with
 * {@link Indexed}.
 * @param filter the filter to check
 * @return whether the index supports this include filter
 */
private boolean indexSupportsIncludeFilter(TypeFilter filter) {
	if (filter instanceof AnnotationTypeFilter annotationFilter) {
		return isStereotypeAnnotationForIndex(annotationFilter.getAnnotationType());
	}
	if (filter instanceof AssignableTypeFilter assignableFilter) {
		return AnnotationUtils.isAnnotationDeclaredLocally(
				Indexed.class, assignableFilter.getTargetType());
	}
	return false;
}
|
Determine if the specified include {@link TypeFilter} is supported by the index.
@param filter the filter to check
@return whether the index supports this include filter
@since 5.0
@see #registerCandidateTypeForIncludeFilter(String, TypeFilter)
@see #extractStereotype(TypeFilter)
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/ClassPathScanningCandidateComponentProvider.java
| 347
|
[
"filter"
] | true
| 3
| 7.28
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
flush
|
/**
 * Flushes the underlying generator so any buffered output is written out.
 *
 * @throws IOException if the underlying generator fails to flush
 */
@Override
public void flush() throws IOException {
    generator.flush();
}
|
Flushes the underlying generator so buffered output is written out.
@throws IOException if the generator fails to flush
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java
| 1,281
|
[] |
void
| true
| 1
| 6.64
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
readAsn1Object
|
/**
 * Reads an ASN.1 object and verifies that it has the expected type.
 * @param requiredType the expected ASN.1 type code
 * @return the parsed object
 * @throws IOException if data can not be parsed
 * @throws IllegalStateException if the parsed object is of the wrong type
 */
public Asn1Object readAsn1Object(int requiredType) throws IOException {
    final Asn1Object parsed = readAsn1Object();
    if (parsed.type == requiredType) {
        return parsed;
    }
    throw new IllegalStateException(
        "Expected ASN.1 object of type 0x" + Integer.toHexString(requiredType) + " but was 0x" + Integer.toHexString(parsed.type)
    );
}
|
Read an object and verify its type
@param requiredType The expected type code
@throws IOException if data can not be parsed
@throws IllegalStateException if the parsed object is of the wrong type
|
java
|
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/DerParser.java
| 71
|
[
"requiredType"
] |
Asn1Object
| true
| 2
| 6.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
parsePackageName
|
/**
 * Parse a package name from a specifier.
 * @param {string} specifier - The import specifier.
 * @param {string | URL | undefined} base - The parent URL (used in error messages).
 * @returns {{ packageName: string, packageSubpath: string, isScoped: boolean }}
 * @throws {ERR_INVALID_MODULE_SPECIFIER} If the specifier is not a valid package name.
 */
function parsePackageName(specifier, base) {
  let separatorIndex = StringPrototypeIndexOf(specifier, '/');
  let validPackageName = true;
  let isScoped = false;
  if (specifier[0] === '@') {
    isScoped = true;
    // Scoped names ('@scope/name') need a second '/' search past the scope.
    if (separatorIndex === -1 || specifier.length === 0) {
      validPackageName = false;
    } else {
      separatorIndex = StringPrototypeIndexOf(
        specifier, '/', separatorIndex + 1);
    }
  }
  // Everything before the (second, for scoped) '/' is the package name.
  const packageName = separatorIndex === -1 ?
    specifier : StringPrototypeSlice(specifier, 0, separatorIndex);
  // Package name cannot have leading . and cannot have percent-encoding or
  // \\ separators.
  if (RegExpPrototypeExec(invalidPackageNameRegEx, packageName) !== null) {
    validPackageName = false;
  }
  if (!validPackageName) {
    throw new ERR_INVALID_MODULE_SPECIFIER(
      specifier, 'is not a valid package name', fileURLToPath(base));
  }
  // '.' for a bare package name, './sub/path' when a subpath follows it.
  const packageSubpath = '.' + (separatorIndex === -1 ? '' :
    StringPrototypeSlice(specifier, separatorIndex));
  return { packageName, packageSubpath, isScoped };
}
|
Parse a package name from a specifier.
@param {string} specifier - The import specifier.
@param {string | URL | undefined} base - The parent URL.
@returns {object}
|
javascript
|
lib/internal/modules/package_json_reader.js
| 248
|
[
"specifier",
"base"
] | false
| 9
| 6.4
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
remove
|
/**
 * Removes all occurrences of a character from within the source string.
 * <p>
 * A {@code null} source string will return {@code null}. An empty ("") source
 * string will return the empty string; a string not containing the character
 * is returned unchanged.
 * </p>
 * @param str the source String to search, may be null.
 * @param remove the char to search for and remove.
 * @return the string with the char removed, {@code null} if null String input.
 */
public static String remove(final String str, final char remove) {
    if (isEmpty(str) || str.indexOf(remove) == INDEX_NOT_FOUND) {
        // Nothing to remove: return the original reference.
        return str;
    }
    final StringBuilder builder = new StringBuilder(str.length());
    for (int i = 0; i < str.length(); i++) {
        final char ch = str.charAt(i);
        if (ch != remove) {
            builder.append(ch);
        }
    }
    return builder.toString();
}
|
Removes all occurrences of a character from within the source string.
<p>
A {@code null} source string will return {@code null}. An empty ("") source string will return the empty string.
</p>
<pre>
StringUtils.remove(null, *) = null
StringUtils.remove("", *) = ""
StringUtils.remove("queued", 'u') = "qeed"
StringUtils.remove("queued", 'z') = "queued"
</pre>
@param str the source String to search, may be null.
@param remove the char to search for and remove, may be null.
@return the substring with the char removed if found, {@code null} if null String input.
@since 2.1
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 5,672
|
[
"str",
"remove"
] |
String
| true
| 5
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
throwInvalidPathError
|
/**
 * Throws a `RuntimeError` reporting that an image loader received an invalid
 * path, listing the accepted URL formats. The message is only built in dev mode.
 * @param path the invalid path that was supplied
 * @param exampleUrls example of full URLs for the loader (used in the message)
 */
function throwInvalidPathError(path: unknown, exampleUrls: string[]): never {
  const formats = exampleUrls.join(' or ');
  throw new RuntimeError(
    RuntimeErrorCode.INVALID_LOADER_ARGUMENTS,
    ngDevMode &&
      `Image loader has detected an invalid path (\`${path}\`). ` +
        `To fix this, supply a path using one of the following formats: ${formats}`,
  );
}
|
Internal helper function that makes it easier to introduce custom image loaders for the
`NgOptimizedImage` directive. It is enough to specify a URL builder function to obtain full DI
configuration for a given loader: a DI token corresponding to the actual loader function, plus DI
tokens managing preconnect check functionality.
@param buildUrlFn a function returning a full URL based on loader's configuration
@param exampleUrls example of full URLs for a given loader (used in error messages)
@returns a set of DI providers corresponding to the configured image loader
|
typescript
|
packages/common/src/directives/ng_optimized_image/image_loaders/image_loader.ts
| 120
|
[
"path",
"exampleUrls"
] | true
| 2
| 7.6
|
angular/angular
| 99,544
|
jsdoc
| false
|
|
write_th
|
def write_th(
self, s: Any, header: bool = False, indent: int = 0, tags: str | None = None
) -> None:
"""
Method for writing a formatted <th> cell.
If col_space is set on the formatter then that is used for
the value of min-width.
Parameters
----------
s : object
The data to be written inside the cell.
header : bool, default False
Set to True if the <th> is for use inside <thead>. This will
cause min-width to be set if there is one.
indent : int, default 0
The indentation level of the cell.
tags : str, default None
Tags to include in the cell.
Returns
-------
A written <th> cell.
"""
col_space = self.col_space.get(s, None)
if header and col_space is not None:
tags = tags or ""
tags += f'style="min-width: {col_space};"'
self._write_cell(s, kind="th", indent=indent, tags=tags)
|
Method for writing a formatted <th> cell.
If col_space is set on the formatter then that is used for
the value of min-width.
Parameters
----------
s : object
The data to be written inside the cell.
header : bool, default False
Set to True if the <th> is for use inside <thead>. This will
cause min-width to be set if there is one.
indent : int, default 0
The indentation level of the cell.
tags : str, default None
Tags to include in the cell.
Returns
-------
A written <th> cell.
|
python
|
pandas/io/formats/html.py
| 147
|
[
"self",
"s",
"header",
"indent",
"tags"
] |
None
| true
| 4
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
merge
|
/**
 * Merges the provided exponential histograms into a single new histogram with
 * at most the given number of buckets. The merger is closed automatically.
 * @param maxBucketCount the maximum number of buckets the result histogram is allowed to have
 * @param breaker the circuit breaker to use to limit memory allocations
 * @param histograms the histograms to merge
 * @return the merged histogram
 */
static ReleasableExponentialHistogram merge(
    int maxBucketCount,
    ExponentialHistogramCircuitBreaker breaker,
    Iterator<? extends ExponentialHistogram> histograms
) {
    try (ExponentialHistogramMerger merger = ExponentialHistogramMerger.create(maxBucketCount, breaker)) {
        histograms.forEachRemaining(merger::add);
        return merger.getAndClear();
    }
}
|
Merges the provided exponential histograms to a new, single histogram with at most the given amount of buckets.
@param maxBucketCount the maximum number of buckets the result histogram is allowed to have
@param breaker the circuit breaker to use to limit memory allocations
@param histograms the histograms to merge
@return the merged histogram
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogram.java
| 270
|
[
"maxBucketCount",
"breaker",
"histograms"
] |
ReleasableExponentialHistogram
| true
| 2
| 7.6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
ToggleActivityList
|
function ToggleActivityList({
dispatch,
state,
}: {
dispatch: LayoutDispatch,
state: LayoutState,
}) {
return (
<Button
onClick={() =>
dispatch({
type: 'ACTION_SET_ACTIVITY_LIST_TOGGLE',
payload: null,
})
}
title={
state.activityListHidden ? 'Show Activity List' : 'Hide Activity List'
}>
<ButtonIcon
type={state.activityListHidden ? 'panel-left-open' : 'panel-left-close'}
/>
</Button>
);
}
|
Copyright (c) Meta Platforms, Inc. and affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
@flow
|
javascript
|
packages/react-devtools-shared/src/devtools/views/SuspenseTab/SuspenseTab.js
| 98
|
[] | false
| 3
| 6.24
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
processRange
|
function processRange(range: TextRangeWithKind, rangeStart: LineAndCharacter, parent: Node, contextNode: Node, dynamicIndentation: DynamicIndentation): LineAction {
const rangeHasError = rangeContainsError(range);
let lineAction = LineAction.None;
if (!rangeHasError) {
if (!previousRange) {
// trim whitespaces starting from the beginning of the span up to the current line
const originalStart = sourceFile.getLineAndCharacterOfPosition(originalRange.pos);
trimTrailingWhitespacesForLines(originalStart.line, rangeStart.line);
}
else {
lineAction = processPair(range, rangeStart.line, parent, previousRange, previousRangeStartLine, previousParent, contextNode, dynamicIndentation);
}
}
previousRange = range;
previousRangeTriviaEnd = range.end;
previousParent = parent;
previousRangeStartLine = rangeStart.line;
return lineAction;
}
|
Tries to compute the indentation for a list element.
If list element is not in range then
function will pick its actual indentation
so it can be pushed downstream as inherited indentation.
If list element is in the range - its indentation will be equal
to inherited indentation from its predecessors.
|
typescript
|
src/services/formatting/formatting.ts
| 1,079
|
[
"range",
"rangeStart",
"parent",
"contextNode",
"dynamicIndentation"
] | true
| 4
| 6
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
geoToH3Address
|
public static String geoToH3Address(double lat, double lng, int res) {
return h3ToString(geoToH3(lat, lng, res));
}
|
Find the H3 index of the resolution <code>res</code> cell containing the lat/lon (in degrees)
@param lat Latitude in degrees.
@param lng Longitude in degrees.
@param res Resolution, 0 <= res <= 15
@return The H3 index.
@throws IllegalArgumentException Latitude, longitude, or resolution is out of range.
|
java
|
libs/h3/src/main/java/org/elasticsearch/h3/H3.java
| 215
|
[
"lat",
"lng",
"res"
] |
String
| true
| 1
| 6.64
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
poll_sqs
|
async def poll_sqs(self, client: BaseAwsConnection) -> Collection:
"""
Asynchronously poll SQS queue to retrieve messages.
:param client: SQS connection
:return: A list of messages retrieved from SQS
"""
self.log.info("SqsSensor checking for message on queue: %s", self.sqs_queue)
self.log.debug(
"Polling SQS queue '%s' for up to %d message(s) with %d seconds wait time",
self.sqs_queue,
self.max_messages,
self.wait_time_seconds,
)
receive_message_kwargs = {
"QueueUrl": self.sqs_queue,
"MaxNumberOfMessages": self.max_messages,
"WaitTimeSeconds": self.wait_time_seconds,
}
if self.visibility_timeout is not None:
receive_message_kwargs["VisibilityTimeout"] = self.visibility_timeout
self.log.debug("Using visibility timeout: %d seconds", self.visibility_timeout)
response = await client.receive_message(**receive_message_kwargs)
message_count = len(response.get("Messages", []))
if message_count > 0:
self.log.debug("Received %d message(s) from SQS API call", message_count)
else:
self.log.debug("No messages returned from SQS API call")
return response
|
Asynchronously poll SQS queue to retrieve messages.
:param client: SQS connection
:return: A list of messages retrieved from SQS
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/triggers/sqs.py
| 132
|
[
"self",
"client"
] |
Collection
| true
| 4
| 8.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
_get_counts_nanvar
|
def _get_counts_nanvar(
values_shape: Shape,
mask: npt.NDArray[np.bool_] | None,
axis: AxisInt | None,
ddof: int,
dtype: np.dtype = np.dtype(np.float64),
) -> tuple[float | np.ndarray, float | np.ndarray]:
"""
Get the count of non-null values along an axis, accounting
for degrees of freedom.
Parameters
----------
values_shape : Tuple[int, ...]
shape tuple from values ndarray, used if mask is None
mask : Optional[ndarray[bool]]
locations in values that should be considered missing
axis : Optional[int]
axis to count along
ddof : int
degrees of freedom
dtype : type, optional
type to use for count
Returns
-------
count : int, np.nan or np.ndarray
d : int, np.nan or np.ndarray
"""
count = _get_counts(values_shape, mask, axis, dtype=dtype)
d = count - dtype.type(ddof)
# always return NaN, never inf
if is_float(count):
if count <= ddof:
# error: Incompatible types in assignment (expression has type
# "float", variable has type "Union[floating[Any], ndarray[Any,
# dtype[floating[Any]]]]")
count = np.nan # type: ignore[assignment]
d = np.nan
else:
# count is not narrowed by is_float check
count = cast(np.ndarray, count)
mask = count <= ddof
if mask.any():
np.putmask(d, mask, np.nan)
np.putmask(count, mask, np.nan)
return count, d
|
Get the count of non-null values along an axis, accounting
for degrees of freedom.
Parameters
----------
values_shape : Tuple[int, ...]
shape tuple from values ndarray, used if mask is None
mask : Optional[ndarray[bool]]
locations in values that should be considered missing
axis : Optional[int]
axis to count along
ddof : int
degrees of freedom
dtype : type, optional
type to use for count
Returns
-------
count : int, np.nan or np.ndarray
d : int, np.nan or np.ndarray
|
python
|
pandas/core/nanops.py
| 861
|
[
"values_shape",
"mask",
"axis",
"ddof",
"dtype"
] |
tuple[float | np.ndarray, float | np.ndarray]
| true
| 5
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
maybeBindThisOrTargetOrArgsFromPointcutExpression
|
private void maybeBindThisOrTargetOrArgsFromPointcutExpression() {
if (this.numberOfRemainingUnboundArguments > 1) {
throw new AmbiguousBindingException("Still " + this.numberOfRemainingUnboundArguments +
" unbound args at this()/target()/args() binding stage, with no way to determine between them");
}
List<String> varNames = new ArrayList<>();
String[] tokens = StringUtils.tokenizeToStringArray(this.pointcutExpression, " ");
for (int i = 0; i < tokens.length; i++) {
if (tokens[i].equals("this") ||
tokens[i].startsWith("this(") ||
tokens[i].equals("target") ||
tokens[i].startsWith("target(")) {
PointcutBody body = getPointcutBody(tokens, i);
i += body.numTokensConsumed;
String varName = maybeExtractVariableName(body.text);
if (varName != null) {
varNames.add(varName);
}
}
else if (tokens[i].equals("args") || tokens[i].startsWith("args(")) {
PointcutBody body = getPointcutBody(tokens, i);
i += body.numTokensConsumed;
List<String> candidateVarNames = new ArrayList<>();
maybeExtractVariableNamesFromArgs(body.text, candidateVarNames);
// we may have found some var names that were bound in previous primitive args binding step,
// filter them out...
for (String varName : candidateVarNames) {
if (!alreadyBound(varName)) {
varNames.add(varName);
}
}
}
}
if (varNames.size() > 1) {
throw new AmbiguousBindingException("Found " + varNames.size() +
" candidate this(), target(), or args() variables but only one unbound argument slot");
}
else if (varNames.size() == 1) {
for (int j = 0; j < this.parameterNameBindings.length; j++) {
if (isUnbound(j)) {
bindParameterName(j, varNames.get(0));
break;
}
}
}
// else varNames.size must be 0 and we have nothing to bind.
}
|
Parse the string pointcut expression looking for this(), target() and args() expressions.
If we find one, try and extract a candidate variable name and bind it.
|
java
|
spring-aop/src/main/java/org/springframework/aop/aspectj/AspectJAdviceParameterNameDiscoverer.java
| 481
|
[] |
void
| true
| 15
| 7.2
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
tryInternalFastPathGetFailure
|
@Override
/*
* We should annotate the superclass, InternalFutureFailureAccess, to say that its copy of this
* method returns @Nullable, too. However, we're not sure if we want to make any changes to that
* class, since it's in a separate artifact that we planned to release only a single version of.
*/
protected final @Nullable Throwable tryInternalFastPathGetFailure() {
if (this instanceof Trusted) {
@RetainedLocalRef Object localValue = value();
if (localValue instanceof Failure) {
return ((Failure) localValue).exception;
}
}
return null;
}
|
Usually returns {@code null} but, if this {@code Future} has failed, may <i>optionally</i>
return the cause of the failure. "Failure" means specifically "completed with an exception"; it
does not include "was cancelled." To be explicit: If this method returns a non-null value,
then:
<ul>
<li>{@code isDone()} must return {@code true}
<li>{@code isCancelled()} must return {@code false}
<li>{@code get()} must not block, and it must throw an {@code ExecutionException} with the
return value of this method as its cause
</ul>
<p>This method is {@code protected} so that classes like {@code
com.google.common.util.concurrent.SettableFuture} do not expose it to their users as an
instance method. In the unlikely event that you need to call this method, call {@link
InternalFutures#tryInternalFastPathGetFailure(InternalFutureFailureAccess)}.
@since 27.0
|
java
|
android/guava/src/com/google/common/util/concurrent/AbstractFuture.java
| 809
|
[] |
Throwable
| true
| 3
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
beam_options_to_args
|
def beam_options_to_args(options: dict) -> list[str]:
"""
Return a formatted pipeline options from a dictionary of arguments.
The logic of this method should be compatible with Apache Beam:
https://github.com/apache/beam/blob/77f57d1fc498592089e32701b45505bbdccccd47/sdks/python/
apache_beam/options/pipeline_options.py#L260-L268
WARNING: In case of amending please check the latest main branch implementation!
:param options: Dictionary with options
:return: List of arguments
"""
if not options:
return []
args: list[str] = []
for attr, value in options.items():
if isinstance(value, bool):
if value:
args.append(f"--{attr}")
elif attr in _FLAG_THAT_SETS_FALSE_VALUE:
# Capture overriding flags, which have a different dest
# from the flag name defined in the parser.add_argument
# Eg: no_use_public_ips, which has the dest=use_public_ips
# different from flag name
flag_that_disables_the_option = _FLAG_THAT_SETS_FALSE_VALUE[attr]
args.append(f"--{flag_that_disables_the_option}")
elif attr in _FLAG_THAT_SETS_FALSE_VALUE_JAVA:
# Capture Java flags that should not be skipped by having
# False value
false_value_flag = _FLAG_THAT_SETS_FALSE_VALUE_JAVA[attr]
args.append(f"--{false_value_flag}")
elif isinstance(value, list):
args.extend([f"--{attr}={v}" for v in value])
elif isinstance(value, dict):
args.append(f"--{attr}={json.dumps(value)}")
elif value is None:
# explicitly skip None values,as later they might be passed as string 'None',
# and override value by default https://github.com/apache/beam/pull/24948
continue
else:
args.append(f"--{attr}={value}")
return args
|
Return a formatted pipeline options from a dictionary of arguments.
The logic of this method should be compatible with Apache Beam:
https://github.com/apache/beam/blob/77f57d1fc498592089e32701b45505bbdccccd47/sdks/python/
apache_beam/options/pipeline_options.py#L260-L268
WARNING: In case of amending please check the latest main branch implementation!
:param options: Dictionary with options
:return: List of arguments
|
python
|
providers/apache/beam/src/airflow/providers/apache/beam/hooks/beam.py
| 76
|
[
"options"
] |
list[str]
| true
| 11
| 7.92
|
apache/airflow
| 43,597
|
sphinx
| false
|
registerBeanDefinitions
|
public int registerBeanDefinitions(Map<?, ?> map) throws BeansException {
return registerBeanDefinitions(map, null);
}
|
Register bean definitions contained in a Map, using all property keys (i.e. not
filtering by prefix).
@param map a map of {@code name} to {@code property} (String or Object). Property
values will be strings if coming from a Properties file etc. Property names
(keys) <b>must</b> be Strings. Class keys must be Strings.
@return the number of bean definitions found
@throws BeansException in case of loading or parsing errors
@see #registerBeanDefinitions(java.util.Map, String, String)
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/PropertiesBeanDefinitionReader.java
| 323
|
[
"map"
] | true
| 1
| 6.64
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
onConsume
|
public ConsumerRecords<K, V> onConsume(ConsumerRecords<K, V> records) {
ConsumerRecords<K, V> interceptRecords = records;
for (Plugin<ConsumerInterceptor<K, V>> interceptorPlugin : this.interceptorPlugins) {
try {
interceptRecords = interceptorPlugin.get().onConsume(interceptRecords);
} catch (Exception e) {
// do not propagate interceptor exception, log and continue calling other interceptors
log.warn("Error executing interceptor onConsume callback", e);
}
}
return interceptRecords;
}
|
This is called when the records are about to be returned to the user.
<p>
This method calls {@link ConsumerInterceptor#onConsume(ConsumerRecords)} for each
interceptor. Records returned from each interceptor get passed to onConsume() of the next interceptor
in the chain of interceptors.
<p>
This method does not throw exceptions. If any of the interceptors in the chain throws an exception,
it gets caught and logged, and next interceptor in the chain is called with 'records' returned by the
previous successful interceptor onConsume call.
@param records records to be consumed by the client.
@return records that are either modified by interceptors or same as records passed to this method.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptors.java
| 66
|
[
"records"
] | true
| 2
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
newArray
|
static <T extends @Nullable Object> T[] newArray(T[] reference, int length) {
T[] empty = reference.length == 0 ? reference : Arrays.copyOf(reference, 0);
return Arrays.copyOf(empty, length);
}
|
Returns a new array of the given length with the same type as a reference array.
@param reference any array of the desired type
@param length the length of the new array
|
java
|
android/guava/src/com/google/common/collect/Platform.java
| 99
|
[
"reference",
"length"
] | true
| 2
| 6.96
|
google/guava
| 51,352
|
javadoc
| false
|
|
normalizeSelector
|
function normalizeSelector(selector: string): [string, boolean] {
const hasAmpersand = selector.split(/\s*,\s*/).find((token) => token == SELF_TOKEN)
? true
: false;
if (hasAmpersand) {
selector = selector.replace(SELF_TOKEN_REGEX, '');
}
// Note: the :enter and :leave aren't normalized here since those
// selectors are filled in at runtime during timeline building
selector = selector
.replace(/@\*/g, NG_TRIGGER_SELECTOR)
.replace(/@\w+/g, (match) => NG_TRIGGER_SELECTOR + '-' + match.slice(1))
.replace(/:animating/g, NG_ANIMATING_SELECTOR);
return [selector, hasAmpersand];
}
|
@license
Copyright Google LLC All Rights Reserved.
Use of this source code is governed by an MIT-style license that can be
found in the LICENSE file at https://angular.dev/license
|
typescript
|
packages/animations/browser/src/dsl/animation_ast_builder.ts
| 588
|
[
"selector"
] | true
| 3
| 6
|
angular/angular
| 99,544
|
jsdoc
| false
|
|
masked_values
|
def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True):
"""
Mask using floating point equality.
Return a MaskedArray, masked where the data in array `x` are approximately
equal to `value`, determined using `isclose`. The default tolerances for
`masked_values` are the same as those for `isclose`.
For integer types, exact equality is used, in the same way as
`masked_equal`.
The fill_value is set to `value` and the mask is set to ``nomask`` if
possible.
Parameters
----------
x : array_like
Array to mask.
value : float
Masking value.
rtol, atol : float, optional
Tolerance parameters passed on to `isclose`
copy : bool, optional
Whether to return a copy of `x`.
shrink : bool, optional
Whether to collapse a mask full of False to ``nomask``.
Returns
-------
result : MaskedArray
The result of masking `x` where approximately equal to `value`.
See Also
--------
masked_where : Mask where a condition is met.
masked_equal : Mask where equal to a given value (integers).
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> x = np.array([1, 1.1, 2, 1.1, 3])
>>> ma.masked_values(x, 1.1)
masked_array(data=[1.0, --, 2.0, --, 3.0],
mask=[False, True, False, True, False],
fill_value=1.1)
Note that `mask` is set to ``nomask`` if possible.
>>> ma.masked_values(x, 2.1)
masked_array(data=[1. , 1.1, 2. , 1.1, 3. ],
mask=False,
fill_value=2.1)
Unlike `masked_equal`, `masked_values` can perform approximate equalities.
>>> ma.masked_values(x, 2.1, atol=1e-1)
masked_array(data=[1.0, 1.1, --, 1.1, 3.0],
mask=[False, False, True, False, False],
fill_value=2.1)
"""
xnew = filled(x, value)
if np.issubdtype(xnew.dtype, np.floating):
mask = np.isclose(xnew, value, atol=atol, rtol=rtol)
else:
mask = umath.equal(xnew, value)
ret = masked_array(xnew, mask=mask, copy=copy, fill_value=value)
if shrink:
ret.shrink_mask()
return ret
|
Mask using floating point equality.
Return a MaskedArray, masked where the data in array `x` are approximately
equal to `value`, determined using `isclose`. The default tolerances for
`masked_values` are the same as those for `isclose`.
For integer types, exact equality is used, in the same way as
`masked_equal`.
The fill_value is set to `value` and the mask is set to ``nomask`` if
possible.
Parameters
----------
x : array_like
Array to mask.
value : float
Masking value.
rtol, atol : float, optional
Tolerance parameters passed on to `isclose`
copy : bool, optional
Whether to return a copy of `x`.
shrink : bool, optional
Whether to collapse a mask full of False to ``nomask``.
Returns
-------
result : MaskedArray
The result of masking `x` where approximately equal to `value`.
See Also
--------
masked_where : Mask where a condition is met.
masked_equal : Mask where equal to a given value (integers).
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> x = np.array([1, 1.1, 2, 1.1, 3])
>>> ma.masked_values(x, 1.1)
masked_array(data=[1.0, --, 2.0, --, 3.0],
mask=[False, True, False, True, False],
fill_value=1.1)
Note that `mask` is set to ``nomask`` if possible.
>>> ma.masked_values(x, 2.1)
masked_array(data=[1. , 1.1, 2. , 1.1, 3. ],
mask=False,
fill_value=2.1)
Unlike `masked_equal`, `masked_values` can perform approximate equalities.
>>> ma.masked_values(x, 2.1, atol=1e-1)
masked_array(data=[1.0, 1.1, --, 1.1, 3.0],
mask=[False, False, True, False, False],
fill_value=2.1)
|
python
|
numpy/ma/core.py
| 2,316
|
[
"x",
"value",
"rtol",
"atol",
"copy",
"shrink"
] | false
| 4
| 7.44
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
getAllParameters
|
@Override
public CacheInvocationParameter[] getAllParameters(@Nullable Object... values) {
if (this.allParameterDetails.size() != values.length) {
throw new IllegalStateException("Values mismatch, operation has " +
this.allParameterDetails.size() + " parameter(s) but got " + values.length + " value(s)");
}
List<CacheInvocationParameter> result = new ArrayList<>();
for (int i = 0; i < this.allParameterDetails.size(); i++) {
result.add(this.allParameterDetails.get(i).toCacheInvocationParameter(values[i]));
}
return result.toArray(new CacheInvocationParameter[0]);
}
|
Construct a new {@code AbstractJCacheOperation}.
@param methodDetails the {@link CacheMethodDetails} related to the cached method
@param cacheResolver the cache resolver to resolve regular caches
|
java
|
spring-context-support/src/main/java/org/springframework/cache/jcache/interceptor/AbstractJCacheOperation.java
| 109
|
[] | true
| 3
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
asBiConsumer
|
@SuppressWarnings("unchecked")
public static <T, U> BiConsumer<T, U> asBiConsumer(final Method method) {
return asInterfaceInstance(BiConsumer.class, method);
}
|
Produces a {@link BiConsumer} for a given <em>consumer</em> Method. For example, a classic setter method (as opposed
to a fluent setter). You call the BiConsumer with two arguments: (1) the object receiving the method call, and (2)
the method argument.
@param <T> the type of the first argument to the operation: The type containing the Method.
@param <U> the type of the second argument to the operation: The type of the method argument.
@param method the method to invoke.
@return a correctly-typed wrapper for the given target.
|
java
|
src/main/java/org/apache/commons/lang3/function/MethodInvokers.java
| 83
|
[
"method"
] | true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
on_kill
|
def on_kill(self) -> None:
"""
Cancel the submitted job run.
Note: this method will not run in deferrable mode.
"""
if self.job_id:
self.log.info("Stopping job run with jobId - %s", self.job_id)
response = self.hook.conn.cancel_job_run(applicationId=self.application_id, jobRunId=self.job_id)
http_status_code = (
response.get("ResponseMetadata", {}).get("HTTPStatusCode") if response else None
)
if http_status_code is None or http_status_code != 200:
self.log.error("Unable to request query cancel on EMR Serverless. Exiting")
return
self.log.info(
"Polling EMR Serverless for query with id %s to reach final state",
self.job_id,
)
# This should be replaced with a boto waiter when available.
waiter(
get_state_callable=self.hook.conn.get_job_run,
get_state_args={
"applicationId": self.application_id,
"jobRunId": self.job_id,
},
parse_response=["jobRun", "state"],
desired_state=EmrServerlessHook.JOB_TERMINAL_STATES,
failure_states=set(),
object_type="job",
action="cancelled",
countdown=self.waiter_delay * self.waiter_max_attempts,
check_interval_seconds=self.waiter_delay,
)
|
Cancel the submitted job run.
Note: this method will not run in deferrable mode.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/operators/emr.py
| 1,298
|
[
"self"
] |
None
| true
| 5
| 7.04
|
apache/airflow
| 43,597
|
unknown
| false
|
andThen
|
default TriConsumer<T, U, V> andThen(final TriConsumer<? super T, ? super U, ? super V> after) {
Objects.requireNonNull(after);
return (t, u, v) -> {
accept(t, u, v);
after.accept(t, u, v);
};
}
|
Returns a composed {@link TriConsumer} that performs, in sequence, this operation followed by the {@code after}
operation. If performing either operation throws an exception, it is relayed to the caller of the composed
operation. If performing this operation throws an exception, the {@code after} operation will not be performed.
@param after the operation to perform after this operation.
@return a composed {@link TriConsumer} that performs in sequence this operation followed by the {@code after}
operation.
@throws NullPointerException if {@code after} is null.
|
java
|
src/main/java/org/apache/commons/lang3/function/TriConsumer.java
| 62
|
[
"after"
] | true
| 1
| 6.56
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
dtype_is_implied
|
def dtype_is_implied(dtype):
"""
Determine if the given dtype is implied by the representation
of its values.
Parameters
----------
dtype : dtype
Data type
Returns
-------
implied : bool
True if the dtype is implied by the representation of its values.
Examples
--------
>>> import numpy as np
>>> np._core.arrayprint.dtype_is_implied(int)
True
>>> np.array([1, 2, 3], int)
array([1, 2, 3])
>>> np._core.arrayprint.dtype_is_implied(np.int8)
False
>>> np.array([1, 2, 3], np.int8)
array([1, 2, 3], dtype=int8)
"""
dtype = np.dtype(dtype)
if format_options.get()['legacy'] <= 113 and dtype.type == np.bool:
return False
# not just void types can be structured, and names are not part of the repr
if dtype.names is not None:
return False
# should care about endianness *unless size is 1* (e.g., int8, bool)
if not dtype.isnative:
return False
return dtype.type in _typelessdata
|
Determine if the given dtype is implied by the representation
of its values.
Parameters
----------
dtype : dtype
Data type
Returns
-------
implied : bool
True if the dtype is implied by the representation of its values.
Examples
--------
>>> import numpy as np
>>> np._core.arrayprint.dtype_is_implied(int)
True
>>> np.array([1, 2, 3], int)
array([1, 2, 3])
>>> np._core.arrayprint.dtype_is_implied(np.int8)
False
>>> np.array([1, 2, 3], np.int8)
array([1, 2, 3], dtype=int8)
|
python
|
numpy/_core/arrayprint.py
| 1,518
|
[
"dtype"
] | false
| 5
| 7.36
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
cdf
|
@Override
public double cdf(double x) {
AVLGroupTree values = summary;
if (values.isEmpty()) {
return Double.NaN;
}
if (values.size() == 1) {
if (x < values.mean(values.first())) return 0;
if (x > values.mean(values.first())) return 1;
return 0.5;
} else {
if (x < min) {
return 0;
}
if (Double.compare(x, min) == 0) {
// we have one or more centroids == x, treat them as one
// dw will accumulate the weight of all of the centroids at x
double dw = 0;
for (Centroid value : values) {
if (Double.compare(value.mean(), x) != 0) {
break;
}
dw += value.count();
}
return dw / 2.0 / size();
}
if (x > max) {
return 1;
}
if (Double.compare(x, max) == 0) {
int ix = values.last();
double dw = 0;
while (ix != NIL && Double.compare(values.mean(ix), x) == 0) {
dw += values.count(ix);
ix = values.prev(ix);
}
long n = size();
return (n - dw / 2.0) / n;
}
// we scan a across the centroids
Iterator<Centroid> it = values.iterator();
Centroid a = it.next();
// b is the look-ahead to the next centroid
Centroid b = it.next();
// initially, we set left width equal to right width
double left = (b.mean() - a.mean()) / 2;
double right = left;
// scan to next to last element
double r = 0;
while (it.hasNext()) {
if (x < a.mean() + right) {
double value = (r + a.count() * interpolate(x, a.mean() - left, a.mean() + right)) / count;
return Math.max(value, 0.0);
}
r += a.count();
a = b;
left = right;
b = it.next();
right = (b.mean() - a.mean()) / 2;
}
// for the last element, assume right width is same as left
if (x < a.mean() + right) {
return (r + a.count() * interpolate(x, a.mean() - right, a.mean() + right)) / count;
}
return 1;
}
}
|
@param x the value at which the CDF should be evaluated
@return the approximate fraction of all samples that were less than or equal to x.
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/AVLTreeDigest.java
| 215
|
[
"x"
] | true
| 15
| 8.16
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
usesAccessEntries
|
boolean usesAccessEntries() {
return usesAccessQueue() || recordsAccess();
}
|
Creates a new, empty map with the specified strategy, initial capacity and concurrency level.
|
java
|
android/guava/src/com/google/common/cache/LocalCache.java
| 368
|
[] | true
| 2
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
override
|
CacheOverride override();
|
Override caching to temporarily enable it. Once caching is no longer needed the
returned {@link CacheOverride} should be closed to restore previous cache settings.
@return a {@link CacheOverride}
@since 3.5.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyCaching.java
| 62
|
[] |
CacheOverride
| true
| 1
| 6.48
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
resolveListSetting
|
private <V> List<V> resolveListSetting(String key, Function<String, V> parser, List<V> defaultValue) {
try {
final List<String> list = getSettingAsList(expandSettingKey(key));
if (list == null || list.isEmpty()) {
return defaultValue;
}
return list.stream().map(parser).collect(Collectors.toList());
} catch (RuntimeException e) {
throw e;
} catch (Exception e) {
throw new SslConfigException("cannot retrieve setting [" + settingPrefix + key + "]", e);
}
}
|
Resolve all necessary configuration settings, and load a {@link SslConfiguration}.
@param basePath The base path to use for any settings that represent file paths. Typically points to the Elasticsearch
configuration directory.
@throws SslConfigException For any problems with the configuration, or with loading the required SSL classes.
|
java
|
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java
| 477
|
[
"key",
"parser",
"defaultValue"
] | true
| 5
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
_find
|
def _find(self, name, domain=None, path=None):
"""Requests uses this method internally to get cookie values.
If there are conflicting cookies, _find arbitrarily chooses one.
See _find_no_duplicates if you want an exception thrown if there are
conflicting cookies.
:param name: a string containing name of cookie
:param domain: (optional) string containing domain of cookie
:param path: (optional) string containing path of cookie
:return: cookie.value
"""
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
return cookie.value
raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}")
|
Requests uses this method internally to get cookie values.
If there are conflicting cookies, _find arbitrarily chooses one.
See _find_no_duplicates if you want an exception thrown if there are
conflicting cookies.
:param name: a string containing name of cookie
:param domain: (optional) string containing domain of cookie
:param path: (optional) string containing path of cookie
:return: cookie.value
|
python
|
src/requests/cookies.py
| 366
|
[
"self",
"name",
"domain",
"path"
] | false
| 7
| 7.12
|
psf/requests
| 53,586
|
sphinx
| false
|
|
createElementSet
|
@Override
Set<E> createElementSet() {
Set<E> delegate = countMap.keySet();
return new ForwardingSet<E>() {
@Override
protected Set<E> delegate() {
return delegate;
}
@Override
public boolean contains(@Nullable Object object) {
return object != null && Collections2.safeContains(delegate, object);
}
@Override
public boolean containsAll(Collection<?> collection) {
return standardContainsAll(collection);
}
@Override
public boolean remove(@Nullable Object object) {
return object != null && Collections2.safeRemove(delegate, object);
}
@Override
public boolean removeAll(Collection<?> c) {
return standardRemoveAll(c);
}
};
}
|
Sets the number of occurrences of {@code element} to {@code newCount}, but only if the count is
currently {@code expectedOldCount}. If {@code element} does not appear in the multiset exactly
{@code expectedOldCount} times, no changes will be made.
@return {@code true} if the change was successful. This usually indicates that the multiset has
been modified, but not always: in the case that {@code expectedOldCount == newCount}, the
method will return {@code true} if the condition was met.
@throws IllegalArgumentException if {@code expectedOldCount} or {@code newCount} is negative
|
java
|
android/guava/src/com/google/common/collect/ConcurrentHashMultiset.java
| 471
|
[] | true
| 3
| 7.92
|
google/guava
| 51,352
|
javadoc
| false
|
|
count
|
public int count(String node) {
Deque<NetworkClient.InFlightRequest> queue = requests.get(node);
return queue == null ? 0 : queue.size();
}
|
Return the number of in-flight requests directed at the given node
@param node The node
@return The request count.
|
java
|
clients/src/main/java/org/apache/kafka/clients/InFlightRequests.java
| 107
|
[
"node"
] | true
| 2
| 8.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
getParser
|
public OptionParser getParser() {
if (this.parser == null) {
this.parser = new OptionParser();
options();
}
return this.parser;
}
|
Create a new {@link OptionHandler} instance with an argument processor.
@param argumentProcessor strategy that can be used to manipulate arguments before
they are used.
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/options/OptionHandler.java
| 86
|
[] |
OptionParser
| true
| 2
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
makeGetter
|
/**
 * Chooses the fastest safe {@link LittleEndianBytes} implementation for the
 * current platform, falling back to the pure-Java version whenever the
 * Unsafe-based one cannot be trusted (it is known to crash 32-bit Android).
 */
static LittleEndianBytes makeGetter() {
    try {
        /*
         * Unsafe.getLong() is only known to be safe on x86_64, so the
         * optimization is restricted to that architecture; pick the variant
         * matching the native byte order. In the future, Unsafe.getLong()
         * should be replaced by ByteBuffer.getLong().
         */
        if (Objects.equals(System.getProperty("os.arch"), "amd64")) {
            boolean nativeLittleEndian = ByteOrder.nativeOrder().equals(ByteOrder.LITTLE_ENDIAN);
            return nativeLittleEndian
                ? UnsafeByteArray.UNSAFE_LITTLE_ENDIAN
                : UnsafeByteArray.UNSAFE_BIG_ENDIAN;
        }
    } catch (Throwable t) {
        // Deliberately swallow *everything*: any failure just means we fall back.
    }
    return JavaLittleEndianBytes.INSTANCE;
}
|
Fallback implementation for when VarHandle and Unsafe are not available in our current
environment.
|
java
|
android/guava/src/com/google/common/hash/LittleEndianByteArray.java
| 257
|
[] |
LittleEndianBytes
| true
| 4
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
verifyOutputDirectory
|
/**
 * Guards against generating the Prisma client into the `@prisma/client`
 * package directory itself: when `directory` holds a package.json whose name
 * matches the client package, an explanatory error with a fix suggestion
 * (based on the schema's `output` declaration) is thrown.
 */
async function verifyOutputDirectory(directory: string, datamodel: string, schemaPath: string) {
  let pkgJson: string
  try {
    pkgJson = await fs.readFile(path.join(directory, 'package.json'), 'utf8')
  } catch (e) {
    // No package.json in the target directory means it is safe to write into.
    if (e.code === 'ENOENT') {
      return
    }
    throw e
  }
  const { name } = JSON.parse(pkgJson)
  if (name !== clientPkg.name) {
    return
  }
  const lines = [`Generating client into ${bold(directory)} is not allowed.`]
  lines.push('This package is used by `prisma generate` and overwriting its content is dangerous.')
  lines.push('')
  lines.push('Suggestion:')
  const outputDeclaration = findOutputPathDeclaration(datamodel)
  if (outputDeclaration && outputDeclaration.content.includes(clientPkg.name)) {
    // Point the user at the exact schema line to change.
    const outputLine = outputDeclaration.content
    lines.push(`In ${bold(schemaPath)} replace:`)
    lines.push('')
    lines.push(`${dim(outputDeclaration.lineNumber)} ${replacePackageName(outputLine, red(clientPkg.name))}`)
    lines.push('with')
    lines.push(`${dim(outputDeclaration.lineNumber)} ${replacePackageName(outputLine, green('.prisma/client'))}`)
  } else {
    lines.push(`Generate client into ${bold(replacePackageName(directory, green('.prisma/client')))} instead`)
  }
  lines.push('')
  lines.push("You won't need to change your imports.")
  lines.push('Imports from `@prisma/client` will be automatically forwarded to `.prisma/client`')
  throw new Error(lines.join('\n'))
}
|
Verifies that the client is not being generated into the {@code @prisma/client}
package directory itself; throws an explanatory error with a fix suggestion if it is.
|
typescript
|
packages/client-generator-js/src/generateClient.ts
| 550
|
[
"directory",
"datamodel",
"schemaPath"
] | false
| 7
| 6.08
|
prisma/prisma
| 44,834
|
jsdoc
| true
|
|
parse
|
def parse(self, declarations_str: str) -> Iterator[tuple[str, str]]:
    """
    Yield ``(property, value)`` pairs from a CSS declarations string.

    In a future version may generate parsed tokens from tinycss/tinycss2.

    Parameters
    ----------
    declarations_str : str
        Semicolon-separated CSS declarations, e.g. ``"color: red; margin: 0"``.
    """
    for decl in declarations_str.split(";"):
        if not decl.strip():
            # Skip empty segments (e.g. produced by trailing semicolons).
            continue
        prop, sep, val = decl.partition(":")
        if not sep:
            # No colon at all: malformed declaration, warn and drop it.
            warnings.warn(
                f"Ill-formatted attribute: expected a colon in {decl!r}",
                CSSWarning,
                stacklevel=find_stack_level(),
            )
            continue
        # TODO: don't lowercase case sensitive parts of values (strings)
        yield prop.strip().lower(), val.strip().lower()
|
Generates (prop, value) pairs from declarations.
In a future version may generate parsed tokens from tinycss/tinycss2
Parameters
----------
declarations_str : str
|
python
|
pandas/io/formats/css.py
| 401
|
[
"self",
"declarations_str"
] |
Iterator[tuple[str, str]]
| true
| 5
| 6.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
beginningOffsets
|
/**
 * Get the first available offset for each of the given partitions, waiting at
 * most {@code timeout}. Pure delegation to the underlying consumer; does not
 * change the consumer's current position.
 */
@Override
public Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, Duration timeout) {
    return delegate.beginningOffsets(partitions, timeout);
}
|
Get the first offset for the given partitions.
<p>
This method does not change the current consumer position of the partitions.
@see #seekToBeginning(Collection)
@param partitions the partitions to get the earliest offsets
@param timeout The maximum amount of time to await retrieval of the beginning offsets
@return The earliest available offsets for the given partitions, and it will return empty map if zero timeout is provided
@throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details
@throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic(s). See the exception for more details
@throws org.apache.kafka.common.errors.TimeoutException if the offset metadata could not be fetched before
expiration of the passed timeout
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
| 1,653
|
[
"partitions",
"timeout"
] | true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
reverse
|
/**
 * Reverses, in place, the elements of {@code array} between
 * {@code startIndexInclusive} (clamped up to 0) and {@code endIndexExclusive}
 * (clamped down to the array length). A {@code null} array is a no-op.
 *
 * @param array the array to reverse, may be {@code null}
 * @param startIndexInclusive start index; negative values are promoted to 0
 * @param endIndexExclusive exclusive end index; values past the end are clamped
 */
public static void reverse(final boolean[] array, final int startIndexInclusive, final int endIndexExclusive) {
    if (array == null) {
        return;
    }
    // Walk inwards from both clamped ends, swapping pairs as we go.
    for (int lo = Math.max(startIndexInclusive, 0), hi = Math.min(array.length, endIndexExclusive) - 1; lo < hi; lo++, hi--) {
        final boolean tmp = array[hi];
        array[hi] = array[lo];
        array[lo] = tmp;
    }
}
|
Reverses the order of the given array in the given range.
<p>
This method does nothing for a {@code null} input array.
</p>
@param array
the array to reverse, may be {@code null}.
@param startIndexInclusive
the starting index. Undervalue (<0) is promoted to 0, overvalue (>array.length) results in no
change.
@param endIndexExclusive
elements up to endIndex-1 are reversed in the array. Undervalue (< start index) results in no
change. Overvalue (>array.length) is demoted to array length.
@since 3.2
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 6,364
|
[
"array",
"startIndexInclusive",
"endIndexExclusive"
] |
void
| true
| 3
| 6.72
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
setxor1d
|
def setxor1d(ar1, ar2, assume_unique=False):
    """
    Set exclusive-or of 1-D arrays with unique elements.

    The output is always a masked array. See `numpy.setxor1d` for more details.

    See Also
    --------
    numpy.setxor1d : Equivalent function for ndarrays.

    Examples
    --------
    >>> import numpy as np
    >>> ar1 = np.ma.array([1, 2, 3, 2, 4])
    >>> ar2 = np.ma.array([2, 3, 5, 7, 5])
    >>> np.ma.setxor1d(ar1, ar2)
    masked_array(data=[1, 4, 5, 7],
                 mask=False,
           fill_value=999999)
    """
    if not assume_unique:
        ar1 = unique(ar1)
        ar2 = unique(ar2)

    combined = ma.concatenate((ar1, ar2), axis=None)
    if combined.size == 0:
        return combined
    combined.sort()
    filled = combined.filled()
    # After sorting, an element appears in exactly one input iff it differs
    # from both sorted neighbours; the sentinel True values cover the two ends.
    boundary = ma.concatenate(([True], (filled[1:] != filled[:-1]), [True]))
    keep = (boundary[1:] == boundary[:-1])
    return combined[keep]
|
Set exclusive-or of 1-D arrays with unique elements.
The output is always a masked array. See `numpy.setxor1d` for more details.
See Also
--------
numpy.setxor1d : Equivalent function for ndarrays.
Examples
--------
>>> import numpy as np
>>> ar1 = np.ma.array([1, 2, 3, 2, 4])
>>> ar2 = np.ma.array([2, 3, 5, 7, 5])
>>> np.ma.setxor1d(ar1, ar2)
masked_array(data=[1, 4, 5, 7],
mask=False,
fill_value=999999)
|
python
|
numpy/ma/extras.py
| 1,350
|
[
"ar1",
"ar2",
"assume_unique"
] | false
| 3
| 6.64
|
numpy/numpy
| 31,054
|
unknown
| false
|
|
notifyMonitorPointAdded
|
/**
 * Notifies the attached monitor, if one is present, that a point was added,
 * passing a description of the current step and the current point list.
 */
protected void notifyMonitorPointAdded() {
    if (monitor == null) {
        return;
    }
    monitor.pointAdded(description + ".addPoint()", getCurrentPoints());
}
|
Implementation of this interface will receive calls with internal data at each step of the
simplification algorithm. This is of use for debugging complex cases, as well as gaining insight
into the way the algorithm works. Data provided in the callback includes:
<ul>
<li>String description of current process</li>
<li>List of points in current simplification</li>
<li>Last point removed from the simplification</li>
</ul>
mode, list of points representing the current linked-list of internal nodes used for
triangulation, and a list of triangles so far created by the algorithm.
|
java
|
libs/geo/src/main/java/org/elasticsearch/geometry/simplify/StreamingGeometrySimplifier.java
| 272
|
[] |
void
| true
| 2
| 6.56
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
_create_range_input_gen_fn
|
def _create_range_input_gen_fn(
    base_gen_fn: Callable[[torch.Tensor], torch.Tensor],
    dim_index: int,
    range_start: int,
    range_end: Union[int, float],
    range_upper_bound: int,
) -> Callable[[torch.Tensor], torch.Tensor]:
    """Create input generator that modifies target dimension to top of range.

    Wraps ``base_gen_fn`` so that the tensor it produces is resized along
    ``dim_index`` to the top of the benchmarked range: ``range_end`` when it is
    finite, otherwise ``range_upper_bound``.

    range_upper_bound: Size to use for benchmarking when range_end is unbounded.
    Default is DEFAULT_RANGE_UPPER_BOUND = 65536
    """
    # NOTE(review): range_start is currently unused here — confirm intentional.
    from torch._inductor.ir import get_fill_order
    from torch._inductor.kernel.flex.common import construct_strides

    # Finite upper end wins; an unbounded range falls back to the cap.
    target_dim = range_upper_bound if range_end == float("inf") else int(range_end)

    def constrained_gen_fn(fake_tensor: torch.Tensor) -> torch.Tensor:
        # Generate the base tensor, then rebuild it with the target dim size.
        result = base_gen_fn(fake_tensor)
        shape = list(result.shape)
        shape[dim_index] = target_dim
        # We modified the shape of the result, so we need to recalculate the strides
        # TODO: Refine this to a better way to more directly preserve strides
        fill_order = get_fill_order(result.stride(), shape_env=None)
        new_stride = construct_strides(shape, fill_order)
        # Minimal backing storage for the strided view: last reachable index + 1.
        storage_size = sum((s - 1) * st for s, st in zip(shape, new_stride)) + 1
        storage = torch.randn(storage_size, dtype=result.dtype, device=result.device)
        return storage.as_strided(shape, tuple(new_stride))

    return constrained_gen_fn
|
Create input generator that modifies target dimension to top of range.
range_upper_bound: Size to use for benchmarking when range_end is unbounded.
Default is DEFAULT_RANGE_UPPER_BOUND = 65536
|
python
|
torch/_inductor/kernel/custom_op.py
| 357
|
[
"base_gen_fn",
"dim_index",
"range_start",
"range_end",
"range_upper_bound"
] |
Callable[[torch.Tensor], torch.Tensor]
| true
| 2
| 6.72
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
createRetryPolicy
|
/**
 * Create a {@link RetryPolicy} based on the state of this instance: the
 * configured properties are copied onto a {@link RetryPolicy.Builder}, and a
 * user-supplied factory (when set) gets the final say over how the builder is
 * turned into a policy.
 *
 * @return the configured {@link RetryPolicy}
 */
public RetryPolicy createRetryPolicy() {
    PropertyMapper map = PropertyMapper.get();
    RetryPolicy.Builder builder = RetryPolicy.builder();
    map.from(this::getExceptionIncludes).to(builder::includes);
    map.from(this::getExceptionExcludes).to(builder::excludes);
    map.from(this::getExceptionPredicate).to(builder::predicate);
    map.from(this::getMaxRetries).to(builder::maxRetries);
    map.from(this::getDelay).to(builder::delay);
    map.from(this::getJitter).to(builder::jitter);
    map.from(this::getMultiplier).to(builder::multiplier);
    map.from(this::getMaxDelay).to(builder::maxDelay);
    // A custom factory may post-process or replace the built policy.
    return (this.factory != null) ? this.factory.apply(builder) : builder.build();
}
|
Create a {@link RetryPolicy} based on the state of this instance.
@return a {@link RetryPolicy}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/retry/RetryPolicySettings.java
| 81
|
[] |
RetryPolicy
| true
| 2
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
hasUserSuppliedInterfaces
|
/**
 * Determines whether any of the proxied interfaces was supplied by the user,
 * as opposed to being framework infrastructure ({@code SpringProxy} or an
 * advisor-introduced interface).
 *
 * @return {@code true} if at least one user-specified interface is present
 */
boolean hasUserSuppliedInterfaces() {
    for (Class<?> candidate : this.interfaces) {
        boolean infrastructure =
                SpringProxy.class.isAssignableFrom(candidate) || isAdvisorIntroducedInterface(candidate);
        if (!infrastructure) {
            return true;
        }
    }
    return false;
}
|
Determines whether the proxy must expose any interfaces beyond the
framework-introduced ones ({@code SpringProxy} and advisor-introduced interfaces).
@return {@code true} if at least one user-specified interface is present;
{@code false} otherwise
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/AdvisedSupport.java
| 268
|
[] | true
| 3
| 8.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
getMaybeTokenStringContent
|
/**
 * Returns the token's content when it is a plain string, otherwise an empty
 * string (Prism token content may also be a nested Token or Token[]).
 */
function getMaybeTokenStringContent(token: Token): string {
  return typeof token.content === 'string' ? token.content : '';
}
|
Returns the token's content when it is a plain string, otherwise an empty
string (Prism token content may also be a nested Token or Token[]).
@param token the Prism token to inspect
|
typescript
|
packages/grafana-prometheus/src/language_utils.ts
| 288
|
[
"token"
] | true
| 2
| 6.72
|
grafana/grafana
| 71,362
|
jsdoc
| false
|
|
toBooleanObject
|
/**
 * Converts a String to a Boolean: {@code "true"}/{@code "yes"}/{@code "y"}/
 * {@code "t"}/{@code "on"}/{@code "1"} (case insensitive) map to
 * {@link Boolean#TRUE}; {@code "false"}/{@code "no"}/{@code "n"}/{@code "f"}/
 * {@code "off"}/{@code "0"} map to {@link Boolean#FALSE}; anything else —
 * including {@code null} — maps to {@code null}. Implemented with per-length
 * character comparisons instead of {@code equalsIgnoreCase} for speed; see
 * the benchmark notes below.
 *
 * @param str the String to check; upper and lower case are treated as the same
 * @return the Boolean value of the string, {@code null} if no match or {@code null} input
 */
public static Boolean toBooleanObject(final String str) {
    // Previously used equalsIgnoreCase, which was fast for interned 'true'.
    // Non interned 'true' matched 15 times slower.
    //
    // Optimization provides same performance as before for interned 'true'.
    // Similar performance for null, 'false', and other strings not length 2/3/4.
    // 'true'/'TRUE' match 4 times slower, 'tRUE'/'True' 7 times slower.
    if (str == TRUE) {
        // Identity fast path for the interned constant "true".
        return Boolean.TRUE;
    }
    if (str == null) {
        return null;
    }
    switch (str.length()) {
        case 1: {
            // Single characters: y/t/1 -> TRUE, n/f/0 -> FALSE.
            final char ch0 = str.charAt(0);
            if (ch0 == 'y' || ch0 == 'Y' ||
                ch0 == 't' || ch0 == 'T' ||
                ch0 == '1') {
                return Boolean.TRUE;
            }
            if (ch0 == 'n' || ch0 == 'N' ||
                ch0 == 'f' || ch0 == 'F' ||
                ch0 == '0') {
                return Boolean.FALSE;
            }
            break;
        }
        case 2: {
            // "on" -> TRUE, "no" -> FALSE.
            final char ch0 = str.charAt(0);
            final char ch1 = str.charAt(1);
            if ((ch0 == 'o' || ch0 == 'O') &&
                (ch1 == 'n' || ch1 == 'N')) {
                return Boolean.TRUE;
            }
            if ((ch0 == 'n' || ch0 == 'N') &&
                (ch1 == 'o' || ch1 == 'O')) {
                return Boolean.FALSE;
            }
            break;
        }
        case 3: {
            // "yes" -> TRUE, "off" -> FALSE.
            final char ch0 = str.charAt(0);
            final char ch1 = str.charAt(1);
            final char ch2 = str.charAt(2);
            if ((ch0 == 'y' || ch0 == 'Y') &&
                (ch1 == 'e' || ch1 == 'E') &&
                (ch2 == 's' || ch2 == 'S')) {
                return Boolean.TRUE;
            }
            if ((ch0 == 'o' || ch0 == 'O') &&
                (ch1 == 'f' || ch1 == 'F') &&
                (ch2 == 'f' || ch2 == 'F')) {
                return Boolean.FALSE;
            }
            break;
        }
        case 4: {
            // "true" (any casing) -> TRUE.
            final char ch0 = str.charAt(0);
            final char ch1 = str.charAt(1);
            final char ch2 = str.charAt(2);
            final char ch3 = str.charAt(3);
            if ((ch0 == 't' || ch0 == 'T') &&
                (ch1 == 'r' || ch1 == 'R') &&
                (ch2 == 'u' || ch2 == 'U') &&
                (ch3 == 'e' || ch3 == 'E')) {
                return Boolean.TRUE;
            }
            break;
        }
        case 5: {
            // "false" (any casing) -> FALSE.
            final char ch0 = str.charAt(0);
            final char ch1 = str.charAt(1);
            final char ch2 = str.charAt(2);
            final char ch3 = str.charAt(3);
            final char ch4 = str.charAt(4);
            if ((ch0 == 'f' || ch0 == 'F') &&
                (ch1 == 'a' || ch1 == 'A') &&
                (ch2 == 'l' || ch2 == 'L') &&
                (ch3 == 's' || ch3 == 'S') &&
                (ch4 == 'e' || ch4 == 'E')) {
                return Boolean.FALSE;
            }
            break;
        }
        default:
            break;
    }
    return null;
}
|
Converts a String to a Boolean.
<p>{@code 'true'}, {@code 'on'}, {@code 'y'}, {@code 't'}, {@code 'yes'}
or {@code '1'} (case insensitive) will return {@code true}.
{@code 'false'}, {@code 'off'}, {@code 'n'}, {@code 'f'}, {@code 'no'}
or {@code '0'} (case insensitive) will return {@code false}.
Otherwise, {@code null} is returned.</p>
<p>NOTE: This method may return {@code null} and may throw a {@link NullPointerException}
if unboxed to a {@code boolean}.</p>
<pre>
// Case is not significant
BooleanUtils.toBooleanObject(null) = null
BooleanUtils.toBooleanObject("true") = Boolean.TRUE
BooleanUtils.toBooleanObject("T") = Boolean.TRUE // i.e. T[RUE]
BooleanUtils.toBooleanObject("false") = Boolean.FALSE
BooleanUtils.toBooleanObject("f") = Boolean.FALSE // i.e. f[alse]
BooleanUtils.toBooleanObject("No") = Boolean.FALSE
BooleanUtils.toBooleanObject("n") = Boolean.FALSE // i.e. n[o]
BooleanUtils.toBooleanObject("on") = Boolean.TRUE
BooleanUtils.toBooleanObject("ON") = Boolean.TRUE
BooleanUtils.toBooleanObject("off") = Boolean.FALSE
BooleanUtils.toBooleanObject("oFf") = Boolean.FALSE
BooleanUtils.toBooleanObject("yes") = Boolean.TRUE
BooleanUtils.toBooleanObject("Y") = Boolean.TRUE // i.e. Y[ES]
BooleanUtils.toBooleanObject("1") = Boolean.TRUE
BooleanUtils.toBooleanObject("0") = Boolean.FALSE
BooleanUtils.toBooleanObject("blue") = null
BooleanUtils.toBooleanObject("true ") = null // trailing space (too long)
BooleanUtils.toBooleanObject("ono") = null // does not match on or no
</pre>
@param str the String to check; upper and lower case are treated as the same
@return the Boolean value of the string, {@code null} if no match or {@code null} input
|
java
|
src/main/java/org/apache/commons/lang3/BooleanUtils.java
| 734
|
[
"str"
] |
Boolean
| true
| 51
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
visitBinaryExpression
|
/**
 * Visits a BinaryExpression during the ES2015 transform.
 *
 * Destructuring assignments are flattened; comma expressions get each side
 * visited with the appropriate "result used/unused" visitor; all other binary
 * expressions are visited generically.
 *
 * @param node The binary expression to visit.
 * @param expressionResultIsUnused Indicates the result of the expression is
 * unused by the parent (i.e., the left side of a comma or the expression of an
 * ExpressionStatement).
 */
function visitBinaryExpression(node: BinaryExpression, expressionResultIsUnused: boolean): Expression {
    // If we are here it is because this is a destructuring assignment.
    if (isDestructuringAssignment(node)) {
        return flattenDestructuringAssignment(
            node,
            visitor,
            context,
            FlattenLevel.All,
            !expressionResultIsUnused,
        );
    }
    if (node.operatorToken.kind === SyntaxKind.CommaToken) {
        // The left of a comma is always unused; the right is unused only when
        // the whole expression's result is unused.
        return factory.updateBinaryExpression(
            node,
            Debug.checkDefined(visitNode(node.left, visitorWithUnusedExpressionResult, isExpression)),
            node.operatorToken,
            Debug.checkDefined(visitNode(node.right, expressionResultIsUnused ? visitorWithUnusedExpressionResult : visitor, isExpression)),
        );
    }
    return visitEachChild(node, visitor, context);
}
|
Visits a BinaryExpression that contains a destructuring assignment.
@param node A BinaryExpression node.
@param expressionResultIsUnused Indicates the result of an expression is unused by the parent node (i.e., the left side of a comma or the
expression of an `ExpressionStatement`).
|
typescript
|
src/compiler/transformers/es2015.ts
| 2,683
|
[
"node",
"expressionResultIsUnused"
] | true
| 4
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
clear
|
/**
 * Removes all elements and resets the linked-list bookkeeping: both list
 * endpoints are set back to {@code ENDPOINT} and the used portions of the
 * predecessor/successor link arrays are zeroed before delegating to the
 * superclass.
 */
@Override
public void clear() {
    // Arrays were never allocated, so there is nothing to reset.
    if (needsAllocArrays()) {
        return;
    }
    this.firstEntry = ENDPOINT;
    this.lastEntry = ENDPOINT;
    // Either both arrays are null or neither is, but we check both to satisfy the nullness checker.
    if (predecessor != null && successor != null) {
        Arrays.fill(predecessor, 0, size(), 0);
        Arrays.fill(successor, 0, size(), 0);
    }
    super.clear();
}
|
Removes all elements and resets the linked-list endpoints and the predecessor/successor link arrays.
|
java
|
android/guava/src/com/google/common/collect/CompactLinkedHashSet.java
| 240
|
[] |
void
| true
| 4
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
toString
|
/**
 * Returns the original, unparsed header value this media type was created
 * from.
 */
@Override
public String toString() {
    return originalHeaderValue;
}
|
Returns the original, unparsed header value this media type was created from.
@return the raw header value
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/ParsedMediaType.java
| 152
|
[] |
String
| true
| 1
| 6.32
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
declaresMultipleFieldsInMacro
|
static bool declaresMultipleFieldsInMacro(const RecordDecl *Decl,
const SourceManager &SrcMgr) {
SourceLocation LastMacroLoc;
for (const auto &Field : Decl->fields()) {
if (!Field->getLocation().isMacroID())
continue;
SourceLocation MacroLoc = SrcMgr.getExpansionLoc(Field->getLocation());
if (LastMacroLoc.isValid() && MacroLoc == LastMacroLoc)
return true;
LastMacroLoc = MacroLoc;
}
return false;
}
|
\returns true if any single macro expansion declares more than one field of \p Decl.
|
cpp
|
clang-tools-extra/clang-reorder-fields/ReorderFieldsAction.cpp
| 69
|
[] | true
| 4
| 7.04
|
llvm/llvm-project
| 36,021
|
doxygen
| false
|
|
errorMessage
|
/**
 * Logs the given message at error level, substituting a generic fallback when
 * it is {@code null}.
 *
 * @param message the error message to log, may be {@code null}
 * @return {@code true} if a non-null message was supplied
 */
protected boolean errorMessage(@Nullable String message) {
    if (message == null) {
        Log.error("Unexpected error");
        return false;
    }
    Log.error(message);
    return true;
}
|
Logs the given message (or a generic fallback when {@code null}) at error level.
@param message the error message to log, may be {@code null}
@return {@code true} if a non-null message was supplied
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/CommandRunner.java
| 263
|
[
"message"
] | true
| 2
| 6.8
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
tryParseParenthesizedArrowFunctionExpression
|
/**
 * Attempts to parse a parenthesized arrow function expression.
 *
 * Returns undefined when lookahead rules this out entirely; parses
 * unconditionally when it is definitely an arrow function; otherwise
 * speculatively parses via tryParse and backtracks on ambiguity.
 *
 * @param allowReturnTypeInArrowFunction Whether a return type annotation is
 * permitted on the arrow function in this context.
 */
function tryParseParenthesizedArrowFunctionExpression(allowReturnTypeInArrowFunction: boolean): Expression | undefined {
    const triState = isParenthesizedArrowFunctionExpression();
    if (triState === Tristate.False) {
        // It's definitely not a parenthesized arrow function expression.
        return undefined;
    }
    // If we definitely have an arrow function, then we can just parse one, not requiring a
    // following => or { token. Otherwise, we *might* have an arrow function. Try to parse
    // it out, but don't allow any ambiguity, and return 'undefined' if this could be an
    // expression instead.
    return triState === Tristate.True ?
        parseParenthesizedArrowFunctionExpression(/*allowAmbiguity*/ true, /*allowReturnTypeInArrowFunction*/ true) :
        tryParse(() => parsePossibleParenthesizedArrowFunctionExpression(allowReturnTypeInArrowFunction));
}
|
Attempts to parse a parenthesized arrow function expression. Returns undefined
when lookahead rules this out; parses unconditionally when it is definitely an
arrow function; otherwise speculatively parses and backtracks on ambiguity.
@param allowReturnTypeInArrowFunction Whether a return type annotation is permitted in this context.
|
typescript
|
src/compiler/parser.ts
| 5,216
|
[
"allowReturnTypeInArrowFunction"
] | true
| 3
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
get
|
def get(
    cls,
    key: str,
    default_var: Any = __NO_DEFAULT_SENTINEL,
    deserialize_json: bool = False,
    team_name: str | None = None,
) -> Any:
    """
    Get a value for an Airflow Variable Key.

    Resolution order: inside a Task SDK execution context the lookup is
    forwarded (with a deprecation warning) to the SDK Variable; otherwise the
    value is fetched from the configured secrets backends. Retrieved values are
    registered with the secret masker so they are redacted from logs.

    :param key: Variable Key
    :param default_var: Default value of the Variable if the Variable doesn't exist
    :param deserialize_json: Deserialize the value to a Python dict
    :param team_name: Team name associated to the task trying to access the variable (if any)
    """
    # TODO: This is not the best way of having compat, but it's "better than erroring" for now. This still
    # means SQLA etc is loaded, but we can't avoid that unless/until we add import shims as a big
    # back-compat layer

    # If this is set it means we are in some kind of execution context (Task, Dag Parse or Triggerer perhaps)
    # and should use the Task SDK API server path
    if hasattr(sys.modules.get("airflow.sdk.execution_time.task_runner"), "SUPERVISOR_COMMS"):
        warnings.warn(
            "Using Variable.get from `airflow.models` is deprecated."
            "Please use `get` on Variable from sdk(`airflow.sdk.Variable`) instead",
            DeprecationWarning,
            stacklevel=1,
        )
        from airflow.sdk import Variable as TaskSDKVariable

        # Only pass a default through when the caller actually supplied one.
        default_kwargs = {} if default_var is cls.__NO_DEFAULT_SENTINEL else {"default": default_var}
        var_val = TaskSDKVariable.get(key, deserialize_json=deserialize_json, **default_kwargs)
        if isinstance(var_val, str):
            # Hide the value from task logs.
            mask_secret(var_val, key)
        return var_val

    if team_name and not conf.getboolean("core", "multi_team"):
        raise ValueError(
            "Multi-team mode is not configured in the Airflow environment but the task trying to access the variable belongs to a team"
        )

    var_val = Variable.get_variable_from_secrets(key=key, team_name=team_name)
    if var_val is None:
        # Fall back to the caller's default (when given) before failing.
        if default_var is not cls.__NO_DEFAULT_SENTINEL:
            return default_var
        raise KeyError(f"Variable {key} does not exist")
    if deserialize_json:
        obj = json.loads(var_val)
        mask_secret(obj, key)
        return obj
    mask_secret(var_val, key)
    return var_val
|
Get a value for an Airflow Variable Key.
:param key: Variable Key
:param default_var: Default value of the Variable if the Variable doesn't exist
:param deserialize_json: Deserialize the value to a Python dict
:param team_name: Team name associated to the task trying to access the variable (if any)
|
python
|
airflow-core/src/airflow/models/variable.py
| 137
|
[
"cls",
"key",
"default_var",
"deserialize_json",
"team_name"
] |
Any
| true
| 9
| 6.88
|
apache/airflow
| 43,597
|
sphinx
| false
|
isNumericSpace
|
/**
 * Tests whether the CharSequence contains only Unicode digits or the space
 * character ({@code ' '}). A {@code null} input returns {@code false}; an
 * empty input returns {@code true}. A decimal point is not a digit.
 *
 * @param cs the CharSequence to check, may be null
 * @return {@code true} if only digits or spaces are present, and the input is non-null
 */
public static boolean isNumericSpace(final CharSequence cs) {
    if (cs == null) {
        return false;
    }
    final int length = cs.length();
    for (int index = 0; index < length; index++) {
        final char current = cs.charAt(index);
        final boolean allowed = current == ' ' || Character.isDigit(current);
        if (!allowed) {
            return false;
        }
    }
    return true;
}
|
Tests if the CharSequence contains only Unicode digits or space ({@code ' '}). A decimal point is not a Unicode digit and returns false.
<p>
{@code null} will return {@code false}. An empty CharSequence (length()=0) will return {@code true}.
</p>
<pre>
StringUtils.isNumericSpace(null) = false
StringUtils.isNumericSpace("") = true
StringUtils.isNumericSpace(" ") = true
StringUtils.isNumericSpace("123") = true
StringUtils.isNumericSpace("12 3") = true
StringUtils.isNumericSpace("\u0967\u0968\u0969") = true
StringUtils.isNumericSpace("\u0967\u0968 \u0969") = true
StringUtils.isNumericSpace("ab2c") = false
StringUtils.isNumericSpace("12-3") = false
StringUtils.isNumericSpace("12.3") = false
</pre>
@param cs the CharSequence to check, may be null.
@return {@code true} if only contains digits or space, and is non-null.
@since 3.0 Changed signature from isNumericSpace(String) to isNumericSpace(CharSequence)
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 3,750
|
[
"cs"
] | true
| 5
| 7.44
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
handshakeWrap
|
/**
 * Performs the SSLEngine WRAP step of the handshake: wraps pending handshake
 * bytes into {@code netWriteBuffer} and optionally flushes them to the
 * channel.
 *
 * @param doWrite whether to flush the wrapped bytes to the channel
 * @return the result of {@code SSLEngine.wrap}
 * @throws IOException if flushing to the underlying channel fails
 */
private SSLEngineResult handshakeWrap(boolean doWrite) throws IOException {
    log.trace("SSLHandshake handshakeWrap {}", channelId);
    // A non-empty network buffer means previously wrapped data was never
    // flushed; wrapping again would clobber it.
    if (netWriteBuffer.hasRemaining())
        throw new IllegalStateException("handshakeWrap called with netWriteBuffer not empty");
    //this should never be called with a network buffer that contains data
    //so we can clear it here.
    netWriteBuffer.clear();
    SSLEngineResult result;
    try {
        result = sslEngine.wrap(ByteUtils.EMPTY_BUF, netWriteBuffer);
    } finally {
        //prepare the results to be written
        netWriteBuffer.flip();
    }
    handshakeStatus = result.getHandshakeStatus();
    // Run any tasks the engine scheduled before continuing the handshake.
    if (result.getStatus() == SSLEngineResult.Status.OK &&
        result.getHandshakeStatus() == HandshakeStatus.NEED_TASK) {
        handshakeStatus = runDelegatedTasks();
    }
    if (doWrite) flush(netWriteBuffer);
    return result;
}
|
Performs the WRAP function
@param doWrite boolean
@return SSLEngineResult
@throws IOException
|
java
|
clients/src/main/java/org/apache/kafka/common/network/SslTransportLayer.java
| 485
|
[
"doWrite"
] |
SSLEngineResult
| true
| 5
| 7.12
|
apache/kafka
| 31,560
|
javadoc
| false
|
compute_class_weight
|
def compute_class_weight(class_weight, *, classes, y, sample_weight=None):
    """Estimate class weights for unbalanced datasets.

    Parameters
    ----------
    class_weight : dict, "balanced" or None
        If "balanced", class weights will be given by
        `n_samples / (n_classes * np.bincount(y))` or their weighted equivalent if
        `sample_weight` is provided.
        If a dictionary is given, keys are classes and values are corresponding class
        weights.
        If `None` is given, the class weights will be uniform.

    classes : ndarray
        Array of the classes occurring in the data, as given by
        `np.unique(y_org)` with `y_org` the original class labels.

    y : array-like of shape (n_samples,)
        Array of original class labels per sample.

    sample_weight : array-like of shape (n_samples,), default=None
        Array of weights that are assigned to individual samples. Only used when
        `class_weight='balanced'`.

    Returns
    -------
    class_weight_vect : ndarray of shape (n_classes,)
        Array with `class_weight_vect[i]` the weight for i-th class.

    References
    ----------
    The "balanced" heuristic is inspired by
    Logistic Regression in Rare Events Data, King, Zen, 2001.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.class_weight import compute_class_weight
    >>> y = [1, 1, 1, 1, 0, 0]
    >>> compute_class_weight(class_weight="balanced", classes=np.unique(y), y=y)
    array([1.5 , 0.75])
    """
    if set(y) - set(classes):
        raise ValueError("classes should include all valid labels that can be in y")

    if class_weight is None or len(class_weight) == 0:
        # No weighting requested: every class gets weight 1.
        return np.ones(classes.shape[0], dtype=np.float64, order="C")

    if class_weight == "balanced":
        # Deferred to the only branch that needs it: avoids circular imports
        # and keeps the None/dict paths free of the sklearn import cost.
        from sklearn.preprocessing import LabelEncoder

        # Weight each class by the inverse of its (possibly sample-weighted)
        # frequency in y.
        le = LabelEncoder()
        y_ind = le.fit_transform(y)
        if not all(np.isin(classes, le.classes_)):
            raise ValueError("classes should have valid labels that are in y")

        sample_weight = _check_sample_weight(sample_weight, y)
        weighted_class_counts = np.bincount(y_ind, weights=sample_weight)
        recip_freq = weighted_class_counts.sum() / (
            len(le.classes_) * weighted_class_counts
        )
        return recip_freq[le.transform(classes)]

    # User-defined dictionary: start from uniform weights, overwrite the
    # classes the caller listed, and track any classes left untouched.
    weight = np.ones(classes.shape[0], dtype=np.float64, order="C")
    unweighted_classes = []
    for i, c in enumerate(classes):
        if c in class_weight:
            weight[i] = class_weight[c]
        else:
            unweighted_classes.append(c)

    n_weighted_classes = len(classes) - len(unweighted_classes)
    if unweighted_classes and n_weighted_classes != len(class_weight):
        unweighted_classes_user_friendly_str = np.array(unweighted_classes).tolist()
        raise ValueError(
            f"The classes, {unweighted_classes_user_friendly_str}, are not in"
            " class_weight"
        )
    return weight
|
Estimate class weights for unbalanced datasets.
Parameters
----------
class_weight : dict, "balanced" or None
If "balanced", class weights will be given by
`n_samples / (n_classes * np.bincount(y))` or their weighted equivalent if
`sample_weight` is provided.
If a dictionary is given, keys are classes and values are corresponding class
weights.
If `None` is given, the class weights will be uniform.
classes : ndarray
Array of the classes occurring in the data, as given by
`np.unique(y_org)` with `y_org` the original class labels.
y : array-like of shape (n_samples,)
Array of original class labels per sample.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual samples. Only used when
`class_weight='balanced'`.
Returns
-------
class_weight_vect : ndarray of shape (n_classes,)
Array with `class_weight_vect[i]` the weight for i-th class.
References
----------
The "balanced" heuristic is inspired by
Logistic Regression in Rare Events Data, King, Zen, 2001.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.class_weight import compute_class_weight
>>> y = [1, 1, 1, 1, 0, 0]
>>> compute_class_weight(class_weight="balanced", classes=np.unique(y), y=y)
array([1.5 , 0.75])
|
python
|
sklearn/utils/class_weight.py
| 22
|
[
"class_weight",
"classes",
"y",
"sample_weight"
] | false
| 12
| 7.28
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
convertToOriginalForm
|
/**
 * Converts the given element to its original (as-written) form via
 * {@code convertElement}, keeping characters that are underscores or valid
 * element characters (checked against their lower-cased form).
 */
private CharSequence convertToOriginalForm(CharSequence element) {
    return convertElement(element, false,
            (ch, i) -> ch == '_' || ElementsParser.isValidChar(Character.toLowerCase(ch), i));
}
|
Converts the given element to its original (as-written) form, keeping
characters that are underscores or valid element characters.
@param element the element to convert
@return the element in its original form
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyName.java
| 175
|
[
"element"
] |
CharSequence
| true
| 2
| 7.84
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
firstEntryIndex
|
/**
 * Returns the index of the first entry in iteration order, or {@code -1}
 * when the collection is empty; a non-empty collection's first entry is
 * always at index 0 here.
 */
int firstEntryIndex() {
    if (isEmpty()) {
        return -1;
    }
    return 0;
}
|
Returns the index of the first entry in iteration order, or -1 if the collection is empty.
|
java
|
android/guava/src/com/google/common/collect/CompactHashMap.java
| 604
|
[] | true
| 2
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
build
|
/**
 * Closes this builder and returns the finished records buffer.
 *
 * @return the built log buffer
 * @throws IllegalStateException if the batch was previously aborted
 */
public MemoryRecords build() {
    if (aborted) {
        throw new IllegalStateException("Attempting to build an aborted record batch");
    }
    close();
    return builtRecords;
}
|
Close this builder and return the resulting buffer.
@return The built log buffer
|
java
|
clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java
| 238
|
[] |
MemoryRecords
| true
| 2
| 8.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
addRestParameterIfNeeded
|
function addRestParameterIfNeeded(statements: Statement[], node: FunctionLikeDeclaration, inConstructorWithSynthesizedSuper: boolean): boolean {
const prologueStatements: Statement[] = [];
const parameter = lastOrUndefined(node.parameters);
if (!shouldAddRestParameter(parameter, inConstructorWithSynthesizedSuper)) {
return false;
}
// `declarationName` is the name of the local declaration for the parameter.
// TODO(rbuckton): Does this need to be parented?
const declarationName = parameter.name.kind === SyntaxKind.Identifier ? setParent(setTextRange(factory.cloneNode(parameter.name), parameter.name), parameter.name.parent) : factory.createTempVariable(/*recordTempVariable*/ undefined);
setEmitFlags(declarationName, EmitFlags.NoSourceMap);
// `expressionName` is the name of the parameter used in expressions.
const expressionName = parameter.name.kind === SyntaxKind.Identifier ? factory.cloneNode(parameter.name) : declarationName;
const restIndex = node.parameters.length - 1;
const temp = factory.createLoopVariable();
// var param = [];
prologueStatements.push(
setEmitFlags(
setTextRange(
factory.createVariableStatement(
/*modifiers*/ undefined,
factory.createVariableDeclarationList([
factory.createVariableDeclaration(
declarationName,
/*exclamationToken*/ undefined,
/*type*/ undefined,
factory.createArrayLiteralExpression([]),
),
]),
),
/*location*/ parameter,
),
EmitFlags.CustomPrologue,
),
);
// for (var _i = restIndex; _i < arguments.length; _i++) {
// param[_i - restIndex] = arguments[_i];
// }
const forStatement = factory.createForStatement(
setTextRange(
factory.createVariableDeclarationList([
factory.createVariableDeclaration(temp, /*exclamationToken*/ undefined, /*type*/ undefined, factory.createNumericLiteral(restIndex)),
]),
parameter,
),
setTextRange(
factory.createLessThan(
temp,
factory.createPropertyAccessExpression(factory.createIdentifier("arguments"), "length"),
),
parameter,
),
setTextRange(factory.createPostfixIncrement(temp), parameter),
factory.createBlock([
startOnNewLine(
setTextRange(
factory.createExpressionStatement(
factory.createAssignment(
factory.createElementAccessExpression(
expressionName,
restIndex === 0
? temp
: factory.createSubtract(temp, factory.createNumericLiteral(restIndex)),
),
factory.createElementAccessExpression(factory.createIdentifier("arguments"), temp),
),
),
/*location*/ parameter,
),
),
]),
);
setEmitFlags(forStatement, EmitFlags.CustomPrologue);
startOnNewLine(forStatement);
prologueStatements.push(forStatement);
if (parameter.name.kind !== SyntaxKind.Identifier) {
// do the actual destructuring of the rest parameter if necessary
prologueStatements.push(
setEmitFlags(
setTextRange(
factory.createVariableStatement(
/*modifiers*/ undefined,
factory.createVariableDeclarationList(
flattenDestructuringBinding(parameter, visitor, context, FlattenLevel.All, expressionName),
),
),
parameter,
),
EmitFlags.CustomPrologue,
),
);
}
insertStatementsAfterCustomPrologue(statements, prologueStatements);
return true;
}
|
Adds statements to the body of a function-like node if it contains a rest parameter.
@param statements The statements for the new function body.
@param node A function-like node.
@param inConstructorWithSynthesizedSuper A value indicating whether the parameter is
part of a constructor declaration with a
synthesized call to `super`
|
typescript
|
src/compiler/transformers/es2015.ts
| 2,042
|
[
"statements",
"node",
"inConstructorWithSynthesizedSuper"
] | true
| 6
| 6.48
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.