function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
repeat
|
def repeat(a, repeats, axis=None):
    """
    Repeat each element of an array after itself.

    Parameters
    ----------
    a : array_like
        Input array.
    repeats : int or array of ints
        The number of repetitions for each element.  `repeats` is
        broadcast to fit the shape of the given axis.
    axis : int, optional
        The axis along which to repeat values.  By default, the input
        array is flattened and a flat output array is returned.

    Returns
    -------
    repeated_array : ndarray
        Output array which has the same shape as `a`, except along the
        given axis.

    See Also
    --------
    tile : Tile an array.
    unique : Find the unique elements of an array.

    Examples
    --------
    >>> import numpy as np
    >>> np.repeat(3, 4)
    array([3, 3, 3, 3])
    >>> x = np.array([[1,2],[3,4]])
    >>> np.repeat(x, 2)
    array([1, 1, 2, 2, 3, 3, 4, 4])
    >>> np.repeat(x, 3, axis=1)
    array([[1, 1, 1, 2, 2, 2],
           [3, 3, 3, 4, 4, 4]])
    >>> np.repeat(x, [1, 2], axis=0)
    array([[1, 2],
           [3, 4],
           [3, 4]])
    """
    # Dispatch to the input's own `repeat` method (or the generic
    # fallback for non-ndarray inputs) via the shared helper.
    method_name = 'repeat'
    return _wrapfunc(a, method_name, repeats, axis=axis)
|
Repeat each element of an array after itself.
Parameters
----------
a : array_like
Input array.
repeats : int or array of ints
The number of repetitions for each element. `repeats` is broadcasted
to fit the shape of the given axis.
axis : int, optional
The axis along which to repeat values. By default, use the
flattened input array, and return a flat output array.
Returns
-------
repeated_array : ndarray
Output array which has the same shape as `a`, except along
the given axis.
See Also
--------
tile : Tile an array.
unique : Find the unique elements of an array.
Examples
--------
>>> import numpy as np
>>> np.repeat(3, 4)
array([3, 3, 3, 3])
>>> x = np.array([[1,2],[3,4]])
>>> np.repeat(x, 2)
array([1, 1, 2, 2, 3, 3, 4, 4])
>>> np.repeat(x, 3, axis=1)
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> np.repeat(x, [1, 2], axis=0)
array([[1, 2],
[3, 4],
[3, 4]])
|
python
|
numpy/_core/fromnumeric.py
| 438
|
[
"a",
"repeats",
"axis"
] | false
| 1
| 6.48
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
put
|
/**
 * Appends {@code value} to the end of this array.
 *
 * @param value the value to append
 * @return this array, for call chaining
 */
public JSONArray put(long value) {
    values.add(value); // autoboxed to Long
    return this;       // fluent API: allow chained puts
}
|
Appends {@code value} to the end of this array.
@param value the value
@return this array.
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONArray.java
| 165
|
[
"value"
] |
JSONArray
| true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
legmulx
|
def legmulx(c):
    """Multiply a Legendre series by x.

    Multiply the Legendre series `c` by x, where x is the independent
    variable.

    Parameters
    ----------
    c : array_like
        1-D array of Legendre series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the result of the multiplication.

    See Also
    --------
    legadd, legsub, legmul, legdiv, legpow

    Notes
    -----
    The multiplication uses the recursion relationship for Legendre
    polynomials in the form

    .. math::
        xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1)

    Examples
    --------
    >>> from numpy.polynomial import legendre as L
    >>> L.legmulx([1,2,3])
    array([ 0.66666667, 2.2, 1.33333333, 1.8]) # may vary
    """
    # Normalize to a trimmed 1-D coefficient array.
    [c] = pu.as_series([c])
    # The zero series multiplied by x is still the zero series.
    if len(c) == 1 and c[0] == 0:
        return c

    out = np.empty(len(c) + 1, dtype=c.dtype)
    # `c[0] * 0` (rather than plain 0) keeps the output dtype consistent.
    out[0] = c[0] * 0
    out[1] = c[0]
    for deg in range(1, len(c)):
        # Apply x*P_deg = ((deg+1)*P_{deg+1} + deg*P_{deg-1}) / (2*deg + 1).
        denom = 2 * deg + 1
        out[deg + 1] = (c[deg] * (deg + 1)) / denom
        out[deg - 1] += (c[deg] * deg) / denom
    return out
|
Multiply a Legendre series by x.
Multiply the Legendre series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
See Also
--------
legadd, legsub, legmul, legdiv, legpow
Notes
-----
The multiplication uses the recursion relationship for Legendre
polynomials in the form
.. math::
xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1)
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> L.legmulx([1,2,3])
array([ 0.66666667, 2.2, 1.33333333, 1.8]) # may vary
|
python
|
numpy/polynomial/legendre.py
| 408
|
[
"c"
] | false
| 4
| 7.68
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
createLogContext
|
/**
 * Creates a {@code LogContext} whose message prefix identifies the admin client.
 *
 * @param clientId the admin client id embedded in the log prefix
 * @return the log context
 */
static LogContext createLogContext(String clientId) {
    // Prefix every log line with the client id so log output can be correlated.
    String prefix = "[AdminClient clientId=" + clientId + "] ";
    return new LogContext(prefix);
}
|
Creates a {@code LogContext} whose message prefix identifies the admin client.
@param clientId the admin client id embedded in the log prefix
@return the log context.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
| 597
|
[
"clientId"
] |
LogContext
| true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
is_int64_dtype
|
def is_int64_dtype(arr_or_dtype) -> bool:
    """
    Check whether the provided array or dtype is of the int64 dtype.

    .. deprecated:: 2.1.0
        is_int64_dtype is deprecated and will be removed in a future
        version. Use dtype == np.int64 instead.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array or dtype is of the int64 dtype.

    See Also
    --------
    api.types.is_float_dtype : Check whether the provided array or dtype is of a
        float dtype.
    api.types.is_bool_dtype : Check whether the provided array or dtype is of a
        boolean dtype.
    api.types.is_object_dtype : Check whether an array-like or dtype is of the
        object dtype.
    numpy.int64 : Numpy's 64-bit integer type.

    Notes
    -----
    Depending on system architecture, the return value of `is_int64_dtype(
    int)` will be True if the OS uses 64-bit integers and False if the OS
    uses 32-bit integers.

    Examples
    --------
    >>> from pandas.api.types import is_int64_dtype
    >>> is_int64_dtype(np.int64)  # doctest: +SKIP
    True
    >>> is_int64_dtype(np.int32)  # doctest: +SKIP
    False
    >>> is_int64_dtype(np.uint64)  # unsigned  # doctest: +SKIP
    False
    >>> is_int64_dtype(np.array([1, 2], dtype=np.int64))  # doctest: +SKIP
    True
    """
    # GH#52564: this helper is deprecated — warn before delegating.
    deprecation_msg = (
        "is_int64_dtype is deprecated and will be removed in a future "
        "version. Use dtype == np.int64 instead."
    )
    warnings.warn(deprecation_msg, Pandas4Warning, stacklevel=2)
    return _is_dtype_type(arr_or_dtype, classes(np.int64))
|
Check whether the provided array or dtype is of the int64 dtype.
.. deprecated:: 2.1.0
is_int64_dtype is deprecated and will be removed in a future
version. Use dtype == np.int64 instead.
Parameters
----------
arr_or_dtype : array-like or dtype
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of the int64 dtype.
See Also
--------
api.types.is_float_dtype : Check whether the provided array or dtype is of a
float dtype.
api.types.is_bool_dtype : Check whether the provided array or dtype is of a
boolean dtype.
api.types.is_object_dtype : Check whether an array-like or dtype is of the
object dtype.
numpy.int64 : Numpy's 64-bit integer type.
Notes
-----
Depending on system architecture, the return value of `is_int64_dtype(
int)` will be True if the OS uses 64-bit integers and False if the OS
uses 32-bit integers.
Examples
--------
>>> from pandas.api.types import is_int64_dtype
>>> is_int64_dtype(str) # doctest: +SKIP
False
>>> is_int64_dtype(np.int32) # doctest: +SKIP
False
>>> is_int64_dtype(np.int64) # doctest: +SKIP
True
>>> is_int64_dtype("int8") # doctest: +SKIP
False
>>> is_int64_dtype("Int8") # doctest: +SKIP
False
>>> is_int64_dtype(pd.Int64Dtype) # doctest: +SKIP
True
>>> is_int64_dtype(float) # doctest: +SKIP
False
>>> is_int64_dtype(np.uint64) # unsigned # doctest: +SKIP
False
>>> is_int64_dtype(np.array(["a", "b"])) # doctest: +SKIP
False
>>> is_int64_dtype(np.array([1, 2], dtype=np.int64)) # doctest: +SKIP
True
>>> is_int64_dtype(pd.Index([1, 2.0])) # float # doctest: +SKIP
False
>>> is_int64_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned # doctest: +SKIP
False
|
python
|
pandas/core/dtypes/common.py
| 925
|
[
"arr_or_dtype"
] |
bool
| true
| 1
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
extractPropertiesFromApplication
|
/**
 * Extracts properties from the given application map by flattening it into
 * the target {@code Properties}. A {@code null} map is ignored.
 *
 * @param properties the properties to populate
 * @param map the source map, possibly {@code null}
 */
private void extractPropertiesFromApplication(Properties properties, @Nullable Map<String, Object> map) {
    // Nothing to extract when no application data was provided.
    if (map == null) {
        return;
    }
    flatten(properties, map, "");
}
|
Extracts properties from the given application map by flattening it into
the target {@code Properties}, ignoring a {@code null} map.
@param properties the properties to populate
@param map the source map, possibly {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/cloud/CloudFoundryVcapEnvironmentPostProcessor.java
| 169
|
[
"properties",
"map"
] |
void
| true
| 2
| 6.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
notEmpty
|
/**
 * Validates that the specified argument collection is neither {@code null}
 * nor empty; otherwise throws an exception with the specified message.
 *
 * <pre>Validate.notEmpty(myCollection, "The collection must not be empty");</pre>
 *
 * @param <T> the collection type
 * @param collection the collection to check
 * @param message the {@link String#format(String, Object...)} exception message if invalid
 * @param values the optional values for the formatted exception message
 * @return the validated collection (never {@code null}, for chaining)
 * @throws NullPointerException if the collection is {@code null}
 * @throws IllegalArgumentException if the collection is empty
 */
public static <T extends Collection<?>> T notEmpty(final T collection, final String message, final Object... values) {
    // Null is reported as NPE, emptiness as IAE — distinct contract violations.
    Objects.requireNonNull(collection, toSupplier(message, values));
    if (!collection.isEmpty()) {
        return collection;
    }
    throw new IllegalArgumentException(getMessage(message, values));
}
|
<p>Validates that the specified argument collection is neither {@code null}
nor a size of zero (no elements); otherwise throwing an exception
with the specified message.
<pre>Validate.notEmpty(myCollection, "The collection must not be empty");</pre>
@param <T> the collection type.
@param collection the collection to check, validated not null by this method.
@param message the {@link String#format(String, Object...)} exception message if invalid, not null.
@param values the optional values for the formatted exception message, null array not recommended.
@return the validated collection (never {@code null} method for chaining).
@throws NullPointerException if the collection is {@code null}.
@throws IllegalArgumentException if the collection is empty.
@see #notEmpty(Object[])
|
java
|
src/main/java/org/apache/commons/lang3/Validate.java
| 888
|
[
"collection",
"message"
] |
T
| true
| 2
| 7.44
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
checkUnsupportedConfigsPostProcess
|
/**
 * Checks configs that are unsupported by the configured group protocol,
 * delegating to the protocol-specific overload. The CLASSIC and CONSUMER
 * protocol names are mutually exclusive, so branch order does not matter.
 */
protected void checkUnsupportedConfigsPostProcess() {
    String protocol = getString(GROUP_PROTOCOL_CONFIG);
    if (GroupProtocol.CONSUMER.name().equalsIgnoreCase(protocol)) {
        checkUnsupportedConfigsPostProcess(GroupProtocol.CONSUMER, CONSUMER_PROTOCOL_UNSUPPORTED_CONFIGS);
    } else if (GroupProtocol.CLASSIC.name().equalsIgnoreCase(protocol)) {
        checkUnsupportedConfigsPostProcess(GroupProtocol.CLASSIC, CLASSIC_PROTOCOL_UNSUPPORTED_CONFIGS);
    }
}
|
A list of configuration keys not supported for CONSUMER protocol.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java
| 766
|
[] |
void
| true
| 3
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
_random_choice_csc
|
def _random_choice_csc(n_samples, classes, class_probability=None, random_state=None):
    """Generate a sparse random matrix given column class distributions

    Parameters
    ----------
    n_samples : int,
        Number of samples to draw in each column.
    classes : list of size n_outputs of arrays of size (n_classes,)
        List of classes for each column.
    class_probability : list of size n_outputs of arrays of \
        shape (n_classes,), default=None
        Class distribution of each column. If None, uniform distribution is
        assumed.
    random_state : int, RandomState instance or None, default=None
        Controls the randomness of the sampled classes.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    random_matrix : sparse csc matrix of size (n_samples, n_outputs)

    Raises
    ------
    ValueError
        If a column's classes are not integer-typed, if a probability
        array does not sum to one, or if a column's classes and
        probabilities have different lengths.
    """
    # CSC components assembled incrementally, one column per iteration.
    # "i" stores C ints; only the indices/data values, not the classes
    # themselves, are kept in these compact buffers.
    data = array.array("i")
    indices = array.array("i")
    indptr = array.array("i", [0])
    for j in range(len(classes)):
        # Only integer class labels are supported; normalize to int64.
        classes[j] = np.asarray(classes[j])
        if classes[j].dtype.kind != "i":
            raise ValueError("class dtype %s is not supported" % classes[j].dtype)
        classes[j] = classes[j].astype(np.int64, copy=False)
        # use uniform distribution if no class_probability is given
        if class_probability is None:
            class_prob_j = np.empty(shape=classes[j].shape[0])
            class_prob_j.fill(1 / classes[j].shape[0])
        else:
            class_prob_j = np.asarray(class_probability[j])
            if not np.isclose(np.sum(class_prob_j), 1.0):
                raise ValueError(
                    "Probability array at index {0} does not sum to one".format(j)
                )
            if class_prob_j.shape[0] != classes[j].shape[0]:
                raise ValueError(
                    "classes[{0}] (length {1}) and "
                    "class_probability[{0}] (length {2}) have "
                    "different length.".format(
                        j, classes[j].shape[0], class_prob_j.shape[0]
                    )
                )
        # If 0 is not present in the classes insert it with a probability 0.0
        # (zeros are implicit in the sparse result, so class 0 must exist).
        if 0 not in classes[j]:
            classes[j] = np.insert(classes[j], 0, 0)
            class_prob_j = np.insert(class_prob_j, 0, 0.0)
        # If there are nonzero classes choose randomly using class_probability
        # NOTE(review): the RNG is re-created from `random_state` on every
        # column iteration rather than hoisted out of the loop — presumably
        # intentional for reproducibility per column; confirm before changing.
        rng = check_random_state(random_state)
        if classes[j].shape[0] > 1:
            # Expected fraction of nonzero entries determines this column's nnz.
            index_class_0 = np.flatnonzero(classes[j] == 0).item()
            p_nonzero = 1 - class_prob_j[index_class_0]
            nnz = int(n_samples * p_nonzero)
            ind_sample = sample_without_replacement(
                n_population=n_samples, n_samples=nnz, random_state=random_state
            )
            indices.extend(ind_sample)
            # Normalize probabilities for the nonzero elements
            classes_j_nonzero = classes[j] != 0
            class_probability_nz = class_prob_j[classes_j_nonzero]
            class_probability_nz_norm = class_probability_nz / np.sum(
                class_probability_nz
            )
            # Inverse-CDF sampling: map uniform draws through the cumulative
            # distribution of the normalized nonzero-class probabilities.
            classes_ind = np.searchsorted(
                class_probability_nz_norm.cumsum(), rng.uniform(size=nnz)
            )
            data.extend(classes[j][classes_j_nonzero][classes_ind])
        indptr.append(len(indices))
    return sp.csc_matrix((data, indices, indptr), (n_samples, len(classes)), dtype=int)
|
Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of \
shape (n_classes,), default=None
Class distribution of each column. If None, uniform distribution is
assumed.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the sampled classes.
See :term:`Glossary <random_state>`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
|
python
|
sklearn/utils/random.py
| 17
|
[
"n_samples",
"classes",
"class_probability",
"random_state"
] | false
| 9
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
validatePositionsIfNeeded
|
/**
 * Validates positions for the assigned partitions that need it. Refreshes the
 * set of partitions to validate and, when that set is non-empty, sends
 * OffsetsForLeaderEpoch requests to validate their positions.
 */
void validatePositionsIfNeeded() {
    Map<TopicPartition, SubscriptionState.FetchPosition> toValidate =
            offsetFetcherUtils.refreshAndGetPartitionsToValidate();
    // Only issue validation requests when at least one partition requires it.
    if (!toValidate.isEmpty()) {
        sendOffsetsForLeaderEpochRequestsAndValidatePositions(toValidate);
    }
}
|
Validate positions for all assigned partitions for which a leader change has been detected.
This will generate OffsetsForLeaderEpoch requests for the partitions, with the known offset
epoch and current leader epoch. It will enqueue the generated requests, to be sent on the
next call to {@link #poll(long)}.
<p/>
When a response is received, positions are validated and, if a log truncation is detected, a
{@link LogTruncationException} will be saved in memory in cachedUpdatePositionsException, to be thrown on the
next call to this function.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManager.java
| 504
|
[] |
void
| true
| 2
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
formatPeriodISO
|
/**
 * Formats the time gap as a string using the ISO 8601 extended period format.
 *
 * @param startMillis the start of the duration to format
 * @param endMillis the end of the duration to format
 * @return the formatted duration, not null
 */
public static String formatPeriodISO(final long startMillis, final long endMillis) {
    // Rendered in the JVM's default time zone, without padding.
    final TimeZone zone = TimeZone.getDefault();
    return formatPeriod(startMillis, endMillis, ISO_EXTENDED_FORMAT_PATTERN, false, zone);
}
|
Formats the time gap as a string.
<p>The format used is the ISO 8601 period format.</p>
@param startMillis the start of the duration to format
@param endMillis the end of the duration to format
@return the formatted duration, not null
@throws IllegalArgumentException if startMillis is greater than endMillis
|
java
|
src/main/java/org/apache/commons/lang3/time/DurationFormatUtils.java
| 670
|
[
"startMillis",
"endMillis"
] |
String
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
_is_line_empty
|
def _is_line_empty(self, line: Sequence[Scalar]) -> bool:
"""
Check if a line is empty or not.
Parameters
----------
line : str, array-like
The line of data to check.
Returns
-------
boolean : Whether or not the line is empty.
"""
return not line or all(not x for x in line)
|
Check if a line is empty or not.
Parameters
----------
line : str, array-like
The line of data to check.
Returns
-------
boolean : Whether or not the line is empty.
|
python
|
pandas/io/parsers/python_parser.py
| 877
|
[
"self",
"line"
] |
bool
| true
| 2
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
skipKeyValueIterator
|
/**
 * Returns a closeable iterator over the records in this batch that may skip
 * key/value/header payloads for compressed batches.
 *
 * @param bufferSupplier supplies decompression buffers for compressed batches
 * @return a closeable iterator over the records; empty when the batch has no records
 */
@Override
public CloseableIterator<Record> skipKeyValueIterator(BufferSupplier bufferSupplier) {
    if (count() == 0) {
        return CloseableIterator.wrap(Collections.emptyIterator());
    }
    if (isCompressed()) {
        // Closeable so the caller (i.e. the log validator) can release resources
        // while avoiding the memory footprint of decompressing the full record
        // set ahead of time.
        return compressedIterator(bufferSupplier, true);
    }
    // For the uncompressed case, skipping key/value/headers is not worth it:
    // ByteBufferInputStream's skip() allocates a new byte array, making it
    // less efficient than simply reading the data.
    return uncompressedIterator();
}
|
Returns a closeable iterator over the records in this batch, skipping
key/value payloads for compressed batches where that is cheaper.
@param bufferSupplier supplies decompression buffers.
@return a closeable iterator over the records.
|
java
|
clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java
| 338
|
[
"bufferSupplier"
] | true
| 3
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
deduceBindMethod
|
/**
 * Deduce the {@code BindMethod} from the presence of a bind constructor.
 *
 * @param bindConstructor the discovered bind constructor, or {@code null}
 * @return value-object binding when a bind constructor exists, JavaBean binding otherwise
 */
private static org.springframework.boot.context.properties.bind.BindMethod deduceBindMethod(
        @Nullable Constructor<?> bindConstructor) {
    if (bindConstructor != null) {
        return VALUE_OBJECT_BIND_METHOD;
    }
    return JAVA_BEAN_BIND_METHOD;
}
|
Deduce the {@code BindMethod} that should be used for the given {@link Bindable}.
@param bindable the source bindable
@return the bind method to use
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/ConfigurationPropertiesBean.java
| 313
|
[
"bindConstructor"
] | true
| 2
| 7.68
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
partitionsToFetch
|
/**
 * Returns the partitions that are currently fetchable under the active
 * subscription. No additional filtering is applied beyond fetchability.
 *
 * @return the fetchable partitions
 */
private List<TopicPartition> partitionsToFetch() {
    // Accept every fetchable partition; the predicate filters nothing out.
    return subscriptions.fetchablePartitions(ignored -> true);
}
|
Returns the partitions that are currently fetchable under the active
subscription, with no additional filtering applied.
@return the fetchable partitions.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java
| 1,124
|
[] | true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
applyAsDouble
|
/**
 * Applies this function to the given arguments.
 *
 * @param t the first function argument
 * @param u the second function argument
 * @return the function result
 * @throws E thrown when the function fails
 */
double applyAsDouble(T t, U u) throws E;
|
Applies this function to the given arguments.
@param t the first function argument
@param u the second function argument
@return the function result
@throws E Thrown when the function fails.
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableToDoubleBiFunction.java
| 58
|
[
"t",
"u"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
indexSupportsIncludeFilters
|
/**
 * Determine whether the component index supports every configured include filter.
 *
 * @return {@code true} if all include filters are supported by the index,
 * {@code false} as soon as one is not
 */
private boolean indexSupportsIncludeFilters() {
    boolean allSupported = true;
    for (TypeFilter filter : this.includeFilters) {
        // A single unsupported filter makes the index unusable for this scan.
        if (!indexSupportsIncludeFilter(filter)) {
            allSupported = false;
            break;
        }
    }
    return allSupported;
}
|
Determine if the component index can be used by this instance.
@return {@code true} if the index is available and the configuration of this
instance is supported by it, {@code false} otherwise
@since 5.0
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/ClassPathScanningCandidateComponentProvider.java
| 330
|
[] | true
| 2
| 8.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
_find_no_duplicates
|
def _find_no_duplicates(self, name, domain=None, path=None):
"""Both ``__get_item__`` and ``get`` call this function: it's never
used elsewhere in Requests.
:param name: a string containing name of cookie
:param domain: (optional) string containing domain of cookie
:param path: (optional) string containing path of cookie
:raises KeyError: if cookie is not found
:raises CookieConflictError: if there are multiple cookies
that match name and optionally domain and path
:return: cookie.value
"""
toReturn = None
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
if toReturn is not None:
# if there are multiple cookies that meet passed in criteria
raise CookieConflictError(
f"There are multiple cookies with name, {name!r}"
)
# we will eventually return this as long as no cookie conflict
toReturn = cookie.value
if toReturn:
return toReturn
raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}")
|
Both ``__get_item__`` and ``get`` call this function: it's never
used elsewhere in Requests.
:param name: a string containing name of cookie
:param domain: (optional) string containing domain of cookie
:param path: (optional) string containing path of cookie
:raises KeyError: if cookie is not found
:raises CookieConflictError: if there are multiple cookies
that match name and optionally domain and path
:return: cookie.value
|
python
|
src/requests/cookies.py
| 386
|
[
"self",
"name",
"domain",
"path"
] | false
| 9
| 6.64
|
psf/requests
| 53,586
|
sphinx
| false
|
|
append
|
/**
 * Appends a CharSequence to this string builder.
 * Appending null will call {@link #appendNull()}.
 *
 * @param seq the CharSequence to append
 * @return {@code this} instance
 */
@Override
public StrBuilder append(final CharSequence seq) {
    // Null is rendered via the configured null text.
    if (seq == null) {
        return appendNull();
    }
    // Dispatch to the specialized overload for known CharSequence
    // implementations; everything else falls back to toString().
    if (seq instanceof StrBuilder) {
        return append((StrBuilder) seq);
    } else if (seq instanceof StringBuilder) {
        return append((StringBuilder) seq);
    } else if (seq instanceof StringBuffer) {
        return append((StringBuffer) seq);
    } else if (seq instanceof CharBuffer) {
        return append((CharBuffer) seq);
    }
    return append(seq.toString());
}
|
Appends a CharSequence to this string builder.
Appending null will call {@link #appendNull()}.
@param seq the CharSequence to append
@return {@code this} instance.
@since 3.0
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 473
|
[
"seq"
] |
StrBuilder
| true
| 6
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
print
|
/**
 * Print the specified duration in the specified style.
 * Delegates to the three-argument overload with no explicit unit.
 *
 * @param value the value to print
 * @param style the style to print in
 * @return the printed result
 */
public static String print(Duration value, DurationFormat.Style style) {
    return print(value, style, null);
}
|
Print the specified duration in the specified style.
@param value the value to print
@param style the style to print in
@return the printed result
|
java
|
spring-context/src/main/java/org/springframework/format/datetime/standard/DurationFormatterUtils.java
| 58
|
[
"value",
"style"
] |
String
| true
| 1
| 6.96
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
merge
|
def merge(self, other, inplace: bool = False):
    """
    Merge holiday calendars together.

    The caller's class rules take precedence; the merge is keyed on each
    holiday's name.

    Parameters
    ----------
    other : holiday calendar
        Calendar whose rules are merged with this calendar's.
    inplace : bool (default=False)
        If True set rule_table to holidays, else return array of Holidays
    """
    merged = self.merge_class(self, other)
    if not inplace:
        return merged
    # In-place: replace this calendar's rules and return None.
    self.rules = merged
|
Merge holiday calendars together. The caller's class
rules take precedence. The merge will be done
based on each holiday's name.
Parameters
----------
other : holiday calendar
inplace : bool (default=False)
If True set rule_table to holidays, else return array of Holidays
|
python
|
pandas/tseries/holiday.py
| 584
|
[
"self",
"other",
"inplace"
] | true
| 3
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
is_sequence
|
def is_sequence(obj: object) -> bool:
    """
    Check if the object is a sequence of objects.

    String types (``str``/``bytes``) are not counted as sequences here.

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    is_sequence : bool
        Whether `obj` is a sequence of objects.

    Examples
    --------
    >>> l = [1, 2, 3]
    >>>
    >>> is_sequence(l)
    True
    >>> is_sequence(iter(l))
    False
    """
    try:
        # Must be iterable AND sized: iterators have no len() and fail here.
        iter(obj)  # type: ignore[call-overload]
        len(obj)  # type: ignore[arg-type]
    except (TypeError, AttributeError):
        return False
    return not isinstance(obj, (str, bytes))
|
Check if the object is a sequence of objects.
String types are not included as sequences here.
Parameters
----------
obj : The object to check
Returns
-------
is_sequence : bool
Whether `obj` is a sequence of objects.
Examples
--------
>>> l = [1, 2, 3]
>>>
>>> is_sequence(l)
True
>>> is_sequence(iter(l))
False
|
python
|
pandas/core/dtypes/inference.py
| 452
|
[
"obj"
] |
bool
| true
| 1
| 7.44
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
send_email
|
def send_email(
    to: list[str] | Iterable[str],
    subject: str,
    html_content: str,
    files: list[str] | None = None,
    dryrun: bool = False,
    cc: str | Iterable[str] | None = None,
    bcc: str | Iterable[str] | None = None,
    mime_subtype: str = "mixed",
    mime_charset: str = "utf-8",
    conn_id: str | None = None,
    custom_headers: dict[str, Any] | None = None,
    **kwargs,
) -> None:
    """
    Send an email using the backend specified in the *EMAIL_BACKEND* configuration option.

    :param to: A list or iterable of email addresses to send the email to.
    :param subject: The subject of the email.
    :param html_content: The content of the email in HTML format.
    :param files: A list of paths to files to attach to the email.
    :param dryrun: If *True*, the email will not actually be sent. Default: *False*.
    :param cc: A string or iterable of strings containing email addresses to send a copy of the email to.
    :param bcc: A string or iterable of strings containing email addresses to send a
        blind carbon copy of the email to.
    :param mime_subtype: The subtype of the MIME message. Default: "mixed".
    :param mime_charset: The charset of the email. Default: "utf-8".
    :param conn_id: The connection ID to use for the backend. If not provided, the default connection
        specified in the *EMAIL_CONN_ID* configuration option will be used.
    :param custom_headers: A dictionary of additional headers to add to the MIME message.
        No validations are run on these values, and they should be able to be encoded.
    :param kwargs: Additional keyword arguments to pass to the backend.
    """
    # Resolve the backend callable and connection details from configuration;
    # an explicit conn_id takes precedence over the configured default.
    backend = conf.getimport("email", "EMAIL_BACKEND")
    backend_conn_id = conn_id or conf.get("email", "EMAIL_CONN_ID")
    from_email = conf.get("email", "from_email", fallback=None)

    # Normalize the recipients into a single comma-separated string.
    recipients = ", ".join(get_email_address_list(to))

    return backend(
        recipients,
        subject,
        html_content,
        files=files,
        dryrun=dryrun,
        cc=cc,
        bcc=bcc,
        mime_subtype=mime_subtype,
        mime_charset=mime_charset,
        conn_id=backend_conn_id,
        from_email=from_email,
        custom_headers=custom_headers,
        **kwargs,
    )
|
Send an email using the backend specified in the *EMAIL_BACKEND* configuration option.
:param to: A list or iterable of email addresses to send the email to.
:param subject: The subject of the email.
:param html_content: The content of the email in HTML format.
:param files: A list of paths to files to attach to the email.
:param dryrun: If *True*, the email will not actually be sent. Default: *False*.
:param cc: A string or iterable of strings containing email addresses to send a copy of the email to.
:param bcc: A string or iterable of strings containing email addresses to send a
blind carbon copy of the email to.
:param mime_subtype: The subtype of the MIME message. Default: "mixed".
:param mime_charset: The charset of the email. Default: "utf-8".
:param conn_id: The connection ID to use for the backend. If not provided, the default connection
specified in the *EMAIL_CONN_ID* configuration option will be used.
:param custom_headers: A dictionary of additional headers to add to the MIME message.
No validations are run on these values, and they should be able to be encoded.
:param kwargs: Additional keyword arguments to pass to the backend.
|
python
|
airflow-core/src/airflow/utils/email.py
| 39
|
[
"to",
"subject",
"html_content",
"files",
"dryrun",
"cc",
"bcc",
"mime_subtype",
"mime_charset",
"conn_id",
"custom_headers"
] |
None
| true
| 2
| 6.8
|
apache/airflow
| 43,597
|
sphinx
| false
|
getTarget
|
/**
 * Implementation of the abstract getTarget() method. Looks for a target held
 * in a ThreadLocal; if none is found, a new prototype instance is created and
 * bound to the current thread. No synchronization is required for the lookup.
 *
 * @return the thread-bound target instance
 */
@Override
public Object getTarget() throws BeansException {
    ++this.invocationCount;
    Object target = this.targetInThread.get();
    if (target != null) {
        // Fast path: reuse the instance already bound to this thread.
        ++this.hitCount;
        return target;
    }
    if (logger.isDebugEnabled()) {
        logger.debug("No target for prototype '" + this.targetBeanName + "' bound to thread: " +
                "creating one and binding it to thread '" + Thread.currentThread().getName() + "'");
    }
    // Create a fresh prototype instance and associate it with the ThreadLocal.
    target = newPrototypeInstance();
    this.targetInThread.set(target);
    // targetSet is shared across threads, so guard the mutation.
    synchronized (this.targetSet) {
        this.targetSet.add(target);
    }
    return target;
}
|
Implementation of abstract getTarget() method.
We look for a target held in a ThreadLocal. If we don't find one,
we create one and bind it to the thread. No synchronization is required.
|
java
|
spring-aop/src/main/java/org/springframework/aop/target/ThreadLocalTargetSource.java
| 83
|
[] |
Object
| true
| 3
| 7.04
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
nested
|
/**
 * Add pairs using nested naming (for example as used in ECS).
 *
 * @param <T> the item type
 * @param pairs callback to add all the pairs
 * @return a {@link BiConsumer} for use with the {@link JsonWriter}
 */
public <T> BiConsumer<T, BiConsumer<String, Object>> nested(Consumer<Pairs<T>> pairs) {
    // When pairs are excluded entirely, emit nothing.
    if (!this.include) {
        return none();
    }
    // Nested naming joins key segments with '.'.
    return new Pairs<>(joinWith("."), pairs)::nested;
}
|
Add pairs using nested naming (for example as used in ECS).
@param <T> the item type
@param pairs callback to add all the pairs
@return a {@link BiConsumer} for use with the {@link JsonWriter}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/structured/ContextPairs.java
| 80
|
[
"pairs"
] | true
| 2
| 7.84
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
format
|
/**
 * Formats a {@link Calendar} object.
 * <p>
 * The TimeZone set on the Calendar is only used to adjust the time offset;
 * the TimeZone specified during the construction of the parser determines
 * the TimeZone used in the formatted string.
 *
 * @param calendar the calendar to format
 * @return the formatted string
 */
String format(Calendar calendar);
|
Formats a {@link Calendar} object.
The TimeZone set on the Calendar is only used to adjust the time offset.
The TimeZone specified during the construction of the Parser will determine the TimeZone
used in the formatted string.
@param calendar the calendar to format.
@return the formatted string.
|
java
|
src/main/java/org/apache/commons/lang3/time/DatePrinter.java
| 47
|
[
"calendar"
] |
String
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getDeclarationDiagnosticsWorker
|
/**
 * Returns the declaration diagnostics for a file, computing and caching them
 * on first request and serving the cached result thereafter.
 */
function getDeclarationDiagnosticsWorker(sourceFile: SourceFile, cancellationToken: CancellationToken | undefined): readonly DiagnosticWithLocation[] {
    const cached = cachedDeclarationDiagnosticsForFile?.get(sourceFile.path);
    if (cached) {
        return cached;
    }
    // Cache miss: lazily create the cache map, compute, and memoize.
    cachedDeclarationDiagnosticsForFile ??= new Map();
    const diagnostics = getDeclarationDiagnosticsForFileNoCache(sourceFile, cancellationToken);
    cachedDeclarationDiagnosticsForFile.set(sourceFile.path, diagnostics);
    return diagnostics;
}
|
@returns The declaration diagnostics for the file, computed and cached on first request.
|
typescript
|
src/compiler/program.ts
| 3,239
|
[
"sourceFile",
"cancellationToken"
] | true
| 2
| 7.04
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
_freeze_group_tasks
|
def _freeze_group_tasks(self, _id=None, group_id=None, chord=None,
                        root_id=None, parent_id=None, group_index=None):
    """Freeze the tasks in the group.

    Ensures the group has a stable task id and propagates the
    group/chord/root/parent identifiers into each member task's options
    before freezing the members.

    Note:
        If the group tasks are created from a generator, the tasks generator would
        not be exhausted, and the tasks would be frozen lazily.

    Returns:
        tuple: A tuple of the group id, and the AsyncResult of each of the group tasks.
    """
    # pylint: disable=redefined-outer-name
    # XXX chord is also a class in outer scope.
    opts = self.options
    try:
        gid = opts['task_id']
    except KeyError:
        # No id assigned yet: reuse the caller-provided group_id or mint one.
        gid = opts['task_id'] = group_id or uuid()
    if group_id:
        opts['group_id'] = group_id
    if chord:
        opts['chord'] = chord
    if group_index is not None:
        opts['group_index'] = group_index
    # setdefault: an id already present in the options wins over the arguments.
    root_id = opts.setdefault('root_id', root_id)
    parent_id = opts.setdefault('parent_id', parent_id)
    if isinstance(self.tasks, _regen):
        # When the group tasks are a generator, we need to make sure we don't
        # exhaust it during the freeze process. We use two generators to do this.
        # One generator will be used to freeze the tasks to get their AsyncResult.
        # The second generator will be used to replace the tasks in the group with an unexhausted state.
        # Create two new generators from the original generator of the group tasks (cloning the tasks).
        tasks1, tasks2 = itertools.tee(self._unroll_tasks(self.tasks))
        # Use the first generator to freeze the group tasks to acquire the AsyncResult for each task.
        results = regen(self._freeze_tasks(tasks1, group_id, chord, root_id, parent_id))
        # Use the second generator to replace the exhausted generator of the group tasks.
        self.tasks = regen(tasks2)
    else:
        new_tasks = []
        # Need to unroll subgroups early so that chord gets the
        # right result instance for chord_unlock etc.
        results = list(self._freeze_unroll(
            new_tasks, group_id, chord, root_id, parent_id,
        ))
        if isinstance(self.tasks, MutableSequence):
            self.tasks[:] = new_tasks
        else:
            self.tasks = new_tasks
    return gid, results
|
Freeze the tasks in the group.
Note:
If the group tasks are created from a generator, the tasks generator would
not be exhausted, and the tasks would be frozen lazily.
Returns:
tuple: A tuple of the group id, and the AsyncResult of each of the group tasks.
|
python
|
celery/canvas.py
| 1,818
|
[
"self",
"_id",
"group_id",
"chord",
"root_id",
"parent_id",
"group_index"
] | false
| 9
| 7.44
|
celery/celery
| 27,741
|
unknown
| false
|
|
load
|
/**
 * Load {@link PemContent} from the given {@link InputStream}, reading it
 * fully as UTF-8 text.
 * @param in an input stream to load the content from
 * @return the loaded PEM content
 * @throws IOException on IO error
 */
public static PemContent load(InputStream in) throws IOException {
    return of(StreamUtils.copyToString(in, StandardCharsets.UTF_8));
}
|
Load {@link PemContent} from the given {@link InputStream}.
@param in an input stream to load the content from
@return the loaded PEM content
@throws IOException on IO error
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/pem/PemContent.java
| 153
|
[
"in"
] |
PemContent
| true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getRule
|
/**
 * Factory method for Iso8601_Rules.
 * @param tokenLen a token indicating the length of the TimeZone String to be formatted.
 * @return the Iso8601_Rule for a TimeZone String of length {@code tokenLen}.
 * @throws IllegalArgumentException if no rule exists for {@code tokenLen}.
 */
static Iso8601_Rule getRule(final int tokenLen) {
    // Map the number of 'X' pattern characters to the matching rule.
    if (tokenLen == 1) {
        return ISO8601_HOURS;
    }
    if (tokenLen == 2) {
        return ISO8601_HOURS_MINUTES;
    }
    if (tokenLen == 3) {
        return ISO8601_HOURS_COLON_MINUTES;
    }
    throw new IllegalArgumentException("invalid number of X");
}
|
Factory method for Iso8601_Rules.
@param tokenLen a token indicating the length of the TimeZone String to be formatted.
@return an Iso8601_Rule that can format TimeZone String of length {@code tokenLen}. If no such
rule exists, an IllegalArgumentException will be thrown.
|
java
|
src/main/java/org/apache/commons/lang3/time/FastDatePrinter.java
| 177
|
[
"tokenLen"
] |
Iso8601_Rule
| true
| 1
| 6.88
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
appendAll
|
/**
 * Appends each element of the iterable, in iteration order, without any
 * separators. A null iterable leaves the builder untouched.
 * Each object is appended using {@link #append(Object)}.
 * @param iterable the iterable to append, may be null
 * @return {@code this} instance.
 * @since 2.3
 */
public StrBuilder appendAll(final Iterable<?> iterable) {
    if (iterable == null) {
        return this;
    }
    for (final Object element : iterable) {
        append(element);
    }
    return this;
}
|
Appends each item in an iterable to the builder without any separators.
Appending a null iterable will have no effect.
Each object is appended using {@link #append(Object)}.
@param iterable the iterable to append
@return {@code this} instance.
@since 2.3
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 788
|
[
"iterable"
] |
StrBuilder
| true
| 2
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
clear
|
def clear(self) -> None:
    """
    Reset the ``Styler``, removing any previously applied styles.

    Returns None.

    See Also
    --------
    Styler.apply : Apply a CSS-styling function column-wise, row-wise,
        or table-wise.
    Styler.export : Export the styles applied to the current Styler.
    Styler.map : Apply a CSS-styling function elementwise.
    Styler.use : Set the styles on the current Styler.

    Examples
    --------
    >>> df = pd.DataFrame({"A": [1, 2], "B": [3, np.nan]})

    After any added style:

    >>> df.style.highlight_null(color="yellow")  # doctest: +SKIP

    Remove it with:

    >>> df.style.clear()  # doctest: +SKIP

    Please see:
    `Table Visualization <../../user_guide/style.ipynb>`_ for more examples.
    """
    # create default GH 40675
    # Build a pristine Styler over the same data/uuid, then make this
    # instance's attribute dict mirror the pristine one.
    clean_copy = Styler(self.data, uuid=self.uuid)
    clean_attrs = [a for a in clean_copy.__dict__ if not callable(a)]
    # self may carry extra attributes set after construction
    self_attrs = [a for a in self.__dict__ if not callable(a)] # maybe more attrs
    # Overwrite every default attribute with its pristine value...
    for attr in clean_attrs:
        setattr(self, attr, getattr(clean_copy, attr))
    # ...and drop attributes that exist only on self.
    for attr in set(self_attrs).difference(clean_attrs):
        delattr(self, attr)
|
Reset the ``Styler``, removing any previously applied styles.
Returns None.
See Also
--------
Styler.apply : Apply a CSS-styling function column-wise, row-wise,
or table-wise.
Styler.export : Export the styles applied to the current Styler.
Styler.map : Apply a CSS-styling function elementwise.
Styler.use : Set the styles on the current Styler.
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2], "B": [3, np.nan]})
After any added style:
>>> df.style.highlight_null(color="yellow") # doctest: +SKIP
Remove it with:
>>> df.style.clear() # doctest: +SKIP
Please see:
`Table Visualization <../../user_guide/style.ipynb>`_ for more examples.
|
python
|
pandas/io/formats/style.py
| 1,805
|
[
"self"
] |
None
| true
| 3
| 6.96
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
charset
|
/**
 * Returns an optional charset for the value of the charset parameter if it is specified.
 *
 * @throws IllegalStateException if multiple distinct charset values have been set
 * @throws IllegalCharsetNameException if a charset value is present, but illegal
 * @throws UnsupportedCharsetException if a charset value is present, but unsupported
 */
public Optional<Charset> charset() {
    // racy single-check idiom, this is safe because Optional is immutable.
    Optional<Charset> local = parsedCharset;
    if (local == null) {
        String value = null;
        local = Optional.absent();
        // Scan every charset parameter value; duplicates with the same value
        // are tolerated, differing values are an error.
        for (String currentValue : parameters.get(CHARSET_ATTRIBUTE)) {
            if (value == null) {
                value = currentValue;
                local = Optional.of(Charset.forName(value));
            } else if (!value.equals(currentValue)) {
                throw new IllegalStateException(
                    "Multiple charset values defined: " + value + ", " + currentValue);
            }
        }
        // Publish the parsed result for subsequent calls.
        parsedCharset = local;
    }
    return local;
}
|
Returns an optional charset for the value of the charset parameter if it is specified.
@throws IllegalStateException if multiple charset values have been set for this media type
@throws IllegalCharsetNameException if a charset value is present, but illegal
@throws UnsupportedCharsetException if a charset value is present, but no support is available
in this instance of the Java virtual machine
|
java
|
android/guava/src/com/google/common/net/MediaType.java
| 857
|
[] | true
| 4
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
getSimpleName
|
/**
 * Returns the simple name of the underlying class as given in the source code.
 *
 * <p>Behaves similarly to {@link Class#getSimpleName()} but does not require
 * the class to be loaded; the name is derived from the class-name string
 * using heuristics.
 */
public String getSimpleName() {
    int lastDollarSign = className.lastIndexOf('$');
    if (lastDollarSign != -1) {
        String innerClassName = className.substring(lastDollarSign + 1);
        // local and anonymous classes are prefixed with number (1,2,3...), anonymous classes are
        // entirely numeric whereas local classes have the user supplied name as a suffix
        return CharMatcher.inRange('0', '9').trimLeadingFrom(innerClassName);
    }
    String packageName = getPackageName();
    if (packageName.isEmpty()) {
        return className;
    }
    // Since this is a top level class, its simple name is always the part after package name.
    return className.substring(packageName.length() + 1);
}
|
Returns the simple name of the underlying class as given in the source code.
<p>Behaves similarly to {@link Class#getSimpleName()} but does not require the class to be
loaded.
<p>But note that this class uses heuristics to identify the simple name. See a related
discussion in <a href="https://github.com/google/guava/issues/3349">issue 3349</a>.
|
java
|
android/guava/src/com/google/common/reflect/ClassPath.java
| 330
|
[] |
String
| true
| 3
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
clone
|
/**
 * Clones an object: arrays (primitive or object) are copied element-wise,
 * other {@link Cloneable} objects are cloned via their public
 * {@code clone()} method, and non-cloneable objects yield {@code null}.
 * @param <T> the type of the object.
 * @param obj the object to clone, null returns null.
 * @return the clone if the object implements {@link Cloneable} otherwise {@code null}.
 * @throws CloneFailedException if the object is cloneable and the clone operation fails.
 * @since 3.0
 */
public static <T> T clone(final T obj) {
    if (obj instanceof Cloneable) {
        final Object result;
        final Class<?> objClass = obj.getClass();
        if (isArray(obj)) {
            final Class<?> componentType = objClass.getComponentType();
            if (componentType.isPrimitive()) {
                // Primitive arrays cannot be cast to Object[]; copy by index.
                int length = Array.getLength(obj);
                result = Array.newInstance(componentType, length);
                while (length-- > 0) {
                    Array.set(result, length, Array.get(obj, length));
                }
            } else {
                result = ((Object[]) obj).clone();
            }
        } else {
            try {
                // Cloneable does not itself expose clone(); invoke it reflectively.
                result = objClass.getMethod("clone").invoke(obj);
            } catch (final ReflectiveOperationException e) {
                throw new CloneFailedException("Exception cloning Cloneable type " + objClass.getName(), e);
            }
        }
        // Cast is safe for well-behaved clone() implementations returning the same type.
        return (T) result;
    }
    return null;
}
|
Clones an object.
@param <T> the type of the object.
@param obj the object to clone, null returns null.
@return the clone if the object implements {@link Cloneable} otherwise {@code null}.
@throws CloneFailedException if the object is cloneable and the clone operation fails.
@since 3.0
|
java
|
src/main/java/org/apache/commons/lang3/ObjectUtils.java
| 232
|
[
"obj"
] |
T
| true
| 6
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getParser
|
/**
 * For a constructed field, return a parser for its content.
 * @return A parser for the construct.
 * @throws IOException if this entity is primitive rather than constructed.
 */
public DerParser getParser() throws IOException {
    // Idiomatic negation instead of comparing against `false`.
    if (!isConstructed()) {
        throw new IOException("Invalid DER: can't parse primitive entity"); //$NON-NLS-1$
    }
    return new DerParser(value);
}
|
For constructed field, return a parser for its content.
@return A parser for the construct.
|
java
|
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/DerParser.java
| 215
|
[] |
DerParser
| true
| 2
| 8.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
_refine_percentiles
|
def _refine_percentiles(
percentiles: Sequence[float] | np.ndarray | None,
) -> npt.NDArray[np.float64]:
"""
Ensure that percentiles are unique and sorted.
Parameters
----------
percentiles : list-like of numbers, optional
The percentiles to include in the output.
"""
if percentiles is None:
return np.array([0.25, 0.5, 0.75])
percentiles = np.asarray(percentiles)
# get them all to be in [0, 1]
validate_percentile(percentiles)
# sort and check for duplicates
unique_pcts = np.unique(percentiles)
assert percentiles is not None
if len(unique_pcts) < len(percentiles):
raise ValueError("percentiles cannot contain duplicates")
return unique_pcts
|
Ensure that percentiles are unique and sorted.
Parameters
----------
percentiles : list-like of numbers, optional
The percentiles to include in the output.
|
python
|
pandas/core/methods/describe.py
| 345
|
[
"percentiles"
] |
npt.NDArray[np.float64]
| true
| 3
| 6.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
infer_compression
|
def infer_compression(
    filepath_or_buffer: FilePath | BaseBuffer, compression: str | None
) -> str | None:
    """
    Get the compression method for filepath_or_buffer. If compression='infer',
    the inferred compression method is returned. Otherwise, the input
    compression method is returned unchanged, unless it's invalid, in which
    case an error is raised.

    Parameters
    ----------
    filepath_or_buffer : str or file handle
        File path or object.
    {compression_options}

    Returns
    -------
    string or None

    Raises
    ------
    ValueError on invalid compression specified.
    """
    if compression is None:
        return None

    # Infer compression
    if compression == "infer":
        # Convert all path types (e.g. pathlib.Path) to strings
        if isinstance(filepath_or_buffer, str) and "::" in filepath_or_buffer:
            # chained URLs contain ::
            filepath_or_buffer = filepath_or_buffer.split("::")[0]
        filepath_or_buffer = stringify_path(filepath_or_buffer, convert_file_like=True)
        if not isinstance(filepath_or_buffer, str):
            # Cannot infer compression of a buffer, assume no compression
            return None

        # Infer compression from the filename/URL extension. Use a distinct
        # loop variable so the `compression` parameter is not shadowed (the
        # original rebound `compression` inside this loop).
        for extension, ext_compression in extension_to_compression.items():
            if filepath_or_buffer.lower().endswith(extension):
                return ext_compression
        return None

    # Compression has been specified. Check that it's valid
    if compression in _supported_compressions:
        return compression

    valid = ["infer", None] + sorted(_supported_compressions)
    msg = (
        f"Unrecognized compression type: {compression}\n"
        f"Valid compression types are {valid}"
    )
    raise ValueError(msg)
|
Get the compression method for filepath_or_buffer. If compression='infer',
the inferred compression method is returned. Otherwise, the input
compression method is returned unchanged, unless it's invalid, in which
case an error is raised.
Parameters
----------
filepath_or_buffer : str or file handle
File path or object.
{compression_options}
Returns
-------
string or None
Raises
------
ValueError on invalid compression specified.
|
python
|
pandas/io/common.py
| 554
|
[
"filepath_or_buffer",
"compression"
] |
str | None
| true
| 9
| 6.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
serverSessionExpirationTimeNanos
|
/**
 * Return the session expiration time, if any, otherwise null (the default).
 * The value is in nanoseconds as per {@code System.nanoTime()} and is
 * therefore only meaningful when compared to another such value; its
 * absolute value is meaningless.
 * @return the session expiration time, if any, otherwise null
 */
default Long serverSessionExpirationTimeNanos() {
    return null;
}
|
Return the session expiration time, if any, otherwise null. The value is in
nanoseconds as per {@code System.nanoTime()} and is therefore only useful
when compared to such a value -- it's absolute value is meaningless. This
value may be non-null only on the server-side. It represents the time after
which, in the absence of re-authentication, the broker will close the session
if it receives a request unrelated to authentication. We store nanoseconds
here to avoid having to invoke the more expensive {@code milliseconds()} call
on the broker for every request
@return the session expiration time, if any, otherwise null
|
java
|
clients/src/main/java/org/apache/kafka/common/network/Authenticator.java
| 102
|
[] |
Long
| true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
requestTopicMetadata
|
/**
 * Enqueue a metadata request for a single topic and return a future that is
 * completed when the request is.
 * @param topic to be requested.
 * @param deadlineMs time by which the request should complete.
 * @return the future of the metadata request.
 */
public CompletableFuture<Map<String, List<PartitionInfo>>> requestTopicMetadata(final String topic, final long deadlineMs) {
    TopicMetadataRequestState newRequest = new TopicMetadataRequestState(
            logContext,
            topic,
            deadlineMs,
            retryBackoffMs,
            retryBackoffMaxMs);
    inflightRequests.add(newRequest);
    return newRequest.future;
}
|
Return the future of the metadata request.
@param topic to be requested.
@param deadlineMs time by which the request should complete.
@return the future of the metadata request.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManager.java
| 130
|
[
"topic",
"deadlineMs"
] | true
| 1
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
assignProducerStateToBatches
|
/**
 * Assign this batch's producer state (producer id, epoch, transactional flag)
 * to each of the given batches, giving every batch a consecutive base
 * sequence starting from this batch's base sequence. Does nothing if no
 * sequence has been assigned yet.
 * @param batches the batches to receive the producer state
 */
private void assignProducerStateToBatches(Deque<ProducerBatch> batches) {
    if (hasSequence()) {
        int sequence = baseSequence();
        ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(producerId(), producerEpoch());
        for (ProducerBatch newBatch : batches) {
            // Each batch consumes as many sequence numbers as it has records.
            newBatch.setProducerState(producerIdAndEpoch, sequence, isTransactional());
            sequence += newBatch.recordCount;
        }
    }
}
|
Assign this batch's producer state (producer id, epoch, transactional flag) to each of the
given batches, giving every batch a consecutive base sequence starting from this batch's
base sequence. Does nothing if no sequence has been assigned yet.
@param batches the batches to receive the producer state
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerBatch.java
| 392
|
[
"batches"
] |
void
| true
| 2
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
typeOf
|
/**
 * Return the type declared for the given configuration key.
 * @param key the configuration key to look up
 * @return the {@link ConfigDef.Type} of the key, or {@code null} if the key is not defined
 */
public ConfigDef.Type typeOf(String key) {
    final ConfigDef.ConfigKey configKey = definition.configKeys().get(key);
    return (configKey != null) ? configKey.type : null;
}
|
Return the type declared for the given configuration key.
@param key the configuration key to look up
@return the {@link ConfigDef.Type} of the key, or {@code null} if the key is not defined
|
java
|
clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
| 215
|
[
"key"
] | true
| 2
| 7.6
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
ti_selector_condition
|
def ti_selector_condition(cls, vals: Collection[str | tuple[str, int]]) -> ColumnElement[bool]:
    """
    Build an SQLAlchemy filter for a list of task_ids or tuples of (task_id,map_index).

    :meta private:
    """
    # Split plain task_ids from (task_id, map_index) pairs.
    plain_ids = [v for v in vals if isinstance(v, str)]
    mapped_ids = [v for v in vals if not isinstance(v, str)]

    clauses: list[Any] = []
    if plain_ids:
        clauses.append(cls.task_id.in_(plain_ids))
    if mapped_ids:
        clauses.append(tuple_(cls.task_id, cls.map_index).in_(mapped_ids))

    # No input values -> a filter that matches nothing.
    if not clauses:
        return false()
    return clauses[0] if len(clauses) == 1 else or_(*clauses)
|
Build an SQLAlchemy filter for a list of task_ids or tuples of (task_id,map_index).
:meta private:
|
python
|
airflow-core/src/airflow/models/taskinstance.py
| 2,042
|
[
"cls",
"vals"
] |
ColumnElement[bool]
| true
| 5
| 6
|
apache/airflow
| 43,597
|
unknown
| false
|
awaitMetadataUpdate
|
/**
 * Block waiting on the metadata refresh with a timeout.
 * @param timer bounds how long to keep polling for the update
 * @return true if update succeeded, false otherwise.
 */
public boolean awaitMetadataUpdate(Timer timer) {
    // Request an update, then poll until the metadata version advances or
    // the timer expires.
    int version = this.metadata.requestUpdate(false);
    do {
        poll(timer);
    } while (this.metadata.updateVersion() == version && timer.notExpired());
    return this.metadata.updateVersion() > version;
}
|
Block waiting on the metadata refresh with a timeout.
@return true if update succeeded, false otherwise.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java
| 163
|
[
"timer"
] | true
| 2
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
applyRuleEdits
|
/**
 * Applies a formatting rule's action (delete/insert space, token, or newline)
 * between two adjacent token ranges, recording the resulting text edits and
 * reporting whether the edit effectively added or removed a line.
 */
function applyRuleEdits(rule: Rule, previousRange: TextRangeWithKind, previousStartLine: number, currentRange: TextRangeWithKind, currentStartLine: number): LineAction {
    const onLaterLine = currentStartLine !== previousStartLine;
    switch (rule.action) {
        case RuleAction.StopProcessingSpaceActions:
            // no action required
            return LineAction.None;
        case RuleAction.DeleteSpace:
            if (previousRange.end !== currentRange.pos) {
                // delete characters starting from t1.end up to t2.pos exclusive
                recordDelete(previousRange.end, currentRange.pos - previousRange.end);
                return onLaterLine ? LineAction.LineRemoved : LineAction.None;
            }
            break;
        case RuleAction.DeleteToken:
            recordDelete(previousRange.pos, previousRange.end - previousRange.pos);
            break;
        case RuleAction.InsertNewLine:
            // exit early if we on different lines and rule cannot change number of newlines
            // if line1 and line2 are on subsequent lines then no edits are required - ok to exit
            // if line1 and line2 are separated with more than one newline - ok to exit since we cannot delete extra new lines
            if (rule.flags !== RuleFlags.CanDeleteNewLines && previousStartLine !== currentStartLine) {
                return LineAction.None;
            }
            // edit should not be applied if we have one line feed between elements
            const lineDelta = currentStartLine - previousStartLine;
            if (lineDelta !== 1) {
                recordReplace(previousRange.end, currentRange.pos - previousRange.end, getNewLineOrDefaultFromHost(host, options));
                return onLaterLine ? LineAction.None : LineAction.LineAdded;
            }
            break;
        case RuleAction.InsertSpace:
            // exit early if we on different lines and rule cannot change number of newlines
            if (rule.flags !== RuleFlags.CanDeleteNewLines && previousStartLine !== currentStartLine) {
                return LineAction.None;
            }
            const posDelta = currentRange.pos - previousRange.end;
            if (posDelta !== 1 || sourceFile.text.charCodeAt(previousRange.end) !== CharacterCodes.space) {
                recordReplace(previousRange.end, currentRange.pos - previousRange.end, " ");
                return onLaterLine ? LineAction.LineRemoved : LineAction.None;
            }
            break;
        case RuleAction.InsertTrailingSemicolon:
            recordInsert(previousRange.end, ";");
    }
    return LineAction.None;
}
|
Trimming will be done for lines after the previous range.
Exclude comments as they had been previously processed.
|
typescript
|
src/services/formatting/formatting.ts
| 1,317
|
[
"rule",
"previousRange",
"previousStartLine",
"currentRange",
"currentStartLine"
] | true
| 12
| 6
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
generateDefaultCacheName
|
/**
 * Generate a default cache name for the specified {@link Method}:
 * declaring class name, method name, and comma-delimited parameter type names.
 * @param method the annotated method
 * @return the default cache name, according to JSR-107
 */
protected String generateDefaultCacheName(Method method) {
    StringBuilder name = new StringBuilder(method.getDeclaringClass().getName());
    name.append('.').append(method.getName()).append('(');
    Class<?>[] parameterTypes = method.getParameterTypes();
    for (int i = 0; i < parameterTypes.length; i++) {
        if (i > 0) {
            name.append(',');
        }
        name.append(parameterTypes[i].getName());
    }
    return name.append(')').toString();
}
|
Generate a default cache name for the specified {@link Method}.
@param method the annotated method
@return the default cache name, according to JSR-107
|
java
|
spring-context-support/src/main/java/org/springframework/cache/jcache/interceptor/AnnotationJCacheOperationSource.java
| 220
|
[
"method"
] |
String
| true
| 1
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
get_authorized_variables
|
def get_authorized_variables(
    self,
    *,
    user: T,
    method: ResourceMethod = "GET",
    session: Session = NEW_SESSION,
) -> set[str]:
    """
    Get variable keys the user has access to.

    :param user: the user
    :param method: the method to filter on
    :param session: the session
    """
    # Group every variable key by the team that owns it, then ask the
    # per-team filter which of those keys the user may access.
    rows = session.execute(select(Variable.key, Variable.team_name)).all()

    keys_by_team: dict[str | None, set[str]] = defaultdict(set)
    for key, team in rows:
        keys_by_team[team].add(key)

    authorized: set[str] = set()
    for team, keys in keys_by_team.items():
        authorized.update(
            self.filter_authorized_variables(
                variable_keys=keys, user=user, method=method, team_name=team
            )
        )
    return authorized
|
Get variable keys the user has access to.
:param user: the user
:param method: the method to filter on
:param session: the session
|
python
|
airflow-core/src/airflow/api_fastapi/auth/managers/base_auth_manager.py
| 682
|
[
"self",
"user",
"method",
"session"
] |
set[str]
| true
| 3
| 7.04
|
apache/airflow
| 43,597
|
sphinx
| false
|
pollHeartbeat
|
/**
 * Check the status of the heartbeat thread (if it is active) and indicate the
 * liveness of the client. This must be called periodically after joining the
 * group to ensure that the member stays in it.
 * @param now current time in milliseconds
 * @throws RuntimeException if the heartbeat thread previously failed; the
 * thread reference is cleared so a later call can spawn a new one
 */
protected synchronized void pollHeartbeat(long now) {
    if (heartbeatThread != null) {
        if (heartbeatThread.isFailed()) {
            // set the heartbeat thread to null and raise an exception. If the user catches it,
            // the next call to ensureActiveGroup() will spawn a new heartbeat thread.
            RuntimeException cause = heartbeatThread.failureCause();
            heartbeatThread = null;
            throw cause;
        }
        // Awake the heartbeat thread if needed
        if (heartbeat.shouldHeartbeat(now)) {
            notify();
        }
        heartbeat.poll(now);
    }
}
|
Check the status of the heartbeat thread (if it is active) and indicate the liveness
of the client. This must be called periodically after joining with {@link #ensureActiveGroup()}
to ensure that the member stays in the group. If an interval of time longer than the
provided rebalance timeout expires without calling this method, then the client will proactively
leave the group.
@param now current time in milliseconds
@throws RuntimeException for unexpected errors raised from the heartbeat thread
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
| 368
|
[
"now"
] |
void
| true
| 4
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
visitYieldExpression
|
/**
 * Visits a `yield` expression, lowering it to an emitted yield (or yield*)
 * followed by a resume label, and returning the resumed-generator expression.
 * @param node The node to visit.
 */
function visitYieldExpression(node: YieldExpression): LeftHandSideExpression {
    // [source]
    //      x = yield a();
    //
    // [intermediate]
    //  .yield resumeLabel, (a())
    //  .mark resumeLabel
    //      x = %sent%;
    const resumeLabel = defineLabel();
    const expression = visitNode(node.expression, visitor, isExpression);
    if (node.asteriskToken) {
        // NOTE: `expression` must be defined for `yield*`.
        const iterator = (getEmitFlags(node.expression!) & EmitFlags.Iterator) === 0
            ? setTextRange(emitHelpers().createValuesHelper(expression!), node)
            : expression;
        emitYieldStar(iterator, /*location*/ node);
    }
    else {
        emitYield(expression, /*location*/ node);
    }
    markLabel(resumeLabel);
    return createGeneratorResume(/*location*/ node);
}
|
Visits a `yield` expression.
@param node The node to visit.
|
typescript
|
src/compiler/transformers/generators.ts
| 1,031
|
[
"node"
] | true
| 4
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
openConnection
|
/**
 * Reconnect to the {@link JarFile}, returning a replacement {@link URLConnection}.
 * @param jarFile the jar file
 * @return a newly opened connection, or {@code null} when the jar file has no cached URL
 * @throws IOException on I/O error
 */
private URLConnection openConnection(JarFile jarFile) throws IOException {
    URL url = this.cache.get(jarFile);
    if (url == null) {
        return null;
    }
    return url.openConnection();
}
|
Reconnect to the {@link JarFile}, returning a replacement {@link URLConnection}.
@param jarFile the jar file
@return a newly opened connection, or {@code null} when the jar file has no cached URL
@throws IOException on I/O error
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/jar/UrlJarFiles.java
| 129
|
[
"jarFile"
] |
URLConnection
| true
| 2
| 7.2
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
findMissing
|
/**
 * Return the items in {@code toFind} that are absent from {@code toSearch}.
 * @param toFind The items to look for.
 * @param toSearch The set of items to search.
 * @return Empty set if all items were found; some of the missing ones in a set, if not.
 */
static <T> Set<T> findMissing(Set<T> toFind, Set<T> toSearch) {
    // Start from everything we expect, then drop whatever was actually found;
    // LinkedHashSet preserves the iteration order of toFind.
    Set<T> missing = new LinkedHashSet<>(toFind);
    missing.removeAll(toSearch);
    return missing;
}
|
Return missing items which are expected to be in a particular set, but which are not.
@param toFind The items to look for.
@param toSearch The set of items to search.
@return Empty set if all items were found; some of the missing ones in a set, if not.
|
java
|
clients/src/main/java/org/apache/kafka/clients/FetchSessionHandler.java
| 413
|
[
"toFind",
"toSearch"
] | true
| 2
| 7.92
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
reauthenticationBeginNanos
|
/**
 * Return the time when re-authentication began. The value is in nanoseconds
 * as per {@code System.nanoTime()} and is therefore only meaningful when
 * compared to another such value; its absolute value is meaningless.
 * @return the time when re-authentication began
 */
public long reauthenticationBeginNanos() {
    return reauthenticationBeginNanos;
}
|
Return the time when re-authentication began. The value is in nanoseconds as
per {@code System.nanoTime()} and is therefore only useful when compared to
such a value -- its absolute value is meaningless.
@return the time when re-authentication began
|
java
|
clients/src/main/java/org/apache/kafka/common/network/ReauthenticationContext.java
| 91
|
[] | true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
to_device
|
def to_device(x: Array, device: Device, /, *, stream: int | Any | None = None) -> Array:
    """
    Copy the array from the device on which it currently resides to the specified ``device``.

    This is equivalent to `x.to_device(device, stream=stream)` according to
    the `standard
    <https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.to_device.html>`__.
    This helper is included because some array libraries do not have the
    `to_device` method.

    Parameters
    ----------
    x: array
        array instance from an array API compatible library.
    device: device
        a ``device`` object (see the `Device Support <https://data-apis.org/array-api/latest/design_topics/device_support.html>`__
        section of the array API specification).
    stream: int | Any | None
        stream object to use during copy. In addition to the types supported
        in ``array.__dlpack__``, implementations may choose to support any
        library-specific stream object with the caveat that any code using
        such an object would not be portable.

    Returns
    -------
    out: array
        an array with the same data and data type as ``x`` and located on the
        specified ``device``.

    Notes
    -----
    For NumPy, this function effectively does nothing since the only supported
    device is the CPU. For CuPy, this method supports CuPy CUDA
    :external+cupy:class:`Device <cupy.cuda.Device>` and
    :external+cupy:class:`Stream <cupy.cuda.Stream>` objects. For PyTorch,
    this is the same as :external+torch:meth:`x.to(device) <torch.Tensor.to>`
    (the ``stream`` argument is not supported in PyTorch).

    See Also
    --------
    device : Hardware device the array data resides on.
    """
    # NumPy: CPU-only, so only device == "cpu" is accepted and is a no-op.
    if is_numpy_array(x):
        if stream is not None:
            raise ValueError("The stream argument to to_device() is not supported")
        if device == "cpu":
            return x
        raise ValueError(f"Unsupported device {device!r}")
    elif is_cupy_array(x):
        # cupy does not yet have to_device
        return _cupy_to_device(x, device, stream=stream)
    elif is_torch_array(x):
        return _torch_to_device(x, device, stream=stream) # pyright: ignore[reportArgumentType]
    elif is_dask_array(x):
        if stream is not None:
            raise ValueError("The stream argument to to_device() is not supported")
        # TODO: What if our array is on the GPU already?
        if device == "cpu":
            return x
        raise ValueError(f"Unsupported device {device!r}")
    elif is_jax_array(x):
        if not hasattr(x, "__array_namespace__"):
            # In JAX v0.4.31 and older, this import adds to_device method to x...
            import jax.experimental.array_api # noqa: F401 # pyright: ignore
            # ... but only on eager JAX. It won't work inside jax.jit.
            if not hasattr(x, "to_device"):
                return x
        return x.to_device(device, stream=stream)
    elif is_pydata_sparse_array(x) and device == _device(x):
        # Perform trivial check to return the same array if
        # device is same instead of err-ing.
        return x
    # Fall through: delegate to the array's own to_device method.
    return x.to_device(device, stream=stream) # pyright: ignore
|
Copy the array from the device on which it currently resides to the specified ``device``.
This is equivalent to `x.to_device(device, stream=stream)` according to
the `standard
<https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.to_device.html>`__.
This helper is included because some array libraries do not have the
`to_device` method.
Parameters
----------
x: array
array instance from an array API compatible library.
device: device
a ``device`` object (see the `Device Support <https://data-apis.org/array-api/latest/design_topics/device_support.html>`__
section of the array API specification).
stream: int | Any | None
stream object to use during copy. In addition to the types supported
in ``array.__dlpack__``, implementations may choose to support any
library-specific stream object with the caveat that any code using
such an object would not be portable.
Returns
-------
out: array
an array with the same data and data type as ``x`` and located on the
specified ``device``.
Notes
-----
For NumPy, this function effectively does nothing since the only supported
device is the CPU. For CuPy, this method supports CuPy CUDA
:external+cupy:class:`Device <cupy.cuda.Device>` and
:external+cupy:class:`Stream <cupy.cuda.Stream>` objects. For PyTorch,
this is the same as :external+torch:meth:`x.to(device) <torch.Tensor.to>`
(the ``stream`` argument is not supported in PyTorch).
See Also
--------
device : Hardware device the array data resides on.
|
python
|
sklearn/externals/array_api_compat/common/_helpers.py
| 813
|
[
"x",
"device",
"stream"
] |
Array
| true
| 14
| 6.32
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
consensus_score
|
def consensus_score(a, b, *, similarity="jaccard"):
    """The similarity of two sets of biclusters.

    Pairwise similarities between individual biclusters are computed, the
    best one-to-one matching between the two sets is found by solving a
    linear sum assignment problem (modified Jonker-Volgenant algorithm),
    and the matched similarities are summed and divided by the size of the
    larger set.

    Read more in the :ref:`User Guide <biclustering>`.

    Parameters
    ----------
    a : tuple (rows, columns)
        Tuple of row and column indicators for a set of biclusters.

    b : tuple (rows, columns)
        Another set of biclusters like ``a``.

    similarity : 'jaccard' or callable, default='jaccard'
        May be the string "jaccard" to use the Jaccard coefficient, or
        any function that takes four arguments, each of which is a 1d
        indicator vector: (a_rows, a_columns, b_rows, b_columns).

    Returns
    -------
    consensus_score : float
        Consensus score, a non-negative value, sum of similarities
        divided by size of larger set.

    See Also
    --------
    scipy.optimize.linear_sum_assignment : Solve the linear sum assignment problem.

    References
    ----------
    * Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
      for bicluster acquisition
      <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.

    Examples
    --------
    >>> from sklearn.metrics import consensus_score
    >>> a = ([[True, False], [False, True]], [[False, True], [True, False]])
    >>> b = ([[False, True], [True, False]], [[True, False], [False, True]])
    >>> consensus_score(a, b, similarity='jaccard')
    1.0
    """
    # Resolve the named similarity measure to its implementation.
    measure = _jaccard if similarity == "jaccard" else similarity
    pair_sims = _pairwise_similarity(a, b, measure)
    # Maximizing total matched similarity == minimizing total (1 - similarity).
    rows_ind, cols_ind = linear_sum_assignment(1.0 - pair_sims)
    larger_set_size = max(len(a[0]), len(b[0]))
    return float(pair_sims[rows_ind, cols_ind].sum() / larger_set_size)
|
The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the best
matching between sets is found by solving a linear sum assignment problem,
using a modified Jonker-Volgenant algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : tuple (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : tuple (rows, columns)
Another set of biclusters like ``a``.
similarity : 'jaccard' or callable, default='jaccard'
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
Returns
-------
consensus_score : float
Consensus score, a non-negative value, sum of similarities
divided by size of larger set.
See Also
--------
scipy.optimize.linear_sum_assignment : Solve the linear sum assignment problem.
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
Examples
--------
>>> from sklearn.metrics import consensus_score
>>> a = ([[True, False], [False, True]], [[False, True], [True, False]])
>>> b = ([[False, True], [True, False]], [[True, False], [False, True]])
>>> consensus_score(a, b, similarity='jaccard')
1.0
|
python
|
sklearn/metrics/cluster/_bicluster.py
| 60
|
[
"a",
"b",
"similarity"
] | false
| 2
| 7.04
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
validIndex
|
/**
 * Validates that {@code index} is within the bounds of the given character
 * sequence, throwing an {@link IndexOutOfBoundsException} with the
 * formatted message otherwise.
 *
 * @param <T> the character sequence type.
 * @param chars the character sequence to check, must not be null.
 * @param index the index to check.
 * @param message the exception message format if invalid, not null.
 * @param values optional values for the formatted exception message.
 * @return the validated character sequence (for method chaining).
 * @throws NullPointerException if the character sequence is {@code null}.
 * @throws IndexOutOfBoundsException if the index is invalid.
 */
public static <T extends CharSequence> T validIndex(final T chars, final int index, final String message, final Object... values) {
    Objects.requireNonNull(chars, "chars");
    final boolean inBounds = index >= 0 && index < chars.length();
    if (!inBounds) {
        throw new IndexOutOfBoundsException(getMessage(message, values));
    }
    return chars;
}
|
Validates that the index is within the bounds of the argument
character sequence; otherwise throwing an exception with the
specified message.
<pre>Validate.validIndex(myStr, 2, "The string index is invalid: ");</pre>
<p>If the character sequence is {@code null}, then the message
of the exception is "The validated object is null".</p>
@param <T> the character sequence type.
@param chars the character sequence to check, validated not null by this method.
@param index the index to check.
@param message the {@link String#format(String, Object...)} exception message if invalid, not null.
@param values the optional values for the formatted exception message, null array not recommended.
@return the validated character sequence (never {@code null} for method chaining).
@throws NullPointerException if the character sequence is {@code null}.
@throws IndexOutOfBoundsException if the index is invalid.
@see #validIndex(CharSequence, int)
@since 3.0
|
java
|
src/main/java/org/apache/commons/lang3/Validate.java
| 1,167
|
[
"chars",
"index",
"message"
] |
T
| true
| 3
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
_register_accessor
|
def _register_accessor(
    name: str, cls: type[NDFrame | Index]
) -> Callable[[TypeT], TypeT]:
    """
    Register a custom accessor on objects.

    Parameters
    ----------
    name : str
        Name under which the accessor should be registered. A warning is
        issued if this name conflicts with a preexisting attribute.

    Returns
    -------
    callable
        A class decorator.

    See Also
    --------
    register_dataframe_accessor : Register a custom accessor on DataFrame objects.
    register_series_accessor : Register a custom accessor on Series objects.
    register_index_accessor : Register a custom accessor on Index objects.

    Notes
    -----
    This function allows you to register a custom-defined accessor class
    for pandas objects (DataFrame, Series, or Index).
    The requirements for the accessor class are as follows:
    * Must contain an init method that:
      * accepts a single object
      * raises an AttributeError if the object does not have correctly
        matching inputs for the accessor
    * Must contain a method for each access pattern.
    * The methods should be able to take any argument signature.
    * Accessible using the @property decorator if no additional arguments are
      needed.
    """

    def _decorator(accessor_cls: TypeT) -> TypeT:
        # Warn (but proceed) when the accessor shadows an existing attribute.
        if hasattr(cls, name):
            warnings.warn(
                f"registration of accessor {accessor_cls!r} under name "
                f"{name!r} for type {cls!r} is overriding a preexisting "
                f"attribute with the same name.",
                UserWarning,
                stacklevel=find_stack_level(),
            )
        setattr(cls, name, Accessor(name, accessor_cls))
        cls._accessors.add(name)
        return accessor_cls

    return _decorator
|
Register a custom accessor on objects.
Parameters
----------
name : str
Name under which the accessor should be registered. A warning is issued
if this name conflicts with a preexisting attribute.
Returns
-------
callable
A class decorator.
See Also
--------
register_dataframe_accessor : Register a custom accessor on DataFrame objects.
register_series_accessor : Register a custom accessor on Series objects.
register_index_accessor : Register a custom accessor on Index objects.
Notes
-----
This function allows you to register a custom-defined accessor class
for pandas objects (DataFrame, Series, or Index).
The requirements for the accessor class are as follows:
* Must contain an init method that:
* accepts a single object
* raises an AttributeError if the object does not have correctly
matching inputs for the accessor
* Must contain a method for each access pattern.
* The methods should be able to take any argument signature.
* Accessible using the @property decorator if no additional arguments are
needed.
|
python
|
pandas/core/accessor.py
| 238
|
[
"name",
"cls"
] |
Callable[[TypeT], TypeT]
| true
| 2
| 6.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
np_find_common_type
|
def np_find_common_type(*dtypes: np.dtype) -> np.dtype:
    """
    Pre-1.25 ``np.find_common_type`` replacement built on ``np.result_type``.
    https://github.com/pandas-dev/pandas/pull/49569#issuecomment-1308300065

    Parameters
    ----------
    dtypes : np.dtypes

    Returns
    -------
    np.dtype
    """
    object_dtype = np.dtype("O")
    try:
        result = np.result_type(*dtypes)
    except TypeError:
        # Incompatible dtypes have no common type other than object.
        return object_dtype
    # NumPy promotion currently (1.25) misbehaves for times and strings,
    # so fall back to object (find_common_dtype did unless there was only
    # one dtype).
    return object_dtype if result.kind in "mMSU" else result
|
np.find_common_type implementation pre-1.25 deprecation using np.result_type
https://github.com/pandas-dev/pandas/pull/49569#issuecomment-1308300065
Parameters
----------
dtypes : np.dtypes
Returns
-------
np.dtype
|
python
|
pandas/core/dtypes/cast.py
| 1,269
|
[] |
np.dtype
| true
| 2
| 6.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
torch_key
|
def torch_key() -> bytes:
    """
    Compute a key that contains relevant information about torch source files.

    In non-fbcode builds this is a SHA-256 digest over the inductor source
    tree, ``torch.__version__`` and a couple of extra runtime files; in
    fbcode builds a prebuilt source hash is read from the par file instead.
    """
    with dynamo_timed("inductor_codecache_torch_key", log_pt2_compile_event=False):
        if not config.is_fbcode():
            def get_code_hash(root: str) -> bytes:
                # This function isn't meant to be used outside of torch_key, just a
                # helper for clarity. Instead, use torch_key() directly when you need
                # a hash representing the state of the source code.
                extra_files = (
                    "codegen/aoti_runtime/interface.cpp",
                    "script.ld",
                )
                inductor_root = os.path.dirname(__file__)
                extra_files = [os.path.join(inductor_root, x) for x in extra_files]
                hasher = hashlib.sha256()
                # Fold the torch version in so the key changes across releases.
                hasher.update(torch.__version__.encode("utf-8"))
                build_code_hash([root], "", hasher)
                # Extra files are hashed only if present on disk.
                for path in extra_files:
                    if os.path.exists(path):
                        with open(path, "rb") as f:
                            hasher.update(f.read())
                return hasher.digest()
            return get_code_hash(_TORCH_PATH)
        # fbcode: the source hash is generated at build time and shipped
        # inside the par file; deferred import since libfb is fbcode-only.
        from libfb.py import parutil
        return parutil.get_file_contents("torch/src_hash.txt").rstrip().encode("ascii")
|
Compute a key that contains relevant information about torch source files
|
python
|
torch/_inductor/codecache.py
| 720
|
[] |
bytes
| true
| 4
| 6.56
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
min
|
/**
 * Returns the lesser of the two values according to this ordering; when the
 * values compare as equal, the first argument is returned.
 *
 * @param a value to compare, returned if less than or equal to b.
 * @param b value to compare.
 * @throws ClassCastException if the parameters are not mutually comparable
 *     under this ordering.
 */
@ParametricNullness
public <E extends T> E min(@ParametricNullness E a, @ParametricNullness E b) {
  // b wins only when strictly smaller; ties keep the first argument.
  if (compare(a, b) > 0) {
    return b;
  }
  return a;
}
|
Returns the lesser of the two values according to this ordering. If the values compare as 0,
the first is returned.
<p><b>Implementation note:</b> this method is invoked by the default implementations of the
other {@code min} overloads, so overriding it will affect their behavior.
<p><b>Note:</b> Consider using {@code Comparators.min(a, b, thisComparator)} instead. If {@code
thisComparator} is {@link Ordering#natural}, then use {@code Comparators.min(a, b)}.
@param a value to compare, returned if less than or equal to b.
@param b value to compare.
@throws ClassCastException if the parameters are not <i>mutually comparable</i> under this
ordering.
|
java
|
android/guava/src/com/google/common/collect/Ordering.java
| 607
|
[
"a",
"b"
] |
E
| true
| 2
| 6.32
|
google/guava
| 51,352
|
javadoc
| false
|
nop
|
/**
 * Gets the NOP singleton.
 *
 * @param <T> Consumed type 1.
 * @param <U> Consumed type 2.
 * @param <E> The kind of thrown exception or error.
 * @return The NOP singleton.
 */
@SuppressWarnings("unchecked")
static <T, U, E extends Throwable> FailableBiConsumer<T, U, E> nop() {
    // The shared NOP instance is stateless, so the unchecked cast is safe
    // for any combination of type parameters.
    return NOP;
}
|
Gets the NOP singleton.
@param <T> Consumed type 1.
@param <U> Consumed type 2.
@param <E> The kind of thrown exception or error.
@return The NOP singleton.
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableBiConsumer.java
| 46
|
[] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
toString
|
/**
 * Returns a human-readable representation of this credential info,
 * including the SCRAM mechanism and the iteration count.
 */
@Override
public String toString() {
    final StringBuilder sb = new StringBuilder("ScramCredentialInfo{");
    sb.append("mechanism=").append(mechanism);
    sb.append(", iterations=").append(iterations);
    sb.append('}');
    return sb.toString();
}
|
@return the number of iterations used when creating the credential
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/ScramCredentialInfo.java
| 57
|
[] |
String
| true
| 1
| 6.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
keys
|
def keys(self, include: str = "pandas") -> list[str]:
    """
    Return a list of keys corresponding to objects stored in HDFStore.

    Parameters
    ----------
    include : str, default 'pandas'
        When include equals 'pandas' return pandas objects.
        When include equals 'native' return native HDF5 Table objects.

    Returns
    -------
    list
        List of ABSOLUTE path-names (e.g. have the leading '/').

    Raises
    ------
    ValueError
        If ``include`` has an illegal value.

    See Also
    --------
    HDFStore.info : Prints detailed information on the store.
    HDFStore.get_node : Returns the node with the key.
    HDFStore.get_storer : Returns the storer object for a key.

    Examples
    --------
    >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
    >>> store = pd.HDFStore("store.h5", "w")  # doctest: +SKIP
    >>> store.put("data", df)  # doctest: +SKIP
    >>> store.get("data")  # doctest: +SKIP
    >>> print(store.keys())  # doctest: +SKIP
    ['/data1', '/data2']
    >>> store.close()  # doctest: +SKIP
    """
    if include == "pandas":
        return [n._v_pathname for n in self.groups()]
    elif include == "native":
        assert self._handle is not None  # mypy
        # Walk the raw HDF5 tree for native (non-pandas) Table nodes.
        return [
            n._v_pathname for n in self._handle.walk_nodes("/", classname="Table")
        ]
    raise ValueError(
        f"`include` should be either 'pandas' or 'native' but is '{include}'"
    )
|
Return a list of keys corresponding to objects stored in HDFStore.
Parameters
----------
include : str, default 'pandas'
When include equals 'pandas' return pandas objects.
When include equals 'native' return native HDF5 Table objects.
Returns
-------
list
List of ABSOLUTE path-names (e.g. have the leading '/').
Raises
------
raises ValueError if include has an illegal value
See Also
--------
HDFStore.info : Prints detailed information on the store.
HDFStore.get_node : Returns the node with the key.
HDFStore.get_storer : Returns the storer object for a key.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
>>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
>>> store.put("data", df) # doctest: +SKIP
>>> store.get("data") # doctest: +SKIP
>>> print(store.keys()) # doctest: +SKIP
['/data1', '/data2']
>>> store.close() # doctest: +SKIP
|
python
|
pandas/io/pytables.py
| 662
|
[
"self",
"include"
] |
list[str]
| true
| 3
| 8.32
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
transformEnumMember
|
/**
 * Transforms an enum member into a statement: `E[E["Name"] = value] = "Name"`
 * for members without string values (which get a reverse mapping), or just
 * `E["Name"] = value` for string-valued members.
 *
 * @param member The enum member node.
 */
function transformEnumMember(member: EnumMember): Statement {
    // enums don't support computed properties
    // we pass false as 'generateNameForComputedPropertyName' for a backward compatibility purposes
    // old emitter always generate 'expression' part of the name as-is.
    const name = getExpressionForPropertyName(member, /*generateNameForComputedPropertyName*/ false);
    const evaluated = resolver.getEnumMemberValue(member);
    const valueExpression = transformEnumMemberDeclarationValue(member, evaluated?.value);
    // Inner assignment: E["Name"] = value
    const innerAssignment = factory.createAssignment(
        factory.createElementAccessExpression(
            currentNamespaceContainerName,
            name,
        ),
        valueExpression,
    );
    // String-valued (or syntactically-string) members get no reverse
    // mapping; all others are wrapped as E[inner] = "Name".
    const outerAssignment = typeof evaluated?.value === "string" || evaluated?.isSyntacticallyString ?
        innerAssignment :
        factory.createAssignment(
            factory.createElementAccessExpression(
                currentNamespaceContainerName,
                innerAssignment,
            ),
            name,
        );
    // Preserve the original member's source positions on the emitted nodes.
    return setTextRange(
        factory.createExpressionStatement(
            setTextRange(
                outerAssignment,
                member,
            ),
        ),
        member,
    );
}
|
Transforms an enum member into a statement.
@param member The enum member node.
|
typescript
|
src/compiler/transformers/ts.ts
| 1,909
|
[
"member"
] | true
| 3
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
toBooleanObject
|
/**
 * Converts an int to a Boolean using the specified conversion values,
 * checked in the order {@code trueValue}, {@code falseValue},
 * {@code nullValue}.
 *
 * <p>NOTE: This method may return {@code null} and may throw a
 * {@link NullPointerException} if unboxed to a {@code boolean}.</p>
 *
 * @param value the Integer to convert
 * @param trueValue the value to match for {@code true}
 * @param falseValue the value to match for {@code false}
 * @param nullValue the value to match for {@code null}
 * @return Boolean.TRUE, Boolean.FALSE, or {@code null}
 * @throws IllegalArgumentException if no match
 */
public static Boolean toBooleanObject(final int value, final int trueValue, final int falseValue, final int nullValue) {
    final Boolean result;
    if (value == trueValue) {
        result = Boolean.TRUE;
    } else if (value == falseValue) {
        result = Boolean.FALSE;
    } else if (value == nullValue) {
        result = null;
    } else {
        throw new IllegalArgumentException("The Integer did not match any specified value");
    }
    return result;
}
|
Converts an int to a Boolean specifying the conversion values.
<p>NOTE: This method may return {@code null} and may throw a {@link NullPointerException}
if unboxed to a {@code boolean}.</p>
<p>The checks are done first for the {@code trueValue}, then for the {@code falseValue} and
finally for the {@code nullValue}.</p>
<pre>
BooleanUtils.toBooleanObject(0, 0, 2, 3) = Boolean.TRUE
BooleanUtils.toBooleanObject(0, 0, 0, 3) = Boolean.TRUE
BooleanUtils.toBooleanObject(0, 0, 0, 0) = Boolean.TRUE
BooleanUtils.toBooleanObject(2, 1, 2, 3) = Boolean.FALSE
BooleanUtils.toBooleanObject(2, 1, 2, 2) = Boolean.FALSE
BooleanUtils.toBooleanObject(3, 1, 2, 3) = null
</pre>
@param value the Integer to convert
@param trueValue the value to match for {@code true}
@param falseValue the value to match for {@code false}
@param nullValue the value to match for {@code null}
@return Boolean.TRUE, Boolean.FALSE, or {@code null}
@throws IllegalArgumentException if no match
|
java
|
src/main/java/org/apache/commons/lang3/BooleanUtils.java
| 612
|
[
"value",
"trueValue",
"falseValue",
"nullValue"
] |
Boolean
| true
| 4
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
centroidCount
|
/**
 * Returns the centroid count of whichever digest implementation is
 * currently active (merging digest once created, sorting digest before).
 */
@Override
public int centroidCount() {
    return mergingDigest != null ? mergingDigest.centroidCount() : sortingDigest.centroidCount();
}
|
Similar to the constructor above. The limit for switching from a {@link SortingDigest} to a {@link MergingDigest} implementation
is calculated based on the passed compression factor.
@param compression The compression factor for the MergingDigest
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/HybridDigest.java
| 190
|
[] | true
| 2
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
_cumcount_array
|
def _cumcount_array(self, ascending: bool = True) -> np.ndarray:
    """
    Cumulative count of each row within its group.

    Parameters
    ----------
    ascending : bool, default True
        If False, number in reverse, from length of group - 1 to 0.

    Returns
    -------
    np.ndarray
        Per-row position within its group (int64), or float64 with NaN
        for rows whose group key was dropped as NA.

    Notes
    -----
    this is currently implementing sort=False
    (though the default is sort=True) for groupby in general
    """
    ids = self._grouper.ids
    ngroups = self._grouper.ngroups
    # Sort row positions so members of each group become contiguous.
    sorter = get_group_index_sorter(ids, ngroups)
    ids, count = ids[sorter], len(ids)
    if count == 0:
        return np.empty(0, dtype=np.int64)
    # run marks the first row of each group in sorted order.
    run = np.r_[True, ids[:-1] != ids[1:]]
    # rep is the length of each group.
    rep = np.diff(np.r_[np.nonzero(run)[0], count])
    # Cumulative count of non-start rows; still offset by earlier groups.
    out = (~run).cumsum()
    if ascending:
        # Subtract each group's starting offset -> 0, 1, ..., size-1.
        out -= np.repeat(out[run], rep)
    else:
        # Subtract from each group's ending offset -> size-1, ..., 1, 0.
        out = np.repeat(out[np.r_[run[1:], True]], rep) - out
    if self._grouper.has_dropped_na:
        # Rows with null keys (ids == -1) become NaN, forcing float64.
        out = np.where(ids == -1, np.nan, out.astype(np.float64, copy=False))
    else:
        out = out.astype(np.int64, copy=False)
    # Invert the sort to restore the original row order.
    rev = np.empty(count, dtype=np.intp)
    rev[sorter] = np.arange(count, dtype=np.intp)
    return out[rev]
|
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Notes
-----
this is currently implementing sort=False
(though the default is sort=True) for groupby in general
|
python
|
pandas/core/groupby/groupby.py
| 1,938
|
[
"self",
"ascending"
] |
np.ndarray
| true
| 6
| 6.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
close
|
/**
 * Closes all registered {@code Closeable} instances in LIFO order. If an
 * exception was recorded earlier ({@code thrown}), any exceptions from
 * closing are suppressed against it; otherwise the first close failure is
 * rethrown and subsequent failures are suppressed against that one.
 */
@Override
public void close() throws IOException {
    // Start from the previously recorded exception, if any; close failures
    // are either promoted into this slot or suppressed against it.
    Throwable throwable = thrown;
    // close closeables in LIFO order
    while (!stack.isEmpty()) {
        Closeable closeable = stack.removeFirst();
        try {
            closeable.close();
        } catch (Throwable e) {
            if (throwable == null) {
                throwable = e;
            } else {
                suppressor.suppress(closeable, throwable, e);
            }
        }
    }
    // Rethrow only when the try block succeeded but a close failed; a
    // try-block exception has already been propagated to the caller.
    if (thrown == null && throwable != null) {
        throwIfInstanceOf(throwable, IOException.class);
        throwIfUnchecked(throwable);
        throw new AssertionError(throwable); // not possible
    }
}
|
Closes all {@code Closeable} instances that have been added to this {@code Closer}. If an
exception was thrown in the try block and passed to one of the {@code exceptionThrown} methods,
any exceptions thrown when attempting to close a closeable will be suppressed. Otherwise, the
<i>first</i> exception to be thrown from an attempt to close a closeable will be thrown and any
additional exceptions that are thrown after that will be suppressed.
|
java
|
android/guava/src/com/google/common/io/Closer.java
| 195
|
[] |
void
| true
| 6
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
find
|
def find(a, sub, start=0, end=None):
    """
    For each element, return the lowest index in the string where
    substring ``sub`` is found, such that ``sub`` is contained in the
    range [``start``, ``end``).

    Parameters
    ----------
    a : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype
    sub : array_like, with `np.bytes_` or `np.str_` dtype
        The substring to search for.
    start, end : array_like, with any integer dtype
        The range to look in, interpreted as in slice notation.

    Returns
    -------
    y : ndarray
        Output array of ints

    See Also
    --------
    str.find

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array(["NumPy is a Python library"])
    >>> np.strings.find(a, "Python")
    array([11])
    """
    # A missing end bound means "search to the end of the string".
    if end is None:
        end = MAX
    return _find_ufunc(a, sub, start, end)
|
For each element, return the lowest index in the string where
substring ``sub`` is found, such that ``sub`` is contained in the
range [``start``, ``end``).
Parameters
----------
a : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype
sub : array_like, with `np.bytes_` or `np.str_` dtype
The substring to search for.
start, end : array_like, with any integer dtype
The range to look in, interpreted as in slice notation.
Returns
-------
y : ndarray
Output array of ints
See Also
--------
str.find
Examples
--------
>>> import numpy as np
>>> a = np.array(["NumPy is a Python library"])
>>> np.strings.find(a, "Python")
array([11])
|
python
|
numpy/_core/strings.py
| 256
|
[
"a",
"sub",
"start",
"end"
] | false
| 2
| 7.68
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
of
|
/**
 * Creates a closed range (both endpoints inclusive) from the given
 * {@code long} values; the arguments may be passed in either order.
 *
 * @param fromInclusive the first value that defines the edge of the range, inclusive.
 * @param toInclusive the second value that defines the edge of the range, inclusive.
 * @return the range object, not null.
 */
public static LongRange of(final long fromInclusive, final long toInclusive) {
    // Box and delegate to the Long overload.
    return of(Long.valueOf(fromInclusive), Long.valueOf(toInclusive));
}
|
Creates a closed range with the specified minimum and maximum values (both inclusive).
<p>
The range uses the natural ordering of the elements to determine where values lie in the range.
</p>
<p>
The arguments may be passed in the order (min,max) or (max,min). The getMinimum and getMaximum methods will return the correct values.
</p>
@param fromInclusive the first value that defines the edge of the range, inclusive.
@param toInclusive the second value that defines the edge of the range, inclusive.
@return the range object, not null.
|
java
|
src/main/java/org/apache/commons/lang3/LongRange.java
| 50
|
[
"fromInclusive",
"toInclusive"
] |
LongRange
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
loadAnnotationType
|
/**
 * Loads the annotation type with the given fully qualified class name,
 * using this class's ClassLoader.
 * @param name the fully qualified annotation class name
 * @return the resolved annotation type, or {@code null} if the class is
 * not present on the classpath
 */
@SuppressWarnings("unchecked")
private static @Nullable Class<? extends Annotation> loadAnnotationType(String name) {
    try {
        return (Class<? extends Annotation>)
                ClassUtils.forName(name, CommonAnnotationBeanPostProcessor.class.getClassLoader());
    }
    catch (ClassNotFoundException ex) {
        // Annotation library not on the classpath -> treat as absent.
        return null;
    }
}
|
Loads the annotation type with the given fully qualified class name using
this class's ClassLoader.
@param name the fully qualified annotation class name
@return the resolved annotation type, or {@code null} if the class is not
present on the classpath
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/CommonAnnotationBeanPostProcessor.java
| 578
|
[
"name"
] | true
| 2
| 7.28
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
abbreviate
|
/**
 * Abbreviates a String using ellipses ("..."), keeping at most
 * {@code maxWidth} characters; delegates to the four-argument overload
 * with a zero offset.
 *
 * @param str the String to check, may be null.
 * @param maxWidth maximum length of result String, must be at least 4.
 * @return abbreviated String, {@code null} if null String input.
 * @throws IllegalArgumentException if the width is too small.
 */
public static String abbreviate(final String str, final int maxWidth) {
    return abbreviate(str, ELLIPSIS3, 0, maxWidth);
}
|
Abbreviates a String using ellipses. This will convert "Now is the time for all good men" into "Now is the time for..."
<p>
Specifically:
</p>
<ul>
<li>If the number of characters in {@code str} is less than or equal to {@code maxWidth}, return {@code str}.</li>
<li>Else abbreviate it to {@code (substring(str, 0, max - 3) + "...")}.</li>
<li>If {@code maxWidth} is less than {@code 4}, throw an {@link IllegalArgumentException}.</li>
<li>In no case will it return a String of length greater than {@code maxWidth}.</li>
</ul>
<pre>
StringUtils.abbreviate(null, *) = null
StringUtils.abbreviate("", 4) = ""
StringUtils.abbreviate("abcdefg", 6) = "abc..."
StringUtils.abbreviate("abcdefg", 7) = "abcdefg"
StringUtils.abbreviate("abcdefg", 8) = "abcdefg"
StringUtils.abbreviate("abcdefg", 4) = "a..."
StringUtils.abbreviate("abcdefg", 3) = Throws {@link IllegalArgumentException}.
</pre>
@param str the String to check, may be null.
@param maxWidth maximum length of result String, must be at least 4.
@return abbreviated String, {@code null} if null String input.
@throws IllegalArgumentException if the width is too small.
@since 2.0
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 234
|
[
"str",
"maxWidth"
] |
String
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
maybeAddWriteInterestAfterReauth
|
/**
 * Re-registers OP_WRITE interest after re-authentication when a send is
 * pending, so that the interrupted write operation is resumed.
 */
public void maybeAddWriteInterestAfterReauth() {
    if (send != null)
        this.transportLayer.addInterestOps(SelectionKey.OP_WRITE);
}
|
Maybe add write interest after re-authentication. This is to ensure that any pending write operation
is resumed.
|
java
|
clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java
| 690
|
[] |
void
| true
| 2
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
listConsumerGroupOffsets
|
/**
 * Lists the consumer group offsets available in the cluster for the
 * specified groups, using default options; convenience overload of
 * {@code listConsumerGroupOffsets(Map, ListConsumerGroupOffsetsOptions)}.
 *
 * @param groupSpecs Map of consumer group ids to a spec that specifies the
 * topic partitions of the group to list offsets for.
 * @return The ListConsumerGroupOffsetsResult.
 */
default ListConsumerGroupOffsetsResult listConsumerGroupOffsets(Map<String, ListConsumerGroupOffsetsSpec> groupSpecs) {
    return listConsumerGroupOffsets(groupSpecs, new ListConsumerGroupOffsetsOptions());
}
|
List the consumer group offsets available in the cluster for the specified groups with the default options.
<p>
This is a convenience method for
{@link #listConsumerGroupOffsets(Map, ListConsumerGroupOffsetsOptions)} with default options.
@param groupSpecs Map of consumer group ids to a spec that specifies the topic partitions of the group to list offsets for.
@return The ListConsumerGroupOffsetsResult.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 949
|
[
"groupSpecs"
] |
ListConsumerGroupOffsetsResult
| true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
hermvander2d
|
def hermvander2d(x, y, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Return the pseudo-Vandermonde matrix of degrees `deg` and sample
    points ``(x, y)``, defined by

    .. math:: V[..., (deg[1] + 1)*i + j] = H_i(x) * H_j(y),

    where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices
    of `V` index the points ``(x, y)`` and the last index encodes the
    degrees of the Hermite polynomials.

    If ``V = hermvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
    correspond to the elements of a 2-D coefficient array `c` of shape
    (xdeg + 1, ydeg + 1) in the order

    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...

    and ``np.dot(V, c.flat)`` and ``hermval2d(x, y, c)`` will be the same
    up to roundoff. This equivalence is useful both for least squares
    fitting and for the evaluation of a large number of 2-D Hermite
    series of the same degrees and sample points.

    Parameters
    ----------
    x, y : array_like
        Arrays of point coordinates, all of the same shape. The dtypes
        will be converted to either float64 or complex128 depending on
        whether any of the elements are complex. Scalars are converted to
        1-D arrays.
    deg : list of ints
        List of maximum degrees of the form [x_deg, y_deg].

    Returns
    -------
    vander2d : ndarray
        The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
        as the converted `x` and `y`.

    See Also
    --------
    hermvander, hermvander3d, hermval2d, hermval3d

    Examples
    --------
    >>> import numpy as np
    >>> from numpy.polynomial.hermite import hermvander2d
    >>> x = np.array([-1, 0, 1])
    >>> y = np.array([-1, 0, 1])
    >>> hermvander2d(x, y, [2, 2])
    array([[ 1., -2.,  2., -2.,  4., -4.,  2., -4.,  4.],
           [ 1.,  0., -2.,  0.,  0., -0., -2., -0.,  4.],
           [ 1.,  2.,  2.,  2.,  4.,  4.,  2.,  4.,  4.]])
    """
    # One 1-D Hermite Vandermonde builder per axis; the generic helper
    # combines them into the flattened 2-D pseudo-Vandermonde matrix.
    vander_fns = (hermvander, hermvander)
    return pu._vander_nd_flat(vander_fns, (x, y), deg)
|
Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points ``(x, y)``. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., (deg[1] + 1)*i + j] = H_i(x) * H_j(y),
where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices of
`V` index the points ``(x, y)`` and the last index encodes the degrees of
the Hermite polynomials.
If ``V = hermvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``hermval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D Hermite
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
hermvander, hermvander3d, hermval2d, hermval3d
Examples
--------
>>> import numpy as np
>>> from numpy.polynomial.hermite import hermvander2d
>>> x = np.array([-1, 0, 1])
>>> y = np.array([-1, 0, 1])
>>> hermvander2d(x, y, [2, 2])
array([[ 1., -2., 2., -2., 4., -4., 2., -4., 4.],
[ 1., 0., -2., 0., 0., -0., -2., -0., 4.],
[ 1., 2., 2., 2., 4., 4., 2., 4., 4.]])
|
python
|
numpy/polynomial/hermite.py
| 1,185
|
[
"x",
"y",
"deg"
] | false
| 1
| 6.48
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
toString
|
/**
 * Outputs an array as a String, treating {@code null} as an empty array
 * ({@code "{}"}); delegates to the two-argument overload.
 *
 * @param array the array to get a toString for, may be {@code null}.
 * @return a String representation of the array, '{}' if null array input.
 */
public static String toString(final Object array) {
    return toString(array, "{}");
}
|
Outputs an array as a String, treating {@code null} as an empty array.
<p>
Multi-dimensional arrays are handled correctly, including
multi-dimensional primitive arrays.
</p>
<p>
The format is that of Java source code, for example {@code {a,b}}.
</p>
@param array the array to get a toString for, may be {@code null}.
@return a String representation of the array, '{}' if null array input.
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 9,248
|
[
"array"
] |
String
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
summarize_results_outside_of_folded_logs
|
def summarize_results_outside_of_folded_logs(
    outputs: list[Output],
    results: list[ApplyResult],
    summarize_on_ci: SummarizeAfter,
    summary_start_regexp: str | None = None,
):
    """
    Print summary of the results outside the folded logs in CI.

    :param outputs: List of Output objects containing file names and titles.
    :param results: List of ApplyResult objects containing the results of the tasks.
    :param summarize_on_ci: Determines when to summarize the parallel jobs when
        they are completed in CI, outside the folded CI output.
    :param summary_start_regexp: The regexp that determines the line after which
        outputs should be printed as summary, so that you do not have to look at
        the folded details of the run in CI.
    """
    if summarize_on_ci == SummarizeAfter.NO_SUMMARY:
        return
    start_pattern = None if summary_start_regexp is None else re.compile(summary_start_regexp)
    for idx, task_result in enumerate(results):
        failed = task_result.get()[0] != 0
        wanted_modes = [
            SummarizeAfter.BOTH,
            SummarizeAfter.FAILURE if failed else SummarizeAfter.SUCCESS,
        ]
        if summarize_on_ci not in wanted_modes:
            continue
        emitting = False
        log_text = Path(outputs[idx].file_name).read_bytes().decode(errors="ignore")
        for line in log_text.splitlines():
            # Start emitting at the first line matching the start pattern
            # (or immediately when no pattern was given); print the summary
            # header exactly once at that point.
            if not emitting and (start_pattern is None or start_pattern.match(remove_ansi_colours(line))):
                emitting = True
                get_console().print(f"\n[info]Summary: {outputs[idx].escaped_title:<30}:\n")
            if emitting:
                print(line)
|
Print summary of the results outside the folded logs in CI.
:param outputs: List of Output objects containing file names and titles.
:param results: List of ApplyResult objects containing the results of the tasks.
:param summarize_on_ci: Determines when to summarize the parallel jobs when they are completed in
CI, outside the folded CI output.
:param summary_start_regexp: The regexp that determines line after which
outputs should be printed as summary, so that you do not have to look at the folded details of
the run in CI.
|
python
|
dev/breeze/src/airflow_breeze/utils/parallel.py
| 487
|
[
"outputs",
"results",
"summarize_on_ci",
"summary_start_regexp"
] | true
| 11
| 6.72
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
formatAsBytes
|
/**
 * Formats the given log event and encodes the result with the supplied
 * charset.
 *
 * @param event the log event to write
 * @param charset the charset used to encode the formatted event
 * @return the formatted log event bytes
 */
default byte[] formatAsBytes(E event, Charset charset) {
    return format(event).getBytes(charset);
}
|
Formats the given log event to a byte array.
@param event the log event to write
@param charset the charset
@return the formatted log event bytes
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/structured/StructuredLogFormatter.java
| 61
|
[
"event",
"charset"
] | true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
readListUnsafe
|
private static List<Object> readListUnsafe(XContentParser parser, Supplier<Map<String, Object>> mapFactory) throws IOException {
assert parser.currentToken() == Token.START_ARRAY;
ArrayList<Object> list = new ArrayList<>();
for (Token token = parser.nextToken(); token != null && token != XContentParser.Token.END_ARRAY; token = parser.nextToken()) {
list.add(readValueUnsafe(token, parser, mapFactory));
}
return list;
}
|
Checks if the next current token in the supplied parser is a map start for a non-empty map.
Skips to the next token if the parser does not yet have a current token (i.e. {@link #currentToken()} returns {@code null}) and then
checks it.
@return the first key in the map if a non-empty map start is found
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/support/AbstractXContentParser.java
| 399
|
[
"parser",
"mapFactory"
] | true
| 3
| 6.72
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
of
|
public static RegisteredBean of(ConfigurableListableBeanFactory beanFactory, String beanName) {
Assert.notNull(beanFactory, "'beanFactory' must not be null");
Assert.hasLength(beanName, "'beanName' must not be empty");
return new RegisteredBean(beanFactory, () -> beanName, false,
() -> (RootBeanDefinition) beanFactory.getMergedBeanDefinition(beanName),
null);
}
|
Create a new {@link RegisteredBean} instance for a regular bean.
@param beanFactory the source bean factory
@param beanName the bean name
@return a new {@link RegisteredBean} instance
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/RegisteredBean.java
| 82
|
[
"beanFactory",
"beanName"
] |
RegisteredBean
| true
| 1
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getTarget
|
@Override
public final synchronized @Nullable Object getTarget() {
if ((refreshCheckDelayElapsed() && requiresRefresh()) || this.targetObject == null) {
refresh();
}
return this.targetObject;
}
|
Set the delay between refresh checks, in milliseconds.
Default is -1, indicating no refresh checks at all.
<p>Note that an actual refresh will only happen when
{@link #requiresRefresh()} returns {@code true}.
|
java
|
spring-aop/src/main/java/org/springframework/aop/target/dynamic/AbstractRefreshableTargetSource.java
| 76
|
[] |
Object
| true
| 4
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
custom
|
public static <L> Striped<L> custom(int stripes, Supplier<L> supplier) {
return new CompactStriped<>(stripes, supplier);
}
|
Creates a {@code Striped<L>} with eagerly initialized, strongly referenced locks. Every lock is
obtained from the passed supplier.
@param stripes the minimum number of stripes (locks) required
@param supplier a {@code Supplier<L>} object to obtain locks from
@return a new {@code Striped<L>}
@since 33.5.0
|
java
|
android/guava/src/com/google/common/util/concurrent/Striped.java
| 197
|
[
"stripes",
"supplier"
] | true
| 1
| 6.96
|
google/guava
| 51,352
|
javadoc
| false
|
|
visitBinaryExpression
|
function visitBinaryExpression(node: BinaryExpression, expressionResultIsUnused: boolean): Expression {
if (isDestructuringAssignment(node) && containsObjectRestOrSpread(node.left)) {
return flattenDestructuringAssignment(
node,
visitor,
context,
FlattenLevel.ObjectRest,
!expressionResultIsUnused,
);
}
if (node.operatorToken.kind === SyntaxKind.CommaToken) {
return factory.updateBinaryExpression(
node,
visitNode(node.left, visitorWithUnusedExpressionResult, isExpression),
node.operatorToken,
visitNode(node.right, expressionResultIsUnused ? visitorWithUnusedExpressionResult : visitor, isExpression),
);
}
return visitEachChild(node, visitor, context);
}
|
Visits a BinaryExpression that contains a destructuring assignment.
@param node A BinaryExpression node.
@param expressionResultIsUnused Indicates the result of an expression is unused by the parent node (i.e., the left side of a comma or the
expression of an `ExpressionStatement`).
|
typescript
|
src/compiler/transformers/es2018.ts
| 601
|
[
"node",
"expressionResultIsUnused"
] | true
| 5
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
isin
|
def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]:
"""
Compute boolean array of whether each value is found in the
passed set of values.
Parameters
----------
values : np.ndarray or ExtensionArray
Returns
-------
ndarray[bool]
"""
if values.dtype.kind in "fiuc":
# TODO: de-duplicate with equals, validate_comparison_value
return np.zeros(self.shape, dtype=bool)
values = ensure_wrapped_if_datetimelike(values)
if not isinstance(values, type(self)):
if values.dtype == object:
values = lib.maybe_convert_objects(
values, # type: ignore[arg-type]
convert_non_numeric=True,
dtype_if_all_nat=self.dtype,
)
if values.dtype != object:
return self.isin(values)
else:
# TODO: Deprecate this case
# https://github.com/pandas-dev/pandas/pull/58645/files#r1604055791
return isin(self.astype(object), values)
return np.zeros(self.shape, dtype=bool)
if self.dtype.kind in "mM":
self = cast("DatetimeArray | TimedeltaArray", self)
# error: "DatetimeLikeArrayMixin" has no attribute "as_unit"
values = values.as_unit(self.unit) # type: ignore[attr-defined]
try:
# error: Argument 1 to "_check_compatible_with" of "DatetimeLikeArrayMixin"
# has incompatible type "ExtensionArray | ndarray[Any, Any]"; expected
# "Period | Timestamp | Timedelta | NaTType"
self._check_compatible_with(values) # type: ignore[arg-type]
except (TypeError, ValueError):
# Includes tzawareness mismatch and IncompatibleFrequencyError
return np.zeros(self.shape, dtype=bool)
# error: Item "ExtensionArray" of "ExtensionArray | ndarray[Any, Any]"
# has no attribute "asi8"
return isin(self.asi8, values.asi8) # type: ignore[union-attr]
|
Compute boolean array of whether each value is found in the
passed set of values.
Parameters
----------
values : np.ndarray or ExtensionArray
Returns
-------
ndarray[bool]
|
python
|
pandas/core/arrays/datetimelike.py
| 767
|
[
"self",
"values"
] |
npt.NDArray[np.bool_]
| true
| 7
| 6.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
processAssignmentReceived
|
protected void processAssignmentReceived(Map<Uuid, SortedSet<Integer>> assignment) {
replaceTargetAssignmentWithNewAssignment(assignment);
if (!targetAssignmentReconciled()) {
// Transition the member to RECONCILING when receiving a new target
// assignment from the broker, different from the current assignment. Note that the
// reconciliation might not be triggered just yet because of missing metadata.
transitionTo(MemberState.RECONCILING);
} else {
// Same assignment received, nothing to reconcile.
log.debug("Target assignment {} received from the broker is equals to the member " +
"current assignment {}. Nothing to reconcile.",
currentTargetAssignment, currentAssignment);
// Make sure we transition the member back to STABLE if it was RECONCILING (ex.
// member was RECONCILING unresolved assignments that were just removed by the
// broker), or JOINING (member joining received empty assignment).
if (state == MemberState.RECONCILING || state == MemberState.JOINING) {
transitionTo(MemberState.STABLE);
}
}
}
|
This will process the assignment received if it is different from the member's current
assignment. If a new assignment is received, this will make sure reconciliation is attempted
on the next call of `poll`. If another reconciliation is currently in process, the first `poll`
after that reconciliation will trigger the new reconciliation.
@param assignment Assignment received from the broker.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java
| 344
|
[
"assignment"
] |
void
| true
| 4
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
get_dags_count
|
def get_dags_count(performance_dag_conf: dict[str, str]) -> int:
"""
Return the number of test DAGs based on given performance DAG configuration.
:param performance_dag_conf: dict with environment variables as keys and their values as values
:return: number of test DAGs
:rtype: int
"""
dag_files_count = int(
get_performance_dag_environment_variable(performance_dag_conf, "PERF_DAG_FILES_COUNT")
)
dags_per_dag_file = int(get_performance_dag_environment_variable(performance_dag_conf, "PERF_DAGS_COUNT"))
return dag_files_count * dags_per_dag_file
|
Return the number of test DAGs based on given performance DAG configuration.
:param performance_dag_conf: dict with environment variables as keys and their values as values
:return: number of test DAGs
:rtype: int
|
python
|
performance/src/performance_dags/performance_dag/performance_dag_utils.py
| 467
|
[
"performance_dag_conf"
] |
int
| true
| 1
| 6.88
|
apache/airflow
| 43,597
|
sphinx
| false
|
annotationArrayMemberEquals
|
private static boolean annotationArrayMemberEquals(final Annotation[] a1, final Annotation[] a2) {
if (a1.length != a2.length) {
return false;
}
for (int i = 0; i < a1.length; i++) {
if (!equals(a1[i], a2[i])) {
return false;
}
}
return true;
}
|
Helper method for comparing two arrays of annotations.
@param a1 the first array
@param a2 the second array
@return a flag whether these arrays are equal
|
java
|
src/main/java/org/apache/commons/lang3/AnnotationUtils.java
| 99
|
[
"a1",
"a2"
] | true
| 4
| 8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
all
|
public KafkaFuture<Collection<ClientMetricsResourceListing>> all() {
final KafkaFutureImpl<Collection<ClientMetricsResourceListing>> result = new KafkaFutureImpl<>();
future.whenComplete((listings, throwable) -> {
if (throwable != null) {
result.completeExceptionally(throwable);
} else {
result.complete(listings);
}
});
return result;
}
|
Returns a future that yields either an exception, or the full set of client metrics
listings.
In the event of a failure, the future yields nothing but the first exception which
occurred.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/ListClientMetricsResourcesResult.java
| 45
|
[] | true
| 2
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
createClassLoader
|
private ClassLoader createClassLoader(URL[] urls) {
ClassLoader parent = getClass().getClassLoader();
return new LaunchedClassLoader(isExploded(), getArchive(), urls, parent);
}
|
Create a classloader for the specified archives.
@param urls the classpath URLs
@return the classloader
@throws Exception if the classloader cannot be created
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/launch/Launcher.java
| 85
|
[
"urls"
] |
ClassLoader
| true
| 1
| 6.8
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
addToEnvironment
|
public static void addToEnvironment(ConfigurableEnvironment environment, Log logger) {
MutablePropertySources sources = environment.getPropertySources();
PropertySource<?> existing = sources.get(RANDOM_PROPERTY_SOURCE_NAME);
if (existing != null) {
logger.trace("RandomValuePropertySource already present");
return;
}
RandomValuePropertySource randomSource = new RandomValuePropertySource(RANDOM_PROPERTY_SOURCE_NAME);
if (sources.get(StandardEnvironment.SYSTEM_ENVIRONMENT_PROPERTY_SOURCE_NAME) != null) {
sources.addAfter(StandardEnvironment.SYSTEM_ENVIRONMENT_PROPERTY_SOURCE_NAME, randomSource);
}
else {
sources.addLast(randomSource);
}
logger.trace("RandomValuePropertySource add to Environment");
}
|
Add a {@link RandomValuePropertySource} to the given {@link Environment}.
@param environment the environment to add the random property source to
@param logger logger used for debug and trace information
@since 4.0.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/env/RandomValuePropertySource.java
| 161
|
[
"environment",
"logger"
] |
void
| true
| 3
| 6.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
containsAny
|
public boolean containsAny(final CharSequence cs, final CharSequence... searchCharSequences) {
return containsAny(this::contains, cs, searchCharSequences);
}
|
Tests if the CharSequence contains any of the CharSequences in the given array.
<p>
A {@code null} {@code cs} CharSequence will return {@code false}. A {@code null} or zero length search array will return {@code false}.
</p>
<p>
Case-sensitive examples
</p>
<pre>
Strings.CS.containsAny(null, *) = false
Strings.CS.containsAny("", *) = false
Strings.CS.containsAny(*, null) = false
Strings.CS.containsAny(*, []) = false
Strings.CS.containsAny("abcd", "ab", null) = true
Strings.CS.containsAny("abcd", "ab", "cd") = true
Strings.CS.containsAny("abc", "d", "abc") = true
</pre>
<p>
Case-insensitive examples
</p>
<pre>
Strings.CI.containsAny(null, *) = false
Strings.CI.containsAny("", *) = false
Strings.CI.containsAny(*, null) = false
Strings.CI.containsAny(*, []) = false
Strings.CI.containsAny("abcd", "ab", null) = true
Strings.CI.containsAny("abcd", "ab", "cd") = true
Strings.CI.containsAny("abc", "d", "abc") = true
Strings.CI.containsAny("abc", "D", "ABC") = true
Strings.CI.containsAny("ABC", "d", "abc") = true
</pre>
@param cs The CharSequence to check, may be null
@param searchCharSequences The array of CharSequences to search for, may be null. Individual CharSequences may be null as well.
@return {@code true} if any of the search CharSequences are found, {@code false} otherwise
|
java
|
src/main/java/org/apache/commons/lang3/Strings.java
| 559
|
[
"cs"
] | true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
getLastAddedBucketIndex
|
long getLastAddedBucketIndex() {
if (positiveBuckets.numBuckets + negativeBuckets.numBuckets > 0) {
return bucketIndices[negativeBuckets.numBuckets + positiveBuckets.numBuckets - 1];
} else {
return Long.MIN_VALUE;
}
}
|
@return the index of the last bucket added successfully via {@link #tryAddBucket(long, long, boolean)},
or {@link Long#MIN_VALUE} if no buckets have been added yet.
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/FixedCapacityExponentialHistogram.java
| 210
|
[] | true
| 2
| 7.6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
rearrange
|
def rearrange(
tensor: Union[torch.Tensor, list[torch.Tensor], tuple[torch.Tensor, ...]],
pattern: str,
**axes_lengths: int,
) -> torch.Tensor:
r"""A native implementation of `einops.rearrange`, a reader-friendly smart element reordering for multidimensional
tensors. This operation includes functionality of transpose (axes permutation), reshape (view), squeeze, unsqueeze,
stack, concatenate and other operations.
See: https://einops.rocks/api/rearrange/
Args:
tensor (Tensor or sequence of Tensor): the tensor(s) to rearrange
pattern (str): the rearrangement pattern
axes_lengths (int): any additional length specifications for dimensions
Returns:
Tensor: the rearranged tensor
Examples:
>>> # suppose we have a set of 32 images in "h w c" format (height-width-channel)
>>> images = torch.randn((32, 30, 40, 3))
>>> # stack along first (batch) axis, output is a single array
>>> rearrange(images, "b h w c -> b h w c").shape
torch.Size([32, 30, 40, 3])
>>> # concatenate images along height (vertical axis), 960 = 32 * 30
>>> rearrange(images, "b h w c -> (b h) w c").shape
torch.Size([960, 40, 3])
>>> # concatenated images along horizontal axis, 1280 = 32 * 40
>>> rearrange(images, "b h w c -> h (b w) c").shape
torch.Size([30, 1280, 3])
>>> # reordered axes to "b c h w" format for deep learning
>>> rearrange(images, "b h w c -> b c h w").shape
torch.Size([32, 3, 30, 40])
>>> # flattened each image into a vector, 3600 = 30 * 40 * 3
>>> rearrange(images, "b h w c -> b (c h w)").shape
torch.Size([32, 3600])
>>> # split each image into 4 smaller (top-left, top-right, bottom-left, bottom-right), 128 = 32 * 2 * 2
>>> rearrange(images, "b (h1 h) (w1 w) c -> (b h1 w1) h w c", h1=2, w1=2).shape
torch.Size([128, 15, 20, 3])
>>> # space-to-depth operation
>>> rearrange(images, "b (h h1) (w w1) c -> b h w (c h1 w1)", h1=2, w1=2).shape
torch.Size([32, 15, 20, 12])
"""
if not isinstance(tensor, torch.Tensor):
tensor = torch.stack(tensor)
rearrange_callable = _create_rearrange_callable(
tensor.ndim, pattern, **axes_lengths
)
return rearrange_callable(tensor)
|
r"""A native implementation of `einops.rearrange`, a reader-friendly smart element reordering for multidimensional
tensors. This operation includes functionality of transpose (axes permutation), reshape (view), squeeze, unsqueeze,
stack, concatenate and other operations.
See: https://einops.rocks/api/rearrange/
Args:
tensor (Tensor or sequence of Tensor): the tensor(s) to rearrange
pattern (str): the rearrangement pattern
axes_lengths (int): any additional length specifications for dimensions
Returns:
Tensor: the rearranged tensor
Examples:
>>> # suppose we have a set of 32 images in "h w c" format (height-width-channel)
>>> images = torch.randn((32, 30, 40, 3))
>>> # stack along first (batch) axis, output is a single array
>>> rearrange(images, "b h w c -> b h w c").shape
torch.Size([32, 30, 40, 3])
>>> # concatenate images along height (vertical axis), 960 = 32 * 30
>>> rearrange(images, "b h w c -> (b h) w c").shape
torch.Size([960, 40, 3])
>>> # concatenated images along horizontal axis, 1280 = 32 * 40
>>> rearrange(images, "b h w c -> h (b w) c").shape
torch.Size([30, 1280, 3])
>>> # reordered axes to "b c h w" format for deep learning
>>> rearrange(images, "b h w c -> b c h w").shape
torch.Size([32, 3, 30, 40])
>>> # flattened each image into a vector, 3600 = 30 * 40 * 3
>>> rearrange(images, "b h w c -> b (c h w)").shape
torch.Size([32, 3600])
>>> # split each image into 4 smaller (top-left, top-right, bottom-left, bottom-right), 128 = 32 * 2 * 2
>>> rearrange(images, "b (h1 h) (w1 w) c -> (b h1 w1) h w c", h1=2, w1=2).shape
torch.Size([128, 15, 20, 3])
>>> # space-to-depth operation
>>> rearrange(images, "b (h h1) (w w1) c -> b h w (c h1 w1)", h1=2, w1=2).shape
torch.Size([32, 15, 20, 12])
|
python
|
functorch/einops/rearrange.py
| 151
|
[
"tensor",
"pattern"
] |
torch.Tensor
| true
| 2
| 9.36
|
pytorch/pytorch
| 96,034
|
google
| false
|
_copyto
|
def _copyto(a, val, mask):
"""
Replace values in `a` with NaN where `mask` is True. This differs from
copyto in that it will deal with the case where `a` is a numpy scalar.
Parameters
----------
a : ndarray or numpy scalar
Array or numpy scalar some of whose values are to be replaced
by val.
val : numpy scalar
Value used a replacement.
mask : ndarray, scalar
Boolean array. Where True the corresponding element of `a` is
replaced by `val`. Broadcasts.
Returns
-------
res : ndarray, scalar
Array with elements replaced or scalar `val`.
"""
if isinstance(a, np.ndarray):
np.copyto(a, val, where=mask, casting='unsafe')
else:
a = a.dtype.type(val)
return a
|
Replace values in `a` with NaN where `mask` is True. This differs from
copyto in that it will deal with the case where `a` is a numpy scalar.
Parameters
----------
a : ndarray or numpy scalar
Array or numpy scalar some of whose values are to be replaced
by val.
val : numpy scalar
Value used a replacement.
mask : ndarray, scalar
Boolean array. Where True the corresponding element of `a` is
replaced by `val`. Broadcasts.
Returns
-------
res : ndarray, scalar
Array with elements replaced or scalar `val`.
|
python
|
numpy/lib/_nanfunctions_impl.py
| 115
|
[
"a",
"val",
"mask"
] | false
| 3
| 6.08
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
of
|
@SafeVarargs
@SuppressWarnings("varargs")
public static <E> ManagedList<E> of(E... elements) {
ManagedList<E> list = new ManagedList<>();
Collections.addAll(list, elements);
return list;
}
|
Create a new instance containing an arbitrary number of elements.
@param elements the elements to be contained in the list
@param <E> the {@code List}'s element type
@return a {@code ManagedList} containing the specified elements
@since 5.3.16
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/ManagedList.java
| 65
|
[] | true
| 1
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
canTraverseWithoutReusingEdge
|
private static boolean canTraverseWithoutReusingEdge(
Graph<?> graph, Object nextNode, @Nullable Object previousNode) {
if (graph.isDirected() || !Objects.equals(previousNode, nextNode)) {
return true;
}
// This falls into the undirected A->B->A case. The Graph interface does not support parallel
// edges, so this traversal would require reusing the undirected AB edge.
return false;
}
|
Determines whether an edge has already been used during traversal. In the directed case a cycle
is always detected before reusing an edge, so no special logic is required. In the undirected
case, we must take care not to "backtrack" over an edge (i.e. going from A to B and then going
from B to A).
|
java
|
android/guava/src/com/google/common/graph/Graphs.java
| 165
|
[
"graph",
"nextNode",
"previousNode"
] | true
| 3
| 6
|
google/guava
| 51,352
|
javadoc
| false
|
|
getAspectClassLoader
|
@Override
public @Nullable ClassLoader getAspectClassLoader() {
if (this.beanFactory instanceof ConfigurableBeanFactory cbf) {
return cbf.getBeanClassLoader();
}
else {
return ClassUtils.getDefaultClassLoader();
}
}
|
Look up the aspect bean from the {@link BeanFactory} and return it.
@see #setAspectBeanName
|
java
|
spring-aop/src/main/java/org/springframework/aop/config/SimpleBeanFactoryAwareAspectInstanceFactory.java
| 70
|
[] |
ClassLoader
| true
| 2
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
unrollVariables
|
public static Type unrollVariables(Map<TypeVariable<?>, Type> typeArguments, final Type type) {
if (typeArguments == null) {
typeArguments = Collections.emptyMap();
}
if (containsTypeVariables(type)) {
if (type instanceof TypeVariable<?>) {
return unrollVariables(typeArguments, typeArguments.get(type));
}
if (type instanceof ParameterizedType) {
final ParameterizedType p = (ParameterizedType) type;
final Map<TypeVariable<?>, Type> parameterizedTypeArguments;
if (p.getOwnerType() == null) {
parameterizedTypeArguments = typeArguments;
} else {
parameterizedTypeArguments = new HashMap<>(typeArguments);
parameterizedTypeArguments.putAll(getTypeArguments(p));
}
final Type[] args = p.getActualTypeArguments();
for (int i = 0; i < args.length; i++) {
final Type unrolled = unrollVariables(parameterizedTypeArguments, args[i]);
if (unrolled != null) {
args[i] = unrolled;
}
}
return parameterizeWithOwner(p.getOwnerType(), (Class<?>) p.getRawType(), args);
}
if (type instanceof WildcardType) {
final WildcardType wild = (WildcardType) type;
return wildcardType().withUpperBounds(unrollBounds(typeArguments, wild.getUpperBounds()))
.withLowerBounds(unrollBounds(typeArguments, wild.getLowerBounds())).build();
}
}
return type;
}
|
Gets a type representing {@code type} with variable assignments "unrolled."
@param typeArguments as from {@link TypeUtils#getTypeArguments(Type, Class)}.
@param type the type to unroll variable assignments for.
@return Type.
@since 3.2
|
java
|
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
| 1,660
|
[
"typeArguments",
"type"
] |
Type
| true
| 9
| 7.44
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
squareDistanceBulk
|
public static void squareDistanceBulk(float[] q, float[] v0, float[] v1, float[] v2, float[] v3, float[] distances) {
if (q.length != v0.length) {
throw new IllegalArgumentException("vector dimensions differ: " + q.length + "!=" + v0.length);
}
if (q.length != v1.length) {
throw new IllegalArgumentException("vector dimensions differ: " + q.length + "!=" + v1.length);
}
if (q.length != v2.length) {
throw new IllegalArgumentException("vector dimensions differ: " + q.length + "!=" + v2.length);
}
if (q.length != v3.length) {
throw new IllegalArgumentException("vector dimensions differ: " + q.length + "!=" + v3.length);
}
if (distances.length != 4) {
throw new IllegalArgumentException("distances array must have length 4, but was: " + distances.length);
}
IMPL.squareDistanceBulk(q, v0, v1, v2, v3, distances);
}
|
Bulk computation of square distances between a query vector and four vectors.Result is stored in the provided distances array.
@param q the query vector
@param v0 the first vector
@param v1 the second vector
@param v2 the third vector
@param v3 the fourth vector
@param distances an array to store the computed square distances, must have length 4
@throws IllegalArgumentException if the dimensions of the vectors do not match or if the distances array does not have length 4
|
java
|
libs/simdvec/src/main/java/org/elasticsearch/simdvec/ESVectorUtil.java
| 320
|
[
"q",
"v0",
"v1",
"v2",
"v3",
"distances"
] |
void
| true
| 6
| 6.56
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
put
|
@CanIgnoreReturnValue
@Nullable V put(
@ParametricNullness R rowKey, @ParametricNullness C columnKey, @ParametricNullness V value);
|
Associates the specified value with the specified keys. If the table already contained a
mapping for those keys, the old value is replaced with the specified value.
@param rowKey row key that the value should be associated with
@param columnKey column key that the value should be associated with
@param value value to be associated with the specified keys
@return the value previously associated with the keys, or {@code null} if no mapping existed
for the keys
|
java
|
android/guava/src/com/google/common/collect/Table.java
| 151
|
[
"rowKey",
"columnKey",
"value"
] |
V
| true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
newInstance
|
@SuppressWarnings("unchecked") // OK, because array and values are of type T
public static <T> T[] newInstance(final Class<T> componentType, final int length) {
return (T[]) Array.newInstance(componentType, length);
}
|
Delegates to {@link Array#newInstance(Class,int)} using generics.
@param <T> The array type.
@param componentType The array class.
@param length the array length
@return The new array.
@throws NullPointerException if the specified {@code componentType} parameter is null.
@since 3.13.0
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 4,239
|
[
"componentType",
"length"
] | true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.