function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
findMethod
|
/**
 * Finds a method with the given name and parameter types, declared on the given
 * class or one of its superclasses. Prefers public methods (via
 * {@code Class.getMethod}) but falls back to {@code findDeclaredMethod}, which
 * may return a protected, package-access, or private method.
 * @param clazz the class to introspect
 * @param methodName the name of the method to locate
 * @param paramTypes the parameter types of the desired method
 * @return the resolved {@link Method}, or {@code null} if none was found
 * @see Class#getMethod
 * @see #findDeclaredMethod
 */
public static @Nullable Method findMethod(Class<?> clazz, String methodName, Class<?>... paramTypes) {
	Method publicMethod;
	try {
		publicMethod = clazz.getMethod(methodName, paramTypes);
	}
	catch (NoSuchMethodException ex) {
		// No public match: fall back to a search of declared methods.
		return findDeclaredMethod(clazz, methodName, paramTypes);
	}
	return publicMethod;
}
|
Find a method with the given method name and the given parameter types,
declared on the given class or one of its superclasses. Prefers public methods,
but will return a protected, package access, or private method too.
<p>Checks {@code Class.getMethod} first, falling back to
{@code findDeclaredMethod}. This allows to find public methods
without issues even in environments with restricted Java security settings.
@param clazz the class to check
@param methodName the name of the method to find
@param paramTypes the parameter types of the method to find
@return the Method object, or {@code null} if not found
@see Class#getMethod
@see #findDeclaredMethod
|
java
|
spring-beans/src/main/java/org/springframework/beans/BeanUtils.java
| 313
|
[
"clazz",
"methodName"
] |
Method
| true
| 2
| 7.92
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
nargminmax
|
def nargminmax(values: ExtensionArray, method: str, axis: AxisInt = 0):
    """
    Implementation of np.argmin/argmax but for ExtensionArray and which
    handles missing values.

    Parameters
    ----------
    values : ExtensionArray
    method : {"argmax", "argmin"}
    axis : int, default 0

    Returns
    -------
    int
    """
    assert method in {"argmax", "argmin"}
    # Plain-numpy reducer; the masking below keeps NA entries out of the result.
    if method == "argmax":
        func = np.argmax
    else:
        func = np.argmin

    mask = np.asarray(isna(values))
    arr_values = values._values_for_argsort()

    if arr_values.ndim <= 1:
        return _nanargminmax(arr_values, mask, func)

    if not mask.any():
        # No missing values: defer directly to numpy along the requested axis.
        return func(arr_values, axis=axis)

    # Reduce slice-by-slice, skipping NA positions within each slice.
    if axis == 1:
        pairs = zip(arr_values, mask, strict=True)
    else:
        pairs = zip(arr_values.T, mask.T, strict=True)
    return np.array([_nanargminmax(row, row_mask, func) for row, row_mask in pairs])
|
Implementation of np.argmin/argmax but for ExtensionArray and which
handles missing values.
Parameters
----------
values : ExtensionArray
method : {"argmax", "argmin"}
axis : int, default 0
Returns
-------
int
|
python
|
pandas/core/sorting.py
| 456
|
[
"values",
"method",
"axis"
] | true
| 6
| 6.08
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
applyRulesToString
|
/**
 * Formats the given Calendar as a String by running this printer's rules
 * against a freshly allocated buffer.
 * @param c the Calendar to format.
 * @return the formatted representation of {@code c}.
 */
private String applyRulesToString(final Calendar c) {
	final StringBuilder buf = new StringBuilder(maxLengthEstimate);
	return applyRules(c, buf).toString();
}
|
Creates a String representation of the given Calendar by applying the rules of this printer to it.
@param c the Calendar to apply the rules to.
@return a String representation of the given Calendar.
|
java
|
src/main/java/org/apache/commons/lang3/time/FastDatePrinter.java
| 1,096
|
[
"c"
] |
String
| true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
partitionImpl
|
/**
 * Divides an iterator into unmodifiable sublists of the given size. When {@code pad}
 * is true, the final list is padded with nulls up to {@code size}; otherwise the
 * final list is truncated to the number of remaining elements. Partitions are
 * produced lazily as the returned iterator is consumed.
 *
 * @param iterator the iterator to partition (consumed lazily)
 * @param size the desired size of each partition; must be positive
 * @param pad whether to null-pad the final partition to {@code size}
 * @return an iterator over unmodifiable lists of the source elements
 */
private static <T extends @Nullable Object> UnmodifiableIterator<List<@Nullable T>> partitionImpl(
    Iterator<T> iterator, int size, boolean pad) {
  checkNotNull(iterator);
  checkArgument(size > 0);
  return new UnmodifiableIterator<List<@Nullable T>>() {
    @Override
    public boolean hasNext() {
      return iterator.hasNext();
    }

    @Override
    public List<@Nullable T> next() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      @SuppressWarnings("unchecked") // we only put Ts in it
      @Nullable T[] array = (@Nullable T[]) new Object[size];
      // Drain up to `size` elements from the source into the array.
      int count = 0;
      for (; count < size && iterator.hasNext(); count++) {
        array[count] = iterator.next();
      }
      // Explicitly null out the unfilled tail of the array.
      for (int i = count; i < size; i++) {
        array[i] = null; // for GWT
      }
      List<@Nullable T> list = unmodifiableList(asList(array));
      // TODO(b/192579700): Use a ternary once it no longer confuses our nullness checker.
      if (pad || count == size) {
        // Full partition, or padding requested: expose the whole array.
        return list;
      } else {
        // Short final partition without padding: trim the null tail.
        return list.subList(0, count);
      }
    }
  };
}
|
Divides an iterator into unmodifiable sublists of the given size, padding the final iterator
with null values if necessary. For example, partitioning an iterator containing {@code [a, b,
c, d, e]} with a partition size of 3 yields {@code [[a, b, c], [d, e, null]]} -- an outer
iterator containing two inner lists of three elements each, all in the original order.
<p>The returned lists implement {@link java.util.RandomAccess}.
@param iterator the iterator to return a partitioned view of
@param size the desired size of each partition
@return an iterator of immutable lists containing the elements of {@code iterator} divided into
partitions (the final iterable may have trailing null elements)
@throws IllegalArgumentException if {@code size} is nonpositive
|
java
|
android/guava/src/com/google/common/collect/Iterators.java
| 626
|
[
"iterator",
"size",
"pad"
] | true
| 7
| 7.92
|
google/guava
| 51,352
|
javadoc
| false
|
|
onPause
|
/**
 * Lifecycle pause callback: stops the managed beans if this processor is
 * currently marked as running, then clears the running flag. No-op otherwise.
 */
@Override
public void onPause() {
	if (!this.running) {
		return;
	}
	stopBeans(true);
	this.running = false;
}
|
Stop all registered beans that implement {@link Lifecycle} and <i>are</i>
currently running. Any bean that implements {@link SmartLifecycle} will be
stopped within its 'phase', and all phases will be ordered from highest to
lowest value. All beans that do not implement {@link SmartLifecycle} will be
stopped in the default phase 0. A bean declared as dependent on another bean
will be stopped before the dependency bean regardless of the declared phase.
|
java
|
spring-context/src/main/java/org/springframework/context/support/DefaultLifecycleProcessor.java
| 327
|
[] |
void
| true
| 2
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getExitCode
|
/**
 * Gets the final exit code to return: the first non-zero code produced by a
 * registered {@link ExitCodeGenerator}. A generator that throws contributes
 * exit code 1 but does not stop the search, so a later non-zero code can
 * still take precedence.
 * @return the resolved exit code (0 if every generator reports 0)
 */
int getExitCode() {
	int result = 0;
	for (ExitCodeGenerator candidate : this.generators) {
		try {
			int code = candidate.getExitCode();
			if (code == 0) {
				continue;
			}
			result = code;
			break;
		}
		catch (Exception ex) {
			// A generator failure is itself treated as an error exit.
			result = 1;
			ex.printStackTrace();
		}
	}
	return result;
}
|
Get the final exit code that should be returned. The final exit code is the first
non-zero exit code that is {@link ExitCodeGenerator#getExitCode generated}.
@return the final exit code.
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ExitCodeGenerators.java
| 92
|
[] | true
| 3
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
endArray
|
/**
 * Ends encoding the current JSON array.
 * @return this stringer, for call chaining.
 * @throws JSONException if there is no open array to close.
 */
public JSONStringer endArray() throws JSONException {
	// Close whichever array scope (empty or non-empty) is currently open.
	JSONStringer result = close(Scope.EMPTY_ARRAY, Scope.NONEMPTY_ARRAY, "]");
	return result;
}
|
Ends encoding the current array.
@return this stringer.
@throws JSONException if processing of json failed
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONStringer.java
| 145
|
[] |
JSONStringer
| true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getExitingExecutorService
|
/**
 * Converts the given ThreadPoolExecutor into an ExecutorService that exits when
 * the application is complete, by using daemon threads and adding a shutdown
 * hook that waits for their completion.
 *
 * <p>This is mainly for fixed thread pools. See {@link Executors#newFixedThreadPool(int)}.
 *
 * @param executor the executor to modify to make sure it exits when the application is finished
 * @param terminationTimeout how long to wait for the executor to finish before terminating the JVM
 * @param timeUnit unit of time for the time parameter
 * @return an unmodifiable version of the input which will not hang the JVM
 */
@J2ktIncompatible
@GwtIncompatible // TODO
@SuppressWarnings("GoodTime") // should accept a java.time.Duration
public static ExecutorService getExitingExecutorService(
    ThreadPoolExecutor executor, long terminationTimeout, TimeUnit timeUnit) {
  Application application = new Application();
  return application.getExitingExecutorService(executor, terminationTimeout, timeUnit);
}
|
Converts the given ThreadPoolExecutor into an ExecutorService that exits when the application
is complete. It does so by using daemon threads and adding a shutdown hook to wait for their
completion.
<p>This is mainly for fixed thread pools. See {@link Executors#newFixedThreadPool(int)}.
@param executor the executor to modify to make sure it exits when the application is finished
@param terminationTimeout how long to wait for the executor to finish before terminating the
JVM
@param timeUnit unit of time for the time parameter
@return an unmodifiable version of the input which will not hang the JVM
|
java
|
android/guava/src/com/google/common/util/concurrent/MoreExecutors.java
| 108
|
[
"executor",
"terminationTimeout",
"timeUnit"
] |
ExecutorService
| true
| 1
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
wrapParseError
|
/**
 * Wraps an {@link IOException} thrown while parsing the given field into an
 * {@link XContentParseException} that records the parser's current token
 * location and prefixes the message with the field name.
 * @param field the field being parsed when the error occurred
 * @param p the parser whose token location is reported
 * @param e the underlying I/O failure, preserved as the cause
 * @param s the human-readable message suffix
 * @return the wrapping exception (returned, not thrown, by this helper)
 */
private static XContentParseException wrapParseError(ParseField field, XContentParser p, IOException e, String s) {
    return new XContentParseException(p.getTokenLocation(), "[" + field + "] " + s, e);
}
|
Wraps an {@link IOException} raised while parsing the given field into an
{@link XContentParseException} that carries the parser's current token location.
@param field the field being parsed when the error occurred
@param p the parser whose token location is reported
@param e the underlying I/O failure, preserved as the cause
@param s the message suffix appended after the field name
@return the wrapping XContentParseException
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/ObjectParser.java
| 553
|
[
"field",
"p",
"e",
"s"
] |
XContentParseException
| true
| 1
| 6.32
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
__or__
|
def __or__(self, other):
    """Chaining operator: combine this signature with ``other`` into a chain.

    Example:
        >>> add.s(2, 2) | add.s(4) | add.s(8)

    Returns:
        chain: Constructs a :class:`~celery.canvas.chain` of the given
            signatures, or :data:`NotImplemented` when ``other`` is not a
            recognized signature type.
    """
    if isinstance(other, _chain):
        # task | chain -> chain (flatten the right-hand chain into one level)
        return _chain(seq_concat_seq(
            (self,), other.unchain_tasks()), app=self._app)
    elif isinstance(other, group):
        # unroll group with one member
        other = maybe_unroll_group(other)
        # task | group() -> chain
        # NOTE(review): this branch passes app=self.app while the other
        # branches use self._app -- presumably equivalent accessors, but
        # worth confirming against the Signature class.
        return _chain(self, other, app=self.app)
    elif isinstance(other, Signature):
        # task | task -> chain
        return _chain(self, other, app=self._app)
    return NotImplemented
|
Chaining operator.
Example:
>>> add.s(2, 2) | add.s(4) | add.s(8)
Returns:
chain: Constructs a :class:`~celery.canvas.chain` of the given signatures.
|
python
|
celery/canvas.py
| 758
|
[
"self",
"other"
] | false
| 4
| 8.56
|
celery/celery
| 27,741
|
unknown
| false
|
|
indexOfDifference
|
/**
 * Compares all CharSequences in an array and returns the index at which the
 * CharSequences begin to differ.
 *
 * <p>For example, {@code indexOfDifference(new String[] {"i am a machine", "i am a robot"}) -> 7}.
 * An array that is null, empty, or of length 1 yields {@code INDEX_NOT_FOUND} (-1),
 * as does an array whose entries are all null or all empty. A null or empty entry
 * alongside non-empty ones yields 0. Arrays whose entries are all equal yield
 * {@code INDEX_NOT_FOUND}; a strict prefix relationship yields the shorter length.
 *
 * @param css array of CharSequences, entries may be null.
 * @return the index where the strings begin to differ; -1 if they are all equal.
 * @since 2.4
 * @since 3.0 Changed signature from indexOfDifference(String...) to indexOfDifference(CharSequence...)
 */
public static int indexOfDifference(final CharSequence... css) {
    if (ArrayUtils.getLength(css) <= 1) {
        return INDEX_NOT_FOUND;
    }
    final int count = css.length;
    boolean sawNull = false;
    boolean sawNonNull = false;
    int minLen = Integer.MAX_VALUE;
    int maxLen = 0;
    // One pass to gather null flags and the shortest/longest lengths, so the
    // comparison loop below never needs per-string bounds checks.
    for (final CharSequence cs : css) {
        if (cs == null) {
            sawNull = true;
            minLen = 0;
        } else {
            sawNonNull = true;
            final int len = cs.length();
            if (len < minLen) {
                minLen = len;
            }
            if (len > maxLen) {
                maxLen = len;
            }
        }
    }
    // All nulls, or all empty strings with no nulls: nothing differs.
    if (!sawNonNull || maxLen == 0 && !sawNull) {
        return INDEX_NOT_FOUND;
    }
    // A null or empty entry alongside non-empty ones differs at position 0.
    if (minLen == 0) {
        return 0;
    }
    // Compare column by column against the first string until a mismatch.
    for (int pos = 0; pos < minLen; pos++) {
        final char reference = css[0].charAt(pos);
        for (int idx = 1; idx < count; idx++) {
            if (css[idx].charAt(pos) != reference) {
                return pos;
            }
        }
    }
    // Identical through the shortest string: varying lengths mean the shorter
    // string's end is the first difference; otherwise all entries are equal.
    return minLen != maxLen ? minLen : INDEX_NOT_FOUND;
}
|
Compares all CharSequences in an array and returns the index at which the CharSequences begin to differ.
<p>
For example, {@code indexOfDifference(new String[] {"i am a machine", "i am a robot"}) -> 7}
</p>
<pre>
StringUtils.indexOfDifference(null) = -1
StringUtils.indexOfDifference(new String[] {}) = -1
StringUtils.indexOfDifference(new String[] {"abc"}) = -1
StringUtils.indexOfDifference(new String[] {null, null}) = -1
StringUtils.indexOfDifference(new String[] {"", ""}) = -1
StringUtils.indexOfDifference(new String[] {"", null}) = 0
StringUtils.indexOfDifference(new String[] {"abc", null, null}) = 0
StringUtils.indexOfDifference(new String[] {null, null, "abc"}) = 0
StringUtils.indexOfDifference(new String[] {"", "abc"}) = 0
StringUtils.indexOfDifference(new String[] {"abc", ""}) = 0
StringUtils.indexOfDifference(new String[] {"abc", "abc"}) = -1
StringUtils.indexOfDifference(new String[] {"abc", "a"}) = 1
StringUtils.indexOfDifference(new String[] {"ab", "abxyz"}) = 2
StringUtils.indexOfDifference(new String[] {"abcde", "abxyz"}) = 2
StringUtils.indexOfDifference(new String[] {"abcde", "xyz"}) = 0
StringUtils.indexOfDifference(new String[] {"xyz", "abcde"}) = 0
StringUtils.indexOfDifference(new String[] {"i am a machine", "i am a robot"}) = 7
</pre>
@param css array of CharSequences, entries may be null.
@return the index where the strings begin to differ; -1 if they are all equal.
@since 2.4
@since 3.0 Changed signature from indexOfDifference(String...) to indexOfDifference(CharSequence...)
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 2,945
|
[] | true
| 13
| 7.52
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
createPartial
|
/**
 * Creates a function that wraps `func` to invoke it with the `this` binding
 * of `thisArg` and `partials` prepended to the arguments it receives.
 *
 * @private
 * @param {Function} func The function to wrap.
 * @param {number} bitmask The bitmask flags. See `createWrap` for more details.
 * @param {*} thisArg The `this` binding of `func`.
 * @param {Array} partials The arguments to prepend to those provided to
 *  the new function.
 * @returns {Function} Returns the new wrapped function.
 */
function createPartial(func, bitmask, thisArg, partials) {
  var isBind = bitmask & WRAP_BIND_FLAG,
      Ctor = createCtor(func);

  function wrapper() {
    var argsIndex = -1,
        argsLength = arguments.length,
        leftIndex = -1,
        leftLength = partials.length,
        // Final argument list: partials first, then the call-site arguments.
        args = Array(leftLength + argsLength),
        // `new wrapper(...)` calls route through the constructor wrapper.
        fn = (this && this !== root && this instanceof wrapper) ? Ctor : func;

    // Copy the partials into the front of `args`.
    while (++leftIndex < leftLength) {
      args[leftIndex] = partials[leftIndex];
    }
    // Append the runtime arguments after the partials.
    while (argsLength--) {
      args[leftIndex++] = arguments[++argsIndex];
    }
    // Honor the bind flag by substituting `thisArg` for the dynamic `this`.
    return apply(fn, isBind ? thisArg : this, args);
  }
  return wrapper;
}
|
Creates a function that wraps `func` to invoke it with the `this` binding
of `thisArg` and `partials` prepended to the arguments it receives.
@private
@param {Function} func The function to wrap.
@param {number} bitmask The bitmask flags. See `createWrap` for more details.
@param {*} thisArg The `this` binding of `func`.
@param {Array} partials The arguments to prepend to those provided to
the new function.
@returns {Function} Returns the new wrapped function.
|
javascript
|
lodash.js
| 5,400
|
[
"func",
"bitmask",
"thisArg",
"partials"
] | false
| 7
| 6.08
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
baseInRange
|
/**
 * The base implementation of `_.inRange` which doesn't coerce arguments.
 *
 * @private
 * @param {number} number The number to check.
 * @param {number} start The start of the range.
 * @param {number} end The end of the range.
 * @returns {boolean} Returns `true` if `number` is in the range, else `false`.
 */
function baseInRange(number, start, end) {
  var lower = nativeMin(start, end),
      upper = nativeMax(start, end);
  // Inclusive lower bound, exclusive upper bound.
  return number >= lower && number < upper;
}
|
The base implementation of `_.inRange` which doesn't coerce arguments.
@private
@param {number} number The number to check.
@param {number} start The start of the range.
@param {number} end The end of the range.
@returns {boolean} Returns `true` if `number` is in the range, else `false`.
|
javascript
|
lodash.js
| 3,160
|
[
"number",
"start",
"end"
] | false
| 2
| 6
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
codegen_static_numels_sub_kernel
|
def codegen_static_numels_sub_kernel(
    self, code: IndentedBuffer, sub_kernel: TritonKernel, num: int
) -> list[str]:
    """
    We get a small speedup from hard coding numels if they are static.

    This code stomps on the passed-in values by writing an constant to the top of the kernel.

    In a kernel like:
    def KERNEL_NAME(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, R0_BLOCK : tl.constexpr):

    We would add
    xnumel = 4096
    rnumel = 768

    After the signature, before the kernel code, if we decided to make these static. As its hardcoded, it becomes
    a better signal to triton on how to unroll and do some static indexing. So, it's not so much that downstream
    knows that its a static numel, as that you just plop a constant into the kernel.

    Returns the list of block-size/numel names that remain dynamic for this
    sub-kernel (and so need per-sub-kernel uniquification). Also appends this
    sub-kernel's launch grid to ``self.grids`` as a side effect.
    """
    grid = []  # launch-grid entries for this sub-kernel (ints or symbolic names)
    uniquify_block_sizes = []  # names that stay dynamic and need suffixing
    for tree in sub_kernel.range_trees:
        simplified_tree_numel = V.graph.sizevars.simplify(tree.numel)
        if isinstance(simplified_tree_numel, (Integer, int)):
            # Static numel: emit it as a constant at the top of the kernel.
            code.writeline(f"{tree.prefix}numel = {int(simplified_tree_numel)}")
        else:
            # Dynamic numel: must already be declared as a dynamic-shape argument.
            assert f"{tree.prefix}numel_{num}" in self.dynamic_shape_args
            uniquify_block_sizes.append(f"{tree.prefix}numel")
        # pyrefly: ignore [missing-argument]
        if not tree.is_reduction:
            # Non-reduction dimensions contribute to the launch grid.
            if isinstance(simplified_tree_numel, (Integer, int)):
                grid.append(int(simplified_tree_numel))
            else:
                # pyrefly: ignore [bad-argument-type]
                grid.append(f"{tree.prefix}numel_{num}")
        if tree.is_reduction and sub_kernel.persistent_reduction:
            # Persistent reductions need a compile-time block size covering the
            # whole reduction dimension, so the numel must be static here.
            if isinstance(simplified_tree_numel, (Integer, int)):
                val = int(simplified_tree_numel)
            else:
                raise RuntimeError(
                    "Dynamic shape on reduction dimension is not supported"
                )
            val = next_power_of_2(val)
            code.writeline(
                f"{tree.prefix.upper()}BLOCK_{num}: tl.constexpr = {val}"
            )
            uniquify_block_sizes.append(f"{tree.prefix.upper()}BLOCK")
        if tree.prefix == "x" and sub_kernel.no_x_dim:
            # Kernels with no x dimension pin XBLOCK to 1.
            code.writeline(f"XBLOCK_{num}: tl.constexpr = 1")
            uniquify_block_sizes.append("XBLOCK")
    self.grids.append(grid)
    return uniquify_block_sizes
|
We get a small speedup from hard coding numels if they are static.
This code stomps on the passed-in values by writing an constant to the top of the kernel.
In a kernel like:
def KERNEL_NAME(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, R0_BLOCK : tl.constexpr):
We would add
xnumel = 4096
rnumel = 768
After the signature, before the kernel code, if we decided to make these static. As its hardcoded, it becomes
a better signal to triton on how to unroll and do some static indexing. So, it's not so much that downstream
knows that its a static numel, as that you just plop a constant into the kernel.
|
python
|
torch/_inductor/codegen/triton_combo_kernel.py
| 409
|
[
"self",
"code",
"sub_kernel",
"num"
] |
list[str]
| true
| 13
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
hierarchy
|
/**
 * Gets an {@link Iterable} that can iterate over a class hierarchy in ascending
 * (subclass to superclass) order, optionally interleaving each class's
 * interfaces (each interface reported at most once across the whole iteration).
 * @param type the type to get the class hierarchy from.
 * @param interfacesBehavior switch indicating whether to include or exclude interfaces.
 * @return Iterable an Iterable over the class hierarchy of the given class.
 * @since 3.2
 */
public static Iterable<Class<?>> hierarchy(final Class<?> type, final Interfaces interfacesBehavior) {
    // Lazy iterable over the superclass chain: type, type.getSuperclass(), ...
    final Iterable<Class<?>> classes = () -> {
        final AtomicReference<Class<?>> next = new AtomicReference<>(type);
        return new Iterator<Class<?>>() {
            @Override
            public boolean hasNext() {
                return next.get() != null;
            }

            @Override
            public Class<?> next() {
                // Return the current class and advance to its superclass.
                return next.getAndUpdate(Class::getSuperclass);
            }

            @Override
            public void remove() {
                throw new UnsupportedOperationException();
            }
        };
    };
    if (interfacesBehavior != Interfaces.INCLUDE) {
        return classes;
    }
    // Wrap the superclass iterator so that after each class, its not-yet-seen
    // interfaces (discovered depth-first) are emitted before the next class.
    return () -> {
        final Set<Class<?>> seenInterfaces = new HashSet<>();
        final Iterator<Class<?>> wrapped = classes.iterator();
        return new Iterator<Class<?>>() {
            // Interfaces pending emission for the most recently returned class.
            Iterator<Class<?>> interfaces = Collections.emptyIterator();

            @Override
            public boolean hasNext() {
                return interfaces.hasNext() || wrapped.hasNext();
            }

            @Override
            public Class<?> next() {
                if (interfaces.hasNext()) {
                    final Class<?> nextInterface = interfaces.next();
                    seenInterfaces.add(nextInterface);
                    return nextInterface;
                }
                final Class<?> nextSuperclass = wrapped.next();
                final Set<Class<?>> currentInterfaces = new LinkedHashSet<>();
                // Collect this class's interfaces and their super-interfaces.
                walkInterfaces(currentInterfaces, nextSuperclass);
                interfaces = currentInterfaces.iterator();
                return nextSuperclass;
            }

            @Override
            public void remove() {
                throw new UnsupportedOperationException();
            }

            // Depth-first walk of c's interfaces, adding any not already seen.
            private void walkInterfaces(final Set<Class<?>> addTo, final Class<?> c) {
                for (final Class<?> iface : c.getInterfaces()) {
                    if (!seenInterfaces.contains(iface)) {
                        addTo.add(iface);
                    }
                    walkInterfaces(addTo, iface);
                }
            }
        };
    };
}
|
Gets an {@link Iterable} that can iterate over a class hierarchy in ascending (subclass to superclass) order.
@param type the type to get the class hierarchy from.
@param interfacesBehavior switch indicating whether to include or exclude interfaces.
@return Iterable an Iterable over the class hierarchy of the given class.
@since 3.2
|
java
|
src/main/java/org/apache/commons/lang3/ClassUtils.java
| 1,183
|
[
"type",
"interfacesBehavior"
] | true
| 5
| 7.84
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
union
|
/**
 * Applies a union with the given ClassFilter to this pointcut's class filter.
 * @param other the ClassFilter to apply a union with
 * @return this composable pointcut (for call chaining)
 */
public ComposablePointcut union(ClassFilter other) {
	ClassFilter combined = ClassFilters.union(this.classFilter, other);
	this.classFilter = combined;
	return this;
}
|
Apply a union with the given ClassFilter.
@param other the ClassFilter to apply a union with
@return this composable pointcut (for call chaining)
|
java
|
spring-aop/src/main/java/org/springframework/aop/support/ComposablePointcut.java
| 116
|
[
"other"
] |
ComposablePointcut
| true
| 1
| 6.48
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
isAutowireCandidate
|
/**
 * Checks the autowire-candidate status for the specified bean. Beans without a
 * backing BeanDefinition (manually registered singleton instances) are always
 * considered candidates.
 * @param beanFactory the bean factory to consult
 * @param beanName the name of the bean to check
 * @return whether the specified bean qualifies as an autowire candidate
 */
public static boolean isAutowireCandidate(ConfigurableBeanFactory beanFactory, String beanName) {
	boolean candidate;
	try {
		candidate = beanFactory.getMergedBeanDefinition(beanName).isAutowireCandidate();
	}
	catch (NoSuchBeanDefinitionException ex) {
		// A manually registered singleton instance not backed by a BeanDefinition.
		candidate = true;
	}
	return candidate;
}
|
Check the autowire-candidate status for the specified bean.
@param beanFactory the bean factory
@param beanName the name of the bean to check
@return whether the specified bean qualifies as an autowire candidate
@since 6.2.3
@see org.springframework.beans.factory.config.BeanDefinition#isAutowireCandidate()
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AutowireUtils.java
| 274
|
[
"beanFactory",
"beanName"
] | true
| 2
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
at_most_one
|
def at_most_one(*args) -> bool:
    """
    Return True if at most one of args is "truthy", and False otherwise.

    NOTSET is treated the same as None.

    If user supplies an iterable, we raise ValueError and force them to unpack.
    (NOTE(review): that raise presumably happens inside ``is_arg_set`` -- it is
    not visible in this function body; confirm against that helper.)
    """
    # Count arguments that are both set and truthy; zero or one such args passes.
    return sum(is_arg_set(a) and bool(a) for a in args) in (0, 1)
|
Return True if at most one of args is "truthy", and False otherwise.
NOTSET is treated the same as None.
If user supplies an iterable, we raise ValueError and force them to unpack.
|
python
|
airflow-core/src/airflow/utils/helpers.py
| 278
|
[] |
bool
| true
| 2
| 6.72
|
apache/airflow
| 43,597
|
unknown
| false
|
invokeOnPartitionsRevokedCallback
|
/**
 * Invokes the user's {@link ConsumerRebalanceListener} onPartitionsRevoked
 * callback for the given partitions, but only when a listener is registered
 * and the revoked set is non-empty (preserving existing behaviour of not
 * firing the callback for empty revocations).
 * @param partitionsRevoked the partitions being revoked from this member
 * @return a future that completes when the callback (if any) has been run
 */
private CompletableFuture<Void> invokeOnPartitionsRevokedCallback(Set<TopicPartition> partitionsRevoked) {
	Optional<ConsumerRebalanceListener> listener = subscriptions.rebalanceListener();
	if (partitionsRevoked.isEmpty() || !listener.isPresent()) {
		// Nothing to notify: complete immediately without touching the listener.
		return CompletableFuture.completedFuture(null);
	}
	return enqueueConsumerRebalanceListenerCallback(ON_PARTITIONS_REVOKED, partitionsRevoked);
}
|
Invokes the user's ConsumerRebalanceListener onPartitionsRevoked callback for the
given partitions when a listener is registered and the set is non-empty; otherwise
returns an already-completed future. The callback is intentionally not triggered
for an empty revoked set, to keep the current behaviour.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManager.java
| 352
|
[
"partitionsRevoked"
] | true
| 3
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
toString
|
/**
 * Returns the underlying value rendered as a plain String.
 * @return the string representation of the wrapped value
 */
@Override
public String toString() {
	// Delegate to the underlying buffer's own string conversion.
	String rendered = this.value.toString();
	return rendered;
}
|
Returns the underlying value rendered as a plain String.
@return the string representation of this AnsiString's content
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/shell/AnsiString.java
| 76
|
[] |
String
| true
| 1
| 6.8
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
toStringBuffer
|
/**
 * Gets a StringBuffer copy of this builder's current contents, creating a
 * new instance each time the method is called.
 * @return a new StringBuffer containing the builder's characters
 */
public StringBuffer toStringBuffer() {
	final StringBuffer result = new StringBuffer(size);
	result.append(buffer, 0, size);
	return result;
}
|
Gets a StringBuffer version of the string builder, creating a
new instance each time the method is called.
@return the builder as a StringBuffer
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 2,983
|
[] |
StringBuffer
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
describeTransactions
|
/**
 * Describes the state of a set of transactional IDs, using default options.
 * See {@link #describeTransactions(Collection, DescribeTransactionsOptions)}
 * for more details.
 * @param transactionalIds The set of transactional IDs to query
 * @return The result
 */
default DescribeTransactionsResult describeTransactions(Collection<String> transactionalIds) {
	DescribeTransactionsOptions defaultOptions = new DescribeTransactionsOptions();
	return describeTransactions(transactionalIds, defaultOptions);
}
|
Describe the state of a set of transactional IDs. See
{@link #describeTransactions(Collection, DescribeTransactionsOptions)} for more details.
@param transactionalIds The set of transactional IDs to query
@return The result
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 1,697
|
[
"transactionalIds"
] |
DescribeTransactionsResult
| true
| 1
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
_partial_date_slice
|
def _partial_date_slice(
    self,
    reso: Resolution,
    parsed: datetime,
) -> slice | npt.NDArray[np.intp]:
    """
    Locate the positions matching a partially-specified datetime string.

    Parameters
    ----------
    reso : Resolution
        Resolution implied by the parsed string (e.g. year, month, day).
    parsed : datetime
        Parsed datetime whose bounds at ``reso`` define the match window.

    Returns
    -------
    slice or ndarray[intp]
        A slice when the index is monotonic increasing, otherwise the
        integer positions whose values fall within the window.

    Raises
    ------
    ValueError
        If partial-date slicing is not supported at this resolution.
    KeyError
        If the window lies entirely outside a monotonic increasing index.
    """
    if not self._can_partial_date_slice(reso):
        raise ValueError
    # Inclusive lower/upper bounds of the window implied by the parsed string.
    t1, t2 = self._parsed_string_to_bounds(reso, parsed)
    vals = self._data._ndarray
    unbox = self._data._unbox
    if self.is_monotonic_increasing:
        if len(self) and (
            (t1 < self[0] and t2 < self[0]) or (t1 > self[-1] and t2 > self[-1])
        ):
            # we are out of range
            raise KeyError
        # TODO: does this depend on being monotonic _increasing_?
        # a monotonic (sorted) series can be sliced
        left = vals.searchsorted(unbox(t1), side="left")
        right = vals.searchsorted(unbox(t2), side="right")
        return slice(left, right)
    else:
        # Non-monotonic: fall back to a boolean scan over all values.
        lhs_mask = vals >= unbox(t1)
        rhs_mask = vals <= unbox(t2)
        # try to find the dates
        return (lhs_mask & rhs_mask).nonzero()[0]
|
Parameters
----------
reso : Resolution
parsed : datetime
Returns
-------
slice or ndarray[intp]
|
python
|
pandas/core/indexes/datetimelike.py
| 414
|
[
"self",
"reso",
"parsed"
] |
slice | npt.NDArray[np.intp]
| true
| 9
| 6.08
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
asNetwork
|
/**
 * Returns a {@link Network} view of this graph in which each edge is represented
 * by its {@link EndpointPair}. The view disallows parallel edges and reports an
 * unordered edge order; all other queries delegate to the enclosing graph.
 */
@Override
public Network<N, EndpointPair<N>> asNetwork() {
  return new AbstractNetwork<N, EndpointPair<N>>() {
    @Override
    public Set<N> nodes() {
      return AbstractBaseGraph.this.nodes();
    }

    @Override
    public Set<EndpointPair<N>> edges() {
      return AbstractBaseGraph.this.edges();
    }

    @Override
    public Graph<N> asGraph() {
      // The enclosing instance is itself a Graph or a ValueGraph; unwrap accordingly.
      if (AbstractBaseGraph.this instanceof Graph) {
        return (Graph<N>) AbstractBaseGraph.this;
      } else if (AbstractBaseGraph.this instanceof ValueGraph) {
        return ((ValueGraph<N, ?>) AbstractBaseGraph.this).asGraph();
      }
      throw new UnsupportedOperationException(
          "Unexpected graph type: " + AbstractBaseGraph.this.getClass());
    }

    @Override
    public boolean isDirected() {
      return AbstractBaseGraph.this.isDirected();
    }

    @Override
    public boolean allowsParallelEdges() {
      return false; // Graph doesn't allow parallel edges
    }

    @Override
    public boolean allowsSelfLoops() {
      return AbstractBaseGraph.this.allowsSelfLoops();
    }

    @Override
    public ElementOrder<N> nodeOrder() {
      return AbstractBaseGraph.this.nodeOrder();
    }

    @Override
    public ElementOrder<EndpointPair<N>> edgeOrder() {
      return ElementOrder.unordered(); // Graph doesn't define edge order
    }

    @Override
    public Set<N> adjacentNodes(N node) {
      return AbstractBaseGraph.this.adjacentNodes(node);
    }

    @Override
    public Set<N> predecessors(N node) {
      return AbstractBaseGraph.this.predecessors(node);
    }

    @Override
    public Set<N> successors(N node) {
      return AbstractBaseGraph.this.successors(node);
    }

    @Override
    public Set<EndpointPair<N>> incidentEdges(N node) {
      return AbstractBaseGraph.this.incidentEdges(node);
    }

    @Override
    public Set<EndpointPair<N>> inEdges(N node) {
      checkNotNull(node);
      checkArgument(nodes().contains(node));
      // Each predecessor yields one incoming edge; ordered iff the graph is directed.
      IncidentEdgeSet<N> incident =
          new IncidentEdgeSet<N>(this, node, IncidentEdgeSet.EdgeType.INCOMING) {
            @Override
            public UnmodifiableIterator<EndpointPair<N>> iterator() {
              return Iterators.unmodifiableIterator(
                  Iterators.transform(
                      graph.predecessors(node).iterator(),
                      (N predecessor) ->
                          graph.isDirected()
                              ? EndpointPair.ordered(predecessor, node)
                              : EndpointPair.unordered(predecessor, node)));
            }
          };
      // The returned view is invalidated if `node` is later removed.
      return nodeInvalidatableSet(incident, node);
    }

    @Override
    public Set<EndpointPair<N>> outEdges(N node) {
      checkNotNull(node);
      checkArgument(nodes().contains(node));
      // Mirror image of inEdges(): one outgoing edge per successor.
      IncidentEdgeSet<N> incident =
          new IncidentEdgeSet<N>(this, node, IncidentEdgeSet.EdgeType.OUTGOING) {
            @Override
            public UnmodifiableIterator<EndpointPair<N>> iterator() {
              return Iterators.unmodifiableIterator(
                  Iterators.transform(
                      graph.successors(node).iterator(),
                      (N successor) ->
                          graph.isDirected()
                              ? EndpointPair.ordered(node, successor)
                              : EndpointPair.unordered(node, successor)));
            }
          };
      return nodeInvalidatableSet(incident, node);
    }

    @Override
    public Set<EndpointPair<N>> adjacentEdges(EndpointPair<N> edge) {
      checkArgument(edges().contains(edge));
      N nodeU = edge.nodeU();
      N nodeV = edge.nodeV();
      // All edges touching either endpoint, minus the queried edge itself.
      Set<EndpointPair<N>> endpointPairIncidentEdges =
          Sets.union(incidentEdges(nodeU), incidentEdges(nodeV));
      return nodePairInvalidatableSet(
          Sets.difference(endpointPairIncidentEdges, ImmutableSet.of(edge)), nodeU, nodeV);
    }

    @Override
    public EndpointPair<N> incidentNodes(EndpointPair<N> edge) {
      // In this view an edge *is* its endpoint pair.
      checkArgument(edges().contains(edge));
      return edge;
    }

    // Don't override the existing edge[s]Connecting() or *degree() AbstractNetwork
    // implementations; they call in/outEdges() and should be fine.
  };
}
|
Returns a {@link Network} view of this graph, where each edge is represented by
its {@link EndpointPair}; the view disallows parallel edges and reports an
unordered edge order, delegating all other queries to the enclosing graph.
|
java
|
android/guava/src/com/google/common/graph/AbstractBaseGraph.java
| 175
|
[] | true
| 5
| 6.16
|
google/guava
| 51,352
|
javadoc
| false
|
|
connectTransport
|
/**
 * Obtains a Transport from the underlying JavaMail Session and connects it
 * using the configured host, port, username, and password. An empty-string
 * username (typically an unresolved placeholder) is treated as absent, and in
 * that case an empty-string password is dropped as well.
 * @return the connected Transport object
 * @throws MessagingException if the connect attempt failed
 * @see #getTransport
 * @see #getHost()
 * @see #getPort()
 * @see #getUsername()
 * @see #getPassword()
 */
protected Transport connectTransport() throws MessagingException {
	String user = getUsername();
	String pass = getPassword();
	// An empty username usually stems from an unresolved placeholder: treat it
	// as "no credentials", and likewise null out an empty password with it.
	if ("".equals(user)) {
		user = null;
		pass = "".equals(pass) ? null : pass;
	}
	Transport transport = getTransport(getSession());
	transport.connect(getHost(), getPort(), user, pass);
	return transport;
}
|
Obtain and connect a Transport from the underlying JavaMail Session,
passing in the specified host, port, username, and password.
@return the connected Transport object
@throws MessagingException if the connect attempt failed
@since 4.1.2
@see #getTransport
@see #getHost()
@see #getPort()
@see #getUsername()
@see #getPassword()
|
java
|
spring-context-support/src/main/java/org/springframework/mail/javamail/JavaMailSenderImpl.java
| 456
|
[] |
Transport
| true
| 3
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
nan_to_num
|
def nan_to_num(
    x: Array | float | complex,
    /,
    *,
    fill_value: int | float = 0.0,
    xp: ModuleType | None = None,
) -> Array:
    """
    Replace NaN with zero and infinity with large finite numbers (default behaviour).

    If `x` is inexact, NaN is replaced by zero or by the user defined value in the
    `fill_value` keyword, infinity is replaced by the largest finite floating
    point value representable by ``x.dtype``, and -infinity is replaced by the
    most negative finite floating point value representable by ``x.dtype``.

    For complex dtypes, the above is applied to each of the real and
    imaginary components of `x` separately.

    Parameters
    ----------
    x : array | float | complex
        Input data.
    fill_value : int | float, optional
        Value to be used to fill NaN values. If no value is passed
        then NaN values will be replaced with 0.0.
    xp : array_namespace, optional
        The standard-compatible namespace for `x`. Default: infer.

    Returns
    -------
    array
        `x`, with the non-finite values replaced.

    See Also
    --------
    array_api.isnan : Shows which elements are Not a Number (NaN).

    Examples
    --------
    >>> import array_api_extra as xpx
    >>> import array_api_strict as xp
    >>> xpx.nan_to_num(xp.inf)
    1.7976931348623157e+308
    >>> xpx.nan_to_num(-xp.inf)
    -1.7976931348623157e+308
    >>> xpx.nan_to_num(xp.nan)
    0.0
    >>> x = xp.asarray([xp.inf, -xp.inf, xp.nan, -128, 128])
    >>> xpx.nan_to_num(x)
    array([ 1.79769313e+308, -1.79769313e+308,  0.00000000e+000,  # may vary
           -1.28000000e+002,  1.28000000e+002])
    >>> y = xp.asarray([complex(xp.inf, xp.nan), xp.nan, complex(xp.nan, xp.inf)])
    >>> xpx.nan_to_num(y)
    array([ 1.79769313e+308 +0.00000000e+000j,  # may vary
            0.00000000e+000 +0.00000000e+000j,
            0.00000000e+000 +1.79769313e+308j])
    """
    # A single scalar fill cannot meaningfully fill both the real and the
    # imaginary component, so complex fills are rejected up front.
    if isinstance(fill_value, complex):
        msg = "Complex fill values are not supported."
        raise TypeError(msg)
    xp = array_namespace(x) if xp is None else xp
    # for scalars we want to output an array
    y = xp.asarray(x)
    # Delegate to the backend's native nan_to_num where one exists; only the
    # NaN fill is passed, matching this function's contract for inf/-inf.
    if (
        is_cupy_namespace(xp)
        or is_jax_namespace(xp)
        or is_numpy_namespace(xp)
        or is_torch_namespace(xp)
    ):
        return xp.nan_to_num(y, nan=fill_value)
    return _funcs.nan_to_num(y, fill_value=fill_value, xp=xp)
|
Replace NaN with zero and infinity with large finite numbers (default behaviour).
If `x` is inexact, NaN is replaced by zero or by the user defined value in the
`fill_value` keyword, infinity is replaced by the largest finite floating
point value representable by ``x.dtype``, and -infinity is replaced by the
most negative finite floating point value representable by ``x.dtype``.
For complex dtypes, the above is applied to each of the real and
imaginary components of `x` separately.
Parameters
----------
x : array | float | complex
Input data.
fill_value : int | float, optional
Value to be used to fill NaN values. If no value is passed
then NaN values will be replaced with 0.0.
xp : array_namespace, optional
The standard-compatible namespace for `x`. Default: infer.
Returns
-------
array
`x`, with the non-finite values replaced.
See Also
--------
array_api.isnan : Shows which elements are Not a Number (NaN).
Examples
--------
>>> import array_api_extra as xpx
>>> import array_api_strict as xp
>>> xpx.nan_to_num(xp.inf)
1.7976931348623157e+308
>>> xpx.nan_to_num(-xp.inf)
-1.7976931348623157e+308
>>> xpx.nan_to_num(xp.nan)
0.0
>>> x = xp.asarray([xp.inf, -xp.inf, xp.nan, -128, 128])
>>> xpx.nan_to_num(x)
array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary
-1.28000000e+002, 1.28000000e+002])
>>> y = xp.asarray([complex(xp.inf, xp.nan), xp.nan, complex(xp.nan, xp.inf)])
>>> xpx.nan_to_num(y)
array([ 1.79769313e+308 +0.00000000e+000j, # may vary
0.00000000e+000 +0.00000000e+000j,
0.00000000e+000 +1.79769313e+308j])
|
python
|
sklearn/externals/array_api_extra/_delegation.py
| 116
|
[
"x",
"fill_value",
"xp"
] |
Array
| true
| 7
| 8.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
write
|
/**
 * Serialize a collection of tagged fields into the buffer.
 *
 * <p>The value is expected to be a {@code NavigableMap<Integer, Object>} from
 * tag number to field value, so entries are written in ascending tag order.
 * Each entry is written as varint(tag), varint(size), payload. Known tags
 * (present in {@code fields}) are serialized via their declared field type;
 * unknown tags are assumed to hold a {@code RawTaggedField} whose bytes are
 * copied verbatim.
 *
 * @param buffer the destination buffer
 * @param o the {@code NavigableMap<Integer, Object>} of tag to value
 */
@SuppressWarnings("unchecked")
@Override
public void write(ByteBuffer buffer, Object o) {
    NavigableMap<Integer, Object> objects = (NavigableMap<Integer, Object>) o;
    ByteUtils.writeUnsignedVarint(objects.size(), buffer);
    for (Map.Entry<Integer, Object> entry : objects.entrySet()) {
        Integer tag = entry.getKey();
        Field field = fields.get(tag);
        ByteUtils.writeUnsignedVarint(tag, buffer);
        if (field == null) {
            // Unknown tag: the value carries pre-serialized bytes.
            RawTaggedField value = (RawTaggedField) entry.getValue();
            ByteUtils.writeUnsignedVarint(value.data().length, buffer);
            buffer.put(value.data());
        } else {
            // Known tag: size-prefix, then serialize with the field's type.
            ByteUtils.writeUnsignedVarint(field.type.sizeOf(entry.getValue()), buffer);
            field.type.write(buffer, entry.getValue());
        }
    }
}
|
Serialize a map of tagged fields (tag number to value) into the buffer, writing each
entry as varint(tag), varint(size), payload. Known tags are serialized via their
declared field type; unknown tags are written as raw bytes.
@param buffer the destination buffer
@param o the {@code NavigableMap<Integer, Object>} of tag to value
|
java
|
clients/src/main/java/org/apache/kafka/common/protocol/types/TaggedFields.java
| 61
|
[
"buffer",
"o"
] |
void
| true
| 2
| 7.92
|
apache/kafka
| 31,560
|
javadoc
| false
|
newMetadataRequestAndVersion
|
/**
 * Build the next metadata request to send, together with the request version
 * and whether it is a partial (new-topics-only) update.
 *
 * <p>A partial update is attempted only when no full update has been requested
 * and the last successful refresh has not yet exceeded the metadata refresh
 * time; if the partial builder returns {@code null} (presumably when there are
 * no new topics — confirm against newMetadataRequestBuilderForNewTopics), a
 * full request is built instead.
 *
 * @param nowMs the current time in milliseconds
 * @return the request builder, the request version, and the partial-update flag
 */
public synchronized MetadataRequestAndVersion newMetadataRequestAndVersion(long nowMs) {
    MetadataRequest.Builder request = null;
    boolean isPartialUpdate = false;
    // Perform a partial update only if a full update hasn't been requested, and the last successful
    // hasn't exceeded the metadata refresh time.
    if (!this.needFullUpdate && this.lastSuccessfulRefreshMs + this.metadataExpireMs > nowMs) {
        request = newMetadataRequestBuilderForNewTopics();
        isPartialUpdate = true;
    }
    if (request == null) {
        request = newMetadataRequestBuilder();
        isPartialUpdate = false;
    }
    return new MetadataRequestAndVersion(request, requestVersion, isPartialUpdate);
}
|
Build the next metadata request to send, together with the request version and
whether it is a partial (new-topics-only) update rather than a full update.
@param nowMs the current time in milliseconds
@return the request builder, its version, and the partial-update flag
|
java
|
clients/src/main/java/org/apache/kafka/clients/Metadata.java
| 718
|
[
"nowMs"
] |
MetadataRequestAndVersion
| true
| 4
| 7.2
|
apache/kafka
| 31,560
|
javadoc
| false
|
nextTimeoutMs
|
/**
 * Returns the number of milliseconds remaining until the next timeout, as
 * computed by the most recent timeout check.
 */
int nextTimeoutMs() {
    return nextTimeoutMs;
}
|
Returns the number of milliseconds remaining until the next timeout, as
computed by the most recent timeout check.
@return the remaining milliseconds until the next timeout
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
| 1,076
|
[] | true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
rowMap
|
Map<R, Map<C, V>> rowMap();
|
Returns a view that associates each row key with the corresponding map from column keys to
values. Changes to the returned map will update this table. The returned map does not support
{@code put()} or {@code putAll()}, or {@code setValue()} on its entries.
<p>In contrast, the maps returned by {@code rowMap().get()} have the same behavior as those
returned by {@link #row}. Those maps may support {@code setValue()}, {@code put()}, and {@code
putAll()}.
@return a map view from each row key to a secondary map from column keys to values
|
java
|
android/guava/src/com/google/common/collect/Table.java
| 245
|
[] | true
| 1
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
parseRightSideOfDot
|
/**
 * Parses the name appearing to the right of a `.` in a property access,
 * recovering gracefully when the name is actually the start of another
 * construct on the next line.
 *
 * @param allowIdentifierNames Whether keywords/identifier names (not just identifiers) are permitted.
 * @param allowPrivateIdentifiers Whether a `#private` identifier is permitted here.
 * @param allowUnicodeEscapeSequenceInIdentifierName Whether unicode escapes are permitted in an identifier name.
 */
function parseRightSideOfDot(allowIdentifierNames: boolean, allowPrivateIdentifiers: boolean, allowUnicodeEscapeSequenceInIdentifierName: boolean): Identifier | PrivateIdentifier {
    // Technically a keyword is valid here as all identifiers and keywords are identifier names.
    // However, often we'll encounter this in error situations when the identifier or keyword
    // is actually starting another valid construct.
    //
    // So, we check for the following specific case:
    //
    //      name.
    //      identifierOrKeyword identifierNameOrKeyword
    //
    // Note: the newlines are important here.  For example, if that above code
    // were rewritten into:
    //
    //      name.identifierOrKeyword
    //      identifierNameOrKeyword
    //
    // Then we would consider it valid.  That's because ASI would take effect and
    // the code would be implicitly: "name.identifierOrKeyword; identifierNameOrKeyword".
    // In the first case though, ASI will not take effect because there is not a
    // line terminator after the identifier or keyword.
    if (scanner.hasPrecedingLineBreak() && tokenIsIdentifierOrKeyword(token())) {
        const matchesPattern = lookAhead(nextTokenIsIdentifierOrKeywordOnSameLine);
        if (matchesPattern) {
            // Report that we need an identifier.  However, report it right after the dot,
            // and not on the next token.  This is because the next token might actually
            // be an identifier and the error would be quite confusing.
            return createMissingNode<Identifier>(SyntaxKind.Identifier, /*reportAtCurrentPosition*/ true, Diagnostics.Identifier_expected);
        }
    }
    if (token() === SyntaxKind.PrivateIdentifier) {
        const node = parsePrivateIdentifier();
        return allowPrivateIdentifiers ? node : createMissingNode<Identifier>(SyntaxKind.Identifier, /*reportAtCurrentPosition*/ true, Diagnostics.Identifier_expected);
    }
    if (allowIdentifierNames) {
        return allowUnicodeEscapeSequenceInIdentifierName ? parseIdentifierName() : parseIdentifierNameErrorOnUnicodeEscapeSequence();
    }
    return parseIdentifier();
}
|
Parses the identifier, identifier-name, or private identifier appearing to the
right of a `.` in a property access, recovering gracefully when the name is missing.
@param allowIdentifierNames Whether keywords/identifier names are permitted.
@param allowPrivateIdentifiers Whether a `#private` identifier is permitted here.
@param allowUnicodeEscapeSequenceInIdentifierName Whether unicode escapes are permitted in an identifier name.
|
typescript
|
src/compiler/parser.ts
| 3,613
|
[
"allowIdentifierNames",
"allowPrivateIdentifiers",
"allowUnicodeEscapeSequenceInIdentifierName"
] | true
| 8
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
match
|
def match(
    self,
    pat: str | re.Pattern,
    case: bool | lib.NoDefault = lib.no_default,
    flags: int | lib.NoDefault = lib.no_default,
    na=lib.no_default,
):
    """
    Determine if each string starts with a match of a regular expression.

    Determines whether each string in the Series or Index starts with a
    match to a specified regular expression. This function is especially
    useful for validating prefixes, such as ensuring that codes, tags, or
    identifiers begin with a specific pattern.

    Parameters
    ----------
    pat : str or compiled regex
        Character sequence or regular expression.
    case : bool, default True
        If True, case sensitive.
    flags : int, default 0 (no flags)
        Regex module flags, e.g. re.IGNORECASE.
    na : scalar, optional
        Fill value for missing values. The default depends on dtype of the
        array. For the ``"str"`` dtype, ``False`` is used. For object
        dtype, ``numpy.nan`` is used. For the nullable ``StringDtype``,
        ``pandas.NA`` is used.

    Returns
    -------
    Series/Index/array of boolean values
        A Series, Index, or array of boolean values indicating whether the start
        of each string matches the pattern. The result will be of the same type
        as the input.

    See Also
    --------
    fullmatch : Stricter matching that requires the entire string to match.
    contains : Analogous, but less strict, relying on re.search instead of
        re.match.
    extract : Extract matched groups.

    Examples
    --------
    >>> ser = pd.Series(["horse", "eagle", "donkey"])
    >>> ser.str.match("e")
    0   False
    1   True
    2   False
    dtype: bool
    """
    if flags is not lib.no_default:
        # pat.flags will have re.U regardless, so we need to add it here
        # before checking for a match
        flags = flags | re.U
        if is_re(pat):
            # An explicit `flags` argument must agree with a compiled
            # pattern's own flags; anything else is ambiguous.
            if pat.flags != flags:
                raise ValueError(
                    "Cannot both specify 'flags' and pass a compiled regexp "
                    "object with conflicting flags"
                )
        else:
            pat = re.compile(pat, flags=flags)
            # set flags=0 to ensure that when we call
            # re.compile(pat, flags=flags) the constructor does not raise.
            flags = 0
    else:
        flags = 0
    if case is lib.no_default:
        # Infer case-sensitivity from a compiled pattern's IGNORECASE bit.
        if is_re(pat):
            case = not bool(pat.flags & re.IGNORECASE)
        else:
            # Case-sensitive default
            case = True
    elif is_re(pat):
        implicit_case = not bool(pat.flags & re.IGNORECASE)
        if implicit_case != case:
            # GH#62240
            raise ValueError(
                "Cannot both specify 'case' and pass a compiled regexp "
                "object with conflicting case-sensitivity"
            )
    result = self._data.array._str_match(pat, case=case, flags=flags, na=na)
    return self._wrap_result(result, fill_value=na, returns_string=False)
|
Determine if each string starts with a match of a regular expression.
Determines whether each string in the Series or Index starts with a
match to a specified regular expression. This function is especially
useful for validating prefixes, such as ensuring that codes, tags, or
identifiers begin with a specific pattern.
Parameters
----------
pat : str or compiled regex
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
flags : int, default 0 (no flags)
Regex module flags, e.g. re.IGNORECASE.
na : scalar, optional
Fill value for missing values. The default depends on dtype of the
array. For the ``"str"`` dtype, ``False`` is used. For object
dtype, ``numpy.nan`` is used. For the nullable ``StringDtype``,
``pandas.NA`` is used.
Returns
-------
Series/Index/array of boolean values
A Series, Index, or array of boolean values indicating whether the start
of each string matches the pattern. The result will be of the same type
as the input.
See Also
--------
fullmatch : Stricter matching that requires the entire string to match.
contains : Analogous, but less strict, relying on re.search instead of
re.match.
extract : Extract matched groups.
Examples
--------
>>> ser = pd.Series(["horse", "eagle", "donkey"])
>>> ser.str.match("e")
0 False
1 True
2 False
dtype: bool
|
python
|
pandas/core/strings/accessor.py
| 1,354
|
[
"self",
"pat",
"case",
"flags",
"na"
] | true
| 11
| 8.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
construct_from_string
|
def construct_from_string(cls, string: str) -> Self:
r"""
Construct this type from a string.
This is useful mainly for data types that accept parameters.
For example, a period dtype accepts a frequency parameter that
can be set as ``period[h]`` (where H means hourly frequency).
By default, in the abstract class, just the name of the type is
expected. But subclasses can overwrite this method to accept
parameters.
Parameters
----------
string : str
The name of the type, for example ``category``.
Returns
-------
ExtensionDtype
Instance of the dtype.
Raises
------
TypeError
If a class cannot be constructed from this 'string'.
Examples
--------
For extension dtypes with arguments the following may be an
adequate implementation.
>>> import re
>>> @classmethod
... def construct_from_string(cls, string):
... pattern = re.compile(r"^my_type\[(?P<arg_name>.+)\]$")
... match = pattern.match(string)
... if match:
... return cls(**match.groupdict())
... else:
... raise TypeError(
... f"Cannot construct a '{cls.__name__}' from '{string}'"
... )
"""
if not isinstance(string, str):
raise TypeError(
f"'construct_from_string' expects a string, got {type(string)}"
)
# error: Non-overlapping equality check (left operand type: "str", right
# operand type: "Callable[[ExtensionDtype], str]") [comparison-overlap]
assert isinstance(cls.name, str), (cls, type(cls.name))
if string != cls.name:
raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
return cls()
|
r"""
Construct this type from a string.
This is useful mainly for data types that accept parameters.
For example, a period dtype accepts a frequency parameter that
can be set as ``period[h]`` (where H means hourly frequency).
By default, in the abstract class, just the name of the type is
expected. But subclasses can overwrite this method to accept
parameters.
Parameters
----------
string : str
The name of the type, for example ``category``.
Returns
-------
ExtensionDtype
Instance of the dtype.
Raises
------
TypeError
If a class cannot be constructed from this 'string'.
Examples
--------
For extension dtypes with arguments the following may be an
adequate implementation.
>>> import re
>>> @classmethod
... def construct_from_string(cls, string):
... pattern = re.compile(r"^my_type\[(?P<arg_name>.+)\]$")
... match = pattern.match(string)
... if match:
... return cls(**match.groupdict())
... else:
... raise TypeError(
... f"Cannot construct a '{cls.__name__}' from '{string}'"
... )
|
python
|
pandas/core/dtypes/base.py
| 244
|
[
"cls",
"string"
] |
Self
| true
| 3
| 8.48
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
isListCloser
|
/**
 * Whether `token` closes a delimited list: `}`, `]`, `)`, or a JSX closing
 * element. An `undefined` token never does.
 */
function isListCloser(token: Node | undefined): token is Node {
    switch (token?.kind) {
        case SyntaxKind.CloseBraceToken:
        case SyntaxKind.CloseBracketToken:
        case SyntaxKind.CloseParenToken:
        case SyntaxKind.JsxClosingElement:
            return true;
        default:
            return false;
    }
}
|
Returns true when the given token closes a delimited list: a `}`, `]`, `)`, or a
JSX closing element.
@param token The token to test; `undefined` yields false.
|
typescript
|
src/services/smartSelection.ts
| 352
|
[
"token"
] | false
| 5
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
construct_from_string
|
def construct_from_string(cls, string) -> Self:
"""
Construct a StringDtype from a string.
Parameters
----------
string : str
The type of the name. The storage type will be taking from `string`.
Valid options and their storage types are
========================== ==============================================
string result storage
========================== ==============================================
``'string'`` pd.options.mode.string_storage, default python
``'string[python]'`` python
``'string[pyarrow]'`` pyarrow
========================== ==============================================
Returns
-------
StringDtype
Raise
-----
TypeError
If the string is not a valid option.
"""
if not isinstance(string, str):
raise TypeError(
f"'construct_from_string' expects a string, got {type(string)}"
)
if string == "string":
return cls()
elif string == "str" and using_string_dtype():
return cls(na_value=np.nan)
elif string == "string[python]":
return cls(storage="python")
elif string == "string[pyarrow]":
return cls(storage="pyarrow")
else:
raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
|
Construct a StringDtype from a string.
Parameters
----------
string : str
The type of the name. The storage type will be taking from `string`.
Valid options and their storage types are
========================== ==============================================
string result storage
========================== ==============================================
``'string'`` pd.options.mode.string_storage, default python
``'string[python]'`` python
``'string[pyarrow]'`` pyarrow
========================== ==============================================
Returns
-------
StringDtype
Raise
-----
TypeError
If the string is not a valid option.
|
python
|
pandas/core/arrays/string_.py
| 262
|
[
"cls",
"string"
] |
Self
| true
| 8
| 6.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
visitBlock
|
/**
 * Visits a Block during the ES2015 down-level transform.
 *
 * @param node The block to visit.
 * @param isFunctionBody Whether the block is the body of a function-like
 * declaration; function bodies are not block scopes, so no block-scope
 * hierarchy tracking is entered for them.
 */
function visitBlock(node: Block, isFunctionBody: boolean): Block {
    if (isFunctionBody) {
        // A function body is not a block scope.
        return visitEachChild(node, visitor, context);
    }
    // Enter a block subtree; use the iteration-statement variant when this
    // block sits directly inside a loop so block-scoped bindings get loop
    // semantics.
    const ancestorFacts = hierarchyFacts & HierarchyFacts.IterationStatement
        ? enterSubtree(HierarchyFacts.IterationStatementBlockExcludes, HierarchyFacts.IterationStatementBlockIncludes)
        : enterSubtree(HierarchyFacts.BlockExcludes, HierarchyFacts.BlockIncludes);
    const updated = visitEachChild(node, visitor, context);
    exitSubtree(ancestorFacts, HierarchyFacts.None, HierarchyFacts.None);
    return updated;
}
|
Transforms the body of a function-like node.
@param node A function-like node.
|
typescript
|
src/compiler/transformers/es2015.ts
| 2,643
|
[
"node",
"isFunctionBody"
] | true
| 3
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
processLine
|
/**
 * This method will be called once for each line.
 *
 * @param line the line read from the input, without delimiter
 * @return true to continue processing, false to stop
 */
@CanIgnoreReturnValue // some uses know that their processor never returns false
boolean processLine(String line) throws IOException;
|
This method will be called once for each line.
@param line the line read from the input, without delimiter
@return true to continue processing, false to stop
|
java
|
android/guava/src/com/google/common/io/LineProcessor.java
| 42
|
[
"line"
] | true
| 1
| 6.8
|
google/guava
| 51,352
|
javadoc
| false
|
|
check_md5checksum_in_cache_modified
|
def check_md5checksum_in_cache_modified(file_hash: str, cache_path: Path, update: bool) -> bool:
    """
    Check if the file hash is present in cache and whether its content has been modified.
    Optionally updates the stored hash.

    :param file_hash: hash of the current version of the file
    :param cache_path: path where the hash is stored
    :param update: whether to write the new hash when it is missing or has changed
    :return: True if the hash file was missing or the hash has changed.
    """
    # A missing cache file counts as "modified" so the caller rebuilds.
    is_modified = True
    if cache_path.exists():
        # cache_path is already a Path; read it directly (the old code
        # re-wrapped it in Path() redundantly).
        is_modified = cache_path.read_text().strip() != file_hash.strip()
    # Single update site instead of duplicating the save call per branch.
    if is_modified and update:
        save_md5_file(cache_path, file_hash)
    return is_modified
|
Check if the file hash is present in cache and its content has been modified. Optionally updates
the hash.
:param file_hash: hash of the current version of the file
:param cache_path: path where the hash is stored
:param update: whether to update hash if it is found different
:return: True if the hash file was missing or hash has changed.
|
python
|
dev/breeze/src/airflow_breeze/utils/md5_build_check.py
| 38
|
[
"file_hash",
"cache_path",
"update"
] |
bool
| true
| 6
| 8.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
isUnParenthesizedAsyncArrowFunctionWorker
|
/**
 * Determines whether the tokens starting at `async` begin an un-parenthesized
 * async arrow function (e.g. `async x => ...`). The scan is speculative, so a
 * Tristate is returned for the caller to decide how to reparse.
 */
function isUnParenthesizedAsyncArrowFunctionWorker(): Tristate {
    // AsyncArrowFunctionExpression:
    //      1) async[no LineTerminator here]AsyncArrowBindingIdentifier[?Yield][no LineTerminator here]=>AsyncConciseBody[?In]
    //      2) CoverCallExpressionAndAsyncArrowHead[?Yield, ?Await][no LineTerminator here]=>AsyncConciseBody[?In]
    if (token() === SyntaxKind.AsyncKeyword) {
        nextToken();
        // If the "async" is followed by "=>" token then it is not a beginning of an async arrow-function
        // but instead a simple arrow-function which will be parsed inside "parseAssignmentExpressionOrHigher"
        if (scanner.hasPrecedingLineBreak() || token() === SyntaxKind.EqualsGreaterThanToken) {
            return Tristate.False;
        }
        // Check for un-parenthesized AsyncArrowFunction
        const expr = parseBinaryExpressionOrHigher(OperatorPrecedence.Lowest);
        if (!scanner.hasPrecedingLineBreak() && expr.kind === SyntaxKind.Identifier && token() === SyntaxKind.EqualsGreaterThanToken) {
            return Tristate.True;
        }
    }
    return Tristate.False;
}
|
Determines whether the token sequence starting at `async` begins an
un-parenthesized async arrow function (e.g. `async x => ...`), returning a
Tristate so the caller can decide how to reparse.
|
typescript
|
src/compiler/parser.ts
| 5,409
|
[] | true
| 7
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| true
|
|
ensureNotFinished
|
/**
 * Guard that rejects any operation on a stream that has already been closed.
 *
 * @throws IllegalStateException if the stream has been finished/closed
 */
private void ensureNotFinished() {
    if (!finished) {
        return;
    }
    throw new IllegalStateException(CLOSED_STREAM);
}
|
A simple state check to ensure the stream is still open.
|
java
|
clients/src/main/java/org/apache/kafka/common/compress/Lz4BlockOutputStream.java
| 231
|
[] |
void
| true
| 2
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
unique
|
def unique(self) -> Self:
    """
    Compute the ExtensionArray of unique values.

    Returns
    -------
    pandas.api.extensions.ExtensionArray
        With unique values from the input array.

    See Also
    --------
    Index.unique: Return unique values in the index.
    Series.unique: Return unique values of Series object.
    unique: Return unique values based on a hash table.

    Examples
    --------
    >>> arr = pd.array([1, 2, 3, 1, 2, 3])
    >>> arr.unique()
    <IntegerArray>
    [1, 2, 3]
    Length: 3, dtype: Int64
    """
    # Deduplicate via the object-dtype hash-table path, then rebuild with
    # the original extension dtype.
    return self._from_sequence(unique(self.astype(object)), dtype=self.dtype)
|
Compute the ExtensionArray of unique values.
Returns
-------
pandas.api.extensions.ExtensionArray
With unique values from the input array.
See Also
--------
Index.unique: Return unique values in the index.
Series.unique: Return unique values of Series object.
unique: Return unique values based on a hash table.
Examples
--------
>>> arr = pd.array([1, 2, 3, 1, 2, 3])
>>> arr.unique()
<IntegerArray>
[1, 2, 3]
Length: 3, dtype: Int64
|
python
|
pandas/core/arrays/base.py
| 1,435
|
[
"self"
] |
Self
| true
| 1
| 7.12
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
lookup_template_configs
|
def lookup_template_configs(
    self,
    kernel_inputs: KernelInputs,
    op_name: str,
    template_uids: list[str],
    template_hash_map: Optional[dict[str, Optional[str]]] = None,
) -> dict[str, list[dict[str, Any]]]:
    """
    Unified function to look up template configurations for multiple templates.
    Override this method to customize lookup logic.

    Args:
        kernel_inputs: KernelInputs object containing input nodes and scalars
        op_name: Operation name (e.g., "mm", "addmm")
        template_uids: List of template identifiers (e.g., ["mm", "tma", "decompose_k"])
        template_hash_map: Optional mapping from template_uid to src_hash for validation

    Returns:
        {}: No lookup table in use, or no matches found for any template
        {"template_uid1": [config1, config2], ...}: Matches found, filtered configurations
    """
    lookup_table = self._get_lookup_table()
    if not lookup_table:
        log.debug("Lookup table: no table configured or CUDA unavailable")
        return {}
    # Try both key variants: device-specific first, then device-agnostic
    # If both exist, device-specific takes priority
    device_key, device_agnostic_key = self.make_lookup_key_variants(
        kernel_inputs, op_name
    )
    config_list = []
    for key_type, key in [
        ("device-specific", device_key),
        ("device-agnostic", device_agnostic_key),
    ]:
        if key is not None:
            config_list = lookup_table.get(key, [])
            if config_list:
                log.debug(
                    "Lookup table: found %d configs using %s key '%s' for %s",
                    len(config_list),
                    key_type,
                    key,
                    op_name,
                )
                break
    else:
        # for/else: neither key variant produced a hit.
        log.debug(
            "Lookup table: no match for %s (tried keys: %s, %s) (table has %d keys)",
            op_name,
            device_key,
            device_agnostic_key,
            len(lookup_table),
        )
        return {}
    log.debug(
        "Lookup table: found %d configs for %s templates %s",
        len(config_list),
        op_name,
        template_uids,
    )
    # Group configs by template_id
    configs_by_template: dict[str, list[dict[str, Any]]] = {}
    for cfg in config_list:
        # Entries must be dicts carrying a "template_id"; anything else is a
        # malformed table and is surfaced loudly rather than skipped.
        if not isinstance(cfg, dict):
            raise ValueError(
                f"Config for {op_name} operation is not a dictionary: {cfg}"
            )
        if "template_id" not in cfg:
            raise ValueError(
                f"Config for {op_name} operation missing required 'template_id' field: {cfg}"
            )
        template_id = cfg["template_id"]
        if template_id in template_uids:
            if template_id not in configs_by_template:
                configs_by_template[template_id] = []
            configs_by_template[template_id].append(cfg)
    # Check template hashes and clean up template_id field
    result = {}
    for template_id, matching_configs in configs_by_template.items():
        filtered_configs = []
        for cfg in matching_configs:
            # Check template hash using helper function
            if not self._entry_is_valid(cfg, template_id, template_hash_map):
                continue
            # Return a copy of the config, as we don't want to modify the original
            cconfig = copy.deepcopy(cfg)
            # Lastly, we have to throw out the template_id, as it's not a valid kwarg
            # and just used to identify which template the entry belongs to
            del cconfig["template_id"]
            # Similarly, the template_hash is not a valid kwarg
            cconfig.pop("template_hash", None)
            filtered_configs.append(cconfig)
        if filtered_configs:
            result[template_id] = filtered_configs
    return result
|
Unified function to look up template configurations for multiple templates.
Override this method to customize lookup logic.
Args:
kernel_inputs: KernelInputs object containing input nodes and scalars
op_name: Operation name (e.g., "mm", "addmm")
template_uids: List of template identifiers (e.g., ["mm", "tma", "decompose_k"])
template_hash_map: Optional mapping from template_uid to src_hash for validation
Returns:
{}: No lookup table in use, or no matches found for any template
{"template_uid1": [config1, config2], ...}: Matches found, filtered configurations
|
python
|
torch/_inductor/lookup_table/choices.py
| 206
|
[
"self",
"kernel_inputs",
"op_name",
"template_uids",
"template_hash_map"
] |
dict[str, list[dict[str, Any]]]
| true
| 15
| 7.52
|
pytorch/pytorch
| 96,034
|
google
| false
|
runOnce
|
/**
 * Run a single iteration of the network thread loop: process queued
 * application events, poll each request manager and stage its requests,
 * poll the network client, cache the maximum time the application thread
 * may block, and reap expired application events.
 */
void runOnce() {
    // The following code avoids use of the Java Collections Streams API to reduce overhead in this loop.
    processApplicationEvents();
    final long currentTimeMs = time.milliseconds();
    // lastPollTimeMs == 0 means this is the first iteration; no gap to record.
    if (lastPollTimeMs != 0L) {
        asyncConsumerMetrics.recordTimeBetweenNetworkThreadPoll(currentTimeMs - lastPollTimeMs);
    }
    lastPollTimeMs = currentTimeMs;
    // Stage requests from every manager; the smallest requested timeout wins.
    long pollWaitTimeMs = MAX_POLL_TIMEOUT_MS;
    for (RequestManager rm : requestManagers.entries()) {
        NetworkClientDelegate.PollResult pollResult = rm.poll(currentTimeMs);
        long timeoutMs = networkClientDelegate.addAll(pollResult);
        pollWaitTimeMs = Math.min(pollWaitTimeMs, timeoutMs);
    }
    networkClientDelegate.poll(pollWaitTimeMs, currentTimeMs);
    // Cache the tightest bound across managers on how long the application
    // thread may wait before this thread needs to run again.
    long maxTimeToWaitMs = Long.MAX_VALUE;
    for (RequestManager rm : requestManagers.entries()) {
        long waitMs = rm.maximumTimeToWait(currentTimeMs);
        maxTimeToWaitMs = Math.min(maxTimeToWaitMs, waitMs);
    }
    cachedMaximumTimeToWait = maxTimeToWaitMs;
    reapExpiredApplicationEvents(currentTimeMs);
    List<CompletableEvent<?>> uncompletedEvents = applicationEventReaper.uncompletedEvents();
    maybeFailOnMetadataError(uncompletedEvents);
}
|
Poll and process the {@link ApplicationEvent application events}. It performs the following tasks:
<ol>
<li>
Drains and processes all the events from the application thread's application event queue via
{@link ApplicationEventProcessor}
</li>
<li>
Iterate through the {@link RequestManager} list and invoke {@link RequestManager#poll(long)} to get
the {@link NetworkClientDelegate.UnsentRequest} list and the poll time for the network poll
</li>
<li>
Stage each {@link AbstractRequest.Builder request} to be sent via
{@link NetworkClientDelegate#addAll(List)}
</li>
<li>
Poll the client via {@link KafkaClient#poll(long, long)} to send the requests, as well as
retrieve any available responses
</li>
</ol>
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java
| 210
|
[] |
void
| true
| 2
| 6.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
addParameter
|
/**
 * Add a query string parameter.
 *
 * @param name the name of the url parameter; must not be {@code null}
 * @param value the value of the url parameter; if {@code null} then the
 *        parameter is sent as {@code name} rather than {@code name=value}
 * @throws IllegalArgumentException if a parameter with that name has
 *         already been set
 */
public void addParameter(String name, String value) {
    Objects.requireNonNull(name, "url parameter name cannot be null");
    // containsKey (rather than a null check on put/putIfAbsent) is required
    // because null is a legal parameter value.
    if (parameters.containsKey(name)) {
        throw new IllegalArgumentException("url parameter [" + name + "] has already been set to [" + parameters.get(name) + "]");
    }
    parameters.put(name, value);
}
|
Add a query string parameter.
@param name the name of the url parameter. Must not be null.
@param value the value of the url parameter. If {@code null} then
the parameter is sent as {@code name} rather than {@code name=value}
@throws IllegalArgumentException if a parameter with that name has
already been set
|
java
|
client/rest/src/main/java/org/elasticsearch/client/Request.java
| 75
|
[
"name",
"value"
] |
void
| true
| 2
| 6.88
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
versions
|
/**
 * Return the versions listed under {@code META-INF/versions/} in ascending order.
 *
 * @return the versions
 */
int[] versions() {
    return this.versions;
}
|
Return the versions listed under {@code META-INF/versions/} in ascending order.
@return the versions
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/jar/MetaInfVersionsInfo.java
| 51
|
[] | true
| 1
| 6.48
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
updateFetchPosition
|
private void updateFetchPosition(TopicPartition tp) {
if (subscriptions.isOffsetResetNeeded(tp)) {
resetOffsetPosition(tp);
} else if (!committed.containsKey(tp)) {
subscriptions.requestOffsetReset(tp);
resetOffsetPosition(tp);
} else {
subscriptions.seek(tp, committed.get(tp).offset());
}
}
|
Schedule a task to be executed during a poll(). One enqueued task will be executed per {@link #poll(Duration)}
invocation. You can use this repeatedly to mock out multiple responses to poll invocations.
@param task the task to be executed
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java
| 622
|
[
"tp"
] |
void
| true
| 3
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
compare
|
def compare(
self,
other: Series,
align_axis: Axis = 1,
keep_shape: bool = False,
keep_equal: bool = False,
result_names: Suffixes = ("self", "other"),
) -> DataFrame | Series:
"""
Compare to another Series and show the differences.
Parameters
----------
other : Series
Object to compare with.
align_axis : {{0 or 'index', 1 or 'columns'}}, default 1
Determine which axis to align the comparison on.
* 0, or 'index' : Resulting differences are stacked vertically
with rows drawn alternately from self and other.
* 1, or 'columns' : Resulting differences are aligned horizontally
with columns drawn alternately from self and other.
keep_shape : bool, default False
If true, all rows and columns are kept.
Otherwise, only the ones with different values are kept.
keep_equal : bool, default False
If true, the result keeps values that are equal.
Otherwise, equal values are shown as NaNs.
result_names : tuple, default ('self', 'other')
Set the dataframes names in the comparison.
Returns
-------
Series or DataFrame
If axis is 0 or 'index' the result will be a Series.
The resulting index will be a MultiIndex with 'self' and 'other'
stacked alternately at the inner level.
If axis is 1 or 'columns' the result will be a DataFrame.
It will have two columns namely 'self' and 'other'.
See Also
--------
DataFrame.compare : Compare with another DataFrame and show differences.
Notes
-----
Matching NaNs will not appear as a difference.
Examples
--------
>>> s1 = pd.Series(["a", "b", "c", "d", "e"])
>>> s2 = pd.Series(["a", "a", "c", "b", "e"])
Align the differences on columns
>>> s1.compare(s2)
self other
1 b a
3 d b
Stack the differences on indices
>>> s1.compare(s2, align_axis=0)
1 self b
other a
3 self d
other b
dtype: object
Keep all original rows
>>> s1.compare(s2, keep_shape=True)
self other
0 NaN NaN
1 b a
2 NaN NaN
3 d b
4 NaN NaN
Keep all original rows and also all original values
>>> s1.compare(s2, keep_shape=True, keep_equal=True)
self other
0 a a
1 b a
2 c c
3 d b
4 e e
"""
return super().compare(
other=other,
align_axis=align_axis,
keep_shape=keep_shape,
keep_equal=keep_equal,
result_names=result_names,
)
|
Compare to another Series and show the differences.
Parameters
----------
other : Series
Object to compare with.
align_axis : {{0 or 'index', 1 or 'columns'}}, default 1
Determine which axis to align the comparison on.
* 0, or 'index' : Resulting differences are stacked vertically
with rows drawn alternately from self and other.
* 1, or 'columns' : Resulting differences are aligned horizontally
with columns drawn alternately from self and other.
keep_shape : bool, default False
If true, all rows and columns are kept.
Otherwise, only the ones with different values are kept.
keep_equal : bool, default False
If true, the result keeps values that are equal.
Otherwise, equal values are shown as NaNs.
result_names : tuple, default ('self', 'other')
Set the dataframes names in the comparison.
Returns
-------
Series or DataFrame
If axis is 0 or 'index' the result will be a Series.
The resulting index will be a MultiIndex with 'self' and 'other'
stacked alternately at the inner level.
If axis is 1 or 'columns' the result will be a DataFrame.
It will have two columns namely 'self' and 'other'.
See Also
--------
DataFrame.compare : Compare with another DataFrame and show differences.
Notes
-----
Matching NaNs will not appear as a difference.
Examples
--------
>>> s1 = pd.Series(["a", "b", "c", "d", "e"])
>>> s2 = pd.Series(["a", "a", "c", "b", "e"])
Align the differences on columns
>>> s1.compare(s2)
self other
1 b a
3 d b
Stack the differences on indices
>>> s1.compare(s2, align_axis=0)
1 self b
other a
3 self d
other b
dtype: object
Keep all original rows
>>> s1.compare(s2, keep_shape=True)
self other
0 NaN NaN
1 b a
2 NaN NaN
3 d b
4 NaN NaN
Keep all original rows and also all original values
>>> s1.compare(s2, keep_shape=True, keep_equal=True)
self other
0 a a
1 b a
2 c c
3 d b
4 e e
|
python
|
pandas/core/series.py
| 3,062
|
[
"self",
"other",
"align_axis",
"keep_shape",
"keep_equal",
"result_names"
] |
DataFrame | Series
| true
| 1
| 7.2
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
on_errback
|
def on_errback(self, errback, **header) -> dict:
"""Method that is called on errback stamping.
Arguments:
errback (Signature): errback that is stamped.
headers (Dict): Partial headers that could be merged with existing headers.
Returns:
Dict: headers to update.
"""
return {}
|
Method that is called on errback stamping.
Arguments:
errback (Signature): errback that is stamped.
headers (Dict): Partial headers that could be merged with existing headers.
Returns:
Dict: headers to update.
|
python
|
celery/canvas.py
| 219
|
[
"self",
"errback"
] |
dict
| true
| 1
| 6.56
|
celery/celery
| 27,741
|
google
| false
|
some
|
function some(collection, predicate, guard) {
var func = isArray(collection) ? arraySome : baseSome;
if (guard && isIterateeCall(collection, predicate, guard)) {
predicate = undefined;
}
return func(collection, getIteratee(predicate, 3));
}
|
Checks if `predicate` returns truthy for **any** element of `collection`.
Iteration is stopped once `predicate` returns truthy. The predicate is
invoked with three arguments: (value, index|key, collection).
@static
@memberOf _
@since 0.1.0
@category Collection
@param {Array|Object} collection The collection to iterate over.
@param {Function} [predicate=_.identity] The function invoked per iteration.
@param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
@returns {boolean} Returns `true` if any element passes the predicate check,
else `false`.
@example
_.some([null, 0, 'yes', false], Boolean);
// => true
var users = [
{ 'user': 'barney', 'active': true },
{ 'user': 'fred', 'active': false }
];
// The `_.matches` iteratee shorthand.
_.some(users, { 'user': 'barney', 'active': false });
// => false
// The `_.matchesProperty` iteratee shorthand.
_.some(users, ['active', false]);
// => true
// The `_.property` iteratee shorthand.
_.some(users, 'active');
// => true
|
javascript
|
lodash.js
| 9,999
|
[
"collection",
"predicate",
"guard"
] | false
| 4
| 7.2
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
newTreeMap
|
@SuppressWarnings({
"rawtypes", // https://github.com/google/guava/issues/989
"NonApiType", // acts as a direct substitute for a constructor call
})
public static <K extends Comparable, V extends @Nullable Object> TreeMap<K, V> newTreeMap() {
return new TreeMap<>();
}
|
Creates a <i>mutable</i>, empty {@code TreeMap} instance using the natural ordering of its
elements.
<p><b>Note:</b> if mutability is not required, use {@link ImmutableSortedMap#of()} instead.
<p><b>Note:</b> this method is now unnecessary and should be treated as deprecated. Instead,
use the {@code TreeMap} constructor directly, taking advantage of <a
href="https://docs.oracle.com/javase/tutorial/java/generics/genTypeInference.html#type-inference-instantiation">"diamond"
syntax</a>.
@return a new, empty {@code TreeMap}
|
java
|
android/guava/src/com/google/common/collect/Maps.java
| 359
|
[] | true
| 1
| 6.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
transformInitializedVariable
|
function transformInitializedVariable(node: InitializedVariableDeclaration): Expression {
const name = node.name;
if (isBindingPattern(name)) {
return flattenDestructuringAssignment(
node,
visitor,
context,
FlattenLevel.All,
/*needsValue*/ false,
createNamespaceExportExpression,
);
}
else {
return setTextRange(
factory.createAssignment(
getNamespaceMemberNameWithSourceMapsAndWithoutComments(name),
Debug.checkDefined(visitNode(node.initializer, visitor, isExpression)),
),
/*location*/ node,
);
}
}
|
Determines whether to emit an accessor declaration. We should not emit the
declaration if it does not have a body and is abstract.
@param node The declaration node.
|
typescript
|
src/compiler/transformers/ts.ts
| 1,650
|
[
"node"
] | true
| 3
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
_next_iter_line
|
def _next_iter_line(self, row_num: int) -> list[Scalar] | None:
"""
Wrapper around iterating through `self.data` (CSV source).
When a CSV error is raised, we check for specific
error messages that allow us to customize the
error message displayed to the user.
Parameters
----------
row_num: int
The row number of the line being parsed.
"""
try:
assert not isinstance(self.data, list)
line = next(self.data)
# lie about list[str] vs list[Scalar] to minimize ignores
return line # type: ignore[return-value]
except csv.Error as e:
if self.on_bad_lines in (
self.BadLineHandleMethod.ERROR,
self.BadLineHandleMethod.WARN,
):
msg = str(e)
if "NULL byte" in msg or "line contains NUL" in msg:
msg = (
"NULL byte detected. This byte "
"cannot be processed in Python's "
"native csv library at the moment, "
"so please pass in engine='c' instead"
)
if self.skipfooter > 0:
reason = (
"Error could possibly be due to "
"parsing errors in the skipped footer rows "
"(the skipfooter keyword is only applied "
"after Python's csv library has parsed "
"all rows)."
)
msg += ". " + reason
self._alert_malformed(msg, row_num)
return None
|
Wrapper around iterating through `self.data` (CSV source).
When a CSV error is raised, we check for specific
error messages that allow us to customize the
error message displayed to the user.
Parameters
----------
row_num: int
The row number of the line being parsed.
|
python
|
pandas/io/parsers/python_parser.py
| 974
|
[
"self",
"row_num"
] |
list[Scalar] | None
| true
| 5
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
andThen
|
default FailableDoubleConsumer<E> andThen(final FailableDoubleConsumer<E> after) {
Objects.requireNonNull(after);
return (final double t) -> {
accept(t);
after.accept(t);
};
}
|
Returns a composed {@link FailableDoubleConsumer} like {@link DoubleConsumer#andThen(DoubleConsumer)}.
@param after the operation to perform after this one.
@return a composed {@link FailableDoubleConsumer} like {@link DoubleConsumer#andThen(DoubleConsumer)}.
@throws NullPointerException when {@code after} is null.
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableDoubleConsumer.java
| 62
|
[
"after"
] | true
| 1
| 6.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
delete_replication_group
|
def delete_replication_group(self, replication_group_id: str) -> dict:
"""
Delete an existing replication group.
.. seealso::
- :external+boto3:py:meth:`ElastiCache.Client.delete_replication_group`
:param replication_group_id: ID of replication group to delete
:return: Response from ElastiCache delete replication group API
"""
return self.conn.delete_replication_group(ReplicationGroupId=replication_group_id)
|
Delete an existing replication group.
.. seealso::
- :external+boto3:py:meth:`ElastiCache.Client.delete_replication_group`
:param replication_group_id: ID of replication group to delete
:return: Response from ElastiCache delete replication group API
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/elasticache_replication_group.py
| 75
|
[
"self",
"replication_group_id"
] |
dict
| true
| 1
| 6.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
bucket_reduce_scatter_by_mb
|
def bucket_reduce_scatter_by_mb(
gm: torch.fx.GraphModule,
bucket_cap_mb_by_bucket_idx: Callable[[int], float],
filter_wait_node: Callable[[torch.fx.Node], bool] | None = None,
mode: BucketMode = "default",
) -> list[list[torch.fx.Node]]:
"""
Identifies all reduce_scatter nodes and groups them into buckets,
based on size limit `bucket_cap_mb_by_bucket_idx`.
Args:
gm (torch.fx.GraphModule): GraphModule where to bucket reduce_scatters.
bucket_cap_mb_by_bucket_idx (Callable[[int], float]): Callable to specify cap of the bucket
in megabytes by bucket idx. The idea of `bucket_cap_mb_by_bucket_idx` is to allow
to specify different sizes of the buckets.
filter_wait_node (Callable[[torch.fx.Node], bool] | None): If specified,
only reduce_scatter nodes with wait_node that satisfy `filter_wait_node` will be bucketed.
Returns:
list[list[torch.fx.Node]]: List of buckets, where each bucket is a list of reduce_scatter nodes.
"""
assert "multidtype" not in mode, (
"reduce scatter bucketing does not support multidtype"
)
return greedy_bucket_collective_by_mb(
gm,
bucket_cap_mb_by_bucket_idx,
is_reduce_scatter_tensor,
_rs_group_key,
filter_wait_node,
)
|
Identifies all reduce_scatter nodes and groups them into buckets,
based on size limit `bucket_cap_mb_by_bucket_idx`.
Args:
gm (torch.fx.GraphModule): GraphModule where to bucket reduce_scatters.
bucket_cap_mb_by_bucket_idx (Callable[[int], float]): Callable to specify cap of the bucket
in megabytes by bucket idx. The idea of `bucket_cap_mb_by_bucket_idx` is to allow
to specify different sizes of the buckets.
filter_wait_node (Callable[[torch.fx.Node], bool] | None): If specified,
only reduce_scatter nodes with wait_node that satisfy `filter_wait_node` will be bucketed.
Returns:
list[list[torch.fx.Node]]: List of buckets, where each bucket is a list of reduce_scatter nodes.
|
python
|
torch/_inductor/fx_passes/bucketing.py
| 387
|
[
"gm",
"bucket_cap_mb_by_bucket_idx",
"filter_wait_node",
"mode"
] |
list[list[torch.fx.Node]]
| true
| 1
| 6.24
|
pytorch/pytorch
| 96,034
|
google
| false
|
groupMembershipOperation
|
public static CloseOptions groupMembershipOperation(final GroupMembershipOperation operation) {
return new CloseOptions().withGroupMembershipOperation(operation);
}
|
Static method to create a {@code CloseOptions} with a specified group membership operation.
@param operation the group membership operation to apply. Must be one of {@code LEAVE_GROUP}, {@code REMAIN_IN_GROUP},
or {@code DEFAULT}.
@return a new {@code CloseOptions} instance with the specified group membership operation.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/CloseOptions.java
| 79
|
[
"operation"
] |
CloseOptions
| true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
nextFloat
|
@Deprecated
public static float nextFloat(final float startInclusive, final float endExclusive) {
return secure().randomFloat(startInclusive, endExclusive);
}
|
Generates a random float within the specified range.
@param startInclusive the smallest value that can be returned, must be non-negative.
@param endExclusive the upper bound (not included).
@throws IllegalArgumentException if {@code startInclusive > endExclusive} or if {@code startInclusive} is negative.
@return the random float
@deprecated Use {@link #secure()}, {@link #secureStrong()}, or {@link #insecure()}.
|
java
|
src/main/java/org/apache/commons/lang3/RandomUtils.java
| 180
|
[
"startInclusive",
"endExclusive"
] | true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
notFoundConnection
|
private static JarUrlConnection notFoundConnection(String jarFileName, String entryName) throws IOException {
if (Optimizations.isEnabled()) {
return NOT_FOUND_CONNECTION;
}
return new JarUrlConnection(
() -> new FileNotFoundException("JAR entry " + entryName + " not found in " + jarFileName));
}
|
The {@link URLClassLoader} connects often to check if a resource exists, we can
save some object allocations by using the cached copy if we have one.
@param jarFileURL the jar file to check
@param entryName the entry name to check
@throws FileNotFoundException on a missing entry
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/jar/JarUrlConnection.java
| 360
|
[
"jarFileName",
"entryName"
] |
JarUrlConnection
| true
| 2
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
_compute_size_by_dict
|
def _compute_size_by_dict(indices, idx_dict):
"""
Computes the product of the elements in indices based on the dictionary
idx_dict.
Parameters
----------
indices : iterable
Indices to base the product on.
idx_dict : dictionary
Dictionary of index sizes
Returns
-------
ret : int
The resulting product.
Examples
--------
>>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
90
"""
ret = 1
for i in indices:
ret *= idx_dict[i]
return ret
|
Computes the product of the elements in indices based on the dictionary
idx_dict.
Parameters
----------
indices : iterable
Indices to base the product on.
idx_dict : dictionary
Dictionary of index sizes
Returns
-------
ret : int
The resulting product.
Examples
--------
>>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
90
|
python
|
numpy/_core/einsumfunc.py
| 61
|
[
"indices",
"idx_dict"
] | false
| 2
| 7.36
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
autoCommitOffsetsAsync
|
private RequestFuture<Void> autoCommitOffsetsAsync() {
Map<TopicPartition, OffsetAndMetadata> allConsumedOffsets = subscriptions.allConsumed();
log.debug("Sending asynchronous auto-commit of offsets {}", allConsumedOffsets);
return commitOffsetsAsync(allConsumedOffsets, (offsets, exception) -> {
if (exception != null) {
if (exception instanceof RetriableCommitFailedException) {
log.debug("Asynchronous auto-commit of offsets {} failed due to retriable error.", offsets,
exception);
nextAutoCommitTimer.updateAndReset(rebalanceConfig.retryBackoffMs);
} else {
log.warn("Asynchronous auto-commit of offsets {} failed: {}", offsets, exception.getMessage());
}
} else {
log.debug("Completed asynchronous auto-commit of offsets {}", offsets);
}
});
}
|
Commit offsets synchronously. This method will retry until the commit completes successfully
or an unrecoverable error is encountered.
@param offsets The offsets to be committed
@throws org.apache.kafka.common.errors.AuthorizationException if the consumer is not authorized to the group
or to any of the specified partitions. See the exception for more details
@throws CommitFailedException if an unrecoverable error occurs before the commit can be completed
@throws FencedInstanceIdException if a static member gets fenced
@return If the offset commit was successfully sent and a successful response was received from
the coordinator
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java
| 1,229
|
[] | true
| 3
| 7.44
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
create_crawler
|
def create_crawler(self, **crawler_kwargs) -> str:
"""
Create an AWS Glue Crawler.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.create_crawler`
:param crawler_kwargs: Keyword args that define the configurations used to create the crawler
:return: Name of the crawler
"""
crawler_name = crawler_kwargs["Name"]
self.log.info("Creating crawler: %s", crawler_name)
return self.glue_client.create_crawler(**crawler_kwargs)
|
Create an AWS Glue Crawler.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.create_crawler`
:param crawler_kwargs: Keyword args that define the configurations used to create the crawler
:return: Name of the crawler
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/glue_crawler.py
| 146
|
[
"self"
] |
str
| true
| 1
| 6.4
|
apache/airflow
| 43,597
|
sphinx
| false
|
wasInterrupted
|
protected final boolean wasInterrupted() {
@RetainedLocalRef Object localValue = value();
return (localValue instanceof Cancellation) && ((Cancellation) localValue).wasInterrupted;
}
|
Returns true if this future was cancelled with {@code mayInterruptIfRunning} set to {@code
true}.
@since 14.0
|
java
|
android/guava/src/com/google/common/util/concurrent/AbstractFuture.java
| 431
|
[] | true
| 2
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
__array__
|
def __array__(
self, dtype: npt.DTypeLike | None = None, copy: bool | None = None
) -> np.ndarray:
"""
Return the values as a NumPy array.
Users should not call this directly. Rather, it is invoked by
:func:`numpy.array` and :func:`numpy.asarray`.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to use for the resulting NumPy array. By default,
the dtype is inferred from the data.
copy : bool or None, optional
See :func:`numpy.asarray`.
Returns
-------
numpy.ndarray
The values in the series converted to a :class:`numpy.ndarray`
with the specified `dtype`.
See Also
--------
array : Create a new array from data.
Series.array : Zero-copy view to the array backing the Series.
Series.to_numpy : Series method for similar behavior.
Examples
--------
>>> ser = pd.Series([1, 2, 3])
>>> np.asarray(ser)
array([1, 2, 3])
For timezone-aware data, the timezones may be retained with
``dtype='object'``
>>> tzser = pd.Series(pd.date_range("2000", periods=2, tz="CET"))
>>> np.asarray(tzser, dtype="object")
array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'),
Timestamp('2000-01-02 00:00:00+0100', tz='CET')],
dtype=object)
Or the values may be localized to UTC and the tzinfo discarded with
``dtype='datetime64[ns]'``
>>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS
array(['1999-12-31T23:00:00.000000000', ...],
dtype='datetime64[ns]')
"""
values = self._values
if copy is None:
# Note: branch avoids `copy=None` for NumPy 1.x support
arr = np.asarray(values, dtype=dtype)
else:
arr = np.array(values, dtype=dtype, copy=copy)
if copy is True:
return arr
if copy is False or astype_is_view(values.dtype, arr.dtype):
arr = arr.view()
arr.flags.writeable = False
return arr
|
Return the values as a NumPy array.
Users should not call this directly. Rather, it is invoked by
:func:`numpy.array` and :func:`numpy.asarray`.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to use for the resulting NumPy array. By default,
the dtype is inferred from the data.
copy : bool or None, optional
See :func:`numpy.asarray`.
Returns
-------
numpy.ndarray
The values in the series converted to a :class:`numpy.ndarray`
with the specified `dtype`.
See Also
--------
array : Create a new array from data.
Series.array : Zero-copy view to the array backing the Series.
Series.to_numpy : Series method for similar behavior.
Examples
--------
>>> ser = pd.Series([1, 2, 3])
>>> np.asarray(ser)
array([1, 2, 3])
For timezone-aware data, the timezones may be retained with
``dtype='object'``
>>> tzser = pd.Series(pd.date_range("2000", periods=2, tz="CET"))
>>> np.asarray(tzser, dtype="object")
array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'),
Timestamp('2000-01-02 00:00:00+0100', tz='CET')],
dtype=object)
Or the values may be localized to UTC and the tzinfo discarded with
``dtype='datetime64[ns]'``
>>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS
array(['1999-12-31T23:00:00.000000000', ...],
dtype='datetime64[ns]')
|
python
|
pandas/core/series.py
| 848
|
[
"self",
"dtype",
"copy"
] |
np.ndarray
| true
| 6
| 8.08
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
matches
|
boolean matches(Method method, Class<?> targetClass, boolean hasIntroductions);
|
Perform static checking whether the given method matches. This may be invoked
instead of the 2-arg {@link #matches(java.lang.reflect.Method, Class)} method
if the caller supports the extended IntroductionAwareMethodMatcher interface.
@param method the candidate method
@param targetClass the target class
@param hasIntroductions {@code true} if the object on whose behalf we are
asking is the subject on one or more introductions; {@code false} otherwise
@return whether this method matches statically
|
java
|
spring-aop/src/main/java/org/springframework/aop/IntroductionAwareMethodMatcher.java
| 41
|
[
"method",
"targetClass",
"hasIntroductions"
] | true
| 1
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
createFileTypeMap
|
protected FileTypeMap createFileTypeMap(@Nullable Resource mappingLocation, String @Nullable [] mappings) throws IOException {
MimetypesFileTypeMap fileTypeMap = null;
if (mappingLocation != null) {
try (InputStream is = mappingLocation.getInputStream()) {
fileTypeMap = new MimetypesFileTypeMap(is);
}
}
else {
fileTypeMap = new MimetypesFileTypeMap();
}
if (mappings != null) {
for (String mapping : mappings) {
fileTypeMap.addMimeTypes(mapping);
}
}
return fileTypeMap;
}
|
Compile a {@link FileTypeMap} from the mappings in the given mapping file
and the given mapping entries.
<p>The default implementation creates an Activation Framework {@link MimetypesFileTypeMap},
passing in an InputStream from the mapping resource (if any) and registering
the mapping lines programmatically.
@param mappingLocation a {@code mime.types} mapping resource (can be {@code null})
@param mappings an array of MIME type mapping lines (can be {@code null})
@return the compiled FileTypeMap
@throws IOException if resource access failed
@see jakarta.activation.MimetypesFileTypeMap#MimetypesFileTypeMap(java.io.InputStream)
@see jakarta.activation.MimetypesFileTypeMap#addMimeTypes(String)
|
java
|
spring-context-support/src/main/java/org/springframework/mail/javamail/ConfigurableMimeFileTypeMap.java
| 145
|
[
"mappingLocation",
"mappings"
] |
FileTypeMap
| true
| 3
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
lastHeader
|
Header lastHeader(String key);
|
Returns just one (the very last) header for the given key, if present.
@param key to get the last header for; must not be null.
@return this last header matching the given key, returns null if not present.
|
java
|
clients/src/main/java/org/apache/kafka/common/header/Headers.java
| 62
|
[
"key"
] |
Header
| true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
visitorWorker
|
function visitorWorker(node: Node): VisitResult<Node | undefined> {
if (node.transformFlags & TransformFlags.ContainsTypeScript) {
return visitTypeScript(node);
}
return node;
}
|
Visits and possibly transforms any node.
@param node The node to visit.
|
typescript
|
src/compiler/transformers/ts.ts
| 391
|
[
"node"
] | true
| 2
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
onCommit
|
public void onCommit(Map<TopicPartition, OffsetAndMetadata> offsets) {
for (Plugin<ConsumerInterceptor<K, V>> interceptorPlugin : this.interceptorPlugins) {
try {
interceptorPlugin.get().onCommit(offsets);
} catch (Exception e) {
// do not propagate interceptor exception, just log
log.warn("Error executing interceptor onCommit callback", e);
}
}
}
|
This is called when commit request returns successfully from the broker.
<p>
This method calls {@link ConsumerInterceptor#onCommit(Map)} method for each interceptor.
<p>
This method does not throw exceptions. Exceptions thrown by any of the interceptors in the chain are logged, but not propagated.
@param offsets A map of offsets by partition with associated metadata
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptors.java
| 88
|
[
"offsets"
] |
void
| true
| 2
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
ensureCoordinatorReadyAsync
|
protected synchronized boolean ensureCoordinatorReadyAsync() {
return ensureCoordinatorReady(time.timer(0), true);
}
|
Ensure that the coordinator is ready to receive requests. This will return
immediately without blocking. It is intended to be called in an asynchronous
context when wakeups are not expected.
@return true If coordinator discovery and initial connection succeeded, false otherwise
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
| 280
|
[] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
chain
|
public void chain(final RequestFuture<T> future) {
addListener(new RequestFutureListener<>() {
@Override
public void onSuccess(T value) {
future.complete(value);
}
@Override
public void onFailure(RuntimeException e) {
future.raise(e);
}
});
}
|
Convert from a request future of one type to another type
@param adapter The adapter which does the conversion
@param <S> The type of the future adapted to
@return The new future
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestFuture.java
| 217
|
[
"future"
] |
void
| true
| 1
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
append
|
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> import numpy as np
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, ..., 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: all the input arrays must have same number of dimensions, but
the array at index 0 has 2 dimension(s) and the array at index 1 has 1
dimension(s)
>>> a = np.array([1, 2], dtype=np.int_)
>>> c = np.append(a, [])
>>> c
array([1., 2.])
>>> c.dtype
float64
Default dtype for empty ndarrays is `float64` thus making the output of dtype
`float64` when appended with dtype `int64`
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim - 1
return concatenate((arr, values), axis=axis)
|
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> import numpy as np
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, ..., 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: all the input arrays must have same number of dimensions, but
the array at index 0 has 2 dimension(s) and the array at index 1 has 1
dimension(s)
>>> a = np.array([1, 2], dtype=np.int_)
>>> c = np.append(a, [])
>>> c
array([1., 2.])
>>> c.dtype
float64
Default dtype for empty ndarrays is `float64` thus making the output of dtype
`float64` when appended with dtype `int64`
|
python
|
numpy/lib/_function_base_impl.py
| 5,576
|
[
"arr",
"values",
"axis"
] | false
| 3
| 7.6
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
flatten
|
private static Stream<PropertySource<?>> flatten(PropertySource<?> source) {
if (source.getSource() instanceof ConfigurableEnvironment configurableEnvironment) {
return streamPropertySources(configurableEnvironment.getPropertySources());
}
return Stream.of(source);
}
|
Return {@link Iterable} containing new {@link ConfigurationPropertySource}
instances adapted from the given Spring {@link PropertySource PropertySources}.
<p>
This method will flatten any nested property sources and will filter all
{@link StubPropertySource stub property sources}. Updates to the underlying source,
identified by changes in the sources returned by its iterator, will be
automatically tracked. The underlying source should be thread safe, for example a
{@link MutablePropertySources}
@param sources the Spring property sources to adapt
@return an {@link Iterable} containing newly adapted
{@link SpringConfigurationPropertySource} instances
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertySources.java
| 166
|
[
"source"
] | true
| 2
| 7.44
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
get_filesystem_type
|
def get_filesystem_type(filepath: str):
"""
Determine the type of filesystem used - we might want to use different parameters if tmpfs is used.
:param filepath: path to check
:return: type of filesystem
"""
# We import it locally so that click autocomplete works
try:
import psutil
except ImportError:
return "unknown"
root_type = "unknown"
for part in psutil.disk_partitions(all=True):
if part.mountpoint == "/":
root_type = part.fstype
elif filepath.startswith(part.mountpoint):
return part.fstype
return root_type
|
Determine the type of filesystem used - we might want to use different parameters if tmpfs is used.
:param filepath: path to check
:return: type of filesystem
|
python
|
dev/breeze/src/airflow_breeze/utils/run_utils.py
| 299
|
[
"filepath"
] | true
| 4
| 8.4
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
str2bool
|
def str2bool(value):
"""
Tries to transform a string supposed to represent a boolean to a boolean.
Parameters
----------
value : str
The string that is transformed to a boolean.
Returns
-------
boolval : bool
The boolean representation of `value`.
Raises
------
ValueError
If the string is not 'True' or 'False' (case independent)
Examples
--------
>>> import numpy as np
>>> np.lib._iotools.str2bool('TRUE')
True
>>> np.lib._iotools.str2bool('false')
False
"""
value = value.upper()
if value == 'TRUE':
return True
elif value == 'FALSE':
return False
else:
raise ValueError("Invalid boolean")
|
Tries to transform a string supposed to represent a boolean to a boolean.
Parameters
----------
value : str
The string that is transformed to a boolean.
Returns
-------
boolval : bool
The boolean representation of `value`.
Raises
------
ValueError
If the string is not 'True' or 'False' (case independent)
Examples
--------
>>> import numpy as np
>>> np.lib._iotools.str2bool('TRUE')
True
>>> np.lib._iotools.str2bool('false')
False
|
python
|
numpy/lib/_iotools.py
| 386
|
[
"value"
] | false
| 4
| 7.52
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
dropRight
|
function dropRight(array, n, guard) {
var length = array == null ? 0 : array.length;
if (!length) {
return [];
}
n = (guard || n === undefined) ? 1 : toInteger(n);
n = length - n;
return baseSlice(array, 0, n < 0 ? 0 : n);
}
|
Creates a slice of `array` with `n` elements dropped from the end.
@static
@memberOf _
@since 3.0.0
@category Array
@param {Array} array The array to query.
@param {number} [n=1] The number of elements to drop.
@param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
@returns {Array} Returns the slice of `array`.
@example
_.dropRight([1, 2, 3]);
// => [1, 2]
_.dropRight([1, 2, 3], 2);
// => [1]
_.dropRight([1, 2, 3], 5);
// => []
_.dropRight([1, 2, 3], 0);
// => [1, 2, 3]
|
javascript
|
lodash.js
| 7,184
|
[
"array",
"n",
"guard"
] | false
| 6
| 7.52
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
allocatedSizeInBytes
|
OptionalLong allocatedSizeInBytes(Path path);
|
Retrieves the actual number of bytes of disk storage used to store a specified file.
@param path the path to the file
@return an {@link OptionalLong} that contains the number of allocated bytes on disk for the file, or empty if the size is invalid
|
java
|
libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java
| 74
|
[
"path"
] |
OptionalLong
| true
| 1
| 6.48
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
mergeProperties
|
protected PropertiesHolder mergeProperties(List<PropertiesHolder> holders) {
Properties mergedProps = newProperties();
long latestTimestamp = -1;
for (PropertiesHolder holder : holders) {
mergedProps.putAll(holder.getProperties());
if (holder.getFileTimestamp() > latestTimestamp) {
latestTimestamp = holder.getFileTimestamp();
}
}
return new PropertiesHolder(mergedProps, latestTimestamp);
}
|
Merge the given properties holders into a single holder.
@param holders the list of properties holders
@return a single merged properties holder
@since 6.1.4
@see #newProperties()
@see #getMergedProperties
@see #collectPropertiesToMerge
|
java
|
spring-context/src/main/java/org/springframework/context/support/ReloadableResourceBundleMessageSource.java
| 302
|
[
"holders"
] |
PropertiesHolder
| true
| 2
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
css_bar
|
def css_bar(start: float, end: float, color: str) -> str:
"""
Generate CSS code to draw a bar from start to end in a table cell.
Uses linear-gradient.
Parameters
----------
start : float
Relative positional start of bar coloring in [0,1]
end : float
Relative positional end of the bar coloring in [0,1]
color : str
CSS valid color to apply.
Returns
-------
str : The CSS applicable to the cell.
Notes
-----
Uses ``base_css`` from outer scope.
"""
cell_css = base_css
if end > start:
cell_css += "background: linear-gradient(90deg,"
if start > 0:
cell_css += (
f" transparent {start * 100:.1f}%, {color} {start * 100:.1f}%,"
)
cell_css += f" {color} {end * 100:.1f}%, transparent {end * 100:.1f}%)"
return cell_css
|
Generate CSS code to draw a bar from start to end in a table cell.
Uses linear-gradient.
Parameters
----------
start : float
Relative positional start of bar coloring in [0,1]
end : float
Relative positional end of the bar coloring in [0,1]
color : str
CSS valid color to apply.
Returns
-------
str : The CSS applicable to the cell.
Notes
-----
Uses ``base_css`` from outer scope.
|
python
|
pandas/io/formats/style.py
| 4,114
|
[
"start",
"end",
"color"
] |
str
| true
| 3
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
runDelegatedTasks
|
private HandshakeStatus runDelegatedTasks() {
for (;;) {
Runnable task = delegatedTask();
if (task == null) {
break;
}
task.run();
}
return sslEngine.getHandshakeStatus();
}
|
Executes the SSLEngine tasks needed.
@return HandshakeStatus
|
java
|
clients/src/main/java/org/apache/kafka/common/network/SslTransportLayer.java
| 438
|
[] |
HandshakeStatus
| true
| 3
| 7.44
|
apache/kafka
| 31,560
|
javadoc
| false
|
Event
|
def Event(type, _fields=None, __dict__=dict, __now__=time.time, **fields):
"""Create an event.
Notes:
An event is simply a dictionary: the only required field is ``type``.
A ``timestamp`` field will be set to the current time if not provided.
"""
event = __dict__(_fields, **fields) if _fields else fields
if 'timestamp' not in event:
event.update(timestamp=__now__(), type=type)
else:
event['type'] = type
return event
|
Create an event.
Notes:
An event is simply a dictionary: the only required field is ``type``.
A ``timestamp`` field will be set to the current time if not provided.
|
python
|
celery/events/event.py
| 18
|
[
"type",
"_fields",
"__dict__",
"__now__"
] | false
| 4
| 6.08
|
celery/celery
| 27,741
|
unknown
| false
|
|
useRefLike
|
function useRefLike<T>(name: string, initialValue: T): { current: T } {
return useMemoLike(name, () => ({ current: initialValue }), []);
}
|
Returns a memoized callback.
@example
```ts
const memoizedCallback = useCallback(() => {
doSomething(a, b);
}, [a, b]);
```
@template T The type of the callback function.
@param {T} callback The callback function to memoize.
@param {any[]} [deps] An optional array of dependencies. If any of the dependencies change, the
memoized callback will be recomputed.
@returns {T} The memoized callback.
|
typescript
|
code/core/src/preview-api/modules/addons/hooks.ts
| 345
|
[
"name",
"initialValue"
] | true
| 1
| 7.04
|
storybookjs/storybook
| 88,865
|
jsdoc
| false
|
|
noConflict
|
function noConflict() {
if (root._ === this) {
root._ = oldDash;
}
return this;
}
|
Reverts the `_` variable to its previous value and returns a reference to
the `lodash` function.
@static
@since 0.1.0
@memberOf _
@category Util
@returns {Function} Returns the `lodash` function.
@example
var lodash = _.noConflict();
|
javascript
|
lodash.js
| 15,866
|
[] | false
| 2
| 8.72
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
cleanUp
|
@Override
public void cleanUp() {
if (isLog4jBridgeHandlerAvailable()) {
removeLog4jBridgeHandler();
}
super.cleanUp();
LoggerContext loggerContext = getLoggerContext();
markAsUninitialized(loggerContext);
StatusConsoleListener listener = (StatusConsoleListener) loggerContext.getObject(STATUS_LISTENER_KEY);
if (listener != null) {
StatusLogger.getLogger().removeListener(listener);
loggerContext.removeObject(STATUS_LISTENER_KEY);
}
loggerContext.getConfiguration().removeFilter(FILTER);
Log4J2LoggingSystem.propertySource.setEnvironment(null);
loggerContext.removeObject(ENVIRONMENT_KEY);
}
|
Return the configuration location. The result may be:
<ul>
<li>{@code null}: if DefaultConfiguration is used (no explicit config loaded)</li>
<li>A file path: if provided explicitly by the user</li>
<li>A URI: if loaded from the classpath default or a custom location</li>
</ul>
@param configuration the source configuration
@return the config location or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/log4j2/Log4J2LoggingSystem.java
| 452
|
[] |
void
| true
| 3
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
pendingRequestCount
|
public int pendingRequestCount(Node node) {
lock.lock();
try {
return unsent.requestCount(node) + client.inFlightRequestCount(node.idString());
} finally {
lock.unlock();
}
}
|
Get the count of pending requests to the given node. This includes both request that
have been transmitted (i.e. in-flight requests) and those which are awaiting transmission.
@param node The node in question
@return The number of pending requests
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java
| 366
|
[
"node"
] | true
| 1
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
send_mail
|
def send_mail(
subject,
message,
from_email,
recipient_list,
*,
fail_silently=False,
auth_user=None,
auth_password=None,
connection=None,
html_message=None,
):
"""
Easy wrapper for sending a single message to a recipient list. All members
of the recipient list will see the other recipients in the 'To' field.
If from_email is None, use the DEFAULT_FROM_EMAIL setting.
If auth_user is None, use the EMAIL_HOST_USER setting.
If auth_password is None, use the EMAIL_HOST_PASSWORD setting.
Note: The API for this method is frozen. New code wanting to extend the
functionality should use the EmailMessage class directly.
"""
connection = connection or get_connection(
username=auth_user,
password=auth_password,
fail_silently=fail_silently,
)
mail = EmailMultiAlternatives(
subject, message, from_email, recipient_list, connection=connection
)
if html_message:
mail.attach_alternative(html_message, "text/html")
return mail.send()
|
Easy wrapper for sending a single message to a recipient list. All members
of the recipient list will see the other recipients in the 'To' field.
If from_email is None, use the DEFAULT_FROM_EMAIL setting.
If auth_user is None, use the EMAIL_HOST_USER setting.
If auth_password is None, use the EMAIL_HOST_PASSWORD setting.
Note: The API for this method is frozen. New code wanting to extend the
functionality should use the EmailMessage class directly.
|
python
|
django/core/mail/__init__.py
| 74
|
[
"subject",
"message",
"from_email",
"recipient_list",
"fail_silently",
"auth_user",
"auth_password",
"connection",
"html_message"
] | false
| 3
| 6.08
|
django/django
| 86,204
|
unknown
| false
|
|
newReferenceArray
|
public static <E> AtomicReferenceArray<@Nullable E> newReferenceArray(int length) {
return new AtomicReferenceArray<>(length);
}
|
Creates an {@code AtomicReferenceArray} instance of given length.
@param length the length of the array
@return a new {@code AtomicReferenceArray} with the given length
|
java
|
android/guava/src/com/google/common/util/concurrent/Atomics.java
| 58
|
[
"length"
] | true
| 1
| 6.16
|
google/guava
| 51,352
|
javadoc
| false
|
|
extractPropertyName
|
protected String extractPropertyName(String attributeName) {
return Conventions.attributeNameToPropertyName(attributeName);
}
|
Extract a JavaBean property name from the supplied attribute name.
<p>The default implementation uses the
{@link Conventions#attributeNameToPropertyName(String)}
method to perform the extraction.
<p>The name returned must obey the standard JavaBean property name
conventions. For example for a class with a setter method
'{@code setBingoHallFavourite(String)}', the name returned had
better be '{@code bingoHallFavourite}' (with that exact casing).
@param attributeName the attribute name taken straight from the
XML element being parsed (never {@code null})
@return the extracted JavaBean property name (must never be {@code null})
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/xml/AbstractSimpleBeanDefinitionParser.java
| 181
|
[
"attributeName"
] |
String
| true
| 1
| 6.16
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
usingPairs
|
@SuppressWarnings({ "unchecked", "rawtypes" })
public <N, V> Member<T> usingPairs(BiConsumer<T, BiConsumer<N, V>> pairs) {
Assert.notNull(pairs, "'pairs' must not be null");
Assert.state(this.pairs == null, "Pairs cannot be declared multiple times");
Assert.state(this.members == null, "Pairs cannot be declared when using members");
this.pairs = (BiConsumer) pairs;
return this;
}
|
Add JSON name/value pairs. Typically used with a
{@link Map#forEach(BiConsumer)} call, for example:
<pre class="code">
members.add(Event::getLabels).usingPairs(Map::forEach);
</pre>
<p>
When used with a named member, the pairs will be added as a new JSON value
object:
<pre>
{
"name": {
"p1": 1,
"p2": 2
}
}
</pre>
When used with an unnamed member the pairs will be added to the existing JSON
object:
<pre>
{
"p1": 1,
"p2": 2
}
</pre>
@param <N> the name type
@param <V> the value type
@param pairs callback used to provide the pairs
@return a {@link Member} which may be configured further
@see #usingExtractedPairs(BiConsumer, PairExtractor)
@see #usingPairs(BiConsumer)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/json/JsonWriter.java
| 589
|
[
"pairs"
] | true
| 1
| 6.4
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
toBoolean
|
public static boolean toBoolean(final String str, final String trueString, final String falseString) {
if (str == trueString) {
return true;
}
if (str == falseString) {
return false;
}
if (str != null) {
if (str.equals(trueString)) {
return true;
}
if (str.equals(falseString)) {
return false;
}
}
throw new IllegalArgumentException("The String did not match either specified value");
}
|
Converts a String to a Boolean throwing an exception if no match found.
<pre>
BooleanUtils.toBoolean("true", "true", "false") = true
BooleanUtils.toBoolean("false", "true", "false") = false
</pre>
@param str the String to check
@param trueString the String to match for {@code true} (case-sensitive), may be {@code null}
@param falseString the String to match for {@code false} (case-sensitive), may be {@code null}
@return the boolean value of the string
@throws IllegalArgumentException if the String doesn't match
|
java
|
src/main/java/org/apache/commons/lang3/BooleanUtils.java
| 528
|
[
"str",
"trueString",
"falseString"
] | true
| 6
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
findCandidateAdvisors
|
protected List<Advisor> findCandidateAdvisors() {
Assert.state(this.advisorRetrievalHelper != null, "No BeanFactoryAdvisorRetrievalHelper available");
return this.advisorRetrievalHelper.findAdvisorBeans();
}
|
Find all candidate Advisors to use in auto-proxying.
@return the List of candidate Advisors
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/autoproxy/AbstractAdvisorAutoProxyCreator.java
| 116
|
[] | true
| 1
| 6.64
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
getCollapsedBucketCountAfterScaleReduction
|
int getCollapsedBucketCountAfterScaleReduction(int reduction) {
assert reduction >= 0 && reduction <= MAX_INDEX_BITS;
int totalCollapsed = 0;
for (int i = 0; i < reduction; i++) {
totalCollapsed += collapsedBucketCount[i];
}
return totalCollapsed;
}
|
Returns the number of buckets that will be merged after applying the given scale reduction.
@param reduction the scale reduction factor
@return the number of buckets that will be merged
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/DownscaleStats.java
| 87
|
[
"reduction"
] | true
| 3
| 8.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
visitJavaScriptInGeneratorFunctionBody
|
function visitJavaScriptInGeneratorFunctionBody(node: Node): VisitResult<Node | undefined> {
switch (node.kind) {
case SyntaxKind.FunctionDeclaration:
return visitFunctionDeclaration(node as FunctionDeclaration);
case SyntaxKind.FunctionExpression:
return visitFunctionExpression(node as FunctionExpression);
case SyntaxKind.GetAccessor:
case SyntaxKind.SetAccessor:
return visitAccessorDeclaration(node as AccessorDeclaration);
case SyntaxKind.VariableStatement:
return visitVariableStatement(node as VariableStatement);
case SyntaxKind.ForStatement:
return visitForStatement(node as ForStatement);
case SyntaxKind.ForInStatement:
return visitForInStatement(node as ForInStatement);
case SyntaxKind.BreakStatement:
return visitBreakStatement(node as BreakStatement);
case SyntaxKind.ContinueStatement:
return visitContinueStatement(node as ContinueStatement);
case SyntaxKind.ReturnStatement:
return visitReturnStatement(node as ReturnStatement);
default:
if (node.transformFlags & TransformFlags.ContainsYield) {
return visitJavaScriptContainingYield(node);
}
else if (node.transformFlags & (TransformFlags.ContainsGenerator | TransformFlags.ContainsHoistedDeclarationOrCompletion)) {
return visitEachChild(node, visitor, context);
}
else {
return node;
}
}
}
|
Visits a node that is contained within a generator function.
@param node The node to visit.
|
typescript
|
src/compiler/transformers/generators.ts
| 461
|
[
"node"
] | true
| 5
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
skew
|
def skew(
self,
skipna: bool = True,
numeric_only: bool = False,
**kwargs,
) -> Series:
"""
Return unbiased skew within groups.
Normalized by N-1.
Parameters
----------
skipna : bool, default True
Exclude NA/null values when computing the result.
numeric_only : bool, default False
Include only float, int, boolean columns. Not implemented for Series.
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
Series
Unbiased skew within groups.
See Also
--------
Series.skew : Return unbiased skew over requested axis.
Examples
--------
>>> ser = pd.Series(
... [390.0, 350.0, 357.0, np.nan, 22.0, 20.0, 30.0],
... index=[
... "Falcon",
... "Falcon",
... "Falcon",
... "Falcon",
... "Parrot",
... "Parrot",
... "Parrot",
... ],
... name="Max Speed",
... )
>>> ser
Falcon 390.0
Falcon 350.0
Falcon 357.0
Falcon NaN
Parrot 22.0
Parrot 20.0
Parrot 30.0
Name: Max Speed, dtype: float64
>>> ser.groupby(level=0).skew()
Falcon 1.525174
Parrot 1.457863
Name: Max Speed, dtype: float64
>>> ser.groupby(level=0).skew(skipna=False)
Falcon NaN
Parrot 1.457863
Name: Max Speed, dtype: float64
"""
return self._cython_agg_general(
"skew", alt=None, skipna=skipna, numeric_only=numeric_only, **kwargs
)
|
Return unbiased skew within groups.
Normalized by N-1.
Parameters
----------
skipna : bool, default True
Exclude NA/null values when computing the result.
numeric_only : bool, default False
Include only float, int, boolean columns. Not implemented for Series.
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
Series
Unbiased skew within groups.
See Also
--------
Series.skew : Return unbiased skew over requested axis.
Examples
--------
>>> ser = pd.Series(
... [390.0, 350.0, 357.0, np.nan, 22.0, 20.0, 30.0],
... index=[
... "Falcon",
... "Falcon",
... "Falcon",
... "Falcon",
... "Parrot",
... "Parrot",
... "Parrot",
... ],
... name="Max Speed",
... )
>>> ser
Falcon 390.0
Falcon 350.0
Falcon 357.0
Falcon NaN
Parrot 22.0
Parrot 20.0
Parrot 30.0
Name: Max Speed, dtype: float64
>>> ser.groupby(level=0).skew()
Falcon 1.525174
Parrot 1.457863
Name: Max Speed, dtype: float64
>>> ser.groupby(level=0).skew(skipna=False)
Falcon NaN
Parrot 1.457863
Name: Max Speed, dtype: float64
|
python
|
pandas/core/groupby/generic.py
| 1,369
|
[
"self",
"skipna",
"numeric_only"
] |
Series
| true
| 1
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
createProxyClassAndInstance
|
protected Object createProxyClassAndInstance(Enhancer enhancer, Callback[] callbacks) {
enhancer.setInterceptDuringConstruction(false);
enhancer.setCallbacks(callbacks);
return (this.constructorArgs != null && this.constructorArgTypes != null ?
enhancer.create(this.constructorArgTypes, this.constructorArgs) :
enhancer.create());
}
|
Set constructor arguments to use for creating the proxy.
@param constructorArgs the constructor argument values
@param constructorArgTypes the constructor argument types
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/CglibAopProxy.java
| 251
|
[
"enhancer",
"callbacks"
] |
Object
| true
| 3
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
toEscaper
|
public Escaper toEscaper() {
return new CharArrayDecorator(toArray());
}
|
Convert this builder into a char escaper which is just a decorator around the underlying array
of replacement char[]s.
@return an escaper that escapes based on the underlying array.
|
java
|
android/guava/src/com/google/common/escape/CharEscaperBuilder.java
| 124
|
[] |
Escaper
| true
| 1
| 6.8
|
google/guava
| 51,352
|
javadoc
| false
|
wrapIfNecessary
|
protected Object wrapIfNecessary(Object bean, String beanName, Object cacheKey) {
if (StringUtils.hasLength(beanName) && this.targetSourcedBeans.contains(beanName)) {
return bean;
}
if (Boolean.FALSE.equals(this.advisedBeans.get(cacheKey))) {
return bean;
}
if (isInfrastructureClass(bean.getClass()) || shouldSkip(bean.getClass(), beanName)) {
this.advisedBeans.put(cacheKey, Boolean.FALSE);
return bean;
}
// Create proxy if we have advice.
Object[] specificInterceptors = getAdvicesAndAdvisorsForBean(bean.getClass(), beanName, null);
if (specificInterceptors != DO_NOT_PROXY) {
this.advisedBeans.put(cacheKey, Boolean.TRUE);
Object proxy = createProxy(
bean.getClass(), beanName, specificInterceptors, new SingletonTargetSource(bean));
this.proxyTypes.put(cacheKey, proxy.getClass());
return proxy;
}
this.advisedBeans.put(cacheKey, Boolean.FALSE);
return bean;
}
|
Wrap the given bean if necessary, i.e. if it is eligible for being proxied.
@param bean the raw bean instance
@param beanName the name of the bean
@param cacheKey the cache key for metadata access
@return a proxy wrapping the bean, or the raw bean instance as-is
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/autoproxy/AbstractAutoProxyCreator.java
| 321
|
[
"bean",
"beanName",
"cacheKey"
] |
Object
| true
| 7
| 8.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
inplace_row_scale
|
def inplace_row_scale(X, scale):
"""Inplace row scaling of a CSR or CSC matrix.
Scale each row of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix to be scaled. It should be of CSR or CSC format.
scale : ndarray of shape (n_features,), dtype={np.float32, np.float64}
Array of precomputed sample-wise values to use for scaling.
Examples
--------
>>> from sklearn.utils import sparsefuncs
>>> from scipy import sparse
>>> import numpy as np
>>> indptr = np.array([0, 2, 3, 4, 5])
>>> indices = np.array([0, 1, 2, 3, 3])
>>> data = np.array([8, 1, 2, 5, 6])
>>> scale = np.array([2, 3, 4, 5])
>>> csr = sparse.csr_matrix((data, indices, indptr))
>>> csr.todense()
matrix([[8, 1, 0, 0],
[0, 0, 2, 0],
[0, 0, 0, 5],
[0, 0, 0, 6]])
>>> sparsefuncs.inplace_row_scale(csr, scale)
>>> csr.todense()
matrix([[16, 2, 0, 0],
[ 0, 0, 6, 0],
[ 0, 0, 0, 20],
[ 0, 0, 0, 30]])
"""
if sp.issparse(X) and X.format == "csc":
inplace_csr_column_scale(X.T, scale)
elif sp.issparse(X) and X.format == "csr":
inplace_csr_row_scale(X, scale)
else:
_raise_typeerror(X)
|
Inplace row scaling of a CSR or CSC matrix.
Scale each row of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix to be scaled. It should be of CSR or CSC format.
scale : ndarray of shape (n_features,), dtype={np.float32, np.float64}
Array of precomputed sample-wise values to use for scaling.
Examples
--------
>>> from sklearn.utils import sparsefuncs
>>> from scipy import sparse
>>> import numpy as np
>>> indptr = np.array([0, 2, 3, 4, 5])
>>> indices = np.array([0, 1, 2, 3, 3])
>>> data = np.array([8, 1, 2, 5, 6])
>>> scale = np.array([2, 3, 4, 5])
>>> csr = sparse.csr_matrix((data, indices, indptr))
>>> csr.todense()
matrix([[8, 1, 0, 0],
[0, 0, 2, 0],
[0, 0, 0, 5],
[0, 0, 0, 6]])
>>> sparsefuncs.inplace_row_scale(csr, scale)
>>> csr.todense()
matrix([[16, 2, 0, 0],
[ 0, 0, 6, 0],
[ 0, 0, 0, 20],
[ 0, 0, 0, 30]])
|
python
|
sklearn/utils/sparsefuncs.py
| 339
|
[
"X",
"scale"
] | false
| 6
| 7.68
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
getSubName
|
private @Nullable String getSubName(String name) {
if (!StringUtils.hasLength(name)) {
return null;
}
int nested = name.lastIndexOf('$');
return (nested != -1) ? name.substring(0, nested) : NameUtil.getSubName(name);
}
|
Return the configuration location. The result may be:
<ul>
<li>{@code null}: if DefaultConfiguration is used (no explicit config loaded)</li>
<li>A file path: if provided explicitly by the user</li>
<li>A URI: if loaded from the classpath default or a custom location</li>
</ul>
@param configuration the source configuration
@return the config location or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/log4j2/Log4J2LoggingSystem.java
| 421
|
[
"name"
] |
String
| true
| 3
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
cov
|
def cov(
self,
other: DataFrame | Series | None = None,
pairwise: bool | None = None,
ddof: int = 1,
numeric_only: bool = False,
):
"""
Calculate the expanding sample covariance.
Parameters
----------
other : Series or DataFrame, optional
If not supplied then will default to self and produce pairwise
output.
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndexed DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
numeric_only : bool, default False
Include only float, int, boolean columns.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.cov : Aggregating cov for Series.
DataFrame.cov : Aggregating cov for DataFrame.
Examples
--------
>>> ser1 = pd.Series([1, 2, 3, 4], index=["a", "b", "c", "d"])
>>> ser2 = pd.Series([10, 11, 13, 16], index=["a", "b", "c", "d"])
>>> ser1.expanding().cov(ser2)
a NaN
b 0.500000
c 1.500000
d 3.333333
dtype: float64
"""
return super().cov(
other=other,
pairwise=pairwise,
ddof=ddof,
numeric_only=numeric_only,
)
|
Calculate the expanding sample covariance.
Parameters
----------
other : Series or DataFrame, optional
If not supplied then will default to self and produce pairwise
output.
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndexed DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
numeric_only : bool, default False
Include only float, int, boolean columns.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.expanding : Calling expanding with Series data.
DataFrame.expanding : Calling expanding with DataFrames.
Series.cov : Aggregating cov for Series.
DataFrame.cov : Aggregating cov for DataFrame.
Examples
--------
>>> ser1 = pd.Series([1, 2, 3, 4], index=["a", "b", "c", "d"])
>>> ser2 = pd.Series([10, 11, 13, 16], index=["a", "b", "c", "d"])
>>> ser1.expanding().cov(ser2)
a NaN
b 0.500000
c 1.500000
d 3.333333
dtype: float64
|
python
|
pandas/core/window/expanding.py
| 1,243
|
[
"self",
"other",
"pairwise",
"ddof",
"numeric_only"
] | true
| 1
| 7.2
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
prepareFetchRequests
|
protected Map<Node, FetchSessionHandler.FetchRequestData> prepareFetchRequests() {
// Update metrics in case there was an assignment change
metricsManager.maybeUpdateAssignment(subscriptions);
Map<Node, FetchSessionHandler.Builder> fetchable = new HashMap<>();
long currentTimeMs = time.milliseconds();
Map<String, Uuid> topicIds = metadata.topicIds();
// This is the set of partitions that have buffered data
Set<TopicPartition> buffered = Collections.unmodifiableSet(fetchBuffer.bufferedPartitions());
// This is the list of partitions that are fetchable and have no buffered data
List<TopicPartition> unbuffered = fetchablePartitions(buffered);
if (unbuffered.isEmpty()) {
// If there are no partitions that don't already have data locally buffered, there's no need to issue
// any fetch requests at the present time.
return Collections.emptyMap();
}
Set<Integer> bufferedNodes = bufferedNodes(buffered, currentTimeMs);
for (TopicPartition partition : unbuffered) {
SubscriptionState.FetchPosition position = positionForPartition(partition);
Optional<Node> nodeOpt = maybeNodeForPosition(partition, position, currentTimeMs);
if (nodeOpt.isEmpty())
continue;
Node node = nodeOpt.get();
if (isUnavailable(node)) {
maybeThrowAuthFailure(node);
// If we try to send during the reconnect backoff window, then the request is just
// going to be failed anyway before being sent, so skip sending the request for now
log.trace("Skipping fetch for partition {} because node {} is awaiting reconnect backoff", partition, node);
} else if (nodesWithPendingFetchRequests.contains(node.id())) {
// If there's already an inflight request for this node, don't issue another request.
log.trace("Skipping fetch for partition {} because previous request to {} has not been processed", partition, node);
} else if (bufferedNodes.contains(node.id())) {
// While a node has buffered data, don't fetch other partition data from it. Because the buffered
// partitions are not included in the fetch request, those partitions will be inadvertently dropped
// from the broker fetch session cache. In some cases, that could lead to the entire fetch session
// being evicted.
log.trace("Skipping fetch for partition {} because its leader node {} hosts buffered partitions", partition, node);
} else {
// if there is a leader and no in-flight requests, issue a new fetch
FetchSessionHandler.Builder builder = fetchable.computeIfAbsent(node, k -> {
FetchSessionHandler fetchSessionHandler = sessionHandlers.computeIfAbsent(node.id(), n -> new FetchSessionHandler(logContext, n));
return fetchSessionHandler.newBuilder();
});
Uuid topicId = topicIds.getOrDefault(partition.topic(), Uuid.ZERO_UUID);
FetchRequest.PartitionData partitionData = new FetchRequest.PartitionData(topicId,
position.offset,
FetchRequest.INVALID_LOG_START_OFFSET,
fetchConfig.fetchSize,
position.currentLeader.epoch,
Optional.empty());
builder.add(partition, partitionData);
log.debug("Added {} fetch request for partition {} at position {} to node {}", fetchConfig.isolationLevel,
partition, position, node);
}
}
return convert(fetchable);
}
|
Create fetch requests for all nodes for which we have assigned partitions
that have no existing requests in flight.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java
| 421
|
[] | true
| 6
| 7.12
|
apache/kafka
| 31,560
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.