function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
toChar
|
/**
 * Converts the String to a char using its first character, falling back to
 * the supplied default for null or empty input.
 * @param str the String to convert, may be null
 * @param defaultValue the char to return when {@code str} is null or empty
 * @return the first character of {@code str}, or {@code defaultValue}
 */
public static char toChar(final String str, final char defaultValue) {
    // Nothing to take a first character from: use the default.
    if (StringUtils.isEmpty(str)) {
        return defaultValue;
    }
    return str.charAt(0);
}
|
Converts the String to a char using the first character, defaulting
the value on empty Strings.
<pre>
CharUtils.toChar(null, 'X') = 'X'
CharUtils.toChar("", 'X') = 'X'
CharUtils.toChar("A", 'X') = 'A'
CharUtils.toChar("BA", 'X') = 'B'
</pre>
@param str the String to convert
@param defaultValue the value to use if the Character is null
@return the char value of the first letter of the String or the default if null
|
java
|
src/main/java/org/apache/commons/lang3/CharUtils.java
| 336
|
[
"str",
"defaultValue"
] | true
| 2
| 8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
transformAsync
|
/**
 * Returns a new {@code ListenableFuture} whose result is asynchronously derived
 * by applying {@code function} to the result of {@code input}, running the
 * function on the given {@code executor}.
 * @param input the future to transform
 * @param function the async transformation applied to the input's result
 * @param executor the executor to run {@code function} in
 * @return a future holding the derived result
 */
public static <I extends @Nullable Object, O extends @Nullable Object>
    ListenableFuture<O> transformAsync(
        ListenableFuture<I> input,
        AsyncFunction<? super I, ? extends O> function,
        Executor executor) {
  // All chaining/cancellation behavior lives in AbstractTransformFuture.
  return AbstractTransformFuture.createAsync(input, function, executor);
}
|
Returns a new {@code Future} whose result is asynchronously derived from the result of the
given {@code Future}. If the given {@code Future} fails, the returned {@code Future} fails with
the same exception (and the function is not invoked).
<p>More precisely, the returned {@code Future} takes its result from a {@code Future} produced
by applying the given {@code AsyncFunction} to the result of the original {@code Future}.
Example usage:
{@snippet :
ListenableFuture<RowKey> rowKeyFuture = indexService.lookUp(query);
ListenableFuture<QueryResult> queryFuture =
transformAsync(rowKeyFuture, dataService::readFuture, executor);
}
<p>When selecting an executor, note that {@code directExecutor} is dangerous in some cases. See
the warnings the {@link MoreExecutors#directExecutor} documentation.
<p>The returned {@code Future} attempts to keep its cancellation state in sync with that of the
input future and that of the future returned by the chain function. That is, if the returned
{@code Future} is cancelled, it will attempt to cancel the other two, and if either of the
other two is cancelled, the returned {@code Future} will receive a callback in which it will
attempt to cancel itself.
@param input The future to transform
@param function A function to transform the result of the input future to the result of the
output future
@param executor Executor to run the function in.
@return A future that holds result of the function (if the input succeeded) or the original
input's failure (if not)
@since 19.0 (in 11.0 as {@code transform})
|
java
|
android/guava/src/com/google/common/util/concurrent/Futures.java
| 451
|
[
"input",
"function",
"executor"
] | true
| 1
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
polysub
|
def polysub(c1, c2):
    """
    Subtract one polynomial from another.

    Returns the difference of two polynomials `c1` - `c2`. The arguments
    are sequences of coefficients from lowest order term to highest, i.e.,
    [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of polynomial coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Of coefficients representing their difference.

    See Also
    --------
    polyadd, polymulx, polymul, polydiv, polypow

    Examples
    --------
    >>> from numpy.polynomial import polynomial as P
    >>> c1 = (1, 2, 3)
    >>> c2 = (3, 2, 1)
    >>> P.polysub(c1,c2)
    array([-2.,  0.,  2.])
    >>> P.polysub(c2, c1) # -P.polysub(c1,c2)
    array([ 2.,  0., -2.])
    """
    # Coefficient-wise subtraction is shared across all polynomial bases
    # via the polyutils helper.
    return pu._sub(c1, c2)
|
Subtract one polynomial from another.
Returns the difference of two polynomials `c1` - `c2`. The arguments
are sequences of coefficients from lowest order term to highest, i.e.,
[1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of polynomial coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of coefficients representing their difference.
See Also
--------
polyadd, polymulx, polymul, polydiv, polypow
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c1 = (1, 2, 3)
>>> c2 = (3, 2, 1)
>>> P.polysub(c1,c2)
array([-2., 0., 2.])
>>> P.polysub(c2, c1) # -P.polysub(c1,c2)
array([ 2., 0., -2.])
|
python
|
numpy/polynomial/polynomial.py
| 251
|
[
"c1",
"c2"
] | false
| 1
| 6.16
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
getClassPathIndex
|
/**
 * Return the class path index file for the given archive, or {@code null}
 * if the archive is not exploded (regular archives already have a defined
 * order).
 * @param archive the archive to inspect
 * @return the loaded class path index file, or {@code null} if none applies
 * @throws IOException on read error
 */
ClassPathIndexFile getClassPathIndex(Archive archive) throws IOException {
    if (!archive.isExploded()) {
        return null; // Regular archives already have a defined order
    }
    String location = getClassPathIndexFileLocation(archive);
    return ClassPathIndexFile.loadIfPossible(archive.getRootDirectory(), location);
}
|
Returns the class path index file for the given archive, or {@code null}
if the archive is not exploded. Regular (non-exploded) archives already
have a defined classpath order, so no index file is needed for them.
@return the class path index file, or {@code null} if none applies.
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/launch/Launcher.java
| 130
|
[
"archive"
] |
ClassPathIndexFile
| true
| 2
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
badPositionIndexes
|
/**
 * Builds the failure message for an invalid (start, end) position-index pair:
 * identifies whichever of {@code start}/{@code end} falls outside the
 * [0, size] range, or — when both are in range — reports that {@code end}
 * is less than {@code start}.
 * @param start a user-supplied start position
 * @param end a user-supplied end position
 * @param size the size of the array, list or string being indexed
 * @return the formatted error message
 */
private static String badPositionIndexes(int start, int end, int size) {
    if (start < 0 || start > size) {
        return badPositionIndex(start, size, "start index");
    }
    if (end < 0 || end > size) {
        return badPositionIndex(end, size, "end index");
    }
    // end < start
    return lenientFormat("end index (%s) must not be less than start index (%s)", end, start);
}
|
Ensures that {@code start} and {@code end} specify valid <i>positions</i> in an array, list or
string of size {@code size}, and are in order. A position index may range from zero to {@code
size}, inclusive.
@param start a user-supplied index identifying a starting position in an array, list or string
@param end a user-supplied index identifying an ending position in an array, list or string
@param size the size of that array, list or string
@throws IndexOutOfBoundsException if either index is negative or is greater than {@code size},
or if {@code end} is less than {@code start}
@throws IllegalArgumentException if {@code size} is negative
|
java
|
android/guava/src/com/google/common/base/Preconditions.java
| 1,452
|
[
"start",
"end",
"size"
] |
String
| true
| 5
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
sort
|
/**
 * Sorts the given array in place into ascending order and returns the same
 * array reference; a {@code null} input is returned unchanged.
 * @param array the array to sort (may be null)
 * @return the given array
 * @see Arrays#sort(byte[])
 */
public static byte[] sort(final byte[] array) {
    if (array == null) {
        return null;
    }
    Arrays.sort(array);
    return array;
}
|
Sorts the given array into ascending order and returns it.
@param array the array to sort (may be null).
@return the given array.
@see Arrays#sort(byte[])
|
java
|
src/main/java/org/apache/commons/lang3/ArraySorter.java
| 37
|
[
"array"
] | true
| 2
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
isNewDocument
|
/**
 * Determines whether the reader is positioned at a new document separator:
 * three hyphen characters starting at column zero, followed only by
 * whitespace to the end of the line. A line continuing the previous line's
 * comment prefix never starts a new document.
 * @param reader the reader to inspect (consumed up to the end of the marker)
 * @return {@code true} if a new document marker was read
 * @throws IOException on read error
 */
private boolean isNewDocument(CharacterReader reader) throws IOException {
    if (reader.isSameLastLineCommentPrefix()) {
        return false;
    }
    boolean result = reader.getLocation().getColumn() == 0;
    // Expect exactly three '-' characters; '&&' short-circuits further reads
    // once the marker has already failed to match.
    result = result && readAndExpect(reader, reader::isHyphenCharacter);
    result = result && readAndExpect(reader, reader::isHyphenCharacter);
    result = result && readAndExpect(reader, reader::isHyphenCharacter);
    if (!reader.isEndOfLine()) {
        // Consume trailing whitespace so the marker line is fully read.
        reader.read();
        reader.skipWhitespace();
    }
    return result && reader.isEndOfLine();
}
|
Determine whether the reader is positioned at the start of a new document,
i.e. a {@code ---} separator at column zero followed only by whitespace.
@param reader the character reader to inspect
@return {@code true} if a new document marker was read
@throws IOException on read error
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/env/OriginTrackedPropertiesLoader.java
| 165
|
[
"reader"
] | true
| 7
| 7.44
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
computeIndentLevel
|
/**
 * Computes the indentation level of the given content by measuring its
 * leading whitespace: a space counts as one character and a tab counts as
 * `tabSize` characters; the total width is then divided by `tabSize`.
 *
 * @param content The line content to measure
 * @param options The formatting options supplying the tab size (default 4)
 */
function computeIndentLevel(content: string, options: FormattingOptions): number {
    const tabSize = options.tabSize || 4;
    let width = 0;
    let pos = 0;
    while (pos < content.length) {
        const current = content.charAt(pos);
        if (current === ' ') {
            width += 1;
        } else if (current === '\t') {
            width += tabSize;
        } else {
            // First non-whitespace character ends the indentation run.
            break;
        }
        pos++;
    }
    return Math.floor(width / tabSize);
}
|
Computes the indentation level of the given content, counting leading spaces
and tabs (a tab counts as {@code tabSize} characters).
@param content The line content to measure
@param options The formatting options supplying the tab size
|
typescript
|
src/vs/base/common/jsonFormatter.ts
| 226
|
[
"content",
"options"
] | true
| 7
| 6.24
|
microsoft/vscode
| 179,840
|
jsdoc
| false
|
|
set_codes
|
def set_codes(
    self, codes, *, level=None, verify_integrity: bool = True
) -> MultiIndex:
    """
    Set new codes on MultiIndex. Defaults to returning new index.

    Parameters
    ----------
    codes : sequence or list of sequence
        New codes to apply.
    level : int, level name, or sequence of int/level names (default None)
        Level(s) to set (None for all levels).
    verify_integrity : bool, default True
        If True, checks that levels and codes are compatible.

    Returns
    -------
    new index (of same type and class...etc) or None
        The same type as the caller or None if ``inplace=True``.

    See Also
    --------
    MultiIndex.set_levels : Set new levels on MultiIndex.
    MultiIndex.codes : Get the codes of the levels in the MultiIndex.
    MultiIndex.levels : Get the levels of the MultiIndex.

    Examples
    --------
    >>> idx = pd.MultiIndex.from_tuples(
    ...     [(1, "one"), (1, "two"), (2, "one"), (2, "two")], names=["foo", "bar"]
    ... )
    >>> idx
    MultiIndex([(1, 'one'),
                (1, 'two'),
                (2, 'one'),
                (2, 'two')],
               names=['foo', 'bar'])
    >>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]])
    MultiIndex([(2, 'one'),
                (1, 'one'),
                (2, 'two'),
                (1, 'two')],
               names=['foo', 'bar'])
    >>> idx.set_codes([1, 0, 1, 0], level=0)
    MultiIndex([(2, 'one'),
                (1, 'two'),
                (2, 'one'),
                (1, 'two')],
               names=['foo', 'bar'])
    >>> idx.set_codes([0, 0, 1, 1], level="bar")
    MultiIndex([(1, 'one'),
                (1, 'one'),
                (2, 'two'),
                (2, 'two')],
               names=['foo', 'bar'])
    >>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]], level=[0, 1])
    MultiIndex([(2, 'one'),
                (1, 'one'),
                (2, 'two'),
                (1, 'two')],
               names=['foo', 'bar'])
    """
    # Normalize scalar `level`/`codes` into parallel list form.
    level, codes = _require_listlike(level, codes, "Codes")
    # Shallow view of self; presumably shares levels/names — TODO confirm.
    idx = self._view()
    # Drop cached identity so the returned index is not treated as `self`.
    idx._reset_identity()
    idx._set_codes(codes, level=level, verify_integrity=verify_integrity)
    return idx
|
Set new codes on MultiIndex. Defaults to returning new index.
Parameters
----------
codes : sequence or list of sequence
New codes to apply.
level : int, level name, or sequence of int/level names (default None)
Level(s) to set (None for all levels).
verify_integrity : bool, default True
If True, checks that levels and codes are compatible.
Returns
-------
new index (of same type and class...etc) or None
The same type as the caller or None if ``inplace=True``.
See Also
--------
MultiIndex.set_levels : Set new levels on MultiIndex.
MultiIndex.codes : Get the codes of the levels in the MultiIndex.
MultiIndex.levels : Get the levels of the MultiIndex.
Examples
--------
>>> idx = pd.MultiIndex.from_tuples(
... [(1, "one"), (1, "two"), (2, "one"), (2, "two")], names=["foo", "bar"]
... )
>>> idx
MultiIndex([(1, 'one'),
(1, 'two'),
(2, 'one'),
(2, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]])
MultiIndex([(2, 'one'),
(1, 'one'),
(2, 'two'),
(1, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([1, 0, 1, 0], level=0)
MultiIndex([(2, 'one'),
(1, 'two'),
(2, 'one'),
(1, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([0, 0, 1, 1], level="bar")
MultiIndex([(1, 'one'),
(1, 'one'),
(2, 'two'),
(2, 'two')],
names=['foo', 'bar'])
>>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]], level=[0, 1])
MultiIndex([(2, 'one'),
(1, 'one'),
(2, 'two'),
(1, 'two')],
names=['foo', 'bar'])
|
python
|
pandas/core/indexes/multi.py
| 1,174
|
[
"self",
"codes",
"level",
"verify_integrity"
] |
MultiIndex
| true
| 1
| 7.2
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
toPrimitive
|
/**
 * Converts an array of object Bytes to primitives.
 * <p>
 * This method returns {@code null} for a {@code null} input array.
 * </p>
 * @param array a {@link Byte} array, may be {@code null}.
 * @return a {@code byte} array, {@code null} if null array input.
 * @throws NullPointerException if an array element is {@code null}.
 */
public static byte[] toPrimitive(final Byte[] array) {
    if (array == null) {
        return null;
    }
    final int length = array.length;
    if (length == 0) {
        // Reuse the shared empty instance rather than allocating.
        return EMPTY_BYTE_ARRAY;
    }
    final byte[] unboxed = new byte[length];
    for (int i = 0; i < length; i++) {
        // A null element triggers the documented NullPointerException here.
        unboxed[i] = array[i].byteValue();
    }
    return unboxed;
}
|
Converts an array of object Bytes to primitives.
<p>
This method returns {@code null} for a {@code null} input array.
</p>
@param array a {@link Byte} array, may be {@code null}.
@return a {@code byte} array, {@code null} if null array input.
@throws NullPointerException if an array element is {@code null}.
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 8,859
|
[
"array"
] | true
| 4
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
mean
|
/**
 * Returns the arithmetic mean of {@code values}, computed incrementally so
 * that intermediate sums cannot overflow.
 * @param values a nonempty series of finite values
 * @throws IllegalArgumentException if {@code values} is empty or contains a
 *         non-finite value
 * @deprecated Use {@link Stats#meanOf} instead, noting the less strict
 *             handling of non-finite values.
 */
@Deprecated
// com.google.common.math.DoubleUtils
@GwtIncompatible
public static double mean(double... values) {
    checkArgument(values.length > 0, "Cannot take mean of 0 values");
    // Incremental running mean: Art of Computer Programming vol. 2, Knuth, 4.2.2, (15)
    double runningMean = checkFinite(values[0]);
    long seen = 1;
    for (int i = 1; i < values.length; i++) {
        checkFinite(values[i]);
        seen++;
        runningMean += (values[i] - runningMean) / seen;
    }
    return runningMean;
}
|
Returns the <a href="http://en.wikipedia.org/wiki/Arithmetic_mean">arithmetic mean</a> of
{@code values}.
<p>If these values are a sample drawn from a population, this is also an unbiased estimator of
the arithmetic mean of the population.
@param values a nonempty series of values
@throws IllegalArgumentException if {@code values} is empty or contains any non-finite value
@deprecated Use {@link Stats#meanOf} instead, noting the less strict handling of non-finite
values.
|
java
|
android/guava/src/com/google/common/math/DoubleMath.java
| 408
|
[] | true
| 2
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
clearByte
|
/**
 * Clears this field's bits in the given byte, i.e. returns {@code holder}
 * with the specified bits set to {@code 0}.
 * @param holder the byte data containing the bits we're interested in
 * @return the value of holder with the specified bits cleared
 */
public byte clearByte(final byte holder) {
    // Delegate to the int variant and narrow the result back to a byte.
    return (byte) clear(holder);
}
|
Clears the bits.
@param holder the byte data containing the bits we're
interested in
@return the value of holder with the specified bits cleared
(set to {@code 0})
|
java
|
src/main/java/org/apache/commons/lang3/BitField.java
| 111
|
[
"holder"
] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
floatValue
|
/**
 * Returns the value of this unsigned long as a {@code float}, analogous to a
 * widening primitive conversion from {@code long} to {@code float}, and
 * correctly rounded.
 */
@Override
public float floatValue() {
    if (value >= 0) {
        // Top bit clear: the signed long already equals the unsigned value.
        return (float) value;
    }
    // The top bit is set, which means that the float value is going to come from the top 24 bits.
    // So we can ignore the bottom 8, except for rounding. See doubleValue() for more.
    // (value & 1) is OR-ed back in so halving does not lose rounding information.
    return (float) ((value >>> 1) | (value & 1)) * 2f;
}
|
Returns the value of this {@code UnsignedLong} as a {@code float}, analogous to a widening
primitive conversion from {@code long} to {@code float}, and correctly rounded.
|
java
|
android/guava/src/com/google/common/primitives/UnsignedLong.java
| 195
|
[] | true
| 2
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
deepEmpty
|
/**
 * Tests whether every String in the array is empty (null or zero-length).
 * @param strings the elements to check; NOTE(review): behavior for a null
 *        array depends on {@code Streams.of} — presumably an empty stream,
 *        making the result {@code true} — confirm
 * @return {@code true} if all elements are empty
 */
private static boolean deepEmpty(final String[] strings) {
    return Streams.of(strings).allMatch(StringUtils::isEmpty);
}
|
Determines whether or not all the Strings in an array are
empty or not.
@param strings String[] whose elements are being checked for emptiness
@return whether or not the String is empty
|
java
|
src/main/java/org/apache/commons/lang3/CharSetUtils.java
| 105
|
[
"strings"
] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
completeOnTimeout
|
/**
 * Disallowed operation: always throws, because these internal Kafka futures
 * must only be completed by the client internals, never by user code.
 * (NOTE(review): the exact exception type is produced by
 * {@code erroneousCompletionException()} — confirm against that helper.)
 */
@Override
public CompletableFuture<T> completeOnTimeout(T value, long timeout, TimeUnit unit) {
    throw erroneousCompletionException();
}
|
Completes this future exceptionally. For internal use by the Kafka clients, not by user code.
@param throwable the exception.
@return {@code true} if this invocation caused this CompletableFuture
to transition to a completed state, else {@code false}
|
java
|
clients/src/main/java/org/apache/kafka/common/internals/KafkaCompletableFuture.java
| 87
|
[
"value",
"timeout",
"unit"
] | true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
lowerCase
|
/**
 * Converts a String to lower case as per {@link String#toLowerCase()}.
 * <p>A {@code null} input String returns {@code null}. Note that the result
 * is affected by the current default locale; use the locale-aware overload
 * for platform-independent behavior.</p>
 * @param str the String to lower case, may be null.
 * @return the lower cased String, {@code null} if null String input.
 */
public static String lowerCase(final String str) {
    // Propagate null rather than throwing.
    return str == null ? null : str.toLowerCase();
}
|
Converts a String to lower case as per {@link String#toLowerCase()}.
<p>
A {@code null} input String returns {@code null}.
</p>
<pre>
StringUtils.lowerCase(null) = null
StringUtils.lowerCase("") = ""
StringUtils.lowerCase("aBc") = "abc"
</pre>
<p>
<strong>Note:</strong> As described in the documentation for {@link String#toLowerCase()}, the result of this method is affected by the current locale.
For platform-independent case transformations, the method {@link #lowerCase(String, Locale)} should be used with a specific locale (e.g.
{@link Locale#ENGLISH}).
</p>
@param str the String to lower case, may be null.
@return the lower cased String, {@code null} if null String input.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 5,219
|
[
"str"
] |
String
| true
| 2
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
determineHandlingOrder
|
/**
 * Determines the order in which the given selection keys should be handled.
 * <p>Iteration order over {@code selectionKeys} may repeat across
 * invocations, which can starve reads when memory is low; in that case the
 * keys are shuffled to randomize handling order.
 * @param selectionKeys the ready keys for this poll
 * @return the keys, shuffled when available memory is below the threshold
 */
private Collection<SelectionKey> determineHandlingOrder(Set<SelectionKey> selectionKeys) {
    boolean memoryLow = !outOfMemory && memoryPool.availableMemory() < lowMemThreshold;
    if (!memoryLow) {
        return selectionKeys;
    }
    List<SelectionKey> shuffled = new ArrayList<>(selectionKeys);
    Collections.shuffle(shuffled);
    return shuffled;
}
|
Determine the order in which a set of ready selection keys should be
handled, shuffling the keys when available memory is low to avoid
starvation of reads.
@param selectionKeys set of keys to handle
|
java
|
clients/src/main/java/org/apache/kafka/common/network/Selector.java
| 665
|
[
"selectionKeys"
] | true
| 3
| 6.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
strftime
|
def strftime(self, date_format) -> Index:
    """
    Convert to Index using specified date_format.

    Return an Index of formatted strings specified by date_format, which
    supports the same string format as the python standard library. Details
    of the string format can be found in `python string format
    doc <https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior>`__.

    Formats supported by the C `strftime` API but not by the python string format
    doc (such as `"%R"`, `"%r"`) are not officially supported and should be
    preferably replaced with their supported equivalents (such as `"%H:%M"`,
    `"%I:%M:%S %p"`).

    Note that `PeriodIndex` support additional directives, detailed in
    `Period.strftime`.

    Parameters
    ----------
    date_format : str
        Date format string (e.g. "%Y-%m-%d").

    Returns
    -------
    ndarray[object]
        NumPy ndarray of formatted strings.

    See Also
    --------
    to_datetime : Convert the given argument to datetime.
    DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.
    DatetimeIndex.round : Round the DatetimeIndex to the specified freq.
    DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.
    Timestamp.strftime : Format a single Timestamp.
    Period.strftime : Format a single Period.

    Examples
    --------
    >>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"), periods=3, freq="s")
    >>> rng.strftime("%B %d, %Y, %r")
    Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',
           'March 10, 2018, 09:00:02 AM'],
          dtype='str')
    """
    # Delegate formatting to the underlying datetime-like array.
    arr = self._data.strftime(date_format)
    # Preserve the original index name and the formatted array's dtype.
    return Index(arr, name=self.name, dtype=arr.dtype)
|
Convert to Index using specified date_format.
Return an Index of formatted strings specified by date_format, which
supports the same string format as the python standard library. Details
of the string format can be found in `python string format
doc <https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior>`__.
Formats supported by the C `strftime` API but not by the python string format
doc (such as `"%R"`, `"%r"`) are not officially supported and should be
preferably replaced with their supported equivalents (such as `"%H:%M"`,
`"%I:%M:%S %p"`).
Note that `PeriodIndex` support additional directives, detailed in
`Period.strftime`.
Parameters
----------
date_format : str
Date format string (e.g. "%Y-%m-%d").
Returns
-------
ndarray[object]
NumPy ndarray of formatted strings.
See Also
--------
to_datetime : Convert the given argument to datetime.
DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.
DatetimeIndex.round : Round the DatetimeIndex to the specified freq.
DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.
Timestamp.strftime : Format a single Timestamp.
Period.strftime : Format a single Period.
Examples
--------
>>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"), periods=3, freq="s")
>>> rng.strftime("%B %d, %Y, %r")
Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',
'March 10, 2018, 09:00:02 AM'],
dtype='str')
|
python
|
pandas/core/indexes/datetimes.py
| 282
|
[
"self",
"date_format"
] |
Index
| true
| 1
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
getShortCommitId
|
/**
 * Return the abbreviated id of the commit or {@code null}.
 * <p>Prefers the pre-computed {@code commit.id.abbrev} property when present,
 * otherwise truncates the full commit id to 7 characters.
 * @return the short commit id, or {@code null} if no commit id is available
 */
public @Nullable String getShortCommitId() {
    String abbreviated = get("commit.id.abbrev");
    if (abbreviated != null) {
        return abbreviated;
    }
    String fullId = getCommitId();
    if (fullId == null) {
        return null;
    }
    return (fullId.length() <= 7) ? fullId : fullId.substring(0, 7);
}
|
Return the abbreviated id of the commit or {@code null}.
@return the short commit id
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/info/GitProperties.java
| 71
|
[] |
String
| true
| 4
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
reformat
|
/**
 * Parses the given String and formats the result back to a String,
 * normalizing it to this format's canonical output.
 * @param input String to reformat
 * @return a reformatted String
 * @throws ParseException thrown by the {@code parseObject(String)} call
 */
public String reformat(final String input) throws ParseException {
    return format(parseObject(input));
}
|
Utility method to parse and then reformat a String.
@param input String to reformat
@return A reformatted String
@throws ParseException thrown by parseObject(String) call
|
java
|
src/main/java/org/apache/commons/lang3/text/CompositeFormat.java
| 114
|
[
"input"
] |
String
| true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
resolveAndInvoke
|
/**
 * Resolve the autowired method arguments for the specified registered bean
 * and invoke the method on the given instance using reflection.
 * <p>If the arguments cannot be resolved ({@code resolveArguments} returns
 * {@code null}), the method is not invoked.
 * @param registeredBean the registered bean
 * @param instance the bean instance to invoke the method on
 */
public void resolveAndInvoke(RegisteredBean registeredBean, Object instance) {
    Assert.notNull(registeredBean, "'registeredBean' must not be null");
    Assert.notNull(instance, "'instance' must not be null");
    Method method = getMethod(registeredBean);
    AutowiredArguments resolved = resolveArguments(registeredBean, method);
    if (resolved != null) {
        // Non-public methods must be made accessible before reflective invocation.
        ReflectionUtils.makeAccessible(method);
        ReflectionUtils.invokeMethod(method, instance, resolved.toArray());
    }
}
|
Resolve the method arguments for the specified registered bean and invoke
the method using reflection.
@param registeredBean the registered bean
@param instance the bean instance
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/AutowiredMethodArgumentsResolver.java
| 145
|
[
"registeredBean",
"instance"
] |
void
| true
| 2
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getMapData
|
/**
 * Gets the data for `map`.
 *
 * Keyable keys are stored in either the 'string' or 'hash' bucket of the
 * internal `__data__` object; all other keys live in its backing `Map`.
 *
 * @private
 * @param {Object} map The map to query.
 * @param {string} key The reference key.
 * @returns {*} Returns the map data.
 */
function getMapData(map, key) {
    var data = map.__data__;
    if (!isKeyable(key)) {
        return data.map;
    }
    return typeof key == 'string' ? data.string : data.hash;
}
|
Gets the data for `map`.
@private
@param {Object} map The map to query.
@param {string} key The reference key.
@returns {*} Returns the map data.
|
javascript
|
lodash.js
| 6,040
|
[
"map",
"key"
] | false
| 3
| 6.24
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
remove
|
/**
 * Remove the object with the given {@code name} from the underlying scope,
 * returning the removed object, or {@code null} if no object was found.
 * <p>This is an optional operation: implementations may throw
 * {@link UnsupportedOperationException} if they do not support it.
 * @param name the name of the object to remove
 * @return the removed object, or {@code null} if no object was present
 * @throws IllegalStateException if the underlying scope is not currently active
 */
@Nullable Object remove(String name);
|
Remove the object with the given {@code name} from the underlying scope.
<p>Returns {@code null} if no object was found; otherwise
returns the removed {@code Object}.
<p>Note that an implementation should also remove a registered destruction
callback for the specified object, if any. It does, however, <i>not</i>
need to <i>execute</i> a registered destruction callback in this case,
since the object will be destroyed by the caller (if appropriate).
<p><b>Note: This is an optional operation.</b> Implementations may throw
{@link UnsupportedOperationException} if they do not support explicitly
removing an object.
@param name the name of the object to remove
@return the removed object, or {@code null} if no object was present
@throws IllegalStateException if the underlying scope is not currently active
@see #registerDestructionCallback
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/Scope.java
| 93
|
[
"name"
] |
Object
| true
| 1
| 6.32
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
dtypes
|
def dtypes(self) -> Series:
    """
    Return the dtype object of each child field of the struct.

    Returns
    -------
    pandas.Series
        The data type of each child field.

    See Also
    --------
    Series.dtype: Return the dtype object of the underlying data.

    Examples
    --------
    >>> import pyarrow as pa
    >>> s = pd.Series(
    ...     [
    ...         {"version": 1, "project": "pandas"},
    ...         {"version": 2, "project": "pandas"},
    ...         {"version": 1, "project": "numpy"},
    ...     ],
    ...     dtype=pd.ArrowDtype(
    ...         pa.struct([("version", pa.int64()), ("project", pa.string())])
    ...     ),
    ... )
    >>> s.struct.dtypes
    version     int64[pyarrow]
    project    string[pyarrow]
    dtype: object
    """
    # Deferred import; presumably avoids a circular import — TODO confirm.
    from pandas import (
        Index,
        Series,
    )
    pa_type = self._data.dtype.pyarrow_dtype
    # Each child field of the pyarrow struct contributes one (name, dtype) pair.
    types = [ArrowDtype(struct.type) for struct in pa_type]
    names = [struct.name for struct in pa_type]
    return Series(types, index=Index(names))
|
Return the dtype object of each child field of the struct.
Returns
-------
pandas.Series
The data type of each child field.
See Also
--------
Series.dtype: Return the dtype object of the underlying data.
Examples
--------
>>> import pyarrow as pa
>>> s = pd.Series(
... [
... {"version": 1, "project": "pandas"},
... {"version": 2, "project": "pandas"},
... {"version": 1, "project": "numpy"},
... ],
... dtype=pd.ArrowDtype(
... pa.struct([("version", pa.int64()), ("project", pa.string())])
... ),
... )
>>> s.struct.dtypes
version int64[pyarrow]
project string[pyarrow]
dtype: object
|
python
|
pandas/core/arrays/arrow/accessors.py
| 260
|
[
"self"
] |
Series
| true
| 1
| 6.8
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
getObjectFromFactoryBean
|
/**
 * Obtain an object to expose from the given FactoryBean.
 * <p>Singleton FactoryBean results are cached in {@code factoryBeanObjectCache}
 * (except for {@code SmartFactoryBean}s, which may return multiple object
 * types and are never cached). Non-singleton results are created fresh on
 * every call.
 * @param factory the FactoryBean instance
 * @param requiredType the required object type (may be {@code null})
 * @param beanName the name of the bean
 * @param shouldPostProcess whether the bean is subject to post-processing
 * @return the object obtained from the FactoryBean
 * @throws BeanCreationException if FactoryBean object creation failed
 * @see org.springframework.beans.factory.FactoryBean#getObject()
 */
protected Object getObjectFromFactoryBean(FactoryBean<?> factory, @Nullable Class<?> requiredType,
        String beanName, boolean shouldPostProcess) {
    if (factory.isSingleton() && containsSingleton(beanName)) {
        // A null lock flag means "no preference": take the singleton lock
        // unconditionally; otherwise only try-lock when the thread is allowed to.
        Boolean lockFlag = isCurrentThreadAllowedToHoldSingletonLock();
        boolean locked;
        if (lockFlag == null) {
            this.singletonLock.lock();
            locked = true;
        }
        else {
            locked = (lockFlag && this.singletonLock.tryLock());
        }
        try {
            if (factory instanceof SmartFactoryBean<?>) {
                // A SmartFactoryBean may return multiple object types -> do not cache.
                // Also, a SmartFactoryBean needs to be thread-safe -> no synchronization necessary.
                Object object = doGetObjectFromFactoryBean(factory, requiredType, beanName);
                if (shouldPostProcess) {
                    object = postProcessObjectFromSingletonFactoryBean(object, beanName, locked);
                }
                return object;
            }
            else {
                // Defensively synchronize against non-thread-safe FactoryBean.getObject() implementations,
                // potentially to be called from a background thread while the main thread currently calls
                // the same getObject() method within the singleton lock.
                synchronized (factory) {
                    Object object = this.factoryBeanObjectCache.get(beanName);
                    if (object == null) {
                        object = doGetObjectFromFactoryBean(factory, requiredType, beanName);
                        // Only post-process and store if not put there already during getObject() call above
                        // (for example, because of circular reference processing triggered by custom getBean calls)
                        Object alreadyThere = this.factoryBeanObjectCache.get(beanName);
                        if (alreadyThere != null) {
                            object = alreadyThere;
                        }
                        else {
                            if (shouldPostProcess) {
                                object = postProcessObjectFromSingletonFactoryBean(object, beanName, locked);
                            }
                            if (containsSingleton(beanName)) {
                                this.factoryBeanObjectCache.put(beanName, object);
                            }
                        }
                    }
                    return object;
                }
            }
        }
        finally {
            if (locked) {
                this.singletonLock.unlock();
            }
        }
    }
    else {
        // Non-singleton (or not yet registered) case: always create a fresh object.
        Object object = doGetObjectFromFactoryBean(factory, requiredType, beanName);
        if (shouldPostProcess) {
            try {
                object = postProcessObjectFromFactoryBean(object, beanName);
            }
            catch (Throwable ex) {
                throw new BeanCreationException(beanName, "Post-processing of FactoryBean's object failed", ex);
            }
        }
        return object;
    }
}
|
Obtain an object to expose from the given FactoryBean.
@param factory the FactoryBean instance
@param beanName the name of the bean
@param shouldPostProcess whether the bean is subject to post-processing
@return the object obtained from the FactoryBean
@throws BeanCreationException if FactoryBean object creation failed
@see org.springframework.beans.factory.FactoryBean#getObject()
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/FactoryBeanRegistrySupport.java
| 119
|
[
"factory",
"requiredType",
"beanName",
"shouldPostProcess"
] |
Object
| true
| 14
| 7.52
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
maybe_mark_dynamic
|
def maybe_mark_dynamic(t: Any, index: Union[int, list[Any], tuple[Any]]) -> None:
    """
    Mark a tensor as having a dynamic dim, but don't enforce it (i.e., if this
    dimension ends up getting specialized, don't error).
    """
    if is_traceable_wrapper_subclass(t):
        # default behavior: mirror maybe_mark_dynamic() on all inner tensors with same dim as t
        # TODO: Make this configurable via a supported public API
        _apply_func_to_inner_tensors_of_same_dim(maybe_mark_dynamic, t, index)
    if isinstance(index, int):
        if not hasattr(t, "_dynamo_weak_dynamic_indices"):
            # pyrefly: ignore [missing-attribute]
            t._dynamo_weak_dynamic_indices = set()
        # TODO(voz): Should we bounds check?
        # pyrefly: ignore [missing-attribute]
        t._dynamo_weak_dynamic_indices.add(index)
    else:
        # A collection of dims: mark each one individually.
        assert isinstance(index, (list, tuple))
        for dim in index:
            maybe_mark_dynamic(t, dim)
|
Mark a tensor as having a dynamic dim, but don't enforce it (i.e., if this
dimension ends up getting specialized, don't error).
|
python
|
torch/_dynamo/decorators.py
| 723
|
[
"t",
"index"
] |
None
| true
| 5
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
explode
|
def explode(self, ignore_index: bool = False) -> Series:
"""
Transform each element of a list-like to a row.
Parameters
----------
ignore_index : bool, default False
If True, the resulting index will be labeled 0, 1, …, n - 1.
Returns
-------
Series
Exploded lists to rows; index will be duplicated for these rows.
See Also
--------
Series.str.split : Split string values on specified separator.
Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex
to produce DataFrame.
DataFrame.melt : Unpivot a DataFrame from wide format to long format.
DataFrame.explode : Explode a DataFrame from list-like
columns to long format.
Notes
-----
This routine will explode list-likes including lists, tuples, sets,
Series, and np.ndarray. The result dtype of the subset rows will
be object. Scalars will be returned unchanged, and empty list-likes will
result in an np.nan for that row. In addition, the ordering of elements in
the output will be non-deterministic when exploding sets.
Reference :ref:`the user guide <reshaping.explode>` for more examples.
Examples
--------
>>> s = pd.Series([[1, 2, 3], "foo", [], [3, 4]])
>>> s
0 [1, 2, 3]
1 foo
2 []
3 [3, 4]
dtype: object
>>> s.explode()
0 1
0 2
0 3
1 foo
2 NaN
3 3
3 4
dtype: object
"""
if isinstance(self.dtype, ExtensionDtype):
values, counts = self._values._explode()
elif len(self) and is_object_dtype(self.dtype):
values, counts = reshape.explode(np.asarray(self._values))
else:
result = self.copy()
return result.reset_index(drop=True) if ignore_index else result
if ignore_index:
index: Index = default_index(len(values))
else:
index = self.index.repeat(counts)
return self._constructor(values, index=index, name=self.name, copy=False)
|
Transform each element of a list-like to a row.
Parameters
----------
ignore_index : bool, default False
If True, the resulting index will be labeled 0, 1, …, n - 1.
Returns
-------
Series
Exploded lists to rows; index will be duplicated for these rows.
See Also
--------
Series.str.split : Split string values on specified separator.
Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex
to produce DataFrame.
DataFrame.melt : Unpivot a DataFrame from wide format to long format.
DataFrame.explode : Explode a DataFrame from list-like
columns to long format.
Notes
-----
This routine will explode list-likes including lists, tuples, sets,
Series, and np.ndarray. The result dtype of the subset rows will
be object. Scalars will be returned unchanged, and empty list-likes will
result in an np.nan for that row. In addition, the ordering of elements in
the output will be non-deterministic when exploding sets.
Reference :ref:`the user guide <reshaping.explode>` for more examples.
Examples
--------
>>> s = pd.Series([[1, 2, 3], "foo", [], [3, 4]])
>>> s
0 [1, 2, 3]
1 foo
2 []
3 [3, 4]
dtype: object
>>> s.explode()
0 1
0 2
0 3
1 foo
2 NaN
3 3
3 4
dtype: object
|
python
|
pandas/core/series.py
| 4,278
|
[
"self",
"ignore_index"
] |
Series
| true
| 8
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
putmask_without_repeat
|
def putmask_without_repeat(
values: np.ndarray, mask: npt.NDArray[np.bool_], new: Any
) -> None:
"""
np.putmask will truncate or repeat if `new` is a listlike with
len(new) != len(values). We require an exact match.
Parameters
----------
values : np.ndarray
mask : np.ndarray[bool]
new : Any
"""
if getattr(new, "ndim", 0) >= 1:
new = new.astype(values.dtype, copy=False)
# TODO: this prob needs some better checking for 2D cases
nlocs = mask.sum()
if nlocs > 0 and is_list_like(new) and getattr(new, "ndim", 1) == 1:
shape = np.shape(new)
# np.shape compat for if setitem_datetimelike_compat
# changed arraylike to list e.g. test_where_dt64_2d
if nlocs == shape[-1]:
# GH#30567
# If length of ``new`` is less than the length of ``values``,
# `np.putmask` would first repeat the ``new`` array and then
# assign the masked values hence produces incorrect result.
# `np.place` on the other hand uses the ``new`` values at it is
# to place in the masked locations of ``values``
np.place(values, mask, new)
# i.e. values[mask] = new
elif mask.shape[-1] == shape[-1] or shape[-1] == 1:
np.putmask(values, mask, new)
else:
raise ValueError("cannot assign mismatch length to masked array")
else:
np.putmask(values, mask, new)
|
np.putmask will truncate or repeat if `new` is a listlike with
len(new) != len(values). We require an exact match.
Parameters
----------
values : np.ndarray
mask : np.ndarray[bool]
new : Any
|
python
|
pandas/core/array_algos/putmask.py
| 63
|
[
"values",
"mask",
"new"
] |
None
| true
| 10
| 7.2
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
clear
|
public FluentBitSet clear(final int fromIndex, final int toIndex) {
bitSet.clear(fromIndex, toIndex);
return this;
}
|
Sets the bits from the specified {@code fromIndex} (inclusive) to the specified {@code toIndex} (exclusive) to
{@code false}.
@param fromIndex index of the first bit to be cleared.
@param toIndex index after the last bit to be cleared.
@throws IndexOutOfBoundsException if {@code fromIndex} is negative, or {@code toIndex} is negative, or
{@code fromIndex} is larger than {@code toIndex}.
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/util/FluentBitSet.java
| 179
|
[
"fromIndex",
"toIndex"
] |
FluentBitSet
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
createWithExpectedSize
|
public static <K extends @Nullable Object, V extends @Nullable Object>
CompactHashMap<K, V> createWithExpectedSize(int expectedSize) {
return new CompactHashMap<>(expectedSize);
}
|
Creates a {@code CompactHashMap} instance, with a high enough "initial capacity" that it
<i>should</i> hold {@code expectedSize} elements without growth.
@param expectedSize the number of elements you expect to add to the returned set
@return a new, empty {@code CompactHashMap} with enough capacity to hold {@code expectedSize}
elements without resizing
@throws IllegalArgumentException if {@code expectedSize} is negative
|
java
|
android/guava/src/com/google/common/collect/CompactHashMap.java
| 107
|
[
"expectedSize"
] | true
| 1
| 6
|
google/guava
| 51,352
|
javadoc
| false
|
|
size
|
public long size() throws IOException {
Optional<Long> sizeIfKnown = sizeIfKnown();
if (sizeIfKnown.isPresent()) {
return sizeIfKnown.get();
}
Closer closer = Closer.create();
try {
InputStream in = closer.register(openStream());
return countBySkipping(in);
} catch (IOException e) {
// skip may not be supported... at any rate, try reading
} finally {
closer.close();
}
closer = Closer.create();
try {
InputStream in = closer.register(openStream());
return ByteStreams.exhaust(in);
} catch (Throwable e) {
throw closer.rethrow(e);
} finally {
closer.close();
}
}
|
Returns the size of this source in bytes, even if doing so requires opening and traversing an
entire stream. To avoid a potentially expensive operation, see {@link #sizeIfKnown}.
<p>The default implementation calls {@link #sizeIfKnown} and returns the value if present. If
absent, it will fall back to a heavyweight operation that will open a stream, read (or {@link
InputStream#skip(long) skip}, if possible) to the end of the stream and return the total number
of bytes that were read.
<p>Note that for some sources that implement {@link #sizeIfKnown} to provide a more efficient
implementation, it is <i>possible</i> that this method will return a different number of bytes
than would be returned by reading all of the bytes (for example, some special files may return
a size of 0 despite actually having content when read).
<p>In either case, for mutable sources such as files, a subsequent read may return a different
number of bytes if the contents are changed.
@throws IOException if an I/O error occurs while reading the size of this source
|
java
|
android/guava/src/com/google/common/io/ByteSource.java
| 204
|
[] | true
| 4
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
|
create
|
static Archive create(File target) throws Exception {
if (!target.exists()) {
throw new IllegalStateException("Unable to determine code source archive from " + target);
}
return (target.isDirectory() ? new ExplodedArchive(target) : new JarFileArchive(target));
}
|
Factory method to create an {@link Archive} from the given {@link File} target.
@param target a target {@link File} used to create the archive. May be a directory
or a jar file.
@return a new {@link Archive} instance.
@throws Exception if the archive cannot be created
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/launch/Archive.java
| 124
|
[
"target"
] |
Archive
| true
| 3
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
isTypeMemberStart
|
function isTypeMemberStart(): boolean {
// Return true if we have the start of a signature member
if (
token() === SyntaxKind.OpenParenToken ||
token() === SyntaxKind.LessThanToken ||
token() === SyntaxKind.GetKeyword ||
token() === SyntaxKind.SetKeyword
) {
return true;
}
let idToken = false;
// Eat up all modifiers, but hold on to the last one in case it is actually an identifier
while (isModifierKind(token())) {
idToken = true;
nextToken();
}
// Index signatures and computed property names are type members
if (token() === SyntaxKind.OpenBracketToken) {
return true;
}
// Try to get the first property-like token following all modifiers
if (isLiteralPropertyName()) {
idToken = true;
nextToken();
}
// If we were able to get any potential identifier, check that it is
// the start of a member declaration
if (idToken) {
return token() === SyntaxKind.OpenParenToken ||
token() === SyntaxKind.LessThanToken ||
token() === SyntaxKind.QuestionToken ||
token() === SyntaxKind.ColonToken ||
token() === SyntaxKind.CommaToken ||
canParseSemicolon();
}
return false;
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 4,292
|
[] | true
| 14
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
distance
|
public static int distance(final Class<?> child, final Class<?> parent) {
if (child == null || parent == null) {
return -1;
}
if (child.equals(parent)) {
return 0;
}
final Class<?> cParent = child.getSuperclass();
int d = BooleanUtils.toInteger(parent.equals(cParent));
if (d == 1) {
return d;
}
d += distance(cParent, parent);
return d > 0 ? d + 1 : -1;
}
|
Returns the number of inheritance hops between two classes.
@param child the child class, may be {@code null}
@param parent the parent class, may be {@code null}
@return the number of generations between the child and parent; 0 if the same class;
-1 if the classes are not related as child and parent (includes where either class is null)
@since 3.2
|
java
|
src/main/java/org/apache/commons/lang3/reflect/InheritanceUtils.java
| 37
|
[
"child",
"parent"
] | true
| 6
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
createAsyncCaffeineCache
|
protected AsyncCache<Object, Object> createAsyncCaffeineCache(String name) {
return (this.cacheLoader != null ? this.cacheBuilder.buildAsync(this.cacheLoader) :
this.cacheBuilder.buildAsync());
}
|
Build a common Caffeine AsyncCache instance for the specified cache name,
using the common Caffeine configuration specified on this cache manager.
@param name the name of the cache
@return the Caffeine AsyncCache instance
@since 6.1
@see #createCaffeineCache
|
java
|
spring-context-support/src/main/java/org/springframework/cache/caffeine/CaffeineCacheManager.java
| 405
|
[
"name"
] | true
| 2
| 7.36
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
parse_s3_url
|
def parse_s3_url(s3url: str) -> tuple[str, str]:
"""
Parse the S3 Url into a bucket name and key.
See https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-bucket-intro.html
for valid url formats.
:param s3url: The S3 Url to parse.
:return: the parsed bucket name and key
"""
valid_s3_format = "S3://bucket-name/key-name"
valid_s3_virtual_hosted_format = "https://bucket-name.s3.region-code.amazonaws.com/key-name"
format = s3url.split("//")
if re.match(r"s3[na]?:", format[0], re.IGNORECASE):
parsed_url = urlsplit(s3url, allow_fragments=False)
if not parsed_url.netloc:
raise S3HookUriParseFailure(
"Please provide a bucket name using a valid format of the form: "
f'{valid_s3_format} or {valid_s3_virtual_hosted_format} but provided: "{s3url}"'
)
bucket_name = parsed_url.netloc
key = parsed_url.path.lstrip("/")
elif format[0] == "https:":
temp_split = format[1].split(".")
if temp_split[0] == "s3":
# "https://s3.region-code.amazonaws.com/bucket-name/key-name"
_, bucket_name, key = format[1].split("/", 2)
elif temp_split[1] == "s3":
# "https://bucket-name.s3.region-code.amazonaws.com/key-name"
bucket_name = temp_split[0]
key = format[1].partition("/")[-1]
else:
raise S3HookUriParseFailure(
"Please provide a bucket name using a valid virtually hosted format which should "
f'be of the form: {valid_s3_virtual_hosted_format} but provided: "{s3url}"'
)
else:
raise S3HookUriParseFailure(
"Please provide a bucket name using a valid format of the form: "
f'{valid_s3_format} or {valid_s3_virtual_hosted_format} but provided: "{s3url}"'
)
return bucket_name, key
|
Parse the S3 Url into a bucket name and key.
See https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-bucket-intro.html
for valid url formats.
:param s3url: The S3 Url to parse.
:return: the parsed bucket name and key
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/s3.py
| 223
|
[
"s3url"
] |
tuple[str, str]
| true
| 8
| 7.76
|
apache/airflow
| 43,597
|
sphinx
| false
|
predict
|
def predict(self, X, copy=True):
"""Predict targets of given samples.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Samples.
copy : bool, default=True
Whether to copy `X` or perform in-place normalization.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_targets)
Returns predicted values.
Notes
-----
This call requires the estimation of a matrix of shape
`(n_features, n_targets)`, which may be an issue in high dimensional
space.
"""
check_is_fitted(self)
X = validate_data(self, X, copy=copy, dtype=FLOAT_DTYPES, reset=False)
# Only center X but do not scale it since the coefficients are already scaled
X -= self._x_mean
y_pred = X @ self.coef_.T + self.intercept_
return y_pred.ravel() if self._predict_1d else y_pred
|
Predict targets of given samples.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Samples.
copy : bool, default=True
Whether to copy `X` or perform in-place normalization.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_targets)
Returns predicted values.
Notes
-----
This call requires the estimation of a matrix of shape
`(n_features, n_targets)`, which may be an issue in high dimensional
space.
|
python
|
sklearn/cross_decomposition/_pls.py
| 451
|
[
"self",
"X",
"copy"
] | false
| 2
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
collectElementsAnnotatedOrMetaAnnotatedWith
|
private boolean collectElementsAnnotatedOrMetaAnnotatedWith(TypeElement annotationType, LinkedList<Element> stack) {
Element element = stack.peekLast();
for (AnnotationMirror annotation : this.elements.getAllAnnotationMirrors(element)) {
Element annotationElement = annotation.getAnnotationType().asElement();
if (!stack.contains(annotationElement)) {
stack.addLast(annotationElement);
if (annotationElement.equals(annotationType)) {
return true;
}
if (!collectElementsAnnotatedOrMetaAnnotatedWith(annotationType, stack)) {
stack.removeLast();
}
}
}
return false;
}
|
Collect the annotations that are annotated or meta-annotated with the specified
{@link TypeElement annotation}.
@param element the element to inspect
@param annotationType the annotation to discover
@return the annotations that are annotated or meta-annotated with this annotation
|
java
|
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/MetadataGenerationEnvironment.java
| 295
|
[
"annotationType",
"stack"
] | true
| 4
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
multiply
|
def multiply(a, i):
"""
Return (a * i), that is string multiple concatenation,
element-wise.
Values in ``i`` of less than 0 are treated as 0 (which yields an
empty string).
Parameters
----------
a : array_like, with `np.bytes_` or `np.str_` dtype
i : array_like, with any integer dtype
Returns
-------
out : ndarray
Output array of str or unicode, depending on input types
Notes
-----
This is a thin wrapper around np.strings.multiply that raises
`ValueError` when ``i`` is not an integer. It only
exists for backwards-compatibility.
Examples
--------
>>> import numpy as np
>>> a = np.array(["a", "b", "c"])
>>> np.strings.multiply(a, 3)
array(['aaa', 'bbb', 'ccc'], dtype='<U3')
>>> i = np.array([1, 2, 3])
>>> np.strings.multiply(a, i)
array(['a', 'bb', 'ccc'], dtype='<U3')
>>> np.strings.multiply(np.array(['a']), i)
array(['a', 'aa', 'aaa'], dtype='<U3')
>>> a = np.array(['a', 'b', 'c', 'd', 'e', 'f']).reshape((2, 3))
>>> np.strings.multiply(a, 3)
array([['aaa', 'bbb', 'ccc'],
['ddd', 'eee', 'fff']], dtype='<U3')
>>> np.strings.multiply(a, i)
array([['a', 'bb', 'ccc'],
['d', 'ee', 'fff']], dtype='<U3')
"""
try:
return strings_multiply(a, i)
except TypeError:
raise ValueError("Can only multiply by integers")
|
Return (a * i), that is string multiple concatenation,
element-wise.
Values in ``i`` of less than 0 are treated as 0 (which yields an
empty string).
Parameters
----------
a : array_like, with `np.bytes_` or `np.str_` dtype
i : array_like, with any integer dtype
Returns
-------
out : ndarray
Output array of str or unicode, depending on input types
Notes
-----
This is a thin wrapper around np.strings.multiply that raises
`ValueError` when ``i`` is not an integer. It only
exists for backwards-compatibility.
Examples
--------
>>> import numpy as np
>>> a = np.array(["a", "b", "c"])
>>> np.strings.multiply(a, 3)
array(['aaa', 'bbb', 'ccc'], dtype='<U3')
>>> i = np.array([1, 2, 3])
>>> np.strings.multiply(a, i)
array(['a', 'bb', 'ccc'], dtype='<U3')
>>> np.strings.multiply(np.array(['a']), i)
array(['a', 'aa', 'aaa'], dtype='<U3')
>>> a = np.array(['a', 'b', 'c', 'd', 'e', 'f']).reshape((2, 3))
>>> np.strings.multiply(a, 3)
array([['aaa', 'bbb', 'ccc'],
['ddd', 'eee', 'fff']], dtype='<U3')
>>> np.strings.multiply(a, i)
array([['a', 'bb', 'ccc'],
['d', 'ee', 'fff']], dtype='<U3')
|
python
|
numpy/_core/defchararray.py
| 267
|
[
"a",
"i"
] | false
| 1
| 6
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
isCancelledError
|
function isCancelledError(error: unknown): error is {
cancelled: boolean;
} {
return typeof error === 'object' && error !== null && 'cancelled' in error && error.cancelled === true;
}
|
Checks if an error is a cancelled request error.
Used to avoid logging cancelled request errors.
@param {unknown} error - Error to check
@returns {boolean} True if the error is a cancelled request error
|
typescript
|
packages/grafana-prometheus/src/language_provider.ts
| 401
|
[
"error"
] | false
| 4
| 6.24
|
grafana/grafana
| 71,362
|
jsdoc
| false
|
|
applyAsDouble
|
double applyAsDouble(double operand) throws E;
|
Applies this operator to the given operand.
@param operand the operand
@return the operator result
@throws E Thrown when a consumer fails.
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableDoubleUnaryOperator.java
| 78
|
[
"operand"
] | true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
loadBuiltinWithHooks
|
function loadBuiltinWithHooks(id, url, format) {
if (loadHooks.length) {
url ??= `node:${id}`;
// TODO(joyeecheung): do we really want to invoke the load hook for the builtins?
const loadResult = loadWithHooks(url, format || 'builtin', /* importAttributes */ undefined,
getCjsConditionsArray(), getDefaultLoad(url, id), validateLoadStrict);
if (loadResult.format && loadResult.format !== 'builtin') {
return undefined; // Format has been overridden, return undefined for the caller to continue loading.
}
}
// No hooks or the hooks have not overridden the format. Load it as a builtin module and return the
// exports.
const mod = loadBuiltinModule(id);
return mod.exports;
}
|
Load a specified builtin module, invoking load hooks if necessary.
@param {string} id The module ID (without the node: prefix)
@param {string} url The module URL (with the node: prefix)
@param {string} format Format from resolution.
@returns {any} If there are no load hooks or the load hooks do not override the format of the
builtin, load and return the exports of the builtin. Otherwise, return undefined.
|
javascript
|
lib/internal/modules/cjs/loader.js
| 1,174
|
[
"id",
"url",
"format"
] | false
| 5
| 6.24
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
cutlass_layout
|
def cutlass_layout(torch_layout: ir.Layout) -> "Optional[cutlass_lib.LayoutType]": # type: ignore[name-defined] # noqa: F821
"""
Converts an ir.Layout instance into the corresponding cutlass_library.LayoutType enum value
(RowMajor, ColumnMajor, or None if no matching value is found ).
Args:
torch_layout (ir.Layout): The layout that needs to be looked up.
Returns:
cutlass_lib.LayoutType: The converted layout corresponding to the `torch_layout` or None if no matching
value is found.
"""
assert cutlass_utils.try_import_cutlass()
import cutlass_library.library as cutlass_lib
if V.graph.sizevars.statically_known_equals(torch_layout.stride[-1], 1):
return cutlass_lib.LayoutType.RowMajor
elif V.graph.sizevars.statically_known_equals(torch_layout.stride[-2], 1):
return cutlass_lib.LayoutType.ColumnMajor
else:
return None
|
Converts an ir.Layout instance into the corresponding cutlass_library.LayoutType enum value
(RowMajor, ColumnMajor, or None if no matching value is found ).
Args:
torch_layout (ir.Layout): The layout that needs to be looked up.
Returns:
cutlass_lib.LayoutType: The converted layout corresponding to the `torch_layout` or None if no matching
value is found.
|
python
|
torch/_inductor/codegen/cuda/gemm_template.py
| 645
|
[
"torch_layout"
] |
"Optional[cutlass_lib.LayoutType]"
| true
| 4
| 7.6
|
pytorch/pytorch
| 96,034
|
google
| false
|
tryAcquire
|
public boolean tryAcquire(int permits) {
return tryAcquire(permits, 0, MICROSECONDS);
}
|
Acquires permits from this {@link RateLimiter} if it can be acquired immediately without delay.
<p>This method is equivalent to {@code tryAcquire(permits, 0, anyUnit)}.
@param permits the number of permits to acquire
@return {@code true} if the permits were acquired, {@code false} otherwise
@throws IllegalArgumentException if the requested number of permits is negative or zero
@since 14.0
|
java
|
android/guava/src/com/google/common/util/concurrent/RateLimiter.java
| 367
|
[
"permits"
] | true
| 1
| 6.16
|
google/guava
| 51,352
|
javadoc
| false
|
|
equalsIncludingNaN
|
private static boolean equalsIncludingNaN(double a, double b) {
return (a == b) || (Double.isNaN(a) && Double.isNaN(b));
}
|
Value-based equality for exponential histograms.
@param a the first histogram (can be null)
@param b the second histogram (can be null)
@return true, if both histograms are equal
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogram.java
| 183
|
[
"a",
"b"
] | true
| 3
| 8
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
register
|
public static void register(BeanDefinitionRegistry registry) {
Assert.notNull(registry, "'registry' must not be null");
if (!registry.containsBeanDefinition(BEAN_NAME)) {
BeanDefinition definition = BeanDefinitionBuilder
.rootBeanDefinition(ConfigurationPropertiesBindingPostProcessor.class)
.getBeanDefinition();
definition.setRole(BeanDefinition.ROLE_INFRASTRUCTURE);
registry.registerBeanDefinition(BEAN_NAME, definition);
}
ConfigurationPropertiesBinder.register(registry);
}
|
Register a {@link ConfigurationPropertiesBindingPostProcessor} bean if one is not
already registered.
@param registry the bean definition registry
@since 2.2.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/ConfigurationPropertiesBindingPostProcessor.java
| 114
|
[
"registry"
] |
void
| true
| 2
| 6.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
initializeDefaultConditions
|
function initializeDefaultConditions() {
const userConditions = getOptionValue('--conditions');
const noAddons = getOptionValue('--no-addons');
const addonConditions = noAddons ? [] : ['node-addons'];
const moduleConditions = getOptionValue('--require-module') ? ['module-sync'] : [];
defaultConditions = ObjectFreeze([
'node',
'import',
...moduleConditions,
...addonConditions,
...userConditions,
]);
defaultConditionsSet = new SafeSet(defaultConditions);
}
|
Initializes the default conditions for ESM module loading.
This function is called during pre-execution, before any user code is run.
@returns {void}
|
javascript
|
lib/internal/modules/esm/utils.js
| 74
|
[] | false
| 3
| 7.28
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
usesWriteEntries
|
boolean usesWriteEntries() {
return usesWriteQueue() || recordsWrite();
}
|
Creates a new, empty map with the specified strategy, initial capacity and concurrency level.
|
java
|
android/guava/src/com/google/common/cache/LocalCache.java
| 364
|
[] | true
| 2
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
addToCentral
|
private long addToCentral(List<DataBlock> parts, ZipCentralDirectoryFileHeaderRecord originalRecord,
long originalRecordPos, DataBlock name, int offsetToLocalHeader) throws IOException {
ZipCentralDirectoryFileHeaderRecord record = originalRecord.withFileNameLength((short) (name.size() & 0xFFFF))
.withOffsetToLocalHeader(offsetToLocalHeader);
int originalExtraFieldLength = Short.toUnsignedInt(originalRecord.extraFieldLength());
int originalFileCommentLength = Short.toUnsignedInt(originalRecord.fileCommentLength());
int extraFieldAndCommentSize = originalExtraFieldLength + originalFileCommentLength;
parts.add(new ByteArrayDataBlock(record.asByteArray()));
parts.add(name);
if (extraFieldAndCommentSize > 0) {
parts.add(new DataPart(originalRecordPos + originalRecord.size() - extraFieldAndCommentSize,
extraFieldAndCommentSize));
}
return record.size();
}
|
Create a new {@link VirtualZipDataBlock} for the given entries.
@param data the source zip data
@param nameOffsetLookups the name offsets to apply
@param centralRecords the records that should be copied to the virtual zip
@param centralRecordPositions the record positions in the data block.
@throws IOException on I/O error
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/VirtualZipDataBlock.java
| 73
|
[
"parts",
"originalRecord",
"originalRecordPos",
"name",
"offsetToLocalHeader"
] | true
| 2
| 6.56
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
bindExportAssignment
|
function bindExportAssignment(node: ExportAssignment) {
if (!container.symbol || !container.symbol.exports) {
// Incorrect export assignment in some sort of block construct
bindAnonymousDeclaration(node, SymbolFlags.Value, getDeclarationName(node)!);
}
else {
const flags = exportAssignmentIsAlias(node)
// An export default clause with an EntityNameExpression or a class expression exports all meanings of that identifier or expression;
? SymbolFlags.Alias
// An export default clause with any other expression exports a value
: SymbolFlags.Property;
// If there is an `export default x;` alias declaration, can't `export default` anything else.
// (In contrast, you can still have `export default function f() {}` and `export default interface I {}`.)
const symbol = declareSymbol(container.symbol.exports, container.symbol, node, flags, SymbolFlags.All);
if (node.isExportEquals) {
// Will be an error later, since the module already has other exports. Just make sure this has a valueDeclaration set.
setValueDeclaration(symbol, node);
}
}
}
|
Declares a Symbol for the node and adds it to symbols. Reports errors for conflicting identifier names.
@param symbolTable - The symbol table which node will be added to.
@param parent - node's parent declaration.
@param node - The declaration to be added to the symbol table
@param includes - The SymbolFlags that node has in addition to its declaration type (eg: export, ambient, etc.)
@param excludes - The flags which node cannot be declared alongside in a symbol table. Used to report forbidden declarations.
|
typescript
|
src/compiler/binder.ts
| 3,125
|
[
"node"
] | false
| 6
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
readStaticField
|
public static Object readStaticField(final Field field) throws IllegalAccessException {
return readStaticField(field, false);
}
|
Reads an accessible {@code static} {@link Field}.
@param field
to read.
@return the field value.
@throws NullPointerException
if the field is {@code null}.
@throws IllegalArgumentException
if the field is not {@code static}.
@throws IllegalAccessException
if the field is not accessible
@throws SecurityException if an underlying accessible object's method denies the request.
@see SecurityManager#checkPermission
|
java
|
src/main/java/org/apache/commons/lang3/reflect/FieldUtils.java
| 520
|
[
"field"
] |
Object
| true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
detectBrokenLz4Version
|
static void detectBrokenLz4Version() {
byte[] source = new byte[]{1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3};
final LZ4Compressor compressor = LZ4Factory.fastestInstance().fastCompressor();
final byte[] compressed = new byte[compressor.maxCompressedLength(source.length)];
final int compressedLength = compressor.compress(source, 0, source.length, compressed, 0,
compressed.length);
// allocate an array-backed ByteBuffer with non-zero array-offset containing the compressed data
// a buggy decompressor will read the data from the beginning of the underlying array instead of
// the beginning of the ByteBuffer, failing to decompress the invalid data.
final byte[] zeroes = {0, 0, 0, 0, 0};
ByteBuffer nonZeroOffsetBuffer = ByteBuffer
.allocate(zeroes.length + compressed.length) // allocates the backing array with extra space to offset the data
.put(zeroes) // prepend invalid bytes (zeros) before the compressed data in the array
.slice() // create a new ByteBuffer sharing the underlying array, offset to start on the compressed data
.put(compressed); // write the compressed data at the beginning of this new buffer
ByteBuffer dest = ByteBuffer.allocate(source.length);
try {
DECOMPRESSOR.decompress(nonZeroOffsetBuffer, 0, compressedLength, dest, 0, source.length);
} catch (Exception e) {
throw new RuntimeException("Kafka has detected a buggy lz4-java library (< 1.4.x) on the classpath."
+ " If you are using Kafka client libraries, make sure your application does not"
+ " accidentally override the version provided by Kafka or include multiple versions"
+ " of the library on the classpath. The lz4-java version on the classpath should"
+ " match the version the Kafka client libraries depend on. Adding -verbose:class"
+ " to your JVM arguments may help understand which lz4-java version is getting loaded.", e);
}
}
|
Checks whether the version of lz4 on the classpath has the fix for reading from ByteBuffers with
non-zero array offsets (see https://github.com/lz4/lz4-java/pull/65)
|
java
|
clients/src/main/java/org/apache/kafka/common/compress/Lz4BlockInputStream.java
| 283
|
[] |
void
| true
| 2
| 6.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
hashCode
|
@Override
public int hashCode() {
    // Return the memoized value when the hash has already been computed.
    final Integer cached = this.hash;
    if (cached != null) {
        return cached;
    }
    // 31-based rolling hash over all identity fields; Objects.hashCode
    // yields 0 for null, matching the original null-guarded ternaries.
    int result = 31 + Objects.hashCode(host);
    result = 31 * result + id;
    result = 31 * result + port;
    result = 31 * result + Objects.hashCode(rack);
    result = 31 * result + Objects.hashCode(isFenced);
    this.hash = result;
    return result;
}
|
Computes a hash code from the host, id, port, rack and isFenced fields,
memoizing the result in the {@code hash} field so that repeated calls
avoid recomputation.
|
java
|
clients/src/main/java/org/apache/kafka/common/Node.java
| 126
|
[] | true
| 4
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
createParentDirectory
|
private void createParentDirectory(File file) {
    // A null parent means the path has no parent component; mkdirs() is a
    // no-op when the directory already exists, so no existence check is needed.
    File parentDirectory = file.getParentFile();
    if (parentDirectory != null) {
        parentDirectory.mkdirs();
    }
}
|
Create the parent directory of the given file, if the path has one and it
does not already exist.
@param file the file whose parent directory should be created
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/system/ApplicationPid.java
| 118
|
[
"file"
] |
void
| true
| 2
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
delete
|
/**
 * Deletes the characters between the two specified indices.
 *
 * @param startIndex the start index, inclusive, must be valid
 * @param endIndex the end index, exclusive; too-large values are clamped to the end
 * @return {@code this} instance.
 * @throws IndexOutOfBoundsException if the index is invalid
 */
public StrBuilder delete(final int startIndex, int endIndex) {
    // validateRange clamps endIndex to the buffer size and rejects bad ranges.
    endIndex = validateRange(startIndex, endIndex);
    final int removedCount = endIndex - startIndex;
    if (removedCount > 0) {
        deleteImpl(startIndex, endIndex, removedCount);
    }
    return this;
}
|
Deletes the characters between the two specified indices.
@param startIndex the start index, inclusive, must be valid
@param endIndex the end index, exclusive, must be valid except
that if too large it is treated as end of string
@return {@code this} instance.
@throws IndexOutOfBoundsException if the index is invalid
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,663
|
[
"startIndex",
"endIndex"
] |
StrBuilder
| true
| 2
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
ipFloatByte
|
/**
 * Computes the inner product of a float query vector and a byte document vector.
 *
 * @param q the query vector
 * @param d the document vector
 * @return the inner product of the two vectors
 * @throws IllegalArgumentException if the vectors differ in length
 */
public static float ipFloatByte(float[] q, byte[] d) {
    if (q.length != d.length) {
        // Fixed message spacing: previously rendered as e.g. "3!= 4".
        throw new IllegalArgumentException("vector dimensions incompatible: " + q.length + " != " + d.length);
    }
    return IMPL.ipFloatByte(q, d);
}
|
Compute the inner product of two vectors, where the query vector is a float vector and the document vector is a byte vector.
@param q the query vector
@param d the document vector
@return the inner product of the two vectors
|
java
|
libs/simdvec/src/main/java/org/elasticsearch/simdvec/ESVectorUtil.java
| 114
|
[
"q",
"d"
] | true
| 2
| 8.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
deleteAll
|
/**
 * Deletes the character wherever it occurs in the builder, removing each
 * contiguous run of matches in a single pass.
 *
 * @param ch the character to delete
 * @return {@code this} instance.
 */
public StrBuilder deleteAll(final char ch) {
    for (int i = 0; i < size; i++) {
        if (buffer[i] == ch) {
            final int start = i; // first index of this run of matching chars
            // Advance i past the whole contiguous run of ch.
            while (++i < size) {
                if (buffer[i] != ch) {
                    break;
                }
            }
            final int len = i - start;
            deleteImpl(start, i, len);
            // deleteImpl shifted the tail left by len; rewind i to match.
            i -= len;
        }
    }
    return this;
}
|
Deletes the character wherever it occurs in the builder.
@param ch the character to delete
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,678
|
[
"ch"
] |
StrBuilder
| true
| 5
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getFactoryMethodForGenerator
|
private @Nullable Method getFactoryMethodForGenerator() {
    // Only expose the factory method for CGLIB-enhanced (full) configuration
    // classes, avoiding unnecessary currentlyInvokedFactoryMethod exposure.
    if (!(this.lookup instanceof FactoryMethodLookup factoryMethodLookup)) {
        return null;
    }
    String declaringClassName = factoryMethodLookup.declaringClass.getName();
    return (declaringClassName.contains(ClassUtils.CGLIB_CLASS_SEPARATOR) ?
            factoryMethodLookup.get() : null);
}
|
Return the factory {@link Method} to expose during instantiation, but only
for CGLIB-enhanced (full) configuration classes; returns {@code null}
otherwise to avoid unnecessary {@code currentlyInvokedFactoryMethod}
exposure outside of full configuration classes.
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/BeanInstanceSupplier.java
| 218
|
[] |
Method
| true
| 3
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
toScaledBigDecimal
|
/**
 * Converts a {@link Double} to a {@link BigDecimal} with the given scale and
 * {@link RoundingMode} applied; a {@code null} input yields {@link BigDecimal#ZERO}.
 *
 * @param value the {@link Double} to convert, may be null.
 * @param scale the number of digits to the right of the decimal point.
 * @param roundingMode a rounding behavior for numerical operations capable of discarding precision.
 * @return the scaled, with appropriate rounding, {@link BigDecimal}.
 * @since 3.8
 */
public static BigDecimal toScaledBigDecimal(final Double value, final int scale, final RoundingMode roundingMode) {
    // Null-safe: treat a missing value as zero instead of propagating an NPE.
    return value == null
            ? BigDecimal.ZERO
            : toScaledBigDecimal(BigDecimal.valueOf(value), scale, roundingMode);
}
|
Converts a {@link Double} to a {@link BigDecimal} whose scale is the specified value with a {@link RoundingMode} applied. If the input {@code value} is
{@code null}, we simply return {@code BigDecimal.ZERO}.
@param value the {@link Double} to convert, may be null.
@param scale the number of digits to the right of the decimal point.
@param roundingMode a rounding behavior for numerical operations capable of discarding precision.
@return the scaled, with appropriate rounding, {@link BigDecimal}.
@since 3.8
|
java
|
src/main/java/org/apache/commons/lang3/math/NumberUtils.java
| 1,676
|
[
"value",
"scale",
"roundingMode"
] |
BigDecimal
| true
| 2
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
putBytes
|
/**
 * Puts an array of bytes into this sink.
 *
 * @param bytes a byte array
 * @return this instance
 */
@CanIgnoreReturnValue
PrimitiveSink putBytes(byte[] bytes);
|
Puts an array of bytes into this sink.
@param bytes a byte array
@return this instance
|
java
|
android/guava/src/com/google/common/hash/PrimitiveSink.java
| 45
|
[
"bytes"
] |
PrimitiveSink
| true
| 1
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
newLinkedBlockingQueue
|
/**
 * Creates a {@code LinkedBlockingQueue} with a capacity of {@link Integer#MAX_VALUE},
 * containing the elements of the given iterable in its iteration order.
 *
 * @param elements the elements that the queue should contain, in order
 * @return a new {@code LinkedBlockingQueue} containing those elements
 */
@J2ktIncompatible
@GwtIncompatible // LinkedBlockingQueue
public static <E> LinkedBlockingQueue<E> newLinkedBlockingQueue(Iterable<? extends E> elements) {
  // A Collection can be handed straight to the constructor, which copies it.
  if (elements instanceof Collection) {
    return new LinkedBlockingQueue<>((Collection<? extends E>) elements);
  }
  // Otherwise drain the iterable into an empty queue.
  LinkedBlockingQueue<E> result = new LinkedBlockingQueue<>();
  Iterables.addAll(result, elements);
  return result;
}
|
Creates a {@code LinkedBlockingQueue} with a capacity of {@link Integer#MAX_VALUE}, containing
the elements of the specified iterable, in the order they are returned by the iterable's
iterator.
@param elements the elements that the queue should contain, in order
@return a new {@code LinkedBlockingQueue} containing those elements
|
java
|
android/guava/src/com/google/common/collect/Queues.java
| 186
|
[
"elements"
] | true
| 2
| 7.28
|
google/guava
| 51,352
|
javadoc
| false
|
|
find_option
|
def find_option(self, name, namespace=''):
    """Search for option by name.

    Example:
        >>> from proj.celery import app
        >>> app.conf.find_option('disable_rate_limits')
        ('worker', 'prefetch_multiplier',
         <Option: type->bool default->False>))

    Arguments:
        name (str): Name of option, cannot be partial.
        namespace (str): Preferred name-space ('' by default).

    Returns:
        Tuple: of ``(namespace, key, type)``.
    """
    # Delegates to the module-level ``find`` helper; ``namespace`` narrows
    # the search to a preferred settings name-space first.
    return find(name, namespace)
|
Search for option by name.
Example:
>>> from proj.celery import app
>>> app.conf.find_option('disable_rate_limits')
('worker', 'prefetch_multiplier',
<Option: type->bool default->False>))
Arguments:
name (str): Name of option, cannot be partial.
namespace (str): Preferred name-space (``None`` by default).
Returns:
Tuple: of ``(namespace, key, type)``.
|
python
|
celery/app/utils.py
| 141
|
[
"self",
"name",
"namespace"
] | false
| 1
| 7.2
|
celery/celery
| 27,741
|
google
| false
|
|
unique_labels
|
def unique_labels(*ys):
    """Extract an ordered array of unique labels.

    We don't allow:
        - mix of multilabel and multiclass (single label) targets
        - mix of label indicator matrix and anything else,
          because there are no explicit labels
        - mix of label indicator matrices of different sizes
        - mix of string and integer labels

    At the moment, we also don't allow "multiclass-multioutput" input type.

    Parameters
    ----------
    *ys : array-likes
        Label values.

    Returns
    -------
    out : ndarray of shape (n_unique_labels,)
        An ordered array of unique labels.

    Examples
    --------
    >>> from sklearn.utils.multiclass import unique_labels
    >>> unique_labels([3, 5, 5, 5, 7, 7])
    array([3, 5, 7])
    >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
    array([1, 2, 3, 4])
    >>> unique_labels([1, 2, 10], [5, 11])
    array([ 1,  2,  5, 10, 11])
    """
    ys = attach_unique(*ys, return_tuple=True)
    xp, is_array_api_compliant = get_namespace(*ys)
    if len(ys) == 0:
        raise ValueError("No argument has been passed.")
    # Check that we don't mix label format
    ys_types = set(type_of_target(x) for x in ys)
    # binary is a special case of multiclass, so the mix is allowed and
    # collapsed to multiclass.
    if ys_types == {"binary", "multiclass"}:
        ys_types = {"multiclass"}
    if len(ys_types) > 1:
        raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
    label_type = ys_types.pop()
    # Check consistency for the indicator format
    if (
        label_type == "multilabel-indicator"
        and len(
            set(
                check_array(y, accept_sparse=["csr", "csc", "coo"]).shape[1] for y in ys
            )
        )
        > 1
    ):
        raise ValueError(
            "Multi-label binary indicator input with different numbers of labels"
        )
    # Get the unique set of labels
    _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
    if not _unique_labels:
        raise ValueError("Unknown label type: %s" % repr(ys))
    if is_array_api_compliant:
        # array_api does not allow for mixed dtypes
        unique_ys = xp.concat([_unique_labels(y, xp=xp) for y in ys])
        return xp.unique_values(unique_ys)
    ys_labels = set(
        chain.from_iterable((i for i in _unique_labels(y, xp=xp)) for y in ys)
    )
    # Check that we don't mix string type with number type
    if len(set(isinstance(label, str) for label in ys_labels)) > 1:
        raise ValueError("Mix of label input types (string and number)")
    return xp.asarray(sorted(ys_labels))
|
Extract an ordered array of unique labels.
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes
Label values.
Returns
-------
out : ndarray of shape (n_unique_labels,)
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
|
python
|
sklearn/utils/multiclass.py
| 41
|
[] | false
| 9
| 7.6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
poly2herme
|
def poly2herme(pol):
    """Convert a polynomial to a HermiteE series.

    Convert an array of standard-basis polynomial coefficients, ordered from
    lowest degree to highest, into the coefficients of the equivalent
    HermiteE series, ordered from lowest to highest degree.

    Parameters
    ----------
    pol : array_like
        1-D array containing the polynomial coefficients.

    Returns
    -------
    c : ndarray
        1-D array containing the coefficients of the equivalent Hermite
        series.

    See Also
    --------
    herme2poly

    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.

    Examples
    --------
    >>> import numpy as np
    >>> from numpy.polynomial.hermite_e import poly2herme
    >>> poly2herme(np.arange(4))
    array([ 2., 10.,  2.,  3.])
    """
    [pol] = pu.as_series([pol])
    # Horner-style accumulation in the HermiteE basis: repeatedly multiply
    # the running series by x and add the next-lower polynomial coefficient.
    result = 0
    for coefficient in pol[::-1]:
        result = hermeadd(hermemulx(result), coefficient)
    return result
|
poly2herme(pol)
Convert a polynomial to a Hermite series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Hermite series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Hermite
series.
See Also
--------
herme2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> import numpy as np
>>> from numpy.polynomial.hermite_e import poly2herme
>>> poly2herme(np.arange(4))
array([ 2., 10., 2., 3.])
|
python
|
numpy/polynomial/hermite_e.py
| 95
|
[
"pol"
] | false
| 2
| 7.36
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
execute_using_pool
|
def execute_using_pool(self, pool: BasePool, **kwargs):
    """Used by the worker to send this task to the pool.

    Arguments:
        pool (~celery.concurrency.base.TaskPool): The execution pool
            used to execute this request.

    Raises:
        celery.exceptions.TaskRevokedError: if the task was revoked.
    """
    task_id = self.id
    task = self._task
    # Refuse to run tasks that were revoked while waiting in the queue.
    if self.revoked():
        raise TaskRevokedError(task_id)
    time_limit, soft_time_limit = self.time_limits
    # Pick the tracing entry point configured for this app.
    trace = fast_trace_task if self._app.use_fast_trace_task else trace_task_ret
    result = pool.apply_async(
        trace,
        args=(self._type, task_id, self._request_dict, self._body,
              self._content_type, self._content_encoding),
        accept_callback=self.on_accepted,
        timeout_callback=self.on_timeout,
        callback=self.on_success,
        error_callback=self.on_failure,
        # Request-level limits take precedence over the task defaults.
        soft_timeout=soft_time_limit or task.soft_time_limit,
        timeout=time_limit or task.time_limit,
        correlation_id=task_id,
    )
    # cannot create weakref to None
    self._apply_result = maybe(ref, result)
    return result
|
Used by the worker to send this task to the pool.
Arguments:
pool (~celery.concurrency.base.TaskPool): The execution pool
used to execute this request.
Raises:
celery.exceptions.TaskRevokedError: if the task was revoked.
|
python
|
celery/worker/request.py
| 338
|
[
"self",
"pool"
] | true
| 5
| 6.56
|
celery/celery
| 27,741
|
google
| false
|
|
getAnnotatedMethodsNotCached
|
/**
 * Finds all methods annotated with {@code @Subscribe} on the given class and
 * its supertypes, validating that each takes exactly one non-primitive
 * parameter. Methods overridden lower in the hierarchy win (first identifier
 * registered is kept).
 */
private static ImmutableList<Method> getAnnotatedMethodsNotCached(Class<?> clazz) {
  Set<? extends Class<?>> supertypes = TypeToken.of(clazz).getTypes().rawTypes();
  Map<MethodIdentifier, Method> identifiers = new HashMap<>();
  for (Class<?> supertype : supertypes) {
    for (Method method : supertype.getDeclaredMethods()) {
      if (method.isAnnotationPresent(Subscribe.class) && !method.isSynthetic()) {
        // TODO(cgdecker): Should check for a generic parameter type and error out
        Class<?>[] parameterTypes = method.getParameterTypes();
        checkArgument(
            parameterTypes.length == 1,
            "Method %s has @Subscribe annotation but has %s parameters. "
                + "Subscriber methods must have exactly 1 parameter.",
            method,
            parameterTypes.length);
        checkArgument(
            !parameterTypes[0].isPrimitive(),
            "@Subscribe method %s's parameter is %s. "
                + "Subscriber methods cannot accept primitives. "
                + "Consider changing the parameter to %s.",
            method,
            parameterTypes[0].getName(),
            Primitives.wrap(parameterTypes[0]).getSimpleName());
        // Keep the first registration for each identifier (idiomatic
        // replacement for the containsKey + put pair).
        identifiers.putIfAbsent(new MethodIdentifier(method), method);
      }
    }
  }
  return ImmutableList.copyOf(identifiers.values());
}
|
Returns all methods of the given class and its supertypes that are annotated with {@code @Subscribe}, validating that each takes exactly one non-primitive parameter.
|
java
|
android/guava/src/com/google/common/eventbus/SubscriberRegistry.java
| 193
|
[
"clazz"
] | true
| 4
| 6
|
google/guava
| 51,352
|
javadoc
| false
|
|
getPriority
|
protected @Nullable Integer getPriority(Object beanInstance) {
    // Delegate to the configured dependency comparator when it is an
    // OrderComparator (e.g. AnnotationAwareOrderComparator); otherwise no
    // priority can be determined for the instance.
    return (getDependencyComparator() instanceof OrderComparator orderComparator ?
            orderComparator.getPriority(beanInstance) : null);
}
|
Return the priority assigned for the given bean instance by
the {@code jakarta.annotation.Priority} annotation.
<p>The default implementation delegates to the specified
{@link #setDependencyComparator dependency comparator}, checking its
{@link OrderComparator#getPriority method} if it is an extension of
Spring's common {@link OrderComparator} - typically, an
{@link org.springframework.core.annotation.AnnotationAwareOrderComparator}.
If no such comparator is present, this implementation returns {@code null}.
@param beanInstance the bean instance to check (can be {@code null})
@return the priority assigned to that bean or {@code null} if none is set
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/DefaultListableBeanFactory.java
| 2,217
|
[
"beanInstance"
] |
Integer
| true
| 2
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
accept
|
/**
 * Accepts the function with the duration as a long of milliseconds and an
 * int of nanoseconds; null arguments are silently ignored.
 *
 * @param <T> The function exception.
 * @param consumer Accepting function.
 * @param duration The duration to pick apart.
 * @throws T See the function signature.
 */
@SuppressWarnings("boxing") // boxing unavoidable
public static <T extends Throwable> void accept(final FailableBiConsumer<Long, Integer, T> consumer, final Duration duration)
    throws T {
    // Guard clause: nothing sensible to consume when either input is null.
    if (consumer == null || duration == null) {
        return;
    }
    consumer.accept(duration.toMillis(), getNanosOfMilli(duration));
}
|
Accepts the function with the duration as a long milliseconds and int nanoseconds.
@param <T> The function exception.
@param consumer Accepting function.
@param duration The duration to pick apart.
@throws T See the function signature.
@see StopWatch
|
java
|
src/main/java/org/apache/commons/lang3/time/DurationUtils.java
| 57
|
[
"consumer",
"duration"
] |
void
| true
| 3
| 6.72
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getUserHome
|
private static Path getUserHome() {
    // Fail fast when the JVM does not expose the user's home directory.
    final String home = System.getProperty("user.home");
    if (home == null) {
        throw new IllegalStateException("user.home system property is required");
    }
    return PathUtils.get(home);
}
|
Main entry point that activates entitlement checking. Once this method returns,
calls to methods protected by entitlements from classes without a valid
policy will throw {@link org.elasticsearch.entitlement.runtime.api.NotEntitledException}.
@param serverPolicyPatch additional entitlements to patch the embedded server layer policy
@param pluginPolicies maps each plugin name to the corresponding {@link Policy}
@param scopeResolver a functor to map a Java Class to the component and module it belongs to.
@param settingResolver a functor to resolve a setting name pattern for one or more Elasticsearch settings.
@param dataDirs data directories for Elasticsearch
@param sharedDataDir shared data directory for Elasticsearch (deprecated)
@param sharedRepoDirs shared repository directories for Elasticsearch
@param configDir the config directory for Elasticsearch
@param libDir the lib directory for Elasticsearch
@param modulesDir the directory where Elasticsearch modules are
@param pluginsDir the directory where plugins are installed for Elasticsearch
@param pluginSourcePaths maps each plugin name to the location of that plugin's code
@param tempDir the temp directory for Elasticsearch
@param logsDir the log directory for Elasticsearch
@param pidFile path to a pid file for Elasticsearch, or {@code null} if one was not specified
@param suppressFailureLogPackages packages for which we do not need or want to log Entitlements failures
|
java
|
libs/entitlement/src/main/java/org/elasticsearch/entitlement/bootstrap/EntitlementBootstrap.java
| 110
|
[] |
Path
| true
| 2
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
replace
|
/**
 * Replaces up to {@code max} occurrences of {@code searchString} inside
 * {@code text} with {@code replacement}, honoring this instance's
 * case-sensitivity setting ({@code ignoreCase}).
 *
 * @param text text to search and replace in, may be null
 * @param searchString the String to search for, may be null
 * @param replacement the String to replace it with, may be null
 * @param max maximum number of values to replace, or {@code -1} for no maximum
 * @return the text with any replacements processed, {@code null} if null String input
 */
public String replace(final String text, String searchString, final String replacement, int max) {
    // No-op for null/empty inputs or a zero replacement budget.
    if (StringUtils.isEmpty(text) || StringUtils.isEmpty(searchString) || replacement == null || max == 0) {
        return text;
    }
    if (ignoreCase) {
        // indexOf for this instance matches case-insensitively against the
        // lower-cased needle.
        searchString = searchString.toLowerCase();
    }
    int start = 0;
    int end = indexOf(text, searchString, start);
    if (end == INDEX_NOT_FOUND) {
        return text;
    }
    final int replLength = searchString.length();
    // Pre-size the builder assuming up to 16 (unbounded) or max (capped at
    // 64) growth-inducing replacements.
    int increase = Math.max(replacement.length() - replLength, 0);
    increase *= max < 0 ? 16 : Math.min(max, 64);
    final StringBuilder buf = new StringBuilder(text.length() + increase);
    while (end != INDEX_NOT_FOUND) {
        buf.append(text, start, end).append(replacement);
        start = end + replLength;
        if (--max == 0) {
            break;
        }
        end = indexOf(text, searchString, start);
    }
    // Append the remaining tail after the final match.
    buf.append(text, start, text.length());
    return buf.toString();
}
|
Replaces a String with another String inside a larger String, for the first {@code max} values of the search String.
<p>
A {@code null} reference passed to this method is a no-op.
</p>
<p>
Case-sensitive examples
</p>
<pre>
Strings.CS.replace(null, *, *, *) = null
Strings.CS.replace("", *, *, *) = ""
Strings.CS.replace("any", null, *, *) = "any"
Strings.CS.replace("any", *, null, *) = "any"
Strings.CS.replace("any", "", *, *) = "any"
Strings.CS.replace("any", *, *, 0) = "any"
Strings.CS.replace("abaa", "a", null, -1) = "abaa"
Strings.CS.replace("abaa", "a", "", -1) = "b"
Strings.CS.replace("abaa", "a", "z", 0) = "abaa"
Strings.CS.replace("abaa", "a", "z", 1) = "zbaa"
Strings.CS.replace("abaa", "a", "z", 2) = "zbza"
Strings.CS.replace("abaa", "a", "z", -1) = "zbzz"
</pre>
<p>
Case-insensitive examples
</p>
<pre>
Strings.CI.replace(null, *, *, *) = null
Strings.CI.replace("", *, *, *) = ""
Strings.CI.replace("any", null, *, *) = "any"
Strings.CI.replace("any", *, null, *) = "any"
Strings.CI.replace("any", "", *, *) = "any"
Strings.CI.replace("any", *, *, 0) = "any"
Strings.CI.replace("abaa", "a", null, -1) = "abaa"
Strings.CI.replace("abaa", "a", "", -1) = "b"
Strings.CI.replace("abaa", "a", "z", 0) = "abaa"
Strings.CI.replace("abaa", "A", "z", 1) = "zbaa"
Strings.CI.replace("abAa", "a", "z", 2) = "zbza"
Strings.CI.replace("abAa", "a", "z", -1) = "zbzz"
</pre>
@param text text to search and replace in, may be null
@param searchString the String to search for (case-insensitive), may be null
@param replacement the String to replace it with, may be null
@param max maximum number of values to replace, or {@code -1} if no maximum
@return the text with any replacements processed, {@code null} if null String input
|
java
|
src/main/java/org/apache/commons/lang3/Strings.java
| 1,291
|
[
"text",
"searchString",
"replacement",
"max"
] |
String
| true
| 10
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getMappings
|
/**
 * Drops mappings for models that have no scalar fields and normalizes legacy
 * operation names (e.g. `createOne`/`createSingle` -> `create`).
 */
function getMappings(mappings: DMMF.Mappings, datamodel: DMMF.Datamodel): DMMF.Mappings {
  const modelOperations = mappings.modelOperations
    .filter((mapping) => {
      const model = datamodel.models.find((m) => m.name === mapping.model)
      if (!model) {
        throw new Error(`Mapping without model ${mapping.model}`)
      }
      // Keep only models with at least one non-relation field.
      return model.fields.some((f) => f.kind !== 'object')
    })
    // TODO most of this is probably not needed anymore
    .map((mapping: any) => ({
      model: mapping.model,
      plural: pluralize(uncapitalize(mapping.model)), // TODO not needed anymore
      findUnique: mapping.findUnique || mapping.findSingle,
      findUniqueOrThrow: mapping.findUniqueOrThrow,
      findFirst: mapping.findFirst,
      findFirstOrThrow: mapping.findFirstOrThrow,
      findMany: mapping.findMany,
      create: mapping.createOne || mapping.createSingle || mapping.create,
      createMany: mapping.createMany,
      createManyAndReturn: mapping.createManyAndReturn,
      delete: mapping.deleteOne || mapping.deleteSingle || mapping.delete,
      update: mapping.updateOne || mapping.updateSingle || mapping.update,
      deleteMany: mapping.deleteMany,
      updateMany: mapping.updateMany,
      updateManyAndReturn: mapping.updateManyAndReturn,
      upsert: mapping.upsertOne || mapping.upsertSingle || mapping.upsert,
      aggregate: mapping.aggregate,
      groupBy: mapping.groupBy,
      findRaw: mapping.findRaw,
      aggregateRaw: mapping.aggregateRaw,
    }))
  return {
    modelOperations,
    otherOperations: mappings.otherOperations,
  }
}
|
Filters out model mappings that have no scalar fields and normalizes legacy
operation names to their canonical keys.
|
typescript
|
packages/client-generator-js/src/externalToInternalDmmf.ts
| 20
|
[
"mappings",
"datamodel"
] | true
| 11
| 6.72
|
prisma/prisma
| 44,834
|
jsdoc
| false
|
|
rewriteExprFromNumberToDuration
|
// Rewrites the numeric expression `Node` into an equivalent `absl::Duration`
// expression at the given `Scale` (e.g. 5 at seconds -> `absl::Seconds(5)`).
std::string rewriteExprFromNumberToDuration(
    const ast_matchers::MatchFinder::MatchResult &Result, DurationScale Scale,
    const Expr *Node) {
  const Expr &RootNode = *Node->IgnoreParenImpCasts();
  // First check to see if we can undo a complementary function call.
  if (std::optional<std::string> MaybeRewrite =
          rewriteInverseDurationCall(Result, Scale, RootNode))
    return *MaybeRewrite;
  // A literal zero maps to the scale-independent absl::ZeroDuration().
  if (isLiteralZero(Result, RootNode))
    return {"absl::ZeroDuration()"};
  return (llvm::Twine(getDurationFactoryForScale(Scale)) + "(" +
          simplifyDurationFactoryArg(Result, RootNode) + ")")
      .str();
}
|
Rewrites the numeric expression `Node` into an equivalent `absl::Duration` expression at the given `Scale`.
|
cpp
|
clang-tools-extra/clang-tidy/abseil/DurationRewriter.cpp
| 222
|
[
"Scale"
] | true
| 3
| 6
|
llvm/llvm-project
| 36,021
|
doxygen
| false
|
|
copy_fwd_metadata_to_bw_nodes
|
def copy_fwd_metadata_to_bw_nodes(fx_g: torch.fx.GraphModule) -> None:
    """
    Input: `fx_g` which contains the joint fwd+bwd FX graph created by
    aot_autograd.

    This function walks the graph and copies over metadata from forward nodes
    to backward nodes, using the `seq_nr` field as a one-to-many mapping
    from forward node to backward node. This metadata is useful for performance
    profiling and debugging.

    This function supports matching forward and backward nodes across different
    subgraphs (e.g., in recursive submodules from HOPs), enabling backward nodes
    in any submodule to match forward nodes in any submodule.
    """
    # Build a global mapping of seq_nr to forward nodes across all subgraphs.
    # NOTE(review): seq_nr appears to be the autograd sequence number linking a
    # fwd op to its bwd ops — confirm against the collector helpers.
    fwd_seq_nr_to_node: dict[str, torch.fx.Node] = {}
    # First pass: collect all forward nodes from all subgraphs
    for submod in fx_g.modules():
        if isinstance(submod, torch.fx.GraphModule):
            _collect_fwd_nodes_from_subgraph(submod, fwd_seq_nr_to_node)
    if annotation_log.isEnabledFor(logging.DEBUG):
        for k, v in fwd_seq_nr_to_node.items():
            annotation_log.debug("forward:: key: %s, value: %s", k, v)
    # Second pass: copy metadata to backward nodes in all subgraphs
    # using the global forward mapping
    for submod in fx_g.modules():
        if isinstance(submod, torch.fx.GraphModule):
            _copy_metadata_to_bw_nodes_in_subgraph(submod, fwd_seq_nr_to_node)
|
Input: `fx_g` which contains the joint fwd+bwd FX graph created by
aot_autograd.
This function walks the graph and copies over metadata from forward nodes
to backward nodes, using the `seq_nr` field as a one-to-many mapping
from forward node to backward node. This metadata is useful for performance
profiling and debugging.
This function supports matching forward and backward nodes across different
subgraphs (e.g., in recursive submodules from HOPs), enabling backward nodes
in any submodule to match forward nodes in any submodule.
|
python
|
torch/_functorch/_aot_autograd/utils.py
| 607
|
[
"fx_g"
] |
None
| true
| 7
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
toBooleanObject
|
/**
 * Converts a String to a Boolean, throwing an exception if no match.
 *
 * <p>NOTE: This method may return {@code null} and may throw a
 * {@link NullPointerException} if unboxed to a {@code boolean}.</p>
 *
 * @param str the String to check
 * @param trueString the String to match for {@code true} (case-sensitive), may be {@code null}
 * @param falseString the String to match for {@code false} (case-sensitive), may be {@code null}
 * @param nullString the String to match for {@code null} (case-sensitive), may be {@code null}
 * @return the Boolean value of the string, {@code null} if either the String matches
 *         {@code nullString} or if {@code null} input and {@code nullString} is {@code null}
 * @throws IllegalArgumentException if the String doesn't match
 */
public static Boolean toBooleanObject(final String str, final String trueString, final String falseString, final String nullString) {
    if (str != null) {
        // Non-null input: compare against each candidate in true/false/null order.
        if (str.equals(trueString)) {
            return Boolean.TRUE;
        }
        if (str.equals(falseString)) {
            return Boolean.FALSE;
        }
        if (str.equals(nullString)) {
            return null;
        }
    } else {
        // Null input matches whichever candidate is itself null, checked in
        // the same true/false/null order.
        if (trueString == null) {
            return Boolean.TRUE;
        }
        if (falseString == null) {
            return Boolean.FALSE;
        }
        if (nullString == null) {
            return null;
        }
    }
    throw new IllegalArgumentException("The String did not match any specified value");
}
|
Converts a String to a Boolean throwing an exception if no match.
<p>NOTE: This method may return {@code null} and may throw a {@link NullPointerException}
if unboxed to a {@code boolean}.</p>
<pre>
BooleanUtils.toBooleanObject("true", "true", "false", "null") = Boolean.TRUE
BooleanUtils.toBooleanObject(null, null, "false", "null") = Boolean.TRUE
BooleanUtils.toBooleanObject(null, null, null, "null") = Boolean.TRUE
BooleanUtils.toBooleanObject(null, null, null, null) = Boolean.TRUE
BooleanUtils.toBooleanObject("false", "true", "false", "null") = Boolean.FALSE
BooleanUtils.toBooleanObject("false", "true", "false", "false") = Boolean.FALSE
BooleanUtils.toBooleanObject(null, "true", null, "false") = Boolean.FALSE
BooleanUtils.toBooleanObject(null, "true", null, null) = Boolean.FALSE
BooleanUtils.toBooleanObject("null", "true", "false", "null") = null
</pre>
@param str the String to check
@param trueString the String to match for {@code true} (case-sensitive), may be {@code null}
@param falseString the String to match for {@code false} (case-sensitive), may be {@code null}
@param nullString the String to match for {@code null} (case-sensitive), may be {@code null}
@return the Boolean value of the string, {@code null} if either the String matches {@code nullString}
or if {@code null} input and {@code nullString} is {@code null}
@throws IllegalArgumentException if the String doesn't match
|
java
|
src/main/java/org/apache/commons/lang3/BooleanUtils.java
| 852
|
[
"str",
"trueString",
"falseString",
"nullString"
] |
Boolean
| true
| 8
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
build
|
public @Nullable BeanFactoryInitializationAotContribution build() {
    // No classes discovered means there is no contribution to register.
    if (this.classes.isEmpty()) {
        return null;
    }
    return new AotContribution(this.classes);
}
|
Scan the given {@code packageNames} and their sub-packages for classes
that uses {@link Reflective}.
<p>This performs a "deep scan" by loading every class in the specified
packages and search for {@link Reflective} on types, constructors, methods,
and fields. Enclosed classes are candidates as well. Classes that fail to
load are ignored.
@param classLoader the classloader to use
@param packageNames the package names to scan
|
java
|
spring-context/src/main/java/org/springframework/context/aot/ReflectiveProcessorAotContributionBuilder.java
| 99
|
[] |
BeanFactoryInitializationAotContribution
| true
| 2
| 6.8
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
startScheduler
|
/**
 * Start the Quartz Scheduler, respecting the "startupDelay" setting: start
 * immediately for a non-positive delay, otherwise start asynchronously from
 * a daemon thread after the given number of seconds.
 *
 * @param scheduler the Scheduler to start
 * @param startupDelay the number of seconds to wait before starting
 */
protected void startScheduler(final Scheduler scheduler, final int startupDelay) throws SchedulerException {
    if (startupDelay <= 0) {
        logger.info("Starting Quartz Scheduler now");
        scheduler.start();
    }
    else {
        if (logger.isInfoEnabled()) {
            logger.info("Will start Quartz Scheduler [" + scheduler.getSchedulerName() +
                    "] in " + startupDelay + " seconds");
        }
        // Not using the Quartz startDelayed method since we explicitly want a daemon
        // thread here, not keeping the JVM alive in case of all other threads ending.
        Thread schedulerThread = new Thread(() -> {
            try {
                TimeUnit.SECONDS.sleep(startupDelay);
            }
            catch (InterruptedException ex) {
                Thread.currentThread().interrupt();
                // simply proceed
            }
            if (logger.isInfoEnabled()) {
                logger.info("Starting Quartz Scheduler now, after delay of " + startupDelay + " seconds");
            }
            try {
                scheduler.start();
            }
            catch (SchedulerException ex) {
                // Surface as an unchecked scheduling failure: the delayed start
                // runs outside the caller's checked-exception scope.
                throw new SchedulingException("Could not start Quartz Scheduler after delay", ex);
            }
        });
        schedulerThread.setName("Quartz Scheduler [" + scheduler.getSchedulerName() + "]");
        schedulerThread.setDaemon(true);
        schedulerThread.start();
    }
}
|
Start the Quartz Scheduler, respecting the "startupDelay" setting.
@param scheduler the Scheduler to start
@param startupDelay the number of seconds to wait before starting
the Scheduler asynchronously
|
java
|
spring-context-support/src/main/java/org/springframework/scheduling/quartz/SchedulerFactoryBean.java
| 716
|
[
"scheduler",
"startupDelay"
] |
void
| true
| 6
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
repeat
|
/**
 * Returns `count` copies of `s` concatenated together; a non-positive
 * count yields the empty string.
 */
function repeat(s: string, count: number): string {
	let out = '';
	for (let remaining = count; remaining > 0; remaining--) {
		out += s;
	}
	return out;
}
|
Returns a string consisting of `count` copies of `s` concatenated together;
a non-positive count yields the empty string.
@param s The string to repeat
@param count The number of repetitions
|
typescript
|
src/vs/base/common/jsonFormatter.ts
| 218
|
[
"s",
"count"
] | true
| 2
| 6.24
|
microsoft/vscode
| 179,840
|
jsdoc
| false
|
|
update
|
private void update(MessageDigest digest, @Nullable Object source) {
    // Null sources contribute nothing to the digest.
    if (source == null) {
        return;
    }
    digest.update(getUpdateSourceBytes(source));
}
|
Update the digest with the bytes of the given source value, skipping null sources.
@param digest the digest to update
@param source the value to incorporate, or {@code null} to skip
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/system/ApplicationTemp.java
| 166
|
[
"digest",
"source"
] |
void
| true
| 2
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
process
|
private void process(final ShareAcknowledgementCommitCallbackRegistrationEvent event) {
    // Nothing to update when the share consume request manager is absent.
    if (requestManagers.shareConsumeRequestManager.isEmpty()) {
        return;
    }
    requestManagers.shareConsumeRequestManager.get()
        .setAcknowledgementCommitCallbackRegistered(event.isCallbackRegistered());
}
|
Process event indicating whether the AcknowledgeCommitCallbackHandler is configured by the user.
@param event Event containing a boolean to indicate if the callback handler is configured or not.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java
| 597
|
[
"event"
] |
void
| true
| 2
| 6.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
getInputStream
|
/**
 * Opens an input stream over the given zip entry, inflating on the fly when
 * the entry is deflated. The stream is registered with {@code this.resources}
 * so it is released together with the jar file.
 * @param contentEntry the entry to open
 * @return the (possibly decompressing) entry stream
 * @throws IOException on I/O error
 * @throws ZipException if the entry uses a compression method other than
 * {@code STORED} or {@code DEFLATED}
 */
private InputStream getInputStream(ZipContent.Entry contentEntry) throws IOException {
    int compression = contentEntry.getCompressionMethod();
    // Jar entries may only be stored or deflated per the zip spec.
    if (compression != ZipEntry.STORED && compression != ZipEntry.DEFLATED) {
        throw new ZipException("invalid compression method");
    }
    synchronized (this) {
        ensureOpen();
        InputStream inputStream = new JarEntryInputStream(contentEntry);
        try {
            if (compression == ZipEntry.DEFLATED) {
                inputStream = new JarEntryInflaterInputStream((JarEntryInputStream) inputStream, this.resources);
            }
            this.resources.addInputStream(inputStream);
            return inputStream;
        }
        catch (RuntimeException ex) {
            // Close eagerly on failure so the underlying entry data is released.
            inputStream.close();
            throw ex;
        }
    }
}
|
Return if an entry with the given name exists.
@param name the name to check
@return if the entry exists
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/jar/NestedJarFile.java
| 352
|
[
"contentEntry"
] |
InputStream
| true
| 5
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
setupDebugEnv
|
// Initializes NODE_DEBUG-based logging and, when --expose-internals is set,
// makes the internal builtin modules requirable.
function setupDebugEnv() {
  const { initializeDebugEnv } = require('internal/util/debuglog');
  initializeDebugEnv(process.env.NODE_DEBUG);
  if (getOptionValue('--expose-internals')) {
    const { BuiltinModule } = require('internal/bootstrap/realm');
    BuiltinModule.exposeInternals();
  }
}
|
Patch the process object with legacy properties and normalizations.
Replace `process.argv[0]` with `process.execPath`, preserving the original `argv[0]` value as `process.argv0`.
Replace `process.argv[1]` with the resolved absolute file path of the entry point, if found.
@param {boolean} expandArgv1 - Whether to replace `process.argv[1]` with the resolved absolute file path of
the main entry point.
@returns {string}
|
javascript
|
lib/internal/process/pre_execution.js
| 444
|
[] | false
| 2
| 6.8
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
cacheExpression
|
/**
 * Ensures the expression can be referenced multiple times by caching it in a
 * temp variable, unless it is already a stable reference (a generated
 * identifier or an emit-helper name).
 * @param node The expression to cache.
 */
function cacheExpression(node: Expression): Identifier {
    const reusable = isGeneratedIdentifier(node) || (getEmitFlags(node) & EmitFlags.HelperName) !== 0;
    if (reusable) {
        return node as Identifier;
    }
    const temp = factory.createTempVariable(hoistVariableDeclaration);
    emitAssignment(temp, node, /*location*/ node);
    return temp;
}
|
Visits an ElementAccessExpression that contains a YieldExpression.
@param node The node to visit.
|
typescript
|
src/compiler/transformers/generators.ts
| 2,088
|
[
"node"
] | true
| 3
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
to_sql
|
def to_sql(
    self,
    frame,
    name: str,
    if_exists: Literal["fail", "replace", "append", "delete_rows"] = "fail",
    index: bool = True,
    index_label=None,
    schema: str | None = None,
    chunksize: int | None = None,
    dtype: DtypeArg | None = None,
    method: Literal["multi"] | Callable | None = None,
    engine: str = "auto",
    **engine_kwargs,
) -> int | None:
    """
    Write records stored in a DataFrame to a SQL database via an ADBC driver.
    Parameters
    ----------
    frame : DataFrame
    name : string
        Name of SQL table.
    if_exists : {'fail', 'replace', 'append', 'delete_rows'}, default 'fail'
        - fail: If the table exists, raise a ValueError.
        - replace: If table exists, drop it, recreate it, and insert data.
        - append: If table exists, insert data. Create if does not exist.
        - delete_rows: If a table exists, delete all records and insert data.
    index : boolean, default True
        Write DataFrame index as a column.
    index_label : string or sequence, default None
        Raises NotImplementedError
    schema : string, default None
        Name of SQL schema in database to write to (if database flavor
        supports this). If specified, this overwrites the default
        schema of the SQLDatabase object.
    chunksize : int, default None
        Raises NotImplementedError
    dtype : single type or dict of column name to SQL type, default None
        Raises NotImplementedError
    method : {None, 'multi', callable}, default None
        Raises NotImplementedError
    engine : {'auto', 'sqlalchemy'}, default 'auto'
        Raises NotImplementedError if not set to 'auto'
    Returns
    -------
    int or None
        Number of rows ingested, as reported by the ADBC driver.
    """
    pa = import_optional_dependency("pyarrow")
    from adbc_driver_manager import Error
    # These pandas conveniences have no ADBC equivalent; fail loudly rather
    # than silently ignoring the caller's request.
    if index_label:
        raise NotImplementedError(
            "'index_label' is not implemented for ADBC drivers"
        )
    if chunksize:
        raise NotImplementedError("'chunksize' is not implemented for ADBC drivers")
    if dtype:
        raise NotImplementedError("'dtype' is not implemented for ADBC drivers")
    if method:
        raise NotImplementedError("'method' is not implemented for ADBC drivers")
    if engine != "auto":
        raise NotImplementedError(
            "engine != 'auto' not implemented for ADBC drivers"
        )
    # Schema-qualified name is used only in hand-built SQL statements;
    # adbc_ingest below takes name and schema as separate arguments.
    if schema:
        table_name = f"{schema}.{name}"
    else:
        table_name = name
    # pandas if_exists="append" will still create the
    # table if it does not exist; ADBC is more explicit with append/create
    # as applicable modes, so the semantics get blurred across
    # the libraries
    mode = "create"
    if self.has_table(name, schema):
        if if_exists == "fail":
            raise ValueError(f"Table '{table_name}' already exists.")
        elif if_exists == "replace":
            sql_statement = f"DROP TABLE {table_name}"
            self.execute(sql_statement).close()
        elif if_exists == "append":
            mode = "append"
        elif if_exists == "delete_rows":
            mode = "append"
            self.delete_rows(name, schema)
    try:
        tbl = pa.Table.from_pandas(frame, preserve_index=index)
    except pa.ArrowNotImplementedError as exc:
        raise ValueError("datatypes not supported") from exc
    with self.con.cursor() as cur:
        try:
            total_inserted = cur.adbc_ingest(
                table_name=name, data=tbl, mode=mode, db_schema_name=schema
            )
        except Error as exc:
            raise DatabaseError(
                f"Failed to insert records on table={name} with {mode=}"
            ) from exc
    self.con.commit()
    return total_inserted
|
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If the table exists, raise a ValueError.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
- delete_rows: If a table exists, delete all records and insert data.
index : boolean, default True
Write DataFrame index as a column.
index_label : string or sequence, default None
Raises NotImplementedError
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If specified, this overwrites the default
schema of the SQLDatabase object.
chunksize : int, default None
Raises NotImplementedError
dtype : single type or dict of column name to SQL type, default None
Raises NotImplementedError
method : {None, 'multi', callable}, default None
Raises NotImplementedError
engine : {'auto', 'sqlalchemy'}, default 'auto'
Raises NotImplementedError if not set to 'auto'
|
python
|
pandas/io/sql.py
| 2,324
|
[
"self",
"frame",
"name",
"if_exists",
"index",
"index_label",
"schema",
"chunksize",
"dtype",
"method",
"engine"
] |
int | None
| true
| 13
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
flatten
|
def flatten(self, order='C'):
    """
    Return a copy of the matrix collapsed into a single row.
    All `N` elements of the matrix are placed into a `(1, N)` matrix.
    Parameters
    ----------
    order : {'C', 'F', 'A', 'K'}, optional
        'C' flattens in row-major (C-style) order and 'F' in column-major
        (Fortran-style) order. 'A' uses column-major order only when the
        matrix is Fortran *contiguous* in memory, row-major otherwise.
        'K' follows the order in which the elements occur in memory.
        The default is 'C'.
    Returns
    -------
    y : matrix
        A `(1, N)` matrix holding a copy of all `N` elements of the
        original matrix.
    See Also
    --------
    ravel : Return a flattened array.
    flat : A 1-D flat iterator over the matrix.
    Examples
    --------
    >>> m = np.matrix([[1,2], [3,4]])
    >>> m.flatten()
    matrix([[1, 2, 3, 4]])
    >>> m.flatten('F')
    matrix([[1, 3, 2, 4]])
    """
    # Delegate to the ndarray implementation; matrix subclassing keeps the
    # (1, N) shape of the result.
    return N.ndarray.flatten(self, order)
|
Return a flattened copy of the matrix.
All `N` elements of the matrix are placed into a single row.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
'C' means to flatten in row-major (C-style) order. 'F' means to
flatten in column-major (Fortran-style) order. 'A' means to
flatten in column-major order if `m` is Fortran *contiguous* in
memory, row-major order otherwise. 'K' means to flatten `m` in
the order the elements occur in memory. The default is 'C'.
Returns
-------
y : matrix
A copy of the matrix, flattened to a `(1, N)` matrix where `N`
is the number of elements in the original matrix.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the matrix.
Examples
--------
>>> m = np.matrix([[1,2], [3,4]])
>>> m.flatten()
matrix([[1, 2, 3, 4]])
>>> m.flatten('F')
matrix([[1, 3, 2, 4]])
|
python
|
numpy/matrixlib/defmatrix.py
| 382
|
[
"self",
"order"
] | false
| 1
| 6.48
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
max
|
/**
 * Computes the maximum value of `array`. If `array` is empty or falsey,
 * `undefined` is returned.
 *
 * @param {Array} array The array to iterate over.
 * @returns {*} Returns the maximum value, else `undefined`.
 */
function max(array) {
  if (array && array.length) {
    return baseExtremum(array, identity, baseGt);
  }
  return undefined;
}
|
Computes the maximum value of `array`. If `array` is empty or falsey,
`undefined` is returned.
@static
@since 0.1.0
@memberOf _
@category Math
@param {Array} array The array to iterate over.
@returns {*} Returns the maximum value.
@example
_.max([4, 2, 8, 6]);
// => 8
_.max([]);
// => undefined
|
javascript
|
lodash.js
| 16,415
|
[
"array"
] | false
| 3
| 7.52
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
same_two_models
|
def same_two_models(
    gm: torch.fx.GraphModule,
    opt_gm: torch.fx.GraphModule,
    example_inputs: Sequence[Any],
    only_fwd: bool = False,
    *,
    require_fp64: bool = False,
    ignore_non_fp: bool = False,
) -> bool:
    """
    Check that two graph modules produce outputs of the same accuracy.
    Args:
        gm: reference graph module.
        opt_gm: optimized graph module to compare against ``gm``.
        example_inputs: inputs passed to both modules via ``run_fwd_maybe_bwd``.
        only_fwd: forwarded to ``run_fwd_maybe_bwd`` (presumably skips the
            backward pass when True -- confirm against its definition).
        require_fp64: if True, raise an error if we are unable to calculate
            the fp64 reference outputs.
        ignore_non_fp: if True, do not compare outputs which are not floating
            point. This is mostly useful for the minifier (which wants to
            avoid quantizing floating point error into integer/boolean error).
    Returns:
        True when outputs match within ``config.repro_tolerance``; also True
        when ``opt_gm`` itself fails to run (treated as an unrelated issue).
    """
    from .utils import same
    ref = run_fwd_maybe_bwd(gm, example_inputs, only_fwd)
    # Optionally compute an fp64 "golden" reference on a deep copy so the
    # original module and inputs are left untouched.
    fp64_ref = None
    if config.same_two_models_use_fp64:
        try:
            fp64_model, fp64_examples = cast_to_fp64(
                copy.deepcopy(gm), clone_inputs_retaining_gradness(example_inputs)
            )
            fp64_ref = run_fwd_maybe_bwd(fp64_model, fp64_examples, only_fwd)
        except Exception:
            if require_fp64:
                raise RuntimeError(  # noqa: B904
                    "Could not generate fp64 outputs, workaround with torch._dynamo.config.same_two_models_use_fp64 = False"
                )
            log.warning("Could not generate fp64 outputs")
    try:
        res = run_fwd_maybe_bwd(opt_gm, example_inputs, only_fwd)
    except Exception:
        # This means that the minified graph is bad/exposes a different problem.
        # As we are checking accuracy here, lets log the exception and return True.
        log.exception(
            "While minifying the program in accuracy minification mode, "
            "ran into a runtime exception which is likely an unrelated issue."
            " Skipping this graph."
        )
        return True
    passing = same(
        ref,
        res,
        fp64_ref,
        tol=config.repro_tolerance,
        equal_nan=True,
        ignore_non_fp=ignore_non_fp,
    )
    return passing
|
Check two models have same accuracy.
require_fp64: if True, raise an error if we are unable to calculate the fp64 reference
ignore_non_fp: if True, do not compare outputs which are not floating point. This
is mostly useful for the minifier (which wants to avoid quantizing floating point
error into integer/boolean error)
|
python
|
torch/_dynamo/debug_utils.py
| 396
|
[
"gm",
"opt_gm",
"example_inputs",
"only_fwd",
"require_fp64",
"ignore_non_fp"
] |
bool
| true
| 3
| 6.64
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
getGuardNames
|
/**
 * Collects the display names of the guards of the given type declared on a
 * route, resolving each guard class/function to its name.
 * @param child The route configuration to inspect.
 * @param type The guard property to read (e.g. canActivate).
 * @returns The guard names; empty when the route declares none.
 */
function getGuardNames(child: AngularRoute, type: RouteGuard): string[] {
  // A route may not declare this guard type at all; default to no guards.
  const guards = child?.[type] || [];
  // `map` always returns an array (an empty array is truthy), so the
  // original `names || []` fallback was dead code and has been removed.
  return guards.map((g: any) => getClassOrFunctionName(g));
}
|
Gets the set of currently active Route configuration objects from the router state.
This function synchronously reads the current router state without waiting for navigation events.
@param router - The Angular Router instance
@returns A Set containing all Route configuration objects that are currently active
@example
```ts
const activeRoutes = getActiveRouteConfigs(router);
// activeRoutes is a Set<Route> containing all currently active route configurations
```
|
typescript
|
devtools/projects/ng-devtools-backend/src/lib/router-tree.ts
| 93
|
[
"child",
"type"
] | true
| 3
| 8.88
|
angular/angular
| 99,544
|
jsdoc
| false
|
|
getParameters
|
/**
 * Returns one {@link Parameter} per declared generic parameter type of this
 * invokable, pairing each type with its parameter annotations. The annotated
 * type slot for every parameter is left {@code null} here.
 */
@IgnoreJRERequirement
public final ImmutableList<Parameter> getParameters() {
    Type[] parameterTypes = getGenericParameterTypes();
    Annotation[][] annotations = getParameterAnnotations();
    // Placeholder slots (all null) where AnnotatedType info would otherwise go.
    @Nullable Object[] annotatedTypes =
        new Object[parameterTypes.length];
    ImmutableList.Builder<Parameter> builder = ImmutableList.builder();
    for (int i = 0; i < parameterTypes.length; i++) {
      builder.add(
          new Parameter(
              this, i, TypeToken.of(parameterTypes[i]), annotations[i], annotatedTypes[i]));
    }
    return builder.build();
}
|
Returns all declared parameters of this {@code Invokable}. Note that if this is a constructor
of a non-static inner class, unlike {@link Constructor#getParameterTypes}, the hidden {@code
this} parameter of the enclosing class is excluded from the returned parameters.
|
java
|
android/guava/src/com/google/common/reflect/Invokable.java
| 270
|
[] | true
| 2
| 6.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
removeElements
|
/**
 * Removes occurrences of the specified values, in the specified quantities,
 * from the given boolean array; subsequent elements are shifted left. A value
 * requested more times than it occurs is removed only as often as present.
 * @param array the input array; not modified, may be {@code null}
 * @param values the values to remove
 * @return a new array without the earliest-encountered occurrences of
 * {@code values}, or a clone of {@code array} when either input is empty
 */
public static boolean[] removeElements(final boolean[] array, final boolean... values) {
    if (isEmpty(array) || isEmpty(values)) {
        return clone(array);
    }
    final HashMap<Boolean, MutableInt> occurrences = new HashMap<>(2); // only two possible values here
    // Count how many of each value still needs to be removed.
    for (final boolean v : values) {
        increment(occurrences, Boolean.valueOf(v));
    }
    final BitSet toRemove = new BitSet();
    for (int i = 0; i < array.length; i++) {
        final boolean key = array[i];
        final MutableInt count = occurrences.get(key);
        if (count != null) {
            if (count.decrementAndGet() == 0) {
                // Removal quota for this value exhausted; stop matching it.
                occurrences.remove(key);
            }
            toRemove.set(i);
        }
    }
    return (boolean[]) removeAt(array, toRemove);
}
|
Removes occurrences of specified elements, in specified quantities,
from the specified array. All subsequent elements are shifted left.
For any element-to-be-removed specified in greater quantities than
contained in the original array, no change occurs beyond the
removal of the existing matching items.
<p>
This method returns a new array with the same elements of the input
array except for the earliest-encountered occurrences of the specified
elements. The component type of the returned array is always the same
as that of the input array.
</p>
<pre>
ArrayUtils.removeElements(null, true, false) = null
ArrayUtils.removeElements([], true, false) = []
ArrayUtils.removeElements([true], false, false) = [true]
ArrayUtils.removeElements([true, false], true, true) = [false]
ArrayUtils.removeElements([true, false, true], true) = [false, true]
ArrayUtils.removeElements([true, false, true], true, true) = [false]
</pre>
@param array the input array, will not be modified, and may be {@code null}.
@param values the values to be removed.
@return A new array containing the existing elements except the
earliest-encountered occurrences of the specified elements.
@since 3.0.1
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 5,915
|
[
"array"
] | true
| 6
| 7.44
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
advance
|
/**
 * Moves {@code node} to the next node from {@code nodeIterator} and resets
 * {@code successorIterator} to iterate that node's successors. Must only be
 * called once the current {@code successorIterator} is exhausted.
 * @return {@code false} when there are no nodes left, {@code true} otherwise
 */
final boolean advance() {
  checkState(!successorIterator.hasNext());
  if (nodeIterator.hasNext()) {
    node = nodeIterator.next();
    successorIterator = graph.successors(node).iterator();
    return true;
  }
  return false;
}
|
Called after {@link #successorIterator} is exhausted. Advances {@link #node} to the next node
and updates {@link #successorIterator} to iterate through the successors of {@link #node}.
|
java
|
android/guava/src/com/google/common/graph/EndpointPairIterator.java
| 55
|
[] | true
| 2
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
|
shimOrRewriteImportOrRequireCall
|
/**
 * Rewrites a dynamic import/require call so that its module specifier (the
 * first argument) is transformed: string literals are passed through
 * `rewriteModuleSpecifier`, and any other expression is wrapped in the
 * rewrite-relative-import-extensions emit helper. Remaining arguments are
 * visited with the regular visitor.
 * @param node The call expression to rewrite.
 */
function shimOrRewriteImportOrRequireCall(node: CallExpression): CallExpression {
    return factory.updateCallExpression(
        node,
        node.expression,
        /*typeArguments*/ undefined,
        visitNodes(node.arguments, (arg: Expression) => {
            // Only the first argument is the module specifier.
            if (arg === node.arguments[0]) {
                return isStringLiteralLike(arg)
                    ? rewriteModuleSpecifier(arg, compilerOptions)
                    : emitHelpers().createRewriteRelativeImportExtensionsHelper(arg);
            }
            return visitor(arg);
        }, isExpression),
    );
}
|
Visits the body of a Block to hoist declarations.
@param node The node to visit.
|
typescript
|
src/compiler/transformers/module/module.ts
| 1,192
|
[
"node"
] | true
| 3
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
index
|
def index(a, sub, start=0, end=None):
    """
    Like `find`, but raises :exc:`ValueError` when the substring is not found.
    Parameters
    ----------
    a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
    sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
    start, end : array_like, with any integer dtype, optional
    Returns
    -------
    out : ndarray
        Output array of ints.
    See Also
    --------
    find, str.index
    Examples
    --------
    >>> import numpy as np
    >>> a = np.array(["Computer Science"])
    >>> np.strings.index(a, "Science", start=0, end=None)
    array([9])
    """
    # An unspecified end means "search to the end of the string".
    if end is None:
        end = MAX
    return _index_ufunc(a, sub, start, end)
|
Like `find`, but raises :exc:`ValueError` when the substring is not found.
Parameters
----------
a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
start, end : array_like, with any integer dtype, optional
Returns
-------
out : ndarray
Output array of ints.
See Also
--------
find, str.index
Examples
--------
>>> import numpy as np
>>> a = np.array(["Computer Science"])
>>> np.strings.index(a, "Science", start=0, end=None)
array([9])
|
python
|
numpy/_core/strings.py
| 337
|
[
"a",
"sub",
"start",
"end"
] | false
| 2
| 7.36
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
forRequiredField
|
/**
 * Create a new {@link AutowiredFieldValueResolver} for the specified field
 * where injection is required.
 * @param fieldName the field name
 * @return a new {@link AutowiredFieldValueResolver} instance
 */
public static AutowiredFieldValueResolver forRequiredField(String fieldName) {
    return new AutowiredFieldValueResolver(fieldName, true, null);
}
|
Create a new {@link AutowiredFieldValueResolver} for the specified field
where injection is required.
@param fieldName the field name
@return a new {@link AutowiredFieldValueResolver} instance
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/AutowiredFieldValueResolver.java
| 88
|
[
"fieldName"
] |
AutowiredFieldValueResolver
| true
| 1
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
remove
|
/**
 * If {@code (key, value)} is currently in the map, removes it and returns
 * true; otherwise returns false. Lock-free: the value is first CASed to zero
 * and only then is the mapping removed, so concurrent updaters never observe
 * a stale non-zero value.
 * @param key the key whose mapping should be removed
 * @param value the value the key must currently map to
 * @return whether the pair was present and removed
 */
boolean remove(K key, long value) {
  AtomicLong atomic = map.get(key);
  if (atomic == null) {
    return false;
  }
  long oldValue = atomic.get();
  if (oldValue != value) {
    return false;
  }
  if (oldValue == 0L || atomic.compareAndSet(oldValue, 0L)) {
    // only remove after setting to zero, to avoid concurrent updates
    map.remove(key, atomic);
    // succeed even if the remove fails, since the value was already adjusted
    return true;
  }
  // value changed
  return false;
}
|
If {@code (key, value)} is currently in the map, this method removes it and returns true;
otherwise, this method returns false.
|
java
|
android/guava/src/com/google/common/util/concurrent/AtomicLongMap.java
| 267
|
[
"key",
"value"
] | true
| 5
| 6
|
google/guava
| 51,352
|
javadoc
| false
|
|
standalone_compile
|
def standalone_compile(
    gm: torch.fx.GraphModule,
    example_inputs: list[InputType],
    *,
    dynamic_shapes: Literal[
        "from_example_inputs", "from_tracing_context", "from_graph"
    ] = "from_graph",
    options: Optional[dict[str, Any]] = None,
    aot: bool = False,  # AOT mode, which uses BundledAOTAutogradCache
) -> CompiledArtifact:
    """
    Precompilation API for inductor.
    .. code-block:: python
        compiled_artifact = torch._inductor.standalone_compile(gm, args)
        compiled_artifact.save(path=path, format="binary")
        # Later on a new process
        loaded = torch._inductor.CompiledArtifact.load(path=path, format="binary")
        compiled_out = loaded(*args)
    Args:
        gm: Graph Module
        example_inputs: Inputs for the graph module
        dynamic_shapes: If "from_graph" (default), we will use the dynamic
            shapes in the passed-in graph module.
            If "from_tracing_context", we use the dynamic shape info in the
            ambient tracing context.
            If "from_example_inputs", we will specialize the graph on the
            example_inputs.
        options: Inductor compilation options
    Returns:
        CompiledArtifact that can be saved to disk or invoked directly.
    """
    # Alias the implementation so it does not shadow this wrapper's name.
    from .standalone_compile import standalone_compile as _standalone_compile
    return _standalone_compile(
        gm,
        example_inputs,
        dynamic_shapes=dynamic_shapes,
        options=options or {},
        aot=aot,
    )
|
Precompilation API for inductor.
.. code-block:: python
compiled_artifact = torch._inductor.standalone_compile(gm, args)
compiled_artifact.save(path=path, format="binary")
# Later on a new process
loaded = torch._inductor.CompiledArtifact.load(path=path, format="binary")
compiled_out = loaded(*args)
Args:
gm: Graph Module
example_inputs: Inputs for the graph module
dynamic_shapes: If "from_graph" (default), we will use the dynamic
shapes in the passed-in graph module.
If "from_tracing_context", we use the dynamic shape info in the
ambient tracing context.
If "from_example_inputs", we will specialize the graph on the
example_inputs.
options: Inductor compilation options
Returns:
CompiledArtifact that can be saved to disk or invoked directly.
|
python
|
torch/_inductor/__init__.py
| 406
|
[
"gm",
"example_inputs",
"dynamic_shapes",
"options",
"aot"
] |
CompiledArtifact
| true
| 2
| 7.6
|
pytorch/pytorch
| 96,034
|
google
| false
|
_define_gemm_instance
|
def _define_gemm_instance(
    self,
    op: GemmOperation,
    evt_name: Optional[str] = None,
) -> tuple[str, str]:
    """Defines and renders the Cutlass / CUDA C++ code for a given GEMM operation instance.
    This function uses the Cutlass library to generate key parts of the codegen process. General Matrix Multiply
    forms a core part of a number of scientific applications, so this efficient and adaptable implementation is
    crucial.
    Args:
        op (cutlass_library.gemm_op.GemmOperation): This is the core GEMM operation that we are defining and rendering.
        evt_name: accepted for interface compatibility; not used in this
            method (TODO confirm whether subclasses/overrides consume it).
    Returns:
        tuple[str, str]: A tuple where the first part is a string that constitutes the defined GEMM operation in C++
        code (render) and the second part is the string that specifies the operation type.
    """
    assert cutlass_utils.try_import_cutlass()
    import cutlass_library.gemm_operation as cutlass_gemm_op
    import cutlass_library.library as cutlass_lib
    # Sparse GEMMs need a dedicated emitter; all other kinds use the generic one.
    if op.gemm_kind == cutlass_lib.GemmKind.Sparse:
        emitter = cutlass_gemm_op.EmitSparseGemmInstance()
    else:
        emitter = cutlass_gemm_op.EmitGemmInstance()
    op_def = emitter.emit(op)
    # Force the universal device-level GEMM API in the emitted code.
    op_def = op_def.replace(
        "cutlass::gemm::device::Gemm", "cutlass::gemm::device::GemmUniversal"
    )
    if op.gemm_kind != cutlass_lib.GemmKind.Sparse:
        op_def = op_def.replace("false,", "")
    # The third line of the emitted code is expected to be `using <OpType> = ...`;
    # extract the alias name so callers can reference the instantiated type.
    pattern = re.compile(r"\s*using\s(.*?)\s=")
    decl = op_def.split("\n")[2]
    match = pattern.match(decl)
    if match is None:
        raise RuntimeError("Invalid Gemm config: \n" + op_def)
    op_type = match.groups()[0]
    return op_def, op_type
|
Defines and renders the Cutlass / CUDA C++ code for a given GEMM operation instance.
This function uses the Cutlass library to generate key parts of the codegen process. General Matrix Multiply
forms a core part of a number of scientific applications, so this efficient and adaptable implementation is
crucial.
Args:
op (cutlass_library.gemm_op.GemmOperation): This is the core GEMM operation that we are defining and rendering.
Returns:
tuple[str, str]: A tuple where the first part is a string that constitutes the defined GEMM operation in C++
code (render) and the second part is the string that specifies the operation type.
|
python
|
torch/_inductor/codegen/cuda/gemm_template.py
| 1,827
|
[
"self",
"op",
"evt_name"
] |
tuple[str, str]
| true
| 5
| 7.92
|
pytorch/pytorch
| 96,034
|
google
| false
|
charsEndIndex
|
/**
 * Used by `_.trim` and `_.trimEnd` to get the index of the last string symbol
 * that is not found in the character symbols.
 *
 * @private
 * @param {Array} strSymbols The string symbols to inspect.
 * @param {Array} chrSymbols The character symbols to find.
 * @returns {number} Returns the index of the last unmatched string symbol.
 */
function charsEndIndex(strSymbols, chrSymbols) {
  var index = strSymbols.length;
  while (index--) {
    if (baseIndexOf(chrSymbols, strSymbols[index], 0) < 0) {
      break;
    }
  }
  return index;
}
|
Used by `_.trim` and `_.trimEnd` to get the index of the last string symbol
that is not found in the character symbols.
@private
@param {Array} strSymbols The string symbols to inspect.
@param {Array} chrSymbols The character symbols to find.
@returns {number} Returns the index of the last unmatched string symbol.
|
javascript
|
lodash.js
| 1,090
|
[
"strSymbols",
"chrSymbols"
] | false
| 3
| 6.08
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
filter
|
/**
 * Returns a FailableStream consisting of the elements of this stream that
 * match the given FailablePredicate.
 * <p>This is an intermediate operation; it must not be called after a
 * terminal operation.</p>
 * @param predicate a non-interfering, stateless predicate to apply to each
 * element to determine if it should be included
 * @return this stream, for chaining
 */
public FailableStream<O> filter(final FailablePredicate<O, ?> predicate) {
    assertNotTerminated();
    // Adapt the failable predicate to a java.util.function.Predicate.
    stream = stream.filter(Functions.asPredicate(predicate));
    return this;
}
|
Returns a FailableStream consisting of the elements of this stream that match
the given FailablePredicate.
<p>
This is an intermediate operation.
</p>
@param predicate a non-interfering, stateless predicate to apply to each
element to determine if it should be included.
@return the new stream.
|
java
|
src/main/java/org/apache/commons/lang3/Streams.java
| 335
|
[
"predicate"
] | true
| 1
| 6.72
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
get_indexer_non_unique
|
def get_indexer_non_unique(
    self, target
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
    """
    Compute indexer and mask for new index given the current index.
    The indexer should be then used as an input to ndarray.take to align the
    current data to the new index.
    Parameters
    ----------
    target : Index
        An iterable containing the values to be used for computing indexer.
    Returns
    -------
    indexer : np.ndarray[np.intp]
        Integers from 0 to n - 1 indicating that the index at these
        positions matches the corresponding target values. Missing values
        in the target are marked by -1.
    missing : np.ndarray[np.intp]
        An indexer into the target of the values not found.
        These correspond to the -1 in the indexer array.
    See Also
    --------
    Index.get_indexer : Computes indexer and mask for new index given
        the current index.
    Index.get_indexer_for : Returns an indexer even when non-unique.
    Examples
    --------
    >>> index = pd.Index(["c", "b", "a", "b", "b"])
    >>> index.get_indexer_non_unique(["b", "b"])
    (array([1, 3, 4, 1, 3, 4]), array([], dtype=int64))
    In the example below there are no matched values.
    >>> index = pd.Index(["c", "b", "a", "b", "b"])
    >>> index.get_indexer_non_unique(["q", "r", "t"])
    (array([-1, -1, -1]), array([0, 1, 2]))
    For this reason, the returned ``indexer`` contains only integers equal to -1.
    It demonstrates that there's no match between the index and the ``target``
    values at these positions. The mask [0, 1, 2] in the return value shows that
    the first, second, and third elements are missing.
    Notice that the return value is a tuple containing two items. In the example
    below the first item is an array of locations in ``index``. The second
    item is a mask showing that the first and third elements are missing.
    >>> index = pd.Index(["c", "b", "a", "b", "b"])
    >>> index.get_indexer_non_unique(["f", "b", "s"])
    (array([-1, 1, 3, 4, -1]), array([0, 2]))
    """
    target = self._maybe_cast_listlike_indexer(target)
    if not self._should_compare(target) and not self._should_partial_index(target):
        # _should_partial_index e.g. IntervalIndex with numeric scalars
        # that can be matched to Interval scalars.
        return self._get_indexer_non_comparable(target, method=None, unique=False)
    pself, ptarget = self._maybe_downcast_for_indexing(target)
    if pself is not self or ptarget is not target:
        # Either side was downcast; redo the lookup on the adjusted pair.
        return pself.get_indexer_non_unique(ptarget)
    if self.dtype != target.dtype:
        # TODO: if object, could use infer_dtype to preempt costly
        # conversion if still non-comparable?
        dtype = self._find_common_type_compat(target)
        this = self.astype(dtype, copy=False)
        that = target.astype(dtype, copy=False)
        return this.get_indexer_non_unique(that)
    # TODO: get_indexer has fastpaths for both Categorical-self and
    # Categorical-target. Can we do something similar here?
    # Note: _maybe_downcast_for_indexing ensures we never get here
    # with MultiIndex self and non-Multi target
    if self._is_multi and target._is_multi:
        engine = self._engine
        # Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]" has
        # no attribute "_extract_level_codes"
        tgt_values = engine._extract_level_codes(target)  # type: ignore[union-attr]
    else:
        tgt_values = target._get_engine_target()
    indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
    return ensure_platform_int(indexer), ensure_platform_int(missing)
|
Compute indexer and mask for new index given the current index.
The indexer should be then used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : Index
An iterable containing the values to be used for computing indexer.
Returns
-------
indexer : np.ndarray[np.intp]
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
missing : np.ndarray[np.intp]
An indexer into the target of the values not found.
These correspond to the -1 in the indexer array.
See Also
--------
Index.get_indexer : Computes indexer and mask for new index given
the current index.
Index.get_indexer_for : Returns an indexer even when non-unique.
Examples
--------
>>> index = pd.Index(["c", "b", "a", "b", "b"])
>>> index.get_indexer_non_unique(["b", "b"])
(array([1, 3, 4, 1, 3, 4]), array([], dtype=int64))
In the example below there are no matched values.
>>> index = pd.Index(["c", "b", "a", "b", "b"])
>>> index.get_indexer_non_unique(["q", "r", "t"])
(array([-1, -1, -1]), array([0, 1, 2]))
For this reason, the returned ``indexer`` contains only integers equal to -1.
It demonstrates that there's no match between the index and the ``target``
values at these positions. The mask [0, 1, 2] in the return value shows that
the first, second, and third elements are missing.
Notice that the return value is a tuple containing two items. In the example
below the first item is an array of locations in ``index``. The second
item is a mask showing that the first and third elements are missing.
>>> index = pd.Index(["c", "b", "a", "b", "b"])
>>> index.get_indexer_non_unique(["f", "b", "s"])
(array([-1, 1, 3, 4, -1]), array([0, 2]))
|
python
|
pandas/core/indexes/base.py
| 6,070
|
[
"self",
"target"
] |
tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]
| true
| 9
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
generateKey
|
/**
 * Computes the cache key for the given operation context, failing fast when
 * the configured key generator returns {@code null}.
 * @param context the context for the cache operation
 * @param result the result value made available to the key generator
 * @return the computed key (never {@code null})
 * @throws IllegalArgumentException if the key generator produced {@code null}
 */
private Object generateKey(CacheOperationContext context, @Nullable Object result) {
    Object key = context.generateKey(result);
    if (key == null) {
        throw new IllegalArgumentException("""
                Null key returned for cache operation [%s]. If you are using named parameters, \
                ensure that the compiler uses the '-parameters' flag."""
                .formatted(context.metadata.operation));
    }
    if (logger.isTraceEnabled()) {
        logger.trace("Computed cache key '" + key + "' for operation " + context.metadata.operation);
    }
    return key;
}
|
Collect a {@link CachePutRequest} for every {@link CacheOperation}
using the specified result value.
@param contexts the contexts to handle
@param result the result value
@param putRequests the collection to update
|
java
|
spring-context/src/main/java/org/springframework/cache/interceptor/CacheAspectSupport.java
| 742
|
[
"context",
"result"
] |
Object
| true
| 3
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.