function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
isWhitespace
|
/**
 * Tests whether the CharSequence consists solely of whitespace characters,
 * as defined by {@link Character#isWhitespace(char)}.
 *
 * <p>{@code null} returns {@code false}; an empty sequence returns {@code true}.</p>
 *
 * @param cs the CharSequence to check, may be null.
 * @return {@code true} if only whitespace and non-null.
 */
public static boolean isWhitespace(final CharSequence cs) {
    if (cs == null) {
        return false;
    }
    // Scan each character, bailing out on the first non-whitespace one.
    for (int index = 0, length = cs.length(); index < length; index++) {
        if (!Character.isWhitespace(cs.charAt(index))) {
            return false;
        }
    }
    // Empty sequences fall through here and are considered whitespace.
    return true;
}
|
Tests if the CharSequence contains only whitespace.
<p>
Whitespace is defined by {@link Character#isWhitespace(char)}.
</p>
<p>
{@code null} will return {@code false}. An empty CharSequence (length()=0) will return {@code true}.
</p>
<pre>
StringUtils.isWhitespace(null) = false
StringUtils.isWhitespace("") = true
StringUtils.isWhitespace(" ") = true
StringUtils.isWhitespace("abc") = false
StringUtils.isWhitespace("ab2c") = false
StringUtils.isWhitespace("ab-c") = false
</pre>
@param cs the CharSequence to check, may be null.
@return {@code true} if only contains whitespace, and is non-null.
@since 2.0
@since 3.0 Changed signature from isWhitespace(String) to isWhitespace(CharSequence)
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 3,789
|
[
"cs"
] | true
| 4
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
wrapperClone
|
/**
 * Creates a clone of `wrapper`.
 *
 * @private
 * @param {Object} wrapper The wrapper to clone.
 * @returns {Object} Returns the cloned wrapper.
 */
function wrapperClone(wrapper) {
  // Lazy wrappers know how to clone themselves.
  if (wrapper instanceof LazyWrapper) {
    return wrapper.clone();
  }
  // Otherwise rebuild a LodashWrapper and copy the bookkeeping fields over.
  var cloned = new LodashWrapper(wrapper.__wrapped__, wrapper.__chain__);
  cloned.__actions__ = copyArray(wrapper.__actions__);
  cloned.__index__ = wrapper.__index__;
  cloned.__values__ = wrapper.__values__;
  return cloned;
}
|
Creates a clone of `wrapper`.
@private
@param {Object} wrapper The wrapper to clone.
@returns {Object} Returns the cloned wrapper.
|
javascript
|
lodash.js
| 6,908
|
[
"wrapper"
] | false
| 2
| 6.24
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
nextTokenIsCurlyBraceOnSameLineAsCursor
|
function nextTokenIsCurlyBraceOnSameLineAsCursor(precedingToken: Node, current: Node, lineAtPosition: number, sourceFile: SourceFile): NextTokenKind {
const nextToken = findNextToken(precedingToken, current, sourceFile);
if (!nextToken) {
return NextTokenKind.Unknown;
}
if (nextToken.kind === SyntaxKind.OpenBraceToken) {
// open braces are always indented at the parent level
return NextTokenKind.OpenBrace;
}
else if (nextToken.kind === SyntaxKind.CloseBraceToken) {
// close braces are indented at the parent level if they are located on the same line with cursor
// this means that if new line will be added at $ position, this case will be indented
// class A {
// $
// }
/// and this one - not
// class A {
// $}
const nextTokenStartLine = getStartLineAndCharacterForNode(nextToken, sourceFile).line;
return lineAtPosition === nextTokenStartLine ? NextTokenKind.CloseBrace : NextTokenKind.Unknown;
}
return NextTokenKind.Unknown;
}
|
Determines the kind of the token that follows the preceding token:
`OpenBrace` for `{` (always indented at the parent level), `CloseBrace` for `}` only when the close brace is located on the same line as the cursor, and `Unknown` otherwise.
|
typescript
|
src/services/formatting/smartIndenter.ts
| 365
|
[
"precedingToken",
"current",
"lineAtPosition",
"sourceFile"
] | true
| 6
| 8.48
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
of
|
/**
 * Creates a range with the specified minimum and maximum values (both inclusive).
 * The arguments may be passed in either order (min,max) or (max,min); the
 * getMinimum and getMaximum methods will return the correct values.
 *
 * @param fromInclusive the first value that defines the edge of the range, inclusive.
 * @param toInclusive the second value that defines the edge of the range, inclusive.
 * @return the range object, not null.
 * @throws IllegalArgumentException if either element is null.
 */
public static DoubleRange of(final Double fromInclusive, final Double toInclusive) {
    return new DoubleRange(fromInclusive, toInclusive);
}
|
Creates a range with the specified minimum and maximum values (both inclusive).
<p>
The range uses the natural ordering of the elements to determine where values lie in the range.
</p>
<p>
The arguments may be passed in the order (min,max) or (max,min). The getMinimum and getMaximum methods will return the correct values.
</p>
@param fromInclusive the first value that defines the edge of the range, inclusive.
@param toInclusive the second value that defines the edge of the range, inclusive.
@return the range object, not null.
@throws IllegalArgumentException if either element is null.
|
java
|
src/main/java/org/apache/commons/lang3/DoubleRange.java
| 68
|
[
"fromInclusive",
"toInclusive"
] |
DoubleRange
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
load
|
/**
 * Loads auto-configuration replacements from the classpath. Replacements are
 * read from every resource found at the location derived from the annotation's
 * fully-qualified name, and merged into a single map of auto-configuration
 * class name to replacement class name.
 *
 * @param annotation the annotation whose name determines the resource location
 * @param classLoader the class loader to use, or {@code null} to let
 *        {@code decideClassloader} pick one
 * @return the merged replacements
 */
static AutoConfigurationReplacements load(Class<?> annotation, @Nullable ClassLoader classLoader) {
    Assert.notNull(annotation, "'annotation' must not be null");
    ClassLoader classLoaderToUse = decideClassloader(classLoader);
    // Resource path is built from the annotation's fully-qualified name.
    String location = String.format(LOCATION, annotation.getName());
    Enumeration<URL> urls = findUrlsInClasspath(classLoaderToUse, location);
    Map<String, String> replacements = new HashMap<>();
    // Later resources overwrite earlier entries with the same key.
    while (urls.hasMoreElements()) {
        URL url = urls.nextElement();
        replacements.putAll(readReplacements(url));
    }
    return new AutoConfigurationReplacements(replacements);
}
|
Loads the relocations from the classpath. Relocations are stored in files named
{@code META-INF/spring/full-qualified-annotation-name.replacements} on the
classpath. The file is loaded using {@link Properties#load(java.io.InputStream)}
with each entry containing an auto-configuration class name as the key and the
replacement class name as the value.
@param annotation annotation to load
@param classLoader class loader to use for loading
@return list of names of annotated classes
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/AutoConfigurationReplacements.java
| 93
|
[
"annotation",
"classLoader"
] |
AutoConfigurationReplacements
| true
| 2
| 7.44
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
column_or_1d
|
def column_or_1d(y, *, dtype=None, input_name="y", warn=False, device=None):
    """Ravel column or 1d numpy array, else raises an error.

    Parameters
    ----------
    y : array-like
        Input data.

    dtype : data-type, default=None
        Data type for `y`.

        .. versionadded:: 1.2

    input_name : str, default="y"
        The data name used to construct the error message.

        .. versionadded:: 1.8

    warn : bool, default=False
        To control display of warnings.

    device : device, default=None
        `device` object.
        See the :ref:`Array API User Guide <array_api>` for more details.

        .. versionadded:: 1.6

    Returns
    -------
    y : ndarray
        Output data.

    Raises
    ------
    ValueError
        If `y` is not a 1D array or a 2D array with a single row or column.

    Examples
    --------
    >>> from sklearn.utils.validation import column_or_1d
    >>> column_or_1d([1, 1])
    array([1, 1])
    """
    xp, _ = get_namespace(y)
    # Validate without enforcing 2D/finiteness; empty input is allowed.
    y = check_array(
        y,
        ensure_2d=False,
        dtype=dtype,
        input_name=input_name,
        ensure_all_finite=False,
        ensure_min_samples=0,
    )
    shape = y.shape
    if len(shape) == 1:
        # Already 1d: only ensure C-order on the requested device.
        return _asarray_with_order(
            xp.reshape(y, (-1,)), order="C", xp=xp, device=device
        )
    if len(shape) == 2 and shape[1] == 1:
        if warn:
            warnings.warn(
                (
                    "A column-vector y was passed when a 1d array was"
                    " expected. Please change the shape of y to "
                    "(n_samples, ), for example using ravel()."
                ),
                DataConversionWarning,
                stacklevel=2,
            )
        # Column vector (n_samples, 1): flatten to (n_samples,).
        return _asarray_with_order(
            xp.reshape(y, (-1,)), order="C", xp=xp, device=device
        )
    raise ValueError(
        "y should be a 1d array, got an array of shape {} instead.".format(shape)
    )
|
Ravel column or 1d numpy array, else raises an error.
Parameters
----------
y : array-like
Input data.
dtype : data-type, default=None
Data type for `y`.
.. versionadded:: 1.2
input_name : str, default="y"
The data name used to construct the error message.
.. versionadded:: 1.8
warn : bool, default=False
To control display of warnings.
device : device, default=None
`device` object.
See the :ref:`Array API User Guide <array_api>` for more details.
.. versionadded:: 1.6
Returns
-------
y : ndarray
Output data.
Raises
------
ValueError
If `y` is not a 1D array or a 2D array with a single row or column.
Examples
--------
>>> from sklearn.utils.validation import column_or_1d
>>> column_or_1d([1, 1])
array([1, 1])
|
python
|
sklearn/utils/validation.py
| 1,361
|
[
"y",
"dtype",
"input_name",
"warn",
"device"
] | false
| 5
| 7.44
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
visitBreakStatement
|
function visitBreakStatement(node: BreakStatement): Statement {
if (inStatementContainingYield) {
const label = findBreakTarget(node.label && idText(node.label));
if (label > 0) {
return createInlineBreak(label, /*location*/ node);
}
}
return visitEachChild(node, visitor, context);
}
|
Visits a BreakStatement, rewriting it into an inline break when it targets a break label inside a statement containing a YieldExpression.
@param node The node to visit.
|
typescript
|
src/compiler/transformers/generators.ts
| 1,775
|
[
"node"
] | true
| 4
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
bufferedPartitions
|
/**
 * Returns the set of {@link TopicPartition partitions} for which we have data
 * in the buffer: the partition of the in-progress fetch (if present and not
 * yet fully consumed) plus the partitions of all completed fetches.
 *
 * @return {@link TopicPartition Partition} set
 */
Set<TopicPartition> bufferedPartitions() {
    // Acquire the lock *before* entering try: if lock() failed inside the
    // try block, the finally clause would call unlock() on a lock this
    // thread does not hold, throwing IllegalMonitorStateException.
    lock.lock();
    try {
        final Set<TopicPartition> partitions = new HashSet<>();
        if (nextInLineFetch != null && !nextInLineFetch.isConsumed()) {
            partitions.add(nextInLineFetch.partition);
        }
        completedFetches.forEach(cf -> partitions.add(cf.partition));
        return partitions;
    } finally {
        lock.unlock();
    }
}
|
Return the set of {@link TopicPartition partitions} for which we have data in the buffer.
@return {@link TopicPartition Partition} set
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchBuffer.java
| 244
|
[] | true
| 3
| 7.76
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
sizeInBytes
|
/**
 * Returns the total size of this record batch in bytes: the log overhead
 * plus the length value stored in the batch header.
 */
@Override
public int sizeInBytes() {
    return LOG_OVERHEAD + buffer.getInt(LENGTH_OFFSET);
}
|
Gets the total size of this record batch in bytes: the log overhead plus the length stored in the batch header.
@return The size in bytes
|
java
|
clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java
| 221
|
[] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
recode_for_groupby
|
def recode_for_groupby(c: Categorical, sort: bool, observed: bool) -> Categorical:
    """
    Code the categories to ensure we can groupby for categoricals.

    If observed=True, we return a new Categorical with the observed
    categories only.

    If sort=False, return a copy of self, coded with categories as
    returned by .unique(), followed by any categories not appearing in
    the data. If sort=True, return self.

    This method is needed solely to ensure the categorical index of the
    GroupBy result has categories in the order of appearance in the data
    (GH-8868).

    Parameters
    ----------
    c : Categorical
    sort : bool
        The value of the sort parameter groupby was called with.
    observed : bool
        Account only for the observed values

    Returns
    -------
    Categorical
        If sort=False, the new categories are set to the order of
        appearance in codes (unless ordered=True, in which case the
        original order is preserved), followed by any unrepresented
        categories in the original order.
    """
    # we only care about observed values
    if observed:
        # In cases with c.ordered, this is equivalent to
        # return c.remove_unused_categories(), c
        take_codes = unique1d(c.codes[c.codes != -1])
        if sort:
            take_codes = np.sort(take_codes)
        # we recode according to the uniques
        categories = c.categories.take(take_codes)
        codes = recode_for_categories(c.codes, c.categories, categories, copy=False)
        # return a new categorical that maps our new codes and categories
        dtype = CategoricalDtype(categories, ordered=c.ordered)
        return Categorical._simple_new(codes, dtype=dtype)
    # Already sorted according to c.categories; all is fine
    if sort:
        return c
    # sort is always False from here on (the sort=True case returned above),
    # so the original re-sorting branch on unique_notnan_codes was dead code
    # and has been removed.
    # sort=False should order groups in as-encountered order (GH-8868)
    # GH:46909: Re-ordering codes faster than using (set|add|reorder)_categories
    # GH 38140: exclude nan from indexer for categories
    unique_notnan_codes = unique1d(c.codes[c.codes != -1])
    if (num_cat := len(c.categories)) > len(unique_notnan_codes):
        # GH 13179: All categories need to be present, even if missing from the data
        missing_codes = np.setdiff1d(
            np.arange(num_cat), unique_notnan_codes, assume_unique=True
        )
        take_codes = np.concatenate((unique_notnan_codes, missing_codes))
    else:
        take_codes = unique_notnan_codes
    return Categorical(c, c.categories.take(take_codes))
|
Code the categories to ensure we can groupby for categoricals.
If observed=True, we return a new Categorical with the observed
categories only.
If sort=False, return a copy of self, coded with categories as
returned by .unique(), followed by any categories not appearing in
the data. If sort=True, return self.
This method is needed solely to ensure the categorical index of the
GroupBy result has categories in the order of appearance in the data
(GH-8868).
Parameters
----------
c : Categorical
sort : bool
The value of the sort parameter groupby was called with.
observed : bool
Account only for the observed values
Returns
-------
Categorical
If sort=False, the new categories are set to the order of
appearance in codes (unless ordered=True, in which case the
original order is preserved), followed by any unrepresented
categories in the original order.
|
python
|
pandas/core/groupby/categorical.py
| 13
|
[
"c",
"sort",
"observed"
] |
Categorical
| true
| 7
| 6.64
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
csc_median_axis_0
|
def csc_median_axis_0(X):
    """Find the median across axis 0 of a CSC matrix.

    It is equivalent to doing np.median(X, axis=0).

    Parameters
    ----------
    X : sparse matrix of shape (n_samples, n_features)
        Input data. It should be of CSC format.

    Returns
    -------
    median : ndarray of shape (n_features,)
        Median.
    """
    if not (sp.issparse(X) and X.format == "csc"):
        # Use getattr so dense / format-less inputs raise the intended
        # TypeError instead of an AttributeError on ``X.format``.
        received = getattr(X, "format", type(X).__name__)
        raise TypeError("Expected matrix of CSC format, got %s" % received)
    indptr = X.indptr
    n_samples, n_features = X.shape
    median = np.zeros(n_features)
    # Each indptr slice delimits one column's stored (non-zero) values.
    for f_ind, (start, end) in enumerate(itertools.pairwise(indptr)):
        # Prevent modifying X in place
        data = np.copy(X.data[start:end])
        # Implicit zeros in this column, accounted for by the median helper.
        nz = n_samples - data.size
        median[f_ind] = _get_median(data, nz)
    return median
|
Find the median across axis 0 of a CSC matrix.
It is equivalent to doing np.median(X, axis=0).
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Input data. It should be of CSC format.
Returns
-------
median : ndarray of shape (n_features,)
Median.
|
python
|
sklearn/utils/sparsefuncs.py
| 690
|
[
"X"
] | false
| 4
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
isTypeMatch
|
/**
 * Checks whether the bean with the given name matches the specified type.
 * A FactoryBean is matched against the type of the object it creates,
 * unless the name is a factory dereference ({@code &name}).
 *
 * @param name the name of the bean to query
 * @param typeToMatch the type to match against
 * @return {@code true} if the bean (or the object the FactoryBean creates)
 *         is an instance of the given type
 * @throws NoSuchBeanDefinitionException if there is no bean with the given name
 */
@Override
public boolean isTypeMatch(String name, ResolvableType typeToMatch) throws NoSuchBeanDefinitionException {
    String beanName = BeanFactoryUtils.transformedBeanName(name);
    Object bean = obtainBean(beanName);
    if (bean instanceof FactoryBean<?> factoryBean && !BeanFactoryUtils.isFactoryDereference(name)) {
        // Match the FactoryBean's product type, not the factory itself.
        return isTypeMatch(factoryBean, typeToMatch.toClass());
    }
    return typeToMatch.isInstance(bean);
}
|
Checks whether the bean with the given name matches the specified type, resolving FactoryBean instances to their product type unless the name is a factory dereference.
@param name the name of the bean to query
@param typeToMatch the type to match against
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/StaticListableBeanFactory.java
| 232
|
[
"name",
"typeToMatch"
] | true
| 3
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
onLoad
|
/**
 * Handles the load step where esbuild loads the contents of the imports before
 * bundling them. This allows us to inject a filler via its `contents` if it was
 * provided. If not, the polyfill is empty and we display an error.
 * @param fillers to use the contents from
 * @param args from esbuild
 */
function onLoad(fillers: Fillers, args: esbuild.OnLoadArgs): esbuild.OnLoadResult {
  // display useful info if no shim has been found
  if (fillers[args.path].contents === undefined) {
    // throw an Error (not a bare string) so consumers also get a stack trace
    throw new Error(`no shim for "${args.path}" imported by "${args.pluginData}"`)
  }
  return fillers[args.path] // inject the contents
}
|
Handles the load step where esbuild loads the contents of the imports before
bundling them. This allows us to inject a filler via its `contents` if it was
provided. If not, the polyfill is empty and we display an error.
@param fillers to use the contents from
@param args from esbuild
|
typescript
|
helpers/compile/plugins/fill-plugin/fillPlugin.ts
| 125
|
[
"fillers",
"args"
] | true
| 2
| 7.2
|
prisma/prisma
| 44,834
|
jsdoc
| false
|
|
describeLogDirs
|
/**
 * Queries the log directory information of the given brokers, issuing one
 * DescribeLogDirs request per broker (topics == null queries all partitions
 * in all log directories).
 *
 * @param brokers the broker ids to query
 * @param options request options, including the timeout
 * @return a result holding one future per broker, completed with the log
 *         directory descriptions or completed exceptionally on error
 */
@Override
public DescribeLogDirsResult describeLogDirs(Collection<Integer> brokers, DescribeLogDirsOptions options) {
    final Map<Integer, KafkaFutureImpl<Map<String, LogDirDescription>>> futures = new HashMap<>(brokers.size());
    final long now = time.milliseconds();
    for (final Integer brokerId : brokers) {
        KafkaFutureImpl<Map<String, LogDirDescription>> future = new KafkaFutureImpl<>();
        futures.put(brokerId, future);
        // Each call is pinned to its broker via ConstantNodeIdProvider.
        runnable.call(new Call("describeLogDirs", calcDeadlineMs(now, options.timeoutMs()),
            new ConstantNodeIdProvider(brokerId)) {
            @Override
            public DescribeLogDirsRequest.Builder createRequest(int timeoutMs) {
                // Query selected partitions in all log directories
                return new DescribeLogDirsRequest.Builder(new DescribeLogDirsRequestData().setTopics(null));
            }
            @Override
            public void handleResponse(AbstractResponse abstractResponse) {
                DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse;
                Map<String, LogDirDescription> descriptions = logDirDescriptions(response);
                if (!descriptions.isEmpty()) {
                    future.complete(descriptions);
                } else {
                    // Up to v3 DescribeLogDirsResponse did not have an error code field, hence it defaults to None
                    Errors error = response.data().errorCode() == Errors.NONE.code()
                        ? Errors.CLUSTER_AUTHORIZATION_FAILED
                        : Errors.forCode(response.data().errorCode());
                    future.completeExceptionally(error.exception());
                }
            }
            @Override
            void handleFailure(Throwable throwable) {
                future.completeExceptionally(throwable);
            }
        }, now);
    }
    return new DescribeLogDirsResult(new HashMap<>(futures));
}
|
Queries the log directory information for the given brokers, issuing one request per broker covering all partitions in all log directories.
@return a result with one future per broker
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
| 2,998
|
[
"brokers",
"options"
] |
DescribeLogDirsResult
| true
| 3
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
scale_mm_epilogue
|
def scale_mm_epilogue():
    """
    Create an epilogue function that applies scaling to a matrix
    multiplication result using inverse scale factors.

    Returns:
        Epilogue function ``epilogue(acc, inv_a_scale, inv_b_scale, bias=None)``
        that multiplies the accumulator by both inverse scales and adds the
        optional bias.
    """
    def epilogue(acc, inv_a_scale, inv_b_scale, bias=None):
        # The epilogue function receives the accumulator (result of mat1 @ mat2)
        # and applies the scaling factors.
        # In the original scaled_mm, we use inverse scales, so we multiply by them.
        mul_scales = V.ops.mul(inv_a_scale, inv_b_scale)
        mul_acc = V.ops.mul(acc, mul_scales)
        if bias is not None:
            return V.ops.add(mul_acc, bias)
        else:
            return mul_acc
    return epilogue
|
Create an epilogue function that applies scaling to a matrix multiplication result using inverse scale factors.
Returns:
Epilogue function ``epilogue(acc, inv_a_scale, inv_b_scale, bias=None)`` that multiplies the accumulator by both inverse scales and adds the optional bias
|
python
|
torch/_inductor/kernel/mm_common.py
| 106
|
[] | false
| 3
| 7.12
|
pytorch/pytorch
| 96,034
|
google
| false
|
|
toFullyQualifiedName
|
/**
 * Returns the fully qualified name for the resource with name
 * {@code resourceName} relative to the given package. Only constructs the
 * name; it does not check that the resource exists. Null inputs are rejected.
 *
 * @param context The context for constructing the name.
 * @param resourceName the resource name to construct the fully qualified name for.
 * @return the fully qualified name of the resource.
 * @throws NullPointerException if either {@code context} or {@code resourceName} is null.
 */
public static String toFullyQualifiedName(final Package context, final String resourceName) {
    Objects.requireNonNull(context, "context");
    Objects.requireNonNull(resourceName, "resourceName");
    final StringBuilder qualifiedName = new StringBuilder(context.getName());
    return qualifiedName.append('.').append(resourceName).toString();
}
|
Returns the fully qualified name for the resource with name {@code resourceName} relative to the given context.
<p>
Note that this method does not check whether the resource actually exists. It only constructs the name. Null inputs are not allowed.
</p>
<pre>
ClassPathUtils.toFullyQualifiedName(StringUtils.class.getPackage(), "StringUtils.properties") = "org.apache.commons.lang3.StringUtils.properties"
</pre>
@param context The context for constructing the name.
@param resourceName the resource name to construct the fully qualified name for.
@return the fully qualified name of the resource with name {@code resourceName}.
@throws NullPointerException if either {@code context} or {@code resourceName} is null.
|
java
|
src/main/java/org/apache/commons/lang3/ClassPathUtils.java
| 95
|
[
"context",
"resourceName"
] |
String
| true
| 1
| 6.4
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
proceed
|
/**
 * Proceed to the next interceptor in the chain; the exact semantics depend
 * on the concrete joinpoint type (see the children interfaces).
 *
 * @return see the children interfaces' proceed definition
 * @throws Throwable if the joinpoint throws an exception
 */
@Nullable Object proceed() throws Throwable;
|
Proceed to the next interceptor in the chain.
<p>The implementation and the semantics of this method depends
on the actual joinpoint type (see the children interfaces).
@return see the children interfaces' proceed definition
@throws Throwable if the joinpoint throws an exception
|
java
|
spring-aop/src/main/java/org/aopalliance/intercept/Joinpoint.java
| 51
|
[] |
Object
| true
| 1
| 6.64
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
nextSequentialOffset
|
/**
 * Computes the offset to assign to the next appended record: the base
 * offset when nothing has been written yet, otherwise one past the last
 * written offset.
 */
private long nextSequentialOffset() {
    if (lastOffset == null) {
        return baseOffset;
    }
    return lastOffset + 1;
}
|
Returns the offset that the next appended record will receive: the base offset when nothing has been written yet, otherwise one past the last written offset.
|
java
|
clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java
| 907
|
[] | true
| 2
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
joinWithPreposition
|
/**
 * Joins the items with commas, placing the given preposition before the final
 * item (e.g. `a, b and c`). A single item is returned unchanged.
 * @param preposition word inserted before the last item
 * @param items the strings to join
 */
function joinWithPreposition(preposition: 'and' | 'or', items: string[]): string {
  if (items.length === 1) {
    return items[0]
  }
  const leadingItems = items.slice(0, -1)
  const finalItem = items[items.length - 1]
  return `${leadingItems.join(', ')} ${preposition} ${finalItem}`
}
|
Joins the items with commas, inserting the given preposition before the last item (e.g. `a, b and c`). A single item is returned unchanged.
@param preposition
@param items
|
typescript
|
packages/client/src/runtime/core/errorRendering/applyValidationError.ts
| 638
|
[
"preposition",
"items"
] | true
| 2
| 6.72
|
prisma/prisma
| 44,834
|
jsdoc
| false
|
|
getLength
|
/**
 * Decodes a DER length field. Supports the short form (single octet, bit 8
 * clear, bits 7-1 give the length) and the long form (bit 8 set, bits 7-1
 * give the number of following length octets, base 256 big-endian) with up
 * to 4 length octets.
 *
 * @return the decoded length as a non-negative int
 * @throws IOException if the stream ends early, the length field is wider
 *         than 4 octets, or the value exceeds Integer.MAX_VALUE
 */
private int getLength() throws IOException {
    int i = derInputStream.read();
    if (i == -1) throw new IOException("Invalid DER: length missing");
    // A single byte short length
    if ((i & ~0x7F) == 0) return i;
    // Long form: low 7 bits give the number of subsequent length octets.
    int num = i & 0x7F;
    // We can't handle length longer than 4 bytes
    if (i >= 0xFF || num > 4) throw new IOException("Invalid DER: length field too big (" + i + ")"); //$NON-NLS-2$
    byte[] bytes = new byte[num];
    int n = derInputStream.read(bytes);
    if (n < num) throw new IOException("Invalid DER: length too short");
    // Interpret the octets as an unsigned big-endian integer.
    int len = new BigInteger(1, bytes).intValue();
    if (len < 0) {
        // intValue() wrapped negative: the encoded length exceeds int range.
        throw new IOException("Invalid DER: length larger than max-int");
    }
    return len;
}
|
Decode the length of the field. Can only support length
encoding up to 4 octets.
<p>
In BER/DER encoding, length can be encoded in 2 forms:
</p>
<ul>
<li>Short form. One octet. Bit 8 has value "0" and bits 7-1
give the length.
</li>
<li>Long form. Two to 127 octets (only 4 is supported here).
Bit 8 of first octet has value "1" and bits 7-1 give the
number of additional length octets. Second and following
octets give the length, base 256, most significant digit first.
</li>
</ul>
@return The length as integer
|
java
|
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/DerParser.java
| 124
|
[] | true
| 7
| 8.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
acquire
|
/**
 * Acquires the light-weight lock protecting this consumer from
 * multi-threaded access. Instead of blocking when the lock is unavailable,
 * this throws (multi-threaded usage is not supported).
 *
 * @throws ConcurrentModificationException if another thread already holds the lock
 * @throws IllegalStateException if invoked from within an acknowledgement
 *         commit callback
 */
private void acquire() {
    final Thread thread = Thread.currentThread();
    final long threadId = thread.getId();
    // Re-entrant acquisition by the owning thread passes the first check;
    // otherwise attempt to claim the lock atomically.
    if (threadId != currentThread.get() && !currentThread.compareAndSet(NO_CURRENT_THREAD, threadId))
        throw new ConcurrentModificationException("KafkaShareConsumer is not safe for multi-threaded access. " +
            "currentThread(name: " + thread.getName() + ", id: " + threadId + ")" +
            " otherThread(id: " + currentThread.get() + ")"
        );
    // Consumer methods must not be re-entered from user acknowledgement callbacks.
    if (acknowledgementCommitCallbackHandler != null && acknowledgementCommitCallbackHandler.hasEnteredCallback()) {
        throw new IllegalStateException("KafkaShareConsumer methods are not accessible from user-defined " +
            "acknowledgement commit callback.");
    }
    refCount.incrementAndGet();
}
|
Acquire the light lock protecting this consumer from multithreaded access. Instead of blocking
when the lock is not available, however, we just throw an exception (since multithreaded usage is not
supported).
@throws ConcurrentModificationException if another thread already has the lock
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java
| 1,085
|
[] |
void
| true
| 5
| 6.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
update_dtype
|
def update_dtype(self, dtype) -> SparseDtype:
    """
    Convert the SparseDtype to a new dtype.

    This takes care of converting the ``fill_value``.

    Parameters
    ----------
    dtype : Union[str, numpy.dtype, SparseDtype]
        The new dtype to use.

        * For a SparseDtype, it is simply returned
        * For a NumPy dtype (or str), the current fill value
          is converted to the new dtype, and a SparseDtype
          with `dtype` and the new fill value is returned.

    Returns
    -------
    SparseDtype
        A new SparseDtype with the correct `dtype` and fill value
        for that `dtype`.

    Raises
    ------
    ValueError
        When the current fill value cannot be converted to the
        new `dtype` (e.g. trying to convert ``np.nan`` to an
        integer dtype).

    Examples
    --------
    >>> SparseDtype(int, 0).update_dtype(float)
    Sparse[float64, 0.0]

    >>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan))
    Sparse[float64, nan]
    """
    from pandas.core.dtypes.astype import astype_array
    from pandas.core.dtypes.common import pandas_dtype

    cls = type(self)
    dtype = pandas_dtype(dtype)

    if not isinstance(dtype, cls):
        # Only plain NumPy target dtypes are supported beyond SparseDtype.
        if not isinstance(dtype, np.dtype):
            raise TypeError("sparse arrays of extension dtypes not supported")

        # Cast the fill value through a 1d array so array-casting rules
        # (and their errors) apply rather than scalar casting.
        fv_asarray = np.atleast_1d(np.array(self.fill_value))
        fvarr = astype_array(fv_asarray, dtype)
        # NB: not fv_0d.item(), as that casts dt64->int
        fill_value = fvarr[0]
        dtype = cls(dtype, fill_value=fill_value)
    return dtype
|
Convert the SparseDtype to a new dtype.
This takes care of converting the ``fill_value``.
Parameters
----------
dtype : Union[str, numpy.dtype, SparseDtype]
The new dtype to use.
* For a SparseDtype, it is simply returned
* For a NumPy dtype (or str), the current fill value
is converted to the new dtype, and a SparseDtype
with `dtype` and the new fill value is returned.
Returns
-------
SparseDtype
A new SparseDtype with the correct `dtype` and fill value
for that `dtype`.
Raises
------
ValueError
When the current fill value cannot be converted to the
new `dtype` (e.g. trying to convert ``np.nan`` to an
integer dtype).
Examples
--------
>>> SparseDtype(int, 0).update_dtype(float)
Sparse[float64, 0.0]
>>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan))
Sparse[float64, nan]
|
python
|
pandas/core/dtypes/dtypes.py
| 2,022
|
[
"self",
"dtype"
] |
SparseDtype
| true
| 3
| 8.64
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
_generate_temporary_array_pointer
|
def _generate_temporary_array_pointer(
c_type: str, elements: Sequence[str], *, force_mutable: bool = False
) -> str:
"""Get a pointer to an array that only exists for the duration of the C++
statement it's used in."""
# If the c_type is already a pointer, return a mutable pointer to the array.
# Otherwise, return a const pointer. In the C-shim API, pointer types are only
# const-qualified with respect to the underlying value, not any nested pointers.
# e.g. const double** is possible, but not const double* const*. This means
# that an array containing pointers must _already_ be properly const-qualified
# by the c_type, and not add additional const-ness.
# MSVC does not support implicitly converting a const iterator to a const pointer.
ptr_call = (
"data()"
if force_mutable or c_type.endswith("*") or cpp_builder.is_msvc_cl()
else "cbegin()"
)
return (
f"std::array<{c_type}, {len(elements)}>{{{', '.join(elements)}}}.{ptr_call}"
)
|
Get a pointer to an array that only exists for the duration of the C++
statement it's used in.
|
python
|
torch/_inductor/codegen/cpp_wrapper_cpu.py
| 114
|
[
"c_type",
"elements",
"force_mutable"
] |
str
| true
| 4
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
bind_output_assets_to_tasks
|
def bind_output_assets_to_tasks(
    edges: list[dict], serialized_dag: SerializedDagModel, version_number: int, session: Session
) -> None:
    """
    Try to bind the downstream assets to the relevant task that produces them.

    This function will mutate the `edges` in place: the ``source_id`` of each
    downstream asset edge is set to the producing task id, and extra edge
    copies are appended when an alias-resolved asset has multiple producers.
    """
    # bind normal assets present in the `task_outlet_asset_references`
    outlet_asset_references = serialized_dag.dag_model.task_outlet_asset_references
    downstream_asset_edges = [
        edge
        for edge in edges
        if edge["target_id"].startswith("asset:") and not edge.get("resolved_from_alias")
    ]
    for edge in downstream_asset_edges:
        # Try to attach the outlet assets to the relevant tasks.
        # NOTE(review): `next(...)` without a default raises StopIteration if
        # no matching reference exists — presumably guaranteed upstream; verify.
        asset_id = int(edge["target_id"].replace("asset:", "", 1))
        outlet_asset_reference = next(
            outlet_asset_reference
            for outlet_asset_reference in outlet_asset_references
            if outlet_asset_reference.asset_id == asset_id
        )
        edge["source_id"] = outlet_asset_reference.task_id
    # bind assets resolved from aliases, they do not populate the `outlet_asset_references`
    downstream_alias_resolved_edges = [
        edge for edge in edges if edge["target_id"].startswith("asset:") and edge.get("resolved_from_alias")
    ]
    # NOTE(review): the comprehension variable below shadows the `edges`
    # parameter; it works but is confusing to read.
    aliases_names = {edges["resolved_from_alias"] for edges in downstream_alias_resolved_edges}
    result = session.scalars(
        select(AssetEvent)
        .join(AssetEvent.source_aliases)
        .join(AssetEvent.source_dag_run)
        # That's a simplification, instead doing `version_number` in `DagRun.dag_versions`.
        .join(DagRun.created_dag_version)
        .where(AssetEvent.source_aliases.any(AssetAliasModel.name.in_(aliases_names)))
        .where(AssetEvent.source_dag_run.has(DagRun.dag_id == serialized_dag.dag_model.dag_id))
        .where(DagVersion.version_number == version_number)
    ).unique()
    # Group producing task ids by asset id.
    asset_id_to_task_ids = defaultdict(set)
    for asset_event in result:
        asset_id_to_task_ids[asset_event.asset_id].add(asset_event.source_task_id)
    for edge in downstream_alias_resolved_edges:
        asset_id = int(edge["target_id"].replace("asset:", "", 1))
        task_ids = asset_id_to_task_ids.get(asset_id, set())
        # First producer re-uses the existing edge; additional producers get
        # appended copies of the edge with their own source_id.
        for index, task_id in enumerate(task_ids):
            if index == 0:
                edge["source_id"] = task_id
                continue
            edge_copy = {**edge, "source_id": task_id}
            edges.append(edge_copy)
|
Try to bind the downstream assets to the relevant task that produces them.
This function will mutate the `edges` in place.
|
python
|
airflow-core/src/airflow/api_fastapi/core_api/services/ui/structure.py
| 127
|
[
"edges",
"serialized_dag",
"version_number",
"session"
] |
None
| true
| 8
| 6
|
apache/airflow
| 43,597
|
unknown
| false
|
logToConsole
|
/** Logs the given value to the console, unwrapping it first via `unwrapSignal`. */
function logToConsole(value: unknown) {
  // tslint:disable-next-line:no-console
  console.log(unwrapSignal(value));
}
|
Logs the given value to the devtools console, unwrapping it first if it is an Angular signal.
@param value The value to log.
|
typescript
|
devtools/projects/ng-devtools-backend/src/lib/component-tree/component-tree.ts
| 764
|
[
"value"
] | false
| 1
| 6.4
|
angular/angular
| 99,544
|
jsdoc
| false
|
|
hashCode
|
@Override
public int hashCode() {
// racy single-check idiom
int h = hashCode;
if (h == 0) {
h = hash(type, subtype, parametersAsMap());
hashCode = h;
}
return h;
}
|
Parses a media type from its string representation.
@throws IllegalArgumentException if the input is not parsable
|
java
|
android/guava/src/com/google/common/net/MediaType.java
| 1,210
|
[] | true
| 2
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
|
create
|
public static ExponentialHistogramMerger create(int bucketLimit, ExponentialHistogramCircuitBreaker circuitBreaker) {
circuitBreaker.adjustBreaker(BASE_SIZE);
boolean success = false;
try {
ExponentialHistogramMerger result = new ExponentialHistogramMerger(bucketLimit, circuitBreaker);
success = true;
return result;
} finally {
if (success == false) {
circuitBreaker.adjustBreaker(-BASE_SIZE);
}
}
}
|
Creates a new instance with the specified bucket limit.
@param bucketLimit the maximum number of buckets the result histogram is allowed to have, must be at least 4
@param circuitBreaker the circuit breaker to use to limit memory allocations
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogramMerger.java
| 71
|
[
"bucketLimit",
"circuitBreaker"
] |
ExponentialHistogramMerger
| true
| 2
| 6.56
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
append
|
@Override
public StrBuilder append(final CharSequence seq, final int startIndex, final int length) {
if (seq == null) {
return appendNull();
}
return append(seq.toString(), startIndex, length);
}
|
Appends part of a CharSequence to this string builder.
Appending null will call {@link #appendNull()}.
@param seq the CharSequence to append
@param startIndex the start index, inclusive, must be valid
@param length the length to append, must be valid
@return {@code this} instance.
@since 3.0
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 503
|
[
"seq",
"startIndex",
"length"
] |
StrBuilder
| true
| 2
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
toUriString
|
public static String toUriString(InetAddress ip) {
if (ip instanceof Inet6Address) {
return "[" + toAddrString(ip) + "]";
}
return toAddrString(ip);
}
|
Returns the string representation of an {@link InetAddress} suitable for inclusion in a URI.
<p>For IPv4 addresses, this is identical to {@link InetAddress#getHostAddress()}, but for IPv6
addresses it compresses zeroes and surrounds the text with square brackets; for example {@code
"[2001:db8::1]"}.
<p>Per section 3.2.2 of <a target="_parent"
href="http://tools.ietf.org/html/rfc3986#section-3.2.2">RFC 3986</a>, a URI containing an IPv6
string literal is of the form {@code "http://[2001:db8::1]:8888/index.html"}.
<p>Use of either {@link InetAddresses#toAddrString}, {@link InetAddress#getHostAddress()}, or
this method is recommended over {@link InetAddress#toString()} when an IP address string
literal is desired. This is because {@link InetAddress#toString()} prints the hostname and the
IP address string joined by a "/".
@param ip {@link InetAddress} to be converted to URI string literal
@return {@code String} containing URI-safe string literal
|
java
|
android/guava/src/com/google/common/net/InetAddresses.java
| 580
|
[
"ip"
] |
String
| true
| 2
| 7.36
|
google/guava
| 51,352
|
javadoc
| false
|
deregister
|
def deregister() -> None:
"""
Remove pandas formatters and converters.
Removes the custom converters added by :func:`register`. This
attempts to set the state of the registry back to the state before
pandas registered its own units. Converters for pandas' own types like
Timestamp and Period are removed completely. Converters for types
pandas overwrites, like ``datetime.datetime``, are restored to their
original value.
See Also
--------
register_matplotlib_converters : Register pandas formatters and converters
with matplotlib.
Examples
--------
.. plot::
:context: close-figs
The following line is done automatically by pandas so
the plot can be rendered:
>>> pd.plotting.register_matplotlib_converters()
>>> df = pd.DataFrame(
... {"ts": pd.period_range("2020", periods=2, freq="M"), "y": [1, 2]}
... )
>>> plot = df.plot.line(x="ts", y="y")
Unsetting the register manually an error will be raised:
>>> pd.set_option(
... "plotting.matplotlib.register_converters", False
... ) # doctest: +SKIP
>>> df.plot.line(x="ts", y="y") # doctest: +SKIP
Traceback (most recent call last):
TypeError: float() argument must be a string or a real number, not 'Period'
"""
plot_backend = _get_plot_backend("matplotlib")
plot_backend.deregister()
|
Remove pandas formatters and converters.
Removes the custom converters added by :func:`register`. This
attempts to set the state of the registry back to the state before
pandas registered its own units. Converters for pandas' own types like
Timestamp and Period are removed completely. Converters for types
pandas overwrites, like ``datetime.datetime``, are restored to their
original value.
See Also
--------
register_matplotlib_converters : Register pandas formatters and converters
with matplotlib.
Examples
--------
.. plot::
:context: close-figs
The following line is done automatically by pandas so
the plot can be rendered:
>>> pd.plotting.register_matplotlib_converters()
>>> df = pd.DataFrame(
... {"ts": pd.period_range("2020", periods=2, freq="M"), "y": [1, 2]}
... )
>>> plot = df.plot.line(x="ts", y="y")
Unsetting the register manually an error will be raised:
>>> pd.set_option(
... "plotting.matplotlib.register_converters", False
... ) # doctest: +SKIP
>>> df.plot.line(x="ts", y="y") # doctest: +SKIP
Traceback (most recent call last):
TypeError: float() argument must be a string or a real number, not 'Period'
|
python
|
pandas/plotting/_misc.py
| 133
|
[] |
None
| true
| 1
| 6.64
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
WriteHeaderMessageForwardDecls
|
void WriteHeaderMessageForwardDecls(const google::protobuf::FileDescriptor* file,
Context& ctx) {
// Import forward-declaration of types defined in this file.
if (ctx.options().backend == Backend::UPB) {
ctx.Emit({{"upb_filename", UpbCFilename(file)}},
"#include \"$upb_filename$\"\n");
}
WriteForwardDecls(file, ctx);
// Import forward-declaration of types in dependencies.
for (int i = 0; i < file->dependency_count(); ++i) {
if (ctx.options().strip_feature_includes &&
compiler::IsKnownFeatureProto(file->dependency(i)->name())) {
// Strip feature imports for editions codegen tests.
continue;
}
WriteForwardDecls(file->dependency(i), ctx);
}
ctx.Emit("\n");
}
|
Writes includes for upb C minitables and fwd.h for transitive typedefs.
|
cpp
|
hpb_generator/generator.cc
| 304
|
[] | true
| 5
| 7.2
|
protocolbuffers/protobuf
| 69,904
|
doxygen
| false
|
|
_validate_codes
|
def _validate_codes(self, level: Index, code: np.ndarray) -> np.ndarray:
"""
Reassign code values as -1 if their corresponding levels are NaN.
Parameters
----------
code : Index
Code to reassign.
level : np.ndarray
Level to check for missing values (NaN, NaT, None).
Returns
-------
new code where code value = -1 if it corresponds
to a level with missing values (NaN, NaT, None).
"""
null_mask = isna(level)
if np.any(null_mask):
code = np.where(null_mask[code], -1, code)
return code
|
Reassign code values as -1 if their corresponding levels are NaN.
Parameters
----------
code : Index
Code to reassign.
level : np.ndarray
Level to check for missing values (NaN, NaT, None).
Returns
-------
new code where code value = -1 if it corresponds
to a level with missing values (NaN, NaT, None).
|
python
|
pandas/core/indexes/multi.py
| 346
|
[
"self",
"level",
"code"
] |
np.ndarray
| true
| 2
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
comparePathComponents
|
function comparePathComponents(one: string, other: string, caseSensitive = false): number {
if (!caseSensitive) {
one = one && one.toLowerCase();
other = other && other.toLowerCase();
}
if (one === other) {
return 0;
}
return one < other ? -1 : 1;
}
|
Compares the case of the provided strings - uppercase before lowercase
@returns
```text
-1 if one is uppercase and other is lowercase
1 if one is lowercase and other is uppercase
0 otherwise
```
|
typescript
|
src/vs/base/common/comparers.ts
| 265
|
[
"one",
"other",
"caseSensitive"
] | true
| 6
| 7.52
|
microsoft/vscode
| 179,840
|
jsdoc
| false
|
|
parseGroup
|
private void parseGroup(JSONObject group, Map<String, Dependency> dependencies) throws JSONException {
if (group.has(VALUES_EL)) {
JSONArray content = group.getJSONArray(VALUES_EL);
for (int i = 0; i < content.length(); i++) {
Dependency dependency = parseDependency(content.getJSONObject(i));
dependencies.put(dependency.getId(), dependency);
}
}
}
|
Returns the defaults applicable to the service.
@return the defaults of the service
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/init/InitializrServiceMetadata.java
| 178
|
[
"group",
"dependencies"
] |
void
| true
| 3
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
registerListeners
|
protected void registerListeners() throws SchedulerException {
ListenerManager listenerManager = getScheduler().getListenerManager();
if (this.schedulerListeners != null) {
for (SchedulerListener listener : this.schedulerListeners) {
listenerManager.addSchedulerListener(listener);
}
}
if (this.globalJobListeners != null) {
for (JobListener listener : this.globalJobListeners) {
listenerManager.addJobListener(listener);
}
}
if (this.globalTriggerListeners != null) {
for (TriggerListener listener : this.globalTriggerListeners) {
listenerManager.addTriggerListener(listener);
}
}
}
|
Register all specified listeners with the Scheduler.
|
java
|
spring-context-support/src/main/java/org/springframework/scheduling/quartz/SchedulerAccessor.java
| 341
|
[] |
void
| true
| 4
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
indexOf
|
static int indexOf(final CharSequence cs, final CharSequence searchChar, final int start) {
if (cs == null || searchChar == null) {
return StringUtils.INDEX_NOT_FOUND;
}
if (cs instanceof String) {
return ((String) cs).indexOf(searchChar.toString(), start);
}
if (cs instanceof StringBuilder) {
return ((StringBuilder) cs).indexOf(searchChar.toString(), start);
}
if (cs instanceof StringBuffer) {
return ((StringBuffer) cs).indexOf(searchChar.toString(), start);
}
return cs.toString().indexOf(searchChar.toString(), start);
// if (cs instanceof String && searchChar instanceof String) {
// // TODO: Do we assume searchChar is usually relatively small;
// // If so then calling toString() on it is better than reverting to
// // the green implementation in the else block
// return ((String) cs).indexOf((String) searchChar, start);
// } else {
// // TODO: Implement rather than convert to String
// return cs.toString().indexOf(searchChar.toString(), start);
// }
}
|
Used by the indexOf(CharSequence methods) as a green implementation of indexOf.
@param cs the {@link CharSequence} to be processed.
@param searchChar the {@link CharSequence} to be searched for.
@param start the start index.
@return the index where the search sequence was found, or {@code -1} if there is no such occurrence.
|
java
|
src/main/java/org/apache/commons/lang3/CharSequenceUtils.java
| 49
|
[
"cs",
"searchChar",
"start"
] | true
| 6
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
state
|
public MemberState state() {
return state;
}
|
@return Current state of this member in relationship to a group, as defined in
{@link MemberState}.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java
| 1,318
|
[] |
MemberState
| true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
describe_option
|
def describe_option(pat: str = "", _print_desc: bool = True) -> str | None:
"""
Print the description for one or more registered options.
Call with no arguments to get a listing for all registered options.
Parameters
----------
pat : str, default ""
String or string regexp pattern.
Empty string will return all options.
For regexp strings, all matching keys will have their description displayed.
_print_desc : bool, default True
If True (default) the description(s) will be printed to stdout.
Otherwise, the description(s) will be returned as a string
(for testing).
Returns
-------
None
If ``_print_desc=True``.
str
If the description(s) as a string if ``_print_desc=False``.
See Also
--------
get_option : Retrieve the value of the specified option.
set_option : Set the value of the specified option or options.
reset_option : Reset one or more options to their default value.
Notes
-----
For all available options, please view the
:ref:`User Guide <options.available>`.
Examples
--------
>>> pd.describe_option("display.max_columns") # doctest: +SKIP
display.max_columns : int
If max_cols is exceeded, switch to truncate view...
"""
keys = _select_options(pat)
if len(keys) == 0:
raise OptionError(f"No such keys(s) for {pat=}")
s = "\n".join([_build_option_description(k) for k in keys])
if _print_desc:
print(s)
return None
return s
|
Print the description for one or more registered options.
Call with no arguments to get a listing for all registered options.
Parameters
----------
pat : str, default ""
String or string regexp pattern.
Empty string will return all options.
For regexp strings, all matching keys will have their description displayed.
_print_desc : bool, default True
If True (default) the description(s) will be printed to stdout.
Otherwise, the description(s) will be returned as a string
(for testing).
Returns
-------
None
If ``_print_desc=True``.
str
If the description(s) as a string if ``_print_desc=False``.
See Also
--------
get_option : Retrieve the value of the specified option.
set_option : Set the value of the specified option or options.
reset_option : Reset one or more options to their default value.
Notes
-----
For all available options, please view the
:ref:`User Guide <options.available>`.
Examples
--------
>>> pd.describe_option("display.max_columns") # doctest: +SKIP
display.max_columns : int
If max_cols is exceeded, switch to truncate view...
|
python
|
pandas/_config/config.py
| 291
|
[
"pat",
"_print_desc"
] |
str | None
| true
| 3
| 8.32
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
get_plain_input_and_grad_nodes
|
def get_plain_input_and_grad_nodes(
graph: fx.Graph,
) -> dict[PlainAOTInput, tuple[fx.Node, Optional[fx.Node]]]:
"""Get plain input nodes and their corresponding gradient nodes from a joint graph.
Args:
graph: The FX joint graph with descriptors
Returns:
A dictionary mapping each PlainAOTInput descriptor to a tuple containing:
- The plain input node
- The gradient (output) node if it exists, None otherwise
"""
return {
desc: (n, g)
for desc, (n, g) in get_all_input_and_grad_nodes(graph).items()
if isinstance(desc, PlainAOTInput)
}
|
Get plain input nodes and their corresponding gradient nodes from a joint graph.
Args:
graph: The FX joint graph with descriptors
Returns:
A dictionary mapping each PlainAOTInput descriptor to a tuple containing:
- The plain input node
- The gradient (output) node if it exists, None otherwise
|
python
|
torch/_functorch/_aot_autograd/fx_utils.py
| 167
|
[
"graph"
] |
dict[PlainAOTInput, tuple[fx.Node, Optional[fx.Node]]]
| true
| 1
| 6.56
|
pytorch/pytorch
| 96,034
|
google
| false
|
column_stack
|
def column_stack(tup):
"""
Stack 1-D arrays as columns into a 2-D array.
Take a sequence of 1-D arrays and stack them as columns
to make a single 2-D array. 2-D arrays are stacked as-is,
just like with `hstack`. 1-D arrays are turned into 2-D columns
first.
Parameters
----------
tup : sequence of 1-D or 2-D arrays.
Arrays to stack. All of them must have the same first dimension.
Returns
-------
stacked : 2-D array
The array formed by stacking the given arrays.
See Also
--------
stack, hstack, vstack, concatenate
Examples
--------
>>> import numpy as np
>>> a = np.array((1,2,3))
>>> b = np.array((4,5,6))
>>> np.column_stack((a,b))
array([[1, 4],
[2, 5],
[3, 6]])
"""
arrays = []
for v in tup:
arr = asanyarray(v)
if arr.ndim < 2:
arr = array(arr, copy=None, subok=True, ndmin=2).T
arrays.append(arr)
return _nx.concatenate(arrays, 1)
|
Stack 1-D arrays as columns into a 2-D array.
Take a sequence of 1-D arrays and stack them as columns
to make a single 2-D array. 2-D arrays are stacked as-is,
just like with `hstack`. 1-D arrays are turned into 2-D columns
first.
Parameters
----------
tup : sequence of 1-D or 2-D arrays.
Arrays to stack. All of them must have the same first dimension.
Returns
-------
stacked : 2-D array
The array formed by stacking the given arrays.
See Also
--------
stack, hstack, vstack, concatenate
Examples
--------
>>> import numpy as np
>>> a = np.array((1,2,3))
>>> b = np.array((4,5,6))
>>> np.column_stack((a,b))
array([[1, 4],
[2, 5],
[3, 6]])
|
python
|
numpy/lib/_shape_base_impl.py
| 608
|
[
"tup"
] | false
| 3
| 7.68
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
toLocalDateTime
|
public static LocalDateTime toLocalDateTime(final Date date, final TimeZone timeZone) {
return LocalDateTime.ofInstant(date.toInstant(), toZoneId(timeZone));
}
|
Converts a {@link Date} to a {@link LocalDateTime}.
@param date the Date to convert to a LocalDateTime, not null.
@param timeZone the time zone, null maps to to the default time zone.
@return a new LocalDateTime.
@since 3.19.0
|
java
|
src/main/java/org/apache/commons/lang3/time/DateUtils.java
| 1,651
|
[
"date",
"timeZone"
] |
LocalDateTime
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
add
|
@Override
public void add(TDigest other) {
reserve(other.size());
if (mergingDigest != null) {
mergingDigest.add(other);
} else {
sortingDigest.add(other);
}
}
|
Similar to the constructor above. The limit for switching from a {@link SortingDigest} to a {@link MergingDigest} implementation
is calculated based on the passed compression factor.
@param compression The compression factor for the MergingDigest
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/HybridDigest.java
| 109
|
[
"other"
] |
void
| true
| 2
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
shape
|
def shape(a):
"""
Return the shape of an array.
Parameters
----------
a : array_like
Input array.
Returns
-------
shape : tuple of ints
The elements of the shape tuple give the lengths of the
corresponding array dimensions.
See Also
--------
len : ``len(a)`` is equivalent to ``np.shape(a)[0]`` for N-D arrays with
``N>=1``.
ndarray.shape : Equivalent array method.
Examples
--------
>>> import numpy as np
>>> np.shape(np.eye(3))
(3, 3)
>>> np.shape([[1, 3]])
(1, 2)
>>> np.shape([0])
(1,)
>>> np.shape(0)
()
>>> a = np.array([(1, 2), (3, 4), (5, 6)],
... dtype=[('x', 'i4'), ('y', 'i4')])
>>> np.shape(a)
(3,)
>>> a.shape
(3,)
"""
try:
result = a.shape
except AttributeError:
result = asarray(a).shape
return result
|
Return the shape of an array.
Parameters
----------
a : array_like
Input array.
Returns
-------
shape : tuple of ints
The elements of the shape tuple give the lengths of the
corresponding array dimensions.
See Also
--------
len : ``len(a)`` is equivalent to ``np.shape(a)[0]`` for N-D arrays with
``N>=1``.
ndarray.shape : Equivalent array method.
Examples
--------
>>> import numpy as np
>>> np.shape(np.eye(3))
(3, 3)
>>> np.shape([[1, 3]])
(1, 2)
>>> np.shape([0])
(1,)
>>> np.shape(0)
()
>>> a = np.array([(1, 2), (3, 4), (5, 6)],
... dtype=[('x', 'i4'), ('y', 'i4')])
>>> np.shape(a)
(3,)
>>> a.shape
(3,)
|
python
|
numpy/_core/fromnumeric.py
| 2,085
|
[
"a"
] | false
| 1
| 6.48
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
asciiWords
|
function asciiWords(string) {
return string.match(reAsciiWord) || [];
}
|
Splits an ASCII `string` into an array of its words.
@private
@param {string} The string to inspect.
@returns {Array} Returns the words of `string`.
|
javascript
|
lodash.js
| 774
|
[
"string"
] | false
| 2
| 6.16
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
as_ordered
|
def as_ordered(self) -> Self:
"""
Set the Categorical to be ordered.
Returns
-------
Categorical
Ordered Categorical.
See Also
--------
as_unordered : Set the Categorical to be unordered.
Examples
--------
For :class:`pandas.Series`:
>>> ser = pd.Series(["a", "b", "c", "a"], dtype="category")
>>> ser.cat.ordered
False
>>> ser = ser.cat.as_ordered()
>>> ser.cat.ordered
True
For :class:`pandas.CategoricalIndex`:
>>> ci = pd.CategoricalIndex(["a", "b", "c", "a"])
>>> ci.ordered
False
>>> ci = ci.as_ordered()
>>> ci.ordered
True
"""
return self.set_ordered(True)
|
Set the Categorical to be ordered.
Returns
-------
Categorical
Ordered Categorical.
See Also
--------
as_unordered : Set the Categorical to be unordered.
Examples
--------
For :class:`pandas.Series`:
>>> ser = pd.Series(["a", "b", "c", "a"], dtype="category")
>>> ser.cat.ordered
False
>>> ser = ser.cat.as_ordered()
>>> ser.cat.ordered
True
For :class:`pandas.CategoricalIndex`:
>>> ci = pd.CategoricalIndex(["a", "b", "c", "a"])
>>> ci.ordered
False
>>> ci = ci.as_ordered()
>>> ci.ordered
True
|
python
|
pandas/core/arrays/categorical.py
| 1,008
|
[
"self"
] |
Self
| true
| 1
| 6.64
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
visitParenthesizedExpression
|
function visitParenthesizedExpression(node: ParenthesizedExpression, expressionResultIsUnused: boolean): ParenthesizedExpression {
return visitEachChild(node, expressionResultIsUnused ? visitorWithUnusedExpressionResult : visitor, context);
}
|
Visits a ParenthesizedExpression that may contain a destructuring assignment.
@param node A ParenthesizedExpression node.
@param expressionResultIsUnused Indicates the result of an expression is unused by the parent node (i.e., the left side of a comma or the
expression of an `ExpressionStatement`).
|
typescript
|
src/compiler/transformers/es2015.ts
| 2,672
|
[
"node",
"expressionResultIsUnused"
] | true
| 2
| 6
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
properties
|
public Set<Property> properties() {
return properties;
}
|
@return a set representing all the valid properties for this database
|
java
|
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/Database.java
| 214
|
[] | true
| 1
| 6.32
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
fuzz_torch_tensor_type
|
def fuzz_torch_tensor_type(template: str = "default") -> torch.dtype:
"""
Fuzzes PyTorch tensor data types by randomly selecting and returning different dtypes.
Args:
template: Template name to determine supported dtypes
Returns:
torch.dtype: A randomly selected PyTorch tensor data type based on template constraints
"""
# Get template-specific dtypes
if template == "dtensor":
# Import here to avoid circular imports
from torchfuzz.codegen import DTensorFuzzTemplate
fuzz_template = DTensorFuzzTemplate()
tensor_dtypes = fuzz_template.supported_dtypes()
elif template == "unbacked":
# Import here to avoid circular imports
from torchfuzz.codegen import UnbackedFuzzTemplate
fuzz_template = UnbackedFuzzTemplate()
tensor_dtypes = fuzz_template.supported_dtypes()
else:
from torchfuzz.codegen import DefaultFuzzTemplate
fuzz_template = DefaultFuzzTemplate()
tensor_dtypes = fuzz_template.supported_dtypes()
# Randomly select and return a data type
return random.choice(tensor_dtypes)
|
Fuzzes PyTorch tensor data types by randomly selecting and returning different dtypes.
Args:
template: Template name to determine supported dtypes
Returns:
torch.dtype: A randomly selected PyTorch tensor data type based on template constraints
|
python
|
tools/experimental/torchfuzz/tensor_fuzzer.py
| 37
|
[
"template"
] |
torch.dtype
| true
| 4
| 7.44
|
pytorch/pytorch
| 96,034
|
google
| false
|
split_date_version_and_suffix
|
def split_date_version_and_suffix(file_name: str, suffix: str) -> VersionedFile:
"""Split file name with date-based version (YYYY-MM-DD format) and suffix.
Example: apache_airflow_providers-2025-11-18-source.tar.gz
"""
from packaging.version import Version
no_suffix_file = file_name[: -len(suffix)]
# Date format is YYYY-MM-DD, so we need to extract last 3 parts
parts = no_suffix_file.rsplit("-", 3)
if len(parts) != 4:
raise ValueError(f"Invalid date-versioned file name format: {file_name}")
no_version_file = parts[0]
date_version = f"{parts[1]}-{parts[2]}-{parts[3]}"
# Validate date format
try:
datetime.strptime(date_version, "%Y-%m-%d")
except ValueError as e:
raise ValueError(f"Invalid date format in file name {file_name}: {e}")
no_version_file = no_version_file.replace("_", "-")
# Convert date to a comparable version format (YYYYMMDD as integer-like version)
comparable_date_str = date_version.replace("-", ".")
return VersionedFile(
base=no_version_file + "-",
version=date_version,
suffix=suffix,
type=no_version_file + "-" + suffix,
comparable_version=Version(comparable_date_str),
file_name=file_name,
)
|
Split file name with date-based version (YYYY-MM-DD format) and suffix.
Example: apache_airflow_providers-2025-11-18-source.tar.gz
|
python
|
dev/breeze/src/airflow_breeze/commands/release_management_commands.py
| 3,290
|
[
"file_name",
"suffix"
] |
VersionedFile
| true
| 2
| 6.72
|
apache/airflow
| 43,597
|
unknown
| false
|
polyadd
|
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
.. note::
This forms part of the old polynomial API. Since version 1.4, the
new polynomial API defined in `numpy.polynomial` is preferred.
A summary of the differences can be found in the
:doc:`transition guide </reference/routines.polynomials>`.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> import numpy as np
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print(p1)
1 x + 2
>>> print(p2)
2
9 x + 5 x + 4
>>> print(np.polyadd(p1, p2))
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
|
Find the sum of two polynomials.
.. note::
This forms part of the old polynomial API. Since version 1.4, the
new polynomial API defined in `numpy.polynomial` is preferred.
A summary of the differences can be found in the
:doc:`transition guide </reference/routines.polynomials>`.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> import numpy as np
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print(p1)
1 x + 2
>>> print(p2)
2
9 x + 5 x + 4
>>> print(np.polyadd(p1, p2))
2
9 x + 6 x + 6
|
python
|
numpy/lib/_polynomial_impl.py
| 796
|
[
"a1",
"a2"
] | false
| 6
| 7.44
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
writeEntry
|
private void writeEntry(JarArchiveEntry entry, @Nullable Library library, @Nullable EntryWriter entryWriter)
throws IOException {
String name = entry.getName();
if (this.writtenEntries.add(name)) {
writeParentDirectoryEntries(name);
entry.setUnixMode(name.endsWith("/") ? UNIX_DIR_MODE : UNIX_FILE_MODE);
entry.getGeneralPurposeBit().useUTF8ForNames(true);
if (!entry.isDirectory() && entry.getSize() == -1) {
entryWriter = SizeCalculatingEntryWriter.get(entryWriter);
Assert.state(entryWriter != null, "'entryWriter' must not be null");
entry.setSize(entryWriter.size());
}
updateLayerIndex(entry, library);
writeToArchive(entry, entryWriter);
}
}
|
Perform the actual write of a {@link JarEntry}. All other write methods delegate to
this one.
@param entry the entry to write
@param library the library for the entry or {@code null}
@param entryWriter the entry writer or {@code null} if there is no content
@throws IOException in case of I/O errors
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/AbstractJarWriter.java
| 252
|
[
"entry",
"library",
"entryWriter"
] |
void
| true
| 5
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
add
|
@CanIgnoreReturnValue
public Builder<E> add(E... elements) {
for (E element : elements) {
add(element);
}
return this;
}
|
Adds each element of {@code elements} to the {@code ImmutableCollection} being built.
<p>Note that each builder class overrides this method in order to covariantly return its own
type.
@param elements the elements to add
@return this {@code Builder} instance
@throws NullPointerException if {@code elements} is null or contains a null element
|
java
|
android/guava/src/com/google/common/collect/ImmutableCollection.java
| 441
|
[] | true
| 1
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
bucket_collectives
|
def bucket_collectives(self) -> None:
"""Run the full bucketing and dep application flow.
Order is important:
1. Bucketing - merge collectives into buckets
2. Inline fusions - expand call_module back to original nodes
3. Transfer deps - move deps from erased nodes to their replacements
4. Add control deps - apply effect tokens and topo sort
Steps 2-3 MUST happen before step 4, because control deps need to
reference the final inlined nodes, not the erased fusion modules.
"""
# Step 1: Bucket collectives
if self.collective_bucketing:
self._bucket_collectives_impl()
# Step 2: Inline fusion regions (expand call_module -> original nodes)
replaced: dict[fx.Node, fx.Node] = {}
if self.region_of:
from torch._inductor.fx_passes.fusion_regions import expand_fusion_regions
gm = self.graph.owning_module
replaced = expand_fusion_regions(gm, self.region_of)
# Step 3: Transfer deps from erased fusion modules to inlined nodes
if replaced:
self.aug_graph.transfer_erased_node_deps(replaced)
# Step 4: Add control deps (MUST be after inline + transfer)
self._apply_deps_and_effect_tokens()
self.graph.lint()
|
Run the full bucketing and dep application flow.
Order is important:
1. Bucketing - merge collectives into buckets
2. Inline fusions - expand call_module back to original nodes
3. Transfer deps - move deps from erased nodes to their replacements
4. Add control deps - apply effect tokens and topo sort
Steps 2-3 MUST happen before step 4, because control deps need to
reference the final inlined nodes, not the erased fusion modules.
|
python
|
torch/_inductor/fx_passes/overlap_preserving_bucketer.py
| 345
|
[
"self"
] |
None
| true
| 4
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
retrieveMaxExpressionLength
|
private static int retrieveMaxExpressionLength() {
String value = SpringProperties.getProperty(MAX_SPEL_EXPRESSION_LENGTH_PROPERTY_NAME);
if (!StringUtils.hasText(value)) {
return SpelParserConfiguration.DEFAULT_MAX_EXPRESSION_LENGTH;
}
try {
int maxLength = Integer.parseInt(value.trim());
Assert.isTrue(maxLength > 0, () -> "Value [" + maxLength + "] for system property [" +
MAX_SPEL_EXPRESSION_LENGTH_PROPERTY_NAME + "] must be positive");
return maxLength;
}
catch (NumberFormatException ex) {
throw new IllegalArgumentException("Failed to parse value for system property [" +
MAX_SPEL_EXPRESSION_LENGTH_PROPERTY_NAME + "]: " + ex.getMessage(), ex);
}
}
|
Template method for customizing the expression evaluation context.
<p>The default implementation is empty.
|
java
|
spring-context/src/main/java/org/springframework/context/expression/StandardBeanExpressionResolver.java
| 196
|
[] | true
| 3
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
printStackTraceToString
|
default String printStackTraceToString(Throwable throwable) {
try {
StringBuilder out = new StringBuilder(4096);
printStackTrace(throwable, out);
return out.toString();
}
catch (IOException ex) {
throw new UncheckedIOException(ex);
}
}
|
Return a {@link String} containing the printed stack trace for a given
{@link Throwable}.
@param throwable the throwable that should have its stack trace printed
@return the stack trace string
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/StackTracePrinter.java
| 38
|
[
"throwable"
] |
String
| true
| 2
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
valuesIterator
|
Iterator<V> valuesIterator() {
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
return delegate.values().iterator();
}
return new Itr<V>() {
@Override
@ParametricNullness
V getOutput(int entry) {
return value(entry);
}
};
}
|
Updates the index an iterator is pointing to after a call to remove: returns the index of the
entry that should be looked at after a removal on indexRemoved, with indexBeforeRemove as the
index that *was* the next entry that would be looked at.
|
java
|
android/guava/src/com/google/common/collect/CompactHashMap.java
| 932
|
[] | true
| 2
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
|
close
|
@Override
public void close() {
if (closed) {
assert false : "ExponentialHistogramGenerator closed multiple times";
} else {
closed = true;
resultMerger.close();
valueBuffer.close();
circuitBreaker.adjustBreaker(-estimateBaseSize(rawValueBuffer.length));
}
}
|
Returns the histogram representing the distribution of all accumulated values.
@return the histogram representing the distribution of all accumulated values
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogramGenerator.java
| 192
|
[] |
void
| true
| 2
| 7.44
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
pipelinesWithGeoIpProcessor
|
@SuppressWarnings("unchecked")
private static Set<String> pipelinesWithGeoIpProcessor(ProjectMetadata projectMetadata, boolean downloadDatabaseOnPipelineCreation) {
List<PipelineConfiguration> configurations = IngestService.getPipelines(projectMetadata);
Map<String, PipelineConfiguration> pipelineConfigById = HashMap.newHashMap(configurations.size());
for (PipelineConfiguration configuration : configurations) {
pipelineConfigById.put(configuration.getId(), configuration);
}
// this map is used to keep track of pipelines that have already been checked
Map<String, Boolean> pipelineHasGeoProcessorById = HashMap.newHashMap(configurations.size());
Set<String> ids = new HashSet<>();
// note: this loop is unrolled rather than streaming-style because it's hot enough to show up in a flamegraph
for (PipelineConfiguration configuration : configurations) {
List<Map<String, Object>> processors = (List<Map<String, Object>>) configuration.getConfig().get(Pipeline.PROCESSORS_KEY);
String pipelineName = configuration.getId();
if (pipelineHasGeoProcessorById.containsKey(pipelineName) == false) {
if (hasAtLeastOneGeoipProcessor(
processors,
downloadDatabaseOnPipelineCreation,
pipelineConfigById,
pipelineHasGeoProcessorById
)) {
ids.add(pipelineName);
}
}
}
return Collections.unmodifiableSet(ids);
}
|
Retrieve the set of pipeline ids that have at least one geoip processor.
@param projectMetadata project metadata
@param downloadDatabaseOnPipelineCreation Filter the list to include only pipeline with the download_database_on_pipeline_creation
matching the param.
@return A set of pipeline ids matching criteria.
|
java
|
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java
| 302
|
[
"projectMetadata",
"downloadDatabaseOnPipelineCreation"
] | true
| 3
| 7.76
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
read
|
@Override
public NavigableMap<Integer, Object> read(ByteBuffer buffer) {
int numTaggedFields = ByteUtils.readUnsignedVarint(buffer);
if (numTaggedFields == 0) {
return Collections.emptyNavigableMap();
}
NavigableMap<Integer, Object> objects = new TreeMap<>();
int prevTag = -1;
for (int i = 0; i < numTaggedFields; i++) {
int tag = ByteUtils.readUnsignedVarint(buffer);
if (tag <= prevTag) {
throw new RuntimeException("Invalid or out-of-order tag " + tag);
}
prevTag = tag;
int size = ByteUtils.readUnsignedVarint(buffer);
if (size < 0)
throw new SchemaException("field size " + size + " cannot be negative");
if (size > buffer.remaining())
throw new SchemaException("Error reading field of size " + size + ", only " + buffer.remaining() + " bytes available");
Field field = fields.get(tag);
if (field == null) {
byte[] bytes = new byte[size];
buffer.get(bytes);
objects.put(tag, new RawTaggedField(tag, bytes));
} else {
objects.put(tag, field.type.read(buffer));
}
}
return objects;
}
|
Create a new TaggedFields object with the given tags and fields.
@param fields This is an array containing Integer tags followed
by associated Field objects.
@return The new {@link TaggedFields}
|
java
|
clients/src/main/java/org/apache/kafka/common/protocol/types/TaggedFields.java
| 81
|
[
"buffer"
] | true
| 7
| 7.92
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
getDependencyType
|
public Class<?> getDependencyType() {
if (this.field != null) {
if (this.nestingLevel > 1) {
Class<?> clazz = getResolvableType().getRawClass();
return (clazz != null ? clazz : Object.class);
}
else {
return this.field.getType();
}
}
else {
return obtainMethodParameter().getNestedParameterType();
}
}
|
Determine the declared (non-generic) type of the wrapped parameter/field.
@return the declared type (never {@code null})
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/DependencyDescriptor.java
| 346
|
[] | true
| 4
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
devices
|
def devices(self):
"""
The devices supported by PyTorch.
Returns
-------
devices : list[Device]
The devices supported by PyTorch.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.devices()
[device(type='cpu'), device(type='mps', index=0), device(type='meta')]
"""
# Torch doesn't have a straightforward way to get the list of all
# currently supported devices. To do this, we first parse the error
# message of torch.device to get the list of all possible types of
# device:
try:
torch.device('notadevice')
raise AssertionError("unreachable") # pragma: nocover
except RuntimeError as e:
# The error message is something like:
# "Expected one of cpu, cuda, ipu, xpu, mkldnn, opengl, opencl, ideep, hip, ve, fpga, ort, xla, lazy, vulkan, mps, meta, hpu, mtia, privateuseone device type at start of device string: notadevice"
devices_names = e.args[0].split('Expected one of ')[1].split(' device type')[0].split(', ')
# Next we need to check for different indices for different devices.
# device(device_name, index=index) doesn't actually check if the
# device name or index is valid. We have to try to create a tensor
# with it (which is why this function is cached).
devices = []
for device_name in devices_names:
i = 0
while True:
try:
a = torch.empty((0,), device=torch.device(device_name, index=i))
if a.device in devices:
break
devices.append(a.device)
except:
break
i += 1
return devices
|
The devices supported by PyTorch.
Returns
-------
devices : list[Device]
The devices supported by PyTorch.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.devices()
[device(type='cpu'), device(type='mps', index=0), device(type='meta')]
|
python
|
sklearn/externals/array_api_compat/torch/_info.py
| 317
|
[
"self"
] | false
| 4
| 7.04
|
scikit-learn/scikit-learn
| 64,340
|
unknown
| false
|
|
isPrivateOrNotVisible
|
private static boolean isPrivateOrNotVisible(Method method, Class<?> beanClass) {
int modifiers = method.getModifiers();
if (Modifier.isPrivate(modifiers)) {
return true;
}
// Method is declared in a class that resides in a different package
// than the bean class and the method is neither public nor protected?
return (!method.getDeclaringClass().getPackageName().equals(beanClass.getPackageName()) &&
!(Modifier.isPublic(modifiers) || Modifier.isProtected(modifiers)));
}
|
Determine if the supplied lifecycle {@link Method} is private or not
visible to the supplied bean {@link Class}.
@since 6.0.11
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/annotation/InitDestroyAnnotationBeanPostProcessor.java
| 472
|
[
"method",
"beanClass"
] | true
| 4
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
toKey
|
function toKey(value) {
if (typeof value == 'string' || isSymbol(value)) {
return value;
}
var result = (value + '');
return (result == '0' && (1 / value) == -INFINITY) ? '-0' : result;
}
|
Converts `value` to a string key if it's not a string or symbol.
@private
@param {*} value The value to inspect.
@returns {string|symbol} Returns the key.
|
javascript
|
lodash.js
| 6,856
|
[
"value"
] | false
| 5
| 6.24
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
contextProtocol
|
private String contextProtocol() {
if (supportedProtocols.isEmpty()) {
throw new SslConfigException("no SSL/TLS protocols have been configured");
}
for (Entry<String, String> entry : ORDERED_PROTOCOL_ALGORITHM_MAP.entrySet()) {
if (supportedProtocols.contains(entry.getKey())) {
return entry.getValue();
}
}
throw new SslConfigException(
"no supported SSL/TLS protocol was found in the configured supported protocols: " + supportedProtocols
);
}
|
Picks the best (highest security / most recent standard) SSL/TLS protocol (/version) that is supported by the
{@link #supportedProtocols() configured protocols}.
|
java
|
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfiguration.java
| 147
|
[] |
String
| true
| 3
| 6.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
applyReadLocked
|
public <T> T applyReadLocked(final FailableFunction<O, T, ?> function) {
return lockApplyUnlock(readLockSupplier, function);
}
|
Provides read (shared, non-exclusive) access to The object to protect for the purpose of computing a
result object. More precisely, what the method will do (in the given order):
<ol>
<li>Obtain a read (shared) lock on The object to protect. The current thread may block, until such a
lock is granted.</li>
<li>Invokes the given {@link FailableFunction function}, passing the locked object as the parameter,
receiving the functions result.</li>
<li>Release the lock, as soon as the consumers invocation is done. If the invocation results in an error, the
lock will be released anyways.</li>
<li>Return the result object, that has been received from the functions invocation.</li>
</ol>
<p>
<em>Example:</em> Consider that the hidden object is a list, and we wish to know the current size of the
list. This might be achieved with the following:
</p>
<pre>{@code
private Lock<List<Object>> listLock;
public int getCurrentListSize() {
final Integer sizeInteger = listLock.applyReadLocked(list -> Integer.valueOf(list.size));
return sizeInteger.intValue();
}
}
</pre>
@param <T> The result type (both the functions, and this method's.)
@param function The function, which is being invoked to compute the result. The function will receive the
hidden object.
@return The result object, which has been returned by the functions invocation.
@throws IllegalStateException The result object would be, in fact, the hidden object. This would extend
access to the hidden object beyond this methods lifetime and will therefore be prevented.
@see #acceptReadLocked(FailableConsumer)
@see #applyWriteLocked(FailableFunction)
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/locks/LockingVisitors.java
| 370
|
[
"function"
] |
T
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
wrapperValue
|
function wrapperValue() {
return baseWrapperValue(this.__wrapped__, this.__actions__);
}
|
Executes the chain sequence to resolve the unwrapped value.
@name value
@memberOf _
@since 0.1.0
@alias toJSON, valueOf
@category Seq
@returns {*} Returns the resolved unwrapped value.
@example
_([1, 2, 3]).value();
// => [1, 2, 3]
|
javascript
|
lodash.js
| 9,152
|
[] | false
| 1
| 7.44
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
remove
|
private static Object remove(final Object array, final int index) {
final int length = getLength(array);
if (index < 0 || index >= length) {
throw new IndexOutOfBoundsException("Index: " + index + ", Length: " + length);
}
final Object result = Array.newInstance(array.getClass().getComponentType(), length - 1);
System.arraycopy(array, 0, result, 0, index);
if (index < length - 1) {
System.arraycopy(array, index + 1, result, index, length - index - 1);
}
return result;
}
|
Removes the element at the specified position from the specified array. All subsequent elements are shifted to the left (subtracts one from their
indices).
<p>
This method returns a new array with the same elements of the input array except the element on the specified position. The component type of the
returned array is always the same as that of the input array.
</p>
<p>
If the input array is {@code null}, an IndexOutOfBoundsException will be thrown, because in that case no valid index can be specified.
</p>
@param array the array to remove the element from, may not be {@code null}.
@param index the position of the element to be removed.
@return A new array containing the existing elements except the element at the specified position.
@throws IndexOutOfBoundsException if the index is out of range (index < 0 || index >= array.length), or if the array is {@code null}.
@since 2.1
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 4,872
|
[
"array",
"index"
] |
Object
| true
| 4
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
lastIndexOf
|
public int lastIndexOf(final CharSequence str, final CharSequence searchStr) {
if (str == null) {
return INDEX_NOT_FOUND;
}
return lastIndexOf(str, searchStr, str.length());
}
|
Finds the last index within a CharSequence, handling {@code null}. This method uses {@link String#lastIndexOf(String)} if possible.
<p>
A {@code null} CharSequence will return {@code -1}.
</p>
<p>
Case-sensitive examples
</p>
<pre>
Strings.CS.lastIndexOf(null, *) = -1
Strings.CS.lastIndexOf(*, null) = -1
Strings.CS.lastIndexOf("", "") = 0
Strings.CS.lastIndexOf("aabaabaa", "a") = 7
Strings.CS.lastIndexOf("aabaabaa", "b") = 5
Strings.CS.lastIndexOf("aabaabaa", "ab") = 4
Strings.CS.lastIndexOf("aabaabaa", "") = 8
</pre>
<p>
Case-insensitive examples
</p>
<pre>
Strings.CI.lastIndexOf(null, *) = -1
Strings.CI.lastIndexOf(*, null) = -1
Strings.CI.lastIndexOf("aabaabaa", "A") = 7
Strings.CI.lastIndexOf("aabaabaa", "B") = 5
Strings.CI.lastIndexOf("aabaabaa", "AB") = 4
</pre>
@param str the CharSequence to check, may be null
@param searchStr the CharSequence to find, may be null
@return the last index of the search String, -1 if no match or {@code null} string input
|
java
|
src/main/java/org/apache/commons/lang3/Strings.java
| 916
|
[
"str",
"searchStr"
] | true
| 2
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
load_arff_from_gzip_file
|
def load_arff_from_gzip_file(
gzip_file,
parser,
output_type,
openml_columns_info,
feature_names_to_select,
target_names_to_select,
shape=None,
read_csv_kwargs=None,
):
"""Load a compressed ARFF file using a given parser.
Parameters
----------
gzip_file : GzipFile instance
The file compressed to be read.
parser : {"pandas", "liac-arff"}
The parser used to parse the ARFF file. "pandas" is recommended
but only supports loading dense datasets.
output_type : {"numpy", "sparse", "pandas"}
The type of the arrays that will be returned. The possibilities ara:
- `"numpy"`: both `X` and `y` will be NumPy arrays;
- `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array;
- `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a
pandas Series or DataFrame.
openml_columns_info : dict
The information provided by OpenML regarding the columns of the ARFF
file.
feature_names_to_select : list of str
A list of the feature names to be selected.
target_names_to_select : list of str
A list of the target names to be selected.
read_csv_kwargs : dict, default=None
Keyword arguments to pass to `pandas.read_csv`. It allows to overwrite
the default options.
Returns
-------
X : {ndarray, sparse matrix, dataframe}
The data matrix.
y : {ndarray, dataframe, series}
The target.
frame : dataframe or None
A dataframe containing both `X` and `y`. `None` if
`output_array_type != "pandas"`.
categories : list of str or None
The names of the features that are categorical. `None` if
`output_array_type == "pandas"`.
"""
if parser == "liac-arff":
return _liac_arff_parser(
gzip_file,
output_type,
openml_columns_info,
feature_names_to_select,
target_names_to_select,
shape,
)
elif parser == "pandas":
return _pandas_arff_parser(
gzip_file,
output_type,
openml_columns_info,
feature_names_to_select,
target_names_to_select,
read_csv_kwargs,
)
else:
raise ValueError(
f"Unknown parser: '{parser}'. Should be 'liac-arff' or 'pandas'."
)
|
Load a compressed ARFF file using a given parser.
Parameters
----------
gzip_file : GzipFile instance
The file compressed to be read.
parser : {"pandas", "liac-arff"}
The parser used to parse the ARFF file. "pandas" is recommended
but only supports loading dense datasets.
output_type : {"numpy", "sparse", "pandas"}
The type of the arrays that will be returned. The possibilities ara:
- `"numpy"`: both `X` and `y` will be NumPy arrays;
- `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array;
- `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a
pandas Series or DataFrame.
openml_columns_info : dict
The information provided by OpenML regarding the columns of the ARFF
file.
feature_names_to_select : list of str
A list of the feature names to be selected.
target_names_to_select : list of str
A list of the target names to be selected.
read_csv_kwargs : dict, default=None
Keyword arguments to pass to `pandas.read_csv`. It allows to overwrite
the default options.
Returns
-------
X : {ndarray, sparse matrix, dataframe}
The data matrix.
y : {ndarray, dataframe, series}
The target.
frame : dataframe or None
A dataframe containing both `X` and `y`. `None` if
`output_array_type != "pandas"`.
categories : list of str or None
The names of the features that are categorical. `None` if
`output_array_type == "pandas"`.
|
python
|
sklearn/datasets/_arff_parser.py
| 463
|
[
"gzip_file",
"parser",
"output_type",
"openml_columns_info",
"feature_names_to_select",
"target_names_to_select",
"shape",
"read_csv_kwargs"
] | false
| 4
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
describe_numeric_1d
|
def describe_numeric_1d(series: Series, percentiles: Sequence[float]) -> Series:
"""Describe series containing numerical data.
Parameters
----------
series : Series
Series to be described.
percentiles : list-like of numbers
The percentiles to include in the output.
"""
from pandas import Series
formatted_percentiles = format_percentiles(percentiles)
if len(percentiles) == 0:
quantiles = []
else:
quantiles = series.quantile(percentiles).tolist()
stat_index = ["count", "mean", "std", "min"] + formatted_percentiles + ["max"]
d = (
[series.count(), series.mean(), series.std(), series.min()]
+ quantiles
+ [series.max()]
)
# GH#48340 - always return float on non-complex numeric data
dtype: DtypeObj | None
if isinstance(series.dtype, ExtensionDtype):
if isinstance(series.dtype, ArrowDtype):
if series.dtype.kind == "m":
# GH53001: describe timedeltas with object dtype
dtype = None
else:
import pyarrow as pa
dtype = ArrowDtype(pa.float64())
else:
dtype = Float64Dtype()
elif series.dtype.kind in "iufb":
# i.e. numeric but exclude complex dtype
dtype = np.dtype("float")
else:
dtype = None
return Series(d, index=stat_index, name=series.name, dtype=dtype)
|
Describe series containing numerical data.
Parameters
----------
series : Series
Series to be described.
percentiles : list-like of numbers
The percentiles to include in the output.
|
python
|
pandas/core/methods/describe.py
| 221
|
[
"series",
"percentiles"
] |
Series
| true
| 10
| 6.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
get_installed_libraries
|
def get_installed_libraries():
"""
Return the built-in template tag libraries and those from installed
applications. Libraries are stored in a dictionary where keys are the
individual module names, not the full module paths. Example:
django.templatetags.i18n is stored as i18n.
"""
return {
module_name: full_name for module_name, full_name in get_template_tag_modules()
}
|
Return the built-in template tag libraries and those from installed
applications. Libraries are stored in a dictionary where keys are the
individual module names, not the full module paths. Example:
django.templatetags.i18n is stored as i18n.
|
python
|
django/template/backends/django.py
| 155
|
[] | false
| 1
| 6.64
|
django/django
| 86,204
|
unknown
| false
|
|
formatDurationWords
|
public static String formatDurationWords(
final long durationMillis,
final boolean suppressLeadingZeroElements,
final boolean suppressTrailingZeroElements) {
// This method is generally replaceable by the format method, but
// there are a series of tweaks and special cases that require
// trickery to replicate.
String duration = formatDuration(durationMillis, "d' days 'H' hours 'm' minutes 's' seconds'");
if (suppressLeadingZeroElements) {
// this is a temporary marker on the front. Like ^ in regexp.
duration = " " + duration;
final String text = duration;
String tmp = Strings.CS.replaceOnce(text, " 0 days", StringUtils.EMPTY);
if (tmp.length() != duration.length()) {
duration = tmp;
final String text1 = duration;
tmp = Strings.CS.replaceOnce(text1, " 0 hours", StringUtils.EMPTY);
if (tmp.length() != duration.length()) {
duration = tmp;
final String text2 = duration;
tmp = Strings.CS.replaceOnce(text2, " 0 minutes", StringUtils.EMPTY);
duration = tmp;
}
}
if (!duration.isEmpty()) {
// strip the space off again
duration = duration.substring(1);
}
}
if (suppressTrailingZeroElements) {
final String text = duration;
String tmp = Strings.CS.replaceOnce(text, " 0 seconds", StringUtils.EMPTY);
if (tmp.length() != duration.length()) {
duration = tmp;
final String text1 = duration;
tmp = Strings.CS.replaceOnce(text1, " 0 minutes", StringUtils.EMPTY);
if (tmp.length() != duration.length()) {
duration = tmp;
final String text2 = duration;
tmp = Strings.CS.replaceOnce(text2, " 0 hours", StringUtils.EMPTY);
if (tmp.length() != duration.length()) {
final String text3 = tmp;
duration = Strings.CS.replaceOnce(text3, " 0 days", StringUtils.EMPTY);
}
}
}
}
// handle plurals
duration = " " + duration;
final String text = duration;
duration = Strings.CS.replaceOnce(text, " 1 seconds", " 1 second");
final String text1 = duration;
duration = Strings.CS.replaceOnce(text1, " 1 minutes", " 1 minute");
final String text2 = duration;
duration = Strings.CS.replaceOnce(text2, " 1 hours", " 1 hour");
final String text3 = duration;
duration = Strings.CS.replaceOnce(text3, " 1 days", " 1 day");
return duration.trim();
}
|
Formats an elapsed time into a pluralization correct string.
<p>This method formats durations using the days and lower fields of the
format pattern. Months and larger are not used.</p>
@param durationMillis the elapsed time to report in milliseconds
@param suppressLeadingZeroElements suppresses leading 0 elements
@param suppressTrailingZeroElements suppresses trailing 0 elements
@return the formatted text in days/hours/minutes/seconds, not null
@throws IllegalArgumentException if durationMillis is negative
|
java
|
src/main/java/org/apache/commons/lang3/time/DurationFormatUtils.java
| 430
|
[
"durationMillis",
"suppressLeadingZeroElements",
"suppressTrailingZeroElements"
] |
String
| true
| 9
| 7.52
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
create_bucket
|
def create_bucket(self, bucket_name: str | None = None, region_name: str | None = None) -> None:
"""
Create an Amazon S3 bucket.
.. seealso::
- :external+boto3:py:meth:`S3.Client.create_bucket`
:param bucket_name: The name of the bucket
:param region_name: The name of the aws region in which to create the bucket.
"""
if not region_name:
if self.conn_region_name == "aws-global":
raise AirflowException(
"Unable to create bucket if `region_name` not set "
"and boto3 configured to use s3 regional endpoints."
)
region_name = self.conn_region_name
if region_name == "us-east-1":
self.get_conn().create_bucket(Bucket=bucket_name)
else:
self.get_conn().create_bucket(
Bucket=bucket_name,
CreateBucketConfiguration={"LocationConstraint": region_name},
)
|
Create an Amazon S3 bucket.
.. seealso::
- :external+boto3:py:meth:`S3.Client.create_bucket`
:param bucket_name: The name of the bucket
:param region_name: The name of the aws region in which to create the bucket.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/s3.py
| 340
|
[
"self",
"bucket_name",
"region_name"
] |
None
| true
| 5
| 6.4
|
apache/airflow
| 43,597
|
sphinx
| false
|
matches
|
public abstract boolean matches(Method method);
|
Subclasses must override this to indicate whether they <em>match</em> the
given method. This allows for argument list checking as well as method
name checking.
@param method the method to check
@return whether this override matches the given method
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/MethodOverride.java
| 104
|
[
"method"
] | true
| 1
| 6.64
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
toBin
|
int toBin(double value);
|
Determine the 0-based bin number in which the supplied value should be placed.
@param value the value
@return the 0-based index of the bin
|
java
|
clients/src/main/java/org/apache/kafka/common/metrics/stats/Histogram.java
| 99
|
[
"value"
] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
optBoolean
|
public boolean optBoolean(int index) {
return optBoolean(index, false);
}
|
Returns the value at {@code index} if it exists and is a boolean or can be coerced
to a boolean. Returns false otherwise.
@param index the index to get the value from
@return the {@code value} or {@code false}
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONArray.java
| 341
|
[
"index"
] | true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
build
|
@Override
public MetadataRequest build(short version) {
if (version < 1)
throw new UnsupportedVersionException("MetadataRequest versions older than 1 are not supported.");
if (!data.allowAutoTopicCreation() && version < 4)
throw new UnsupportedVersionException("MetadataRequest versions older than 4 don't support the " +
"allowAutoTopicCreation field");
if (data.topics() != null) {
data.topics().forEach(topic -> {
if (topic.name() == null && version < 12)
throw new UnsupportedVersionException("MetadataRequest version " + version +
" does not support null topic names.");
if (!Uuid.ZERO_UUID.equals(topic.topicId()) && version < 12)
throw new UnsupportedVersionException("MetadataRequest version " + version +
" does not support non-zero topic IDs.");
});
}
return new MetadataRequest(data, version);
}
|
@return Builder for metadata request using topic IDs.
|
java
|
clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java
| 125
|
[
"version"
] |
MetadataRequest
| true
| 9
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
getLocalPropertyHandler
|
@Override
protected @Nullable PropertyHandler getLocalPropertyHandler(String propertyName) {
PropertyDescriptor pd = getCachedIntrospectionResults().getPropertyDescriptor(propertyName);
return (pd != null ? new BeanPropertyHandler((GenericTypeAwarePropertyDescriptor) pd) : null);
}
|
Convert the given value for the specified property to the latter's type.
<p>This method is only intended for optimizations in a BeanFactory.
Use the {@code convertIfNecessary} methods for programmatic conversion.
@param value the value to convert
@param propertyName the target property
(note that nested or indexed properties are not supported here)
@return the new value, possibly the result of type conversion
@throws TypeMismatchException if type conversion failed
|
java
|
spring-beans/src/main/java/org/springframework/beans/BeanWrapperImpl.java
| 191
|
[
"propertyName"
] |
PropertyHandler
| true
| 2
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
min
|
def min(self, *, skipna: bool = True, **kwargs):
"""
The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`, NA value if empty
"""
nv.validate_minmax_axis(kwargs.get("axis", 0))
nv.validate_min((), kwargs)
self.check_for_ordered("min")
if not len(self._codes):
return self.dtype.na_value
good = self._codes != -1
if not good.all():
if skipna and good.any():
pointer = self._codes[good].min()
else:
return np.nan
else:
pointer = self._codes.min()
return self._wrap_reduction_result(None, pointer)
|
The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`, NA value if empty
|
python
|
pandas/core/arrays/categorical.py
| 2,464
|
[
"self",
"skipna"
] | true
| 7
| 6.56
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
|
to_frame
|
def to_frame(self, name: Hashable = lib.no_default) -> DataFrame:
"""
Convert Series to DataFrame.
Parameters
----------
name : object, optional
The passed name should substitute for the series name (if it has
one).
Returns
-------
DataFrame
DataFrame representation of Series.
See Also
--------
Series.to_dict : Convert Series to dict object.
Examples
--------
>>> s = pd.Series(["a", "b", "c"], name="vals")
>>> s.to_frame()
vals
0 a
1 b
2 c
"""
columns: Index
if name is lib.no_default:
name = self.name
if name is None:
# default to [0], same as we would get with DataFrame(self)
columns = default_index(1)
else:
columns = Index([name])
else:
columns = Index([name])
mgr = self._mgr.to_2d_mgr(columns)
df = self._constructor_expanddim_from_mgr(mgr, axes=mgr.axes)
return df.__finalize__(self, method="to_frame")
|
Convert Series to DataFrame.
Parameters
----------
name : object, optional
The passed name should substitute for the series name (if it has
one).
Returns
-------
DataFrame
DataFrame representation of Series.
See Also
--------
Series.to_dict : Convert Series to dict object.
Examples
--------
>>> s = pd.Series(["a", "b", "c"], name="vals")
>>> s.to_frame()
vals
0 a
1 b
2 c
|
python
|
pandas/core/series.py
| 1,810
|
[
"self",
"name"
] |
DataFrame
| true
| 5
| 8.64
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
get
|
public static ResourceLoader get(ResourceLoader resourceLoader, boolean preferFileResolution) {
Assert.notNull(resourceLoader, "'resourceLoader' must not be null");
return get(resourceLoader, SpringFactoriesLoader.forDefaultResourceLocation(resourceLoader.getClassLoader()),
preferFileResolution);
}
|
Return a {@link ResourceLoader} delegating to the given resource loader and
supporting additional {@link ProtocolResolver ProtocolResolvers} registered in
{@code spring.factories}. The factories file will be resolved using the default
class loader at the time this call is made.
@param resourceLoader the delegate resource loader
@param preferFileResolution if file based resolution is preferred when a suitable
{@link FilePathResolver} support the resource
@return a {@link ResourceLoader} instance
@since 3.4.1
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/io/ApplicationResourceLoader.java
| 139
|
[
"resourceLoader",
"preferFileResolution"
] |
ResourceLoader
| true
| 1
| 6.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
replace
|
function replace() {
var args = arguments,
string = toString(args[0]);
return args.length < 3 ? string : string.replace(args[1], args[2]);
}
|
Replaces matches for `pattern` in `string` with `replacement`.
**Note:** This method is based on
[`String#replace`](https://mdn.io/String/replace).
@static
@memberOf _
@since 4.0.0
@category String
@param {string} [string=''] The string to modify.
@param {RegExp|string} pattern The pattern to replace.
@param {Function|string} replacement The match replacement.
@returns {string} Returns the modified string.
@example
_.replace('Hi Fred', 'Fred', 'Barney');
// => 'Hi Barney'
|
javascript
|
lodash.js
| 14,643
|
[] | false
| 2
| 8.4
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
parseType
|
private ProjectType parseType(JSONObject object, @Nullable String defaultId) throws JSONException {
String id = getStringValue(object, ID_ATTRIBUTE, null);
String name = getStringValue(object, NAME_ATTRIBUTE, null);
String action = getStringValue(object, ACTION_ATTRIBUTE, null);
Assert.state(id != null, "'id' must not be null");
boolean defaultType = id.equals(defaultId);
Map<String, String> tags = new HashMap<>();
if (object.has("tags")) {
JSONObject jsonTags = object.getJSONObject("tags");
tags.putAll(parseStringItems(jsonTags));
}
Assert.state(name != null, "'name' must not be null");
Assert.state(action != null, "'action' must not be null");
return new ProjectType(id, name, action, defaultType, tags);
}
|
Returns the defaults applicable to the service.
@return the defaults of the service
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/init/InitializrServiceMetadata.java
| 195
|
[
"object",
"defaultId"
] |
ProjectType
| true
| 2
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getGenericReturnType
|
@Override
Type getGenericReturnType() {
Class<?> declaringClass = getDeclaringClass();
TypeVariable<?>[] typeParams = declaringClass.getTypeParameters();
if (typeParams.length > 0) {
return Types.newParameterizedType(declaringClass, typeParams);
} else {
return declaringClass;
}
}
|
If the class is parameterized, such as {@link java.util.ArrayList ArrayList}, this returns
{@code ArrayList<E>}.
|
java
|
android/guava/src/com/google/common/reflect/Invokable.java
| 423
|
[] |
Type
| true
| 2
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
ensure_index_from_sequences
|
def ensure_index_from_sequences(sequences, names=None) -> Index:
"""
Construct an index from sequences of data.
A single sequence returns an Index. Many sequences returns a
MultiIndex.
Parameters
----------
sequences : sequence of sequences
names : sequence of str
Returns
-------
index : Index or MultiIndex
Examples
--------
>>> ensure_index_from_sequences([[1, 2, 4]], names=["name"])
Index([1, 2, 4], dtype='int64', name='name')
>>> ensure_index_from_sequences([["a", "a"], ["a", "b"]], names=["L1", "L2"])
MultiIndex([('a', 'a'),
('a', 'b')],
names=['L1', 'L2'])
See Also
--------
ensure_index
"""
from pandas.core.indexes.api import default_index
from pandas.core.indexes.multi import MultiIndex
if len(sequences) == 0:
return default_index(0)
elif len(sequences) == 1:
if names is not None:
names = names[0]
return Index(maybe_sequence_to_range(sequences[0]), name=names)
else:
# TODO: Apply maybe_sequence_to_range to sequences?
return MultiIndex.from_arrays(sequences, names=names)
|
Construct an index from sequences of data.
A single sequence returns an Index. Many sequences returns a
MultiIndex.
Parameters
----------
sequences : sequence of sequences
names : sequence of str
Returns
-------
index : Index or MultiIndex
Examples
--------
>>> ensure_index_from_sequences([[1, 2, 4]], names=["name"])
Index([1, 2, 4], dtype='int64', name='name')
>>> ensure_index_from_sequences([["a", "a"], ["a", "b"]], names=["L1", "L2"])
MultiIndex([('a', 'a'),
('a', 'b')],
names=['L1', 'L2'])
See Also
--------
ensure_index
|
python
|
pandas/core/indexes/base.py
| 7,693
|
[
"sequences",
"names"
] |
Index
| true
| 5
| 8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
__contains__
|
def __contains__(self, key: Any) -> bool:
"""
Return a boolean indicating whether the provided key is in the index.
Parameters
----------
key : label
The key to check if it is present in the index.
Returns
-------
bool
Whether the key search is in the index.
Raises
------
TypeError
If the key is not hashable.
See Also
--------
Index.isin : Returns an ndarray of boolean dtype indicating whether the
list-like key is in the index.
Examples
--------
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx
Index([1, 2, 3, 4], dtype='int64')
>>> 2 in idx
True
>>> 6 in idx
False
"""
hash(key)
try:
return key in self._engine
except (OverflowError, TypeError, ValueError):
return False
|
Return a boolean indicating whether the provided key is in the index.
Parameters
----------
key : label
The key to check if it is present in the index.
Returns
-------
bool
Whether the key search is in the index.
Raises
------
TypeError
If the key is not hashable.
See Also
--------
Index.isin : Returns an ndarray of boolean dtype indicating whether the
list-like key is in the index.
Examples
--------
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx
Index([1, 2, 3, 4], dtype='int64')
>>> 2 in idx
True
>>> 6 in idx
False
|
python
|
pandas/core/indexes/base.py
| 5,243
|
[
"self",
"key"
] |
bool
| true
| 1
| 7.28
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
hashCode
|
@Override
public int hashCode() {
int hashCode = getProperty().hashCode();
hashCode = 29 * hashCode + (isIgnoreCase() ? 1 : 0);
hashCode = 29 * hashCode + (isAscending() ? 1 : 0);
return hashCode;
}
|
Return whether to toggle the ascending flag if the same property gets set again
(that is, {@code setProperty} gets called with already set property name again).
|
java
|
spring-beans/src/main/java/org/springframework/beans/support/MutableSortDefinition.java
| 166
|
[] | true
| 3
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
register
|
def register() -> None:
"""
Register pandas formatters and converters with matplotlib.
This function modifies the global ``matplotlib.units.registry``
dictionary. pandas adds custom converters for
* pd.Timestamp
* pd.Period
* np.datetime64
* datetime.datetime
* datetime.date
* datetime.time
See Also
--------
deregister_matplotlib_converters : Remove pandas formatters and converters.
Examples
--------
.. plot::
:context: close-figs
The following line is done automatically by pandas so
the plot can be rendered:
>>> pd.plotting.register_matplotlib_converters()
>>> df = pd.DataFrame(
... {"ts": pd.period_range("2020", periods=2, freq="M"), "y": [1, 2]}
... )
>>> plot = df.plot.line(x="ts", y="y")
Unsetting the register manually an error will be raised:
>>> pd.set_option(
... "plotting.matplotlib.register_converters", False
... ) # doctest: +SKIP
>>> df.plot.line(x="ts", y="y") # doctest: +SKIP
Traceback (most recent call last):
TypeError: float() argument must be a string or a real number, not 'Period'
"""
plot_backend = _get_plot_backend("matplotlib")
plot_backend.register()
|
Register pandas formatters and converters with matplotlib.
This function modifies the global ``matplotlib.units.registry``
dictionary. pandas adds custom converters for
* pd.Timestamp
* pd.Period
* np.datetime64
* datetime.datetime
* datetime.date
* datetime.time
See Also
--------
deregister_matplotlib_converters : Remove pandas formatters and converters.
Examples
--------
.. plot::
:context: close-figs
The following line is done automatically by pandas so
the plot can be rendered:
>>> pd.plotting.register_matplotlib_converters()
>>> df = pd.DataFrame(
... {"ts": pd.period_range("2020", periods=2, freq="M"), "y": [1, 2]}
... )
>>> plot = df.plot.line(x="ts", y="y")
Unsetting the register manually an error will be raised:
>>> pd.set_option(
... "plotting.matplotlib.register_converters", False
... ) # doctest: +SKIP
>>> df.plot.line(x="ts", y="y") # doctest: +SKIP
Traceback (most recent call last):
TypeError: float() argument must be a string or a real number, not 'Period'
|
python
|
pandas/plotting/_misc.py
| 86
|
[] |
None
| true
| 1
| 6.64
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
dumps
|
def dumps(obj):
'''Serialize an object representing the ARFF document, returning a string.
:param obj: a dictionary.
:return: a string with the ARFF document.
'''
encoder = ArffEncoder()
return encoder.encode(obj)
|
Serialize an object representing the ARFF document, returning a string.
:param obj: a dictionary.
:return: a string with the ARFF document.
|
python
|
sklearn/externals/_arff.py
| 1,099
|
[
"obj"
] | false
| 1
| 6.24
|
scikit-learn/scikit-learn
| 64,340
|
sphinx
| false
|
|
getLong
|
public long getLong(int index) throws JSONException {
Object object = get(index);
Long result = JSON.toLong(object);
if (result == null) {
throw JSON.typeMismatch(index, object, "long");
}
return result;
}
|
Returns the value at {@code index} if it exists and is a long or can be coerced to
a long.
@param index the index to get the value from
@return the {@code value}
@throws JSONException if the value at {@code index} doesn't exist or cannot be
coerced to a long.
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONArray.java
| 446
|
[
"index"
] | true
| 2
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
generateInstanceSupplierCode
|
@Override
public CodeBlock generateInstanceSupplierCode(
GenerationContext generationContext, BeanRegistrationCode beanRegistrationCode,
boolean allowDirectSupplierShortcut) {
if (hasInstanceSupplier()) {
throw new AotBeanProcessingException(this.registeredBean, "instance supplier is not supported");
}
return new InstanceSupplierCodeGenerator(generationContext,
beanRegistrationCode.getClassName(), beanRegistrationCode.getMethods(), allowDirectSupplierShortcut)
.generateCode(this.registeredBean, this.instantiationDescriptor.get());
}
|
Extract the target class of a public {@link FactoryBean} based on its
constructor. If the implementation does not resolve the target class
because it itself uses a generic, attempt to extract it from the bean type.
@param factoryBeanType the factory bean type
@param beanType the bean type
@return the target class to use
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/DefaultBeanRegistrationCodeFragments.java
| 225
|
[
"generationContext",
"beanRegistrationCode",
"allowDirectSupplierShortcut"
] |
CodeBlock
| true
| 2
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
weakCompareAndSet
|
public final boolean weakCompareAndSet(int i, double expect, double update) {
return longs.weakCompareAndSet(i, doubleToRawLongBits(expect), doubleToRawLongBits(update));
}
|
Atomically sets the element at position {@code i} to the given updated value if the current
value is <a href="#bitEquals">bitwise equal</a> to the expected value.
<p>May <a
href="http://download.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/package-summary.html#Spurious">
fail spuriously</a> and does not provide ordering guarantees, so is only rarely an appropriate
alternative to {@code compareAndSet}.
@param i the index
@param expect the expected value
@param update the new value
@return true if successful
|
java
|
android/guava/src/com/google/common/util/concurrent/AtomicDoubleArray.java
| 166
|
[
"i",
"expect",
"update"
] | true
| 1
| 6.16
|
google/guava
| 51,352
|
javadoc
| false
|
|
Buffer
|
function Buffer(arg, encodingOrOffset, length) {
showFlaggedDeprecation();
// Common case.
if (typeof arg === 'number') {
if (typeof encodingOrOffset === 'string') {
throw new ERR_INVALID_ARG_TYPE('string', 'string', arg);
}
return Buffer.alloc(arg);
}
return Buffer.from(arg, encodingOrOffset, length);
}
|
The Buffer() constructor is deprecated in documentation and should not be
used moving forward. Rather, developers should use one of the three new
factory APIs: Buffer.from(), Buffer.allocUnsafe() or Buffer.alloc() based on
their specific needs. There is no runtime deprecation because of the extent
to which the Buffer constructor is used in the ecosystem currently -- a
runtime deprecation would introduce too much breakage at this time. It's not
likely that the Buffer constructors would ever actually be removed.
Deprecation Code: DEP0005
@returns {Buffer}
|
javascript
|
lib/buffer.js
| 274
|
[
"arg",
"encodingOrOffset",
"length"
] | false
| 3
| 6.08
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
getPackageJSONURL
|
function getPackageJSONURL(specifier, base) {
const { packageName, packageSubpath, isScoped } = parsePackageName(specifier, base);
// ResolveSelf
const packageConfig = getPackageScopeConfig(base);
if (packageConfig.exists) {
if (packageConfig.exports != null && packageConfig.name === packageName) {
const packageJSONPath = packageConfig.pjsonPath;
return { packageJSONUrl: pathToFileURL(packageJSONPath), packageJSONPath, packageSubpath };
}
}
let packageJSONUrl = new URL(`./node_modules/${packageName}/package.json`, base);
let packageJSONPath = fileURLToPath(packageJSONUrl);
let lastPath;
do {
const stat = internalFsBinding.internalModuleStat(
StringPrototypeSlice(packageJSONPath, 0, packageJSONPath.length - 13),
);
// Check for !stat.isDirectory()
if (stat !== 1) {
lastPath = packageJSONPath;
packageJSONUrl = new URL(
`${isScoped ? '../' : ''}../../../node_modules/${packageName}/package.json`,
packageJSONUrl,
);
packageJSONPath = fileURLToPath(packageJSONUrl);
continue;
}
// Package match.
return { packageJSONUrl, packageJSONPath, packageSubpath };
} while (packageJSONPath.length !== lastPath.length);
throw new ERR_MODULE_NOT_FOUND(packageName, fileURLToPath(base), null);
}
|
Parse a package name from a specifier.
@param {string} specifier - The import specifier.
@param {string | URL | undefined} base - The parent URL.
@returns {object}
|
javascript
|
lib/internal/modules/package_json_reader.js
| 282
|
[
"specifier",
"base"
] | false
| 6
| 6.4
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
isOrderValid
|
static bool isOrderValid(const RecordDecl *RD, ArrayRef<unsigned> FieldOrder) {
if (FieldOrder.empty())
return false;
// If there is a flexible array member in the struct, it must remain the last
// field.
if (RD->hasFlexibleArrayMember() &&
FieldOrder.back() != FieldOrder.size() - 1) {
llvm::errs()
<< "Flexible array member must remain the last field in the struct\n";
return false;
}
return true;
}
|
\returns empty vector if the list of fields doesn't match the definition.
|
cpp
|
clang-tools-extra/clang-reorder-fields/ReorderFieldsAction.cpp
| 167
|
[
"FieldOrder"
] | true
| 4
| 6
|
llvm/llvm-project
| 36,021
|
doxygen
| false
|
|
loadKeyStore
|
private void loadKeyStore(KeyStore store, @Nullable String location, char @Nullable [] password) {
Assert.state(StringUtils.hasText(location), () -> "Location must not be empty or null");
try {
try (InputStream stream = this.resourceLoader.getResource(location).getInputStream()) {
store.load(stream, password);
}
}
catch (Exception ex) {
throw new IllegalStateException("Could not load store from '" + location + "'", ex);
}
}
|
Create a new {@link JksSslStoreBundle} instance.
@param keyStoreDetails the key store details
@param trustStoreDetails the trust store details
@param resourceLoader the resource loader used to load content
@since 3.3.5
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/jks/JksSslStoreBundle.java
| 134
|
[
"store",
"location",
"password"
] |
void
| true
| 2
| 6.4
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
IOBufIovecBuilder
|
IOBufIovecBuilder(IOBufIovecBuilder&&) = delete;
|
This is a helper class that is passed to IOBuf::takeOwnership()
for use as the custom free function.
This class allows multiple IOBuf objects to each point to non-overlapping
sections of the same buffer, allowing each IOBuf to consider its buffer
as non-shared even though they do share a single allocation. This class
performs additional reference counting to ensure that the entire allocation
is freed only when all IOBufs referring to it have been destroyed.
|
cpp
|
folly/io/IOBufIovecBuilder.h
| 109
|
[] | true
| 2
| 6.48
|
facebook/folly
| 30,157
|
doxygen
| false
|
|
build
|
public Send build() {
flushPendingSend();
if (sends.size() == 1) {
return sends.poll();
} else {
return new MultiRecordsSend(sends, sizeOfSends);
}
}
|
Write a record set. The underlying record data will be retained
in the result of {@link #build()}. See {@link BaseRecords#toSend()}.
@param records the records to write
|
java
|
clients/src/main/java/org/apache/kafka/common/protocol/SendBuilder.java
| 173
|
[] |
Send
| true
| 2
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
batch_is_authorized
|
def batch_is_authorized(
self,
*,
requests: Sequence[IsAuthorizedRequest],
user: AwsAuthManagerUser | None,
) -> bool:
"""
Make a batch authorization decision against Amazon Verified Permissions.
Check whether the user has permissions to access all resources.
:param requests: the list of requests containing the method, the entity_type and the entity ID
:param user: the user
"""
if user is None:
return False
results = self.get_batch_is_authorized_results(requests=requests, user=user)
return all(result["decision"] == "ALLOW" for result in results)
|
Make a batch authorization decision against Amazon Verified Permissions.
Check whether the user has permissions to access all resources.
:param requests: the list of requests containing the method, the entity_type and the entity ID
:param user: the user
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/auth_manager/avp/facade.py
| 189
|
[
"self",
"requests",
"user"
] |
bool
| true
| 2
| 6.72
|
apache/airflow
| 43,597
|
sphinx
| false
|
finishConnect
|
public boolean finishConnect() throws IOException {
//we need to grab remoteAddr before finishConnect() is called otherwise
//it becomes inaccessible if the connection was refused.
SocketChannel socketChannel = transportLayer.socketChannel();
if (socketChannel != null) {
remoteAddress = socketChannel.getRemoteAddress();
}
boolean connected = transportLayer.finishConnect();
if (connected) {
if (ready()) {
state = ChannelState.READY;
} else if (remoteAddress != null) {
state = new ChannelState(ChannelState.State.AUTHENTICATE, remoteAddress.toString());
} else {
state = ChannelState.AUTHENTICATE;
}
}
return connected;
}
|
Does handshake of transportLayer and authentication using configured authenticator.
For SSL with client authentication enabled, {@link TransportLayer#handshake()} performs
authentication. For SASL, authentication is performed by {@link Authenticator#authenticate()}.
|
java
|
clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java
| 217
|
[] | true
| 5
| 6.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.