| Column | Type | Range / values |
|---|---|---|
| function_name | string | lengths 1–57 |
| function_code | string | lengths 20–4.99k |
| documentation | string | lengths 50–2k |
| language | string | 5 classes |
| file_path | string | lengths 8–166 |
| line_number | int32 | 4–16.7k |
| parameters | list | lengths 0–20 |
| return_type | string | lengths 0–131 |
| has_type_hints | bool | 2 classes |
| complexity | int32 | 1–51 |
| quality_score | float32 | 6–9.68 |
| repo_name | string | 34 classes |
| repo_stars | int32 | 2.9k–242k |
| docstring_style | string | 7 classes |
| is_async | bool | 2 classes |
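As a minimal usage sketch, the snippet below loads a dataset with this schema through the Hugging Face `datasets` library and filters it down to Python functions that carry type hints and a high quality score. The repository id `your-org/code-documentation-corpus` is a placeholder, not this dataset's actual Hub id.

```python
from datasets import load_dataset

# Placeholder repository id -- substitute the real Hub id for this dataset.
ds = load_dataset("your-org/code-documentation-corpus", split="train")

# Keep Python functions that have type hints and a quality_score above 7.
subset = ds.filter(
    lambda ex: ex["language"] == "python"
    and ex["has_type_hints"]
    and ex["quality_score"] > 7.0
)

# Inspect a few rows of the filtered subset.
for row in subset.select(range(3)):
    print(row["repo_name"], row["function_name"], row["docstring_style"])
```

Data preview rows follow, with cells separated by `|` in the column order listed above.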
read_sas
|
def read_sas(
filepath_or_buffer: FilePath | ReadBuffer[bytes],
*,
format: str | None = None,
index: Hashable | None = None,
encoding: str | None = None,
chunksize: int | None = None,
iterator: bool = False,
compression: CompressionOptions = "infer",
) -> DataFrame | SASReader:
"""
Read SAS files stored as either XPORT or SAS7BDAT format files.
Parameters
----------
filepath_or_buffer : str, path object, or file-like object
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``read()`` function. The string could be a URL.
Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.sas7bdat``.
format : str {{'xport', 'sas7bdat'}} or None
If None, file format is inferred from file extension. If 'xport' or
'sas7bdat', uses the corresponding format.
index : identifier of index column, defaults to None
Identifier of column that should be used as index of the DataFrame.
encoding : str, default is None
Encoding for text data. If None, text data are stored as raw bytes.
chunksize : int
Read file `chunksize` lines at a time, returns iterator.
iterator : bool, defaults to False
If True, returns an iterator for reading the file incrementally.
{decompression_options}
Returns
-------
DataFrame, SAS7BDATReader, or XportReader
DataFrame if iterator=False and chunksize=None, else SAS7BDATReader
or XportReader, file format is inferred from file extension.
See Also
--------
read_csv : Read a comma-separated values (csv) file into a pandas DataFrame.
read_excel : Read an Excel file into a pandas DataFrame.
read_spss : Read an SPSS file into a pandas DataFrame.
read_orc : Load an ORC object into a pandas DataFrame.
read_feather : Load a feather-format object into a pandas DataFrame.
Examples
--------
>>> df = pd.read_sas("sas_data.sas7bdat") # doctest: +SKIP
"""
if format is None:
buffer_error_msg = (
"If this is a buffer object rather "
"than a string name, you must specify a format string"
)
filepath_or_buffer = stringify_path(filepath_or_buffer)
if not isinstance(filepath_or_buffer, str):
raise ValueError(buffer_error_msg)
fname = filepath_or_buffer.lower()
if ".xpt" in fname:
format = "xport"
elif ".sas7bdat" in fname:
format = "sas7bdat"
else:
raise ValueError(
f"unable to infer format of SAS file from filename: {fname!r}"
)
reader: SASReader
if format.lower() == "xport":
from pandas.io.sas.sas_xport import XportReader
reader = XportReader(
filepath_or_buffer,
index=index,
encoding=encoding,
chunksize=chunksize,
compression=compression,
)
elif format.lower() == "sas7bdat":
from pandas.io.sas.sas7bdat import SAS7BDATReader
reader = SAS7BDATReader(
filepath_or_buffer,
index=index,
encoding=encoding,
chunksize=chunksize,
compression=compression,
)
else:
raise ValueError("unknown SAS format")
if iterator or chunksize:
return reader
with reader:
return reader.read()
|
Read SAS files stored as either XPORT or SAS7BDAT format files.
Parameters
----------
filepath_or_buffer : str, path object, or file-like object
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``read()`` function. The string could be a URL.
Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.sas7bdat``.
format : str {{'xport', 'sas7bdat'}} or None
If None, file format is inferred from file extension. If 'xport' or
'sas7bdat', uses the corresponding format.
index : identifier of index column, defaults to None
Identifier of column that should be used as index of the DataFrame.
encoding : str, default is None
Encoding for text data. If None, text data are stored as raw bytes.
chunksize : int
Read file `chunksize` lines at a time, returns iterator.
iterator : bool, defaults to False
If True, returns an iterator for reading the file incrementally.
{decompression_options}
Returns
-------
DataFrame, SAS7BDATReader, or XportReader
DataFrame if iterator=False and chunksize=None, else SAS7BDATReader
or XportReader, file format is inferred from file extension.
See Also
--------
read_csv : Read a comma-separated values (csv) file into a pandas DataFrame.
read_excel : Read an Excel file into a pandas DataFrame.
read_spss : Read an SPSS file into a pandas DataFrame.
read_orc : Load an ORC object into a pandas DataFrame.
read_feather : Load a feather-format object into a pandas DataFrame.
Examples
--------
>>> df = pd.read_sas("sas_data.sas7bdat") # doctest: +SKIP
|
python
|
pandas/io/sas/sasreader.py
| 92
|
[
"filepath_or_buffer",
"format",
"index",
"encoding",
"chunksize",
"iterator",
"compression"
] |
DataFrame | SASReader
| true
| 11
| 8.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
ABSL_LOCKS_EXCLUDED
|
ABSL_LOCKS_EXCLUDED(call_mu_) {
ServerCallbackUnary* call = call_.load(std::memory_order_acquire);
if (call == nullptr) {
grpc::internal::MutexLock l(&call_mu_);
call = call_.load(std::memory_order_relaxed);
if (call == nullptr) {
backlog_.finish_wanted = true;
backlog_.status_wanted = std::move(s);
return;
}
}
call->Finish(std::move(s));
}
|
the client will only receive the status and any trailing metadata.
|
cpp
|
include/grpcpp/support/server_callback.h
| 727
|
[] | true
| 3
| 6.88
|
grpc/grpc
| 44,113
|
doxygen
| false
|
|
baseHas
|
function baseHas(object, key) {
return object != null && hasOwnProperty.call(object, key);
}
|
The base implementation of `_.has` without support for deep paths.
@private
@param {Object} [object] The object to query.
@param {Array|string} key The key to check.
@returns {boolean} Returns `true` if `key` exists, else `false`.
|
javascript
|
lodash.js
| 3,135
|
[
"object",
"key"
] | false
| 2
| 6.16
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
getIfElseKeywords
|
function getIfElseKeywords(ifStatement: IfStatement, sourceFile: SourceFile): Node[] {
const keywords: Node[] = [];
// Traverse upwards through all parent if-statements linked by their else-branches.
while (isIfStatement(ifStatement.parent) && ifStatement.parent.elseStatement === ifStatement) {
ifStatement = ifStatement.parent;
}
// Now traverse back down through the else branches, aggregating if/else keywords of if-statements.
while (true) {
const children = ifStatement.getChildren(sourceFile);
pushKeywordIf(keywords, children[0], SyntaxKind.IfKeyword);
// Generally the 'else' keyword is second-to-last, so we traverse backwards.
for (let i = children.length - 1; i >= 0; i--) {
if (pushKeywordIf(keywords, children[i], SyntaxKind.ElseKeyword)) {
break;
}
}
if (!ifStatement.elseStatement || !isIfStatement(ifStatement.elseStatement)) {
break;
}
ifStatement = ifStatement.elseStatement;
}
return keywords;
}
|
For lack of a better name, this function takes a throw statement and returns the
nearest ancestor that is a try-block (whose try statement has a catch clause),
function-block, or source file.
|
typescript
|
src/services/documentHighlights.ts
| 562
|
[
"ifStatement",
"sourceFile"
] | true
| 8
| 6
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
toString
|
@Override
public String toString() {
return toString(true);
}
|
Create a new child from this path with the specified name.
@param name the name of the child
@return a new {@link MemberPath} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/json/JsonWriter.java
| 818
|
[] |
String
| true
| 1
| 6.48
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
add
|
public Member<T> add(String name) {
return add(name, (instance) -> instance);
}
|
Add a new member with access to the instance being written.
@param name the member name
@return the added {@link Member} which may be configured further
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/json/JsonWriter.java
| 203
|
[
"name"
] | true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
startTask
|
private void startTask() {
synchronized (lock) {
if (shutdown) {
throw new RejectedExecutionException("Executor already shutdown");
}
runningTasks++;
}
}
|
Checks if the executor has been shut down and increments the running task count.
@throws RejectedExecutionException if the executor has been previously shutdown
|
java
|
android/guava/src/com/google/common/util/concurrent/DirectExecutorService.java
| 110
|
[] |
void
| true
| 2
| 6.08
|
google/guava
| 51,352
|
javadoc
| false
|
nop
|
@SuppressWarnings("unchecked")
static <E extends Throwable> FailableLongUnaryOperator<E> nop() {
return NOP;
}
|
Gets the NOP singleton.
@param <E> The kind of thrown exception or error.
@return The NOP singleton.
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableLongUnaryOperator.java
| 51
|
[] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
fixExecutableFlag
|
private void fixExecutableFlag(File dir, String fileName) {
File f = new File(dir, fileName);
if (f.exists()) {
f.setExecutable(true, false);
}
}
|
Detect if the project should be extracted.
@param request the generation request
@param response the generation response
@return if the project should be extracted
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/init/ProjectGenerator.java
| 157
|
[
"dir",
"fileName"
] |
void
| true
| 2
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
powerSet
|
public static <E> Set<Set<E>> powerSet(Set<E> set) {
return new PowerSet<E>(set);
}
|
Returns the set of all possible subsets of {@code set}. For example, {@code
powerSet(ImmutableSet.of(1, 2))} returns the set {@code {{}, {1}, {2}, {1, 2}}}.
<p>Elements appear in these subsets in the same iteration order as they appeared in the input
set. The order in which these subsets appear in the outer set is undefined. Note that the power
set of the empty set is not the empty set, but a one-element set containing the empty set.
<p>The returned set and its constituent sets use {@code equals} to decide whether two elements
are identical, even if the input set uses a different concept of equivalence.
<p><i>Performance notes:</i> while the power set of a set with size {@code n} is of size {@code
2^n}, its memory usage is only {@code O(n)}. When the power set is constructed, the input set
is merely copied. Only as the power set is iterated are the individual subsets created, and
these subsets themselves occupy only a small constant amount of memory.
@param set the set of elements to construct a power set from
@return the power set, as an immutable set of immutable sets
@throws IllegalArgumentException if {@code set} has more than 30 unique elements (causing the
power set size to exceed the {@code int} range)
@throws NullPointerException if {@code set} is or contains {@code null}
@see <a href="http://en.wikipedia.org/wiki/Power_set">Power set article at Wikipedia</a>
@since 4.0
|
java
|
android/guava/src/com/google/common/collect/Sets.java
| 1,629
|
[
"set"
] | true
| 1
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
deactivate_deleted_dags
|
def deactivate_deleted_dags(
cls,
bundle_name: str,
rel_filelocs: list[str],
session: Session = NEW_SESSION,
) -> bool:
"""
Set ``is_active=False`` on the DAGs for which the DAG files have been removed.
:param bundle_name: bundle for filelocs
:param rel_filelocs: relative filelocs for bundle
:param session: ORM Session
:return: True if any DAGs were marked as stale, False otherwise
"""
log.debug("Deactivating DAGs (for which DAG files are deleted) from %s table ", cls.__tablename__)
dag_models = session.scalars(
select(cls)
.where(
cls.bundle_name == bundle_name,
)
.options(
load_only(
cls.relative_fileloc,
cls.is_stale,
),
)
)
any_deactivated = False
for dm in dag_models:
if dm.relative_fileloc not in rel_filelocs:
dm.is_stale = True
any_deactivated = True
return any_deactivated
|
Set ``is_active=False`` on the DAGs for which the DAG files have been removed.
:param bundle_name: bundle for filelocs
:param rel_filelocs: relative filelocs for bundle
:param session: ORM Session
:return: True if any DAGs were marked as stale, False otherwise
|
python
|
airflow-core/src/airflow/models/dag.py
| 568
|
[
"cls",
"bundle_name",
"rel_filelocs",
"session"
] |
bool
| true
| 3
| 8.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
createMaybeNavigableAsMap
|
final Map<K, Collection<V>> createMaybeNavigableAsMap() {
if (map instanceof NavigableMap) {
return new NavigableAsMap((NavigableMap<K, Collection<V>>) map);
} else if (map instanceof SortedMap) {
return new SortedAsMap((SortedMap<K, Collection<V>>) map);
} else {
return new AsMap(map);
}
}
|
Returns an iterator across all key-value map entries, used by {@code entries().iterator()} and
{@code values().iterator()}. The default behavior, which traverses the values for one key, the
values for a second key, and so on, suffices for most {@code AbstractMapBasedMultimap}
implementations.
@return an iterator across map entries
|
java
|
android/guava/src/com/google/common/collect/AbstractMapBasedMultimap.java
| 1,279
|
[] | true
| 3
| 7.44
|
google/guava
| 51,352
|
javadoc
| false
|
|
withFileNameLength
|
ZipLocalFileHeaderRecord withFileNameLength(short fileNameLength) {
return new ZipLocalFileHeaderRecord(this.versionNeededToExtract, this.generalPurposeBitFlag,
this.compressionMethod, this.lastModFileTime, this.lastModFileDate, this.crc32, this.compressedSize,
this.uncompressedSize, fileNameLength, this.extraFieldLength);
}
|
Return a new {@link ZipLocalFileHeaderRecord} with a new {@link #fileNameLength()}.
@param fileNameLength the new file name length
@return a new {@link ZipLocalFileHeaderRecord} instance
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipLocalFileHeaderRecord.java
| 77
|
[
"fileNameLength"
] |
ZipLocalFileHeaderRecord
| true
| 1
| 6.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
isConditionMatch
|
private boolean isConditionMatch(MetadataReader metadataReader) {
if (this.conditionEvaluator == null) {
this.conditionEvaluator =
new ConditionEvaluator(getRegistry(), this.environment, this.resourcePatternResolver);
}
return !this.conditionEvaluator.shouldSkip(metadataReader.getAnnotationMetadata());
}
|
Determine whether the given class is a candidate component based on any
{@code @Conditional} annotations.
@param metadataReader the ASM ClassReader for the class
@return whether the class qualifies as a candidate component
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/ClassPathScanningCandidateComponentProvider.java
| 554
|
[
"metadataReader"
] | true
| 2
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
topStep
|
function topStep<T>(array: ReadonlyArray<T>, compare: (a: T, b: T) => number, result: T[], i: number, m: number): void {
for (const n = result.length; i < m; i++) {
const element = array[i];
if (compare(element, result[n - 1]) < 0) {
result.pop();
const j = findFirstIdxMonotonousOrArrLen(result, e => compare(element, e) < 0);
result.splice(j, 0, element);
}
}
}
|
Asynchronous variant of `top()` allowing for splitting up work in batches between which the event loop can run.
Returns the top N elements from the array.
Faster than sorting the entire array when the array is a lot larger than N.
@param array The unsorted array.
@param compare A sort function for the elements.
@param n The number of elements to return.
@param batch The number of elements to examine before yielding to the event loop.
@return The first n elements from array when sorted with compare.
|
typescript
|
src/vs/base/common/arrays.ts
| 342
|
[
"array",
"compare",
"result",
"i",
"m"
] | true
| 3
| 8.24
|
microsoft/vscode
| 179,840
|
jsdoc
| false
|
|
isPlainAccessor
|
private boolean isPlainAccessor(Method method) {
if (Modifier.isStatic(method.getModifiers()) ||
method.getDeclaringClass() == Object.class || method.getDeclaringClass() == Class.class ||
method.getParameterCount() > 0 || method.getReturnType() == void.class ||
isInvalidReadOnlyPropertyType(method.getReturnType(), method.getDeclaringClass())) {
return false;
}
try {
// Accessor method referring to instance field of same name?
method.getDeclaringClass().getDeclaredField(method.getName());
return true;
}
catch (Exception ex) {
return false;
}
}
|
Create a new CachedIntrospectionResults instance for the given class.
@param beanClass the bean class to analyze
@throws BeansException in case of introspection failure
|
java
|
spring-beans/src/main/java/org/springframework/beans/CachedIntrospectionResults.java
| 345
|
[
"method"
] | true
| 8
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
fit_transform
|
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets.
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : {ndarray, sparse matrix} of shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]`
is in `y[i]`, and 0 otherwise. Sparse matrix will be of CSR
format.
"""
if self.classes is not None:
return self.fit(y).transform(y)
self._cached_dict = None
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
# ensure yt.indices keeps its current dtype
yt.indices = np.asarray(inverse[yt.indices], dtype=yt.indices.dtype)
if not self.sparse_output:
yt = yt.toarray()
return yt
|
Fit the label sets binarizer and transform the given label sets.
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : {ndarray, sparse matrix} of shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]`
is in `y[i]`, and 0 otherwise. Sparse matrix will be of CSR
format.
|
python
|
sklearn/preprocessing/_label.py
| 903
|
[
"self",
"y"
] | false
| 4
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
maybe_disable_inference_mode
|
def maybe_disable_inference_mode() -> Generator[None, None, None]:
"""
Disables torch.inference_mode for the compilation (still on at runtime).
This simplifies the compile stack where we can assume that inference_mode
will always be off.
Since inference_mode is equivalent to no_grad + some optimizations (version
counts etc), we turn on no_grad here. The other optimizations are not
relevant to torch.compile.
"""
is_inference_mode_on = (
config.fake_tensor_disable_inference_mode and torch.is_inference_mode_enabled()
)
if is_inference_mode_on:
with (
torch.inference_mode(False),
torch.no_grad(),
):
yield
else:
yield
|
Disables torch.inference_mode for the compilation (still on at runtime).
This simplifies the compile stack where we can assume that inference_mode
will always be off.
Since inference_mode is equivalent to no_grad + some optimizations (version
counts etc), we turn on no_grad here. The other optimizations are not
relevant to torch.compile.
|
python
|
torch/_dynamo/utils.py
| 4,951
|
[] |
Generator[None, None, None]
| true
| 4
| 6.88
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
beanNamesForTypeIncludingAncestors
|
public static String[] beanNamesForTypeIncludingAncestors(ListableBeanFactory lbf, ResolvableType type) {
Assert.notNull(lbf, "ListableBeanFactory must not be null");
String[] result = lbf.getBeanNamesForType(type);
if (lbf instanceof HierarchicalBeanFactory hbf) {
if (hbf.getParentBeanFactory() instanceof ListableBeanFactory pbf) {
String[] parentResult = beanNamesForTypeIncludingAncestors(pbf, type);
result = mergeNamesWithParent(result, parentResult, hbf);
}
}
return result;
}
|
Get all bean names for the given type, including those defined in ancestor
factories. Will return unique names in case of overridden bean definitions.
<p>Does consider objects created by FactoryBeans, which means that FactoryBeans
will get initialized. If the object created by the FactoryBean doesn't match,
the raw FactoryBean itself will be matched against the type.
<p>This version of {@code beanNamesForTypeIncludingAncestors} automatically
includes prototypes and FactoryBeans.
@param lbf the bean factory
@param type the type that beans must match (as a {@code ResolvableType})
@return the array of matching bean names, or an empty array if none
@since 4.2
@see ListableBeanFactory#getBeanNamesForType(ResolvableType)
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/BeanFactoryUtils.java
| 166
|
[
"lbf",
"type"
] | true
| 3
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
_split_sparse_columns
|
def _split_sparse_columns(
arff_data: ArffSparseDataType, include_columns: List
) -> ArffSparseDataType:
"""Obtains several columns from sparse ARFF representation. Additionally,
the column indices are re-labelled, given the columns that are not
included. (e.g., when including [1, 2, 3], the columns will be relabelled
to [0, 1, 2]).
Parameters
----------
arff_data : tuple
A tuple of three lists of equal size; first list indicating the value,
second the x coordinate and the third the y coordinate.
include_columns : list
A list of columns to include.
Returns
-------
arff_data_new : tuple
Subset of arff data with only the include columns indicated by the
include_columns argument.
"""
arff_data_new: ArffSparseDataType = (list(), list(), list())
reindexed_columns = {
column_idx: array_idx for array_idx, column_idx in enumerate(include_columns)
}
for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]):
if col_idx in include_columns:
arff_data_new[0].append(val)
arff_data_new[1].append(row_idx)
arff_data_new[2].append(reindexed_columns[col_idx])
return arff_data_new
|
Obtains several columns from sparse ARFF representation. Additionally,
the column indices are re-labelled, given the columns that are not
included. (e.g., when including [1, 2, 3], the columns will be relabelled
to [0, 1, 2]).
Parameters
----------
arff_data : tuple
A tuple of three lists of equal size; first list indicating the value,
second the x coordinate and the third the y coordinate.
include_columns : list
A list of columns to include.
Returns
-------
arff_data_new : tuple
Subset of arff data with only the include columns indicated by the
include_columns argument.
|
python
|
sklearn/datasets/_arff_parser.py
| 22
|
[
"arff_data",
"include_columns"
] |
ArffSparseDataType
| true
| 3
| 6.72
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
bucket_cap_mb_by_bucket_idx_default
|
def bucket_cap_mb_by_bucket_idx_default(bucket_id: int) -> float:
"""
Determine the size of a bucket based on its ID.
Args:
bucket_id (int): The ID of the bucket.
Returns:
float: The size of the bucket.
"""
return 2000.0
|
Determine the size of a bucket based on its ID.
Args:
bucket_id (int): The ID of the bucket.
Returns:
float: The size of the bucket.
|
python
|
torch/_inductor/fx_passes/bucketing.py
| 146
|
[
"bucket_id"
] |
float
| true
| 1
| 6.88
|
pytorch/pytorch
| 96,034
|
google
| false
|
getmaskarray
|
def getmaskarray(arr):
"""
Return the mask of a masked array, or full boolean array of False.
Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and
the mask is not `nomask`, else return a full boolean array of False of
the same shape as `arr`.
Parameters
----------
arr : array_like
Input `MaskedArray` for which the mask is required.
See Also
--------
getmask : Return the mask of a masked array, or nomask.
getdata : Return the data of a masked array as an ndarray.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(
data=[[1, --],
[3, 4]],
mask=[[False, True],
[False, False]],
fill_value=2)
>>> ma.getmaskarray(a)
array([[False, True],
[False, False]])
Result when mask == ``nomask``
>>> b = ma.masked_array([[1,2],[3,4]])
>>> b
masked_array(
data=[[1, 2],
[3, 4]],
mask=False,
fill_value=999999)
>>> ma.getmaskarray(b)
array([[False, False],
[False, False]])
"""
mask = getmask(arr)
if mask is nomask:
mask = make_mask_none(np.shape(arr), getattr(arr, 'dtype', None))
return mask
|
Return the mask of a masked array, or full boolean array of False.
Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and
the mask is not `nomask`, else return a full boolean array of False of
the same shape as `arr`.
Parameters
----------
arr : array_like
Input `MaskedArray` for which the mask is required.
See Also
--------
getmask : Return the mask of a masked array, or nomask.
getdata : Return the data of a masked array as an ndarray.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(
data=[[1, --],
[3, 4]],
mask=[[False, True],
[False, False]],
fill_value=2)
>>> ma.getmaskarray(a)
array([[False, True],
[False, False]])
Result when mask == ``nomask``
>>> b = ma.masked_array([[1,2],[3,4]])
>>> b
masked_array(
data=[[1, 2],
[3, 4]],
mask=False,
fill_value=999999)
>>> ma.getmaskarray(b)
array([[False, False],
[False, False]])
|
python
|
numpy/ma/core.py
| 1,463
|
[
"arr"
] | false
| 2
| 7.68
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
_insert_update_mgr_locs
|
def _insert_update_mgr_locs(self, loc) -> None:
"""
When inserting a new Block at location 'loc', we increment
all of the mgr_locs of blocks above that by one.
"""
# Faster version of set(arr) for sequences of small numbers
blknos = np.bincount(self.blknos[loc:]).nonzero()[0]
for blkno in blknos:
# .620 this way, .326 of which is in increment_above
blk = self.blocks[blkno]
blk._mgr_locs = blk._mgr_locs.increment_above(loc)
|
When inserting a new Block at location 'loc', we increment
all of the mgr_locs of blocks above that by one.
|
python
|
pandas/core/internals/managers.py
| 1,547
|
[
"self",
"loc"
] |
None
| true
| 2
| 6
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
containsAllImpl
|
static boolean containsAllImpl(Collection<?> self, Collection<?> c) {
for (Object o : c) {
if (!self.contains(o)) {
return false;
}
}
return true;
}
|
Returns {@code true} if the collection {@code self} contains all of the elements in the
collection {@code c}.
<p>This method iterates over the specified collection {@code c}, checking each element returned
by the iterator in turn to see if it is contained in the specified collection {@code self}. If
all elements are so contained, {@code true} is returned, otherwise {@code false}.
@param self a collection which might contain all elements in {@code c}
@param c a collection whose elements might be contained by {@code self}
|
java
|
android/guava/src/com/google/common/collect/Collections2.java
| 301
|
[
"self",
"c"
] | true
| 2
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
|
tryParse
|
public static @Nullable Long tryParse(String string) {
return tryParse(string, 10);
}
|
Parses the specified string as a signed decimal long value. The ASCII character {@code '-'} (
<code>'\u002D'</code>) is recognized as the minus sign.
<p>Unlike {@link Long#parseLong(String)}, this method returns {@code null} instead of throwing
an exception if parsing fails. Additionally, this method only accepts ASCII digits, and returns
{@code null} if non-ASCII digits are present in the string.
<p>Note that strings prefixed with ASCII {@code '+'} are rejected, even though {@link
Integer#parseInt(String)} accepts them.
@param string the string representation of a long value
@return the long value represented by {@code string}, or {@code null} if {@code string} has a
length of zero or cannot be parsed as a long value
@throws NullPointerException if {@code string} is {@code null}
@since 14.0
|
java
|
android/guava/src/com/google/common/primitives/Longs.java
| 374
|
[
"string"
] |
Long
| true
| 1
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
is_authorized
|
def is_authorized(
self,
*,
method: ResourceMethod | str,
entity_type: AvpEntities,
user: AwsAuthManagerUser,
entity_id: str | None = None,
context: dict | None = None,
) -> bool:
"""
Make an authorization decision against Amazon Verified Permissions.
Check whether the user has permissions to access given resource.
:param method: the method to perform.
The method can also be a string if the action has been defined in a plugin.
In that case, the action can be anything (e.g. can_do).
See https://github.com/apache/airflow/issues/39144
:param entity_type: the entity type the user accesses
:param user: the user
:param entity_id: the entity ID the user accesses. If not provided, all entities of the type will be
considered.
:param context: optional additional context to pass to Amazon Verified Permissions.
"""
entity_list = self._get_user_group_entities(user)
self.log.debug(
"Making authorization request for user=%s, method=%s, entity_type=%s, entity_id=%s",
user.get_id(),
method,
entity_type,
entity_id,
)
request_params = prune_dict(
{
"policyStoreId": self.avp_policy_store_id,
"principal": {"entityType": get_entity_type(AvpEntities.USER), "entityId": user.get_id()},
"action": {
"actionType": get_entity_type(AvpEntities.ACTION),
"actionId": get_action_id(entity_type, method, entity_id),
},
"resource": {"entityType": get_entity_type(entity_type), "entityId": entity_id or "*"},
"entities": {"entityList": entity_list},
"context": self._build_context(context),
}
)
resp = self.avp_client.is_authorized(**request_params)
self.log.debug("Authorization response: %s", resp)
if len(resp.get("errors", [])) > 0:
self.log.error(
"Error occurred while making an authorization decision. Errors: %s", resp["errors"]
)
raise AirflowException("Error occurred while making an authorization decision.")
return resp["decision"] == "ALLOW"
|
Make an authorization decision against Amazon Verified Permissions.
Check whether the user has permissions to access given resource.
:param method: the method to perform.
The method can also be a string if the action has been defined in a plugin.
In that case, the action can be anything (e.g. can_do).
See https://github.com/apache/airflow/issues/39144
:param entity_type: the entity type the user accesses
:param user: the user
:param entity_id: the entity ID the user accesses. If not provided, all entities of the type will be
considered.
:param context: optional additional context to pass to Amazon Verified Permissions.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/auth_manager/avp/facade.py
| 83
|
[
"self",
"method",
"entity_type",
"user",
"entity_id",
"context"
] |
bool
| true
| 3
| 6.64
|
apache/airflow
| 43,597
|
sphinx
| false
|
resolveConstructorOrFactoryMethod
|
@Deprecated(since = "6.1.7")
public Executable resolveConstructorOrFactoryMethod() {
return new ConstructorResolver((AbstractAutowireCapableBeanFactory) getBeanFactory())
.resolveConstructorOrFactoryMethod(getBeanName(), getMergedBeanDefinition());
}
|
Resolve the constructor or factory method to use for this bean.
@return the {@link java.lang.reflect.Constructor} or {@link java.lang.reflect.Method}
@deprecated in favor of {@link #resolveInstantiationDescriptor()}
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/RegisteredBean.java
| 214
|
[] |
Executable
| true
| 1
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getPropertiesFromApplication
|
private Properties getPropertiesFromApplication(Environment environment, JsonParser parser) {
Properties properties = new Properties();
try {
String property = environment.getProperty("VCAP_APPLICATION", "{}");
Map<String, Object> map = parser.parseMap(property);
extractPropertiesFromApplication(properties, map);
}
catch (Exception ex) {
this.logger.error("Could not parse VCAP_APPLICATION", ex);
}
return properties;
}
|
Create a new {@link CloudFoundryVcapEnvironmentPostProcessor} instance.
@param logFactory the log factory to use
@since 3.0.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/cloud/CloudFoundryVcapEnvironmentPostProcessor.java
| 143
|
[
"environment",
"parser"
] |
Properties
| true
| 2
| 6.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
_huber_loss_and_gradient
|
def _huber_loss_and_gradient(w, X, y, epsilon, alpha, sample_weight=None):
"""Returns the Huber loss and the gradient.
Parameters
----------
w : ndarray, shape (n_features + 1,) or (n_features + 2,)
Feature vector.
w[:n_features] gives the coefficients
w[-1] gives the scale factor and if the intercept is fit w[-2]
gives the intercept factor.
X : ndarray of shape (n_samples, n_features)
Input data.
y : ndarray of shape (n_samples,)
Target vector.
epsilon : float
Robustness of the Huber estimator.
alpha : float
Regularization parameter.
sample_weight : ndarray of shape (n_samples,), default=None
Weight assigned to each sample.
Returns
-------
loss : float
Huber loss.
gradient : ndarray, shape (len(w))
Returns the derivative of the Huber loss with respect to each
coefficient, intercept and the scale as a vector.
"""
_, n_features = X.shape
fit_intercept = n_features + 2 == w.shape[0]
if fit_intercept:
intercept = w[-2]
sigma = w[-1]
w = w[:n_features]
n_samples = np.sum(sample_weight)
# Calculate the values where |y - X'w -c / sigma| > epsilon
# The values above this threshold are outliers.
linear_loss = y - safe_sparse_dot(X, w)
if fit_intercept:
linear_loss -= intercept
abs_linear_loss = np.abs(linear_loss)
outliers_mask = abs_linear_loss > epsilon * sigma
# Calculate the linear loss due to the outliers.
# This is equal to (2 * M * |y - X'w -c / sigma| - M**2) * sigma
outliers = abs_linear_loss[outliers_mask]
num_outliers = np.count_nonzero(outliers_mask)
n_non_outliers = X.shape[0] - num_outliers
# n_sq_outliers includes the weight give to the outliers while
# num_outliers is just the number of outliers.
outliers_sw = sample_weight[outliers_mask]
n_sw_outliers = np.sum(outliers_sw)
outlier_loss = (
2.0 * epsilon * np.sum(outliers_sw * outliers)
- sigma * n_sw_outliers * epsilon**2
)
# Calculate the quadratic loss due to the non-outliers.-
# This is equal to |(y - X'w - c)**2 / sigma**2| * sigma
non_outliers = linear_loss[~outliers_mask]
weighted_non_outliers = sample_weight[~outliers_mask] * non_outliers
weighted_loss = np.dot(weighted_non_outliers.T, non_outliers)
squared_loss = weighted_loss / sigma
if fit_intercept:
grad = np.zeros(n_features + 2)
else:
grad = np.zeros(n_features + 1)
# Gradient due to the squared loss.
X_non_outliers = -axis0_safe_slice(X, ~outliers_mask, n_non_outliers)
grad[:n_features] = (
2.0 / sigma * safe_sparse_dot(weighted_non_outliers, X_non_outliers)
)
# Gradient due to the linear loss.
signed_outliers = np.ones_like(outliers)
signed_outliers_mask = linear_loss[outliers_mask] < 0
signed_outliers[signed_outliers_mask] = -1.0
X_outliers = axis0_safe_slice(X, outliers_mask, num_outliers)
sw_outliers = sample_weight[outliers_mask] * signed_outliers
grad[:n_features] -= 2.0 * epsilon * (safe_sparse_dot(sw_outliers, X_outliers))
# Gradient due to the penalty.
grad[:n_features] += alpha * 2.0 * w
# Gradient due to sigma.
grad[-1] = n_samples
grad[-1] -= n_sw_outliers * epsilon**2
grad[-1] -= squared_loss / sigma
# Gradient due to the intercept.
if fit_intercept:
grad[-2] = -2.0 * np.sum(weighted_non_outliers) / sigma
grad[-2] -= 2.0 * epsilon * np.sum(sw_outliers)
loss = n_samples * sigma + squared_loss + outlier_loss
loss += alpha * np.dot(w, w)
return loss, grad
|
Returns the Huber loss and the gradient.
Parameters
----------
w : ndarray, shape (n_features + 1,) or (n_features + 2,)
Feature vector.
w[:n_features] gives the coefficients
w[-1] gives the scale factor and if the intercept is fit w[-2]
gives the intercept factor.
X : ndarray of shape (n_samples, n_features)
Input data.
y : ndarray of shape (n_samples,)
Target vector.
epsilon : float
Robustness of the Huber estimator.
alpha : float
Regularization parameter.
sample_weight : ndarray of shape (n_samples,), default=None
Weight assigned to each sample.
Returns
-------
loss : float
Huber loss.
gradient : ndarray, shape (len(w))
Returns the derivative of the Huber loss with respect to each
coefficient, intercept and the scale as a vector.
|
python
|
sklearn/linear_model/_huber.py
| 19
|
[
"w",
"X",
"y",
"epsilon",
"alpha",
"sample_weight"
] | false
| 6
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
saturatedCast
|
public static char saturatedCast(long value) {
if (value > Character.MAX_VALUE) {
return Character.MAX_VALUE;
}
if (value < Character.MIN_VALUE) {
return Character.MIN_VALUE;
}
return (char) value;
}
|
Returns the {@code char} nearest in value to {@code value}.
@param value any {@code long} value
@return the same value cast to {@code char} if it is in the range of the {@code char} type,
{@link Character#MAX_VALUE} if it is too large, or {@link Character#MIN_VALUE} if it is too
small
|
java
|
android/guava/src/com/google/common/primitives/Chars.java
| 99
|
[
"value"
] | true
| 3
| 7.92
|
google/guava
| 51,352
|
javadoc
| false
|
|
changeSubscription
|
private boolean changeSubscription(Set<String> topicsToSubscribe) {
if (subscription.equals(topicsToSubscribe))
return false;
subscription = topicsToSubscribe;
return true;
}
|
This method sets the subscription type if it is not already set (i.e. when it is NONE),
or verifies that the subscription type is equal to the give type when it is set (i.e.
when it is not NONE)
@param type The given subscription type
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
| 224
|
[
"topicsToSubscribe"
] | true
| 2
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
deleteExistingOutput
|
private void deleteExistingOutput(Path... paths) {
for (Path path : paths) {
try {
FileSystemUtils.deleteRecursively(path);
}
catch (IOException ex) {
throw new UncheckedIOException("Failed to delete existing output in '" + path + "'", ex);
}
}
}
|
Delete the source, resource, and class output directories.
|
java
|
spring-context/src/main/java/org/springframework/context/aot/AbstractAotProcessor.java
| 101
|
[] |
void
| true
| 2
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
astype
|
def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
"""
Cast to a NumPy array or ExtensionArray with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
np.ndarray or pandas.api.extensions.ExtensionArray
An ``ExtensionArray`` if ``dtype`` is ``ExtensionDtype``,
otherwise a Numpy ndarray with ``dtype`` for its dtype.
See Also
--------
Series.astype : Cast a Series to a different dtype.
DataFrame.astype : Cast a DataFrame to a different dtype.
api.extensions.ExtensionArray : Base class for ExtensionArray objects.
core.arrays.DatetimeArray._from_sequence : Create a DatetimeArray from a
sequence.
core.arrays.TimedeltaArray._from_sequence : Create a TimedeltaArray from
a sequence.
Examples
--------
>>> arr = pd.array([1, 2, 3])
>>> arr
<IntegerArray>
[1, 2, 3]
Length: 3, dtype: Int64
Casting to another ``ExtensionDtype`` returns an ``ExtensionArray``:
>>> arr1 = arr.astype("Float64")
>>> arr1
<FloatingArray>
[1.0, 2.0, 3.0]
Length: 3, dtype: Float64
>>> arr1.dtype
Float64Dtype()
Otherwise, we will get a Numpy ndarray:
>>> arr2 = arr.astype("float64")
>>> arr2
array([1., 2., 3.])
>>> arr2.dtype
dtype('float64')
"""
dtype = pandas_dtype(dtype)
if dtype == self.dtype:
if not copy:
return self
else:
return self.copy()
if isinstance(dtype, ExtensionDtype):
cls = dtype.construct_array_type()
return cls._from_sequence(self, dtype=dtype, copy=copy)
elif lib.is_np_dtype(dtype, "M"):
from pandas.core.arrays import DatetimeArray
return DatetimeArray._from_sequence(self, dtype=dtype, copy=copy)
elif lib.is_np_dtype(dtype, "m"):
from pandas.core.arrays import TimedeltaArray
return TimedeltaArray._from_sequence(self, dtype=dtype, copy=copy)
if not copy:
return np.asarray(self, dtype=dtype)
else:
return np.array(self, dtype=dtype, copy=copy)
|
Cast to a NumPy array or ExtensionArray with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
np.ndarray or pandas.api.extensions.ExtensionArray
An ``ExtensionArray`` if ``dtype`` is ``ExtensionDtype``,
otherwise a Numpy ndarray with ``dtype`` for its dtype.
See Also
--------
Series.astype : Cast a Series to a different dtype.
DataFrame.astype : Cast a DataFrame to a different dtype.
api.extensions.ExtensionArray : Base class for ExtensionArray objects.
core.arrays.DatetimeArray._from_sequence : Create a DatetimeArray from a
sequence.
core.arrays.TimedeltaArray._from_sequence : Create a TimedeltaArray from
a sequence.
Examples
--------
>>> arr = pd.array([1, 2, 3])
>>> arr
<IntegerArray>
[1, 2, 3]
Length: 3, dtype: Int64
Casting to another ``ExtensionDtype`` returns an ``ExtensionArray``:
>>> arr1 = arr.astype("Float64")
>>> arr1
<FloatingArray>
[1.0, 2.0, 3.0]
Length: 3, dtype: Float64
>>> arr1.dtype
Float64Dtype()
Otherwise, we will get a Numpy ndarray:
>>> arr2 = arr.astype("float64")
>>> arr2
array([1., 2., 3.])
>>> arr2.dtype
dtype('float64')
|
python
|
pandas/core/arrays/base.py
| 752
|
[
"self",
"dtype",
"copy"
] |
ArrayLike
| true
| 9
| 7.92
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
openBufferedStream
|
public OutputStream openBufferedStream() throws IOException {
OutputStream out = openStream();
return (out instanceof BufferedOutputStream)
? (BufferedOutputStream) out
: new BufferedOutputStream(out);
}
|
Opens a new buffered {@link OutputStream} for writing to this sink. The returned stream is not
required to be a {@link BufferedOutputStream} in order to allow implementations to simply
delegate to {@link #openStream()} when the stream returned by that method does not benefit from
additional buffering (for example, a {@code ByteArrayOutputStream}). This method returns a new,
independent stream each time it is called.
<p>The caller is responsible for ensuring that the returned stream is closed.
@throws IOException if an I/O error occurs while opening the stream
@since 15.0 (in 14.0 with return type {@link BufferedOutputStream})
|
java
|
android/guava/src/com/google/common/io/ByteSink.java
| 86
|
[] |
OutputStream
| true
| 2
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
setLocaleContext
|
public static void setLocaleContext(@Nullable LocaleContext localeContext, boolean inheritable) {
if (localeContext == null) {
resetLocaleContext();
}
else {
if (inheritable) {
inheritableLocaleContextHolder.set(localeContext);
localeContextHolder.remove();
}
else {
localeContextHolder.set(localeContext);
inheritableLocaleContextHolder.remove();
}
}
}
|
Associate the given LocaleContext with the current thread.
<p>The given LocaleContext may be a {@link TimeZoneAwareLocaleContext},
containing a locale with associated time zone information.
@param localeContext the current LocaleContext,
or {@code null} to reset the thread-bound context
@param inheritable whether to expose the LocaleContext as inheritable
for child threads (using an {@link InheritableThreadLocal})
@see SimpleLocaleContext
@see SimpleTimeZoneAwareLocaleContext
|
java
|
spring-context/src/main/java/org/springframework/context/i18n/LocaleContextHolder.java
| 98
|
[
"localeContext",
"inheritable"
] |
void
| true
| 3
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getLastDefinedValue
|
function getLastDefinedValue<T>(data: T[], index: number): T {
for (let i = index; i > -1; i--) {
if (typeof data[i] !== 'undefined') {
return data[i];
}
}
throw new RuntimeError(
RuntimeErrorCode.LOCALE_DATA_UNDEFINED,
ngDevMode && 'Locale data API: locale data undefined',
);
}
|
Retrieves the first value that is defined in an array, going backwards from an index position.
To avoid repeating the same data (as when the "format" and "standalone" forms are the same)
add the first value to the locale data arrays, and add other values only if they are different.
@param data The data array to retrieve from.
@param index A 0-based index into the array to start from.
@returns The value immediately before the given index position.
@see [Internationalization (i18n) Guide](guide/i18n)
|
typescript
|
packages/common/src/i18n/locale_data_api.ts
| 717
|
[
"data",
"index"
] | true
| 4
| 8.24
|
angular/angular
| 99,544
|
jsdoc
| false
|
|
scanDecimalEscape
|
function scanDecimalEscape(): boolean {
Debug.assertEqual(charCodeUnchecked(pos - 1), CharacterCodes.backslash);
const ch = charCodeChecked(pos);
if (ch >= CharacterCodes._1 && ch <= CharacterCodes._9) {
const start = pos;
scanDigits();
decimalEscapes = append(decimalEscapes, { pos: start, end: pos, value: +tokenValue });
return true;
}
return false;
}
|
A stack of scopes for named capturing groups. @see {scanGroupName}
|
typescript
|
src/compiler/scanner.ts
| 2,923
|
[] | true
| 3
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
getLifecycleProcessor
|
LifecycleProcessor getLifecycleProcessor() throws IllegalStateException {
if (this.lifecycleProcessor == null) {
throw new IllegalStateException("LifecycleProcessor not initialized - " +
"call 'refresh' before invoking lifecycle methods via the context: " + this);
}
return this.lifecycleProcessor;
}
|
Return the internal LifecycleProcessor used by the context.
@return the internal LifecycleProcessor (never {@code null})
@throws IllegalStateException if the context has not been initialized yet
|
java
|
spring-context/src/main/java/org/springframework/context/support/AbstractApplicationContext.java
| 490
|
[] |
LifecycleProcessor
| true
| 2
| 7.28
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
toString
|
@Override
public String toString() {
T value = null;
Throwable exception = null;
try {
value = completableFuture.getNow(null);
} catch (CancellationException e) {
// In Java 23, When a CompletableFuture is cancelled, getNow() will throw a CancellationException wrapping a
// CancellationException. whereas in Java < 23, it throws a CompletionException directly.
// see https://bugs.openjdk.org/browse/JDK-8331987
if (e.getCause() instanceof CancellationException) {
exception = e.getCause();
} else {
exception = e;
}
} catch (CompletionException e) {
exception = e.getCause();
} catch (Exception e) {
exception = e;
}
return String.format("KafkaFuture{value=%s,exception=%s,done=%b}", value, exception, exception != null || value != null);
}
|
Returns true if completed in any fashion: normally, exceptionally, or via cancellation.
|
java
|
clients/src/main/java/org/apache/kafka/common/internals/KafkaFutureImpl.java
| 249
|
[] |
String
| true
| 6
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
validIndex
|
public static <T extends Collection<?>> T validIndex(final T collection, final int index, final String message, final Object... values) {
Objects.requireNonNull(collection, "collection");
if (index < 0 || index >= collection.size()) {
throw new IndexOutOfBoundsException(getMessage(message, values));
}
return collection;
}
|
Validates that the index is within the bounds of the argument
collection; otherwise throwing an exception with the specified message.
<pre>Validate.validIndex(myCollection, 2, "The collection index is invalid: ");</pre>
<p>If the collection is {@code null}, then the message of the
exception is "The validated object is null".</p>
@param <T> the collection type.
@param collection the collection to check, validated not null by this method.
@param index the index to check.
@param message the {@link String#format(String, Object...)} exception message if invalid, not null.
@param values the optional values for the formatted exception message, null array not recommended.
@return the validated collection (never {@code null} for chaining).
@throws NullPointerException if the collection is {@code null}.
@throws IndexOutOfBoundsException if the index is invalid.
@see #validIndex(Collection, int)
@since 3.0
|
java
|
src/main/java/org/apache/commons/lang3/Validate.java
| 1,138
|
[
"collection",
"index",
"message"
] |
T
| true
| 3
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
uncompressedIterator
|
private CloseableIterator<Record> uncompressedIterator() {
final ByteBuffer buffer = this.buffer.duplicate();
buffer.position(RECORDS_OFFSET);
return new RecordIterator() {
@Override
protected Record readNext(long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) {
try {
return DefaultRecord.readFrom(buffer, baseOffset, baseTimestamp, baseSequence, logAppendTime);
} catch (BufferUnderflowException e) {
throw new InvalidRecordException("Incorrect declared batch size, premature EOF reached");
}
}
@Override
protected boolean ensureNoneRemaining() {
return !buffer.hasRemaining();
}
@Override
public void close() {}
};
}
|
Gets the base timestamp of the batch which is used to calculate the record timestamps from the deltas.
@return The base timestamp
|
java
|
clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java
| 298
|
[] | true
| 2
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
getAnnotation
|
public static <A extends Annotation> A getAnnotation(final Method method, final Class<A> annotationCls, final boolean searchSupers,
final boolean ignoreAccess) {
Objects.requireNonNull(method, "method");
Objects.requireNonNull(annotationCls, "annotationCls");
if (!ignoreAccess && !MemberUtils.isAccessible(method)) {
return null;
}
A annotation = method.getAnnotation(annotationCls);
if (annotation == null && searchSupers) {
final Class<?> mcls = method.getDeclaringClass();
final List<Class<?>> classes = getAllSuperclassesAndInterfaces(mcls);
for (final Class<?> acls : classes) {
final Method equivalentMethod = ignoreAccess ? getMatchingMethod(acls, method.getName(), method.getParameterTypes())
: getMatchingAccessibleMethod(acls, method.getName(), method.getParameterTypes());
if (equivalentMethod != null) {
annotation = equivalentMethod.getAnnotation(annotationCls);
if (annotation != null) {
break;
}
}
}
}
return annotation;
}
|
Gets the annotation object with the given annotation type that is present on the given method or optionally on any equivalent method in super classes and
interfaces. Returns null if the annotation type was not present.
<p>
Stops searching for an annotation once the first annotation of the specified type has been found. Additional annotations of the specified type will be
silently ignored.
</p>
@param <A> the annotation type.
@param method the {@link Method} to query, may be null.
@param annotationCls the {@link Annotation} to check if is present on the method.
@param searchSupers determines if a lookup in the entire inheritance hierarchy of the given class is performed if the annotation was not directly
present.
@param ignoreAccess determines if underlying method has to be accessible.
@return the first matching annotation, or {@code null} if not found.
@throws NullPointerException if either the method or annotation class is {@code null}.
@throws SecurityException if an underlying accessible object's method denies the request.
@see SecurityManager#checkPermission
@since 3.6
|
java
|
src/main/java/org/apache/commons/lang3/reflect/MethodUtils.java
| 265
|
[
"method",
"annotationCls",
"searchSupers",
"ignoreAccess"
] |
A
| true
| 8
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
send_mime_email
|
def send_mime_email(
e_from: str,
e_to: str | list[str],
mime_msg: MIMEMultipart,
conn_id: str = "smtp_default",
dryrun: bool = False,
) -> None:
"""
Send a MIME email.
:param e_from: The email address of the sender.
:param e_to: The email address or a list of email addresses of the recipient(s).
:param mime_msg: The MIME message to send.
:param conn_id: The ID of the SMTP connection to use.
:param dryrun: If True, the email will not be sent, but a log message will be generated.
"""
smtp_host = conf.get_mandatory_value("smtp", "SMTP_HOST")
smtp_port = conf.getint("smtp", "SMTP_PORT")
smtp_starttls = conf.getboolean("smtp", "SMTP_STARTTLS")
smtp_ssl = conf.getboolean("smtp", "SMTP_SSL")
smtp_retry_limit = conf.getint("smtp", "SMTP_RETRY_LIMIT")
smtp_timeout = conf.getint("smtp", "SMTP_TIMEOUT")
smtp_user = None
smtp_password = None
if conn_id is not None:
try:
from airflow.models import Connection
airflow_conn = Connection.get_connection_from_secrets(conn_id)
smtp_user = airflow_conn.login
smtp_password = airflow_conn.password
except AirflowException:
pass
if smtp_user is None or smtp_password is None:
log.debug("No user/password found for SMTP, so logging in with no authentication.")
if not dryrun:
for attempt in range(1, smtp_retry_limit + 1):
log.info("Email alerting: attempt %s", str(attempt))
try:
smtp_conn = _get_smtp_connection(smtp_host, smtp_port, smtp_timeout, smtp_ssl)
except smtplib.SMTPServerDisconnected:
if attempt == smtp_retry_limit:
raise
else:
if smtp_starttls:
smtp_conn.starttls()
if smtp_user and smtp_password:
smtp_conn.login(smtp_user, smtp_password)
log.info("Sent an alert email to %s", e_to)
smtp_conn.sendmail(e_from, e_to, mime_msg.as_string())
smtp_conn.quit()
break
|
Send a MIME email.
:param e_from: The email address of the sender.
:param e_to: The email address or a list of email addresses of the recipient(s).
:param mime_msg: The MIME message to send.
:param conn_id: The ID of the SMTP connection to use.
:param dryrun: If True, the email will not be sent, but a log message will be generated.
|
python
|
airflow-core/src/airflow/utils/email.py
| 222
|
[
"e_from",
"e_to",
"mime_msg",
"conn_id",
"dryrun"
] |
None
| true
| 11
| 7.04
|
apache/airflow
| 43,597
|
sphinx
| false
|
memoize
|
function memoize(func, resolver) {
if (typeof func != 'function' || (resolver != null && typeof resolver != 'function')) {
throw new TypeError(FUNC_ERROR_TEXT);
}
var memoized = function() {
var args = arguments,
key = resolver ? resolver.apply(this, args) : args[0],
cache = memoized.cache;
if (cache.has(key)) {
return cache.get(key);
}
var result = func.apply(this, args);
memoized.cache = cache.set(key, result) || cache;
return result;
};
memoized.cache = new (memoize.Cache || MapCache);
return memoized;
}
|
Creates a function that memoizes the result of `func`. If `resolver` is
provided, it determines the cache key for storing the result based on the
arguments provided to the memoized function. By default, the first argument
provided to the memoized function is used as the map cache key. The `func`
is invoked with the `this` binding of the memoized function.
**Note:** The cache is exposed as the `cache` property on the memoized
function. Its creation may be customized by replacing the `_.memoize.Cache`
constructor with one whose instances implement the
[`Map`](http://ecma-international.org/ecma-262/7.0/#sec-properties-of-the-map-prototype-object)
method interface of `clear`, `delete`, `get`, `has`, and `set`.
@static
@memberOf _
@since 0.1.0
@category Function
@param {Function} func The function to have its output memoized.
@param {Function} [resolver] The function to resolve the cache key.
@returns {Function} Returns the new memoized function.
@example
var object = { 'a': 1, 'b': 2 };
var other = { 'c': 3, 'd': 4 };
var values = _.memoize(_.values);
values(object);
// => [1, 2]
values(other);
// => [3, 4]
object.a = 2;
values(object);
// => [1, 2]
// Modify the result cache.
values.cache.set(object, ['a', 'b']);
values(object);
// => ['a', 'b']
// Replace `_.memoize.Cache`.
_.memoize.Cache = WeakMap;
|
javascript
|
lodash.js
| 10,647
|
[
"func",
"resolver"
] | false
| 8
| 7.52
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
std
|
def std(
self,
ddof: int = 1,
numeric_only: bool = False,
):
"""
Compute standard deviation of groups, excluding missing values.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
.. versionchanged:: 2.0.0
numeric_only now defaults to ``False``.
Returns
-------
DataFrame or Series
Standard deviation of values within each group.
See Also
--------
core.resample.Resampler.mean : Compute mean of groups, excluding missing values.
core.resample.Resampler.median : Compute median of groups, excluding missing
values.
core.resample.Resampler.var : Compute variance of groups, excluding missing
values.
Examples
--------
>>> ser = pd.Series(
... [1, 3, 2, 4, 3, 8],
... index=pd.DatetimeIndex(
... [
... "2023-01-01",
... "2023-01-10",
... "2023-01-15",
... "2023-02-01",
... "2023-02-10",
... "2023-02-15",
... ]
... ),
... )
>>> ser.resample("MS").std()
2023-01-01 1.000000
2023-02-01 2.645751
Freq: MS, dtype: float64
"""
return self._downsample("std", ddof=ddof, numeric_only=numeric_only)
|
Compute standard deviation of groups, excluding missing values.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
.. versionchanged:: 2.0.0
numeric_only now defaults to ``False``.
Returns
-------
DataFrame or Series
Standard deviation of values within each group.
See Also
--------
core.resample.Resampler.mean : Compute mean of groups, excluding missing values.
core.resample.Resampler.median : Compute median of groups, excluding missing
values.
core.resample.Resampler.var : Compute variance of groups, excluding missing
values.
Examples
--------
>>> ser = pd.Series(
... [1, 3, 2, 4, 3, 8],
... index=pd.DatetimeIndex(
... [
... "2023-01-01",
... "2023-01-10",
... "2023-01-15",
... "2023-02-01",
... "2023-02-10",
... "2023-02-15",
... ]
... ),
... )
>>> ser.resample("MS").std()
2023-01-01 1.000000
2023-02-01 2.645751
Freq: MS, dtype: float64
|
python
|
pandas/core/resample.py
| 1,533
|
[
"self",
"ddof",
"numeric_only"
] | true
| 1
| 6.64
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
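A hedged example of how ``ddof`` changes the result for the same data as the docstring above (``ddof=1`` gives the sample estimate, ``ddof=0`` the population estimate); exact output formatting may differ slightly across pandas versions.

import pandas as pd

ser = pd.Series(
    [1, 3, 2, 4, 3, 8],
    index=pd.DatetimeIndex(["2023-01-01", "2023-01-10", "2023-01-15",
                            "2023-02-01", "2023-02-10", "2023-02-15"]),
)
ser.resample("MS").std(ddof=1)  # 1.000000 / 2.645751, as in the docstring
ser.resample("MS").std(ddof=0)  # 0.816497 / 2.160247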
asInt
|
public <R extends Number> Source<Integer> asInt(Adapter<? super T, ? extends R> adapter) {
return as(adapter).as(Number::intValue);
}
|
Return an adapted version of the source with {@link Integer} type.
@param <R> the resulting type
@param adapter an adapter to convert the current value to a number.
@return a new adapted source instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/PropertyMapper.java
| 187
|
[
"adapter"
] | true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
isin
|
def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]:
"""
Pointwise comparison for set containment in the given values.
Roughly equivalent to `np.array([x in values for x in self])`
Parameters
----------
values : np.ndarray or ExtensionArray
Values to compare every element in the array against.
Returns
-------
np.ndarray[bool]
With true at indices where value is in `values`.
See Also
--------
DataFrame.isin: Whether each element in the DataFrame is contained in values.
Index.isin: Return a boolean array where the index values are in values.
Series.isin: Whether elements in Series are contained in values.
Examples
--------
>>> arr = pd.array([1, 2, 3])
>>> arr.isin([1])
<BooleanArray>
[True, False, False]
Length: 3, dtype: boolean
"""
return isin(np.asarray(self), values)
|
Pointwise comparison for set containment in the given values.
Roughly equivalent to `np.array([x in values for x in self])`
Parameters
----------
values : np.ndarray or ExtensionArray
Values to compare every element in the array against.
Returns
-------
np.ndarray[bool]
With true at indices where value is in `values`.
See Also
--------
DataFrame.isin: Whether each element in the DataFrame is contained in values.
Index.isin: Return a boolean array where the index values are in values.
Series.isin: Whether elements in Series are contained in values.
Examples
--------
>>> arr = pd.array([1, 2, 3])
>>> arr.isin([1])
<BooleanArray>
[True, False, False]
Length: 3, dtype: boolean
|
python
|
pandas/core/arrays/base.py
| 1,573
|
[
"self",
"values"
] |
npt.NDArray[np.bool_]
| true
| 1
| 7.12
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
_scrubbed_inductor_config_for_logging
|
def _scrubbed_inductor_config_for_logging() -> Optional[str]:
"""
Method to parse and scrub uninteresting configs from inductor config
"""
# TypeSafeSerializer for json.dumps()
# Skips complex types as values in config dict
class TypeSafeSerializer(json.JSONEncoder):
def default(self, o: Any) -> Any:
try:
return super().default(o)
except Exception:
return "Value is not JSON serializable"
keys_to_scrub: set[Any] = set()
inductor_conf_str = None
inductor_config_copy = None
if torch._inductor.config:
try:
inductor_config_copy = torch._inductor.config.get_config_copy()
except (TypeError, AttributeError, RuntimeError, AssertionError):
inductor_conf_str = "Inductor Config cannot be pickled"
if inductor_config_copy is not None:
try:
for key, val in inductor_config_copy.items():
if not isinstance(key, str):
keys_to_scrub.add(key)
# Convert set() to list for json.dumps()
if isinstance(val, set):
inductor_config_copy[key] = list(val)
# Evict unwanted keys
for key in keys_to_scrub:
del inductor_config_copy[key]
# Stringify Inductor config
inductor_conf_str = json.dumps(
inductor_config_copy,
cls=TypeSafeSerializer,
skipkeys=True,
sort_keys=True,
)
except Exception:
# Don't crash because of runtime logging errors
inductor_conf_str = "Inductor Config is not JSON serializable"
return inductor_conf_str
|
Method to parse and scrub uninteresting configs from inductor config
|
python
|
torch/_dynamo/utils.py
| 1,648
|
[] |
Optional[str]
| true
| 7
| 6.56
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
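A hedged, standalone sketch of the scrubbing approach used above: drop non-string keys, convert sets to lists, and fall back to a placeholder string for values json cannot serialize. The config dict is made up for illustration.

import json

class TypeSafeSerializer(json.JSONEncoder):
    def default(self, o):
        try:
            return super().default(o)
        except Exception:
            return "Value is not JSON serializable"

config = {"max_fusion_size": 64, "backends": {"triton", "cpp"}, ("bad", "key"): 1}
clean = {k: (list(v) if isinstance(v, set) else v)          # sets -> lists for JSON
         for k, v in config.items() if isinstance(k, str)}  # drop non-string keys
print(json.dumps(clean, cls=TypeSafeSerializer, skipkeys=True, sort_keys=True))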
getTarget
|
@Override
public synchronized Object getTarget() throws Exception {
if (this.lazyTarget == null) {
logger.debug("Initializing lazy target object");
this.lazyTarget = createObject();
}
return this.lazyTarget;
}
|
Returns the lazy-initialized target object,
creating it on-the-fly if it doesn't exist already.
@see #createObject()
|
java
|
spring-aop/src/main/java/org/springframework/aop/target/AbstractLazyCreationTargetSource.java
| 78
|
[] |
Object
| true
| 2
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
cov
|
def cov(
self, other: Series, min_periods: int | None = None, ddof: int | None = 1
) -> Series:
"""
Compute covariance between each group and another Series.
Parameters
----------
other : Series
Series to compute covariance with.
min_periods : int, optional
Minimum number of observations required per pair of columns to
have a valid result.
ddof : int, optional
Delta degrees of freedom for variance calculation.
Returns
-------
Series
Covariance value for each group.
See Also
--------
Series.cov : Equivalent method on ``Series``.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4], index=[0, 0, 1, 1])
>>> g = s.groupby([0, 0, 1, 1])
>>> g.cov() # doctest: +SKIP
"""
result = self._op_via_apply(
"cov", other=other, min_periods=min_periods, ddof=ddof
)
return result
|
Compute covariance between each group and another Series.
Parameters
----------
other : Series
Series to compute covariance with.
min_periods : int, optional
Minimum number of observations required per pair of columns to
have a valid result.
ddof : int, optional
Delta degrees of freedom for variance calculation.
Returns
-------
Series
Covariance value for each group.
See Also
--------
Series.cov : Equivalent method on ``Series``.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4], index=[0, 0, 1, 1])
>>> g = s.groupby([0, 0, 1, 1])
>>> g.cov() # doctest: +SKIP
|
python
|
pandas/core/groupby/generic.py
| 1,705
|
[
"self",
"other",
"min_periods",
"ddof"
] |
Series
| true
| 1
| 7.12
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
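A hedged, fuller version of the docstring example above, computing the covariance of each group of ``s`` against the index-aligned values of another Series.

import pandas as pd

s = pd.Series([1, 2, 3, 4], index=[0, 1, 2, 3])
other = pd.Series([2, 4, 5, 9], index=[0, 1, 2, 3])
g = s.groupby([0, 0, 1, 1])
g.cov(other)
# roughly:
# 0    1.0
# 1    2.0
# dtype: float64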
validateIterator
|
void validateIterator() {
refreshIfEmpty();
if (delegate != originalDelegate) {
throw new ConcurrentModificationException();
}
}
|
If the delegate changed since the iterator was created, the iterator is no longer valid.
|
java
|
android/guava/src/com/google/common/collect/AbstractMapBasedMultimap.java
| 456
|
[] |
void
| true
| 2
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
unwrapCacheValue
|
private @Nullable Object unwrapCacheValue(@Nullable Object cacheValue) {
return (cacheValue instanceof Cache.ValueWrapper wrapper ? wrapper.get() : cacheValue);
}
|
Unwrap the given cache value, returning the value held by a {@link Cache.ValueWrapper}
or the raw value itself otherwise.
@param cacheValue the raw cache value
@return the unwrapped value, possibly {@code null}
|
java
|
spring-context/src/main/java/org/springframework/cache/interceptor/CacheAspectSupport.java
| 623
|
[
"cacheValue"
] |
Object
| true
| 2
| 7.84
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
all
|
public KafkaFuture<Collection<TransactionListing>> all() {
return allByBrokerId().thenApply(map -> {
List<TransactionListing> allListings = new ArrayList<>();
for (Collection<TransactionListing> listings : map.values()) {
allListings.addAll(listings);
}
return allListings;
});
}
|
Get all transaction listings. If any of the underlying requests fail, then the future
returned from this method will also fail with the first encountered error.
@return A future containing the collection of transaction listings. The future completes
when all transaction listings are available and fails after any non-retriable error.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/ListTransactionsResult.java
| 48
|
[] | true
| 1
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
hashCode
|
@Override
public int hashCode() {
int hash = 1;
for (int i = start; i < end; i++) {
hash *= 31;
hash += Double.hashCode(array[i]);
}
return hash;
}
|
Returns an unspecified hash code for the contents of this immutable array.
|
java
|
android/guava/src/com/google/common/primitives/ImmutableDoubleArray.java
| 607
|
[] | true
| 2
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
|
optimizedTextOrNull
|
@Override
public final XContentString optimizedTextOrNull() throws IOException {
if (currentToken() == Token.VALUE_NULL) {
return null;
}
return optimizedText();
}
|
Returns the text value of the current token as an {@link XContentString},
or {@code null} if the current token is {@code VALUE_NULL}.
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/support/AbstractXContentParser.java
| 278
|
[] |
XContentString
| true
| 2
| 6.56
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
moduleFinder
|
InMemoryModuleFinder moduleFinder(Set<String> missingModules) throws IOException {
Path[] modulePath = modulePath();
assert modulePath.length >= 1;
InMemoryModuleFinder moduleFinder = InMemoryModuleFinder.of(missingModules, modulePath);
if (modulePath[0].getFileSystem().provider().getScheme().equals("jar")) {
modulePath[0].getFileSystem().close();
}
return moduleFinder;
}
|
Returns a module finder capable of finding the modules that are loadable by this embedded
impl class loader.
<p> The module finder returned by this method can be used during resolution in order to
create a configuration. This configuration can subsequently be materialized as a module layer
in which classes and resources are loaded by this embedded impl class loader.
@param missingModules a set of module names to ignore if not present
|
java
|
libs/core/src/main/java/org/elasticsearch/core/internal/provider/EmbeddedImplClassLoader.java
| 295
|
[
"missingModules"
] |
InMemoryModuleFinder
| true
| 2
| 6.72
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
build
|
public ImmutableRangeSet<C> build() {
ImmutableList.Builder<Range<C>> mergedRangesBuilder =
new ImmutableList.Builder<>(ranges.size());
sort(ranges, rangeLexOrdering());
PeekingIterator<Range<C>> peekingItr = peekingIterator(ranges.iterator());
while (peekingItr.hasNext()) {
Range<C> range = peekingItr.next();
while (peekingItr.hasNext()) {
Range<C> nextRange = peekingItr.peek();
if (range.isConnected(nextRange)) {
checkArgument(
range.intersection(nextRange).isEmpty(),
"Overlapping ranges not permitted but found %s overlapping %s",
range,
nextRange);
range = range.span(peekingItr.next());
} else {
break;
}
}
mergedRangesBuilder.add(range);
}
ImmutableList<Range<C>> mergedRanges = mergedRangesBuilder.build();
if (mergedRanges.isEmpty()) {
return of();
} else if (mergedRanges.size() == 1 && getOnlyElement(mergedRanges).equals(Range.all())) {
return all();
} else {
return new ImmutableRangeSet<>(mergedRanges);
}
}
|
Returns an {@code ImmutableRangeSet} containing the ranges added to this builder.
@throws IllegalArgumentException if any input ranges have nonempty overlap
|
java
|
android/guava/src/com/google/common/collect/ImmutableRangeSet.java
| 831
|
[] | true
| 7
| 6.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
getNameOrUnnamed
|
static SmallString<64> getNameOrUnnamed(const NamedDecl *ND) {
auto Name = getName(ND);
if (Name.empty())
Name = "<unnamed>";
return Name;
}
|
Returns the diagnostic-friendly name of the node, or a constant value.
|
cpp
|
clang-tools-extra/clang-tidy/bugprone/EasilySwappableParametersCheck.cpp
| 1,947
|
[] | true
| 2
| 6.88
|
llvm/llvm-project
| 36,021
|
doxygen
| false
|
|
base_dir
|
def base_dir(self: Self) -> Path:
"""
Get the base directory for the Inductor cache.
Returns:
Path: The base directory path for Inductor cache files.
"""
from torch._inductor.runtime.runtime_utils import default_cache_dir
return Path(default_cache_dir(), "cache", self.name)
|
Get the base directory for the Inductor cache.
Returns:
Path: The base directory path for Inductor cache files.
|
python
|
torch/_inductor/cache.py
| 411
|
[
"self"
] |
Path
| true
| 1
| 6.72
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
lagroots
|
def lagroots(c):
"""
Compute the roots of a Laguerre series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * L_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
numpy.polynomial.polynomial.polyroots
numpy.polynomial.legendre.legroots
numpy.polynomial.chebyshev.chebroots
numpy.polynomial.hermite.hermroots
numpy.polynomial.hermite_e.hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
The Laguerre series basis polynomials aren't powers of `x` so the
results of this function may seem unintuitive.
Examples
--------
>>> from numpy.polynomial.laguerre import lagroots, lagfromroots
>>> coef = lagfromroots([0, 1, 2])
>>> coef
array([ 2., -8., 12., -6.])
>>> lagroots(coef)
array([-4.4408921e-16, 1.0000000e+00, 2.0000000e+00])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) <= 1:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([1 + c[0] / c[1]])
# rotated companion matrix reduces error
m = lagcompanion(c)[::-1, ::-1]
r = np.linalg.eigvals(m)
r.sort()
return r
|
Compute the roots of a Laguerre series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * L_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
numpy.polynomial.polynomial.polyroots
numpy.polynomial.legendre.legroots
numpy.polynomial.chebyshev.chebroots
numpy.polynomial.hermite.hermroots
numpy.polynomial.hermite_e.hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
The Laguerre series basis polynomials aren't powers of `x` so the
results of this function may seem unintuitive.
Examples
--------
>>> from numpy.polynomial.laguerre import lagroots, lagfromroots
>>> coef = lagfromroots([0, 1, 2])
>>> coef
array([ 2., -8., 12., -6.])
>>> lagroots(coef)
array([-4.4408921e-16, 1.0000000e+00, 2.0000000e+00])
|
python
|
numpy/polynomial/laguerre.py
| 1,467
|
[
"c"
] | false
| 3
| 7.12
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
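A hedged sanity check of the docstring example above: evaluating the series at the recovered roots should give values close to zero.

import numpy as np
from numpy.polynomial.laguerre import lagfromroots, lagroots, lagval

coef = lagfromroots([0, 1, 2])
roots = lagroots(coef)
print(np.allclose(lagval(roots, coef), 0.0))  # True, up to round-off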
hashCode
|
@Override
public int hashCode() {
// See Map.Entry API specification
return Objects.hashCode(getLeft()) ^ Objects.hashCode(getMiddle()) ^ Objects.hashCode(getRight());
}
|
Returns a suitable hash code.
<p>
The hash code is adapted from the definition in {@code Map.Entry}.
</p>
@return the hash code.
|
java
|
src/main/java/org/apache/commons/lang3/tuple/Triple.java
| 171
|
[] | true
| 1
| 7.2
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
numRecords
|
public int numRecords() {
int numRecords = 0;
if (!batches.isEmpty()) {
Iterator<Map.Entry<TopicIdPartition, ShareInFlightBatch<K, V>>> iterator = batches.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<TopicIdPartition, ShareInFlightBatch<K, V>> entry = iterator.next();
ShareInFlightBatch<K, V> batch = entry.getValue();
if (batch.isEmpty()) {
if (!batch.hasRenewals()) {
iterator.remove();
}
} else {
numRecords += batch.numRecords();
}
}
}
return numRecords;
}
|
@return the total number of non-control messages for this fetch, across all partitions
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetch.java
| 92
|
[] | true
| 5
| 7.6
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
is_string_dtype
|
def is_string_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of the string dtype.
If an array is passed with an object dtype, the elements must be
inferred as strings.
Parameters
----------
arr_or_dtype : array-like or dtype
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of the string dtype.
See Also
--------
api.types.is_string_dtype : Check whether the provided array or dtype
is of the string dtype.
Examples
--------
>>> from pandas.api.types import is_string_dtype
>>> is_string_dtype(str)
True
>>> is_string_dtype(object)
True
>>> is_string_dtype(int)
False
>>> is_string_dtype(np.array(["a", "b"]))
True
>>> is_string_dtype(pd.Series([1, 2]))
False
>>> is_string_dtype(pd.Series([1, 2], dtype=object))
False
"""
if hasattr(arr_or_dtype, "dtype") and _get_dtype(arr_or_dtype).kind == "O":
return is_all_strings(arr_or_dtype)
def condition(dtype) -> bool:
if is_string_or_object_np_dtype(dtype):
return True
try:
return dtype == "string"
except TypeError:
return False
return _is_dtype(arr_or_dtype, condition)
|
Check whether the provided array or dtype is of the string dtype.
If an array is passed with an object dtype, the elements must be
inferred as strings.
Parameters
----------
arr_or_dtype : array-like or dtype
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of the string dtype.
See Also
--------
api.types.is_string_dtype : Check whether the provided array or dtype
is of the string dtype.
Examples
--------
>>> from pandas.api.types import is_string_dtype
>>> is_string_dtype(str)
True
>>> is_string_dtype(object)
True
>>> is_string_dtype(int)
False
>>> is_string_dtype(np.array(["a", "b"]))
True
>>> is_string_dtype(pd.Series([1, 2]))
False
>>> is_string_dtype(pd.Series([1, 2], dtype=object))
False
|
python
|
pandas/core/dtypes/common.py
| 611
|
[
"arr_or_dtype"
] |
bool
| true
| 4
| 8.16
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
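A hedged illustration of the object-dtype rule stated above: an object-dtype array counts as string dtype only when every element is inferred to be a string.

import pandas as pd
from pandas.api.types import is_string_dtype

is_string_dtype(pd.Series(["a", "b"], dtype=object))  # True  (all elements are strings)
is_string_dtype(pd.Series(["a", 1], dtype=object))    # False (mixed element types)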
getAndAdd
|
public short getAndAdd(final Number operand) {
final short last = value;
this.value += operand.shortValue();
return last;
}
|
Increments this instance's value by {@code operand}; this method returns the value associated with the instance
immediately prior to the addition operation. This method is not thread safe.
@param operand the quantity to add, not null.
@throws NullPointerException if {@code operand} is null.
@return the value associated with this instance immediately before the operand was added.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableShort.java
| 207
|
[
"operand"
] | true
| 1
| 6.56
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
faceIjkToCellBoundaryClassII
|
private CellBoundary faceIjkToCellBoundaryClassII(int adjRes) {
final LatLng[] points = new LatLng[Constants.NUM_HEX_VERTS];
final FaceIJK fijk = new FaceIJK(this.face, new CoordIJK(0, 0, 0));
for (int vert = 0; vert < Constants.NUM_HEX_VERTS; vert++) {
fijk.coord.reset(
VERTEX_CLASSII[vert][0] + this.coord.i,
VERTEX_CLASSII[vert][1] + this.coord.j,
VERTEX_CLASSII[vert][2] + this.coord.k
);
fijk.coord.ijkNormalize();
fijk.face = this.face;
fijk.adjustOverageClassII(adjRes, false, true);
// convert vertex to lat/lng and add to the result
// vert == start + NUM_HEX_VERTS is only used to test for possible
// intersection on last edge
points[vert] = fijk.coord.ijkToGeo(fijk.face, adjRes, true);
}
return new CellBoundary(points, Constants.NUM_HEX_VERTS);
}
|
Generates the cell boundary in spherical coordinates for a cell given by this
FaceIJK address at a specified resolution.
@param adjRes The H3 resolution of the cell.
|
java
|
libs/h3/src/main/java/org/elasticsearch/h3/FaceIJK.java
| 557
|
[
"adjRes"
] |
CellBoundary
| true
| 2
| 6.72
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
nanmedian
|
def nanmedian(
values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, mask=None
) -> float | np.ndarray:
"""
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float | ndarray
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> from pandas.core import nanops
>>> s = pd.Series([1, np.nan, 2, 2])
>>> nanops.nanmedian(s.values)
2.0
>>> s = pd.Series([np.nan, np.nan, np.nan])
>>> nanops.nanmedian(s.values)
nan
"""
# for floats without mask, the data already uses NaN as missing value
# indicator, and `mask` will be calculated from that below -> in those
# cases we never need to set NaN to the masked values
using_nan_sentinel = values.dtype.kind == "f" and mask is None
def get_median(x: np.ndarray, _mask=None):
if _mask is None:
_mask = notna(x)
else:
_mask = ~_mask
if not skipna and not _mask.all():
return np.nan
with warnings.catch_warnings():
# Suppress RuntimeWarning about All-NaN slice
warnings.filterwarnings(
"ignore", "All-NaN slice encountered", RuntimeWarning
)
warnings.filterwarnings("ignore", "Mean of empty slice", RuntimeWarning)
res = np.nanmedian(x[_mask])
return res
dtype = values.dtype
values, mask = _get_values(values, skipna, mask=mask, fill_value=None)
if values.dtype.kind != "f":
if values.dtype == object:
# GH#34671 avoid casting strings to numeric
inferred = lib.infer_dtype(values)
if inferred in ["string", "mixed"]:
raise TypeError(f"Cannot convert {values} to numeric")
try:
values = values.astype("f8")
except ValueError as err:
# e.g. "could not convert string to float: 'a'"
raise TypeError(str(err)) from err
if not using_nan_sentinel and mask is not None:
if not values.flags.writeable:
values = values.copy()
values[mask] = np.nan
notempty = values.size
res: float | np.ndarray
# an array from a frame
if values.ndim > 1 and axis is not None:
# there's a non-empty array to apply over otherwise numpy raises
if notempty:
if not skipna:
res = np.apply_along_axis(get_median, axis, values)
else:
# fastpath for the skipna case
with warnings.catch_warnings():
# Suppress RuntimeWarning about All-NaN slice
warnings.filterwarnings(
"ignore", "All-NaN slice encountered", RuntimeWarning
)
if (values.shape[1] == 1 and axis == 0) or (
values.shape[0] == 1 and axis == 1
):
# GH52788: fastpath when squeezable, nanmedian for 2D array slow
res = np.nanmedian(np.squeeze(values), keepdims=True)
else:
res = np.nanmedian(values, axis=axis)
else:
# must return the correct shape, but median is not defined for the
# empty set so return nans of shape "everything but the passed axis"
# since "axis" is where the reduction would occur if we had a nonempty
# array
res = _get_empty_reduction_result(values.shape, axis)
else:
# otherwise return a scalar value
res = get_median(values, mask) if notempty else np.nan
return _wrap_results(res, dtype)
|
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float | ndarray
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> from pandas.core import nanops
>>> s = pd.Series([1, np.nan, 2, 2])
>>> nanops.nanmedian(s.values)
2.0
>>> s = pd.Series([np.nan, np.nan, np.nan])
>>> nanops.nanmedian(s.values)
nan
|
python
|
pandas/core/nanops.py
| 731
|
[
"values",
"axis",
"skipna",
"mask"
] |
float | np.ndarray
| true
| 25
| 7.2
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
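A hedged 2-D example of the axis handling described above, using the same internal helper as the docstring: the median is taken per column and NaNs are skipped.

import numpy as np
from pandas.core import nanops

arr = np.array([[1.0, np.nan], [3.0, 4.0], [5.0, 6.0]])
nanops.nanmedian(arr, axis=0)  # array([3., 5.])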
onSend
|
public ProducerRecord<K, V> onSend(ProducerRecord<K, V> record) {
ProducerRecord<K, V> interceptRecord = record;
for (Plugin<ProducerInterceptor<K, V>> interceptorPlugin : this.interceptorPlugins) {
try {
interceptRecord = interceptorPlugin.get().onSend(interceptRecord);
} catch (Exception e) {
// do not propagate interceptor exception, log and continue calling other interceptors
// be careful not to throw exception from here
if (record != null)
log.warn("Error executing interceptor onSend callback for topic: {}, partition: {}", record.topic(), record.partition(), e);
else
log.warn("Error executing interceptor onSend callback", e);
}
}
return interceptRecord;
}
|
This is called when client sends the record to KafkaProducer, before key and value gets serialized.
The method calls {@link ProducerInterceptor#onSend(ProducerRecord)} method. ProducerRecord
returned from the first interceptor's onSend() is passed to the second interceptor onSend(), and so on in the
interceptor chain. The record returned from the last interceptor is returned from this method.
This method does not throw exceptions. Exceptions thrown by any of the interceptor methods are caught and ignored.
If an interceptor in the middle of the chain, that normally modifies the record, throws an exception,
the next interceptor in the chain will be called with a record returned by the previous interceptor that did not
throw an exception.
@param record the record from client
@return producer record to send to topic/partition
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerInterceptors.java
| 63
|
[
"record"
] | true
| 3
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
_get_executor_get_task_log
|
def _get_executor_get_task_log(
self, ti: TaskInstance | TaskInstanceHistory
) -> Callable[[TaskInstance | TaskInstanceHistory, int], tuple[list[str], list[str]]]:
"""
Get the get_task_log method from executor of current task instance.
Since there might be multiple executors, we need to get the executor of the current task instance instead of the default executor.
:param ti: task instance object
:return: get_task_log method of the executor
"""
executor_name = ti.executor or self.DEFAULT_EXECUTOR_KEY
executor = self.executor_instances.get(executor_name)
if executor is not None:
return executor.get_task_log
if executor_name == self.DEFAULT_EXECUTOR_KEY:
self.executor_instances[executor_name] = ExecutorLoader.get_default_executor()
else:
self.executor_instances[executor_name] = ExecutorLoader.load_executor(executor_name)
return self.executor_instances[executor_name].get_task_log
|
Get the get_task_log method from executor of current task instance.
Since there might be multiple executors, we need to get the executor of the current task instance instead of the default executor.
:param ti: task instance object
:return: get_task_log method of the executor
|
python
|
airflow-core/src/airflow/utils/log/file_task_handler.py
| 557
|
[
"self",
"ti"
] |
Callable[[TaskInstance | TaskInstanceHistory, int], tuple[list[str], list[str]]]
| true
| 5
| 8.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
nop
|
@SuppressWarnings("unchecked")
public static <T> Consumer<T> nop() {
return NOP;
}
|
Gets the NOP Consumer singleton.
@param <T> type type to consume.
@return the NOP Consumer singleton.
|
java
|
src/main/java/org/apache/commons/lang3/function/Consumers.java
| 54
|
[] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
reshape
|
def reshape(self, *s, **kwargs):
"""
Give a new shape to the array without changing its data.
Returns a masked array containing the same data, but with a new shape.
The result is a view on the original array; if this is not possible, a
ValueError is raised.
Parameters
----------
shape : int or tuple of ints
The new shape should be compatible with the original shape. If an
integer is supplied, then the result will be a 1-D array of that
length.
order : {'C', 'F'}, optional
Determines whether the array data should be viewed as in C
(row-major) or FORTRAN (column-major) order.
Returns
-------
reshaped_array : array
A new view on the array.
See Also
--------
reshape : Equivalent function in the masked array module.
numpy.ndarray.reshape : Equivalent method on ndarray object.
numpy.reshape : Equivalent function in the NumPy module.
Notes
-----
The reshaping operation cannot guarantee that a copy will not be made;
to modify the shape in place, use ``a.shape = s``.
Examples
--------
>>> import numpy as np
>>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1])
>>> x
masked_array(
data=[[--, 2],
[3, --]],
mask=[[ True, False],
[False, True]],
fill_value=999999)
>>> x = x.reshape((4,1))
>>> x
masked_array(
data=[[--],
[2],
[3],
[--]],
mask=[[ True],
[False],
[False],
[ True]],
fill_value=999999)
"""
result = self._data.reshape(*s, **kwargs).view(type(self))
result._update_from(self)
mask = self._mask
if mask is not nomask:
result._mask = mask.reshape(*s, **kwargs)
return result
|
Give a new shape to the array without changing its data.
Returns a masked array containing the same data, but with a new shape.
The result is a view on the original array; if this is not possible, a
ValueError is raised.
Parameters
----------
shape : int or tuple of ints
The new shape should be compatible with the original shape. If an
integer is supplied, then the result will be a 1-D array of that
length.
order : {'C', 'F'}, optional
Determines whether the array data should be viewed as in C
(row-major) or FORTRAN (column-major) order.
Returns
-------
reshaped_array : array
A new view on the array.
See Also
--------
reshape : Equivalent function in the masked array module.
numpy.ndarray.reshape : Equivalent method on ndarray object.
numpy.reshape : Equivalent function in the NumPy module.
Notes
-----
The reshaping operation cannot guarantee that a copy will not be made;
to modify the shape in place, use ``a.shape = s``.
Examples
--------
>>> import numpy as np
>>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1])
>>> x
masked_array(
data=[[--, 2],
[3, --]],
mask=[[ True, False],
[False, True]],
fill_value=999999)
>>> x = x.reshape((4,1))
>>> x
masked_array(
data=[[--],
[2],
[3],
[--]],
mask=[[ True],
[False],
[False],
[ True]],
fill_value=999999)
|
python
|
numpy/ma/core.py
| 4,751
|
[
"self"
] | false
| 2
| 7.6
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
createKeyStore
|
private static KeyStore createKeyStore(@Nullable String type)
throws KeyStoreException, IOException, NoSuchAlgorithmException, CertificateException {
KeyStore store = KeyStore.getInstance(StringUtils.hasText(type) ? type : KeyStore.getDefaultType());
store.load(null);
return store;
}
|
Create a new, empty {@link KeyStore} of the given type, falling back to the
platform default type when no type is specified.
@param type the key store type, or {@code null} to use the default
@return the new, empty key store
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/pem/PemSslStoreBundle.java
| 117
|
[
"type"
] |
KeyStore
| true
| 2
| 6.4
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
visitTemplateExpression
|
function visitTemplateExpression(node: TemplateExpression): Expression {
let expression: Expression = factory.createStringLiteral(node.head.text);
for (const span of node.templateSpans) {
const args = [Debug.checkDefined(visitNode(span.expression, visitor, isExpression))];
if (span.literal.text.length > 0) {
args.push(factory.createStringLiteral(span.literal.text));
}
expression = factory.createCallExpression(
factory.createPropertyAccessExpression(expression, "concat"),
/*typeArguments*/ undefined,
args,
);
}
return setTextRange(expression, node);
}
|
Visits a TemplateExpression node.
@param node A TemplateExpression node.
|
typescript
|
src/compiler/transformers/es2015.ts
| 4,804
|
[
"node"
] | true
| 2
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
registerProcessedProperty
|
public void registerProcessedProperty(String propertyName) {
if (this.processedProperties == null) {
this.processedProperties = new HashSet<>(4);
}
this.processedProperties.add(propertyName);
}
|
Register the specified property as "processed" in the sense
of some processor calling the corresponding setter method
outside the PropertyValue(s) mechanism.
<p>This will lead to {@code true} being returned from
a {@link #contains} call for the specified property.
@param propertyName the name of the property.
|
java
|
spring-beans/src/main/java/org/springframework/beans/MutablePropertyValues.java
| 334
|
[
"propertyName"
] |
void
| true
| 2
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
reindex
|
def reindex(
self,
target,
method: ReindexMethod | None = None,
level=None,
limit: int | None = None,
tolerance: float | None = None,
) -> tuple[Index, npt.NDArray[np.intp] | None]:
"""
Create index with target's values.
Parameters
----------
target : an iterable
An iterable containing the values to be used for creating the new index.
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
level : int, optional
Level of multiindex.
limit : int, optional
Maximum number of consecutive labels in ``target`` to match for
inexact matches.
tolerance : int, float, or list-like, optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
Returns
-------
new_index : pd.Index
Resulting index.
indexer : np.ndarray[np.intp] or None
Indices of output values in original index.
Raises
------
TypeError
If ``method`` passed along with ``level``.
ValueError
If non-unique multi-index
ValueError
If non-unique index and ``method`` or ``limit`` passed.
See Also
--------
Series.reindex : Conform Series to new index with optional filling logic.
DataFrame.reindex : Conform DataFrame to new index with optional filling logic.
Examples
--------
>>> idx = pd.Index(["car", "bike", "train", "tractor"])
>>> idx
Index(['car', 'bike', 'train', 'tractor'], dtype='object')
>>> idx.reindex(["car", "bike"])
(Index(['car', 'bike'], dtype='object'), array([0, 1]))
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, "name")
# GH7774: preserve dtype/tz if target is empty and not an Index.
if is_iterator(target):
target = list(target)
if not isinstance(target, Index) and len(target) == 0:
if level is not None and self._is_multi:
# "Index" has no attribute "levels"; maybe "nlevels"?
idx = self.levels[level] # type: ignore[attr-defined]
else:
idx = self
target = idx[:0]
else:
target = ensure_index(target)
if level is not None and (
isinstance(self, ABCMultiIndex) or isinstance(target, ABCMultiIndex)
):
if method is not None:
raise TypeError("Fill method not supported if level passed")
# TODO: tests where passing `keep_order=not self._is_multi`
# makes a difference for non-MultiIndex case
target, indexer, _ = self._join_level(
target, level, how="right", keep_order=not self._is_multi
)
else:
if self.equals(target):
indexer = None
else:
if self._index_as_unique:
indexer = self.get_indexer(
target, method=method, limit=limit, tolerance=tolerance
)
elif self._is_multi:
raise ValueError("cannot handle a non-unique multi-index!")
elif not self.is_unique:
# GH#42568
raise ValueError("cannot reindex on an axis with duplicate labels")
else:
indexer, _ = self.get_indexer_non_unique(target)
target = self._wrap_reindex_result(target, indexer, preserve_names)
return target, indexer
|
Create index with target's values.
Parameters
----------
target : an iterable
An iterable containing the values to be used for creating the new index.
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
level : int, optional
Level of multiindex.
limit : int, optional
Maximum number of consecutive labels in ``target`` to match for
inexact matches.
tolerance : int, float, or list-like, optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
Returns
-------
new_index : pd.Index
Resulting index.
indexer : np.ndarray[np.intp] or None
Indices of output values in original index.
Raises
------
TypeError
If ``method`` passed along with ``level``.
ValueError
If non-unique multi-index
ValueError
If non-unique index and ``method`` or ``limit`` passed.
See Also
--------
Series.reindex : Conform Series to new index with optional filling logic.
DataFrame.reindex : Conform DataFrame to new index with optional filling logic.
Examples
--------
>>> idx = pd.Index(["car", "bike", "train", "tractor"])
>>> idx
Index(['car', 'bike', 'train', 'tractor'], dtype='object')
>>> idx.reindex(["car", "bike"])
(Index(['car', 'bike'], dtype='object'), array([0, 1]))
|
python
|
pandas/core/indexes/base.py
| 4,149
|
[
"self",
"target",
"method",
"level",
"limit",
"tolerance"
] |
tuple[Index, npt.NDArray[np.intp] | None]
| true
| 19
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
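A hedged example of the inexact-match options described above; exact dtypes in the output may vary across pandas versions.

import pandas as pd

idx = pd.Index([10, 20, 30])
idx.reindex([12, 29], method="nearest")
# (Index([12, 29], dtype='int64'), array([0, 2]))
idx.reindex([12, 29], method="nearest", tolerance=1)
# 12 is more than 1 away from every label, so its indexer entry becomes -1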
hashCode
|
@Override
public int hashCode() {
final char[] buf = buffer;
int hash = 0;
for (int i = size - 1; i >= 0; i--) {
hash = 31 * hash + buf[i];
}
return hash;
}
|
Gets a suitable hash code for this builder.
@return a hash code
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,973
|
[] | true
| 2
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
_aggregate_score_dicts
|
def _aggregate_score_dicts(scores):
"""Aggregate the list of dict to dict of np ndarray
The aggregated output of _aggregate_score_dicts will be a list of dict
of form [{'prec': 0.1, 'acc':1.0}, {'prec': 0.1, 'acc':1.0}, ...]
Convert it to a dict of array {'prec': np.array([0.1 ...]), ...}
Parameters
----------
scores : list of dict
List of dicts of the scores for all scorers. This is a flat list,
assumed originally to be of row major order.
Example
-------
>>> scores = [{'a': 1, 'b':10}, {'a': 2, 'b':2}, {'a': 3, 'b':3},
... {'a': 10, 'b': 10}] # doctest: +SKIP
>>> _aggregate_score_dicts(scores) # doctest: +SKIP
{'a': array([1, 2, 3, 10]),
'b': array([10, 2, 3, 10])}
"""
return {
key: (
np.asarray([score[key] for score in scores])
if isinstance(scores[0][key], numbers.Number)
else [score[key] for score in scores]
)
for key in scores[0]
}
|
Aggregate the list of dict to dict of np ndarray
The aggregated output of _aggregate_score_dicts will be a list of dict
of form [{'prec': 0.1, 'acc':1.0}, {'prec': 0.1, 'acc':1.0}, ...]
Convert it to a dict of array {'prec': np.array([0.1 ...]), ...}
Parameters
----------
scores : list of dict
List of dicts of the scores for all scorers. This is a flat list,
assumed originally to be of row major order.
Example
-------
>>> scores = [{'a': 1, 'b':10}, {'a': 2, 'b':2}, {'a': 3, 'b':3},
... {'a': 10, 'b': 10}] # doctest: +SKIP
>>> _aggregate_score_dicts(scores) # doctest: +SKIP
{'a': array([1, 2, 3, 10]),
'b': array([10, 2, 3, 10])}
|
python
|
sklearn/model_selection/_validation.py
| 2,463
|
[
"scores"
] | false
| 2
| 7.68
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
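A self-contained rerun of the docstring example above, showing the list-of-dicts to dict-of-arrays transformation without importing the private helper.

import numbers
import numpy as np

scores = [{"a": 1, "b": 10}, {"a": 2, "b": 2}, {"a": 3, "b": 3}, {"a": 10, "b": 10}]
aggregated = {
    key: np.asarray([s[key] for s in scores])
    if isinstance(scores[0][key], numbers.Number)
    else [s[key] for s in scores]
    for key in scores[0]
}
print(aggregated)  # {'a': array([ 1,  2,  3, 10]), 'b': array([10,  2,  3, 10])}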
producerEpoch
|
@Override
public short producerEpoch() {
return buffer.getShort(PRODUCER_EPOCH_OFFSET);
}
|
Gets the producer epoch of the batch.
@return The producer epoch
|
java
|
clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java
| 194
|
[] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
CursorBase
|
CursorBase(BufType* buf, size_t len) noexcept : crtBuf_(buf), buffer_(buf) {
if (crtBuf_) {
crtPos_ = crtBegin_ = crtBuf_->data();
crtEnd_ = crtBuf_->tail();
if (uintptr_t(crtPos_) + len < uintptr_t(crtEnd_)) {
crtEnd_ = crtPos_ + len;
}
remainingLen_ = len - (crtEnd_ - crtPos_);
}
}
|
Construct a bounded cursor wrapping an IOBuf.
@param len An upper bound on the number of bytes available to this cursor.
|
cpp
|
folly/io/Cursor.h
| 94
|
[
"len"
] | true
| 3
| 7.04
|
facebook/folly
| 30,157
|
doxygen
| false
|
|
ClientReaderWriter
|
ClientReaderWriter(grpc::ChannelInterface* channel,
const grpc::internal::RpcMethod& method,
grpc::ClientContext* context)
: channel_(channel),
context_(context),
cq_(grpc_completion_queue_attributes{
GRPC_CQ_CURRENT_VERSION, GRPC_CQ_PLUCK, GRPC_CQ_DEFAULT_POLLING,
nullptr}), // Pluckable cq
call_(channel->CreateCall(method, context, &cq_)) {
if (!context_->initial_metadata_corked_) {
grpc::internal::CallOpSet<grpc::internal::CallOpSendInitialMetadata> ops;
ops.SendInitialMetadata(&context->send_initial_metadata_,
context->initial_metadata_flags());
call_.PerformOps(&ops);
cq_.Pluck(&ops);
}
}
|
Blocks to create the stream; unless corked, the {@code context}'s initial metadata is used to send to the server when starting the call.
|
cpp
|
include/grpcpp/support/sync_stream.h
| 557
|
[] | true
| 2
| 6.72
|
grpc/grpc
| 44,113
|
doxygen
| false
|
|
consumingForArray
|
private static <I extends Iterator<?>> Iterator<I> consumingForArray(@Nullable I... elements) {
return new UnmodifiableIterator<I>() {
int index = 0;
@Override
public boolean hasNext() {
return index < elements.length;
}
@Override
public I next() {
if (!hasNext()) {
throw new NoSuchElementException();
}
/*
* requireNonNull is safe because our callers always pass non-null arguments. Each element
* of the array becomes null only when we iterate past it and then clear it.
*/
I result = requireNonNull(elements[index]);
elements[index] = null;
index++;
return result;
}
};
}
|
Returns an Iterator that walks the specified array, nulling out elements behind it. This can
avoid memory leaks when an element is no longer necessary.
<p>This method accepts an array with element type {@code @Nullable T}, but callers must pass an
array whose contents are initially non-null. The {@code @Nullable} annotation indicates that
this method will write nulls into the array during iteration.
<p>This is mainly just to avoid the intermediate ArrayDeque in ConsumingQueueIterator.
|
java
|
android/guava/src/com/google/common/collect/Iterators.java
| 468
|
[] | true
| 2
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
list_keys
|
def list_keys(
self,
bucket_name: str | None = None,
prefix: str | None = None,
delimiter: str | None = None,
page_size: int | None = None,
max_items: int | None = None,
start_after_key: str | None = None,
from_datetime: datetime | None = None,
to_datetime: datetime | None = None,
object_filter: Callable[..., list] | None = None,
apply_wildcard: bool = False,
) -> list:
"""
List keys in a bucket under prefix and not containing delimiter.
.. seealso::
- :external+boto3:py:class:`S3.Paginator.ListObjectsV2`
:param bucket_name: the name of the bucket
:param prefix: a key prefix
:param delimiter: the delimiter marks key hierarchy.
:param page_size: pagination size
:param max_items: maximum items to return
:param start_after_key: should return only keys greater than this key
:param from_datetime: should return only keys with LastModified attr greater than or equal to
    from_datetime
:param to_datetime: should return only keys with LastModified attr less than this to_datetime
:param object_filter: Function that receives the list of the S3 objects, from_datetime and
to_datetime and returns the List of matched key.
:param apply_wildcard: whether to treat '*' as a wildcard or a plain symbol in the prefix.
**Example**: Returns the list of S3 objects with LastModified attr greater than from_datetime
and less than to_datetime:
.. code-block:: python
def object_filter(
keys: list,
from_datetime: datetime | None = None,
to_datetime: datetime | None = None,
) -> list:
def _is_in_period(input_date: datetime) -> bool:
if from_datetime is not None and input_date < from_datetime:
return False
if to_datetime is not None and input_date > to_datetime:
return False
return True
return [k["Key"] for k in keys if _is_in_period(k["LastModified"])]
:return: a list of matched keys
"""
_original_prefix = prefix or ""
_apply_wildcard = bool(apply_wildcard and "*" in _original_prefix)
_prefix = _original_prefix.split("*", 1)[0] if _apply_wildcard else _original_prefix
delimiter = delimiter or ""
start_after_key = start_after_key or ""
object_filter_usr = object_filter
config = {
"PageSize": page_size,
"MaxItems": max_items,
}
paginator = self.get_conn().get_paginator("list_objects_v2")
params = {
"Bucket": bucket_name,
"Prefix": _prefix,
"Delimiter": delimiter,
"PaginationConfig": config,
"StartAfter": start_after_key,
}
if self._requester_pays:
params["RequestPayer"] = "requester"
response = paginator.paginate(**params)
keys: list[str] = []
for page in response:
if "Contents" in page:
new_keys = page["Contents"]
if _apply_wildcard:
new_keys = (k for k in new_keys if fnmatch.fnmatch(k["Key"], _original_prefix))
keys.extend(new_keys)
if object_filter_usr is not None:
return object_filter_usr(keys, from_datetime, to_datetime)
return self._list_key_object_filter(keys, from_datetime, to_datetime)
|
List keys in a bucket under prefix and not containing delimiter.
.. seealso::
- :external+boto3:py:class:`S3.Paginator.ListObjectsV2`
:param bucket_name: the name of the bucket
:param prefix: a key prefix
:param delimiter: the delimiter marks key hierarchy.
:param page_size: pagination size
:param max_items: maximum items to return
:param start_after_key: should return only keys greater than this key
:param from_datetime: should return only keys with LastModified attr greater than or equal to
    from_datetime
:param to_datetime: should return only keys with LastModified attr less than this to_datetime
:param object_filter: Function that receives the list of the S3 objects, from_datetime and
to_datetime and returns the List of matched key.
:param apply_wildcard: whether to treat '*' as a wildcard or a plain symbol in the prefix.
**Example**: Returns the list of S3 objects with LastModified attr greater than from_datetime
and less than to_datetime:
.. code-block:: python
def object_filter(
keys: list,
from_datetime: datetime | None = None,
to_datetime: datetime | None = None,
) -> list:
def _is_in_period(input_date: datetime) -> bool:
if from_datetime is not None and input_date < from_datetime:
return False
if to_datetime is not None and input_date > to_datetime:
return False
return True
return [k["Key"] for k in keys if _is_in_period(k["LastModified"])]
:return: a list of matched keys
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/s3.py
| 836
|
[
"self",
"bucket_name",
"prefix",
"delimiter",
"page_size",
"max_items",
"start_after_key",
"from_datetime",
"to_datetime",
"object_filter",
"apply_wildcard"
] |
list
| true
| 11
| 7.36
|
apache/airflow
| 43,597
|
sphinx
| false
|
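A hedged usage sketch for the method above; the connection id, bucket, and prefix are placeholders. With ``apply_wildcard=True`` only the part of the prefix before the first ``*`` is sent to S3, and the full pattern is then matched client-side with fnmatch.

from airflow.providers.amazon.aws.hooks.s3 import S3Hook

# Placeholder connection id, bucket name, and prefix.
hook = S3Hook(aws_conn_id="aws_default")
keys = hook.list_keys(
    bucket_name="my-bucket",
    prefix="logs/2024-*/*.json",
    apply_wildcard=True,
)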
reconnect
|
URLConnection reconnect(JarFile jarFile, URLConnection existingConnection) throws IOException {
Boolean useCaches = (existingConnection != null) ? existingConnection.getUseCaches() : null;
URLConnection connection = openConnection(jarFile);
if (useCaches != null && connection != null) {
connection.setUseCaches(useCaches);
}
return connection;
}
|
Reconnect to the {@link JarFile}, returning a replacement {@link URLConnection}.
@param jarFile the jar file
@param existingConnection the existing connection
@return a newly opened connection inheriting the same {@code useCaches} value as the
existing connection
@throws IOException on I/O error
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/jar/UrlJarFiles.java
| 120
|
[
"jarFile",
"existingConnection"
] |
URLConnection
| true
| 4
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
asWriter
|
public Writer asWriter() {
return new StrBuilderWriter();
}
|
Gets this builder as a Writer that can be written to.
<p>
This method allows you to populate the contents of the builder
using any standard method that takes a Writer.
</p>
<p>
To use, simply create a {@link StrBuilder},
call {@code asWriter}, and populate away. The data is available
at any time using the methods of the {@link StrBuilder}.
</p>
<p>
The internal character array is shared between the builder and the writer.
This allows you to intermix calls that append to the builder and
write using the writer, and the changes will occur correctly.
Note however, that no synchronization occurs, so you must perform
all operations with the builder and the writer in one thread.
</p>
<p>
The returned writer ignores the close and flush methods.
</p>
@return a writer that populates this builder
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,554
|
[] |
Writer
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
scanNumberFragment
|
function scanNumberFragment(): string {
let start = pos;
let allowSeparator = false;
let isPreviousTokenSeparator = false;
let result = "";
while (true) {
const ch = charCodeUnchecked(pos);
if (ch === CharacterCodes._) {
tokenFlags |= TokenFlags.ContainsSeparator;
if (allowSeparator) {
allowSeparator = false;
isPreviousTokenSeparator = true;
result += text.substring(start, pos);
}
else {
tokenFlags |= TokenFlags.ContainsInvalidSeparator;
if (isPreviousTokenSeparator) {
error(Diagnostics.Multiple_consecutive_numeric_separators_are_not_permitted, pos, 1);
}
else {
error(Diagnostics.Numeric_separators_are_not_allowed_here, pos, 1);
}
}
pos++;
start = pos;
continue;
}
if (isDigit(ch)) {
allowSeparator = true;
isPreviousTokenSeparator = false;
pos++;
continue;
}
break;
}
if (charCodeUnchecked(pos - 1) === CharacterCodes._) {
tokenFlags |= TokenFlags.ContainsInvalidSeparator;
error(Diagnostics.Numeric_separators_are_not_allowed_here, pos - 1, 1);
}
return result + text.substring(start, pos);
}
|
Scans a run of decimal digits, handling numeric separator (`_`) characters and
reporting separators that are consecutive or otherwise misplaced.
|
typescript
|
src/compiler/scanner.ts
| 1,171
|
[] | true
| 9
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
serialize
|
public static void serialize(XContentBuilder builder, @Nullable ExponentialHistogram histogram) throws IOException {
if (histogram == null) {
builder.nullValue();
return;
}
builder.startObject();
builder.field(SCALE_FIELD, histogram.scale());
if (histogram.sum() != 0.0 || histogram.valueCount() > 0) {
builder.field(SUM_FIELD, histogram.sum());
}
if (Double.isNaN(histogram.min()) == false) {
builder.field(MIN_FIELD, histogram.min());
}
if (Double.isNaN(histogram.max()) == false) {
builder.field(MAX_FIELD, histogram.max());
}
double zeroThreshold = histogram.zeroBucket().zeroThreshold();
long zeroCount = histogram.zeroBucket().count();
if (zeroCount != 0 || zeroThreshold != 0) {
builder.startObject(ZERO_FIELD);
if (zeroCount != 0) {
builder.field(ZERO_COUNT_FIELD, zeroCount);
}
if (zeroThreshold != 0) {
builder.field(ZERO_THRESHOLD_FIELD, zeroThreshold);
}
builder.endObject();
}
writeBuckets(builder, POSITIVE_FIELD, histogram.positiveBuckets());
writeBuckets(builder, NEGATIVE_FIELD, histogram.negativeBuckets());
builder.endObject();
}
|
Serializes an {@link ExponentialHistogram} to the provided {@link XContentBuilder}.
@param builder the XContentBuilder to write to
@param histogram the ExponentialHistogram to serialize
@throws IOException if the XContentBuilder throws an IOException
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogramXContent.java
| 58
|
[
"builder",
"histogram"
] |
void
| true
| 10
| 6.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
isNextTokenParentJsxAttribute
|
function isNextTokenParentJsxAttribute(context: FormattingContext): boolean {
return context.nextTokenParent.kind === SyntaxKind.JsxAttribute || (
context.nextTokenParent.kind === SyntaxKind.JsxNamespacedName && context.nextTokenParent.parent.kind === SyntaxKind.JsxAttribute
);
}
|
Returns true when the parent of the next token is a JSX attribute, either directly
or through a JSX namespaced name whose own parent is a JSX attribute.
@param context the formatting context to inspect
|
typescript
|
src/services/formatting/rules.ts
| 779
|
[
"context"
] | true
| 3
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
attributes
|
private byte attributes() {
// note we're not using the second byte of attributes
return (byte) buffer.getShort(ATTRIBUTES_OFFSET);
}
|
Gets the attributes of the batch; only the first byte of the attributes field is used.
@return The attributes byte
|
java
|
clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java
| 402
|
[] | true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
astype
|
def astype(self, dtype: AstypeArg | None = None, copy: bool = True):
"""
Change the dtype of a SparseArray.
The output will always be a SparseArray. To convert to a dense
ndarray with a certain dtype, use :meth:`numpy.asarray`.
Parameters
----------
dtype : np.dtype or ExtensionDtype
For SparseDtype, this changes the dtype of
``self.sp_values`` and the ``self.fill_value``.
For other dtypes, this only changes the dtype of
``self.sp_values``.
copy : bool, default True
Whether to ensure a copy is made, even if not necessary.
Returns
-------
SparseArray
Examples
--------
>>> arr = pd.arrays.SparseArray([0, 0, 1, 2])
>>> arr
[0, 0, 1, 2]
Fill: 0
IntIndex
Indices: array([2, 3], dtype=int32)
>>> arr.astype(SparseDtype(np.dtype("int32")))
[0, 0, 1, 2]
Fill: 0
IntIndex
Indices: array([2, 3], dtype=int32)
Using a NumPy dtype with a different kind (e.g. float) will coerce
just ``self.sp_values``.
>>> arr.astype(SparseDtype(np.dtype("float64")))
... # doctest: +NORMALIZE_WHITESPACE
[nan, nan, 1.0, 2.0]
Fill: nan
IntIndex
Indices: array([2, 3], dtype=int32)
Using a SparseDtype, you can also change the fill value as well.
>>> arr.astype(SparseDtype("float64", fill_value=0.0))
... # doctest: +NORMALIZE_WHITESPACE
[0.0, 0.0, 1.0, 2.0]
Fill: 0.0
IntIndex
Indices: array([2, 3], dtype=int32)
"""
if dtype == self._dtype:
if not copy:
return self
else:
return self.copy()
future_dtype = pandas_dtype(dtype)
if not isinstance(future_dtype, SparseDtype):
# GH#34457
values = np.asarray(self)
values = ensure_wrapped_if_datetimelike(values)
return astype_array(values, dtype=future_dtype, copy=False)
dtype = self.dtype.update_dtype(dtype)
subtype = pandas_dtype(dtype._subtype_with_str)
subtype = cast(np.dtype, subtype) # ensured by update_dtype
values = ensure_wrapped_if_datetimelike(self.sp_values)
sp_values = astype_array(values, subtype, copy=copy)
sp_values = np.asarray(sp_values)
return self._simple_new(sp_values, self.sp_index, dtype)
|
Change the dtype of a SparseArray.
The output will always be a SparseArray. To convert to a dense
ndarray with a certain dtype, use :meth:`numpy.asarray`.
Parameters
----------
dtype : np.dtype or ExtensionDtype
For SparseDtype, this changes the dtype of
``self.sp_values`` and the ``self.fill_value``.
For other dtypes, this only changes the dtype of
``self.sp_values``.
copy : bool, default True
Whether to ensure a copy is made, even if not necessary.
Returns
-------
SparseArray
Examples
--------
>>> arr = pd.arrays.SparseArray([0, 0, 1, 2])
>>> arr
[0, 0, 1, 2]
Fill: 0
IntIndex
Indices: array([2, 3], dtype=int32)
>>> arr.astype(SparseDtype(np.dtype("int32")))
[0, 0, 1, 2]
Fill: 0
IntIndex
Indices: array([2, 3], dtype=int32)
Using a NumPy dtype with a different kind (e.g. float) will coerce
just ``self.sp_values``.
>>> arr.astype(SparseDtype(np.dtype("float64")))
... # doctest: +NORMALIZE_WHITESPACE
[nan, nan, 1.0, 2.0]
Fill: nan
IntIndex
Indices: array([2, 3], dtype=int32)
Using a SparseDtype, you can also change the fill value as well.
>>> arr.astype(SparseDtype("float64", fill_value=0.0))
... # doctest: +NORMALIZE_WHITESPACE
[0.0, 0.0, 1.0, 2.0]
Fill: 0.0
IntIndex
Indices: array([2, 3], dtype=int32)
|
python
|
pandas/core/arrays/sparse/array.py
| 1,260
|
[
"self",
"dtype",
"copy"
] | true
| 5
| 8.08
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
describeTopics
|
default DescribeTopicsResult describeTopics(Collection<String> topicNames) {
return describeTopics(topicNames, new DescribeTopicsOptions());
}
|
Describe some topics in the cluster, with the default options.
<p>
This is a convenience method for {@link #describeTopics(Collection, DescribeTopicsOptions)} with
default options. See the overload for more details.
@param topicNames The names of the topics to describe.
@return The DescribeTopicsResult.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 295
|
[
"topicNames"
] |
DescribeTopicsResult
| true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
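A minimal caller for the convenience overload above, as a hedged sketch: the broker address and topic names are placeholders, and allTopicNames() is assumed to be the blocking accessor on DescribeTopicsResult in recent Kafka clients.

import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.TopicDescription;

public class DescribeTopicsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        try (Admin admin = Admin.create(props)) {
            // Uses the default-options overload shown in the row above
            Map<String, TopicDescription> topics =
                    admin.describeTopics(List.of("orders", "payments")).allTopicNames().get();
            topics.forEach((name, description) ->
                    System.out.println(name + ": " + description.partitions().size() + " partitions"));
        }
    }
}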
is_object_dtype
|
def is_object_dtype(arr_or_dtype) -> bool:
"""
Check whether an array-like or dtype is of the object dtype.
This method examines the input to determine if it is of the
object data type. Object dtype is a generic data type that can
hold any Python objects, including strings, lists, and custom
objects.
Parameters
----------
arr_or_dtype : array-like or dtype
The array-like or dtype to check.
Returns
-------
boolean
Whether or not the array-like or dtype is of the object dtype.
See Also
--------
api.types.is_numeric_dtype : Check whether the provided array or dtype is of a
numeric dtype.
api.types.is_string_dtype : Check whether the provided array or dtype is of
the string dtype.
api.types.is_bool_dtype : Check whether the provided array or dtype is of a
boolean dtype.
Examples
--------
>>> from pandas.api.types import is_object_dtype
>>> is_object_dtype(object)
True
>>> is_object_dtype(int)
False
>>> is_object_dtype(np.array([], dtype=object))
True
>>> is_object_dtype(np.array([], dtype=int))
False
>>> is_object_dtype([1, 2, 3])
False
"""
return _is_dtype_type(arr_or_dtype, classes(np.object_))
|
Check whether an array-like or dtype is of the object dtype.
This method examines the input to determine if it is of the
object data type. Object dtype is a generic data type that can
hold any Python objects, including strings, lists, and custom
objects.
Parameters
----------
arr_or_dtype : array-like or dtype
The array-like or dtype to check.
Returns
-------
boolean
Whether or not the array-like or dtype is of the object dtype.
See Also
--------
api.types.is_numeric_dtype : Check whether the provided array or dtype is of a
numeric dtype.
api.types.is_string_dtype : Check whether the provided array or dtype is of
the string dtype.
api.types.is_bool_dtype : Check whether the provided array or dtype is of a
boolean dtype.
Examples
--------
>>> from pandas.api.types import is_object_dtype
>>> is_object_dtype(object)
True
>>> is_object_dtype(int)
False
>>> is_object_dtype(np.array([], dtype=object))
True
>>> is_object_dtype(np.array([], dtype=int))
False
>>> is_object_dtype([1, 2, 3])
False
|
python
|
pandas/core/dtypes/common.py
| 143
|
[
"arr_or_dtype"
] |
bool
| true
| 1
| 7.12
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
isSorted
|
public static boolean isSorted(final boolean[] array) {
if (getLength(array) < 2) {
return true;
}
boolean previous = array[0];
final int n = array.length;
for (int i = 1; i < n; i++) {
final boolean current = array[i];
if (BooleanUtils.compare(previous, current) > 0) {
return false;
}
previous = current;
}
return true;
}
|
Tests whether the provided array is sorted according to natural ordering ({@code false} before {@code true}).
@param array the array to check.
@return whether the array is sorted according to natural ordering.
@since 3.4
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 3,545
|
[
"array"
] | true
| 4
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
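A short usage sketch for the row above; natural ordering for booleans places false before true, and arrays with fewer than two elements are trivially sorted.

import org.apache.commons.lang3.ArrayUtils;

public class IsSortedExample {
    public static void main(String[] args) {
        System.out.println(ArrayUtils.isSorted(new boolean[] {false, false, true})); // true
        System.out.println(ArrayUtils.isSorted(new boolean[] {true, false}));        // false
        System.out.println(ArrayUtils.isSorted(new boolean[0]));                     // true: fewer than two elements
    }
}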
applyAsFloat
|
float applyAsFloat(int value) throws E;
|
Applies this function to the given argument.
@param value the function argument
@return the function result
@throws E Thrown when the function fails.
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableIntToFloatFunction.java
| 53
|
[
"value"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
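A short sketch of the failable functional interface above: the type parameter E lets the lambda throw a checked exception that the standard java.util.function interfaces cannot declare.

import java.io.IOException;
import org.apache.commons.lang3.function.FailableIntToFloatFunction;

public class FailableIntToFloatExample {
    public static void main(String[] args) throws IOException {
        FailableIntToFloatFunction<IOException> half = value -> {
            if (value < 0) {
                throw new IOException("negative input: " + value); // checked exception propagates as E
            }
            return value / 2.0f;
        };
        System.out.println(half.applyAsFloat(7)); // 3.5
    }
}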
sendOffsetCommitRequest
|
RequestFuture<Void> sendOffsetCommitRequest(final Map<TopicPartition, OffsetAndMetadata> offsets) {
if (offsets.isEmpty())
return RequestFuture.voidSuccess();
Node coordinator = checkAndGetCoordinator();
if (coordinator == null)
return RequestFuture.coordinatorNotAvailable();
// create the offset commit request
Map<String, OffsetCommitRequestData.OffsetCommitRequestTopic> requestTopicDataMap = new HashMap<>();
for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
TopicPartition topicPartition = entry.getKey();
OffsetAndMetadata offsetAndMetadata = entry.getValue();
if (offsetAndMetadata.offset() < 0) {
return RequestFuture.failure(new IllegalArgumentException("Invalid offset: " + offsetAndMetadata.offset()));
}
OffsetCommitRequestData.OffsetCommitRequestTopic topic = requestTopicDataMap
.getOrDefault(topicPartition.topic(),
new OffsetCommitRequestData.OffsetCommitRequestTopic()
.setName(topicPartition.topic())
);
topic.partitions().add(new OffsetCommitRequestData.OffsetCommitRequestPartition()
.setPartitionIndex(topicPartition.partition())
.setCommittedOffset(offsetAndMetadata.offset())
.setCommittedLeaderEpoch(offsetAndMetadata.leaderEpoch().orElse(RecordBatch.NO_PARTITION_LEADER_EPOCH))
.setCommittedMetadata(offsetAndMetadata.metadata())
);
requestTopicDataMap.put(topicPartition.topic(), topic);
}
final Generation generation;
final String groupInstanceId;
if (subscriptions.hasAutoAssignedPartitions()) {
synchronized (ConsumerCoordinator.this) {
generation = generationIfStable();
groupInstanceId = rebalanceConfig.groupInstanceId.orElse(null);
// if the generation is null, we are not part of an active group (and we expect to be).
// the only thing we can do is fail the commit and let the user rejoin the group in poll().
if (generation == null) {
log.info("Failing OffsetCommit request since the consumer is not part of an active group");
if (rebalanceInProgress()) {
// if the client knows it is already rebalancing, we can use RebalanceInProgressException instead of
// CommitFailedException to indicate this is not a fatal error
return RequestFuture.failure(new RebalanceInProgressException("Offset commit cannot be completed since the " +
"consumer is undergoing a rebalance for auto partition assignment. You can try completing the rebalance " +
"by calling poll() and then retry the operation."));
} else {
return RequestFuture.failure(new CommitFailedException("Offset commit cannot be completed since the " +
"consumer is not part of an active group for auto partition assignment; it is likely that the consumer " +
"was kicked out of the group."));
}
}
}
} else {
generation = Generation.NO_GENERATION;
groupInstanceId = null;
}
OffsetCommitRequest.Builder builder = OffsetCommitRequest.Builder.forTopicNames(
new OffsetCommitRequestData()
.setGroupId(this.rebalanceConfig.groupId)
.setGenerationIdOrMemberEpoch(generation.generationId)
.setMemberId(generation.memberId)
.setGroupInstanceId(groupInstanceId)
.setTopics(new ArrayList<>(requestTopicDataMap.values()))
);
log.trace("Sending OffsetCommit request with {} to coordinator {}", offsets, coordinator);
return client.send(coordinator, builder)
.compose(new OffsetCommitResponseHandler(offsets, generation));
}
|
Commit offsets for the specified list of topics and partitions. This is a non-blocking call
which returns a request future that can be polled in the case of a synchronous commit or ignored in the
asynchronous case.
NOTE: This is visible only for testing
@param offsets The list of offsets per partition that should be committed.
@return A request future whose value indicates whether the commit was successful or not
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java
| 1,272
|
[
"offsets"
] | true
| 7
| 8.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
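Since sendOffsetCommitRequest is internal (visible only for testing), a hedged sketch of the public path that reaches it: KafkaConsumer.commitAsync builds the same per-partition offset map; the consumer instance, topic, and offset below are hypothetical.

import java.util.Map;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class CommitOffsetsExample {
    static void commit(KafkaConsumer<String, String> consumer) {
        Map<TopicPartition, OffsetAndMetadata> offsets =
                Map.of(new TopicPartition("orders", 0), new OffsetAndMetadata(42L));
        // commitAsync eventually funnels into ConsumerCoordinator.sendOffsetCommitRequest(...)
        consumer.commitAsync(offsets, (committed, exception) -> {
            if (exception != null) {
                System.err.println("Commit failed: " + exception.getMessage());
            }
        });
    }
}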
readObject
|
@Serial
private void readObject(ObjectInputStream ois) throws IOException, ClassNotFoundException {
throw new NotSerializableException("DefaultListableBeanFactory itself is not deserializable - " +
"just a SerializedBeanFactoryReference is");
}
|
Prevents deserialization of this bean factory: DefaultListableBeanFactory
itself is not deserializable - just a SerializedBeanFactoryReference is.
@param ois the ObjectInputStream being read from
@throws NotSerializableException always
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/DefaultListableBeanFactory.java
| 2,404
|
[
"ois"
] |
void
| true
| 1
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
createNativeCaffeineCache
|
protected com.github.benmanes.caffeine.cache.Cache<Object, Object> createNativeCaffeineCache(String name) {
if (this.cacheLoader != null) {
if (this.cacheLoader instanceof CacheLoader<Object, Object> regularCacheLoader) {
return this.cacheBuilder.build(regularCacheLoader);
}
else {
throw new IllegalStateException(
"Cannot create regular Caffeine Cache with async-only cache loader: " + this.cacheLoader);
}
}
return this.cacheBuilder.build();
}
|
Build a common Caffeine Cache instance for the specified cache name,
using the common Caffeine configuration specified on this cache manager.
@param name the name of the cache
@return the native Caffeine Cache instance
@see #createCaffeineCache
|
java
|
spring-context-support/src/main/java/org/springframework/cache/caffeine/CaffeineCacheManager.java
| 384
|
[
"name"
] | true
| 3
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
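A hedged configuration sketch for the row above: with a regular (synchronous) CacheLoader, createNativeCaffeineCache builds a loading cache rather than throwing; the cache name and loader body are illustrative only.

import com.github.benmanes.caffeine.cache.Caffeine;
import org.springframework.cache.caffeine.CaffeineCacheManager;

public class CaffeineManagerExample {
    public static void main(String[] args) {
        CaffeineCacheManager manager = new CaffeineCacheManager("users");
        manager.setCaffeine(Caffeine.newBuilder().maximumSize(500));
        // Synchronous loader, so the regular-cache branch above is taken
        manager.setCacheLoader(key -> "loaded-" + key);
        System.out.println(manager.getCache("users").get("42", String.class)); // loaded-42
    }
}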
markPrecedingCommentDirectiveLine
|
function markPrecedingCommentDirectiveLine(diagnostic: Diagnostic, directives: CommentDirectivesMap) {
const { file, start } = diagnostic;
if (!file) {
return -1;
}
// Start out with the line just before the text
const lineStarts = getLineStarts(file);
let line = computeLineAndCharacterOfPosition(lineStarts, start!).line - 1; // TODO: GH#18217
while (line >= 0) {
// As soon as that line is known to have a comment directive, use that
if (directives.markUsed(line)) {
return line;
}
// Stop searching if the line is not empty and not a comment
const lineText = file.text.slice(lineStarts[line], lineStarts[line + 1]).trim();
if (lineText !== "" && !/^\s*\/\/.*$/.test(lineText)) {
return -1;
}
line--;
}
return -1;
}
|
@returns The line index marked as preceding the diagnostic, or -1 if none was found.
|
typescript
|
src/compiler/program.ts
| 2,960
|
[
"diagnostic",
"directives"
] | false
| 6
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
validate_fillna_kwargs
|
def validate_fillna_kwargs(value, method, validate_scalar_dict_value: bool = True):
"""
Validate the keyword arguments to 'fillna'.
This checks that exactly one of 'value' and 'method' is specified.
If 'method' is specified, this validates that it's a valid method.
Parameters
----------
value, method : object
The 'value' and 'method' keyword arguments for 'fillna'.
validate_scalar_dict_value : bool, default True
Whether to validate that 'value' is a scalar or dict. Specifically,
validate that it is not a list or tuple.
Returns
-------
value, method : object
"""
from pandas.core.missing import clean_fill_method
if value is None and method is None:
raise ValueError("Must specify a fill 'value' or 'method'.")
if value is None and method is not None:
method = clean_fill_method(method)
elif value is not None and method is None:
if validate_scalar_dict_value and isinstance(value, (list, tuple)):
raise TypeError(
'"value" parameter must be a scalar or dict, but '
f'you passed a "{type(value).__name__}"'
)
elif value is not None and method is not None:
raise ValueError("Cannot specify both 'value' and 'method'.")
return value, method
|
Validate the keyword arguments to 'fillna'.
This checks that exactly one of 'value' and 'method' is specified.
If 'method' is specified, this validates that it's a valid method.
Parameters
----------
value, method : object
The 'value' and 'method' keyword arguments for 'fillna'.
validate_scalar_dict_value : bool, default True
Whether to validate that 'value' is a scalar or dict. Specifically,
validate that it is not a list or tuple.
Returns
-------
value, method : object
|
python
|
pandas/util/_validators.py
| 300
|
[
"value",
"method",
"validate_scalar_dict_value"
] | true
| 11
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
remove
|
public Object remove(int index) {
if (index < 0 || index >= this.values.size()) {
return null;
}
return this.values.remove(index);
}
|
Removes and returns the value at {@code index}, or null if the array has no value
at {@code index}.
@param index the index of the value to remove
@return the previous value at {@code index}
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONArray.java
| 311
|
[
"index"
] |
Object
| true
| 3
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
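A short sketch for the row above, assuming the shaded class mirrors the Android JSON API it is copied from (put appends a value); the out-of-range case returns null rather than throwing.

import org.springframework.boot.cli.json.JSONArray;

public class JsonArrayRemoveExample {
    public static void main(String[] args) {
        JSONArray array = new JSONArray();
        array.put("a");
        array.put("b");
        System.out.println(array.remove(1)); // b
        System.out.println(array.remove(5)); // null: index out of range is not an error
    }
}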
originalsWithPrefix
|
public Map<String, Object> originalsWithPrefix(String prefix, boolean strip) {
Map<String, Object> result = new RecordingMap<>(prefix, false);
result.putAll(Utils.entriesWithPrefix(originals, prefix, strip));
return result;
}
|
Gets all original settings with the given prefix.
@param prefix the prefix to use as a filter
@param strip whether to strip the prefix from the keys before adding them to the output
@return a Map containing the settings with the prefix
|
java
|
clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
| 290
|
[
"prefix",
"strip"
] | true
| 1
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
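A hedged sketch of the prefix filtering above; the configuration keys are hypothetical, and the anonymous subclass is used only so the sketch compiles regardless of whether AbstractConfig is abstract in a given client version.

import java.util.Map;
import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.config.ConfigDef;

public class PrefixExample {
    public static void main(String[] args) {
        AbstractConfig config = new AbstractConfig(new ConfigDef(),
                Map.of("producer.acks", "all", "client.id", "app")) { };
        System.out.println(config.originalsWithPrefix("producer.", true));  // {acks=all}
        System.out.println(config.originalsWithPrefix("producer.", false)); // {producer.acks=all}
    }
}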
HTML40_EXTENDED_ESCAPE
|
public static String[][] HTML40_EXTENDED_ESCAPE() {
return HTML40_EXTENDED_ESCAPE.clone();
}
|
Mapping to escape additional <a href="https://www.w3.org/TR/REC-html40/sgml/entities.html">character entity
references</a>. Note that this must be used with {@link #ISO8859_1_ESCAPE()} to get the full list of
HTML 4.0 character entities.
@return the mapping table.
|
java
|
src/main/java/org/apache/commons/lang3/text/translate/EntityArrays.java
| 402
|
[] | true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
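A short sketch iterating the mapping table above; each row pairs a character with its entity reference. Note that the org.apache.commons.lang3.text package was deprecated in favor of Commons Text, so new code would typically use that library instead.

import org.apache.commons.lang3.text.translate.EntityArrays;

public class EntityArraysExample {
    public static void main(String[] args) {
        for (String[] pair : EntityArrays.HTML40_EXTENDED_ESCAPE()) {
            System.out.println(pair[0] + " -> " + pair[1]); // e.g. Œ -> &OElig;
        }
    }
}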