function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
sortedIndexBy
|
function sortedIndexBy(array, value, iteratee) {
return baseSortedIndexBy(array, value, getIteratee(iteratee, 2));
}
|
This method is like `_.sortedIndex` except that it accepts `iteratee`
which is invoked for `value` and each element of `array` to compute their
sort ranking. The iteratee is invoked with one argument: (value).
@static
@memberOf _
@since 4.0.0
@category Array
@param {Array} array The sorted array to inspect.
@param {*} value The value to evaluate.
@param {Function} [iteratee=_.identity] The iteratee invoked per element.
@returns {number} Returns the index at which `value` should be inserted
into `array`.
@example
var objects = [{ 'x': 4 }, { 'x': 5 }];
_.sortedIndexBy(objects, { 'x': 4 }, function(o) { return o.x; });
// => 0
// The `_.property` iteratee shorthand.
_.sortedIndexBy(objects, { 'x': 4 }, 'x');
// => 0
|
javascript
|
lodash.js
| 8,076
|
[
"array",
"value",
"iteratee"
] | false
| 1
| 6.24
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
_get_skiprows
|
def _get_skiprows(skiprows: int | Sequence[int] | slice | None) -> int | Sequence[int]:
"""
Get an iterator given an integer, slice or container.
Parameters
----------
skiprows : int, slice, container
The iterator to use to skip rows; can also be a slice.
Raises
------
TypeError
* If `skiprows` is not a slice, integer, or Container
Returns
-------
it : iterable
A proper iterator to use to skip rows of a DataFrame.
"""
if isinstance(skiprows, slice):
start, step = skiprows.start or 0, skiprows.step or 1
return list(range(start, skiprows.stop, step))
elif isinstance(skiprows, numbers.Integral) or is_list_like(skiprows):
return cast("int | Sequence[int]", skiprows)
elif skiprows is None:
return 0
raise TypeError(f"{type(skiprows).__name__} is not a valid type for skipping rows")
|
Get an iterator given an integer, slice or container.
Parameters
----------
skiprows : int, slice, container
The iterator to use to skip rows; can also be a slice.
Raises
------
TypeError
* If `skiprows` is not a slice, integer, or Container
Returns
-------
it : iterable
A proper iterator to use to skip rows of a DataFrame.
|
python
|
pandas/io/html.py
| 88
|
[
"skiprows"
] |
int | Sequence[int]
| true
| 7
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
is_tensor
|
def is_tensor(obj: _Any, /) -> _TypeIs["torch.Tensor"]:
r"""Returns True if `obj` is a PyTorch tensor.
Args:
obj (object): Object to test
Example::
>>> x = torch.tensor([1, 2, 3])
>>> torch.is_tensor(x)
True
"""
return isinstance(obj, torch.Tensor)
|
r"""Returns True if `obj` is a PyTorch tensor.
Args:
obj (object): Object to test
Example::
>>> x = torch.tensor([1, 2, 3])
>>> torch.is_tensor(x)
True
|
python
|
torch/__init__.py
| 1,143
|
[
"obj"
] |
_TypeIs["torch.Tensor"]
| true
| 1
| 7.28
|
pytorch/pytorch
| 96,034
|
google
| false
|
buildAspectJAdvisors
|
public List<Advisor> buildAspectJAdvisors() {
List<String> aspectNames = this.aspectBeanNames;
if (aspectNames == null) {
synchronized (this) {
aspectNames = this.aspectBeanNames;
if (aspectNames == null) {
List<Advisor> advisors = new ArrayList<>();
aspectNames = new ArrayList<>();
String[] beanNames = BeanFactoryUtils.beanNamesForTypeIncludingAncestors(
this.beanFactory, Object.class, true, false);
for (String beanName : beanNames) {
if (!isEligibleBean(beanName)) {
continue;
}
// We must be careful not to instantiate beans eagerly as in this case they
// would be cached by the Spring container but would not have been weaved.
Class<?> beanType = this.beanFactory.getType(beanName, false);
if (beanType == null) {
continue;
}
if (this.advisorFactory.isAspect(beanType)) {
try {
AspectMetadata amd = new AspectMetadata(beanType, beanName);
if (amd.getAjType().getPerClause().getKind() == PerClauseKind.SINGLETON) {
MetadataAwareAspectInstanceFactory factory =
new BeanFactoryAspectInstanceFactory(this.beanFactory, beanName);
List<Advisor> classAdvisors = this.advisorFactory.getAdvisors(factory);
if (this.beanFactory.isSingleton(beanName)) {
this.advisorsCache.put(beanName, classAdvisors);
}
else {
this.aspectFactoryCache.put(beanName, factory);
}
advisors.addAll(classAdvisors);
}
else {
// Per target or per this.
if (this.beanFactory.isSingleton(beanName)) {
throw new IllegalArgumentException("Bean with name '" + beanName +
"' is a singleton, but aspect instantiation model is not singleton");
}
MetadataAwareAspectInstanceFactory factory =
new PrototypeAspectInstanceFactory(this.beanFactory, beanName);
this.aspectFactoryCache.put(beanName, factory);
advisors.addAll(this.advisorFactory.getAdvisors(factory));
}
aspectNames.add(beanName);
}
catch (IllegalArgumentException | IllegalStateException | AopConfigException ex) {
if (logger.isDebugEnabled()) {
logger.debug("Ignoring incompatible aspect [" + beanType.getName() + "]: " + ex);
}
}
}
}
this.aspectBeanNames = aspectNames;
return advisors;
}
}
}
if (aspectNames.isEmpty()) {
return Collections.emptyList();
}
List<Advisor> advisors = new ArrayList<>();
for (String aspectName : aspectNames) {
List<Advisor> cachedAdvisors = this.advisorsCache.get(aspectName);
if (cachedAdvisors != null) {
advisors.addAll(cachedAdvisors);
}
else {
MetadataAwareAspectInstanceFactory factory = this.aspectFactoryCache.get(aspectName);
Assert.state(factory != null, "Factory must not be null");
advisors.addAll(this.advisorFactory.getAdvisors(factory));
}
}
return advisors;
}
|
Look for AspectJ-annotated aspect beans in the current bean factory,
and return to a list of Spring AOP Advisors representing them.
<p>Creates a Spring Advisor for each AspectJ advice method.
@return the list of {@link org.springframework.aop.Advisor} beans
@see #isEligibleBean
|
java
|
spring-aop/src/main/java/org/springframework/aop/aspectj/annotation/BeanFactoryAspectJAdvisorsBuilder.java
| 87
|
[] | true
| 13
| 7.68
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
mapOf
|
public static <K, V> Bindable<Map<K, V>> mapOf(Class<K> keyType, Class<V> valueType) {
return of(ResolvableType.forClassWithGenerics(Map.class, keyType, valueType));
}
|
Create a new {@link Bindable} {@link Map} of the specified key and value type.
@param <K> the key type
@param <V> the value type
@param keyType the map key type
@param valueType the map value type
@return a {@link Bindable} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Bindable.java
| 303
|
[
"keyType",
"valueType"
] | true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
findFactoryMethod
|
private static @Nullable Method findFactoryMethod(ApplicationContext applicationContext, String beanName) {
if (applicationContext instanceof ConfigurableApplicationContext configurableContext) {
return findFactoryMethod(configurableContext, beanName);
}
return null;
}
|
Return a {@link ConfigurationPropertiesBean @ConfigurationPropertiesBean} instance
for the given bean details or {@code null} if the bean is not a
{@link ConfigurationProperties @ConfigurationProperties} object. Annotations are
considered both on the bean itself, as well as any factory method (for example a
{@link Bean @Bean} method).
@param applicationContext the source application context
@param bean the bean to consider
@param beanName the bean name
@return a configuration properties bean or {@code null} if the neither the bean nor
factory method are annotated with
{@link ConfigurationProperties @ConfigurationProperties}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/ConfigurationPropertiesBean.java
| 220
|
[
"applicationContext",
"beanName"
] |
Method
| true
| 2
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
render_dag
|
def render_dag(dag: DAG | SerializedDAG, tis: list[TaskInstance] | None = None) -> graphviz.Digraph:
"""
Render the DAG object to the DOT object.
If an task instance list is passed, the nodes will be painted according to task statuses.
:param dag: DAG that will be rendered.
:param tis: List of task instances
:return: Graphviz object
"""
if not graphviz:
raise AirflowException(
"Could not import graphviz. Install the graphviz python package to fix this error."
)
dot = graphviz.Digraph(
dag.dag_id,
graph_attr={
"rankdir": "LR",
"labelloc": "t",
"label": dag.dag_id,
},
)
states_by_task_id = None
if tis is not None:
states_by_task_id = {ti.task_id: ti.state for ti in tis}
_draw_nodes(dag.task_group, dot, states_by_task_id)
for edge in dag_edges(dag):
# Gets an optional label for the edge; this will be None if none is specified.
label = dag.get_edge_info(edge["source_id"], edge["target_id"]).get("label")
# Add the edge to the graph with optional label
# (we can just use the maybe-None label variable directly)
dot.edge(edge["source_id"], edge["target_id"], label)
return dot
|
Render the DAG object to the DOT object.
If an task instance list is passed, the nodes will be painted according to task statuses.
:param dag: DAG that will be rendered.
:param tis: List of task instances
:return: Graphviz object
|
python
|
airflow-core/src/airflow/utils/dot_renderer.py
| 197
|
[
"dag",
"tis"
] |
graphviz.Digraph
| true
| 4
| 8.4
|
apache/airflow
| 43,597
|
sphinx
| false
|
lowercase
|
public static String lowercase(String value) {
return LowercaseProcessor.apply(value);
}
|
Uses {@link LowercaseProcessor} to convert a string to its lowercase
equivalent.
@param value string to convert
@return lowercase equivalent
|
java
|
modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/Processors.java
| 40
|
[
"value"
] |
String
| true
| 1
| 6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
write_batch_data
|
def write_batch_data(self, items: Iterable) -> bool:
"""
Write batch items to DynamoDB table with provisioned throughout capacity.
.. seealso::
- :external+boto3:py:meth:`DynamoDB.ServiceResource.Table`
- :external+boto3:py:meth:`DynamoDB.Table.batch_writer`
- :external+boto3:py:meth:`DynamoDB.Table.put_item`
:param items: list of DynamoDB items.
"""
try:
table = self.get_conn().Table(self.table_name)
with table.batch_writer(overwrite_by_pkeys=self.table_keys) as batch:
for item in items:
batch.put_item(Item=item)
return True
except Exception as general_error:
raise AirflowException(f"Failed to insert items in dynamodb, error: {general_error}")
|
Write batch items to DynamoDB table with provisioned throughout capacity.
.. seealso::
- :external+boto3:py:meth:`DynamoDB.ServiceResource.Table`
- :external+boto3:py:meth:`DynamoDB.Table.batch_writer`
- :external+boto3:py:meth:`DynamoDB.Table.put_item`
:param items: list of DynamoDB items.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/dynamodb.py
| 65
|
[
"self",
"items"
] |
bool
| true
| 2
| 6.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
resetStrategy
|
public synchronized AutoOffsetResetStrategy resetStrategy(TopicPartition partition) {
return assignedState(partition).resetStrategy();
}
|
Unset the preferred read replica. This causes the fetcher to go back to the leader for fetches.
@param tp The topic partition
@return the removed preferred read replica if set, Empty otherwise.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
| 822
|
[
"partition"
] |
AutoOffsetResetStrategy
| true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
asList
|
@Override
public ImmutableList<E> asList() {
ImmutableList<E> result = asList;
return (result == null) ? asList = createAsList() : result;
}
|
Returns {@code true} if the {@code hashCode()} method runs quickly.
|
java
|
android/guava/src/com/google/common/collect/ImmutableSet.java
| 370
|
[] | true
| 2
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
get_query_info
|
def get_query_info(self, query_execution_id: str, use_cache: bool = False) -> dict:
"""
Get information about a single execution of a query.
.. seealso::
- :external+boto3:py:meth:`Athena.Client.get_query_execution`
:param query_execution_id: Id of submitted athena query
:param use_cache: If True, use execution information cache
"""
if use_cache and query_execution_id in self.__query_results:
return self.__query_results[query_execution_id]
response = self.get_conn().get_query_execution(QueryExecutionId=query_execution_id)
if use_cache:
self.__query_results[query_execution_id] = response
return response
|
Get information about a single execution of a query.
.. seealso::
- :external+boto3:py:meth:`Athena.Client.get_query_execution`
:param query_execution_id: Id of submitted athena query
:param use_cache: If True, use execution information cache
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/athena.py
| 131
|
[
"self",
"query_execution_id",
"use_cache"
] |
dict
| true
| 4
| 6.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
readSchemaFromSingleFile
|
async function readSchemaFromSingleFile(schemaPath: string): Promise<LookupResult> {
debug('Reading schema from single file', schemaPath)
const typeError = await ensureType(schemaPath, 'file')
if (typeError) {
return { ok: false, error: typeError }
}
const file = await readFile(schemaPath, { encoding: 'utf-8' })
const schemaTuple: MultipleSchemaTuple = [schemaPath, file]
return {
ok: true,
schema: { schemaPath, schemaRootDir: path.dirname(schemaPath), schemas: [schemaTuple] },
} as const
}
|
Loads the schema, returns null if it is not found
Throws an error if schema is specified explicitly in
any of the available ways (argument, package.json config), but
can not be loaded
@param schemaPathFromArgs
@param schemaPathFromConfig
@param opts
@returns
|
typescript
|
packages/internals/src/cli/getSchema.ts
| 115
|
[
"schemaPath"
] | true
| 2
| 7.44
|
prisma/prisma
| 44,834
|
jsdoc
| true
|
|
repeat
|
def repeat(self, repeats: int | Sequence[int], axis: AxisInt | None = None) -> Self:
"""
Repeat elements of an ExtensionArray.
Returns a new ExtensionArray where each element of the current ExtensionArray
is repeated consecutively a given number of times.
Parameters
----------
repeats : int or array of ints
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
ExtensionArray.
axis : None
Must be ``None``. Has no effect but is accepted for compatibility
with numpy.
Returns
-------
ExtensionArray
Newly created ExtensionArray with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series.
Index.repeat : Equivalent function for Index.
numpy.repeat : Similar method for :class:`numpy.ndarray`.
ExtensionArray.take : Take arbitrary positions.
Examples
--------
>>> cat = pd.Categorical(["a", "b", "c"])
>>> cat
['a', 'b', 'c']
Categories (3, str): ['a', 'b', 'c']
>>> cat.repeat(2)
['a', 'a', 'b', 'b', 'c', 'c']
Categories (3, str): ['a', 'b', 'c']
>>> cat.repeat([1, 2, 3])
['a', 'b', 'b', 'c', 'c', 'c']
Categories (3, str): ['a', 'b', 'c']
"""
nv.validate_repeat((), {"axis": axis})
ind = np.arange(len(self)).repeat(repeats)
return self.take(ind)
|
Repeat elements of an ExtensionArray.
Returns a new ExtensionArray where each element of the current ExtensionArray
is repeated consecutively a given number of times.
Parameters
----------
repeats : int or array of ints
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
ExtensionArray.
axis : None
Must be ``None``. Has no effect but is accepted for compatibility
with numpy.
Returns
-------
ExtensionArray
Newly created ExtensionArray with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series.
Index.repeat : Equivalent function for Index.
numpy.repeat : Similar method for :class:`numpy.ndarray`.
ExtensionArray.take : Take arbitrary positions.
Examples
--------
>>> cat = pd.Categorical(["a", "b", "c"])
>>> cat
['a', 'b', 'c']
Categories (3, str): ['a', 'b', 'c']
>>> cat.repeat(2)
['a', 'a', 'b', 'b', 'c', 'c']
Categories (3, str): ['a', 'b', 'c']
>>> cat.repeat([1, 2, 3])
['a', 'b', 'b', 'c', 'c', 'c']
Categories (3, str): ['a', 'b', 'c']
|
python
|
pandas/core/arrays/base.py
| 1,745
|
[
"self",
"repeats",
"axis"
] |
Self
| true
| 1
| 6.96
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
call
|
@Override
public T call() throws Exception {
try {
return initialize();
} finally {
if (execFinally != null) {
execFinally.shutdown();
}
}
}
|
Initiates initialization and returns the result.
@return the result object
@throws Exception if an error occurs
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/BackgroundInitializer.java
| 151
|
[] |
T
| true
| 2
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getBeanDefinitionNames
|
@Override
public String[] getBeanDefinitionNames() {
String[] frozenNames = this.frozenBeanDefinitionNames;
if (frozenNames != null) {
return frozenNames.clone();
}
else {
return StringUtils.toStringArray(this.beanDefinitionNames);
}
}
|
Return the autowire candidate resolver for this BeanFactory (never {@code null}).
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/DefaultListableBeanFactory.java
| 422
|
[] | true
| 2
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
predict
|
def predict(self, X):
"""Predict class labels for samples in `X`.
Parameters
----------
X : {array-like, spare matrix} of shape (n_samples, n_features)
The data matrix for which we want to predict the targets.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs)
Vector or matrix containing the predictions. In binary and
multiclass problems, this is a vector containing `n_samples`. In
a multilabel problem, it returns a matrix of shape
`(n_samples, n_outputs)`.
"""
check_is_fitted(self, attributes=["_label_binarizer"])
if self._label_binarizer.y_type_.startswith("multilabel"):
# Threshold such that the negative label is -1 and positive label
# is 1 to use the inverse transform of the label binarizer fitted
# during fit.
decision = self.decision_function(X)
xp, _ = get_namespace(decision)
scores = 2.0 * xp.astype(decision > 0, decision.dtype) - 1.0
return self._label_binarizer.inverse_transform(scores)
return super().predict(X)
|
Predict class labels for samples in `X`.
Parameters
----------
X : {array-like, spare matrix} of shape (n_samples, n_features)
The data matrix for which we want to predict the targets.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs)
Vector or matrix containing the predictions. In binary and
multiclass problems, this is a vector containing `n_samples`. In
a multilabel problem, it returns a matrix of shape
`(n_samples, n_outputs)`.
|
python
|
sklearn/linear_model/_ridge.py
| 1,341
|
[
"self",
"X"
] | false
| 2
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
calculateAllFilenames
|
protected List<String> calculateAllFilenames(String basename, Locale locale) {
Map<Locale, List<String>> localeMap = this.cachedFilenames.get(basename);
if (localeMap != null) {
List<String> filenames = localeMap.get(locale);
if (filenames != null) {
return filenames;
}
}
// Filenames for given Locale
List<String> filenames = new ArrayList<>(7);
filenames.addAll(calculateFilenamesForLocale(basename, locale));
// Filenames for default Locale, if any
Locale defaultLocale = getDefaultLocale();
if (defaultLocale != null && !defaultLocale.equals(locale)) {
List<String> fallbackFilenames = calculateFilenamesForLocale(basename, defaultLocale);
for (String fallbackFilename : fallbackFilenames) {
if (!filenames.contains(fallbackFilename)) {
// Entry for fallback locale that isn't already in filenames list.
filenames.add(fallbackFilename);
}
}
}
// Filename for default bundle file
filenames.add(basename);
if (localeMap == null) {
localeMap = new ConcurrentHashMap<>();
Map<Locale, List<String>> existing = this.cachedFilenames.putIfAbsent(basename, localeMap);
if (existing != null) {
localeMap = existing;
}
}
localeMap.put(locale, filenames);
return filenames;
}
|
Calculate all filenames for the given bundle basename and Locale.
Will calculate filenames for the given Locale, the system Locale
(if applicable), and the default file.
@param basename the basename of the bundle
@param locale the locale
@return the List of filenames to check
@see #setFallbackToSystemLocale
@see #calculateFilenamesForLocale
|
java
|
spring-context/src/main/java/org/springframework/context/support/ReloadableResourceBundleMessageSource.java
| 324
|
[
"basename",
"locale"
] | true
| 8
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
findBreakTarget
|
function findBreakTarget(labelText?: string): Label {
if (blockStack) {
if (labelText) {
for (let i = blockStack.length - 1; i >= 0; i--) {
const block = blockStack[i];
if (supportsLabeledBreakOrContinue(block) && block.labelText === labelText) {
return block.breakLabel;
}
else if (supportsUnlabeledBreak(block) && hasImmediateContainingLabeledBlock(labelText, i - 1)) {
return block.breakLabel;
}
}
}
else {
for (let i = blockStack.length - 1; i >= 0; i--) {
const block = blockStack[i];
if (supportsUnlabeledBreak(block)) {
return block.breakLabel;
}
}
}
}
return 0;
}
|
Finds the label that is the target for a `break` statement.
@param labelText An optional name of a containing labeled statement.
|
typescript
|
src/compiler/transformers/generators.ts
| 2,464
|
[
"labelText?"
] | true
| 12
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
list_prefixes
|
def list_prefixes(
self,
bucket_name: str | None = None,
prefix: str | None = None,
delimiter: str | None = None,
page_size: int | None = None,
max_items: int | None = None,
) -> list:
"""
List prefixes in a bucket under prefix.
.. seealso::
- :external+boto3:py:class:`S3.Paginator.ListObjectsV2`
:param bucket_name: the name of the bucket
:param prefix: a key prefix
:param delimiter: the delimiter marks key hierarchy.
:param page_size: pagination size
:param max_items: maximum items to return
:return: a list of matched prefixes
"""
prefix = prefix or ""
delimiter = delimiter or ""
config = {
"PageSize": page_size,
"MaxItems": max_items,
}
paginator = self.get_conn().get_paginator("list_objects_v2")
params = {
"Bucket": bucket_name,
"Prefix": prefix,
"Delimiter": delimiter,
"PaginationConfig": config,
}
if self._requester_pays:
params["RequestPayer"] = "requester"
response = paginator.paginate(**params)
prefixes: list[str] = []
for page in response:
if "CommonPrefixes" in page:
prefixes.extend(common_prefix["Prefix"] for common_prefix in page["CommonPrefixes"])
return prefixes
|
List prefixes in a bucket under prefix.
.. seealso::
- :external+boto3:py:class:`S3.Paginator.ListObjectsV2`
:param bucket_name: the name of the bucket
:param prefix: a key prefix
:param delimiter: the delimiter marks key hierarchy.
:param page_size: pagination size
:param max_items: maximum items to return
:return: a list of matched prefixes
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/s3.py
| 384
|
[
"self",
"bucket_name",
"prefix",
"delimiter",
"page_size",
"max_items"
] |
list
| true
| 6
| 7.44
|
apache/airflow
| 43,597
|
sphinx
| false
|
describeProducers
|
DescribeProducersResult describeProducers(Collection<TopicPartition> partitions, DescribeProducersOptions options);
|
Describe active producer state on a set of topic partitions. Unless a specific broker
is requested through {@link DescribeProducersOptions#brokerId(int)}, this will
query the partition leader to find the producer state.
@param partitions The set of partitions to query
@param options Options to control the method behavior
@return The result
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 1,688
|
[
"partitions",
"options"
] |
DescribeProducersResult
| true
| 1
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
fit
|
def fit(self, X, y=None):
"""Fit the GraphicalLasso model to X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data from which to compute the covariance estimate.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
# Covariance does not make sense for a single feature
X = validate_data(self, X, ensure_min_features=2, ensure_min_samples=2)
if self.covariance == "precomputed":
emp_cov = X.copy()
self.location_ = np.zeros(X.shape[1])
else:
emp_cov = empirical_covariance(X, assume_centered=self.assume_centered)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
self.covariance_, self.precision_, self.costs_, self.n_iter_ = _graphical_lasso(
emp_cov,
alpha=self.alpha,
cov_init=None,
mode=self.mode,
tol=self.tol,
enet_tol=self.enet_tol,
max_iter=self.max_iter,
verbose=self.verbose,
eps=self.eps,
)
return self
|
Fit the GraphicalLasso model to X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data from which to compute the covariance estimate.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
|
python
|
sklearn/covariance/_graph_lasso.py
| 550
|
[
"self",
"X",
"y"
] | false
| 5
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
equals
|
@Override
public boolean equals(@Nullable Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
MainClass other = (MainClass) obj;
return this.name.equals(other.name);
}
|
Creates a new {@code MainClass} rather represents the main class with the given
{@code name}. The class is annotated with the annotations with the given
{@code annotationNames}.
@param name the name of the class
@param annotationNames the names of the annotations on the class
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/MainClassFinder.java
| 403
|
[
"obj"
] | true
| 4
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
start
|
start() && {
folly::Promise<lift_unit_t<StorageType>> p;
auto sf = p.getSemiFuture();
std::move(*this).startImpl(
[promise = std::move(p)](Try<StorageType>&& result) mutable {
promise.setTry(std::move(result));
},
folly::CancellationToken{},
FOLLY_ASYNC_STACK_RETURN_ADDRESS());
return sf;
}
|
@returns folly::SemiFuture<T> that will complete with the result.
|
cpp
|
folly/coro/Task.h
| 338
|
[] | true
| 3
| 7.44
|
facebook/folly
| 30,157
|
doxygen
| false
|
|
collect
|
public ShareFetch<K, V> collect(final ShareFetchBuffer fetchBuffer) {
ShareFetch<K, V> fetch = ShareFetch.empty();
int recordsRemaining = shareFetchConfig.maxPollRecords;
try {
while (recordsRemaining > 0) {
final ShareCompletedFetch nextInLineFetch = fetchBuffer.nextInLineFetch();
if (nextInLineFetch == null || nextInLineFetch.isConsumed()) {
final ShareCompletedFetch completedFetch = fetchBuffer.peek();
if (completedFetch == null) {
break;
}
if (!completedFetch.isInitialized()) {
try {
fetchBuffer.setNextInLineFetch(initialize(completedFetch));
} catch (Exception e) {
if (fetch.isEmpty()) {
fetchBuffer.poll();
}
throw e;
}
} else {
fetchBuffer.setNextInLineFetch(completedFetch);
}
fetchBuffer.poll();
} else {
final TopicIdPartition tp = nextInLineFetch.partition;
ShareInFlightBatch<K, V> batch = nextInLineFetch.fetchRecords(
deserializers,
recordsRemaining,
shareFetchConfig.checkCrcs);
if (batch.isEmpty()) {
nextInLineFetch.drain();
}
recordsRemaining -= batch.numRecords();
fetch.add(tp, batch);
if (batch.getException() != null) {
throw new ShareFetchException(fetch, batch.getException().cause());
} else if (batch.hasCachedException()) {
break;
}
}
}
} catch (KafkaException e) {
if (fetch.isEmpty()) {
throw e;
}
}
return fetch;
}
|
Return the fetched {@link ConsumerRecord records}.
@param fetchBuffer {@link ShareFetchBuffer} from which to retrieve the {@link ConsumerRecord records}
@return A {@link ShareFetch} for the requested partitions
@throws TopicAuthorizationException If there is TopicAuthorization error in fetchResponse.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetchCollector.java
| 70
|
[
"fetchBuffer"
] | true
| 13
| 7.28
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
lock
|
def lock(self) -> locks._LockProtocol:
"""Get a pseudo lock that does nothing.
Most remote cache implementations don't have an ability to implement
any form of locking, so we provide a no-op pseudo-lock for consistency
with the interface.
Args:
timeout: Optional timeout in seconds (float). Ignored in this
Returns:
A callable that returns a no-op context manager.
"""
@contextmanager
def pseudo_lock(
timeout: float | None = None,
) -> Generator[None, None, None]:
yield
return pseudo_lock
|
Get a pseudo lock that does nothing.
Most remote cache implementations don't have an ability to implement
any form of locking, so we provide a no-op pseudo-lock for consistency
with the interface.
Args:
timeout: Optional timeout in seconds (float). Ignored in this
Returns:
A callable that returns a no-op context manager.
|
python
|
torch/_inductor/runtime/caching/implementations.py
| 377
|
[
"self"
] |
locks._LockProtocol
| true
| 1
| 6.72
|
pytorch/pytorch
| 96,034
|
google
| false
|
close
|
@Override
public void close() {
if (closed == false) {
closed = true;
arrays.adjustBreaker(-SHALLOW_SIZE);
Releasables.close(sortingDigest, mergingDigest);
}
}
|
Similar to the constructor above. The limit for switching from a {@link SortingDigest} to a {@link MergingDigest} implementation
is calculated based on the passed compression factor.
@param compression The compression factor for the MergingDigest
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/HybridDigest.java
| 222
|
[] |
void
| true
| 2
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
getAsLong
|
public static <E extends Throwable> long getAsLong(final FailableLongSupplier<E> supplier) {
try {
return supplier.getAsLong();
} catch (final Throwable t) {
throw rethrow(t);
}
}
|
Invokes a long supplier, and returns the result.
@param supplier The long supplier to invoke.
@param <E> The type of checked exception, which the supplier can throw.
@return The long, which has been created by the supplier
|
java
|
src/main/java/org/apache/commons/lang3/function/Failable.java
| 467
|
[
"supplier"
] | true
| 2
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
_check_object_for_strings
|
def _check_object_for_strings(values: np.ndarray) -> str:
"""
Check if we can use string hashtable instead of object hashtable.
Parameters
----------
values : ndarray
Returns
-------
str
"""
ndtype = values.dtype.name
if ndtype == "object":
# it's cheaper to use a String Hash Table than Object; we infer
# including nulls because that is the only difference between
# StringHashTable and ObjectHashtable
if lib.is_string_array(values, skipna=False):
ndtype = "string"
return ndtype
|
Check if we can use string hashtable instead of object hashtable.
Parameters
----------
values : ndarray
Returns
-------
str
|
python
|
pandas/core/algorithms.py
| 298
|
[
"values"
] |
str
| true
| 3
| 7.2
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
parallel_coordinates
|
def parallel_coordinates(
frame: DataFrame,
class_column: str,
cols: list[str] | None = None,
ax: Axes | None = None,
color: list[str] | tuple[str, ...] | None = None,
use_columns: bool = False,
xticks: list | tuple | None = None,
colormap: Colormap | str | None = None,
axvlines: bool = True,
axvlines_kwds: Mapping[str, Any] | None = None,
sort_labels: bool = False,
**kwargs,
) -> Axes:
"""
Parallel coordinates plotting.
Parameters
----------
frame : DataFrame
The DataFrame to be plotted.
class_column : str
Column name containing class names.
cols : list, optional
A list of column names to use.
ax : matplotlib.axis, optional
Matplotlib axis object.
color : list or tuple, optional
Colors to use for the different classes.
use_columns : bool, optional
If true, columns will be used as xticks.
xticks : list or tuple, optional
A list of values to use for xticks.
colormap : str or matplotlib colormap, default None
Colormap to use for line colors.
axvlines : bool, optional
If true, vertical lines will be added at each xtick.
axvlines_kwds : keywords, optional
Options to be passed to axvline method for vertical lines.
sort_labels : bool, default False
Sort class_column labels, useful when assigning colors.
**kwargs
Options to pass to matplotlib plotting method.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the parallel coordinates plot.
See Also
--------
plotting.andrews_curves : Generate a matplotlib plot for visualizing clusters
of multivariate data.
plotting.radviz : Plot a multidimensional dataset in 2D.
Examples
--------
.. plot::
:context: close-figs
>>> df = pd.read_csv(
... "https://raw.githubusercontent.com/pandas-dev/"
... "pandas/main/pandas/tests/io/data/csv/iris.csv"
... ) # doctest: +SKIP
>>> pd.plotting.parallel_coordinates(
... df, "Name", color=("#556270", "#4ECDC4", "#C7F464")
... ) # doctest: +SKIP
"""
plot_backend = _get_plot_backend("matplotlib")
return plot_backend.parallel_coordinates(
frame=frame,
class_column=class_column,
cols=cols,
ax=ax,
color=color,
use_columns=use_columns,
xticks=xticks,
colormap=colormap,
axvlines=axvlines,
axvlines_kwds=axvlines_kwds,
sort_labels=sort_labels,
**kwargs,
)
|
Parallel coordinates plotting.
Parameters
----------
frame : DataFrame
The DataFrame to be plotted.
class_column : str
Column name containing class names.
cols : list, optional
A list of column names to use.
ax : matplotlib.axis, optional
Matplotlib axis object.
color : list or tuple, optional
Colors to use for the different classes.
use_columns : bool, optional
If true, columns will be used as xticks.
xticks : list or tuple, optional
A list of values to use for xticks.
colormap : str or matplotlib colormap, default None
Colormap to use for line colors.
axvlines : bool, optional
If true, vertical lines will be added at each xtick.
axvlines_kwds : keywords, optional
Options to be passed to axvline method for vertical lines.
sort_labels : bool, default False
Sort class_column labels, useful when assigning colors.
**kwargs
Options to pass to matplotlib plotting method.
Returns
-------
matplotlib.axes.Axes
The matplotlib axes containing the parallel coordinates plot.
See Also
--------
plotting.andrews_curves : Generate a matplotlib plot for visualizing clusters
of multivariate data.
plotting.radviz : Plot a multidimensional dataset in 2D.
Examples
--------
.. plot::
:context: close-figs
>>> df = pd.read_csv(
... "https://raw.githubusercontent.com/pandas-dev/"
... "pandas/main/pandas/tests/io/data/csv/iris.csv"
... ) # doctest: +SKIP
>>> pd.plotting.parallel_coordinates(
... df, "Name", color=("#556270", "#4ECDC4", "#C7F464")
... ) # doctest: +SKIP
|
python
|
pandas/plotting/_misc.py
| 500
|
[
"frame",
"class_column",
"cols",
"ax",
"color",
"use_columns",
"xticks",
"colormap",
"axvlines",
"axvlines_kwds",
"sort_labels"
] |
Axes
| true
| 1
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
fill_missing_names
|
def fill_missing_names(names: Sequence[Hashable | None]) -> list[Hashable]:
"""
If a name is missing then replace it by level_n, where n is the count
Parameters
----------
names : list-like
list of column names or None values.
Returns
-------
list
list of column names with the None values replaced.
"""
return [f"level_{i}" if name is None else name for i, name in enumerate(names)]
|
If a name is missing then replace it by level_n, where n is the count
Parameters
----------
names : list-like
list of column names or None values.
Returns
-------
list
list of column names with the None values replaced.
|
python
|
pandas/core/common.py
| 641
|
[
"names"
] |
list[Hashable]
| true
| 2
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
handleAcknowledgeTimedOut
|
void handleAcknowledgeTimedOut(TopicIdPartition tip) {
Acknowledgements acks = incompleteAcknowledgements.get(tip);
if (acks != null) {
acks.complete(Errors.REQUEST_TIMED_OUT.exception());
// We do not know whether this is a renew ack, but handling the error as if it were, will ensure
// that we do not leave dangling acknowledgements
resultHandler.complete(tip, acks, requestType, true, Optional.empty());
}
}
|
Sets the error code for the acknowledgements which were timed out
after some retries.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java
| 1,335
|
[
"tip"
] |
void
| true
| 2
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
round
|
def round(self, decimals: int = 0, *args, **kwargs) -> Series:
"""
Round each value in a Series to the given number of decimals.
Parameters
----------
decimals : int, default 0
Number of decimal places to round to. If decimals is negative,
it specifies the number of positions to the left of the decimal point.
*args, **kwargs
Additional arguments and keywords have no effect but might be
accepted for compatibility with NumPy.
Returns
-------
Series
Rounded values of the Series.
See Also
--------
numpy.around : Round values of an np.array.
DataFrame.round : Round values of a DataFrame.
Series.dt.round : Round values of data to the specified freq.
Notes
-----
For values exactly halfway between rounded decimal values, pandas rounds
to the nearest even value (e.g. -0.5 and 0.5 round to 0.0, 1.5 and 2.5
round to 2.0, etc.).
Examples
--------
>>> s = pd.Series([-0.5, 0.1, 2.5, 1.3, 2.7])
>>> s.round()
0 -0.0
1 0.0
2 2.0
3 1.0
4 3.0
dtype: float64
"""
nv.validate_round(args, kwargs)
if self.dtype == "object":
raise TypeError("Expected numeric dtype, got object instead.")
new_mgr = self._mgr.round(decimals=decimals)
return self._constructor_from_mgr(new_mgr, axes=new_mgr.axes).__finalize__(
self, method="round"
)
|
Round each value in a Series to the given number of decimals.
Parameters
----------
decimals : int, default 0
Number of decimal places to round to. If decimals is negative,
it specifies the number of positions to the left of the decimal point.
*args, **kwargs
Additional arguments and keywords have no effect but might be
accepted for compatibility with NumPy.
Returns
-------
Series
Rounded values of the Series.
See Also
--------
numpy.around : Round values of an np.array.
DataFrame.round : Round values of a DataFrame.
Series.dt.round : Round values of data to the specified freq.
Notes
-----
For values exactly halfway between rounded decimal values, pandas rounds
to the nearest even value (e.g. -0.5 and 0.5 round to 0.0, 1.5 and 2.5
round to 2.0, etc.).
Examples
--------
>>> s = pd.Series([-0.5, 0.1, 2.5, 1.3, 2.7])
>>> s.round()
0 -0.0
1 0.0
2 2.0
3 1.0
4 3.0
dtype: float64
|
python
|
pandas/core/series.py
| 2,540
|
[
"self",
"decimals"
] |
Series
| true
| 2
| 8.48
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
run
|
public final ExitStatus run(String... args) throws Exception {
String[] argsToUse = args.clone();
for (int i = 0; i < argsToUse.length; i++) {
if ("-cp".equals(argsToUse[i])) {
argsToUse[i] = "--cp";
}
argsToUse[i] = this.argumentProcessor.apply(argsToUse[i]);
}
OptionSet options = getParser().parse(argsToUse);
return run(options);
}
|
Create a new {@link OptionHandler} instance with an argument processor.
@param argumentProcessor strategy that can be used to manipulate arguments before
they are used.
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/options/OptionHandler.java
| 97
|
[] |
ExitStatus
| true
| 3
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
build
|
@Override
public ImmutableSet<E> build() {
requireNonNull(impl); // see the comment on the field
forceCopy = true;
impl = impl.review();
return impl.build();
}
|
Adds each element of {@code elements} to the {@code ImmutableSet}, ignoring duplicate
elements (only the first duplicate element is added).
@param elements the elements to add
@return this {@code Builder} object
@throws NullPointerException if {@code elements} is null or contains a null element
|
java
|
guava/src/com/google/common/collect/ImmutableSet.java
| 573
|
[] | true
| 1
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
|
postProcessApplicationContext
|
protected void postProcessApplicationContext(ConfigurableApplicationContext context) {
if (this.beanNameGenerator != null) {
context.getBeanFactory()
.registerSingleton(AnnotationConfigUtils.CONFIGURATION_BEAN_NAME_GENERATOR, this.beanNameGenerator);
}
if (this.resourceLoader != null) {
if (context instanceof GenericApplicationContext genericApplicationContext) {
genericApplicationContext.setResourceLoader(this.resourceLoader);
}
if (context instanceof DefaultResourceLoader defaultResourceLoader) {
defaultResourceLoader.setClassLoader(this.resourceLoader.getClassLoader());
}
}
if (this.addConversionService) {
context.getBeanFactory().setConversionService(context.getEnvironment().getConversionService());
}
}
|
Apply any relevant post-processing to the {@link ApplicationContext}. Subclasses
can apply additional processing as required.
@param context the application context
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringApplication.java
| 591
|
[
"context"
] |
void
| true
| 6
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
setSourceMapSource
|
function setSourceMapSource(source: SourceMapSource) {
if (sourceMapsDisabled) {
return;
}
sourceMapSource = source;
if (source === mostRecentlyAddedSourceMapSource) {
// Fast path for when the new source map is the most recently added, in which case
// we use its captured index without going through the source map generator.
sourceMapSourceIndex = mostRecentlyAddedSourceMapSourceIndex;
return;
}
if (isJsonSourceMapSource(source)) {
return;
}
sourceMapSourceIndex = sourceMapGenerator!.addSource(source.fileName);
if (printerOptions.inlineSources) {
sourceMapGenerator!.setSourceContent(sourceMapSourceIndex, source.text);
}
mostRecentlyAddedSourceMapSource = source;
mostRecentlyAddedSourceMapSourceIndex = sourceMapSourceIndex;
}
|
Emits a token of a node with possible leading and trailing source maps.
@param node The node containing the token.
@param token The token to emit.
@param tokenStartPos The start pos of the token.
@param emitCallback The callback used to emit the token.
|
typescript
|
src/compiler/emitter.ts
| 6,273
|
[
"source"
] | false
| 5
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
identical
|
def identical(self, other) -> bool:
"""
Similar to equals, but checks that object attributes and types are also equal.
Parameters
----------
other : Index
The Index object you want to compare with the current Index object.
Returns
-------
bool
If two Index objects have equal elements and same type True,
otherwise False.
See Also
--------
Index.equals: Determine if two Index object are equal.
Index.has_duplicates: Check if the Index has duplicate values.
Index.is_unique: Return if the index has unique values.
Examples
--------
>>> idx1 = pd.Index(["1", "2", "3"])
>>> idx2 = pd.Index(["1", "2", "3"])
>>> idx2.identical(idx1)
True
>>> idx1 = pd.Index(["1", "2", "3"], name="A")
>>> idx2 = pd.Index(["1", "2", "3"], name="B")
>>> idx2.identical(idx1)
False
"""
return (
self.equals(other)
and all(
getattr(self, c, None) == getattr(other, c, None)
for c in self._comparables
)
and type(self) == type(other)
and self.dtype == other.dtype
)
|
Similar to equals, but checks that object attributes and types are also equal.
Parameters
----------
other : Index
The Index object you want to compare with the current Index object.
Returns
-------
bool
If two Index objects have equal elements and same type True,
otherwise False.
See Also
--------
Index.equals: Determine if two Index object are equal.
Index.has_duplicates: Check if the Index has duplicate values.
Index.is_unique: Return if the index has unique values.
Examples
--------
>>> idx1 = pd.Index(["1", "2", "3"])
>>> idx2 = pd.Index(["1", "2", "3"])
>>> idx2.identical(idx1)
True
>>> idx1 = pd.Index(["1", "2", "3"], name="A")
>>> idx2 = pd.Index(["1", "2", "3"], name="B")
>>> idx2.identical(idx1)
False
|
python
|
pandas/core/indexes/base.py
| 5,596
|
[
"self",
"other"
] |
bool
| true
| 4
| 8.32
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
ofNonNull
|
public static <L, R> Pair<L, R> ofNonNull(final L left, final R right) {
return ImmutablePair.ofNonNull(left, right);
}
|
Creates an immutable pair of two non-null objects inferring the generic types.
@param <L> the left element type.
@param <R> the right element type.
@param left the left element, may not be null.
@param right the right element, may not be null.
@return an immutable pair formed from the two parameters, not null.
@throws NullPointerException if any input is null.
@since 3.13.0
|
java
|
src/main/java/org/apache/commons/lang3/tuple/Pair.java
| 107
|
[
"left",
"right"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
write
|
public static long write(DataOutputStream out,
byte magic,
long timestamp,
ByteBuffer key,
ByteBuffer value,
CompressionType compressionType,
TimestampType timestampType) throws IOException {
byte attributes = computeAttributes(magic, compressionType, timestampType);
long crc = computeChecksum(magic, attributes, timestamp, key, value);
write(out, magic, crc, attributes, timestamp, key, value);
return crc;
}
|
Write the record data with the given compression type and return the computed crc.
@param out The output stream to write to
@param magic The magic value to be used
@param timestamp The timestamp of the record
@param key The record key
@param value The record value
@param compressionType The compression type
@param timestampType The timestamp type
@return the computed CRC for this record.
@throws IOException for any IO errors writing to the output stream.
|
java
|
clients/src/main/java/org/apache/kafka/common/record/LegacyRecord.java
| 410
|
[
"out",
"magic",
"timestamp",
"key",
"value",
"compressionType",
"timestampType"
] | true
| 1
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
size
|
long size() {
return MINIMUM_SIZE + this.commentLength;
}
|
Return the size of this record.
@return the record size
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipEndOfCentralDirectoryRecord.java
| 75
|
[] | true
| 1
| 6.8
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
createInstance
|
@Override
@SuppressWarnings("unchecked")
protected Map<Object, Object> createInstance() {
if (this.sourceMap == null) {
throw new IllegalArgumentException("'sourceMap' is required");
}
Map<Object, Object> result = null;
if (this.targetMapClass != null) {
result = BeanUtils.instantiateClass(this.targetMapClass);
}
else {
result = CollectionUtils.newLinkedHashMap(this.sourceMap.size());
}
Class<?> keyType = null;
Class<?> valueType = null;
if (this.targetMapClass != null) {
ResolvableType mapType = ResolvableType.forClass(this.targetMapClass).asMap();
keyType = mapType.resolveGeneric(0);
valueType = mapType.resolveGeneric(1);
}
if (keyType != null || valueType != null) {
TypeConverter converter = getBeanTypeConverter();
for (Map.Entry<?, ?> entry : this.sourceMap.entrySet()) {
Object convertedKey = converter.convertIfNecessary(entry.getKey(), keyType);
Object convertedValue = converter.convertIfNecessary(entry.getValue(), valueType);
result.put(convertedKey, convertedValue);
}
}
else {
result.putAll(this.sourceMap);
}
return result;
}
|
Set the class to use for the target Map. Can be populated with a fully
qualified class name when defined in a Spring application context.
<p>Default is a linked HashMap, keeping the registration order.
@see java.util.LinkedHashMap
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/MapFactoryBean.java
| 76
|
[] | true
| 6
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
swap
|
public static void swap(final boolean[] array, int offset1, int offset2, int len) {
if (isEmpty(array) || offset1 >= array.length || offset2 >= array.length) {
return;
}
offset1 = max0(offset1);
offset2 = max0(offset2);
len = Math.min(Math.min(len, array.length - offset1), array.length - offset2);
for (int i = 0; i < len; i++, offset1++, offset2++) {
final boolean aux = array[offset1];
array[offset1] = array[offset2];
array[offset2] = aux;
}
}
|
Swaps a series of elements in the given boolean array.
<p>This method does nothing for a {@code null} or empty input array or
for overflow indices. Negative indices are promoted to 0(zero). If any
of the sub-arrays to swap falls outside of the given array, then the
swap is stopped at the end of the array and as many as possible elements
are swapped.</p>
Examples:
<ul>
<li>ArrayUtils.swap([true, false, true, false], 0, 2, 1) -> [true, false, true, false]</li>
<li>ArrayUtils.swap([true, false, true, false], 0, 0, 1) -> [true, false, true, false]</li>
<li>ArrayUtils.swap([true, false, true, false], 0, 2, 2) -> [true, false, true, false]</li>
<li>ArrayUtils.swap([true, false, true, false], -3, 2, 2) -> [true, false, true, false]</li>
<li>ArrayUtils.swap([true, false, true, false], 0, 3, 3) -> [false, false, true, true]</li>
</ul>
@param array the array to swap, may be {@code null}.
@param offset1 the index of the first element in the series to swap.
@param offset2 the index of the second element in the series to swap.
@param len the number of elements to swap starting with the given indices.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 8,051
|
[
"array",
"offset1",
"offset2",
"len"
] |
void
| true
| 5
| 8.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
toZonedDateTime
|
public static ZonedDateTime toZonedDateTime(final Calendar calendar) {
return ZonedDateTime.ofInstant(calendar.toInstant(), toZoneId(calendar));
}
|
Converts a Calendar to a ZonedDateTime.
@param calendar the Calendar to convert.
@return a ZonedDateTime.
@since 3.17.0
|
java
|
src/main/java/org/apache/commons/lang3/time/CalendarUtils.java
| 96
|
[
"calendar"
] |
ZonedDateTime
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
orElseGet
|
public T orElseGet(Supplier<? extends T> other) {
return (this.value != null) ? this.value : other.get();
}
|
Return the object that was bound, or the result of invoking {@code other} if no
value has been bound.
@param other a {@link Supplier} of the value to be returned if there is no bound
value
@return the value, if bound, otherwise the supplied {@code other}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/BindResult.java
| 115
|
[
"other"
] |
T
| true
| 2
| 8.16
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
unzip
|
function unzip(array) {
if (!(array && array.length)) {
return [];
}
var length = 0;
array = arrayFilter(array, function(group) {
if (isArrayLikeObject(group)) {
length = nativeMax(group.length, length);
return true;
}
});
return baseTimes(length, function(index) {
return arrayMap(array, baseProperty(index));
});
}
|
This method is like `_.zip` except that it accepts an array of grouped
elements and creates an array regrouping the elements to their pre-zip
configuration.
@static
@memberOf _
@since 1.2.0
@category Array
@param {Array} array The array of grouped elements to process.
@returns {Array} Returns the new array of regrouped elements.
@example
var zipped = _.zip(['a', 'b'], [1, 2], [true, false]);
// => [['a', 1, true], ['b', 2, false]]
_.unzip(zipped);
// => [['a', 'b'], [1, 2], [true, false]]
|
javascript
|
lodash.js
| 8,568
|
[
"array"
] | false
| 4
| 7.68
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
forEach
|
@Override
public void forEach(BiConsumer<? super K, ? super V> action) {
checkNotNull(action);
for (Node<K, V> node = firstInKeyInsertionOrder;
node != null;
node = node.nextInKeyInsertionOrder) {
action.accept(node.key, node.value);
}
}
|
Returns {@code true} if this BiMap contains an entry whose value is equal to {@code value} (or,
equivalently, if this inverse view contains a key that is equal to {@code value}).
<p>Due to the property that values in a BiMap are unique, this will tend to execute in
faster-than-linear time.
@param value the object to search for in the values of this BiMap
@return true if a mapping exists from a key to the specified value
|
java
|
guava/src/com/google/common/collect/HashBiMap.java
| 588
|
[
"action"
] |
void
| true
| 2
| 7.92
|
google/guava
| 51,352
|
javadoc
| false
|
get_names_flat
|
def get_names_flat(adtype):
"""
Returns the field names of the input datatype as a tuple. Input datatype
must have fields otherwise error is raised.
Nested structure are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> import numpy as np
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None
False
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', str)]).dtype)
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names is not None:
listnames.extend(get_names_flat(current))
return tuple(listnames)
|
Returns the field names of the input datatype as a tuple. Input datatype
must have fields otherwise error is raised.
Nested structure are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> import numpy as np
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None
False
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', str)]).dtype)
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
|
python
|
numpy/lib/recfunctions.py
| 136
|
[
"adtype"
] | false
| 3
| 7.36
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
build_memory_profile
|
def build_memory_profile(
graph: fx.Graph,
is_releasable: Callable[[fx.Node], bool],
size_of: Callable[[int | torch.SymInt], int] | None = None,
) -> list[int]:
"""
Function to estimate the memory profile of an input FX graph.
Args:
- graph (fx.Graph): The input FX graph for which the memory profile
is to be estimated.
- is_releasable (Callable[[fx.Node], bool]): A function that
determines if a node's memory can be released (e.g. primal nodes
cannot be released).
- size_of (Callable[[int | torch.SymInt], int]): A function that converts
byte counts (possibly symbolic) to concrete integers.
Returns:
- List[int]: A list representing the memory profile over the execution
of the graph, where each entry corresponds to the memory usage at
a particular point in the execution.
"""
size_of = size_of or _size_of_default
nodes = list(graph.nodes)
alias_info = GraphAliasTracker(nodes)
# Build memory profile
current_memory = 0
for node in itertools.chain(
graph.find_nodes(op="placeholder"), graph.find_nodes(op="get_attr")
):
for storage_key in alias_info.get_fresh_allocations(node):
if device_filter(storage_key.device):
current_memory += size_of(storage_key.storage.nbytes())
memory_profile = [current_memory]
for node in nodes:
if node.op in ("placeholder", "get_attr", "output"):
continue
# Process allocations
for storage_key in alias_info.get_fresh_allocations(node):
if device_filter(storage_key.device):
current_memory += size_of(storage_key.storage.nbytes())
memory_profile.append(current_memory)
# Process deallocations
# pyrefly: ignore [bad-assignment]
for storage_key in alias_info.get_storages_last_used(node):
allocator = alias_info.storage_to_allocator[storage_key]
if is_releasable(allocator):
if device_filter(storage_key.device):
current_memory -= size_of(storage_key.storage.nbytes())
memory_profile.append(current_memory)
return memory_profile
|
Function to estimate the memory profile of an input FX graph.
Args:
- graph (fx.Graph): The input FX graph for which the memory profile
is to be estimated.
- is_releasable (Callable[[fx.Node], bool]): A function that
determines if a node's memory can be released (e.g. primal nodes
cannot be released).
- size_of (Callable[[int | torch.SymInt], int]): A function that converts
byte counts (possibly symbolic) to concrete integers.
Returns:
- List[int]: A list representing the memory profile over the execution
of the graph, where each entry corresponds to the memory usage at
a particular point in the execution.
|
python
|
torch/_inductor/fx_passes/memory_estimator.py
| 154
|
[
"graph",
"is_releasable",
"size_of"
] |
list[int]
| true
| 12
| 8.08
|
pytorch/pytorch
| 96,034
|
google
| false
|
byteSize
|
@Override
public int byteSize() {
if (mergingDigest != null) {
return mergingDigest.byteSize();
}
return sortingDigest.byteSize();
}
|
Similar to the constructor above. The limit for switching from a {@link SortingDigest} to a {@link MergingDigest} implementation
is calculated based on the passed compression factor.
@param compression The compression factor for the MergingDigest
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/HybridDigest.java
| 214
|
[] | true
| 2
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
failableStream
|
@SafeVarargs // Creating a stream from an array is safe
public static <T> FailableStream<T> failableStream(final T... values) {
return failableStream(of(values));
}
|
Shorthand for {@code Streams.failableStream(Streams.of(arrayValues))}.
@param <T> the type of stream elements.
@param values the elements of the new stream, may be {@code null}.
@return the new FailableStream on {@code values} or an empty stream.
@since 3.14.0
|
java
|
src/main/java/org/apache/commons/lang3/stream/Streams.java
| 589
|
[] | true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
polycompanion
|
def polycompanion(c):
"""
Return the companion matrix of c.
The companion matrix for power series cannot be made symmetric by
scaling the basis, so this function differs from those for the
orthogonal polynomials.
Parameters
----------
c : array_like
1-D array of polynomial coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Companion matrix of dimensions (deg, deg).
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c = (1, 2, 3)
>>> P.polycompanion(c)
array([[ 0. , -0.33333333],
[ 1. , -0.66666667]])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0] / c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
bot = mat.reshape(-1)[n::n + 1]
bot[...] = 1
mat[:, -1] -= c[:-1] / c[-1]
return mat
|
Return the companion matrix of c.
The companion matrix for power series cannot be made symmetric by
scaling the basis, so this function differs from those for the
orthogonal polynomials.
Parameters
----------
c : array_like
1-D array of polynomial coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Companion matrix of dimensions (deg, deg).
Examples
--------
>>> from numpy.polynomial import polynomial as P
>>> c = (1, 2, 3)
>>> P.polycompanion(c)
array([[ 0. , -0.33333333],
[ 1. , -0.66666667]])
|
python
|
numpy/polynomial/polynomial.py
| 1,448
|
[
"c"
] | false
| 3
| 7.68
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
decrementAndGet
|
public int decrementAndGet() {
value--;
return value;
}
|
Decrements this instance's value by 1; this method returns the value associated with the instance
immediately after the decrement operation. This method is not thread safe.
@return the value associated with the instance after it is decremented.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableInt.java
| 157
|
[] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
setContextValue
|
@Override
public ContextedException setContextValue(final String label, final Object value) {
exceptionContext.setContextValue(label, value);
return this;
}
|
Sets information helpful to a developer in diagnosing and correcting the problem.
For the information to be meaningful, the value passed should have a reasonable
toString() implementation.
Any existing values with the same labels are removed before the new one is added.
<p>
Note: This exception is only serializable if the object added as value is serializable.
</p>
@param label a textual label associated with information, {@code null} not recommended
@param value information needed to understand exception, may be {@code null}
@return {@code this}, for method chaining, not {@code null}
|
java
|
src/main/java/org/apache/commons/lang3/exception/ContextedException.java
| 248
|
[
"label",
"value"
] |
ContextedException
| true
| 1
| 6.56
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
option_context
|
def option_context(*args) -> Generator[None]:
"""
Context manager to temporarily set options in a ``with`` statement.
This method allows users to set one or more pandas options temporarily
within a controlled block. The previous options' values are restored
once the block is exited. This is useful when making temporary adjustments
to pandas' behavior without affecting the global state.
Parameters
----------
*args : str | object | dict
An even amount of arguments provided in pairs which will be
interpreted as (pattern, value) pairs. Alternatively, a single
dictionary of {pattern: value} may be provided.
Returns
-------
None
No return value.
Yields
------
None
No yield value.
See Also
--------
get_option : Retrieve the value of the specified option.
set_option : Set the value of the specified option.
reset_option : Reset one or more options to their default value.
describe_option : Print the description for one or more registered options.
Notes
-----
For all available options, please view the :ref:`User Guide <options.available>`
or use ``pandas.describe_option()``.
Examples
--------
>>> from pandas import option_context
>>> with option_context("display.max_rows", 10, "display.max_columns", 5):
... pass
>>> with option_context({"display.max_rows": 10, "display.max_columns": 5}):
... pass
"""
if len(args) == 1 and isinstance(args[0], dict):
args = tuple(kv for item in args[0].items() for kv in item)
if len(args) % 2 != 0 or len(args) < 2:
raise ValueError(
"Provide an even amount of arguments as "
"option_context(pat, val, pat, val...)."
)
ops = tuple(zip(args[::2], args[1::2], strict=True))
undo: tuple[tuple[Any, Any], ...] = ()
try:
undo = tuple((pat, get_option(pat)) for pat, val in ops)
for pat, val in ops:
set_option(pat, val)
yield
finally:
for pat, val in undo:
set_option(pat, val)
|
Context manager to temporarily set options in a ``with`` statement.
This method allows users to set one or more pandas options temporarily
within a controlled block. The previous options' values are restored
once the block is exited. This is useful when making temporary adjustments
to pandas' behavior without affecting the global state.
Parameters
----------
*args : str | object | dict
An even amount of arguments provided in pairs which will be
interpreted as (pattern, value) pairs. Alternatively, a single
dictionary of {pattern: value} may be provided.
Returns
-------
None
No return value.
Yields
------
None
No yield value.
See Also
--------
get_option : Retrieve the value of the specified option.
set_option : Set the value of the specified option.
reset_option : Reset one or more options to their default value.
describe_option : Print the description for one or more registered options.
Notes
-----
For all available options, please view the :ref:`User Guide <options.available>`
or use ``pandas.describe_option()``.
Examples
--------
>>> from pandas import option_context
>>> with option_context("display.max_rows", 10, "display.max_columns", 5):
... pass
>>> with option_context({"display.max_rows": 10, "display.max_columns": 5}):
... pass
|
python
|
pandas/_config/config.py
| 454
|
[] |
Generator[None]
| true
| 7
| 8.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
stream
|
public static <O> FailableStream<O> stream(final Collection<O> stream) {
return stream(stream.stream());
}
|
Converts the given {@link Collection} into a {@link FailableStream}.
This is basically a simplified, reduced version of the {@link Stream}
class, with the same underlying element stream, except that failable
objects, like {@link FailablePredicate}, {@link FailableFunction}, or
{@link FailableConsumer} may be applied, instead of
{@link Predicate}, {@link Function}, or {@link Consumer}. The idea is
to rewrite a code snippet like this:
<pre>{@code
final List<O> list;
final Method m;
final Function<O,String> mapper = (o) -> {
try {
return (String) m.invoke(o);
} catch (Throwable t) {
throw Functions.rethrow(t);
}
};
final List<String> strList = list.stream()
.map(mapper).collect(Collectors.toList());
}</pre>
as follows:
<pre>{@code
final List<O> list;
final Method m;
final List<String> strList = Functions.stream(list.stream())
.map((o) -> (String) m.invoke(o)).collect(Collectors.toList());
}</pre>
While the second version may not be <em>quite</em> as
efficient (because it depends on the creation of additional,
intermediate objects, of type FailableStream), it is much more
concise, and readable, and meets the spirit of Lambdas better
than the first version.
@param <O> The streams element type.
@param stream The stream, which is being converted.
@return The {@link FailableStream}, which has been created by
converting the stream.
|
java
|
src/main/java/org/apache/commons/lang3/Streams.java
| 493
|
[
"stream"
] | true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
join
|
def join(
self,
other: Index,
*,
how: JoinHow = "left",
level: Level | None = None,
return_indexers: bool = False,
sort: bool = False,
) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
"""
Compute join_index and indexers to conform data structures to the new index.
Parameters
----------
other : Index
The other index on which join is performed.
how : {'left', 'right', 'inner', 'outer'}
level : int or level name, default None
It is either the integer position or the name of the level.
return_indexers : bool, default False
Whether to return the indexers or not for both the index objects.
sort : bool, default False
Sort the join keys lexicographically in the result Index. If False,
the order of the join keys depends on the join type (how keyword).
Returns
-------
join_index, (left_indexer, right_indexer)
The new index.
See Also
--------
DataFrame.join : Join columns with `other` DataFrame either on index
or on a key.
DataFrame.merge : Merge DataFrame or named Series objects with a
database-style join.
Examples
--------
>>> idx1 = pd.Index([1, 2, 3])
>>> idx2 = pd.Index([4, 5, 6])
>>> idx1.join(idx2, how="outer")
Index([1, 2, 3, 4, 5, 6], dtype='int64')
>>> idx1.join(other=idx2, how="outer", return_indexers=True)
(Index([1, 2, 3, 4, 5, 6], dtype='int64'),
array([ 0, 1, 2, -1, -1, -1]), array([-1, -1, -1, 0, 1, 2]))
"""
if not isinstance(other, Index):
warnings.warn(
f"Passing {type(other).__name__} to {type(self).__name__}.join "
"is deprecated and will raise in a future version. "
"Pass an Index instead.",
Pandas4Warning,
stacklevel=find_stack_level(),
)
other = ensure_index(other)
sort = sort or how == "outer"
if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex):
if (self.tz is None) ^ (other.tz is None):
# Raise instead of casting to object below.
raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex")
if not self._is_multi and not other._is_multi:
# We have specific handling for MultiIndex below
pself, pother = self._maybe_downcast_for_indexing(other)
if pself is not self or pother is not other:
return pself.join(
pother, how=how, level=level, return_indexers=True, sort=sort
)
# try to figure out the join level
# GH3662
if level is None and (self._is_multi or other._is_multi):
# have the same levels/names so a simple join
if self.names == other.names:
pass
else:
return self._join_multi(other, how=how)
# join on the level
if level is not None and (self._is_multi or other._is_multi):
return self._join_level(other, level, how=how)
if len(self) == 0 or len(other) == 0:
try:
return self._join_empty(other, how, sort)
except TypeError:
# object dtype; non-comparable objects
pass
if self.dtype != other.dtype:
dtype = self._find_common_type_compat(other)
this = self.astype(dtype, copy=False)
other = other.astype(dtype, copy=False)
return this.join(other, how=how, return_indexers=True)
elif (
isinstance(self, ABCCategoricalIndex)
and isinstance(other, ABCCategoricalIndex)
and not self.ordered
and not self.categories.equals(other.categories)
):
# dtypes are "equal" but categories are in different order
other = Index(other._values.reorder_categories(self.categories))
_validate_join_method(how)
if (
self.is_monotonic_increasing
and other.is_monotonic_increasing
and self._can_use_libjoin
and other._can_use_libjoin
and (self.is_unique or other.is_unique)
):
try:
return self._join_monotonic(other, how=how)
except TypeError:
# object dtype; non-comparable objects
pass
elif not self.is_unique or not other.is_unique:
return self._join_non_unique(other, how=how, sort=sort)
return self._join_via_get_indexer(other, how, sort)
|
Compute join_index and indexers to conform data structures to the new index.
Parameters
----------
other : Index
The other index on which join is performed.
how : {'left', 'right', 'inner', 'outer'}
level : int or level name, default None
It is either the integer position or the name of the level.
return_indexers : bool, default False
Whether to return the indexers or not for both the index objects.
sort : bool, default False
Sort the join keys lexicographically in the result Index. If False,
the order of the join keys depends on the join type (how keyword).
Returns
-------
join_index, (left_indexer, right_indexer)
The new index.
See Also
--------
DataFrame.join : Join columns with `other` DataFrame either on index
or on a key.
DataFrame.merge : Merge DataFrame or named Series objects with a
database-style join.
Examples
--------
>>> idx1 = pd.Index([1, 2, 3])
>>> idx2 = pd.Index([4, 5, 6])
>>> idx1.join(idx2, how="outer")
Index([1, 2, 3, 4, 5, 6], dtype='int64')
>>> idx1.join(other=idx2, how="outer", return_indexers=True)
(Index([1, 2, 3, 4, 5, 6], dtype='int64'),
array([ 0, 1, 2, -1, -1, -1]), array([-1, -1, -1, 0, 1, 2]))
|
python
|
pandas/core/indexes/base.py
| 4,386
|
[
"self",
"other",
"how",
"level",
"return_indexers",
"sort"
] |
Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]
| true
| 33
| 7.2
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
getAndDecrement
|
public byte getAndDecrement() {
final byte last = value;
value--;
return last;
}
|
Decrements this instance's value by 1; this method returns the value associated with the instance
immediately prior to the decrement operation. This method is not thread safe.
@return the value associated with the instance before it was decremented.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableByte.java
| 245
|
[] | true
| 1
| 7.04
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
equals
|
@Override
public boolean equals(final Object obj) {
if (!(obj instanceof FastDatePrinter)) {
return false;
}
final FastDatePrinter other = (FastDatePrinter) obj;
return pattern.equals(other.pattern)
&& timeZone.equals(other.timeZone)
&& locale.equals(other.locale);
}
|
Compares two objects for equality.
@param obj the object to compare to.
@return {@code true} if equal.
|
java
|
src/main/java/org/apache/commons/lang3/time/FastDatePrinter.java
| 1,107
|
[
"obj"
] | true
| 4
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
join
|
public static String join(String separator, long... array) {
checkNotNull(separator);
if (array.length == 0) {
return "";
}
// For pre-sizing a builder, just get the right order of magnitude
StringBuilder builder = new StringBuilder(array.length * 10);
builder.append(array[0]);
for (int i = 1; i < array.length; i++) {
builder.append(separator).append(array[i]);
}
return builder.toString();
}
|
Returns a string containing the supplied {@code long} values separated by {@code separator}.
For example, {@code join("-", 1L, 2L, 3L)} returns the string {@code "1-2-3"}.
@param separator the text that should appear between consecutive values in the resulting string
(but not at the start or end)
@param array an array of {@code long} values, possibly empty
|
java
|
android/guava/src/com/google/common/primitives/Longs.java
| 507
|
[
"separator"
] |
String
| true
| 3
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
generateBeanTypeCode
|
private CodeBlock generateBeanTypeCode(ResolvableType beanType) {
if (!beanType.hasGenerics()) {
return valueCodeGenerator.generateCode(ClassUtils.getUserClass(beanType.toClass()));
}
return valueCodeGenerator.generateCode(beanType);
}
|
Extract the target class of a public {@link FactoryBean} based on its
constructor. If the implementation does not resolve the target class
because it itself uses a generic, attempt to extract it from the bean type.
@param factoryBeanType the factory bean type
@param beanType the bean type
@return the target class to use
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/DefaultBeanRegistrationCodeFragments.java
| 148
|
[
"beanType"
] |
CodeBlock
| true
| 2
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
unzipWith
|
function unzipWith(array, iteratee) {
if (!(array && array.length)) {
return [];
}
var result = unzip(array);
if (iteratee == null) {
return result;
}
return arrayMap(result, function(group) {
return apply(iteratee, undefined, group);
});
}
|
This method is like `_.unzip` except that it accepts `iteratee` to specify
how regrouped values should be combined. The iteratee is invoked with the
elements of each group: (...group).
@static
@memberOf _
@since 3.8.0
@category Array
@param {Array} array The array of grouped elements to process.
@param {Function} [iteratee=_.identity] The function to combine
regrouped values.
@returns {Array} Returns the new array of regrouped elements.
@example
var zipped = _.zip([1, 2], [10, 20], [100, 200]);
// => [[1, 10, 100], [2, 20, 200]]
_.unzipWith(zipped, _.add);
// => [3, 30, 300]
|
javascript
|
lodash.js
| 8,605
|
[
"array",
"iteratee"
] | false
| 4
| 7.68
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
of
|
static RegisteredBean of(ConfigurableListableBeanFactory beanFactory, String beanName, RootBeanDefinition mbd) {
return new RegisteredBean(beanFactory, () -> beanName, false, () -> mbd, null);
}
|
Create a new {@link RegisteredBean} instance for a regular bean.
@param beanFactory the source bean factory
@param beanName the bean name
@param mbd the pre-determined merged bean definition
@return a new {@link RegisteredBean} instance
@since 6.0.7
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/RegisteredBean.java
| 98
|
[
"beanFactory",
"beanName",
"mbd"
] |
RegisteredBean
| true
| 1
| 6.64
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
task_state
|
def task_state(args) -> None:
"""
Return the state of a TaskInstance at the command line.
>>> airflow tasks state tutorial sleep 2015-01-01
success
"""
if not (dag := SerializedDagModel.get_dag(args.dag_id)):
raise SystemExit(f"Can not find dag {args.dag_id!r}")
task = dag.get_task(task_id=args.task_id)
ti, _ = _get_ti(task, args.map_index, logical_date_or_run_id=args.logical_date_or_run_id)
print(ti.state)
|
Return the state of a TaskInstance at the command line.
>>> airflow tasks state tutorial sleep 2015-01-01
success
|
python
|
airflow-core/src/airflow/cli/commands/task_command.py
| 263
|
[
"args"
] |
None
| true
| 2
| 7.12
|
apache/airflow
| 43,597
|
unknown
| false
|
_parse_content_type_header
|
def _parse_content_type_header(header):
"""Returns content type and parameters from given header
:param header: string
:return: tuple containing content type and dictionary of
parameters
"""
tokens = header.split(";")
content_type, params = tokens[0].strip(), tokens[1:]
params_dict = {}
items_to_strip = "\"' "
for param in params:
param = param.strip()
if param:
key, value = param, True
index_of_equals = param.find("=")
if index_of_equals != -1:
key = param[:index_of_equals].strip(items_to_strip)
value = param[index_of_equals + 1 :].strip(items_to_strip)
params_dict[key.lower()] = value
return content_type, params_dict
|
Returns content type and parameters from given header
:param header: string
:return: tuple containing content type and dictionary of
parameters
|
python
|
src/requests/utils.py
| 504
|
[
"header"
] | false
| 4
| 6
|
psf/requests
| 53,586
|
sphinx
| false
|
|
_aot_stage2b_compile_forward_or_inference
|
def _aot_stage2b_compile_forward_or_inference(
fw_module: torch.fx.GraphModule,
adjusted_flat_args: list[Any],
maybe_subclass_meta: Optional[SubclassMeta],
fw_metadata: ViewAndMutationMeta,
aot_config: AOTConfig,
*,
is_inference: bool,
num_fw_outs_saved_for_bw: Optional[int] = None,
) -> tuple[Optional[list[Optional[tuple[int, ...]]]], Callable]:
"""
Compile the forward or inference graph. Returns:
- the output strides of the forward graph
- the compiled forward/inference function
Args:
fw_module: The forward graph module to compile
adjusted_flat_args: Flattened arguments after adjustments
maybe_subclass_meta: Metadata for tensor subclasses
fw_metadata: View and mutation metadata
aot_config: AOT configuration
is_inference: If True, compile for inference; if False, compile for forward (autograd)
num_fw_outs_saved_for_bw: Number of forward outputs saved for backward (required if not is_inference)
Before compiling, we run pre_compile for the following wrappers:
- FakifiedOutWrapper
- FunctionalizedRngRuntimeWrapper
After compiling, we run post_compile for the following wrappers:
- EffectTokensWrapper
- AOTDispatchSubclassWrapper
- FunctionalizedRngRuntimeWrapper
- FakifiedOutWrapper
"""
# Validation
if not is_inference and num_fw_outs_saved_for_bw is None:
raise ValueError(
"num_fw_outs_saved_for_bw must be provided when is_inference=False"
)
# Determine grad context, autocast context, tracking mode, compiler
if is_inference:
grad_ctx: Any = nullcontext
autocast_ctx: Any = (
torch._C._DisableAutocast
if torch._C._is_any_autocast_enabled()
else nullcontext
)
tracking_mode: str = "inference"
compiler: Any = aot_config.inference_compiler
else:
grad_ctx = torch.no_grad
autocast_ctx = torch._C._DisableAutocast
tracking_mode = "forward"
compiler = aot_config.fw_compiler
with grad_ctx(), autocast_ctx(), track_graph_compiling(aot_config, tracking_mode):
# Setup wrappers
fakified_out_wrapper = FakifiedOutWrapper()
fakified_out_wrapper.pre_compile(
fw_module, adjusted_flat_args, aot_config, fw_metadata=fw_metadata
)
# Initialize RNG wrapper based on mode
functionalized_rng_wrapper = FunctionalizedRngRuntimeWrapper(
return_new_outs=is_inference
)
# Add RNG states for forward mode only
if not is_inference and fw_metadata.num_graphsafe_rng_states > 0:
index = fw_metadata.graphsafe_rng_state_index
assert index is not None
rng_states = [
get_cuda_generator_meta_val(index)
for _ in range(fw_metadata.num_graphsafe_rng_states)
]
adjusted_flat_args.extend(rng_states) # type: ignore[arg-type]
functionalized_rng_wrapper.pre_compile(
fw_module, adjusted_flat_args, aot_config, fw_metadata=fw_metadata
)
# Set tracing context
if tracing_context := torch._guards.TracingContext.try_get():
tracing_context.fw_metadata = _get_inner_meta(
maybe_subclass_meta, fw_metadata
)
with TracingContext.report_output_strides() as fwd_output_strides:
compiled_fw_func = compiler(fw_module, adjusted_flat_args)
# Make boxed if needed
if not getattr(compiled_fw_func, "_boxed_call", False):
compiled_fw_func = make_boxed_func(compiled_fw_func)
# Set forward output strides if needed
if fakified_out_wrapper.needs_post_compile:
fakified_out_wrapper.set_fwd_output_strides(fwd_output_strides)
# Apply post-compile wrappers
compiled_fw_func = EffectTokensWrapper().post_compile(
compiled_fw_func,
aot_config,
runtime_metadata=fw_metadata,
)
compiled_fw_func = AOTDispatchSubclassWrapper(
fw_only=None,
trace_joint=False,
maybe_subclass_meta=maybe_subclass_meta,
num_fw_outs_saved_for_bw=num_fw_outs_saved_for_bw,
).post_compile(
compiled_fw_func,
aot_config,
runtime_metadata=fw_metadata,
)
compiled_fw_func = functionalized_rng_wrapper.post_compile(
compiled_fw_func, aot_config, runtime_metadata=fw_metadata
)
compiled_fw_func = fakified_out_wrapper.post_compile(
compiled_fw_func,
aot_config,
runtime_metadata=fw_metadata,
)
return fwd_output_strides, compiled_fw_func
|
Compile the forward or inference graph. Returns:
- the output strides of the forward graph
- the compiled forward/inference function
Args:
fw_module: The forward graph module to compile
adjusted_flat_args: Flattened arguments after adjustments
maybe_subclass_meta: Metadata for tensor subclasses
fw_metadata: View and mutation metadata
aot_config: AOT configuration
is_inference: If True, compile for inference; if False, compile for forward (autograd)
num_fw_outs_saved_for_bw: Number of forward outputs saved for backward (required if not is_inference)
Before compiling, we run pre_compile for the following wrappers:
- FakifiedOutWrapper
- FunctionalizedRngRuntimeWrapper
After compiling, we run post_compile for the following wrappers:
- EffectTokensWrapper
- AOTDispatchSubclassWrapper
- FunctionalizedRngRuntimeWrapper
- FakifiedOutWrapper
|
python
|
torch/_functorch/_aot_autograd/graph_compile.py
| 2,270
|
[
"fw_module",
"adjusted_flat_args",
"maybe_subclass_meta",
"fw_metadata",
"aot_config",
"is_inference",
"num_fw_outs_saved_for_bw"
] |
tuple[Optional[list[Optional[tuple[int, ...]]]], Callable]
| true
| 11
| 6.72
|
pytorch/pytorch
| 96,034
|
google
| false
|
estimateMin
|
public static OptionalDouble estimateMin(
ZeroBucket zeroBucket,
ExponentialHistogram.Buckets negativeBuckets,
ExponentialHistogram.Buckets positiveBuckets
) {
int scale = negativeBuckets.iterator().scale();
assert scale == positiveBuckets.iterator().scale();
OptionalLong negativeMaxIndex = negativeBuckets.maxBucketIndex();
if (negativeMaxIndex.isPresent()) {
return OptionalDouble.of(-ExponentialScaleUtils.getUpperBucketBoundary(negativeMaxIndex.getAsLong(), scale));
}
if (zeroBucket.count() > 0) {
if (zeroBucket.zeroThreshold() == 0.0) {
// avoid negative zero
return OptionalDouble.of(0.0);
}
return OptionalDouble.of(-zeroBucket.zeroThreshold());
}
BucketIterator positiveBucketsIt = positiveBuckets.iterator();
if (positiveBucketsIt.hasNext()) {
return OptionalDouble.of(ExponentialScaleUtils.getLowerBucketBoundary(positiveBucketsIt.peekIndex(), scale));
}
return OptionalDouble.empty();
}
|
Estimates the minimum value of the histogram based on the populated buckets.
The returned value is guaranteed to be less than or equal to the exact minimum value of the histogram values.
If the histogram is empty, an empty Optional is returned.
<p>
Note that this method can return +-Infinity if the histogram bucket boundaries are not representable in a double.
@param zeroBucket the zero bucket of the histogram
@param negativeBuckets the negative buckets of the histogram
@param positiveBuckets the positive buckets of the histogram
@return the estimated minimum
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogramUtils.java
| 78
|
[
"zeroBucket",
"negativeBuckets",
"positiveBuckets"
] |
OptionalDouble
| true
| 5
| 7.92
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
convert_object_array
|
def convert_object_array(
content: list[npt.NDArray[np.object_]],
dtype: DtypeObj | None,
dtype_backend: str = "numpy",
coerce_float: bool = False,
) -> list[ArrayLike]:
"""
Internal function to convert object array.
Parameters
----------
content: List[np.ndarray]
dtype: np.dtype or ExtensionDtype
dtype_backend: Controls if nullable/pyarrow dtypes are returned.
coerce_float: Cast floats that are integers to int.
Returns
-------
List[ArrayLike]
"""
# provide soft conversion of object dtypes
def convert(arr):
if dtype != np.dtype("O"):
# e.g. if dtype is UInt32 then we want to cast Nones to NA instead of
# NaN in maybe_convert_objects.
to_nullable = dtype_backend != "numpy" or isinstance(dtype, BaseMaskedDtype)
arr = lib.maybe_convert_objects(
arr,
try_float=coerce_float,
convert_to_nullable_dtype=to_nullable,
)
# Notes on cases that get here 2023-02-15
# 1) we DO get here when arr is all Timestamps and dtype=None
# 2) disabling this doesn't break the world, so this must be
# getting caught at a higher level
# 3) passing convert_non_numeric to maybe_convert_objects get this right
# 4) convert_non_numeric?
if dtype is None:
if arr.dtype == np.dtype("O"):
# i.e. maybe_convert_objects didn't convert
convert_to_nullable_dtype = dtype_backend != "numpy"
arr = lib.maybe_convert_objects(
arr,
# Here we do not convert numeric dtypes, as if we wanted that,
# numpy would have done it for us.
convert_numeric=False,
convert_non_numeric=True,
convert_to_nullable_dtype=convert_to_nullable_dtype,
dtype_if_all_nat=np.dtype("M8[s]"),
)
if convert_to_nullable_dtype and arr.dtype == np.dtype("O"):
new_dtype = StringDtype()
arr_cls = new_dtype.construct_array_type()
arr = arr_cls._from_sequence(arr, dtype=new_dtype)
elif dtype_backend != "numpy" and isinstance(arr, np.ndarray):
if arr.dtype.kind in "iufb":
arr = pd_array(arr, copy=False)
elif isinstance(dtype, ExtensionDtype):
# TODO: test(s) that get here
# TODO: try to de-duplicate this convert function with
# core.construction functions
cls = dtype.construct_array_type()
arr = cls._from_sequence(arr, dtype=dtype, copy=False)
elif dtype.kind in "mM":
# This restriction is harmless bc these are the only cases
# where maybe_cast_to_datetime is not a no-op.
# Here we know:
# 1) dtype.kind in "mM" and
# 2) arr is either object or numeric dtype
arr = maybe_cast_to_datetime(arr, dtype)
return arr
arrays = [convert(arr) for arr in content]
return arrays
|
Internal function to convert object array.
Parameters
----------
content: List[np.ndarray]
dtype: np.dtype or ExtensionDtype
dtype_backend: Controls if nullable/pyarrow dtypes are returned.
coerce_float: Cast floats that are integers to int.
Returns
-------
List[ArrayLike]
|
python
|
pandas/core/internals/construction.py
| 949
|
[
"content",
"dtype",
"dtype_backend",
"coerce_float"
] |
list[ArrayLike]
| true
| 12
| 6.32
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
tryParseAsyncSimpleArrowFunctionExpression
|
function tryParseAsyncSimpleArrowFunctionExpression(allowReturnTypeInArrowFunction: boolean): ArrowFunction | undefined {
// We do a check here so that we won't be doing unnecessarily call to "lookAhead"
if (token() === SyntaxKind.AsyncKeyword) {
if (lookAhead(isUnParenthesizedAsyncArrowFunctionWorker) === Tristate.True) {
const pos = getNodePos();
const hasJSDoc = hasPrecedingJSDocComment();
const asyncModifier = parseModifiersForArrowFunction();
const expr = parseBinaryExpressionOrHigher(OperatorPrecedence.Lowest);
return parseSimpleArrowFunctionExpression(pos, expr as Identifier, allowReturnTypeInArrowFunction, hasJSDoc, asyncModifier);
}
}
return undefined;
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 5,395
|
[
"allowReturnTypeInArrowFunction"
] | true
| 3
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
delete_any_nodegroups
|
def delete_any_nodegroups(self) -> None:
"""
Delete all Amazon EKS managed node groups for a provided Amazon EKS Cluster.
Amazon EKS managed node groups can be deleted in parallel, so we can send all
delete commands in bulk and move on once the count of nodegroups is zero.
"""
nodegroups = self.hook.list_nodegroups(clusterName=self.cluster_name)
if nodegroups:
self.log.info(CAN_NOT_DELETE_MSG.format(compute=NODEGROUP_FULL_NAME, count=len(nodegroups)))
for group in nodegroups:
self.hook.delete_nodegroup(clusterName=self.cluster_name, nodegroupName=group)
# Note this is a custom waiter so we're using hook.get_waiter(), not hook.conn.get_waiter().
self.log.info("Waiting for all nodegroups to delete. This will take some time.")
self.hook.get_waiter("all_nodegroups_deleted").wait(clusterName=self.cluster_name)
self.log.info(SUCCESS_MSG.format(compute=NODEGROUP_FULL_NAME))
|
Delete all Amazon EKS managed node groups for a provided Amazon EKS Cluster.
Amazon EKS managed node groups can be deleted in parallel, so we can send all
delete commands in bulk and move on once the count of nodegroups is zero.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/operators/eks.py
| 764
|
[
"self"
] |
None
| true
| 3
| 6
|
apache/airflow
| 43,597
|
unknown
| false
|
buildKeyConfig
|
public SslKeyConfig buildKeyConfig(Path basePath) {
final String certificatePath = stringSetting(CERTIFICATE);
final String keyPath = stringSetting(KEY);
final String keyStorePath = stringSetting(KEYSTORE_PATH);
if (certificatePath != null && keyStorePath != null) {
throw new SslConfigException(
"cannot specify both [" + settingPrefix + CERTIFICATE + "] and [" + settingPrefix + KEYSTORE_PATH + "]"
);
}
if (certificatePath != null || keyPath != null) {
if (keyPath == null) {
throw new SslConfigException(
"cannot specify [" + settingPrefix + CERTIFICATE + "] without also setting [" + settingPrefix + KEY + "]"
);
}
if (certificatePath == null) {
throw new SslConfigException(
"cannot specify [" + settingPrefix + KEY + "] without also setting [" + settingPrefix + CERTIFICATE + "]"
);
}
final char[] password = resolvePasswordSetting(KEY_SECURE_PASSPHRASE, KEY_LEGACY_PASSPHRASE);
return new PemKeyConfig(certificatePath, keyPath, password, basePath);
}
if (keyStorePath != null) {
final char[] storePassword = resolvePasswordSetting(KEYSTORE_SECURE_PASSWORD, KEYSTORE_LEGACY_PASSWORD);
char[] keyPassword = resolvePasswordSetting(KEYSTORE_SECURE_KEY_PASSWORD, KEYSTORE_LEGACY_KEY_PASSWORD);
if (keyPassword.length == 0) {
keyPassword = storePassword;
}
final String storeType = resolveSetting(KEYSTORE_TYPE, Function.identity(), inferKeyStoreType(keyStorePath));
final String algorithm = resolveSetting(KEYSTORE_ALGORITHM, Function.identity(), KeyManagerFactory.getDefaultAlgorithm());
return new StoreKeyConfig(keyStorePath, storePassword, storeType, keyStoreFilter, keyPassword, algorithm, basePath);
}
return defaultKeyConfig;
}
|
Resolve all necessary configuration settings, and load a {@link SslConfiguration}.
@param basePath The base path to use for any settings that represent file paths. Typically points to the Elasticsearch
configuration directory.
@throws SslConfigException For any problems with the configuration, or with loading the required SSL classes.
|
java
|
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java
| 376
|
[
"basePath"
] |
SslKeyConfig
| true
| 9
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
is_categorical_dtype
|
def is_categorical_dtype(arr_or_dtype) -> bool:
"""
Check whether an array-like or dtype is of the Categorical dtype.
.. deprecated:: 2.2.0
Use isinstance(dtype, pd.CategoricalDtype) instead.
Parameters
----------
arr_or_dtype : array-like or dtype
The array-like or dtype to check.
Returns
-------
boolean
Whether or not the array-like or dtype is of the Categorical dtype.
See Also
--------
api.types.is_list_like: Check if the object is list-like.
api.types.is_complex_dtype: Check whether the provided array or
dtype is of a complex dtype.
Examples
--------
>>> from pandas.api.types import is_categorical_dtype
>>> from pandas import CategoricalDtype
>>> is_categorical_dtype(object)
False
>>> is_categorical_dtype(CategoricalDtype())
True
>>> is_categorical_dtype([1, 2, 3])
False
>>> is_categorical_dtype(pd.Categorical([1, 2, 3]))
True
>>> is_categorical_dtype(pd.CategoricalIndex([1, 2, 3]))
True
"""
# GH#52527
warnings.warn(
"is_categorical_dtype is deprecated and will be removed in a future "
"version. Use isinstance(dtype, pd.CategoricalDtype) instead",
Pandas4Warning,
stacklevel=2,
)
if isinstance(arr_or_dtype, ExtensionDtype):
# GH#33400 fastpath for dtype object
return arr_or_dtype.name == "category"
if arr_or_dtype is None:
return False
return CategoricalDtype.is_dtype(arr_or_dtype)
|
Check whether an array-like or dtype is of the Categorical dtype.
.. deprecated:: 2.2.0
Use isinstance(dtype, pd.CategoricalDtype) instead.
Parameters
----------
arr_or_dtype : array-like or dtype
The array-like or dtype to check.
Returns
-------
boolean
Whether or not the array-like or dtype is of the Categorical dtype.
See Also
--------
api.types.is_list_like: Check if the object is list-like.
api.types.is_complex_dtype: Check whether the provided array or
dtype is of a complex dtype.
Examples
--------
>>> from pandas.api.types import is_categorical_dtype
>>> from pandas import CategoricalDtype
>>> is_categorical_dtype(object)
False
>>> is_categorical_dtype(CategoricalDtype())
True
>>> is_categorical_dtype([1, 2, 3])
False
>>> is_categorical_dtype(pd.Categorical([1, 2, 3]))
True
>>> is_categorical_dtype(pd.CategoricalIndex([1, 2, 3]))
True
|
python
|
pandas/core/dtypes/common.py
| 549
|
[
"arr_or_dtype"
] |
bool
| true
| 3
| 7.84
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
partial_fit
|
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when :meth:`fit` is not feasible due to very large number of
`n_samples` or because X is read from a continuous stream.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : None
Ignored.
Returns
-------
self : object
Fitted scaler.
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError(
"Minimum of desired feature range must be smaller than maximum. Got %s."
% str(feature_range)
)
if sparse.issparse(X):
raise TypeError(
"MinMaxScaler does not support sparse input. "
"Consider using MaxAbsScaler instead."
)
xp, _ = get_namespace(X)
first_pass = not hasattr(self, "n_samples_seen_")
X = validate_data(
self,
X,
reset=first_pass,
dtype=_array_api.supported_float_dtypes(xp),
ensure_all_finite="allow-nan",
)
device_ = device(X)
feature_range = (
xp.asarray(feature_range[0], dtype=X.dtype, device=device_),
xp.asarray(feature_range[1], dtype=X.dtype, device=device_),
)
data_min = _array_api._nanmin(X, axis=0, xp=xp)
data_max = _array_api._nanmax(X, axis=0, xp=xp)
if first_pass:
self.n_samples_seen_ = X.shape[0]
else:
data_min = xp.minimum(self.data_min_, data_min)
data_max = xp.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = (feature_range[1] - feature_range[0]) / _handle_zeros_in_scale(
data_range, copy=True
)
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
|
Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when :meth:`fit` is not feasible due to very large number of
`n_samples` or because X is read from a continuous stream.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : None
Ignored.
Returns
-------
self : object
Fitted scaler.
|
python
|
sklearn/preprocessing/_data.py
| 474
|
[
"self",
"X",
"y"
] | false
| 5
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
_idxmax_idxmin
|
def _idxmax_idxmin(
self,
how: Literal["idxmax", "idxmin"],
ignore_unobserved: bool = False,
skipna: bool = True,
numeric_only: bool = False,
) -> NDFrameT:
"""Compute idxmax/idxmin.
Parameters
----------
how : {'idxmin', 'idxmax'}
Whether to compute idxmin or idxmax.
numeric_only : bool, default False
Include only float, int, boolean columns.
skipna : bool, default True
Exclude NA/null values. If an entire group is NA, the result will be NA.
ignore_unobserved : bool, default False
When True and an unobserved group is encountered, do not raise. This used
for transform where unobserved groups do not play an impact on the result.
Returns
-------
Series or DataFrame
idxmax or idxmin for the groupby operation.
"""
if not self.observed and any(
ping._passed_categorical for ping in self._grouper.groupings
):
expected_len = len(self._grouper.result_index)
# TODO: Better way to find # of observed groups?
group_sizes = self._grouper.size()
result_len = group_sizes[group_sizes > 0].shape[0]
assert result_len <= expected_len
has_unobserved = result_len < expected_len
raise_err: bool | np.bool_ = not ignore_unobserved and has_unobserved
# Only raise an error if there are columns to compute; otherwise we return
# an empty DataFrame with an index (possibly including unobserved) but no
# columns
data = self._obj_with_exclusions
if raise_err and isinstance(data, DataFrame):
if numeric_only:
data = data._get_numeric_data()
raise_err = len(data.columns) > 0
if raise_err:
raise ValueError(
f"Can't get {how} of an empty group due to unobserved categories. "
"Specify observed=True in groupby instead."
)
elif not skipna and self._obj_with_exclusions.isna().any(axis=None):
raise ValueError(f"{how} with skipna=False encountered an NA value.")
result = self._agg_general(
numeric_only=numeric_only,
min_count=1,
alias=how,
skipna=skipna,
)
return result
|
Compute idxmax/idxmin.
Parameters
----------
how : {'idxmin', 'idxmax'}
Whether to compute idxmin or idxmax.
numeric_only : bool, default False
Include only float, int, boolean columns.
skipna : bool, default True
Exclude NA/null values. If an entire group is NA, the result will be NA.
ignore_unobserved : bool, default False
When True and an unobserved group is encountered, do not raise. This used
for transform where unobserved groups do not play an impact on the result.
Returns
-------
Series or DataFrame
idxmax or idxmin for the groupby operation.
|
python
|
pandas/core/groupby/groupby.py
| 5,668
|
[
"self",
"how",
"ignore_unobserved",
"skipna",
"numeric_only"
] |
NDFrameT
| true
| 10
| 6.96
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
acknowledge
|
public void acknowledge(final String topic, final int partition, final long offset, final AcknowledgeType type) {
for (Map.Entry<TopicIdPartition, ShareInFlightBatch<K, V>> tipBatch : batches.entrySet()) {
TopicIdPartition tip = tipBatch.getKey();
ShareInFlightBatchException exception = tipBatch.getValue().getException();
if (tip.topic().equals(topic) && (tip.partition() == partition) &&
exception != null &&
exception.offsets().contains(offset)) {
tipBatch.getValue().addAcknowledgement(offset, type);
return;
}
}
throw new IllegalStateException("The record cannot be acknowledged.");
}
|
Acknowledge a single record which experienced an exception during its delivery by its topic, partition
and offset in the current batch. This method is specifically for overriding the default acknowledge
type for records whose delivery failed.
@param topic The topic of the record to acknowledge
@param partition The partition of the record
@param offset The offset of the record
@param type The acknowledge type which indicates whether it was processed successfully
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetch.java
| 180
|
[
"topic",
"partition",
"offset",
"type"
] |
void
| true
| 5
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
drain
|
void drain() {
if (!isConsumed) {
maybeCloseRecordStream();
cachedRecordException = null;
this.isConsumed = true;
recordAggregatedMetrics(bytesRead, recordsRead);
// we move the partition to the end if we received some bytes. This way, it's more likely that partitions
// for the same topic can remain together (allowing for more efficient serialization).
if (bytesRead > 0)
subscriptions.movePartitionToEnd(partition);
}
}
|
Draining a {@link CompletedFetch} will signal that the data has been consumed and the underlying resources
are closed. This is somewhat analogous to {@link Closeable#close() closing}, though no error will result if a
caller invokes {@link #fetchRecords(FetchConfig, Deserializers, int)}; an empty {@link List list} will be
returned instead.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/CompletedFetch.java
| 139
|
[] |
void
| true
| 3
| 6.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
unregisterBroker
|
@InterfaceStability.Unstable
UnregisterBrokerResult unregisterBroker(int brokerId, UnregisterBrokerOptions options);
|
Unregister a broker.
<p>
This operation does not have any effect on partition assignments.
The following exceptions can be anticipated when calling {@code get()} on the future from the
returned {@link UnregisterBrokerResult}:
<ul>
<li>{@link org.apache.kafka.common.errors.TimeoutException}
If the request timed out before the describe operation could finish.</li>
<li>{@link org.apache.kafka.common.errors.UnsupportedVersionException}
If the software is too old to support the unregistration API.
</ul>
<p>
@param brokerId the broker id to unregister.
@param options the options to use.
@return the {@link UnregisterBrokerResult} containing the result
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 1,665
|
[
"brokerId",
"options"
] |
UnregisterBrokerResult
| true
| 1
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
appendSeparator
|
public StrBuilder appendSeparator(final char separator) {
if (isNotEmpty()) {
append(separator);
}
return this;
}
|
Appends a separator if the builder is currently non-empty.
The separator is appended using {@link #append(char)}.
<p>
This method is useful for adding a separator each time around the
loop except the first.
</p>
<pre>
for (Iterator it = list.iterator(); it.hasNext(); ) {
appendSeparator(',');
append(it.next());
}
</pre>
Note that for this simple example, you should use
{@link #appendWithSeparators(Iterable, String)}.
@param separator the separator to use
@return {@code this} instance.
@since 2.3
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,219
|
[
"separator"
] |
StrBuilder
| true
| 2
| 7.44
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
easy_dtype
|
def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
"""
Convenience function to create a `np.dtype` object.
The function processes the input `dtype` and matches it with the given
names.
Parameters
----------
ndtype : var
Definition of the dtype. Can be any string or dictionary recognized
by the `np.dtype` function, or a sequence of types.
names : str or sequence, optional
Sequence of strings to use as field names for a structured dtype.
For convenience, `names` can be a string of a comma-separated list
of names.
defaultfmt : str, optional
Format string used to define missing names, such as ``"f%i"``
(default) or ``"fields_%02i"``.
validationargs : optional
A series of optional arguments used to initialize a
`NameValidator`.
Examples
--------
>>> import numpy as np
>>> np.lib._iotools.easy_dtype(float)
dtype('float64')
>>> np.lib._iotools.easy_dtype("i4, f8")
dtype([('f0', '<i4'), ('f1', '<f8')])
>>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i")
dtype([('field_000', '<i4'), ('field_001', '<f8')])
>>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c")
dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])
>>> np.lib._iotools.easy_dtype(float, names="a,b,c")
dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
"""
try:
ndtype = np.dtype(ndtype)
except TypeError:
validate = NameValidator(**validationargs)
nbfields = len(ndtype)
if names is None:
names = [''] * len(ndtype)
elif isinstance(names, str):
names = names.split(",")
names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)
ndtype = np.dtype({"formats": ndtype, "names": names})
else:
# Explicit names
if names is not None:
validate = NameValidator(**validationargs)
if isinstance(names, str):
names = names.split(",")
# Simple dtype: repeat to match the nb of names
if ndtype.names is None:
formats = tuple([ndtype.type] * len(names))
names = validate(names, defaultfmt=defaultfmt)
ndtype = np.dtype(list(zip(names, formats)))
# Structured dtype: just validate the names as needed
else:
ndtype.names = validate(names, nbfields=len(ndtype.names),
defaultfmt=defaultfmt)
# No implicit names
elif ndtype.names is not None:
validate = NameValidator(**validationargs)
# Default initial names : should we change the format ?
numbered_names = tuple(f"f{i}" for i in range(len(ndtype.names)))
if ((ndtype.names == numbered_names) and (defaultfmt != "f%i")):
ndtype.names = validate([''] * len(ndtype.names),
defaultfmt=defaultfmt)
# Explicit initial names : just validate
else:
ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)
return ndtype
|
Convenience function to create a `np.dtype` object.
The function processes the input `dtype` and matches it with the given
names.
Parameters
----------
ndtype : var
Definition of the dtype. Can be any string or dictionary recognized
by the `np.dtype` function, or a sequence of types.
names : str or sequence, optional
Sequence of strings to use as field names for a structured dtype.
For convenience, `names` can be a string of a comma-separated list
of names.
defaultfmt : str, optional
Format string used to define missing names, such as ``"f%i"``
(default) or ``"fields_%02i"``.
validationargs : optional
A series of optional arguments used to initialize a
`NameValidator`.
Examples
--------
>>> import numpy as np
>>> np.lib._iotools.easy_dtype(float)
dtype('float64')
>>> np.lib._iotools.easy_dtype("i4, f8")
dtype([('f0', '<i4'), ('f1', '<f8')])
>>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i")
dtype([('field_000', '<i4'), ('field_001', '<f8')])
>>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c")
dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])
>>> np.lib._iotools.easy_dtype(float, names="a,b,c")
dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
|
python
|
numpy/lib/_iotools.py
| 824
|
[
"ndtype",
"names",
"defaultfmt"
] | false
| 12
| 7.44
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
default_dtypes
|
def default_dtypes(self, *, device=None):
"""
The default data types used for new CuPy arrays.
For CuPy, this always returns the following dictionary:
- **"real floating"**: ``cupy.float64``
- **"complex floating"**: ``cupy.complex128``
- **"integral"**: ``cupy.intp``
- **"indexing"**: ``cupy.intp``
Parameters
----------
device : str, optional
The device to get the default data types for.
Returns
-------
dtypes : dict
A dictionary describing the default data types used for new CuPy
arrays.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': cupy.float64,
'complex floating': cupy.complex128,
'integral': cupy.int64,
'indexing': cupy.int64}
"""
# TODO: Does this depend on device?
return {
"real floating": dtype(float64),
"complex floating": dtype(complex128),
"integral": dtype(intp),
"indexing": dtype(intp),
}
|
The default data types used for new CuPy arrays.
For CuPy, this always returns the following dictionary:
- **"real floating"**: ``cupy.float64``
- **"complex floating"**: ``cupy.complex128``
- **"integral"**: ``cupy.intp``
- **"indexing"**: ``cupy.intp``
Parameters
----------
device : str, optional
The device to get the default data types for.
Returns
-------
dtypes : dict
A dictionary describing the default data types used for new CuPy
arrays.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': cupy.float64,
'complex floating': cupy.complex128,
'integral': cupy.int64,
'indexing': cupy.int64}
|
python
|
sklearn/externals/array_api_compat/cupy/_info.py
| 142
|
[
"self",
"device"
] | false
| 1
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
create
|
static ReleasableExponentialHistogram create(int maxBucketCount, ExponentialHistogramCircuitBreaker breaker, double... values) {
try (ExponentialHistogramGenerator generator = ExponentialHistogramGenerator.create(maxBucketCount, breaker)) {
for (double val : values) {
generator.add(val);
}
return generator.getAndClear();
}
}
|
Creates a histogram representing the distribution of the given values with at most the given number of buckets.
If the given {@code maxBucketCount} is greater than or equal to the number of values, the resulting histogram will have a
relative error of less than {@code 2^(2^-MAX_SCALE) - 1}.
@param maxBucketCount the maximum number of buckets
@param breaker the circuit breaker to use to limit memory allocations
@param values the values to be added to the histogram
@return a new {@link ReleasableExponentialHistogram}
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogram.java
| 253
|
[
"maxBucketCount",
"breaker"
] |
ReleasableExponentialHistogram
| true
| 1
| 6.4
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
lstat
|
function lstat(path, options = { bigint: false }, callback) {
if (typeof options === 'function') {
callback = options;
options = kEmptyObject;
}
callback = makeStatsCallback(callback);
path = getValidatedPath(path);
if (permission.isEnabled() && !permission.has('fs.read', path)) {
const resource = BufferIsBuffer(path) ? BufferToString(path) : path;
callback(new ERR_ACCESS_DENIED('Access to this API has been restricted', 'FileSystemRead', resource));
return;
}
const req = new FSReqCallback(options.bigint);
req.oncomplete = callback;
binding.lstat(path, options.bigint, req);
}
|
Retrieves the `fs.Stats` for the symbolic link
referred to by the `path`.
@param {string | Buffer | URL} path
@param {{ bigint?: boolean; }} [options]
@param {(
err?: Error,
stats?: Stats
) => any} callback
@returns {void}
|
javascript
|
lib/fs.js
| 1,589
|
[
"path",
"callback"
] | false
| 5
| 6.08
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
prepareSchedulerFactory
|
private SchedulerFactory prepareSchedulerFactory() throws SchedulerException, IOException {
SchedulerFactory schedulerFactory = this.schedulerFactory;
if (schedulerFactory == null) {
// Create local SchedulerFactory instance (typically a LocalSchedulerFactory)
schedulerFactory = (this.schedulerFactoryClass == LocalSchedulerFactory.class ?
new LocalSchedulerFactory() : BeanUtils.instantiateClass(this.schedulerFactoryClass));
if (schedulerFactory instanceof StdSchedulerFactory stdSchedulerFactory) {
initSchedulerFactory(stdSchedulerFactory);
}
else if (this.configLocation != null || this.quartzProperties != null ||
this.taskExecutor != null || this.dataSource != null) {
throw new IllegalArgumentException(
"StdSchedulerFactory required for applying Quartz properties: " + schedulerFactory);
}
// Otherwise, no local settings to be applied via StdSchedulerFactory.initialize(Properties)
}
// Otherwise, assume that externally provided factory has been initialized with appropriate settings
return schedulerFactory;
}
|
Create a SchedulerFactory if necessary and apply locally defined Quartz properties to it.
@return the initialized SchedulerFactory
|
java
|
spring-context-support/src/main/java/org/springframework/scheduling/quartz/SchedulerFactoryBean.java
| 512
|
[] |
SchedulerFactory
| true
| 8
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
register
|
private void register(@Nullable Deprecated annotation) {
if (annotation != null) {
if (annotation.forRemoval()) {
register("removal");
}
else {
register("deprecation");
}
}
}
|
Return the currently registered warnings.
@return the warnings
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/CodeWarnings.java
| 152
|
[
"annotation"
] |
void
| true
| 3
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
contains
|
public boolean contains(final CharRange range) {
Objects.requireNonNull(range, "range");
if (negated) {
if (range.negated) {
return start >= range.start && end <= range.end;
}
return range.end < start || range.start > end;
}
if (range.negated) {
return start == 0 && end == Character.MAX_VALUE;
}
return start <= range.start && end >= range.end;
}
|
Are all the characters of the passed in range contained in
this range.
@param range the range to check against.
@return {@code true} if this range entirely contains the input range.
@throws NullPointerException if {@code null} input.
|
java
|
src/main/java/org/apache/commons/lang3/CharRange.java
| 259
|
[
"range"
] | true
| 8
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
getPropertySymbolsFromBaseTypes
|
function getPropertySymbolsFromBaseTypes<T>(symbol: Symbol, propertyName: string, checker: TypeChecker, cb: (symbol: Symbol) => T | undefined): T | undefined {
const seen = new Set<Symbol>();
return recur(symbol);
function recur(symbol: Symbol): T | undefined {
// Use `addToSeen` to ensure we don't infinitely recurse in this situation:
// interface C extends C {
// /*findRef*/propName: string;
// }
if (!(symbol.flags & (SymbolFlags.Class | SymbolFlags.Interface)) || !addToSeen(seen, symbol)) return;
return firstDefined(symbol.declarations, declaration =>
firstDefined(getAllSuperTypeNodes(declaration), typeReference => {
const type = checker.getTypeAtLocation(typeReference);
const propertySymbol = type.symbol && checker.getPropertyOfType(type, propertyName);
// Visit the typeReference as well to see if it directly or indirectly uses that property
// When `propertySymbol` is missing continue the recursion through parents as some parent up the chain might be an abstract class that implements interface having the property
return propertySymbol && firstDefined(checker.getRootSymbols(propertySymbol), cb) || type.symbol && recur(type.symbol);
}));
}
}
|
Find symbol of the given property-name and add the symbol to the given result array
@param symbol a symbol to start searching for the given propertyName
@param propertyName a name of property to search for
@param result an array of symbol of found property symbols
@param previousIterationSymbolsCache a cache of symbol from previous iterations of calling this function to prevent infinite revisiting of the same symbol.
The value of previousIterationSymbol is undefined when the function is first called.
|
typescript
|
src/services/findAllReferences.ts
| 2,668
|
[
"symbol",
"propertyName",
"checker",
"cb"
] | true
| 7
| 6.56
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
read_iceberg
|
def read_iceberg(
table_identifier: str,
catalog_name: str | None = None,
*,
catalog_properties: dict[str, Any] | None = None,
row_filter: str | None = None,
selected_fields: tuple[str] | None = None,
case_sensitive: bool = True,
snapshot_id: int | None = None,
limit: int | None = None,
scan_properties: dict[str, Any] | None = None,
) -> DataFrame:
"""
Read an Apache Iceberg table into a pandas DataFrame.
.. versionadded:: 3.0.0
.. warning::
read_iceberg is experimental and may change without warning.
Parameters
----------
table_identifier : str
Table identifier.
catalog_name : str, optional
The name of the catalog.
catalog_properties : dict of {str: str}, optional
The properties that are used next to the catalog configuration.
row_filter : str, optional
A string that describes the desired rows.
selected_fields : tuple of str, optional
A tuple of strings representing the column names to return in the output
dataframe.
case_sensitive : bool, default True
If True column matching is case sensitive.
snapshot_id : int, optional
Snapshot ID to time travel to. By default the table will be scanned as of the
current snapshot ID.
limit : int, optional
An integer representing the number of rows to return in the scan result.
By default all matching rows will be fetched.
scan_properties : dict of {str: obj}, optional
Additional Table properties as a dictionary of string key value pairs to use
for this scan.
Returns
-------
DataFrame
DataFrame based on the Iceberg table.
See Also
--------
read_parquet : Read a Parquet file.
Examples
--------
>>> df = pd.read_iceberg(
... table_identifier="my_table",
... catalog_name="my_catalog",
... catalog_properties={"s3.secret-access-key": "my-secret"},
... row_filter="trip_distance >= 10.0",
... selected_fields=("VendorID", "tpep_pickup_datetime"),
... ) # doctest: +SKIP
"""
pyiceberg_catalog = import_optional_dependency("pyiceberg.catalog")
pyiceberg_expressions = import_optional_dependency("pyiceberg.expressions")
if catalog_properties is None:
catalog_properties = {}
catalog = pyiceberg_catalog.load_catalog(catalog_name, **catalog_properties)
table = catalog.load_table(table_identifier)
if row_filter is None:
row_filter = pyiceberg_expressions.AlwaysTrue()
if selected_fields is None:
selected_fields = ("*",)
if scan_properties is None:
scan_properties = {}
result = table.scan(
row_filter=row_filter,
selected_fields=selected_fields,
case_sensitive=case_sensitive,
snapshot_id=snapshot_id,
options=scan_properties,
limit=limit,
)
return result.to_pandas()
|
Read an Apache Iceberg table into a pandas DataFrame.
.. versionadded:: 3.0.0
.. warning::
read_iceberg is experimental and may change without warning.
Parameters
----------
table_identifier : str
Table identifier.
catalog_name : str, optional
The name of the catalog.
catalog_properties : dict of {str: str}, optional
The properties that are used next to the catalog configuration.
row_filter : str, optional
A string that describes the desired rows.
selected_fields : tuple of str, optional
A tuple of strings representing the column names to return in the output
dataframe.
case_sensitive : bool, default True
If True column matching is case sensitive.
snapshot_id : int, optional
Snapshot ID to time travel to. By default the table will be scanned as of the
current snapshot ID.
limit : int, optional
An integer representing the number of rows to return in the scan result.
By default all matching rows will be fetched.
scan_properties : dict of {str: obj}, optional
Additional Table properties as a dictionary of string key value pairs to use
for this scan.
Returns
-------
DataFrame
DataFrame based on the Iceberg table.
See Also
--------
read_parquet : Read a Parquet file.
Examples
--------
>>> df = pd.read_iceberg(
... table_identifier="my_table",
... catalog_name="my_catalog",
... catalog_properties={"s3.secret-access-key": "my-secret"},
... row_filter="trip_distance >= 10.0",
... selected_fields=("VendorID", "tpep_pickup_datetime"),
... ) # doctest: +SKIP
|
python
|
pandas/io/iceberg.py
| 12
|
[
"table_identifier",
"catalog_name",
"catalog_properties",
"row_filter",
"selected_fields",
"case_sensitive",
"snapshot_id",
"limit",
"scan_properties"
] |
DataFrame
| true
| 5
| 8.08
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
createDestructuringPropertyAccess
|
function createDestructuringPropertyAccess(flattenContext: FlattenContext, value: Expression, propertyName: PropertyName): LeftHandSideExpression {
const { factory } = flattenContext.context;
if (isComputedPropertyName(propertyName)) {
const argumentExpression = ensureIdentifier(flattenContext, Debug.checkDefined(visitNode(propertyName.expression, flattenContext.visitor, isExpression)), /*reuseIdentifierExpressions*/ false, /*location*/ propertyName);
return flattenContext.context.factory.createElementAccessExpression(value, argumentExpression);
}
else if (isStringOrNumericLiteralLike(propertyName) || isBigIntLiteral(propertyName)) {
const argumentExpression = factory.cloneNode(propertyName);
return flattenContext.context.factory.createElementAccessExpression(value, argumentExpression);
}
else {
const name = flattenContext.context.factory.createIdentifier(idText(propertyName));
return flattenContext.context.factory.createPropertyAccessExpression(value, name);
}
}
|
Creates either a PropertyAccessExpression or an ElementAccessExpression for the
right-hand side of a transformed destructuring assignment.
@link https://tc39.github.io/ecma262/#sec-runtime-semantics-keyeddestructuringassignmentevaluation
@param flattenContext Options used to control flattening.
@param value The RHS value that is the source of the property.
@param propertyName The destructuring property name.
|
typescript
|
src/compiler/transformers/destructuring.ts
| 556
|
[
"flattenContext",
"value",
"propertyName"
] | true
| 6
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
checkNonAnimatableInTimelines
|
function checkNonAnimatableInTimelines(
timelines: AnimationTimelineInstruction[],
triggerName: string,
driver: AnimationDriver,
): void {
if (!driver.validateAnimatableStyleProperty) {
return;
}
const allowedNonAnimatableProps = new Set<string>([
// 'easing' is a utility/synthetic prop we use to represent
// easing functions, it represents a property of the animation
// which is not animatable but different values can be used
// in different steps
'easing',
]);
const invalidNonAnimatableProps = new Set<string>();
timelines.forEach(({keyframes}) => {
const nonAnimatablePropsInitialValues = new Map<string, string | number>();
keyframes.forEach((keyframe) => {
const entriesToCheck = Array.from(keyframe.entries()).filter(
([prop]) => !allowedNonAnimatableProps.has(prop),
);
for (const [prop, value] of entriesToCheck) {
if (!driver.validateAnimatableStyleProperty!(prop)) {
if (nonAnimatablePropsInitialValues.has(prop) && !invalidNonAnimatableProps.has(prop)) {
const propInitialValue = nonAnimatablePropsInitialValues.get(prop);
if (propInitialValue !== value) {
invalidNonAnimatableProps.add(prop);
}
} else {
nonAnimatablePropsInitialValues.set(prop, value);
}
}
}
});
});
if (invalidNonAnimatableProps.size > 0) {
console.warn(
`Warning: The animation trigger "${triggerName}" is attempting to animate the following` +
' not animatable properties: ' +
Array.from(invalidNonAnimatableProps).join(', ') +
'\n' +
'(to check the list of all animatable properties visit https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_animated_properties)',
);
}
}
|
Checks inside a set of timelines if they try to animate a css property which is not considered
animatable, in that case it prints a warning on the console.
Besides that the function doesn't have any other effect.
Note: this check is done here after the timelines are built instead of doing on a lower level so
that we can make sure that the warning appears only once per instruction (we can aggregate here
all the issues instead of finding them separately).
@param timelines The built timelines for the current instruction.
@param triggerName The name of the trigger for the current instruction.
@param driver Animation driver used to perform the check.
|
typescript
|
packages/animations/browser/src/dsl/animation_transition_factory.ts
| 166
|
[
"timelines",
"triggerName",
"driver"
] | true
| 8
| 7.04
|
angular/angular
| 99,544
|
jsdoc
| false
|
|
forDirectFieldAccess
|
public static ConfigurablePropertyAccessor forDirectFieldAccess(Object target) {
return new DirectFieldAccessor(target);
}
|
Obtain a PropertyAccessor for the given target object,
accessing properties in direct field style.
@param target the target object to wrap
@return the property accessor
@see DirectFieldAccessor
|
java
|
spring-beans/src/main/java/org/springframework/beans/PropertyAccessorFactory.java
| 51
|
[
"target"
] |
ConfigurablePropertyAccessor
| true
| 1
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
completeIfEmpty
|
public void completeIfEmpty() {
if (remainingResults != null && remainingResults.get() == 0) {
future.ifPresent(future -> future.complete(result));
}
}
|
Handles the case where there are no results pending after initialization.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java
| 1,476
|
[] |
void
| true
| 3
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
getAllFieldsList
|
public static List<Field> getAllFieldsList(final Class<?> cls) {
Objects.requireNonNull(cls, "cls");
final List<Field> allFields = new ArrayList<>();
Class<?> currentClass = cls;
while (currentClass != null) {
Collections.addAll(allFields, currentClass.getDeclaredFields());
currentClass = currentClass.getSuperclass();
}
return allFields;
}
|
Gets all fields of the given class and its parents (if any).
@param cls
the {@link Class} to query
@return a list of Fields (possibly empty).
@throws NullPointerException
if the class is {@code null}.
@since 3.2
|
java
|
src/main/java/org/apache/commons/lang3/reflect/FieldUtils.java
| 71
|
[
"cls"
] | true
| 2
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
fuzz_non_contiguous_dense_tensor
|
def fuzz_non_contiguous_dense_tensor(
size: tuple[int, ...] | None = None, dtype: torch.dtype | None = None
) -> torch.Tensor:
"""
Specifically generates tensors that are non-contiguous but dense and non-overlapping.
Args:
size: Tensor shape/size. If None, auto-generated.
dtype: PyTorch tensor data type. If None, auto-generated.
Returns:
torch.Tensor: A non-contiguous but dense tensor
"""
if dtype is None:
dtype = fuzz_torch_tensor_type("default")
if size is None:
size = fuzz_tensor_size()
# Force non-contiguous but dense stride patterns
if len(size) <= 1:
# For 0D or 1D tensors, return contiguous (they're trivially dense)
tensor, _ = fuzz_tensor(size, None, dtype)
return tensor
# Choose from patterns that guarantee non-contiguous but dense
patterns = ["column_major", "transposed", "permuted_dense"]
pattern = random.choice(patterns)
if pattern == "column_major":
# Column-major order (non-contiguous but dense)
stride = tuple(_compute_non_contiguous_dense_strides(size))
elif pattern == "transposed":
# Simple transpose of last two dimensions
base_strides = _compute_contiguous_strides(size)
if len(base_strides) >= 2:
# Swap last two dimensions' strides
base_strides[-1], base_strides[-2] = base_strides[-2], base_strides[-1]
stride = tuple(base_strides)
else: # permuted_dense
# Random permutation that maintains density
stride = tuple(_compute_non_contiguous_dense_strides(size))
tensor, _ = fuzz_tensor(size, stride, dtype)
return tensor
|
Specifically generates tensors that are non-contiguous but dense and non-overlapping.
Args:
size: Tensor shape/size. If None, auto-generated.
dtype: PyTorch tensor data type. If None, auto-generated.
Returns:
torch.Tensor: A non-contiguous but dense tensor
|
python
|
tools/experimental/torchfuzz/tensor_fuzzer.py
| 447
|
[
"size",
"dtype"
] |
torch.Tensor
| true
| 8
| 7.6
|
pytorch/pytorch
| 96,034
|
google
| false
|
transform
|
function transform(object, iteratee, accumulator) {
var isArr = isArray(object),
isArrLike = isArr || isBuffer(object) || isTypedArray(object);
iteratee = getIteratee(iteratee, 4);
if (accumulator == null) {
var Ctor = object && object.constructor;
if (isArrLike) {
accumulator = isArr ? new Ctor : [];
}
else if (isObject(object)) {
accumulator = isFunction(Ctor) ? baseCreate(getPrototype(object)) : {};
}
else {
accumulator = {};
}
}
(isArrLike ? arrayEach : baseForOwn)(object, function(value, index, object) {
return iteratee(accumulator, value, index, object);
});
return accumulator;
}
|
An alternative to `_.reduce`; this method transforms `object` to a new
`accumulator` object which is the result of running each of its own
enumerable string keyed properties thru `iteratee`, with each invocation
potentially mutating the `accumulator` object. If `accumulator` is not
provided, a new object with the same `[[Prototype]]` will be used. The
iteratee is invoked with four arguments: (accumulator, value, key, object).
Iteratee functions may exit iteration early by explicitly returning `false`.
@static
@memberOf _
@since 1.3.0
@category Object
@param {Object} object The object to iterate over.
@param {Function} [iteratee=_.identity] The function invoked per iteration.
@param {*} [accumulator] The custom accumulator value.
@returns {*} Returns the accumulated value.
@example
_.transform([2, 3, 4], function(result, n) {
result.push(n *= n);
return n % 2 == 0;
}, []);
// => [4, 9]
_.transform({ 'a': 1, 'b': 2, 'c': 1 }, function(result, value, key) {
(result[value] || (result[value] = [])).push(key);
}, {});
// => { '1': ['a', 'c'], '2': ['b'] }
|
javascript
|
lodash.js
| 13,895
|
[
"object",
"iteratee",
"accumulator"
] | false
| 12
| 7.36
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
hashCode
|
/**
 * Returns a hash code consistent with the {@code Map.Entry} specification:
 * the XOR of the key's and value's hash codes (a {@code null} component
 * hashes to 0 via {@link Objects#hashCode(Object)}).
 *
 * @return the hash code.
 */
@Override
public int hashCode() {
    // Map.Entry contract: hash(key) XOR hash(value), null-safe.
    final int keyHash = Objects.hashCode(getKey());
    final int valueHash = Objects.hashCode(getValue());
    return keyHash ^ valueHash;
}
|
Returns a suitable hash code.
<p>
The hash code follows the definition in {@code Map.Entry}.
</p>
@return the hash code.
|
java
|
src/main/java/org/apache/commons/lang3/tuple/Pair.java
| 232
|
[] | true
| 1
| 7.2
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
generateParameterTypesCode
|
/**
 * Builds a comma-separated code fragment of {@code <Type>.class} literals,
 * one per entry in {@code parameterTypes}, preserving declaration order.
 *
 * @param parameterTypes the parameter types to render
 * @return the generated code block
 */
private CodeBlock generateParameterTypesCode(Class<?>[] parameterTypes) {
    CodeBlock.Builder code = CodeBlock.builder();
    boolean first = true;
    for (Class<?> parameterType : parameterTypes) {
        if (!first) {
            // Separate consecutive class literals with ", ".
            code.add(", ");
        }
        code.add("$T.class", parameterType);
        first = false;
    }
    return code.build();
}
|
Generate the instance supplier code.
@param registeredBean the bean to handle
@param instantiationDescriptor the executable to use to create the bean
@return the generated code
@since 6.1.7
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/InstanceSupplierCodeGenerator.java
| 389
|
[
"parameterTypes"
] |
CodeBlock
| true
| 3
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
centroids
|
/**
 * Exposes the digest's cells as a read-only collection of {@link Centroid}s.
 * Pending inputs are merged first so the view reflects every added value.
 * No centroid objects are kept internally, so they are synthesized lazily
 * from the parallel {@code mean}/{@code weight} arrays; removal is not
 * supported.
 */
@Override
public Collection<Centroid> centroids() {
    mergeNewValues();
    // We don't actually keep centroid structures around, so fake a
    // collection view over the backing arrays.
    return new AbstractCollection<>() {
        @Override
        public int size() {
            return lastUsedCell;
        }

        @Override
        public Iterator<Centroid> iterator() {
            return new Iterator<>() {
                private int cursor = 0;

                @Override
                public boolean hasNext() {
                    return cursor < lastUsedCell;
                }

                @Override
                public Centroid next() {
                    Centroid centroid = new Centroid(mean.get(cursor), (long) weight.get(cursor));
                    cursor++;
                    return centroid;
                }

                @Override
                public void remove() {
                    throw new UnsupportedOperationException("Default operation");
                }
            };
        }
    };
}
|
Merges any pending inputs and compresses the data down to the public setting.
Note that this typically loses a bit of precision and thus isn't a thing to
be doing all the time. It is best done only when we want to show results to
the outside world.
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/MergingDigest.java
| 555
|
[] | true
| 1
| 6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
pendingToString
|
/**
 * Provide a human-readable explanation of why this future has not yet completed.
 *
 * @return null if an explanation cannot be provided (e.g. because the future is done).
 * @since 23.0
 */
protected @Nullable String pendingToString() {
    // TODO(diamondm) consider moving this into addPendingString so it's always in the output
    if (this instanceof ScheduledFuture) {
        // Use a wildcard instead of the raw ScheduledFuture type to avoid an
        // unchecked/raw-type warning; getDelay is declared on Delayed, so no
        // concrete type argument is needed to call it.
        return "remaining delay=[" + ((ScheduledFuture<?>) this).getDelay(MILLISECONDS) + " ms]";
    }
    return null;
}
|
Provide a human-readable explanation of why this future has not yet completed.
@return null if an explanation cannot be provided (e.g. because the future is done).
@since 23.0
|
java
|
android/guava/src/com/google/common/util/concurrent/AbstractFuture.java
| 885
|
[] |
String
| true
| 2
| 8.4
|
google/guava
| 51,352
|
javadoc
| false
|
reserve
|
/**
 * Reserves capacity for {@code size} upcoming values, promoting the backing
 * {@link SortingDigest} to a {@link MergingDigest} once the configured
 * {@code maxSortingSize} threshold would be reached.
 *
 * @param size the number of values to reserve room for
 */
@Override
public void reserve(long size) {
    if (mergingDigest != null) {
        // Already promoted; delegate directly.
        mergingDigest.reserve(size);
        return;
    }
    assert sortingDigest != null;
    if (sortingDigest.size() + size < maxSortingSize) {
        // Still under the threshold; keep the sorting implementation.
        sortingDigest.reserve(size);
        return;
    }
    // Threshold reached: switch implementations by replaying every recorded
    // value into a fresh MergingDigest, then release the SortingDigest.
    mergingDigest = TDigest.createMergingDigest(arrays, compression);
    int recorded = sortingDigest.values.size();
    for (int index = 0; index < recorded; index++) {
        mergingDigest.add(sortingDigest.values.get(index));
    }
    mergingDigest.reserve(size);
    sortingDigest.close();
    sortingDigest = null;
}
|
Similar to the constructor above. The limit for switching from a {@link SortingDigest} to a {@link MergingDigest} implementation
is calculated based on the passed compression factor.
@param compression The compression factor for the MergingDigest
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/HybridDigest.java
| 119
|
[
"size"
] |
void
| true
| 4
| 6.4
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
addDefaultValueAssignmentsIfNeeded
|
/**
 * Adds statements to the body of a function-like node if it contains parameters with
 * binding patterns or initializers.
 *
 * @param statements The statements for the new function body.
 * @param node A function-like node.
 * @returns Whether any default-value assignment statements were added.
 */
function addDefaultValueAssignmentsIfNeeded(statements: Statement[], node: FunctionLikeDeclaration): boolean {
    if (!some(node.parameters, hasDefaultValueOrBindingPattern)) {
        return false;
    }

    let didAddAssignment = false;
    for (const parameter of node.parameters) {
        // A rest parameter cannot have a binding pattern or an initializer,
        // so let's just ignore it.
        if (parameter.dotDotDotToken) {
            continue;
        }

        const { name, initializer } = parameter;
        if (isBindingPattern(name)) {
            const inserted = insertDefaultValueAssignmentForBindingPattern(statements, parameter, name, initializer);
            didAddAssignment = inserted || didAddAssignment;
        }
        else if (initializer) {
            insertDefaultValueAssignmentForInitializer(statements, parameter, name, initializer);
            didAddAssignment = true;
        }
    }
    return didAddAssignment;
}
|
Adds statements to the body of a function-like node if it contains parameters with
binding patterns or initializers.
@param statements The statements for the new function body.
@param node A function-like node.
|
typescript
|
src/compiler/transformers/es2015.ts
| 1,903
|
[
"statements",
"node"
] | true
| 7
| 7.04
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.