function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
resetTransactionState
|
private void resetTransactionState() {
    // Re-initialize when the client must bump the epoch itself; otherwise go straight to READY.
    transitionTo(clientSideEpochBumpRequired ? State.INITIALIZING : State.READY);
    // Clear all per-transaction bookkeeping for the next transaction.
    lastError = null;
    clientSideEpochBumpRequired = false;
    transactionStarted = false;
    newPartitionsInTransaction.clear();
    pendingPartitionsInTransaction.clear();
    partitionsInTransaction.clear();
    preparedTxnState = ProducerIdAndEpoch.NONE;
}
|
Determines if the coordinator can handle an abortable error.
Recovering from an abortable error requires an epoch bump which can be triggered by the client
or automatically taken care of at the end of every transaction (Transaction V2).
Use <code>needToTriggerEpochBumpFromClient</code> to check whether the epoch bump needs to be triggered
manually.
<b>NOTE:</b>
This method should only be used for transactional producers.
There is no concept of abortable errors for idempotent producers.
@return true if an abortable error can be handled, otherwise false.
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java
| 1,332
|
[] |
void
| true
| 2
| 7.92
|
apache/kafka
| 31,560
|
javadoc
| false
|
isMaximum
|
/**
 * Returns true if every byte of the given address is 0xff, i.e. the address is
 * 255.255.255.255 for IPv4 or ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff for IPv6.
 */
public static boolean isMaximum(InetAddress address) {
    final byte[] octets = address.getAddress();
    for (int i = 0; i < octets.length; i++) {
        if (octets[i] != (byte) 0xff) {
            return false;
        }
    }
    return true;
}
|
Returns true if the InetAddress is either 255.255.255.255 for IPv4 or
ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff for IPv6.
@return true if the InetAddress is either 255.255.255.255 for IPv4 or
ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff for IPv6
@since 10.0
|
java
|
android/guava/src/com/google/common/net/InetAddresses.java
| 1,224
|
[
"address"
] | true
| 2
| 7.44
|
google/guava
| 51,352
|
javadoc
| false
|
|
_get_webhook_endpoint
|
def _get_webhook_endpoint(self, conn_id: str) -> str:
"""
Given a Chime conn_id return the default webhook endpoint.
:param conn_id: The provided connection ID.
:return: Endpoint(str) for chime webhook.
"""
conn = self.get_connection(conn_id)
token = conn.password
if token is None:
raise AirflowException("Webhook token field is missing and is required.")
if not conn.schema:
raise AirflowException("Webook schema field is missing and is required")
if not conn.host:
raise AirflowException("Webhook host field is missing and is required.")
url = conn.schema + "://" + conn.host
endpoint = url + token
# Check to make sure the endpoint matches what Chime expects
if not re.fullmatch(r"[a-zA-Z0-9_-]+\?token=[a-zA-Z0-9_-]+", token):
raise AirflowException(
"Expected Chime webhook token in the form of '{webhook.id}?token={webhook.token}'."
)
return endpoint
|
Given a Chime conn_id return the default webhook endpoint.
:param conn_id: The provided connection ID.
:return: Endpoint(str) for chime webhook.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/chime.py
| 61
|
[
"self",
"conn_id"
] |
str
| true
| 5
| 8.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
setdefault
|
def setdefault(cls, key, default, description=None, deserialize_json=False):
    """
    Return the current value for a key or store the default value and return it.

    Works the same as the Python builtin dict ``setdefault``.

    :param key: Dict key for this Variable
    :param default: Default value to set and return if the variable
        isn't already in the DB; must not be None
    :param description: Default value to set Description of the Variable
    :param deserialize_json: Store this as a JSON encoded value in the DB
        and un-encode it when retrieving a value
    :return: the existing value if the key is present, otherwise ``default``
    :raises ValueError: if the variable is absent and ``default`` is None
    """
    # Look up with a None sentinel so a missing key is distinguishable.
    obj = Variable.get(key, default_var=None, deserialize_json=deserialize_json)
    if obj is None:
        if default is not None:
            Variable.set(key=key, value=default, description=description, serialize_json=deserialize_json)
            return default
        raise ValueError("Default Value must be set")
    return obj
|
Return the current value for a key or store the default value and return it.
Works the same as the Python builtin dict object.
:param key: Dict key for this Variable
:param default: Default value to set and return if the variable
isn't already in the DB
:param description: Default value to set Description of the Variable
:param deserialize_json: Store this as a JSON encoded value in the DB
and un-encode it when retrieving a value
:param session: Session
:return: Mixed
|
python
|
airflow-core/src/airflow/models/variable.py
| 113
|
[
"cls",
"key",
"default",
"description",
"deserialize_json"
] | false
| 3
| 6.96
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
array
|
def array(self) -> ExtensionArray:
    """
    The ExtensionArray of the data backing this Series or Index.
    This property provides direct access to the underlying array data of a
    Series or Index without requiring conversion to a NumPy array. It
    returns an ExtensionArray, which is the native storage format for
    pandas extension dtypes.
    Returns
    -------
    ExtensionArray
        An ExtensionArray of the values stored within. For extension
        types, this is the actual array. For NumPy native types, this
        is a thin (no copy) wrapper around :class:`numpy.ndarray`.
        ``.array`` differs from ``.values``, which may require converting
        the data to a different form.
    See Also
    --------
    Index.to_numpy : Similar method that always returns a NumPy array.
    Series.to_numpy : Similar method that always returns a NumPy array.
    Notes
    -----
    This table lays out the different array types for each extension
    dtype within pandas.
    ================== =============================
    dtype              array type
    ================== =============================
    category           Categorical
    period             PeriodArray
    interval           IntervalArray
    IntegerNA          IntegerArray
    string             StringArray
    boolean            BooleanArray
    datetime64[ns, tz] DatetimeArray
    ================== =============================
    For any 3rd-party extension types, the array type will be an
    ExtensionArray.
    For all remaining dtypes ``.array`` will be a
    :class:`arrays.NumpyExtensionArray` wrapping the actual ndarray
    stored within. If you absolutely need a NumPy array (possibly with
    copying / coercing data), then use :meth:`Series.to_numpy` instead.
    Examples
    --------
    For regular NumPy types like int, and float, a NumpyExtensionArray
    is returned.
    >>> pd.Series([1, 2, 3]).array
    <NumpyExtensionArray>
    [1, 2, 3]
    Length: 3, dtype: int64
    For extension types, like Categorical, the actual ExtensionArray
    is returned
    >>> ser = pd.Series(pd.Categorical(["a", "b", "a"]))
    >>> ser.array
    ['a', 'b', 'a']
    Categories (2, str): ['a', 'b']
    """
    # Abstract accessor: concrete Series/Index subclasses must override this.
    raise AbstractMethodError(self)
|
The ExtensionArray of the data backing this Series or Index.
This property provides direct access to the underlying array data of a
Series or Index without requiring conversion to a NumPy array. It
returns an ExtensionArray, which is the native storage format for
pandas extension dtypes.
Returns
-------
ExtensionArray
An ExtensionArray of the values stored within. For extension
types, this is the actual array. For NumPy native types, this
is a thin (no copy) wrapper around :class:`numpy.ndarray`.
``.array`` differs from ``.values``, which may require converting
the data to a different form.
See Also
--------
Index.to_numpy : Similar method that always returns a NumPy array.
Series.to_numpy : Similar method that always returns a NumPy array.
Notes
-----
This table lays out the different array types for each extension
dtype within pandas.
================== =============================
dtype array type
================== =============================
category Categorical
period PeriodArray
interval IntervalArray
IntegerNA IntegerArray
string StringArray
boolean BooleanArray
datetime64[ns, tz] DatetimeArray
================== =============================
For any 3rd-party extension types, the array type will be an
ExtensionArray.
For all remaining dtypes ``.array`` will be a
:class:`arrays.NumpyExtensionArray` wrapping the actual ndarray
stored within. If you absolutely need a NumPy array (possibly with
copying / coercing data), then use :meth:`Series.to_numpy` instead.
Examples
--------
For regular NumPy types like int, and float, a NumpyExtensionArray
is returned.
>>> pd.Series([1, 2, 3]).array
<NumpyExtensionArray>
[1, 2, 3]
Length: 3, dtype: int64
For extension types, like Categorical, the actual ExtensionArray
is returned
>>> ser = pd.Series(pd.Categorical(["a", "b", "a"]))
>>> ser.array
['a', 'b', 'a']
Categories (2, str): ['a', 'b']
|
python
|
pandas/core/base.py
| 505
|
[
"self"
] |
ExtensionArray
| true
| 1
| 6.56
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
skip
|
boolean skip() {
    /*
     * Attempt a best-effort cancellation first. Future#cancel returns false when the
     * task cannot be cancelled (most likely because it already started), but we do
     * not trust that result much, so the return value is deliberately ignored. We
     * always call task.skip() as well: if the task already started, its internal
     * state change will simply fail. Calling skip even when cancel would have
     * returned false keeps us on the safe side.
     */
    future.cancel(false);
    final boolean skipped = task.skip();
    return skipped;
}
|
Cancels this task. Returns true if the task has been successfully cancelled, meaning it won't be executed
or if it is its execution won't have any effect. Returns false if the task cannot be cancelled (possibly it was
already cancelled or already completed).
|
java
|
client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java
| 196
|
[] | true
| 1
| 6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
_get_json_content_from_openml_api
|
def _get_json_content_from_openml_api(
    url: str,
    error_message: Optional[str],
    data_home: Optional[str],
    n_retries: int = 3,
    delay: float = 1.0,
) -> Dict:
    """
    Load JSON data from the OpenML API.

    Parameters
    ----------
    url : str
        The URL to load from. Should be an official OpenML endpoint.
    error_message : str or None
        Message used when the server reports an acceptable OpenML error
        (e.g. data id not found); other HTTP errors propagate unchanged.
    data_home : str or None
        Location to cache the response. None disables caching.
    n_retries : int, default=3
        Number of retries on HTTP errors. Status 412 is never retried as it
        represents an OpenML generic error.
    delay : float, default=1.0
        Number of seconds between retries.

    Returns
    -------
    json_data : json
        The parsed JSON response from the OpenML server on success;
        an exception is raised otherwise.
    """

    @_retry_with_clean_cache(url, data_home=data_home)
    def _fetch_json():
        response = _open_openml_url(url, data_home, n_retries=n_retries, delay=delay)
        with closing(response):
            return json.loads(response.read().decode("utf-8"))

    try:
        return _fetch_json()
    except HTTPError as http_err:
        # OpenML signals generic "acceptable" errors (e.g. data not found) with 412.
        if http_err.code != 412:
            raise http_err
    # Deliberately raised outside the except block for a nicer traceback.
    raise OpenMLError(error_message)
|
Loads json data from the openml api.
Parameters
----------
url : str
The URL to load from. Should be an official OpenML endpoint.
error_message : str or None
The error message to raise if an acceptable OpenML error is thrown
(acceptable error is, e.g., data id not found. Other errors, like 404's
will throw the native error message).
data_home : str or None
Location to cache the response. None if no cache is required.
n_retries : int, default=3
Number of retries when HTTP errors are encountered. Error with status
code 412 won't be retried as they represent OpenML generic errors.
delay : float, default=1.0
Number of seconds between retries.
Returns
-------
json_data : json
the json result from the OpenML server if the call was successful.
An exception otherwise.
|
python
|
sklearn/datasets/_openml.py
| 206
|
[
"url",
"error_message",
"data_home",
"n_retries",
"delay"
] |
Dict
| true
| 2
| 7.04
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
endLexicalEnvironment
|
/**
 * Ends the current lexical environment: collects the declarations hoisted into it
 * (returned to the caller, or undefined if there were none) and restores the
 * previous environment from the stacks.
 */
function endLexicalEnvironment(): Statement[] | undefined {
    Debug.assert(state > TransformationState.Uninitialized, "Cannot modify the lexical environment during initialization.");
    Debug.assert(state < TransformationState.Completed, "Cannot modify the lexical environment after transformation has completed.");
    Debug.assert(!lexicalEnvironmentSuspended, "Lexical environment is suspended.");
    // Hoisted output, assembled in order: function declarations first, then one
    // variable statement for all hoisted variables, then any hoisted statements.
    let statements: Statement[] | undefined;
    if (
        lexicalEnvironmentVariableDeclarations ||
        lexicalEnvironmentFunctionDeclarations ||
        lexicalEnvironmentStatements
    ) {
        if (lexicalEnvironmentFunctionDeclarations) {
            statements = [...lexicalEnvironmentFunctionDeclarations];
        }
        if (lexicalEnvironmentVariableDeclarations) {
            const statement = factory.createVariableStatement(
                /*modifiers*/ undefined,
                factory.createVariableDeclarationList(lexicalEnvironmentVariableDeclarations),
            );
            // Mark as a custom prologue so emit keeps it ahead of user statements.
            setEmitFlags(statement, EmitFlags.CustomPrologue);
            if (!statements) {
                statements = [statement];
            }
            else {
                statements.push(statement);
            }
        }
        if (lexicalEnvironmentStatements) {
            if (!statements) {
                statements = [...lexicalEnvironmentStatements];
            }
            else {
                statements = [...statements, ...lexicalEnvironmentStatements];
            }
        }
    }
    // Restore the previous lexical environment.
    lexicalEnvironmentStackOffset--;
    lexicalEnvironmentVariableDeclarations = lexicalEnvironmentVariableDeclarationsStack[lexicalEnvironmentStackOffset];
    lexicalEnvironmentFunctionDeclarations = lexicalEnvironmentFunctionDeclarationsStack[lexicalEnvironmentStackOffset];
    lexicalEnvironmentStatements = lexicalEnvironmentStatementsStack[lexicalEnvironmentStackOffset];
    lexicalEnvironmentFlags = lexicalEnvironmentFlagsStack[lexicalEnvironmentStackOffset];
    if (lexicalEnvironmentStackOffset === 0) {
        // Back at the outermost environment: release the stack arrays.
        lexicalEnvironmentVariableDeclarationsStack = [];
        lexicalEnvironmentFunctionDeclarationsStack = [];
        lexicalEnvironmentStatementsStack = [];
        lexicalEnvironmentFlagsStack = [];
    }
    return statements;
}
|
Ends a lexical environment. The previous set of hoisted declarations are restored and
any hoisted declarations added in this environment are returned.
|
typescript
|
src/compiler/transformer.ts
| 520
|
[] | true
| 12
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
process
|
/**
 * Processes the deferred import selectors collected so far: the pending list is
 * swapped out (set to null) so re-entrant registrations are collected separately,
 * each selector is registered with a grouping handler, the grouped imports are
 * processed, and finally a fresh empty list is installed for future collection.
 */
void process() {
    List<DeferredImportSelectorHolder> deferredImports = this.deferredImportSelectors;
    this.deferredImportSelectors = null;
    try {
        if (deferredImports != null) {
            DeferredImportSelectorGroupingHandler handler = new DeferredImportSelectorGroupingHandler();
            // Deterministic processing order.
            deferredImports.sort(DEFERRED_IMPORT_COMPARATOR);
            deferredImports.forEach(handler::register);
            handler.processGroupImports();
        }
    }
    finally {
        this.deferredImportSelectors = new ArrayList<>();
    }
}
|
Processes the deferred import selectors collected so far. The pending list is
swapped out so re-entrant registrations are collected separately, each selector
is registered with a grouping handler and the grouped imports are processed,
and a fresh empty list is installed afterwards.
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/ConfigurationClassParser.java
| 823
|
[] |
void
| true
| 2
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
setValueDelimiter
|
/**
 * Sets the variable default value delimiter from a string.
 * <p>
 * A null or empty {@code valueDelimiter} disables variable default value resolution.
 * </p>
 *
 * @param valueDelimiter the delimiter string to use, may be null or empty.
 * @return {@code this} instance.
 */
public StrSubstitutor setValueDelimiter(final String valueDelimiter) {
    final StrMatcher matcher =
            StringUtils.isEmpty(valueDelimiter) ? null : StrMatcher.stringMatcher(valueDelimiter);
    return setValueDelimiterMatcher(matcher);
}
|
Sets the variable default value delimiter to use.
<p>
The variable default value delimiter is the character or characters that delimit the
variable name and the variable default value. This method allows a string
variable default value delimiter to be easily set.
</p>
<p>
If the {@code valueDelimiter} is null or empty string, then the variable default
value resolution becomes disabled.
</p>
@param valueDelimiter the variable default value delimiter string to use, may be null or empty.
@return {@code this} instance.
@since 3.2
|
java
|
src/main/java/org/apache/commons/lang3/text/StrSubstitutor.java
| 940
|
[
"valueDelimiter"
] |
StrSubstitutor
| true
| 2
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
filterPostProcessors
|
/**
 * Filters the given post-processors down to those that require destruction for the bean.
 * @param processors the candidate post-processors to search
 * @param bean the bean instance to check against
 * @return the filtered list, or {@code null} when no candidates were supplied at all
 */
private static @Nullable List<DestructionAwareBeanPostProcessor> filterPostProcessors(
        List<DestructionAwareBeanPostProcessor> processors, Object bean) {

    // Preserve the original contract: null (not an empty list) when there are no candidates.
    if (CollectionUtils.isEmpty(processors)) {
        return null;
    }
    List<DestructionAwareBeanPostProcessor> applicable = new ArrayList<>(processors.size());
    for (DestructionAwareBeanPostProcessor candidate : processors) {
        if (candidate.requiresDestruction(bean)) {
            applicable.add(candidate);
        }
    }
    return applicable;
}
|
Search for all DestructionAwareBeanPostProcessors in the List.
@param processors the List to search
@return the filtered List of DestructionAwareBeanPostProcessors
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/DisposableBeanAdapter.java
| 482
|
[
"processors",
"bean"
] | true
| 3
| 7.28
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
retainAll
|
/**
 * Removes from the iterator every element not contained in {@code elementsToRetain},
 * leaving the iterator exhausted.
 *
 * @param removeFrom the iterator to (potentially) remove elements from
 * @param elementsToRetain the elements to retain
 * @return {@code true} if any element was removed
 */
@CanIgnoreReturnValue
public static boolean retainAll(Iterator<?> removeFrom, Collection<?> elementsToRetain) {
    checkNotNull(elementsToRetain);
    boolean modified = false;
    while (removeFrom.hasNext()) {
        Object candidate = removeFrom.next();
        if (elementsToRetain.contains(candidate)) {
            continue;
        }
        removeFrom.remove();
        modified = true;
    }
    return modified;
}
|
Traverses an iterator and removes every element that does not belong to the provided
collection. The iterator will be left exhausted: its {@code hasNext()} method will return
{@code false}.
@param removeFrom the iterator to (potentially) remove elements from
@param elementsToRetain the elements to retain
@return {@code true} if any element was removed from {@code iterator}
|
java
|
android/guava/src/com/google/common/collect/Iterators.java
| 250
|
[
"removeFrom",
"elementsToRetain"
] | true
| 3
| 7.6
|
google/guava
| 51,352
|
javadoc
| false
|
|
getAsText
|
/**
 * Returns the current URL value rendered via {@link URL#toExternalForm()},
 * or an empty string if no value is set.
 */
@Override
public String getAsText() {
    URL url = (URL) getValue();
    if (url == null) {
        return "";
    }
    return url.toExternalForm();
}
|
Returns the current URL value as its external form string, or an empty
string if no value is set.
|
java
|
spring-beans/src/main/java/org/springframework/beans/propertyeditors/URLEditor.java
| 81
|
[] |
String
| true
| 2
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
max
|
/**
 * Sets the max value of the histogram values.
 * @param max the max value
 * @return this builder, for call chaining
 */
public ExponentialHistogramBuilder max(double max) {
    this.max = max;
    return this; // fluent builder
}
|
Sets the max value of the histogram values. If not set, the max will be estimated from the buckets.
@param max the max value
@return the builder
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogramBuilder.java
| 127
|
[
"max"
] |
ExponentialHistogramBuilder
| true
| 1
| 6.96
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
delete_bucket
|
def delete_bucket(self, bucket_name: str, force_delete: bool = False, max_retries: int = 5) -> None:
    """
    To delete s3 bucket, delete all s3 bucket objects and then delete the bucket.

    .. seealso::
        - :external+boto3:py:meth:`S3.Client.delete_bucket`

    :param bucket_name: Bucket name
    :param force_delete: Enable this to delete bucket even if not empty
    :param max_retries: A bucket must be empty to be deleted. If force_delete is true,
        then retries may help prevent a race condition between deleting objects in the
        bucket and trying to delete the bucket.
    :return: None
    """
    if force_delete:
        # Repeatedly list and delete keys until the bucket is empty (or retries run out).
        for retry in range(max_retries):
            bucket_keys = self.list_keys(bucket_name=bucket_name)
            if not bucket_keys:
                break
            if retry:  # Avoid first loop
                # NOTE(review): sleeps 500 *seconds* per retry (~8 minutes); this looks
                # unintentionally long — confirm whether 0.5 or 5 seconds was intended.
                time.sleep(500)
            self.delete_objects(bucket=bucket_name, keys=bucket_keys)
    self.conn.delete_bucket(Bucket=bucket_name)
|
To delete s3 bucket, delete all s3 bucket objects and then delete the bucket.
.. seealso::
- :external+boto3:py:meth:`S3.Client.delete_bucket`
:param bucket_name: Bucket name
:param force_delete: Enable this to delete bucket even if not empty
:param max_retries: A bucket must be empty to be deleted. If force_delete is true,
then retries may help prevent a race condition between deleting objects in the
bucket and trying to delete the bucket.
:return: None
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/s3.py
| 1,463
|
[
"self",
"bucket_name",
"force_delete",
"max_retries"
] |
None
| true
| 5
| 7.92
|
apache/airflow
| 43,597
|
sphinx
| false
|
process
|
/**
 * Processes an {@link UnsubscribeEvent}. If a consumer or streams heartbeat request
 * manager is present, the member leaves the group and the event's future completes
 * when that leave-group future completes; otherwise the local subscription state is
 * simply cleared and the event's future is completed immediately.
 * @param event the unsubscribe event whose future is completed as described above
 */
private void process(final UnsubscribeEvent event) {
    if (requestManagers.consumerHeartbeatRequestManager.isPresent()) {
        CompletableFuture<Void> future = requestManagers.consumerHeartbeatRequestManager.get().membershipManager().leaveGroup();
        future.whenComplete(complete(event.future()));
    } else if (requestManagers.streamsGroupHeartbeatRequestManager.isPresent()) {
        CompletableFuture<Void> future = requestManagers.streamsGroupHeartbeatRequestManager.get().membershipManager().leaveGroup();
        future.whenComplete(complete(event.future()));
    } else {
        // If the consumer is not using the group management capabilities, we still need to clear all assignments it may have.
        subscriptions.unsubscribe();
        event.future().complete(null);
    }
}
|
Process event indicating that the consumer unsubscribed from all topics. This will make
the consumer release its assignment and send a request to leave the group.
@param event Unsubscribe event containing a future that will complete when the callback
execution for releasing the assignment completes, and the request to leave
the group is sent out.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java
| 415
|
[
"event"
] |
void
| true
| 3
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
entrySet
|
/**
 * Returns an immutable set of the mappings in this map, creating and caching it
 * on first access.
 */
@Override
public ImmutableSet<Entry<K, V>> entrySet() {
    // Read the field once into a local so the null-check and the return see the
    // same observed value — presumably to stay correct under benign data races;
    // confirm against the class's concurrency notes before changing.
    ImmutableSet<Entry<K, V>> result = entrySet;
    return (result == null) ? entrySet = createEntrySet() : result;
}
|
Returns an immutable set of the mappings in this map. The iteration order is specified by the
method used to create this map. Typically, this is insertion order.
|
java
|
android/guava/src/com/google/common/collect/ImmutableMap.java
| 936
|
[] | true
| 2
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
|
assignPartitions
|
/**
 * Makes the new assignment effective and triggers the onPartitionsAssigned callback
 * for the added partitions. Fetching and position initialization for the newly added
 * partitions are held back until the callback completes, and the topic names cache is
 * trimmed to the topics still assigned.
 *
 * @param assignedPartitions Full assignment to record in the subscription state
 *                           (previously owned plus newly added partitions).
 * @param addedPartitions    Partitions in the new assignment that were not owned
 *                           before; passed to the onPartitionsAssigned callback.
 * @return Future that completes when the callback execution completes.
 */
private CompletableFuture<Void> assignPartitions(
    TopicIdPartitionSet assignedPartitions,
    SortedSet<TopicPartition> addedPartitions) {
    // Update assignment in the subscription state, and ensure that no fetching or positions
    // initialization happens for the newly added partitions while the callback runs.
    updateSubscriptionAwaitingCallback(assignedPartitions, addedPartitions);
    // Invoke user call back.
    CompletableFuture<Void> result = signalPartitionsAssigned(addedPartitions);
    // Enable newly added partitions to start fetching and updating positions for them.
    result.whenComplete((__, exception) -> {
        if (exception == null) {
            // Enable assigned partitions to start fetching and updating positions for them.
            // We use assignedPartitions here instead of addedPartitions because there's a chance that the callback
            // might throw an exception, leaving addedPartitions empty. This would result in the poll operation
            // returning no records, as no topic partitions are marked as fetchable. In contrast, with the classic consumer,
            // if the first callback fails but the next one succeeds, polling can still retrieve data. To align with
            // this behavior, we rely on assignedPartitions to avoid such scenarios.
            subscriptions.enablePartitionsAwaitingCallback(assignedPartitions.topicPartitions());
        } else {
            // Keeping newly added partitions as non-fetchable after the callback failure.
            // They will be retried on the next reconciliation loop, until it succeeds or the
            // broker removes them from the assignment.
            if (!addedPartitions.isEmpty()) {
                log.warn("Leaving newly assigned partitions {} marked as non-fetchable and not " +
                    "requiring initializing positions after onPartitionsAssigned callback failed.",
                    addedPartitions, exception);
            }
        }
    });
    // Clear topic names cache, removing topics that are not assigned to the member anymore.
    Set<String> assignedTopics = assignedPartitions.topicNames();
    assignedTopicNamesCache.values().retainAll(assignedTopics);
    return result;
}
|
Make new assignment effective and trigger onPartitionsAssigned callback for the partitions
added. This will also update the local topic names cache, removing from it all topics that
are not assigned to the member anymore. This also ensures that records are not fetched and
positions are not initialized for the newly added partitions until the callback completes.
@param assignedPartitions Full assignment that will be updated in the member subscription
state. This includes previously owned and newly added partitions.
@param addedPartitions Partitions contained in the new assignment that were not owned by
the member before. These will be provided to the
onPartitionsAssigned callback.
@return Future that will complete when the callback execution completes.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java
| 1,191
|
[
"assignedPartitions",
"addedPartitions"
] | true
| 3
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
beginningOffsets
|
/**
 * Gets the first offset for the given partitions by delegating to the underlying
 * consumer implementation.
 * @param partitions the partitions to get the earliest offsets for
 * @return the earliest available offsets for the given partitions
 */
@Override
public Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions) {
    return delegate.beginningOffsets(partitions);
}
|
Get the first offset for the given partitions.
<p>
This method does not change the current consumer position of the partitions.
@see #seekToBeginning(Collection)
@param partitions the partitions to get the earliest offsets.
@return The earliest available offsets for the given partitions
@throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details
@throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic(s). See the exception for more details
@throws org.apache.kafka.common.errors.TimeoutException if the offset metadata could not be fetched before
expiration of the configured {@code default.api.timeout.ms}
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
| 1,632
|
[
"partitions"
] | true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
or
|
/**
 * Performs a logical <strong>OR</strong> of this bit set with each of the given bit
 * sets, modifying this instance in place.
 *
 * @param set the bit sets to OR into this one.
 * @return {@code this} instance, for call chaining.
 */
public FluentBitSet or(final FluentBitSet... set) {
    for (int i = 0; i < set.length; i++) {
        this.bitSet.or(set[i].bitSet);
    }
    return this;
}
|
Performs a logical <strong>OR</strong> of this bit set with the bit set arguments. This bit set is modified so that a bit in it
has the value {@code true} if and only if it either already had the value {@code true} or the corresponding bit in
the bit set argument has the value {@code true}.
@param set a bit set.
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/util/FluentBitSet.java
| 364
|
[] |
FluentBitSet
| true
| 1
| 7.04
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
customDefaultsMerge
|
/**
 * Used by `_.defaultsDeep` to customize its `_.merge` use: recursively merges the
 * source value into the destination value when both are objects, otherwise keeps
 * the destination value unchanged.
 *
 * @private
 * @param {*} objValue The destination value.
 * @param {*} srcValue The source value.
 * @param {string} key The key of the property to merge.
 * @param {Object} object The parent object of `objValue`.
 * @param {Object} source The parent object of `srcValue`.
 * @param {Object} [stack] Tracks traversed source values and their merged
 *  counterparts.
 * @returns {*} Returns the value to assign.
 */
function customDefaultsMerge(objValue, srcValue, key, object, source, stack) {
  // Non-object pairs are not merged; the destination wins as-is.
  if (!isObject(objValue) || !isObject(srcValue)) {
    return objValue;
  }
  // Track the pair to guard against cycles, merge recursively, then untrack.
  // (Susceptible to call stack limits.)
  stack.set(srcValue, objValue);
  baseMerge(objValue, srcValue, undefined, customDefaultsMerge, stack);
  stack['delete'](srcValue);
  return objValue;
}
|
Used by `_.defaultsDeep` to customize its `_.merge` use to merge source
objects into destination objects that are passed thru.
@private
@param {*} objValue The destination value.
@param {*} srcValue The source value.
@param {string} key The key of the property to merge.
@param {Object} object The parent object of `objValue`.
@param {Object} source The parent object of `srcValue`.
@param {Object} [stack] Tracks traversed source values and their merged
counterparts.
@returns {*} Returns the value to assign.
|
javascript
|
lodash.js
| 5,678
|
[
"objValue",
"srcValue",
"key",
"object",
"source",
"stack"
] | false
| 3
| 6.24
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
incrementAndGet
|
/**
 * Increments this instance's value by 1 and returns the value immediately after
 * the increment. This method is not thread safe.
 * @return the value after it is incremented.
 */
public short incrementAndGet() {
    return ++value;
}
|
Increments this instance's value by 1; this method returns the value associated with the instance
immediately after the increment operation. This method is not thread safe.
@return the value associated with the instance after it is incremented.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableShort.java
| 291
|
[] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
all
|
/**
 * Returns a future which succeeds if all the alter offsets succeed; otherwise it
 * fails with the first partition error, and the message lists every failed partition.
 */
public KafkaFuture<Void> all() {
    return this.future.thenApply(errorsByPartition -> {
        List<TopicPartition> failedPartitions = new ArrayList<>();
        ApiException firstException = null;
        // Single pass: collect every failed partition and remember the first error.
        for (Map.Entry<TopicPartition, ApiException> entry : errorsByPartition.entrySet()) {
            if (entry.getValue() == null) {
                continue;
            }
            failedPartitions.add(entry.getKey());
            if (firstException == null) {
                firstException = entry.getValue();
            }
        }
        if (firstException != null) {
            throw Errors.forException(firstException).exception(
                "Failed altering group offsets for the following partitions: " + failedPartitions);
        }
        return null;
    });
}
|
Return a future which succeeds if all the alter offsets succeed.
If not, the first topic error shall be returned.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/AlterShareGroupOffsetsResult.java
| 70
|
[] | true
| 2
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
close
|
/**
 * Closes this instance, delegating to {@code close(closeTimeout)} with the
 * configured close timeout.
 */
@Override
public void close() {
    close(closeTimeout);
}
|
Closes this instance, delegating to the timed close variant with the
configured close timeout.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java
| 344
|
[] |
void
| true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
randomGraph
|
/**
 * Creates a random string whose length is the number of characters specified,
 * delegating to {@code secure().nextGraph(count)}.
 * @param count the length of random string to create.
 * @return the random string.
 * @deprecated Use {@code nextGraph(int)} on {@code secure()}, {@code secureStrong()}, or {@code insecure()}.
 */
@Deprecated
public static String randomGraph(final int count) {
    return secure().nextGraph(count);
}
|
Creates a random string whose length is the number of characters specified.
<p>
Characters will be chosen from the set of characters which match the POSIX [:graph:] regular expression character
class. This class contains all visible ASCII characters (i.e. anything except spaces and control characters).
</p>
@param count the length of random string to create.
@return the random string.
@throws IllegalArgumentException if {@code count} < 0.
@since 3.5
@deprecated Use {@link #nextGraph(int)} from {@link #secure()}, {@link #secureStrong()}, or {@link #insecure()}.
|
java
|
src/main/java/org/apache/commons/lang3/RandomStringUtils.java
| 533
|
[
"count"
] |
String
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
parseInternal
|
/**
 * Creates a {@link BeanDefinitionBuilder} for the element, configures its parent
 * name, bean class (or class name), source, scope, and lazy-init settings, then
 * delegates to {@link #doParse} and returns the resulting bean definition.
 *
 * @param element the element that is to be parsed into a single BeanDefinition
 * @param parserContext the object encapsulating the current state of the parsing process
 * @return the BeanDefinition resulting from the parsing of the supplied element
 */
@Override
protected final AbstractBeanDefinition parseInternal(Element element, ParserContext parserContext) {
    BeanDefinitionBuilder builder = BeanDefinitionBuilder.genericBeanDefinition();
    AbstractBeanDefinition rawDefinition = builder.getRawBeanDefinition();

    String parentName = getParentName(element);
    if (parentName != null) {
        rawDefinition.setParentName(parentName);
    }

    // Prefer an explicit bean Class; fall back to a class name string.
    Class<?> beanClass = getBeanClass(element);
    if (beanClass != null) {
        rawDefinition.setBeanClass(beanClass);
    }
    else {
        String beanClassName = getBeanClassName(element);
        if (beanClassName != null) {
            rawDefinition.setBeanClassName(beanClassName);
        }
    }

    rawDefinition.setSource(parserContext.extractSource(element));

    BeanDefinition containingBd = parserContext.getContainingBeanDefinition();
    if (containingBd != null) {
        // Inner bean definition must receive same scope as containing bean.
        builder.setScope(containingBd.getScope());
    }
    if (parserContext.isDefaultLazyInit()) {
        // Default-lazy-init applies to custom bean definitions as well.
        builder.setLazyInit(true);
    }

    doParse(element, parserContext, builder);
    return builder.getBeanDefinition();
}
|
Creates a {@link BeanDefinitionBuilder} instance for the
{@link #getBeanClass bean Class} and passes it to the
{@link #doParse} strategy method.
@param element the element that is to be parsed into a single BeanDefinition
@param parserContext the object encapsulating the current state of the parsing process
@return the BeanDefinition resulting from the parsing of the supplied {@link Element}
@throws IllegalStateException if the bean {@link Class} returned from
{@link #getBeanClass(org.w3c.dom.Element)} is {@code null}
@see #doParse
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/xml/AbstractSingleBeanDefinitionParser.java
| 61
|
[
"element",
"parserContext"
] |
AbstractBeanDefinition
| true
| 6
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
filterOnDuration
|
public ListTransactionsOptions filterOnDuration(long durationMs) {
this.filteredDuration = durationMs;
return this;
}
|
Filter only the transactions that are running longer than the specified duration.
If no filter is specified or if the passed duration ms is less than 0,
then the all transactions will be returned.
@param durationMs the duration in milliseconds to filter by
@return this object
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/ListTransactionsOptions.java
| 69
|
[
"durationMs"
] |
ListTransactionsOptions
| true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
getInvokeMethod
|
private static Method getInvokeMethod(final boolean forceAccess, final String methodName, final Class<?>[] parameterTypes, final Class<?> cls) {
final Method method;
if (forceAccess) {
method = getMatchingMethod(cls, methodName, parameterTypes);
AccessibleObjects.setAccessible(method);
} else {
method = getMatchingAccessibleMethod(cls, methodName, parameterTypes);
}
return method;
}
|
Gets the annotation object with the given annotation type that is present on the given method or optionally on any equivalent method in super classes and
interfaces. Returns null if the annotation type was not present.
<p>
Stops searching for an annotation once the first annotation of the specified type has been found. Additional annotations of the specified type will be
silently ignored.
</p>
@param <A> the annotation type.
@param method the {@link Method} to query, may be null.
@param annotationCls the {@link Annotation} to check if is present on the method.
@param searchSupers determines if a lookup in the entire inheritance hierarchy of the given class is performed if the annotation was not directly
present.
@param ignoreAccess determines if underlying method has to be accessible.
@return the first matching annotation, or {@code null} if not found.
@throws NullPointerException if either the method or annotation class is {@code null}.
@throws SecurityException if an underlying accessible object's method denies the request.
@see SecurityManager#checkPermission
@since 3.6
|
java
|
src/main/java/org/apache/commons/lang3/reflect/MethodUtils.java
| 290
|
[
"forceAccess",
"methodName",
"parameterTypes",
"cls"
] |
Method
| true
| 2
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
sleepQuietly
|
public static void sleepQuietly(final Duration duration) {
try {
sleep(duration);
} catch (final InterruptedException ignore) {
// Ignore & be quiet.
}
}
|
Sleeps for the given duration while ignoring {@link InterruptedException}.
<p>
The sleep duration may be shorter than duration if we catch a {@link InterruptedException}.
</p>
@param duration the length of time to sleep.
@since 3.13.0
|
java
|
src/main/java/org/apache/commons/lang3/ThreadUtils.java
| 516
|
[
"duration"
] |
void
| true
| 2
| 6.88
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
nop
|
@SuppressWarnings("unchecked")
static <T, E extends Throwable> FailableObjDoubleConsumer<T, E> nop() {
return NOP;
}
|
Gets the NOP singleton.
@param <T> the type of the object argument to the operation.
@param <E> The kind of thrown exception or error.
@return The NOP singleton.
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableObjDoubleConsumer.java
| 43
|
[] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
downsampleIndex
|
private void downsampleIndex(ProjectId projectId, DownsampleAction.Request request, ActionListener<Void> listener) {
String sourceIndex = request.getSourceIndex();
String downsampleIndex = request.getTargetIndex();
logger.info("Data stream lifecycle issuing request to downsample index [{}] to index [{}]", sourceIndex, downsampleIndex);
client.projectClient(projectId).execute(DownsampleAction.INSTANCE, request, new ActionListener<>() {
@Override
public void onResponse(AcknowledgedResponse acknowledgedResponse) {
assert acknowledgedResponse.isAcknowledged() : "the downsample response is always acknowledged";
logger.info("Data stream lifecycle successfully downsampled index [{}] to index [{}]", sourceIndex, downsampleIndex);
listener.onResponse(null);
}
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
}
|
This method sends requests to delete any indices in the datastream that exceed its retention policy. It returns the set of indices
it has sent delete requests for.
@param project The project metadata from which to get index metadata
@param dataStream The data stream
@param indicesToExcludeForRemainingRun Indices to exclude from retention even if it would be time for them to be deleted
@return The set of indices that delete requests have been sent for
|
java
|
modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java
| 1,308
|
[
"projectId",
"request",
"listener"
] |
void
| true
| 1
| 6.56
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
findMainMethodWithTimeoutWarning
|
private @Nullable String findMainMethodWithTimeoutWarning(JarFile source) throws IOException {
long startTime = System.currentTimeMillis();
String mainMethod = findMainMethod(source);
long duration = System.currentTimeMillis() - startTime;
if (duration > FIND_WARNING_TIMEOUT) {
for (MainClassTimeoutWarningListener listener : this.mainClassTimeoutListeners) {
listener.handleTimeoutWarning(duration, mainMethod);
}
}
return mainMethod;
}
|
Writes a signature file if necessary for the given {@code writtenLibraries}.
@param writtenLibraries the libraries
@param writer the writer to use to write the signature file if necessary
@throws IOException if a failure occurs when writing the signature file
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/Packager.java
| 343
|
[
"source"
] |
String
| true
| 2
| 6.56
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
handleSigInt
|
public boolean handleSigInt() {
if (allowChildToHandleSigInt()) {
return true;
}
return doKill();
}
|
Return if the process was stopped.
@return {@code true} if stopped
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/RunProcess.java
| 117
|
[] | true
| 2
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
isAwaitExpression
|
function isAwaitExpression(): boolean {
if (token() === SyntaxKind.AwaitKeyword) {
if (inAwaitContext()) {
return true;
}
// here we are using similar heuristics as 'isYieldExpression'
return lookAhead(nextTokenIsIdentifierOrKeywordOrLiteralOnSameLine);
}
return false;
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 5,713
|
[] | true
| 3
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
copyReaderToWriter
|
@CanIgnoreReturnValue
static long copyReaderToWriter(Reader from, Writer to) throws IOException {
checkNotNull(from);
checkNotNull(to);
char[] buf = new char[DEFAULT_BUF_SIZE];
int nRead;
long total = 0;
while ((nRead = from.read(buf)) != -1) {
to.write(buf, 0, nRead);
total += nRead;
}
return total;
}
|
Copies all characters between the {@link Reader} and {@link Writer} objects. Does not close or
flush the reader or writer.
<p>This is identical to {@link #copy(Readable, Appendable)} but optimized for these specific
types. CharBuffer has poor performance when being written into or read out of so round tripping
all the bytes through the buffer takes a long time. With these specialized types we can just
use a char array.
@param from the object to read from
@param to the object to write to
@return the number of characters copied
@throws IOException if an I/O error occurs
|
java
|
android/guava/src/com/google/common/io/CharStreams.java
| 137
|
[
"from",
"to"
] | true
| 2
| 8.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
format
|
@Override
public <B extends Appendable> B format(final Date date, final B buf) {
final Calendar c = newCalendar();
c.setTime(date);
return applyRules(c, buf);
}
|
Compares two objects for equality.
@param obj the object to compare to.
@return {@code true} if equal.
|
java
|
src/main/java/org/apache/commons/lang3/time/FastDatePrinter.java
| 1,162
|
[
"date",
"buf"
] |
B
| true
| 1
| 7.04
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
obj2sctype
|
def obj2sctype(rep, default=None):
"""
Return the scalar dtype or NumPy equivalent of Python type of an object.
Parameters
----------
rep : any
The object of which the type is returned.
default : any, optional
If given, this is returned for objects whose types can not be
determined. If not given, None is returned for those objects.
Returns
-------
dtype : dtype or Python type
The data type of `rep`.
See Also
--------
sctype2char, issctype, issubsctype, issubdtype
Examples
--------
>>> from numpy._core.numerictypes import obj2sctype
>>> obj2sctype(np.int32)
<class 'numpy.int32'>
>>> obj2sctype(np.array([1., 2.]))
<class 'numpy.float64'>
>>> obj2sctype(np.array([1.j]))
<class 'numpy.complex128'>
>>> obj2sctype(dict)
<class 'numpy.object_'>
>>> obj2sctype('string')
>>> obj2sctype(1, default=list)
<class 'list'>
"""
# prevent abstract classes being upcast
if isinstance(rep, type) and issubclass(rep, generic):
return rep
# extract dtype from arrays
if isinstance(rep, ndarray):
return rep.dtype.type
# fall back on dtype to convert
try:
res = dtype(rep)
except Exception:
return default
else:
return res.type
|
Return the scalar dtype or NumPy equivalent of Python type of an object.
Parameters
----------
rep : any
The object of which the type is returned.
default : any, optional
If given, this is returned for objects whose types can not be
determined. If not given, None is returned for those objects.
Returns
-------
dtype : dtype or Python type
The data type of `rep`.
See Also
--------
sctype2char, issctype, issubsctype, issubdtype
Examples
--------
>>> from numpy._core.numerictypes import obj2sctype
>>> obj2sctype(np.int32)
<class 'numpy.int32'>
>>> obj2sctype(np.array([1., 2.]))
<class 'numpy.float64'>
>>> obj2sctype(np.array([1.j]))
<class 'numpy.complex128'>
>>> obj2sctype(dict)
<class 'numpy.object_'>
>>> obj2sctype('string')
>>> obj2sctype(1, default=list)
<class 'list'>
|
python
|
numpy/_core/numerictypes.py
| 175
|
[
"rep",
"default"
] | false
| 5
| 7.2
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
swapCase
|
public static String swapCase(final String str) {
if (StringUtils.isEmpty(str)) {
return str;
}
final char[] buffer = str.toCharArray();
boolean whitespace = true;
for (int i = 0; i < buffer.length; i++) {
final char ch = buffer[i];
if (Character.isUpperCase(ch) || Character.isTitleCase(ch)) {
buffer[i] = Character.toLowerCase(ch);
whitespace = false;
} else if (Character.isLowerCase(ch)) {
if (whitespace) {
buffer[i] = Character.toTitleCase(ch);
whitespace = false;
} else {
buffer[i] = Character.toUpperCase(ch);
}
} else {
whitespace = Character.isWhitespace(ch);
}
}
return new String(buffer);
}
|
Swaps the case of a String using a word based algorithm.
<ul>
<li>Upper case character converts to Lower case</li>
<li>Title case character converts to Lower case</li>
<li>Lower case character after Whitespace or at start converts to Title case</li>
<li>Other Lower case character converts to Upper case</li>
</ul>
<p>Whitespace is defined by {@link Character#isWhitespace(char)}.
A {@code null} input String returns {@code null}.</p>
<pre>
StringUtils.swapCase(null) = null
StringUtils.swapCase("") = ""
StringUtils.swapCase("The dog has a BONE") = "tHE DOG HAS A bone"
</pre>
@param str the String to swap case, may be null.
@return A new String, {@code null} if null String input.
|
java
|
src/main/java/org/apache/commons/lang3/text/WordUtils.java
| 318
|
[
"str"
] |
String
| true
| 7
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
isAccessLevelPublic
|
private boolean isAccessLevelPublic(MetadataGenerationEnvironment env, AnnotationMirror lombokAnnotation) {
Map<String, Object> values = env.getAnnotationElementValues(lombokAnnotation);
Object value = values.get("value");
return (value == null || value.toString().equals(LOMBOK_ACCESS_LEVEL_PUBLIC));
}
|
Determine if the current {@link #getField() field} defines a public accessor using
lombok annotations.
@param env the {@link MetadataGenerationEnvironment}
@param getter {@code true} to look for the read accessor, {@code false} for the
write accessor
@return {@code true} if this field has a public accessor of the specified type
|
java
|
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/LombokPropertyDescriptor.java
| 128
|
[
"env",
"lombokAnnotation"
] | true
| 2
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
explicit
|
public static <T> Ordering<T> explicit(List<T> valuesInOrder) {
return new ExplicitOrdering<>(valuesInOrder);
}
|
Returns an ordering that compares objects according to the order in which they appear in the
given list. Only objects present in the list (according to {@link Object#equals}) may be
compared. This comparator imposes a "partial ordering" over the type {@code T}. Subsequent
changes to the {@code valuesInOrder} list will have no effect on the returned comparator. Null
values in the list are not supported.
<p>The returned comparator throws a {@link ClassCastException} when it receives an input
parameter that isn't among the provided values.
<p>The generated comparator is serializable if all the provided values are serializable.
@param valuesInOrder the values that the returned comparator will be able to compare, in the
order the comparator should induce
@return the comparator described above
@throws NullPointerException if any of the provided values is null
@throws IllegalArgumentException if {@code valuesInOrder} contains any duplicate values
(according to {@link Object#equals})
|
java
|
android/guava/src/com/google/common/collect/Ordering.java
| 231
|
[
"valuesInOrder"
] | true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
|
calculateDeadlineMs
|
static long calculateDeadlineMs(final Time time, final long timeoutMs) {
return calculateDeadlineMs(requireNonNull(time).milliseconds(), timeoutMs);
}
|
Calculate the deadline timestamp based on {@link Timer#currentTimeMs()} and timeout.
@param time Time
@param timeoutMs Timeout, in milliseconds
@return Absolute time by which event should be completed
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEvent.java
| 109
|
[
"time",
"timeoutMs"
] | true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
loss
|
def loss(
self,
y_true,
raw_prediction,
sample_weight=None,
loss_out=None,
n_threads=1,
):
"""Compute the pointwise loss value for each input.
Parameters
----------
y_true : C-contiguous array of shape (n_samples,)
Observed, true target values.
raw_prediction : C-contiguous array of shape (n_samples,) or array of \
shape (n_samples, n_classes)
Raw prediction values (in link space).
sample_weight : None or C-contiguous array of shape (n_samples,)
Sample weights.
loss_out : None or C-contiguous array of shape (n_samples,)
A location into which the result is stored. If None, a new array
might be created.
n_threads : int, default=1
Might use openmp thread parallelism.
Returns
-------
loss : array of shape (n_samples,)
Element-wise loss function.
"""
if loss_out is None:
loss_out = np.empty_like(y_true)
# Be graceful to shape (n_samples, 1) -> (n_samples,)
if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
raw_prediction = raw_prediction.squeeze(1)
self.closs.loss(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
loss_out=loss_out,
n_threads=n_threads,
)
return loss_out
|
Compute the pointwise loss value for each input.
Parameters
----------
y_true : C-contiguous array of shape (n_samples,)
Observed, true target values.
raw_prediction : C-contiguous array of shape (n_samples,) or array of \
shape (n_samples, n_classes)
Raw prediction values (in link space).
sample_weight : None or C-contiguous array of shape (n_samples,)
Sample weights.
loss_out : None or C-contiguous array of shape (n_samples,)
A location into which the result is stored. If None, a new array
might be created.
n_threads : int, default=1
Might use openmp thread parallelism.
Returns
-------
loss : array of shape (n_samples,)
Element-wise loss function.
|
python
|
sklearn/_loss/loss.py
| 160
|
[
"self",
"y_true",
"raw_prediction",
"sample_weight",
"loss_out",
"n_threads"
] | false
| 4
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
awaitPendingAsyncCommitsAndExecuteCommitCallbacks
|
private void awaitPendingAsyncCommitsAndExecuteCommitCallbacks(Timer timer, boolean enableWakeup) {
if (lastPendingAsyncCommit == null || offsetCommitCallbackInvoker == null) {
return;
}
try {
final CompletableFuture<Void> futureToAwait = new CompletableFuture<>();
// We don't want the wake-up trigger to complete our pending async commit future,
// so create new future here. Any errors in the pending async commit will be handled
// by the async commit future / the commit callback - here, we just want to wait for it to complete.
lastPendingAsyncCommit.whenComplete((v, t) -> futureToAwait.complete(null));
if (enableWakeup) {
wakeupTrigger.setActiveTask(futureToAwait);
}
ConsumerUtils.getResult(futureToAwait, timer);
lastPendingAsyncCommit = null;
} finally {
if (enableWakeup) {
wakeupTrigger.clearTask();
}
timer.update();
}
offsetCommitCallbackInvoker.executeCallbacks();
}
|
This method sends a commit event to the EventHandler and waits for
the event to finish.
@param timeout max wait time for the blocking operation.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java
| 1,726
|
[
"timer",
"enableWakeup"
] |
void
| true
| 5
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
hasExpiredRequest
|
private Boolean hasExpiredRequest(long now, Deque<NetworkClient.InFlightRequest> deque) {
for (NetworkClient.InFlightRequest request : deque) {
// We exclude throttle time here because we want to ensure that we don't expire requests while
// they are throttled. The request timeout should take effect only after the throttle time has elapsed.
if (request.timeElapsedSinceSendMs(now) - request.throttleTimeMs() > request.requestTimeoutMs)
return true;
}
return false;
}
|
Clear out all the in-flight requests for the given node and return them
@param node The node
@return All the in-flight requests for that node that have been removed
|
java
|
clients/src/main/java/org/apache/kafka/clients/InFlightRequests.java
| 155
|
[
"now",
"deque"
] |
Boolean
| true
| 2
| 7.92
|
apache/kafka
| 31,560
|
javadoc
| false
|
removeAllOccurrences
|
public static float[] removeAllOccurrences(final float[] array, final float element) {
return (float[]) removeAt(array, indexesOf(array, element));
}
|
Removes the occurrences of the specified element from the specified float array.
<p>
All subsequent elements are shifted to the left (subtracts one from their indices).
If the array doesn't contain such an element, no elements are removed from the array.
{@code null} will be returned if the input array is {@code null}.
</p>
@param array the input array, will not be modified, and may be {@code null}.
@param element the element to remove.
@return A new array containing the existing elements except the occurrences of the specified element.
@since 3.10
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 5,504
|
[
"array",
"element"
] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
kurt
|
def kurt(
self,
skipna: bool = True,
numeric_only: bool = False,
**kwargs,
) -> Series:
"""
Return unbiased kurtosis within groups.
Parameters
----------
skipna : bool, default True
Exclude NA/null values when computing the result.
numeric_only : bool, default False
Include only float, int, boolean columns. Not implemented for Series.
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
Series
Unbiased kurtosis within groups.
See Also
--------
Series.kurt : Return unbiased kurtosis over requested axis.
Examples
--------
>>> ser = pd.Series(
... [390.0, 350.0, 357.0, 333.0, np.nan, 22.0, 20.0, 30.0, 40.0, 41.0],
... index=[
... "Falcon",
... "Falcon",
... "Falcon",
... "Falcon",
... "Falcon",
... "Parrot",
... "Parrot",
... "Parrot",
... "Parrot",
... "Parrot",
... ],
... name="Max Speed",
... )
>>> ser
Falcon 390.0
Falcon 350.0
Falcon 357.0
Falcon 333.0
Falcon NaN
Parrot 22.0
Parrot 20.0
Parrot 30.0
Parrot 40.0
Parrot 41.0
Name: Max Speed, dtype: float64
>>> ser.groupby(level=0).kurt()
Falcon 1.622109
Parrot -2.878714
Name: Max Speed, dtype: float64
>>> ser.groupby(level=0).kurt(skipna=False)
Falcon NaN
Parrot -2.878714
Name: Max Speed, dtype: float64
"""
def alt(obj):
# This should not be reached since the cython path should raise
# TypeError and not NotImplementedError.
raise TypeError(f"'kurt' is not supported for dtype={obj.dtype}")
return self._cython_agg_general(
"kurt", alt=alt, skipna=skipna, numeric_only=numeric_only, **kwargs
)
|
Return unbiased kurtosis within groups.
Parameters
----------
skipna : bool, default True
Exclude NA/null values when computing the result.
numeric_only : bool, default False
Include only float, int, boolean columns. Not implemented for Series.
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
Series
Unbiased kurtosis within groups.
See Also
--------
Series.kurt : Return unbiased kurtosis over requested axis.
Examples
--------
>>> ser = pd.Series(
... [390.0, 350.0, 357.0, 333.0, np.nan, 22.0, 20.0, 30.0, 40.0, 41.0],
... index=[
... "Falcon",
... "Falcon",
... "Falcon",
... "Falcon",
... "Falcon",
... "Parrot",
... "Parrot",
... "Parrot",
... "Parrot",
... "Parrot",
... ],
... name="Max Speed",
... )
>>> ser
Falcon 390.0
Falcon 350.0
Falcon 357.0
Falcon 333.0
Falcon NaN
Parrot 22.0
Parrot 20.0
Parrot 30.0
Parrot 40.0
Parrot 41.0
Name: Max Speed, dtype: float64
>>> ser.groupby(level=0).kurt()
Falcon 1.622109
Parrot -2.878714
Name: Max Speed, dtype: float64
>>> ser.groupby(level=0).kurt(skipna=False)
Falcon NaN
Parrot -2.878714
Name: Max Speed, dtype: float64
|
python
|
pandas/core/groupby/generic.py
| 1,438
|
[
"self",
"skipna",
"numeric_only"
] |
Series
| true
| 1
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
startsWithArgumentClassName
|
private boolean startsWithArgumentClassName(String message, @Nullable Object argument) {
if (argument == null) {
return false;
}
Class<?> argumentType = argument.getClass();
// On Java 8, the message starts with the class name: "java.lang.String cannot
// be cast..."
if (message.startsWith(argumentType.getName())) {
return true;
}
// On Java 11, the message starts with "class ..." a.k.a. Class.toString()
if (message.startsWith(argumentType.toString())) {
return true;
}
// On Java 9, the message used to contain the module name:
// "java.base/java.lang.String cannot be cast..."
int moduleSeparatorIndex = message.indexOf('/');
if (moduleSeparatorIndex != -1 && message.startsWith(argumentType.getName(), moduleSeparatorIndex + 1)) {
return true;
}
if (CLASS_GET_MODULE != null && MODULE_GET_NAME != null) {
Object module = ReflectionUtils.invokeMethod(CLASS_GET_MODULE, argumentType);
Object moduleName = ReflectionUtils.invokeMethod(MODULE_GET_NAME, module);
return message.startsWith(moduleName + "/" + argumentType.getName());
}
return false;
}
|
Use a specific filter to determine when a callback should apply. If no explicit
filter is set filter will be attempted using the generic type on the callback
type.
@param filter the filter to use
@return this instance
@since 3.4.8
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/util/LambdaSafe.java
| 189
|
[
"message",
"argument"
] | true
| 8
| 8.4
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
createTopics
|
default CreateTopicsResult createTopics(Collection<NewTopic> newTopics) {
return createTopics(newTopics, new CreateTopicsOptions());
}
|
Create a batch of new topics with the default options.
<p>
This is a convenience method for {@link #createTopics(Collection, CreateTopicsOptions)} with default options.
See the overload for more details.
<p>
This operation is supported by brokers with version 0.10.1.0 or higher.
@param newTopics The new topics to create.
@return The CreateTopicsResult.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 180
|
[
"newTopics"
] |
CreateTopicsResult
| true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
onResolve
|
function onResolve(fillers: Fillers, args: esbuild.OnResolveArgs, namespace: string): esbuild.OnResolveResult {
// removes trailing slashes in imports paths
const path = args.path.replace(/\/$/, '')
const item = fillers[path]
// if a path is provided, we just replace it
if (item.imports !== undefined) {
return { path: item.imports }
}
// if not, we defer action to the loaders cb
return {
namespace,
path: path,
pluginData: args.importer,
}
}
|
Handles the resolution step where esbuild resolves the imports before
bundling them. This allows us to inject a filler via its `path` if it was
provided. If not, we proceed to the next `onLoad` step.
@param fillers to use the path from
@param args from esbuild
@returns
|
typescript
|
helpers/compile/plugins/fill-plugin/fillPlugin.ts
| 100
|
[
"fillers",
"args",
"namespace"
] | true
| 2
| 8.24
|
prisma/prisma
| 44,834
|
jsdoc
| false
|
|
pandas_dtype
|
def pandas_dtype(dtype) -> DtypeObj:
"""
Convert input into a pandas only dtype object or a numpy dtype object.
Parameters
----------
dtype : object
The object to be converted into a dtype.
Returns
-------
np.dtype or a pandas dtype
The converted dtype, which can be either a numpy dtype or a pandas dtype.
Raises
------
TypeError if not a dtype
See Also
--------
api.types.is_dtype : Return true if the condition is satisfied for the arr_or_dtype.
Examples
--------
>>> pd.api.types.pandas_dtype(int)
dtype('int64')
"""
# short-circuit
if isinstance(dtype, np.ndarray):
return dtype.dtype
elif isinstance(dtype, (np.dtype, ExtensionDtype)):
return dtype
# builtin aliases
if dtype is str and using_string_dtype():
from pandas.core.arrays.string_ import StringDtype
return StringDtype(na_value=np.nan)
# registered extension types
result = registry.find(dtype)
if result is not None:
if isinstance(result, type):
# GH 31356, GH 54592
warnings.warn(
f"Instantiating {result.__name__} without any arguments."
f"Pass a {result.__name__} instance to silence this warning.",
UserWarning,
stacklevel=find_stack_level(),
)
result = result()
return result
# try a numpy dtype
# raise a consistent TypeError if failed
try:
with warnings.catch_warnings():
# TODO: warnings.catch_warnings can be removed when numpy>2.3.0
# is the minimum version
# GH#51523 - Series.astype(np.integer) doesn't show
# numpy deprecation warning of np.integer
# Hence enabling DeprecationWarning
warnings.simplefilter("always", DeprecationWarning)
npdtype = np.dtype(dtype)
except SyntaxError as err:
# np.dtype uses `eval` which can raise SyntaxError
raise TypeError(f"data type '{dtype}' not understood") from err
# Any invalid dtype (such as pd.Timestamp) should raise an error.
# np.dtype(invalid_type).kind = 0 for such objects. However, this will
# also catch some valid dtypes such as object, np.object_ and 'object'
# which we safeguard against by catching them earlier and returning
# np.dtype(valid_dtype) before this condition is evaluated.
if is_hashable(dtype) and dtype in [
object,
np.object_,
"object",
"O",
"object_",
]:
# check hashability to avoid errors/DeprecationWarning when we get
# here and `dtype` is an array
return npdtype
elif npdtype.kind == "O":
raise TypeError(f"dtype '{dtype}' not understood")
return npdtype
|
Convert input into a pandas only dtype object or a numpy dtype object.
Parameters
----------
dtype : object
The object to be converted into a dtype.
Returns
-------
np.dtype or a pandas dtype
The converted dtype, which can be either a numpy dtype or a pandas dtype.
Raises
------
TypeError if not a dtype
See Also
--------
api.types.is_dtype : Return true if the condition is satisfied for the arr_or_dtype.
Examples
--------
>>> pd.api.types.pandas_dtype(int)
dtype('int64')
|
python
|
pandas/core/dtypes/common.py
| 1,822
|
[
"dtype"
] |
DtypeObj
| true
| 10
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
getExceptionHandler
|
private @Nullable Method getExceptionHandler(Throwable exception) {
Class<?> exceptionClass = exception.getClass();
if (logger.isTraceEnabled()) {
logger.trace("Trying to find handler for exception of type [" + exceptionClass.getName() + "]");
}
Method handler = this.exceptionHandlerMap.get(exceptionClass);
while (handler == null && exceptionClass != Throwable.class) {
exceptionClass = exceptionClass.getSuperclass();
handler = this.exceptionHandlerMap.get(exceptionClass);
}
if (handler != null && logger.isTraceEnabled()) {
logger.trace("Found handler for exception of type [" + exceptionClass.getName() + "]: " + handler);
}
return handler;
}
|
Determine the exception handle method for the given exception.
@param exception the exception thrown
@return a handler for the given exception type, or {@code null} if none found
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/adapter/ThrowsAdviceInterceptor.java
| 152
|
[
"exception"
] |
Method
| true
| 6
| 8.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
wildcardType
|
public static WildcardTypeBuilder wildcardType() {
return new WildcardTypeBuilder();
}
|
Creates a new {@link WildcardTypeBuilder}.
@return a new {@link WildcardTypeBuilder}.
@since 3.2
|
java
|
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
| 1,701
|
[] |
WildcardTypeBuilder
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
bindForInOrForOfStatement
|
function bindForInOrForOfStatement(node: ForInOrOfStatement): void {
const preLoopLabel = setContinueTarget(node, createLoopLabel());
const postLoopLabel = createBranchLabel();
bind(node.expression);
addAntecedent(preLoopLabel, currentFlow);
currentFlow = preLoopLabel;
if (node.kind === SyntaxKind.ForOfStatement) {
bind(node.awaitModifier);
}
addAntecedent(postLoopLabel, currentFlow);
bind(node.initializer);
if (node.initializer.kind !== SyntaxKind.VariableDeclarationList) {
bindAssignmentTargetFlow(node.initializer);
}
bindIterativeStatement(node.statement, postLoopLabel, preLoopLabel);
addAntecedent(preLoopLabel, currentFlow);
currentFlow = finishFlowLabel(postLoopLabel);
}
|
Declares a Symbol for the node and adds it to symbols. Reports errors for conflicting identifier names.
@param symbolTable - The symbol table which node will be added to.
@param parent - node's parent declaration.
@param node - The declaration to be added to the symbol table
@param includes - The SymbolFlags that node has in addition to its declaration type (eg: export, ambient, etc.)
@param excludes - The flags which node cannot be declared alongside in a symbol table. Used to report forbidden declarations.
|
typescript
|
src/compiler/binder.ts
| 1,558
|
[
"node"
] | true
| 3
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
withPassword
|
public JksSslStoreDetails withPassword(String password) {
return new JksSslStoreDetails(this.type, this.provider, this.location, password);
}
|
Return a new {@link JksSslStoreDetails} instance with a new password.
@param password the new password
@return a new {@link JksSslStoreDetails} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/jks/JksSslStoreDetails.java
| 46
|
[
"password"
] |
JksSslStoreDetails
| true
| 1
| 6.16
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
toPrimitive
|
public static boolean[] toPrimitive(final Boolean[] array, final boolean valueForNull) {
if (array == null) {
return null;
}
if (array.length == 0) {
return EMPTY_BOOLEAN_ARRAY;
}
final boolean[] result = new boolean[array.length];
for (int i = 0; i < array.length; i++) {
final Boolean b = array[i];
result[i] = b == null ? valueForNull : b.booleanValue();
}
return result;
}
|
Converts an array of object Booleans to primitives handling {@code null}.
<p>
This method returns {@code null} for a {@code null} input array.
</p>
@param array a {@link Boolean} array, may be {@code null}.
@param valueForNull the value to insert if {@code null} found.
@return a {@code boolean} array, {@code null} if null array input.
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 8,834
|
[
"array",
"valueForNull"
] | true
| 5
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
toString
|
@Override
public String toString() {
return "SharePartitionOffsetInfo{" +
"startOffset=" + startOffset +
", leaderEpoch=" + leaderEpoch.orElse(null) +
", lag=" + lag.orElse(null) +
'}';
}
|
Get the lag for the partition.
@return The lag of the partition.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/SharePartitionOffsetInfo.java
| 90
|
[] |
String
| true
| 1
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
setProperty
|
@Override
public void setProperty(String property, Object newValue) {
if (newValue instanceof BeanDefinition beanDefinition) {
registerBeanDefinition(property, beanDefinition);
}
else {
this.metaClass.setProperty(this, property, newValue);
}
}
|
Load bean definitions from the given Groovy scripts or XML files.
<p>Note that ".xml" files will be parsed as XML content; all other kinds
of resources will be parsed as Groovy scripts.
@param relativeClass class whose package will be used as a prefix when
loading each specified resource name
@param resourceNames relatively-qualified names of resources to load
|
java
|
spring-context/src/main/java/org/springframework/context/support/GenericGroovyApplicationContext.java
| 243
|
[
"property",
"newValue"
] |
void
| true
| 2
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
to_typst
|
def to_typst(
self,
buf: FilePath | WriteBuffer[str] | None = None,
*,
encoding: str | None = None,
sparse_index: bool | None = None,
sparse_columns: bool | None = None,
max_rows: int | None = None,
max_columns: int | None = None,
) -> str | None:
"""
Write Styler to a file, buffer or string in Typst format.
.. versionadded:: 3.0.0
Parameters
----------
%(buf)s
%(encoding)s
sparse_index : bool, optional
Whether to sparsify the display of a hierarchical index. Setting to False
will display each explicit level element in a hierarchical key for each row.
Defaults to ``pandas.options.styler.sparse.index`` value.
sparse_columns : bool, optional
Whether to sparsify the display of a hierarchical index. Setting to False
will display each explicit level element in a hierarchical key for each
column. Defaults to ``pandas.options.styler.sparse.columns`` value.
max_rows : int, optional
The maximum number of rows that will be rendered. Defaults to
``pandas.options.styler.render.max_rows``, which is None.
max_columns : int, optional
The maximum number of columns that will be rendered. Defaults to
``pandas.options.styler.render.max_columns``, which is None.
Rows and columns may be reduced if the number of total elements is
large. This value is set to ``pandas.options.styler.render.max_elements``,
which is 262144 (18 bit browser rendering).
Returns
-------
str or None
If `buf` is None, returns the result as a string. Otherwise returns `None`.
See Also
--------
DataFrame.to_typst : Write a DataFrame to a file,
buffer or string in Typst format.
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
>>> df.style.to_typst() # doctest: +SKIP
.. code-block:: typst
#table(
columns: 3,
[], [A], [B],
[0], [1], [3],
[1], [2], [4],
)
"""
obj = self._copy(deepcopy=True)
if sparse_index is None:
sparse_index = get_option("styler.sparse.index")
if sparse_columns is None:
sparse_columns = get_option("styler.sparse.columns")
text = obj._render_typst(
sparse_columns=sparse_columns,
sparse_index=sparse_index,
max_rows=max_rows,
max_cols=max_columns,
)
return save_to_buffer(
text, buf=buf, encoding=(encoding if buf is not None else None)
)
|
Write Styler to a file, buffer or string in Typst format.
.. versionadded:: 3.0.0
Parameters
----------
%(buf)s
%(encoding)s
sparse_index : bool, optional
Whether to sparsify the display of a hierarchical index. Setting to False
will display each explicit level element in a hierarchical key for each row.
Defaults to ``pandas.options.styler.sparse.index`` value.
sparse_columns : bool, optional
Whether to sparsify the display of a hierarchical index. Setting to False
will display each explicit level element in a hierarchical key for each
column. Defaults to ``pandas.options.styler.sparse.columns`` value.
max_rows : int, optional
The maximum number of rows that will be rendered. Defaults to
``pandas.options.styler.render.max_rows``, which is None.
max_columns : int, optional
The maximum number of columns that will be rendered. Defaults to
``pandas.options.styler.render.max_columns``, which is None.
Rows and columns may be reduced if the number of total elements is
large. This value is set to ``pandas.options.styler.render.max_elements``,
which is 262144 (18 bit browser rendering).
Returns
-------
str or None
If `buf` is None, returns the result as a string. Otherwise returns `None`.
See Also
--------
DataFrame.to_typst : Write a DataFrame to a file,
buffer or string in Typst format.
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
>>> df.style.to_typst() # doctest: +SKIP
.. code-block:: typst
#table(
columns: 3,
[], [A], [B],
[0], [1], [3],
[1], [2], [4],
)
|
python
|
pandas/io/formats/style.py
| 1,239
|
[
"self",
"buf",
"encoding",
"sparse_index",
"sparse_columns",
"max_rows",
"max_columns"
] |
str | None
| true
| 4
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
cellSet
|
@Override
public ImmutableSet<Cell<R, C, V>> cellSet() {
return (ImmutableSet<Cell<R, C, V>>) super.cellSet();
}
|
A builder for creating immutable table instances, especially {@code public static final} tables
("constant tables"). Example:
{@snippet :
static final ImmutableTable<Integer, Character, String> SPREADSHEET =
new ImmutableTable.Builder<Integer, Character, String>()
.put(1, 'A', "foo")
.put(1, 'B', "bar")
.put(2, 'A', "baz")
.buildOrThrow();
}
<p>By default, the order in which cells are added to the builder determines the iteration
ordering of all views in the returned table, with {@link #putAll} following the {@link
Table#cellSet()} iteration order. However, if {@link #orderRowsBy} or {@link #orderColumnsBy}
is called, the views are sorted by the supplied comparators.
<p>For empty or single-cell immutable tables, {@link #of()} and {@link #of(Object, Object,
Object)} are even more convenient.
<p>Builder instances can be reused - it is safe to call {@link #buildOrThrow} multiple times to
build multiple tables in series. Each table is a superset of the tables created before it.
@since 11.0
|
java
|
android/guava/src/com/google/common/collect/ImmutableTable.java
| 301
|
[] | true
| 1
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
getAndAdd
|
@CanIgnoreReturnValue
public final double getAndAdd(int i, double delta) {
while (true) {
long current = longs.get(i);
double currentVal = longBitsToDouble(current);
double nextVal = currentVal + delta;
long next = doubleToRawLongBits(nextVal);
if (longs.compareAndSet(i, current, next)) {
return currentVal;
}
}
}
|
Atomically adds the given value to the element at index {@code i}.
@param i the index
@param delta the value to add
@return the previous value
|
java
|
android/guava/src/com/google/common/util/concurrent/AtomicDoubleArray.java
| 177
|
[
"i",
"delta"
] | true
| 3
| 8.24
|
google/guava
| 51,352
|
javadoc
| false
|
|
add
|
@CanIgnoreReturnValue
@Override
public Builder<E> add(E element) {
checkNotNull(element);
ensureRoomFor(1);
contents[size++] = element;
return this;
}
|
Adds {@code element} to the {@code ImmutableList}.
@param element the element to add
@return this {@code Builder} object
@throws NullPointerException if {@code element} is null
|
java
|
guava/src/com/google/common/collect/ImmutableList.java
| 842
|
[
"element"
] | true
| 1
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
|
load
|
private Map<ConfigDataResolutionResult, ConfigData> load(ConfigDataLoaderContext loaderContext,
List<ConfigDataResolutionResult> candidates) throws IOException {
Map<ConfigDataResolutionResult, ConfigData> result = new LinkedHashMap<>();
for (int i = candidates.size() - 1; i >= 0; i--) {
ConfigDataResolutionResult candidate = candidates.get(i);
ConfigDataLocation location = candidate.getLocation();
ConfigDataResource resource = candidate.getResource();
this.logger.trace(LogMessage.format("Considering resource %s from location %s", resource, location));
if (resource.isOptional()) {
this.optionalLocations.add(location);
}
if (this.loaded.contains(resource)) {
this.logger
.trace(LogMessage.format("Already loaded resource %s ignoring location %s", resource, location));
this.loadedLocations.add(location);
}
else {
try {
ConfigData loaded = this.loaders.load(loaderContext, resource);
if (loaded != null) {
this.logger.trace(LogMessage.format("Loaded resource %s from location %s", resource, location));
this.loaded.add(resource);
this.loadedLocations.add(location);
result.put(candidate, loaded);
}
}
catch (ConfigDataNotFoundException ex) {
handle(ex, location, resource);
}
}
}
return Collections.unmodifiableMap(result);
}
|
Resolve and load the given list of locations, filtering any that have been
previously loaded.
@param activationContext the activation context
@param locationResolverContext the location resolver context
@param loaderContext the loader context
@param locations the locations to resolve
@return a map of the loaded locations and data
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataImporter.java
| 115
|
[
"loaderContext",
"candidates"
] | true
| 6
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
transitionToJoining
|
private void transitionToJoining() {
if (state == MemberState.FATAL) {
log.warn("No action taken to join the group with the updated subscription because " +
"the member is in FATAL state");
return;
}
if (reconciliationInProgress) {
rejoinedWhileReconciliationInProgress = true;
}
resetEpoch();
transitionTo(MemberState.JOINING);
clearCurrentTaskAssignment();
}
|
Transition to the {@link MemberState#JOINING} state, indicating that the member will
try to join the group on the next heartbeat request. This is expected to be invoked when
the user calls the subscribe API, or when the member wants to rejoin after getting fenced.
Visible for testing.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsMembershipManager.java
| 405
|
[] |
void
| true
| 3
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
milliseconds
|
static Coercer milliseconds() {
return new Coercer((value) -> Long.parseLong(value) * 1000, NumberFormatException.class::isInstance);
}
|
Attempt to convert the specified value to epoch time.
@param value the value to coerce to
@return the epoch time in milliseconds or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/info/GitProperties.java
| 153
|
[] |
Coercer
| true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
apply
|
<T> Source<T> apply(Source<T> source);
|
Apply the operation to the given source.
@param <T> the source type
@param source the source to operate on
@return the updated source
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/PropertyMapper.java
| 144
|
[
"source"
] | true
| 1
| 6.8
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
corrwith
|
def corrwith(
self,
other: DataFrame | Series,
drop: bool = False,
method: CorrelationMethod = "pearson",
numeric_only: bool = False,
) -> DataFrame:
"""
Compute pairwise correlation.
.. deprecated:: 3.0.0
Pairwise correlation is computed between rows or columns of
DataFrame with rows or columns of Series or DataFrame. DataFrames
are first aligned along both axes before computing the
correlations.
Parameters
----------
other : DataFrame, Series
Object with which to compute correlations.
drop : bool, default False
Drop missing indices from result.
method : {'pearson', 'kendall', 'spearman'} or callable
Method of correlation:
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
.. versionchanged:: 2.0.0
The default value of ``numeric_only`` is now ``False``.
Returns
-------
Series
Pairwise correlations.
See Also
--------
DataFrame.corr : Compute pairwise correlation of columns.
Examples
--------
>>> df1 = pd.DataFrame(
... {
... "Day": [1, 1, 1, 2, 2, 2, 3, 3, 3],
... "Data": [6, 6, 8, 5, 4, 2, 7, 3, 9],
... }
... )
>>> df2 = pd.DataFrame(
... {
... "Day": [1, 1, 1, 2, 2, 2, 3, 3, 3],
... "Data": [5, 3, 8, 3, 1, 1, 2, 3, 6],
... }
... )
>>> df1.groupby("Day").corrwith(df2)
Data Day
Day
1 0.917663 NaN
2 0.755929 NaN
3 0.576557 NaN
"""
warnings.warn(
"DataFrameGroupBy.corrwith is deprecated",
Pandas4Warning,
stacklevel=find_stack_level(),
)
result = self._op_via_apply(
"corrwith",
other=other,
drop=drop,
method=method,
numeric_only=numeric_only,
)
return result
|
Compute pairwise correlation.
.. deprecated:: 3.0.0
Pairwise correlation is computed between rows or columns of
DataFrame with rows or columns of Series or DataFrame. DataFrames
are first aligned along both axes before computing the
correlations.
Parameters
----------
other : DataFrame, Series
Object with which to compute correlations.
drop : bool, default False
Drop missing indices from result.
method : {'pearson', 'kendall', 'spearman'} or callable
Method of correlation:
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
.. versionchanged:: 2.0.0
The default value of ``numeric_only`` is now ``False``.
Returns
-------
Series
Pairwise correlations.
See Also
--------
DataFrame.corr : Compute pairwise correlation of columns.
Examples
--------
>>> df1 = pd.DataFrame(
... {
... "Day": [1, 1, 1, 2, 2, 2, 3, 3, 3],
... "Data": [6, 6, 8, 5, 4, 2, 7, 3, 9],
... }
... )
>>> df2 = pd.DataFrame(
... {
... "Day": [1, 1, 1, 2, 2, 2, 3, 3, 3],
... "Data": [5, 3, 8, 3, 1, 1, 2, 3, 6],
... }
... )
>>> df1.groupby("Day").corrwith(df2)
Data Day
Day
1 0.917663 NaN
2 0.755929 NaN
3 0.576557 NaN
|
python
|
pandas/core/groupby/generic.py
| 3,688
|
[
"self",
"other",
"drop",
"method",
"numeric_only"
] |
DataFrame
| true
| 1
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
get_head_object_async
|
async def get_head_object_async(
self, client: AioBaseClient, key: str, bucket_name: str | None = None
) -> dict[str, Any] | None:
"""
Retrieve metadata of an object.
:param client: aiobotocore client
:param bucket_name: Name of the bucket in which the file is stored
:param key: S3 key that will point to the file
"""
head_object_val: dict[str, Any] | None = None
try:
params = {
"Bucket": bucket_name,
"Key": key,
}
if self._requester_pays:
params["RequestPayer"] = "requester"
head_object_val = await client.head_object(**params)
return head_object_val
except ClientError as e:
if e.response["ResponseMetadata"]["HTTPStatusCode"] == 404:
return head_object_val
raise e
|
Retrieve metadata of an object.
:param client: aiobotocore client
:param bucket_name: Name of the bucket in which the file is stored
:param key: S3 key that will point to the file
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/s3.py
| 432
|
[
"self",
"client",
"key",
"bucket_name"
] |
dict[str, Any] | None
| true
| 3
| 7.04
|
apache/airflow
| 43,597
|
sphinx
| false
|
writeObject
|
private void writeObject(final ObjectOutputStream objectOutputStream) throws IOException {
final ArrayList<L> serializableListeners = new ArrayList<>();
// Don't just rely on instanceof Serializable:
ObjectOutputStream testObjectOutputStream = new ObjectOutputStream(new ByteArrayOutputStream());
for (final L listener : listeners) {
try {
testObjectOutputStream.writeObject(listener);
serializableListeners.add(listener);
} catch (final IOException exception) {
//recreate test stream in case of indeterminate state
testObjectOutputStream = new ObjectOutputStream(new ByteArrayOutputStream());
}
}
// We can reconstitute everything we need from an array of our listeners,
// which has the additional advantage of typically requiring less storage than a list:
objectOutputStream.writeObject(serializableListeners.toArray(prototypeArray));
}
|
Serializes this instance onto the given ObjectOutputStream.
@param objectOutputStream the output stream
@throws IOException if an IO error occurs
|
java
|
src/main/java/org/apache/commons/lang3/event/EventListenerSupport.java
| 343
|
[
"objectOutputStream"
] |
void
| true
| 2
| 6.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
createIndex
|
private IndexSearcher createIndex(final Directory directory, final boolean withDocValuesSkipper, final int commitEvery)
throws IOException {
final IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
// NOTE: index sort config matching LogsDB's sort order
config.setIndexSort(
new Sort(
new SortField(HOSTNAME_FIELD, SortField.Type.STRING, false),
new SortedNumericSortField(TIMESTAMP_FIELD, SortField.Type.LONG, true)
)
);
final Random random = new Random(seed);
try (IndexWriter indexWriter = new IndexWriter(directory, config)) {
int docCountSinceLastCommit = 0;
for (int i = 0; i < nDocs; i++) {
final Document doc = new Document();
addFieldsToDocument(doc, i, withDocValuesSkipper, random);
indexWriter.addDocument(doc);
docCountSinceLastCommit++;
// Force commit periodically to create multiple Lucene segments
if (docCountSinceLastCommit >= commitEvery) {
indexWriter.commit();
docCountSinceLastCommit = 0;
}
}
indexWriter.commit();
// Open a reader and create a searcher on top of it using a single thread executor.
DirectoryReader reader = DirectoryReader.open(indexWriter);
return new IndexSearcher(reader, executorService);
}
}
|
Creates an {@link IndexSearcher} after indexing documents in batches.
Each batch commit forces multiple segments to be created.
@param directory the Lucene {@link Directory} for writing the index
@param withDocValuesSkipper true if we should enable doc values skipper on certain fields
@param commitEvery number of documents after which to commit (and thus segment)
@return an {@link IndexSearcher} for querying the newly built index
@throws IOException if an I/O error occurs during index writing
|
java
|
benchmarks/src/main/java/org/elasticsearch/benchmark/search/query/range/DateFieldMapperDocValuesSkipperBenchmark.java
| 196
|
[
"directory",
"withDocValuesSkipper",
"commitEvery"
] |
IndexSearcher
| true
| 3
| 7.76
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
lagmul
|
def lagmul(c1, c2):
"""
Multiply one Laguerre series by another.
Returns the product of two Laguerre series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Laguerre series coefficients representing their product.
See Also
--------
lagadd, lagsub, lagmulx, lagdiv, lagpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Laguerre polynomial basis set. Thus, to express
the product as a Laguerre series, it is necessary to "reproject" the
product onto said basis set, which may produce "unintuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagmul
>>> lagmul([1, 2, 3], [0, 1, 2])
array([ 8., -13., 38., -51., 36.])
"""
# s1, s2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c = c2
xs = c1
else:
c = c1
xs = c2
if len(c) == 1:
c0 = c[0] * xs
c1 = 0
elif len(c) == 2:
c0 = c[0] * xs
c1 = c[1] * xs
else:
nd = len(c)
c0 = c[-2] * xs
c1 = c[-1] * xs
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = lagsub(c[-i] * xs, (c1 * (nd - 1)) / nd)
c1 = lagadd(tmp, lagsub((2 * nd - 1) * c1, lagmulx(c1)) / nd)
return lagadd(c0, lagsub(c1, lagmulx(c1)))
|
Multiply one Laguerre series by another.
Returns the product of two Laguerre series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Laguerre series coefficients representing their product.
See Also
--------
lagadd, lagsub, lagmulx, lagdiv, lagpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Laguerre polynomial basis set. Thus, to express
the product as a Laguerre series, it is necessary to "reproject" the
product onto said basis set, which may produce "unintuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagmul
>>> lagmul([1, 2, 3], [0, 1, 2])
array([ 8., -13., 38., -51., 36.])
|
python
|
numpy/polynomial/laguerre.py
| 441
|
[
"c1",
"c2"
] | false
| 7
| 7.36
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
parseObject
|
Object parseObject(String source, ParsePosition pos);
|
Parses a date/time string according to the given parse position.
@param source A {@link String} whose beginning should be parsed.
@param pos the parse position.
@return a {@link java.util.Date} object.
@see java.text.DateFormat#parseObject(String, ParsePosition)
|
java
|
src/main/java/org/apache/commons/lang3/time/DateParser.java
| 123
|
[
"source",
"pos"
] |
Object
| true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
apply_str
|
def apply_str(self) -> DataFrame | Series:
"""
Compute apply in case of a string.
Returns
-------
result: Series or DataFrame
"""
# Caller is responsible for checking isinstance(self.f, str)
func = cast(str, self.func)
obj = self.obj
from pandas.core.groupby.generic import (
DataFrameGroupBy,
SeriesGroupBy,
)
# Support for `frame.transform('method')`
# Some methods (shift, etc.) require the axis argument, others
# don't, so inspect and insert if necessary.
method = getattr(obj, func, None)
if callable(method):
sig = inspect.getfullargspec(method)
arg_names = (*sig.args, *sig.kwonlyargs)
if self.axis != 0 and (
"axis" not in arg_names or func in ("corrwith", "skew")
):
raise ValueError(f"Operation {func} does not support axis=1")
if "axis" in arg_names and not isinstance(
obj, (SeriesGroupBy, DataFrameGroupBy)
):
self.kwargs["axis"] = self.axis
return self._apply_str(obj, func, *self.args, **self.kwargs)
|
Compute apply in case of a string.
Returns
-------
result: Series or DataFrame
|
python
|
pandas/core/apply.py
| 668
|
[
"self"
] |
DataFrame | Series
| true
| 7
| 6.72
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
users
|
public KafkaFuture<List<String>> users() {
final KafkaFutureImpl<List<String>> retval = new KafkaFutureImpl<>();
dataFuture.whenComplete((data, throwable) -> {
if (throwable != null) {
retval.completeExceptionally(throwable);
} else {
retval.complete(data.results().stream()
.filter(result -> result.errorCode() != Errors.RESOURCE_NOT_FOUND.code())
.map(result -> result.user()).collect(Collectors.toList()));
}
});
return retval;
}
|
@return a future indicating the distinct users that meet the request criteria and that have at least one
credential. The future will not complete successfully if the user is not authorized to perform the describe
operation; otherwise, it will complete successfully as long as the list of users with credentials can be
successfully determined within some hard-coded timeout period. Note that the returned list will not include users
that do not exist/have no credentials: a request to describe an explicit list of users, none of which existed/had
a credential, will result in a future that returns an empty list being returned here. A returned list will
include users that have a credential but that could not be described.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResult.java
| 92
|
[] | true
| 2
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
rowMap
|
@Override
public Map<R, Map<C, V>> rowMap() {
Map<R, Map<C, V>> result = rowMap;
return (result == null) ? rowMap = createRowMap() : result;
}
|
{@inheritDoc}
<p>The collection's iterator traverses the values for the first row, the values for the second
row, and so on.
|
java
|
android/guava/src/com/google/common/collect/StandardTable.java
| 778
|
[] | true
| 2
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
createSingleLineStringWriter
|
function createSingleLineStringWriter(): EmitTextWriter {
// Why var? It avoids TDZ checks in the runtime which can be costly.
// See: https://github.com/microsoft/TypeScript/issues/52924
/* eslint-disable no-var */
var str = "";
/* eslint-enable no-var */
const writeText: (text: string) => void = text => str += text;
return {
getText: () => str,
write: writeText,
rawWrite: writeText,
writeKeyword: writeText,
writeOperator: writeText,
writePunctuation: writeText,
writeSpace: writeText,
writeStringLiteral: writeText,
writeLiteral: writeText,
writeParameter: writeText,
writeProperty: writeText,
writeSymbol: (s, _) => writeText(s),
writeTrailingSemicolon: writeText,
writeComment: writeText,
getTextPos: () => str.length,
getLine: () => 0,
getColumn: () => 0,
getIndent: () => 0,
isAtStartOfLine: () => false,
hasTrailingComment: () => false,
hasTrailingWhitespace: () => !!str.length && isWhiteSpaceLike(str.charCodeAt(str.length - 1)),
// Completely ignore indentation for string writers. And map newlines to
// a single space.
writeLine: () => str += " ",
increaseIndent: noop,
decreaseIndent: noop,
clear: () => str = "",
};
}
|
True if the symbol is for an external module, as opposed to a namespace.
@internal
|
typescript
|
src/compiler/utilities.ts
| 670
|
[] | true
| 2
| 6.56
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
resolveSource
|
SourceMetadata resolveSource(TypeElement typeElement) {
ConfigurationMetadata configurationMetadata = resolveConfigurationMetadata(typeElement);
return (configurationMetadata != null)
? new SourceMetadata(configurationMetadata.getItems(), configurationMetadata.getHints())
: SourceMetadata.EMPTY;
}
|
Resolve the {@link SourceMetadata} for the specified type. If the type has no
source metadata, return an {@link SourceMetadata#EMPTY} source.
@param typeElement the type to discover source metadata from
@return the source metadata for the specified type
|
java
|
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/ConfigurationPropertiesSourceResolver.java
| 59
|
[
"typeElement"
] |
SourceMetadata
| true
| 2
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
shape
|
def shape(self) -> Shape:
"""
Return a tuple of the shape of the underlying data.
See Also
--------
Series.ndim : Number of dimensions of the underlying data.
Series.size : Return the number of elements in the underlying data.
Series.nbytes : Return the number of bytes in the underlying data.
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s.shape
(3,)
"""
return self._values.shape
|
Return a tuple of the shape of the underlying data.
See Also
--------
Series.ndim : Number of dimensions of the underlying data.
Series.size : Return the number of elements in the underlying data.
Series.nbytes : Return the number of bytes in the underlying data.
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s.shape
(3,)
|
python
|
pandas/core/base.py
| 342
|
[
"self"
] |
Shape
| true
| 1
| 6.24
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
createBuildOptions
|
function createBuildOptions(options: BuildOptions[]) {
return flatten(
map(options, (options) => [
// we defer it so that we don't trigger glob immediately
() => applyDefaults(options),
// ... here can go more steps
]),
)
}
|
Create two deferred builds for esm and cjs. The one follows the other:
- 1. The code gets compiled to an optimized tree-shaken esm output
- 2. We take that output and compile it to an optimized cjs output
@param options the original build options
@returns if options = [a, b], we get [a-esm, a-cjs, b-esm, b-cjs]
|
typescript
|
helpers/compile/build.ts
| 70
|
[
"options"
] | false
| 1
| 6.24
|
prisma/prisma
| 44,834
|
jsdoc
| false
|
|
get_loc_level
|
def get_loc_level(self, key, level: IndexLabel = 0, drop_level: bool = True):
"""
Get location and sliced index for requested label(s)/level(s).
The `get_loc_level` method is a more advanced form of `get_loc`, allowing
users to specify not just a label or sequence of labels, but also the level(s)
in which to search. This method is useful when you need to isolate particular
sections of a MultiIndex, either for further analysis or for slicing and
dicing the data. The method provides flexibility in terms of maintaining
or dropping levels from the resulting index based on the `drop_level`
parameter.
Parameters
----------
key : label or sequence of labels
The label(s) for which to get the location.
level : int/level name or list thereof, optional
The level(s) in the MultiIndex to consider. If not provided, defaults
to the first level.
drop_level : bool, default True
If ``False``, the resulting index will not drop any level.
Returns
-------
tuple
A 2-tuple where the elements :
Element 0: int, slice object or boolean array.
Element 1: The resulting sliced multiindex/index. If the key
contains all levels, this will be ``None``.
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list("abb"), list("def")], names=["A", "B"])
>>> mi.get_loc_level("b")
(slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B'))
>>> mi.get_loc_level("e", level="B")
(array([False, True, False]), Index(['b'], dtype='object', name='A'))
>>> mi.get_loc_level(["b", "e"])
(1, None)
"""
if not isinstance(level, (range, list, tuple)):
level = self._get_level_number(level)
else:
level = [self._get_level_number(lev) for lev in level]
loc, mi = self._get_loc_level(key, level=level)
if not drop_level:
if lib.is_integer(loc):
# Slice index must be an integer or None
mi = self[loc : loc + 1]
else:
mi = self[loc]
return loc, mi
|
Get location and sliced index for requested label(s)/level(s).
The `get_loc_level` method is a more advanced form of `get_loc`, allowing
users to specify not just a label or sequence of labels, but also the level(s)
in which to search. This method is useful when you need to isolate particular
sections of a MultiIndex, either for further analysis or for slicing and
dicing the data. The method provides flexibility in terms of maintaining
or dropping levels from the resulting index based on the `drop_level`
parameter.
Parameters
----------
key : label or sequence of labels
The label(s) for which to get the location.
level : int/level name or list thereof, optional
The level(s) in the MultiIndex to consider. If not provided, defaults
to the first level.
drop_level : bool, default True
If ``False``, the resulting index will not drop any level.
Returns
-------
tuple
A 2-tuple where the elements :
Element 0: int, slice object or boolean array.
Element 1: The resulting sliced multiindex/index. If the key
contains all levels, this will be ``None``.
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list("abb"), list("def")], names=["A", "B"])
>>> mi.get_loc_level("b")
(slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B'))
>>> mi.get_loc_level("e", level="B")
(array([False, True, False]), Index(['b'], dtype='object', name='A'))
>>> mi.get_loc_level(["b", "e"])
(1, None)
|
python
|
pandas/core/indexes/multi.py
| 3,395
|
[
"self",
"key",
"level",
"drop_level"
] | true
| 6
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
toString
|
@Override
public String toString() {
return "Member at index " + this.index + ((this.name != null) ? "{%s}".formatted(this.name) : "");
}
|
Whether this contributes one or more name/value pairs to the JSON.
@return whether a name/value pair is contributed
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/json/JsonWriter.java
| 682
|
[] |
String
| true
| 2
| 8.16
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getCallerClass
|
@SuppressWarnings("unused") // Called reflectively from InstrumenterImpl
public static Class<?> getCallerClass() {
Optional<Class<?>> callerClassIfAny = StackWalker.getInstance(Set.of(RETAIN_CLASS_REFERENCE, SHOW_HIDDEN_FRAMES))
.walk(
frames -> frames.skip(2) // Skip this method and its caller
.filter(frame -> skipInternalPackages.contains(frame.getDeclaringClass().getPackageName()) == false)
.findFirst()
.map(StackWalker.StackFrame::getDeclaringClass)
);
return callerClassIfAny.orElse(NO_CLASS);
}
|
Why would we write this instead of using {@link StackWalker#getCallerClass()}?
Because that method throws {@link IllegalCallerException} if called from the "outermost frame",
which includes at least some cases of a method called from a native frame.
@return the class that called the method which called this; or {@link #NO_CLASS} from the outermost frame.
|
java
|
libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/Util.java
| 37
|
[] | true
| 1
| 6.88
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
CreateJobObjectW
|
Handle CreateJobObjectW();
|
Creates or opens a new job object
https://msdn.microsoft.com/en-us/library/windows/desktop/ms682409%28v=vs.85%29.aspx
Note: the two params to this are omitted because all implementations pass null for them both
@return job handle if the function succeeds
|
java
|
libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/Kernel32Library.java
| 127
|
[] |
Handle
| true
| 1
| 6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
getMetaAnnotationTypes
|
private Set<String> getMetaAnnotationTypes(MergedAnnotation<Annotation> mergedAnnotation) {
Set<String> result = MergedAnnotations.from(mergedAnnotation.getType()).stream()
.map(metaAnnotation -> metaAnnotation.getType().getName())
.collect(Collectors.toCollection(LinkedHashSet::new));
return (result.isEmpty() ? Collections.emptySet() : result);
}
|
Derive a bean name from one of the annotations on the class.
@param annotatedDef the annotation-aware bean definition
@return the bean name, or {@code null} if none is found
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/AnnotationBeanNameGenerator.java
| 173
|
[
"mergedAnnotation"
] | true
| 2
| 7.92
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
replay
|
public static Log replay(Log source, Log destination) {
if (source instanceof DeferredLog deferredLog) {
deferredLog.replayTo(destination);
}
return destination;
}
|
Replay from a source log to a destination log when the source is deferred.
@param source the source logger
@param destination the destination logger
@return the destination
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/DeferredLog.java
| 243
|
[
"source",
"destination"
] |
Log
| true
| 2
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
get_authorized_dag_ids
|
def get_authorized_dag_ids(
self,
*,
user: T,
method: ResourceMethod = "GET",
session: Session = NEW_SESSION,
) -> set[str]:
"""
Get DAGs the user has access to.
:param user: the user
:param method: the method to filter on
:param session: the session
"""
stmt = (
select(DagModel.dag_id, dag_bundle_team_association_table.c.team_name)
.join(DagBundleModel, DagModel.bundle_name == DagBundleModel.name)
.join(
dag_bundle_team_association_table,
DagBundleModel.name == dag_bundle_team_association_table.c.dag_bundle_name,
isouter=True,
)
)
rows = session.execute(stmt).all()
dags_by_team: dict[str | None, set[str]] = defaultdict(set)
for dag_id, team_name in rows:
dags_by_team[team_name].add(dag_id)
dag_ids: set[str] = set()
for team_name, team_dag_ids in dags_by_team.items():
dag_ids.update(
self.filter_authorized_dag_ids(
dag_ids=team_dag_ids, user=user, method=method, team_name=team_name
)
)
return dag_ids
|
Get DAGs the user has access to.
:param user: the user
:param method: the method to filter on
:param session: the session
|
python
|
airflow-core/src/airflow/api_fastapi/auth/managers/base_auth_manager.py
| 512
|
[
"self",
"user",
"method",
"session"
] |
set[str]
| true
| 3
| 7.04
|
apache/airflow
| 43,597
|
sphinx
| false
|
isFallback
|
private boolean isFallback(String beanName) {
String transformedBeanName = transformedBeanName(beanName);
if (containsBeanDefinition(transformedBeanName)) {
return getMergedLocalBeanDefinition(transformedBeanName).isFallback();
}
return (getParentBeanFactory() instanceof DefaultListableBeanFactory parent &&
parent.isFallback(transformedBeanName));
}
|
Return whether the bean definition for the given bean name has been
marked as a fallback bean.
@param beanName the name of the bean
@since 6.2
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/DefaultListableBeanFactory.java
| 2,196
|
[
"beanName"
] | true
| 3
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
getNewPatchDevVersion
|
async function getNewPatchDevVersion(packages: Packages, patchBranch: string): Promise<string> {
const patchMajorMinor = getSemverFromPatchBranch(patchBranch)
if (!patchMajorMinor) {
throw new Error(`Could not get major and minor for ${patchBranch}`)
}
const currentPatch = await getCurrentPatchForPatchVersions(patchMajorMinor)
const newPatch = currentPatch + 1
const newVersion = `${patchMajorMinor.major}.${patchMajorMinor.minor}.${newPatch}`
const versions = [...(await getAllVersionsPublishedFor(packages, 'dev', newVersion))]
const maxIncrement = getMaxPatchVersionIncrement(versions)
return `${newVersion}-dev.${maxIncrement + 1}`
}
|
Takes the max dev version + 1
For now supporting X.Y.Z-dev.#
@param packages Local package definitions
|
typescript
|
scripts/ci/publish.ts
| 307
|
[
"packages",
"patchBranch"
] | true
| 2
| 6.72
|
prisma/prisma
| 44,834
|
jsdoc
| true
|
|
HasExtensionOrUnknown
|
bool HasExtensionOrUnknown(const upb_Message* msg,
const upb_MiniTableExtension* eid) {
MessageLock msg_lock(msg);
if (upb_Message_HasExtension(msg, eid)) return true;
const uint32_t number = upb_MiniTableExtension_Number(eid);
return upb_Message_FindUnknown(msg, number, 0).status == kUpb_FindUnknown_Ok;
}
|
MessageLock(msg) acquires lock on msg when constructed and releases it when
destroyed.
|
cpp
|
hpb/internal/message_lock.cc
| 55
|
[] | true
| 2
| 6.24
|
protocolbuffers/protobuf
| 69,904
|
doxygen
| false
|
|
synchronizedTable
|
@J2ktIncompatible // Synchronized
public static <R extends @Nullable Object, C extends @Nullable Object, V extends @Nullable Object>
Table<R, C, V> synchronizedTable(Table<R, C, V> table) {
return Synchronized.table(table, null);
}
|
Returns a synchronized (thread-safe) table backed by the specified table. In order to guarantee
serial access, it is critical that <b>all</b> access to the backing table is accomplished
through the returned table.
<p>It is imperative that the user manually synchronize on the returned table when accessing any
of its collection views:
{@snippet :
Table<R, C, V> table = Tables.synchronizedTable(HashBasedTable.create());
...
Map<C, V> row = table.row(rowKey); // Needn't be in synchronized block
...
synchronized (table) { // Synchronizing on table, not row!
Iterator<Entry<C, V>> i = row.entrySet().iterator(); // Must be in synchronized block
while (i.hasNext()) {
foo(i.next());
}
}
}
<p>Failure to follow this advice may result in non-deterministic behavior.
<p>The returned table will be serializable if the specified table is serializable.
@param table the table to be wrapped in a synchronized view
@return a synchronized view of the specified table
@since 22.0
|
java
|
android/guava/src/com/google/common/collect/Tables.java
| 690
|
[
"table"
] | true
| 1
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
getExpectedArgumentLength
|
function getExpectedArgumentLength(msg) {
let expectedLength = 0;
const regex = /%[dfijoOs]/g;
while (RegExpPrototypeExec(regex, msg) !== null) expectedLength++;
return expectedLength;
}
|
This function removes unnecessary frames from Node.js core errors.
@template {(...args: unknown[]) => unknown} T
@param {T} fn
@returns {T}
|
javascript
|
lib/internal/errors.js
| 588
|
[
"msg"
] | false
| 2
| 6.08
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
equals
|
@Override
public boolean equals(final Object obj) {
return obj instanceof MutableFloat
&& Float.floatToIntBits(((MutableFloat) obj).value) == Float.floatToIntBits(value);
}
|
Compares this object against some other object. The result is {@code true} if and only if the argument is not {@code null} and is a {@link Float} object
that represents a {@code float} that has the identical bit pattern to the bit pattern of the {@code float} represented by this object. For this purpose,
two float values are considered to be the same if and only if the method {@link Float#floatToIntBits(float)}returns the same int value when applied to
each.
<p>
Note that in most cases, for two instances of class {@link Float},{@code f1} and {@code f2}, the value of {@code f1.equals(f2)} is {@code true} if and
only if:
</p>
<pre>
f1.floatValue() == f2.floatValue()
</pre>
<p>
also has the value {@code true}. However, there are two exceptions:
</p>
<ul>
<li>If {@code f1} and {@code f2} both represent {@code Float.NaN}, then the {@code equals} method returns {@code true}, even though
{@code Float.NaN == Float.NaN} has the value {@code false}.</li>
<li>If {@code f1} represents {@code +0.0f} while {@code f2} represents {@code -0.0f}, or vice versa, the {@code equal} test has the value {@code false},
even though {@code 0.0f == -0.0f} has the value {@code true}.</li>
</ul>
<p>
This definition allows hashtables to operate properly.
</p>
@param obj the object to compare with, null returns false.
@return {@code true} if the objects are the same; {@code false} otherwise.
@see Float#floatToIntBits(float)
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableFloat.java
| 203
|
[
"obj"
] | true
| 2
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
dict_hash
|
def dict_hash(dictionary: dict[str, Any], dedent_help_strings: bool = True, sort_opts: bool = True) -> str:
"""
MD5 hash of a dictionary of configuration for click.
Sorted and dumped via json to account for random sequence of keys in the dictionary. Also it
implements a few corrections to the dict because click does not always keep the same sorting order in
options or produced differently indented help strings.
:param dictionary: dictionary to hash
:param dedent_help_strings: whether to dedent help strings before hashing
:param sort_opts: whether to sort options before hashing
"""
if dedent_help_strings:
dedent_help(dictionary)
if sort_opts:
recursively_sort_opts(dictionary)
# noinspection InsecureHash
dhash = hashlib.md5()
try:
encoded = json.dumps(dictionary, sort_keys=True, default=vars).encode()
except TypeError:
get_console().print(dictionary)
raise
dhash.update(encoded)
return dhash.hexdigest()
|
MD5 hash of a dictionary of configuration for click.
Sorted and dumped via json to account for random sequence of keys in the dictionary. Also it
implements a few corrections to the dict because click does not always keep the same sorting order in
options or produced differently indented help strings.
:param dictionary: dictionary to hash
:param dedent_help_strings: whether to dedent help strings before hashing
:param sort_opts: whether to sort options before hashing
|
python
|
dev/breeze/src/airflow_breeze/commands/setup_commands.py
| 290
|
[
"dictionary",
"dedent_help_strings",
"sort_opts"
] |
str
| true
| 3
| 7.2
|
apache/airflow
| 43,597
|
sphinx
| false
|
optInt
|
public int optInt(String name, int fallback) {
Object object = opt(name);
Integer result = JSON.toInteger(object);
return result != null ? result : fallback;
}
|
Returns the value mapped by {@code name} if it exists and is an int or can be
coerced to an int. Returns {@code fallback} otherwise.
@param name the name of the property
@param fallback a fallback value
@return the value or {@code fallback}
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONObject.java
| 500
|
[
"name",
"fallback"
] | true
| 2
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
classify_jobs
|
def classify_jobs(
all_job_names: list[str], sha_grid: Any, filtered_jobs_names: set[str]
) -> tuple[list[JobStatus], list[Any]]:
"""
Creates Job Statuses which has the logic for if need to alert or if there's flaky jobs.
Classifies jobs into jobs to alert on and flaky jobs.
:param all_job_names: list of all job names as returned by the HUD
:param sha_grid: list of all job data as returned by the HUD (parallel index to all_job_names)
:param filtered_jobs_names: set of job names to actually consider
:return:
"""
job_data = map_job_data(all_job_names, sha_grid)
job_statuses: list[JobStatus] = []
for job in job_data:
job_statuses.append(JobStatus(job, job_data[job]))
jobs_to_alert_on = []
flaky_jobs = []
for job_status in job_statuses:
if job_status.job_name not in filtered_jobs_names:
continue
if job_status.should_alert():
jobs_to_alert_on.append(job_status)
flaky_jobs.extend(job_status.flaky_jobs)
return jobs_to_alert_on, flaky_jobs
|
Creates Job Statuses which has the logic for if need to alert or if there's flaky jobs.
Classifies jobs into jobs to alert on and flaky jobs.
:param all_job_names: list of all job names as returned by the HUD
:param sha_grid: list of all job data as returned by the HUD (parallel index to all_job_names)
:param filtered_jobs_names: set of job names to actually consider
:return:
|
python
|
tools/alerts/create_alerts.py
| 206
|
[
"all_job_names",
"sha_grid",
"filtered_jobs_names"
] |
tuple[list[JobStatus], list[Any]]
| true
| 5
| 8.08
|
pytorch/pytorch
| 96,034
|
sphinx
| false
|
total_seconds
|
def total_seconds(self) -> npt.NDArray[np.float64]:
"""
Return total duration of each element expressed in seconds.
This method is available directly on TimedeltaArray, TimedeltaIndex
and on Series containing timedelta values under the ``.dt`` namespace.
Returns
-------
ndarray, Index or Series
When the calling object is a TimedeltaArray, the return type
is ndarray. When the calling object is a TimedeltaIndex,
the return type is an Index with a float64 dtype. When the calling object
is a Series, the return type is Series of type `float64` whose
index is the same as the original.
See Also
--------
datetime.timedelta.total_seconds : Standard library version
of this method.
TimedeltaIndex.components : Return a DataFrame with components of
each Timedelta.
Examples
--------
**Series**
>>> s = pd.Series(pd.to_timedelta(np.arange(5), unit="D"))
>>> s
0 0 days
1 1 days
2 2 days
3 3 days
4 4 days
dtype: timedelta64[ns]
>>> s.dt.total_seconds()
0 0.0
1 86400.0
2 172800.0
3 259200.0
4 345600.0
dtype: float64
**TimedeltaIndex**
>>> idx = pd.to_timedelta(np.arange(5), unit="D")
>>> idx
TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[us]', freq=None)
>>> idx.total_seconds()
Index([0.0, 86400.0, 172800.0, 259200.0, 345600.0], dtype='float64')
"""
pps = periods_per_second(self._creso)
return self._maybe_mask_results(self.asi8 / pps, fill_value=None)
|
Return total duration of each element expressed in seconds.
This method is available directly on TimedeltaArray, TimedeltaIndex
and on Series containing timedelta values under the ``.dt`` namespace.
Returns
-------
ndarray, Index or Series
When the calling object is a TimedeltaArray, the return type
is ndarray. When the calling object is a TimedeltaIndex,
the return type is an Index with a float64 dtype. When the calling object
is a Series, the return type is Series of type `float64` whose
index is the same as the original.
See Also
--------
datetime.timedelta.total_seconds : Standard library version
of this method.
TimedeltaIndex.components : Return a DataFrame with components of
each Timedelta.
Examples
--------
**Series**
>>> s = pd.Series(pd.to_timedelta(np.arange(5), unit="D"))
>>> s
0 0 days
1 1 days
2 2 days
3 3 days
4 4 days
dtype: timedelta64[ns]
>>> s.dt.total_seconds()
0 0.0
1 86400.0
2 172800.0
3 259200.0
4 345600.0
dtype: float64
**TimedeltaIndex**
>>> idx = pd.to_timedelta(np.arange(5), unit="D")
>>> idx
TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[us]', freq=None)
>>> idx.total_seconds()
Index([0.0, 86400.0, 172800.0, 259200.0, 345600.0], dtype='float64')
|
python
|
pandas/core/arrays/timedeltas.py
| 770
|
[
"self"
] |
npt.NDArray[np.float64]
| true
| 1
| 6.96
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
_format_and_log_reordering_stats
|
def _format_and_log_reordering_stats(
stats: dict[BaseSchedulerNode, ReorderInfo],
head: BaseSchedulerNode,
next_dict: dict[BaseSchedulerNode, Optional[BaseSchedulerNode]],
original_snodes_num: int,
peak_memory: int,
name_to_freeable_input_buf: dict,
graph_outputs: OrderedSet[str],
) -> list[BaseSchedulerNode]:
"""
Format reordering statistics, log them, and return final node list.
Computes improvement metrics, creates a formatted table (using tabulate if
available), validates the reordered node count, recalculates peak memory,
and logs all information.
Args:
stats: Per-node reordering statistics
head: Head of the reordered linked list
next_dict: Linked list next pointers
original_snodes_num: Original number of nodes (for validation)
peak_memory: Initial peak memory before reordering
name_to_freeable_input_buf: Buffer memory tracking info
graph_outputs: Graph output names
Returns:
Final reordered list of scheduler nodes
"""
node_stats = stats
improvement = {snode: node_stats[snode].improvement for snode in node_stats}
total_improvement = sum([improvement[snode] for snode in improvement])
total_moves = sum([node_stats[snode].moves for snode in node_stats])
reorder_log_str = (
f"reorder_communication_preserving_peak_memory improved overlap by {total_improvement} ns"
f" after {total_moves} reorders.\n"
)
headers = [
"Collective node",
"comm_time(us)",
"comp_time(us)",
"initial exposed(us)",
"final exposed(us)",
"improvement(us)",
"limiting factor",
"moves",
"grouped",
"grouped_info",
"overlap_info",
]
rows = [
[
node_summary(snode),
node_info.comm_time / 1e3,
node_info.comp_time / 1e3,
node_info.initial_exposed / 1e3,
node_info.final_exposed / 1e3,
node_info.improvement / 1e3,
node_info.limiting_factor,
node_info.moves,
node_info.grouped,
node_info.grouped_info,
node_info.overlap_info,
]
for snode, node_info in node_stats.items()
]
if importlib.util.find_spec("tabulate"):
# pyrefly: ignore[import-error]
from tabulate import tabulate
reorder_log_str += tabulate(
rows,
headers=headers,
)
else:
reorder_log_str += (
"Please `pip install tabulate` to nicely render overlap stats.\n"
)
reorder_log_str += str(headers) + "\n"
reorder_log_str += "\n".join(map(str, rows))
new_snodes = _group_nodes_from_linked_list(head, None, next_dict)
assert len(new_snodes) == original_snodes_num
new_peak_memory, _, _, _ = estimate_peak_memory_allocfree(
new_snodes, name_to_freeable_input_buf, graph_outputs
)
reorder_log_str += f"\n peak_memory_before:{peak_memory}"
reorder_log_str += f"\n peak_memory_after:{new_peak_memory}"
overlap_log.info(reorder_log_str)
trace_structured(
"artifact",
metadata_fn=lambda: {
"name": "reorder_communication_preserving_peak_memory",
"encoding": "string",
},
payload_fn=lambda: reorder_log_str,
)
return new_snodes
|
Format reordering statistics, log them, and return final node list.
Computes improvement metrics, creates a formatted table (using tabulate if
available), validates the reordered node count, recalculates peak memory,
and logs all information.
Args:
stats: Per-node reordering statistics
head: Head of the reordered linked list
next_dict: Linked list next pointers
original_snodes_num: Original number of nodes (for validation)
peak_memory: Initial peak memory before reordering
name_to_freeable_input_buf: Buffer memory tracking info
graph_outputs: Graph output names
Returns:
Final reordered list of scheduler nodes
|
python
|
torch/_inductor/comms.py
| 756
|
[
"stats",
"head",
"next_dict",
"original_snodes_num",
"peak_memory",
"name_to_freeable_input_buf",
"graph_outputs"
] |
list[BaseSchedulerNode]
| true
| 3
| 7.36
|
pytorch/pytorch
| 96,034
|
google
| false
|
shouldInvokeOnReturnValueOf
|
private boolean shouldInvokeOnReturnValueOf(Method method, @Nullable Object returnValue) {
Class<?> type = getDiscoveredReturningType();
Type genericType = getDiscoveredReturningGenericType();
// If we aren't dealing with a raw type, check if generic parameters are assignable.
return (matchesReturnValue(type, method, returnValue) &&
(genericType == null || genericType == type ||
TypeUtils.isAssignable(genericType, method.getGenericReturnType())));
}
|
Following AspectJ semantics, if a returning clause was specified, then the
advice is only invoked if the returned value is an instance of the given
returning type and generic type parameters, if any, match the assignment
rules. If the returning type is Object, the advice is *always* invoked.
@param returnValue the return value of the target method
@return whether to invoke the advice method for the given return value
|
java
|
spring-aop/src/main/java/org/springframework/aop/aspectj/AspectJAfterReturningAdvice.java
| 80
|
[
"method",
"returnValue"
] | true
| 4
| 7.92
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
to_numpy
|
def to_numpy(
self,
dtype: npt.DTypeLike | None = None,
copy: bool = False,
na_value: object = lib.no_default,
) -> np.ndarray:
"""
Convert the DataFrame to a NumPy array.
By default, the dtype of the returned array will be the common NumPy
dtype of all types in the DataFrame. For example, if the dtypes are
``float16`` and ``float32``, the results dtype will be ``float32``.
This may require copying data and coercing values, which may be
expensive.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
Whether to ensure that the returned value is not a view on
another array. Note that ``copy=False`` does not *ensure* that
``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that
a copy is made, even if not strictly necessary.
na_value : Any, optional
The value to use for missing values. The default value depends
on `dtype` and the dtypes of the DataFrame columns.
Returns
-------
numpy.ndarray
The NumPy array representing the values in the DataFrame.
See Also
--------
Series.to_numpy : Similar method for Series.
Examples
--------
>>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to
be used.
>>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})
>>> df.to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will
have object dtype.
>>> df["C"] = pd.date_range("2000", periods=2)
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
"""
if dtype is not None:
dtype = np.dtype(dtype)
result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value)
if result.dtype is not dtype:
result = np.asarray(result, dtype=dtype)
return result
|
Convert the DataFrame to a NumPy array.
By default, the dtype of the returned array will be the common NumPy
dtype of all types in the DataFrame. For example, if the dtypes are
``float16`` and ``float32``, the results dtype will be ``float32``.
This may require copying data and coercing values, which may be
expensive.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
Whether to ensure that the returned value is not a view on
another array. Note that ``copy=False`` does not *ensure* that
``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that
a copy is made, even if not strictly necessary.
na_value : Any, optional
The value to use for missing values. The default value depends
on `dtype` and the dtypes of the DataFrame columns.
Returns
-------
numpy.ndarray
The NumPy array representing the values in the DataFrame.
See Also
--------
Series.to_numpy : Similar method for Series.
Examples
--------
>>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to
be used.
>>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})
>>> df.to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will
have object dtype.
>>> df["C"] = pd.date_range("2000", periods=2)
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
|
python
|
pandas/core/frame.py
| 2,013
|
[
"self",
"dtype",
"copy",
"na_value"
] |
np.ndarray
| true
| 3
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
prepareBeanFactory
|
protected void prepareBeanFactory(ConfigurableListableBeanFactory beanFactory) {
// Tell the internal bean factory to use the context's class loader etc.
beanFactory.setBeanClassLoader(getClassLoader());
beanFactory.setBeanExpressionResolver(new StandardBeanExpressionResolver(beanFactory.getBeanClassLoader()));
beanFactory.addPropertyEditorRegistrar(new ResourceEditorRegistrar(this, getEnvironment()));
// Configure the bean factory with context callbacks.
beanFactory.addBeanPostProcessor(new ApplicationContextAwareProcessor(this));
beanFactory.ignoreDependencyInterface(EnvironmentAware.class);
beanFactory.ignoreDependencyInterface(EmbeddedValueResolverAware.class);
beanFactory.ignoreDependencyInterface(ResourceLoaderAware.class);
beanFactory.ignoreDependencyInterface(ApplicationEventPublisherAware.class);
beanFactory.ignoreDependencyInterface(MessageSourceAware.class);
beanFactory.ignoreDependencyInterface(ApplicationContextAware.class);
beanFactory.ignoreDependencyInterface(ApplicationStartupAware.class);
// BeanFactory interface not registered as resolvable type in a plain factory.
// MessageSource registered (and found for autowiring) as a bean.
beanFactory.registerResolvableDependency(BeanFactory.class, beanFactory);
beanFactory.registerResolvableDependency(ResourceLoader.class, this);
beanFactory.registerResolvableDependency(ApplicationEventPublisher.class, this);
beanFactory.registerResolvableDependency(ApplicationContext.class, this);
// Register early post-processor for detecting inner beans as ApplicationListeners.
beanFactory.addBeanPostProcessor(new ApplicationListenerDetector(this));
// Detect a LoadTimeWeaver and prepare for weaving, if found.
if (!NativeDetector.inNativeImage() && beanFactory.containsBean(LOAD_TIME_WEAVER_BEAN_NAME)) {
beanFactory.addBeanPostProcessor(new LoadTimeWeaverAwareProcessor(beanFactory));
// Set a temporary ClassLoader for type matching.
beanFactory.setTempClassLoader(new ContextTypeMatchClassLoader(beanFactory.getBeanClassLoader()));
}
// Register default environment beans.
if (!beanFactory.containsLocalBean(ENVIRONMENT_BEAN_NAME)) {
beanFactory.registerSingleton(ENVIRONMENT_BEAN_NAME, getEnvironment());
}
if (!beanFactory.containsLocalBean(SYSTEM_PROPERTIES_BEAN_NAME)) {
beanFactory.registerSingleton(SYSTEM_PROPERTIES_BEAN_NAME, getEnvironment().getSystemProperties());
}
if (!beanFactory.containsLocalBean(SYSTEM_ENVIRONMENT_BEAN_NAME)) {
beanFactory.registerSingleton(SYSTEM_ENVIRONMENT_BEAN_NAME, getEnvironment().getSystemEnvironment());
}
if (!beanFactory.containsLocalBean(APPLICATION_STARTUP_BEAN_NAME)) {
beanFactory.registerSingleton(APPLICATION_STARTUP_BEAN_NAME, getApplicationStartup());
}
}
|
Configure the factory's standard context characteristics,
such as the context's ClassLoader and post-processors.
@param beanFactory the BeanFactory to configure
|
java
|
spring-context/src/main/java/org/springframework/context/support/AbstractApplicationContext.java
| 728
|
[
"beanFactory"
] |
void
| true
| 7
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
cloneByPath
|
function cloneByPath(object, path) {
path = toPath(path);
var index = -1,
length = path.length,
lastIndex = length - 1,
result = clone(Object(object)),
nested = result;
while (nested != null && ++index < length) {
var key = path[index],
value = nested[key];
if (value != null &&
!(isFunction(value) || isError(value) || isWeakMap(value))) {
nested[key] = clone(index == lastIndex ? value : Object(value));
}
nested = nested[key];
}
return result;
}
|
Creates a clone of `object` by `path`.
@private
@param {Object} object The object to clone.
@param {Array|string} path The path to clone by.
@returns {Object} Returns the cloned object.
|
javascript
|
fp/_baseConvert.js
| 348
|
[
"object",
"path"
] | false
| 8
| 6.24
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.