function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
of
|
/**
 * Creates an immutable triple of three objects, inferring the generic types.
 *
 * @param <L> the left element type.
 * @param <M> the middle element type.
 * @param <R> the right element type.
 * @param left the left element, may be null.
 * @param middle the middle element, may be null.
 * @param right the right element, may be null.
 * @return an immutable triple formed from the three parameters, not null.
 */
public static <L, M, R> ImmutableTriple<L, M, R> of(final L left, final M middle, final R right) {
    // Use short-circuit || consistently (the original mixed in a non-short-circuit |).
    // All three operands are side-effect-free null checks, so behavior is unchanged.
    return left != null || middle != null || right != null ? new ImmutableTriple<>(left, middle, right) : nullTriple();
}
|
Creates an immutable triple of three objects inferring the generic types.
@param <L> the left element type.
@param <M> the middle element type.
@param <R> the right element type.
@param left the left element, may be null.
@param middle the middle element, may be null.
@param right the right element, may be null.
@return an immutable triple formed from the three parameters, not null.
|
java
|
src/main/java/org/apache/commons/lang3/tuple/ImmutableTriple.java
| 96
|
[
"left",
"middle",
"right"
] | true
| 3
| 8.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
newEnumMap
|
/**
 * Creates an {@code EnumMap} with the same mappings as the specified map.
 *
 * <p><b>Note:</b> this method is now unnecessary and should be treated as deprecated; the
 * {@code EnumMap} constructor with "diamond" syntax does the same thing directly.
 *
 * @param map the map from which to initialize the new {@code EnumMap}
 * @return a new {@code EnumMap} initialized with the mappings from {@code map}
 * @throws IllegalArgumentException if {@code map} is not an {@code EnumMap} instance and
 *     contains no mappings (thrown by the {@code EnumMap} constructor)
 */
public static <K extends Enum<K>, V extends @Nullable Object> EnumMap<K, V> newEnumMap(
    Map<K, ? extends V> map) {
  return new EnumMap<>(map);
}
|
Creates an {@code EnumMap} with the same mappings as the specified map.
<p><b>Note:</b> this method is now unnecessary and should be treated as deprecated. Instead,
use the {@code EnumMap} constructor directly, taking advantage of <a
href="https://docs.oracle.com/javase/tutorial/java/generics/genTypeInference.html#type-inference-instantiation">"diamond"
syntax</a>.
@param map the map from which to initialize this {@code EnumMap}
@return a new {@code EnumMap} initialized with the mappings from {@code map}
@throws IllegalArgumentException if {@code m} is not an {@code EnumMap} instance and contains
no mappings
|
java
|
android/guava/src/com/google/common/collect/Maps.java
| 439
|
[
"map"
] | true
| 1
| 6.16
|
google/guava
| 51,352
|
javadoc
| false
|
|
process_response
|
def process_response(self, ctx: AppContext, response: Response) -> Response:
    """Modify the response object before it is sent to the WSGI server.

    By default this invokes all the :meth:`after_request` decorated
    functions, then saves the session (unless it is a null session).

    .. versionchanged:: 0.5
        As of Flask 0.5 the functions registered for after request
        execution are called in reverse order of registration.

    :param response: a :attr:`response_class` object.
    :return: a new response object or the same, has to be an
        instance of :attr:`response_class`.
    """
    # Hooks registered directly on the request context run first.
    for hook in ctx._after_request_functions:
        response = self.ensure_sync(hook)(response)
    # Then blueprint-scoped hooks, finishing with app-level hooks
    # (key ``None``); each group runs in reverse registration order.
    for scope in chain(ctx.request.blueprints, (None,)):
        hooks = self.after_request_funcs.get(scope)
        if hooks is None:
            continue
        for hook in reversed(hooks):
            response = self.ensure_sync(hook)(response)
    # Persist the session last so hooks may still mutate it.
    session = ctx.session
    if not self.session_interface.is_null_session(session):
        self.session_interface.save_session(self, session, response)
    return response
|
Can be overridden in order to modify the response object
before it's sent to the WSGI server. By default this will
call all the :meth:`after_request` decorated functions.
.. versionchanged:: 0.5
As of Flask 0.5 the functions registered for after request
execution are called in reverse order of registration.
:param response: a :attr:`response_class` object.
:return: a new response object or the same, has to be an
instance of :attr:`response_class`.
|
python
|
src/flask/app.py
| 1,382
|
[
"self",
"ctx",
"response"
] |
Response
| true
| 6
| 7.92
|
pallets/flask
| 70,946
|
sphinx
| false
|
throwIfGroupIdNotDefined
|
/**
 * Ensures a group id has been configured before a group-management or
 * offset-commit API is used.
 *
 * @throws InvalidGroupIdException if the configured group metadata is empty
 */
private void throwIfGroupIdNotDefined() {
    final boolean groupIdDefined = !groupMetadata.get().isEmpty();
    if (groupIdDefined) {
        return;
    }
    throw new InvalidGroupIdException("To use the group management or offset commit APIs, you must " +
            "provide a valid " + ConsumerConfig.GROUP_ID_CONFIG + " in the consumer configuration.");
}
|
Throws an InvalidGroupIdException if no valid group id has been configured for the consumer, as required by the group management and offset commit APIs.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java
| 1,192
|
[] |
void
| true
| 2
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
_ninja_build_file
|
def _ninja_build_file(self) -> str:
r"""Returns the path to build.ninja.
Returns:
string: The path to build.ninja.
"""
return os.path.join(self.build_dir, "build.ninja")
|
r"""Returns the path to build.ninja.
Returns:
string: The path to build.ninja.
|
python
|
tools/setup_helpers/cmake.py
| 80
|
[
"self"
] |
str
| true
| 1
| 6.56
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
_check_parser
|
def _check_parser(parser: str) -> None:
    """
    Validate that ``parser`` names a registered parser.

    Parameters
    ----------
    parser : str
        Name of the parser to look up in ``PARSERS``.

    Raises
    ------
    KeyError
        * If an invalid parser is passed
    """
    if parser in PARSERS:
        return
    raise KeyError(
        f"Invalid parser '{parser}' passed, valid parsers are {PARSERS.keys()}"
    )
|
Make sure a valid parser is passed.
Parameters
----------
parser : str
Raises
------
KeyError
* If an invalid parser is passed
|
python
|
pandas/core/computation/eval.py
| 83
|
[
"parser"
] |
None
| true
| 2
| 6.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
exit
|
/**
 * Static helper that can be used to exit a {@link SpringApplication} and obtain a
 * code indicating success (0) or otherwise. Does not throw exceptions but prints
 * stack traces of any encountered. Applies the specified
 * {@link ExitCodeGenerator ExitCodeGenerators} in addition to any Spring beans
 * that implement {@link ExitCodeGenerator}.
 * @param context the context to close if possible
 * @param exitCodeGenerators exit code generators
 * @return the outcome (0 if successful)
 */
public static int exit(ApplicationContext context, ExitCodeGenerator... exitCodeGenerators) {
    Assert.notNull(context, "'context' must not be null");
    int exitCode = 0;
    try {
        try {
            // Combine caller-supplied generators with any ExitCodeGenerator
            // beans registered in the context.
            ExitCodeGenerators generators = new ExitCodeGenerators();
            Collection<ExitCodeGenerator> beans = context.getBeansOfType(ExitCodeGenerator.class).values();
            generators.addAll(exitCodeGenerators);
            generators.addAll(beans);
            exitCode = generators.getExitCode();
            if (exitCode != 0) {
                // Broadcast the code so listeners can react before shutdown.
                context.publishEvent(new ExitCodeEvent(context, exitCode));
            }
        }
        finally {
            // Always attempt to close the context, even if a generator failed.
            close(context);
        }
    }
    catch (Exception ex) {
        ex.printStackTrace();
        // Keep any non-zero code already computed; otherwise report generic failure.
        exitCode = (exitCode != 0) ? exitCode : 1;
    }
    return exitCode;
}
|
Static helper that can be used to exit a {@link SpringApplication} and obtain a
code indicating success (0) or otherwise. Does not throw exceptions but should
print stack traces of any encountered. Applies the specified
{@link ExitCodeGenerator ExitCodeGenerators} in addition to any Spring beans that
implement {@link ExitCodeGenerator}. When multiple generators are available, the
first non-zero exit code is used. Generators are ordered based on their
{@link Ordered} implementation and {@link Order @Order} annotation.
@param context the context to close if possible
@param exitCodeGenerators exit code generators
@return the outcome (0 if successful)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringApplication.java
| 1,396
|
[
"context"
] | true
| 4
| 7.44
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
maybeSendAndPollTransactionalRequest
|
/**
 * Attempts to send the next pending transactional request, or polls while an
 * in-flight transactional request or coordinator lookup completes.
 *
 * @return true if a transactional request was sent or polled, or if a
 *         FindCoordinator request was enqueued; false if the transaction
 *         manager had no request to send
 */
private boolean maybeSendAndPollTransactionalRequest() {
    if (transactionManager.hasInFlightRequest()) {
        // as long as there are outstanding transactional requests, we simply wait for them to return
        client.poll(retryBackoffMs, time.milliseconds());
        return true;
    }
    // Abort any undrained batches before picking up the next request.
    if (transactionManager.hasAbortableError()) {
        accumulator.abortUndrainedBatches(transactionManager.lastError());
    } else if (transactionManager.isAborting()) {
        accumulator.abortUndrainedBatches(new TransactionAbortedException());
    }
    TransactionManager.TxnRequestHandler nextRequestHandler = transactionManager.nextRequest(accumulator.hasIncomplete());
    if (nextRequestHandler == null)
        return false;
    AbstractRequest.Builder<?> requestBuilder = nextRequestHandler.requestBuilder();
    Node targetNode = null;
    try {
        // Coordinator-bound requests go to the transaction/group coordinator;
        // others go to the least-loaded node.
        FindCoordinatorRequest.CoordinatorType coordinatorType = nextRequestHandler.coordinatorType();
        targetNode = coordinatorType != null ?
                transactionManager.coordinator(coordinatorType) :
                client.leastLoadedNode(time.milliseconds()).node();
        if (targetNode != null) {
            if (!awaitNodeReady(targetNode, coordinatorType)) {
                log.trace("Target node {} not ready within request timeout, will retry when node is ready.", targetNode);
                maybeFindCoordinatorAndRetry(nextRequestHandler);
                return true;
            }
        } else if (coordinatorType != null) {
            log.trace("Coordinator not known for {}, will retry {} after finding coordinator.", coordinatorType, requestBuilder.apiKey());
            maybeFindCoordinatorAndRetry(nextRequestHandler);
            return true;
        } else {
            // Log message fixed: previously read "retry when until a node is ready".
            log.trace("No nodes available to send requests, will poll and retry until a node is ready.");
            transactionManager.retry(nextRequestHandler);
            client.poll(retryBackoffMs, time.milliseconds());
            return true;
        }
        if (nextRequestHandler.isRetry())
            time.sleep(nextRequestHandler.retryBackoffMs());
        long currentTimeMs = time.milliseconds();
        ClientRequest clientRequest = client.newClientRequest(targetNode.idString(), requestBuilder, currentTimeMs,
                true, requestTimeoutMs, nextRequestHandler);
        log.debug("Sending transactional request {} to node {} with correlation ID {}", requestBuilder, targetNode, clientRequest.correlationId());
        client.send(clientRequest, currentTimeMs);
        transactionManager.setInFlightCorrelationId(clientRequest.correlationId());
        client.poll(retryBackoffMs, time.milliseconds());
        return true;
    } catch (IOException e) {
        log.debug("Disconnect from {} while trying to send request {}. Going " +
                "to back off and retry.", targetNode, requestBuilder, e);
        // We break here so that we pick up the FindCoordinator request immediately.
        maybeFindCoordinatorAndRetry(nextRequestHandler);
        return true;
    }
}
|
Returns true if a transactional request is sent or polled, or if a FindCoordinator request is enqueued
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/Sender.java
| 450
|
[] | true
| 11
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
stop_pipeline
|
def stop_pipeline(
    self,
    pipeline_exec_arn: str,
    fail_if_not_running: bool = False,
) -> str:
    """
    Stop SageMaker pipeline execution.

    .. seealso::
        - :external+boto3:py:meth:`SageMaker.Client.stop_pipeline_execution`

    :param pipeline_exec_arn: Amazon Resource Name (ARN) of the pipeline execution.
        It's the ARN of the pipeline itself followed by "/execution/" and an id.
    :param fail_if_not_running: This method will raise an exception if the pipeline we're trying to stop
        is not in an "Executing" state when the call is sent (which would mean that the pipeline is
        already either stopping or stopped).
        Note that setting this to True will raise an error if the pipeline finished successfully before it
        was stopped.
    :return: Status of the pipeline execution after the operation.
        One of 'Executing'|'Stopping'|'Stopped'|'Failed'|'Succeeded'.
    """
    # Up to 5 attempts; `retries` counts down (4..0) so it doubles as the
    # "attempts remaining" value checked in the retry condition below.
    for retries in reversed(range(5)):
        try:
            self.conn.stop_pipeline_execution(PipelineExecutionArn=pipeline_exec_arn)
        except ClientError as ce:
            # this can happen if the pipeline was transitioning between steps at that moment
            if ce.response["Error"]["Code"] == "ConflictException" and retries:
                self.log.warning(
                    "Got a conflict exception when trying to stop the pipeline, "
                    "retrying %s more times. Error was: %s",
                    retries,
                    ce,
                )
                time.sleep(0.3)  # error is due to a race condition, so it should be very transient
            else:
                # we have to rely on the message to catch the right error here, because its type
                # (ValidationException) is shared with other kinds of errors (e.g. badly formatted ARN)
                if (
                    not fail_if_not_running
                    and "Only pipelines with 'Executing' status can be stopped"
                    in ce.response["Error"]["Message"]
                ):
                    self.log.warning("Cannot stop pipeline execution, as it was not running: %s", ce)
                    break
                self.log.error(ce)
                raise
        else:
            # Stop call succeeded; no more retries needed.
            break
    # Report the execution status observed after the stop attempt.
    res = self.describe_pipeline_exec(pipeline_exec_arn)
    return res["PipelineExecutionStatus"]
|
Stop SageMaker pipeline execution.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.stop_pipeline_execution`
:param pipeline_exec_arn: Amazon Resource Name (ARN) of the pipeline execution.
It's the ARN of the pipeline itself followed by "/execution/" and an id.
:param fail_if_not_running: This method will raise an exception if the pipeline we're trying to stop
is not in an "Executing" state when the call is sent (which would mean that the pipeline is
already either stopping or stopped).
Note that setting this to True will raise an error if the pipeline finished successfully before it
was stopped.
:return: Status of the pipeline execution after the operation.
One of 'Executing'|'Stopping'|'Stopped'|'Failed'|'Succeeded'.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/sagemaker.py
| 1,134
|
[
"self",
"pipeline_exec_arn",
"fail_if_not_running"
] |
str
| true
| 8
| 7.76
|
apache/airflow
| 43,597
|
sphinx
| false
|
parse_schedule_interval
|
def parse_schedule_interval(time_str: str):
    """
    Parse a schedule-interval string such as ``2h13m``, ``None`` or ``@once``.

    :param time_str: A string identifying a schedule interval. (eg. 2h13m, None, @once)
    :return datetime.timedelta: A datetime.timedelta object or "@once" or None
    """
    # The two literal sentinels map to themselves (or None) without parsing.
    specials = {"None": None, "@once": "@once"}
    if time_str in specials:
        return specials[time_str]
    return parse_time_delta(time_str)
|
Parse a schedule interval string e.g. (2h13m) or "@once".
:param time_str: A string identifying a schedule interval. (eg. 2h13m, None, @once)
:return datetime.timedelta: A datetime.timedelta object or "@once" or None
|
python
|
dev/airflow_perf/dags/elastic_dag.py
| 52
|
[
"time_str"
] | true
| 3
| 6.88
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
readByte
|
/**
 * Reads a single byte, narrowing the result of {@link #readUnsignedByte()}.
 * Byte order is irrelevant for a single byte.
 *
 * @return the next byte of the input stream
 * @throws IOException if an I/O error occurs
 */
@CanIgnoreReturnValue // to skip a byte
@Override
public byte readByte() throws IOException {
    return (byte) readUnsignedByte();
}
|
Reads a byte as specified by {@link DataInputStream#readByte()}; byte order is
irrelevant for a single byte.
@return the next byte of the input stream
@throws IOException if an I/O error occurs
|
java
|
android/guava/src/com/google/common/io/LittleEndianDataInputStream.java
| 210
|
[] | true
| 1
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
_apply_tasks
|
def _apply_tasks(self, tasks, producer=None, app=None, p=None,
                 add_to_parent=None, chord=None,
                 args=None, kwargs=None, group_index=None, **options):
    """Run all the tasks in the group.

    This is used by :meth:`apply_async` to run all the tasks in the group
    and return a generator of their results.

    Arguments:
        tasks (list): List of tasks in the group.
        producer (Producer): The producer to use to publish the tasks.
        app (Celery): The Celery app instance.
        p (barrier): Barrier object to synchronize the tasks results.
        add_to_parent (bool): Accepted for signature compatibility; not
            read in this body (each task is published with
            ``add_to_parent=False``) — TODO confirm.
        chord (Signature): Chord body to attach; when None each task's own
            ``chord`` option is used instead.
        args (list): List of arguments to be prepended to
            the arguments of each task.
        kwargs (dict): Dict of keyword arguments to be merged with
            the keyword arguments of each task.
        group_index (int): Accepted for signature compatibility; not read
            in this body — TODO confirm.
        **options (dict): Options to be merged with the options of each task.

    Returns:
        generator: A generator for the AsyncResult of the tasks in the group.
    """
    # pylint: disable=redefined-outer-name
    # XXX chord is also a class in outer scope.
    app = app or self.app
    with app.producer_or_acquire(producer) as producer:
        # Iterate through tasks two at a time. If tasks is a generator,
        # we are able to tell when we are at the end by checking if
        # next_task is None. This enables us to set the chord size
        # without burning through the entire generator. See #3021.
        chord_size = 0
        tasks_shifted, tasks = itertools.tee(tasks)
        next(tasks_shifted, None)
        next_task = next(tasks_shifted, None)
        for task_index, current_task in enumerate(tasks):
            # We expect that each task must be part of the same group which
            # seems sensible enough. If that's somehow not the case we'll
            # end up messing up chord counts and there are all sorts of
            # awful race conditions to think about. We'll hope it's not!
            sig, res, group_id = current_task
            chord_obj = chord if chord is not None else sig.options.get("chord")
            # We need to check the chord size of each contributing task so
            # that when we get to the final one, we can correctly set the
            # size in the backend and the chord can be sensible completed.
            chord_size += _chord._descend(sig)
            if chord_obj is not None and next_task is None:
                # Per above, sanity check that we only saw one group
                app.backend.set_chord_size(group_id, chord_size)
            sig.apply_async(producer=producer, add_to_parent=False,
                            chord=chord_obj, args=args, kwargs=kwargs,
                            **options)
            # adding callback to result, such that it will gradually
            # fulfill the barrier.
            #
            # Using barrier.add would use result.then, but we need
            # to add the weak argument here to only create a weak
            # reference to the object.
            if p and not p.cancelled and not p.ready:
                p.size += 1
                res.then(p, weak=True)
            next_task = next(tasks_shifted, None)
            yield res  # <-- r.parent, etc set in the frozen result.
|
Run all the tasks in the group.
This is used by :meth:`apply_async` to run all the tasks in the group
and return a generator of their results.
Arguments:
tasks (list): List of tasks in the group.
producer (Producer): The producer to use to publish the tasks.
app (Celery): The Celery app instance.
p (barrier): Barrier object to synchronize the tasks results.
args (list): List of arguments to be prepended to
the arguments of each task.
kwargs (dict): Dict of keyword arguments to be merged with
the keyword arguments of each task.
**options (dict): Options to be merged with the options of each task.
Returns:
generator: A generator for the AsyncResult of the tasks in the group.
|
python
|
celery/canvas.py
| 1,742
|
[
"self",
"tasks",
"producer",
"app",
"p",
"add_to_parent",
"chord",
"args",
"kwargs",
"group_index"
] | false
| 9
| 7.36
|
celery/celery
| 27,741
|
google
| false
|
|
of
|
/**
 * Null-safe version of {@link Stream#of(Object[])}: a {@code null} array
 * yields an empty stream instead of throwing.
 *
 * @param <T> the type of stream elements.
 * @param values the elements of the new stream, may be {@code null}.
 * @return the new stream on {@code values} or {@link Stream#empty()}.
 * @since 3.13.0
 */
@SafeVarargs // Creating a stream from an array is safe
public static <T> Stream<T> of(final T... values) {
    if (values == null) {
        return Stream.empty();
    }
    return Stream.of(values);
}
|
Null-safe version of {@link Stream#of(Object[])}.
@param <T> the type of stream elements.
@param values the elements of the new stream, may be {@code null}.
@return the new stream on {@code values} or {@link Stream#empty()}.
@since 3.13.0
|
java
|
src/main/java/org/apache/commons/lang3/stream/Streams.java
| 735
|
[] | true
| 2
| 8.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
maybeCreateNewBatch
|
/**
 * Creates a new current batch if the next offset is not one higher than the
 * current batch's last offset; otherwise returns the batch unchanged.
 * When a new batch is started, the completed batch is optimised and
 * appended to {@code batches}.
 *
 * @param currentBatch the batch being accumulated
 * @param nextOffset the next offset to acknowledge
 * @param batches the output list that completed batches are appended to
 * @return the batch to continue accumulating into (possibly a fresh one)
 */
private AcknowledgementBatch maybeCreateNewBatch(AcknowledgementBatch currentBatch, Long nextOffset, List<AcknowledgementBatch> batches) {
    if (nextOffset != currentBatch.lastOffset() + 1) {
        List<AcknowledgementBatch> optimalBatches = maybeOptimiseAcknowledgeTypes(currentBatch);
        optimalBatches.forEach(batch -> {
            if (canOptimiseForSingleAcknowledgeType(batch)) {
                // If the batch had a single acknowledgement type, we optimise the array independent
                // of the number of records.
                batch.acknowledgeTypes().subList(1, batch.acknowledgeTypes().size()).clear();
            }
            batches.add(batch);
        });
        currentBatch = new AcknowledgementBatch();
        currentBatch.setFirstOffset(nextOffset);
    }
    return currentBatch;
}
|
Creates a new current batch if the next offset is not one higher than the current batch's
last offset.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/Acknowledgements.java
| 214
|
[
"currentBatch",
"nextOffset",
"batches"
] |
AcknowledgementBatch
| true
| 3
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
is_re_compilable
|
def is_re_compilable(obj: object) -> bool:
    """
    Check if the object can be compiled into a regex pattern instance.

    Parameters
    ----------
    obj : The object to check
        The object to check if the object can be compiled into a regex pattern instance.

    Returns
    -------
    bool
        Whether `obj` can be compiled as a regex pattern.

    See Also
    --------
    api.types.is_re : Check if the object is a regex pattern instance.

    Examples
    --------
    >>> from pandas.api.types import is_re_compilable
    >>> is_re_compilable(".*")
    True
    >>> is_re_compilable(1)
    False
    """
    # re.compile raises TypeError for objects that are neither str, bytes
    # nor compiled patterns; any other outcome counts as compilable.
    try:
        re.compile(obj)  # type: ignore[call-overload]
    except TypeError:
        return False
    return True
|
Check if the object can be compiled into a regex pattern instance.
Parameters
----------
obj : The object to check
The object to check if the object can be compiled into a regex pattern instance.
Returns
-------
bool
Whether `obj` can be compiled as a regex pattern.
See Also
--------
api.types.is_re : Check if the object is a regex pattern instance.
Examples
--------
>>> from pandas.api.types import is_re_compilable
>>> is_re_compilable(".*")
True
>>> is_re_compilable(1)
False
|
python
|
pandas/core/dtypes/inference.py
| 192
|
[
"obj"
] |
bool
| true
| 2
| 8.32
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
_safe_indexing
|
def _safe_indexing(X, indices, *, axis=0):
    """Return rows, items or columns of X using indices.

    .. warning::
        This utility is documented, but **private**. This means that
        backward compatibility might be broken without any deprecation
        cycle.

    Parameters
    ----------
    X : array-like, sparse-matrix, list, pandas.DataFrame, pandas.Series
        Data from which to sample rows, items or columns. `list` are only
        supported when `axis=0`.
    indices : bool, int, str, slice, array-like
        - If `axis=0`, boolean and integer array-like, integer slice,
          and scalar integer are supported.
        - If `axis=1`:
            - to select a single column, `indices` can be of `int` type for
              all `X` types and `str` only for dataframe. The selected subset
              will be 1D, unless `X` is a sparse matrix in which case it will
              be 2D.
            - to select multiples columns, `indices` can be one of the
              following: `list`, `array`, `slice`. The type used in
              these containers can be one of the following: `int`, 'bool' and
              `str`. However, `str` is only supported when `X` is a dataframe.
              The selected subset will be 2D.
    axis : int, default=0
        The axis along which `X` will be subsampled. `axis=0` will select
        rows while `axis=1` will select columns.

    Returns
    -------
    subset
        Subset of X on axis 0 or 1.

    Notes
    -----
    CSR, CSC, and LIL sparse matrices are supported. COO sparse matrices are
    not supported.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils import _safe_indexing
    >>> data = np.array([[1, 2], [3, 4], [5, 6]])
    >>> _safe_indexing(data, 0, axis=0)  # select the first row
    array([1, 2])
    >>> _safe_indexing(data, 0, axis=1)  # select the first column
    array([1, 3, 5])
    """
    if indices is None:
        return X
    if axis not in (0, 1):
        raise ValueError(
            "'axis' should be either 0 (to index rows) or 1 (to index "
            " column). Got {} instead.".format(axis)
        )
    indices_dtype = _determine_key_type(indices)
    if axis == 0 and indices_dtype == "str":
        raise ValueError(
            f"String indexing (indices={indices}) is not supported with 'axis=0'. "
            "Did you mean to use axis=1 for column selection?"
        )
    if axis == 1 and isinstance(X, list):
        raise ValueError("axis=1 is not supported for lists")
    if axis == 1 and (ndim := len(getattr(X, "shape", [0]))) != 2:
        raise ValueError(
            "'X' should be a 2D NumPy array, 2D sparse matrix or "
            "dataframe when indexing the columns (i.e. 'axis=1'). "
            f"Got {type(X)} instead with {ndim} dimension(s)."
        )
    if (
        axis == 1
        and indices_dtype == "str"
        and not (is_pandas_df(X) or _use_interchange_protocol(X))
    ):
        raise ValueError(
            "Specifying the columns using strings is only supported for dataframes."
        )
    if hasattr(X, "iloc"):
        # TODO: we should probably use is_pandas_df_or_series(X) instead but:
        # 1) Currently, it (probably) works for dataframes compliant to pandas' API.
        # 2) Updating would require updating some tests such as
        # test_train_test_split_mock_pandas.
        return _pandas_indexing(X, indices, indices_dtype, axis=axis)
    elif is_polars_df_or_series(X):
        return _polars_indexing(X, indices, indices_dtype, axis=axis)
    elif is_pyarrow_data(X):
        return _pyarrow_indexing(X, indices, indices_dtype, axis=axis)
    elif _use_interchange_protocol(X):  # pragma: no cover
        # Once the dataframe X is converted into its dataframe interchange protocol
        # version by calling X.__dataframe__(), it becomes very hard to turn it back
        # into its original type, e.g., a pyarrow.Table, see
        # https://github.com/data-apis/dataframe-api/issues/85.
        # Bug fix: this was `raise warnings.warn(...)` — warnings.warn returns
        # None, so the `raise` produced a TypeError instead of the intended
        # warning-then-fallback behavior described in the message. Warn and
        # fall through to the array/list indexing attempts below.
        warnings.warn(
            message="A data object with support for the dataframe interchange protocol"
            "was passed, but scikit-learn does currently not know how to handle this "
            "kind of data. Some array/list indexing will be tried.",
            category=UserWarning,
        )
    if hasattr(X, "shape"):
        return _array_indexing(X, indices, indices_dtype, axis=axis)
    else:
        return _list_indexing(X, indices, indices_dtype)
|
Return rows, items or columns of X using indices.
.. warning::
This utility is documented, but **private**. This means that
backward compatibility might be broken without any deprecation
cycle.
Parameters
----------
X : array-like, sparse-matrix, list, pandas.DataFrame, pandas.Series
Data from which to sample rows, items or columns. `list` are only
supported when `axis=0`.
indices : bool, int, str, slice, array-like
- If `axis=0`, boolean and integer array-like, integer slice,
and scalar integer are supported.
- If `axis=1`:
- to select a single column, `indices` can be of `int` type for
all `X` types and `str` only for dataframe. The selected subset
will be 1D, unless `X` is a sparse matrix in which case it will
be 2D.
- to select multiples columns, `indices` can be one of the
following: `list`, `array`, `slice`. The type used in
these containers can be one of the following: `int`, 'bool' and
`str`. However, `str` is only supported when `X` is a dataframe.
The selected subset will be 2D.
axis : int, default=0
The axis along which `X` will be subsampled. `axis=0` will select
rows while `axis=1` will select columns.
Returns
-------
subset
Subset of X on axis 0 or 1.
Notes
-----
CSR, CSC, and LIL sparse matrices are supported. COO sparse matrices are
not supported.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils import _safe_indexing
>>> data = np.array([[1, 2], [3, 4], [5, 6]])
>>> _safe_indexing(data, 0, axis=0) # select the first row
array([1, 2])
>>> _safe_indexing(data, 0, axis=1) # select the first column
array([1, 3, 5])
|
python
|
sklearn/utils/_indexing.py
| 265
|
[
"X",
"indices",
"axis"
] | false
| 19
| 6.4
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
calculateArgumentBindings
|
/**
 * Do as much work as we can as part of the set-up so that argument binding
 * on subsequent advice invocations can be as fast as possible.
 * <p>If the first advice-method parameter is a {@code JoinPoint},
 * {@code ProceedingJoinPoint} or {@code JoinPoint.StaticPart}, it is bound
 * implicitly and excluded from name-based binding.
 * <p>Any remaining parameters are bound by name via
 * {@code bindArgumentsByName}.
 */
public final void calculateArgumentBindings() {
    // The simple case... nothing to bind.
    if (this.argumentsIntrospected || this.parameterTypes.length == 0) {
        return;
    }
    int numUnboundArgs = this.parameterTypes.length;
    Class<?>[] parameterTypes = this.aspectJAdviceMethod.getParameterTypes();
    // A leading JoinPoint / ProceedingJoinPoint / StaticPart parameter is
    // bound implicitly and therefore not counted as unbound.
    if (maybeBindJoinPoint(parameterTypes[0]) || maybeBindProceedingJoinPoint(parameterTypes[0]) ||
            maybeBindJoinPointStaticPart(parameterTypes[0])) {
        numUnboundArgs--;
    }
    if (numUnboundArgs > 0) {
        // need to bind arguments by name as returned from the pointcut match
        bindArgumentsByName(numUnboundArgs);
    }
    this.argumentsIntrospected = true;
}
|
Do as much work as we can as part of the set-up so that argument binding
on subsequent advice invocations can be as fast as possible.
<p>If the first argument is of type JoinPoint or ProceedingJoinPoint then we
pass a JoinPoint in that position (ProceedingJoinPoint for around advice).
<p>If the first argument is of type {@code JoinPoint.StaticPart}
then we pass a {@code JoinPoint.StaticPart} in that position.
<p>Remaining arguments have to be bound by pointcut evaluation at
a given join point. We will get back a map from argument name to
value. We need to calculate which advice parameter needs to be bound
to which argument name. There are multiple strategies for determining
this binding, which are arranged in a ChainOfResponsibility.
|
java
|
spring-aop/src/main/java/org/springframework/aop/aspectj/AbstractAspectJAdvice.java
| 374
|
[] |
void
| true
| 7
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
configure
|
/**
 * Configure the provided {@link ThreadPoolTaskScheduler} instance using this builder.
 *
 * @param <T> the type of task scheduler
 * @param taskScheduler the {@link ThreadPoolTaskScheduler} to configure
 * @return the task scheduler instance
 */
public <T extends ThreadPoolTaskScheduler> T configure(T taskScheduler) {
    // Map each non-null builder property onto the scheduler.
    PropertyMapper map = PropertyMapper.get();
    map.from(this.poolSize).to(taskScheduler::setPoolSize);
    map.from(this.awaitTermination).to(taskScheduler::setWaitForTasksToCompleteOnShutdown);
    map.from(this.awaitTerminationPeriod).asInt(Duration::getSeconds).to(taskScheduler::setAwaitTerminationSeconds);
    map.from(this.threadNamePrefix).to(taskScheduler::setThreadNamePrefix);
    map.from(this.taskDecorator).to(taskScheduler::setTaskDecorator);
    // Customizers run last so they can override the mapped properties.
    if (!CollectionUtils.isEmpty(this.customizers)) {
        this.customizers.forEach((customizer) -> customizer.customize(taskScheduler));
    }
    return taskScheduler;
}
|
Configure the provided {@link ThreadPoolTaskScheduler} instance using this builder.
@param <T> the type of task scheduler
@param taskScheduler the {@link ThreadPoolTaskScheduler} to configure
@return the task scheduler instance
@see #build()
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/task/ThreadPoolTaskSchedulerBuilder.java
| 211
|
[
"taskScheduler"
] |
T
| true
| 2
| 7.44
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
findThreadById
|
/**
 * Finds the active thread with the specified id.
 *
 * @param threadId The thread id.
 * @return The thread with the specified id or {@code null} if no such thread exists.
 * @throws IllegalArgumentException if the specified id is zero or negative.
 */
public static Thread findThreadById(final long threadId) {
    if (threadId <= 0) {
        throw new IllegalArgumentException("The thread id must be greater than zero");
    }
    final Predicate<Thread> matchesId = t -> t != null && t.getId() == threadId;
    final Collection<Thread> matches = findThreads(matchesId);
    if (matches.isEmpty()) {
        return null;
    }
    return matches.iterator().next();
}
|
Finds the active thread with the specified id.
@param threadId The thread id.
@return The thread with the specified id or {@code null} if no such thread exists.
@throws IllegalArgumentException if the specified id is zero or negative.
@throws SecurityException if the current thread cannot access the system thread group.
@throws SecurityException if the current thread cannot modify thread groups from this thread's thread group up to the system thread group.
|
java
|
src/main/java/org/apache/commons/lang3/ThreadUtils.java
| 183
|
[
"threadId"
] |
Thread
| true
| 4
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
partitionsNeedingReset
|
/**
 * Collects the assigned partitions that are awaiting an offset reset and
 * whose retry backoff has elapsed at the given time.
 *
 * @param nowMs the current time in milliseconds, used to evaluate the retry backoff
 * @return the partitions that are ready to have their reset performed
 */
public synchronized Set<TopicPartition> partitionsNeedingReset(long nowMs) {
    return collectPartitions(state -> state.awaitingReset() && !state.awaitingRetryBackoff(nowMs));
}
|
Request reset for partitions that require a position, using the configured reset strategy.
@param initPartitionsToInclude Initializing partitions to include in the reset. Assigned partitions that
require a positions but are not included in this set won't be reset.
@throws NoOffsetForPartitionException If there are partitions assigned that require a position but
there is no reset strategy configured.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
| 878
|
[
"nowMs"
] | true
| 2
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
position
|
/**
 * Returns the current fetch position for the given assigned partition,
 * initializing it via {@code updateFetchPosition} if it is not yet set.
 *
 * @param partition the partition to query; must be assigned to this consumer
 * @return the current offset of the partition
 * @throws IllegalArgumentException if the partition is not assigned to this consumer
 */
@Override
public synchronized long position(TopicPartition partition) {
    ensureNotClosed();
    if (!this.subscriptions.isAssigned(partition))
        throw new IllegalArgumentException("You can only check the position for partitions assigned to this consumer.");
    SubscriptionState.FetchPosition position = this.subscriptions.position(partition);
    if (position == null) {
        // Lazily establish a position the first time it is requested.
        updateFetchPosition(partition);
        position = this.subscriptions.position(partition);
    }
    return position.offset;
}
|
Returns the current fetch position for the given assigned partition, initializing it first if necessary.
@param partition the partition to query; must be assigned to this consumer.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java
| 419
|
[
"partition"
] | true
| 3
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
poly2herm
|
def poly2herm(pol):
    """
    poly2herm(pol)

    Convert a polynomial to a Hermite series.

    Convert an array representing the coefficients of a polynomial (relative
    to the "standard" basis) ordered from lowest degree to highest, to an
    array of the coefficients of the equivalent Hermite series, ordered
    from lowest to highest degree.

    Parameters
    ----------
    pol : array_like
        1-D array containing the polynomial coefficients

    Returns
    -------
    c : ndarray
        1-D array containing the coefficients of the equivalent Hermite
        series.

    See Also
    --------
    herm2poly

    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.

    Examples
    --------
    >>> from numpy.polynomial.hermite import poly2herm
    >>> poly2herm(np.arange(4))
    array([1.   , 2.75 , 0.5  , 0.375])
    """
    [pol] = pu.as_series([pol])
    # Horner-style accumulation in the Hermite basis: repeatedly multiply
    # the running series by x and add the next-lower power coefficient,
    # visiting the coefficients from highest degree down to the constant.
    result = 0
    for coef in pol[::-1]:
        result = hermadd(hermmulx(result), coef)
    return result
|
poly2herm(pol)
Convert a polynomial to a Hermite series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Hermite series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Hermite
series.
See Also
--------
herm2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.hermite import poly2herm
>>> poly2herm(np.arange(4))
array([1. , 2.75 , 0.5 , 0.375])
|
python
|
numpy/polynomial/hermite.py
| 94
|
[
"pol"
] | false
| 2
| 7.36
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
transform
|
protected abstract Map<String, Object> transform(Result<RESPONSE> response);
|
Extract the configured properties from the retrieved response.
@param response the non-null response that was retrieved
@return a mapping of properties for the ip from the response
|
java
|
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IpinfoIpDataLookups.java
| 504
|
[
"response"
] | true
| 1
| 6.64
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
all_displays
|
def all_displays():
    """Get a list of all displays from `sklearn`.

    Returns
    -------
    displays : list of tuples
        List of (name, class), where ``name`` is the display class name as
        string and ``class`` is the actual type of the class.

    Examples
    --------
    >>> from sklearn.utils.discovery import all_displays
    >>> displays = all_displays()
    >>> displays[0]
    ('CalibrationDisplay', <class 'sklearn.calibration.CalibrationDisplay'>)
    """
    # Imported lazily to avoid circular imports from sklearn.base.
    from sklearn.utils._testing import ignore_warnings

    root = str(Path(__file__).parent.parent)  # sklearn package
    found = []
    # Ignore deprecation warnings triggered at import time and from walking
    # packages.
    with ignore_warnings(category=FutureWarning):
        for _, mod_name, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."):
            parts = mod_name.split(".")
            if "._" in mod_name or any(p in _MODULE_TO_IGNORE for p in parts):
                continue
            module = import_module(mod_name)
            for cls_name, cls in inspect.getmembers(module, inspect.isclass):
                if cls_name.endswith("Display") and not cls_name.startswith("_"):
                    found.append((cls_name, cls))
    return sorted(set(found), key=itemgetter(0))
|
Get a list of all displays from `sklearn`.
Returns
-------
displays : list of tuples
List of (name, class), where ``name`` is the display class name as
string and ``class`` is the actual type of the class.
Examples
--------
>>> from sklearn.utils.discovery import all_displays
>>> displays = all_displays()
>>> displays[0]
('CalibrationDisplay', <class 'sklearn.calibration.CalibrationDisplay'>)
|
python
|
sklearn/utils/discovery.py
| 153
|
[] | false
| 5
| 7.36
|
scikit-learn/scikit-learn
| 64,340
|
unknown
| false
|
|
get
|
/**
 * Return a {@link Layers} instance for the currently running application.
 * @param context the command context
 * @return the layers instance
 * @throws JarModeErrorException if layers are not enabled
 */
static Layers get(Context context) {
    IndexedLayers layers = IndexedLayers.get(context);
    if (layers != null) {
        return layers;
    }
    throw new JarModeErrorException("Layers are not enabled");
}
|
Return a {@link Layers} instance for the currently running application.
@param context the command context
@return a new layers instance
|
java
|
loader/spring-boot-jarmode-tools/src/main/java/org/springframework/boot/jarmode/tools/Layers.java
| 68
|
[
"context"
] |
Layers
| true
| 2
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
listStreamsGroupOffsets
|
ListStreamsGroupOffsetsResult listStreamsGroupOffsets(Map<String, ListStreamsGroupOffsetsSpec> groupSpecs, ListStreamsGroupOffsetsOptions options);
|
List the streams group offsets available in the cluster for the specified streams groups.
<em>Note</em>: this method effectively does the same as the corresponding consumer group method {@link Admin#listConsumerGroupOffsets} does.
@param groupSpecs Map of streams group ids to a spec that specifies the topic partitions of the group to list offsets for.
@param options The options to use when listing the streams group offsets.
@return The ListStreamsGroupOffsetsResult
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 963
|
[
"groupSpecs",
"options"
] |
ListStreamsGroupOffsetsResult
| true
| 1
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
iterator
|
/**
 * Return an iterator over the {@link ConfigurationPropertyName names}
 * managed by this source, backed by {@link #stream()}.
 * @return an iterator (never {@code null})
 */
@Override
default Iterator<ConfigurationPropertyName> iterator() {
    Stream<ConfigurationPropertyName> names = stream();
    return names.iterator();
}
|
Return an iterator for the {@link ConfigurationPropertyName names} managed by this
source.
@return an iterator (never {@code null})
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/IterableConfigurationPropertySource.java
| 52
|
[] | true
| 1
| 6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
toString
|
/**
 * Describes this bean factory: its identity, the names of the beans it
 * defines, and its position in the factory hierarchy.
 */
@Override
public String toString() {
    StringBuilder result = new StringBuilder(ObjectUtils.identityToString(this));
    result.append(": defining beans [")
            .append(StringUtils.collectionToCommaDelimitedString(this.beanDefinitionNames))
            .append("]; ");
    BeanFactory parentFactory = getParentBeanFactory();
    if (parentFactory != null) {
        result.append("parent: ").append(ObjectUtils.identityToString(parentFactory));
    }
    else {
        result.append("root of factory hierarchy");
    }
    return result.toString();
}
|
Public method to determine the applicable order value for a given bean.
@param beanName the name of the bean
@param beanInstance the bean instance to check
@return the corresponding order value (default is {@link Ordered#LOWEST_PRECEDENCE})
@since 7.0
@see #getOrder(String)
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/DefaultListableBeanFactory.java
| 2,383
|
[] |
String
| true
| 2
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
isError
|
/**
 * Checks if `value` is an `Error` (or DOMException) object, including
 * plain-object-free values with string `message` and `name` properties.
 *
 * @param {*} value The value to check.
 * @returns {boolean} Returns `true` if `value` is an error object, else `false`.
 */
function isError(value) {
  if (!isObjectLike(value)) {
    return false;
  }
  var tag = baseGetTag(value);
  if (tag == errorTag || tag == domExcTag) {
    return true;
  }
  // Duck-type cross-realm / subclassed errors, excluding plain objects.
  return typeof value.message == 'string' &&
    typeof value.name == 'string' &&
    !isPlainObject(value);
}
|
Checks if `value` is an `Error`, `EvalError`, `RangeError`, `ReferenceError`,
`SyntaxError`, `TypeError`, or `URIError` object.
@static
@memberOf _
@since 3.0.0
@category Lang
@param {*} value The value to check.
@returns {boolean} Returns `true` if `value` is an error object, else `false`.
@example
_.isError(new Error);
// => true
_.isError(Error);
// => false
|
javascript
|
lodash.js
| 11,698
|
[
"value"
] | false
| 6
| 7.2
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
predictBeanType
|
/**
 * Predict the eventual bean type, letting registered
 * {@link SmartInstantiationAwareBeanPostProcessor}s refine the determined
 * target type after a before-instantiation shortcut.
 */
@Override
protected @Nullable Class<?> predictBeanType(String beanName, RootBeanDefinition mbd, Class<?>... typesToMatch) {
    Class<?> targetType = determineTargetType(beanName, mbd, typesToMatch);
    if (targetType == null || mbd.isSynthetic() || !hasInstantiationAwareBeanPostProcessors()) {
        return targetType;
    }
    boolean onlyFactoryBeanMatch = (typesToMatch.length == 1 && typesToMatch[0] == FactoryBean.class);
    for (SmartInstantiationAwareBeanPostProcessor processor : getBeanPostProcessorCache().smartInstantiationAware) {
        Class<?> predicted = processor.predictBeanType(targetType, beanName);
        if (predicted == null) {
            continue;
        }
        // When only matching FactoryBean, ignore predictions of other types.
        if (!onlyFactoryBeanMatch || FactoryBean.class.isAssignableFrom(predicted)) {
            return predicted;
        }
    }
    return targetType;
}
|
Actually create the specified bean. Pre-creation processing has already happened
at this point, for example, checking {@code postProcessBeforeInstantiation} callbacks.
<p>Differentiates between default bean instantiation, use of a
factory method, and autowiring a constructor.
@param beanName the name of the bean
@param mbd the merged bean definition for the bean
@param args explicit arguments to use for constructor or factory method invocation
@return a new instance of the bean
@throws BeanCreationException if the bean could not be created
@see #instantiateBean
@see #instantiateUsingFactoryMethod
@see #autowireConstructor
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractAutowireCapableBeanFactory.java
| 653
|
[
"beanName",
"mbd"
] | true
| 8
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
listenersController
|
// Tracks every listener registered through it so all of them can be
// detached in one call to removeAll().
function listenersController() {
  const registered = [];
  function addEventListener(emitter, event, handler, flags) {
    eventTargetAgnosticAddListener(emitter, event, handler, flags);
    ArrayPrototypePush(registered, [emitter, event, handler, flags]);
  }
  function removeAll() {
    while (registered.length > 0) {
      ReflectApply(eventTargetAgnosticRemoveListener, undefined, ArrayPrototypePop(registered));
    }
  }
  return { addEventListener, removeAll };
}
|
Returns an `AsyncIterator` that iterates `event` events.
@param {EventEmitter} emitter
@param {string | symbol} event
@param {{
signal: AbortSignal;
close?: string[];
highWaterMark?: number,
lowWaterMark?: number
}} [options]
@returns {AsyncIterator}
|
javascript
|
lib/events.js
| 1,202
|
[] | false
| 2
| 6.64
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
multiplyBy
|
/**
 * Multiplies this fraction by another, returning the result in reduced form.
 * @param fraction the fraction to multiply by, must not be {@code null}
 * @return a {@link Fraction} instance with the resulting values
 * @throws NullPointerException if the fraction is {@code null}
 * @throws ArithmeticException if the resulting numerator or denominator
 *         exceeds {@code Integer.MAX_VALUE}
 */
public Fraction multiplyBy(final Fraction fraction) {
    Objects.requireNonNull(fraction, "fraction");
    if (numerator == 0 || fraction.numerator == 0) {
        return ZERO;
    }
    // Knuth 4.5.1: cross-cancel common factors first so the intermediate
    // products only overflow when the final result itself must overflow.
    final int gcd1 = greatestCommonDivisor(numerator, fraction.denominator);
    final int gcd2 = greatestCommonDivisor(fraction.numerator, denominator);
    final int newNumerator = mulAndCheck(numerator / gcd1, fraction.numerator / gcd2);
    final int newDenominator = mulPosAndCheck(denominator / gcd2, fraction.denominator / gcd1);
    return getReducedFraction(newNumerator, newDenominator);
}
|
Multiplies the value of this fraction by another, returning the
result in reduced form.
@param fraction the fraction to multiply by, must not be {@code null}
@return a {@link Fraction} instance with the resulting values
@throws NullPointerException if the fraction is {@code null}
@throws ArithmeticException if the resulting numerator or denominator exceeds
{@code Integer.MAX_VALUE}
|
java
|
src/main/java/org/apache/commons/lang3/math/Fraction.java
| 781
|
[
"fraction"
] |
Fraction
| true
| 3
| 7.44
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
make_mask
|
def make_mask(m, copy=False, shrink=True, dtype=MaskType):
    """
    Create a boolean mask from an array.

    Return `m` as a boolean mask, creating a copy if necessary or requested.
    The function can accept any sequence that is convertible to integers,
    or ``nomask``.  Does not require that contents must be 0s and 1s, values
    of 0 are interpreted as False, everything else as True.

    Parameters
    ----------
    m : array_like
        Potential mask.
    copy : bool, optional
        Whether to return a copy of `m` (True) or `m` itself (False).
    shrink : bool, optional
        Whether to shrink `m` to ``nomask`` if all its values are False.
    dtype : dtype, optional
        Data-type of the output mask. By default, the output mask has a
        dtype of MaskType (bool). If the dtype is flexible, each field has
        a boolean dtype. This is ignored when `m` is ``nomask``, in which
        case ``nomask`` is always returned.

    Returns
    -------
    result : ndarray
        A boolean mask derived from `m`.

    Examples
    --------
    >>> import numpy as np
    >>> import numpy.ma as ma
    >>> m = [True, False, True, True]
    >>> ma.make_mask(m)
    array([ True, False,  True,  True])
    >>> m = [1, 0, 1, 1]
    >>> ma.make_mask(m)
    array([ True, False,  True,  True])
    >>> m = [1, 0, 2, -3]
    >>> ma.make_mask(m)
    array([ True, False,  True,  True])

    Effect of the `shrink` parameter.

    >>> m = np.zeros(4)
    >>> m
    array([0., 0., 0., 0.])
    >>> ma.make_mask(m)
    False
    >>> ma.make_mask(m, shrink=False)
    array([False, False, False, False])

    Using a flexible `dtype`.

    >>> m = [1, 0, 1, 1]
    >>> n = [0, 1, 0, 0]
    >>> arr = []
    >>> for man, mouse in zip(m, n):
    ...     arr.append((man, mouse))
    >>> arr
    [(1, 0), (0, 1), (1, 0), (1, 0)]
    >>> dtype = np.dtype({'names':['man', 'mouse'],
    ...                   'formats':[np.int64, np.int64]})
    >>> arr = np.array(arr, dtype=dtype)
    >>> arr
    array([(1, 0), (0, 1), (1, 0), (1, 0)],
          dtype=[('man', '<i8'), ('mouse', '<i8')])
    >>> ma.make_mask(arr, dtype=dtype)
    array([(True, False), (False, True), (True, False), (True, False)],
          dtype=[('man', '|b1'), ('mouse', '|b1')])
    """
    # nomask is a sentinel, never converted or copied.
    if m is nomask:
        return nomask
    # Make sure the input dtype is valid.
    dtype = make_mask_descr(dtype)
    # legacy boolean special case: "existence of fields implies true"
    if isinstance(m, ndarray) and m.dtype.fields and dtype == np.bool:
        return np.ones(m.shape, dtype=dtype)
    # Fill the mask in case there are missing data; turn it into an ndarray.
    # copy=None means "copy only if needed"; True forces a copy (NumPy 2
    # np.array semantics).
    copy = None if not copy else True
    result = np.array(filled(m, True), copy=copy, dtype=dtype, subok=True)
    # Collapse an all-False mask to nomask when requested.
    if shrink:
        result = _shrink_mask(result)
    return result
|
Create a boolean mask from an array.
Return `m` as a boolean mask, creating a copy if necessary or requested.
The function can accept any sequence that is convertible to integers,
or ``nomask``. Does not require that contents must be 0s and 1s, values
of 0 are interpreted as False, everything else as True.
Parameters
----------
m : array_like
Potential mask.
copy : bool, optional
Whether to return a copy of `m` (True) or `m` itself (False).
shrink : bool, optional
Whether to shrink `m` to ``nomask`` if all its values are False.
dtype : dtype, optional
Data-type of the output mask. By default, the output mask has a
dtype of MaskType (bool). If the dtype is flexible, each field has
a boolean dtype. This is ignored when `m` is ``nomask``, in which
case ``nomask`` is always returned.
Returns
-------
result : ndarray
A boolean mask derived from `m`.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> m = [True, False, True, True]
>>> ma.make_mask(m)
array([ True, False, True, True])
>>> m = [1, 0, 1, 1]
>>> ma.make_mask(m)
array([ True, False, True, True])
>>> m = [1, 0, 2, -3]
>>> ma.make_mask(m)
array([ True, False, True, True])
Effect of the `shrink` parameter.
>>> m = np.zeros(4)
>>> m
array([0., 0., 0., 0.])
>>> ma.make_mask(m)
False
>>> ma.make_mask(m, shrink=False)
array([False, False, False, False])
Using a flexible `dtype`.
>>> m = [1, 0, 1, 1]
>>> n = [0, 1, 0, 0]
>>> arr = []
>>> for man, mouse in zip(m, n):
... arr.append((man, mouse))
>>> arr
[(1, 0), (0, 1), (1, 0), (1, 0)]
>>> dtype = np.dtype({'names':['man', 'mouse'],
... 'formats':[np.int64, np.int64]})
>>> arr = np.array(arr, dtype=dtype)
>>> arr
array([(1, 0), (0, 1), (1, 0), (1, 0)],
dtype=[('man', '<i8'), ('mouse', '<i8')])
>>> ma.make_mask(arr, dtype=dtype)
array([(True, False), (False, True), (True, False), (True, False)],
dtype=[('man', '|b1'), ('mouse', '|b1')])
|
python
|
numpy/ma/core.py
| 1,596
|
[
"m",
"copy",
"shrink",
"dtype"
] | false
| 7
| 7.76
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
_add_log_from_parsed_log_streams_to_heap
|
def _add_log_from_parsed_log_streams_to_heap(
    heap: list[tuple[int, StructuredLogMessage]],
    parsed_log_streams: dict[int, ParsedLogStream],
) -> None:
    """
    Push one log record from each parsed log stream onto the heap, removing
    any stream that turns out to be exhausted.

    :param heap: heap to store log records
    :param parsed_log_streams: dict of parsed log streams
    """
    # Built lazily: most passes remove nothing, and this function is called
    # repeatedly until every stream is drained, so we avoid allocating a list
    # each time.
    exhausted_ids: list[int] | None = None
    for stream_id, stream in parsed_log_streams.items():
        record: ParsedLog | None = next(stream, None)
        if record is None:
            if exhausted_ids is None:
                exhausted_ids = []
            exhausted_ids.append(stream_id)
            continue
        timestamp, line_num, line = record
        # An int sort key keeps heap entries small.
        heapq.heappush(heap, (_create_sort_key(timestamp, line_num), line))
    # Drop streams that yielded nothing on this pass.
    if exhausted_ids is not None:
        for stream_id in exhausted_ids:
            del parsed_log_streams[stream_id]
|
Add one log record from each parsed log stream to the heap, and will remove empty log stream from the dict after iterating.
:param heap: heap to store log records
:param parsed_log_streams: dict of parsed log streams
|
python
|
airflow-core/src/airflow/utils/log/file_task_handler.py
| 303
|
[
"heap",
"parsed_log_streams"
] |
None
| true
| 6
| 7.04
|
apache/airflow
| 43,597
|
sphinx
| false
|
setCurrentInjectionPoint
|
/**
 * Installs the given injection point as the thread-local current one
 * (clearing it when {@code null}) and returns the previously set value
 * so callers can restore it afterwards.
 */
static InjectionPoint setCurrentInjectionPoint(@Nullable InjectionPoint injectionPoint) {
    InjectionPoint previous = currentInjectionPoint.get();
    if (injectionPoint == null) {
        currentInjectionPoint.remove();
    }
    else {
        currentInjectionPoint.set(injectionPoint);
    }
    return previous;
}
|
Return a {@link Predicate} for a parameter type that checks if its target
value is a {@link Class} and the value type is a {@link String}. This is
a regular use case where a {@link Class} is defined in the bean definition
as a fully-qualified class name.
@param valueType the type of the value
@return a predicate to indicate a fallback match for a String to Class
parameter
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/ConstructorResolver.java
| 1,270
|
[
"injectionPoint"
] |
InjectionPoint
| true
| 2
| 7.92
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
replaceChars
|
/**
 * Replaces multiple characters in a String in one go; characters in
 * {@code searchChars} beyond the length of {@code replaceChars} are deleted.
 * A {@code null} input String returns {@code null}.
 * @param str String to replace characters in, may be null.
 * @param searchChars a set of characters to search for, may be null.
 * @param replaceChars a set of characters to replace, may be null.
 * @return modified String, {@code null} if null string input.
 */
public static String replaceChars(final String str, final String searchChars, String replaceChars) {
    if (isEmpty(str) || isEmpty(searchChars)) {
        return str;
    }
    replaceChars = ObjectUtils.toString(replaceChars);
    final int replaceLen = replaceChars.length();
    final int strLen = str.length();
    final StringBuilder out = new StringBuilder(strLen);
    boolean changed = false;
    for (int i = 0; i < strLen; i++) {
        final char ch = str.charAt(i);
        final int searchIndex = searchChars.indexOf(ch);
        if (searchIndex < 0) {
            out.append(ch);
            continue;
        }
        changed = true;
        // No replacement at this index means the character is deleted.
        if (searchIndex < replaceLen) {
            out.append(replaceChars.charAt(searchIndex));
        }
    }
    // Return the original instance when nothing was replaced.
    return changed ? out.toString() : str;
}
|
Replaces multiple characters in a String in one go. This method can also be used to delete characters.
<p>
For example:<br>
{@code replaceChars("hello", "ho", "jy") = jelly}.
</p>
<p>
A {@code null} string input returns {@code null}. An empty ("") string input returns an empty string. A null or empty set of search characters returns
the input string.
</p>
<p>
The length of the search characters should normally equal the length of the replace characters. If the search characters is longer, then the extra search
characters are deleted. If the search characters is shorter, then the extra replace characters are ignored.
</p>
<pre>
StringUtils.replaceChars(null, *, *) = null
StringUtils.replaceChars("", *, *) = ""
StringUtils.replaceChars("abc", null, *) = "abc"
StringUtils.replaceChars("abc", "", *) = "abc"
StringUtils.replaceChars("abc", "b", null) = "ac"
StringUtils.replaceChars("abc", "b", "") = "ac"
StringUtils.replaceChars("abcba", "bc", "yz") = "ayzya"
StringUtils.replaceChars("abcba", "bc", "y") = "ayya"
StringUtils.replaceChars("abcba", "bc", "yzx") = "ayzya"
</pre>
@param str String to replace characters in, may be null.
@param searchChars a set of characters to search for, may be null.
@param replaceChars a set of characters to replace, may be null.
@return modified String, {@code null} if null string input.
@since 2.0
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 6,306
|
[
"str",
"searchChars",
"replaceChars"
] |
String
| true
| 7
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
deserialize
|
def deserialize(cls, json_str: str) -> "GemmOperation":  # type: ignore[name-defined] # noqa: F821
    """Reconstruct a GEMM operation from its JSON string representation.

    Args:
        json_str: JSON string of a GEMM operation

    Returns:
        GemmOperation: Reconstructed operation
    """
    return cls._json_to_gemm_operation(json.loads(json_str))
|
Deserialize JSON string to a GEMM operation.
Args:
json_str: JSON string of a GEMM operation
Returns:
GemmOperation: Reconstructed operation
|
python
|
torch/_inductor/codegen/cuda/serialization.py
| 47
|
[
"cls",
"json_str"
] |
"GemmOperation"
| true
| 1
| 6.08
|
pytorch/pytorch
| 96,034
|
google
| false
|
createBootstrapContext
|
/**
 * Creates the bootstrap context and applies every registered
 * {@code BootstrapRegistryInitializer} to it before returning.
 */
private DefaultBootstrapContext createBootstrapContext() {
    DefaultBootstrapContext context = new DefaultBootstrapContext();
    this.bootstrapRegistryInitializers.forEach((initializer) -> {
        initializer.initialize(context);
    });
    return context;
}
|
Run the Spring application, creating and refreshing a new
{@link ApplicationContext}.
@param args the application arguments (usually passed from a Java main method)
@return a running {@link ApplicationContext}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringApplication.java
| 344
|
[] |
DefaultBootstrapContext
| true
| 1
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
max
|
/**
 * Gets the maximum of two {@code double} values, treating NaN as missing:
 * per IEEE-754r, NaN is only returned when both arguments are NaN.
 * @param a value 1.
 * @param b value 2.
 * @return the largest of the values.
 */
public static double max(final double a, final double b) {
    if (Double.isNaN(a)) {
        return b;
    }
    return Double.isNaN(b) ? a : Math.max(a, b);
}
|
Gets the maximum of two {@code double} values.
<p>NaN is only returned if all numbers are NaN as per IEEE-754r.</p>
@param a value 1.
@param b value 2.
@return the largest of the values.
|
java
|
src/main/java/org/apache/commons/lang3/math/IEEE754rUtils.java
| 63
|
[
"a",
"b"
] | true
| 3
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
subscription
|
/**
 * Returns an unmodifiable view of the current topic subscription
 * (empty when nothing has been subscribed), guarding access with
 * acquire/release around the internal state.
 */
@Override
public Set<String> subscription() {
    acquireAndEnsureOpen();
    try {
        Set<String> topics = subscriptions.subscription();
        return Collections.unmodifiableSet(topics);
    } finally {
        release();
    }
}
|
Get the current subscription, or an empty set if no such call has
been made.
@return The set of topics currently subscribed to
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java
| 1,775
|
[] | true
| 1
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
newMetadataRequestBuilder
|
/**
 * Constructs a metadata request builder that fetches cluster data for
 * all active topics.
 * @return the constructed non-null metadata builder
 */
protected MetadataRequest.Builder newMetadataRequestBuilder() {
    MetadataRequest.Builder allTopicsBuilder = MetadataRequest.Builder.allTopics();
    return allTopicsBuilder;
}
|
Constructs and returns a metadata request builder for fetching cluster data and all active topics.
@return the constructed non-null metadata builder
|
java
|
clients/src/main/java/org/apache/kafka/clients/Metadata.java
| 740
|
[] | true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
constantFuture
|
/**
 * Gets a {@link Future} that is already done and yields the given
 * constant value (which may be null).
 * @param <T> the type of the value used by this {@link Future} object
 * @param value the constant value to return, may be null
 * @return an instance of Future that will return the value, never null
 */
public static <T> Future<T> constantFuture(final T value) {
    final Future<T> completed = new ConstantFuture<>(value);
    return completed;
}
|
Gets an implementation of {@link Future} that is immediately done
and returns the specified constant value.
<p>
This can be useful to return a simple constant immediately from the
concurrent processing, perhaps as part of avoiding nulls.
A constant future can also be useful in testing.
</p>
@param <T> the type of the value used by this {@link Future} object
@param value the constant value to return, may be null
@return an instance of Future that will return the value, never null
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/ConcurrentUtils.java
| 127
|
[
"value"
] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
nbytes
|
def nbytes(self) -> int:
    """
    Return the number of bytes in the underlying data.

    See Also
    --------
    Series.ndim : Number of dimensions of the underlying data.
    Series.size : Return the number of elements in the underlying data.

    Examples
    --------
    For Series:

    >>> s = pd.Series(["Ant", "Bear", "Cow"])
    >>> s
    0     Ant
    1    Bear
    2     Cow
    dtype: str
    >>> s.nbytes
    34

    For Index:

    >>> idx = pd.Index([1, 2, 3])
    >>> idx
    Index([1, 2, 3], dtype='int64')
    >>> idx.nbytes
    24
    """
    # Delegate straight to the backing array's nbytes.
    underlying = self._values
    return underlying.nbytes
|
Return the number of bytes in the underlying data.
See Also
--------
Series.ndim : Number of dimensions of the underlying data.
Series.size : Return the number of elements in the underlying data.
Examples
--------
For Series:
>>> s = pd.Series(["Ant", "Bear", "Cow"])
>>> s
0 Ant
1 Bear
2 Cow
dtype: str
>>> s.nbytes
34
For Index:
>>> idx = pd.Index([1, 2, 3])
>>> idx
Index([1, 2, 3], dtype='int64')
>>> idx.nbytes
24
|
python
|
pandas/core/base.py
| 437
|
[
"self"
] |
int
| true
| 1
| 7.28
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
doIntValue
|
/**
 * Reads the current token as an int, converting any parser
 * {@link IOException} via {@code handleParserException} before rethrowing.
 */
@Override
public int doIntValue() throws IOException {
    try {
        return parser.getIntValue();
    } catch (IOException cause) {
        throw handleParserException(cause);
    }
}
|
Handle parser exception depending on type.
This converts known exceptions to XContentParseException and rethrows them.
|
java
|
libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java
| 263
|
[] | true
| 2
| 6.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
EvictingCacheMap
|
EvictingCacheMap(EvictingCacheMap&&) = default;
|
Construct an EvictingCacheMap
@param maxSize maximum size of the cache map. Once the map size exceeds
maxSize, the map will begin to evict.
@param clearSize the number of elements to clear at a time when automatic
eviction on insert is triggered.
|
cpp
|
folly/container/EvictingCacheMap.h
| 175
|
[] | true
| 2
| 6.64
|
facebook/folly
| 30,157
|
doxygen
| false
|
|
toInteger
|
/**
 * Converts a boolean to an int using the convention that
 * {@code true} is {@code 1} and {@code false} is {@code 0}.
 * @param bool the boolean to convert
 * @return one if {@code true}, zero if {@code false}
 */
public static int toInteger(final boolean bool) {
    if (bool) {
        return 1;
    }
    return 0;
}
|
Converts a boolean to an int using the convention that
{@code true} is {@code 1} and {@code false} is {@code 0}.
<pre>
BooleanUtils.toInteger(true) = 1
BooleanUtils.toInteger(false) = 0
</pre>
@param bool the boolean to convert
@return one if {@code true}, zero if {@code false}
|
java
|
src/main/java/org/apache/commons/lang3/BooleanUtils.java
| 886
|
[
"bool"
] | true
| 2
| 8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
standard
|
/**
 * Factory method to return a {@link JsonWriter} for standard Java types,
 * delegating member registration to {@code Members::add}.
 * @param <T> the type to write
 * @return a {@link JsonWriter} instance
 */
static <T> JsonWriter<T> standard() {
    return of(Members::add);
}
|
Factory method to return a {@link JsonWriter} for standard Java types. See
{@link JsonValueWriter class-level javadoc} for details.
@param <T> the type to write
@return a {@link JsonWriter} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/json/JsonWriter.java
| 140
|
[] | true
| 1
| 6.48
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
deserialize
|
/**
 * Deserializes a single {@link Object} from an array of bytes by
 * delegating to the stream-based overload.
 * @param <T> the object type to be deserialized.
 * @param objectData the serialized object, must not be null.
 * @return the deserialized object.
 * @throws NullPointerException if {@code objectData} is {@code null}.
 * @throws SerializationException (runtime) if the serialization fails.
 */
public static <T> T deserialize(final byte[] objectData) {
    Objects.requireNonNull(objectData, "objectData");
    final ByteArrayInputStream input = new ByteArrayInputStream(objectData);
    return deserialize(input);
}
|
Deserializes a single {@link Object} from an array of bytes.
<p>
If the call site incorrectly types the return value, a {@link ClassCastException} is thrown from the call site.
Without Generics in this declaration, the call site must type cast and can cause the same ClassCastException.
Note that in both cases, the ClassCastException is in the call site, not in this method.
</p>
<p>
If you want to secure deserialization with a whitelist or blacklist, please use Apache Commons IO's
{@link org.apache.commons.io.serialization.ValidatingObjectInputStream ValidatingObjectInputStream}.
</p>
@param <T> the object type to be deserialized.
@param objectData
the serialized object, must not be null.
@return the deserialized object.
@throws NullPointerException if {@code objectData} is {@code null}.
@throws SerializationException (runtime) if the serialization fails.
@see org.apache.commons.io.serialization.ValidatingObjectInputStream
|
java
|
src/main/java/org/apache/commons/lang3/SerializationUtils.java
| 163
|
[
"objectData"
] |
T
| true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
nextAlphabetic
|
/**
 * Creates a random string of the given length drawn from the Latin
 * alphabetic characters (a-z, A-Z).
 * @param count the length of random string to create.
 * @return the random string.
 * @throws IllegalArgumentException if {@code count} < 0.
 */
public String nextAlphabetic(final int count) {
    return next(count, true, false);
}
|
Creates a random string whose length is the number of characters specified.
<p>
Characters will be chosen from the set of Latin alphabetic characters (a-z, A-Z).
</p>
@param count the length of random string to create.
@return the random string.
@throws IllegalArgumentException if {@code count} < 0.
|
java
|
src/main/java/org/apache/commons/lang3/RandomStringUtils.java
| 809
|
[
"count"
] |
String
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
isNestedOrIndexedProperty
|
/**
 * Check whether the given property path indicates an indexed or nested
 * property, i.e. contains a nested-property separator or a property-key
 * prefix character.
 * @param propertyPath the property path to check
 * @return whether the path indicates an indexed or nested property
 */
public static boolean isNestedOrIndexedProperty(@Nullable String propertyPath) {
    if (propertyPath == null) {
        return false;
    }
    for (int i = 0, len = propertyPath.length(); i < len; i++) {
        char c = propertyPath.charAt(i);
        if (c == PropertyAccessor.NESTED_PROPERTY_SEPARATOR_CHAR
                || c == PropertyAccessor.PROPERTY_KEY_PREFIX_CHAR) {
            return true;
        }
    }
    return false;
}
|
Check whether the given property path indicates an indexed or nested property.
@param propertyPath the property path to check
@return whether the path indicates an indexed or nested property
|
java
|
spring-beans/src/main/java/org/springframework/beans/PropertyAccessorUtils.java
| 47
|
[
"propertyPath"
] | true
| 5
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
prepareApplicationContext
|
/**
 * Runs the application's {@code main} method under the AOT processor hook
 * and returns the application context it captures.
 */
@Override
protected GenericApplicationContext prepareApplicationContext(Class<?> application) {
    AotProcessorHook hook = new AotProcessorHook(application);
    return hook.run(() -> {
        Method mainMethod = getMainMethod(application);
        mainMethod.setAccessible(true);
        // A no-arg main is invoked directly; otherwise pass the captured args.
        Object[] invocationArgs = (mainMethod.getParameterCount() == 0) ? new Object[0]
                : new Object[] { this.applicationArgs };
        ReflectionUtils.invokeMethod(mainMethod, null, invocationArgs);
        return Void.class;
    });
}
|
Create a new processor for the specified application and settings.
@param application the application main class
@param settings the general AOT processor settings
@param applicationArgs the arguments to provide to the main method
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringApplicationAotProcessor.java
| 58
|
[
"application"
] |
GenericApplicationContext
| true
| 2
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
read
|
int read(ByteBuffer dst, long pos) throws IOException;
|
Read a sequence of bytes from this channel into the given buffer, starting at the
given block position.
@param dst the buffer into which bytes are to be transferred
@param pos the position within the block at which the transfer is to begin
@return the number of bytes read, possibly zero, or {@code -1} if the given
position is greater than or equal to the block size
@throws IOException on I/O error
@see #readFully(ByteBuffer, long)
@see FileChannel#read(ByteBuffer, long)
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/DataBlock.java
| 51
|
[
"dst",
"pos"
] | true
| 1
| 6.32
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
subSequence
|
/**
 * Returns the subsequence of {@code cs} starting at {@code start} and
 * running to the end — the {@link CharSequence} equivalent of
 * {@link String#substring(int)}.
 * @param cs the specified subsequence, null returns null.
 * @param start the start index, inclusive, valid.
 * @return a new subsequence, may be null.
 * @throws IndexOutOfBoundsException if {@code start} is negative or
 *         greater than {@code length()}.
 */
public static CharSequence subSequence(final CharSequence cs, final int start) {
    if (cs == null) {
        return null;
    }
    return cs.subSequence(start, cs.length());
}
|
Returns a new {@link CharSequence} that is a subsequence of this
sequence starting with the {@code char} value at the specified index.
<p>This provides the {@link CharSequence} equivalent to {@link String#substring(int)}.
The length (in {@code char}) of the returned sequence is {@code length() - start},
so if {@code start == end} then an empty sequence is returned.</p>
@param cs the specified subsequence, null returns null.
@param start the start index, inclusive, valid.
@return a new subsequence, may be null.
@throws IndexOutOfBoundsException if {@code start} is negative or if
{@code start} is greater than {@code length()}.
|
java
|
src/main/java/org/apache/commons/lang3/CharSequenceUtils.java
| 355
|
[
"cs",
"start"
] |
CharSequence
| true
| 2
| 8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
get_debug_flag
|
def get_debug_flag() -> bool:
    """Get whether debug mode should be enabled for the app, indicated by the
    :envvar:`FLASK_DEBUG` environment variable. The default is ``False``:
    unset, empty, ``"0"``, ``"false"``, and ``"no"`` (any case) all disable it.
    """
    value = os.environ.get("FLASK_DEBUG")
    if not value:
        return False
    return value.lower() not in {"0", "false", "no"}
|
Get whether debug mode should be enabled for the app, indicated by the
:envvar:`FLASK_DEBUG` environment variable. The default is ``False``.
|
python
|
src/flask/helpers.py
| 27
|
[] |
bool
| true
| 2
| 6.56
|
pallets/flask
| 70,946
|
unknown
| false
|
checkBitVectorable
|
/**
 * Validates that {@code enumClass} has few enough constants to be
 * represented as bits in a {@code long}.
 * @param <E> the type of the enumeration.
 * @param enumClass to check.
 * @return {@code enumClass}.
 * @throws NullPointerException if {@code enumClass} is {@code null}.
 * @throws IllegalArgumentException if {@code enumClass} is not an enum
 *         class or has more than 64 values.
 */
private static <E extends Enum<E>> Class<E> checkBitVectorable(final Class<E> enumClass) {
    final E[] constants = asEnum(enumClass).getEnumConstants();
    final int constantCount = constants.length;
    Validate.isTrue(constantCount <= Long.SIZE, CANNOT_STORE_S_S_VALUES_IN_S_BITS,
            Integer.valueOf(constantCount), enumClass.getSimpleName(), Integer.valueOf(Long.SIZE));
    return enumClass;
}
|
Validate that {@code enumClass} is compatible with representation in a {@code long}.
@param <E> the type of the enumeration.
@param enumClass to check.
@return {@code enumClass}.
@throws NullPointerException if {@code enumClass} is {@code null}.
@throws IllegalArgumentException if {@code enumClass} is not an enum class or has more than 64 values.
@since 3.0.1
|
java
|
src/main/java/org/apache/commons/lang3/EnumUtils.java
| 73
|
[
"enumClass"
] | true
| 1
| 6.88
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
activation_offload_sink_wait
|
def activation_offload_sink_wait(fwd_module: fx.GraphModule) -> None:
"""
Sink wait_event operations for offload completion to the end of the graph.
This function identifies wait_event nodes for offload completion and moves them
to the end of the graph, allowing computation to overlap with offload operations.
Args:
fwd_module: Forward module graph
"""
graph: fx.Graph = fwd_module.graph
nodes_list: list[fx.Node] = list(graph.nodes)
node_to_idx: dict[fx.Node, int] = {node: idx for idx, node in enumerate(nodes_list)}
# Find all CPU offload device_put nodes
offload_nodes: list[fx.Node] = [
node
for node in graph.find_nodes(
op="call_function", target=torch.ops.prims.device_put.default
)
if CPU_OFFLOAD_PREFIX in node.name
]
# Collect all wait_event nodes that need to be moved
wait_nodes_to_sink: list[fx.Node] = []
for offload_node in offload_nodes:
offload_idx: int = node_to_idx[offload_node]
wait_event_node: fx.Node = nodes_list[offload_idx + 3]
# Validate it's actually a wait_event node
if not (
wait_event_node.op == "call_function"
and wait_event_node.target == torch.ops.streams.wait_event.default
):
raise ValueError(
f"Expected wait_event node three positions after {offload_node.name}"
)
wait_nodes_to_sink.append(wait_event_node)
# Find the output node, and move all wait_event nodes to just before the output node
output_node: fx.Node = graph.find_nodes(op="output")[0]
for wait_node in wait_nodes_to_sink:
output_node.prepend(wait_node)
|
Sink wait_event operations for offload completion to the end of the graph.
This function identifies wait_event nodes for offload completion and moves them
to the end of the graph, allowing computation to overlap with offload operations.
Args:
fwd_module: Forward module graph
|
python
|
torch/_functorch/_activation_offloading/activation_offloading.py
| 717
|
[
"fwd_module"
] |
None
| true
| 5
| 6.72
|
pytorch/pytorch
| 96,034
|
google
| false
|
createClassNameGenerator
|
protected ClassNameGenerator createClassNameGenerator() {
return new ClassNameGenerator(ClassName.get(getApplicationClass()));
}
|
Callback to customize the {@link ClassNameGenerator}.
<p>By default, a standard {@link ClassNameGenerator} using the configured
{@linkplain #getApplicationClass() application entry point} as the default
target is used.
@return the class name generator
|
java
|
spring-context/src/main/java/org/springframework/context/aot/ContextAotProcessor.java
| 122
|
[] |
ClassNameGenerator
| true
| 1
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
explicit
|
public static <T> Ordering<T> explicit(T leastValue, T... remainingValuesInOrder) {
return explicit(Lists.asList(leastValue, remainingValuesInOrder));
}
|
Returns an ordering that compares objects according to the order in which they are given to
this method. Only objects present in the argument list (according to {@link Object#equals}) may
be compared. This comparator imposes a "partial ordering" over the type {@code T}. Null values
in the argument list are not supported.
<p>The returned comparator throws a {@link ClassCastException} when it receives an input
parameter that isn't among the provided values.
<p>The generated comparator is serializable if all the provided values are serializable.
@param leastValue the value which the returned comparator should consider the "least" of all
values
@param remainingValuesInOrder the rest of the values that the returned comparator will be able
to compare, in the order the comparator should follow
@return the comparator described above
@throws NullPointerException if any of the provided values is null
@throws IllegalArgumentException if any duplicate values (according to {@link
Object#equals(Object)}) are present among the method arguments
|
java
|
android/guava/src/com/google/common/collect/Ordering.java
| 256
|
[
"leastValue"
] | true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
|
numberValue
|
@Override
public Number numberValue() throws IOException {
try {
return parser.getNumberValue();
} catch (IOException e) {
throw handleParserException(e);
}
}
|
Handle parser exception depending on type.
This converts known exceptions to XContentParseException and rethrows them.
|
java
|
libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java
| 245
|
[] |
Number
| true
| 2
| 6.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
instancesOf
|
@SuppressWarnings("unchecked") // After the isInstance check, we still need to type-cast.
private static <E> Stream<E> instancesOf(final Class<? super E> clazz, final Stream<?> stream) {
return (Stream<E>) of(stream).filter(clazz::isInstance);
}
|
Streams only instances of the give Class in a collection.
<p>
This method shorthand for:
</p>
<pre>
{@code (Stream<E>) Streams.toStream(collection).filter(collection, SomeClass.class::isInstance);}
</pre>
@param <E> the type of elements in the collection we want to stream.
@param clazz the type of elements in the collection we want to stream.
@param collection the collection to stream or null.
@return A non-null stream that only provides instances we want.
@since 3.13.0
|
java
|
src/main/java/org/apache/commons/lang3/stream/Streams.java
| 613
|
[
"clazz",
"stream"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
unregisterBroker
|
@InterfaceStability.Unstable
default UnregisterBrokerResult unregisterBroker(int brokerId) {
return unregisterBroker(brokerId, new UnregisterBrokerOptions());
}
|
Unregister a broker.
<p>
This operation does not have any effect on partition assignments.
This is a convenience method for {@link #unregisterBroker(int, UnregisterBrokerOptions)}
@param brokerId the broker id to unregister.
@return the {@link UnregisterBrokerResult} containing the result
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 1,640
|
[
"brokerId"
] |
UnregisterBrokerResult
| true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
freqstr
|
def freqstr(self) -> str | None:
"""
Return the frequency object as a string if it's set, otherwise None.
See Also
--------
DatetimeIndex.inferred_freq : Returns a string representing a frequency
generated by infer_freq.
Examples
--------
For DatetimeIndex:
>>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00"], freq="D")
>>> idx.freqstr
'D'
The frequency can be inferred if there are more than 2 points:
>>> idx = pd.DatetimeIndex(
... ["2018-01-01", "2018-01-03", "2018-01-05"], freq="infer"
... )
>>> idx.freqstr
'2D'
For PeriodIndex:
>>> idx = pd.PeriodIndex(["2023-1", "2023-2", "2023-3"], freq="M")
>>> idx.freqstr
'M'
"""
if self.freq is None:
return None
return self.freq.freqstr
|
Return the frequency object as a string if it's set, otherwise None.
See Also
--------
DatetimeIndex.inferred_freq : Returns a string representing a frequency
generated by infer_freq.
Examples
--------
For DatetimeIndex:
>>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00"], freq="D")
>>> idx.freqstr
'D'
The frequency can be inferred if there are more than 2 points:
>>> idx = pd.DatetimeIndex(
... ["2018-01-01", "2018-01-03", "2018-01-05"], freq="infer"
... )
>>> idx.freqstr
'2D'
For PeriodIndex:
>>> idx = pd.PeriodIndex(["2023-1", "2023-2", "2023-3"], freq="M")
>>> idx.freqstr
'M'
|
python
|
pandas/core/arrays/datetimelike.py
| 870
|
[
"self"
] |
str | None
| true
| 2
| 6.8
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
ndim
|
def ndim(a):
"""
Return the number of dimensions of an array.
Parameters
----------
a : array_like
Input array. If it is not already an ndarray, a conversion is
attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in `a`. Scalars are zero-dimensional.
See Also
--------
ndarray.ndim : equivalent method
shape : dimensions of array
ndarray.shape : dimensions of array
Examples
--------
>>> import numpy as np
>>> np.ndim([[1,2,3],[4,5,6]])
2
>>> np.ndim(np.array([[1,2,3],[4,5,6]]))
2
>>> np.ndim(1)
0
"""
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
|
Return the number of dimensions of an array.
Parameters
----------
a : array_like
Input array. If it is not already an ndarray, a conversion is
attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in `a`. Scalars are zero-dimensional.
See Also
--------
ndarray.ndim : equivalent method
shape : dimensions of array
ndarray.shape : dimensions of array
Examples
--------
>>> import numpy as np
>>> np.ndim([[1,2,3],[4,5,6]])
2
>>> np.ndim(np.array([[1,2,3],[4,5,6]]))
2
>>> np.ndim(1)
0
|
python
|
numpy/_core/fromnumeric.py
| 3,483
|
[
"a"
] | false
| 1
| 6.32
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
uniqueIndex
|
@CanIgnoreReturnValue
public static <K, V> ImmutableMap<K, V> uniqueIndex(
Iterable<V> values, Function<? super V, K> keyFunction) {
if (values instanceof Collection) {
return uniqueIndex(
values.iterator(),
keyFunction,
ImmutableMap.builderWithExpectedSize(((Collection<?>) values).size()));
}
return uniqueIndex(values.iterator(), keyFunction);
}
|
Returns a map with the given {@code values}, indexed by keys derived from those values. In
other words, each input value produces an entry in the map whose key is the result of applying
{@code keyFunction} to that value. These entries appear in the same order as the input values.
Example usage:
{@snippet :
Color red = new Color("red", 255, 0, 0);
...
ImmutableSet<Color> allColors = ImmutableSet.of(red, green, blue);
ImmutableMap<String, Color> colorForName =
uniqueIndex(allColors, c -> c.toString());
assertThat(colorForName).containsEntry("red", red);
}
<p>If your index may associate multiple values with each key, use {@link
Multimaps#index(Iterable, Function) Multimaps.index}.
<p><b>Note:</b> on Java 8+, it is usually better to use streams. For example:
{@snippet :
import static com.google.common.collect.ImmutableMap.toImmutableMap;
...
ImmutableMap<String, Color> colorForName =
allColors.stream().collect(toImmutableMap(c -> c.toString(), c -> c));
}
<p>Streams provide a more standard and flexible API and the lambdas make it clear what the keys
and values in the map are.
@param values the values to use when constructing the {@code Map}
@param keyFunction the function used to produce the key for each value
@return a map mapping the result of evaluating the function {@code keyFunction} on each value
in the input collection to that value
@throws IllegalArgumentException if {@code keyFunction} produces the same key for more than one
value in the input collection
@throws NullPointerException if any element of {@code values} is {@code null}, or if {@code
keyFunction} produces {@code null} for any value
|
java
|
android/guava/src/com/google/common/collect/Maps.java
| 1,290
|
[
"values",
"keyFunction"
] | true
| 2
| 7.6
|
google/guava
| 51,352
|
javadoc
| false
|
|
containsVariableTypeSameParametrizedTypeBound
|
private static boolean containsVariableTypeSameParametrizedTypeBound(final TypeVariable<?> typeVariable, final ParameterizedType parameterizedType) {
return ArrayUtils.contains(typeVariable.getBounds(), parameterizedType);
}
|
Tests, recursively, whether any of the type parameters associated with {@code type} are bound to variables.
@param type The type to check for type variables.
@return Whether any of the type parameters associated with {@code type} are bound to variables.
@since 3.2
|
java
|
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
| 398
|
[
"typeVariable",
"parameterizedType"
] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
predict_proba
|
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed', or None
Test samples. If `None`, predictions for all indexed points are
returned; in this case, points are not considered their own
neighbors.
Returns
-------
p : ndarray of shape (n_queries, n_classes), or a list of n_outputs \
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
check_is_fitted(self, "_fit_method")
if self.weights == "uniform":
# TODO: systematize this mapping of metric for
# PairwiseDistancesReductions.
metric, metric_kwargs = _adjusted_metric(
metric=self.metric, metric_kwargs=self.metric_params, p=self.p
)
if (
self._fit_method == "brute"
and ArgKminClassMode.is_usable_for(X, self._fit_X, metric)
# TODO: Implement efficient multi-output solution
and not self.outputs_2d_
):
if self.metric == "precomputed":
X = _check_precomputed(X)
else:
X = validate_data(
self, X, accept_sparse="csr", reset=False, order="C"
)
probabilities = ArgKminClassMode.compute(
X,
self._fit_X,
k=self.n_neighbors,
weights=self.weights,
Y_labels=self._y,
unique_Y_labels=self.classes_,
metric=metric,
metric_kwargs=metric_kwargs,
# `strategy="parallel_on_X"` has in practice be shown
# to be more efficient than `strategy="parallel_on_Y``
# on many combination of datasets.
# Hence, we choose to enforce it here.
# For more information, see:
# https://github.com/scikit-learn/scikit-learn/pull/24076#issuecomment-1445258342
# TODO: adapt the heuristic for `strategy="auto"` for
# `ArgKminClassMode` and use `strategy="auto"`.
strategy="parallel_on_X",
)
return probabilities
# In that case, we do not need the distances to perform
# the weighting so we do not compute them.
neigh_ind = self.kneighbors(X, return_distance=False)
neigh_dist = None
else:
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_queries = _num_samples(self._fit_X if X is None else X)
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
elif _all_with_any_reduction_axis_1(weights, value=0):
raise ValueError(
"All neighbors of some sample is getting zero weights. "
"Please modify 'weights' to avoid this case if you are "
"using a user-defined function."
)
all_rows = np.arange(n_queries)
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_queries, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
|
Return probability estimates for the test data X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed', or None
Test samples. If `None`, predictions for all indexed points are
returned; in this case, points are not considered their own
neighbors.
Returns
-------
p : ndarray of shape (n_queries, n_classes), or a list of n_outputs \
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
|
python
|
sklearn/neighbors/_classification.py
| 314
|
[
"self",
"X"
] | false
| 15
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
wrapInstance
|
public static <T> Plugin<T> wrapInstance(T instance, Metrics metrics, String key) {
return wrapInstance(instance, metrics, () -> tags(key, instance));
}
|
Wrap an instance into a Plugin.
@param instance the instance to wrap
@param metrics the metrics
@param key the value for the <code>config</code> tag
@return the plugin
|
java
|
clients/src/main/java/org/apache/kafka/common/internals/Plugin.java
| 73
|
[
"instance",
"metrics",
"key"
] | true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
getUnassignedPartitions
|
private List<TopicPartition> getUnassignedPartitions(List<TopicPartition> sortedAssignedPartitions) {
List<String> sortedAllTopics = new ArrayList<>(partitionsPerTopic.keySet());
// sort all topics first, then we can have sorted all topic partitions by adding partitions starting from 0
Collections.sort(sortedAllTopics);
if (sortedAssignedPartitions.isEmpty()) {
// no assigned partitions means all partitions are unassigned partitions
return getAllTopicPartitions(sortedAllTopics);
}
List<TopicPartition> unassignedPartitions = new ArrayList<>(totalPartitionsCount - sortedAssignedPartitions.size());
sortedAssignedPartitions.sort(Comparator.comparing(TopicPartition::topic).thenComparing(TopicPartition::partition));
boolean shouldAddDirectly = false;
Iterator<TopicPartition> sortedAssignedPartitionsIter = sortedAssignedPartitions.iterator();
TopicPartition nextAssignedPartition = sortedAssignedPartitionsIter.next();
for (String topic : sortedAllTopics) {
int partitionCount = partitionsPerTopic.get(topic).size();
for (int i = 0; i < partitionCount; i++) {
if (shouldAddDirectly || !(nextAssignedPartition.topic().equals(topic) && nextAssignedPartition.partition() == i)) {
unassignedPartitions.add(new TopicPartition(topic, i));
} else {
// this partition is in assignedPartitions, don't add to unassignedPartitions, just get next assigned partition
if (sortedAssignedPartitionsIter.hasNext()) {
nextAssignedPartition = sortedAssignedPartitionsIter.next();
} else {
// add the remaining directly since there is no more sortedAssignedPartitions
shouldAddDirectly = true;
}
}
}
}
return unassignedPartitions;
}
|
get the unassigned partition list by computing the difference set of all sorted partitions
and sortedAssignedPartitions. If no assigned partitions, we'll just return all sorted topic partitions.
To compute the difference set, we use two pointers technique here:
We loop through the all sorted topics, and then iterate all partitions the topic has,
compared with the ith element in sortedAssignedPartitions(i starts from 0):
- if not equal to the ith element, add to unassignedPartitions
- if equal to the ith element, get next element from sortedAssignedPartitions
@param sortedAssignedPartitions sorted partitions, all are included in the sortedPartitions
@return the partitions not yet assigned to any consumers
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java
| 883
|
[
"sortedAssignedPartitions"
] | true
| 7
| 7.76
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
readFully
|
public static int readFully(InputStream reader, byte[] dest) throws IOException {
return readFully(reader, dest, 0, dest.length);
}
|
Read up to {code count} bytes from {@code input} and store them into {@code buffer}.
The buffers position will be incremented by the number of bytes read from the stream.
@param input stream to read from
@param buffer buffer to read into
@param count maximum number of bytes to read
@return number of bytes read from the stream
@throws IOException in case of I/O errors
|
java
|
libs/core/src/main/java/org/elasticsearch/core/Streams.java
| 123
|
[
"reader",
"dest"
] | true
| 1
| 6.64
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
resolveEmbeddedValue
|
@Override
public @Nullable String resolveEmbeddedValue(@Nullable String value) {
if (value == null) {
return null;
}
String result = value;
for (StringValueResolver resolver : this.embeddedValueResolvers) {
result = resolver.resolveStringValue(result);
if (result == null) {
return null;
}
}
return result;
}
|
Return the custom TypeConverter to use, if any.
@return the custom TypeConverter, or {@code null} if none specified
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
| 952
|
[
"value"
] |
String
| true
| 3
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
isFileSystemCaseSensitive
|
function isFileSystemCaseSensitive(): boolean {
// win32\win64 are case insensitive platforms
if (platform === "win32" || platform === "win64") {
return false;
}
// If this file exists under a different case, we must be case-insensitve.
return !fileExists(swapCase(__filename));
}
|
Strips non-TS paths from the profile, so users with private projects shouldn't
need to worry about leaking paths by submitting a cpu profile to us
|
typescript
|
src/compiler/sys.ts
| 1,722
|
[] | true
| 3
| 6.56
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
configureTransactionState
|
private TransactionManager configureTransactionState(ProducerConfig config,
LogContext logContext) {
TransactionManager transactionManager = null;
if (config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) {
final String transactionalId = config.getString(ProducerConfig.TRANSACTIONAL_ID_CONFIG);
final boolean enable2PC = config.getBoolean(ProducerConfig.TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG);
final int transactionTimeoutMs = config.getInt(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG);
final long retryBackoffMs = config.getLong(ProducerConfig.RETRY_BACKOFF_MS_CONFIG);
transactionManager = new TransactionManager(
logContext,
transactionalId,
transactionTimeoutMs,
retryBackoffMs,
apiVersions,
enable2PC
);
if (transactionManager.isTransactional())
log.info("Instantiated a transactional producer.");
else
log.info("Instantiated an idempotent producer.");
} else {
// ignore unretrieved configurations related to producer transaction
config.ignore(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG);
}
return transactionManager;
}
|
A producer is instantiated by providing a set of key-value pairs as configuration, a key and a value {@link Serializer}.
Valid configuration strings are documented <a href="http://kafka.apache.org/documentation.html#producerconfigs">here</a>.
<p>
Note: after creating a {@code KafkaProducer} you must always {@link #close()} it to avoid resource leaks.
@param properties The producer configs
@param keySerializer The serializer for key that implements {@link Serializer}. The configure() method won't be
called in the producer when the serializer is passed in directly.
@param valueSerializer The serializer for value that implements {@link Serializer}. The configure() method won't
be called in the producer when the serializer is passed in directly.
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
| 607
|
[
"config",
"logContext"
] |
TransactionManager
| true
| 3
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
degree
|
def degree(self):
"""The degree of the series.
Returns
-------
degree : int
Degree of the series, one less than the number of coefficients.
Examples
--------
Create a polynomial object for ``1 + 7*x + 4*x**2``:
>>> np.polynomial.set_default_printstyle("unicode")
>>> poly = np.polynomial.Polynomial([1, 7, 4])
>>> print(poly)
1.0 + 7.0·x + 4.0·x²
>>> poly.degree()
2
Note that this method does not check for non-zero coefficients.
You must trim the polynomial to remove any trailing zeroes:
>>> poly = np.polynomial.Polynomial([1, 7, 0])
>>> print(poly)
1.0 + 7.0·x + 0.0·x²
>>> poly.degree()
2
>>> poly.trim().degree()
1
"""
return len(self) - 1
|
The degree of the series.
Returns
-------
degree : int
Degree of the series, one less than the number of coefficients.
Examples
--------
Create a polynomial object for ``1 + 7*x + 4*x**2``:
>>> np.polynomial.set_default_printstyle("unicode")
>>> poly = np.polynomial.Polynomial([1, 7, 4])
>>> print(poly)
1.0 + 7.0·x + 4.0·x²
>>> poly.degree()
2
Note that this method does not check for non-zero coefficients.
You must trim the polynomial to remove any trailing zeroes:
>>> poly = np.polynomial.Polynomial([1, 7, 0])
>>> print(poly)
1.0 + 7.0·x + 0.0·x²
>>> poly.degree()
2
>>> poly.trim().degree()
1
|
python
|
numpy/polynomial/_polybase.py
| 670
|
[
"self"
] | false
| 1
| 6.32
|
numpy/numpy
| 31,054
|
unknown
| false
|
|
_frommethod
|
def _frommethod(methodname: str, reversed: bool = False):
"""
Define functions from existing MaskedArray methods.
Parameters
----------
methodname : str
Name of the method to transform.
reversed : bool, optional
Whether to reverse the first two arguments of the method. Default is False.
"""
method = getattr(MaskedArray, methodname)
assert callable(method)
signature = inspect.signature(method)
params = list(signature.parameters.values())
params[0] = params[0].replace(name="a") # rename 'self' to 'a'
if reversed:
assert len(params) >= 2
params[0], params[1] = params[1], params[0]
def wrapper(a, b, *args, **params):
return getattr(asanyarray(b), methodname)(a, *args, **params)
else:
def wrapper(a, *args, **params):
return getattr(asanyarray(a), methodname)(*args, **params)
wrapper.__signature__ = signature.replace(parameters=params)
wrapper.__name__ = wrapper.__qualname__ = methodname
# __doc__ is None when using `python -OO ...`
if method.__doc__ is not None:
str_signature = f"{methodname}{signature}"
# TODO: For methods with a docstring "Parameters" section, that do not already
# mention `a` (see e.g. `MaskedArray.var.__doc__`), it should be inserted there.
wrapper.__doc__ = f" {str_signature}\n{method.__doc__}"
return wrapper
|
Define functions from existing MaskedArray methods.
Parameters
----------
methodname : str
Name of the method to transform.
reversed : bool, optional
Whether to reverse the first two arguments of the method. Default is False.
|
python
|
numpy/ma/core.py
| 7,037
|
[
"methodname",
"reversed"
] | true
| 4
| 6.88
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
pre_fork_setup
|
def pre_fork_setup():
"""
Setup that must be done prior to forking with a process pool.
"""
# ensure properties have been calculated before processes
# are forked
caching_device_properties()
# Computing the triton key can be slow. If we call it before fork,
# it will be cached for the forked subprocesses.
from torch._inductor.runtime.triton_compat import HAS_TRITON, triton_key
if HAS_TRITON:
triton_key()
|
Setup that must be done prior to forking with a process pool.
|
python
|
torch/_inductor/async_compile.py
| 82
|
[] | false
| 2
| 6.24
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
|
whenEqualTo
|
public Source<T> whenEqualTo(@Nullable Object object) {
return when((value) -> value.equals(object));
}
|
Return a filtered version of the source that will only map values equal to the
specified {@code object}.
@param object the object to match
@return a new filtered source instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/PropertyMapper.java
| 244
|
[
"object"
] | true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
export
|
def export(args, api_client: Client = NEW_API_CLIENT) -> None:
"""
Export all pools.
If output is json, write to file. Otherwise, print to console.
"""
try:
pools_response = api_client.pools.list()
pools_list = [
{
"name": pool.name,
"slots": pool.slots,
"description": pool.description,
"include_deferred": pool.include_deferred,
"occupied_slots": pool.occupied_slots,
"running_slots": pool.running_slots,
"queued_slots": pool.queued_slots,
"scheduled_slots": pool.scheduled_slots,
"open_slots": pool.open_slots,
"deferred_slots": pool.deferred_slots,
}
for pool in pools_response.pools
]
if args.output == "json":
file_path = Path(args.file)
with open(file_path, "w") as f:
json.dump(pools_list, f, indent=4, sort_keys=True)
rich.print(f"Exported {pools_response.total_entries} pool(s) to {args.file}")
else:
# For non-json formats, print the pools directly to console
rich.print(pools_list)
except Exception as e:
raise SystemExit(f"Failed to export pools: {e}")
|
Export all pools.
If output is json, write to file. Otherwise, print to console.
|
python
|
airflow-ctl/src/airflowctl/ctl/commands/pool_command.py
| 50
|
[
"args",
"api_client"
] |
None
| true
| 3
| 6
|
apache/airflow
| 43,597
|
unknown
| false
|
copy
|
@CanIgnoreReturnValue
public static long copy(InputStream from, OutputStream to) throws IOException {
checkNotNull(from);
checkNotNull(to);
byte[] buf = createBuffer();
long total = 0;
while (true) {
int r = from.read(buf);
if (r == -1) {
break;
}
to.write(buf, 0, r);
total += r;
}
return total;
}
|
Copies all bytes from the input stream to the output stream. Does not close or flush either
stream.
<p><b>Java 9 users and later:</b> this method should be treated as deprecated; use the
equivalent {@link InputStream#transferTo} method instead.
@param from the input stream to read from
@param to the output stream to write to
@return the number of bytes copied
@throws IOException if an I/O error occurs
|
java
|
android/guava/src/com/google/common/io/ByteStreams.java
| 108
|
[
"from",
"to"
] | true
| 3
| 8.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
equalsImpl
|
static boolean equalsImpl(Table<?, ?, ?> table, @Nullable Object obj) {
if (obj == table) {
return true;
} else if (obj instanceof Table) {
Table<?, ?, ?> that = (Table<?, ?, ?>) obj;
return table.cellSet().equals(that.cellSet());
} else {
return false;
}
}
|
Returns a synchronized (thread-safe) table backed by the specified table. In order to guarantee
serial access, it is critical that <b>all</b> access to the backing table is accomplished
through the returned table.
<p>It is imperative that the user manually synchronize on the returned table when accessing any
of its collection views:
{@snippet :
Table<R, C, V> table = Tables.synchronizedTable(HashBasedTable.create());
...
Map<C, V> row = table.row(rowKey); // Needn't be in synchronized block
...
synchronized (table) { // Synchronizing on table, not row!
Iterator<Entry<C, V>> i = row.entrySet().iterator(); // Must be in synchronized block
while (i.hasNext()) {
foo(i.next());
}
}
}
<p>Failure to follow this advice may result in non-deterministic behavior.
<p>The returned table will be serializable if the specified table is serializable.
@param table the table to be wrapped in a synchronized view
@return a synchronized view of the specified table
@since 22.0
|
java
|
android/guava/src/com/google/common/collect/Tables.java
| 696
|
[
"table",
"obj"
] | true
| 3
| 7.76
|
google/guava
| 51,352
|
javadoc
| false
|
|
delete_fargate_profile
|
def delete_fargate_profile(self, clusterName: str, fargateProfileName: str) -> dict:
"""
Delete an AWS Fargate profile from a specified Amazon EKS cluster.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.delete_fargate_profile`
:param clusterName: The name of the Amazon EKS cluster associated with the Fargate profile to delete.
:param fargateProfileName: The name of the Fargate profile to delete.
:return: Returns descriptive information about the deleted Fargate profile.
"""
eks_client = self.conn
response = eks_client.delete_fargate_profile(
clusterName=clusterName, fargateProfileName=fargateProfileName
)
self.log.info(
"Deleted AWS Fargate profile with the name %s from Amazon EKS cluster %s.",
response.get("fargateProfile").get("fargateProfileName"),
response.get("fargateProfile").get("clusterName"),
)
return response
|
Delete an AWS Fargate profile from a specified Amazon EKS cluster.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.delete_fargate_profile`
:param clusterName: The name of the Amazon EKS cluster associated with the Fargate profile to delete.
:param fargateProfileName: The name of the Fargate profile to delete.
:return: Returns descriptive information about the deleted Fargate profile.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/eks.py
| 287
|
[
"self",
"clusterName",
"fargateProfileName"
] |
dict
| true
| 1
| 6.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
getTypeForFactoryBeanFromMethod
|
private ResolvableType getTypeForFactoryBeanFromMethod(Class<?> beanClass, String factoryMethodName) {
// CGLIB subclass methods hide generic parameters; look at the original user class.
Class<?> factoryBeanClass = ClassUtils.getUserClass(beanClass);
FactoryBeanMethodTypeFinder finder = new FactoryBeanMethodTypeFinder(factoryMethodName);
ReflectionUtils.doWithMethods(factoryBeanClass, finder, ReflectionUtils.USER_DECLARED_METHODS);
return finder.getResult();
}
|
Introspect the factory method signatures on the given bean class,
trying to find a common {@code FactoryBean} object type declared there.
@param beanClass the bean class to find the factory method on
@param factoryMethodName the name of the factory method
@return the common {@code FactoryBean} object type, or {@code null} if none
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractAutowireCapableBeanFactory.java
| 952
|
[
"beanClass",
"factoryMethodName"
] |
ResolvableType
| true
| 1
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
format
|
<B extends Appendable> B format(Calendar calendar, B buf);
|
Formats a {@link Calendar} object into the supplied {@link Appendable}.
The TimeZone set on the Calendar is only used to adjust the time offset.
The TimeZone specified during the construction of the Parser will determine the TimeZone
used in the formatted string.
@param calendar the calendar to format.
@param buf the buffer to format into.
@param <B> the Appendable class type, usually StringBuilder or StringBuffer.
@return the specified string buffer.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/time/DatePrinter.java
| 61
|
[
"calendar",
"buf"
] |
B
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
baseGet
|
function baseGet(object, path) {
path = castPath(path, object);
var index = 0,
length = path.length;
while (object != null && index < length) {
object = object[toKey(path[index++])];
}
return (index && index == length) ? object : undefined;
}
|
The base implementation of `_.get` without support for default values.
@private
@param {Object} object The object to query.
@param {Array|string} path The path of the property to get.
@returns {*} Returns the resolved value.
|
javascript
|
lodash.js
| 3,070
|
[
"object",
"path"
] | false
| 5
| 6.24
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
load
|
public SslConfiguration load(Path basePath) {
Objects.requireNonNull(basePath, "Base Path cannot be null");
final List<String> protocols = resolveListSetting(PROTOCOLS, Function.identity(), defaultProtocols);
final List<String> ciphers = resolveListSetting(CIPHERS, Function.identity(), defaultCiphers);
final SslVerificationMode verificationMode = resolveSetting(VERIFICATION_MODE, SslVerificationMode::parse, defaultVerificationMode);
final SslClientAuthenticationMode clientAuth = resolveSetting(CLIENT_AUTH, SslClientAuthenticationMode::parse, defaultClientAuth);
final List<X509Field> trustRestrictionsX509Fields = resolveListSetting(
TRUST_RESTRICTIONS_X509_FIELDS,
X509Field::parseForRestrictedTrust,
defaultRestrictedTrustFields
);
final long handshakeTimeoutMillis = resolveSetting(
HANDSHAKE_TIMEOUT,
s -> TimeValue.parseTimeValue(s, HANDSHAKE_TIMEOUT),
DEFAULT_HANDSHAKE_TIMEOUT
).millis();
final SslKeyConfig keyConfig = buildKeyConfig(basePath);
final SslTrustConfig trustConfig = buildTrustConfig(basePath, verificationMode, keyConfig, Set.copyOf(trustRestrictionsX509Fields));
if (protocols == null || protocols.isEmpty()) {
throw new SslConfigException("no protocols configured in [" + settingPrefix + PROTOCOLS + "]");
}
if (ciphers == null || ciphers.isEmpty()) {
throw new SslConfigException("no cipher suites configured in [" + settingPrefix + CIPHERS + "]");
}
final boolean isExplicitlyConfigured = hasSettings(settingPrefix);
return new SslConfiguration(
settingPrefix,
isExplicitlyConfigured,
trustConfig,
keyConfig,
verificationMode,
clientAuth,
ciphers,
protocols,
handshakeTimeoutMillis
);
}
|
Resolve all necessary configuration settings, and load a {@link SslConfiguration}.
@param basePath The base path to use for any settings that represent file paths. Typically points to the Elasticsearch
configuration directory.
@throws SslConfigException For any problems with the configuration, or with loading the required SSL classes.
|
java
|
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java
| 298
|
[
"basePath"
] |
SslConfiguration
| true
| 5
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
cellIterator
|
@Override
Iterator<Cell<R, C, @Nullable V>> cellIterator() {
return new AbstractIndexedListIterator<Cell<R, C, @Nullable V>>(size()) {
@Override
protected Cell<R, C, @Nullable V> get(int index) {
return getCell(index);
}
};
}
|
Returns an unmodifiable set of all row key / column key / value triplets. Changes to the table
will update the returned set.
<p>The returned set's iterator traverses the mappings with the first row key, the mappings with
the second row key, and so on.
<p>The value in the returned cells may change if the table subsequently changes.
@return set of table cells consisting of row key / column key / value triplets
|
java
|
android/guava/src/com/google/common/collect/ArrayTable.java
| 545
|
[] | true
| 1
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
|
lastCaughtUpTimestamp
|
public OptionalLong lastCaughtUpTimestamp() {
return lastCaughtUpTimestamp;
}
|
Return the last millisecond timestamp at which this replica was known to be
caught up with the leader.
@return The value of the lastCaughtUpTime if known, empty otherwise
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/QuorumInfo.java
| 172
|
[] |
OptionalLong
| true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
__call__
|
def __call__(
self,
declarations: str | Iterable[tuple[str, str]],
inherited: dict[str, str] | None = None,
) -> dict[str, str]:
"""
The given declarations to atomic properties.
Parameters
----------
declarations_str : str | Iterable[tuple[str, str]]
A CSS string or set of CSS declaration tuples
e.g. "font-weight: bold; background: blue" or
{("font-weight", "bold"), ("background", "blue")}
inherited : dict, optional
Atomic properties indicating the inherited style context in which
declarations_str is to be resolved. ``inherited`` should already
be resolved, i.e. valid output of this method.
Returns
-------
dict
Atomic CSS 2.2 properties.
Examples
--------
>>> resolve = CSSResolver()
>>> inherited = {"font-family": "serif", "font-weight": "bold"}
>>> out = resolve(
... '''
... border-color: BLUE RED;
... font-size: 1em;
... font-size: 2em;
... font-weight: normal;
... font-weight: inherit;
... ''',
... inherited,
... )
>>> sorted(out.items()) # doctest: +NORMALIZE_WHITESPACE
[('border-bottom-color', 'blue'),
('border-left-color', 'red'),
('border-right-color', 'red'),
('border-top-color', 'blue'),
('font-family', 'serif'),
('font-size', '24pt'),
('font-weight', 'bold')]
"""
if isinstance(declarations, str):
declarations = self.parse(declarations)
props = dict(self.atomize(declarations))
if inherited is None:
inherited = {}
props = self._update_initial(props, inherited)
props = self._update_font_size(props, inherited)
return self._update_other_units(props)
|
The given declarations to atomic properties.
Parameters
----------
declarations_str : str | Iterable[tuple[str, str]]
A CSS string or set of CSS declaration tuples
e.g. "font-weight: bold; background: blue" or
{("font-weight", "bold"), ("background", "blue")}
inherited : dict, optional
Atomic properties indicating the inherited style context in which
declarations_str is to be resolved. ``inherited`` should already
be resolved, i.e. valid output of this method.
Returns
-------
dict
Atomic CSS 2.2 properties.
Examples
--------
>>> resolve = CSSResolver()
>>> inherited = {"font-family": "serif", "font-weight": "bold"}
>>> out = resolve(
... '''
... border-color: BLUE RED;
... font-size: 1em;
... font-size: 2em;
... font-weight: normal;
... font-weight: inherit;
... ''',
... inherited,
... )
>>> sorted(out.items()) # doctest: +NORMALIZE_WHITESPACE
[('border-bottom-color', 'blue'),
('border-left-color', 'red'),
('border-right-color', 'red'),
('border-top-color', 'blue'),
('font-family', 'serif'),
('font-size', '24pt'),
('font-weight', 'bold')]
|
python
|
pandas/io/formats/css.py
| 219
|
[
"self",
"declarations",
"inherited"
] |
dict[str, str]
| true
| 3
| 7.76
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
register_source
|
def register_source(app, env, modname):
"""
Registers source code.
:param app: application
:param env: environment of the plugin
:param modname: name of the module to load
:return: True if the code is registered successfully, False otherwise
"""
if modname is None:
return False
entry = env._viewcode_modules.get(modname, None)
if entry is False:
print(f"[{modname}] Entry is false for ")
return False
code_tags = app.emit_firstresult("viewcode-find-source", modname)
if code_tags is None:
try:
analyzer = ModuleAnalyzer.for_module(modname)
except Exception as ex:
logger.info(
'Module "%s" could not be loaded. Full source will not be available. "%s"', modname, ex
)
# We cannot use regular warnings or exception methods because those warnings are interpreted
# by running python process and converted into "real" warnings, so we need to print the
# traceback here at info level
tb = traceback.format_exc()
logger.info("%s", tb)
env._viewcode_modules[modname] = False
return False
if not isinstance(analyzer.code, str):
code = analyzer.code.decode(analyzer.encoding)
else:
code = analyzer.code
analyzer.find_tags()
tags = analyzer.tags
else:
code, tags = code_tags
if entry is None or entry[0] != code:
entry = code, tags, {}, ""
env._viewcode_modules[modname] = entry
return True
|
Registers source code.
:param app: application
:param env: environment of the plugin
:param modname: name of the module to load
:return: True if the code is registered successfully, False otherwise
|
python
|
devel-common/src/sphinx_exts/exampleinclude.py
| 130
|
[
"app",
"env",
"modname"
] | false
| 9
| 7.44
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
isVarThatIsPossiblyChanged
|
static bool isVarThatIsPossiblyChanged(const Decl *Func, const Stmt *LoopStmt,
const Stmt *Cond, ASTContext *Context) {
if (const auto *DRE = dyn_cast<DeclRefExpr>(Cond)) {
if (const auto *VD = dyn_cast<ValueDecl>(DRE->getDecl()))
return isVarPossiblyChanged(Func, LoopStmt, VD, Context);
} else if (isa<MemberExpr, CallExpr, ObjCIvarRefExpr, ObjCPropertyRefExpr,
ObjCMessageExpr>(Cond)) {
// FIXME: Handle MemberExpr.
return true;
} else if (const auto *CE = dyn_cast<CastExpr>(Cond)) {
QualType T = CE->getType();
while (true) {
if (T.isVolatileQualified())
return true;
if (!T->isAnyPointerType() && !T->isReferenceType())
break;
T = T->getPointeeType();
}
}
return false;
}
|
Return whether `Cond` is a variable that is possibly changed in `LoopStmt`.
|
cpp
|
clang-tools-extra/clang-tidy/bugprone/InfiniteLoopCheck.cpp
| 91
|
[] | true
| 11
| 6.72
|
llvm/llvm-project
| 36,021
|
doxygen
| false
|
|
_translate
|
def _translate(self, styler: StylerRenderer, d: dict):
"""
Mutate the render dictionary to allow for tooltips:
- Add ``<span>`` HTML element to each data cells ``display_value``. Ignores
headers.
- Add table level CSS styles to control pseudo classes.
Parameters
----------
styler_data : DataFrame
Underlying ``Styler`` DataFrame used for reindexing.
uuid : str
The underlying ``Styler`` uuid for CSS id.
d : dict
The dictionary prior to final render
Returns
-------
render_dict : Dict
"""
self.tt_data = self.tt_data.reindex_like(styler.data)
if self.tt_data.empty:
return d
mask = (self.tt_data.isna()) | (self.tt_data.eq("")) # empty string = no ttip
# this conditional adds tooltips via pseudo css and <span> elements.
if not self.as_title_attribute:
name = self.class_name
self.table_styles = [
style
for sublist in [
self._pseudo_css(
styler.uuid, name, i, j, str(self.tt_data.iloc[i, j])
)
for i in range(len(self.tt_data.index))
for j in range(len(self.tt_data.columns))
if not (
mask.iloc[i, j]
or i in styler.hidden_rows
or j in styler.hidden_columns
)
]
for style in sublist
]
# add span class to every cell since there is at least 1 non-empty tooltip
if self.table_styles:
for row in d["body"]:
for item in row:
if item["type"] == "td":
item["display_value"] = (
str(item["display_value"])
+ f'<span class="{self.class_name}"></span>'
)
d["table_styles"].extend(self._class_styles)
d["table_styles"].extend(self.table_styles)
# this conditional adds tooltips as extra "title" attribute on a <td> element
else:
index_offset = self.tt_data.index.nlevels
body = d["body"]
for i in range(len(self.tt_data.index)):
for j in range(len(self.tt_data.columns)):
if (
not mask.iloc[i, j]
or i in styler.hidden_rows
or j in styler.hidden_columns
):
row = body[i]
item = row[j + index_offset]
value = self.tt_data.iloc[i, j]
item["attributes"] += f' title="{value}"'
return d
|
Mutate the render dictionary to allow for tooltips:
- Add ``<span>`` HTML element to each data cells ``display_value``. Ignores
headers.
- Add table level CSS styles to control pseudo classes.
Parameters
----------
styler_data : DataFrame
Underlying ``Styler`` DataFrame used for reindexing.
uuid : str
The underlying ``Styler`` uuid for CSS id.
d : dict
The dictionary prior to final render
Returns
-------
render_dict : Dict
|
python
|
pandas/io/formats/style_render.py
| 2,239
|
[
"self",
"styler",
"d"
] | true
| 15
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
wrapperToPrimitive
|
public static Class<?> wrapperToPrimitive(final Class<?> cls) {
return WRAPPER_PRIMITIVE_MAP.get(cls);
}
|
Converts the specified wrapper class to its corresponding primitive class.
<p>
This method is the counter part of {@code primitiveToWrapper()}. If the passed in class is a wrapper class for a
primitive type, this primitive type will be returned (e.g. {@code Integer.TYPE} for {@code Integer.class}). For other
classes, or if the parameter is <strong>null</strong>, the return value is <strong>null</strong>.
</p>
@param cls the class to convert, may be <strong>null</strong>.
@return the corresponding primitive type if {@code cls} is a wrapper class, <strong>null</strong> otherwise.
@see #primitiveToWrapper(Class)
@since 2.4
|
java
|
src/main/java/org/apache/commons/lang3/ClassUtils.java
| 1,702
|
[
"cls"
] | true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
getAlgorithmNameFromOid
|
private static String getAlgorithmNameFromOid(String oidString) throws GeneralSecurityException {
return switch (oidString) {
case "1.2.840.10040.4.1" -> "DSA";
case "1.2.840.113549.1.1.1" -> "RSA";
case "1.2.840.10045.2.1" -> "EC";
case "1.3.14.3.2.7" -> "DES-CBC";
case "2.16.840.1.101.3.4.1.1" -> "AES-128_ECB";
case "2.16.840.1.101.3.4.1.2" -> "AES-128_CBC";
case "2.16.840.1.101.3.4.1.3" -> "AES-128_OFB";
case "2.16.840.1.101.3.4.1.4" -> "AES-128_CFB";
case "2.16.840.1.101.3.4.1.6" -> "AES-128_GCM";
case "2.16.840.1.101.3.4.1.21" -> "AES-192_ECB";
case "2.16.840.1.101.3.4.1.22" -> "AES-192_CBC";
case "2.16.840.1.101.3.4.1.23" -> "AES-192_OFB";
case "2.16.840.1.101.3.4.1.24" -> "AES-192_CFB";
case "2.16.840.1.101.3.4.1.26" -> "AES-192_GCM";
case "2.16.840.1.101.3.4.1.41" -> "AES-256_ECB";
case "2.16.840.1.101.3.4.1.42" -> "AES-256_CBC";
case "2.16.840.1.101.3.4.1.43" -> "AES-256_OFB";
case "2.16.840.1.101.3.4.1.44" -> "AES-256_CFB";
case "2.16.840.1.101.3.4.1.46" -> "AES-256_GCM";
case "2.16.840.1.101.3.4.1.5" -> "AESWrap-128";
case "2.16.840.1.101.3.4.1.25" -> "AESWrap-192";
case "2.16.840.1.101.3.4.1.45" -> "AESWrap-256";
default -> null;
};
}
|
Parses a DER encoded private key and reads its algorithm identifier Object OID.
@param keyBytes the private key raw bytes
@return A string identifier for the key algorithm (RSA, DSA, or EC)
@throws GeneralSecurityException if the algorithm oid that is parsed from ASN.1 is unknown
@throws IOException if the DER encoded key can't be parsed
|
java
|
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemUtils.java
| 700
|
[
"oidString"
] |
String
| true
| 1
| 6.56
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
select_describe_func
|
def select_describe_func(
data: Series,
) -> Callable:
"""Select proper function for describing series based on data type.
Parameters
----------
data : Series
Series to be described.
"""
if is_bool_dtype(data.dtype):
return describe_categorical_1d
elif is_numeric_dtype(data):
return describe_numeric_1d
elif data.dtype.kind == "M" or isinstance(data.dtype, DatetimeTZDtype):
return describe_timestamp_1d
elif data.dtype.kind == "m":
return describe_numeric_1d
else:
return describe_categorical_1d
|
Select proper function for describing series based on data type.
Parameters
----------
data : Series
Series to be described.
|
python
|
pandas/core/methods/describe.py
| 323
|
[
"data"
] |
Callable
| true
| 7
| 6.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
reverse
|
GeneralRange<T> reverse() {
GeneralRange<T> result = reverse;
if (result == null) {
result =
new GeneralRange<>(
reverseComparator(comparator),
hasUpperBound,
getUpperEndpoint(),
getUpperBoundType(),
hasLowerBound,
getLowerEndpoint(),
getLowerBoundType());
result.reverse = this;
return this.reverse = result;
}
return result;
}
|
Returns the same range relative to the reversed comparator.
|
java
|
android/guava/src/com/google/common/collect/GeneralRange.java
| 269
|
[] | true
| 2
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
|
trigger_tasks
|
def trigger_tasks(self, open_slots: int) -> None:
"""
Initiate async execution of the queued tasks, up to the number of available slots.
:param open_slots: Number of open slots
"""
sorted_queue = self.order_queued_tasks_by_priority()
workload_list = []
for _ in range(min((open_slots, len(self.queued_tasks)))):
key, item = sorted_queue.pop(0)
# If a task makes it here but is still understood by the executor
# to be running, it generally means that the task has been killed
# externally and not yet been marked as failed.
#
# However, when a task is deferred, there is also a possibility of
# a race condition where a task might be scheduled again during
# trigger processing, even before we are able to register that the
# deferred task has completed. In this case and for this reason,
# we make a small number of attempts to see if the task has been
# removed from the running set in the meantime.
if key in self.attempts:
del self.attempts[key]
if isinstance(item, workloads.ExecuteTask) and hasattr(item, "ti"):
ti = item.ti
# If it's None, then the span for the current id hasn't been started.
if self.active_spans is not None and self.active_spans.get("ti:" + str(ti.id)) is None:
if isinstance(ti, workloads.TaskInstance):
parent_context = Trace.extract(ti.parent_context_carrier)
else:
parent_context = Trace.extract(ti.dag_run.context_carrier)
# Start a new span using the context from the parent.
# Attributes will be set once the task has finished so that all
# values will be available (end_time, duration, etc.).
span = Trace.start_child_span(
span_name=f"{ti.task_id}",
parent_context=parent_context,
component="task",
start_as_current=False,
)
self.active_spans.set("ti:" + str(ti.id), span)
# Inject the current context into the carrier.
carrier = Trace.inject()
ti.context_carrier = carrier
workload_list.append(item)
if workload_list:
self._process_workloads(workload_list)
|
Initiate async execution of the queued tasks, up to the number of available slots.
:param open_slots: Number of open slots
|
python
|
airflow-core/src/airflow/executors/base_executor.py
| 352
|
[
"self",
"open_slots"
] |
None
| true
| 10
| 7.04
|
apache/airflow
| 43,597
|
sphinx
| false
|
getModuleSpecifierText
|
function getModuleSpecifierText(promotedDeclaration: ImportClause | ImportEqualsDeclaration): string {
return promotedDeclaration.kind === SyntaxKind.ImportEqualsDeclaration
? tryCast(tryCast(promotedDeclaration.moduleReference, isExternalModuleReference)?.expression, isStringLiteralLike)?.text || promotedDeclaration.moduleReference.getText()
: cast(promotedDeclaration.parent.moduleSpecifier, isStringLiteral).text;
}
|
@param forceImportKeyword Indicates that the user has already typed `import`, so the result must start with `import`.
(In other words, do not allow `const x = require("...")` for JS files.)
@internal
|
typescript
|
src/services/codefixes/importFixes.ts
| 1,786
|
[
"promotedDeclaration"
] | true
| 3
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
isOsNameMatch
|
static boolean isOsNameMatch(final String osName, final String osNamePrefix) {
if (osName == null) {
return false;
}
return Strings.CI.startsWith(osName, osNamePrefix);
}
|
Tests whether the operating system matches with a case-insensitive comparison.
<p>
This method is package private instead of private to support unit test invocation.
</p>
@param osName the actual OS name.
@param osNamePrefix the prefix for the expected OS name.
@return true for a case-insensitive match, or false if not.
|
java
|
src/main/java/org/apache/commons/lang3/SystemUtils.java
| 2,433
|
[
"osName",
"osNamePrefix"
] | true
| 2
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
take_nd
|
def take_nd(
arr: ArrayLike,
indexer,
axis: AxisInt = 0,
fill_value=lib.no_default,
allow_fill: bool = True,
) -> ArrayLike:
"""
Specialized Cython take which sets NaN values in one pass
This dispatches to ``take`` defined on ExtensionArrays.
Note: this function assumes that the indexer is a valid(ated) indexer with
no out of bound indices.
Parameters
----------
arr : np.ndarray or ExtensionArray
Input array.
indexer : ndarray
1-D array of indices to take, subarrays corresponding to -1 value
indices are filed with fill_value
axis : int, default 0
Axis to take from
fill_value : any, default np.nan
Fill value to replace -1 values with
allow_fill : bool, default True
If False, indexer is assumed to contain no -1 values so no filling
will be done. This short-circuits computation of a mask. Result is
undefined if allow_fill == False and -1 is present in indexer.
Returns
-------
subarray : np.ndarray or ExtensionArray
May be the same type as the input, or cast to an ndarray.
"""
if fill_value is lib.no_default:
fill_value = na_value_for_dtype(arr.dtype, compat=False)
elif lib.is_np_dtype(arr.dtype, "mM"):
dtype, fill_value = maybe_promote(arr.dtype, fill_value)
if arr.dtype != dtype:
# EA.take is strict about returning a new object of the same type
# so for that case cast upfront
arr = arr.astype(dtype)
if not isinstance(arr, np.ndarray):
# i.e. ExtensionArray,
# includes for EA to catch DatetimeArray, TimedeltaArray
if not is_1d_only_ea_dtype(arr.dtype):
# i.e. DatetimeArray, TimedeltaArray
arr = cast("NDArrayBackedExtensionArray", arr)
return arr.take(
indexer, fill_value=fill_value, allow_fill=allow_fill, axis=axis
)
return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
arr = np.asarray(arr)
return _take_nd_ndarray(arr, indexer, axis, fill_value, allow_fill)
|
Specialized Cython take which sets NaN values in one pass
This dispatches to ``take`` defined on ExtensionArrays.
Note: this function assumes that the indexer is a valid(ated) indexer with
no out of bound indices.
Parameters
----------
arr : np.ndarray or ExtensionArray
Input array.
indexer : ndarray
1-D array of indices to take, subarrays corresponding to -1 value
indices are filed with fill_value
axis : int, default 0
Axis to take from
fill_value : any, default np.nan
Fill value to replace -1 values with
allow_fill : bool, default True
If False, indexer is assumed to contain no -1 values so no filling
will be done. This short-circuits computation of a mask. Result is
undefined if allow_fill == False and -1 is present in indexer.
Returns
-------
subarray : np.ndarray or ExtensionArray
May be the same type as the input, or cast to an ndarray.
|
python
|
pandas/core/array_algos/take.py
| 57
|
[
"arr",
"indexer",
"axis",
"fill_value",
"allow_fill"
] |
ArrayLike
| true
| 6
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
beforePrototypeCreation
|
@SuppressWarnings("unchecked")
protected void beforePrototypeCreation(String beanName) {
Object curVal = this.prototypesCurrentlyInCreation.get();
if (curVal == null) {
this.prototypesCurrentlyInCreation.set(beanName);
}
else if (curVal instanceof String strValue) {
Set<String> beanNameSet = CollectionUtils.newHashSet(2);
beanNameSet.add(strValue);
beanNameSet.add(beanName);
this.prototypesCurrentlyInCreation.set(beanNameSet);
}
else {
Set<String> beanNameSet = (Set<String>) curVal;
beanNameSet.add(beanName);
}
}
|
Callback before prototype creation.
<p>The default implementation registers the prototype as currently in creation.
@param beanName the name of the prototype about to be created
@see #isPrototypeCurrentlyInCreation
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
| 1,187
|
[
"beanName"
] |
void
| true
| 3
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
faceIjkPentToCellBoundaryClassIII
|
private CellBoundary faceIjkPentToCellBoundaryClassIII(int adjRes) {
final LatLng[] points = new LatLng[CellBoundary.MAX_CELL_BNDRY_VERTS];
int numPoints = 0;
final FaceIJK fijk = new FaceIJK(this.face, new CoordIJK(0, 0, 0));
final CoordIJK lastCoord = new CoordIJK(0, 0, 0);
int lastFace = this.face;
for (int vert = 0; vert < Constants.NUM_PENT_VERTS + 1; vert++) {
final int v = vert % Constants.NUM_PENT_VERTS;
// The center point is now in the same substrate grid as the origin
// cell vertices. Add the center point substate coordinates
// to each vertex to translate the vertices to that cell.
fijk.coord.reset(
VERTEX_CLASSIII[v][0] + this.coord.i,
VERTEX_CLASSIII[v][1] + this.coord.j,
VERTEX_CLASSIII[v][2] + this.coord.k
);
fijk.coord.ijkNormalize();
fijk.face = this.face;
fijk.adjustPentVertOverage(adjRes);
// all Class III pentagon edges cross icosa edges
// note that Class II pentagons have vertices on the edge,
// not edge intersections
if (vert > 0) {
// find hex2d of the two vertexes on the last face
final Vec2d orig2d0 = lastCoord.ijkToHex2d();
final int currentToLastDir = adjacentFaceDir[fijk.face][lastFace];
final FaceOrientIJK fijkOrient = faceNeighbors[fijk.face][currentToLastDir];
lastCoord.reset(fijk.coord.i, fijk.coord.j, fijk.coord.k);
// rotate and translate for adjacent face
for (int i = 0; i < fijkOrient.ccwRot60; i++) {
lastCoord.ijkRotate60ccw();
}
final int unitScale = unitScaleByCIIres[adjRes] * 3;
lastCoord.ijkAdd(fijkOrient.translateI * unitScale, fijkOrient.translateJ * unitScale, fijkOrient.translateK * unitScale);
lastCoord.ijkNormalize();
final Vec2d orig2d1 = lastCoord.ijkToHex2d();
// find the intersection and add the lat/lng point to the result
final Vec2d inter = findIntersectionPoint(orig2d0, orig2d1, adjRes, adjacentFaceDir[fijkOrient.face][fijk.face]);
if (inter != null) {
points[numPoints++] = inter.hex2dToGeo(fijkOrient.face, adjRes, true);
}
}
// convert vertex to lat/lng and add to the result
// vert == start + NUM_PENT_VERTS is only used to test for possible
// intersection on last edge
if (vert < Constants.NUM_PENT_VERTS) {
points[numPoints++] = fijk.coord.ijkToGeo(fijk.face, adjRes, true);
}
lastFace = fijk.face;
lastCoord.reset(fijk.coord.i, fijk.coord.j, fijk.coord.k);
}
return new CellBoundary(points, numPoints);
}
|
Computes the cell boundary in spherical coordinates for a pentagonal cell
for this FaceIJK address at a specified resolution.
@param res The H3 resolution of the cell.
|
java
|
libs/h3/src/main/java/org/elasticsearch/h3/FaceIJK.java
| 461
|
[
"adjRes"
] |
CellBoundary
| true
| 6
| 6.64
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.