function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
translate
|
/**
 * Helper for non-Writer usage: translates the input and returns the result
 * as a String.
 * @param input CharSequence to be translated; {@code null} returns {@code null}
 * @return String output of translation
 */
public final String translate(final CharSequence input) {
    // Null input maps to null output by contract.
    if (input == null) {
        return null;
    }
    // Pre-size to twice the input length: translation typically grows text.
    final StringWriter out = new StringWriter(input.length() * 2);
    try {
        translate(input, out);
    } catch (final IOException ioe) {
        // A StringWriter never throws IOException; surface it unchecked if it ever does.
        throw new UncheckedIOException(ioe);
    }
    return out.toString();
}
|
Helper for non-Writer usage.
@param input CharSequence to be translated.
@return String output of translation.
|
java
|
src/main/java/org/apache/commons/lang3/text/translate/CharSequenceTranslator.java
| 67
|
[
"input"
] |
String
| true
| 3
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
shortArrayToInt
|
/**
 * Converts an array of short into an int using the default (little-endian, LSB0)
 * byte and bit ordering.
 * @param src the short array to convert
 * @param srcPos the position in {@code src}, in short units, from where to start
 * @param dstInit initial value of the destination int
 * @param dstPos the position of the LSB, in bits, in the result int
 * @param nShorts the number of shorts to convert
 * @return an int containing the selected bits
 * @throws NullPointerException if {@code src} is {@code null}
 * @throws IllegalArgumentException if {@code (nShorts - 1) * 16 + dstPos >= 32}
 * @throws ArrayIndexOutOfBoundsException if {@code srcPos + nShorts > src.length}
 */
public static int shortArrayToInt(final short[] src, final int srcPos, final int dstInit, final int dstPos, final int nShorts) {
    // Nothing to convert: zero shorts requested, or an empty array addressed at 0.
    if (src.length == 0 && srcPos == 0 || 0 == nShorts) {
        return dstInit;
    }
    // The most significant converted bit must still fit in the 32-bit result.
    if ((nShorts - 1) * Short.SIZE + dstPos >= Integer.SIZE) {
        throw new IllegalArgumentException("(nShorts - 1) * 16 + dstPos >= 32");
    }
    int result = dstInit;
    for (int idx = 0; idx < nShorts; idx++) {
        final int shift = idx * Short.SIZE + dstPos;
        // Clear the destination's 16-bit window, then OR in the source bits.
        result = (result & ~(0xffff << shift)) | ((0xffff & src[idx + srcPos]) << shift);
    }
    return result;
}
|
Converts an array of short into an int using the default (little-endian, LSB0) byte and bit ordering.
@param src the short array to convert.
@param srcPos the position in {@code src}, in short unit, from where to start the conversion.
@param dstInit initial value of the destination int.
@param dstPos the position of the LSB, in bits, in the result int.
@param nShorts the number of shorts to convert.
@return an int containing the selected bits.
@throws NullPointerException if {@code src} is {@code null}.
@throws IllegalArgumentException if {@code (nShorts - 1) * 16 + dstPos >= 32}.
@throws ArrayIndexOutOfBoundsException if {@code srcPos + nShorts > src.length}.
|
java
|
src/main/java/org/apache/commons/lang3/Conversion.java
| 1,226
|
[
"src",
"srcPos",
"dstInit",
"dstPos",
"nShorts"
] | true
| 6
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
sortlevel
|
def sortlevel(
    self,
    level=None,
    ascending: bool | list[bool] = True,
    sort_remaining=None,
    na_position: NaPosition = "first",
) -> tuple[Self, np.ndarray]:
    """
    For internal compatibility with the Index API.

    Sort the Index. This is for compat with MultiIndex.

    Parameters
    ----------
    level : optional
        Compat parameter; ignored for a flat Index.
    ascending : bool or list of bool, default True
        False to sort in descending order. A list is accepted only when it
        contains exactly one bool.
    sort_remaining : optional
        Compat parameter; ignored for a flat Index.
    na_position : {'first' or 'last'}, default 'first'
        Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
        the end.

        .. versionadded:: 2.1.0

    Returns
    -------
    tuple
        The sorted index and an ndarray indexer.

    Raises
    ------
    TypeError
        If ``ascending`` is neither a bool nor a one-element list of bool.
    """
    if not isinstance(ascending, (list, bool)):
        # BUG FIX: the two string fragments previously concatenated without a
        # space, producing "...bool value ora list...".
        raise TypeError(
            "ascending must be a single bool value or "
            "a list of bool values of length 1"
        )
    if isinstance(ascending, list):
        if len(ascending) != 1:
            raise TypeError("ascending must be a list of bool values of length 1")
        # Unwrap the single-element list to a plain bool.
        ascending = ascending[0]
        if not isinstance(ascending, bool):
            raise TypeError("ascending must be a bool value")
    return self.sort_values(
        return_indexer=True, ascending=ascending, na_position=na_position
    )
|
For internal compatibility with the Index API.
Sort the Index. This is for compat with MultiIndex
Parameters
----------
ascending : bool, default True
False to sort in descending order
na_position : {'first' or 'last'}, default 'first'
Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
the end.
.. versionadded:: 2.1.0
level, sort_remaining are compat parameters
Returns
-------
Index
|
python
|
pandas/core/indexes/base.py
| 2,158
|
[
"self",
"level",
"ascending",
"sort_remaining",
"na_position"
] |
tuple[Self, np.ndarray]
| true
| 5
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
_merge
|
def _merge(self, args=None, kwargs=None, options=None, force=False):
    """Merge partial args/kwargs/options into this signature's own.

    When the signature is immutable and ``force`` is False, the stored
    args/kwargs are returned untouched and only the options are merged.
    Stamped headers are treated as immutable options and are never
    overridden.

    Arguments:
        args (Tuple): Partial args, prepended to the existing args.
        kwargs (Dict): Partial kwargs, merged over the existing kwargs.
        options (Dict): Partial options, merged with the existing options.
        force (bool): Merge args/kwargs even for an immutable signature.
            Stamped headers stay protected regardless.

    Returns:
        Tuple: (args, kwargs, options)
    """
    args = args or ()
    kwargs = kwargs or {}
    if options is None:
        merged_options = self.options
    else:
        # Options flagged immutable (plus any stamped headers, which are
        # unrelated to signature immutability) keep the value already on the
        # signature; everything else may be overridden by ``options``.
        protected = self._IMMUTABLE_OPTIONS
        if "stamped_headers" in self.options:
            protected = self._IMMUTABLE_OPTIONS.union(set(self.options.get("stamped_headers", [])))
        overrides = {
            key: value
            for key, value in options.items()
            if key not in protected or key not in self.options
        }
        merged_options = {**self.options, **overrides}
    if self.immutable and not force:
        return self.args, self.kwargs, merged_options
    merged_args = tuple(args) + tuple(self.args) if args else self.args
    merged_kwargs = dict(self.kwargs, **kwargs) if kwargs else self.kwargs
    return merged_args, merged_kwargs, merged_options
|
Merge partial args/kwargs/options with existing ones.
If the signature is immutable and ``force`` is False, the existing
args/kwargs will be returned as-is and only the options will be merged.
Stamped headers are considered immutable and will not be merged regardless.
Arguments:
args (Tuple): Partial args to be prepended to the existing args.
kwargs (Dict): Partial kwargs to be merged with existing kwargs.
options (Dict): Partial options to be merged with existing options.
force (bool): If True, the args/kwargs will be merged even if the signature is
immutable. The stamped headers are not affected by this option and will not
be merged regardless.
Returns:
Tuple: (args, kwargs, options)
|
python
|
celery/canvas.py
| 402
|
[
"self",
"args",
"kwargs",
"options",
"force"
] | false
| 11
| 7.44
|
celery/celery
| 27,741
|
google
| false
|
|
upgradedb
|
def upgradedb(
    *,
    to_revision: str | None = None,
    from_revision: str | None = None,
    show_sql_only: bool = False,
    session: Session = NEW_SESSION,
):
    """
    Upgrades the DB.

    :param to_revision: Optional Alembic revision ID to upgrade *to*.
        If omitted, upgrades to latest revision.
    :param from_revision: Optional Alembic revision ID to upgrade *from*.
        Not compatible with ``sql_only=False``.
    :param show_sql_only: if True, migration statements will be printed but not executed.
    :param session: sqlalchemy session with connection to Airflow metadata database
    :return: None
    """
    # `from_revision` only makes sense when printing SQL, not when executing.
    if from_revision and not show_sql_only:
        raise AirflowException("`from_revision` only supported with `sql_only=True`.")
    # alembic adds significant import time, so we import it lazily
    if not settings.SQL_ALCHEMY_CONN:
        raise RuntimeError("The settings.SQL_ALCHEMY_CONN not set. This is a critical assertion.")
    from alembic import command
    import_all_models()
    config = _get_alembic_config()
    if show_sql_only:
        # Offline mode: resolve missing endpoints, then emit the SQL between
        # the two revisions without touching the database.
        if not from_revision:
            from_revision = _get_current_revision(session)
        if not to_revision:
            script = _get_script_object()
            to_revision = script.get_current_head()
        if to_revision == from_revision:
            print_happy_cat("No migrations to apply; nothing to do.")
            return
        _revisions_above_min_for_offline(config=config, revisions=[from_revision, to_revision])
        _offline_migration(command.upgrade, config, f"{from_revision}:{to_revision}")
        return  # only running sql; our job is done
    # Abort (exit 1) if any pre-migration checks report problems.
    errors_seen = False
    for err in _check_migration_errors(session=session):
        if not errors_seen:
            log.error("Automatic migration is not available")
            errors_seen = True
        log.error("%s", err)
    if errors_seen:
        exit(1)
    if not _get_current_revision(session=session) and not to_revision:
        # Don't load default connections
        # New DB; initialize and exit
        initdb(session=session)
        return
    with create_global_lock(session=session, lock=DBLocks.MIGRATIONS):
        import sqlalchemy.pool
        log.info("Migrating the Airflow database")
        # Remember the caller's pool-size env var so it can be restored below.
        val = os.environ.get("AIRFLOW__DATABASE__SQL_ALCHEMY_MAX_SIZE")
        try:
            # Reconfigure the ORM to use _EXACTLY_ one connection, otherwise some db engines hang forever
            # trying to ALTER TABLEs
            os.environ["AIRFLOW__DATABASE__SQL_ALCHEMY_MAX_SIZE"] = "1"
            settings.reconfigure_orm(pool_class=sqlalchemy.pool.SingletonThreadPool)
            command.upgrade(config, revision=to_revision or "heads")
            current_revision = _get_current_revision(session=session)
            with _configured_alembic_environment() as env:
                source_heads = env.script.get_heads()
            if current_revision == source_heads[0]:
                # Only run external DB upgrade migration if user upgraded to heads
                external_db_manager = RunDBManager()
                external_db_manager.upgradedb(session)
        finally:
            # Restore the original pool-size env var and ORM configuration.
            if val is None:
                os.environ.pop("AIRFLOW__DATABASE__SQL_ALCHEMY_MAX_SIZE")
            else:
                os.environ["AIRFLOW__DATABASE__SQL_ALCHEMY_MAX_SIZE"] = val
            settings.reconfigure_orm()
    add_default_pool_if_not_exists(session=session)
    synchronize_log_template(session=session)
|
Upgrades the DB.
:param to_revision: Optional Alembic revision ID to upgrade *to*.
If omitted, upgrades to latest revision.
:param from_revision: Optional Alembic revision ID to upgrade *from*.
Not compatible with ``sql_only=False``.
:param show_sql_only: if True, migration statements will be printed but not executed.
:param session: sqlalchemy session with connection to Airflow metadata database
:return: None
|
python
|
airflow-core/src/airflow/utils/db.py
| 1,065
|
[
"to_revision",
"from_revision",
"show_sql_only",
"session"
] | true
| 17
| 6.48
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
getReader
|
/**
 * Obtain a {@link Reader} for the given template source, decoding the
 * underlying {@code Resource}'s input stream with the requested encoding.
 * @param templateSource the template source (a Spring {@code Resource})
 * @param encoding the character encoding to decode the stream with
 * @return a Reader over the resource's content
 * @throws IOException if the resource cannot be opened (logged at debug level first)
 */
@Override
public Reader getReader(Object templateSource, String encoding) throws IOException {
    Resource resource = (Resource) templateSource;
    try {
        return new InputStreamReader(resource.getInputStream(), encoding);
    }
    catch (IOException ex) {
        // Debug rather than warn: a miss here is often expected during template lookup.
        if (logger.isDebugEnabled()) {
            logger.debug("Could not find FreeMarker template: " + resource);
        }
        throw ex;
    }
}
|
Obtain a Reader for the given template source (a Spring {@code Resource}), decoding its input stream with the specified encoding; failures are logged at debug level and rethrown.
@param templateSource the Resource-backed template source
@param encoding the character encoding used to read the template
@return a Reader over the template content
|
java
|
spring-context-support/src/main/java/org/springframework/ui/freemarker/SpringTemplateLoader.java
| 79
|
[
"templateSource",
"encoding"
] |
Reader
| true
| 3
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
listTopics
|
@Override
public Map<String, List<PartitionInfo>> listTopics(Duration timeout) {
return delegate.listTopics(timeout);
}
|
Get metadata about partitions for all topics that the user is authorized to view. This method will issue a
remote call to the server.
@param timeout The maximum time this operation will block to fetch topic metadata
@return The map of topics and its partitions
@throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this
function is called
@throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while
this function is called
@throws org.apache.kafka.common.errors.TimeoutException if the topic metadata could not be fetched before
expiration of the passed timeout
@throws org.apache.kafka.common.KafkaException for any other unrecoverable errors
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
| 1,483
|
[
"timeout"
] | true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
isSimpleBindingOrAssignmentElement
|
/**
 * Determines whether a binding or assignment element is "simple": its target is
 * an elision or an identifier (recursively so for nested patterns), any property
 * name is a literal, and any initializer is a simply inlineable expression.
 * @param element The binding or assignment element to test.
 */
function isSimpleBindingOrAssignmentElement(element: BindingOrAssignmentElement): boolean {
    const target = getTargetOfBindingOrAssignmentElement(element);
    // No target or an elision (`[, x]`) trivially counts as simple.
    if (!target || isOmittedExpression(target)) return true;
    const propertyName = tryGetPropertyNameOfBindingOrAssignmentElement(element);
    if (propertyName && !isPropertyNameLiteral(propertyName)) return false;
    const initializer = getInitializerOfBindingOrAssignmentElement(element);
    if (initializer && !isSimpleInlineableExpression(initializer)) return false;
    // A nested pattern is simple only if every one of its elements is simple.
    if (isBindingOrAssignmentPattern(target)) return every(getElementsOfBindingOrAssignmentPattern(target), isSimpleBindingOrAssignmentElement);
    return isIdentifier(target);
}
|
Determines whether a binding or assignment element is "simple": its target is
an elision or identifier (recursively so for nested patterns), any property name
is a literal, and any initializer is a simply inlineable expression.
@param element The binding or assignment element to test.
|
typescript
|
src/compiler/transformers/destructuring.ts
| 522
|
[
"element"
] | true
| 8
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
including
|
/**
 * Return a new {@code ErrorAttributeOptions} with the specified attribute
 * {@link Include} options added to the current set.
 * @param includes error attributes to include
 * @return a new immutable {@code ErrorAttributeOptions}
 */
public ErrorAttributeOptions including(Include... includes) {
    EnumSet<Include> merged = copyIncludes();
    Collections.addAll(merged, includes);
    return new ErrorAttributeOptions(Collections.unmodifiableSet(merged));
}
|
Return an {@code ErrorAttributeOptions} that includes the specified attribute
{@link Include} options.
@param includes error attributes to include
@return an {@code ErrorAttributeOptions}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/web/error/ErrorAttributeOptions.java
| 67
|
[] |
ErrorAttributeOptions
| true
| 1
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
writeStatement
|
/**
 * Appends a statement to the current label's statement list, creating the
 * list on first use. A missing statement is ignored.
 * @param statement A statement to write.
 */
function writeStatement(statement: Statement): void {
    if (!statement) {
        return;
    }
    if (statements) {
        statements.push(statement);
    }
    else {
        statements = [statement];
    }
}
|
Writes a statement to the current label's statement list.
@param statement A statement to write.
|
typescript
|
src/compiler/transformers/generators.ts
| 3,078
|
[
"statement"
] | true
| 4
| 6.56
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
pyarrow_array_to_numpy_and_mask
|
def pyarrow_array_to_numpy_and_mask(
    arr, dtype: np.dtype
) -> tuple[np.ndarray, np.ndarray]:
    """
    Convert a primitive pyarrow.Array to a numpy array and boolean mask based
    on the buffers of the Array.

    At the moment pyarrow.BooleanArray is not supported.

    Parameters
    ----------
    arr : pyarrow.Array
    dtype : numpy.dtype

    Returns
    -------
    (data, mask)
        Tuple of two numpy arrays with the raw data (with specified dtype) and
        a boolean mask (validity mask, so False means missing)
    """
    dtype = np.dtype(dtype)
    length = len(arr)
    if pyarrow.types.is_null(arr.type):
        # All-null array: data contents are unspecified; mask marks everything missing.
        return np.empty(length, dtype=dtype), np.zeros(length, dtype=bool)
    buffers = arr.buffers()
    # Since Arrow buffers might contain padding and the data might be offset,
    # the buffer gets sliced here before handing it to numpy.
    # See also https://github.com/pandas-dev/pandas/issues/40896
    start = arr.offset * dtype.itemsize
    stop = start + length * dtype.itemsize
    data = np.frombuffer(buffers[1][start:stop], dtype=dtype)
    validity = buffers[0]
    if validity is None:
        # No validity bitmap means every value is present.
        mask = np.ones(length, dtype=bool)
    else:
        bool_arr = pyarrow.BooleanArray.from_buffers(
            pyarrow.bool_(), length, [None, validity], offset=arr.offset
        )
        mask = np.asarray(bool_arr)
    return data, mask
|
Convert a primitive pyarrow.Array to a numpy array and boolean mask based
on the buffers of the Array.
At the moment pyarrow.BooleanArray is not supported.
Parameters
----------
arr : pyarrow.Array
dtype : numpy.dtype
Returns
-------
(data, mask)
Tuple of two numpy arrays with the raw data (with specified dtype) and
a boolean mask (validity mask, so False means missing)
|
python
|
pandas/core/arrays/arrow/_arrow_utils.py
| 7
|
[
"arr",
"dtype"
] |
tuple[np.ndarray, np.ndarray]
| true
| 4
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
replaceIn
|
/**
 * Replaces all the occurrences of variables within the given source builder,
 * in place, with their matching values from the resolver.
 * @param source the builder to replace in, updated; {@code null} returns {@code false}
 * @return true if the builder was altered
 */
public boolean replaceIn(final StrBuilder source) {
    return source != null && substitute(source, 0, source.length());
}
|
Replaces all the occurrences of variables within the given source
builder with their matching values from the resolver.
@param source the builder to replace in, updated, null returns zero.
@return true if altered.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrSubstitutor.java
| 729
|
[
"source"
] | true
| 2
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
standardLastEntry
|
/**
 * A sensible definition of {@code lastEntry()} in terms of the descending
 * view: the first entry of {@code descendingMultiset().entrySet().iterator()}
 * is this multiset's last entry. Returns {@code null} when empty.
 */
protected @Nullable Entry<E> standardLastEntry() {
    Iterator<Entry<E>> descendingEntries = descendingMultiset().entrySet().iterator();
    if (!descendingEntries.hasNext()) {
        return null;
    }
    Entry<E> head = descendingEntries.next();
    // Snapshot element and count so the returned entry is stable.
    return Multisets.immutableEntry(head.getElement(), head.getCount());
}
|
A sensible definition of {@link #lastEntry()} in terms of {@code
descendingMultiset().entrySet().iterator()}.
<p>If you override {@link #descendingMultiset} or {@link #entrySet()}, you may wish to override
{@link #firstEntry()} to forward to this implementation.
|
java
|
android/guava/src/com/google/common/collect/ForwardingSortedMultiset.java
| 143
|
[] | true
| 2
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
|
applyRules
|
/**
 * Performs the formatting by applying the rules to the specified calendar.
 * @param calendar the calendar to format
 * @param buf the buffer to format into
 * @return the specified string buffer
 * @deprecated Use {@link #format(Calendar, Appendable)}
 */
@Deprecated
protected StringBuffer applyRules(final Calendar calendar, final StringBuffer buf) {
    // Delegates directly to the underlying printer.
    return printer.format(calendar, buf);
}
|
Performs the formatting by applying the rules to the specified calendar.
@param calendar the calendar to format.
@param buf the buffer to format into.
@return the specified string buffer.
@deprecated Use {@link #format(Calendar, Appendable)}
|
java
|
src/main/java/org/apache/commons/lang3/time/FastDateFormat.java
| 378
|
[
"calendar",
"buf"
] |
StringBuffer
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
partial_fit
|
def partial_fit(self, X, y, classes=None):
    """Fit linear model with Passive Aggressive algorithm.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Subset of the training data.
    y : array-like of shape (n_samples,)
        Subset of the target values.
    classes : ndarray of shape (n_classes,)
        Classes across all calls to partial_fit. Can be obtained via
        ``np.unique(y_all)``, where ``y_all`` is the target vector of the
        entire dataset. Required for the first call to partial_fit and may
        be omitted in subsequent calls. Note that ``y`` doesn't need to
        contain all labels in ``classes``.

    Returns
    -------
    self : object
        Fitted estimator.
    """
    first_call = not hasattr(self, "classes_")
    if first_call:
        # Run the extended parameter validation only once, on the first call.
        self._more_validate_params(for_partial_fit=True)
        if self.class_weight == "balanced":
            raise ValueError(
                "class_weight 'balanced' is not supported for "
                "partial_fit. For 'balanced' weights, use "
                "`sklearn.utils.compute_class_weight` with "
                "`class_weight='balanced'`. In place of y you "
                "can use a large enough subset of the full "
                "training set target to properly estimate the "
                "class frequency distributions. Pass the "
                "resulting weights as the class_weight "
                "parameter."
            )
    # For an explanation, see
    # https://github.com/scikit-learn/scikit-learn/pull/1259#issuecomment-9818044
    learning_rate = "pa1" if self.loss == "hinge" else "pa2"
    return self._partial_fit(
        X,
        y,
        alpha=1.0,
        loss="hinge",
        learning_rate=learning_rate,
        max_iter=1,
        classes=classes,
        sample_weight=None,
        coef_init=None,
        intercept_init=None,
    )
|
Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Subset of the training data.
y : array-like of shape (n_samples,)
Subset of the target values.
classes : ndarray of shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : object
Fitted estimator.
|
python
|
sklearn/linear_model/_passive_aggressive.py
| 249
|
[
"self",
"X",
"y",
"classes"
] | false
| 4
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
parse
|
/**
 * Parse a private key from the given PEM text, trying each registered parser
 * in order and returning the first successful result. The password is used
 * for decryption if the key is encrypted.
 * @param text the text to parse; {@code null} returns {@code null}
 * @param password the password used to decrypt an encrypted private key, may be {@code null}
 * @return the parsed private key, or {@code null} when {@code text} was {@code null}
 * @throws IllegalStateException if a parser fails, or no parser recognizes the format
 */
static @Nullable PrivateKey parse(@Nullable String text, @Nullable String password) {
    if (text == null) {
        return null;
    }
    try {
        // First parser to produce a non-null key wins.
        for (PemParser pemParser : PEM_PARSERS) {
            PrivateKey privateKey = pemParser.parse(text, password);
            if (privateKey != null) {
                return privateKey;
            }
        }
    }
    catch (Exception ex) {
        throw new IllegalStateException("Error loading private key file: " + ex.getMessage(), ex);
    }
    throw new IllegalStateException("Missing private key or unrecognized format");
}
|
Parse a private key from the specified string, using the provided password for
decryption if necessary.
@param text the text to parse
@param password the password used to decrypt an encrypted private key
@return the parsed private key
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/pem/PemPrivateKeyParser.java
| 205
|
[
"text",
"password"
] |
PrivateKey
| true
| 4
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
booleanValue
|
/**
 * Returns the current parser value as a {@code boolean}.
 * <p>NOTE(review): the exact coercion rules (e.g. whether the strings
 * "true"/"false" are accepted) are implementation-defined — confirm against
 * the concrete parser implementations.
 * @throws IOException if the value cannot be read
 */
boolean booleanValue() throws IOException;
|
@return true iff the current value is either boolean (<code>true</code> or <code>false</code>) or one of "false", "true".
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentParser.java
| 175
|
[] | true
| 1
| 6.32
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
findSingleMainClass
|
/**
 * Find a single main class in a given jar file. A main class annotated with
 * the given {@code annotationName} is preferred over one without it.
 * @param jarFile the jar file to search
 * @param classesLocation the location within the jar containing classes
 * @param annotationName the name of the annotation that may be present on the main class
 * @return the main class name, or {@code null} if none was found
 * @throws IOException if the jar file cannot be read
 */
public static @Nullable String findSingleMainClass(JarFile jarFile, @Nullable String classesLocation,
        @Nullable String annotationName) throws IOException {
    // The callback accumulates candidates and applies the annotation preference.
    SingleMainClassCallback callback = new SingleMainClassCallback(annotationName);
    MainClassFinder.doWithMainClasses(jarFile, classesLocation, callback);
    return callback.getMainClassName();
}
|
Find a single main class in a given jar file. A main class annotated with an
annotation with the given {@code annotationName} will be preferred over a main
class with no such annotation.
@param jarFile the jar file to search
@param classesLocation the location within the jar containing classes
@param annotationName the name of the annotation that may be present on the main
class
@return the main class or {@code null}
@throws IOException if the jar file cannot be read
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/MainClassFinder.java
| 203
|
[
"jarFile",
"classesLocation",
"annotationName"
] |
String
| true
| 1
| 6.56
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
isParameterNameStart
|
/**
 * Returns true if the current token can start a parameter name: a binding
 * identifier, or the opening `[` / `{` of a binding pattern.
 */
function isParameterNameStart() {
    // Be permissive about await and yield by calling isBindingIdentifier instead of isIdentifier; disallowing
    // them during a speculative parse leads to many more follow-on errors than allowing the function to parse then later
    // complaining about the use of the keywords.
    return isBindingIdentifier() || token() === SyntaxKind.OpenBracketToken || token() === SyntaxKind.OpenBraceToken;
}
|
Returns true if the current token can start a parameter name: a binding identifier
(permissive about `await`/`yield` to reduce follow-on errors during speculative parsing)
or the opening `[` / `{` of a binding pattern.
|
typescript
|
src/compiler/parser.ts
| 4,019
|
[] | false
| 3
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
splitByWholeSeparatorPreserveAllTokens
|
/**
 * Splits the provided text into an array using the whole separator string,
 * preserving all tokens (adjacent separators yield empty tokens) and
 * returning a maximum of {@code max} substrings.
 * @param str the String to parse, may be null; {@code null} returns {@code null}
 * @param separator the delimiter String; {@code null} splits on whitespace
 * @param max the maximum number of elements in the returned array; zero or negative means no limit
 * @return an array of parsed Strings, {@code null} if null String was input
 */
public static String[] splitByWholeSeparatorPreserveAllTokens(final String str, final String separator, final int max) {
    // Delegate to the shared worker with preserveAllTokens = true.
    return splitByWholeSeparatorWorker(str, separator, max, true);
}
|
Splits the provided text into an array, separator string specified. Returns a maximum of {@code max} substrings.
<p>
The separator is not included in the returned String array. Adjacent separators are treated as separators for empty tokens. For more control over the
split use the StrTokenizer class.
</p>
<p>
A {@code null} input String returns {@code null}. A {@code null} separator splits on whitespace.
</p>
<pre>
StringUtils.splitByWholeSeparatorPreserveAllTokens(null, *, *) = null
StringUtils.splitByWholeSeparatorPreserveAllTokens("", *, *) = []
StringUtils.splitByWholeSeparatorPreserveAllTokens("ab de fg", null, 0) = ["ab", "de", "fg"]
StringUtils.splitByWholeSeparatorPreserveAllTokens("ab   de fg", null, 0) = ["ab", "", "", "de", "fg"]
StringUtils.splitByWholeSeparatorPreserveAllTokens("ab:cd:ef", ":", 2) = ["ab", "cd:ef"]
StringUtils.splitByWholeSeparatorPreserveAllTokens("ab-!-cd-!-ef", "-!-", 5) = ["ab", "cd", "ef"]
StringUtils.splitByWholeSeparatorPreserveAllTokens("ab-!-cd-!-ef", "-!-", 2) = ["ab", "cd-!-ef"]
</pre>
@param str the String to parse, may be null.
@param separator String containing the String to be used as a delimiter, {@code null} splits on whitespace.
@param max the maximum number of elements to include in the returned array. A zero or negative value implies no limit.
@return an array of parsed Strings, {@code null} if null String was input.
@since 2.4
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 7,339
|
[
"str",
"separator",
"max"
] | true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
parseValue
|
/**
 * Parses a single configuration value for {@code key}, falling back to the
 * key's default when no value was supplied, then runs the key's validator.
 * For {@link ValidList}-validated list values, duplicates are removed first
 * (logging a warning when any are dropped).
 * @param key the config key definition (name, type, default value, validator)
 * @param value the raw supplied value; ignored when {@code isSet} is false
 * @param isSet whether the caller actually supplied a value for this key
 * @return the parsed (and validated) value
 * @throws ConfigException if the key is required but missing, or validation fails
 */
Object parseValue(ConfigKey key, Object value, boolean isSet) {
    Object parsedValue;
    if (isSet) {
        parsedValue = parseType(key.name, value, key.type);
        // props map doesn't contain setting, the key is required because no default value specified - its an error
    } else if (NO_DEFAULT_VALUE.equals(key.defaultValue)) {
        throw new ConfigException("Missing required configuration \"" + key.name + "\" which has no default value.");
    } else {
        // otherwise assign setting its default value
        parsedValue = key.defaultValue;
    }
    if (key.validator instanceof ValidList && parsedValue instanceof List) {
        // De-duplicate list values while preserving first-occurrence order.
        List<?> originalListValue = (List<?>) parsedValue;
        parsedValue = originalListValue.stream().distinct().collect(Collectors.toList());
        if (originalListValue.size() != ((List<?>) parsedValue).size()) {
            LOGGER.warn("Configuration key \"{}\" contains duplicate values. Duplicates will be removed. The original value " +
                    "is: {}, the updated value is: {}", key.name, originalListValue, parsedValue);
        }
    }
    // Validation runs on the final value, whether supplied, defaulted, or de-duplicated.
    if (key.validator != null) {
        key.validator.ensureValid(key.name, parsedValue);
    }
    return parsedValue;
}
|
Parse and validate configs against this configuration definition. The input is a map of configs. It is expected
that the keys of the map are strings, but the values can either be strings or they may already be of the
appropriate type (int, string, etc). This will work equally well with either java.util.Properties instances or a
programmatically constructed map.
@param props The configs to parse and validate.
@return Parsed and validated configs. The key will be the config name and the value will be the value parsed into
the appropriate type (int, string, etc).
|
java
|
clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
| 531
|
[
"key",
"value",
"isSet"
] |
Object
| true
| 7
| 8.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
close
|
/**
 * Sends an SSL close_notify message (best-effort) and closes the socket channel.
 * Idempotent: a second call while already CLOSING returns immediately. Network
 * buffers are always released in the finally block, even if the close message
 * cannot be sent.
 * @throws IOException declared by the interface; send failures are logged at debug level instead
 */
@Override
public void close() throws IOException {
    State prevState = state;
    // Already closing: nothing to do (guards against re-entrant close).
    if (state == State.CLOSING) return;
    state = State.CLOSING;
    sslEngine.closeOutbound();
    try {
        // Only attempt the close handshake if the connection was actually established.
        if (prevState != State.NOT_INITIALIZED && isConnected()) {
            if (!flush(netWriteBuffer)) {
                throw new IOException("Remaining data in the network buffer, can't send SSL close message.");
            }
            //prep the buffer for the close message
            netWriteBuffer.clear();
            //perform the close, since we called sslEngine.closeOutbound
            SSLEngineResult wrapResult = sslEngine.wrap(ByteUtils.EMPTY_BUF, netWriteBuffer);
            //we should be in a close state
            if (wrapResult.getStatus() != SSLEngineResult.Status.CLOSED) {
                throw new IOException("Unexpected status returned by SSLEngine.wrap, expected CLOSED, received " +
                        wrapResult.getStatus() + ". Will not send close message to peer.");
            }
            netWriteBuffer.flip();
            flush(netWriteBuffer);
        }
    } catch (IOException ie) {
        // Best-effort: failure to deliver close_notify must not prevent resource cleanup.
        log.debug("Failed to send SSL Close message", ie);
    } finally {
        try {
            sslEngine.closeInbound();
        } catch (SSLException e) {
            // This log is for debugging purposes as an exception might occur frequently
            // at this point due to peers not following the TLS specs and failing to send a close_notify alert.
            // Even if they do, currently, we do not read data from the socket after invoking close().
            log.debug("SSLEngine.closeInBound() raised an exception.", e);
        }
        socketChannel.socket().close();
        socketChannel.close();
        // Drop buffer references so they can be garbage collected.
        netReadBuffer = null;
        netWriteBuffer = null;
        appReadBuffer = null;
        if (fileChannelBuffer != null) {
            // Mapped buffers must be explicitly unmapped to release the file mapping.
            ByteBufferUnmapper.unmap("fileChannelBuffer", fileChannelBuffer);
            fileChannelBuffer = null;
        }
    }
}
|
Sends an SSL close message and closes socketChannel.
|
java
|
clients/src/main/java/org/apache/kafka/common/network/SslTransportLayer.java
| 177
|
[] |
void
| true
| 9
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
removeRequest
|
/**
 * Removes this request from the buffer of unsent OffsetFetch requests,
 * logging a warning when the request is not present there.
 */
@Override
void removeRequest() {
    boolean removed = unsentOffsetFetchRequests().remove(this);
    if (!removed) {
        log.warn("OffsetFetch request to remove not found in the outbound buffer: {}", this);
    }
}
|
Removes this OffsetFetch request from the buffer of unsent requests, logging a
warning if the request is not found in the outbound buffer.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java
| 1,105
|
[] |
void
| true
| 2
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
pathChecks
|
/**
 * Builds the instrumentation descriptors for the platform-specific {@code Path}
 * implementation classes. For each distinct concrete {@code Path} class backing
 * the default filesystem's root directories, looks up the implementations of
 * {@code toRealPath} and both {@code register} overloads and pairs each with the
 * corresponding {@code checkPath*} method on {@link EntitlementChecker}.
 * Reflective lookup failures are rethrown as {@link RuntimeException}.
 */
private static Stream<InstrumentationService.InstrumentationInfo> pathChecks() {
    // The concrete Path implementations actually in use on this platform.
    var pathClasses = StreamSupport.stream(FileSystems.getDefault().getRootDirectories().spliterator(), false)
        .map(Path::getClass)
        .distinct();
    return pathClasses.flatMap(pathClass -> {
        // Maps a Path interface method to its concrete implementation plus the
        // checker method named "checkPath" + capitalized method name.
        InstrumentationInfoFactory instrumentation = (String methodName, Class<?>... parameterTypes) -> INSTRUMENTATION_SERVICE
            .lookupImplementationMethod(
                Path.class,
                methodName,
                pathClass,
                EntitlementChecker.class,
                "checkPath" + Character.toUpperCase(methodName.charAt(0)) + methodName.substring(1),
                parameterTypes
            );
        try {
            return Stream.of(
                instrumentation.of("toRealPath", LinkOption[].class),
                instrumentation.of("register", WatchService.class, WatchEvent.Kind[].class),
                instrumentation.of("register", WatchService.class, WatchEvent.Kind[].class, WatchEvent.Modifier[].class)
            );
        } catch (NoSuchMethodException | ClassNotFoundException e) {
            throw new RuntimeException(e);
        }
    });
}
|
Builds the instrumentation descriptors for the platform-specific {@link Path}
implementation classes. For each distinct concrete Path class backing the default
filesystem's root directories, looks up the concrete implementations of
{@code toRealPath} and both {@code register} overloads via
{@link InstrumentationService#lookupImplementationMethod} and pairs each with the
corresponding {@code checkPath*} method on {@link EntitlementChecker}. Reflective
lookup failures ({@code NoSuchMethodException}, {@code ClassNotFoundException})
are rethrown as {@link RuntimeException}.
|
java
|
libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/DynamicInstrumentation.java
| 224
|
[] | true
| 2
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
_unsafe_acquire_flock_with_timeout
|
def _unsafe_acquire_flock_with_timeout(
    flock: BaseFileLock,
    timeout: float | None,
) -> None:
    """Acquire *flock* with timeout support, leaving release to the caller (unsafe).

    The lock is NOT released automatically; the caller must call
    ``flock.release()`` explicitly, preferably in a ``try``/``finally`` block,
    or use the safe ``_acquire_flock_with_timeout`` context manager instead.

    Args:
        flock: The FileLock object to acquire.
        timeout: Timeout in seconds. If None, ``_DEFAULT_TIMEOUT`` is used.
            - ``_BLOCKING`` (-1.0) waits indefinitely
            - ``_NON_BLOCKING`` (0.0) returns immediately
            - a positive value waits up to that many seconds

    Raises:
        FileLockTimeoutError: If the lock cannot be acquired within the timeout.

    Example:
        flock = FileLock("/tmp/my_process.lock")
        try:
            _unsafe_acquire_flock_with_timeout(flock, timeout=30.0)
            perform_exclusive_file_operation()
        finally:
            flock.release()  # Must manually release!
    """
    effective_timeout: float = _DEFAULT_TIMEOUT if timeout is None else timeout
    try:
        _ = flock.acquire(timeout=effective_timeout)
    except Timeout as err:
        # Translate the third-party Timeout into the project's error type.
        raise exceptions.FileLockTimeoutError(flock, effective_timeout) from err
|
Acquire a FileLock with timeout without automatic release (unsafe).
This function acquires a file lock with timeout support but does NOT automatically
release it. The caller is responsible for releasing the lock explicitly.
Use this only when you need manual control over lock lifetime.
Args:
flock: The FileLock object to acquire
timeout: Timeout in seconds. If None, uses _DEFAULT_TIMEOUT.
- Use _BLOCKING (-1.0) for infinite wait
- Use _NON_BLOCKING (0.0) for immediate return
- Use positive value for finite timeout
Raises:
FileLockTimeoutError: If the file lock cannot be acquired within the timeout period
Warning:
This is an "unsafe" function because it does not automatically release
the lock. Always call flock.release() when done, preferably in a try/finally
block or use the safe _acquire_flock_with_timeout context manager instead.
Example:
flock = FileLock("/tmp/my_process.lock")
try:
_unsafe_acquire_flock_with_timeout(flock, timeout=30.0)
# Critical section - file lock is held
perform_exclusive_file_operation()
finally:
flock.release() # Must manually release!
|
python
|
torch/_inductor/runtime/caching/locks.py
| 158
|
[
"flock",
"timeout"
] |
None
| true
| 2
| 8
|
pytorch/pytorch
| 96,034
|
google
| false
|
set_deterministic_debug_mode
|
def set_deterministic_debug_mode(debug_mode: builtins.int | str) -> None:
r"""Sets the debug mode for deterministic operations.
.. note:: This is an alternative interface for
:func:`torch.use_deterministic_algorithms`. Refer to that function's
documentation for details about affected operations.
Args:
debug_mode(str or int): If "default" or 0, don't error or warn on
nondeterministic operations. If "warn" or 1, warn on
nondeterministic operations. If "error" or 2, error on
nondeterministic operations.
"""
# NOTE: builtins.int is used here because int in this scope resolves
# to torch.int
if not isinstance(debug_mode, (builtins.int, str)):
raise TypeError(f"debug_mode must be str or int, but got {type(debug_mode)}")
if isinstance(debug_mode, str):
if debug_mode == "default":
debug_mode = 0
elif debug_mode == "warn":
debug_mode = 1
elif debug_mode == "error":
debug_mode = 2
else:
raise RuntimeError(
"invalid value of debug_mode, expected one of `default`, "
f"`warn`, `error`, but got {debug_mode}"
)
if debug_mode == 0:
_C._set_deterministic_algorithms(False)
elif debug_mode == 1:
_C._set_deterministic_algorithms(True, warn_only=True)
elif debug_mode == 2:
_C._set_deterministic_algorithms(True)
else:
raise RuntimeError(
f"invalid value of debug_mode, expected 0, 1, or 2, but got {debug_mode}"
)
|
r"""Sets the debug mode for deterministic operations.
.. note:: This is an alternative interface for
:func:`torch.use_deterministic_algorithms`. Refer to that function's
documentation for details about affected operations.
Args:
debug_mode(str or int): If "default" or 0, don't error or warn on
nondeterministic operations. If "warn" or 1, warn on
nondeterministic operations. If "error" or 2, error on
nondeterministic operations.
|
python
|
torch/__init__.py
| 1,534
|
[
"debug_mode"
] |
None
| true
| 11
| 6.4
|
pytorch/pytorch
| 96,034
|
google
| false
|
allNull
|
/**
 * Tests whether every value in the given array is {@code null}.
 * <p>
 * Returns {@code true} when all values are {@code null}, or when the array itself is
 * {@code null} or empty; returns {@code false} as soon as any non-null value is present.
 * </p>
 * @param values the values to test, may be {@code null} or empty.
 * @return {@code true} if no non-null value exists in the array.
 */
public static boolean allNull(final Object... values) {
    final boolean hasNonNull = anyNotNull(values);
    return !hasNonNull;
}
|
Tests if all values in the given array are {@code null}.
<p>
If all the values are {@code null} or the array is {@code null} or empty, then {@code true} is returned, otherwise {@code false} is returned.
</p>
<pre>
ObjectUtils.allNull(*) = false
ObjectUtils.allNull(*, null) = false
ObjectUtils.allNull(null, *) = false
ObjectUtils.allNull(null, null, *, *) = false
ObjectUtils.allNull(null) = true
ObjectUtils.allNull(null, null) = true
</pre>
@param values the values to test, may be {@code null} or empty.
@return {@code true} if all values in the array are {@code null}s, {@code false} if there is at least one non-null value in the array.
@since 3.11
|
java
|
src/main/java/org/apache/commons/lang3/ObjectUtils.java
| 168
|
[] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
validate_release_date
|
def validate_release_date(ctx: click.core.Context, param: click.core.Option, value: str) -> str:
    """
    Validate that the date follows YYYY-MM-DD[_NN] format.

    :param ctx: Click context (unused; required by the callback signature)
    :param param: Click parameter (unused; required by the callback signature)
    :param value: The value to validate; empty values pass through unchanged
    :return: The validated value
    :raises click.BadParameter: If the value doesn't match the required format
    """
    if not value:
        return value
    # Shape check: YYYY-MM-DD with an optional _NN suffix.
    pattern = r"^\d{4}-\d{2}-\d{2}(_\d{2})?$"
    if not re.match(pattern, value):
        raise click.BadParameter(
            "Date must be in YYYY-MM-DD or YYYY-MM-DD_NN format (e.g., 2025-11-16 or 2025-11-16_01)"
        )
    # Calendar check: the YYYY-MM-DD part must be a real date.
    date_part = value.split("_")[0]
    try:
        datetime.strptime(date_part, "%Y-%m-%d")
    except ValueError:
        # Chain-suppress the ValueError (B904): BadParameter is the
        # user-facing error and the parsing traceback is just noise.
        raise click.BadParameter(f"Invalid date: {date_part}. Please provide a valid date.") from None
    return value
|
Validate that the date follows YYYY-MM-DD[_NN] format.
:param ctx: Click context
:param param: Click parameter
:param value: The value to validate
:return: The validated value
:raises click.BadParameter: If the value doesn't match the required format
|
python
|
dev/breeze/src/airflow_breeze/utils/click_validators.py
| 25
|
[
"ctx",
"param",
"value"
] |
str
| true
| 3
| 7.44
|
apache/airflow
| 43,597
|
sphinx
| false
|
strip_newsgroup_footer
|
def strip_newsgroup_footer(text):
    """
    Given text in "news" format, attempt to remove a signature block.

    As a rough heuristic, signatures are assumed to be set apart by either a
    blank line or a line made of hyphens, and the last such line in the text
    (disregarding blank lines at the end) marks the start of the footer.

    Parameters
    ----------
    text : str
        The text from which to remove the signature block.
    """
    lines = text.strip().split("\n")
    separator_idx = 0
    # Scan from the bottom for the last blank or all-hyphen line.
    for idx in range(len(lines) - 1, -1, -1):
        if lines[idx].strip().strip("-") == "":
            separator_idx = idx
            break
    # A separator at index 0 (or none at all) leaves the text untouched.
    if separator_idx > 0:
        return "\n".join(lines[:separator_idx])
    return text
|
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
Parameters
----------
text : str
The text from which to remove the signature block.
|
python
|
sklearn/datasets/_twenty_newsgroups.py
| 137
|
[
"text"
] | false
| 5
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
parseAssertsTypePredicate
|
function parseAssertsTypePredicate(): TypeNode {
    // Parses `asserts x` / `asserts this` with an optional `is T` clause.
    const pos = getNodePos();
    const assertsModifier = parseExpectedToken(SyntaxKind.AssertsKeyword);
    let parameterName;
    if (token() === SyntaxKind.ThisKeyword) {
        parameterName = parseThisTypeNode();
    }
    else {
        parameterName = parseIdentifier();
    }
    let type;
    if (parseOptional(SyntaxKind.IsKeyword)) {
        type = parseType();
    }
    return finishNode(factory.createTypePredicateNode(assertsModifier, parameterName, type), pos);
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 4,932
|
[] | true
| 3
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
readFirstLine
|
/**
 * Reads the first line from a file by delegating to
 * {@code asCharSource(file, charset).readFirstLine()}.
 *
 * @param file the file to read from
 * @param charset the charset used to decode the file's contents
 * @return the first line, or {@code null} if the file is empty
 * @throws IOException if an I/O error occurs
 * @deprecated Prefer {@code Files.asCharSource(file, charset).readFirstLine()},
 *     as declared by the {@code @InlineMe} replacement below.
 */
@Deprecated
@InlineMe(
    replacement = "Files.asCharSource(file, charset).readFirstLine()",
    imports = "com.google.common.io.Files")
public
static @Nullable String readFirstLine(File file, Charset charset) throws IOException {
  return asCharSource(file, charset).readFirstLine();
}
|
Reads the first line from a file. The line does not include line-termination characters, but
does include other leading and trailing whitespace.
@param file the file to read from
@param charset the charset used to decode the input stream; see {@link StandardCharsets} for
helpful predefined constants
@return the first line, or null if the file is empty
@throws IOException if an I/O error occurs
@deprecated Prefer {@code asCharSource(file, charset).readFirstLine()}.
|
java
|
android/guava/src/com/google/common/io/Files.java
| 518
|
[
"file",
"charset"
] |
String
| true
| 1
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
getParentPath
|
/**
 * Returns the parent directory of the given path, or {@code null} when the path
 * is a root or refers (possibly implicitly) to the working directory.
 */
private static @Nullable Path getParentPath(Path path) {
    Path parent = path.getParent();
    if (parent != null) {
        // Paths with an explicit parent, e.g. "/foo" -> "/", "foo/bar" -> "foo",
        // "C:\foo" -> "C:\", "\foo" -> "\" (current drive on Windows),
        // "C:foo" -> "C:" (working dir of drive C on Windows).
        return parent;
    }
    if (path.getNameCount() == 0) {
        // Roots ("/", "C:\", "\") have no parent. Working-dir paths ("" and
        // "C:") also yield null because:
        // A) it's not specified that "" is the path to the working directory;
        // B) for recursive delete, deleting the working dir via a relative
        //    path typically fails anyway, so failing here is acceptable;
        // C) for opening a SecureDirectoryStream, the path can be opened
        //    directly without symlink concerns, so no parent is needed.
        return null;
    }
    // A single-name relative path like "foo": its parent is the working dir.
    return path.getFileSystem().getPath(".");
}
|
Returns a path to the parent directory of the given path. If the path actually has a parent
path, this is simple. Otherwise, we need to do some trickier things. Returns null if the path
is a root or is the empty path.
|
java
|
android/guava/src/com/google/common/io/MoreFiles.java
| 712
|
[
"path"
] |
Path
| true
| 3
| 6
|
google/guava
| 51,352
|
javadoc
| false
|
removeIfEmpty
|
/**
 * Removes this collection's entry from the backing map when the collection has
 * become empty. For subcollections, the check is delegated up to the root
 * ancestor collection.
 */
void removeIfEmpty() {
    if (ancestor == null) {
        if (delegate.isEmpty()) {
            map.remove(key);
        }
    } else {
        ancestor.removeIfEmpty();
    }
}
|
If collection is empty, remove it from {@code AbstractMapBasedMultimap.this.map}. For
subcollections, check whether the ancestor collection is empty.
|
java
|
android/guava/src/com/google/common/collect/AbstractMapBasedMultimap.java
| 369
|
[] |
void
| true
| 3
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
exponentialMapToSE3Inv
|
/**
 * @brief Inverse of the exponential map from se(3) to SE(3).
 *
 * Reads a 6x1 twist (vx, vy, vz, wx, wy, wz), converts the rotational part to a
 * rotation matrix R via Rodrigues, computes the translation V*u of exp(twist),
 * and outputs the inverse transform: R1 = R^T and t1 = -R^T * (V*u).
 *
 * @param twist 6x1 twist vector (linear part first, then angular part).
 * @param R1 Output: transposed rotation matrix of the exponential.
 * @param t1 Output: translation of the inverse transform.
 */
static void exponentialMapToSE3Inv(const Mat& twist, Mat& R1, Mat& t1)
{
    //see Exponential Map in http://ethaneade.com/lie.pdf
    /*
    \begin{align*}
    \boldsymbol{\delta} &= \left( \mathbf{u}, \boldsymbol{\omega} \right ) \in se(3) \\
    \mathbf{u}, \boldsymbol{\omega} &\in \mathbb{R}^3 \\
    \theta &= \sqrt{ \boldsymbol{\omega}^T \boldsymbol{\omega} } \\
    A &= \frac{\sin \theta}{\theta} \\
    B &= \frac{1 - \cos \theta}{\theta^2} \\
    C &= \frac{1-A}{\theta^2} \\
    \mathbf{R} &= \mathbf{I} + A \boldsymbol{\omega}_{\times} + B \boldsymbol{\omega}_{\times}^2 \\
    \mathbf{V} &= \mathbf{I} + B \boldsymbol{\omega}_{\times} + C \boldsymbol{\omega}_{\times}^2 \\
    \exp \begin{pmatrix}
    \mathbf{u} \\
    \boldsymbol{\omega}
    \end{pmatrix} &=
    \left(
    \begin{array}{c|c}
    \mathbf{R} & \mathbf{V} \mathbf{u} \\ \hline
    \mathbf{0} & 1
    \end{array}
    \right )
    \end{align*}
    */
    // Unpack the twist: u = (vx, vy, vz), omega = (wx, wy, wz).
    double vx = twist.at<double>(0,0);
    double vy = twist.at<double>(1,0);
    double vz = twist.at<double>(2,0);
    double wx = twist.at<double>(3,0);
    double wy = twist.at<double>(4,0);
    double wz = twist.at<double>(5,0);
    Matx31d rvec(wx, wy, wz);
    Mat R;
    Rodrigues(rvec, R);
    double theta = sqrt(wx*wx + wy*wy + wz*wz);
    // A, B, C coefficients above; the small-angle branches use the
    // theta -> 0 limits (1, 1/2, 1/6) to avoid division by ~zero.
    double sinc = std::fabs(theta) < 1e-8 ? 1 : sin(theta) / theta;
    double mcosc = (std::fabs(theta) < 1e-8) ? 0.5 : (1-cos(theta)) / (theta*theta);
    double msinc = (std::abs(theta) < 1e-8) ? (1/6.0) : (1-sinc) / (theta*theta);
    // dt = V * u, expanded element-wise.
    Matx31d dt;
    dt(0) = vx*(sinc + wx*wx*msinc) + vy*(wx*wy*msinc - wz*mcosc) + vz*(wx*wz*msinc + wy*mcosc);
    dt(1) = vx*(wx*wy*msinc + wz*mcosc) + vy*(sinc + wy*wy*msinc) + vz*(wy*wz*msinc - wx*mcosc);
    dt(2) = vx*(wx*wz*msinc - wy*mcosc) + vy*(wy*wz*msinc + wx*mcosc) + vz*(sinc + wz*wz*msinc);
    // Invert the rigid transform: (R, dt)^-1 = (R^T, -R^T dt).
    R1 = R.t();
    t1 = -R1 * dt;
}
|
@brief The exponential map from se(3) to SE(3).
@param twist A twist (v, w) represents the velocity of a rigid body as an angular velocity
around an axis and a linear velocity along this axis.
@param R1 Resultant rotation matrix from the twist.
@param t1 Resultant translation vector from the twist.
|
cpp
|
modules/calib3d/src/solvepnp.cpp
| 659
|
[] | true
| 4
| 6.96
|
opencv/opencv
| 85,374
|
doxygen
| false
|
|
of
|
/**
 * Create an {@code ErrorAttributeOptions} that includes the specified attribute
 * {@link Include} options.
 * @param includes error attributes to include
 * @return an {@code ErrorAttributeOptions}
 */
public static ErrorAttributeOptions of(Collection<Include> includes) {
    if (includes.isEmpty()) {
        return new ErrorAttributeOptions(Collections.emptySet());
    }
    // Defensive copy into an EnumSet, exposed as an unmodifiable view.
    return new ErrorAttributeOptions(Collections.unmodifiableSet(EnumSet.copyOf(includes)));
}
|
Create an {@code ErrorAttributeOptions} that includes the specified attribute
{@link Include} options.
@param includes error attributes to include
@return an {@code ErrorAttributeOptions}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/web/error/ErrorAttributeOptions.java
| 126
|
[
"includes"
] |
ErrorAttributeOptions
| true
| 2
| 7.2
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
_loss_grad_lbfgs
|
def _loss_grad_lbfgs(self, transformation, X, same_class_mask, sign=1.0):
    """Compute the loss and the loss gradient w.r.t. `transformation`.

    Parameters
    ----------
    transformation : ndarray of shape (n_components * n_features,)
        The raveled linear transformation on which to compute loss and
        evaluate gradient.
    X : ndarray of shape (n_samples, n_features)
        The training samples.
    same_class_mask : ndarray of shape (n_samples, n_samples)
        A mask where `mask[i, j] == 1` if `X[i]` and `X[j]` belong
        to the same class, and `0` otherwise.
    sign : float, default=1.0
        Multiplier applied to both loss and gradient; -1.0 turns the
        maximization into a minimization for the L-BFGS solver.

    Returns
    -------
    loss : float
        The loss computed for the given transformation.
    gradient : ndarray of shape (n_components * n_features,)
        The new (flattened) gradient of the loss.
    """
    # Print the progress-table header once, on the very first call.
    if self.n_iter_ == 0:
        self.n_iter_ += 1
        if self.verbose:
            header_fields = ["Iteration", "Objective Value", "Time(s)"]
            header_fmt = "{:>10} {:>20} {:>10}"
            header = header_fmt.format(*header_fields)
            cls_name = self.__class__.__name__
            print("[{}]".format(cls_name))
            print(
                "[{}] {}\n[{}] {}".format(
                    cls_name, header, cls_name, "-" * len(header)
                )
            )
    t_funcall = time.time()
    # Un-ravel the flat parameter vector into a (n_components, n_features) matrix.
    transformation = transformation.reshape(-1, X.shape[1])
    X_embedded = np.dot(X, transformation.T)  # (n_samples, n_components)
    # Compute softmax distances; the diagonal is set to inf so that a sample
    # never counts itself as its own neighbor (softmax(-inf) == 0).
    p_ij = pairwise_distances(X_embedded, squared=True)
    np.fill_diagonal(p_ij, np.inf)
    p_ij = softmax(-p_ij)  # (n_samples, n_samples)
    # Compute loss
    masked_p_ij = p_ij * same_class_mask
    p = np.sum(masked_p_ij, axis=1, keepdims=True)  # (n_samples, 1)
    loss = np.sum(p)
    # Compute gradient of loss w.r.t. `transform`
    weighted_p_ij = masked_p_ij - p_ij * p
    weighted_p_ij_sym = weighted_p_ij + weighted_p_ij.T
    np.fill_diagonal(weighted_p_ij_sym, -weighted_p_ij.sum(axis=0))
    gradient = 2 * X_embedded.T.dot(weighted_p_ij_sym).dot(X)
    # time complexity of the gradient: O(n_components x n_samples x (
    # n_samples + n_features))
    if self.verbose:
        t_funcall = time.time() - t_funcall
        values_fmt = "[{}] {:>10} {:>20.6e} {:>10.2f}"
        print(
            values_fmt.format(
                self.__class__.__name__, self.n_iter_, loss, t_funcall
            )
        )
        sys.stdout.flush()
    return sign * loss, sign * gradient.ravel()
|
Compute the loss and the loss gradient w.r.t. `transformation`.
Parameters
----------
transformation : ndarray of shape (n_components * n_features,)
The raveled linear transformation on which to compute loss and
evaluate gradient.
X : ndarray of shape (n_samples, n_features)
The training samples.
same_class_mask : ndarray of shape (n_samples, n_samples)
A mask where `mask[i, j] == 1` if `X[i]` and `X[j]` belong
to the same class, and `0` otherwise.
Returns
-------
loss : float
The loss computed for the given transformation.
gradient : ndarray of shape (n_components * n_features,)
The new (flattened) gradient of the loss.
|
python
|
sklearn/neighbors/_nca.py
| 452
|
[
"self",
"transformation",
"X",
"same_class_mask",
"sign"
] | false
| 4
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
parenthesizeBranchOfConditionalExpression
|
function parenthesizeBranchOfConditionalExpression(branch: Expression): Expression {
    // Per the ES grammar both 'whenTrue' and 'whenFalse' parts of a conditional
    // expression are assignment expressions. A comma sequence introduced by an
    // earlier transformation has the lowest precedence, so it must be wrapped
    // in parentheses to round-trip correctly.
    const unwrapped = skipPartiallyEmittedExpressions(branch);
    if (isCommaSequence(unwrapped)) {
        return factory.createParenthesizedExpression(branch);
    }
    return branch;
}
|
Wraps the operand to a BinaryExpression in parentheses if they are needed to preserve the intended
order of operations.
@param binaryOperator The operator for the BinaryExpression.
@param operand The operand for the BinaryExpression.
@param isLeftSideOfBinary A value indicating whether the operand is the left side of the
BinaryExpression.
|
typescript
|
src/compiler/factory/parenthesizerRules.ts
| 334
|
[
"branch"
] | true
| 2
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
make_response
|
def make_response(*args: t.Any) -> Response:
    """Convert a view-style return value into a response object.

    Views may return plain values that Flask converts into responses, which
    makes attaching extra headers awkward. Call this function instead of a
    bare return to get a concrete response object::

        def index():
            response = make_response(render_template('index.html', foo=42))
            response.headers['X-Parachutes'] = 'parachutes are cool'
            return response

    It accepts the very same arguments a view function may return, e.g. a
    body plus status code::

        response = make_response(render_template('not_found.html'), 404)

    It is also useful to force a decorated view's return value into a
    response::

        response = make_response(view_function())
        response.headers['X-Parachutes'] = 'parachutes are cool'

    Behavior:

    - with no arguments, a fresh empty response is created
    - with one argument, it is passed to :meth:`flask.Flask.make_response`
    - with several arguments, they are passed on as a tuple

    .. versionadded:: 0.6
    """
    if not args:
        return current_app.response_class()
    rv = args[0] if len(args) == 1 else args
    return current_app.make_response(rv)
|
Sometimes it is necessary to set additional headers in a view. Because
views do not have to return response objects but can return a value that
is converted into a response object by Flask itself, it becomes tricky to
add headers to it. This function can be called instead of using a return
and you will get a response object which you can use to attach headers.
If view looked like this and you want to add a new header::
def index():
return render_template('index.html', foo=42)
You can now do something like this::
def index():
response = make_response(render_template('index.html', foo=42))
response.headers['X-Parachutes'] = 'parachutes are cool'
return response
This function accepts the very same arguments you can return from a
view function. This for example creates a response with a 404 error
code::
response = make_response(render_template('not_found.html'), 404)
The other use case of this function is to force the return value of a
view function into a response which is helpful with view
decorators::
response = make_response(view_function())
response.headers['X-Parachutes'] = 'parachutes are cool'
Internally this function does the following things:
- if no arguments are passed, it creates a new response argument
- if one argument is passed, :meth:`flask.Flask.make_response`
is invoked with it.
- if more than one argument is passed, the arguments are passed
to the :meth:`flask.Flask.make_response` function as tuple.
.. versionadded:: 0.6
|
python
|
src/flask/helpers.py
| 138
|
[] |
Response
| true
| 3
| 6.72
|
pallets/flask
| 70,946
|
unknown
| false
|
pop
|
def pop() -> None:
"""
Pop the key_stack and append an exception table entry if possible.
"""
nonlocal nexti
if key_stack:
key = key_stack.pop()
if nexti <= key[1]:
exn_tab.append(
ExceptionTableEntry(max(key[0], nexti), key[1], *exn_dict[key])
)
nexti = key[1] + 2
|
Pop the key_stack and append an exception table entry if possible.
|
python
|
torch/_dynamo/bytecode_transformation.py
| 1,021
|
[] |
None
| true
| 3
| 6.88
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
reject
|
/**
 * The opposite of `_.filter`; returns the elements of `collection` for which
 * `predicate` does **not** return truthy. The iteratee is invoked with three
 * arguments: (value, index|key, collection).
 */
function reject(collection, predicate) {
  var negated = negate(getIteratee(predicate, 3));
  if (isArray(collection)) {
    return arrayFilter(collection, negated);
  }
  return baseFilter(collection, negated);
}
|
The opposite of `_.filter`; this method returns the elements of `collection`
that `predicate` does **not** return truthy for.
@static
@memberOf _
@since 0.1.0
@category Collection
@param {Array|Object} collection The collection to iterate over.
@param {Function} [predicate=_.identity] The function invoked per iteration.
@returns {Array} Returns the new filtered array.
@see _.filter
@example
var users = [
{ 'user': 'barney', 'age': 36, 'active': false },
{ 'user': 'fred', 'age': 40, 'active': true }
];
_.reject(users, function(o) { return !o.active; });
// => objects for ['fred']
// The `_.matches` iteratee shorthand.
_.reject(users, { 'age': 40, 'active': true });
// => objects for ['barney']
// The `_.matchesProperty` iteratee shorthand.
_.reject(users, ['active', false]);
// => objects for ['fred']
// The `_.property` iteratee shorthand.
_.reject(users, 'active');
// => objects for ['barney']
|
javascript
|
lodash.js
| 9,854
|
[
"collection",
"predicate"
] | false
| 2
| 6.96
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
getObjectType
|
/**
 * Returns the type of the proxy: the singleton instance's class when it has
 * already been created, otherwise an early proxy class derived from the
 * current AOP configuration. Returns {@code null} when proxy creation fails
 * with an {@link AopConfigException} and no target class is known either.
 */
@Override
public @Nullable Class<?> getObjectType() {
    // Guard the lazily-created singleton with the same monitor used elsewhere.
    synchronized (this) {
        if (this.singletonInstance != null) {
            return this.singletonInstance.getClass();
        }
    }
    try {
        // This might be incomplete since it potentially misses introduced interfaces
        // from Advisors that will be lazily retrieved via setInterceptorNames.
        return createAopProxy().getProxyClass(this.proxyClassLoader);
    }
    catch (AopConfigException ex) {
        if (getTargetClass() == null) {
            // No target class to fall back on: report the type as unknown.
            if (logger.isDebugEnabled()) {
                logger.debug("Failed to determine early proxy class: " + ex.getMessage());
            }
            return null;
        }
        else {
            throw ex;
        }
    }
}
|
Return the type of the proxy. Will check the singleton instance if
already created, else fall back to the proxy interface (in case of just
a single one), the target bean type, or the TargetSource's target class.
@see org.springframework.aop.framework.AopProxy#getProxyClass
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/ProxyFactoryBean.java
| 264
|
[] | true
| 5
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
description
|
/**
 * Returns a future holding the SCRAM credentials description for the given user.
 * The future completes exceptionally when the underlying describe request failed,
 * when the user is absent from the response ({@code ResourceNotFoundException}),
 * or when the per-user result carries a non-NONE error code.
 *
 * @param userName the name of the user whose description is requested
 * @return a future with the description results for the given user
 */
public KafkaFuture<UserScramCredentialsDescription> description(String userName) {
    final KafkaFutureImpl<UserScramCredentialsDescription> retval = new KafkaFutureImpl<>();
    dataFuture.whenComplete((data, throwable) -> {
        if (throwable != null) {
            // Propagate the failure of the overall describe call.
            retval.completeExceptionally(throwable);
        } else {
            // it is possible that there is no future for this user (for example, the original describe request was
            // for users 1, 2, and 3 but this is looking for user 4), so explicitly take care of that case
            Optional<DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult> optionalUserResult =
                data.results().stream().filter(result -> result.user().equals(userName)).findFirst();
            if (optionalUserResult.isEmpty()) {
                retval.completeExceptionally(new ResourceNotFoundException("No such user: " + userName));
            } else {
                DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult userResult = optionalUserResult.get();
                if (userResult.errorCode() != Errors.NONE.code()) {
                    // RESOURCE_NOT_FOUND is included here
                    retval.completeExceptionally(Errors.forCode(userResult.errorCode()).exception(userResult.errorMessage()));
                } else {
                    retval.complete(new UserScramCredentialsDescription(userResult.user(), getScramCredentialInfosFor(userResult)));
                }
            }
        }
    });
    return retval;
}
|
@param userName the name of the user description being requested
@return a future indicating the description results for the given user. The future will complete exceptionally if
the future returned by {@link #users()} completes exceptionally. Note that if the given user does not exist in
the list of described users then the returned future will complete exceptionally with
{@link org.apache.kafka.common.errors.ResourceNotFoundException}.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResult.java
| 114
|
[
"userName"
] | true
| 4
| 7.6
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
VerticalDelimiter
|
function VerticalDelimiter() {
const store = useContext(StoreContext);
const {ownerID, inspectedElementIndex} = useContext(TreeStateContext);
const {lineHeight} = useContext(SettingsContext);
if (ownerID != null || inspectedElementIndex == null) {
return null;
}
const element = store.getElementAtIndex(inspectedElementIndex);
if (element == null) {
return null;
}
const indexOfLowestDescendant =
store.getIndexOfLowestDescendantElement(element);
if (indexOfLowestDescendant == null) {
return null;
}
const delimiterLeft = calculateElementOffset(element.depth) + 12;
const delimiterTop = (inspectedElementIndex + 1) * lineHeight;
const delimiterHeight =
(indexOfLowestDescendant + 1) * lineHeight - delimiterTop;
return (
<div
className={styles.VerticalDelimiter}
style={{
left: delimiterLeft,
top: delimiterTop,
height: delimiterHeight,
}}
/>
);
}
|
Copyright (c) Meta Platforms, Inc. and affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
@flow
|
javascript
|
packages/react-devtools-shared/src/devtools/views/Components/Tree.js
| 568
|
[] | false
| 5
| 6.24
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
describeShareGroups
|
/**
 * Describe some share groups in the cluster, with the default options.
 * <p>
 * This is a convenience method for {@link #describeShareGroups(Collection, DescribeShareGroupsOptions)}
 * with default options. See the overload for more details.
 *
 * @param groupIds The IDs of the groups to describe.
 * @return The DescribeShareGroupsResult.
 */
default DescribeShareGroupsResult describeShareGroups(Collection<String> groupIds) {
    return describeShareGroups(groupIds, new DescribeShareGroupsOptions());
}
|
Describe some share groups in the cluster, with the default options.
<p>
This is a convenience method for {@link #describeShareGroups(Collection, DescribeShareGroupsOptions)}
with default options. See the overload for more details.
@param groupIds The IDs of the groups to describe.
@return The DescribeShareGroupsResult.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 1,941
|
[
"groupIds"
] |
DescribeShareGroupsResult
| true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
_is_strictly_monotonic_decreasing
|
def _is_strictly_monotonic_decreasing(self) -> bool:
"""
Return if the index is strictly monotonic decreasing
(only decreasing) values.
Examples
--------
>>> Index([3, 2, 1])._is_strictly_monotonic_decreasing
True
>>> Index([3, 2, 2])._is_strictly_monotonic_decreasing
False
>>> Index([3, 1, 2])._is_strictly_monotonic_decreasing
False
"""
return self.is_unique and self.is_monotonic_decreasing
|
Return if the index is strictly monotonic decreasing
(only decreasing) values.
Examples
--------
>>> Index([3, 2, 1])._is_strictly_monotonic_decreasing
True
>>> Index([3, 2, 2])._is_strictly_monotonic_decreasing
False
>>> Index([3, 1, 2])._is_strictly_monotonic_decreasing
False
|
python
|
pandas/core/indexes/base.py
| 2,442
|
[
"self"
] |
bool
| true
| 2
| 6.8
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
queued_slots
|
def queued_slots(self, session: Session = NEW_SESSION) -> int:
    """
    Get the number of pool slots occupied by queued task instances.

    :param session: SQLAlchemy ORM Session
    :return: the used number of slots (0 when no queued tasks exist)
    """
    from airflow.models.taskinstance import TaskInstance  # Avoid circular import

    stmt = (
        select(func.sum(TaskInstance.pool_slots))
        .filter(TaskInstance.pool == self.pool)
        .filter(TaskInstance.state == TaskInstanceState.QUEUED)
    )
    # SUM over zero rows yields NULL/None; coerce that to 0.
    occupied = session.scalar(stmt)
    return int(occupied or 0)
|
Get the number of slots used by queued tasks at the moment.
:param session: SQLAlchemy ORM Session
:return: the used number of slots
|
python
|
airflow-core/src/airflow/models/pool.py
| 291
|
[
"self",
"session"
] |
int
| true
| 2
| 8.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
equals
|
/**
 * Compares this {@code ConfigurationProperty} to another object for equality.
 * Two instances are equal when they are of the same class and have equal
 * (possibly {@code null}) names and values.
 * @param obj the object to compare against, may be {@code null}
 * @return {@code true} if the given object is equal to this one
 */
@Override
public boolean equals(Object obj) {
    if (this == obj) {
        return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
        return false;
    }
    ConfigurationProperty other = (ConfigurationProperty) obj;
    // Idiomatic short-circuit conjunction instead of accumulating into a flag.
    return ObjectUtils.nullSafeEquals(this.name, other.name)
            && ObjectUtils.nullSafeEquals(this.value, other.value);
}
|
Return the value of the configuration property.
@return the configuration property value
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationProperty.java
| 93
|
[
"obj"
] | true
| 6
| 6.4
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
of
|
/**
 * Runs the lambda and returns the duration of its execution.
 *
 * @param <E> the type of exception thrown by the lambda.
 * @param runnable what to execute.
 * @return the Duration of execution.
 * @throws E thrown by the lambda.
 */
public static <E extends Throwable> Duration of(final FailableRunnable<E> runnable) throws E {
    // Delegate to the overload that receives the start value; the lambda ignores it.
    return of(start -> runnable.run());
}
|
Runs the lambda and returns the duration of its execution.
@param <E> The type of exception throw by the lambda.
@param runnable What to execute.
@return The Duration of execution.
@throws E thrown by the lambda.
@see StopWatch
@since 3.13.0
|
java
|
src/main/java/org/apache/commons/lang3/time/DurationUtils.java
| 183
|
[
"runnable"
] |
Duration
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
StringifiedBackend
|
// Returns the backend name used in generated code: "upb" for the UPB
// backend, "cpp" otherwise.
inline std::string StringifiedBackend(Context& ctx) {
  if (ctx.options().backend == Backend::UPB) {
    return "upb";
  }
  return "cpp";
}
|
This Context object will be used throughout hpb generation.
It is a thin wrapper around an io::Printer and can be easily extended
to support more options.
Expected usage is:
SomeGenerationFunc(..., Context& context) {
context.Emit({{"some_key", some_computed_val}}, R"cc(
// hpb gencode ...
)cc);
}
|
cpp
|
hpb_generator/context.h
| 110
|
[] | true
| 2
| 6.32
|
protocolbuffers/protobuf
| 69,904
|
doxygen
| false
|
|
maxBy
|
function maxBy(array, iteratee) {
return (array && array.length)
? baseExtremum(array, getIteratee(iteratee, 2), baseGt)
: undefined;
}
|
This method is like `_.max` except that it accepts `iteratee` which is
invoked for each element in `array` to generate the criterion by which
the value is ranked. The iteratee is invoked with one argument: (value).
@static
@memberOf _
@since 4.0.0
@category Math
@param {Array} array The array to iterate over.
@param {Function} [iteratee=_.identity] The iteratee invoked per element.
@returns {*} Returns the maximum value.
@example
var objects = [{ 'n': 1 }, { 'n': 2 }];
_.maxBy(objects, function(o) { return o.n; });
// => { 'n': 2 }
// The `_.property` iteratee shorthand.
_.maxBy(objects, 'n');
// => { 'n': 2 }
|
javascript
|
lodash.js
| 16,444
|
[
"array",
"iteratee"
] | false
| 3
| 7.52
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
_get_no_sort_one_missing_indexer
|
def _get_no_sort_one_missing_indexer(
n: int, left_missing: bool
) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
"""
Return join indexers where all of one side is selected without sorting
and none of the other side is selected.
Parameters
----------
n : int
Length of indexers to create.
left_missing : bool
If True, the left indexer will contain only -1's.
If False, the right indexer will contain only -1's.
Returns
-------
np.ndarray[np.intp]
Left indexer
np.ndarray[np.intp]
Right indexer
"""
idx = np.arange(n, dtype=np.intp)
idx_missing = np.full(shape=n, fill_value=-1, dtype=np.intp)
if left_missing:
return idx_missing, idx
return idx, idx_missing
|
Return join indexers where all of one side is selected without sorting
and none of the other side is selected.
Parameters
----------
n : int
Length of indexers to create.
left_missing : bool
If True, the left indexer will contain only -1's.
If False, the right indexer will contain only -1's.
Returns
-------
np.ndarray[np.intp]
Left indexer
np.ndarray[np.intp]
Right indexer
|
python
|
pandas/core/reshape/merge.py
| 2,694
|
[
"n",
"left_missing"
] |
tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]
| true
| 2
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
subscribe
|
@Override
public void subscribe(Collection<String> topics, ConsumerRebalanceListener listener) {
if (listener == null)
throw new IllegalArgumentException("RebalanceListener cannot be null");
subscribeInternal(topics, Optional.of(listener));
}
|
This method signals the background thread to {@link CreateFetchRequestsEvent create fetch requests} for the
pre-fetch case, i.e. right before {@link #poll(Duration)} exits. In the pre-fetch case, the application thread
will not wait for confirmation of the request creation before continuing.
<p/>
At the point this method is called, {@link KafkaConsumer#poll(Duration)} has data ready to return to the user,
which means the consumed position was already updated. In order to prevent potential gaps in records, this
method is designed to suppress all exceptions.
@param timer Provides an upper bound for the event and its {@link CompletableFuture future}
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java
| 2,026
|
[
"topics",
"listener"
] |
void
| true
| 2
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
close
|
@Override
public void close() throws Exception {
AtomicReference<Throwable> firstException = new AtomicReference<>();
if (instance instanceof AutoCloseable) {
Utils.closeQuietly((AutoCloseable) instance, instance.getClass().getSimpleName(), firstException);
}
pluginMetrics.ifPresent(metrics -> Utils.closeQuietly(metrics, "pluginMetrics", firstException));
Throwable throwable = firstException.get();
if (throwable != null) throw new KafkaException("failed closing plugin", throwable);
}
|
Wrap a list of instances into Plugins.
@param instances the instances to wrap
@param metrics the metrics
@param key the value for the <code>config</code> tag
@return the list of plugins
|
java
|
clients/src/main/java/org/apache/kafka/common/internals/Plugin.java
| 122
|
[] |
void
| true
| 3
| 8.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
sem
|
def sem(self, ddof: int = 1, numeric_only: bool = False):
"""
Calculate the rolling standard error of mean.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
numeric_only : bool, default False
Include only float, int, boolean columns.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.rolling : Calling rolling with Series data.
DataFrame.rolling : Calling rolling with DataFrames.
Series.sem : Aggregating sem for Series.
DataFrame.sem : Aggregating sem for DataFrame.
Notes
-----
A minimum of one period is required for the calculation.
Examples
--------
>>> s = pd.Series([0, 1, 2, 3])
>>> s.rolling(2, min_periods=1).sem()
0 NaN
1 0.5
2 0.5
3 0.5
dtype: float64
"""
# Raise here so error message says sem instead of std
self._validate_numeric_only("sem", numeric_only)
return self.std(numeric_only=numeric_only, ddof=ddof) / (
self.count(numeric_only)
).pow(0.5)
|
Calculate the rolling standard error of mean.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
numeric_only : bool, default False
Include only float, int, boolean columns.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.rolling : Calling rolling with Series data.
DataFrame.rolling : Calling rolling with DataFrames.
Series.sem : Aggregating sem for Series.
DataFrame.sem : Aggregating sem for DataFrame.
Notes
-----
A minimum of one period is required for the calculation.
Examples
--------
>>> s = pd.Series([0, 1, 2, 3])
>>> s.rolling(2, min_periods=1).sem()
0 NaN
1 0.5
2 0.5
3 0.5
dtype: float64
|
python
|
pandas/core/window/rolling.py
| 2,914
|
[
"self",
"ddof",
"numeric_only"
] | true
| 1
| 7.28
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
readLines
|
public static List<String> readLines(File file, Charset charset) throws IOException {
// don't use asCharSource(file, charset).readLines() because that returns
// an immutable list, which would change the behavior of this method
return asCharSource(file, charset)
.readLines(
new LineProcessor<List<String>>() {
final List<String> result = new ArrayList<>();
@Override
public boolean processLine(String line) {
result.add(line);
return true;
}
@Override
public List<String> getResult() {
return result;
}
});
}
|
Reads all of the lines from a file. The lines do not include line-termination characters, but
do include other leading and trailing whitespace.
<p>This method returns a mutable {@code List}. For an {@code ImmutableList}, use {@code
Files.asCharSource(file, charset).readLines()}.
<p><b>{@link java.nio.file.Path} equivalent:</b> {@link
java.nio.file.Files#readAllLines(java.nio.file.Path, Charset)}.
@param file the file to read from
@param charset the charset used to decode the input stream; see {@link StandardCharsets} for
helpful predefined constants
@return a mutable {@link List} containing all the lines
@throws IOException if an I/O error occurs
|
java
|
android/guava/src/com/google/common/io/Files.java
| 543
|
[
"file",
"charset"
] | true
| 1
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
showUsage
|
protected void showUsage() {
Log.infoPrint("usage: " + this.name);
for (Command command : this.commands) {
if (isOptionCommand(command)) {
Log.infoPrint("[--" + command.getName() + "] ");
}
}
Log.info("");
Log.info(" <command> [<args>]");
Log.info("");
Log.info("Available commands are:");
for (Command command : this.commands) {
if (!isOptionCommand(command) && !isHiddenCommand(command)) {
String usageHelp = command.getUsageHelp();
String description = command.getDescription();
Log.info(String.format("%n %1$s %2$-15s%n %3$s", command.getName(),
(usageHelp != null) ? usageHelp : "", (description != null) ? description : ""));
}
}
Log.info("");
Log.info("Common options:");
Log.info(String.format("%n %1$s %2$-15s%n %3$s", "--debug", "Verbose mode",
"Print additional status information for the command you are running"));
Log.info("");
Log.info("");
Log.info("See '" + this.name + "help <command>' for more information on a specific command.");
}
|
Subclass hook called after a command has run.
@param command the command that has run
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/CommandRunner.java
| 268
|
[] |
void
| true
| 6
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
_log_inference_graph
|
def _log_inference_graph(
fw_module: torch.fx.GraphModule,
aot_config: AOTConfig,
) -> Optional[str]:
"""
Log the inference graph to the structured logger.
Return a str representation of the graph.
"""
if aot_config.enable_log:
trace_structured(
"artifact",
metadata_fn=lambda: {
"name": "torch._functorch.config",
"encoding": "string",
},
payload_fn=lambda: torch._functorch.config.get_serializable_config_copy(),
)
# Save the forward_graph_str right after aot_dispatch_base_graph,
# to save in the cache
aot_forward_graph_str = None
if aot_config.cache_info is not None:
aot_forward_graph_str = fw_module.print_readable(
print_output=False,
include_stride=True,
include_device=True,
fast_sympy_print=True,
expanded_def=True,
)
return aot_forward_graph_str
|
Log the inference graph to the structured logger.
Return a str representation of the graph.
|
python
|
torch/_functorch/_aot_autograd/graph_compile.py
| 363
|
[
"fw_module",
"aot_config"
] |
Optional[str]
| true
| 3
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
iterator
|
@Override
public Iterator<Record> iterator() {
return iterator(BufferSupplier.NO_CACHING);
}
|
Get an iterator for the nested entries contained within this batch. Note that
if the batch is not compressed, then this method will return an iterator over the
shallow record only (i.e. this object).
@return An iterator over the records contained within this batch
|
java
|
clients/src/main/java/org/apache/kafka/common/record/AbstractLegacyRecordBatch.java
| 229
|
[] | true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
getResources
|
Resource[] getResources(String location, ResourceType type) {
validatePattern(location, type);
String directoryPath = location.substring(0, location.indexOf("*/"));
String fileName = location.substring(location.lastIndexOf("/") + 1);
Resource resource = getResource(directoryPath);
if (!resource.exists()) {
return EMPTY_RESOURCES;
}
File file = getFile(location, resource);
if (!file.isDirectory()) {
return EMPTY_RESOURCES;
}
File[] subDirectories = file.listFiles(this::isVisibleDirectory);
if (subDirectories == null) {
return EMPTY_RESOURCES;
}
Arrays.sort(subDirectories, FILE_PATH_COMPARATOR);
if (type == ResourceType.DIRECTORY) {
return Arrays.stream(subDirectories).map(FileSystemResource::new).toArray(Resource[]::new);
}
List<Resource> resources = new ArrayList<>();
FilenameFilter filter = (dir, name) -> name.equals(fileName);
for (File subDirectory : subDirectories) {
File[] files = subDirectory.listFiles(filter);
if (files != null) {
Arrays.sort(files, FILE_NAME_COMPARATOR);
Arrays.stream(files).map(FileSystemResource::new).forEach(resources::add);
}
}
return resources.toArray(EMPTY_RESOURCES);
}
|
Get a multiple resources from a location pattern.
@param location the location pattern
@param type the type of resource to return
@return the resources
@see #isPattern(String)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/LocationResourceLoader.java
| 94
|
[
"location",
"type"
] | true
| 6
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
head
|
def head(self, n: int = 5) -> NDFrameT:
"""
Return first n rows of each group.
Similar to ``.apply(lambda x: x.head(n))``, but it returns a subset of rows
from the original DataFrame with original index and order preserved
(``as_index`` flag is ignored).
Parameters
----------
n : int
If positive: number of entries to include from start of each group.
If negative: number of entries to exclude from end of each group.
Returns
-------
Series or DataFrame
Subset of original Series or DataFrame as determined by n.
%(see_also)s
Examples
--------
>>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
>>> df.groupby("A").head(1)
A B
0 1 2
2 5 6
>>> df.groupby("A").head(-1)
A B
0 1 2
"""
mask = self._make_mask_from_positional_indexer(slice(None, n))
return self._mask_selected_obj(mask)
|
Return first n rows of each group.
Similar to ``.apply(lambda x: x.head(n))``, but it returns a subset of rows
from the original DataFrame with original index and order preserved
(``as_index`` flag is ignored).
Parameters
----------
n : int
If positive: number of entries to include from start of each group.
If negative: number of entries to exclude from end of each group.
Returns
-------
Series or DataFrame
Subset of original Series or DataFrame as determined by n.
%(see_also)s
Examples
--------
>>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
>>> df.groupby("A").head(1)
A B
0 1 2
2 5 6
>>> df.groupby("A").head(-1)
A B
0 1 2
|
python
|
pandas/core/groupby/groupby.py
| 5,446
|
[
"self",
"n"
] |
NDFrameT
| true
| 1
| 7.28
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
read_fwf
|
def read_fwf(
filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
*,
colspecs: Sequence[tuple[int, int]] | str | None = "infer",
widths: Sequence[int] | None = None,
infer_nrows: int = 100,
iterator: bool = False,
chunksize: int | None = None,
**kwds: Unpack[_read_shared[HashableT]],
) -> DataFrame | TextFileReader:
r"""
Read a table of fixed-width formatted lines into DataFrame.
Also supports optionally iterating or breaking of the file
into chunks.
Additional help can be found in the `online docs for IO Tools
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object, or file-like object
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a text ``read()`` function.The string could be a URL.
Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.csv``.
colspecs : list of tuple (int, int) or 'infer'. optional
A list of tuples giving the extents of the fixed-width
fields of each line as half-open intervals (i.e., [from, to] ).
String value 'infer' can be used to instruct the parser to try
detecting the column specifications from the first 100 rows of
the data which are not being skipped via skiprows (default='infer').
widths : list of int, optional
A list of field widths which can be used instead of 'colspecs' if
the intervals are contiguous.
infer_nrows : int, default 100
The number of rows to consider when letting the parser determine the
`colspecs`.
iterator : bool, default False
Return ``TextFileReader`` object for iteration or getting chunks with
``get_chunk()``.
chunksize : int, optional
Number of lines to read from the file per chunk.
**kwds : optional
Optional keyword arguments can be passed to ``TextFileReader``.
Returns
-------
DataFrame or TextFileReader
A comma-separated values (csv) file is returned as two-dimensional
data structure with labeled axes.
See Also
--------
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Examples
--------
>>> pd.read_fwf("data.csv") # doctest: +SKIP
"""
# Check input arguments.
if colspecs is None and widths is None:
raise ValueError("Must specify either colspecs or widths")
if colspecs not in (None, "infer") and widths is not None:
raise ValueError("You must specify only one of 'widths' and 'colspecs'")
# Compute 'colspecs' from 'widths', if specified.
if widths is not None:
colspecs, col = [], 0
for w in widths:
colspecs.append((col, col + w))
col += w
# for mypy
assert colspecs is not None
# GH#40830
# Ensure length of `colspecs` matches length of `names`
names = kwds.get("names")
if names is not None and names is not lib.no_default:
if len(names) != len(colspecs) and colspecs != "infer":
# need to check len(index_col) as it might contain
# unnamed indices, in which case it's name is not required
len_index = 0
if kwds.get("index_col") is not None:
index_col: Any = kwds.get("index_col")
if index_col is not False:
if not is_list_like(index_col):
len_index = 1
else:
# for mypy: handled in the if-branch
assert index_col is not lib.no_default
len_index = len(index_col)
if kwds.get("usecols") is None and len(names) + len_index != len(colspecs):
# If usecols is used colspec may be longer than names
raise ValueError("Length of colspecs must match length of names")
check_dtype_backend(kwds.setdefault("dtype_backend", lib.no_default))
return _read(
filepath_or_buffer,
kwds
| {
"colspecs": colspecs,
"infer_nrows": infer_nrows,
"engine": "python-fwf",
"iterator": iterator,
"chunksize": chunksize,
},
)
|
r"""
Read a table of fixed-width formatted lines into DataFrame.
Also supports optionally iterating or breaking of the file
into chunks.
Additional help can be found in the `online docs for IO Tools
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object, or file-like object
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a text ``read()`` function.The string could be a URL.
Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.csv``.
colspecs : list of tuple (int, int) or 'infer'. optional
A list of tuples giving the extents of the fixed-width
fields of each line as half-open intervals (i.e., [from, to] ).
String value 'infer' can be used to instruct the parser to try
detecting the column specifications from the first 100 rows of
the data which are not being skipped via skiprows (default='infer').
widths : list of int, optional
A list of field widths which can be used instead of 'colspecs' if
the intervals are contiguous.
infer_nrows : int, default 100
The number of rows to consider when letting the parser determine the
`colspecs`.
iterator : bool, default False
Return ``TextFileReader`` object for iteration or getting chunks with
``get_chunk()``.
chunksize : int, optional
Number of lines to read from the file per chunk.
**kwds : optional
Optional keyword arguments can be passed to ``TextFileReader``.
Returns
-------
DataFrame or TextFileReader
A comma-separated values (csv) file is returned as two-dimensional
data structure with labeled axes.
See Also
--------
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Examples
--------
>>> pd.read_fwf("data.csv") # doctest: +SKIP
|
python
|
pandas/io/parsers/readers.py
| 1,479
|
[
"filepath_or_buffer",
"colspecs",
"widths",
"infer_nrows",
"iterator",
"chunksize"
] |
DataFrame | TextFileReader
| true
| 17
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
init_PPC_64Bit
|
private static void init_PPC_64Bit() {
addProcessors(new Processor(Processor.Arch.BIT_64, Processor.Type.PPC), "ppc64", "power64", "powerpc64", "power_pc64", "power_rs64");
}
|
Gets a {@link Processor} object the given value {@link String}. The {@link String} must be like a value returned by the {@code "os.arch"} system
property.
@param value A {@link String} like a value returned by the {@code os.arch} System Property.
@return A {@link Processor} when it exists, else {@code null}.
|
java
|
src/main/java/org/apache/commons/lang3/ArchUtils.java
| 119
|
[] |
void
| true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
enterIf
|
public boolean enterIf(Guard guard) {
if (guard.monitor != this) {
throw new IllegalMonitorStateException();
}
ReentrantLock lock = this.lock;
lock.lock();
boolean satisfied = false;
try {
return satisfied = guard.isSatisfied();
} finally {
if (!satisfied) {
lock.unlock();
}
}
}
|
Enters this monitor if the guard is satisfied. Blocks indefinitely acquiring the lock, but does
not wait for the guard to be satisfied.
@return whether the monitor was entered, which guarantees that the guard is now satisfied
|
java
|
android/guava/src/com/google/common/util/concurrent/Monitor.java
| 681
|
[
"guard"
] | true
| 3
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
|
_addListener
|
function _addListener(target, type, listener, prepend) {
let m;
let events;
let existing;
checkListener(listener);
events = target._events;
if (events === undefined) {
events = target._events = { __proto__: null };
target._eventsCount = 0;
} else {
// To avoid recursion in the case that type === "newListener"! Before
// adding it to the listeners, first emit "newListener".
if (events.newListener !== undefined) {
target.emit('newListener', type,
listener.listener ?? listener);
// Re-assign `events` because a newListener handler could have caused the
// this._events to be assigned to a new object
events = target._events;
}
existing = events[type];
}
if (existing === undefined) {
// Optimize the case of one listener. Don't need the extra array object.
events[type] = listener;
++target._eventsCount;
} else {
if (typeof existing === 'function') {
// Adding the second element, need to change to array.
existing = events[type] =
prepend ? [listener, existing] : [existing, listener];
// If we've already got an array, just append.
} else if (prepend) {
existing.unshift(listener);
} else {
existing.push(listener);
}
// Check for listener leak
m = _getMaxListeners(target);
if (m > 0 && existing.length > m && !existing.warned) {
existing.warned = true;
// No error code for this since it is a Warning
const w = genericNodeError(
`Possible EventEmitter memory leak detected. ${existing.length} ${String(type)} listeners ` +
`added to ${inspect(target, { depth: -1 })}. MaxListeners is ${m}. Use emitter.setMaxListeners() to increase limit`,
{ name: 'MaxListenersExceededWarning', emitter: target, type: type, count: existing.length });
process.emitWarning(w);
}
}
return target;
}
|
Synchronously calls each of the listeners registered
for the event.
@param {string | symbol} type
@param {...any} [args]
@returns {boolean}
|
javascript
|
lib/events.js
| 536
|
[
"target",
"type",
"listener",
"prepend"
] | false
| 14
| 6.24
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
maybe_mangle_lambdas
|
def maybe_mangle_lambdas(agg_spec: Any) -> Any:
"""
Make new lambdas with unique names.
Parameters
----------
agg_spec : Any
An argument to GroupBy.agg.
Non-dict-like `agg_spec` are pass through as is.
For dict-like `agg_spec` a new spec is returned
with name-mangled lambdas.
Returns
-------
mangled : Any
Same type as the input.
Examples
--------
>>> maybe_mangle_lambdas("sum")
'sum'
>>> maybe_mangle_lambdas([lambda: 1, lambda: 2]) # doctest: +SKIP
[<function __main__.<lambda_0>,
<function pandas...._make_lambda.<locals>.f(*args, **kwargs)>]
"""
is_dict = is_dict_like(agg_spec)
if not (is_dict or is_list_like(agg_spec)):
return agg_spec
mangled_aggspec = type(agg_spec)() # dict or OrderedDict
if is_dict:
for key, aggfuncs in agg_spec.items():
if is_list_like(aggfuncs) and not is_dict_like(aggfuncs):
mangled_aggfuncs = _managle_lambda_list(aggfuncs)
else:
mangled_aggfuncs = aggfuncs
mangled_aggspec[key] = mangled_aggfuncs
else:
mangled_aggspec = _managle_lambda_list(agg_spec)
return mangled_aggspec
|
Make new lambdas with unique names.
Parameters
----------
agg_spec : Any
An argument to GroupBy.agg.
Non-dict-like `agg_spec` are pass through as is.
For dict-like `agg_spec` a new spec is returned
with name-mangled lambdas.
Returns
-------
mangled : Any
Same type as the input.
Examples
--------
>>> maybe_mangle_lambdas("sum")
'sum'
>>> maybe_mangle_lambdas([lambda: 1, lambda: 2]) # doctest: +SKIP
[<function __main__.<lambda_0>,
<function pandas...._make_lambda.<locals>.f(*args, **kwargs)>]
|
python
|
pandas/core/apply.py
| 2,049
|
[
"agg_spec"
] |
Any
| true
| 9
| 8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
toString
|
@Override
public String toString() {
String text = String.valueOf(getElement());
int n = getCount();
return (n == 1) ? text : (text + " x " + n);
}
|
Returns a string representation of this multiset entry. The string representation consists of
the associated element if the associated count is one, and otherwise the associated element
followed by the characters " x " (space, x and space) followed by the count. Elements and
counts are converted to strings as by {@code String.valueOf}.
|
java
|
android/guava/src/com/google/common/collect/Multisets.java
| 857
|
[] |
String
| true
| 2
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
of
|
static SslBundle of(@Nullable SslStoreBundle stores, @Nullable SslBundleKey key, @Nullable SslOptions options) {
return of(stores, key, options, null);
}
|
Factory method to create a new {@link SslBundle} instance.
@param stores the stores or {@code null}
@param key the key or {@code null}
@param options the options or {@code null}
@return a new {@link SslBundle} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/SslBundle.java
| 119
|
[
"stores",
"key",
"options"
] |
SslBundle
| true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
get_fargate_profile_state
|
def get_fargate_profile_state(self, clusterName: str, fargateProfileName: str) -> FargateProfileStates:
"""
Return the current status of a given AWS Fargate profile.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.describe_fargate_profile`
:param clusterName: The name of the Amazon EKS Cluster associated with the Fargate profile.
:param fargateProfileName: The name of the Fargate profile to check.
:return: Returns the current status of a given AWS Fargate profile.
"""
eks_client = self.conn
try:
return FargateProfileStates(
eks_client.describe_fargate_profile(
clusterName=clusterName, fargateProfileName=fargateProfileName
)
.get("fargateProfile")
.get("status")
)
except ClientError as ex:
if ex.response.get("Error").get("Code") == "ResourceNotFoundException":
return FargateProfileStates.NONEXISTENT
raise
|
Return the current status of a given AWS Fargate profile.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.describe_fargate_profile`
:param clusterName: The name of the Amazon EKS Cluster associated with the Fargate profile.
:param fargateProfileName: The name of the Fargate profile to check.
:return: Returns the current status of a given AWS Fargate profile.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/eks.py
| 412
|
[
"self",
"clusterName",
"fargateProfileName"
] |
FargateProfileStates
| true
| 2
| 7.6
|
apache/airflow
| 43,597
|
sphinx
| false
|
equals
|
@Override
public boolean equals(final Object obj) {
return obj instanceof StrBuilder && equals((StrBuilder) obj);
}
|
Checks the contents of this builder against another to see if they
contain the same character content.
@param obj the object to check, null returns false
@return true if the builders contain the same characters in the same order
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,855
|
[
"obj"
] | true
| 2
| 8.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
index_labels_to_array
|
def index_labels_to_array(
labels: np.ndarray | Iterable, dtype: NpDtype | None = None
) -> np.ndarray:
"""
Transform label or iterable of labels to array, for use in Index.
Parameters
----------
dtype : dtype
If specified, use as dtype of the resulting array, otherwise infer.
Returns
-------
array
"""
if isinstance(labels, (str, tuple)):
labels = [labels]
if not isinstance(labels, (list, np.ndarray)):
try:
labels = list(labels)
except TypeError: # non-iterable
labels = [labels]
rlabels = asarray_tuplesafe(labels, dtype=dtype)
return rlabels
|
Transform label or iterable of labels to array, for use in Index.
Parameters
----------
dtype : dtype
If specified, use as dtype of the resulting array, otherwise infer.
Returns
-------
array
|
python
|
pandas/core/common.py
| 268
|
[
"labels",
"dtype"
] |
np.ndarray
| true
| 3
| 6.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
requestOffsetReset
|
public void requestOffsetReset(TopicPartition partition) {
requestOffsetReset(partition, defaultResetStrategy);
}
|
Unset the preferred read replica. This causes the fetcher to go back to the leader for fetches.
@param tp The topic partition
@return the removed preferred read replica if set, Empty otherwise.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
| 796
|
[
"partition"
] |
void
| true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
getPropertyNameKeyEnd
|
private int getPropertyNameKeyEnd(String propertyName, int startIndex) {
int unclosedPrefixes = 0;
int length = propertyName.length();
for (int i = startIndex; i < length; i++) {
switch (propertyName.charAt(i)) {
case PropertyAccessor.PROPERTY_KEY_PREFIX_CHAR -> {
// The property name contains opening prefix(es)...
unclosedPrefixes++;
}
case PropertyAccessor.PROPERTY_KEY_SUFFIX_CHAR -> {
if (unclosedPrefixes == 0) {
// No unclosed prefix(es) in the property name (left) ->
// this is the suffix we are looking for.
return i;
}
else {
// This suffix does not close the initial prefix but rather
// just one that occurred within the property name.
unclosedPrefixes--;
}
}
}
}
return -1;
}
|
Parse the given property name into the corresponding property name tokens.
@param propertyName the property name to parse
@return representation of the parsed property tokens
|
java
|
spring-beans/src/main/java/org/springframework/beans/AbstractNestablePropertyAccessor.java
| 978
|
[
"propertyName",
"startIndex"
] | true
| 3
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
expandProfiles
|
private List<String> expandProfiles(@Nullable List<String> profiles) {
Deque<String> stack = new ArrayDeque<>();
asReversedList(profiles).forEach(stack::push);
Set<String> expandedProfiles = new LinkedHashSet<>();
while (!stack.isEmpty()) {
String current = stack.pop();
if (expandedProfiles.add(current)) {
asReversedList(this.groups.get(current)).forEach(stack::push);
}
}
return asUniqueItemList(expandedProfiles);
}
|
Create a new {@link Profiles} instance based on the {@link Environment} and
{@link Binder}.
@param environment the source environment
@param binder the binder for profile properties
@param additionalProfiles any additional active profiles
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/Profiles.java
| 139
|
[
"profiles"
] | true
| 3
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
items
|
public ConditionMessage items(Object @Nullable ... items) {
return items(Style.NORMAL, items);
}
|
Indicate the items. For example
{@code didNotFind("bean", "beans").items("x", "y")} results in the message "did
not find beans x, y".
@param items the items (may be {@code null})
@return a built {@link ConditionMessage}
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/ConditionMessage.java
| 348
|
[] |
ConditionMessage
| true
| 1
| 6.8
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
flatten_structured_array
|
def flatten_structured_array(a):
"""
Flatten a structured array.
The data type of the output is chosen such that it can represent all of the
(nested) fields.
Parameters
----------
a : structured array
Returns
-------
output : masked array or ndarray
A flattened masked array if the input is a masked array, otherwise a
standard ndarray.
Examples
--------
>>> import numpy as np
>>> ndtype = [('a', int), ('b', float)]
>>> a = np.array([(1, 1), (2, 2)], dtype=ndtype)
>>> np.ma.flatten_structured_array(a)
array([[1., 1.],
[2., 2.]])
"""
def flatten_sequence(iterable):
"""
Flattens a compound of nested iterables.
"""
for elm in iter(iterable):
if hasattr(elm, '__iter__'):
yield from flatten_sequence(elm)
else:
yield elm
a = np.asanyarray(a)
inishape = a.shape
a = a.ravel()
if isinstance(a, MaskedArray):
out = np.array([tuple(flatten_sequence(d.item())) for d in a._data])
out = out.view(MaskedArray)
out._mask = np.array([tuple(flatten_sequence(d.item()))
for d in getmaskarray(a)])
else:
out = np.array([tuple(flatten_sequence(d.item())) for d in a])
if len(inishape) > 1:
newshape = list(out.shape)
newshape[0] = inishape
out.shape = tuple(flatten_sequence(newshape))
return out
|
Flatten a structured array.
The data type of the output is chosen such that it can represent all of the
(nested) fields.
Parameters
----------
a : structured array
Returns
-------
output : masked array or ndarray
A flattened masked array if the input is a masked array, otherwise a
standard ndarray.
Examples
--------
>>> import numpy as np
>>> ndtype = [('a', int), ('b', float)]
>>> a = np.array([(1, 1), (2, 2)], dtype=ndtype)
>>> np.ma.flatten_structured_array(a)
array([[1., 1.],
[2., 2.]])
|
python
|
numpy/ma/core.py
| 2,548
|
[
"a"
] | false
| 7
| 7.68
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
wasLastAddedBucketPositive
|
boolean wasLastAddedBucketPositive() {
return positiveBuckets.numBuckets > 0;
}
|
@return true, if the last bucket added successfully via {@link #tryAddBucket(long, long, boolean)} was a positive one.
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/FixedCapacityExponentialHistogram.java
| 221
|
[] | true
| 1
| 6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
assign
|
@Override
public void assign(Collection<TopicPartition> partitions) {
acquireAndEnsureOpen();
try {
if (partitions == null) {
throw new IllegalArgumentException("Topic partition collection to assign to cannot be null");
} else if (partitions.isEmpty()) {
this.unsubscribe();
} else {
for (TopicPartition tp : partitions) {
String topic = (tp != null) ? tp.topic() : null;
if (isBlank(topic))
throw new IllegalArgumentException("Topic partitions to assign to cannot have null or empty topic");
}
fetcher.clearBufferedDataForUnassignedPartitions(partitions);
// make sure the offsets of topic partitions the consumer is unsubscribing from
// are committed since there will be no following rebalance
if (coordinator != null)
this.coordinator.maybeAutoCommitOffsetsAsync(time.milliseconds());
log.info("Assigned to partition(s): {}", partitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", ")));
if (this.subscriptions.assignFromUser(new HashSet<>(partitions)))
metadata.requestUpdateForNewTopics();
}
} finally {
release();
}
}
|
Internal helper method for {@link #subscribe(Pattern)} and
{@link #subscribe(Pattern, ConsumerRebalanceListener)}
<p>
Subscribe to all topics matching specified pattern to get dynamically assigned partitions.
The pattern matching will be done periodically against all topics existing at the time of check.
This can be controlled through the {@code metadata.max.age.ms} configuration: by lowering
the max metadata age, the consumer will refresh metadata more often and check for matching topics.
<p>
See {@link #subscribe(Collection, ConsumerRebalanceListener)} for details on the
use of the {@link ConsumerRebalanceListener}. Generally rebalances are triggered when there
is a change to the topics matching the provided pattern and when consumer group membership changes.
Group rebalances only take place during an active call to {@link #poll(Duration)}.
@param pattern Pattern to subscribe to
@param listener {@link Optional} listener instance to get notifications on partition assignment/revocation
for the subscribed topics
@throws IllegalArgumentException If pattern or listener is null
@throws IllegalStateException If {@code subscribe()} is called previously with topics, or assign is called
previously (without a subsequent call to {@link #unsubscribe()}), or if not
configured at-least one partition assignment strategy
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ClassicKafkaConsumer.java
| 592
|
[
"partitions"
] |
void
| true
| 7
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
collectPutRequests
|
private void collectPutRequests(Collection<CacheOperationContext> contexts,
@Nullable Object result, Collection<CachePutRequest> putRequests) {
for (CacheOperationContext context : contexts) {
if (isConditionPassing(context, result)) {
putRequests.add(new CachePutRequest(context));
}
}
}
|
Collect a {@link CachePutRequest} for every {@link CacheOperation}
using the specified result value.
@param contexts the contexts to handle
@param result the result value
@param putRequests the collection to update
|
java
|
spring-context/src/main/java/org/springframework/cache/interceptor/CacheAspectSupport.java
| 723
|
[
"contexts",
"result",
"putRequests"
] |
void
| true
| 2
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
registerBean
|
public <T> void registerBean(@Nullable String beanName, Class<T> beanClass,
@Nullable Supplier<T> supplier, BeanDefinitionCustomizer... customizers) {
ClassDerivedBeanDefinition beanDefinition = new ClassDerivedBeanDefinition(beanClass);
if (supplier != null) {
beanDefinition.setInstanceSupplier(supplier);
}
for (BeanDefinitionCustomizer customizer : customizers) {
customizer.customize(beanDefinition);
}
String nameToUse = (beanName != null ? beanName : beanClass.getName());
registerBeanDefinition(nameToUse, beanDefinition);
}
|
Register a bean from the given bean class, using the given supplier for
obtaining a new instance (typically declared as a lambda expression or
method reference), optionally customizing its bean definition metadata
(again typically declared as a lambda expression).
<p>This method can be overridden to adapt the registration mechanism for
all {@code registerBean} methods (since they all delegate to this one).
@param beanName the name of the bean (may be {@code null})
@param beanClass the class of the bean
@param supplier a callback for creating an instance of the bean (in case
of {@code null}, resolving a public constructor to be autowired instead)
@param customizers one or more callbacks for customizing the factory's
{@link BeanDefinition}, for example, setting a lazy-init or primary flag
@since 5.0
|
java
|
spring-context/src/main/java/org/springframework/context/support/GenericApplicationContext.java
| 581
|
[
"beanName",
"beanClass",
"supplier"
] |
void
| true
| 3
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
addPropertyEditorRegistrar
|
@Override
public void addPropertyEditorRegistrar(PropertyEditorRegistrar registrar) {
Assert.notNull(registrar, "PropertyEditorRegistrar must not be null");
if (registrar.overridesDefaultEditors()) {
this.defaultEditorRegistrars.add(registrar);
}
else {
this.propertyEditorRegistrars.add(registrar);
}
}
|
Internal extended variant of {@link #isTypeMatch(String, ResolvableType)}
to check whether the bean with the given name matches the specified type. Allow
additional constraints to be applied to ensure that beans are not created early.
@param name the name of the bean to query
@param typeToMatch the type to match against (as a {@code ResolvableType})
@return {@code true} if the bean type matches, {@code false} if it
doesn't match or cannot be determined yet
@throws NoSuchBeanDefinitionException if there is no bean with the given name
@since 5.2
@see #getBean
@see #getType
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
| 876
|
[
"registrar"
] |
void
| true
| 2
| 7.92
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
append
|
public StrBuilder append(final StrBuilder str, final int startIndex, final int length) {
if (str == null) {
return appendNull();
}
if (startIndex < 0 || startIndex > str.length()) {
throw new StringIndexOutOfBoundsException("startIndex must be valid");
}
if (length < 0 || startIndex + length > str.length()) {
throw new StringIndexOutOfBoundsException("length must be valid");
}
if (length > 0) {
final int len = length();
ensureCapacity(len + length);
str.getChars(startIndex, startIndex + length, buffer, len);
size += length;
}
return this;
}
|
Appends part of a string builder to this string builder.
Appending null will call {@link #appendNull()}.
@param str the string to append
@param startIndex the start index, inclusive, must be valid
@param length the length to append, must be valid
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 598
|
[
"str",
"startIndex",
"length"
] |
StrBuilder
| true
| 7
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
get_file_metadata_async
|
async def get_file_metadata_async(
self, client: AioBaseClient, bucket_name: str, key: str | None = None
) -> AsyncIterator[Any]:
"""
Get a list of files that a key matching a wildcard expression exists in a bucket asynchronously.
:param client: aiobotocore client
:param bucket_name: the name of the bucket
:param key: the path to the key
"""
prefix = re.split(r"[\[\*\?]", key, 1)[0] if key else ""
delimiter = ""
paginator = client.get_paginator("list_objects_v2")
params = {
"Bucket": bucket_name,
"Prefix": prefix,
"Delimiter": delimiter,
}
if self._requester_pays:
params["RequestPayer"] = "requester"
response = paginator.paginate(**params)
async for page in response:
if "Contents" in page:
for row in page["Contents"]:
yield row
|
Get a list of files that a key matching a wildcard expression exists in a bucket asynchronously.
:param client: aiobotocore client
:param bucket_name: the name of the bucket
:param key: the path to the key
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/s3.py
| 504
|
[
"self",
"client",
"bucket_name",
"key"
] |
AsyncIterator[Any]
| true
| 6
| 7.04
|
apache/airflow
| 43,597
|
sphinx
| false
|
prettyPrintException
|
static String prettyPrintException(Throwable throwable) {
if (throwable == null)
return "Null exception.";
if (throwable.getMessage() != null) {
return throwable.getClass().getSimpleName() + ": " + throwable.getMessage();
}
return throwable.getClass().getSimpleName();
}
|
Pretty-print an exception.
@param throwable The exception.
@return A compact human-readable string.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
| 506
|
[
"throwable"
] |
String
| true
| 3
| 7.92
|
apache/kafka
| 31,560
|
javadoc
| false
|
putIfAbsent
|
@SuppressWarnings("unchecked")
<RESPONSE> RESPONSE putIfAbsent(ProjectId projectId, String ip, String databasePath, Function<String, RESPONSE> retrieveFunction) {
// can't use cache.computeIfAbsent due to the elevated permissions for the jackson (run via the cache loader)
CacheKey cacheKey = new CacheKey(projectId, ip, databasePath);
long cacheStart = relativeNanoTimeProvider.getAsLong();
// intentionally non-locking for simplicity...it's OK if we re-put the same key/value in the cache during a race condition.
Object response = cache.get(cacheKey);
long cacheRequestTime = relativeNanoTimeProvider.getAsLong() - cacheStart;
// populate the cache for this key, if necessary
if (response == null) {
long retrieveStart = relativeNanoTimeProvider.getAsLong();
response = retrieveFunction.apply(ip);
// if the response from the database was null, then use the no-result sentinel value
if (response == null) {
response = NO_RESULT;
}
// store the result or no-result in the cache
cache.put(cacheKey, response);
long databaseRequestAndCachePutTime = relativeNanoTimeProvider.getAsLong() - retrieveStart;
missesTimeInNanos.add(cacheRequestTime + databaseRequestAndCachePutTime);
} else {
hitsTimeInNanos.add(cacheRequestTime);
}
if (response == NO_RESULT) {
return null; // the no-result sentinel is an internal detail, don't expose it
} else {
return (RESPONSE) response;
}
}
|
Internal-only sentinel object for recording that a result from the geoip database was null (i.e. there was no result). By caching
this no-result we can distinguish between something not being in the cache because we haven't searched for that data yet, versus
something not being in the cache because the data doesn't exist in the database.
|
java
|
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpCache.java
| 63
|
[
"projectId",
"ip",
"databasePath",
"retrieveFunction"
] |
RESPONSE
| true
| 4
| 6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
exclusiveBetween
|
@SuppressWarnings("boxing")
public static void exclusiveBetween(final double start, final double end, final double value) {
// TODO when breaking BC, consider returning value
if (value <= start || value >= end) {
throw new IllegalArgumentException(String.format(DEFAULT_EXCLUSIVE_BETWEEN_EX_MESSAGE, value, start, end));
}
}
|
Validate that the specified primitive value falls between the two
exclusive values specified; otherwise, throws an exception.
<pre>Validate.exclusiveBetween(0.1, 2.1, 1.1);</pre>
@param start the exclusive start value.
@param end the exclusive end value.
@param value the value to validate.
@throws IllegalArgumentException if the value falls out of the boundaries.
@since 3.3
|
java
|
src/main/java/org/apache/commons/lang3/Validate.java
| 92
|
[
"start",
"end",
"value"
] |
void
| true
| 3
| 6.4
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
isStartOfExpression
|
function isStartOfExpression(): boolean {
if (isStartOfLeftHandSideExpression()) {
return true;
}
switch (token()) {
case SyntaxKind.PlusToken:
case SyntaxKind.MinusToken:
case SyntaxKind.TildeToken:
case SyntaxKind.ExclamationToken:
case SyntaxKind.DeleteKeyword:
case SyntaxKind.TypeOfKeyword:
case SyntaxKind.VoidKeyword:
case SyntaxKind.PlusPlusToken:
case SyntaxKind.MinusMinusToken:
case SyntaxKind.LessThanToken:
case SyntaxKind.AwaitKeyword:
case SyntaxKind.YieldKeyword:
case SyntaxKind.PrivateIdentifier:
case SyntaxKind.AtToken:
// Yield/await always starts an expression. Either it is an identifier (in which case
// it is definitely an expression). Or it's a keyword (either because we're in
// a generator or async function, or in strict mode (or both)) and it started a yield or await expression.
return true;
default:
// Error tolerance. If we see the start of some binary operator, we consider
// that the start of an expression. That way we'll parse out a missing identifier,
// give a good message about an identifier being missing, and then consume the
// rest of the binary expression.
if (isBinaryOperator()) {
return true;
}
return isIdentifier();
}
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 4,995
|
[] | true
| 3
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| true
|
|
read_view_information_from_args
|
def read_view_information_from_args(
mutable_arg_names: list[str],
mutable_arg_types: list[torch.Type],
kwargs: dict[str, Any],
all_bases: list[Tensor],
):
"""
This reads the view information added by `write_view_information_to_args` from kwargs, pop them,
and returns a dict arg_name -> ViewInfo | [ViewInfo](if the input is list). that maps each mutable arg
to its view information.
mutable_arg_names: mutable custom operator arg names.
mutable_arg_types: mutable custom operator arg types.
kwargs : args of auto_functionalize(custom_op, kwargs)
"""
def get_arg(name):
return kwargs.pop(name)
def read_single_view(prefix):
base_index = get_arg(f"{prefix}_base_index")
if base_index is None:
return None
elif f"{prefix}_alias" in kwargs:
get_arg(f"{prefix}_alias")
return AliasViewInfo(base_index)
elif f"{prefix}_storage_offset" in kwargs:
# The view is regenerated using as_strided.
size = get_arg(f"{prefix}_size")
stride = get_arg(f"{prefix}_stride")
storage_offset = get_arg(f"{prefix}_storage_offset")
return AsStridedViewInfo(base_index, size, stride, storage_offset)
elif f"{prefix}_slice_dim" in kwargs:
dim = get_arg(f"{prefix}_slice_dim")
start = get_arg(f"{prefix}_slice_start")
end = get_arg(f"{prefix}_slice_end")
return SliceViewInfo(base_index, dim, start, end)
else:
# This means that the argument is the base tensor
return NotView(base_index)
args_view_info: dict[str, Any] = {}
for arg_name, arg_type in zip(mutable_arg_names, mutable_arg_types):
if library_utils.is_tensorlist_like_type(arg_type):
length = get_arg(f"_{arg_name}_length")
if length is None:
# The whole list is None.
args_view_info[arg_name] = None
else:
args_view_info[arg_name] = [
read_single_view(f"_{arg_name}_{i}") for i in range(length)
]
elif library_utils.is_tensor_like_type(arg_type):
args_view_info[arg_name] = read_single_view(f"_{arg_name}")
else:
raise RuntimeError(f"Unsupported type {arg_type}")
return args_view_info
|
This reads the view information added by `write_view_information_to_args` from kwargs, pop them,
and returns a dict arg_name -> ViewInfo | [ViewInfo](if the input is list). that maps each mutable arg
to its view information.
mutable_arg_names: mutable custom operator arg names.
mutable_arg_types: mutable custom operator arg types.
kwargs : args of auto_functionalize(custom_op, kwargs)
|
python
|
torch/_higher_order_ops/auto_functionalize.py
| 249
|
[
"mutable_arg_names",
"mutable_arg_types",
"kwargs",
"all_bases"
] | true
| 12
| 6.32
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
|
readMetadata
|
ConfigurationMetadata readMetadata(TypeElement typeElement) {
return readMetadata(SOURCE_METADATA_PATH.apply(typeElement, this.typeUtils));
}
|
Read the existing {@link ConfigurationMetadata} for the specified type or
{@code null} if it is not available yet.
@param typeElement the type to read metadata for
@return the metadata for the given type or {@code null}
|
java
|
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/MetadataStore.java
| 84
|
[
"typeElement"
] |
ConfigurationMetadata
| true
| 1
| 6.48
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
registerStateListener
|
public void registerStateListener(MemberStateListener listener) {
Objects.requireNonNull(listener, "State updates listener cannot be null");
for (MemberStateListener registeredListener : stateUpdatesListeners) {
if (registeredListener == listener) {
throw new IllegalArgumentException("Listener is already registered.");
}
}
stateUpdatesListeners.add(listener);
}
|
Register a new listener that will be invoked whenever the member state changes, or a new
member ID or epoch is received.
@param listener Listener to invoke.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsMembershipManager.java
| 371
|
[
"listener"
] |
void
| true
| 2
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
outsideOf
|
public static JavaUnicodeEscaper outsideOf(final int codePointLow, final int codePointHigh) {
return new JavaUnicodeEscaper(codePointLow, codePointHigh, false);
}
|
Constructs a {@link JavaUnicodeEscaper} outside of the specified values (exclusive).
@param codePointLow
below which to escape.
@param codePointHigh
above which to escape.
@return the newly created {@link UnicodeEscaper} instance.
|
java
|
src/main/java/org/apache/commons/lang3/text/translate/JavaUnicodeEscaper.java
| 74
|
[
"codePointLow",
"codePointHigh"
] |
JavaUnicodeEscaper
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
toChar
|
public static char toChar(final Character ch) {
return Objects.requireNonNull(ch, "ch").charValue();
}
|
Converts the Character to a char throwing an exception for {@code null}.
<pre>
CharUtils.toChar(' ') = ' '
CharUtils.toChar('A') = 'A'
CharUtils.toChar(null) throws IllegalArgumentException
</pre>
@param ch the character to convert
@return the char value of the Character
@throws NullPointerException if the Character is null
|
java
|
src/main/java/org/apache/commons/lang3/CharUtils.java
| 279
|
[
"ch"
] | true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
fit
|
public long fit(final long element) {
return super.fit(element).longValue();
}
|
Fits the given value into this range by returning the given value or, if out of bounds, the range minimum if
below, or the range maximum if above.
<pre>{@code
LongRange range = LongRange.of(16, 64);
range.fit(-9) --> 16
range.fit(0) --> 16
range.fit(15) --> 16
range.fit(16) --> 16
range.fit(17) --> 17
...
range.fit(63) --> 63
range.fit(64) --> 64
range.fit(99) --> 64
}</pre>
@param element the element to test.
@return the minimum, the element, or the maximum depending on the element's location relative to the range.
@since 3.19.0
|
java
|
src/main/java/org/apache/commons/lang3/LongRange.java
| 107
|
[
"element"
] | true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
processBuilder
|
public ProcessBuilder processBuilder(String... arguments) {
ProcessBuilder processBuilder = new ProcessBuilder(toString());
processBuilder.command().addAll(Arrays.asList(arguments));
return processBuilder;
}
|
Create a new {@link ProcessBuilder} that will run with the Java executable.
@param arguments the command arguments
@return a {@link ProcessBuilder}
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/JavaExecutable.java
| 55
|
[] |
ProcessBuilder
| true
| 1
| 6.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getJaroWinklerDistance
|
@Deprecated
public static double getJaroWinklerDistance(final CharSequence first, final CharSequence second) {
final double DEFAULT_SCALING_FACTOR = 0.1;
if (first == null || second == null) {
throw new IllegalArgumentException("Strings must not be null");
}
final int[] mtp = matches(first, second);
final double m = mtp[0];
if (m == 0) {
return 0D;
}
final double j = (m / first.length() + m / second.length() + (m - mtp[1]) / m) / 3;
final double jw = j < 0.7D ? j : j + Math.min(DEFAULT_SCALING_FACTOR, 1D / mtp[3]) * mtp[2] * (1D - j);
return Math.round(jw * 100.0D) / 100.0D;
}
|
Gets the Jaro Winkler Distance which indicates the similarity score between two Strings.
<p>
The Jaro measure is the weighted sum of percentage of matched characters from each file and transposed characters. Winkler increased this measure for
matching initial characters.
</p>
<p>
This implementation is based on the Jaro Winkler similarity algorithm from
<a href="https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance">https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance</a>.
</p>
<pre>
StringUtils.getJaroWinklerDistance(null, null) = Throws {@link IllegalArgumentException}
StringUtils.getJaroWinklerDistance("", "") = 0.0
StringUtils.getJaroWinklerDistance("", "a") = 0.0
StringUtils.getJaroWinklerDistance("aaapppp", "") = 0.0
StringUtils.getJaroWinklerDistance("frog", "fog") = 0.93
StringUtils.getJaroWinklerDistance("fly", "ant") = 0.0
StringUtils.getJaroWinklerDistance("elephant", "hippo") = 0.44
StringUtils.getJaroWinklerDistance("hippo", "elephant") = 0.44
StringUtils.getJaroWinklerDistance("hippo", "zzzzzzzz") = 0.0
StringUtils.getJaroWinklerDistance("hello", "hallo") = 0.88
StringUtils.getJaroWinklerDistance("ABC Corporation", "ABC Corp") = 0.93
StringUtils.getJaroWinklerDistance("D N H Enterprises Inc", "D & H Enterprises, Inc.") = 0.95
StringUtils.getJaroWinklerDistance("My Gym Children's Fitness Center", "My Gym. Childrens Fitness") = 0.92
StringUtils.getJaroWinklerDistance("PENNSYLVANIA", "PENNCISYLVNIA") = 0.88
</pre>
@param first the first String, must not be null.
@param second the second String, must not be null.
@return result distance.
@throws IllegalArgumentException if either String input {@code null}.
@since 3.3
@deprecated As of 3.6, use Apache Commons Text
<a href="https://commons.apache.org/proper/commons-text/javadocs/api-release/org/apache/commons/text/similarity/JaroWinklerDistance.html">
JaroWinklerDistance</a> instead.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 2,235
|
[
"first",
"second"
] | true
| 5
| 7.44
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
wrapCollection
|
@Override
Collection<V> wrapCollection(@ParametricNullness K key, Collection<V> collection) {
if (collection instanceof NavigableSet) {
return new WrappedNavigableSet(key, (NavigableSet<V>) collection, null);
} else {
return new WrappedSortedSet(key, (SortedSet<V>) collection, null);
}
}
|
Creates a new multimap that uses the provided map.
@param map place to store the mapping from each key to its corresponding values
|
java
|
android/guava/src/com/google/common/collect/AbstractSortedSetMultimap.java
| 69
|
[
"key",
"collection"
] | true
| 2
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
|
fromroots
|
def fromroots(cls, roots, domain=[], window=None, symbol='x'):
"""Return series instance that has the specified roots.
Returns a series representing the product
``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is a
list of roots.
Parameters
----------
roots : array_like
List of roots.
domain : {[], None, array_like}, optional
Domain for the resulting series. If None the domain is the
interval from the smallest root to the largest. If [] the
domain is the class domain. The default is [].
window : {None, array_like}, optional
Window for the returned series. If None the class window is
used. The default is None.
symbol : str, optional
Symbol representing the independent variable. Default is 'x'.
Returns
-------
new_series : series
Series with the specified roots.
"""
[roots] = pu.as_series([roots], trim=False)
if domain is None:
domain = pu.getdomain(roots)
elif isinstance(domain, list) and len(domain) == 0:
domain = cls.domain
if window is None:
window = cls.window
deg = len(roots)
off, scl = pu.mapparms(domain, window)
rnew = off + scl * roots
coef = cls._fromroots(rnew) / scl**deg
return cls(coef, domain=domain, window=window, symbol=symbol)
|
Return series instance that has the specified roots.
Returns a series representing the product
``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is a
list of roots.
Parameters
----------
roots : array_like
List of roots.
domain : {[], None, array_like}, optional
Domain for the resulting series. If None the domain is the
interval from the smallest root to the largest. If [] the
domain is the class domain. The default is [].
window : {None, array_like}, optional
Window for the returned series. If None the class window is
used. The default is None.
symbol : str, optional
Symbol representing the independent variable. Default is 'x'.
Returns
-------
new_series : series
Series with the specified roots.
|
python
|
numpy/polynomial/_polybase.py
| 1,037
|
[
"cls",
"roots",
"domain",
"window",
"symbol"
] | false
| 5
| 6.08
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
mean
|
def mean(self, *, skipna: bool = True, axis: int | None = 0):
"""
Return the mean value of the Array.
Parameters
----------
skipna : bool, default True
Whether to ignore any NaT elements.
axis : int, optional, default 0
Axis for the function to be applied on.
Returns
-------
scalar
Timestamp or Timedelta.
See Also
--------
numpy.ndarray.mean : Returns the average of array elements along a given axis.
Series.mean : Return the mean value in a Series.
Notes
-----
mean is only defined for Datetime and Timedelta dtypes, not for Period.
Examples
--------
For :class:`pandas.DatetimeIndex`:
>>> idx = pd.date_range("2001-01-01 00:00", periods=3)
>>> idx
DatetimeIndex(['2001-01-01', '2001-01-02', '2001-01-03'],
dtype='datetime64[us]', freq='D')
>>> idx.mean()
Timestamp('2001-01-02 00:00:00')
For :class:`pandas.TimedeltaIndex`:
>>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit="D")
>>> tdelta_idx
TimedeltaIndex(['1 days', '2 days', '3 days'],
dtype='timedelta64[ns]', freq=None)
>>> tdelta_idx.mean()
Timedelta('2 days 00:00:00')
"""
return self._data.mean(skipna=skipna, axis=axis)
|
Return the mean value of the Array.
Parameters
----------
skipna : bool, default True
Whether to ignore any NaT elements.
axis : int, optional, default 0
Axis for the function to be applied on.
Returns
-------
scalar
Timestamp or Timedelta.
See Also
--------
numpy.ndarray.mean : Returns the average of array elements along a given axis.
Series.mean : Return the mean value in a Series.
Notes
-----
mean is only defined for Datetime and Timedelta dtypes, not for Period.
Examples
--------
For :class:`pandas.DatetimeIndex`:
>>> idx = pd.date_range("2001-01-01 00:00", periods=3)
>>> idx
DatetimeIndex(['2001-01-01', '2001-01-02', '2001-01-03'],
dtype='datetime64[us]', freq='D')
>>> idx.mean()
Timestamp('2001-01-02 00:00:00')
For :class:`pandas.TimedeltaIndex`:
>>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit="D")
>>> tdelta_idx
TimedeltaIndex(['1 days', '2 days', '3 days'],
dtype='timedelta64[ns]', freq=None)
>>> tdelta_idx.mean()
Timedelta('2 days 00:00:00')
|
python
|
pandas/core/indexes/datetimelike.py
| 94
|
[
"self",
"skipna",
"axis"
] | true
| 1
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
should_we_run_the_build
|
def should_we_run_the_build(build_ci_params: BuildCiParams) -> bool:
"""
Check if we should run the build based on what files have been modified since last build and answer from
the user.
* If build is needed, the user is asked for confirmation
* If the branch is not rebased it warns the user to rebase (to make sure latest remote cache is useful)
* Builds Image/Skips/Quits depending on the answer
:param build_ci_params: parameters for the build
"""
# We import those locally so that click autocomplete works
from inputimeout import TimeoutOccurred
if not md5sum_check_if_build_is_needed(
build_ci_params=build_ci_params,
md5sum_cache_dir=build_ci_params.md5sum_cache_dir,
skip_provider_dependencies_check=build_ci_params.skip_provider_dependencies_check,
):
return False
try:
answer = user_confirm(
message="Do you want to build the image (this works best when you have good connection and "
"can take usually from 20 seconds to few minutes depending how old your image is)?",
timeout=STANDARD_TIMEOUT,
default_answer=Answer.NO,
)
if answer == answer.YES:
if is_repo_rebased(build_ci_params.github_repository, build_ci_params.airflow_branch):
return True
get_console().print(
"\n[warning]This might take a lot of time (more than 10 minutes) even if you have "
"a good network connection. We think you should attempt to rebase first.[/]\n"
)
answer = user_confirm(
"But if you really, really want - you can attempt it. Are you really sure?",
timeout=STANDARD_TIMEOUT,
default_answer=Answer.NO,
)
if answer == Answer.YES:
return True
get_console().print(
f"[info]Please rebase your code to latest {build_ci_params.airflow_branch} "
"before continuing.[/]\nCheck this link to find out how "
"https://github.com/apache/airflow/blob/main/contributing-docs/10_working_with_git.rst\n"
)
get_console().print("[error]Exiting the process[/]\n")
sys.exit(1)
elif answer == Answer.NO:
instruct_build_image(build_ci_params.python)
return False
else: # users_status == Answer.QUIT:
get_console().print("\n[warning]Quitting the process[/]\n")
sys.exit()
except TimeoutOccurred:
get_console().print("\nTimeout. Considering your response as No\n")
instruct_build_image(build_ci_params.python)
return False
except Exception as e:
get_console().print(f"\nTerminating the process on {e}")
sys.exit(1)
|
Check if we should run the build based on what files have been modified since last build and answer from
the user.
* If build is needed, the user is asked for confirmation
* If the branch is not rebased it warns the user to rebase (to make sure latest remote cache is useful)
* Builds Image/Skips/Quits depending on the answer
:param build_ci_params: parameters for the build
|
python
|
dev/breeze/src/airflow_breeze/commands/ci_image_commands.py
| 719
|
[
"build_ci_params"
] |
bool
| true
| 7
| 6.96
|
apache/airflow
| 43,597
|
sphinx
| false
|
toString
|
@Override
public String toString() {
    // Lazily build the representation on first call and cache it for reuse;
    // format is '[min..max]'.
    if (toString == null) {
        final StringBuilder buf = new StringBuilder(32);
        buf.append('[').append(minimum).append("..").append(maximum).append(']');
        toString = buf.toString();
    }
    return toString;
}
|
Gets the range as a {@link String}.
<p>The format of the String is '[<em>min</em>..<em>max</em>]'.</p>
@return the {@link String} representation of this range.
|
java
|
src/main/java/org/apache/commons/lang3/Range.java
| 537
|
[] |
String
| true
| 2
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
get_indexer_for
|
def get_indexer_for(self, target) -> npt.NDArray[np.intp]:
    """
    Return an indexer for ``target`` even when this index is non-unique.

    Dispatches to ``get_indexer`` when the index can be treated as unique,
    and to ``get_indexer_non_unique`` otherwise, so callers always receive
    an indexer array regardless of uniqueness.

    Parameters
    ----------
    target : Index
        An iterable containing the values to be used for computing indexer.

    Returns
    -------
    np.ndarray[np.intp]
        List of indices.

    See Also
    --------
    Index.get_indexer : Computes indexer and mask for new index given
        the current index.
    Index.get_non_unique : Returns indexer and masks for new index given
        the current index.

    Examples
    --------
    >>> idx = pd.Index([np.nan, "var1", np.nan])
    >>> idx.get_indexer_for([np.nan])
    array([0, 2])
    """
    if not self._index_as_unique:
        # Non-unique path: get_indexer_non_unique returns (indexer, missing);
        # only the indexer part is relevant here.
        indexer, _missing = self.get_indexer_non_unique(target)
        return indexer
    return self.get_indexer(target)
|
Guaranteed return of an indexer even when non-unique.
This dispatches to get_indexer or get_indexer_non_unique
as appropriate.
Parameters
----------
target : Index
An iterable containing the values to be used for computing indexer.
Returns
-------
np.ndarray[np.intp]
List of indices.
See Also
--------
Index.get_indexer : Computes indexer and mask for new index given
the current index.
Index.get_non_unique : Returns indexer and masks for new index given
the current index.
Examples
--------
>>> idx = pd.Index([np.nan, "var1", np.nan])
>>> idx.get_indexer_for([np.nan])
array([0, 2])
|
python
|
pandas/core/indexes/base.py
| 6,162
|
[
"self",
"target"
] |
npt.NDArray[np.intp]
| true
| 2
| 7.84
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.