function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
paddedValue
|
/**
 * Converts a {@code long} to a {@link String}, optionally left-padding the
 * result with zeros up to the requested width.
 *
 * @param value the value to convert
 * @param padWithZeros whether to pad with zeroes
 * @param count the size to pad to (ignored if {@code padWithZeros} is false)
 * @return the string result
 */
private static String paddedValue(final long value, final boolean padWithZeros, final int count) {
    final String digits = Long.toString(value);
    if (!padWithZeros) {
        return digits;
    }
    return StringUtils.leftPad(digits, count, '0');
}
|
Converts a {@code long} to a {@link String} with optional
zero padding.
@param value the value to convert
@param padWithZeros whether to pad with zeroes
@param count the size to pad to (ignored if {@code padWithZeros} is false)
@return the string result
|
java
|
src/main/java/org/apache/commons/lang3/time/DurationFormatUtils.java
| 780
|
[
"value",
"padWithZeros",
"count"
] |
String
| true
| 2
| 7.84
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
parseBoolean
|
/**
 * Parses a string representation of a boolean value to {@code boolean}.
 * <p>
 * Delegates recognition to {@code isFalse} and {@code isTrue}; any value that
 * matches neither is rejected with an exception.
 *
 * @param value the string to parse
 * @return {@code true} iff the value is recognized as true, {@code false} iff
 *     the value is recognized as false
 * @throws IllegalArgumentException if the string is neither a true nor a
 *     false representation
 */
public static boolean parseBoolean(String value) {
    if (isFalse(value)) {
        return false;
    }
    if (isTrue(value)) {
        return true;
    }
    throw new IllegalArgumentException("Failed to parse value [" + value + "] as only [true] or [false] are allowed.");
}
|
Parses a string representation of a boolean value to <code>boolean</code>.
@return <code>true</code> iff the provided value is "true". <code>false</code> iff the provided value is "false".
@throws IllegalArgumentException if the string cannot be parsed to boolean.
|
java
|
libs/core/src/main/java/org/elasticsearch/core/Booleans.java
| 56
|
[
"value"
] | true
| 3
| 6.4
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
restore_from_cluster_snapshot
|
def restore_from_cluster_snapshot(self, cluster_identifier: str, snapshot_identifier: str) -> dict | None:
"""
Restore a cluster from its snapshot.
.. seealso::
- :external+boto3:py:meth:`Redshift.Client.restore_from_cluster_snapshot`
:param cluster_identifier: unique identifier of a cluster
:param snapshot_identifier: unique identifier for a snapshot of a cluster
"""
response = self.conn.restore_from_cluster_snapshot(
ClusterIdentifier=cluster_identifier, SnapshotIdentifier=snapshot_identifier
)
return response["Cluster"] if response["Cluster"] else None
|
Restore a cluster from its snapshot.
.. seealso::
- :external+boto3:py:meth:`Redshift.Client.restore_from_cluster_snapshot`
:param cluster_identifier: unique identifier of a cluster
:param snapshot_identifier: unique identifier for a snapshot of a cluster
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/redshift_cluster.py
| 141
|
[
"self",
"cluster_identifier",
"snapshot_identifier"
] |
dict | None
| true
| 2
| 6.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
getFormatter
|
/**
 * Obtain a DateTimeFormatter with user-specific settings applied to the
 * given base formatter.
 *
 * @param formatter the base formatter that establishes default formatting rules
 * (generally user independent)
 * @param locale the current user locale (may be {@code null} if not known)
 * @return the user-specific DateTimeFormatter
 */
public static DateTimeFormatter getFormatter(DateTimeFormatter formatter, @Nullable Locale locale) {
    DateTimeFormatter localized = formatter;
    if (locale != null) {
        localized = formatter.withLocale(locale);
    }
    DateTimeContext context = getDateTimeContext();
    if (context == null) {
        return localized;
    }
    return context.getFormatter(localized);
}
|
Obtain a DateTimeFormatter with user-specific settings applied to the given base formatter.
@param formatter the base formatter that establishes default formatting rules
(generally user independent)
@param locale the current user locale (may be {@code null} if not known)
@return the user-specific DateTimeFormatter
|
java
|
spring-context/src/main/java/org/springframework/format/datetime/standard/DateTimeContextHolder.java
| 79
|
[
"formatter",
"locale"
] |
DateTimeFormatter
| true
| 3
| 7.28
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
union
|
/**
 * Match all classes that <i>either</i> (or both) of the given ClassFilters matches.
 *
 * @param cf1 the first ClassFilter
 * @param cf2 the second ClassFilter
 * @return a distinct ClassFilter matching every class that either filter matches
 */
public static ClassFilter union(ClassFilter cf1, ClassFilter cf2) {
    Assert.notNull(cf1, "First ClassFilter must not be null");
    Assert.notNull(cf2, "Second ClassFilter must not be null");
    ClassFilter[] filters = new ClassFilter[] {cf1, cf2};
    return new UnionClassFilter(filters);
}
|
Match all classes that <i>either</i> (or both) of the given ClassFilters matches.
@param cf1 the first ClassFilter
@param cf2 the second ClassFilter
@return a distinct ClassFilter that matches all classes that either
of the given ClassFilter matches
|
java
|
spring-aop/src/main/java/org/springframework/aop/support/ClassFilters.java
| 49
|
[
"cf1",
"cf2"
] |
ClassFilter
| true
| 1
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
cloneDeepWith
|
/**
 * This method is like `_.cloneWith` except that it recursively clones `value`.
 * Non-function customizers are ignored.
 *
 * @param {*} value The value to recursively clone.
 * @param {Function} [customizer] The function to customize cloning.
 * @returns {*} Returns the deep cloned value.
 */
function cloneDeepWith(value, customizer) {
  if (typeof customizer != 'function') {
    customizer = undefined;
  }
  return baseClone(value, CLONE_DEEP_FLAG | CLONE_SYMBOLS_FLAG, customizer);
}
|
This method is like `_.cloneWith` except that it recursively clones `value`.
@static
@memberOf _
@since 4.0.0
@category Lang
@param {*} value The value to recursively clone.
@param {Function} [customizer] The function to customize cloning.
@returns {*} Returns the deep cloned value.
@see _.cloneWith
@example
function customizer(value) {
if (_.isElement(value)) {
return value.cloneNode(true);
}
}
var el = _.cloneDeepWith(document.body, customizer);
console.log(el === document.body);
// => false
console.log(el.nodeName);
// => 'BODY'
console.log(el.childNodes.length);
// => 20
|
javascript
|
lodash.js
| 11,226
|
[
"value",
"customizer"
] | false
| 2
| 6.96
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
lastConnectAttemptMs
|
/**
 * Get the timestamp of the latest connection attempt of a given node.
 *
 * @param id the connection to fetch the state for
 * @return the last connect attempt in milliseconds, or 0 if no state is tracked for the node
 */
public long lastConnectAttemptMs(String id) {
    NodeConnectionState state = this.nodeState.get(id);
    if (state == null) {
        return 0;
    }
    return state.lastConnectAttemptMs;
}
|
Get the timestamp of the latest connection attempt of a given node
@param id the connection to fetch the state for
|
java
|
clients/src/main/java/org/apache/kafka/clients/ClusterConnectionStates.java
| 426
|
[
"id"
] | true
| 2
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
define
|
/**
 * Define a new configuration with no special validation logic.
 *
 * @param name The name of the config parameter
 * @param type The type of the config
 * @param defaultValue The default value to use if this config isn't present
 * @param importance The importance of this config: is this something you will likely need to change.
 * @param documentation The documentation string for the config
 * @param alternativeString The string which will be used to override the string of defaultValue
 * @return This ConfigDef so you can chain calls
 */
public ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation, String alternativeString) {
    // Delegate to the full overload: no validator, no dependents, default
    // priority (-1) and width, and the config name reused as display name.
    return define(name, type, defaultValue, null, importance, documentation, null, -1, Width.NONE,
        name, Collections.emptyList(), null, alternativeString);
}
|
Define a new configuration with no special validation logic
@param name The name of the config parameter
@param type The type of the config
@param defaultValue The default value to use if this config isn't present
@param importance The importance of this config: is this something you will likely need to change.
@param documentation The documentation string for the config
@param alternativeString The string which will be used to override the string of defaultValue
@return This ConfigDef so you can chain calls
|
java
|
clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
| 428
|
[
"name",
"type",
"defaultValue",
"importance",
"documentation",
"alternativeString"
] |
ConfigDef
| true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
lower_to_while_loop
|
def lower_to_while_loop(*args, **kwargs):
    """
    The traced graph of this function will be used to replace the original scan fx_node.

    Flattened scan inputs arrive via ``*args`` and are unflattened with
    ``tree_spec`` into ``(init, xs, additional_inputs)``.

    NOTE(review): this closure relies on names captured from the enclosing
    scope — ``tree_spec``, ``ys_outputs``, ``sub_gm``, ``num_init_leaves``,
    ``resolve_shape_to_proxy`` and ``_extract_carry_and_out`` — confirm their
    contracts against the enclosing function when modifying this code.
    """
    # No keyword arguments are expected; everything comes in positionally.
    assert len(kwargs) == 0
    # Step 1: construct necessary inputs to while_loop based on scan's input.
    (
        init,
        xs,
        additional_inputs,
    ) = pytree.tree_unflatten(args, tree_spec)
    # Number of loop iterations == size of the scanned (leading) dimension.
    scan_length = xs[0].size(0)
    loop_idx = torch.zeros([], dtype=torch.int64, device=torch.device("cpu"))
    # NOTE [Pre-allocate scan's output buffer]
    # In order to pre-allocate the output buffer for ys, we rely on the meta of scan's fx_node.
    # However, the meta consists of concrete symints, we need to bind those symints with
    # proxies in order to trace the torch.empty_strided call correctly.
    #
    # Also note that basic free symbols of tensor's shapes are guaranteed to be lifted as subgraph inputs
    # in dynamo so we can always re-construct the sym expression from placeholders.
    # See Note [Auto lift basic free symbols when create_graph_input] for how this is done.
    bound_symbols = {
        arg.node.expr: arg
        for arg in pytree.tree_leaves((args, scan_length))
        if isinstance(arg, torch.SymInt)
    }
    # Pre-allocated output buffers matching each expected ys output's meta.
    ys_outs = [
        torch.empty_strided(
            resolve_shape_to_proxy(ys_out.size(), bound_symbols),
            resolve_shape_to_proxy(ys_out.stride(), bound_symbols),
            device=ys_out.device,
            dtype=ys_out.dtype,
            layout=ys_out.layout,
            requires_grad=ys_out.requires_grad,
        )
        for ys_out in ys_outputs
    ]
    while_loop_operands = (loop_idx, ys_outs, init, xs)
    flat_operands, operands_spec = pytree.tree_flatten(while_loop_operands)
    _, operands_and_additional_inputs_spec = pytree.tree_flatten(
        (*while_loop_operands, additional_inputs)
    )

    # Step 2: create the cond_fn and body_fn for while_loop
    def cond_fn(*flat_args):
        # Keep looping while the index has not reached the scan length.
        loop_idx, _, _, _, _ = pytree.tree_unflatten(
            flat_args, operands_and_additional_inputs_spec
        )  # type: ignore[has-type]
        return loop_idx < scan_length  # type: ignore[has-type]

    def body_fn(*flat_args):
        loop_idx, ys_outs, carry, xs, additional_inputs = pytree.tree_unflatten(
            flat_args,
            operands_and_additional_inputs_spec,  # type: ignore[has-type]
        )
        idx_int = loop_idx.item()
        # Runtime bounds checks so the data-dependent index is known valid.
        torch.ops.aten._assert_scalar.default(idx_int >= 0, "")
        torch.ops.aten._assert_scalar.default(idx_int < scan_length, "")
        # Slice the current step's inputs out of each scanned tensor.
        sub_xs = [torch.ops.aten.select.int(x, 0, idx_int) for x in xs]
        next_carry, ys = _extract_carry_and_out(
            sub_gm(*(list(carry) + sub_xs + list(additional_inputs))),
            num_init_leaves,
        )
        # Write this step's outputs into the pre-allocated buffers in place.
        for y, y_out in zip(ys, ys_outs):
            y_out_slice = torch.ops.aten.select.int(y_out, 0, idx_int)
            y_out_slice.copy_(y)
        return loop_idx + 1, *ys_outs, *next_carry, *xs

    # Step 3: call the while_loop operator
    _, ys_outs, last_carry, _ = pytree.tree_unflatten(
        torch.ops.higher_order.while_loop(
            cond_fn,
            body_fn,
            tuple(flat_operands),
            tuple(additional_inputs),
        ),
        operands_spec,
    )
    return list(last_carry) + list(ys_outs)
|
The traced graph of this function will be used to replace the original scan fx_node.
|
python
|
torch/_inductor/fx_passes/post_grad.py
| 615
|
[] | false
| 2
| 6.32
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
|
extract
|
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Note that `place` does the exact opposite of `extract`.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress, place
Examples
--------
>>> import numpy as np
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]])
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
|
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Note that `place` does the exact opposite of `extract`.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress, place
Examples
--------
>>> import numpy as np
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]])
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
|
python
|
numpy/lib/_function_base_impl.py
| 2,044
|
[
"condition",
"arr"
] | false
| 1
| 6.32
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
transposeHalfByteImpl
|
/**
 * Transposes an array of 4-bit values (each int in [0, 15], asserted) into four
 * bit planes packed into bytes.
 * <p>
 * For every group of 8 consecutive input values, one byte per bit plane is
 * written: bit 0 of the group goes to {@code quantQueryByte[index]}, bit 1 to
 * {@code quantQueryByte[index + quantQueryByte.length / 4]}, bit 2 to
 * {@code quantQueryByte[index + quantQueryByte.length / 2]} and bit 3 to
 * {@code quantQueryByte[index + 3 * quantQueryByte.length / 4]}, with the first
 * value of the group occupying the most significant bit of each output byte.
 * A trailing group of fewer than 8 values is packed the same way with the
 * remaining low bits left zero.
 *
 * @param q the input values, each expected to be in [0, 15]
 * @param quantQueryByte the output array holding the four packed bit planes
 */
public static void transposeHalfByteImpl(int[] q, byte[] quantQueryByte) {
    int limit = q.length - 7;
    int i = 0;
    int index = 0;
    // Main loop: fully unrolled extraction of all four bit planes for 8 values at a time.
    for (; i < limit; i += 8, index++) {
        assert q[i] >= 0 && q[i] <= 15;
        assert q[i + 1] >= 0 && q[i + 1] <= 15;
        assert q[i + 2] >= 0 && q[i + 2] <= 15;
        assert q[i + 3] >= 0 && q[i + 3] <= 15;
        assert q[i + 4] >= 0 && q[i + 4] <= 15;
        assert q[i + 5] >= 0 && q[i + 5] <= 15;
        assert q[i + 6] >= 0 && q[i + 6] <= 15;
        assert q[i + 7] >= 0 && q[i + 7] <= 15;
        // Bit plane 0 (least significant bit of each value).
        int lowerByte = (q[i] & 1) << 7 | (q[i + 1] & 1) << 6 | (q[i + 2] & 1) << 5 | (q[i + 3] & 1) << 4 | (q[i + 4] & 1) << 3 | (q[i
            + 5] & 1) << 2 | (q[i + 6] & 1) << 1 | (q[i + 7] & 1);
        // Bit plane 1.
        int lowerMiddleByte = ((q[i] >> 1) & 1) << 7 | ((q[i + 1] >> 1) & 1) << 6 | ((q[i + 2] >> 1) & 1) << 5 | ((q[i + 3] >> 1) & 1)
            << 4 | ((q[i + 4] >> 1) & 1) << 3 | ((q[i + 5] >> 1) & 1) << 2 | ((q[i + 6] >> 1) & 1) << 1 | ((q[i + 7] >> 1) & 1);
        // Bit plane 2.
        int upperMiddleByte = ((q[i] >> 2) & 1) << 7 | ((q[i + 1] >> 2) & 1) << 6 | ((q[i + 2] >> 2) & 1) << 5 | ((q[i + 3] >> 2) & 1)
            << 4 | ((q[i + 4] >> 2) & 1) << 3 | ((q[i + 5] >> 2) & 1) << 2 | ((q[i + 6] >> 2) & 1) << 1 | ((q[i + 7] >> 2) & 1);
        // Bit plane 3 (most significant of the four bits).
        int upperByte = ((q[i] >> 3) & 1) << 7 | ((q[i + 1] >> 3) & 1) << 6 | ((q[i + 2] >> 3) & 1) << 5 | ((q[i + 3] >> 3) & 1) << 4
            | ((q[i + 4] >> 3) & 1) << 3 | ((q[i + 5] >> 3) & 1) << 2 | ((q[i + 6] >> 3) & 1) << 1 | ((q[i + 7] >> 3) & 1);
        // The four planes are laid out in consecutive quarters of the output array.
        quantQueryByte[index] = (byte) lowerByte;
        quantQueryByte[index + quantQueryByte.length / 4] = (byte) lowerMiddleByte;
        quantQueryByte[index + quantQueryByte.length / 2] = (byte) upperMiddleByte;
        quantQueryByte[index + 3 * quantQueryByte.length / 4] = (byte) upperByte;
    }
    if (i == q.length) {
        return; // all done
    }
    // Tail: fewer than 8 values remain; pack them from the high bit downward.
    int lowerByte = 0;
    int lowerMiddleByte = 0;
    int upperMiddleByte = 0;
    int upperByte = 0;
    for (int j = 7; i < q.length; j--, i++) {
        lowerByte |= (q[i] & 1) << j;
        lowerMiddleByte |= ((q[i] >> 1) & 1) << j;
        upperMiddleByte |= ((q[i] >> 2) & 1) << j;
        upperByte |= ((q[i] >> 3) & 1) << j;
    }
    quantQueryByte[index] = (byte) lowerByte;
    quantQueryByte[index + quantQueryByte.length / 4] = (byte) lowerMiddleByte;
    quantQueryByte[index + quantQueryByte.length / 2] = (byte) upperMiddleByte;
    quantQueryByte[index + 3 * quantQueryByte.length / 4] = (byte) upperByte;
}
|
Packs two bit vector (values 0-3) into a byte array with lower bits first.
The striding is similar to transposeHalfByte
@param vector the input vector with values 0-3
@param packed the output packed byte array
|
java
|
libs/simdvec/src/main/java/org/elasticsearch/simdvec/internal/vectorization/DefaultESVectorUtilSupport.java
| 409
|
[
"q",
"quantQueryByte"
] |
void
| true
| 12
| 6.72
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
start_task_execution
|
def start_task_execution(self, task_arn: str, **kwargs) -> str:
    """
    Start a TaskExecution for the specified task_arn.

    Each task can have at most one TaskExecution. Any extra keyword arguments
    are forwarded to the boto3 ``start_task_execution`` call.

    .. seealso::
        - :external+boto3:py:meth:`DataSync.Client.start_task_execution`

    :param task_arn: TaskArn
    :return: TaskExecutionArn
    :raises ClientError: If a TaskExecution is already busy running for this ``task_arn``.
    :raises AirflowBadRequest: If ``task_arn`` is empty.
    """
    if not task_arn:
        raise AirflowBadRequest("task_arn not specified")
    response = self.get_conn().start_task_execution(TaskArn=task_arn, **kwargs)
    return response["TaskExecutionArn"]
|
Start a TaskExecution for the specified task_arn.
Each task can have at most one TaskExecution.
Additional keyword arguments send to ``start_task_execution`` boto3 method.
.. seealso::
- :external+boto3:py:meth:`DataSync.Client.start_task_execution`
:param task_arn: TaskArn
:return: TaskExecutionArn
:raises ClientError: If a TaskExecution is already busy running for this ``task_arn``.
:raises AirflowBadRequest: If ``task_arn`` is empty.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/datasync.py
| 218
|
[
"self",
"task_arn"
] |
str
| true
| 2
| 7.44
|
apache/airflow
| 43,597
|
sphinx
| false
|
get_commented_out_prs_from_provider_changelogs
|
def get_commented_out_prs_from_provider_changelogs() -> list[int]:
    """
    Returns list of PRs that are commented out in the changelog.

    Scans every provider's ``changelog.rst``, tracks whether the current line is
    inside a commented-out "excluded changes" section, and collects PR numbers
    (``(#12345)``) from bullet lines within such sections.

    :return: sorted list of PR numbers that appear only in comments in changelog.rst files in "providers" dir
    """
    # Matches a PR reference of the form "(#12345)" anywhere in a line.
    pr_matcher = re.compile(r".*\(#([0-9]+)\).*")
    commented_prs = set()
    # Get all provider distributions
    provider_distributions_metadata = get_provider_distributions_metadata()
    for provider_id in provider_distributions_metadata.keys():
        provider_details = get_provider_details(provider_id)
        changelog_path = provider_details.changelog_path
        print(f"[info]Checking changelog {changelog_path} for PRs to be excluded automatically.")
        if not changelog_path.exists():
            continue
        changelog_lines = changelog_path.read_text().splitlines()
        # State flag: True while iterating inside a commented-out section.
        in_excluded_section = False
        for line in changelog_lines:
            # Check if we're entering an excluded/commented section
            if line.strip().startswith(
                ".. Below changes are excluded from the changelog"
            ) or line.strip().startswith(".. Review and move the new changes"):
                in_excluded_section = True
                continue
            # Check if we're exiting the excluded section (new version header or regular content)
            # Version headers are lines that contain only dots (like "4.10.1" followed by "......")
            # Or lines that start with actual content sections like "Misc", "Features", etc.
            if (
                in_excluded_section
                and line
                and not line.strip().startswith("..")
                and not line.strip().startswith("*")
            ):
                # end excluded section with empty line
                # NOTE(review): `line` is truthy in this branch, so only
                # whitespace-only lines (e.g. "   ") can satisfy this check; a
                # fully empty line never ends the section — confirm intended.
                if line.strip() == "":
                    in_excluded_section = False
            # Extract PRs from excluded sections
            if in_excluded_section and line.strip().startswith("*"):
                match_result = pr_matcher.search(line)
                if match_result:
                    commented_prs.add(int(match_result.group(1)))
    return sorted(commented_prs)
|
Returns list of PRs that are commented out in the changelog.
:return: list of PR numbers that appear only in comments in changelog.rst files in "providers" dir
|
python
|
dev/breeze/src/airflow_breeze/commands/release_management_commands.py
| 2,444
|
[] |
list[int]
| true
| 14
| 8.4
|
apache/airflow
| 43,597
|
unknown
| false
|
_maybe_align_series_as_frame
|
def _maybe_align_series_as_frame(self, series: Series, axis: AxisInt):
    """
    If the Series operand is not EA-dtype, we can broadcast to 2D and operate
    blockwise.

    Parameters
    ----------
    series : Series
        Operand whose values may be broadcast against this frame.
    axis : int
        0 to broadcast the values as a column vector, otherwise as a row vector.

    Returns
    -------
    Series or DataFrame
        ``series`` unchanged when its values are a non-datetimelike
        ExtensionArray; otherwise a DataFrame of the values broadcast to
        this frame's shape, with metadata finalized from ``series``.
    """
    rvalues = series._values
    if not isinstance(rvalues, np.ndarray):
        # TODO(EA2D): no need to special-case with 2D EAs
        if lib.is_np_dtype(rvalues.dtype, "mM"):
            # i.e. DatetimeArray[tznaive] or TimedeltaArray
            # We can losslessly+cheaply cast to ndarray
            rvalues = np.asarray(rvalues)
        else:
            return series

    if axis == 0:
        # Column vector: one value per row.
        rvalues = rvalues.reshape(-1, 1)
    else:
        # Row vector: one value per column.
        rvalues = rvalues.reshape(1, -1)

    rvalues = np.broadcast_to(rvalues, self.shape)
    # pass dtype to avoid doing inference
    return self._constructor(
        rvalues,
        index=self.index,
        columns=self.columns,
        dtype=rvalues.dtype,
    ).__finalize__(series)
|
If the Series operand is not EA-dtype, we can broadcast to 2D and operate
blockwise.
|
python
|
pandas/core/frame.py
| 9,000
|
[
"self",
"series",
"axis"
] | true
| 6
| 6
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
|
dateTimePattern
|
/**
 * Creates a {@link Coercer} that parses values with the given date-time
 * pattern and converts them to epoch time in milliseconds, treating
 * {@link DateTimeParseException} as a recoverable parse failure.
 *
 * @param pattern the date-time pattern to parse values with
 * @return the coercer
 */
static Coercer dateTimePattern(String pattern) {
    return new Coercer(
            (candidate) -> DateTimeFormatter.ofPattern(pattern)
                .parse(candidate, Instant::from)
                .toEpochMilli(),
            DateTimeParseException.class::isInstance);
}
|
Attempt to convert the specified value to epoch time.
@param value the value to coerce to
@return the epoch time in milliseconds or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/info/GitProperties.java
| 157
|
[
"pattern"
] |
Coercer
| true
| 1
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
temp_environ
|
def temp_environ(updates: dict[str, str]):
    """
    Temporarily set environment variables and restore them after the block.

    Args:
        updates: Dict of environment variables to set.
    """
    _ABSENT = object()  # sentinel for "variable did not exist before"
    saved: dict[str, str | object] = {name: os.environ.get(name, _ABSENT) for name in updates}
    try:
        os.environ.update(updates)
        yield
    finally:
        # Restore each variable to its prior value, removing ones we created.
        for name, previous in saved.items():
            if previous is _ABSENT:
                os.environ.pop(name, None)
            else:
                os.environ[name] = previous  # type: ignore[arg-type]
|
Temporarily set environment variables and restore them after the block.
Args:
updates: Dict of environment variables to set.
|
python
|
.ci/lumen_cli/cli/lib/common/utils.py
| 85
|
[
"updates"
] | true
| 4
| 6.4
|
pytorch/pytorch
| 96,034
|
google
| false
|
|
applyAsBoolean
|
/**
 * Applies this function to the given arguments.
 *
 * @param t the first function argument.
 * @param u the second function argument.
 * @return the function result.
 */
boolean applyAsBoolean(T t, U u);
|
Applies this function to the given arguments.
@param t the first function argument.
@param u the second function argument.
@return the function result.
|
java
|
src/main/java/org/apache/commons/lang3/function/ToBooleanBiFunction.java
| 41
|
[
"t",
"u"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
initialize
|
/**
 * Initializes the given application context by registering a
 * {@code ConditionEvaluationReportListener} bound to that context.
 *
 * @param applicationContext the context to register the listener with
 */
@Override
public void initialize(ConfigurableApplicationContext applicationContext) {
    applicationContext.addApplicationListener(new ConditionEvaluationReportListener(applicationContext));
}
|
Static factory method that creates a
{@link ConditionEvaluationReportLoggingListener} which logs the report at the
specified log level.
@param logLevelForReport the log level to log the report at
@return a {@link ConditionEvaluationReportLoggingListener} instance.
@since 3.0.0
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/logging/ConditionEvaluationReportLoggingListener.java
| 82
|
[
"applicationContext"
] |
void
| true
| 1
| 6.16
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getObject
|
/**
 * Returns the {@link MessageInterpolator} to use, wrapped in a
 * {@link MessageSourceMessageInterpolator} when a message source is configured
 * so that message parameters are resolved before final interpolation.
 *
 * @return the message interpolator
 * @throws BeansException in case of creation errors
 */
@Override
public MessageInterpolator getObject() throws BeansException {
    MessageInterpolator interpolator = getMessageInterpolator();
    if (this.messageSource == null) {
        return interpolator;
    }
    return new MessageSourceMessageInterpolator(this.messageSource, interpolator);
}
|
Creates a new {@link MessageInterpolatorFactory} that will produce a
{@link MessageInterpolator} that uses the given {@code messageSource} to resolve
any message parameters before final interpolation.
@param messageSource message source to be used by the interpolator
@since 2.6.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/validation/MessageInterpolatorFactory.java
| 69
|
[] |
MessageInterpolator
| true
| 2
| 6.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
_compute_missing_values_in_feature_mask
|
def _compute_missing_values_in_feature_mask(self, X, estimator_name=None):
    """Return boolean mask denoting if there are missing values for each feature.

    This method also ensures that X is finite.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features), dtype=DOUBLE
        Input data.

    estimator_name : str or None, default=None
        Name to use when raising an error. Defaults to the class name.

    Returns
    -------
    missing_values_in_feature_mask : ndarray of shape (n_features,), or None
        Missing value mask. If missing values are not supported or there
        are no missing values, return None.
    """
    estimator_name = estimator_name or self.__class__.__name__
    common_kwargs = dict(estimator_name=estimator_name, input_name="X")

    # When missing values are not supported, just validate finiteness.
    if not self._support_missing_values(X):
        assert_all_finite(X, **common_kwargs)
        return None

    # Cheap whole-array check first: a finite sum means no inf and no NaN.
    with np.errstate(over="ignore"):
        overall_sum = np.sum(X)

    if not np.isfinite(overall_sum):
        # Raise a ValueError in case of the presence of an infinite element.
        _assert_all_finite_element_wise(X, xp=np, allow_nan=True, **common_kwargs)

    # If the sum is not nan, then there are no missing values
    if not np.isnan(overall_sum):
        return None

    # Per-feature check only when we know at least one NaN is present.
    missing_values_in_feature_mask = _any_isnan_axis0(X)
    return missing_values_in_feature_mask
|
Return boolean mask denoting if there are missing values for each feature.
This method also ensures that X is finite.
Parameter
---------
X : array-like of shape (n_samples, n_features), dtype=DOUBLE
Input data.
estimator_name : str or None, default=None
Name to use when raising an error. Defaults to the class name.
Returns
-------
missing_values_in_feature_mask : ndarray of shape (n_features,), or None
Missing value mask. If missing values are not supported or there
are no missing values, return None.
|
python
|
sklearn/tree/_classes.py
| 194
|
[
"self",
"X",
"estimator_name"
] | false
| 5
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
unknown
| false
|
|
addCopies
|
/**
 * Adds a number of occurrences of an element to this {@code ImmutableSortedMultiset}.
 *
 * @param element the element to add
 * @param occurrences the number of occurrences of the element to add. May be zero, in which
 *     case no change will be made.
 * @return this {@code Builder} object
 * @throws NullPointerException if {@code element} is null
 * @throws IllegalArgumentException if {@code occurrences} is negative
 */
@CanIgnoreReturnValue
@Override
public Builder<E> addCopies(E element, int occurrences) {
    checkNotNull(element);
    CollectPreconditions.checkNonnegative(occurrences, "occurrences");
    if (occurrences > 0) {
        maintenance();
        elements[length] = element;
        counts[length] = occurrences;
        length++;
    }
    return this;
}
|
Adds a number of occurrences of an element to this {@code ImmutableSortedMultiset}.
@param element the element to add
@param occurrences the number of occurrences of the element to add. May be zero, in which
case no change will be made.
@return this {@code Builder} object
@throws NullPointerException if {@code element} is null
@throws IllegalArgumentException if {@code occurrences} is negative, or if this operation
would result in more than {@link Integer#MAX_VALUE} occurrences of the element
|
java
|
android/guava/src/com/google/common/collect/ImmutableSortedMultiset.java
| 593
|
[
"element",
"occurrences"
] | true
| 2
| 7.76
|
google/guava
| 51,352
|
javadoc
| false
|
|
rec_array_to_mgr
|
def rec_array_to_mgr(
    data: np.rec.recarray | np.ndarray,
    index,
    columns,
    dtype: DtypeObj | None,
    copy: bool,
) -> Manager:
    """
    Extract from a masked rec array and create the manager.

    Parameters
    ----------
    data : np.rec.recarray or np.ndarray
        Record array to extract column arrays from.
    index : Index-like or None
        Index to use; a default RangeIndex sized to ``data`` is created when None.
    columns : Index-like or None
        Column labels; taken from the record array when None.
    dtype : DtypeObj or None
        Dtype to pass through to manager construction, if any.
    copy : bool
        Whether to return a deep copy of the resulting manager.

    Returns
    -------
    Manager
    """
    # essentially process a record array then fill it
    fdata = ma.getdata(data)
    if index is None:
        index = default_index(len(fdata))
    else:
        index = ensure_index(index)

    if columns is not None:
        columns = ensure_index(columns)
    arrays, arr_columns = to_arrays(fdata, columns)

    # create the manager
    arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, len(index))
    if columns is None:
        columns = arr_columns

    mgr = arrays_to_mgr(arrays, columns, index, dtype=dtype)

    if copy:
        mgr = mgr.copy(deep=True)
    return mgr
|
Extract from a masked rec array and create the manager.
|
python
|
pandas/core/internals/construction.py
| 154
|
[
"data",
"index",
"columns",
"dtype",
"copy"
] |
Manager
| true
| 6
| 6
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
resolve
|
@SuppressWarnings("unchecked")
public <T> @Nullable T resolve(RegisteredBean registeredBean, Class<T> requiredType) {
Object value = resolveObject(registeredBean);
Assert.isInstanceOf(requiredType, value);
return (T) value;
}
|
Resolve the field value for the specified registered bean.
@param registeredBean the registered bean
@param requiredType the required type
@return the resolved field value
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/AutowiredFieldValueResolver.java
| 125
|
[
"registeredBean",
"requiredType"
] |
T
| true
| 1
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
next
|
/**
 * Returns the next value in the iteration.
 * <p>
 * Records the node being visited in both {@code previous} and {@code current},
 * advances {@code next} to its sibling and increments {@code nextIndex}.
 *
 * @return the value of the node just visited
 * @throws NoSuchElementException if there is no next element
 */
@CanIgnoreReturnValue
@Override
@ParametricNullness
public V next() {
    if (next == null) {
        throw new NoSuchElementException();
    }
    previous = current = next;
    next = next.nextSibling;
    nextIndex++;
    return current.getValue();
}
|
Constructs a new iterator over all values for the specified key starting at the specified
index. This constructor is optimized so that it starts at either the head or the tail,
depending on which is closer to the specified index. This allows adds to the tail to be done
in constant time.
@throws IndexOutOfBoundsException if index is invalid
|
java
|
android/guava/src/com/google/common/collect/LinkedListMultimap.java
| 502
|
[] |
V
| true
| 2
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
getLoggingSystem
|
/**
 * Returns the first {@link LoggingSystem} produced by any delegate factory,
 * or {@code null} if no delegates are configured or none can supply one.
 *
 * @param classLoader the class loader to use
 * @return the logging system, or {@code null}
 */
@Override
public @Nullable LoggingSystem getLoggingSystem(ClassLoader classLoader) {
    if (this.delegates == null) {
        return null;
    }
    List<LoggingSystemFactory> factories = this.delegates.apply(classLoader);
    if (factories == null) {
        return null;
    }
    for (LoggingSystemFactory factory : factories) {
        LoggingSystem loggingSystem = factory.getLoggingSystem(classLoader);
        if (loggingSystem != null) {
            return loggingSystem;
        }
    }
    return null;
}
|
Create a new {@link DelegatingLoggingSystemFactory} instance.
@param delegates a function that provides the delegates
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/DelegatingLoggingSystemFactory.java
| 41
|
[
"classLoader"
] |
LoggingSystem
| true
| 4
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
mapKeysToLayers
|
/**
 * Builds a lookup from property key to the layer that provides it.
 * When several layers expose the same key, the last layer in the list wins.
 *
 * @param layers the proxy layers to index
 * @returns a map from each exposed key to its owning layer
 */
function mapKeysToLayers(layers: CompositeProxyLayer[]) {
  const keyOwners = new Map<string | symbol, CompositeProxyLayer>()
  for (const layer of layers) {
    layer.getKeys().forEach((key) => keyOwners.set(key, layer))
  }
  return keyOwners
}
|
Creates a proxy from a set of layers.
Each layer is a building for a proxy (potentially, reusable) that
can add or override property on top of the target.
When multiple layers define the same property, last one wins
@param target
@param layers
@returns
|
typescript
|
packages/client/src/runtime/core/compositeProxy/createCompositeProxy.ts
| 130
|
[
"layers"
] | false
| 1
| 6.08
|
prisma/prisma
| 44,834
|
jsdoc
| false
|
|
isDeprecated
|
/**
 * Checks whether the given element should be considered deprecated: it must
 * not be in the exclusion list, and either carries a deprecation itself or,
 * for fields and methods, inherits one from its enclosing element.
 *
 * @param element the element to check (may be {@code null})
 * @return {@code true} if the element is considered deprecated
 */
boolean isDeprecated(Element element) {
    if (element == null) {
        return false;
    }
    String qualifiedName = element.getEnclosingElement() + "#" + element.getSimpleName();
    if (DEPRECATION_EXCLUDES.contains(qualifiedName)) {
        return false;
    }
    if (isElementDeprecated(element)) {
        return true;
    }
    boolean isMember = element instanceof VariableElement || element instanceof ExecutableElement;
    return isMember && isElementDeprecated(element.getEnclosingElement());
}
|
Resolve the {@link SourceMetadata} for the specified property.
@param field the field of the property (can be {@code null})
@param getter the getter of the property (can be {@code null})
@return the {@link SourceMetadata} for the specified property
|
java
|
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/MetadataGenerationEnvironment.java
| 183
|
[
"element"
] | true
| 6
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
remaining_estimate
|
def remaining_estimate(self, last_run_at: datetime) -> timedelta:
    """Return estimate of next time to run.

    Returns:
        ~datetime.timedelta: when the periodic task should
            run next, or if it shouldn't run today (e.g., the sun does
            not rise today), returns the time when the next check
            should take place.
    """
    last_run_at = self.maybe_make_aware(last_run_at)
    last_run_at_utc = localize(last_run_at, timezone.utc)
    # Anchor the ephem observer at the last run so the event search starts there.
    self.cal.date = last_run_at_utc
    try:
        if self.use_center:
            # NOTE(review): presumably only some ephem event methods accept
            # use_center, hence the branch — confirm against self.method setup.
            next_utc = getattr(self.cal, self.method)(
                self.ephem.Sun(),
                start=last_run_at_utc, use_center=self.use_center
            )
        else:
            next_utc = getattr(self.cal, self.method)(
                self.ephem.Sun(), start=last_run_at_utc
            )
    except self.ephem.CircumpolarError:  # pragma: no cover
        # Sun won't rise/set today. Check again tomorrow
        # (specifically, after the next anti-transit).
        next_utc = (
            self.cal.next_antitransit(self.ephem.Sun()) +
            timedelta(minutes=1)
        )
    next = self.maybe_make_aware(next_utc.datetime())
    now = self.maybe_make_aware(self.now())
    # Positive delta: time until the next event (or next check) from now.
    delta = next - now
    return delta
|
Return estimate of next time to run.
Returns:
~datetime.timedelta: when the periodic task should
run next, or if it shouldn't run today (e.g., the sun does
not rise today), returns the time when the next check
should take place.
|
python
|
celery/schedules.py
| 828
|
[
"self",
"last_run_at"
] |
timedelta
| true
| 3
| 6.88
|
celery/celery
| 27,741
|
unknown
| false
|
builder
|
/**
 * Constructs a new {@link Builder} instance.
 *
 * @return a new {@link Builder} instance.
 */
public static final Builder builder() {
    return new Builder();
}
|
Constructs a new {@link Builder} instance.
@return a new {@link Builder} instance.
|
java
|
src/main/java/org/apache/commons/lang3/Strings.java
| 280
|
[] |
Builder
| true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
run
|
public static <V> FutureTask<V> run(final Callable<V> callable) {
final FutureTask<V> futureTask = new FutureTask<>(callable);
futureTask.run();
return futureTask;
}
|
Creates a {@link FutureTask} and runs the given {@link Callable}.
@param <V> The result type returned by this FutureTask's {@code get} methods.
@param callable the Callable task.
@return a new FutureTask.
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/FutureTasks.java
| 36
|
[
"callable"
] | true
| 1
| 6.88
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
get_optional_arg
|
def get_optional_arg(annotation: typing.Any) -> typing.Any:
"""Get the argument from an Optional[...] annotation, or None if it is no such annotation."""
origin = typing.get_origin(annotation)
if origin != typing.Union and (sys.version_info >= (3, 10) and origin != types.UnionType):
return None
union_args = typing.get_args(annotation)
if len(union_args) != 2: # Union does _not_ have two members, so it's not an Optional
return None
has_none_arg = any(is_none_type(arg) for arg in union_args)
# There will always be at least one type arg, as we have already established that this is a Union with exactly
# two members, and both cannot be None (`Union[None, None]` does not work).
type_arg = next(arg for arg in union_args if not is_none_type(arg)) # pragma: no branch
if has_none_arg:
return type_arg
return None
|
Get the argument from an Optional[...] annotation, or None if it is no such annotation.
|
python
|
celery/utils/annotations.py
| 17
|
[
"annotation"
] |
typing.Any
| true
| 6
| 6
|
celery/celery
| 27,741
|
unknown
| false
|
configure
|
private void configure(Supplier<@Nullable DateTimeFormatter> supplier, Consumer<DateTimeFormatter> consumer) {
DateTimeFormatter formatter = supplier.get();
if (formatter != null) {
consumer.accept(formatter);
}
}
|
Create a new WebConversionService that configures formatters with the provided
date, time, and date-time formats, or registers the default if no custom format is
provided.
@param dateTimeFormatters the formatters to use for date, time, and date-time
formatting
@since 2.3.0
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/web/format/WebConversionService.java
| 84
|
[
"supplier",
"consumer"
] |
void
| true
| 2
| 6.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
invertFrom
|
@CanIgnoreReturnValue
public static <K extends @Nullable Object, V extends @Nullable Object, M extends Multimap<K, V>>
M invertFrom(Multimap<? extends V, ? extends K> source, M dest) {
checkNotNull(dest);
for (Map.Entry<? extends V, ? extends K> entry : source.entries()) {
dest.put(entry.getValue(), entry.getKey());
}
return dest;
}
|
Copies each key-value mapping in {@code source} into {@code dest}, with its key and value
reversed.
<p>If {@code source} is an {@link ImmutableMultimap}, consider using {@link
ImmutableMultimap#inverse} instead.
@param source any multimap
@param dest the multimap to copy into; usually empty
@return {@code dest}
|
java
|
android/guava/src/com/google/common/collect/Multimaps.java
| 596
|
[
"source",
"dest"
] |
M
| true
| 1
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
getLong
|
public long getLong(String name) throws JSONException {
Object object = get(name);
Long result = JSON.toLong(object);
if (result == null) {
throw JSON.typeMismatch(name, object, "long");
}
return result;
}
|
Returns the value mapped by {@code name} if it exists and is a long or can be
coerced to a long. Note that JSON represents numbers as doubles, so this is
<a href="#lossy">lossy</a>; use strings to transfer numbers over JSON.
@param name the name of the property
@return the value
@throws JSONException if the mapping doesn't exist or cannot be coerced to a long.
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONObject.java
| 514
|
[
"name"
] | true
| 2
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
addConstructor
|
function addConstructor(statements: Statement[], node: ClassExpression | ClassDeclaration, name: Identifier, extendsClauseElement: ExpressionWithTypeArguments | undefined): void {
const savedConvertedLoopState = convertedLoopState;
convertedLoopState = undefined;
const ancestorFacts = enterSubtree(HierarchyFacts.ConstructorExcludes, HierarchyFacts.ConstructorIncludes);
const constructor = getFirstConstructorWithBody(node);
const hasSynthesizedSuper = hasSynthesizedDefaultSuperCall(constructor, extendsClauseElement !== undefined);
const constructorFunction = factory.createFunctionDeclaration(
/*modifiers*/ undefined,
/*asteriskToken*/ undefined,
name,
/*typeParameters*/ undefined,
transformConstructorParameters(constructor, hasSynthesizedSuper),
/*type*/ undefined,
transformConstructorBody(constructor, node, extendsClauseElement, hasSynthesizedSuper),
);
setTextRange(constructorFunction, constructor || node);
if (extendsClauseElement) {
setEmitFlags(constructorFunction, EmitFlags.CapturesThis);
}
statements.push(constructorFunction);
exitSubtree(ancestorFacts, HierarchyFacts.FunctionSubtreeExcludes, HierarchyFacts.None);
convertedLoopState = savedConvertedLoopState;
}
|
Adds the constructor of the class to a class body function.
@param statements The statements of the class body function.
@param node The ClassExpression or ClassDeclaration node.
@param extendsClauseElement The expression for the class `extends` clause.
|
typescript
|
src/compiler/transformers/es2015.ts
| 1,147
|
[
"statements",
"node",
"name",
"extendsClauseElement"
] | true
| 3
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
one_hot
|
def one_hot(
x: Array,
/,
num_classes: int,
*,
dtype: DType | None = None,
axis: int = -1,
xp: ModuleType | None = None,
) -> Array:
"""
One-hot encode the given indices.
Each index in the input `x` is encoded as a vector of zeros of length `num_classes`
with the element at the given index set to one.
Parameters
----------
x : array
An array with integral dtype whose values are between `0` and `num_classes - 1`.
num_classes : int
Number of classes in the one-hot dimension.
dtype : DType, optional
The dtype of the return value. Defaults to the default float dtype (usually
float64).
axis : int, optional
Position in the expanded axes where the new axis is placed. Default: -1.
xp : array_namespace, optional
The standard-compatible namespace for `x`. Default: infer.
Returns
-------
array
An array having the same shape as `x` except for a new axis at the position
given by `axis` having size `num_classes`. If `axis` is unspecified, it
defaults to -1, which appends a new axis.
If ``x < 0`` or ``x >= num_classes``, then the result is undefined, may raise
an exception, or may even cause a bad state. `x` is not checked.
Examples
--------
>>> import array_api_extra as xpx
>>> import array_api_strict as xp
>>> xpx.one_hot(xp.asarray([1, 2, 0]), 3)
Array([[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.]], dtype=array_api_strict.float64)
"""
# Validate inputs.
if xp is None:
xp = array_namespace(x)
if not xp.isdtype(x.dtype, "integral"):
msg = "x must have an integral dtype."
raise TypeError(msg)
if dtype is None:
dtype = _funcs.default_dtype(xp, device=get_device(x))
# Delegate where possible.
if is_jax_namespace(xp):
from jax.nn import one_hot as jax_one_hot
return jax_one_hot(x, num_classes, dtype=dtype, axis=axis)
if is_torch_namespace(xp):
from torch.nn.functional import one_hot as torch_one_hot
x = xp.astype(x, xp.int64) # PyTorch only supports int64 here.
try:
out = torch_one_hot(x, num_classes)
except RuntimeError as e:
raise IndexError from e
else:
out = _funcs.one_hot(x, num_classes, xp=xp)
out = xp.astype(out, dtype, copy=False)
if axis != -1:
out = xp.moveaxis(out, -1, axis)
return out
|
One-hot encode the given indices.
Each index in the input `x` is encoded as a vector of zeros of length `num_classes`
with the element at the given index set to one.
Parameters
----------
x : array
An array with integral dtype whose values are between `0` and `num_classes - 1`.
num_classes : int
Number of classes in the one-hot dimension.
dtype : DType, optional
The dtype of the return value. Defaults to the default float dtype (usually
float64).
axis : int, optional
Position in the expanded axes where the new axis is placed. Default: -1.
xp : array_namespace, optional
The standard-compatible namespace for `x`. Default: infer.
Returns
-------
array
An array having the same shape as `x` except for a new axis at the position
given by `axis` having size `num_classes`. If `axis` is unspecified, it
defaults to -1, which appends a new axis.
If ``x < 0`` or ``x >= num_classes``, then the result is undefined, may raise
an exception, or may even cause a bad state. `x` is not checked.
Examples
--------
>>> import array_api_extra as xpx
>>> import array_api_strict as xp
>>> xpx.one_hot(xp.asarray([1, 2, 0]), 3)
Array([[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.]], dtype=array_api_strict.float64)
|
python
|
sklearn/externals/array_api_extra/_delegation.py
| 195
|
[
"x",
"num_classes",
"dtype",
"axis",
"xp"
] |
Array
| true
| 8
| 8.4
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
explicitlyInheritsFrom
|
function explicitlyInheritsFrom(symbol: Symbol, parent: Symbol, cachedResults: Map<string, boolean>, checker: TypeChecker): boolean {
if (symbol === parent) {
return true;
}
const key = getSymbolId(symbol) + "," + getSymbolId(parent);
const cached = cachedResults.get(key);
if (cached !== undefined) {
return cached;
}
// Set the key so that we don't infinitely recurse
cachedResults.set(key, false);
const inherits = !!symbol.declarations && symbol.declarations.some(declaration =>
getAllSuperTypeNodes(declaration).some(typeReference => {
const type = checker.getTypeAtLocation(typeReference);
return !!type && !!type.symbol && explicitlyInheritsFrom(type.symbol, parent, cachedResults, checker);
})
);
cachedResults.set(key, inherits);
return inherits;
}
|
Determines if the parent symbol occurs somewhere in the child's ancestry. If the parent symbol
is an interface, determines if some ancestor of the child symbol extends or inherits from it.
Also takes in a cache of previous results which makes this slightly more efficient and is
necessary to avoid potential loops like so:
class A extends B { }
class B extends A { }
We traverse the AST rather than using the type checker because users are typically only interested
in explicit implementations of an interface/class when calling "Go to Implementation". Sibling
implementations of types that share a common ancestor with the type whose implementation we are
searching for need to be filtered out of the results. The type checker doesn't let us make the
distinction between structurally compatible implementations and explicit implementations, so we
must use the AST.
@param symbol A class or interface Symbol
@param parent Another class or interface Symbol
@param cachedResults A map of symbol id pairs (i.e. "child,parent") to booleans indicating previous results
|
typescript
|
src/services/findAllReferences.ts
| 2,338
|
[
"symbol",
"parent",
"cachedResults",
"checker"
] | true
| 6
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
omitEmptyStrings
|
public Splitter omitEmptyStrings() {
return new Splitter(strategy, true, trimmer, limit);
}
|
Returns a splitter that behaves equivalently to {@code this} splitter, but automatically omits
empty strings from the results. For example, {@code
Splitter.on(',').omitEmptyStrings().split(",a,,,b,c,,")} returns an iterable containing only
{@code ["a", "b", "c"]}.
<p>If either {@code trimResults} option is also specified when creating a splitter, that
splitter always trims results first before checking for emptiness. So, for example, {@code
Splitter.on(':').omitEmptyStrings().trimResults().split(": : : ")} returns an empty iterable.
<p>Note that it is ordinarily not possible for {@link #split(CharSequence)} to return an empty
iterable, but when using this option, it can (if the input sequence consists of nothing but
separators).
@return a splitter with the desired configuration
|
java
|
android/guava/src/com/google/common/base/Splitter.java
| 306
|
[] |
Splitter
| true
| 1
| 6.16
|
google/guava
| 51,352
|
javadoc
| false
|
wrapInstance
|
public static <T> Plugin<T> wrapInstance(T instance, Metrics metrics, String key, String name, String value) {
Supplier<Map<String, String>> tagsSupplier = () -> {
Map<String, String> tags = tags(key, instance);
tags.put(name, value);
return tags;
};
return wrapInstance(instance, metrics, tagsSupplier);
}
|
Wrap an instance into a Plugin.
@param instance the instance to wrap
@param metrics the metrics
@param name extra tag name to add
@param value extra tag value to add
@param key the value for the <code>config</code> tag
@return the plugin
|
java
|
clients/src/main/java/org/apache/kafka/common/internals/Plugin.java
| 86
|
[
"instance",
"metrics",
"key",
"name",
"value"
] | true
| 1
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
_get_group_names
|
def _get_group_names(regex: re.Pattern) -> list[Hashable] | range:
"""
Get named groups from compiled regex.
Unnamed groups are numbered.
Parameters
----------
regex : compiled regex
Returns
-------
list of column labels
"""
rng = range(regex.groups)
names = {v: k for k, v in regex.groupindex.items()}
if not names:
return rng
result: list[Hashable] = [names.get(1 + i, i) for i in rng]
arr = np.array(result)
if arr.dtype.kind == "i" and lib.is_range_indexer(arr, len(arr)):
return rng
return result
|
Get named groups from compiled regex.
Unnamed groups are numbered.
Parameters
----------
regex : compiled regex
Returns
-------
list of column labels
|
python
|
pandas/core/strings/accessor.py
| 3,942
|
[
"regex"
] |
list[Hashable] | range
| true
| 4
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
requireNonNullMode
|
private static void requireNonNullMode(ConnectionMode connectionMode, SecurityProtocol securityProtocol) {
if (connectionMode == null)
throw new IllegalArgumentException("`mode` must be non-null if `securityProtocol` is `" + securityProtocol + "`");
}
|
@return a mutable RecordingMap. The elements got from RecordingMap are marked as "used".
|
java
|
clients/src/main/java/org/apache/kafka/common/network/ChannelBuilders.java
| 214
|
[
"connectionMode",
"securityProtocol"
] |
void
| true
| 2
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
toString
|
@Deprecated
public static String toString(final byte[] bytes, final String charsetName) {
return new String(bytes, Charsets.toCharset(charsetName));
}
|
Converts a {@code byte[]} to a String using the specified character encoding.
@param bytes the byte array to read from.
@param charsetName the encoding to use, if null then use the platform default.
@return a new String.
@throws NullPointerException if the input is null.
@deprecated Use {@link StringUtils#toEncodedString(byte[], Charset)} instead of String constants in your code.
@since 3.1
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 8,697
|
[
"bytes",
"charsetName"
] |
String
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
isAnyBlank
|
public static boolean isAnyBlank(final CharSequence... css) {
if (ArrayUtils.isEmpty(css)) {
return false;
}
for (final CharSequence cs : css) {
if (isBlank(cs)) {
return true;
}
}
return false;
}
|
Tests if any of the CharSequences are {@link #isBlank(CharSequence) blank} (whitespaces, empty ({@code ""}) or {@code null}).
<p>
Whitespace is defined by {@link Character#isWhitespace(char)}.
</p>
<pre>
StringUtils.isAnyBlank((String) null) = true
StringUtils.isAnyBlank((String[]) null) = false
StringUtils.isAnyBlank(null, "foo") = true
StringUtils.isAnyBlank(null, null) = true
StringUtils.isAnyBlank("", "bar") = true
StringUtils.isAnyBlank("bob", "") = true
StringUtils.isAnyBlank(" bob ", null) = true
StringUtils.isAnyBlank(" ", "bar") = true
StringUtils.isAnyBlank(new String[] {}) = false
StringUtils.isAnyBlank(new String[]{""}) = true
StringUtils.isAnyBlank("foo", "bar") = false
</pre>
@param css the CharSequences to check, may be null or empty.
@return {@code true} if any of the CharSequences are {@link #isBlank(CharSequence) blank} (whitespaces, empty ({@code ""}) or {@code null}).
@see #isBlank(CharSequence)
@since 3.2
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 3,408
|
[] | true
| 3
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
emit_dispatch_case
|
def emit_dispatch_case(
overload: PythonSignatureGroup,
structseq_typenames: dict[str, str],
*,
symint: bool = True,
) -> str:
"""
Emit dispatch code for a single parsed signature. This corresponds to either
a single native function, or a pair that differ only in output params. In the
latter case, a single python signature is used for both and dispatching
switches on the presence/absence of passed output args.
"""
if overload.outplace is not None:
# dispatch output and no-output variants, branch on _r.isNone(<out_idx>)
return PY_VARIABLE_OUT.substitute(
out_idx=overload.signature.output_idx(),
call_dispatch=emit_single_dispatch(
overload.signature, overload.base, structseq_typenames, symint=symint
),
call_dispatch_out=emit_single_dispatch(
overload.signature,
overload.outplace,
structseq_typenames,
symint=symint,
),
)
else:
# no-output version only
return emit_single_dispatch(
overload.signature, overload.base, structseq_typenames, symint=symint
)
|
Emit dispatch code for a single parsed signature. This corresponds to either
a single native function, or a pair that differ only in output params. In the
latter case, a single python signature is used for both and dispatching
switches on the presence/absence of passed output args.
|
python
|
tools/autograd/gen_python_functions.py
| 1,014
|
[
"overload",
"structseq_typenames",
"symint"
] |
str
| true
| 3
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
toOffsetDateTime
|
public static OffsetDateTime toOffsetDateTime(final Date date) {
return toOffsetDateTime(date, TimeZone.getDefault());
}
|
Converts a {@link Date} to a {@link OffsetDateTime}.
@param date the Date to convert, not null.
@return a new OffsetDateTime.
@since 3.19.0
|
java
|
src/main/java/org/apache/commons/lang3/time/DateUtils.java
| 1,662
|
[
"date"
] |
OffsetDateTime
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
toString
|
public static String toString(URL url, Charset charset) throws IOException {
return asCharSource(url, charset).read();
}
|
Reads all characters from a URL into a {@link String}, using the given character set.
@param url the URL to read from
@param charset the charset used to decode the input stream; see {@link StandardCharsets} for
helpful predefined constants
@return a string containing all the characters from the URL
@throws IOException if an I/O error occurs.
|
java
|
android/guava/src/com/google/common/io/Resources.java
| 107
|
[
"url",
"charset"
] |
String
| true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
guessPropertyTypeFromEditors
|
protected @Nullable Class<?> guessPropertyTypeFromEditors(String propertyName) {
if (this.customEditorsForPath != null) {
CustomEditorHolder editorHolder = this.customEditorsForPath.get(propertyName);
if (editorHolder == null) {
List<String> strippedPaths = new ArrayList<>();
addStrippedPropertyPaths(strippedPaths, "", propertyName);
for (Iterator<String> it = strippedPaths.iterator(); it.hasNext() && editorHolder == null;) {
String strippedName = it.next();
editorHolder = this.customEditorsForPath.get(strippedName);
}
}
if (editorHolder != null) {
return editorHolder.getRegisteredType();
}
}
return null;
}
|
Guess the property type of the specified property from the registered
custom editors (provided that they were registered for a specific type).
@param propertyName the name of the property
@return the property type, or {@code null} if not determinable
|
java
|
spring-beans/src/main/java/org/springframework/beans/PropertyEditorRegistrySupport.java
| 447
|
[
"propertyName"
] | true
| 6
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
toCharset
|
static Charset toCharset(final Charset charset) {
return charset == null ? Charset.defaultCharset() : charset;
}
|
Returns the given {@code charset} or the default Charset if {@code charset} is null.
@param charset a Charset or null.
@return the given {@code charset} or the default Charset if {@code charset} is null.
|
java
|
src/main/java/org/apache/commons/lang3/Charsets.java
| 43
|
[
"charset"
] |
Charset
| true
| 2
| 8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
field
|
public XContentBuilder field(String name, Byte value) throws IOException {
return (value == null) ? nullField(name) : field(name, value.byteValue());
}
|
@return the value of the "human readable" flag. When the value is equal to true,
some types of values are written in a format easier to read for a human.
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java
| 431
|
[
"name",
"value"
] |
XContentBuilder
| true
| 2
| 6.96
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
shouldSkipHeartbeat
|
public boolean shouldSkipHeartbeat() {
return isNotInGroup();
}
|
@return True if the member should not send heartbeats, which is the case when it is in a
state where it is not an active member of the group.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsMembershipManager.java
| 599
|
[] | true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
appendln
|
public StrBuilder appendln(final char ch) {
return append(ch).appendNewLine();
}
|
Appends a char value followed by a new line to the string builder.
@param ch the value to append
@return {@code this} instance.
@since 2.3
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 945
|
[
"ch"
] |
StrBuilder
| true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
token
|
protected abstract boolean token(
String aggregationName,
String currentFieldName,
XContentParser.Token token,
XContentParser parser,
Map<ParseField, Object> otherOptions
) throws IOException;
|
Allows subclasses of {@link ArrayValuesSourceParser} to parse extra
parameters and store them in a {@link Map} which will later be passed to
{@link #createFactory(String, ValuesSourceType, Map)}.
@param aggregationName
the name of the aggregation
@param currentFieldName
the name of the current field being parsed
@param token
the current token for the parser
@param parser
the parser
@param otherOptions
a {@link Map} of options to be populated by successive calls
to this method which will then be passed to the
{@link #createFactory(String, ValuesSourceType, Map)}
method
@return <code>true</code> if the current token was correctly parsed,
<code>false</code> otherwise
@throws IOException
if an error occurs whilst parsing
|
java
|
modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceParser.java
| 247
|
[
"aggregationName",
"currentFieldName",
"token",
"parser",
"otherOptions"
] | true
| 1
| 6.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
deactivate_stale_dags
|
def deactivate_stale_dags(
self,
last_parsed: dict[DagFileInfo, datetime | None],
session: Session = NEW_SESSION,
):
"""Detect and deactivate DAGs which are no longer present in files."""
to_deactivate = set()
bundle_names = {b.name for b in self._dag_bundles}
query = select(
DagModel.dag_id,
DagModel.bundle_name,
DagModel.fileloc,
DagModel.last_parsed_time,
DagModel.relative_fileloc,
).where(~DagModel.is_stale, DagModel.bundle_name.in_(bundle_names))
dags_parsed = session.execute(query)
for dag in dags_parsed:
# When the Dag's last_parsed_time is more than the stale_dag_threshold older than the
# Dag file's last_finish_time, the Dag is considered stale as has apparently been removed from the file,
# This is especially relevant for Dag files that generate Dags in a dynamic manner.
file_info = DagFileInfo(rel_path=Path(dag.relative_fileloc), bundle_name=dag.bundle_name)
if last_finish_time := last_parsed.get(file_info, None):
if dag.last_parsed_time + timedelta(seconds=self.stale_dag_threshold) < last_finish_time:
self.log.info("DAG %s is missing and will be deactivated.", dag.dag_id)
to_deactivate.add(dag.dag_id)
if to_deactivate:
deactivated_dagmodel = session.execute(
update(DagModel)
.where(DagModel.dag_id.in_(to_deactivate))
.values(is_stale=True)
.execution_options(synchronize_session="fetch")
)
deactivated = getattr(deactivated_dagmodel, "rowcount", 0)
if deactivated:
self.log.info("Deactivated %i DAGs which are no longer present in file.", deactivated)
|
Detect and deactivate DAGs which are no longer present in files.
|
python
|
airflow-core/src/airflow/dag_processing/manager.py
| 299
|
[
"self",
"last_parsed",
"session"
] | true
| 6
| 6
|
apache/airflow
| 43,597
|
unknown
| false
|
|
resolve
|
@Nullable Entry resolve(String name);
|
Resolve the entry with the specified name, return {@code null} if the entry should
not be handled.
@param name the name of the entry to handle
@return the resolved {@link Entry}
|
java
|
loader/spring-boot-jarmode-tools/src/main/java/org/springframework/boot/jarmode/tools/JarStructure.java
| 49
|
[
"name"
] |
Entry
| true
| 1
| 6.8
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
createBuildInfo
|
protected Properties createBuildInfo(ProjectDetails project) {
Properties properties = CollectionFactory.createSortedProperties(true);
addIfHasValue(properties, "build.group", project.getGroup());
addIfHasValue(properties, "build.artifact", project.getArtifact());
addIfHasValue(properties, "build.name", project.getName());
addIfHasValue(properties, "build.version", project.getVersion());
if (project.getTime() != null) {
properties.put("build.time", DateTimeFormatter.ISO_INSTANT.format(project.getTime()));
}
if (project.getAdditionalProperties() != null) {
project.getAdditionalProperties().forEach((name, value) -> properties.put("build." + name, value));
}
return properties;
}
|
Creates a new {@code BuildPropertiesWriter} that will write to the given
{@code outputFile}.
@param outputFile the output file
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/BuildPropertiesWriter.java
| 76
|
[
"project"
] |
Properties
| true
| 3
| 6.4
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
autodiscover_modules
|
def autodiscover_modules(*args, **kwargs):
"""
Auto-discover INSTALLED_APPS modules and fail silently when
not present. This forces an import on them to register any admin bits they
may want.
You may provide a register_to keyword parameter as a way to access a
registry. This register_to object must have a _registry instance variable
to access it.
"""
from django.apps import apps
register_to = kwargs.get("register_to")
for app_config in apps.get_app_configs():
for module_to_search in args:
# Attempt to import the app's module.
try:
if register_to:
before_import_registry = copy.copy(register_to._registry)
import_module("%s.%s" % (app_config.name, module_to_search))
except Exception:
# Reset the registry to the state before the last import
# as this import will have to reoccur on the next request and
# this could raise NotRegistered and AlreadyRegistered
# exceptions (see #8245).
if register_to:
register_to._registry = before_import_registry
# Decide whether to bubble up this error. If the app just
# doesn't have the module in question, we can ignore the error
# attempting to import it, otherwise we want it to bubble up.
if module_has_submodule(app_config.module, module_to_search):
raise
|
Auto-discover INSTALLED_APPS modules and fail silently when
not present. This forces an import on them to register any admin bits they
may want.
You may provide a register_to keyword parameter as a way to access a
registry. This register_to object must have a _registry instance variable
to access it.
|
python
|
django/utils/module_loading.py
| 38
|
[] | false
| 6
| 6.4
|
django/django
| 86,204
|
unknown
| false
|
|
toBin
|
public int toBin(double x) {
int binNumber = (int) ((x - min) / bucketWidth);
if (binNumber < MIN_BIN_NUMBER) {
return MIN_BIN_NUMBER;
}
return Math.min(binNumber, maxBinNumber);
}
|
Create a bin scheme with the specified number of bins that all have the same width.
@param bins the number of bins; must be at least 2
@param min the minimum value to be counted in the bins
@param max the maximum value to be counted in the bins
|
java
|
clients/src/main/java/org/apache/kafka/common/metrics/stats/Histogram.java
| 153
|
[
"x"
] | true
| 2
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
resetCaches
|
default void resetCaches() {
for (String cacheName : getCacheNames()) {
Cache cache = getCache(cacheName);
if (cache != null) {
cache.clear();
}
}
}
|
Remove all registered caches from this cache manager if possible,
re-creating them on demand. After this call, {@link #getCacheNames()}
will possibly be empty and the cache provider will have dropped all
cache management state.
<p>Alternatively, an implementation may perform an equivalent reset
on fixed existing cache regions without actually dropping the cache.
This behavior will be indicated by {@link #getCacheNames()} still
exposing a non-empty set of names, whereas the corresponding cache
regions will not contain cache entries anymore.
<p>The default implementation calls {@link Cache#clear} on all
registered caches, retaining all caches as registered, satisfying
the alternative implementation path above. Custom implementations
may either drop the actual caches (re-creating them on demand) or
perform a more exhaustive reset at the actual cache provider level.
@since 7.0.2
@see Cache#clear()
|
java
|
spring-context/src/main/java/org/springframework/cache/CacheManager.java
| 69
|
[] |
void
| true
| 2
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getContent
|
public String getContent() {
if (chars == null) {
return null;
}
return new String(chars);
}
|
Gets the String content that the tokenizer is parsing.
@return the string content being parsed.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrTokenizer.java
| 481
|
[] |
String
| true
| 2
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
deleteAcls
|
default DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters) {
return deleteAcls(filters, new DeleteAclsOptions());
}
|
This is a convenience method for {@link #deleteAcls(Collection, DeleteAclsOptions)} with default options.
See the overload for more details.
<p>
This operation is supported by brokers with version 0.11.0.0 or higher.
@param filters The filters to use.
@return The DeleteAclsResult.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 422
|
[
"filters"
] |
DeleteAclsResult
| true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
order_queued_tasks_by_priority
|
def order_queued_tasks_by_priority(self) -> list[tuple[TaskInstanceKey, workloads.ExecuteTask]]:
"""
Orders the queued tasks by priority.
:return: List of workloads from the queued_tasks according to the priority.
"""
if not self.queued_tasks:
return []
# V3 + new executor that supports workloads
return sorted(
self.queued_tasks.items(),
key=lambda x: x[1].ti.priority_weight,
reverse=True,
)
|
Orders the queued tasks by priority.
:return: List of workloads from the queued_tasks according to the priority.
|
python
|
airflow-core/src/airflow/executors/base_executor.py
| 335
|
[
"self"
] |
list[tuple[TaskInstanceKey, workloads.ExecuteTask]]
| true
| 2
| 7.04
|
apache/airflow
| 43,597
|
unknown
| false
|
getInvalidChars
|
private static List<Character> getInvalidChars(Elements elements, int index) {
List<Character> invalidChars = new ArrayList<>();
for (int charIndex = 0; charIndex < elements.getLength(index); charIndex++) {
char ch = elements.charAt(index, charIndex);
if (!ElementsParser.isValidChar(ch, charIndex)) {
invalidChars.add(ch);
}
}
return invalidChars;
}
|
Return a {@link ConfigurationPropertyName} for the specified string.
@param name the source name
@param returnNullIfInvalid if null should be returned if the name is not valid
@return a {@link ConfigurationPropertyName} instance
@throws InvalidConfigurationPropertyNameException if the name is not valid and
{@code returnNullIfInvalid} is {@code false}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyName.java
| 701
|
[
"elements",
"index"
] | true
| 3
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
predict
|
def predict(self, X, **params):
"""Transform the data, and apply `predict` with the final estimator.
Call `transform` of each transformer in the pipeline. The transformed
data are finally passed to the final estimator that calls `predict`
method. Only valid if the final estimator implements `predict`.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
**params : dict of str -> object
- If `enable_metadata_routing=False` (default): Parameters to the
``predict`` called at the end of all transformations in the pipeline.
- If `enable_metadata_routing=True`: Parameters requested and accepted by
steps. Each step must have requested certain metadata for these parameters
to be forwarded to them.
.. versionadded:: 0.20
.. versionchanged:: 1.4
Parameters are now passed to the ``transform`` method of the
intermediate steps as well, if requested, and if
`enable_metadata_routing=True` is set via
:func:`~sklearn.set_config`.
See :ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Note that while this may be used to return uncertainties from some
models with ``return_std`` or ``return_cov``, uncertainties that are
generated by the transformations in the pipeline are not propagated
to the final estimator.
Returns
-------
y_pred : ndarray
Result of calling `predict` on the final estimator.
"""
check_is_fitted(self)
Xt = X
if not _routing_enabled():
for _, name, transform in self._iter(with_final=False):
Xt = transform.transform(Xt)
return self.steps[-1][1].predict(Xt, **params)
# metadata routing enabled
routed_params = process_routing(self, "predict", **params)
for _, name, transform in self._iter(with_final=False):
Xt = transform.transform(Xt, **routed_params[name].transform)
return self.steps[-1][1].predict(Xt, **routed_params[self.steps[-1][0]].predict)
|
Transform the data, and apply `predict` with the final estimator.
Call `transform` of each transformer in the pipeline. The transformed
data are finally passed to the final estimator that calls `predict`
method. Only valid if the final estimator implements `predict`.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
**params : dict of str -> object
- If `enable_metadata_routing=False` (default): Parameters to the
``predict`` called at the end of all transformations in the pipeline.
- If `enable_metadata_routing=True`: Parameters requested and accepted by
steps. Each step must have requested certain metadata for these parameters
to be forwarded to them.
.. versionadded:: 0.20
.. versionchanged:: 1.4
Parameters are now passed to the ``transform`` method of the
intermediate steps as well, if requested, and if
`enable_metadata_routing=True` is set via
:func:`~sklearn.set_config`.
See :ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Note that while this may be used to return uncertainties from some
models with ``return_std`` or ``return_cov``, uncertainties that are
generated by the transformations in the pipeline are not propagated
to the final estimator.
Returns
-------
y_pred : ndarray
Result of calling `predict` on the final estimator.
|
python
|
sklearn/pipeline.py
| 699
|
[
"self",
"X"
] | false
| 4
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
tryParse
|
@GwtIncompatible // regular expressions
public static @Nullable Double tryParse(String string) {
if (FLOATING_POINT_PATTERN.matcher(string).matches()) {
// TODO(lowasser): could be potentially optimized, but only with
// extensive testing
try {
return Double.parseDouble(string);
} catch (NumberFormatException e) {
// Double.parseDouble has changed specs several times, so fall through
// gracefully
}
}
return null;
}
|
Parses the specified string as a double-precision floating point value. The ASCII character
{@code '-'} (<code>'\u002D'</code>) is recognized as the minus sign.
<p>Unlike {@link Double#parseDouble(String)}, this method returns {@code null} instead of
throwing an exception if parsing fails. Valid inputs are exactly those accepted by {@link
Double#valueOf(String)}, except that leading and trailing whitespace is not permitted.
<p>This implementation is likely to be faster than {@code Double.parseDouble} if many failures
are expected.
@param string the string representation of a {@code double} value
@return the floating point value represented by {@code string}, or {@code null} if {@code
string} has a length of zero or cannot be parsed as a {@code double} value
@throws NullPointerException if {@code string} is {@code null}
@since 14.0
|
java
|
android/guava/src/com/google/common/primitives/Doubles.java
| 772
|
[
"string"
] |
Double
| true
| 3
| 7.76
|
google/guava
| 51,352
|
javadoc
| false
|
clear
|
public FluentBitSet clear(final int bitIndex) {
bitSet.clear(bitIndex);
return this;
}
|
Sets the bit specified by the index to {@code false}.
@param bitIndex the index of the bit to be cleared.
@throws IndexOutOfBoundsException if the specified index is negative.
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/util/FluentBitSet.java
| 164
|
[
"bitIndex"
] |
FluentBitSet
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
set_context
|
def set_context(self, filename):
"""
Provide filename context to airflow task handler.
:param filename: filename in which the dag is located
"""
local_loc = self._init_file(filename)
self.handler = NonCachingFileHandler(local_loc)
self.handler.setFormatter(self.formatter)
self.handler.setLevel(self.level)
if self._cur_date < datetime.today():
self._symlink_latest_log_directory()
self._cur_date = datetime.today()
return SetContextPropagate.DISABLE_PROPAGATE
|
Provide filename context to airflow task handler.
:param filename: filename in which the dag is located
|
python
|
airflow-core/src/airflow/utils/log/file_processor_handler.py
| 57
|
[
"self",
"filename"
] | false
| 2
| 6.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
getTSVInstance
|
public static StrTokenizer getTSVInstance(final char[] input) {
final StrTokenizer tok = getTSVClone();
tok.reset(input);
return tok;
}
|
Gets a new tokenizer instance which parses Tab Separated Value strings.
The default for CSV processing will be trim whitespace from both ends
(which can be overridden with the setTrimmer method).
@param input the string to parse.
@return a new tokenizer instance which parses Tab Separated Value strings.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrTokenizer.java
| 199
|
[
"input"
] |
StrTokenizer
| true
| 1
| 6.72
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
_matchesPrefix
|
function _matchesPrefix(ignoreCase: boolean, word: string, wordToMatchAgainst: string): IMatch[] | null {
if (!wordToMatchAgainst || wordToMatchAgainst.length < word.length) {
return null;
}
let matches: boolean;
if (ignoreCase) {
matches = strings.startsWithIgnoreCase(wordToMatchAgainst, word);
} else {
matches = wordToMatchAgainst.indexOf(word) === 0;
}
if (!matches) {
return null;
}
return word.length > 0 ? [{ start: 0, end: word.length }] : [];
}
|
@returns A filter which combines the provided set
of filters with an or. The *first* filters that
matches defined the return value of the returned
filter.
|
typescript
|
src/vs/base/common/filters.ts
| 47
|
[
"ignoreCase",
"word",
"wordToMatchAgainst"
] | true
| 7
| 7.04
|
microsoft/vscode
| 179,840
|
jsdoc
| false
|
|
size
|
@Override
public int size() {
Segment<K, V, E, S>[] segments = this.segments;
long sum = 0;
for (int i = 0; i < segments.length; ++i) {
sum += segments[i].count;
}
return Ints.saturatedCast(sum);
}
|
Concrete implementation of {@link Segment} for weak keys and {@link Dummy} values.
|
java
|
android/guava/src/com/google/common/collect/MapMakerInternalMap.java
| 2,345
|
[] | true
| 2
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
|
fromProperties
|
@J2ktIncompatible
@GwtIncompatible // java.util.Properties
public static ImmutableMap<String, String> fromProperties(Properties properties) {
ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
for (Enumeration<?> e = properties.propertyNames(); e.hasMoreElements(); ) {
/*
* requireNonNull is safe because propertyNames contains only non-null elements.
*
* Accordingly, we have it annotated as returning `Enumeration<? extends Object>` in our
* prototype checker's JDK. However, the checker still sees the return type as plain
* `Enumeration<?>`, probably because of one of the following two bugs (and maybe those two
* bugs are themselves just symptoms of the same underlying problem):
*
* https://github.com/typetools/checker-framework/issues/3030
*
* https://github.com/typetools/checker-framework/issues/3236
*/
String key = (String) requireNonNull(e.nextElement());
/*
* requireNonNull is safe because the key came from propertyNames...
*
* ...except that it's possible for users to insert a string key with a non-string value, and
* in that case, getProperty *will* return null.
*
* TODO(b/192002623): Handle that case: Either:
*
* - Skip non-string keys and values entirely, as proposed in the linked bug.
*
* - Throw ClassCastException instead of NullPointerException, as documented in the current
* Javadoc. (Note that we can't necessarily "just" change our call to `getProperty` to `get`
* because `get` does not consult the default properties.)
*/
builder.put(key, requireNonNull(properties.getProperty(key)));
}
return builder.buildOrThrow();
}
|
Creates an {@code ImmutableMap<String, String>} from a {@code Properties} instance. Properties
normally derive from {@code Map<Object, Object>}, but they typically contain strings, which is
awkward. This method lets you get a plain-old-{@code Map} out of a {@code Properties}.
@param properties a {@code Properties} object to be converted
@return an immutable map containing all the entries in {@code properties}
@throws ClassCastException if any key in {@code properties} is not a {@code String}
@throws NullPointerException if any key or value in {@code properties} is null
|
java
|
android/guava/src/com/google/common/collect/Maps.java
| 1,363
|
[
"properties"
] | true
| 2
| 8.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
launderException
|
private RuntimeException launderException(final Throwable throwable) {
throw new IllegalStateException("Unchecked exception", ExceptionUtils.throwUnchecked(throwable));
}
|
This method launders a Throwable to either a RuntimeException, Error or any other Exception wrapped in an
IllegalStateException.
@param throwable the throwable to laundered
@return a RuntimeException, Error or an IllegalStateException
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/Memoizer.java
| 146
|
[
"throwable"
] |
RuntimeException
| true
| 1
| 6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
duration
|
public Optional<Duration> duration() {
return duration;
}
|
Return the timestamp to be used for the ListOffsetsRequest.
@return the timestamp for the OffsetResetStrategy,
if the strategy is EARLIEST or LATEST or duration is provided
else return Optional.empty()
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AutoOffsetResetStrategy.java
| 133
|
[] | true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
nonNull
|
@SafeVarargs
public static <E> Stream<E> nonNull(final E... array) {
return nonNull(of(array));
}
|
Streams the non-null elements of an array.
@param <E> the type of elements in the collection.
@param array the array to stream or null.
@return A non-null stream that filters out null elements.
@since 3.13.0
|
java
|
src/main/java/org/apache/commons/lang3/stream/Streams.java
| 650
|
[] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
shouldParseReturnType
|
function shouldParseReturnType(returnToken: SyntaxKind.ColonToken | SyntaxKind.EqualsGreaterThanToken, isType: boolean): boolean {
if (returnToken === SyntaxKind.EqualsGreaterThanToken) {
parseExpected(returnToken);
return true;
}
else if (parseOptional(SyntaxKind.ColonToken)) {
return true;
}
else if (isType && token() === SyntaxKind.EqualsGreaterThanToken) {
// This is easy to get backward, especially in type contexts, so parse the type anyway
parseErrorAtCurrentToken(Diagnostics._0_expected, tokenToString(SyntaxKind.ColonToken));
nextToken();
return true;
}
return false;
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 4,101
|
[
"returnToken",
"isType"
] | true
| 7
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
getAsText
|
@Override
public String getAsText() {
File value = (File) getValue();
return (value != null ? value.getPath() : "");
}
|
Create a new FileEditor, using the given ResourceEditor underneath.
@param resourceEditor the ResourceEditor to use
|
java
|
spring-beans/src/main/java/org/springframework/beans/propertyeditors/FileEditor.java
| 116
|
[] |
String
| true
| 2
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
doLoadBeanDefinitions
|
protected int doLoadBeanDefinitions(InputSource inputSource, Resource resource)
throws BeanDefinitionStoreException {
try {
Document doc = doLoadDocument(inputSource, resource);
int count = registerBeanDefinitions(doc, resource);
if (logger.isDebugEnabled()) {
logger.debug("Loaded " + count + " bean definitions from " + resource);
}
return count;
}
catch (BeanDefinitionStoreException ex) {
throw ex;
}
catch (SAXParseException ex) {
throw new XmlBeanDefinitionStoreException(resource.getDescription(),
"Line " + ex.getLineNumber() + " in XML document from " + resource + " is invalid", ex);
}
catch (SAXException ex) {
throw new XmlBeanDefinitionStoreException(resource.getDescription(),
"XML document from " + resource + " is invalid", ex);
}
catch (ParserConfigurationException ex) {
throw new BeanDefinitionStoreException(resource.getDescription(),
"Parser configuration exception parsing XML from " + resource, ex);
}
catch (IOException ex) {
throw new BeanDefinitionStoreException(resource.getDescription(),
"IOException parsing XML document from " + resource, ex);
}
catch (Throwable ex) {
throw new BeanDefinitionStoreException(resource.getDescription(),
"Unexpected exception parsing XML document from " + resource, ex);
}
}
|
Actually load bean definitions from the specified XML file.
@param inputSource the SAX InputSource to read from
@param resource the resource descriptor for the XML file
@return the number of bean definitions found
@throws BeanDefinitionStoreException in case of loading or parsing errors
@see #doLoadDocument
@see #registerBeanDefinitions
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/xml/XmlBeanDefinitionReader.java
| 393
|
[
"inputSource",
"resource"
] | true
| 8
| 7.28
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
tryPreallocate
|
void tryPreallocate(Path file, long size);
|
Retrieves the actual number of bytes of disk storage used to store a specified file.
@param path the path to the file
@return an {@link OptionalLong} that contains the number of allocated bytes on disk for the file, or empty if the size is invalid
|
java
|
libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java
| 76
|
[
"file",
"size"
] |
void
| true
| 1
| 6.48
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
getMainMethod
|
private Method getMainMethod(Class<?> mainClass) throws Exception {
try {
return mainClass.getDeclaredMethod("main", String[].class);
}
catch (NoSuchMethodException ex) {
return mainClass.getDeclaredMethod("main");
}
}
|
Launch the application given the archive file and a fully configured classloader.
@param classLoader the classloader
@param mainClassName the main class to run
@param args the incoming arguments
@throws Exception if the launch fails
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/launch/Launcher.java
| 110
|
[
"mainClass"
] |
Method
| true
| 2
| 6.56
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
partition
|
def partition(a, sep):
"""
Partition each element in ``a`` around ``sep``.
For each element in ``a``, split the element at the first
occurrence of ``sep``, and return a 3-tuple containing the part
before the separator, the separator itself, and the part after
the separator. If the separator is not found, the first item of
the tuple will contain the whole string, and the second and third
ones will be the empty string.
Parameters
----------
a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
Input array
sep : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
Separator to split each string element in ``a``.
Returns
-------
out : 3-tuple:
- array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the
part before the separator
- array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the
separator
- array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the
part after the separator
See Also
--------
str.partition
Examples
--------
>>> import numpy as np
>>> x = np.array(["Numpy is nice!"])
>>> np.strings.partition(x, " ")
(array(['Numpy'], dtype='<U5'),
array([' '], dtype='<U1'),
array(['is nice!'], dtype='<U8'))
"""
a = np.asanyarray(a)
sep = np.asanyarray(sep)
if np.result_type(a, sep).char == "T":
return _partition(a, sep)
sep = sep.astype(a.dtype, copy=False)
pos = _find_ufunc(a, sep, 0, MAX)
a_len = str_len(a)
sep_len = str_len(sep)
not_found = pos < 0
buffersizes1 = np.where(not_found, a_len, pos)
buffersizes3 = np.where(not_found, 0, a_len - pos - sep_len)
out_dtype = ",".join([f"{a.dtype.char}{n}" for n in (
buffersizes1.max(),
1 if np.all(not_found) else sep_len.max(),
buffersizes3.max(),
)])
shape = np.broadcast_shapes(a.shape, sep.shape)
out = np.empty_like(a, shape=shape, dtype=out_dtype)
return _partition_index(a, sep, pos, out=(out["f0"], out["f1"], out["f2"]))
|
Partition each element in ``a`` around ``sep``.
For each element in ``a``, split the element at the first
occurrence of ``sep``, and return a 3-tuple containing the part
before the separator, the separator itself, and the part after
the separator. If the separator is not found, the first item of
the tuple will contain the whole string, and the second and third
ones will be the empty string.
Parameters
----------
a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
Input array
sep : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
Separator to split each string element in ``a``.
Returns
-------
out : 3-tuple:
- array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the
part before the separator
- array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the
separator
- array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the
part after the separator
See Also
--------
str.partition
Examples
--------
>>> import numpy as np
>>> x = np.array(["Numpy is nice!"])
>>> np.strings.partition(x, " ")
(array(['Numpy'], dtype='<U5'),
array([' '], dtype='<U1'),
array(['is nice!'], dtype='<U8'))
|
python
|
numpy/_core/strings.py
| 1,538
|
[
"a",
"sep"
] | false
| 3
| 7.44
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
findDefaultEditor
|
private @Nullable PropertyEditor findDefaultEditor(@Nullable Class<?> requiredType) {
PropertyEditor editor = null;
if (requiredType != null) {
// No custom editor -> check BeanWrapperImpl's default editors.
editor = this.propertyEditorRegistry.getDefaultEditor(requiredType);
if (editor == null && String.class != requiredType) {
// No BeanWrapper default editor -> check standard JavaBean editor.
editor = BeanUtils.findEditorByConvention(requiredType);
}
}
return editor;
}
|
Find a default editor for the given type.
@param requiredType the type to find an editor for
@return the corresponding editor, or {@code null} if none
|
java
|
spring-beans/src/main/java/org/springframework/beans/TypeConverterDelegate.java
| 337
|
[
"requiredType"
] |
PropertyEditor
| true
| 4
| 8.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
from_dataframe
|
def from_dataframe(df, allow_copy: bool = True) -> pd.DataFrame:
"""
Build a ``pd.DataFrame`` from any DataFrame supporting the interchange protocol.
.. note::
For new development, we highly recommend using the Arrow C Data Interface
alongside the Arrow PyCapsule Interface instead of the interchange protocol.
From pandas 3.0 onwards, `from_dataframe` uses the PyCapsule Interface,
only falling back to the interchange protocol if that fails.
From pandas 4.0 onwards, that fallback will no longer be available and only
the PyCapsule Interface will be used.
.. warning::
Due to severe implementation issues, we recommend only considering using the
interchange protocol in the following cases:
- converting to pandas: for pandas >= 2.0.3
- converting from pandas: for pandas >= 3.0.0
Parameters
----------
df : DataFrameXchg
Object supporting the interchange protocol, i.e. `__dataframe__` method.
allow_copy : bool, default: True
Whether to allow copying the memory to perform the conversion
(if false then zero-copy approach is requested).
Returns
-------
pd.DataFrame
A pandas DataFrame built from the provided interchange
protocol object.
See Also
--------
pd.DataFrame : DataFrame class which can be created from various input data
formats, including objects that support the interchange protocol.
Examples
--------
>>> df_not_necessarily_pandas = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
>>> interchange_object = df_not_necessarily_pandas.__dataframe__()
>>> interchange_object.column_names()
Index(['A', 'B'], dtype='str')
>>> df_pandas = pd.api.interchange.from_dataframe(
... interchange_object.select_columns_by_name(["A"])
... )
>>> df_pandas
A
0 1
1 2
These methods (``column_names``, ``select_columns_by_name``) should work
for any dataframe library which implements the interchange protocol.
"""
if isinstance(df, pd.DataFrame):
return df
if hasattr(df, "__arrow_c_stream__"):
try:
pa = import_optional_dependency("pyarrow", min_version="14.0.0")
except ImportError:
# fallback to _from_dataframe
warnings.warn(
"Conversion using Arrow PyCapsule Interface failed due to "
"missing PyArrow>=14 dependency, falling back to (deprecated) "
"interchange protocol. We recommend that you install "
"PyArrow>=14.0.0.",
UserWarning,
stacklevel=find_stack_level(),
)
else:
try:
return pa.table(df).to_pandas(zero_copy_only=not allow_copy)
except pa.ArrowInvalid as e:
raise RuntimeError(e) from e
if not hasattr(df, "__dataframe__"):
raise ValueError("`df` does not support __dataframe__")
warnings.warn(
"The Dataframe Interchange Protocol is deprecated.\n"
"For dataframe-agnostic code, you may want to look into:\n"
"- Arrow PyCapsule Interface: https://arrow.apache.org/docs/format/CDataInterface/PyCapsuleInterface.html\n"
"- Narwhals: https://github.com/narwhals-dev/narwhals\n",
Pandas4Warning,
stacklevel=find_stack_level(),
)
return _from_dataframe(
df.__dataframe__(allow_copy=allow_copy), allow_copy=allow_copy
)
|
Build a ``pd.DataFrame`` from any DataFrame supporting the interchange protocol.
.. note::
For new development, we highly recommend using the Arrow C Data Interface
alongside the Arrow PyCapsule Interface instead of the interchange protocol.
From pandas 3.0 onwards, `from_dataframe` uses the PyCapsule Interface,
only falling back to the interchange protocol if that fails.
From pandas 4.0 onwards, that fallback will no longer be available and only
the PyCapsule Interface will be used.
.. warning::
Due to severe implementation issues, we recommend only considering using the
interchange protocol in the following cases:
- converting to pandas: for pandas >= 2.0.3
- converting from pandas: for pandas >= 3.0.0
Parameters
----------
df : DataFrameXchg
Object supporting the interchange protocol, i.e. `__dataframe__` method.
allow_copy : bool, default: True
Whether to allow copying the memory to perform the conversion
(if false then zero-copy approach is requested).
Returns
-------
pd.DataFrame
A pandas DataFrame built from the provided interchange
protocol object.
See Also
--------
pd.DataFrame : DataFrame class which can be created from various input data
formats, including objects that support the interchange protocol.
Examples
--------
>>> df_not_necessarily_pandas = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
>>> interchange_object = df_not_necessarily_pandas.__dataframe__()
>>> interchange_object.column_names()
Index(['A', 'B'], dtype='str')
>>> df_pandas = pd.api.interchange.from_dataframe(
... interchange_object.select_columns_by_name(["A"])
... )
>>> df_pandas
A
0 1
1 2
These methods (``column_names``, ``select_columns_by_name``) should work
for any dataframe library which implements the interchange protocol.
|
python
|
pandas/core/interchange/from_dataframe.py
| 42
|
[
"df",
"allow_copy"
] |
pd.DataFrame
| true
| 5
| 7.92
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
maybeUpdateLeaderEpoch
|
void maybeUpdateLeaderEpoch(OptionalInt latestLeaderEpoch) {
if (latestLeaderEpoch.isPresent()
&& (currentLeaderEpoch.isEmpty() || currentLeaderEpoch.getAsInt() < latestLeaderEpoch.getAsInt())) {
log.trace("For {}, leader will be updated, currentLeaderEpoch: {}, attemptsWhenLeaderLastChanged:{}, latestLeaderEpoch: {}, current attempt: {}",
this, currentLeaderEpoch, attemptsWhenLeaderLastChanged, latestLeaderEpoch, attempts);
attemptsWhenLeaderLastChanged = attempts();
currentLeaderEpoch = latestLeaderEpoch;
} else {
log.trace("For {}, leader wasn't updated, currentLeaderEpoch: {}, attemptsWhenLeaderLastChanged:{}, latestLeaderEpoch: {}, current attempt: {}",
this, currentLeaderEpoch, attemptsWhenLeaderLastChanged, latestLeaderEpoch, attempts);
}
}
|
It will update the leader to which this batch will be produced for the ongoing attempt, if a newer leader is known.
@param latestLeaderEpoch latest leader's epoch.
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerBatch.java
| 113
|
[
"latestLeaderEpoch"
] |
void
| true
| 4
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
toSplitString
|
public String toSplitString() {
final String msgStr = Objects.toString(message, StringUtils.EMPTY);
final String formattedTime = formatSplitTime();
return msgStr.isEmpty() ? formattedTime : msgStr + StringUtils.SPACE + formattedTime;
}
|
Gets a summary of the last split time that this StopWatch recorded as a string.
<p>
The format used is ISO 8601-like, [<em>message</em> ]<em>hours</em>:<em>minutes</em>:<em>seconds</em>.<em>milliseconds</em>.
</p>
@return the split time as a String.
@since 2.1
@since 3.10 Returns the prefix {@code "message "} if the message is set.
|
java
|
src/main/java/org/apache/commons/lang3/time/StopWatch.java
| 799
|
[] |
String
| true
| 2
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
move_to
|
def move_to(*arrays, xp, device):
"""Move all arrays to `xp` and `device`.
Each array will be moved to the reference namespace and device if
it is not already using it. Otherwise the array is left unchanged.
`array` may contain `None` entries, these are left unchanged.
Sparse arrays are accepted (as pass through) if the reference namespace is
NumPy, in which case they are returned unchanged. Otherwise a `TypeError`
is raised.
Parameters
----------
*arrays : iterable of arrays
Arrays to (potentially) move.
xp : namespace
Array API namespace to move arrays to.
device : device
Array API device to move arrays to.
Returns
-------
arrays : tuple or array
Tuple of arrays with the same namespace and device as reference. Single array
returned if only one `arrays` input.
"""
sparse_mask = [sp.issparse(array) for array in arrays]
none_mask = [array is None for array in arrays]
if any(sparse_mask) and not _is_numpy_namespace(xp):
raise TypeError(
"Sparse arrays are only accepted (and passed through) when the target "
"namespace is Numpy"
)
converted_arrays = []
for array, is_sparse, is_none in zip(arrays, sparse_mask, none_mask):
if is_none:
converted_arrays.append(None)
elif is_sparse:
converted_arrays.append(array)
else:
xp_array, _, device_array = get_namespace_and_device(array)
if xp == xp_array and device == device_array:
converted_arrays.append(array)
else:
try:
# The dlpack protocol is the future proof and library agnostic
# method to transfer arrays across namespace and device boundaries
# hence this method is attempted first and going through NumPy is
# only used as fallback in case of failure.
# Note: copy=None is the default since array-api 2023.12. Namespace
# libraries should only trigger a copy automatically if needed.
array_converted = xp.from_dlpack(array, device=device)
# `AttributeError` occurs when `__dlpack__` and `__dlpack_device__`
# methods are not present on the input array
# `TypeError` and `NotImplementedError` for packages that do not
# yet support dlpack 1.0
# (i.e. the `device`/`copy` kwargs, e.g., torch <= 2.8.0)
# See https://github.com/data-apis/array-api/pull/741 for
# more details about the introduction of the `copy` and `device`
# kwargs in the from_dlpack method and their expected
# meaning by namespaces implementing the array API spec.
# TODO: try removing this once DLPack v1 more widely supported
# TODO: ValueError should not be needed but is in practice:
# https://github.com/numpy/numpy/issues/30341
except (
AttributeError,
TypeError,
NotImplementedError,
BufferError,
ValueError,
):
# Converting to numpy is tricky, handle this via dedicated function
if _is_numpy_namespace(xp):
array_converted = _convert_to_numpy(array, xp_array)
# Convert from numpy, all array libraries can do this
elif _is_numpy_namespace(xp_array):
array_converted = xp.asarray(array, device=device)
else:
# There is no generic way to convert from namespace A to B
# So we first convert from A to numpy and then from numpy to B
# The way to avoid this round trip is to lobby for DLpack
# support in libraries A and B
array_np = _convert_to_numpy(array, xp_array)
array_converted = xp.asarray(array_np, device=device)
converted_arrays.append(array_converted)
return (
converted_arrays[0] if len(converted_arrays) == 1 else tuple(converted_arrays)
)
|
Move all arrays to `xp` and `device`.
Each array will be moved to the reference namespace and device if
it is not already using it. Otherwise the array is left unchanged.
`array` may contain `None` entries, these are left unchanged.
Sparse arrays are accepted (as pass through) if the reference namespace is
NumPy, in which case they are returned unchanged. Otherwise a `TypeError`
is raised.
Parameters
----------
*arrays : iterable of arrays
Arrays to (potentially) move.
xp : namespace
Array API namespace to move arrays to.
device : device
Array API device to move arrays to.
Returns
-------
arrays : tuple or array
Tuple of arrays with the same namespace and device as reference. Single array
returned if only one `arrays` input.
|
python
|
sklearn/utils/_array_api.py
| 466
|
[
"xp",
"device"
] | false
| 14
| 6.16
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
nextCheckIntervalData
|
private CheckIntervalData nextCheckIntervalData(final int increment,
final CheckIntervalData currentData, final State currentState, final long time) {
final CheckIntervalData nextData;
if (stateStrategy(currentState).isCheckIntervalFinished(this, currentData, time)) {
nextData = new CheckIntervalData(increment, time);
} else {
nextData = currentData.increment(increment);
}
return nextData;
}
|
Calculates the next {@link CheckIntervalData} object based on the current data and
the current state. The next data object takes the counter increment and the current
time into account.
@param increment the increment for the internal counter
@param currentData the current check data object
@param currentState the current state of the circuit breaker
@param time the current time
@return the updated {@link CheckIntervalData} object
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/EventCountCircuitBreaker.java
| 504
|
[
"increment",
"currentData",
"currentState",
"time"
] |
CheckIntervalData
| true
| 2
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
contains
|
public boolean contains(final String str) {
return indexOf(str, 0) >= 0;
}
|
Checks if the string builder contains the specified string.
@param str the string to find
@return true if the builder contains the string
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,635
|
[
"str"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
isAutowirable
|
public static boolean isAutowirable(Parameter parameter, int parameterIndex) {
Assert.notNull(parameter, "Parameter must not be null");
AnnotatedElement annotatedParameter = getEffectiveAnnotatedParameter(parameter, parameterIndex);
return (AnnotatedElementUtils.hasAnnotation(annotatedParameter, Autowired.class) ||
AnnotatedElementUtils.hasAnnotation(annotatedParameter, Qualifier.class) ||
AnnotatedElementUtils.hasAnnotation(annotatedParameter, Value.class));
}
|
Determine if the supplied {@link Parameter} can <em>potentially</em> be
autowired from an {@link AutowireCapableBeanFactory}.
<p>Returns {@code true} if the supplied parameter is annotated or
meta-annotated with {@link Autowired @Autowired},
{@link Qualifier @Qualifier}, or {@link Value @Value}.
<p>Note that {@link #resolveDependency} may still be able to resolve the
dependency for the supplied parameter even if this method returns {@code false}.
@param parameter the parameter whose dependency should be autowired
(must not be {@code null})
@param parameterIndex the index of the parameter in the constructor or method
that declares the parameter
@see #resolveDependency
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/annotation/ParameterResolutionDelegate.java
| 84
|
[
"parameter",
"parameterIndex"
] | true
| 3
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
isPrototype
|
@Override
public boolean isPrototype(String name) throws NoSuchBeanDefinitionException {
String beanName = BeanFactoryUtils.transformedBeanName(name);
Object bean = obtainBean(beanName);
return (!BeanFactoryUtils.isFactoryDereference(name) &&
((bean instanceof SmartFactoryBean<?> smartFactoryBean && smartFactoryBean.isPrototype()) ||
(bean instanceof FactoryBean<?> factoryBean && !factoryBean.isSingleton())));
}
|
Add a new singleton bean.
<p>Will overwrite any existing instance for the given name.
@param name the name of the bean
@param bean the bean instance
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/StaticListableBeanFactory.java
| 223
|
[
"name"
] | true
| 5
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
shouldSkip
|
public boolean shouldSkip(AnnotatedTypeMetadata metadata) {
return shouldSkip(metadata, null);
}
|
Determine if an item should be skipped based on {@code @Conditional} annotations.
The {@link ConfigurationPhase} will be deduced from the type of item (i.e. a
{@code @Configuration} class will be {@link ConfigurationPhase#PARSE_CONFIGURATION})
@param metadata the meta data
@return if the item should be skipped
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/ConditionEvaluator.java
| 71
|
[
"metadata"
] | true
| 1
| 6.32
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
decimal_encoder
|
def decimal_encoder(dec_value: Decimal) -> Union[int, float]:
"""
Encodes a Decimal as int if there's no exponent, otherwise float
This is useful when we use ConstrainedDecimal to represent Numeric(x,0)
where an integer (but not int typed) is used. Encoding this as a float
results in failed round-tripping between encode and parse.
Our Id type is a prime example of this.
>>> decimal_encoder(Decimal("1.0"))
1.0
>>> decimal_encoder(Decimal("1"))
1
>>> decimal_encoder(Decimal("NaN"))
nan
"""
exponent = dec_value.as_tuple().exponent
if isinstance(exponent, int) and exponent >= 0:
return int(dec_value)
else:
return float(dec_value)
|
Encodes a Decimal as int if there's no exponent, otherwise float
This is useful when we use ConstrainedDecimal to represent Numeric(x,0)
where an integer (but not int typed) is used. Encoding this as a float
results in failed round-tripping between encode and parse.
Our Id type is a prime example of this.
>>> decimal_encoder(Decimal("1.0"))
1.0
>>> decimal_encoder(Decimal("1"))
1
>>> decimal_encoder(Decimal("NaN"))
nan
|
python
|
fastapi/encoders.py
| 38
|
[
"dec_value"
] |
Union[int, float]
| true
| 4
| 7.12
|
tiangolo/fastapi
| 93,264
|
unknown
| false
|
toString
|
@Override
public String toString() {
return "(timestamp=" + timestamp +
", leaderEpoch=" + leaderEpoch.orElse(null) +
", offset=" + offset + ")";
}
|
Get the leader epoch corresponding to the offset that was found (if one exists).
This can be provided to seek() to ensure that the log hasn't been truncated prior to fetching.
@return The leader epoch or empty if it is not known
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/OffsetAndTimestamp.java
| 64
|
[] |
String
| true
| 1
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
equals
|
@Override
public boolean equals(@Nullable Object object) {
if (object instanceof SipHashFunction) {
SipHashFunction other = (SipHashFunction) object;
return (c == other.c) && (d == other.d) && (k0 == other.k0) && (k1 == other.k1);
}
return false;
}
|
@param c the number of compression rounds (must be positive)
@param d the number of finalization rounds (must be positive)
@param k0 the first half of the key
@param k1 the second half of the key
|
java
|
android/guava/src/com/google/common/hash/SipHashFunction.java
| 83
|
[
"object"
] | true
| 5
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
|
reentrantReadWriteLockVisitor
|
public static <O> ReadWriteLockVisitor<O> reentrantReadWriteLockVisitor(final O object) {
return create(object, new ReentrantReadWriteLock());
}
|
Creates a new instance of {@link ReadWriteLockVisitor} with the given object.
@param <O> The type of the object to protect.
@param object The object to protect.
@return A new {@link ReadWriteLockVisitor}.
@see LockingVisitors
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/locks/LockingVisitors.java
| 734
|
[
"object"
] | true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
isStartOfFunctionTypeOrConstructorType
|
function isStartOfFunctionTypeOrConstructorType(): boolean {
if (token() === SyntaxKind.LessThanToken) {
return true;
}
if (token() === SyntaxKind.OpenParenToken && lookAhead(isUnambiguouslyStartOfFunctionType)) {
return true;
}
return token() === SyntaxKind.NewKeyword ||
token() === SyntaxKind.AbstractKeyword && lookAhead(nextTokenIsNewKeyword);
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 4,852
|
[] | true
| 6
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
findThreadGroups
|
@Deprecated
public static Collection<ThreadGroup> findThreadGroups(final ThreadGroupPredicate predicate) {
return findThreadGroups(getSystemThreadGroup(), true, predicate);
}
|
Finds all active thread groups which match the given predicate.
@param predicate the predicate.
@return An unmodifiable {@link Collection} of active thread groups matching the given predicate.
@throws NullPointerException if the predicate is null.
@throws SecurityException if the current thread cannot access the system thread group.
@throws SecurityException if the current thread cannot modify thread groups from this thread's thread group up to the system thread group.
@deprecated Use {@link #findThreadGroups(Predicate)}.
|
java
|
src/main/java/org/apache/commons/lang3/ThreadUtils.java
| 297
|
[
"predicate"
] | true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
get
|
def get(self: Self, key: Key) -> Value | None:
"""
Retrieve a value from the cache.
Args:
key (Key): The key to look up.
Returns:
Value | None: The cached value if present and version matches, else None.
Raises:
CacheError: If the value is corrupted or cannot be unpickled.
Side Effects:
Removes stale cache files if the version prefix does not match.
"""
fpath = self._fpath_from_key(key)
flock = self._flock_from_fpath(fpath)
with flock:
if not fpath.is_file():
return None
value_bytes = None
prefix_length = len(self.version_prefix)
with open(fpath, "rb") as fp:
if fp.read(prefix_length) == self.version_prefix:
value_bytes = fp.read()
if value_bytes is None:
# version_prefix did not match, so we can't read the stale
# cached value; we should also remove the stale cached value,
# so that key can be re-cached by the newer version
fpath.unlink()
return None
try:
value = pickle.loads(value_bytes)
except pickle.UnpicklingError as err:
raise CacheError(
f"Failed to get key {key!r}, value is potentially corrupted (value is not un-pickle-able)."
) from err
return value
|
Retrieve a value from the cache.
Args:
key (Key): The key to look up.
Returns:
Value | None: The cached value if present and version matches, else None.
Raises:
CacheError: If the value is corrupted or cannot be unpickled.
Side Effects:
Removes stale cache files if the version prefix does not match.
|
python
|
torch/_inductor/cache.py
| 323
|
[
"self",
"key"
] |
Value | None
| true
| 4
| 8.24
|
pytorch/pytorch
| 96,034
|
google
| false
|
isCompatIPv4Address
|
public static boolean isCompatIPv4Address(Inet6Address ip) {
if (!ip.isIPv4CompatibleAddress()) {
return false;
}
byte[] bytes = ip.getAddress();
if ((bytes[12] == 0)
&& (bytes[13] == 0)
&& (bytes[14] == 0)
&& ((bytes[15] == 0) || (bytes[15] == 1))) {
return false;
}
return true;
}
|
Evaluates whether the argument is an IPv6 "compat" address.
<p>An "IPv4 compatible", or "compat", address is one with 96 leading bits of zero, with the
remaining 32 bits interpreted as an IPv4 address. These are conventionally represented in
string literals as {@code "::192.168.0.1"}, though {@code "::c0a8:1"} is also considered an
IPv4 compatible address (and equivalent to {@code "::192.168.0.1"}).
<p>For more on IPv4 compatible addresses see section 2.5.5.1 of <a target="_parent"
href="http://tools.ietf.org/html/rfc4291#section-2.5.5.1">RFC 4291</a>.
<p>NOTE: This method is different from {@link Inet6Address#isIPv4CompatibleAddress} in that it
more correctly classifies {@code "::"} and {@code "::1"} as proper IPv6 addresses (which they
are), NOT IPv4 compatible addresses (which they are generally NOT considered to be).
@param ip {@link Inet6Address} to be examined for embedded IPv4 compatible address format
@return {@code true} if the argument is a valid "compat" address
|
java
|
android/guava/src/com/google/common/net/InetAddresses.java
| 679
|
[
"ip"
] | true
| 7
| 7.6
|
google/guava
| 51,352
|
javadoc
| false
|
|
visitParameter
|
function visitParameter(node: ParameterDeclaration) {
if (parameterIsThisKeyword(node)) {
return undefined;
}
const updated = factory.updateParameterDeclaration(
node,
visitNodes(node.modifiers, node => isDecorator(node) ? visitor(node) : undefined, isModifierLike),
node.dotDotDotToken,
Debug.checkDefined(visitNode(node.name, visitor, isBindingName)),
/*questionToken*/ undefined,
/*type*/ undefined,
visitNode(node.initializer, visitor, isExpression),
);
if (updated !== node) {
// While we emit the source map for the node after skipping decorators and modifiers,
// we need to emit the comments for the original range.
setCommentRange(updated, node);
setTextRange(updated, moveRangePastModifiers(node));
setSourceMapRange(updated, moveRangePastModifiers(node));
setEmitFlags(updated.name, EmitFlags.NoTrailingSourceMap);
}
return updated;
}
|
Determines whether to emit an accessor declaration. We should not emit the
declaration if it does not have a body and is abstract.
@param node The declaration node.
|
typescript
|
src/compiler/transformers/ts.ts
| 1,603
|
[
"node"
] | false
| 4
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
getProperty
|
static String getProperty(final String property, final Supplier<String> defaultIfAbsent) {
try {
if (StringUtils.isEmpty(property)) {
return Suppliers.get(defaultIfAbsent);
}
return StringUtils.getIfEmpty(System.getProperty(property), defaultIfAbsent);
} catch (final SecurityException ignore) {
// We are not allowed to look at this property.
//
// System.err.println("Caught a SecurityException reading the system property '" + property
// + "'; the SystemUtils property value will default to null.");
return defaultIfAbsent.get();
}
}
|
Gets a System property, defaulting to {@code null} if the property cannot be read.
<p>
If a {@link SecurityException} is caught, the return value is {@code null}.
</p>
@param property the system property name.
@param defaultIfAbsent get this Supplier when the property is empty or throws SecurityException.
@return the system property value or {@code null} if a security problem occurs.
|
java
|
src/main/java/org/apache/commons/lang3/SystemProperties.java
| 3,908
|
[
"property",
"defaultIfAbsent"
] |
String
| true
| 3
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
read
|
function read(jsonPath, { base, specifier, isESM } = kEmptyObject) {
// This function will be called by both CJS and ESM, so we need to make sure
// non-null attributes are converted to strings.
const parsed = modulesBinding.readPackageJSON(
jsonPath,
isESM,
base == null ? undefined : `${base}`,
specifier == null ? undefined : `${specifier}`,
);
const result = deserializePackageJSON(jsonPath, parsed);
return {
__proto__: null,
...result.data,
exists: result.exists,
pjsonPath: result.path,
};
}
|
Reads a package.json file and returns the parsed contents.
@param {string} jsonPath
@param {{
base?: URL | string,
specifier?: URL | string,
isESM?: boolean,
}} options
@returns {PackageConfig}
|
javascript
|
lib/internal/modules/package_json_reader.js
| 113
|
[
"jsonPath"
] | false
| 3
| 6.08
|
nodejs/node
| 114,839
|
jsdoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.